Columns: code (string, 4 to 4.48k chars), docstring (string, 1 to 6.45k chars), _id (string, 24 chars)
@remote_compatible
def test_nfc_p2p_tag_enable_disable(dev):
    if "FAIL" in dev[0].request("WPS_NFC_TOKEN NDEF").rstrip():
        raise Exception("Failed to generate password token")
    if "OK" not in dev[0].request("P2P_SET nfc_tag 1"):
        raise Exception("Failed to enable NFC Tag for P2P static handover")
    if "OK" not in dev[0].request("P2P_SET nfc_tag 0"):
        raise Exception("Failed to disable NFC Tag for P2P static handover")
    dev[0].request("SET p2p_no_group_iface 0")
    if "OK" not in dev[0].request("P2P_SET nfc_tag 1"):
        raise Exception("Failed to enable NFC Tag for P2P static handover")
    if "OK" not in dev[0].request("P2P_SET nfc_tag 0"):
        raise Exception("Failed to disable NFC Tag for P2P static handover")
    if "OK" not in dev[0].request("P2P_SET nfc_tag 1"):
        raise Exception("Failed to enable NFC Tag for P2P static handover")
    if "OK" not in dev[0].request("P2P_SET nfc_tag 0"):
        raise Exception("Failed to disable NFC Tag for P2P static handover")
NFC tag enable/disable for P2P
625941bdcb5e8a47e48b79a3
def OpenFeatureClass(self, strName: 'char const *') -> "GsSmarterPtr< GsFeatureClass >":
    return _gskernel.GsDataRoomHouseDataRoom_OpenFeatureClass(self, strName)
Open a vector feature class object. :type strName: string :param strName: name of the vector feature class :rtype: GsSmarterPtr< GsFeatureClass > :return: a pointer to the vector feature class, or NULL
625941bd7b25080760e39350
def main():
    m_a = input()
    s_a = "bob"
    c_a = 0
    for i_a in range(0, len(m_a) - 2):
        j_a = 0
        k_a = i_a
        v_a = 0
        while j_a < 3 and m_a[k_a] == s_a[j_a]:
            v_a += 1
            k_a += 1
            j_a += 1
            if v_a == 3:
                c_a += 1
    print(c_a)
Write a program that prints the number of times the string 'bob' occurs in s. For example, if s = 'azcbobobegghakl', then your program should print Number of times bob occurs is: 2
625941bd3617ad0b5ed67dee
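A minimal standalone check of the same counting logic (a sketch; count_bob is a hypothetical helper, not part of the row above, and the test string comes from the docstring):

def count_bob(s):
    # Count (possibly overlapping) occurrences of "bob".
    return sum(1 for i in range(len(s) - 2) if s[i:i + 3] == "bob")

assert count_bob("azcbobobegghakl") == 2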
def _init():
    return memcachewrapper.MemcacheWrapper(HOSTNAME, PORT, PREFIX, DEBUG)
memcache init
625941bde8904600ed9f1e1f
def __contains__(self, table_name):
    return table_name in self.table_names
Whether or not the named table is in this db
625941bdf7d966606f6a9ef6
def exitFilled(self):
    return self.__exitOrder is not None and self.__exitOrder.is_filled
Returns True if the exit order was filled.
625941bdcc40096d61595847
def _compile_and_test(self, fn, arg_tys, asserts=[], equivs=[]):
    test_pipeline = ArrayAnalysisTester.mk_pipeline(arg_tys)
    analysis = test_pipeline.compile_to_ir(fn, test_idempotence=self.compare_ir)
    if equivs:
        for func in equivs:
            func(analysis.equiv_sets[0])
    if asserts is None:
        self.assertTrue(self._has_no_assertcall(analysis.func_ir))
    else:
        for func in asserts:
            func(analysis.func_ir, analysis.typemap)
Compile the given function and get its IR.
625941bd8da39b475bd64e66
def __init__(self, mat):
    self.mat = mat
Create a transformation from a rotation matrix (unsafe, but faster).
625941bd6aa9bd52df036c98
def schedule_downtime(self, client, object_type, filters, comment, author, duration):
    try_count = 0
    while try_count < 3:
        try:
            try_count = try_count + 1
            now = time.time()
            end_time = now + duration
            results = []
            with Timeout(20):
                host_task = client.actions.schedule_downtime(
                    object_type=object_type, filter=filters, start_time=now,
                    end_time=end_time, duration=duration, comment=comment,
                    author=author)
                if len(host_task['results']) > 0:
                    for result in host_task['results']:
                        if result['code'] == 200.0:
                            results.append(result['status'])
                    return results
                else:
                    return False
        except Timeout.Timeout:
            if try_count == 3:
                return 'Operation timed out'
Schedule downtime for the provided filter
625941bd57b8e32f5248338f
def test_gaussian_kernel(self):
    crkr = CrKr(self.S_2x3, self.C_2x2, self.D_2x3,
                self.ridge_factor_05, self.sigma_05, self.a_1)
    s1 = np.array([[1, 2, 3]])
    s2 = np.array([[4, 5, 6]])
    expected_gk = np.exp(-(self.a_1 * np.power(npla.norm(s1 - s2), 2)
                           / (2 * (self.sigma_05 ** 2))))
    assert_equal(expected_gk, crkr._gaussian_kernel(s1, s2))
Tests if the Gaussian kernel is correctly computed.
625941bd5fdd1c0f98dc0127
def GetLabel(self):
    return self._label
Returns the tool label.
625941bd236d856c2ad446ce
def upper_bound(min_length, max_length, floor, ceiling, min_slope, max_slope):
    from sage.functions.all import floor as flr
    if max_length < infinity:
        return sum([ceiling(j) for j in range(max_length)])
    elif max_slope < 0 and ceiling(1) < infinity:
        maxl = flr(-ceiling(1) / max_slope)
        return ceiling(1) * (maxl + 1) + binomial(maxl + 1, 2) * max_slope
    elif [ceiling(j) for j in range(10000)] == [0] * 10000:
        return 0
    else:
        return infinity
Compute a coarse upper bound on the size of a vector satisfying the constraints.

TESTS::

    sage: import sage.combinat.integer_list as integer_list
    sage: f = lambda x: lambda i: x
    sage: integer_list.upper_bound(0, 4, f(0), f(1), -infinity, infinity)
    4
    sage: integer_list.upper_bound(0, infinity, f(0), f(1), -infinity, infinity)
    +Infinity
    sage: integer_list.upper_bound(0, infinity, f(0), f(1), -infinity, -1)
    1
    sage: integer_list.upper_bound(0, infinity, f(0), f(5), -infinity, -1)
    15
    sage: integer_list.upper_bound(0, infinity, f(0), f(5), -infinity, -2)
    9
625941bd009cb60464c632a9
def get_winner(player1, player2, defeat_mapping=None):
    defeat_mapping = defeat_mapping or _create_defeat_mapping()
    if player1 not in rps or player2 not in rps:
        raise ValueError
    key = player1 + "_" + player2
    if defeat_mapping[key] == 'win':
        return player1
    elif defeat_mapping[key] == 'lose':
        return player2
    elif defeat_mapping[key] == 'draw':
        return "Tie"
Given player1 and player2, determine the game output, returning the appropriate string: Tie, Player1, or Player2 (where Player1 and Player2 are the names passed in). Raise a ValueError if invalid player strings are passed in.
625941bd2ae34c7f2600d027
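A minimal sketch of the external pieces get_winner assumes; rps and _create_defeat_mapping are not shown in the row above, so these definitions are assumptions:

rps = ("rock", "paper", "scissors")

def _create_defeat_mapping():
    # Maps "player1_player2" to the outcome from player1's point of view.
    mapping = {p + "_" + p: "draw" for p in rps}
    for winner, loser in (("rock", "scissors"), ("paper", "rock"), ("scissors", "paper")):
        mapping[winner + "_" + loser] = "win"
        mapping[loser + "_" + winner] = "lose"
    return mapping

# get_winner("rock", "scissors") -> "rock"; get_winner("rock", "rock") -> "Tie"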
def multi(fn):
    @wraps(fn)
    def wrapper(self, *args, **kwargs):
        if self.target.startswith("file://"):
            original_target = self.target
            original_port = self.port
            _, _, feed_path = self.target.partition("file://")
            try:
                with open(feed_path) as file_handler:
                    for target in file_handler:
                        target = target.strip()
                        if not target:
                            continue
                        self.target, _, port = target.partition(":")
                        if port:
                            self.port = port
                        else:
                            self.port = original_port
                        fn(self, *args, **kwargs)
                    self.target = original_target
                    self.port = original_port
                    return
            except IOError:
                return
        else:
            return fn(self, *args, **kwargs)
    return wrapper
Decorator for the exploit.Exploit class. Allows feeding an exploit from a text file containing multiple target definitions. The decorated function will be executed as many times as there are targets in the feed file. WARNING: An important thing to remember is that the decorator will suppress values returned by the decorated function. Since the method that performs the attack is not supposed to return anything, this is not a problem.
625941bd3539df3088e2e241
def _cleanWord(self, word):
    cleaned = sub(r'[\W]', '', word)
    is_valid = cleaned.isalpha() and (len(cleaned) >= self.minLength)
    return cleaned.upper() if is_valid else False
Returns the word in uppercase with all non-alphabetic characters removed. Args: word: String of the word being added to the Trie. Returns: Uppercase string, or False.
625941bd16aa5153ce36236e
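A quick illustration of the cleaning step outside the class (hypothetical input, assuming minLength=3):

from re import sub

cleaned = sub(r'[\W]', '', "don't!")               # strips the apostrophe and '!': "dont"
is_valid = cleaned.isalpha() and len(cleaned) >= 3
print(cleaned.upper() if is_valid else False)      # -> DONT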
def hash_password(password):
    return xsha1(password.lower().encode()).digest()
Returns the XSha1 hash of the given password. Used for account creation.
625941bd63f4b57ef0001016
def generate_policy_string(attribute_master, n_attr):
    policy_str = ''
    OPS = ['and', 'or']
    attr_indices = np.random.choice(range(len(attribute_master)), n_attr, replace=False)
    for attr_index in attr_indices:
        attribute = attribute_master[attr_index]
        policy_str += attribute + " " + OPS[0] + " "
    policy_str = "(" + policy_str[:-4].strip() + ")"
    return policy_str
:param attribute_master: List of all attributes in the system :param n_attr: Number of attributes to be contained in the generated policy :return: A generated policy string
625941bdbf627c535bc130c4
def noam_decay(d_model, warmup_steps):
    with default_main_program()._lr_schedule_guard():
        if imperative_base.enabled():
            decay = imperate_lr.NoamDecay(d_model, warmup_steps)
            return decay
        else:
            global_step = _decay_step_counter(1)
            a = global_step**-0.5
            b = (warmup_steps**-1.5) * global_step
            lr_value = (d_model**-0.5) * nn.elementwise_min(a, b)
            return lr_value
Noam decay method. The numpy implementation of noam decay is as follows.

.. code-block:: python

    import paddle.fluid as fluid
    import numpy as np
    # set hyper parameters
    d_model = 2
    current_steps = 20
    warmup_steps = 200
    # compute
    lr_value = np.power(d_model, -0.5) * np.min([
        np.power(current_steps, -0.5),
        np.power(warmup_steps, -1.5) * current_steps])

Please reference `attention is all you need <https://arxiv.org/pdf/1706.03762.pdf>`_.

Args:
    d_model(Variable): The dimensionality of input and output of model.
    warmup_steps(Variable): A hyperparameter.

Returns:
    The decayed learning rate.

Examples:
    .. code-block:: python

        import paddle.fluid as fluid
        warmup_steps = 100
        learning_rate = 0.01
        lr = fluid.layers.learning_rate_scheduler.noam_decay(
            1 / (warmup_steps * (learning_rate ** 2)), warmup_steps)
625941bd596a8972360899b9
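The docstring's numpy reference computation runs standalone (values taken from the docstring itself):

import numpy as np

d_model, current_steps, warmup_steps = 2, 20, 200
lr_value = np.power(d_model, -0.5) * np.min([
    np.power(current_steps, -0.5),
    np.power(warmup_steps, -1.5) * current_steps])
# 2**-0.5 * min(20**-0.5, 200**-1.5 * 20) ≈ 0.707 * 0.00707 ≈ 0.005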
def calc_ages(self, dob_rows, recent_rows, statuses=True):
    if len(dob_rows) != len(recent_rows):
        raise RuntimeError('dob_rows(%d)' % len(dob_rows)
                           + ' and recent rows(%d)' % len(recent_rows)
                           + ' do not have the same length.')
    ages = []
    status = []
    for i in range(len(recent_rows)):
        age = None
        try:
            age = self.calc_age(recent_rows[i], dob_rows[i])
            status.append('okay')
        except (we.date_empty_err, we.date_wrong_format_err) as e:
            age = 9998
            status.append(str(e).replace('fdate', 'recent_row').replace('tdate', 'dob_row'))
        ages.append(age)
    if statuses:
        return ages, status
    else:
        return ages
Creates a set of rows, matched to the provided rows, filled with the ages calculated between them. Returns ages and statuses if `statuses` is true, else just the list of ages. A status is a string describing the problem, if any, while calculating an age. Possibilities are: okay, or recent_row/dob_row is missing/not in the correct format. If there's a problem, the age is 9998 (missing data in wtp_convention).
625941bd4e4d5625662d42d1
def get_label_ids_by_category(crop: Dict[str, Any], category: str) -> List[int]:
    return [ll[0] for ll in crop['labels'][category]]
Get all label ids from a crop that belong to a certain category.

Args:
    crop: Instance of an entry in the crop database.
    category: one of "present_annotated", "present_unannotated", "absent_annotated", "present_partial_annotation"

Returns:
    All label ids that belong to the `category` for that crop.
625941bdd4950a0f3b08c247
def moveLeft(self):
    newXPos = self.positionA.x
    newYPos = self.positionA.y - 1
    moveCoordinate = Coordinate([newXPos, newYPos])
    return self.__move(moveCoordinate)
Move the player to the left
625941bdc432627299f04b39
def _delete_asteroid(self, asteroid):
    self._screen.unregister_asteroid(asteroid)
    self.__asteroids.remove(asteroid)
The function unregisters the asteroid and deletes it from the asteroids list. :param asteroid: the asteroid that will be deleted (type asteroid). :return: None.
625941bd97e22403b379ce8e
def validate(self):
    if not self.help and not self.version and not self.diagnostics:
        if self.actions is None or len(self.actions) == 0:
            raise ValueError("At least one action must be specified.")
    if self.managed and self.managedOnly:
        raise ValueError("The --managed and --managed-only options may not be combined.")
Validates command-line options represented by the object.

Unless ``--help`` or ``--version`` are supplied, at least one action must be specified. Other validations (as for allowed values for particular options) will be taken care of at assignment time by the properties functionality.

*Note:* The command line format is specified by the ``_usage`` function. Call ``_usage`` to see a usage statement for the cback3 script.

Raises:
    ValueError: If one of the validations fails
625941bd56ac1b37e62640ca
def delete_lb(self, lb_id):
    url = "%s/loadbalancers/%s" % (self.api_user_url, lb_id)
    request_result = requests.delete(url, headers=self.api_headers, verify=False)
    if self.verbose:
        self.logging.info(request_result.status_code)
        self.logging.info(request_result.text)
    return request_result
Delete the loadbalancer identified by 'lb_id'
625941bd8a349b6b435e8069
def findCycle(self) -> List[Tuple[QuantumNode, QuantumNode]]:
    try:
        return nx.find_cycle(self._connectedQuanta)
    except nx.NetworkXNoCycle:
        return []
Check a graph for the presence of cycles and return the edges of any cycles found, or an empty list if there is no cycle.

Returns
-------
result : list of tuple of `QuantumNode`, `QuantumNode`
    A list of any graph edges that form a cycle, or an empty list if there is no cycle. An empty list is returned so that ``if graph.findCycle():`` reads naturally, since an empty list is falsy.
625941bd046cf37aa974cc40
@login_manager.user_loader
def login_loader(userid):
    return User.query.get(int(userid))
Pull a user object from the database. This is used for loading users from existing sessions.
625941bdf7d966606f6a9ef7
def unget_service(self, bundle, reference, service=None):
    with self.__svc_lock:
        if reference.is_prototype():
            return self.__unget_service_from_factory(bundle, reference, service)
        elif reference.is_factory():
            return self.__unget_service_from_factory(bundle, reference)
        try:
            imports = self.__bundle_imports[bundle]
            if not imports[reference].dec():
                del imports[reference]
        except KeyError:
            return False
        else:
            if not imports:
                del self.__bundle_imports[bundle]
            reference.unused_by(bundle)
            return True
Removes the usage of a service by a bundle.

:param bundle: The bundle that used the service
:param reference: A service reference
:param service: Service instance (for Prototype Service Factories)
:return: True if the bundle usage has been removed
625941bd4428ac0f6e5ba6e7
def check_out_sz(self) -> int:
    x = torch.rand((1, self.n_fpv, self.n_v))
    training = self.training
    self.eval()
    x = self.forward(x)
    if training:
        self.train()
    return x.size(-1)
Automatically computes the output size of the head by passing random data of the expected shape through it. Returns: x.size(-1), where x is the outgoing tensor from the head.
625941bd2c8b7c6e89b356b8
def arctopoints(center, start, angle, arcerror=0.1, arcunits=2500):
    r = wxPointUtil.distance(center, start)
    maxerror = max(arcerror / r, arcunits)
    if _user_stacks['Params'][-1].get('debug', None):
        output('maxerror = max(arcerror/r,arcunits) = {}/{},{} = {}'.format(arcerror, r, arcunits, maxerror))
    nmin = int(angle / (2 * math.acos(1 - maxerror))) + 1
    startvector = (start[0] - center[0], start[1] - center[1])
    startangledeg = math.atan2(float(startvector[1]), startvector[0]) * 180 / math.pi
    endangledeg = startangledeg + angle
    # Sample nmin+1 angles along the arc, convert each back to x/y,
    # and translate by the center (the conversion must happen per theta).
    points = []
    for theta in itertools.starmap(operator.mul,
                                   itertools.izip(xrange(0, nmin + 1),
                                                  itertools.cycle((float(angle) / nmin,)))):
        xy = wxPointUtil.toxy(r, (theta + startangledeg) * math.pi / 180)
        points.append((xy[0] + center[0], xy[1] + center[1]))
    return points
maxerror as a percentage (0.0-1.0) of radius
625941bda8370b7717052797
@superState('spinAtHome')
@stay
def doSecondHalfSpin(player):
    if player.firstFrame():
        player.brain.tracker.repeatFixedPitchLookAhead()
        if player.brain.playerNumber == 3:
            player.setWalk(0, 0, speeds.SPEED_SIX)
        else:
            player.setWalk(0, 0, -speeds.SPEED_SIX)
    if player.stateTime > chaseConstants.SPEED_SIX_SPUN_ONCE_TIME / 2:
        if role.isLeftDefender(player.role):
            return player.goNow('defenderPan')
        else:
            return player.goNow('playOffBall')
Keep spinning in the same direction.
625941bdbde94217f3682cea
def pp_random_noise(self, sample):
    xy_ = sample["ink"][:, :, 0:2]
    std_ = tf.math.reduce_std(xy_, axis=1)
    noise_ = tf.transpose(
        tf.random.normal([tf.shape(xy_)[1], 1, 1], 0, std_ / self.random_noise_factor),
        [1, 0, 2])
    if self.gt_targets:
        if "target_ink" not in sample:
            sample["target_ink"] = sample["ink"]
            sample["target_stroke_length"] = sample["stroke_length"]
    sample["ink"] = tf.concat([sample["ink"][:, :, 0:2] + noise_, sample["ink"][:, :, 2:]], axis=-1)
    return sample
Applies random Gaussian noise.
625941bd4527f215b584c350
def find_location(self, pos: xr.Dataset):
    p = pos.interp(time=self.time)
    self.lon = p.lon.data
    self.lat = p.lat.data
Find ship location for sounding time in GPS track.

Parameters
----------
pos : xr.Dataset
    GPS track in xarray.Dataset with data variables lon and lat and coordinate time.
625941bdd10714528d5ffbd6
def on_property_update(self, name, value):
    with self._properties_lock:
        self.Logger.debug("updating %s (%s)", name, value)
        self._updated_properties.add(name)
    self.try_update()
Handler for when a watched property is updated. We only store the name, since the value may change a lot. The value is also of unknown type, not the json-compatible types needed.
625941bd0a366e3fb873e70d
def CreateVendorData(self, numberVendors):
    indices = range(numberVendors)
    entityCount = 0
    vendorDic = {}
    for index in indices:
        vendorIndex = self.InitialVendorIndex + index
        indexString = str(vendorIndex)
        vendorKeyName = 'Ven' + indexString.rjust(4, '0')
        entityAttributeDic = self.randomGenerator.CreateVendorEntityAttributeDic(vendorKeyName)
        self.VendorKeyNames.append(vendorKeyName)
        self.VendorAllEntityAttributeDic[vendorKeyName] = entityAttributeDic[vendorKeyName]
        vendorDic[vendorKeyName] = entityAttributeDic[vendorKeyName]
        entityCount += 1
        if entityCount >= self.MaxEntitiesPerChangeSet:
            self.UploadChangeSet(vendorDic, 'Vendor', 'Vendor')
            vendorDic = {}
            entityCount = 0
    if entityCount > 0:
        self.UploadChangeSet(vendorDic, 'Vendor', 'Vendor')
Create random Vendor data and upload
625941bd5510c4643540f2e2
def stop(universal_id: str) -> dict:
    info = current(universal_id)
    info['remaining'] = pool[universal_id]['app'].get_remaining_solutions()
    info['addscore'] = pool[universal_id]['addscore']
    pool[universal_id]['app'].stop()
    return info
Stop the game.
625941bd91f36d47f21ac3e5
def _test_pools_4(self):
    self.session_login()
raid5 tests
625941bd3eb6a72ae02ec3cb
def __single_turn(self):
    pass
Note - this function is here to guide you and it is *not mandatory* to implement it. The logic defined by this function must be implemented, but if you wish to do so in another function (or some other functions) that is OK.

The function runs one round of the game:
1. Print the board to the screen.
2. Get the user's input of which color car to move, and what direction to move it.
2.a. Check that the input is valid. If not, print an error message and return to step 2.
3. Move the car according to the user's input. If the movement failed (trying to move out of the board, etc.), return to step 2.
4. Report the result of the current round to the user.
625941bd3c8af77a43ae3694
def rational_quadratic(x, y, c=0):
    d = dist.sqeuclidean(x, y)
    return 1 - d / (d + c)
Compute a rational quadratic kernel.

The Rational Quadratic kernel is less computationally intensive than the Gaussian kernel and can be used as an alternative when using the Gaussian becomes too expensive:

    K(x, y) = 1 - (||x - y||^2 / (||x - y||^2 + c))

where `x` and `y` are vectors in the input space (i.e., vectors of features computed from training or test samples), ||x - y||^2 is the squared Euclidean norm, and `c` ≥ 0 is a free parameter (default=0).
625941bd0a366e3fb873e70e
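A quick numeric check of the formula, assuming `dist` is SciPy's scipy.spatial.distance (an assumption; the row above does not show its imports):

import numpy as np
from scipy.spatial import distance as dist

x, y = np.array([0.0, 1.0]), np.array([1.0, 3.0])
d = dist.sqeuclidean(x, y)   # ||x - y||^2 = 1 + 4 = 5
k = 1 - d / (d + 2.0)        # with c = 2: 1 - 5/7 ≈ 0.286
# note: with the default c = 0 the kernel is 0 for any x != y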
def power(self, p, n):
    if n == 0 or self.is_null(p):
        return NULL_POINT
    res = NULL_POINT
    while n:
        if n & 1:
            res = self.add(res, p)
        p = self.add(p, p)
        n >>= 1
    return res
n✕P or (P + P + ... + P) n times
625941bd3d592f4c4ed1cf6c
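The loop above is the standard double-and-add algorithm. The same control flow over plain integers (where "add" is + and the null point is 0) makes the O(log n) structure easy to verify:

def int_power(p, n):
    res = 0
    while n:
        if n & 1:      # add the current doubling if this bit of n is set
            res += p
        p += p         # double
        n >>= 1
    return res

assert int_power(7, 13) == 91   # 13 = 0b1101: 7 + 28 + 56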
def get_total_buy_order_value(self):
    a = self.get_open_orders_pd()
    a = a[a.action == 'BUY']
    return sum(a.lmtPrice * a.totalQuantity)
Retrieves current buy orders and calculates how much value they add up to. Returns: float
625941bd85dfad0860c3ad4f
def p_expression_uminus(p):
    p[0] = UnaryOpUminus("NEGATE " + p[1], p[2])
    p[0].lineno = p.lineno(1)
    p[0].lexpos = p.lexpos(1)
expression : MINUS expression %prec UMINUS
625941bdcc40096d61595848
def extract_entities(result):
    return result.get('entities', [])
Extracts entities from a parsing result.
625941bd63b5f9789fde6fdc
def column_from_classes(models):
    if not isinstance(models, list):
        models = [models]
    items = list()
    for model in models:
        try:
            for col in model.__table__.c:
                items.extend([(str(col).replace('.', '_'), col)])
        except AttributeError as error:
            raise error
    return List(Enum(models[0].__name__ + 'SearchEnum', items))
Create a Graphene List to select the columns from SQLAlchemy classes
625941bd56b00c62f0f1454e
def polygonize(raster_path, labels_path, shapefile_path, layer_name='thematic', class_name='class', id='id'):
    open_labeles = open(labels_path)
    reader = csv.reader(open_labeles, delimiter='\n')
    lables_lst = []
    for line in reader:
        lables_lst += line
    lables_dict = {}
    key = 0
    for i in range(len(lables_lst)):
        key += 1
        lables_dict[key] = lables_lst[i]
    type_mapping = {gdal.GDT_Byte: ogr.OFTInteger, gdal.GDT_UInt16: ogr.OFTInteger,
                    gdal.GDT_Int16: ogr.OFTInteger, gdal.GDT_UInt32: ogr.OFTInteger,
                    gdal.GDT_Int32: ogr.OFTInteger, gdal.GDT_Float32: ogr.OFTReal,
                    gdal.GDT_Float64: ogr.OFTReal, gdal.GDT_CInt16: ogr.OFTInteger,
                    gdal.GDT_CInt32: ogr.OFTInteger, gdal.GDT_CFloat32: ogr.OFTReal,
                    gdal.GDT_CFloat64: ogr.OFTReal}
    raster_driver = gdal.GetDriverByName('GTiff')
    src_raster = gdal.Open(raster_path)
    input_band = src_raster.GetRasterBand(1)
    output_shp = shapefile_path
    shp_driver = ogr.GetDriverByName('ESRI Shapefile')
    output_shapefile = shp_driver.CreateDataSource(output_shp)
    if src_raster.GetProjectionRef() != ' ':
        srs = osr.SpatialReference(src_raster.GetProjectionRef())
    dst_layer = output_shapefile.CreateLayer(layer_name, geom_type=ogr.wkbPolygon, srs=srs)
    raster_field = ogr.FieldDefn(id, type_mapping[input_band.DataType])
    dst_layer.CreateField(raster_field)
    class_field = ogr.FieldDefn(class_name)
    dst_layer.CreateField(class_field)
    gdal.Polygonize(input_band, input_band, dst_layer, 0, [], callback=None)
    dst_layer.SyncToDisk()
    dataSource = shp_driver.Open(output_shp, 1)
    layer = dataSource.GetLayerByName(layer_name)
    for feature in layer:
        layer.SetFeature(feature)
        pixval = int(feature.GetField(id))
        if pixval in lables_dict:
            feature.SetField(1, lables_dict[pixval])
            layer.SetFeature(feature)
    print('End')
This function converts a raster to a shapefile and assigns a class to each polygon according to its pixel value.

Args:
    raster_path(str): the path from which the raster to be converted will be imported
    labels_path(str): the path from which the txt file that contains the labels will be imported
    shapefile_path(str): the path for storing the new shapefile
    layer_name(str): the new shapefile name, defaults to 'thematic'
    class_name(str): the name of the class field, defaults to 'class'
    id(str): the name of the id field (this field contains the original pixel value), defaults to 'id'
625941bd9b70327d1c4e0cca
def getCurrentPortsInformations(self):
    status = {}
    for port in self.getPorts():
        status[port.getID()] = port.getCurrentInformations()
    return status
Returns all current information about all ports as a dict. This is relevant, for example, for the user interface. :return: a collection of all information about the current ports. :rtype: dict
625941bd5fcc89381b1e15b3
def __init__(self, num_partitions: int):
    if num_partitions < 1:
        raise ValueError('num_partitions must be >= 1.')
    if num_partitions != 1:
        self._hasher = ColumnHasher(num_partitions)
    else:
        self._hasher = None
Initializes KeyAndSplitByFeatureFn.

Args:
    num_partitions: The number of partitions to divide features/cross-features into. Must be >= 1.
625941bd8e05c05ec3eea268
def need_to_analyze(self, event):
    chk_rec_id = event.rec_id
    if chk_rec_id > self.pool_rec_id:
        if len(self.rec_ids) != 0:
            self.msg_target.warning('Not all priming events were available. Missing: {0}'.format(str(self.rec_ids)))
        self._start_pool()
        raise CheckpointRecoveryComplete(self.pool_rec_id, '{0}'.format(self.name))
    if chk_rec_id in self.rec_ids:
        self.prime_incidents.append(event)
        self.rec_ids.remove(chk_rec_id)
    return False
If the event is before my checkpointed rec_id, then it doesn't need to be processed.
625941bd7d43ff24873a2b94
def add_turbine(self, turbine):
    self.turbines.append(turbine)
Adds a turbine to the windpark.

Parameters
----------
turbine : Turbine
    Turbine to add.
625941bdd99f1b3c44c6748b
def headers(self):
    headers = {
        "Authorization": "Bearer " + str(self.auth.Bearer(self.user_Id))
    }
    save(self.auth.prefs, self.file_prefs)
    return headers
docstring for headers
625941bd435de62698dfdb49
def test_html_esc(self):
    self.assertEqual("&amp;", General.htmlesc("&"))
    self.assertEqual("&lt;", General.htmlesc("<"))
    self.assertEqual("&gt;", General.htmlesc(">"))
Check that our HTML escaping works ok. ( & -> &amp; etc)
625941bd8a349b6b435e806a
def relevant_parameters(df, ctrls_std_rel_min=0.001, ctrls_std_rel_max=0.1, group_by="Plate"):
    assert is_pandas(df), "df has to be a Pandas DataFrame."
    relevant_table = FINAL_PARAMETERS.copy()
    ctrl_set = set(df.keys())
    plates = sorted(set(df[group_by]))
    for plate in plates:
        debug_print("Processing plate", plate)
        controls = df[(df[group_by] == plate) & (df["WellType"] == "Control")].select_dtypes(include=[np.number])
        median = controls.median()
        std = controls.quantile(q=QUANT).std()
        ds = std / median >= ctrls_std_rel_min
        tmp_set = set([p for p in ds.keys() if ds[p]])
        ctrl_set.intersection_update(tmp_set)
        debug_print("ctrl_set", len(ctrl_set))
        ds = std / median <= ctrls_std_rel_max
        tmp_set = set([p for p in ds.keys() if ds[p]])
        ctrl_set.intersection_update(tmp_set)
        debug_print("ctrl_set", len(ctrl_set))
    relevant_table.extend(list(ctrl_set))
    debug_print("relevant_table", len(relevant_table))
    result_keys = list(df.keys())
    keep = []
    for key in result_keys:
        if key in relevant_table:
            keep.append(key)
    result = df[keep]
    debug_print("keep", len(keep))
    num_parm = len(measurements(result))
    print_log(result, "relevant parameters", "{:.3f}/{:.3f}/{:4d}".format(ctrls_std_rel_min, ctrls_std_rel_max, num_parm))
    return result
...std_rel...: MAD relative to the median value. df is a Pandas DataFrame.
625941bdbe7bc26dc91cd4fb
def app_SN_animated_gradient_plot(self):
    print('this option is yet to be implemented')
Plots animated temperature and moisture gradient.
625941bd2eb69b55b151c7a2
def remote_api_shell(servername, appid, path, secure, rpc_server_factory):
    os.environ['AUTH_DOMAIN'] = "appscale"
    remote_api_stub.ConfigureRemoteApi(appid, path, auth_func, servername=servername,
                                       save_cookies=True, secure=secure,
                                       rpc_server_factory=rpc_server_factory)
    remote_api_stub.MaybeInvokeAuthentication()
    os.environ['SERVER_SOFTWARE'] = 'Development (remote_api_shell)/1.0'
    if not appid:
        appid = os.environ['APPLICATION_ID']
    sys.ps1 = '%s> ' % appid
    if readline is not None:
        readline.parse_and_bind('tab: complete')
        atexit.register(lambda: readline.write_history_file(HISTORY_PATH))
        if os.path.exists(HISTORY_PATH):
            readline.read_history_file(HISTORY_PATH)
    if '' not in sys.path:
        sys.path.insert(0, '')
    preimported_locals = {
        'memcache': memcache,
        'urlfetch': urlfetch,
        'users': users,
        'db': db,
        'ndb': ndb,
    }
    code.interact(banner=BANNER, local=preimported_locals)
Actually run the remote_api_shell.
625941bd4c3428357757c220
def register_pvs(self, suite=None, pv_list=None):
    if suite is None and pv_list is None:
        raise NotImplementedError("Expecting pv_list or suite to be provided")
    for pv_name in set(pv_list if pv_list is not None else []):
        self.pvs_refs[pv_name] = PVInfo(pv_name, connection_callback=self.connection_callback)
    if suite is not None:
        pvs_from_suite(suite, ref_dict=self.pvs_refs,
                       connection_callback=self.connection_callback)
    all_connected = True
    time.sleep(1)
    for pv in self.pvs_refs.values():
        if not pv.check_connection():
            all_connected = False
            logger.log(LVL_PV_DISCONNECTED, "PV is unreachable: %s" % pv.name)
            self.queue.put(pv.data)
    return all_connected, self.pvs_refs
Check connection of all the PVs declared in suite
625941bd507cdc57c6306bcb
def test_list_nodes(self):
    nodes = self.driver.list_nodes()
    self.assertEqual(len(nodes), 2)
    node = nodes[0]
    self.assertEqual(node.id, '5')
    self.assertEqual(node.name, 'Compute 5')
    self.assertEqual(node.state, OpenNebulaNodeDriver.NODE_STATE_MAP['RUNNING'])
    self.assertEqual(node.public_ips[0].id, '5')
    self.assertEqual(node.public_ips[0].name, None)
    self.assertEqual(node.public_ips[0].address, '192.168.0.1')
    self.assertEqual(node.public_ips[0].size, 1)
    self.assertEqual(node.public_ips[1].id, '15')
    self.assertEqual(node.public_ips[1].name, None)
    self.assertEqual(node.public_ips[1].address, '192.168.1.1')
    self.assertEqual(node.public_ips[1].size, 1)
    self.assertEqual(node.private_ips, [])
    self.assertEqual(node.image.id, '5')
    self.assertEqual(node.image.extra['dev'], 'sda1')
    node = nodes[1]
    self.assertEqual(node.id, '15')
    self.assertEqual(node.name, 'Compute 15')
    self.assertEqual(node.state, OpenNebulaNodeDriver.NODE_STATE_MAP['RUNNING'])
    self.assertEqual(node.public_ips[0].id, '5')
    self.assertEqual(node.public_ips[0].name, None)
    self.assertEqual(node.public_ips[0].address, '192.168.0.2')
    self.assertEqual(node.public_ips[0].size, 1)
    self.assertEqual(node.public_ips[1].id, '15')
    self.assertEqual(node.public_ips[1].name, None)
    self.assertEqual(node.public_ips[1].address, '192.168.1.2')
    self.assertEqual(node.public_ips[1].size, 1)
    self.assertEqual(node.private_ips, [])
    self.assertEqual(node.image.id, '15')
    self.assertEqual(node.image.extra['dev'], 'sda1')
Test list_nodes functionality.
625941bdbf627c535bc130c5
def number_to_word(number):
    if number < 20:
        number = first_numbers[number]
    elif number < 100 and number % 10 == 0:
        number = tens[int(number / 10 - 1)]
    elif number < 1000 and number % 100 == 0:
        # exact multiples of 100 only; values such as 110 fall through
        # to the general three-digit case below
        number = first_numbers[int(number / 100)] + " hundred"
    elif number == 1000:
        number = "one thousand"
    elif number < 100 and number % 10 != 0:
        number = tens[int(number // 10 - 1)] + "-" + first_numbers[number % 10]
    elif number < 1000:
        if number % 100 < 20:
            number = first_numbers[int(number // 100)] + " hundred and " + first_numbers[number % 100]
        else:
            number = (first_numbers[int(number // 100)] + " hundred and "
                      + tens[int(number % 100 // 10 - 1)] + "-" + first_numbers[int(number % 10)])
    return number
Converts number into words, when the number is one thousand or less.
625941bd21bff66bcd68484b
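The lookup tables the function relies on are not shown in the row above; a plausible sketch (the names match the code, the contents are assumptions) plus two spot checks:

first_numbers = ["zero", "one", "two", "three", "four", "five", "six", "seven",
                 "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen",
                 "fifteen", "sixteen", "seventeen", "eighteen", "nineteen"]
tens = ["ten", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]

assert number_to_word(21) == "twenty-one"
assert number_to_word(342) == "three hundred and forty-two"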
def test_resource_None_scope_view_not_authorized(self):
    self.scope = Scope.objects.create(name="some_new_scope")
    self._request_token(scope=self.scope.name)
    self._authorize_and_access_token_using_form()
    response = self._oauth_signed_get("/oauth/some/")
    self.assertEqual(response.status_code, 401)
Tests that a view created with the @oauth_required decorator won't give access when requested using a token with scope != "all".
625941bd4a966d76dd550f03
def objects(self):
    return [node['item'].object for node in self.topLevel()]
Returns a list of all the objects. :return: list of objects
625941bd24f1403a92600a60
def __abs__(self):
    val = UVal(self)
    val._value = abs(val._value)
    return val
>>> print(abs(UVal(-1.1, {})))
1.100 {}
>>> print(abs(UVal(1.1, {})))
1.100 {}
625941bd94891a1f4081b99f
def remove_schema_node(self, node_id):
    data_typing = self.get_data_typing()
    instances = keys_by_value(data_typing, node_id)
    if len(instances) > 0:
        raise RewritingError(
            "Cannot remove '{}' from the schema: ".format(node_id) +
            "'{}' has instances in the data ({})".format(node_id, instances))
    else:
        g = self.get_schema()
        g.remove_node(node_id)
Remove a schema node.
625941bdcdde0d52a9e52f26
def test__TIME_to_python(self):
    cases = [
        (b'45:34:10',
         datetime.timedelta(hours=45, minutes=34, seconds=10)),
        (b'-45:34:10',
         datetime.timedelta(-2, 8750)),
        (b'45:34:10.010101',
         datetime.timedelta(hours=45, minutes=34, seconds=10, microseconds=10101)),
        (b'-45:34:10.010101',
         datetime.timedelta(-2, 8749, 989899)),
    ]
    for i, case in enumerate(cases):
        data, exp = case
        self.assertEqual(exp, self.cnv._TIME_to_python(data),
                         "Case {0} failed: {1}; got {2}".format(
                             i + 1, repr(data), repr(self.cnv._TIME_to_python(data))))
Convert a MySQL TIME to a Python datetime.timedelta type
625941bd91af0d3eaac9b90c
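Why b'-45:34:10' maps to timedelta(-2, 8750): Python normalizes negative timedeltas so that 0 <= seconds < 86400. A standalone check:

import datetime

total = -(45 * 3600 + 34 * 60 + 10)   # -164050 seconds
assert datetime.timedelta(seconds=total) == datetime.timedelta(-2, 8750)
# -164050 = -2 * 86400 + 8750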
def p_iteration_stmt(p):
    p[0] = iast.IterationStmt(p[2], p[4])
iterationStmt : WHILE simpleExpr COL suite
625941bd29b78933be1e55a8
def report(self, report_format: str=None, **kwargs) -> bool:
    if report_format is None:
        if self.default_format is None:
            raise ValueError("Either a default format needs to be set for "
                             "this {} or the name of the report format "
                             "needs to be supplied to the .report method."
                             "".format(self.__class__.__name__))
        rf = self.default_format
    else:
        rf = self.report_formats[report_format]
    return rf.report(**kwargs)
Create a report with the given format; if none is given, the default format is used.

Args:
    report_format (str, optional): The report format to use
    **kwargs: Passed on to the ReportFormat.report() call

Returns:
    bool: Whether there was a report

Raises:
    ValueError: If no default format was set and no report format name was given
625941bdde87d2750b85fc86
def build_network(self) -> None:
    self.model = Sequential()
    self.model.add(Dense(self.n_classes, activation='sigmoid', input_dim=self.x_train.shape[1]))
    self.model.add(Dropout(0.7))
    self.model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    return
Builds the network architecture using Keras utilities and compiles it. One hidden layer implementing logistic regression.
625941bd0fa83653e4656eb3
def test_es_source(self):
    return self._get_es_client().ping()
Returns True if the handler can ping the Elasticsearch servers. Can be used to confirm that the setup of a handler has been done properly and that things like authentication are working. :return: A boolean, True if the connection to the Elasticsearch host was successful
625941bdcdde0d52a9e52f27
def test_invalid_source_type(self):
    request_json = {'report_type': 'details',
                    'sources': [{'server_id': self.server_id,
                                 'report_version': create_report_version(),
                                 'source_name': self.net_source.name,
                                 'source_type': 'abc'}]}
    response_json = self.merge_details_from_source_expect_400(request_json)
    self.assertEqual(len(response_json['valid_sources']), 0)
    self.assertEqual(len(response_json['invalid_sources']), 1)
    valid_choices = ', '.join(
        [valid_type[0] for valid_type in Source.SOURCE_TYPE_CHOICES])
    self.assertEqual(
        response_json['invalid_sources'][0]['errors']['source_type'],
        messages.FC_MUST_BE_ONE_OF % valid_choices)
Test source_type has an invalid value.
625941bdd7e4931a7ee9de13
def nearest_neighbors(self, word, k=1, exclude=[], metric="cosine"):
    if isinstance(word, string_types):
        assert word in self, "Word not found in the vocabulary"
        v = self[word]
    else:
        v = word
    D = pairwise_distances(self.vectors, v.reshape(1, -1), metric=metric)
    if isinstance(word, string_types):
        D[self.vocabulary.word_id[word]] = D.max()
    for w in exclude:
        D[self.vocabulary.word_id[w]] = D.max()
    return [self.vocabulary.id_word[id] for id in D.argsort(axis=0).flatten()[0:k]]
Find nearest neighbors of a given word.

Parameters
----------
word: string or vector
    Query word or vector.
k: int, default: 1
    Number of nearest neighbours to return.
metric: string, default: 'cosine'
    Metric to use.
exclude: list, default: []
    Words to omit in answer.

Returns
-------
n: list
    Nearest neighbors.
625941bd3617ad0b5ed67def
def complement_bayes_prob(self, item, cat):
    cat_prob = float(self.get_cat_count(cat)) / self.total_count()
    doc_prob = self.complement_doc_prob(item, cat)
    return math.log(cat_prob) - math.log(doc_prob)
Compute the probability that the item belongs to a category other than cat.
625941bde64d504609d74737
def compile_source(source: str,
                   preprocessor: Optional[PreprocessorFunc],
                   parser: Grammar,
                   transformer: TransformerCallable,
                   compiler: CompilerCallable,
                   *, preserve_AST: bool = False) -> CompilationResult:
    ast = None
    original_text = load_if_file(source)
    source_name = source if is_filename(source) else ''
    log_file_name = logfile_basename(source, compiler) if is_logging() else ''
    if not hasattr(parser, 'free_char_parsefunc__') or parser.history_tracking__:
        log_syntax_trees = get_config_value('log_syntax_trees')
    else:
        log_syntax_trees = set()
    errors = []
    if preprocessor is None:
        source_text = original_text
        source_mapping = gen_neutral_srcmap_func(source_text, source_name)
    else:
        _, source_text, source_mapping, errors = preprocessor(original_text, source_name)
    if has_errors(errors, FATAL):
        return CompilationResult(None, errors, None)
    syntax_tree = parser(source_text, source_mapping=source_mapping)
    for e in errors:
        syntax_tree.add_error(None, e)
    syntax_tree.source = original_text
    syntax_tree.source_mapping = source_mapping
    if 'cst' in log_syntax_trees:
        log_ST(syntax_tree, log_file_name + '.cst')
    if parser.history_tracking__:
        log_parsing_history(parser, log_file_name)
    result = None
    if not is_fatal(syntax_tree.error_flag):
        if is_error(syntax_tree.error_flag):
            try:
                transformer(syntax_tree)
            except Exception as e:
                syntax_tree.new_error(
                    syntax_tree,
                    "AST-Transformation failed due to earlier parser errors. "
                    "Crash Message: %s: %s" % (e.__class__.__name__, str(e)),
                    AST_TRANSFORM_CRASH)
        else:
            transformer(syntax_tree)
        if 'ast' in log_syntax_trees:
            log_ST(syntax_tree, log_file_name + '.ast')
        if not is_fatal(syntax_tree.error_flag):
            if preserve_AST:
                ast = copy.deepcopy(syntax_tree)
            result = process_tree(compiler, syntax_tree)
    messages = syntax_tree.errors_sorted
    return CompilationResult(result, messages, ast)
Compiles a source in four stages:

1. Pre-Processing (if needed)
2. Parsing
3. AST-transformation
4. Compiling.

The later stages, AST-transformation and compilation, will only be invoked if no fatal errors occurred in any of the earlier stages of the processing pipeline.

:param source: The input text for compilation or the name of a file containing the input text.
:param preprocessor: text -> text. A preprocessor function or None, if no preprocessor is needed.
:param parser: A parsing function or grammar class.
:param transformer: A transformation function that takes the root-node of the concrete syntax tree as an argument and transforms it (in place) into an abstract syntax tree.
:param compiler: A compiler function or compiler class instance.
:param preserve_AST: Preserves the AST-tree.
:returns: The result of the compilation as a 3-tuple (result, errors, abstract syntax tree). In detail: 1. The result as returned by the compiler or ``None`` in case of failure, 2. A list of error or warning messages, 3. The root-node of the abstract syntax tree if `preserve_ast` is True or `None` otherwise.
625941bd38b623060ff0ace5
def _get_height_weight(self, jitter=0.0):
    heights = []
    weights = []
    for r in self.records:
        if r.wtkg2 == 'NA' or r.htm3 == 'NA':
            continue
        height = r.htm3 + random.uniform(-jitter, jitter)
        heights.append(height)
        weights.append(r.wtkg2)
    return heights, weights
Get sequences of height and weight.

Args:
    jitter: float magnitude of random noise added to heights

Returns:
    tuple of sequences (heights, weights)
625941bd07d97122c417877c
def get_nodes_degree(canonical_graph, predicates_cache_manager, predicates_blacklist, undirected=False):
    nodes_degree = dict()
    for n_canonical_index in range(0, len(canonical_graph["canonical_to_rdf_index"])):
        nodes_degree[n_canonical_index] = 0
    for p_index in tqdm.tqdm(canonical_graph["adjacency"], desc="degrees"):
        if not is_blacklisted(predicates_cache_manager.get_element_from_index(p_index), predicates_blacklist):
            for n1 in canonical_graph["adjacency"][p_index]:
                nodes_degree[n1] += len(canonical_graph["adjacency"][p_index][n1])
    if undirected:
        for p_index in tqdm.tqdm(canonical_graph["inv_adjacency"], desc="inv adj. degrees"):
            if not is_blacklisted(predicates_cache_manager.get_element_from_index(p_index), predicates_blacklist):
                for n1 in canonical_graph["inv_adjacency"][p_index]:
                    nodes_degree[n1] += len(canonical_graph["inv_adjacency"][p_index][n1])
    return nodes_degree
Compute the degree of each node in the canonical graph (only considering predicates that are not blacklisted).

:param canonical_graph: canonical graph
:param predicates_cache_manager: cache manager for predicates
:param predicates_blacklist: blacklist for predicates
:param undirected: whether the graph should be considered directed or undirected
:return: dict associating each canonical node index with its degree
625941bd2eb69b55b151c7a3
def distinct(self, columns: List[str], checkpoint_before: bool = False, keep: str = "first"):
    assert keep in ("first", "last"), ValueError(
        "Must choose to keep 'first' or 'last' unique record.")
    if checkpoint_before:
        self.make_checkpoint()
        self.data.drop_duplicates(subset=columns, keep=keep, inplace=True)
        msg = (
            f"NOTICE: Made a checkpoint before deduplicating data. "
            "Previous checkpoint will be overwritten. "
            "To revert back afterwards, call '.revert_to_checkpoint()'."
        )
        print(msg)
    else:
        self.data.drop_duplicates(subset=columns, keep=keep, inplace=True)
Drop duplicate rows in the data (based on a specified subset of columns).

Parameters
----------
columns : list
    A list of columns used to check for duplicate rows.
checkpoint_before : bool, optional
    Whether or not to make a checkpoint of the current data before deduplicating. Default = False.
625941bd45492302aab5e1b7
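The heavy lifting is pandas' drop_duplicates; a standalone illustration on a throwaway frame:

import pandas as pd

df = pd.DataFrame({"id": [1, 1, 2], "val": ["a", "b", "c"]})
df.drop_duplicates(subset=["id"], keep="first", inplace=True)
print(df)   # keeps (1, "a") and (2, "c"); keep="last" would keep (1, "b") instead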
def test_catalog_plot_local(self):
    cat = read_events()
    with ImageComparison(self.image_dir, 'catalog-cartopy3.png') as ic:
        rcParams['savefig.dpi'] = 72
        cat.plot(method='cartopy', outfile=ic.name, projection='local',
                 resolution='50m', continent_fill_color='0.3',
                 color='date', colormap='gist_heat')
Tests the catalog preview plot, local projection, some more non-default parameters, using Cartopy.
625941bd7b25080760e39351
def loadImage(self, data, scaled=True, fromfile=True):
    if data is None or not data:
        self.removeImage()
        return
    if fromfile:
        pix = QPixmap(data)
    elif isinstance(data, QPixmap):
        pix = data
    else:
        pix = QPixmap()
        r = pix.loadFromData(data, 'JPG')
        if not r:
            pix = QPixmap(data)
    self._orignalimage = QPixmap(pix)
    h = self.maximumHeight()
    if scaled:
        pix = pix.scaledToHeight(h, Qt.SmoothTransformation)
    self.image.setPixmap(pix)
    self.isDefault = False
Load the image into the widget using a bytearray. An empty picture will result in the default placeholder image.
625941bd23e79379d52ee45e
def can_play(self, col):
    return self.game_won == 0 and self.num_turns < 42 and self.get_val(col - 1, 0) == 0
Check that the game and column can be played on.

:param col: The column being accessed. 1-indexed.
:type col: int
:return: Whether the user can play on this game in this column.
:rtype: Boolean
625941bd287bf620b61d395d
def insert(self, index, data):
    raise BranchDedicatedMethodError()
Insert new data into the target DictBranch.

Args:
    nodepath_string(str): XPath format search string.
    data(any json data): Json format data under the current branch: dict, list, str, int, float, bool.

Returns:
    LocationPath or None: LocationPath of the new branch. Returns None if the target nodename already exists.

Note:
    Must be implemented for ListBranch only.
625941bd30dc7b7665901860
def genericValue(self, key, default):
    return self.value(key, default)
Returns an arbitrary settings value, corresponding to `key`. The `default` value is used should the `key` contain no value. :param key: the name of the key to get the value from. :type key: string :param default: the value to be used as fallback if `key` contains no value.
625941bdf548e778e58cd474
def get_bias(a, b, c, d):
    if b + d > a + c:
        (a, b, c, d) = (b, a, d, c)
    try:
        return abs(b/(a+b) - d/(c+d)) / ((b+d) / (a+b+c+d))
    except ZeroDivisionError:
        return None
Bare equation from Guo et al., 2012 (SB, strand bias method 1). One modification: a/b and c/d will be swapped if necessary to make sure b and d are the minor allele.
625941bd956e5f7376d70d66
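A worked example with hypothetical counts a, b, c, d = 40, 10, 45, 5 (here b + d = 15 <= a + c = 85, so no swap):

# |b/(a+b) - d/(c+d)| / ((b+d) / (a+b+c+d))
# = |10/50 - 5/50| / (15/100) = 0.1 / 0.15 ≈ 0.667
assert abs(get_bias(40, 10, 45, 5) - 0.1 / 0.15) < 1e-12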
def _greedy_predict(self, xx):
    yyhat = []
    phiphi = []
    for phi in self.efeats_fnc(xx):
        phi = phi + self.tfeats_fnc(yyhat[-self.order:])
        (yhat, _) = max(self.scores(phi).items(), key=itemgetter(1))
        yyhat.append(yhat)
        phiphi.append(phi)
    return (tuple(yyhat), tuple(phiphi))
Sequence classification with a greedy approximation of a Markov model, also returning feature vectors `phiphi`
625941bd167d2b6e31218a8e
def list_posts(request):
    return render(request, "feed.html", {"posts": posts})
List existing posts
625941bdcad5886f8bd26ed9
def connect_first_once(self, callback, *args, **kwargs):
    return self._connect(callback, args, kwargs, once=True, pos=0)
Variant of :meth:`~Signal.connect_once` in which the given callback is inserted to the front of the callback list.
625941bdf7d966606f6a9ef8
def process_get_hw_modules_from_lineid(**kwargs):
    return f.process.get_hw_modules_from_lineid(**kwargs)
:menu: (enable=True, name=LINEID GET HW MODULES, section=Config, num=8.2, args={'menu': True})
625941bd097d151d1a222d53
def set_project(self, project: 'Project'):
    self.project = project
Set Project for Note instance @param project: Project Instance
625941bdcc40096d61595849
def workspace_from_dir(directory, recurse=True):
    directory = os.path.abspath(directory)
    pickle_path = os.path.join(directory, 'workspace.pkl')
    if not os.path.exists(pickle_path):
        if recurse:
            parent_dir = os.path.dirname(directory)
            print(parent_dir)
            try:
                return workspace_from_dir(parent_dir, parent_dir != '/')
            except WorkspaceNotFound:
                raise WorkspaceNotFound(directory)
        else:
            raise WorkspaceNotFound(directory)
    with open(pickle_path, 'rb') as file:
        workspace_class = pickle.load(file)
    return workspace_class.from_directory(directory)
Construct a workspace object from a directory name. If recurse=True, this function will search up the directory tree and return the first workspace it finds. If recurse=False, an exception will be raised if the given directory is not a workspace. Workspace identification requires a file called 'workspace.pkl' to be present in each workspace directory, which can unfortunately be a little fragile.
625941bd0383005118ecf4dc
def parse_charge_section(self, line, inputfile, chargestype):
    has_spins = 'AND SPIN POPULATIONS' in line
    if not hasattr(self, "atomcharges"):
        self.atomcharges = {}
    if has_spins and not hasattr(self, "atomspins"):
        self.atomspins = {}
    self.skip_line(inputfile, 'dashes')
    if chargestype == 'mulliken':
        should_stop = lambda x: x.startswith('Sum of atomic charges')
        start, stop = 8, 20
    elif chargestype == 'lowdin':
        should_stop = lambda x: not bool(x.strip())
        start, stop = 8, 20
    elif chargestype == 'chelpg':
        should_stop = lambda x: x.startswith('---')
        start, stop = 11, 26
    charges = []
    if has_spins:
        spins = []
    line = next(inputfile)
    while not should_stop(line):
        charges.append(float(line[start:stop]))
        if has_spins:
            spins.append(float(line[stop:]))
        line = next(inputfile)
    self.atomcharges[chargestype] = charges
    if has_spins:
        self.atomspins[chargestype] = spins
Parse a charge section, modifying the class in place. Parameters ---------- line : str the line which triggered entry here inputfile : file handle to file object chargestype : str what type of charge we're dealing with, must be one of 'mulliken', 'lowdin' or 'chelpg'; anything else raises ValueError
625941bd8da39b475bd64e68
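A standalone sketch of just the Mulliken branch, with a fabricated two-line block showing the fixed-width slice and the terminator test (the column positions 8:20 are taken from the snippet; the sample text is made up):

import io

def parse_mulliken_charges(inputfile):
    # Fixed-width parse: columns 8:20 hold the charge; a line starting
    # with 'Sum of atomic charges' terminates the block.
    charges = []
    line = next(inputfile)
    while not line.startswith('Sum of atomic charges'):
        charges.append(float(line[8:20]))
        line = next(inputfile)
    return charges

fake = io.StringIO('   1 C     -0.12345\n'
                   '   2 H      0.12345\n'
                   'Sum of atomic charges: 0.0\n')
print(parse_mulliken_charges(fake))      # [-0.12345, 0.12345]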
def test_minimizer_comparison_mode_invalid(self): <NEW_LINE> <INDENT> config_str = "[PLOTTING]\ncomparison_mode: absolute_values" <NEW_LINE> self.shared_invalid('comparison_mode', config_str)
Checks that a user-set comparison_mode is rejected as invalid
625941bd66673b3332b91f88
def test_03__get_from_manifest(self): <NEW_LINE> <INDENT> imgtag = '123' <NEW_LINE> struct = {'manifest': {'123': {'json': {'layers': [{'digest': 'd1'}, {'digest': 'd2'}], 'config': {'digest': 'dgt'}}}}} <NEW_LINE> lay_out = ['d2', 'd1'] <NEW_LINE> conf_out = 'dgt' <NEW_LINE> status = OciLocalFileAPI(self.local)._get_from_manifest(struct, imgtag) <NEW_LINE> self.assertEqual(status, (conf_out, lay_out))
Test03 OciLocalFileAPI()._get_from_manifest.
625941bd30c21e258bdfa393
def _init_cuda_setting(self): <NEW_LINE> <INDENT> if not self.config.cuda: <NEW_LINE> <INDENT> self.config.device = -1 <NEW_LINE> return <NEW_LINE> <DEDENT> self.config.device = self.config.cuda if self.config.cuda is not True else 0 <NEW_LINE> self.use_cuda = True <NEW_LINE> if self.distributed: <NEW_LINE> <INDENT> torch.cuda.set_device(self._local_rank_id) <NEW_LINE> <DEDENT> torch.cuda.manual_seed(self.config.seed)
Init CUDA setting.
625941bd442bda511e8be314
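The same device-selection convention as a free function, for illustration; the `-1`-means-CPU and `True`-means-device-0 conventions are read off the snippet, the rest is a sketch:

import torch

def init_cuda(cuda, seed, local_rank=0, distributed=False):
    # Mirrors the convention above: cuda may be False (CPU), True
    # (device 0), or an explicit device index.
    if not cuda:
        return -1                        # -1 conventionally means CPU here
    device = 0 if cuda is True else cuda
    if distributed:
        torch.cuda.set_device(local_rank)
    torch.cuda.manual_seed(seed)
    return device

device = init_cuda(cuda=True, seed=42)   # 0 on a CUDA-enabled machine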
def _defaultsettings( self ): <NEW_LINE> <INDENT> from pluggdapps.plugin import PluginMeta <NEW_LINE> default = dict( DEFAULT().items() ) <NEW_LINE> defaultsett = { 'DEFAULT' : deepcopy(default) } <NEW_LINE> defaultsett['pluggdapps'] = deepcopy(default) <NEW_LINE> defaultsett['pluggdapps'].update( dict( pluggdapps_defaultsett().items() )) <NEW_LINE> for name, info in PluginMeta._pluginmap.items() : <NEW_LINE> <INDENT> bases = reversed( info['cls'].mro() ) <NEW_LINE> sett = deepcopy( default ) <NEW_LINE> for b in bases : <NEW_LINE> <INDENT> if hasattr( b, 'default_settings' ) : <NEW_LINE> <INDENT> sett.update( dict( b.default_settings().items() )) <NEW_LINE> sett = b.normalize_settings( sett ) <NEW_LINE> <DEDENT> <DEDENT> defaultsett[ h.plugin2sec(name) ] = sett <NEW_LINE> <DEDENT> return defaultsett
By the time this is called, all interface specs and plugin definitions are expected to have been loaded, via importing the packages that implement them and pluggdapps' plugin meta-classing. This function collects their default settings and returns them as a settings dictionary,:: { "plugin:<pkgname>.<pluginname>" : default_settings, ... }
625941bd57b8e32f52483391
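The core trick, walking a class's MRO from most-generic to most-derived so subclass defaults override base defaults, in isolation. The classes and keys here are invented for illustration; `normalize_settings` is omitted:

from copy import deepcopy

class Base:
    @classmethod
    def default_settings(cls):
        return {'debug': False, 'level': 1}

class Plugin(Base):
    @classmethod
    def default_settings(cls):
        return {'level': 2}              # overrides Base, inherits 'debug'

def collect_defaults(cls, base_defaults):
    # Walk the MRO from most-generic to most-derived so subclasses
    # override their bases, mirroring the reversed-mro merge above.
    sett = deepcopy(base_defaults)
    for b in reversed(cls.mro()):
        if hasattr(b, 'default_settings'):
            sett.update(b.default_settings())
    return sett

print(collect_defaults(Plugin, {'name': 'x'}))
# {'name': 'x', 'debug': False, 'level': 2}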
def _updateInstanceCache(self, autostacks): <NEW_LINE> <INDENT> if ((time.time() - self._lastInstanceCacheGCTimestamp) > self._INSTANCE_CACHE_GC_INTERVAL_SEC): <NEW_LINE> <INDENT> self._garbageCollectInstanceCache(self._instanceCache) <NEW_LINE> self._lastInstanceCacheGCTimestamp = time.time() <NEW_LINE> <DEDENT> autostacksToRefresh = [] <NEW_LINE> for autostack in autostacks: <NEW_LINE> <INDENT> if autostack.uid not in self._instanceCache: <NEW_LINE> <INDENT> autostacksToRefresh.append( (autostack.uid, autostack.region, autostack.filters,)) <NEW_LINE> <DEDENT> <DEDENT> refreshingEntireCache = False <NEW_LINE> if ((time.time() - self._lastInstanceCacheUpdateTimestamp) > self._INSTANCE_CACHE_UPDATE_INTERVAL_SEC): <NEW_LINE> <INDENT> refreshingEntireCache = True <NEW_LINE> for uid, cacheItem in self._instanceCache.iteritems(): <NEW_LINE> <INDENT> autostacksToRefresh.append((uid, cacheItem.region, cacheItem.filters,)) <NEW_LINE> <DEDENT> <DEDENT> if autostacksToRefresh: <NEW_LINE> <INDENT> self._log.info( "Refreshing Autostack instance cache: entire=%r; numAutostacks=%d", refreshingEntireCache, len(autostacksToRefresh)) <NEW_LINE> cacheRefreshStart = time.time() <NEW_LINE> self._instanceCache.update( self._fetchInstanceCacheItems(autostacksToRefresh)) <NEW_LINE> self._log.info( "Refreshed Autostack instance cache: entire=%r; numAutostacks=%d; " "totalAutostacks=%d; duration=%ss", refreshingEntireCache, len(autostacksToRefresh), len(self._instanceCache), time.time() - cacheRefreshStart) <NEW_LINE> if refreshingEntireCache: <NEW_LINE> <INDENT> self._lastInstanceCacheUpdateTimestamp = time.time() <NEW_LINE> <DEDENT> <DEDENT> for autostack in autostacks: <NEW_LINE> <INDENT> self._instanceCache[autostack.uid].use()
Garbage-collect Autostacks instance cache if it's time. Update Autostacks instance cache for the given Autostacks, if missing; refresh the entire Autostacks instance cache if it's time for refreshing. Also refresh last-used timestamps of instance cache entries corresponding to the given Autostacks. :param autostacks: Autostack objects from the current collection request :type autostacks: List of namedtuples as returned by repository.getAutostackMetricsPendingDataCollection(). Each autostack item has attributes that match columns defined in YOMP.app.repository.schema.autostack. See also: AggregatorService.run()
625941bd8e7ae83300e4aec3
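The refresh logic distilled into a generic sketch with hypothetical names (the real service also tracks per-entry last-used timestamps and a separate GC interval): entries missing from the cache are fetched immediately, and the whole cache is re-fetched once its update interval elapses:

import time

class RefreshingCache:
    def __init__(self, update_interval_sec, fetch):
        self._interval = update_interval_sec
        self._fetch = fetch              # fetch(keys) -> {key: value}
        self._items = {}
        self._last_refresh = 0.0

    def get_many(self, keys):
        missing = [k for k in keys if k not in self._items]
        stale = time.time() - self._last_refresh > self._interval
        # On a stale cache, re-fetch everything; otherwise only the misses.
        to_fetch = set(missing) | (set(self._items) if stale else set())
        if to_fetch:
            self._items.update(self._fetch(list(to_fetch)))
            if stale:
                self._last_refresh = time.time()
        return {k: self._items[k] for k in keys}

cache = RefreshingCache(60.0, lambda keys: {k: k.upper() for k in keys})
print(cache.get_many(['a', 'b']))        # {'a': 'A', 'b': 'B'}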
def cancel_shutdown(self): <NEW_LINE> <INDENT> os.system('shutdown -a') <NEW_LINE> send_msg = '[远控信息] 此次关机已取消' <NEW_LINE> self.bot.file_helper.send(send_msg) <NEW_LINE> self._signal_3.emit(send_msg)
Cancel the scheduled shutdown and report the cancellation via the WeChat file helper.
625941bd99cbb53fe6792adf
def max_noutput_items(self): <NEW_LINE> <INDENT> return _howto_swig.user_device_core_sptr_max_noutput_items(self)
max_noutput_items(user_device_core_sptr self) -> int
625941bde5267d203edcdb97
def CreateAccount(Name,Qfunds): <NEW_LINE> <INDENT> Users.update( {Name : {'Cash': Qfunds}})
Creates an account filled with money. Obviously, this is a crucial step which will require (!) verification of Bitcoin payments, an X-confirmation delay, etc. For testing we allow unconstrained (free/infinite) cash. These accounts have simple toy names, actual accounts will probably be the bitcoin addresses themselves.
625941bd7d43ff24873a2b95
def __pick_word(self, probabilities): <NEW_LINE> <INDENT> probabilities = np.log(probabilities) / self.config.temperature <NEW_LINE> exp_probs = np.exp(probabilities) <NEW_LINE> probabilities = exp_probs / np.sum(exp_probs) <NEW_LINE> pick = np.random.choice(len(probabilities), p=probabilities) <NEW_LINE> return self.idx2word[pick]
Pick the next word in the generated text :param probabilities: Probabilities of the next word :return: String of the predicted word
625941bd16aa5153ce362370
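The temperature re-weighting on its own, as a NumPy sketch (the function name and RNG handling are invented; the math matches the snippet above):

import numpy as np

def sample_with_temperature(probs, temperature=1.0, rng=None):
    # Temperature < 1 sharpens the distribution (greedier picks);
    # temperature > 1 flattens it (more diverse picks).
    rng = rng if rng is not None else np.random.default_rng()
    logits = np.log(probs) / temperature
    probs = np.exp(logits) / np.sum(np.exp(logits))
    return rng.choice(len(probs), p=probs)

p = np.array([0.1, 0.2, 0.7])
print(sample_with_temperature(p, temperature=0.5))   # index 2 almost always
print(sample_with_temperature(p, temperature=5.0))   # closer to uniform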
def path_len_sets(self, idx, jdx): <NEW_LINE> <INDENT> min_path = self.min_path_sets(idx,jdx) <NEW_LINE> return self._path_len(min_path) if min_path else None
Return the path length (length of minimum path) between the closest members of two sets of indexes
625941bd283ffb24f3c55801
def make_json(image_id, cnt, json_path, data_path, save_path): <NEW_LINE> <INDENT> with open(json_path, 'r') as f: <NEW_LINE> <INDENT> d = json.load(f) <NEW_LINE> save_dict = {} <NEW_LINE> info_dict = {} <NEW_LINE> info_dict['version'] = 1.0 <NEW_LINE> save_dict['info'] = info_dict <NEW_LINE> save_dict['licenses'] = 'null' <NEW_LINE> images = [] <NEW_LINE> annotations = [] <NEW_LINE> for i in range(len(d)): <NEW_LINE> <INDENT> tmp = {} <NEW_LINE> tmp['file_name'] = d[i]['name'] <NEW_LINE> tmp['id'] = image_id[d[i]['name']] <NEW_LINE> tmp['height'] = 512 <NEW_LINE> tmp['width'] = 512 <NEW_LINE> images.append(tmp) <NEW_LINE> tmp1 = {} <NEW_LINE> tmp1['segmentation'] = [[d[i]['bbox'][0], d[i]['bbox'][1], d[i]['bbox'][2], d[i]['bbox'][1], d[i]['bbox'][2], d[i]['bbox'][3], d[i]['bbox'][0], d[i]['bbox'][3],]] <NEW_LINE> tmp1['area'] = (d[i]['bbox'][2] - d[i]['bbox'][0]) * (d[i]['bbox'][3] - d[i]['bbox'][1]) <NEW_LINE> tmp1['iscrowd'] = 0 <NEW_LINE> tmp1['image_id'] = image_id[d[i]['name']] <NEW_LINE> tmp1['bbox'] = d[i]['bbox'] <NEW_LINE> tmp1['category_id'] = int(d[i]['category']) <NEW_LINE> tmp1['id'] = cnt <NEW_LINE> annotations.append(tmp1) <NEW_LINE> cnt += 1 <NEW_LINE> <DEDENT> print('box number is {}'.format(cnt)) <NEW_LINE> string = ['background', 'edge_exception', 'angular_exception', 'white_dot', 'light_block', 'dark_dot', 'aperture_biemish'] <NEW_LINE> classes = [] <NEW_LINE> for i in range(6): <NEW_LINE> <INDENT> category = {} <NEW_LINE> category['name'] = category['supercategory'] = string[i] <NEW_LINE> category['id'] = i + 1 <NEW_LINE> classes.append(category) <NEW_LINE> <DEDENT> save_dict['images'] = images <NEW_LINE> save_dict['annotations'] = annotations <NEW_LINE> save_dict['categories'] = classes <NEW_LINE> with open(save_path, 'w') as f: <NEW_LINE> <INDENT> json.dump(save_dict, f, indent=4)
Build the COCO-format JSON annotation file.
625941bdbe383301e01b5383
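The per-box conversion in isolation. Note that the snippet treats `bbox` as corner coordinates [x1, y1, x2, y2], whereas standard COCO stores [x, y, width, height], which matters if downstream consumers expect the standard layout. This helper simply mirrors the snippet's convention:

def bbox_to_coco_annotation(bbox, image_id, category_id, ann_id):
    # Segmentation polygon lists the four corners, as in make_json above.
    x1, y1, x2, y2 = bbox
    return {
        'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]],
        'area': (x2 - x1) * (y2 - y1),
        'iscrowd': 0,
        'image_id': image_id,
        'bbox': bbox,
        'category_id': category_id,
        'id': ann_id,
    }

print(bbox_to_coco_annotation([10, 20, 110, 220], image_id=1,
                              category_id=3, ann_id=0))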
def get_job_names_to_comprehend(self): <NEW_LINE> <INDENT> jobs_to_comprehend_response = requests.get( self.transcribe_jobs_endpoint, headers=self.api_auth) <NEW_LINE> jobs_to_comprehend_list = jobs_to_comprehend_response.json() <NEW_LINE> job_names_to_comprehend = [] <NEW_LINE> for job_object in jobs_to_comprehend_list['results']: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> job_name = job_object['job_id'] <NEW_LINE> status = self.client.transcribe_client.get_transcription_job( TranscriptionJobName=job_name)['TranscriptionJob']['TranscriptionJobStatus'] <NEW_LINE> if(status == "COMPLETED"): <NEW_LINE> <INDENT> job_names_to_comprehend.append(job_name) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print("Job {} is still in the process of transcribing speech to text.".format(job_name)) <NEW_LINE> <DEDENT> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> print("Error in json formatting of job {}(LambdaUtils.get_job_names_to_comprehend()).".format(job_name)) <NEW_LINE> print(e) <NEW_LINE> <DEDENT> <DEDENT> return job_names_to_comprehend
Gets the names of all jobs which are currently being transcribed or waiting to be passed to AWS Comprehend. Retrieves the names of jobs currently waiting to be passed to AWS Comprehend from the Django framework API endpoint. For each of these jobs, we query AWS to see if they have finished transcribing. If the Transcribe job is complete, we add the name of the job to the list. Returns: A list of job names which are generated using a salted hash. For example: ['9c348223-5917-4700-b135-83135c9a927f', '097c10d9-cb1f-439b-a90e-60da4afe4280', 'be4c2053-ee2d-4675-8caf-f4f7d308292d']
625941bdf8510a7c17cf95f2
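The AWS status check on its own, as a hedged sketch: boto3's `get_transcription_job` does return this response shape, and AWS credentials/region are assumed to be configured in the environment:

import boto3

def completed_transcribe_jobs(job_names):
    # Keep only jobs whose Transcribe status is COMPLETED.
    client = boto3.client('transcribe')
    done = []
    for name in job_names:
        job = client.get_transcription_job(TranscriptionJobName=name)
        status = job['TranscriptionJob']['TranscriptionJobStatus']
        if status == 'COMPLETED':
            done.append(name)
    return done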
def alive(self): <NEW_LINE> <INDENT> return self.fuzzer.alive
bool: Is the fuzzer alive and running?
625941bd851cf427c661a40a
def values(self): <NEW_LINE> <INDENT> return self.inverse.keys()
All values in range
625941bd56ac1b37e62640cc
def parse_filename(filename): <NEW_LINE> <INDENT> _indx = filename.find('[') <NEW_LINE> if _indx > 0: <NEW_LINE> <INDENT> _fname = filename[:_indx] <NEW_LINE> _extn = filename[_indx + 1:-1] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> _fname = filename <NEW_LINE> _extn = '' <NEW_LINE> <DEDENT> return _fname, _extn
Parse out filename from any specified extensions. Returns rootname and string version of extension name. Parameters ---------- filename : str The filename to be parsed Returns ------- A tuple with the filename root and extension
625941bd30bbd722463cbcbb
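Usage sketch, assuming FITS-style bracket notation for the extension:

print(parse_filename('image.fits[sci,1]'))   # ('image.fits', 'sci,1')
print(parse_filename('image.fits'))          # ('image.fits', '')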