Dataset schema: code (string, 4 to 4.48k chars), docstring (string, 1 to 6.45k chars), _id (string, 24 chars)
def cache_taxi_info(self): <NEW_LINE> <INDENT> vehicle_info_dict = publish_message_cache_model.get_all_taxi_info() <NEW_LINE> self.server.hmset( self.taxi_key, vehicle_info_dict ) <NEW_LINE> app_log.info('Taxi info Cached.')
Cache all taxi info to redis
625941bc0a366e3fb873e702
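A minimal usage sketch of the Redis hash pattern the entry above relies on; the host, key name, and sample data are assumptions for illustration, and redis-py 3.5+ replaces the deprecated hmset with hset(..., mapping=...):

import redis

# Illustrative only: cache a dict of vehicle info under one Redis hash key.
server = redis.Redis(host='localhost', port=6379)  # assumed local server
vehicle_info = {'taxi_001': '31.23,121.47', 'taxi_002': '31.30,121.50'}
server.hset('taxi_info', mapping=vehicle_info)  # hmset() is the deprecated equivalent
print(server.hgetall('taxi_info'))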
def __next(self): <NEW_LINE> <INDENT> from hask3.lang.type_system import typeof <NEW_LINE> from hask3.lang.hindley_milner import unify <NEW_LINE> if self.__is_evaluated: <NEW_LINE> <INDENT> raise StopIteration <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> next_iter = next(self.__tail) <NEW_LINE> if len(self.__head) > 0: <NEW_LINE> <INDENT> unify(typeof(self[0]), typeof(next_iter)) <NEW_LINE> <DEDENT> self.__head.append(next_iter) <NEW_LINE> <DEDENT> except StopIteration: <NEW_LINE> <INDENT> self.__is_evaluated = True
Evaluate the next element of the tail, and add it to the head.
625941bc3eb6a72ae02ec3bf
def process_request(self, request, spider): <NEW_LINE> <INDENT> print('===UserAgentMiddleware process_request==') <NEW_LINE> if self.ua: <NEW_LINE> <INDENT> print("********Current UserAgent:%s************") <NEW_LINE> custom_ua = self.ua.random <NEW_LINE> print('custom_ua:',custom_ua) <NEW_LINE> request.headers.setdefault(b'User-Agent', custom_ua)
Set a random User-Agent for scrapy requests. :param request: :param spider: :return:
625941bc4527f215b584c345
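For context, a short sketch of where a self.ua object like the one above typically comes from; this assumes the fake-useragent package, whose UserAgent class exposes a .random property:

from fake_useragent import UserAgent

ua = UserAgent()  # fetches/caches a list of real browser UA strings
print(ua.random)  # a different User-Agent string on each access
print(ua.random)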
def ShouldSerializeBindings(self): <NEW_LINE> <INDENT> pass
ShouldSerializeBindings(self: PriorityBinding) -> bool Returns a value that indicates whether serialization processes should serialize the effective value of the System.Windows.Data.PriorityBinding.Bindings property on instances of this class. Returns: true if the System.Windows.Data.PriorityBinding.Bindings property value should be serialized; otherwise, false.
625941bc435de62698dfdb3d
def hetero_edge_masker_sampler(old_g, pct_masked_edges, negative_rate, edge_masking, use_cuda): <NEW_LINE> <INDENT> pos_samples = {} <NEW_LINE> neg_samples = {} <NEW_LINE> g = old_g.local_var() <NEW_LINE> for etype in g.etypes: <NEW_LINE> <INDENT> pos_samples[etype]=[] <NEW_LINE> t0=time.time() <NEW_LINE> u,v,eid=g.all_edges(form='all', etype=etype) <NEW_LINE> tedgefetch=time.time()-t0 <NEW_LINE> pos_samples[etype]=np.stack((u,v)).transpose() <NEW_LINE> if edge_masking: <NEW_LINE> <INDENT> lnum_sampled_edges = int(pct_masked_edges * len(eid)) <NEW_LINE> sampl_ids = np.random.choice(g.number_of_edges(etype), size=lnum_sampled_edges, replace=False) <NEW_LINE> sampled_edges = eid[sampl_ids] <NEW_LINE> g.edges[etype].data['mask'][sampled_edges] = 0 <NEW_LINE> <DEDENT> <DEDENT> t0 = time.time() <NEW_LINE> samples_d,labels_d=negative_sampling(g,pos_samples, negative_rate) <NEW_LINE> tnega = time.time() - t0 <NEW_LINE> link_labels_d = {} <NEW_LINE> for e in labels_d.keys(): <NEW_LINE> <INDENT> link_labels_d[e] = torch.from_numpy(labels_d[e]) <NEW_LINE> if use_cuda: <NEW_LINE> <INDENT> link_labels_d[e] = link_labels_d[e].cuda() <NEW_LINE> <DEDENT> <DEDENT> llabels_d = link_labels_d <NEW_LINE> return g,samples_d,llabels_d
This function masks some edges at random by setting the mask attribute to 0 :param old_g: The original graph :param pct_masked_edges: The percentage of edges to sample :param negative_rate: The number of negative samples per edge :param edge_masking: Whether to mask the sampled edges or not :return: The altered graph, the positive and the negative samples.
625941bccb5e8a47e48b7998
def test_unix_time_date_object(self): <NEW_LINE> <INDENT> t = datetime.date(2013, 4, 1) <NEW_LINE> ret = fleming.unix_time(t) <NEW_LINE> self.assertEquals(ret, 1364774400)
Tests that the unix time function works on a date object.
625941bc63b5f9789fde6fd0
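A self-contained check of the expected value in the test above, computed with the standard library rather than the fleming helper:

import calendar
import datetime

# Midnight UTC on 2013-04-01 expressed as seconds since the Unix epoch.
t = datetime.date(2013, 4, 1)
print(calendar.timegm(t.timetuple()))  # 1364774400, the value the test asserts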
def _set_fibermap_columns(): <NEW_LINE> <INDENT> global fibermap_columns, fibermap_comments <NEW_LINE> for sv in (1, 2, 3): <NEW_LINE> <INDENT> survey = f'sv{sv:d}' <NEW_LINE> fibermap_columns[survey] = fibermap_columns['main'].copy() <NEW_LINE> for t in ('DESI', 'BGS', 'MWS', 'SCND'): <NEW_LINE> <INDENT> index_columns = [x[0] for x in fibermap_columns[survey]] <NEW_LINE> i = index_columns.index('{0}_TARGET'.format(t)) <NEW_LINE> row = ("{0}_{1}_TARGET".format(survey.upper(), t), 'i8', '', fibermap_columns[survey][i][3]) <NEW_LINE> fibermap_columns[survey].insert(index_columns.index('DESI_TARGET'), row) <NEW_LINE> <DEDENT> index_columns = [x[0] for x in fibermap_columns[survey]] <NEW_LINE> del fibermap_columns[survey][index_columns.index('SCND_TARGET')] <NEW_LINE> fibermap_comments[survey] = dict([(tmp[0], tmp[3]) for tmp in fibermap_columns[survey]]) <NEW_LINE> <DEDENT> fibermap_columns['cmx'] = fibermap_columns['main'].copy() <NEW_LINE> index_columns = [x[0] for x in fibermap_columns['cmx']] <NEW_LINE> row = ('CMX_TARGET', 'i8', '', 'Targeting bits for instrument commissioning') <NEW_LINE> fibermap_columns['cmx'].insert(index_columns.index('DESI_TARGET'), row) <NEW_LINE> index_columns = [x[0] for x in fibermap_columns['cmx']] <NEW_LINE> del fibermap_columns['cmx'][index_columns.index('SCND_TARGET')] <NEW_LINE> fibermap_comments['cmx'] = dict([(tmp[0], tmp[3]) for tmp in fibermap_columns['cmx']]) <NEW_LINE> return fibermap_columns
Prepare survey-specific list of columns. Returns ------- :class:`dict` As a convenience, return the full set of survey-specific columns.
625941bcbaa26c4b54cb100d
def munsell_colour_to_xyY(munsell_colour): <NEW_LINE> <INDENT> specification = munsell_colour_to_munsell_specification(munsell_colour) <NEW_LINE> return munsell_specification_to_xyY(specification)
Converts given *Munsell* colour to *CIE xyY* colourspace. Parameters ---------- munsell_colour : unicode *Munsell* colour. Returns ------- ndarray, (3,) *CIE xyY* colourspace array. Notes ----- - Output *CIE xyY* colourspace array is in domain [0, 1]. Examples -------- >>> munsell_colour_to_xyY('4.2YR 8.1/5.3') # doctest: +ELLIPSIS array([ 0.3873694..., 0.3575165..., 0.59362 ]) >>> munsell_colour_to_xyY('N8.9') # doctest: +ELLIPSIS array([ 0.31006 , 0.31616 , 0.746134...])
625941bc85dfad0860c3ad44
def makematrix(letters, language, dictionary): <NEW_LINE> <INDENT> matrix = np.zeros((len(letters), len(letters))) <NEW_LINE> for (key, value) in dictionary.items(): <NEW_LINE> <INDENT> matrix[letters.find(key[0]), letters.find(key[1])] = value <NEW_LINE> <DEDENT> np.savetxt(language + ".csv", matrix, delimiter=",") <NEW_LINE> return matrix
Takes a dictionary and creates a numpy adjacency matrix, then saves that matrix as a csv file for later prettification
625941bc925a0f43d2549d5e
def orcle_close(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.conn.close() <NEW_LINE> <DEDENT> except Exception as a: <NEW_LINE> <INDENT> print("Exception while closing the database: %s"%a)
Close the Oracle connection
625941bc71ff763f4b549571
def _parse_query_plan( plan: dict[str, Any]) -> tuple[list[str], list[str], list[str]]: <NEW_LINE> <INDENT> table_names: list[str] = [] <NEW_LINE> queries: list[str] = [] <NEW_LINE> functions: list[str] = [] <NEW_LINE> if plan.get('Relation Name'): <NEW_LINE> <INDENT> table_names.append(plan['Relation Name']) <NEW_LINE> <DEDENT> if 'Function Name' in plan: <NEW_LINE> <INDENT> if plan['Function Name'].startswith( 'crosstab'): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> queries.append(_get_subquery_from_crosstab_call( plan['Function Call'])) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> table_names.append('_unknown_crosstab_sql') <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> functions.append(plan['Function Name']) <NEW_LINE> <DEDENT> <DEDENT> if 'Plans' in plan: <NEW_LINE> <INDENT> for child_plan in plan['Plans']: <NEW_LINE> <INDENT> t, q, f = _parse_query_plan(child_plan) <NEW_LINE> table_names.extend(t) <NEW_LINE> queries.extend(q) <NEW_LINE> functions.extend(f) <NEW_LINE> <DEDENT> <DEDENT> return table_names, queries, functions
Given a Postgres Query Plan object (parsed from the output of an EXPLAIN query), returns a tuple with three items: * A list of tables involved * A list of remaining queries to parse * A list of function names involved
625941bc10dbd63aa1bd2a91
def post(self, request, *args, **kwargs): <NEW_LINE> <INDENT> search = self.metric.search() <NEW_LINE> query = request.data.get('query') <NEW_LINE> search = search.update_from_dict(query) <NEW_LINE> try: <NEW_LINE> <INDENT> results = self.execute_search(search) <NEW_LINE> <DEDENT> except RequestError: <NEW_LINE> <INDENT> raise ValidationError('Malformed elasticsearch query.') <NEW_LINE> <DEDENT> return JsonResponse(results.to_dict())
For a bit of future proofing, accept custom elasticsearch aggregation queries in JSON form. Caution - this could be slow if a very large query is executed, so use with care!
625941bc925a0f43d2549d5f
def __init__(self, config=None, info=None, result=None, save_dir=None): <NEW_LINE> <INDENT> self.config = config <NEW_LINE> self.result = result <NEW_LINE> self.info = info <NEW_LINE> self.restore_dir = None <NEW_LINE> self.save_dir = save_dir
Parameters ---------- config : str info : str result : float save_dir : str
625941bc7d43ff24873a2b88
def test_standard_deviation04(self): <NEW_LINE> <INDENT> input = numpy.array([1, 0], bool) <NEW_LINE> output = ndimage.standard_deviation(input) <NEW_LINE> self.failUnless(output == 0.5)
standard deviation 4
625941bc57b8e32f52483384
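A quick numpy check of the arithmetic behind the assertion above: the input [1, 0] has mean 0.5 and deviations of plus or minus 0.5, so the population standard deviation is 0.5:

import numpy as np

data = np.array([1, 0], bool)
# mean = 0.5, squared deviations = 0.25 each -> variance 0.25 -> std 0.5
print(np.std(data))  # 0.5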
def return_images_from_scene(distorted_list_path, gt_path): <NEW_LINE> <INDENT> distorted_list_path_shuffled = tf.random.shuffle(distorted_list_path) <NEW_LINE> gt_image = decode_img(gt_path) <NEW_LINE> distorted_images_tensor = [] <NEW_LINE> for idx in range(config.n_frames): <NEW_LINE> <INDENT> distorted_images_tensor.append(decode_img(distorted_list_path_shuffled[idx])) <NEW_LINE> <DEDENT> distorted_images_tensor = tf.stack(distorted_images_tensor) <NEW_LINE> return distorted_images_tensor, gt_image
Function that returns a stack of distorted images of a single scene and the corresponding ground truth. It returns two tensors: one consisting of the distorted images (shape=(n_frames, h, w, 3)) and the second one is the gt_image (shape=(h, w, 3)). :param distorted_list_path: Tensor of strings :param gt_path: Tensor of strings
625941bc01c39578d7e74d25
def _verified_reverse1(mapper, onesample): <NEW_LINE> <INDENT> dummy_axis_sample = np.asanyarray(onesample)[None] <NEW_LINE> rsample = mapper.reverse(dummy_axis_sample) <NEW_LINE> if not len(rsample) == 1: <NEW_LINE> <INDENT> warning("Reverse mapping single sample yielded multiple -- can lead to unintended behavior!") <NEW_LINE> <DEDENT> return rsample[0]
Replacement of Mapper.reverse1 with safety net This function can be called instead of a direct call to a mapper's ``reverse1()``. It wraps a single sample into a dummy axis and calls ``reverse()``. Afterwards it verifies that the first axis of the returned array has one item only, otherwise it will issue a warning. This function is useful in any context where it is critical to ensure that reverse mapping a single sample, yields exactly one sample -- which isn't guaranteed due to the flexible nature of mappers. Parameters ---------- mapper : Mapper instance onesample : array-like Single sample (in terms of the supplied mapper). Returns ------- array Shape matches a single sample in terms of the mappers input space.
625941bc29b78933be1e559b
def _to_tensor(self, sample): <NEW_LINE> <INDENT> assert isinstance(sample, dict), "trainer expects samples to come in dicts for key-based usage" <NEW_LINE> assert self.task.input_key in sample, f"could not find input key '{self.task.input_key}' in sample dict" <NEW_LINE> input_val, target_val = sample[self.task.input_key], None <NEW_LINE> if isinstance(input_val, list): <NEW_LINE> <INDENT> if self.task.gt_key in sample and sample[self.task.gt_key] is not None: <NEW_LINE> <INDENT> gt_tensor = sample[self.task.gt_key] <NEW_LINE> assert isinstance(gt_tensor, list) and len(gt_tensor) == len(input_val), "target tensor should also be a list of the same length as input" <NEW_LINE> target_val = [None] * len(input_val) <NEW_LINE> for idx in range(len(input_val)): <NEW_LINE> <INDENT> input_val[idx], target_val[idx] = self._to_tensor({self.task.input_key: input_val[idx], self.task.gt_key: gt_tensor[idx]}) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> for idx in range(len(input_val)): <NEW_LINE> <INDENT> input_val[idx] = torch.FloatTensor(input_val[idx]) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> input_val = torch.FloatTensor(input_val) <NEW_LINE> if self.task.gt_key in sample and sample[self.task.gt_key] is not None: <NEW_LINE> <INDENT> gt_tensor = sample[self.task.gt_key] <NEW_LINE> assert len(gt_tensor) == len(input_val), "target tensor should be an array of the same length as input (== batch size)" <NEW_LINE> if isinstance(gt_tensor, torch.Tensor) and gt_tensor.dtype == torch.int64: <NEW_LINE> <INDENT> target_val = gt_tensor <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if self.task.multi_label: <NEW_LINE> <INDENT> assert isinstance(gt_tensor, torch.Tensor) and gt_tensor.shape == (len(input_val), len(self.task.class_names)), "gt tensor for multi-label classification should be 2d array (batch size x nbclasses)" <NEW_LINE> target_val = gt_tensor.float() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> target_val = [] <NEW_LINE> for class_name in gt_tensor: <NEW_LINE> <INDENT> assert isinstance(class_name, (int, torch.Tensor, str)), "expected gt tensor to be an array of names (string) or indices (int)" <NEW_LINE> if isinstance(class_name, (int, torch.Tensor)): <NEW_LINE> <INDENT> if isinstance(class_name, torch.Tensor): <NEW_LINE> <INDENT> assert torch.numel(class_name) == 1, "unexpected scalar label, got vector" <NEW_LINE> class_name = class_name.item() <NEW_LINE> <DEDENT> assert 0 <= class_name < len(self.task.class_names), "class name given as out-of-range index (%d) for class list" % class_name <NEW_LINE> target_val.append(class_name) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> assert class_name in self.task.class_names, "got unexpected label '%s' for a sample (unknown class)" % class_name <NEW_LINE> target_val.append(self.task.class_indices[class_name]) <NEW_LINE> <DEDENT> <DEDENT> target_val = torch.LongTensor(target_val) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return input_val, target_val
Fetches and returns tensors of input images and class labels from a batched sample dictionary.
625941bc091ae35668666e4f
def esc(word): <NEW_LINE> <INDENT> if len(word) != 0 and word[0] == '/': <NEW_LINE> <INDENT> return(word[1:len(word)]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return(word)
Erases the potential '/' at the beginning of word
625941bcd486a94d0b98e030
def test14_json_to_file10(self): <NEW_LINE> <INDENT> Base._Base__nb_objects = 0 <NEW_LINE> with self.assertRaises(TypeError): <NEW_LINE> <INDENT> Rectangle.save_to_file()
Test that save_to_file raises a TypeError when called without arguments
625941bcbd1bec0571d90522
def append_text(self, text: str): <NEW_LINE> <INDENT> self.view.run_command('insert', {'characters': text})
Appends given text to view.
625941bcadb09d7d5db6c67c
def __init__(self, context): <NEW_LINE> <INDENT> self.printer = BuddyPrinter() <NEW_LINE> self.context = context <NEW_LINE> self.name = self.getName()
@param context: @since 0.0.1-beta
625941bc1f037a2d8b9460e9
def find_nice(x, round_val): <NEW_LINE> <INDENT> expv = math.floor(math.log10(x)) <NEW_LINE> f = x / math.pow(10, expv) <NEW_LINE> if (round_val): <NEW_LINE> <INDENT> if (f < 1.5): <NEW_LINE> <INDENT> nf = 1 <NEW_LINE> <DEDENT> elif (f < 3): <NEW_LINE> <INDENT> nf = 2 <NEW_LINE> <DEDENT> elif (f < 7): <NEW_LINE> <INDENT> nf = 5 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> nf = 10 <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if (f <= 1): <NEW_LINE> <INDENT> nf = 1 <NEW_LINE> <DEDENT> elif (f <= 2): <NEW_LINE> <INDENT> nf = 2 <NEW_LINE> <DEDENT> elif (f <= 5): <NEW_LINE> <INDENT> nf = 5 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> nf = 10 <NEW_LINE> <DEDENT> <DEDENT> return nf * math.pow(10, expv)
Find a "nice" number that is approximately equal to 'x'. Round the number if 'round_val'=1, or take ceiling otherwise.
625941bcd6c5a10208143f33
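Two worked calls tracing the arithmetic of find_nice above (this assumes the function from the entry is in scope): for 0.0234 the exponent is -2 and the fraction 2.34, which rounds to 2; for 7342 in ceiling mode the fraction 7.342 exceeds 5, so the result snaps up to 10:

# Assumes find_nice from the entry above is defined in scope.
print(find_nice(0.0234, 1))  # fraction 2.34 rounds to 2  -> 0.02
print(find_nice(7342, 0))    # fraction 7.342 ceils to 10 -> 10000.0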
def _send_messages_to_player(self, player, *args, **kwargs): <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> message = self._player_outgoing_messages[player].get(timeout=5) <NEW_LINE> self._message_stream.send_message(message=message, tcp_connection=player.tcp_connection) <NEW_LINE> <DEDENT> except queue.Empty: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> break
Monitor the outgoing message queue for a player, sending data to the socket in the order given by the queue.
625941bc5166f23b2e1a5044
def __init__(self, max_reproduction_prob, clear_prob): <NEW_LINE> <INDENT> self.max_reproduction_prob = float(max_reproduction_prob) <NEW_LINE> self.clear_prob = float(clear_prob)
Initializes a Bacteria instance, saving the data attributes of the instance. :param float max_reproduction_prob: Maximum probability of reproducing during a time step (0-1) :param float clear_prob: Probability of being cleared from the body during a time step (0-1)
625941bc2c8b7c6e89b356ad
def _set_attribute(self, attribute_key, attribute_value, nullify = True): <NEW_LINE> <INDENT> model_class = self.__class__ <NEW_LINE> if not hasattr(model_class, attribute_key): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> model_class_attribute_value = getattr(model_class, attribute_key) <NEW_LINE> data_type = model_class_attribute_value["type"] <NEW_LINE> cast_type = DATA_TYPE_CAST_TYPES_MAP.get(data_type, None) <NEW_LINE> if not cast_type: return <NEW_LINE> if nullify and attribute_value == "": attribute_value = None <NEW_LINE> attribute_value_casted = self._cast_safe(attribute_value, cast_type) <NEW_LINE> setattr(self, attribute_key, attribute_value_casted)
Sets the given model attribute for the given attribute key and value. This method is used in order to create a safe way of setting attributes avoiding empty values and providing safe casting "ways". The target model for the attribute setting is the current instance in which the method is being run. :type attribute_key: String :param attribute_key: The attribute key in the model. :type attribute_value: Object :param attribute_value: The value of the attribute to set. :type nullify: bool :param nullify: If the data to be processed should be nullified in case empty string values are found.
625941bc442bda511e8be307
def reset(msg): <NEW_LINE> <INDENT> publish_status(b"GOING RESET") <NEW_LINE> from machine import reset <NEW_LINE> reset()
Resets the board
625941bcb57a9660fec3376c
def list_intervalValue(inputed_interval): <NEW_LINE> <INDENT> int_lower,int_upper=extract_value(inputed_interval) <NEW_LINE> elements_list=[] <NEW_LINE> if inputed_interval[0]=='[' and inputed_interval[-1]==']': elements_list=range(int_lower,int_upper+1) <NEW_LINE> elif inputed_interval[0]=='(' and inputed_interval[-1]==')': elements_list=range(int_lower+1,int_upper) <NEW_LINE> elif inputed_interval[0]=='(' and inputed_interval[-1]==']': elements_list=range(int_lower+1,int_upper+1) <NEW_LINE> else: elements_list=range(int_lower,int_upper) <NEW_LINE> return elements_list
Returns a list that contains all the elements of the input interval
625941bcb5575c28eb68dee9
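A compact, self-contained re-implementation of the same bracket convention, useful for checking the expected outputs; interval_to_list is a hypothetical name, not from the entry above:

def interval_to_list(s):
    # '[' / ']' include the bound; '(' / ')' exclude it.
    lower, upper = map(int, s[1:-1].split(','))
    lo = lower + (s[0] == '(')
    hi = upper + (s[-1] == ']')
    return list(range(lo, hi))

print(interval_to_list('[2,5]'))  # [2, 3, 4, 5]
print(interval_to_list('(2,5]'))  # [3, 4, 5]
print(interval_to_list('(2,5)'))  # [3, 4]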
@click.group() <NEW_LINE> def radius(): <NEW_LINE> <INDENT> pass
RADIUS server configuration
625941bc7047854f462a12f7
def _compute_term_4(C, mag, R): <NEW_LINE> <INDENT> return ( (C['a16'] + C['a17'] * mag + C['a18'] * np.power(mag, 2) + C['a19'] * np.power(mag, 3)) * np.power(R, 3))
(a16 + a17.*M + a18.*M.*M + a19.*M.*M.*M).*(d(r).^3)
625941bc462c4b4f79d1d5bb
def __init__(self, minfo={}): <NEW_LINE> <INDENT> super(DisconnectRequestMessage, self).__init__(minfo) <NEW_LINE> self.Reliable = False <NEW_LINE> self.IsSystemMessage = True <NEW_LINE> self.IsForward = False <NEW_LINE> self.IsReliable = False
Constructor for the DisconnectRequestMessage class. Args: minfo (dict): Dictionary of values for message fields.
625941bc63d6d428bbe443da
def __init__(self, name, *args, **kwargs): <NEW_LINE> <INDENT> super(re, self).__init__(name) <NEW_LINE> self.regex = base_re.compile(self.name, *args, **kwargs)
Compile regex.
625941bcec188e330fd5a690
def list_trash(self, current_page=None, items_per_page=None): <NEW_LINE> <INDENT> url = _get_url(self.base_url, 'ListTrash', self.api_credentials, current_page=current_page, items_per_page=items_per_page) <NEW_LINE> return _get(self.http, url)
List all faxes in trash Keyword arguments: current_page -- The page which should be shown items_per_page -- How many items are shown per page
625941bc3617ad0b5ed67de3
def sort_merged_annotations(merged_soup): <NEW_LINE> <INDENT> include_hr = plugin_prefs.get('appearance_hr_checkbox', False) <NEW_LINE> locations = merged_soup.findAll(location_sort=True) <NEW_LINE> locs = [loc['location_sort'] for loc in locations] <NEW_LINE> locs.sort() <NEW_LINE> sorted_soup = BeautifulSoup(ANNOTATIONS_HEADER) <NEW_LINE> dtc = 0 <NEW_LINE> for i, loc in enumerate(locs): <NEW_LINE> <INDENT> next_div = merged_soup.find(attrs={'location_sort': loc}) <NEW_LINE> sorted_soup.div.insert(dtc, next_div) <NEW_LINE> dtc += 1 <NEW_LINE> if include_hr and i < len(locs) - 1: <NEW_LINE> <INDENT> sorted_soup.div.insert(dtc, plugin_prefs.get('HORIZONTAL_RULE', '<hr width="80%" />')) <NEW_LINE> dtc += 1 <NEW_LINE> <DEDENT> <DEDENT> return sorted_soup
Input: a combined group of user annotations Output: sorted by location
625941bcfb3f5b602dac357b
def _rescale_distributions(self): <NEW_LINE> <INDENT> max_height = 0 <NEW_LINE> for _, _, well in self._yield_wells(): <NEW_LINE> <INDENT> max_height = max(max_height, np.max(well.y)) <NEW_LINE> <DEDENT> scale_factor = self.max_dist_height / max_height <NEW_LINE> for _, _, well in self._yield_wells(): <NEW_LINE> <INDENT> well.y *= scale_factor
Scale all the distributions such that the tallest one has the desired height.
625941bc5fcc89381b1e15a8
def matchTransforms( inType ): <NEW_LINE> <INDENT> selObjs = cmds.ls( selection=True, dag=False, ap=True ) <NEW_LINE> if len( selObjs ) == 0: <NEW_LINE> <INDENT> cmds.warning( 'No objects are selected. Select two objects and try again' ) <NEW_LINE> <DEDENT> elif len( selObjs ) > 2: <NEW_LINE> <INDENT> cmds.warning( 'To many objects are selected. Select only two objects and try again' ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> cObj = selObjs[0] <NEW_LINE> tObj = selObjs[1] <NEW_LINE> MFnTrans = OpenMaya.MFnTransform() <NEW_LINE> childDagPath = NodeUtility.getDagPath( cObj ) <NEW_LINE> MFnTrans.setObject( childDagPath ) <NEW_LINE> targetMatrix = getMatrix( tObj, 'worldMatrix' ) <NEW_LINE> if inType == 'tran' or inType == 'all': <NEW_LINE> <INDENT> childTranslation = getMatrixTranslation( targetMatrix, OpenMaya.MSpace.kWorld ) <NEW_LINE> MFnTrans.setTranslation( childTranslation, OpenMaya.MSpace.kWorld ) <NEW_LINE> <DEDENT> if inType == 'rot' or inType == 'all': <NEW_LINE> <INDENT> childRotation = getMatrixRotation( targetMatrix, 'quat' ) <NEW_LINE> MFnTrans.setRotation( childRotation, OpenMaya.MSpace.kWorld )
@param inType: String. Type of matching to perform. 'tran', 'rot', or 'all'
625941bcd8ef3951e3243428
def reset(self): <NEW_LINE> <INDENT> self.rng = MT19937RNG(self.seed) <NEW_LINE> self.num = 0 <NEW_LINE> self.left = 0
Reset stream of random number generator.
625941bce64d504609d7472b
@api_view(['GET', 'POST']) <NEW_LINE> def snippet_list(request, format=None): <NEW_LINE> <INDENT> if request.method == 'GET': <NEW_LINE> <INDENT> snippets = Snippet.objects.all() <NEW_LINE> serializer = SnippetSerializer(snippets, many=True) <NEW_LINE> return Response(serializer.data) <NEW_LINE> <DEDENT> elif request.method == 'POST': <NEW_LINE> <INDENT> serializer = SnippetSerializer(data=request.DATA) <NEW_LINE> if serializer.is_valid(): <NEW_LINE> <INDENT> serializer.save() <NEW_LINE> return Response(serializer.data, status=status.HTTP_201_CREATED) <NEW_LINE> <DEDENT> return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
List all snippets, or create a new snippet.
625941bc30dc7b7665901854
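A hypothetical client call against a route wired to the view above; the URL and payload fields are assumptions, since they depend on the project's URLconf and serializer:

import requests

# Assumed route; the Snippet fields depend on the project's serializer.
resp = requests.post('http://localhost:8000/snippets/',
                     json={'code': "print('hello')"})
print(resp.status_code)  # 201 if the serializer accepted the data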
def test_is_accessible_by_with_local_site_and_other_site_admin(self): <NEW_LINE> <INDENT> user = User.objects.get(username='doc') <NEW_LINE> admin = User.objects.get(username='dopey') <NEW_LINE> local_site1 = LocalSite.objects.create(name='site1') <NEW_LINE> local_site1.users.add(user) <NEW_LINE> local_site2 = LocalSite.objects.create(name='site2') <NEW_LINE> local_site2.users.add(admin) <NEW_LINE> local_site2.admins.add(admin) <NEW_LINE> application = self.create_oauth_application(user=user, local_site=local_site1) <NEW_LINE> self.assertFalse(application.is_accessible_by(admin, local_site=local_site1))
Testing Application.is_accessible_by with LocalSite and other LocalSite administrator
625941bc5fc7496912cc3869
def memcache_hosts_ports(self): <NEW_LINE> <INDENT> return sorted((m['host'], m['port']) for m in self.memcaches())
Return a list of tuples (host, port)
625941bc9f2886367277a77b
def can_attach(self, obj, sub_obj, relationship, *args, **kwargs): <NEW_LINE> <INDENT> if relationship == 'roles': <NEW_LINE> <INDENT> role_access = RoleAccess(self.user) <NEW_LINE> return role_access.can_attach(sub_obj, obj, 'members', *args, **kwargs) <NEW_LINE> <DEDENT> return super(UserAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
Reverse obj and sub_obj, defer to RoleAccess if this is a role assignment.
625941bc57b8e32f52483385
def test_bcorp_get_tasks_prev_year_incomplete_filing_exists(session, client): <NEW_LINE> <INDENT> identifier = 'CP7654321' <NEW_LINE> b = factory_business(identifier, datetime.now() - datedelta.datedelta(years=2), last_ar_date='2018-03-03') <NEW_LINE> filings = factory_filing(b, AR_FILING_PREVIOUS_YEAR, datetime(2018, 8, 5, 7, 7, 58, 272362)) <NEW_LINE> print('test_get_all_business_filings - filing:', filings) <NEW_LINE> rv = client.get(f'/api/v1/businesses/{identifier}/tasks') <NEW_LINE> assert rv.status_code == HTTPStatus.OK
Assert that the one incomplete filing for previous year and a to-do for current year are returned.
625941bc8e05c05ec3eea25d
def count(self): <NEW_LINE> <INDENT> with self.connection: <NEW_LINE> <INDENT> rows = self.cursor.execute(f"SELECT * FROM {AdminsWorker.TABLE}").fetchall() <NEW_LINE> <DEDENT> return len(rows)
Get the number of admins in the table
625941bc9c8ee82313fbb660
def setup_loss(self): <NEW_LINE> <INDENT> with vs.variable_scope("loss"): <NEW_LINE> <INDENT> mask = tf.cast(self.c_masks, tf.bool) <NEW_LINE> unmasked_c_e = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = self.a, logits = self.logits)*self.skew_mat <NEW_LINE> self.loss = tf.reduce_sum(tf.boolean_mask(unmasked_c_e, mask))/tf.cast(tf.reduce_sum(self.c_masks), tf.float32)
Set up your loss computation here :return:
625941bc3539df3088e2e236
def __set__(self, instance, value): <NEW_LINE> <INDENT> if not isinstance(value, types.ListType): <NEW_LINE> <INDENT> value = [self.factory.load(data) for data in value.childNodes if data.nodeType == data.ELEMENT_NODE] <NEW_LINE> <DEDENT> value = filter(lambda x: isinstance(x, self.factory), value) <NEW_LINE> self.setValue(instance, value)
Ensure that attribute is array of objects with the given factory type
625941bcf548e778e58cd468
def get_top(j): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if len(j['data']['children']) < 1: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> entry = None <NEW_LINE> for post in j['data']['children']: <NEW_LINE> <INDENT> if not post['data']['stickied']: <NEW_LINE> <INDENT> entry = post['data'] <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> return entry <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> return None
Get the top submission (excluding stickied posts) from a subreddit listing.
625941bc91af0d3eaac9b901
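A toy listing shaped like Reddit's JSON (fields trimmed to what get_top reads), showing the stickied post being skipped; assumes the get_top function from the entry above is in scope:

listing = {'data': {'children': [
    {'data': {'stickied': True, 'title': 'Subreddit rules'}},
    {'data': {'stickied': False, 'title': 'Actual top post'}},
]}}
print(get_top(listing)['title'])  # Actual top post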
def main(): <NEW_LINE> <INDENT> args = get_arguments() <NEW_LINE> file_parse(args.filename, args.columns) <NEW_LINE> return None
Runs functions for collapsing equivalent data. Uses argparse to get the file and columns to be converted.
625941bc45492302aab5e1ac
def uniquePathsWithObstacles(self, og): <NEW_LINE> <INDENT> if len(og) == 0 or len(og[0]) == 0: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> m, n =len(og), len(og[0]) <NEW_LINE> s = [[-1 for i in range(n)] for i in range(m)] <NEW_LINE> s[0][0] = 1 <NEW_LINE> for i in range(m): <NEW_LINE> <INDENT> for j in range(n): <NEW_LINE> <INDENT> if og[i][j] == 1: <NEW_LINE> <INDENT> s[i][j] = 0 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> for i in range(m-1): <NEW_LINE> <INDENT> if s[i+1][0] == -1: <NEW_LINE> <INDENT> s[i+1][0] = s[i][0] <NEW_LINE> <DEDENT> <DEDENT> for i in range(n-1): <NEW_LINE> <INDENT> if s[0][i+1] == -1: <NEW_LINE> <INDENT> s[0][i+1] = s[0][i] <NEW_LINE> <DEDENT> <DEDENT> for i in range(1,m): <NEW_LINE> <INDENT> for j in range(1,n): <NEW_LINE> <INDENT> if s[i][j] == -1: <NEW_LINE> <INDENT> s[i][j] = s[i-1][j] + s[i][j-1] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return s[-1][-1]
:type obstacleGrid: List[List[int]] :rtype: int
625941bc50812a4eaa59c210
def encrypt(self, link_protocol, key, digest): <NEW_LINE> <INDENT> new_key = copy.copy(key) <NEW_LINE> new_digest = digest.copy() <NEW_LINE> header_size = link_protocol.circ_id_size.size + 1 <NEW_LINE> payload_without_digest = self.pack(link_protocol)[header_size:] <NEW_LINE> new_digest.update(payload_without_digest) <NEW_LINE> cell = RelayCell(self.circ_id, self.command, self.data, new_digest, self.stream_id, self.recognized, self.unused) <NEW_LINE> header, payload = split(cell.pack(link_protocol), header_size) <NEW_LINE> return header + new_key.update(payload), new_key, new_digest
Encrypts our cell content to be sent with the given key. This provides back a tuple of the form... :: (payload (bytes), new_key (CipherContext), new_digest (HASH)) :param int link_protocol: link protocol version :param cryptography.hazmat.primitives.ciphers.CipherContext key: key established with the relay we're sending this cell to :param HASH digest: running digest held with the relay :returns: **tuple** with our encrypted payload and updated key/digest
625941bc0a50d4780f666d7b
def getNonInstallableProfiles(self): <NEW_LINE> <INDENT> return [ 'tetrd.theme:uninstall', ]
Hide uninstall profile from site-creation and quickinstaller
625941bc50485f2cf553cc84
def deal_card(): <NEW_LINE> <INDENT> cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 10] <NEW_LINE> card = random.choice(cards) <NEW_LINE> return card
Return a random card from the deck
625941bcf9cc0f698b1404e9
@api_view(['POST']) <NEW_LINE> def change_responsible(request): <NEW_LINE> <INDENT> response_dict = dict() <NEW_LINE> if request.method == 'POST': <NEW_LINE> <INDENT> data = request.data <NEW_LINE> try: <NEW_LINE> <INDENT> responsible = Partnership.objects.get(id=data['responsible_id']) <NEW_LINE> disciples = Partnership.objects.filter(responsible=responsible).all() <NEW_LINE> try: <NEW_LINE> <INDENT> new_responsible = Partnership.objects.get(id=data['new_responsible_id']) <NEW_LINE> disciples.update(responsible=new_responsible) <NEW_LINE> response_dict['message'] = u"The responsible person was successfully changed." <NEW_LINE> response_dict['status'] = True <NEW_LINE> <DEDENT> except Partnership.DoesNotExist: <NEW_LINE> <INDENT> response_dict['message'] = u"No such responsible person exists." <NEW_LINE> response_dict['status'] = False <NEW_LINE> <DEDENT> <DEDENT> except Partnership.DoesNotExist: <NEW_LINE> <INDENT> response_dict['message'] = u"No such responsible person exists." <NEW_LINE> response_dict['status'] = False <NEW_LINE> <DEDENT> <DEDENT> return Response(response_dict)
POST: (responsible_id, new_responsible_id)
625941bc4a966d76dd550ef8
def binary_crossentropy(y_pred, y_true): <NEW_LINE> <INDENT> with tf.name_scope("BinaryCrossentropy"): <NEW_LINE> <INDENT> return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( logits=y_pred, labels=y_true))
Binary Crossentropy. Computes sigmoid cross entropy between y_pred (logits) and y_true (labels). Measures the probability error in discrete classification tasks in which each class is independent and not mutually exclusive. For instance, one could perform multilabel classification where a picture can contain both an elephant and a dog at the same time. For brevity, let `x = logits`, `z = targets`. The logistic loss is x - x * z + log(1 + exp(-x)) To ensure stability and avoid overflow, the implementation uses max(x, 0) - x * z + log(1 + exp(-abs(x))) `y_pred` and `y_true` must have the same type and shape. Arguments: y_pred: `Tensor` of `float` type. Predicted values. y_true: `Tensor` of `float` type. Targets (labels).
625941bc0383005118ecf4d0
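A numeric sanity check of the overflow-safe identity quoted in the docstring above, comparing the naive and stabilized forms of the logistic loss:

import numpy as np

x = np.array([5.0, -5.0])   # logits
z = np.array([1.0, 0.0])    # targets
naive = x - x * z + np.log1p(np.exp(-x))
stable = np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))
print(np.allclose(naive, stable))  # True: both ~0.00672 per element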
def size(self): <NEW_LINE> <INDENT> if self._data['size'] == -1: <NEW_LINE> <INDENT> return self._plot.opts['size'] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self._data['size']
Return the size of this spot. If the spot has no explicit size set, then return the ScatterPlotItem's default size instead.
625941bc3346ee7daa2b2c55
def build_special_air_table(tab_name): <NEW_LINE> <INDENT> fout_name = 'R' + tab_name + '.m' <NEW_LINE> with open(fout_name,'w') as fout: <NEW_LINE> <INDENT> fout.write('function val = R'+tab_name+'(alpha_deg)\n') <NEW_LINE> fout.write('% in: angle of attach, deg\n% out: Table '+tab_name+' value\n') <NEW_LINE> fout.write('TBL'+tab_name+'=[\n') <NEW_LINE> fout.write('-360 1.0;\n') <NEW_LINE> fout.write('360 1.0;];\n') <NEW_LINE> fout.write('val = interp1(TBL'+tab_name+'(:,1),TBL'+tab_name+'(:,2),alpha_deg);\n\nend\n')
Build a special air table that is missing from the air file but required in Y. Guilluame's formulation. By default all values of the special table are set to '1'. Input: tab_name : numeric name of the table, 3 or 4 digits (e.g. 1525 or 536). No leading zeros on the name.
625941bccc40096d6159583d
def CallReader(self, readFunc, parents, state): <NEW_LINE> <INDENT> getattr(self, readFunc)(parents, state)
Call the function specified by name string readFunc on self with arguments parents and state: i.e. self.readFunc(parents, state) Params: self: Object to call readFunc callable on readFunc: String name of a callable bound to self parents: Arbitrary argument to be passed to the callable state: Arbitrary argument to be passed to the callable
625941bc92d797404e304075
def set_subsections_needed(self, subsections): <NEW_LINE> <INDENT> self._subsections_needed = subsections
Marks the subsections that need to be generated; if this is given then the others are ignored. :: r = Report() r.set_subsections_needed(['estimator']) with r.subsection('estimator') as sub: # ok with r.subsection('model') as sub: # ignored
625941bc56ac1b37e62640c0
def get_semantic_name(model, rc_loc, comb_type): <NEW_LINE> <INDENT> if model == 'kernelcomb' and rc_loc != -1: <NEW_LINE> <INDENT> semantic_model_name = 'TAI' <NEW_LINE> <DEDENT> elif model == 'kernelcomb' and rc_loc == -1 and comb_type == 'w_avg': <NEW_LINE> <INDENT> semantic_model_name = 'TWI' <NEW_LINE> <DEDENT> elif model == 'simplecomb' and comb_type == 'w_avg': <NEW_LINE> <INDENT> semantic_model_name = 'bi-TW' <NEW_LINE> <DEDENT> elif model == 'simplecomb' and comb_type == 'avg': <NEW_LINE> <INDENT> semantic_model_name = 'bi-SA' <NEW_LINE> <DEDENT> elif model == 'mcnet': <NEW_LINE> <INDENT> semantic_model_name = 'MC-Net' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('{model, rc_loc, comb_type} = {%s, %d, %s} does not have a name' % (model, rc_loc, comb_type)) <NEW_LINE> semantic_model_name = 'Unknown' <NEW_LINE> <DEDENT> return semantic_model_name
Generate the semantic name of the architecture determined by the given arguments.
625941bc7047854f462a12f8
def clean_cache(key): <NEW_LINE> <INDENT> if key == 'all': <NEW_LINE> <INDENT> cache.clear() <NEW_LINE> <DEDENT> elif key != 'all' and cache.get(key): <NEW_LINE> <INDENT> cache.delete(key) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pass
Delete the index, archive, category, and tag caches after publishing a post; delete the matching post cache after updating a post; delete the corresponding cache after adding a status or changing the friend-links page. :param key: cache prefix
625941bcb545ff76a8913d09
def _frame_annotation(times, show_date, show_text): <NEW_LINE> <INDENT> is_sequence = isinstance(show_text, (list, tuple, np.ndarray)) <NEW_LINE> if is_sequence and (len(show_text) == 1): <NEW_LINE> <INDENT> show_text, is_sequence = show_text[0], False <NEW_LINE> <DEDENT> elif is_sequence and (len(show_text) < len(times)): <NEW_LINE> <INDENT> raise ValueError(f'Annotations supplied via `show_text` must have ' f'either a length of 1, or a length >= the number ' f'of timesteps in `ds` (n={len(times)})') <NEW_LINE> <DEDENT> times_list = (times.dt.strftime(show_date).values if show_date else [None] * len(times)) <NEW_LINE> text_list = show_text if is_sequence else [show_text] * len(times) <NEW_LINE> annotation_list = ['\n'.join([str(i) for i in (a, b) if i]) for a, b in zip(times_list, text_list)] <NEW_LINE> return annotation_list
Creates a custom annotation for the top-right of the animation by converting a `xarray.DataArray` of times into strings, and combining this with a custom text annotation. Handles cases where `show_date=False/None`, `show_text=False/None`, or where `show_text` is a list of strings.
625941bc009cb60464c6329f
def _update_json(cur_dct, tmp_dct, json_name): <NEW_LINE> <INDENT> js_data = _stream_json(name_jsfile=json_name, mode='r') <NEW_LINE> flag_update = False <NEW_LINE> for key in cur_dct.keys(): <NEW_LINE> <INDENT> _print_mono_str(33,'*') <NEW_LINE> print('Changes to ',key,':') <NEW_LINE> key_nmb = int(key[4:]) - 1 <NEW_LINE> if len(cur_dct[key]) != len(tmp_dct[key]): <NEW_LINE> <INDENT> if len(cur_dct[key]) > len(tmp_dct[key]): <NEW_LINE> <INDENT> print('- A new command appeared: ', cur_dct[key][-1]) <NEW_LINE> _refresh_json(nmb_prm=key_nmb, wrk_dct=cur_dct, key_dct=key, js_data=js_data, js_name=json_name) <NEW_LINE> flag_update = True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('- The last command was deleted: ', tmp_dct[key][-1]) <NEW_LINE> print('- Current command: ', cur_dct[key][-1]) <NEW_LINE> _refresh_json(nmb_prm=key_nmb, wrk_dct=cur_dct, key_dct=key, js_data=js_data, js_name=json_name) <NEW_LINE> flag_update = True <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if cur_dct[key] != tmp_dct[key]: <NEW_LINE> <INDENT> print('- The last command changed: ') <NEW_LINE> print('- Was: ',tmp_dct[key][-1]) <NEW_LINE> print('- Now: ', cur_dct[key][-1]) <NEW_LINE> _refresh_json(nmb_prm=key_nmb, wrk_dct=cur_dct, key_dct=key, js_data=js_data, js_name=json_name) <NEW_LINE> flag_update = True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('no changes.') <NEW_LINE> <DEDENT> <DEDENT> except IndexError as ind_err: <NEW_LINE> <INDENT> print('- Error, all commands have been deleted', ind_err) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if flag_update is True: <NEW_LINE> <INDENT> tmp_dct.clear() <NEW_LINE> tmp_dct = cur_dct.copy() <NEW_LINE> <DEDENT> print('json updated')
:param cur_dct: working dictionary with data from the user files :param tmp_dct: copy of the working dictionary, used to find the difference :param json_name: name of the json file
625941bc099cdd3c635f0b48
def get_start_date(self): <NEW_LINE> <INDENT> active_handle = self.get_active('Family') <NEW_LINE> active = self.db.get_family_from_handle(active_handle) <NEW_LINE> event = get_marriage_or_fallback(self.db, active) <NEW_LINE> return event.get_date_object() if event else None
Get the start date for a family, usually a marriage date, or something close to marriage.
625941bca219f33f34628859
def test_get_country_business_area_code(self): <NEW_LINE> <INDENT> area_code = "10101" <NEW_LINE> with schema_context(SCHEMA_NAME): <NEW_LINE> <INDENT> country_uat = CountryFactory(name="UAT") <NEW_LINE> self.mapper.countries = {"UAT": country_uat} <NEW_LINE> country = CountryFactory(business_area_code=area_code) <NEW_LINE> res = self.mapper._get_country(area_code) <NEW_LINE> <DEDENT> self.assertEqual(res, country) <NEW_LINE> self.assertCountEqual(self.mapper.countries, { area_code: country, "UAT": country_uat })
Check that we get country that matches business area code
625941bc851cf427c661a3fe
def test_file_no_contents(self): <NEW_LINE> <INDENT> files = open("/Users/Kipyegon/Desktop/Amity/storage/file/people.txt", "w") <NEW_LINE> files.write("") <NEW_LINE> files.close() <NEW_LINE> self.assertEqual(self.amity.load_people(), "The file has no contents") <NEW_LINE> os.remove("/Users/Kipyegon/Desktop/Amity/storage/file/people.txt")
Test that load_people reports when the file has no contents
625941bc377c676e91272096
def release_readlock(lockdir_name): <NEW_LINE> <INDENT> if os.path.exists(lockdir_name) and os.path.isdir(lockdir_name): <NEW_LINE> <INDENT> os.rmdir(lockdir_name)
Release a previously obtained readlock. Parameters ---------- lockdir_name : str Name of the previously obtained readlock
625941bcf7d966606f6a9eed
def deploy_stack(environment, debug, machines, deploy_charm): <NEW_LINE> <INDENT> client = client_from_config(environment, None, debug=debug) <NEW_LINE> running_domains = dict() <NEW_LINE> if client.env.provider == 'maas': <NEW_LINE> <INDENT> for machine in machines: <NEW_LINE> <INDENT> name, URI = machine.split('@') <NEW_LINE> if verify_libvirt_domain(URI, name, LIBVIRT_DOMAIN_RUNNING): <NEW_LINE> <INDENT> print("%s is already running" % name) <NEW_LINE> running_domains = {machine: True} <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> running_domains = {machine: False} <NEW_LINE> print("Attempting to start %s at %s" % (name, URI)) <NEW_LINE> status_msg = start_libvirt_domain(URI, name) <NEW_LINE> print("%s" % status_msg) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> client.destroy_environment() <NEW_LINE> client.bootstrap() <NEW_LINE> try: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> client.get_status() <NEW_LINE> <DEDENT> except CannotConnectEnv: <NEW_LINE> <INDENT> print("Status got Unable to connect to env. Retrying...") <NEW_LINE> client.get_status() <NEW_LINE> <DEDENT> client.wait_for_started() <NEW_LINE> if deploy_charm: <NEW_LINE> <INDENT> series = client.env.get_option('default-series', 'trusty') <NEW_LINE> charm_path = local_charm_path( 'dummy-source', juju_ver=client.version, series=series) <NEW_LINE> client.deploy(charm_path, series=series) <NEW_LINE> client.wait_for_started() <NEW_LINE> <DEDENT> <DEDENT> except subprocess.CalledProcessError as e: <NEW_LINE> <INDENT> if getattr(e, 'stderr', None) is not None: <NEW_LINE> <INDENT> sys.stderr.write(e.stderr) <NEW_LINE> <DEDENT> raise <NEW_LINE> <DEDENT> <DEDENT> finally: <NEW_LINE> <INDENT> client.destroy_environment() <NEW_LINE> if client.env.provider == 'maas': <NEW_LINE> <INDENT> sleep(90) <NEW_LINE> for machine, running in running_domains.items(): <NEW_LINE> <INDENT> name, URI = machine.split('@') <NEW_LINE> if running: <NEW_LINE> <INDENT> print("WARNING: %s at %s was running when deploy_job " "started. Shutting it down to ensure a clean " "environment." % (name, URI)) <NEW_LINE> <DEDENT> status_msg = stop_libvirt_domain(URI, name) <NEW_LINE> print("%s" % status_msg)
"Deploy a test stack in the specified environment. :param environment: The name of the desired environment.
625941bcc432627299f04b30
def getDelays(self,vel): <NEW_LINE> <INDENT> dlay = self.D / (1000.0 * vel) <NEW_LINE> dlayStep = np.around(dlay / self.dt).astype(np.int64) <NEW_LINE> maxDlay = np.int64(np.max(dlayStep)) <NEW_LINE> return dlayStep, maxDlay
Return maximum delay and delay steps in samples
625941bcb830903b967e97fa
def AtomDiag(*args, **kwargs): <NEW_LINE> <INDENT> if len(args) > 0: h = args[0] <NEW_LINE> elif 'h' in kwargs: h = kwargs['h'] <NEW_LINE> else: <NEW_LINE> <INDENT> raise RuntimeError("You must provide a Hamiltonian!") <NEW_LINE> <DEDENT> if not isinstance(h, Operator): <NEW_LINE> <INDENT> raise RuntimeError("The Hamiltonian must be an Operator object!") <NEW_LINE> <DEDENT> if any(abs(term[-1].imag) > 0 for term in h): <NEW_LINE> <INDENT> return AtomDiagComplex(*args, **kwargs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return AtomDiagReal(*args, **kwargs)
Lightweight exact diagonalization solver
625941bc097d151d1a222d48
def test_rows_nonzero(): <NEW_LINE> <INDENT> rows_nonzero = [] <NEW_LINE> for i in range(len(placement_map)): <NEW_LINE> <INDENT> if len(set(placement_map[i])) > 1 and 0 in placement_map[i]: <NEW_LINE> <INDENT> rows_nonzero.append(i) <NEW_LINE> <DEDENT> <DEDENT> return rows_nonzero
Cycles through all rows in placement_map and returns a list of all row indices that aren't all zero values
625941bc462c4b4f79d1d5bc
def check_name_availability( self, location, capacity_parameters, **kwargs ): <NEW_LINE> <INDENT> cls = kwargs.pop('cls', None) <NEW_LINE> error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } <NEW_LINE> error_map.update(kwargs.pop('error_map', {})) <NEW_LINE> api_version = "2021-01-01" <NEW_LINE> content_type = kwargs.pop("content_type", "application/json") <NEW_LINE> accept = "application/json" <NEW_LINE> url = self.check_name_availability.metadata['url'] <NEW_LINE> path_format_arguments = { 'location': self._serialize.url("location", location, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } <NEW_LINE> url = self._client.format_url(url, **path_format_arguments) <NEW_LINE> query_parameters = {} <NEW_LINE> query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') <NEW_LINE> header_parameters = {} <NEW_LINE> header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') <NEW_LINE> header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') <NEW_LINE> body_content_kwargs = {} <NEW_LINE> body_content = self._serialize.body(capacity_parameters, 'CheckCapacityNameAvailabilityParameters') <NEW_LINE> body_content_kwargs['content'] = body_content <NEW_LINE> request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) <NEW_LINE> pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) <NEW_LINE> response = pipeline_response.http_response <NEW_LINE> if response.status_code not in [200]: <NEW_LINE> <INDENT> map_error(status_code=response.status_code, response=response, error_map=error_map) <NEW_LINE> error = self._deserialize(_models.ErrorResponse, response) <NEW_LINE> raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) <NEW_LINE> <DEDENT> deserialized = self._deserialize('CheckCapacityNameAvailabilityResult', pipeline_response) <NEW_LINE> if cls: <NEW_LINE> <INDENT> return cls(pipeline_response, deserialized, {}) <NEW_LINE> <DEDENT> return deserialized
Check the name availability in the target location. :param location: The region name which the operation will lookup into. :type location: str :param capacity_parameters: The name of the capacity. :type capacity_parameters: ~azure.mgmt.powerbidedicated.models.CheckCapacityNameAvailabilityParameters :keyword callable cls: A custom type or function that will be passed the direct response :return: CheckCapacityNameAvailabilityResult, or the result of cls(response) :rtype: ~azure.mgmt.powerbidedicated.models.CheckCapacityNameAvailabilityResult :raises: ~azure.core.exceptions.HttpResponseError
625941bc3d592f4c4ed1cf61
def set_timer(self, *args, **kwargs) -> _Timer: <NEW_LINE> <INDENT> return _Timer(self, *args, **kwargs)
timer(*args, **kwargs) -> _Timer Set a new timer registered with this event dispatcher. Arguments are passed unchanged to _Timer().
625941bd0c0af96317bb80d4
def __init__(self): <NEW_LINE> <INDENT> self.swagger_types = { 'kind': 'str', 'api_version': 'str', 'metadata': 'V1ObjectMeta', 'conditions': 'list[V1ComponentCondition]' } <NEW_LINE> self.attribute_map = { 'kind': 'kind', 'api_version': 'apiVersion', 'metadata': 'metadata', 'conditions': 'conditions' } <NEW_LINE> self._kind = None <NEW_LINE> self._api_version = None <NEW_LINE> self._metadata = None <NEW_LINE> self._conditions = None
V1ComponentStatus - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition.
625941bd1f5feb6acb0c4a40
def test_inventoryregularendpointpost(self): <NEW_LINE> <INDENT> pass
Test case for inventoryregularendpointpost Create Physical (Port) Endpoint # noqa: E501
625941bc26238365f5f0ed56
def __init__(self, gym_env, obs_type): <NEW_LINE> <INDENT> self.gym_env = gym_env <NEW_LINE> self.episode_return = None <NEW_LINE> self.episode_step = None <NEW_LINE> self.obs_type = obs_type <NEW_LINE> if obs_type == "image": <NEW_LINE> <INDENT> self.obs_shape = gym_env.observation_space.shape <NEW_LINE> <DEDENT> elif obs_type in ["VKB", "absVKB"]: <NEW_LINE> <INDENT> self.obs_shape = self.gym_env.obs_shape
:param gym_env: :param obs_type: image, KB or VKB
625941bd91f36d47f21ac3db
def map_parcel_data_to_vacant_table(merged_parcel_data): <NEW_LINE> <INDENT> logging.debug('map data to columns') <NEW_LINE> for column, source in vacant_table_fields.items(): <NEW_LINE> <INDENT> if column == source: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> elif source == 'TODO': <NEW_LINE> <INDENT> logging.debug('-- TODO implement field %s' % column) <NEW_LINE> <DEDENT> elif type(source) == str: <NEW_LINE> <INDENT> merged_parcel_data[column] = merged_parcel_data[source] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> merged_parcel_data[column] = merged_parcel_data.apply(source, axis=1)
Map aggregated parcel data to the fields used by the vacancy app. Mapping occurs in-place. Arguments: merged_parcel_data -- output of merging Prcl with other parcel related dataframes
625941bd56b00c62f0f14543
def make_abstract_dist(req): <NEW_LINE> <INDENT> if req.editable: <NEW_LINE> <INDENT> return IsSDist(req) <NEW_LINE> <DEDENT> elif req.link and req.link.is_wheel: <NEW_LINE> <INDENT> return IsWheel(req) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return IsSDist(req)
Factory to make an abstract dist object. Preconditions: Either an editable req with a source_dir, or satisfied_by or a wheel link, or a non-editable req with a source_dir. :return: A concrete DistAbstraction.
625941bd627d3e7fe0d68d3a
def get_K_in_pixel_coordinates(self, width=None, height=None): <NEW_LINE> <INDENT> w = width or self.width <NEW_LINE> h = height or self.height <NEW_LINE> f = self.focal * max(w, h) <NEW_LINE> return np.array([[f, 0, 0.5 * (w - 1)], [0, f, 0.5 * (h - 1)], [0, 0, 1.0]])
The calibration matrix that maps to pixel coordinates. Coordinates (0,0) correspond to the center of the top-left pixel, and (width - 1, height - 1) to the center of the bottom-right pixel. You can optionally pass the width and height of the image, in case you are using a resized version of the original image.
625941bdf9cc0f698b1404ea
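A worked instance of the intrinsics above: with a normalized focal of 0.85 on a 640x480 image, f = 0.85 * 640 = 544 pixels and the principal point lands at the centre of the pixel grid (the numbers are illustrative):

import numpy as np

w, h, focal = 640, 480, 0.85       # illustrative values
f = focal * max(w, h)              # 544.0 pixels
K = np.array([[f, 0, 0.5 * (w - 1)],
              [0, f, 0.5 * (h - 1)],
              [0, 0, 1.0]])
print(K)  # principal point (319.5, 239.5)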
@app.route("/name", methods=["GET"]) <NEW_LINE> def getData(): <NEW_LINE> <INDENT> data = { "name": "Yi Zhao" } <NEW_LINE> return jsonify(data)
Returns the data dictionary below to the caller as JSON
625941bd8e7ae83300e4aeb8
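A hypothetical client call against the Flask route above; the host and port are assumptions for a local development server:

import requests

resp = requests.get('http://localhost:5000/name')
print(resp.json())  # {'name': 'Yi Zhao'}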
def __init__(self, project_id: str, service_account_key_file: Optional[str] = None) -> None: <NEW_LINE> <INDENT> credentials = cloud_auth.get_credentials(service_account_key_file) <NEW_LINE> self.client = storage.Client(project=project_id, credentials=credentials)
Initialize new instance of CloudStorageUtils. Args: project_id: GCP project id. service_account_key_file: File containing service account key. If not passed the default credential will be used. There are following ways to create service accounts - 1) Use `build_service_client` method from `cloud_auth` module. 2) Use `gcloud` command line utility as documented here - https://cloud.google.com/iam/docs/creating-managing-service-account-keys
625941bd4f6381625f11492a
def Register_GetDR2(): <NEW_LINE> <INDENT> return _x64dbgapi.Register_GetDR2()
Register_GetDR2() -> duint
625941bd507cdc57c6306bc0
def user_data(self, access_token): <NEW_LINE> <INDENT> params = {'access_token': access_token} <NEW_LINE> url = GITHUB_API_URL + '/user?' + urllib.urlencode(params) <NEW_LINE> try: <NEW_LINE> <INDENT> return simplejson.load(urllib.urlopen(url)) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> return None
Loads user data from service
625941bd0a366e3fb873e704
def format_value(self, value, format_target): <NEW_LINE> <INDENT> if self.type.type != ColumnType.count and self.type.type != ColumnType.measure: <NEW_LINE> <INDENT> return value <NEW_LINE> <DEDENT> if format_target not in POSSIBLE_FORMAT_TARGETS: <NEW_LINE> <INDENT> raise ValueError("Unknown format target") <NEW_LINE> <DEDENT> if value is None or value == "": <NEW_LINE> <INDENT> return "" <NEW_LINE> <DEDENT> if isinstance(value, str): <NEW_LINE> <INDENT> number_str = util.remove_unit(value.strip()) <NEW_LINE> number = Decimal(number_str) <NEW_LINE> <DEDENT> elif isinstance(value, Decimal): <NEW_LINE> <INDENT> number = value <NEW_LINE> number_str = util.print_decimal(number) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise TypeError("Unexpected number type " + str(type(value))) <NEW_LINE> <DEDENT> if number.is_nan(): <NEW_LINE> <INDENT> return "NaN" <NEW_LINE> <DEDENT> elif number == inf: <NEW_LINE> <INDENT> return "Inf" <NEW_LINE> <DEDENT> elif number == -inf: <NEW_LINE> <INDENT> return "-Inf" <NEW_LINE> <DEDENT> if self.scale_factor is not None: <NEW_LINE> <INDENT> number *= self.scale_factor <NEW_LINE> <DEDENT> assert number.is_finite() <NEW_LINE> if ( self.number_of_significant_digits is None and self.type.type != ColumnType.measure and format_target == "tooltip_stochastic" ): <NEW_LINE> <INDENT> return util.print_decimal(round(number, DEFAULT_TOOLTIP_PRECISION)) <NEW_LINE> <DEDENT> number_of_significant_digits = self.get_number_of_significant_digits( format_target ) <NEW_LINE> max_dec_digits = ( self.type.max_decimal_digits if self.type.type == ColumnType.measure else 0 ) <NEW_LINE> if number_of_significant_digits is not None: <NEW_LINE> <INDENT> current_significant_digits = _get_significant_digits(number_str) <NEW_LINE> return _format_number( number, current_significant_digits, number_of_significant_digits, max_dec_digits, format_target, ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return util.print_decimal(number)
Format a value nicely for human-readable output (including rounding). @param value: the value to format @param format_target: the target the value should be formatted for @return: a formatted String representation of the given value.
625941bd3c8af77a43ae368a
def get(bot, module=False, default=False): <NEW_LINE> <INDENT> if module: <NEW_LINE> <INDENT> fn = mod_name % (bot, str(module)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> fn = db_name % bot <NEW_LINE> <DEDENT> module_filename = os.path.join(os.path.expanduser('~/.code'), fn) <NEW_LINE> if not os.path.exists(module_filename): <NEW_LINE> <INDENT> return default <NEW_LINE> <DEDENT> with open(module_filename, 'r') as f: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> data = json.loads(f.read()) <NEW_LINE> if 'list' in data: <NEW_LINE> <INDENT> data = data['list'] <NEW_LINE> <DEDENT> <DEDENT> except ValueError: <NEW_LINE> <INDENT> data = str(f.read()) <NEW_LINE> <DEDENT> <DEDENT> return data
get(code.default, modulename)
625941bd1d351010ab855a09
def get_buildings(self): <NEW_LINE> <INDENT> url = f'{self.API_URL}/buildings/' <NEW_LINE> headers = { 'accept': 'application/json', 'Authorization': self._token, } <NEW_LINE> try: <NEW_LINE> <INDENT> response = requests.request("GET", url, headers=headers) <NEW_LINE> if response.status_code != 200: <NEW_LINE> <INDENT> return None, [f'Expected 200 response from BETTER but got {response.status_code}: {response.content}'] <NEW_LINE> <DEDENT> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> return None, [f'Unexpected error creating BETTER portfolio: {e}'] <NEW_LINE> <DEDENT> return response.json(), []
Get list of all buildings :return: tuple(list[dict], list[str]), list of buildings followed by list of errors
625941bd4527f215b584c347
def flush_account(self): <NEW_LINE> <INDENT> self._shutdown_connecting_dtp() <NEW_LINE> if self.data_channel is not None: <NEW_LINE> <INDENT> if not self.data_channel.transfer_in_progress(): <NEW_LINE> <INDENT> self.data_channel.close() <NEW_LINE> self.data_channel = None <NEW_LINE> <DEDENT> <DEDENT> username = self.username <NEW_LINE> if self.authenticated and username: <NEW_LINE> <INDENT> self.on_logout(username) <NEW_LINE> <DEDENT> self.authenticated = False <NEW_LINE> self.username = "" <NEW_LINE> self.password = "" <NEW_LINE> self.attempted_logins = 0 <NEW_LINE> self._current_type = 'a' <NEW_LINE> self._restart_position = 0 <NEW_LINE> self._quit_pending = False <NEW_LINE> self._in_dtp_queue = None <NEW_LINE> self._rnfr = None <NEW_LINE> self._out_dtp_queue = None
Flush account information by clearing attributes that need to be reset on a REIN or new USER command.
625941bd8da39b475bd64e5d
def perform_hit_test(bbx_start, h, w, point): <NEW_LINE> <INDENT> if (bbx_start[0] <= point[0] and bbx_start[0] + h >= point[0] and bbx_start[1] + w >= point[1] and bbx_start[1] <= point[1]): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False
Check if a point is in the bounding box.
625941bdcb5e8a47e48b799a
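A quick worked example: a box anchored at (10, 20) with height 5 and width 8 covers rows 10 through 15 and columns 20 through 28, so:

print(perform_hit_test((10, 20), 5, 8, (12, 25)))  # True: inside the box
print(perform_hit_test((10, 20), 5, 8, (16, 25)))  # False: row 16 is past 10 + 5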
def __init__(self, parent_item, item_list, tree_name = AUTO_TREE): <NEW_LINE> <INDENT> self.start_offset = c_int(0) <NEW_LINE> self.old_tree = c_void_p(0) <NEW_LINE> self._params = PSpush_tree_params(pointer(parent_item.pointer), None, pointer(self.start_offset), pointer(self.old_tree)) <NEW_LINE> self._pop_params = PSpop_tree_params(pointer(self.start_offset), pointer(self.old_tree)) <NEW_LINE> self._subitems = [parent_item] + item_list <NEW_LINE> self._tree_name = tree_name
@summary: A constructor. @param parent_item: The subtree's parent item. @param item_list: The subtree's children - a list of items. @param tree_name: Used by Wireshark for remembering which trees are expanded. Pass AUTO_TREE to use the name of parent_item. (default: AUTO_TREE)
625941bd9b70327d1c4e0cc0
@register.assignment_tag <NEW_LINE> def get_most_commented_posts(count=5): <NEW_LINE> <INDENT> return Post.published.annotate( total_comments=Count('comments') ).order_by('-total_comments')[:count]
Works around the limitation that one template tag cannot be nested inside another: <h3>Most commented posts</h3> {% get_most_commented_posts as most_commented_posts %} <ul> {% for post in most_commented_posts %} <li> <a href="{{ post.get_absolute_url }}">{{ post.title }}</a> </li> {% endfor %} </ul> :param count: number of posts to return (default 5) :return: the ``count`` published posts with the most comments
625941bd71ff763f4b549573
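The same query the tag runs, sketched at shell level; the import path for Post is assumed here, not taken from the source:

from django.db.models import Count
from blog.models import Post  # assumed app path

top_posts = (Post.published
             .annotate(total_comments=Count('comments'))
             .order_by('-total_comments')[:5])
for post in top_posts:
    print(post.title, post.total_comments)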
def thread_to_dict(thread): <NEW_LINE> <INDENT> parsed = {'name': thread.xpath('.//a')[0].text} <NEW_LINE> parsed['url'] = thread.xpath('.//a')[0].attrib['href'] <NEW_LINE> raw_last_change = thread.xpath('.//span[@title]' )[0].attrib['title'] <NEW_LINE> last_change = date_parse(raw_last_change) <NEW_LINE> parsed['month'] = last_change.month <NEW_LINE> info = thread.xpath('.//div[contains(@style,"right")]')[0] <NEW_LINE> parsed['seen'] = int(info.xpath('.//span[@class]')[3].text.split()[0]) <NEW_LINE> parsed['posts'] = int(info.xpath('.//span[@class]')[4].text.split()[0]) <NEW_LINE> return parsed
Serialize a Group thread into a Python dictionary.
625941bdc4546d3d9de7291e
def _build(self, recipe: Recipe, src_dir: str) -> None: <NEW_LINE> <INDENT> script = recipe.functions["build"] <NEW_LINE> if not script: <NEW_LINE> <INDENT> self.adapter.debug("Skipping build (nothing to do)") <NEW_LINE> return <NEW_LINE> <DEDENT> self.adapter.info("Building artifacts") <NEW_LINE> epoch = int(recipe.timestamp.timestamp()) <NEW_LINE> for filename in util.list_tree(src_dir): <NEW_LINE> <INDENT> os.utime(filename, (epoch, epoch)) <NEW_LINE> <DEDENT> mount_src = "/src" <NEW_LINE> repo_src = "/repo" <NEW_LINE> uid = os.getuid() <NEW_LINE> pre_script: List[str] = [] <NEW_LINE> build_deps = [] <NEW_LINE> host_deps = [] <NEW_LINE> for dep in recipe.makedepends: <NEW_LINE> <INDENT> if dep.kind == DependencyKind.Build: <NEW_LINE> <INDENT> build_deps.append(dep.package) <NEW_LINE> <DEDENT> elif dep.kind == DependencyKind.Host: <NEW_LINE> <INDENT> host_deps.append(dep.package) <NEW_LINE> <DEDENT> <DEDENT> if build_deps: <NEW_LINE> <INDENT> pre_script.extend( ( "export DEBIAN_FRONTEND=noninteractive", "apt-get update -qq", "apt-get install -qq --no-install-recommends" ' -o Dpkg::Options::="--force-confdef"' ' -o Dpkg::Options::="--force-confold"' " -- " + " ".join(build_deps), ) ) <NEW_LINE> <DEDENT> if host_deps: <NEW_LINE> <INDENT> opkg_conf_path = "$SYSROOT/etc/opkg/opkg.conf" <NEW_LINE> pre_script.extend( ( 'echo -n "dest root /', "arch all 100", "arch armv7-3.2 160", "src/gz entware https://bin.entware.net/armv7sf-k3.2", "arch rmall 200", "src/gz toltec-rmall file:///repo/rmall", f'" > "{opkg_conf_path}"', ) ) <NEW_LINE> if recipe.arch != "rmall": <NEW_LINE> <INDENT> pre_script.extend( ( f'echo -n "arch {recipe.arch} 250', f"src/gz toltec-{recipe.arch} file:///repo/{recipe.arch}", f'" >> "{opkg_conf_path}"', ) ) <NEW_LINE> <DEDENT> pre_script.extend( ( "opkg update --verbosity=0", "opkg install --verbosity=0 --no-install-recommends" " -- " + " ".join(host_deps), ) ) <NEW_LINE> <DEDENT> logs = bash.run_script_in_container( self.docker, image=self.IMAGE_PREFIX + recipe.image, mounts=[ docker.types.Mount( type="bind", source=os.path.abspath(src_dir), target=mount_src, ), docker.types.Mount( type="bind", source=os.path.abspath(self.repo_dir), target=repo_src, ), ], variables={ **recipe.variables, **recipe.custom_variables, "srcdir": mount_src, }, script="\n".join( ( *pre_script, f'cd "{mount_src}"', script, f'chown -R {uid}:{uid} "{mount_src}"', ) ), ) <NEW_LINE> self._print_logs(logs, "build()")
Build artifacts for a recipe.
625941bd82261d6c526ab388
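A hedged sketch of the run-script-in-a-container pattern used above, written against the plain docker SDK rather than Toltec's bash helper; the image, mount paths, and script are illustrative only:

import docker

client = docker.from_env()
# Run a short script in a throwaway container with the source bind-mounted.
logs = client.containers.run(
    'debian:bullseye',
    ['bash', '-c', 'apt-get update -qq && echo build done'],
    mounts=[docker.types.Mount(type='bind', source='/tmp/src', target='/src')],
    remove=True,
)
print(logs.decode())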
def check_category(filterfunction): <NEW_LINE> <INDENT> has_category = [] <NEW_LINE> classes = (checks.TeeChecker, checks.UnitChecker) <NEW_LINE> for klass in classes: <NEW_LINE> <INDENT> categories = getattr(klass, 'categories', None) <NEW_LINE> has_category.append(categories is not None and filterfunction.__name__ in categories) <NEW_LINE> <DEDENT> return True in has_category
Checks whether ``filterfunction`` has defined a category or not.
625941bd10dbd63aa1bd2a93
def CutVideo(fileName, startTime, endTime): <NEW_LINE> <INDENT> fvd.GetVideoSection(fileName, startTime, endTime)
Cut the section of the video file between startTime and endTime.
625941bd01c39578d7e74d27
def parse(self, handle): <NEW_LINE> <INDENT> alphabet = self.alphabet <NEW_LINE> records = GenBankScanner(debug=0).parse_cds_features(handle, alphabet) <NEW_LINE> return records
Start parsing the file, and return a SeqRecord generator.
625941bd29b78933be1e559d
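A hedged usage sketch with Biopython's public SeqIO interface, which wraps scanner-based parsers like the one above; the filename is illustrative:

from Bio import SeqIO

with open('example.gbk') as handle:
    for record in SeqIO.parse(handle, 'genbank'):
        print(record.id, len(record.seq))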
def spectrum(H, wlist, c_ops, a_op, b_op, solver="es", use_pinv=False): <NEW_LINE> <INDENT> if debug: <NEW_LINE> <INDENT> print(inspect.stack()[0][3]) <NEW_LINE> <DEDENT> if solver == "es": <NEW_LINE> <INDENT> return _spectrum_es(H, wlist, c_ops, a_op, b_op) <NEW_LINE> <DEDENT> elif solver == "pi": <NEW_LINE> <INDENT> return _spectrum_pi(H, wlist, c_ops, a_op, b_op, use_pinv) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("Unrecognized choice of solver %s (use es or pi)." % solver)
Calculate the spectrum of the correlation function :math:`\lim_{t \to \infty} \left<A(t+\tau)B(t)\right>`, i.e., the Fourier transform of the correlation function: .. math:: S(\omega) = \int_{-\infty}^{\infty} \lim_{t \to \infty} \left<A(t+\tau)B(t)\right> e^{-i\omega\tau} d\tau. using the solver indicated by the `solver` parameter. Note: this spectrum is only defined for stationary statistics (uses steady state rho0) Parameters ---------- H : :class:`qutip.qobj` system Hamiltonian. wlist : array_like list of frequencies for :math:`\omega`. c_ops : list list of collapse operators. a_op : Qobj operator A. b_op : Qobj operator B. solver : str choice of solver (`es` for exponential series and `pi` for pseudo-inverse). use_pinv : bool For use with the `pi` solver: if `True` use numpy's pinv method, otherwise use a generic solver. Returns ------- spectrum : array An array with spectrum :math:`S(\omega)` for the frequencies specified in `wlist`.
625941bdbde94217f3682ce2
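A hedged usage sketch in QuTiP's style, computing the emission spectrum of a lossy cavity; the operators and the loss rate are illustrative:

import numpy as np
from qutip import destroy, spectrum

N = 10
a = destroy(N)              # cavity annihilation operator
H = a.dag() * a             # cavity Hamiltonian
c_ops = [np.sqrt(0.1) * a]  # photon loss at rate 0.1
wlist = np.linspace(-2, 2, 200)
S = spectrum(H, wlist, c_ops, a.dag(), a, solver='es')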
def setUp(self): <NEW_LINE> <INDENT> self.app = create_app() <NEW_LINE> self.client = self.app.test_client <NEW_LINE> self.database_name = "trivia" <NEW_LINE> self.database_path = "postgres://{}/{}".format( 'zicsx:23321@localhost:5433', self.database_name) <NEW_LINE> setup_db(self.app, self.database_path) <NEW_LINE> with self.app.app_context(): <NEW_LINE> <INDENT> self.db = SQLAlchemy() <NEW_LINE> self.db.init_app(self.app) <NEW_LINE> self.db.create_all() <NEW_LINE> <DEDENT> self.new_question = { 'question': 'Who is the director of the movie Parasite (2019) ?', 'answer': 'Bong Joon-ho', 'difficulty': 1, 'category': '5' }
Define test variables and initialize app.
625941bdbf627c535bc130bb
def discover(self): <NEW_LINE> <INDENT> new = [] <NEW_LINE> for d in self.appdirs: <NEW_LINE> <INDENT> for f in os.listdir(d): <NEW_LINE> <INDENT> if f.endswith(".py") or f.endswith(".sql"): <NEW_LINE> <INDENT> name = os.path.splitext(f)[0] <NEW_LINE> path = os.path.join(d, f) <NEW_LINE> if name not in self.apps: <NEW_LINE> <INDENT> self.apps[name] = Application(name) <NEW_LINE> new.append(name) <NEW_LINE> <DEDENT> self.apps[name].link(path) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> for name in new: <NEW_LINE> <INDENT> self.apps[name].init(self.db, self)
Search for new applications in the list of directories specified in the constructor
625941bddc8b845886cb5420
def values_to_json(values_env: Env.Bindings[Value.Base], namespace: str = "") -> Dict[str, Any]: <NEW_LINE> <INDENT> if namespace and not namespace.endswith("."): <NEW_LINE> <INDENT> namespace += "." <NEW_LINE> <DEDENT> ans = {} <NEW_LINE> for item in values_env: <NEW_LINE> <INDENT> v = item.value <NEW_LINE> if isinstance(v, Value.Base): <NEW_LINE> <INDENT> j = v.json <NEW_LINE> <DEDENT> elif isinstance(item.value, Tree.Decl): <NEW_LINE> <INDENT> j = str(item.value.type) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> assert isinstance(item.value, Type.Base) <NEW_LINE> j = str(item.value) <NEW_LINE> <DEDENT> ans[(namespace if not item.name.startswith("_") else "") + item.name] = j <NEW_LINE> <DEDENT> return ans
Convert a ``WDL.Env.Bindings[WDL.Value.Base]`` to a dict which ``json.dumps`` to Cromwell-style JSON. :param namespace: prefix this namespace to each key (e.g. workflow name)
625941bdd268445f265b4d5b
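A small sketch of the namespacing behavior, assuming miniwdl's WDL package provides this function at top level; keys gain the namespace prefix unless they start with an underscore:

import WDL

env = WDL.Env.Bindings().bind('x', WDL.Value.Int(42))
print(WDL.values_to_json(env, namespace='wf'))  # {'wf.x': 42}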
def test_bad_values(self): <NEW_LINE> <INDENT> good_mime = ('text', 'plain') <NEW_LINE> self.assertOK([(good_mime, hlh.ParamsCI())]) <NEW_LINE> self.assertOK([(good_mime, hlh.ParamsCI([('q', 1)]))]) <NEW_LINE> self.assertRaisesInternalError([(('text', ''), hlh.ParamsCI())]) <NEW_LINE> self.assertRaisesInternalError([(('', 'plain'), hlh.ParamsCI())]) <NEW_LINE> self.assertRaisesInternalError([(('', ''), hlh.ParamsCI())]) <NEW_LINE> self.assertRaisesInternalError([((';', ';'), hlh.ParamsCI())]) <NEW_LINE> self.assertRaisesInternalError([(('text', ';'), hlh.ParamsCI())]) <NEW_LINE> self.assertRaisesInternalError([((';', 'plain'), hlh.ParamsCI())]) <NEW_LINE> self.assertRaisesInternalError([( good_mime, hlh.ParamsCI([('', '')]))]) <NEW_LINE> self.assertRaisesInternalError([( good_mime, hlh.ParamsCI([('q', 'a')]))]) <NEW_LINE> self.assertRaisesInternalError([( good_mime, hlh.ParamsCI([('q', '5')]))]) <NEW_LINE> self.assertRaisesInternalError([(good_mime,)]) <NEW_LINE> self.assertRaisesInternalError([good_mime])
Should not allow bad values
625941bd24f1403a92600a56
def loadXML(xml_string, normalize = True): <NEW_LINE> <INDENT> if normalize: <NEW_LINE> <INDENT> xml_string = xml_string.replace("\n", " ").replace("  ", "") <NEW_LINE> <DEDENT> parser = ET.XMLParser(encoding = 'utf-8') <NEW_LINE> return ET.fromstring(xml_string, parser = parser)
Load an XML document from a string; if ``normalize`` is true, newlines and redundant whitespace are collapsed before parsing.
625941bd71ff763f4b549574
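Usage sketch; the sample document has no newlines or doubled spaces, so normalization leaves it untouched:

doc = loadXML("<root><item id='1'/></root>")
print(doc.tag)           # root
print(doc[0].get('id'))  # 1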
def pc_noutput_items(self): <NEW_LINE> <INDENT> return _blocks_swig0.file_meta_source_sptr_pc_noutput_items(self)
pc_noutput_items(file_meta_source_sptr self) -> float
625941bdd58c6744b4257b4d
def set_settlement_origin(self, settlement_origin_prefix, random_string=True): <NEW_LINE> <INDENT> is_set = None <NEW_LINE> try: <NEW_LINE> <INDENT> self.logger.info('Start: set settlement origin') <NEW_LINE> self._business_admin_page.set_settlement_origin(settlement_origin_prefix, random_string) <NEW_LINE> is_set = True <NEW_LINE> <DEDENT> except WebDriverException as exp: <NEW_LINE> <INDENT> is_set = False <NEW_LINE> self.logger.error(exp.msg) <NEW_LINE> raise <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> self.logger.info('End: set settlement origin') <NEW_LINE> <DEDENT> return is_set
Set the settlement origin and return whether it succeeded, logging the start and end of the operation. :param settlement_origin_prefix: prefix for the settlement origin value :param random_string: whether to append a random suffix :return: True on success; re-raises (after logging) on WebDriverException
625941bdcc0a2c11143dcd7d