content: string, length 35 to 762k
sha1: string, length 40
id: int64, 0 to 3.66M
def random_train_test_split(df, train_frac, random_seed=None):
    """
    This function randomizes the data based on the seed and then splits the dataframe
    into train and test sets, which are converted to their list of vector representations.

    Args:
        df (DataFrame): The dataframe which is to be used to generate the train and test split.
        train_frac (float): The fraction in the range 0.0 to 1.0 that should be used for training.
        random_seed (int, optional): The seed for randomising. Defaults to None, which means
            a seed is chosen at random.

    Returns:
        tuple: The list of lists representing the vectors in the train and test data frame respectively.
    """
    if random_seed is not None:
        df = df.sample(frac=1, random_state=random_seed)
    else:
        df = df.sample(frac=1)
    split_point = int(len(df.index) * train_frac)
    train = to_vector(df[:split_point])
    test = to_vector(df[split_point:])
    return train, test
c3c399792bdc1026d74f1ccc7241bdb327f307d3
3,649,700
def calc_XY_pixelpositions(calibration_parameters, DATA_Q, nspots, UBmatrix=None,
                           B0matrix=IDENTITYMATRIX, offset=0, pureRotation=0, labXMAS=0,
                           verbose=0, pixelsize=0.079, dim=(2048, 2048), kf_direction="Z>0"):
    """
    must: len(varying_parameter_values) == len(varying_parameter_indices)

    DATA_Q: array of all 3-element Miller indices
    nspots: indices of selected spots of DATA_Q
    UBmatrix:

    WARNING: All Miller indices must be entered in DATA_Q; selection is done in xy_from_Quat

    returns:
    """
    # selecting nspots of DATA_Q
    # print "DATA_Q in calc_XY_pixelpositions", DATA_Q
    # print "nspots", nspots
    # print "len(DATA_Q)", len(DATA_Q)
    DATAQ = np.take(DATA_Q, nspots, axis=0)
    trQ = np.transpose(DATAQ)  # np.array(Hs, Ks, Ls) for further computations

    # print "DATAQ in xy_from_Quat", DATAQ

    if UBmatrix is not None:
        R = UBmatrix
        # q = UB * B0 * Q
        trQ = np.dot(np.dot(R, B0matrix), trQ)
        # results are qx, qy, qz
    else:
        print("I DON'T LIKE INITROT == None")
        print("this must mean that INITROT = Identity ?...")
        R = np.eye(3)  # assumed fallback so R is defined when UBmatrix is None (the return below would otherwise raise NameError)

    Qrot = trQ  # lattice rotation due to quaternion
    Qrotn = np.sqrt(np.sum(Qrot ** 2, axis=0))  # norms of Q vectors

    twthe, chi = F2TC.from_qunit_to_twchi(Qrot / Qrotn, labXMAS=labXMAS)
    # print "twthe, chi", twthe, chi

    if verbose:
        print("tDATA_Q", np.transpose(DATA_Q))
        print("Qrot", Qrot)
        print("Qrotn", Qrotn)
        print("Qrot/Qrotn", Qrot / Qrotn)
        print("twthe,chi", twthe, chi)

    X, Y, theta = F2TC.calc_xycam_from2thetachi(twthe, chi, calibration_parameters,
                                                offset=offset, verbose=0,
                                                pixelsize=pixelsize,
                                                kf_direction=kf_direction)

    return X, Y, theta, R
243bdb74da8aa429a0748d6d15b6b9d1f20814f3
3,649,701
def load_csv(path):
    """
    Function for importing data from csv. Function uses weka implementation of CSVLoader.

    :param path: input file
    :return: weka arff data
    """
    args, _sufix = csv_loader_parser()
    loader = Loader(classname='weka.core.converters.CSVLoader',
                    options=args_to_weka_options(args, _sufix))
    return loader.load_file(path)
d7555cbe5e54543ca8f66e5f70d4f20b7b72b549
3,649,702
import json def from_stream(stream, storage, form): """Reverses to_stream, returning data""" if storage == "pure-plain": assert isinstance(stream, str) if isinstance(stream, str): txt = stream else: assert not stream.startswith(MAGIC_SEAMLESS) assert not stream.startswith(MAGIC_NUMPY) txt = stream.decode("utf-8") result = json.loads(txt) return result elif storage == "pure-binary": b = BytesIO(stream) arr0 = np.load(b, allow_pickle=False) if arr0.ndim == 0 and arr0.dtype.char != "S": arr = np.frombuffer(arr0,arr0.dtype) return arr[0] else: return arr0 assert stream.startswith(MAGIC_SEAMLESS) l = len(MAGIC_SEAMLESS) s1 = stream[l:l+8] s2 = stream[l+8:l+16] len_jsons = np.frombuffer(s1, dtype=np.uint64).tolist()[0] buffersize = np.frombuffer(s2, dtype=np.uint64).tolist()[0] assert len(stream) == l + 16 + len_jsons + buffersize bytes_jsons = stream[l+16:l+16+len_jsons] jsons = json.loads(bytes_jsons.decode("utf-8")) bytebuffer = stream[l+16+len_jsons:] buffer = np.frombuffer(bytebuffer,dtype=np.uint8) data = _from_stream( None, storage, form, jsons, buffer ) return data
ee8c657162947354b3533b4fe607a11a8a6457ec
3,649,703
import os


def volume100():
    """Function for setting volume."""
    os.system('vol 100')
    return render_template('fmberryremote.html', GENRES=GENRES, choosenStation=STATION)
ff68129b8564480d5ed604fe59f3bd7350da359d
3,649,704
def UniformExploration(j, state):
    """Fake player j that always targets all arms."""
    return list(np.arange(state.K))
146b84ff0d9e28a8316e871b94e5bb82d67de997
3,649,705
def deduction_limits(data): """ Apply limits on itemized deductions """ # Split charitable contributions into cash and non-cash using ratio in PUF cash = 0.82013 non_cash = 1. - cash data['e19800'] = data['CHARITABLE'] * cash data['e20100'] = data['CHARITABLE'] * non_cash # Apply student loan interest deduction limit data['e03210'] = np.where(data.SLINT > 2500, 2500, data.SLINT) # Apply IRA contribution limit deductable_ira = np.where(data.AGE >= 50, np.where(data.ADJIRA > 6500, 6500, data.ADJIRA), np.where(data.ADJIRA > 5500, 5500, data.ADJIRA)) data['e03150'] = deductable_ira return data
ef1a6464e4bb0832a9398ad01e878c8aa4e6a620
3,649,706
import argparse def get_args() -> argparse.Namespace: """Setup the argument parser Returns: argparse.Namespace: """ parser = argparse.ArgumentParser( description='A template for python projects.', add_help=False) # Required Args required_args = parser.add_argument_group('Required Arguments') config_file_params = { 'type': argparse.FileType('r'), 'required': True, 'help': "The configuration yml file" } required_args.add_argument('-c', '--config-file', **config_file_params) required_args.add_argument('-l', '--log', required=True, help="Name of the output log file") # Optional args optional_args = parser.add_argument_group('Optional Arguments') optional_args.add_argument('-m', '--run-mode', choices=['run_mode_1', 'run_mode_2', 'run_mode_3'], default='run_mode_1', help='Description of the run modes') optional_args.add_argument('-d', '--debug', action='store_true', help='Enables the debug log messages') optional_args.add_argument("-h", "--help", action="help", help="Show this help message and exit") return parser.parse_args()
f5e213bf6e52ca5528262a2d2c285fe41dd0f269
3,649,707
def select_interacting(num_mtx, bin_mtx, labels):
    """
    Auxiliary function for fit_msa_mdels. Used for fitting the models in hard EM;
    selects observations with a hidden variable value of 1.
    """
    if labels is None:
        # This is the case when initializing the models
        return num_mtx, bin_mtx, labels
    else:
        # This is the case inside the EM loop
        labels = np.asarray(labels)
        idxs = np.where(np.asarray(labels) == 1)[0]
        int_num = np.take(num_mtx, idxs, axis=0)
        int_bin = np.take(bin_mtx, idxs, axis=0)
        weights = np.take(labels, idxs)
        return int_num, int_bin, weights
4c4f6fd7b44d4c388f7ce9ba30e91886548c85ee
3,649,708
def _GenDiscoveryDoc(service_class_names, doc_format,
                     output_path, hostname=None,
                     application_path=None):
    """Write discovery documents generated from a cloud service to file.

    Args:
      service_class_names: A list of fully qualified ProtoRPC service names.
      doc_format: The requested format for the discovery doc. (rest|rpc)
      output_path: The directory to output the discovery docs to.
      hostname: A string hostname which will be used as the default version
        hostname. If no hostname is specified in the @endpoints.api decorator,
        this value is the fallback. Defaults to None.
      application_path: A string containing the path to the AppEngine app.

    Raises:
      ServerRequestException: If fetching the generated discovery doc fails.

    Returns:
      A list of discovery doc filenames.
    """
    output_files = []
    service_configs = GenApiConfig(service_class_names, hostname=hostname,
                                   application_path=application_path)
    for api_name_version, config in service_configs.iteritems():
        discovery_doc = _FetchDiscoveryDoc(config, doc_format)
        discovery_name = api_name_version + '.discovery'
        output_files.append(_WriteFile(output_path, discovery_name, discovery_doc))
    return output_files
a135c3805ee5b81e2f0f505b1710531e3db7d1f1
3,649,709
import dateutil.parser def nitestr(nite,sep=''): """ Convert an ephem.Date object to a nite string. Parameters: ----------- nite : ephem.Date object sep : Output separator Returns: -------- nitestr : String representation of the nite """ if isinstance(nite,basestring): nite = dateutil.parser.parse(nite) nite = ephem.Date(nite) strtuple = nite.tuple()[:3] nitestr = '{:4d}{sep}{:02d}{sep}{:02d}' nitestr = nitestr.format(*strtuple,sep=sep) return nitestr
493f34ad484bbd350254c45e38c41a9559a2ce14
3,649,710
import os def register(): """Register user""" form = RegistrationForm() if form.validate_on_submit(): # only allow 1 user on locally hosted version if len(User.query.all()) == 0: # add user to database hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8') user = User(username=form.username.data, password=hashed_password) db.session.add(user) db.session.commit() # create user directory user_dir = os.path.join(OUTPUT_DIR, user.username) if not os.path.exists(user_dir): os.makedirs(user_dir) # create user src directory src_dir = os.path.join(user_dir, 'src') if not os.path.exists(src_dir): os.makedirs(src_dir) # create user exfiltrated files directory files_dir = os.path.join(user_dir, 'files') if not os.path.exists(files_dir): os.makedirs(files_dir) # initialize c2 session storage server.c2.sessions[user.username] = {} # notify user and redirect to login flash("You have successfully registered!", 'info') logout_user() return redirect(url_for('users.login')) else: flash("User already exists on this server.", 'danger') return render_template("register.html", form=form, title="Register")
453f4b718801de99bd468a52693350646b246738
3,649,711
def dump_tuple(tup):
    """
    Dump a tuple to a string of fg,bg,attr (optional)
    """
    return ','.join(str(i) for i in tup)
ffa4838e2794da9d525b60f4606633f8940480bb
3,649,712
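A minimal usage sketch for dump_tuple above; the (fg, bg, attr) values are arbitrary examples:

style = (255, 0, "bold")
assert dump_tuple(style) == "255,0,bold"
# A simple round trip is just: tuple(dump_tuple(style).split(','))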
def get_single_endpoint(name):
    """
    TODO - Add docstring
    """
    class EndpointWithID(Resource):

        def get(self, pid):
            # TODO - Add return
            pass

        # TODO - Add `get.__doc__`

    EndpointWithID.__name__ = name
    return EndpointWithID
923c470ed9f96478abe62823358f2f62f2506a82
3,649,713
import numexpr as ne  # assumed: `ne` refers to numexpr (the body calls ne.evaluate), not operator.ne


def generate_fermi_question(cfg, logratio, filter_single_number_lhs=True):
    """
    Generates one Fermi question.

    Args:
        cfg: Expression config
        logratio: Log ratio standard deviation (for RHS)
        filter_single_number_lhs: Whether to exclude lhs of a single numerical term
        round_bound: For numbers greater than this, we express in standard form and
            also make sure the rhs is rounded to 3 sig. figures
    """
    done = False
    rhs_limit = 10**15
    while not done:
        lhs = generate_expression(cfg)
        L = ne.evaluate(lhs['numerical'])
        if L > rhs_limit:
            continue
        if filter_single_number_lhs:
            if len(lhs['quantity_ids']) == 0 and lhs['length'] <= 1:
                continue
        # Always sample the rhs from a
        # lognormal with a larger variance the larger the number is
        if L == 0:
            R = 0
            while R == 0:  # Loop until we get an R != L
                R = int(np.random.normal(0, 1))
        else:
            R = L
            while R == L:  # Loop until we hit an R != L
                # Now we set the variance of the log RHS so that it
                # grows as the quantity gets bigger
                sd = 0.1 + log10(abs(L)) * 0.065 + log10(abs(L))**2 * 0.0042
                R_raw = sample_lognormal(L, sd)
                # Then round to 3 sf
                if R_raw != 0:
                    R = int(round(R_raw, -int(floor(log10(abs(R_raw)))) + 2))
                else:
                    R = 0
        assert R != L
        try:
            R = ne.evaluate(str(R))
            done = True
        except:
            pass

    question = lhs['expression'] + ' < ' + "{:,}".format(int(R))
    numerical = lhs['numerical'] + ' < ' + str(R)
    fermi_question = FermiQuestion(lhs['length'], question, numerical,
                                   lhs['estimation_difficulty'], lhs['quantity_ids'],
                                   lhs['categories'], lhs['quantity_strings'])
    return fermi_question
47ebeaa4389f7371fb56687788a696aa7e03426e
3,649,714
def build_dataset(cfg, default_args=None):
    """Build a dataset from config dict.

    Args:
        cfg (dict): Config dict. It should at least contain the key "type".
        default_args (dict | None, optional): Default initialization arguments.
            Default: None.

    Returns:
        Dataset: The constructed dataset.
    """
    dataset = build_from_cfg(cfg, DATASETS, default_args)
    return dataset
12b7d9121b9395668b8d260790b980260e1e4ee5
3,649,715
def Packet_genReadUserTag(errorDetectionMode, buffer, size): """Packet_genReadUserTag(vn::protocol::uart::ErrorDetectionMode errorDetectionMode, char * buffer, size_t size) -> size_t""" return _libvncxx.Packet_genReadUserTag(errorDetectionMode, buffer, size)
111c391ee3bbef36e1d42666d608c72fb3e6c3cd
3,649,716
def prefetch(tensor_dict, capacity): """Creates a prefetch queue for tensors. Creates a FIFO queue to asynchronously enqueue tensor_dicts and returns a dequeue op that evaluates to a tensor_dict. This function is useful in prefetching preprocessed tensors so that the data is readily available for consumers. Example input pipeline when you don't need batching: ---------------------------------------------------- key, string_tensor = slim.parallel_reader.parallel_read(...) tensor_dict = decoder.decode(string_tensor) tensor_dict = preprocessor.preprocess(tensor_dict, ...) prefetch_queue = prefetcher.prefetch(tensor_dict, capacity=20) tensor_dict = prefetch_queue.dequeue() outputs = Model(tensor_dict) ... ---------------------------------------------------- For input pipelines with batching, refer to core/batcher.py Args: tensor_dict: a dictionary of tensors to prefetch. capacity: the size of the prefetch queue. Returns: a FIFO prefetcher queue """ names = tensor_dict.keys() dtypes = [t.dtype for t in tensor_dict.values()] shapes = [t.get_shape() for t in tensor_dict.values()] prefetch_queue = tf.PaddingFIFOQueue(capacity, dtypes=dtypes, shapes=shapes, names=names, name='prefetch_queue') enqueue_op = prefetch_queue.enqueue(tensor_dict) tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner( prefetch_queue, [enqueue_op])) tf.summary.scalar('queue/%s/fraction_of_%d_full' % (prefetch_queue.name, capacity), tf.to_float(prefetch_queue.size()) * (1. / capacity)) return prefetch_queue
44320c6ea24d42b9add4bd3a3677b157ffcd24b8
3,649,717
def get_total_shares(): """ Returns a list of total shares (all, attending, in person, represented) for all voting principles. """ total_shares = { 'heads': [0, 0, 0, 0] # [all, attending, in person, represented] } principle_ids = VotingPrinciple.objects.values_list('id', flat=True) for principle_id in principle_ids: total_shares[principle_id] = [Decimal(0), Decimal(0), Decimal(0), Decimal(0)] # Query delegates. delegates = User.objects.filter(groups=2).select_related('votingproxy', 'keypad').prefetch_related('shares') shares_exists = VotingShare.objects.exists() for delegate in delegates: # Exclude delegates without shares -- who may only serve as proxies. if shares_exists and delegate.shares.count() == 0: continue total_shares['heads'][0] += 1 # Find the authorized voter. auth_voter = find_authorized_voter(delegate) # If auth_voter is delegate himself set index to 2 (in person) else 3 (represented). i = 2 if auth_voter == delegate else 3 attending = auth_voter is not None and auth_voter.is_present if config['voting_enable_votecollector']: attending = attending and hasattr(auth_voter, 'keypad') if attending: total_shares['heads'][i] += 1 # Add shares to total. for share in delegate.shares.all(): total_shares[share.principle_id][0] += share.shares if attending: total_shares[share.principle_id][i] += share.shares for k in total_shares.keys(): total_shares[k][1] = total_shares[k][2] + total_shares[k][3] return total_shares
c70ca3be6e0b7b9df03257b81f5abec343efa37e
3,649,718
def gen_check_box_idx(): """ Generate a list containing the coordinate of three finder patterns in QR-code Args: None Returns: idx_check_box: a list containing the coordinate each pixel of the three finder patterns """ idx_check_box = [] for i in range(7): idx_check_box.append((0, i)) idx_check_box.append((6, i)) idx_check_box.append((30, i)) idx_check_box.append((36, i)) idx_check_box.append((0, 30+i)) idx_check_box.append((6, 30+i)) for i in range(1, 6): idx_check_box.append((i, 0)) idx_check_box.append((i, 6)) idx_check_box.append((i, 30)) idx_check_box.append((i, 36)) idx_check_box.append((30+i, 0)) idx_check_box.append((30+i, 6)) for i in range(3): for j in range(3): idx_check_box.append((2+i, 2+j)) idx_check_box.append((32+i, 2+j)) idx_check_box.append((2+i, 32+j)) return idx_check_box
e26d9c5a3b093b52f54eb2c65b844215c40ddab8
3,649,719
import pandas def preprocess_mc_parameters(n_rv, dict_safir_file_param, index_column='index'): """ NAME: preprocess_mc_parameters AUTHOR: Ian Fu DATE: 18 Oct 2018 DESCRIPTION: Takes a dictionary object with each item represents a safir input variable, distributed or static, distributed input parameter must be a dictionary object describing a distribution (see usage). PARAMETERS: :param n_rv: int, number of random samples for distributed parameters :param dict_safir_in_param: dict, safir input (problem definition) file parameterised variable names :param index_column: str, the returned DataFrame object :return df_params: row equal to n_rv with columns the items in dict_safir_in_param USAGE: """ # declare containers dict_result_params_static = dict() # container for storing static parameters dict_result_params_dist = dict() # container for storing distributed random parameters # populate static parameters and extract for key_, each_param in dict_safir_file_param.items(): if isinstance(each_param, dict): dict_result_params_dist[key_] = each_param else: if isinstance(each_param, list): if len(each_param) == n_rv: dict_result_params_dist[key_] = each_param else: dict_result_params_static[key_] = [each_param] * n_rv # make distribution random parameters dict_result_params_dist = preprocess_safir_mc_parameters(n_rv, dict_result_params_dist) # merge random distributed and static parameters dict_result_params = {**dict_result_params_static, **dict_result_params_dist} # make pandas.Dataframe if index_column not in dict_result_params: dict_result_params[index_column] = list(range(n_rv)) pf_params = pandas.DataFrame(dict_result_params) pf_params.set_index(index_column, inplace=True) return pf_params
28d04122234572b57d978fc9e993707cea45a00d
3,649,720
def AdjustColour(color, percent, alpha=wx.ALPHA_OPAQUE): """ Brighten/Darken input colour by percent and adjust alpha channel if needed. Returns the modified color. @param color: color object to adjust @param percent: percent to adjust +(brighten) or -(darken) @keyword alpha: amount to adjust alpha channel """ radj, gadj, badj = [ int(val * (abs(percent) / 100.0)) for val in color.Get() ] if percent < 0: radj, gadj, badj = [ val * -1 for val in [radj, gadj, badj] ] else: radj, gadj, badj = [ val or 255 for val in [radj, gadj, badj] ] red = min(color.Red() + radj, 255) green = min(color.Green() + gadj, 255) blue = min(color.Blue() + badj, 255) return wx.Colour(red, green, blue, alpha)
76ca657e632467c5db730161a34f19633add06f4
3,649,721
def getdates(startdate, utc_to_local, enddate=None): """ Generate '~astropy.tot_time.Time' objects corresponding to 16:00:00 local tot_time on evenings of first and last nights of scheduling period. Parameters ---------- startdate : str or None Start date (eg. 'YYYY-MM-DD'). If None, defaults to current date. enddate : str or None End date (eg. 'YYYY-MM-DD'). If None, defaults to day after start date. utc_to_local : '~astropy.unit' hours Time difference between utc and local tot_time. Returns ------- start : '~astropy.tot_time.core.Time' UTC corresponding to 16:00 local tot_time on first night end : '~astropy.tot_time.core.Time' UTC corresponding to 16:00 local tot_time on last night """ if startdate is None: current_utc = Time.now() start = Time(str((current_utc + utc_to_local).iso)[0:10] + ' 16:00:00.00') - utc_to_local else: try: start = Time(startdate + ' 16:00:00.00') - utc_to_local except ValueError as e: print(e) raise ValueError('\"{}\" not a valid date. Expected string of the form \'YYYY-MM-DD\''.format(startdate)) if enddate is None: # default number of observation nights is 1 return start, None else: try: end = Time(enddate + ' 16:00:00.00') - utc_to_local diff = int((end - start).value) # difference between startdate and enddate if diff <= 0: raise ValueError('End date \"{}\" occurs before or on start date.'.format(enddate)) except ValueError as e: print(e) raise ValueError('\"{}\" not a valid date. ' 'Must be after start date and of the form \'YYYY-MM-DD\''.format(enddate)) start.format = 'jd' end.format = 'jd' return start, end
1bee7b83b2b4ce3f3347544441762287a3ff1c83
3,649,722
import re def alerts_matcher(base_name, pattern, alerter, second_order_resolution_hours): """ Get a list of all the metrics that would match an ALERTS pattern :param base_name: The metric name :param pattern: the ALERT pattern :param alerter: the alerter name e.g. smtp, syslog, hipchat, pagerdaty :param second_order_resolution_hours: (optional) The number of hours that Mirage should surface the metric timeseries for :type base_name: str :type pattern: str :type alerter: str :type second_order_resolution_hours: int :return: matched_by :rtype: str ('metric3', 'hipchat', 600), # Log all anomalies to syslog ('stats.', 'syslog', 1), # Wildcard namespaces can be used as well ('metric4.thing.*.requests', 'stmp', 900), # However beware of wildcards as the above wildcard should really be ('metric4.thing\..*.\.requests', 'stmp', 900), .. todo: This fully """ logger.info('matching metric to ALERTS pattern :: %s - %s' % (base_name, pattern)) # alert = ('stats_counts\..*', 'smtp', 3600, 168) # alert = ('.*\.mysql\..*', 'smtp', 7200, 168) alert = (pattern, alerter, second_order_resolution_hours) ALERT_MATCH_PATTERN = alert[0] new_base_name = base_name.replace('metrics.', '', 1) METRIC_PATTERN = new_base_name pattern_match = False matched_by = 'not matched' try: alert_match_pattern = re.compile(ALERT_MATCH_PATTERN) pattern_match = alert_match_pattern.match(METRIC_PATTERN) if pattern_match: matched_by = 'regex' # pattern_match = True print('matched_by %s' % matched_by) except: pattern_match = False print('not matched by regex') if not pattern_match: print('not matched by regex') if alert[0] in base_name: pattern_match = True matched_by = 'substring' print('%s' % matched_by) else: print('not matched in substring') if not pattern_match: print('not matched by %s in %s' % (base_name, alert[0])) if base_name == alert[0]: pattern_match = True matched_by = 'absolute_match' print('%s' % matched_by) else: print('not matched in substring') return matched_by
adc880f1bfc65ed189cc834a7be8502bca588b6b
3,649,723
def get_dataframe_tail(n):
    """ Returns last n rows of the DataFrame"""
    return dataset.tail(n)
03a01a9535da25d30c394a8339ebbd5bd0a80b03
3,649,724
import json


def json_formatter(result, verbose=False, indent=4, offset=0):
    """Format result as json."""
    string = json.dumps(result, indent=indent)
    string = string.replace("\n", "\n" + " "*offset)
    return string
512847722fa36eff408ac28d6e3dc8fde5c52af1
3,649,725
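A short usage sketch for json_formatter above; the offset argument shifts every continuation line right by that many extra spaces:

result = {"name": "test", "score": 0.9}
print(json_formatter(result, indent=2, offset=4))
# Prints json.dumps output in which each line after the first is indented by 4 additional spaces.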
import types def simple_http_get(url, port=80, headers=None): """Simple interface to make an HTTP GET request Return the entire request (line,headers,body) as raw bytes """ client_socket = create_async_client_socket((url, port)) calling_session = Reactor.get_instance().get_current_session() @types.coroutine def send_request(request_bytes): def send_request_inner(session: Session): try: # TODO - maybe we can't send the entire request at once. # there might be a reason why both .send and .sendall exist result = session.socket.sendall(request_bytes) except BrokenPipeError as err: session.close() return # The result is None...whatever! Reactor.get_instance().make_progress(session, result, IOIntention.read) none = yield send_request_inner return none @types.coroutine def receive_response(none): def receive_response_inner(session: Session): # TODO - so... can we just "read a line"? # isn't the line MAYBE chunked, and we have to yield control, # and wait for the socket to be readable again? # Weird stuff happening here! # Some sites send me a '\n' first # Well I guess I should skip that result_part = session.file.readline() result_buffer = result_part while not result_part: result_part = session.file.readline() result_buffer += result_part Reactor.get_instance().make_progress(session, result_buffer, IOIntention.none) res = yield receive_response_inner return res async def make_http_request(s: Session): # TODO - make the request using the proper path, not just / raw_request = ( b"GET / HTTP/1.1\r\n" b"User-Agent: guy-creating-http-server-sorry-for-spam\r\n" b"Reach-me-at: vlad.george.ardelean@gmail.com\r\n" b"\r\n" ) none = await send_request(raw_request) response = await receive_response(none) # see the `response` here? We're basically throwing that to line marked `3mfn5gwf`, # as the result. The generator for the original session which wanted to make an HTTP # call paused on line marked `3mfn5gwf`. We then set a callback on another socket on # line marked `b4g9a`. The callback is this function. When this function completes # and we have our result, we basically restart the previous generator with that result Reactor.get_instance().make_progress(calling_session, response, IOIntention.none) Reactor.get_instance().add_client_socket_and_callback(client_socket, make_http_request) # line:b4g9a # `yield` works very well, BUT, I think that's just by accident. # Sure, the `make_http_request` function will trigger progress, and make line 3mfn5gwf get # the result and continue, but what if the socket in the initial session triggers first? # ...then, the reactor will call the `yield`ed value, and this would blow up, because # that result is None. That is why we still return a noop, which doesn't do any progress # but is required for respecting the expectations of the reactor. result = yield noop # line: 3mfn5gwf return result
87a2115d0134ae70625abe74a4bbd78e56610604
3,649,726
def _gumbel_softmax_sample(logits, temp=1, eps=1e-20):
    """
    Draw a sample from the Gumbel-Softmax distribution

    based on
    https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
    (MIT license)
    """
    dims = logits.dim()
    gumbel_noise = _sample_gumbel(logits.size(), eps=eps, out=logits.data.new())
    y = logits + Variable(gumbel_noise)
    return F.softmax(y / temp, dims - 1)
a61eac3861cdca17c1d2856c83bc70e03168bc45
3,649,727
from re import VERBOSE def interpolate_points(variable, variable_name, old_r, old_theta, new_r, new_theta): """Interpolate the old grid onto the new grid.""" grid = griddata( (old_r, old_theta), variable, (new_r, new_theta), method=INTERPOLATION_LEVEL, fill_value=-1 ) n_error = 0 for i, element in enumerate(grid): if element == -1: n_error += 1 grid[i] = griddata( (old_r, old_theta), variable, (new_r[i], new_theta[i]), method="nearest" ) if VERBOSE: print(f"{variable_name} interpolation problem for at r = {new_r[i]} theta = {np.rad2deg(new_theta[i])}") if n_error: print(f"There were {n_error} interpolation errors for {variable_name}") return grid
2a830e7cd04d5d0832d35a25bc58041ba192709b
3,649,728
def attitude(request): """ View configuration for discussion step, where we will ask the user for her attitude towards a statement. Route: /discuss/{slug}/attitude/{position_id} :param request: request of the web server :return: dictionary """ LOG.debug("View attitude: %s", request.matchdict) emit_participation(request) db_statement = request.validated['statement'] db_issue = request.validated['issue'] db_user = request.validated['user'] history_handler.save_and_set_cookie(request, db_user, db_issue) session_history = request.validated.get('session_history') prepared_discussion = discussion.attitude(db_issue, db_user, db_statement, session_history, request.path) modify_discussion_url(prepared_discussion) modify_discussion_bubbles(prepared_discussion, request.registry) rdict = prepare_request_dict(request) append_extras_dict(prepared_discussion, rdict, request.authenticated_userid, False) return prepared_discussion
4870e19941c990350991e1540c8df1760953e455
3,649,729
def currentProgram():
    """currentProgram page."""
    return render_template(
        "currentProgram-index.j2.html",
        title="currentProgram",
        subtitle="Demonstration of Flask blueprints in action.",
        template="currentProgram-template",
        currentProgram=getCurrentProgr(),
        timeStarted=timeStarted,
    )
f5c914560d3c1791e34749321b78951313d5f058
3,649,730
def checkIsMember(request):
    """
    Looks up a business registration number and checks whether it is registered as a linked member.
    - https://docs.popbill.com/statement/python/api#CheckIsMember
    """
    try:
        # Business registration number to look up, 10 digits without '-'
        targetCorpNum = "1234567890"

        response = statementService.checkIsMember(targetCorpNum)

        return render(request, 'response.html', {'code': response.code, 'message': response.message})
    except PopbillException as PE:
        return render(request, 'exception.html', {'code': PE.code, 'message': PE.message})
861d4cc83102e036bb795bf8eafee2f7593925a4
3,649,731
from typing import List from typing import Optional from typing import Type def f1_score(y_true: List[List[str]], y_pred: List[List[str]], *, average: Optional[str] = 'micro', suffix: bool = False, mode: Optional[str] = None, sample_weight: Optional[List[int]] = None, zero_division: str = 'warn', scheme: Optional[Type[Token]] = None, partial_match: bool = False): """Compute the F1 score. The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0. The relative contribution of precision and recall to the F1 score are equal. The formula for the F1 score is:: F1 = 2 * (precision * recall) / (precision + recall) Args: y_true : 2d array. Ground truth (correct) target values. y_pred : 2d array. Estimated targets as returned by a tagger. average : string, [None, 'micro' (default), 'macro', 'weighted'] If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. sample_weight : array-like of shape (n_samples,), default=None Sample weights. zero_division : "warn", 0 or 1, default="warn" Sets the value to return when there is a zero division: - recall: when there are no positive labels - precision: when there are no positive predictions - f-score: both If set to "warn", this acts as 0, but warnings are also raised. mode : str, [None (default), `strict`]. if ``None``, the score is compatible with conlleval.pl. Otherwise, the score is calculated strictly. scheme : Token, [IOB2, IOE2, IOBES] suffix : bool, False by default. partial_match : bool, False by default. Returns: score : float or array of float, shape = [n_unique_labels]. Example: >>> from seqeval.metrics import f1_score >>> y_true = [['O', 'O', 'B-MISC', 'I-MISC', 'B-MISC', 'O', 'O'], ['B-PER', 'I-PER', 'O']] >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'B-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> f1_score(y_true, y_pred, average='micro') 0.6666666666666666 >>> f1_score(y_true, y_pred, average='macro') 0.75 >>> f1_score(y_true, y_pred, average='weighted') 0.6666666666666666 >>> f1_score(y_true, y_pred, average=None) array([0.5, 1. ]) """ if mode == 'strict' and scheme: _, _, f, _ = precision_recall_fscore_support_v1(y_true, y_pred, average=average, warn_for=('f-score',), beta=1, sample_weight=sample_weight, zero_division=zero_division, scheme=scheme, suffix=suffix ) else: _, _, f, _ = precision_recall_fscore_support(y_true, y_pred, average=average, warn_for=('f-score',), beta=1, sample_weight=sample_weight, zero_division=zero_division, suffix=suffix, partial_match=partial_match) return f
1354e306847af1decf59ae638a1cdecd265e569a
3,649,732
from typing import Any
from typing import Counter


def calc_proportion_identical(lst: Any) -> float:
    """
    Returns a value between 0 and 1 for the uniformity of the values in LST,
    i.e. higher if they're all the same.
    """
    def count_most_common(lst):
        """
        Find the most common item in LST, and count how many times it occurs.
        """
        # Counter(['a', 'b', 'a']).most_common(2) -> [
        #     ('a', 2),
        #     ('b', 1),
        # ]
        # so this gives the count of the most common (in this case 2 occurrences of 'a')
        return Counter(lst).most_common(1)[0][1]

    most_common = count_most_common(lst)
    if most_common == 1:
        return 0
    else:
        return most_common / len(lst)
adf467eba11694c5ea4583d7b53029110e59e25a
3,649,733
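A quick usage sketch for calc_proportion_identical above:

print(calc_proportion_identical(["a", "b", "a"]))  # most common value covers 2 of 3 items -> 0.666...
print(calc_proportion_identical(["x", "x", "x"]))  # fully uniform -> 1.0
print(calc_proportion_identical(["a", "b", "c"]))  # most common value occurs once -> 0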
def _rolling_mad(arr, window):
    """Rolling window MAD outlier detection on 1d array."""
    outliers = []
    for i in range(window, len(arr)):
        cur = arr[(i - window):i]
        med, cur_mad = _mad(cur)
        cur_out = cur > (med + cur_mad * 3)
        idx = list(np.arange((i - window), i)[cur_out])
        outliers += idx
    outliers = list(set(outliers))
    # turn index into boolean
    bool_outliers = np.zeros(arr.shape[0], dtype=bool)
    bool_outliers[outliers] = True
    return bool_outliers
3f28dde448b3c567a92fd22e499f812cd748a507
3,649,734
def compute_mean_and_cov(embeds, labels):
    """Computes class-specific means and shared covariance matrix of given embedding.

    The computation follows Eq (1) in [1].

    Args:
      embeds: An np.array of size [n_train_sample, n_dim], where n_train_sample is
        the sample size of training set, n_dim is the dimension of the embedding.
      labels: An np.array of size [n_train_sample, ]

    Returns:
      mean_list: A list of len n_class, and the i-th element is an np.array of size
        [n_dim, ] corresponding to the mean of the fitted Gaussian distribution for
        the i-th class.
      cov: The shared covariance matrix of the size [n_dim, n_dim].
    """
    n_dim = embeds.shape[1]
    n_class = int(np.max(labels)) + 1

    mean_list = []
    cov = np.zeros((n_dim, n_dim))
    for class_id in range(n_class):
        data = embeds[labels == class_id]
        data_mean = np.mean(data, axis=0)
        cov += np.dot((data - data_mean).T, (data - data_mean))
        mean_list.append(data_mean)
    cov = cov / len(labels)
    return mean_list, cov
b6d5624b0cea9f6162ffad819d4d5917391ac73e
3,649,735
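A toy invocation of compute_mean_and_cov above, assuming numpy is available as np, as in the snippet:

import numpy as np

embeds = np.array([[0.0, 1.0], [0.2, 0.8], [2.0, -1.0], [1.8, -0.8]])
labels = np.array([0, 0, 1, 1])
mean_list, cov = compute_mean_and_cov(embeds, labels)
# mean_list has one 2-vector per class; cov is the pooled 2x2 covariance over both classes.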
def wcenergy(seq: str, temperature: float, negate: bool = False) -> float:
    """Return the wc energy of seq binding to its complement."""
    loop_energies = calculate_loop_energies_dict(temperature, negate)
    return sum(loop_energies[seq[i:i + 2]] for i in range(len(seq) - 1))
90eae3d85e90019571e4f5d674fb93d58c1d7287
3,649,736
import os


def getDirectoriesInDir(directory):
    """
    Returns all the directories in the specified directory.
    """
    directories = {}
    for d in os.listdir(directory):
        path = os.path.join(directory, d)
        if os.path.isdir(path):
            directories[d] = path
    return directories
8d78571d0ebc4fba58abf98354a7bd2bea018e60
3,649,737
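A usage sketch for getDirectoriesInDir above; the path is an arbitrary example:

subdirs = getDirectoriesInDir("/tmp")  # maps each immediate subdirectory name to its full path
for name, path in subdirs.items():
    print(name, "->", path)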
def upload_csv():
    """ Upload csv file """
    upload_csv_form = UploadCSVForm()
    if upload_csv_form.validate_on_submit():
        file = upload_csv_form.csv.data
        ClassCheck.process_csv_file(file)
        flash('CSV file uploaded!', 'success')
    return redirect('/')
620cc3b4e11c71fe9dedd24631f641304313150f
3,649,738
async def clean(request: Request) -> RedirectResponse:
    """Access this view (GET "/clean") to remove all session contents."""
    request.session.clear()
    return RedirectResponse("/")
3ef0d9298fcd7879becc8ae246656a62086f639a
3,649,739
def svd(A): """ Singular Value Decomposition Parameters ---------- A: af.Array A 2 dimensional arrayfire array. Returns ------- (U,S,Vt): tuple of af.Arrays - U - A unitary matrix - S - An array containing the elements of diagonal matrix - Vt - A unitary matrix Note ---- - The original matrix `A` is preserved and additional storage space is required for decomposition. - If the original matrix `A` need not be preserved, use `svd_inplace` instead. - The original matrix `A` can be reconstructed using the outputs in the following manner. >>> Smat = af.diag(S, 0, False) >>> A_recon = af.matmul(af.matmul(U, Smat), Vt) """ U = Array() S = Array() Vt = Array() safe_call(backend.get().af_svd(c_pointer(U.arr), c_pointer(S.arr), c_pointer(Vt.arr), A.arr)) return U, S, Vt
7b7d48dc1782d1e02eca01b657895372170caf6c
3,649,740
def parse_content_type_header(value): """ maintype "/" subtype *( ";" parameter ) The maintype and substype are tokens. Theoretically they could be checked against the official IANA list + x-token, but we don't do that. """ ctype = ContentType() recover = False if not value: ctype.defects.append(errors.HeaderMissingRequiredValue( "Missing content type specification")) return ctype try: token, value = get_token(value) except errors.HeaderParseError: ctype.defects.append(errors.InvalidHeaderDefect( "Expected content maintype but found {!r}".format(value))) _find_mime_parameters(ctype, value) return ctype ctype.append(token) # XXX: If we really want to follow the formal grammar we should make # mantype and subtype specialized TokenLists here. Probably not worth it. if not value or value[0] != '/': ctype.defects.append(errors.InvalidHeaderDefect( "Invalid content type")) if value: _find_mime_parameters(ctype, value) return ctype ctype.maintype = token.value.strip().lower() ctype.append(ValueTerminal('/', 'content-type-separator')) value = value[1:] try: token, value = get_token(value) except errors.HeaderParseError: ctype.defects.append(errors.InvalidHeaderDefect( "Expected content subtype but found {!r}".format(value))) _find_mime_parameters(ctype, value) return ctype ctype.append(token) ctype.subtype = token.value.strip().lower() if not value: return ctype if value[0] != ';': ctype.defects.append(errors.InvalidHeaderDefect( "Only parameters are valid after content type, but " "found {!r}".format(value))) # The RFC requires that a syntactically invalid content-type be treated # as text/plain. Perhaps we should postel this, but we should probably # only do that if we were checking the subtype value against IANA. del ctype.maintype, ctype.subtype _find_mime_parameters(ctype, value) return ctype ctype.append(ValueTerminal(';', 'parameter-separator')) ctype.append(parse_mime_parameters(value[1:])) return ctype
24722c1dd5784896fd6aa8b39cd29eb76fec155a
3,649,741
import numpy


def csr_matrix_multiply(S, x):  # noqa
    """Multiplies a :class:`scipy.sparse.csr_matrix` S by an object-array vector x.
    """
    h, w = S.shape

    result = numpy.empty_like(x)

    for i in range(h):
        result[i] = sum(S.data[idx]*x[S.indices[idx]]  # noqa pylint:disable=unsupported-assignment-operation
                        for idx in range(S.indptr[i], S.indptr[i+1]))

    return result
77e1630cbdd59f53b1b2885b731e73a14fb18b35
3,649,742
def calculate_sem_IoU(pred_np, seg_np, num_classes):
    """Calculate the Intersection Over Union of the predicted classes and the ground truth

    Args:
        pred_np (array_like): List of predicted class labels
        seg_np (array_like): List of ground truth labels
        num_classes (int): Number of classes in the dataset
    """
    I_all = np.zeros(num_classes)
    U_all = np.zeros(num_classes)
    for sem_idx in range(len(seg_np)):
        for sem in range(num_classes):
            I = np.sum(np.logical_and(pred_np[sem_idx] == sem, seg_np[sem_idx] == sem))
            U = np.sum(np.logical_or(pred_np[sem_idx] == sem, seg_np[sem_idx] == sem))
            I_all[sem] += I
            U_all[sem] += U
    return I_all / U_all
e8e360cb8aad0f2226aa54c88c01485840017f2d
3,649,743
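A small sketch of calling calculate_sem_IoU above on two label arrays, assuming numpy is available as np, as in the snippet:

import numpy as np

pred = [np.array([0, 1, 1, 2])]
truth = [np.array([0, 1, 2, 2])]
iou = calculate_sem_IoU(pred, truth, num_classes=3)
# Per-class IoU: class 0 -> 1.0, class 1 -> 0.5, class 2 -> 0.5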
def _invert(M, eps): """ Invert matrices, with special fast handling of the 1x1 and 2x2 cases. Will generate errors if the matrices are singular: user must handle this through his own regularization schemes. Parameters ---------- M: np.ndarray [shape=(..., nb_channels, nb_channels)] matrices to invert: must be square along the last two dimensions eps: [scalar] regularization parameter to use _only in the case of matrices bigger than 2x2 Returns ------- invM: np.ndarray, [shape=M.shape] inverses of M """ nb_channels = M.shape[-1] if nb_channels == 1: # scalar case invM = 1.0/(M+eps) elif nb_channels == 2: # two channels case: analytical expression det = ( M[..., 0, 0]*M[..., 1, 1] - M[..., 0, 1]*M[..., 1, 0]) invDet = 1.0/(det) invM = np.empty_like(M) invM[..., 0, 0] = invDet*M[..., 1, 1] invM[..., 1, 0] = -invDet*M[..., 1, 0] invM[..., 0, 1] = -invDet*M[..., 0, 1] invM[..., 1, 1] = invDet*M[..., 0, 0] else: # general case : no use of analytical expression (slow!) invM = np.linalg.pinv(M, eps) return invM
119c16ad816dd37b7e5eb23c121ef5affc8851f5
3,649,744
from pathlib import Path


def read_densecsv_to_anndata(ds_file: Path):
    """Reads a dense text file in csv format into the AnnData format."""
    return read_densemat_to_anndata(ds_file, sep=",")
cee99509b6744972ad7a9530d66b59c06f7deec5
3,649,745
def get_original_date_jpeg(filepath): """ returns the DateTimeOriginal/DateTimeDigitized exif data from the given jpeg file """ try: with Image.open(filepath) as image: # NOTE: using old "private" method because new public method # doesn't include this tag. It does include 306 "DateTime" # though, but "DateTime" might differ from "DateTimeOriginal" # pylint: disable-next=protected-access date_created = image._getexif().get(TAG_DATETIME_ORIGINAL) if not date_created: date_created = image._getexif().get(TAG_DATETIME_DIGITIZED) if date_created: # pylint: disable-next=protected-access date_created += "." + image._getexif().get( TAG_SUBSECTIME_ORIGINAL, "" ).zfill(3) except (UnidentifiedImageError, AttributeError): logger.debug("unable to parse '%s'", filepath) return None if date_created: date_created = parse_jpeg_date(date_created) return date_created
e3c1c8f12bff2506f5c1eed3dbe0cc9c2c45d736
3,649,746
def _singleton(name):
    """Returns a singleton object which represents itself as `name` when printed,
    but is only comparable (via identity) to itself."""
    return type(name, (), {'__repr__': lambda self: name})()
b07003e1716115864bf1914d4b523b36d0f0471f
3,649,747
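Usage sketch for _singleton above, showing the identity-only comparison:

MISSING = _singleton("MISSING")
print(MISSING)               # prints: MISSING
print(MISSING == MISSING)    # True - same object
print(MISSING == "MISSING")  # False - only identity matches, not the string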
def get_zone(*, zone_name: str):
    """
    Get zone with given zone name.

    Args:
        zone_name: zone name, e.g. "haomingyin.com"

    Returns:
        json: zone details
    """
    params = dict(name=zone_name)
    zones = _get("zones", params=params)
    if not zones:
        raise CloudflareAPIError(f"Unable to fetch zone {zone_name}")
    return zones[0]
5e79ec900af7e5cc4d457d04292e55e2e3abc9ec
3,649,748
from rdkit.Chem import AllChem import os def load_bbbp_dataset(data_path, task_names=None, featurizer=None): """Load bbbp dataset ,process the classification labels and the input information. Description: The data file contains a csv table, in which columns below are used: Num:number name:Name of the compound smiles:SMILES representation of the molecular structure p_np:Binary labels for penetration/non-penetration Args: data_path(str): the path to the cached npz path. task_names(list): a list of header names to specify the columns to fetch from the csv file. featurizer(pahelix.featurizers.Featurizer): the featurizer to use for processing the data. If not none, The ``Featurizer.gen_features`` will be applied to the raw data. Returns: an InMemoryDataset instance. Example: .. code-block:: python dataset = load_bbbp_dataset('./bbbp/raw') print(len(dataset)) References: [1] Martins, Ines Filipa, et al. “A Bayesian approach to in silico blood-brain barrier penetration modeling.” Journal of chemical information and modeling 52.6 (2012): 1686-1697. """ if task_names is None: task_names = get_default_bbbp_task_names() csv_file = os.listdir(data_path)[0] input_df = pd.read_csv(join(data_path, csv_file), sep=',') smiles_list = input_df['smiles'] rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list] preprocessed_rdkit_mol_objs_list = [m if not m is None else None for m in rdkit_mol_objs_list] smiles_list = [AllChem.MolToSmiles(m) if not m is None else None for m in preprocessed_rdkit_mol_objs_list] labels = input_df[task_names] # convert 0 to -1 labels = labels.replace(0, -1) # there are no nans data_list = [] for i in range(len(smiles_list)): if smiles_list[i] is None: continue raw_data = {} raw_data['smiles'] = smiles_list[i] raw_data['label'] = labels.values[i] if not featurizer is None: data = featurizer.gen_features(raw_data) else: data = raw_data if not data is None: data_list.append(data) dataset = InMemoryDataset(data_list) return dataset
589fb4e9d6796a841264744e2ecc024e60a310f3
3,649,749
def execute(connection, cmdline, **kwargs):
    """generic function to execute command for device

    | Parameters:
    |     connection (Adaptor): connection of device
    |     cmdline (str): command line
    |     kwargs (dict): additional keyword arguments for command line execution
    | Returns:
    |     str: output of command line
    """
    result = Dgs.execute_cmdline(connection, cmdline, **kwargs)
    return result
7f3424cb8a747fab87a5a67c880ec755d9c9cb96
3,649,750
def json_to_numpy_mask(shapes, width, height):
    """Converts JSON labels with pixel classifications into NumPy arrays"""
    img = Image.new("L", (width, height), 0)

    for shape in shapes:
        if shape["label"] == "barrel":
            barrel_lst = [tuple(i) for i in shape["points"]]
            ImageDraw.Draw(img).polygon(barrel_lst, outline=1, fill=1)
        if shape["label"] == "line":
            line_lst = [tuple(i) for i in shape["points"]]
            ImageDraw.Draw(img).polygon(line_lst, outline=2, fill=2)

    mask = np.array(img)
    return mask
33757246478d854d15f71a0737174ac6952514ef
3,649,751
import types
import typing


def _format_call(value: ast3.Call, context: types.Context) -> typing.Text:
    """Format a function call like 'print(a*b, foo=x)'"""
    try:
        return _format_call_horizontal(value, context)
    except errors.NotPossible:
        return _format_call_vertical(value, context)
2019f50943bb597948248dfda9ce8620d3286377
3,649,752
def get_tags(repo_dir):
    """
    _get_tags_

    returns a list of tags for the given repo, ordered as newest first
    """
    repo = git.Repo(repo_dir)
    tags_with_date = {
        tag.name: tag.commit.committed_date
        for tag in repo.tags
    }
    return sorted(tags_with_date, key=tags_with_date.get, reverse=True)
aa5462ff0b15501cf486a2bf49837f0dd60ecfaf
3,649,753
def readh5(filename, GroupName=None): """ Read the HDF5 file 'filename' into a class. Groups within the hdf5 file are by default loaded as sub classes, unless they include a _read_as attribute (see sharpy.postproc.savedata). In this case, group can be loaded as classes, dictionaries, lists or tuples. filename: string to file location GroupName = string or list of strings. Default is None: if given, allows reading a specific group h5 file. Warning: Groups that need to be read as lists and tuples are assumed to conform to the format used in sharpy.postproc.savedata """ Hinst = ReadInto() ### read and scan file hdfile = h5.File(filename, 'r') NamesList = [] # dataset names hdfile.visit(NamesList.append) ### Identify higher level groups / attributes if GroupName is None: MainLev = [] for name in NamesList: if '/' not in name: MainLev.append(name) else: if type(GroupName) is list: MainLev = GroupName else: MainLev = [GroupName] ### Loop through higher level for name in MainLev: # sub-group if type(hdfile[name]) is h5._hl.group.Group: Ginst = read_group(hdfile[name]) try: Ginst.name = name except: pass setattr(Hinst, name, Ginst) else: setattr(Hinst, name, hdfile[name][()]) # close and return hdfile.close() return Hinst
9ab33071916a634da6ddc68df56fe29429ef6313
3,649,754
def calc_E_E_hs_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_b2_d_t,
                    W_dash_ba1_d_t, theta_ex_d_Ave_d, L_dashdash_ba2_d_t):
    """Hourly electricity consumption of the water heater (kWh/h) (1)

    Args:
        W_dash_k_d_t(ndarray): Hourly solar-corrected hot water supply load at the kitchen tap (MJ/h)
        W_dash_s_d_t(ndarray): Hourly solar-corrected hot water supply load at the bathroom shower tap (MJ/h)
        W_dash_w_d_t(ndarray): Hourly solar-corrected hot water supply load at the washbasin tap (MJ/h)
        W_dash_b1_d_t(ndarray): Hourly solar-corrected hot water supply load when filling the bathtub from the tap (MJ/h)
        W_dash_b2_d_t(ndarray): Hourly solar-corrected hot water supply load during automatic bathtub filling (MJ/h)
        W_dash_ba1_d_t(ndarray): Hourly solar-corrected hot water supply load when adding hot water to the bathtub from the tap (MJ/h)
        theta_ex_d_Ave_d(ndarray): Daily average outdoor temperature (°C)
        L_dashdash_ba2_d_t(ndarray): Hourly solar-corrected hot water supply load during bathtub reheating (MJ/h)

    Returns:
        ndarray: Daily electricity consumption of the water heater (kWh/d)
    """
    # Power consumed by auxiliary equipment while the water heater is on standby and supplying hot water at the taps (2)
    E_E_hs_aux1_d_t = get_E_E_hs_aux1_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t,
                                          W_dash_b1_d_t, W_dash_ba1_d_t, theta_ex_d_Ave_d)

    # Power consumed by auxiliary equipment while filling the bathtub (3)
    E_E_hs_aux2_d_t = get_E_E_hs_aux2_d_t(W_dash_b2_d_t)

    # Power consumed by auxiliary equipment while keeping the bath warm (4)
    E_E_hs_aux3_d_t = calc_E_E_hs_aux3_d_t(L_dashdash_ba2_d_t)

    print('E_E_hs_aux1 = {}'.format(np.sum(E_E_hs_aux1_d_t)))
    print('E_E_hs_aux2 = {}'.format(np.sum(E_E_hs_aux2_d_t)))
    print('E_E_hs_aux3 = {}'.format(np.sum(E_E_hs_aux3_d_t)))

    return E_E_hs_aux1_d_t + E_E_hs_aux2_d_t + E_E_hs_aux3_d_t
d76a65d4d30b0c2cf59b837e188473827425d576
3,649,755
def best_promo(order):
    """
    Select the best available discount.
    """
    return max(promo(order) for promo in promos)
db4001b4e04a167171da02da92e4234489bf13a5
3,649,756
def random_joint_positions(robot):
    """
    Generates random joint positions within joint limits for the given robot.

    @type robot: orpy.Robot
    @param robot: The OpenRAVE robot
    @rtype: np.array
    @return:
    """
    # Get the limits of the active DOFs
    lower, upper = robot.GetActiveDOFLimits()
    positions = lower + np.random.rand(len(lower))*(upper-lower)
    return positions
49fe770a8cc22945e79c892d54754c50f19974e8
3,649,757
def test_cancel_examples(example): """ We can't specify examples in test_fuzz_cancel (because we use data, see https://hypothesis.readthedocs.io/en/latest/data.html#interactive-draw), so we have this here for explicit examples. """ stream_req, stream_resp, draws = example def draw(lst): if draws: this_draw = draws.pop(0) for name, evt in lst: if name == this_draw: return name, evt raise AssertionError( f"{this_draw} not in list: {[name for name, _ in lst]}" ) else: return lst[0] _test_cancel(stream_req, stream_resp, draw)
c3a3a970a77f136c39e86666c0485163d0fbb408
3,649,758
import pickle


def fetch_pickle(filename):
    """
    Fetches any variable saved into a picklefile with the given filename.

    Parameters:
        filename (str): filename of the pickle file

    Returns:
        variable (any pickle compatible type): variable that was saved into the picklefile.
    """
    with open(filename, 'rb') as picklefile:
        variable = pickle.load(picklefile)
    return variable
172c18520619d102b520658949d2464d5ecfb05c
3,649,759
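A round-trip sketch for fetch_pickle above; the file name is an arbitrary example:

import pickle

with open("example.pkl", "wb") as f:
    pickle.dump({"answer": 42}, f)

print(fetch_pickle("example.pkl"))  # {'answer': 42}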
def check_clockwise(poly):
    """Checks if a sequence of (x,y) polygon vertice pairs is ordered clockwise or not.

    NOTE: Counter-clockwise (=FALSE) vertice order reserved for inner ring polygons"""
    clockwise = False
    if (sum(x0*y1 - x1*y0 for ((x0, y0), (x1, y1)) in
            zip(poly, poly[1:] + [poly[0]]))) < 0:
        clockwise = not clockwise
    return clockwise
5e9f8fba6cd11e33dfe60a89e62eeac2ac24c805
3,649,760
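A quick check of check_clockwise above using a unit square in both vertex orders:

ccw_square = [(0, 0), (1, 0), (1, 1), (0, 1)]  # counter-clockwise in standard x/y axes
cw_square = [(0, 0), (0, 1), (1, 1), (1, 0)]   # same square, reversed order
print(check_clockwise(ccw_square))  # False
print(check_clockwise(cw_square))   # True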
def bookList(request):
    """Test view."""
    # Query book info using the default manager object: calling a filter method on the manager returns a QuerySet
    # book_list = BookInfo.objects.all()
    # Query book info using a custom manager object
    # book_list = BookInfo.books.all()

    # The following code demonstrates a custom manager class adding an initialization method to the model class, analogous to books.all()
    # book1 = BookInfo.books.create_model('zxc')
    # book2 = BookInfo.books.create_model('zxj')
    # book_list = [book1,book2]

    # The following code demonstrates limiting a QuerySet: limit 0,2
    # book_list = BookInfo.books.all()[:2]

    # The following code demonstrates basic conditional queries: filter(model_attribute__operator=value)
    # 1. Query the book with id 1: exact tests equality and can be omitted (plain equals); pk is equivalent to the primary key
    # book_list = BookInfo.books.filter(id=1)
    # 2. Query books whose name contains '湖': contains, similar to LIKE
    # book_list = BookInfo.books.filter(name__contains='湖')
    # 3. Query books whose name ends with '部': endswith; startswith matches the beginning
    # book_list = BookInfo.books.filter(name__endswith='部')
    # 4. Query books whose name is not null: isnull tests for null; False means not null (two negatives read as a positive), easy to get confused
    # book_list = BookInfo.books.filter(name__isnull=False)
    # 5. Query books with id 2 or 4: in selects only from the listed elements, it does not mean a range, easy to get confused
    # book_list = BookInfo.books.filter(id__in=[2,4])
    # 6. Query books with id greater than 2: gt greater than, gte greater or equal, lt less than, lte less or equal
    # book_list = BookInfo.books.filter(id__gt=2)
    # 7. Query books whose id is not 3: exclude returns data outside the condition
    # book_list = BookInfo.books.exclude(id=3)
    # 8. Query books published in 1980
    # book_list = BookInfo.books.filter(pub_date__year='1980')
    # 9. Query books published after 1 Jan 1990
    # book_list = BookInfo.books.filter(pub_date__gt='1990-1-1')
    # from datetime import date
    # book_list = BookInfo.books.filter(pub_date__gt=date(1990,1,1))

    # The following code demonstrates F-object and Q-object queries: F('model_attribute'), Q(attr__op=value) | Q(attr__op=value)
    # 1. Query books whose read count is greater than the comment count
    # book_list = BookInfo.books.filter(readcount__gt=F('commentcount'))
    # 2. Query books whose read count is greater than twice the comment count: F() supports arithmetic
    # book_list = BookInfo.books.filter(readcount__gt=F('commentcount') * 2)
    # 1. Query books with read count greater than 20, or id less than 3
    # book_list = BookInfo.books.filter(Q(readcount__gt=20) | Q(id__lt=3))
    # 2. Query books whose id is not 3: ~Q()
    book_list = BookInfo.books.filter(~Q(id=3))

    # The following code demonstrates the aggregate() filter, which works with the aggregate functions Avg(), Sum(), Max(), Min(), Count()
    # Requirement: compute the total read count; aggregate() returns a single dict, e.g. {'readcount__sum': 134}
    total_count = BookInfo.books.aggregate(Sum('readcount'))

    # The following code demonstrates basic related-object queries
    # 1. Query all characters of the book with id 1: one-to-many via peopleinfo_set
    book1 = BookInfo.books.get(id=1)
    people_list1 = book1.peopleinfo_set.all()
    # 2. Query the book the character with id 1 comes from: many-to-one, just access the related foreign-key attribute
    people1 = PeopleInfo.objects.get(id=1)
    book2 = people1.book

    # The following code demonstrates joins: filter(related_model_lowercase__attr__op=value)
    # 1. Query all characters of the book named '天龙八部': one-to-many, the join goes through the model holding the foreign key
    people_list2 = PeopleInfo.objects.filter(book__name='天龙八部')
    # 2. Query books containing a character whose description includes '降龙': many-to-one
    book_list2 = BookInfo.books.filter(peopleinfo__description__contains='降龙')

    # Build the context
    context = {
        'book_list': book_list,
        'total_count': total_count,
        'people_list1': people_list1,
        'book2': book2,
        'people_list2': people_list2,
        'book_list2': book_list2
    }

    return render(request, 'Book/booklist.html', context)
b9b05f259d5cdb9d0570268c0f08eaafc8ba6cc1
3,649,761
def format_stats(stats):
    """Format statistics for printing to a table"""
    result = ''
    for key, value in stats.items():
        result += f'{key} - {value}\n'
    return result[:-1]
2d01b6c48b83f8e8810f4609183b39fad871f942
3,649,762
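Usage sketch for format_stats above:

stats = {"games": 10, "wins": 7, "losses": 3}
print(format_stats(stats))
# games - 10
# wins - 7
# losses - 3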
def imcrop(img, bboxes, scale=1.0, pad_fill=None): """Crop image patches. 3 steps: scale the bboxes -> clip bboxes -> crop and pad. Args: img (ndarray): Image to be cropped. bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes. scale (float, optional): Scale ratio of bboxes, the default value 1.0 means no padding. pad_fill (Number | list[Number]): Value to be filled for padding. Default: None, which means no padding. Returns: list[ndarray] | ndarray: The cropped image patches. """ chn = 1 if img.ndim == 2 else img.shape[2] if pad_fill is not None: if isinstance(pad_fill, (int, float)): pad_fill = [pad_fill for _ in range(chn)] assert len(pad_fill) == chn _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32) clipped_bbox = bbox_clip(scaled_bboxes, img.shape) patches = [] for i in range(clipped_bbox.shape[0]): x1, y1, x2, y2 = tuple(clipped_bbox[i, :]) if pad_fill is None: patch = img[y1:y2 + 1, x1:x2 + 1, ...] else: _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :]) if chn == 1: patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1) else: patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn) patch = np.array( pad_fill, dtype=img.dtype) * np.ones( patch_shape, dtype=img.dtype) x_start = 0 if _x1 >= 0 else -_x1 y_start = 0 if _y1 >= 0 else -_y1 w = x2 - x1 + 1 h = y2 - y1 + 1 patch[y_start:y_start + h, x_start:x_start + w, ...] = img[y1:y1 + h, x1:x1 + w, ...] patches.append(patch) if bboxes.ndim == 1: return patches[0] else: return patches
244d6c39410c5d51780a8d3a261810986c17d779
3,649,763
def timestamp2str(ts):
    """ Converts Timestamp object to str containing date and time """
    date = ts.date().strftime("%Y-%m-%d")
    time = ts.time().strftime("%H:%M:%S")
    return ' '.join([date, time])
0e847a8af0cbbacf18df911e3070ac7c70e504b7
3,649,764
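Usage sketch for timestamp2str above, assuming a pandas Timestamp (which provides .date() and .time()):

import pandas as pd

ts = pd.Timestamp("2021-03-05 14:30:00")
print(timestamp2str(ts))  # 2021-03-05 14:30:00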
from operator import index def define_class_functions(processes, stages, progress): """ Define and return class of unit tests for stand-alone functions for the given configuration. """ class Test_functions(TestCase): def test_mapreduce(self): logger = log() if progress else None result = mr4mp.mapreduce( index, merge, range(50), processes=processes, stages=stages, progress=logger ) self.assertEqual(result, result_reference) if progress: self.assertEqual( logger.to_list(), list(range(50)) if stages is not None else [] ) def test_mapconcat(self): logger = log() if progress else None result = mr4mp.mapconcat( add_one, range(0, 100), processes=processes, stages=stages, progress=logger ) self.assertEqual(list(result), list(range(1, 101))) if progress: self.assertEqual( logger.to_list(), list(range(100)) if stages is not None else [] ) return Test_functions
0dc8df39e49f1e7591be7a7b8e80dc1266714cc4
3,649,765
def concept(*reference): """Reference to a semantic concept. Parameters ---------- *reference : :obj:`str` Keys pointing to the ruleset defining this concept in the rules file of an ontology. Returns ------- :obj:`CubeProxy` A textual reference to the concept that can be solved by the query processor. Examples -------- >>> sq.concept("entity", "water") { "type": "concept", "reference": [ "entity", "water" ] } """ obj = {"type": "concept", "reference": reference} return CubeProxy(obj)
c3e01f48ca962c5312a0cf8d6deb66eecc062078
3,649,766
import torch def collate_tensors(batch, stack_tensors=torch.stack): """ Collate a list of type ``k`` (dict, namedtuple, list, etc.) with tensors. Inspired by: https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31 Args: batch (list of k): List of rows of type ``k``. stack_tensors (callable): Function to stack tensors into a batch. Returns: k: Collated batch of type ``k``. Example use case: This is useful with ``torch.utils.data.dataloader.DataLoader`` which requires a collate function. Typically, when collating sequences you'd set ``collate_fn=partial(collate_tensors, stack_tensors=encoders.text.stack_and_pad_tensors)``. Example: >>> import torch >>> batch = [ ... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) }, ... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) }, ... ] >>> collated = collate_tensors(batch) >>> {k: t.size() for (k, t) in collated.items()} {'column_a': torch.Size([2, 5]), 'column_b': torch.Size([2, 5])} """ if all([torch.is_tensor(b) for b in batch]): return stack_tensors(batch) if (all([isinstance(b, dict) for b in batch]) and all([b.keys() == batch[0].keys() for b in batch])): return {key: collate_tensors([d[key] for d in batch], stack_tensors) for key in batch[0]} elif all([is_namedtuple(b) for b in batch]): # Handle ``namedtuple`` return batch[0].__class__(**collate_tensors([b._asdict() for b in batch], stack_tensors)) elif all([isinstance(b, list) for b in batch]): # Handle list of lists such each list has some column to be batched, similar to: # [['a', 'b'], ['a', 'b']] → [['a', 'a'], ['b', 'b']] transposed = zip(*batch) return [collate_tensors(samples, stack_tensors) for samples in transposed] else: return batch
cbd1098188e3d47b705e25edeae636624ebbec47
3,649,767
import numpy as np


def build_boundaries_layers(cyt_coord, nuc_coord, rna_coord):
    """Build boundary layers for the cytoplasm and nucleus, plus an mRNA layer.

    Parameters
    ----------
    cyt_coord : np.ndarray, np.int64
        Array of cytoplasm boundaries coordinates with shape (nb_points, 2).
    nuc_coord : np.ndarray, np.int64
        Array of nucleus boundaries coordinates with shape (nb_points, 2).
    rna_coord : np.ndarray, np.int64
        Array of mRNAs coordinates with shape (nb_points, 2) or
        (nb_points, 3).

    Returns
    -------
    cyt_boundaries : np.ndarray, np.float32
        A 2-d binary tensor with shape (y, x) showing cytoplasm boundaries.
    nuc_boundaries : np.ndarray, np.float32
        A 2-d binary tensor with shape (y, x) showing nucleus boundaries.
    rna_layer : np.ndarray, np.float32
        Binary image of mRNAs localizations with shape (y, x).

    """
    # check parameters
    stack.check_array(cyt_coord, ndim=2, dtype=[np.int64])
    if nuc_coord is not None:
        stack.check_array(nuc_coord, ndim=2, dtype=[np.int64])
    if rna_coord is not None:
        stack.check_array(rna_coord, ndim=2, dtype=[np.int64])

    # build surface binary matrices from coordinates
    cyt_surface, nuc_surface, rna_layer, _ = stack.from_coord_to_surface(
        cyt_coord=cyt_coord,
        nuc_coord=nuc_coord,
        rna_coord=rna_coord)

    # from surface binary matrices to boundaries binary matrices
    cyt_boundaries = stack.from_surface_to_boundaries(cyt_surface)
    nuc_boundaries = stack.from_surface_to_boundaries(nuc_surface)

    # cast layer in float32
    cyt_boundaries = stack.cast_img_float32(cyt_boundaries)
    nuc_boundaries = stack.cast_img_float32(nuc_boundaries)
    rna_layer = stack.cast_img_float32(rna_layer)

    return cyt_boundaries, nuc_boundaries, rna_layer
a99efab6ccc3044c04df330ca9c3ce0ebbf0c413
3,649,768
import altair as alt
import pandas as pd


def predicted_actual_chart(actual, predicted, title="Predicted vs Actual Values"):
    """Scatter of predicted vs actual values with an identity (y = x) baseline."""
    source = pd.DataFrame({"x": actual, "y": predicted})
    scatter = scatter_chart(source, "Actual", "Predicted", title=title)

    # Dashed y = x baseline spanning the data range: points on it are perfect predictions.
    vmin = source.min().min()
    vmax = source.max().max()
    _df = pd.DataFrame({"x": [vmin, vmax], "y": [vmin, vmax]})
    baseline = alt.Chart(_df).mark_line(strokeDash=[20, 5], color="black").encode(
        alt.X("x"),
        alt.Y("y"),
    )
    return scatter + baseline
91588a9d79bfa8eaea39067042b7e4b3c6784b7e
3,649,769
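A hedged usage sketch for predicted_actual_chart, assuming the module's scatter_chart helper (not shown here) builds an Altair scatter from the "x"/"y" columns and can be layered; the output file name is hypothetical.

import numpy as np

rng = np.random.default_rng(0)
actual = rng.normal(size=100)
predicted = actual + rng.normal(scale=0.1, size=100)   # near-perfect model
chart = predicted_actual_chart(actual, predicted)
chart.save("predicted_vs_actual.html")                 # layered Altair charts can be saved to HTML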
def swapSeries(keypoints_array,v,c,pers1,pers2,start,end): """helper function for swapping sections of time series. This is useful because openpose isn't consistent in labelling people so we need to rearrange things. Args: keypoints_array: all the data. v: which video? - specifies first dimension of array c: which camera? specifies second dimension of array pers1: which people to swap 1 pers2: which people to swap 2 start: where in time series do we start? (TODO can be blank - start at beginning) end: where in time series do we end? (TODO can be blank - to end) Returns: a rearranged keypoints_array """ temp = np.copy(keypoints_array[v,c,start:end,pers1,:]) #temporary copy pers1 keypoints_array[v,c,start:end,pers1,:] = keypoints_array[v,c,start:end,pers2,:] #pers2 to pers 1 keypoints_array[v,c,start:end,pers2,:] = temp return keypoints_array
52cb0a81bfac6706f5c20e04f23867c7850bd2e7
3,649,770
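A small self-contained check of swapSeries: build a dummy keypoints array, swap two people for a window of frames, and confirm the slices were exchanged. The axis layout (video, camera, frame, person, feature) follows the docstring; note the array is modified in place.

import numpy as np

# 1 video, 1 camera, 10 frames, 2 people, 3 keypoint values per person
keypoints = np.zeros((1, 1, 10, 2, 3))
keypoints[0, 0, :, 0, :] = 1.0   # person 0
keypoints[0, 0, :, 1, :] = 2.0   # person 1

swapped = swapSeries(keypoints, v=0, c=0, pers1=0, pers2=1, start=3, end=7)
print(swapped[0, 0, 3:7, 0, 0])  # [2. 2. 2. 2.] - person 0 now holds person 1's data
print(swapped[0, 0, 3:7, 1, 0])  # [1. 1. 1. 1.]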
def load_vgg(sess, vgg_path): """ Load Pretrained VGG Model into TensorFlow. :param sess: TensorFlow Session :param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb" :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out) """ # TODO: Implement function # Use tf.saved_model.loader.load to load the model and weights vgg_tag = 'vgg16' vgg_input_tensor_name = 'image_input:0' vgg_keep_prob_tensor_name = 'keep_prob:0' vgg_layer3_out_tensor_name = 'layer3_out:0' vgg_layer4_out_tensor_name = 'layer4_out:0' vgg_layer7_out_tensor_name = 'layer7_out:0' tf.saved_model.loader.load(sess, [vgg_tag], vgg_path) graph = tf.get_default_graph() input_tensor = graph.get_tensor_by_name(vgg_input_tensor_name) keep_prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name) layer3 = graph.get_tensor_by_name(vgg_layer3_out_tensor_name) layer4 = graph.get_tensor_by_name(vgg_layer4_out_tensor_name) layer7 = graph.get_tensor_by_name(vgg_layer7_out_tensor_name) return input_tensor, keep_prob, layer3, layer4, layer7
5b4c0c0ae82e92450113da6c2c01e5467d903a28
3,649,771
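A usage sketch for load_vgg. It requires the TensorFlow 1.x API (tf.Session and tf.saved_model.loader) and a local copy of the pretrained VGG16 SavedModel; the path below is hypothetical.

import tensorflow as tf  # TensorFlow 1.x API

vgg_path = "./data/vgg"  # hypothetical folder containing variables/ and saved_model.pb
with tf.Session() as sess:
    image_input, keep_prob, layer3, layer4, layer7 = load_vgg(sess, vgg_path)
    # The returned tensors are the hooks used to build an FCN decoder on top of VGG16.
    print(image_input.name, layer3.name, layer4.name, layer7.name)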
# `mul`, `inv`, `moment` and `rotation` are the so3 rotation helpers defined in the
# same module (Klamp't-style 9-element rotation matrices), and `vectorops` is the
# companion vector-math module; they are assumed to be in scope here.
def interpolate(R1, R2, u):
    """Interpolate linearly between the two rotations R1 and R2."""
    R = mul(inv(R1), R2)          # relative rotation taking R1 to R2
    m = moment(R)                 # axis-angle (moment) representation of R
    angle = vectorops.norm(m)
    if angle == 0:
        return R1
    axis = vectorops.div(m, angle)
    return mul(R1, rotation(axis, angle * u))
d4aaa976e52b6f44f44c4f26eccb59f1b85f9f0b
3,649,772
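A hedged usage sketch: if the Klamp't library is installed, its klampt.math.so3 and vectorops modules provide the helpers this snippet relies on, and so3.interpolate is the packaged equivalent of the function above.

import math
from klampt.math import so3, vectorops

R1 = so3.identity()
R2 = so3.rotation((0.0, 0.0, 1.0), math.pi / 2)     # 90 degrees about z
R_half = so3.interpolate(R1, R2, 0.5)
print(vectorops.norm(so3.moment(R_half)))           # ~pi/4, i.e. halfway between R1 and R2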
import typing as tp

import matplotlib.pyplot as plt
import numpy as np


def plot_spikes(
    spikes: dict,
    ax: plt.Axes = None,
    markersize: int = None,
    color: tp.Union[str, tp.Any] = "k",
) -> plt.Axes:
    """Plot spikes returned by NeuroDriver's OutputRecorder."""
    if ax is None:
        fig = plt.gcf()
        ax = fig.add_subplot()
    for n, (name, ss) in enumerate(spikes.items()):
        if "data" not in ss:
            raise EOSPlotterException(f"'data' field missing for node {name}")
        if "data" in ss and "time" in ss:
            raise EOSPlotterException(
                f"Data for node {name} is not compatible with required format, "
                "data must only have 'data' field. Did you mean "
                "to call plot_data?"
            )
        if len(ss["data"]) > 0:
            # Draw one row of '|' markers per node at height n, one marker per spike time.
            ax.plot(
                ss["data"],
                np.full(len(ss["data"]), n),
                "|",
                c=color,
                markersize=markersize,
            )
    return ax
d757c9c342e34e45820ee81f45e0bc59b8cbc277
3,649,773
def boardToString(board): """ return a string representation of the current board. """ # global board # b = board rg = range(board.size()) s = "┌────┬────┬────┬────┐\n|"+"|\n╞════╪════╪════╪════╡\n|".join( ['|'.join([getCellStr(board, x, y) for x in rg]) for y in rg]) s = "\n" + s + "|\n└────┴────┴────┴────┘" return s
2ea53d0ce7448ab0073176195195f1c4fb028a71
3,649,774
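A self-contained sketch of how boardToString is expected to be driven. The Board class and getCellStr formatter below are hypothetical stand-ins for the game module's real implementations; getCellStr must return 4-character-wide cell strings so the box drawing lines up.

class Board:
    def __init__(self):
        self.cells = [[2, 4, 8, 16],
                      [32, 64, 128, 256],
                      [512, 1024, 2048, 0],
                      [0, 0, 0, 0]]

    def size(self):
        return 4

    def getCell(self, x, y):
        return self.cells[y][x]


def getCellStr(board, x, y):
    # Hypothetical stand-in: right-align values in a 4-character cell, '.' for empty.
    value = board.getCell(x, y)
    return "{:>4}".format(value if value else ".")


print(boardToString(Board()))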
def create_data_ops(batch_size, num_elements_min_max):
    """Returns graphs containing the inputs and targets for classification.

    Refer to create_graph_dicts_tf and create_linked_list_target for more details.

    Args:
        batch_size: batch size for the `input_graphs`.
        num_elements_min_max: a 2-`tuple` of `int`s which define the [lower, upper)
            range of the number of elements per list.

    Returns:
        inputs_op: a `graphs.GraphsTuple` which contains the input list as a graph.
        targets_op: a `graphs.GraphsTuple` which contains the target as a graph.
        sort_indices_op: a `graphs.GraphsTuple` which contains the sort indices of
            the list elements as a graph.
        ranks_op: a `graphs.GraphsTuple` which contains the ranks of the list
            elements as a graph.
    """
    inputs_op, sort_indices_op, ranks_op = create_graph_dicts_tf(
        batch_size, num_elements_min_max)

    # show["inputs_graphs"] = inputs_op
    # show["sort_indices_graphs"] = sort_indices_op
    # show["ranks_graphs"] = ranks_op

    # Convert the data dicts into GraphsTuples of tensors.
    inputs_op = utils_tf.data_dicts_to_graphs_tuple(inputs_op)
    sort_indices_op = utils_tf.data_dicts_to_graphs_tuple(sort_indices_op)
    ranks_op = utils_tf.data_dicts_to_graphs_tuple(ranks_op)

    inputs_op = utils_tf.fully_connect_graph_dynamic(inputs_op)  # Adds edges to a graph by fully-connecting the nodes.
    sort_indices_op = utils_tf.fully_connect_graph_dynamic(sort_indices_op)
    ranks_op = utils_tf.fully_connect_graph_dynamic(ranks_op)

    # Targets are two-class (one-hot) node and edge labels describing the sorted linked list.
    targets_op = create_linked_list_target(batch_size, sort_indices_op)
    nodes = tf.concat((targets_op.nodes, 1.0 - targets_op.nodes), axis=1)
    edges = tf.concat((targets_op.edges, 1.0 - targets_op.edges), axis=1)
    targets_op = targets_op._replace(nodes=nodes, edges=edges)

    return inputs_op, targets_op, sort_indices_op, ranks_op
fd38b1a7d0d8e9e4633fa6fcefc5b1c1614c97fc
3,649,775
import os def compute_file_path(data_path, path, command): """Return the computed file path for mocked data Keyword arguments: data_path -- the path of the folder that contains the subbed data path -- the URL path command -- the HTTP verb """ return os.path.realpath( os.path.join( data_path, f'{path[1:]}.{command}.json' ) )
8260a67edb5fca16e4b9004e8596cc080c98ff19
3,649,776
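A quick usage sketch of compute_file_path; the data folder and URL path are made up for illustration.

path = compute_file_path("tests/data", "/users/42", "get")
print(path)  # e.g. /home/me/project/tests/data/users/42.get.json (absolute, host-dependent)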
def location_matches(stmt): """Return a matches_key which takes geo-location into account.""" if isinstance(stmt, Event): context_key = get_location(stmt) matches_key = str((stmt.concept.matches_key(), context_key)) elif isinstance(stmt, Influence): subj_context_key = get_location(stmt.subj) obj_context_key = get_location(stmt.obj) matches_key = str((stmt.matches_key(), subj_context_key, obj_context_key)) else: matches_key = stmt.matches_key() return matches_key
be261d2dcf7be09330542a4cd2c18b3261ef0eca
3,649,777
def _submit_to_measurement_sets_api(measurement_set, patch_update): """Send the submission object to the appropriate API endpoint.""" # TODO: Add a separate method to validate submission without sending it. # Attempt to find existing measurement sets if any exist. try: matching_submission = get_existing_submissions(measurement_set) measurement_set_id = get_measurement_set_id_from_submission(matching_submission) except(NoMatchingSubmissionsException, NoMatchingMeasurementSetsException): # If no measurement sets exist, we can safely POST. response = _post_to_measurement_sets_api(measurement_set) else: # If a measurement set does exist, we use the existing id to PUT or PATCH. if patch_update: response = _patch_to_measurement_sets_api(measurement_set, measurement_set_id) else: response = _put_to_measurement_sets_api(measurement_set, measurement_set_id) _handle_http_error(response, message='submit_to_measurement_sets_api') return response
5f0ae3a764f37ad8050b92bfc30abe338b7edd91
3,649,778
def parse_files(files, options): """Build datastructures from lines""" lines = [] for line in finput(files, openhook=compr): if (type(line) is bytes): line = line.decode('utf-8') lines.append(line.rstrip().split("|")) db = {} db['rp'], db['users'], db['msgprof'], db['logins'] = {}, {}, {}, 0 # Audit log format we're trying to parse below: # datetime|req_bind|req_id|rp|msg_profile|idp|resp_bind|resp_id|user|authn_mech|attribs|name_id|assert_id|ip for event in lines: try: rp, msg_profile, user = list(event[i] for i in [3, 4, 8]) except ValueError: print(linesep.join([ "ERROR: Unsupported log file format or compressed logs with Python < 2.5", "See the documentation."])) term(-1) if msg_profile.lower().find("sso") > -1: db['logins'] += 1 # we almost always need to count rps: if len(rp) > 0: if rp in db['rp']: db['rp'][rp] += 1 else: db['rp'][rp] = 1 # only count users if asked to if len(user) > 0: if options.uniqusers or options.xml or options.rrd or options.json: if user in db['users']: db['users'][user] += 1 else: db['users'][user] = 1 # only count message profiles and rps if asked to if options.msgprofiles: if msg_profile in db['msgprof']: if rp in db['msgprof'][msg_profile]: db['msgprof'][msg_profile][rp] += 1 else: db['msgprof'][msg_profile][rp] = 1 else: db['msgprof'][msg_profile] = {} db['msgprof'][msg_profile][rp] = 1 return db
926f805d87ead9af1099f39bfb57be0b4b775e0a
3,649,779
def resize_preserving_order(nparray: np.ndarray, length: int) -> np.ndarray: """Extends/truncates nparray so that ``len(result) == length``. The elements of nparray are duplicated to achieve the desired length (favours earlier elements). Constructs a zeroes array of length if nparray is empty. See Also -------- resize_array : cycles elements instead of favouring earlier ones make_even : similar earlier-favouring behaviour for balancing 2 iterables Examples -------- Normal usage:: resize_preserving_order(np.array([]), 5) # np.array([0., 0., 0., 0., 0.]) nparray = np.array([[1, 2], [3, 4]]) resize_preserving_order(nparray, 1) # np.array([[1, 2]]) resize_preserving_order(nparray, 3) # np.array([[1, 2], # [1, 2], # [3, 4]]) """ if len(nparray) == 0: return np.zeros((length, *nparray.shape[1:])) if len(nparray) == length: return nparray indices = np.arange(length) * len(nparray) // length return nparray[indices]
e074b1135d2192a9b0cf2d9b91f6d99f22408220
3,649,780
def push(service, key, data): """Push Called to push data to the sync cache Args: service (str): The name of the service using the sync key (mixed): The key to push the data onto data (mixed): The data to be pushed Returns: bool|string """ # Make sure the service and key are strings if not isinstance(service, basestring): service = str(service) if not isinstance(key, basestring): key = str(key) # Generate the JSON sJSON = JSON.encode({ "service": service, "key": key, "data": data }) # Check if anyone is interested in the key lSessions = _moRedis.smembers("%s%s" % (service, key)) # If there are any sessions if lSessions: # For each session found for sSession in lSessions: # Add the message to its list p = _moRedis.pipeline() p.lpush(sSession, sJSON) p.expire(sSession, 21600) p.execute() # Now publish the message for anyone using websockets _moRedis.publish("%s%s" % (service, key), sJSON) # Return OK return True
2be85735b1c4965e5a0cdf35b5f62267ce31cc6e
3,649,781
import argparse import sys def get_args(): """Get all parsed arguments.""" parser = argparse.ArgumentParser(description="CLASP training loop") # data parser.add_argument("--id", type=str, help="run id") parser.add_argument("--path-data-train", type=str, help="path preprocessed csv file for training") parser.add_argument("--path-offsd-train", type=str, help="path preprocessed offset dictionary json file for training") parser.add_argument("--path-data-valid-id", type=str, help="path preprocessed csv file for valid id") parser.add_argument("--path-offsd-valid-id", type=str, help="path preprocessed offset dictionary json file for valid id") parser.add_argument("--path-data-valid-ood", type=str, help="path preprocessed csv file for valid ood") parser.add_argument("--path-offsd-valid-ood", type=str, help="path preprocessed offset dictionary json file for valid ood") parser.add_argument("--path-results", type=str, default="results", help="path to the results data, i.e., logs, model weights, etc. (default: results)") parser.add_argument("--path-weights", type=str, default=None, help="path to weights for reloading (default: None)") parser.add_argument("--numw", type=int, default=0, help="number of workers for pytorch dataloader (default: 0)") # training parser.add_argument("--world-size", type=int, default=2, help="training world size (default: 2)") parser.add_argument("--bs", type=int, default=8, help="batch size (default: 8)") parser.add_argument("--epochs", type=int, default=2, help="epochs (default: 2)") parser.add_argument("--dryrun", action="store_true", default=False, help="Dry run for the setup runs only 4 steps in each epoch, use to test your setup (default: False)") # model # text encoder parser.add_argument("--tenc-ntok", type=int, default=49408, help="text encoder num_tokens (default: 49408)") parser.add_argument("--tenc-dim", type=int, default=512, help="text encoder dim (default: 512)") parser.add_argument("--tenc-depth", type=int, default=6, help="text encoder depth (default: 6)") parser.add_argument("--tenc-seq-len", type=int, default=1024, help="text encoder seq_len (default: 1024)") parser.add_argument("--tenc-rev", action="store_true", default=True, help="text encoder reversibility (default: True)") # bioseq encoder parser.add_argument("--bsenc-ntok", type=int, default=23, help="bioseq encoder num_tokens (default: 23)") parser.add_argument("--bsenc-dim", type=int, default=512, help="bioseq encoder dim (default: 512)") parser.add_argument("--bsenc-depth", type=int, default=6, help="bioseq encoder depth (default: 6)") parser.add_argument("--bsenc-seq-len", type=int, default=512, help="bioseq encoder seq_len (default: 512)") parser.add_argument("--bsenc-rev", action="store_true", default=True, help="bioseq encoder reversibility (default: True)") parser.add_argument("--bsenc-sparse-attn", action="store_true", default=False, help="bioseq encoder sparse_attn (default: False)") # logging and saving parser.add_argument("--save-interval-epoch", type=int, default=1, help="save interval epoch (default: 1") parser.add_argument("--save-interval-step", type=int, default=4_000, help="save interval step (default: 4_000") args = parser.parse_args() args.cmd = " ".join("\""+arg+"\"" if " " in arg else arg for arg in sys.argv) return args
7261570226596e6c4dada52dd058ecad33f4ac60
3,649,782
def get_db_filenames(database_name): """ This is used to populate the dropdown menu, so users can only access their data if their name is in the user column""" con = sql.connect(database_name) c = con.cursor() names_list = [] for row in c.execute( """SELECT Dataset_Name FROM master_table"""): names_list.append(row[0]) con.close() exists_list = [] for name in names_list: if if_file_exists_in_db(database_name, name): exists_list.append(name) return exists_list
7ffdd7cfb24d135ddc20353799dd0c7d21504232
3,649,783
import string def Calculate(values, mode=0, bin_function=None): """Return a list of (value, count) pairs, summarizing the input values. Sorted by increasing value, or if mode=1, by decreasing count. If bin_function is given, map it over values first. """ if bin_function: values = list(map(bin_function, values)) bins = {} for val in values: v = "%f-%f" % tuple(val) bins[v] = bins.get(v, 0) + 1 bb = list(bins.items()) if mode: bb.sort(lambda x, y: cmp(y[1], x[1])) else: bb.sort() r = [] for v, n in bb: x, y = list(map(string.atof, string.split(v, "-"))) r.append((x, y, n)) return r
bb3f40eec7733d948e66e00c3bafdd032acb6372
3,649,784
import time


def getToday(format=3):
    """Return today's date (and optionally time) as a string, in one of several preset formats."""
    t = time.time()
    date_ary = time.localtime(t)
    if format == 1:
        x = time.strftime("%Y%m%d", date_ary)
    elif format == 2:
        x = time.strftime("%H:%M", date_ary)
    elif format == 3:
        x = time.strftime("%Y/%m/%d", date_ary)
    elif format == 4:
        x = time.strftime("%Y/%m/%d %H:%M", date_ary)
    elif format == 5:
        x = time.strftime("%y%m%d", date_ary)
    elif format == 6:
        x = time.strftime("%Y-%m-%d", date_ary)
    elif format == 7:
        x = time.strftime("%Y/%m/%d %H:%M:%S", date_ary)
    elif format == 8:
        x = time.strftime("%Y-%m-%d %H:%M", date_ary)
    elif format == 9:
        x = time.strftime("%Y-%m-%d %H:%M:%S", date_ary)
    elif format == 10:
        x = time.strftime("%Y年%m月%d日 %H:%M", date_ary)
    else:
        x = time.strftime("%Y-%m-%d %H:%M:%S", date_ary)
    return x
900c0a0d42dc2220c5e5030eeebd858e3e6a41bf
3,649,785
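A usage sketch for getToday; the exact output depends on the current local time, so the comments only show the shape of each format.

print(getToday())            # 2024/01/31                (default format=3)
print(getToday(format=9))    # 2024-01-31 09:15:42
print(getToday(format=2))    # 09:15
print(getToday(format=10))   # 2024年01月31日 09:15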
def _get_referenced(body, start, end, no_header, clean, as_xml, as_list): """Retrieve data from body between some start and end.""" if body is None or start is None or end is None: return None content_list = body.get_between( start, end, as_text=False, no_header=no_header, clean=clean ) if as_list: return content_list referenced = Element.from_tag("office:text") for chunk in content_list: referenced.append(chunk) if as_xml: return referenced.serialize() else: return referenced
2b3e1ce008461711c37e4af6dda7dc7d2e332d9e
3,649,786
import torch def info(filepath: str) -> AudioMetaData: """Get signal information of an audio file. Args: filepath (str): Path to audio file Returns: AudioMetaData: meta data of the given audio. """ sinfo = torch.ops.torchaudio.sox_io_get_info(filepath) return AudioMetaData(sinfo.get_sample_rate(), sinfo.get_num_frames(), sinfo.get_num_channels())
e3ff5929f563977c44f25f8f51f3a7014f43b397
3,649,787
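A usage sketch for the torchaudio-backed info wrapper above; the file path is hypothetical and torchaudio with the sox_io backend must be available.

meta = info("speech.wav")
print(meta.sample_rate, meta.num_frames, meta.num_channels)
duration_seconds = meta.num_frames / meta.sample_rate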
def _override_regex_to_allow_long_doctest_lines(): """Allow too-long lines for doctests. Mostly a copy from `pylint/checkers/format.py` Parts newly added are marked with comment, "[PYTA added]: ..." """ def new_check_lines(self, lines, i): """check lines have less than a maximum number of characters """ max_chars = self.config.max_line_length ignore_long_line = self.config.ignore_long_lines def check_line(line, i, prev_line=None): if not line.endswith('\n'): self.add_message('missing-final-newline', line=i) else: # exclude \f (formfeed) from the rstrip stripped_line = line.rstrip('\t\n\r\v ') if not stripped_line and _EMPTY_LINE in self.config.no_space_check: # allow empty lines pass elif line[len(stripped_line):] not in ('\n', '\r\n'): self.add_message('trailing-whitespace', line=i) # Don't count excess whitespace in the line length. line = stripped_line mobj = OPTION_RGX.search(line) if mobj and mobj.group(1).split('=', 1)[0].strip() == 'disable': line = line.split('#')[0].rstrip() if len(line) > max_chars and not ignore_long_line.search(line): self.add_message('line-too-long', line=i, args=(len(line), max_chars)) return i + 1 unsplit_ends = { '\v', '\x0b', '\f', '\x0c', '\x1c', '\x1d', '\x1e', '\x85', '\u2028', '\u2029'} unsplit = [] _split_lines = lines.splitlines(True) # [PYTA added]: enumerate to get line_i index. for line_i, line in enumerate(_split_lines): if line[-1] in unsplit_ends: unsplit.append(line) continue if unsplit: unsplit.append(line) line = ''.join(unsplit) unsplit = [] # [PYTA added]: Skip error message for long doctest lines doctest_tokens = compile(r'^\s*>>>.*?\n$') if match(doctest_tokens, line): continue elif line_i > 0 and match(doctest_tokens, _split_lines[line_i-1]): continue i = check_line(line, i) if unsplit: check_line(''.join(unsplit), i) FormatChecker.check_lines = new_check_lines
9b9d1b5eefaa9e61d1e8915aef988fbc25756d1a
3,649,788
import types def handle(*, artifacts: oa_types.SimplePropertyArtifacts) -> types.TColumn: """ Handle a simple property. Args: artifacts: The artifacts of the simple property. Returns: The constructed column. """ return facades.sqlalchemy.simple.construct(artifacts=artifacts)
2c9d5cd47b2aecb7603430c8eec7b326ce3c249f
3,649,789
def rollout_representation(representation_model, steps, obs_embed, action, prev_states, done): """ Roll out the model with actions and observations from data. :param steps: number of steps to roll out :param obs_embed: size(time_steps, batch_size, n_agents, embedding_size) :param action: size(time_steps, batch_size, n_agents, action_size) :param prev_states: RSSM state, size(batch_size, n_agents, state_size) :return: prior, posterior states. size(time_steps, batch_size, n_agents, state_size) """ priors = [] posteriors = [] for t in range(steps): prior_states, posterior_states = representation_model(obs_embed[t], action[t], prev_states) prev_states = posterior_states.map(lambda x: x * (1.0 - done[t])) priors.append(prior_states) posteriors.append(posterior_states) prior = stack_states(priors, dim=0) post = stack_states(posteriors, dim=0) return prior.map(lambda x: x[:-1]), post.map(lambda x: x[:-1]), post.deter[1:]
2736609ab54d477c3fad2ab7a4e3270772517a08
3,649,790
import argparse def argparser(): """parse arguments from terminal""" parser = argparse.ArgumentParser() parser.add_argument('-v', '--video', dest='video') parser.add_argument('-c', '--config', dest='config', default=CONFIG_FILE) parser.add_argument('-o', '--output', dest='output') return parser
a24452f61dc5b24633397caca1632ea03e667675
3,649,791
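A usage sketch for argparser. CONFIG_FILE is the module-level constant the parser above expects; it is defined here hypothetically so the sketch runs on its own.

CONFIG_FILE = "config.yaml"  # hypothetical default consumed by argparser()

args = argparser().parse_args(["-v", "demo.mp4", "-o", "out.csv"])
print(args.video, args.config, args.output)  # demo.mp4 config.yaml out.csv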
def generate_random_ast(schema, rng): """End-to-end simulator for AST of Core DSL.""" distributions = [schemum[1] for schemum in schema] partition_alpha = rng.gamma(1,1) partition = generate_random_partition(partition_alpha, len(distributions), rng) row_dividers = [generate_random_row_divider(rng) for _i in partition] primitives = [ [output, dist, generate_random_hyperparameters(dist, rng)] for output, dist in enumerate(distributions) ] return [ [row_divider, [primitives[b] for b in block]] for row_divider, block in zip(row_dividers, partition) ]
9547f815ad07af33b182c7edf7ea646ec9fdd49f
3,649,792
def _opcode_to_string(opcode): """Return the printable name for a REIL opcode. Args: opcode (reil.Opcode): The opcode to provide in printable form. Returns: A string representing the opcode. """ return _opcode_string_map[opcode]
a1307efe0af8d223360a9ca0f2d9e96913ccb601
3,649,793
def get_shot(shot): """Retrieves shot object from database and returns as dictionary. Raises exception if shot is not found. """ return __get_conn().get_entity(__table_name(), shot['PartitionKey'], shot['RowKey'])
0e9ad55427bba2074f7a77d94b61e7bae34bcbda
3,649,794
def report_value_count(data_frame: pd.DataFrame, column: str, digits: int = 2) -> str: """ Report the number and percentage of non-empty values in the column. Parameters ---------- data_frame : pandas.DataFrame A data frame with one or more columns. column : str The name of the column to report on. digits : int, optional The number of digits to report in the percentage (default 2). Returns ------- str The number of non-empty cells and a percentage of the total number of rows. """ count = data_frame[column].notnull().sum() # The type of `count` is `numpy.int64` which when divided by zero yields `nan`. # This is undesired and we rather raise an exception here. if len(data_frame) == 0: raise ZeroDivisionError("The data frame is empty!") return f"{humanize.intcomma(count)} ({count / len(data_frame):.{digits}%})"
d31d9e8bae216f7931f96ec08992d6319d4c3645
3,649,795
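A quick check of report_value_count (requires pandas and the humanize package):

import numpy as np
import pandas as pd

df = pd.DataFrame({"age": [34, np.nan, 51]})
print(report_value_count(df, "age"))   # 2 (66.67%)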
def input_fn(is_training, data_dir, batch_size, num_epochs=1, num_parallel_calls=1, multi_gpu=False): """Input_fn using the tf.data input pipeline for CIFAR-10 dataset. Args: is_training: A boolean denoting whether the input is for training. data_dir: The directory containing the input data. batch_size: The number of samples per batch. num_epochs: The number of epochs to repeat the dataset. num_parallel_calls: The number of records that are processed in parallel. This can be optimized per data set but for generally homogeneous data sets, should be approximately the number of available CPU cores. multi_gpu: Whether this is run multi-GPU. Note that this is only required currently to handle the batch leftovers, and can be removed when that is handled directly by Estimator. Returns: A dataset that can be used for iteration. """ filenames = get_filenames(is_training, data_dir) dataset = tf.data.FixedLengthRecordDataset(filenames, _RECORD_BYTES) num_images = is_training and _NUM_IMAGES['train'] or _NUM_IMAGES['validation'] return resnet_run_loop.process_record_dataset( dataset, is_training, batch_size, _NUM_IMAGES['train'], parse_record, num_epochs, num_parallel_calls, examples_per_epoch=num_images, multi_gpu=multi_gpu)
5d27f5a04b409ad4b04ce9885b592b0454ae0b4b
3,649,796
def getWinners(players, game):
    """
    Return the indices of the winning player(s).

    :param players: list of players (unused here; scores are read from ``game.players``)
    :param game: current game state; each entry of ``game.players`` gets its
        ``credits`` attribute updated with its score
    :return: list of indices of the players with the best score, with ties
        resolved using turn order
    """
    # get score for each player
    for i in range(0, len(game.players)):
        game.players[i].credits = scoreFor(i, game)
    currentPlayer = whoseTurn(game)
    # add 1 to players who had less turns
    best = 0
    winners = []
    for i in range(0, len(game.players)):
        if best < game.players[i].credits:
            best = game.players[i].credits
            winners.append(i)
        elif best == game.players[i].credits:
            if i <= currentPlayer:
                winners.append(i)
            else:
                if currentPlayer in winners:
                    winners = []
                    winners.append(i)
    return winners
a872d4f9ed596e31ae9a129c9054f9bb95a6e765
3,649,797
import ase.io


def read_xsf(filepath):
    """
    :param filepath: path of the .xsf file
    :return: the cell and list of atoms needed to build the
        pymatflow.structure.crystal object
    """
    a = ase.io.read(filepath, format='xsf')
    cell = a.cell.tolist()
    atoms = []
    for i in range(len(a.arrays['numbers'])):
        for item in base.element:
            if base.element[item].number == a.arrays['numbers'][i]:
                symbol = item
                break
        atoms.append(base.Atom(
            symbol,
            a.arrays['positions'][i, 0],
            a.arrays['positions'][i, 1],
            a.arrays['positions'][i, 2]
        ))
    return cell, atoms
97152eb3d18752e78689598bb0c8603c13051623
3,649,798
def elina_abstract0_bound_linexpr(man, a, linexpr): """ Returns the ElinaInterval taken by an ElinaLinexpr0 over an ElinaAbstract0. Parameters ---------- man : ElinaManagerPtr Pointer to the ElinaManager. a : ElinaAbstract0Ptr Pointer to the ElinaAbstract0. linexpr : ElinaLinexpr0Ptr Pointer to the ElinaLinexpr0. Returns ------- interval : ElinaIntervalPtr Pointer to the ElinaInterval. """ interval = None try: elina_abstract0_bound_linexpr_c = elina_auxiliary_api.elina_abstract0_bound_linexpr elina_abstract0_bound_linexpr_c.restype = ElinaIntervalPtr elina_abstract0_bound_linexpr_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, ElinaLinexpr0Ptr] interval = elina_abstract0_bound_linexpr_c(man, a, linexpr) except: print('Problem with loading/calling "elina_abstract0_bound_linexpr" from "libelinaux.so"') print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, ElinaLinexpr0Ptr to the function') return interval
2764507b79f3326741496a92642be75b5afb8ce4
3,649,799