content: string, length 35–762k
sha1: string, length 40
id: int64, range 0–3.66M
from .common import OcrResult


def recognize(img, lang, *, hints=None):
    """
    Recognize the text in an image and return an OcrResult.

    :param img: image to recognize, a PIL.Image.Image object
    :param lang: language to recognize, a BCP-47 format string
    :param hints: hints for the OCR engine, a list of values defined in OcrHint
    :returns: OcrResult

    OcrResult = {
        lines: Tuple[OcrLine],
        extra: Any  # extra information returned by the engine
    }
    OcrLine = {
        words: Tuple[OcrWord],
        extra: Any
    }
    OcrWord = {
        text: str,
        rect: Rect,
        extra: Any
    }
    """
    return OcrResult(tuple())
ea2ef038122b4953e49d787753ef112a6efe8c1c
3,639,500
def check_missing_requirements():
    """Return the list of missing requirements (mencoder, mplayer, lame, and mkvmerge).

    Returns None if all requirements are in the execution path.
    """
    missing = []
    if which("mencoder") is None:
        missing.append("mencoder")
    if which("mplayer") is None:
        missing.append("mplayer")
    if which("lame") is None:
        missing.append("lame")
    if which("mkvmerge") is None:
        missing.append("mkvmerge")
    if len(missing) == 0:
        return None
    return missing
6351621a8a2ebfb52b06cb1f99fce0e02a263d08
3,639,501
def getExpMat(xy, shape, start, end, r, repeats=5):
    """
    Get the expected interaction contact matrix.

    xy is [[x, y]]
    shape is the () shape from the observed matrix.
    r is resolution
    """
    mat = []
    i = 0
    while i < repeats:
        a = xy[:, 0]
        b = xy[:, 1]
        np.random.shuffle(a)
        np.random.shuffle(b)
        xy[:, 0] = a
        xy[:, 1] = b
        s = b - a
        s = np.where(s > 0)[0]
        nxy = xy[s, ]
        nmat = getObsMat(nxy, start, end, r)
        if nmat.shape == shape:
            mat.append(nmat)
            i += 1
    mat = np.array(mat)
    return mat.mean(axis=0)
1aefed157a961447a562f5ea214ea55cdf6340b8
3,639,502
async def student_decline_offer(uid: str, username=Depends(auth_handler.auth_wrapper)):
    """
    Student to decline the offer in an Application

    Require: Student-self or Admin-write
    """
    logger.debug(f"{username} trying to decline the offer in an ApplicationForm")
    _updated = student_update_application_form_status(
        AF_uid=uid,
        username=username,
        new_status=AFStatus.DECLINE,
        pre_status=[AFStatus.OFFER],
    )
    return _updated
9c1cf9af3566ee7369ee90aac5cdf3aa714dfc6a
3,639,503
from crds.tests import test_table_effects, tstmod


def main():
    """Run module tests, for now just doctests only."""
    return tstmod(test_table_effects)
c8eaba2e58c8f3b75250e8ca250d123f11670635
3,639,504
from os import path


def get_relative_path(full_path: str) -> str:
    """
    Extract the relative path from the data folder

    :param full_path: The full path
    :return: Relative path from data folder

    >>> get_relative_path(path.join(DATA_PATH, 'MIDI/001-001.mid'))
    'MIDI/001-001.mid'
    """
    return path.relpath(full_path, DATA_PATH)
905740fadd8a4a40a67bb92812357ac3525b637e
3,639,505
def etminan(C, Cpi, F2x=3.71, scale_F2x=True):
    """Calculate the radiative forcing from CO2, CH4 and N2O.

    This function uses the updated formulas of Etminan et al. (2016),
    including the overlaps between CO2, methane and nitrous oxide.

    Reference: Etminan et al, 2016, JGR, doi: 10.1002/2016GL071930

    Inputs:
        C: [CO2, CH4, N2O] concentrations, [ppm, ppb, ppb]
        Cpi: pre-industrial [CO2, CH4, N2O] concentrations

    Keywords:
        F2x: radiative forcing from a doubling of CO2.
        scale_F2x: boolean. Scale the calculated value to the specified F2x?

    Returns:
        3-element array of radiative forcing: [F_CO2, F_CH4, F_N2O]
    """
    Cbar = 0.5 * (C[0] + Cpi[0])
    Mbar = 0.5 * (C[1] + Cpi[1])
    Nbar = 0.5 * (C[2] + Cpi[2])

    # Tune the coefficient of CO2 forcing to achieve the desired F2x, using
    # pre-industrial CO2 and N2O. F2x_etminan ~= 3.801.
    scaleCO2 = 1
    if scale_F2x:
        F2x_etminan = (
            -2.4e-7*Cpi[0]**2 + 7.2e-4*Cpi[0] - 2.1e-4*Cpi[2] + 5.36) * np.log(2)
        scaleCO2 = F2x/F2x_etminan

    F = np.zeros(3)
    F[0] = (-2.4e-7*(C[0] - Cpi[0])**2 + 7.2e-4*np.fabs(C[0]-Cpi[0]) -
            2.1e-4 * Nbar + 5.36) * np.log(C[0]/Cpi[0]) * scaleCO2
    F[1] = (-1.3e-6*Mbar - 8.2e-6*Nbar + 0.043) * (np.sqrt(C[1]) - np.sqrt(Cpi[1]))
    F[2] = (-8.0e-6*Cbar + 4.2e-6*Nbar - 4.9e-6*Mbar + 0.117) * \
        (np.sqrt(C[2]) - np.sqrt(Cpi[2]))
    return F
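# Hedged usage sketch (added, not from the original source): illustrative
# present-day-style concentrations in [ppm, ppb, ppb] against pre-industrial
# values yield the three forcings [F_CO2, F_CH4, F_N2O] in W m^-2.
# Assumes numpy is imported as np, as the function body requires.
# etminan(C=[410.0, 1866.0, 332.0], Cpi=[278.0, 722.0, 270.0])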
8f80ecb153c94b806edbeffe3d384722333d2226
3,639,506
def render_manage_data_store_pages(request, html_file):
    """
    Generate management pages for data_stores.
    """
    # initialize session
    session_maker = app.get_persistent_store_database('main_db', as_sessionmaker=True)
    session = session_maker()

    data_stores = session.query(DataStore) \
        .filter(DataStore.id > 1) \
        .order_by(DataStore.name) \
        .all()

    context = {
        'data_stores': data_stores,
    }

    table_html = render(request,
                        'streamflow_prediction_tool/{}'.format(html_file),
                        context)
    # in order to close the session, the request needed to be rendered first
    session.close()
    return table_html
ae73c2f1f88566f7a44992c1a8c5063cc099bf93
3,639,507
def validate_password_form(p1, p2, is_open, btn, sp1, sp2): """Validade password form Returns Output('password1', 'invalid'), Output('password2', 'invalid'), Output('password1', 'title'), Output('password2', 'title'), """ invalid = {'p1':sp1, 'p2':sp2} title = {'p1':None, 'p2':None} ctx = dash.callback_context if ctx.triggered: btn_id = ctx.triggered[0]['prop_id'].split('.')[0] if btn_id == 'modal' or btn_id == 'clear': return False, False, None, None if p1: pwd_check = password_check(p1) if not pwd_check['ok']: invalid['p1'] = True if pwd_check['length_error']: title['p1']= _( 'The password must be at least 8 characters long.' ) elif pwd_check['digit_error']: title['p1'] = _('The password must have numbers.') elif pwd_check['uppercase_error'] or pwd_check['lowercase_error']: title['p1'] = _( 'The password must haver uppercase and lowercase letters.' ) elif pwd_check['symbol_error']: title['p1'] = _('The password must have special symbols.') else: invalid['p1'] = False if p2: if not p1: invalid['p2'] = True title['p2'] = _('Fill password field.') elif not p1==p2: invalid['p2'] = True title['p2'] = _('Passwords don\'t match.') else: invalid['p2'] = False return invalid['p1'], invalid['p2'], title['p1'], title['p2']
d94d577951dc11eafa10d3dbe1cf17ce32957e85
3,639,508
def cardinal_spline(points, tension=0.5):
    """Path instructions for a cardinal spline.

    The spline interpolates the control points.

    Args:
        points (list of 2-tuples): The control points for the cardinal spline.
        tension (float, optional): Tension of the spline in the range [0,1].
            Defaults to 0.5.

    Returns:
        string: Ipe path instructions
    """
    instructions = [
        str(points[0][0]), str(points[0][1]), 'm'
    ] + [
        f(p) for p in points[1:]
        for f in [lambda p: str(p[0]), lambda p: str(p[1])]
    ] + [str(tension), 'C ']
    return ' '.join(instructions)
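# Illustrative call (added, not from the original source): the first point
# becomes a moveto, the remaining coordinates follow, and tension plus 'C '
# close the cardinal-spline operator.
# cardinal_spline([(0, 0), (1, 2), (3, 1)])
# -> '0 0 m 1 2 3 1 0.5 C '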
a549b5fcd8df2cb311563a495029901bc1edb1c1
3,639,509
def extract(x, *keys):
    """
    Args:
        x (dict or list): dict or list of dicts

    Returns:
        (tuple): tuple with the elements of the dict or the dicts of the list
    """
    if isinstance(x, dict):
        return tuple(x[k] for k in keys)
    elif isinstance(x, list):
        return tuple([xi[k] for xi in x] for k in keys)
    else:
        raise NotImplementedError
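# Illustrative calls (added for clarity, not part of the original snippet):
# extract({'a': 1, 'b': 2}, 'a', 'b')   -> (1, 2)
# extract([{'a': 1}, {'a': 3}], 'a')    -> ([1, 3],)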
c0730556786586011b0b22ae5003c2fe9ccb2894
3,639,510
def get_source_config_from_ctx(_ctx, group_name=None, hostname=None, host_config=None, sources=None): """Generate a source config from CTX. :param _ctx: Either a NodeInstance or a RelationshipInstance ctx. :param group_name: User's override value, like 'webservers'. :param hostname: User's override value, like 'web'. :param host_config: User's override value. Like: { 'ansible_host': '127.0.0.1', 'ansible_user': 'ubuntu', } :param sources: User's sources override value. :return: """ sources = sources or {} if _ctx.type == NODE_INSTANCE and \ 'cloudify.nodes.Compute' not in _ctx.node.type_hierarchy and \ _ctx.instance.runtime_properties.get(SOURCES): return AnsibleSource(_ctx.instance.runtime_properties[SOURCES]).config elif _ctx.type == RELATIONSHIP_INSTANCE: host_config = host_config or \ get_host_config_from_compute_node(_ctx.target) group_name, hostname = \ get_group_name_and_hostname( _ctx.target, group_name, hostname) additional_node_groups = get_additional_node_groups( _ctx.target.node.name, _ctx.deployment.id) else: host_config = host_config or \ get_host_config_from_compute_node(_ctx) group_name, hostname = \ get_group_name_and_hostname( _ctx, group_name, hostname) additional_node_groups = get_additional_node_groups( _get_node(_ctx).name, _ctx.deployment.id) if '-o StrictHostKeyChecking=no' not in \ host_config.get('ansible_ssh_common_args', ''): _ctx.logger.warn( 'This value {0} is not included in Ansible Configuration. ' 'This is required for automating host key approval.'.format( {'ansible_ssh_common_args': '-o StrictHostKeyChecking=no'})) hosts = { hostname: host_config } sources[group_name] = { HOSTS: hosts } for additional_group in additional_node_groups: sources[additional_group] = {HOSTS: {hostname: None}} return AnsibleSource(sources).config
40b293ac9e63d96919b43cd60650e7d46ff34d57
3,639,511
def index(request):
    """Show welcome to the sorting quiz."""
    template = loader.get_template("ggpoll/index.html")
    context = {}
    return HttpResponse(template.render(context, request))
68eb4afde5066f1a4a097f0d725ec682e803ace4
3,639,512
from artificial_neural_networks.utils.download_mnist import download_mnist from artificial_neural_networks.utils.generic_utils import save_classif_model from artificial_neural_networks.utils.vis_utils import plot_confusion_matrix, epoch_plot import os def cnn_dropout_mnist(args): """ Main function """ # %% # IMPORTS # code repository sub-package imports # %% if args.verbose > 0: print(args) # For reproducibility if args.reproducible: os.environ['PYTHONHASHSEED'] = '0' np.random.seed(args.seed) rn.seed(args.seed) tf.set_random_seed(args.seed) sess = tf.Session(graph=tf.get_default_graph()) K.set_session(sess) # print(hash("keras")) # %% # Load the MNIST dataset mnist_path = download_mnist() mnist = np.load(mnist_path) train_x = mnist['x_train'].astype(np.float32) train_y = mnist['y_train'].astype(np.int32) test_x = mnist['x_test'].astype(np.float32) test_y = mnist['y_test'].astype(np.int32) mnist.close() # %% # PREPROCESSING STEP scaling_factor = args.scaling_factor translation = args.translation img_width = train_x.shape[1] img_height = train_x.shape[2] n_train = train_x.shape[0] # number of training examples/samples n_test = test_x.shape[0] # number of test examples/samples n_in = img_width * img_height # number of features / dimensions n_out = np.unique(train_y).shape[0] # number of classes/labels # Reshape training and test sets train_x = train_x.reshape(n_train, img_width, img_height, 1) test_x = test_x.reshape(n_test, img_width, img_height, 1) # Apply preprocessing train_x = scaling_factor * (train_x - translation) test_x = scaling_factor * (test_x - translation) one_hot = False # It works exactly the same for both True and False # Convert class vectors to binary class matrices (i.e. One hot encoding) if one_hot: train_y = to_categorical(train_y, n_out) test_y = to_categorical(test_y, n_out) # %% # Model hyperparameters and ANN Architecture N = [] N.append(n_in) # input layer if args.same_size: n_layers = args.n_layers for i in range(n_layers): N.append(args.layer_size) # hidden layer i else: n_layers = len(args.explicit_layer_sizes) for i in range(n_layers): N.append(args.explicit_layer_sizes[i]) # hidden layer i N.append(n_out) # output layer # ANN Architecture L = len(N) - 1 x = Input(shape=(img_width, img_height, 1)) # input layer h = Dropout(rate=args.dropout_rate_input)(x) h = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(h) h = MaxPooling2D(pool_size=(2, 2))(h) h = Dropout(rate=args.dropout_rate_conv)(h) h = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(h) h = MaxPooling2D(pool_size=(2, 2))(h) h = Dropout(rate=args.dropout_rate_conv)(h) h = Flatten()(h) for i in range(1, L): h = Dense(units=N[i], activation='relu')(h) # hidden layer i h = Dropout(rate=args.dropout_rate_hidden)(h) out = Dense(units=n_out, activation='softmax')(h) # output layer model = Model(inputs=x, outputs=out) if args.verbose > 0: model.summary() if one_hot: loss_function = 'categorical_crossentropy' else: loss_function = 'sparse_categorical_crossentropy' metrics = ['accuracy'] lr = args.lrearning_rate epsilon = args.epsilon optimizer_selection = { 'Adadelta': optimizers.Adadelta(lr=lr, rho=0.95, epsilon=epsilon, decay=0.0), 'Adagrad': optimizers.Adagrad(lr=lr, epsilon=epsilon, decay=0.0), 'Adam': optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=epsilon, decay=0.0, amsgrad=False), 'Adamax': optimizers.Adamax(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=epsilon, decay=0.0), 'Nadam': optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=epsilon, schedule_decay=0.004), 'RMSprop': 
optimizers.RMSprop(lr=lr, rho=0.9, epsilon=epsilon, decay=0.0), 'SGD': optimizers.SGD(lr=lr, momentum=0.0, decay=0.0, nesterov=False) } optimizer = optimizer_selection[args.optimizer] model.compile(optimizer=optimizer, loss=loss_function, metrics=metrics) # %% # Save trained models for every epoch models_path = r'artificial_neural_networks/trained_models/' model_name = 'mnist_cnn_dropout' weights_path = models_path + model_name + '_weights' model_path = models_path + model_name + '_model' file_suffix = '_{epoch:04d}_{val_acc:.4f}_{val_loss:.4f}' if args.save_weights_only: file_path = weights_path else: file_path = model_path file_path += file_suffix # monitor = 'val_loss' monitor = 'val_acc' if args.save_models: checkpoint = ModelCheckpoint( file_path + '.h5', monitor=monitor, verbose=args.verbose, save_best_only=args.save_best_only, mode='auto', save_weights_only=args.save_weights_only) callbacks = [checkpoint] else: callbacks = [] # %% # TRAINING PHASE if args.time_training: start = timer() model_history = model.fit( x=train_x, y=train_y, validation_data=(test_x, test_y), batch_size=args.batch_size, epochs=args.n_epochs, verbose=args.verbose, callbacks=callbacks) if args.time_training: end = timer() duration = end - start print('Total time for training (in seconds):') print(duration) # %% # TESTING PHASE train_y_pred = np.argmax(model.predict(train_x), axis=1) test_y_pred = np.argmax(model.predict(test_x), axis=1) train_score = model.evaluate(x=train_x, y=train_y, verbose=args.verbose) train_dict = {'loss': train_score[0], 'acc': train_score[1]} test_score = model.evaluate(x=test_x, y=test_y, verbose=args.verbose) test_dict = {'val_loss': test_score[0], 'val_acc': test_score[1]} if args.verbose > 0: print('Train loss:', train_dict['loss']) print('Train accuracy:', train_dict['acc']) print('Test loss:', test_dict['val_loss']) print('Test accuracy:', test_dict['val_acc']) # %% # Data Visualization if args.plot: # Confusion matrices classes = list(range(n_out)) train_cm = confusion_matrix(train_y, train_y_pred) plot_confusion_matrix(train_cm, classes=classes, title='Confusion matrix for training set') test_cm = confusion_matrix(test_y, test_y_pred) plot_confusion_matrix(test_cm, classes=classes, title='Confusion matrix for test set') # Loss vs epoch epoch_axis = range(1, args.n_epochs + 1) train_loss = model_history.history['loss'] test_loss = model_history.history['val_loss'] epoch_plot(epoch_axis, train_loss, test_loss, 'Loss') # Accuracy vs epoch train_acc = model_history.history['acc'] test_acc = model_history.history['val_acc'] epoch_plot(epoch_axis, train_acc, test_acc, 'Accuracy') # %% # Save the architecture and the lastly trained model save_classif_model(model, models_path, model_name, weights_path, model_path, file_suffix, test_dict, args) # %% return model
e955c4607e9a1455536103619cbcc73accf0ff29
3,639,513
def get_all_random_experiment_histories_from_files(experiment_path_prefix, net_count):
    """ Read history-arrays from all specified npz-files with net_number from zero to
    'net_count' and return them as one ExperimentHistories object. """
    assert net_count > 0, f"'net_count' needs to be greater than 0, but is {net_count}."

    histories = get_random_experiment_histories_from_file(experiment_path_prefix, 0)
    for net_number in range(1, net_count):
        current_histories = get_random_experiment_histories_from_file(experiment_path_prefix, net_number)
        histories = histories.stack_histories(current_histories)
    return histories
fc36d2234025aea0e6232f15ac505adc489795c5
3,639,514
import os
import json


def is_gcloud_oauth2_token_cached():
    """Returns false if 'gcloud auth login' needs to be run."""
    p = os.path.join(os.path.expanduser('~'), '.config', 'gcloud', 'credentials')
    try:
        with open(p) as f:
            return len(json.load(f)['data']) != 0
    except (KeyError, IOError, OSError, ValueError):
        return False
8d02c1b41399d7f5c8550e4c0364108c251c3791
3,639,515
def convert_index_to_indices(index_ls, shape):
    """
    Convert a coordinate list in index_ls format to indices_ls format.
    """
    assert index_ls.size <= np.prod(shape)
    source = np.zeros(shape=shape)
    zip_indices = np.where(source >= 0)
    indices_ls = convert.zip_type_to_indices(zip_indices=zip_indices)
    indices_ls = indices_ls[index_ls]
    return indices_ls
3f1861820f81d27b7a6b0878bc768dca84fd6b3b
3,639,516
def get_funnels_list(connector: MixpanelAPI) -> pd.DataFrame:
    """
    This function returns the whole list of funnels in a table containing the funnel ID and the funnel name

    :param connector: the connector to the Mixpanel service
    :return: a pandas DataFrame
    """
    # TODO: change dataframe to simple dict
    flist = connector.request(["funnels/list"], {})
    flist_df = pd.DataFrame(flist)
    flist_df.set_index('funnel_id', inplace=True)
    return flist_df
a0e89d4f1f7e666b6e9341ea85971d12d9b2d466
3,639,517
def _gen_off_list(sindx):
    """
    Given a starting index and size, return a list of numbered links in that range.
    """
    def _gen_link_olist(osize):
        return list(range(sindx, sindx + osize))

    return _gen_link_olist
863ccdc08f6a7cadccc3c5ccfd0cb92a223aadda
3,639,518
def report_operation_log_list(request):
    """
    Return the list of routine operation logs.

    :param request:
    :return:
    """
    return administrator.report_operation_log_list(request)
c2bbeda9a3342e9ce9667f9ec6f232f171a164e0
3,639,519
def connect_redis(redis_host, redis_port, redis_db):
    """ connect to redis """
    global _conn
    if _conn is None:
        print("connect redis %s (%s)" % ("%s:%s" % (redis_host, redis_port), os.getpid()))
        _conn = redis.Redis(host=redis_host, port=redis_port, db=redis_db)
    return _conn
0b3f51fcbe78e7d8075398675b62ae95f56f06b6
3,639,520
import json import time import os def custom_static1(filename): """ Request to access specific files in the backup directory .. :quickref: Get backup files; Get a specific file from a directory in the DAQBroker backup directory """ scoped = daqbrokerSettings.getScoped() session = scoped() #connection = connect(request) scoped = daqbrokerSettings.getScoped() session = scoped() globalsObj = session.query( daqbrokerSettings.Global).filter_by( clock=session.query( func.max( daqbrokerSettings.Global.clock))).first() if globalsObj: globals = {} for field in globalsObj.__dict__: if not field.startswith('_'): globals[field] = getattr(globalsObj, field) if 'remarks' in globals: globals["remarks"] = json.loads(globals["remarks"]) else: globals = { 'clock': time.time(), 'version': '0.1', 'backupfolder': 'backups', 'importfolder': 'uploads', 'tempfolder': 'temp', 'ntp': None, 'commport': 9090, 'logport': 9092, 'remarks': {}} #print(request.args) #print(filename.split('.')) if filename.split('.')[1]=='zip': #print("osijdfposdijfopsdfijdopsifjdopsfij") return send_file(os.path.join(globals['tempfolder'], filename),mimetype="zip", attachment_filename="downloaded_files.zip", as_attachment=True) else: return send_from_directory(globals['tempfolder'], filename)
ceac42bba2e6c277dea0e19b9a42b1657a21d6ae
3,639,521
def loadSV(fname, shape=None, titles=None, aligned=False, byteorder=None, renamer=None, **kwargs): """ Load a delimited text file to a numpy record array. Basically, this function calls loadSVcols and combines columns returned by that function into a numpy ndarray with stuctured dtype. Also uses and returns metadata including column names, formats, coloring, &c. if these items are determined during the loading process. **Parameters** **fname** : string or file object Path (or file object) corresponding to a separated variable (CSV) text file. **names** : list of strings Sets the names of the columns of the resulting tabarray. If not specified, `names` value is determined first by looking for metadata in the header of the file, and if that is not found, are assigned by NumPy's `f0, f1, ... fn` convention. See **namesinheader** parameter below. **formats** : string or list of strings Sets the datatypes of the columns. The value of `formats` can be a list or comma-delimited string of values describing values for each column (e.g. "str,str,int,float" or ["str", "str", "int", "float"]), a single value to apply to all columns, or anything that can be used in numpy.rec.array constructor. If the **formats** (or **dtype**) parameter are not specified, typing is done by inference. See **typer** parameter below. **dtype** : numpy dtype object Sets the numpy dtype of the resulting tabarray, combining column format and column name information. If dtype is set, any **names** and **formats** specifications will be overriden. If the **dtype** (or **formats**) parameter are not specified, typing is done by inference. See **typer** parameter below. The **names**, **formats** and **dtype** parameters duplicate parameters of the NumPy record array creation inferface. Additional paramters of the NumPy inferface that are passed through are **shape**, **titles**, **byteorder** and **aligned** (see NumPy documentation for more information.) **kwargs**: keyword argument dictionary of variable length Contains various parameters to be passed down to loadSVcols. These may include **skiprows**, **comments**, **delimiter**, **lineterminator**, **uselines**, **usecols**, **excludecols**, **metametadata**, **namesinheader**,**headerlines**, **valuefixer**, **linefixer**, **colfixer**, **delimiter_regex**, **inflines**, **typer**, **missingvalues**, **fillingvalues**, **verbosity**, and various CSV module parameters like **escapechar**, **quoting**, **quotechar**, **doublequote**, **skipinitialspace**. **Returns** **R** : numpy record array Record array constructed from data in the SV file **metadata** : dictionary Metadata read and constructed during process of reading file. **See Also:** :func:`tabular.io.loadSVcols`, :func:`tabular.io.saveSV`, :func:`tabular.io.DEFAULT_TYPEINFERER` """ [columns, metadata] = loadSVcols(fname, **kwargs) if 'names' in metadata.keys(): names = metadata['names'] else: names = None if 'formats' in metadata.keys(): formats = metadata['formats'] else: formats = None if 'dtype' in metadata.keys(): dtype = metadata['dtype'] else: dtype = None if renamer is not None: print 'Trying user-given renamer ...' renamed = renamer(names) if len(renamed) == len(uniqify(renamed)): names = renamed print '''... using renamed names (original names will be in return metadata)''' else: print '... renamer failed to produce unique names, not using.' if names and len(names) != len(uniqify(names)): print 'Names are not unique, reverting to default naming scheme.' 
names = None return [utils.fromarrays(columns, type=np.ndarray, dtype=dtype, shape=shape, formats=formats, names=names, titles=titles, aligned=aligned, byteorder=byteorder), metadata]
94ac8943ff50273162066db53040107406f27059
3,639,522
def rationalize_quotes_from_table(table, rationalizeBase=10000): """ Retrieve the data from the given table of the SQLite database It takes parameters: table (this is one of the Quote table models: Open, High, Low, or Close) It returns a tuple of lists """ first_row = table.select().limit(1).get() rationalize_bull_1x_price = rationalizeBase / first_row.bull_1x_price rationalize_bear_1x_price = rationalizeBase / first_row.bear_1x_price rationalize_bull_3x_price = rationalizeBase / first_row.bull_3x_price rationalize_bear_3x_price = rationalizeBase / first_row.bear_3x_price indices = [] dates = [] bull_1x_prices = [] bear_1x_prices = [] bull_3x_prices = [] bear_3x_prices = [] for row in table.select(): indices.append(row.id) dates.append(row.date) bull_1x_prices.append(row.bull_1x_price * rationalize_bull_1x_price) bear_1x_prices.append(row.bear_1x_price * rationalize_bear_1x_price) bull_3x_prices.append(row.bull_3x_price * rationalize_bull_3x_price) bear_3x_prices.append(row.bear_3x_price * rationalize_bear_3x_price) return indices, dates, bull_1x_prices, bear_1x_prices, bull_3x_prices, bear_3x_prices
ee1c8310c12e7e53e9ca2677dd61d7d2525603fd
3,639,523
def k(func):
    """Define a decorator function."""
    def m(*args, **kw):
        print('call %s():' % func.__name__)
        return func(*args, **kw)
    return m
3cc958033fd66547e523882435494f27ae81b096
3,639,524
import requests


def get_playlist_object(playlist_url, access_token):
    """
    playlist_url : url of spotify playlist
    access_token : access token gotten from client credentials authorization

    return object containing playlist tracks
    """
    playlist_id = playlist_url.split("/")[-1]
    playlist_endpoint = f"https://api.spotify.com/v1/playlists/{playlist_id}"
    get_header = {
        "Authorization": "Bearer " + access_token
    }

    # API request
    response = requests.get(playlist_endpoint, headers=get_header)
    playlist_object = response.json()
    return playlist_object
8c7ed1a1b9574e2e0870d3091452accf5909f982
3,639,525
from typing import Tuple


def guess_identifiers(fuzzy_base_name: str) -> Tuple[str, str]:
    """
    Given a fuzzy base name, guess the corresponding (item ID, base name) identifier pair.

    :param fuzzy_base_name: The base name to be matched.
    :return: The identifier pair with the closest matching base name.
    """
    sql = 'SELECT base_name FROM item_info'
    choices = _conn.execute(sql).fetchall()
    base_name = process.extractOne(fuzzy_base_name, choices)[0][0]
    sql2 = 'SELECT item_id FROM item_info WHERE base_name = ?'
    item_id = _conn.execute(sql2, (base_name,)).fetchone()[0]
    return item_id, base_name
6ac609268a92c16408eb414d7944cbd09fedfcc5
3,639,526
def make_image(center=(.1,-.4),dpi=500,X_cut_min = -.59 -xcut_offset,Y_cut_max = 1.61 + ycut_offset,X_cut_max = .12-xcut_offset,Y_cut_min = .00 +ycut_offset,bands=23 ): """make visual count it by area then have hist values for normalization wih movement data to be exported and then can be counted PARAMS ------------ center : tuple where beacon is dpi : int dots per inch - resolution - if changed can mess up pixel count X_cut,Y_cut : int points of rectagle, same as used for cutting of rears - floor of arena bands : int amount of circles fittign inthe rectangle - max is 23 Returns ------------ Histogram and appropriate bins made by the histogram Used for area estimation later on """ fig, ax1 = plt.subplots(1, 1, sharex=True,dpi=dpi,) fig.patch.set_visible(False) rectangle = patches.Rectangle((X_cut_min,Y_cut_min), (abs(X_cut_min)+abs(X_cut_max)),abs(Y_cut_min)+abs(Y_cut_max) , color="white") ax1.add_patch(rectangle) #plt.plot(center[0],center[1], "ro") color = np.linspace(0,.99,bands+1) for i in reversed(range(bands)): c=color[i] patch = patches.Circle((center[0],center[1]), radius=.075*i,color=str(c)) ax1.add_patch(patch) patch.set_clip_path(rectangle) ax1.axis("equal") ax1.axis("off") fig.savefig('norm_graph.png', dpi=dpi, transparent=True) img= Image.frombytes('RGB',fig.canvas.get_width_height(),fig.canvas.tostring_rgb()) image_array = np.asarray(img) hist, bins = np.histogram(image_array,bins=bands,range=(0,249)) #plt.show() width = 0.7 * (bins[1] - bins[0]) center = (bins[:-1] + bins[1:]) / 2 #plt.bar(center, hist, align='center', width=width) return hist,bins
a90a92318b30e5e4069dc415d5a4693d844b8c18
3,639,527
def read_answer_patterns(pattern_file_path):
    """load answer patterns into qid2patterns dictionary """
    qid2patterns = {}
    last_qid = None
    with open(pattern_file_path) as f:
        for line in f:
            qid, pattern = line.strip().split("\t")
            if qid != last_qid:
                # start collecting patterns for a new qid
                if last_qid != None:  # if not the first question
                    qid2patterns[last_qid] = patterns
                last_qid = qid
                patterns = [pattern]
            else:
                # collect patterns for the current qid
                patterns.append(pattern)
    qid2patterns[last_qid] = patterns
    return qid2patterns
da8f018deb15088b359044f22cfa71f0b8305af7
3,639,528
def fitness(coords, solution):
    """
    Total distance of the current solution path.
    """
    N = len(coords)
    cur_fit = 0
    for i in range(N):
        cur_fit += dist(coords[solution[i % N]], coords[solution[(i + 1) % N]])
    return cur_fit
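# A self-contained sketch of how fitness() might be exercised; the dist()
# helper is not part of the snippet above, so a Euclidean version is assumed
# here purely for illustration.
import math

def dist(a, b):
    # straight-line distance between two 2-D points
    return math.hypot(a[0] - b[0], a[1] - b[1])

coords = [(0, 0), (3, 0), (3, 4)]   # a 3-4-5 triangle
print(fitness(coords, [0, 1, 2]))   # 12.0: the closed tour 0 -> 1 -> 2 -> 0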
716dee705be75bbf6f64ec55b39187ab567edfa0
3,639,529
import numpy as np


def f_of_sigma(sigma, A=0.186, a=1.47, b=2.57, c=1.19):
    """
    The prefactor in the mass function parametrized as in Tinker et al 2008.

    The default values of the optional parameters correspond to a mean halo
    density of 200 times the background. The values can be found in table 2
    of Tinker 2008 ApJ 679, 1218

    Parameters
    ----------
    sigma: float
        Standard deviation of the linear power spectrum
    A,a,b,c: float, optional
        0.186 by default

    Returns
    -------
    f: float
        Value of f(sigma)
    """
    f = A*((sigma/b)**(-a)+1)*np.exp(-c/sigma/sigma)
    return f
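# Worked example (added; values rounded, shown only to make the formula concrete):
# for sigma = 1 the default Tinker parameters give
#   f = 0.186 * ((1/2.57)**(-1.47) + 1) * exp(-1.19) ≈ 0.28
# f_of_sigma(1.0)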
89abe82df8a4384e74eb556172c9d46193b731da
3,639,530
from typing import Sequence import time def create_role( role_name: str, base_session: boto3.Session, region: str, auto_trust_caller_identity=True, allowed_services: Sequence[str] = [], allowed_aws_entities: Sequence[str] = [], external_id: str = None, ): """ Creates a role that lets a list of specified services assume the role. :return: The newly created role. """ iam = base_session.client("iam") trusted_entities = set(allowed_aws_entities) if auto_trust_caller_identity: trusted_entities.add(get_caller_identity(base_session, region)) try: role = exponential_retry( iam.create_role, ["AccessDenied", "ServiceFailureException"], RoleName=role_name, AssumeRolePolicyDocument=_get_trust_policy(allowed_services, trusted_entities, external_id), MaxSessionDuration=MAX_ASSUME_ROLE_DURATION, ) if "role_exists" in iam.waiter_names: iam.get_waiter("role_exists").wait(RoleName=role_name) time.sleep(3) else: time.sleep(15) # give some time for IAM propagation module_logger.info(f"Created role {role_name} for new services {allowed_services} and entities {trusted_entities}") except ClientError as ex: module_logger.exception("Couldn't create role %s. Exception: %s", role_name, str(ex)) raise else: return role
7fe2043235f3391af96c0767fa7e79f2ef9d8ce3
3,639,531
def get_auto_switch_state(conn):
    """Get the current auto switch enabled / disabled state"""
    packet = _request(conn, GET_AUTO_SWITCH_STATE)
    if not _validate_packet(packet):
        raise ChecksumError()
    return _decode_toggle(packet)
f87f838bafb03e9d9fbea6e9a6285ede56dbb09d
3,639,532
def SGD(X, y, lmd, gradient, n_epochs, M, opt = "SGD", eta0 = None, eta_type = 'schedule', t0=5, t1=50, momentum = 0., rho = 0.9, b1 = 0.9, b2 = 0.999): """Stochastic Gradient Descent Algorithm Args: - X (array): design matrix (training data) - y (array): output dataset (training data) - gradient (function): function to compute the gradient - n_epochs (int): number of epochs - M (int): size of minibatches - opt (string): "SGD", "ADAGRAD", "RMS", "ADAM" - different optimizers - eta0 (float): learning rate if 'static' or 'invscaling' - eta_type = 'static', 'schedule', 'invscaling', 'hessian' - different methods for evaluating the learning rate - t0 (float): initial paramenter to compute the learning rate in 'schedule' - t1 (float): sequential paramenter to compute the learning rate in 'schedule' - momentum, rho, b1, b2 (float): parameters for different optimizers Returns: beta/theta-values""" if opt not in optimizers: raise ValueError("Optimizer must be defined in "+str(optimizers)) if eta_type not in eta_types: raise ValueError("Learning rate type must be defined within "+str(eta_types)) theta = np.random.randn(X.shape[1]) m = int(X.shape[0]/M) v = np.zeros(X.shape[1]) # parameter for velocity (momentum), squared-gradient (adagrad, RMS), ma = np.zeros(X.shape[1]) # parameter for adam delta = 1e-1 for epoch in range(n_epochs): for i in range(m): random_index = M*np.random.randint(m) Xi = X[random_index:random_index + M] yi = y[random_index:random_index + M] gradients = gradient(Xi, yi, theta, lmd) #* X.shape[0] #2.0 * Xi.T @ ((Xi @ theta)-yi) # Evaluate the hessian metrix to test eta < max H's eigvalue H = (2.0/X.shape[0])* (X.T @ X) eigvalues, eigvects = np.linalg.eig(H) eta_opt = 1.0/np.max(eigvalues) eta = eta_opt if not eta0: eta0=eta if eta_type == 'static': eta = eta0 elif eta_type == 'schedule': eta = learning_schedule(epoch*m+i, t0=t0, t1=t1) elif eta_type == 'invscaling': power_t = 0.25 # one can change it but I dont want to overcrowd the arguments eta = eta0 / pow(epoch*m+i, power_t) elif eta_type == 'hessian': pass #assert eta > eta_opt, "Learning rate higher than the inverse of the max eigenvalue of the Hessian matrix: SGD will not converge to the minimum. Need to set another learning rate or its paramentes." if opt == "SDG": v = momentum * v - eta * gradients theta = theta + v elif opt == "ADAGRAD": v = v + np.multiply(gradients, gradients) theta = theta - np.multiply(eta / np.sqrt(v+delta), gradients) elif opt == "RMS": v = rho * v + (1. - rho) * np.multiply(gradients, gradients) theta = theta - np.multiply(eta / np.sqrt(v+delta), gradients) elif opt == "ADAM": ma = b1 * ma + (1. - b1) * gradients v = b2 * v + (1. - b2) * np.multiply(gradients, gradients) ma = ma / (1. - b1) v = v / (1. - b2) theta = theta - np.multiply(eta / np.sqrt(v+delta), ma) return theta
916edb97d98757b6f18092a3c83622fb982ddfcb
3,639,533
def get_last_oplog_entry(client):
    """ gets most recent oplog entry from the given pymongo.MongoClient """
    oplog = client['local']['oplog.rs']
    cursor = oplog.find().sort('$natural', pymongo.DESCENDING).limit(1)
    docs = [doc for doc in cursor]
    if not docs:
        raise ValueError("oplog has no entries!")
    return docs[0]
069497ffd6eb0354c00858695d065695c617b5e6
3,639,534
import math


def schmidt_quasi_norm(size):
    """
    Returns an array of the Schmidt Quasi-normalised values
    Array is symmetrical about the diagonal
    """
    schmidt = square_array(size)
    for n in range(size):
        for m in range(n + 1):
            if n == 0:
                double = 1
            else:
                double = double_factorial(2 * n - 1)
            schmidt[m][n] = (
                math.sqrt(
                    ((2 - kronecker_delta(0, m)) * math.factorial(n - m))
                    / math.factorial(n + m)
                )
                * double
                / math.factorial(n - m)
            )
    return schmidt
b71b6a7733eb2b88f107ca904cf570c8f5841263
3,639,535
def _merge_jamos(initial, medial, final=None):
    """Merge Jamos into Hangul syllable.

    Raises:
        AssertionError: If ``initial``, ``medial``, and ``final`` are not in
            ``INITIAL``, ``MEDIAL``, and ``FINAL`` respectively.
    """
    assert initial in INITIALS
    assert medial in MEDIALS
    final = "∅" if final is None else final
    assert final in FINALS
    return chr(0xAC00
               + 588 * _INITIALS_IDX[initial]
               + 28 * _MEDIALS_IDX[medial]
               + _FINALS_IDX[final])
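# Illustrative call (added), assuming the INITIALS/MEDIALS/FINALS index tables
# follow the standard Unicode Jamo ordering with the empty final at index 0
# (those tables are not shown in the snippet above):
# _merge_jamos('ㅎ', 'ㅏ', 'ㄴ')  -> '한'   # 0xAC00 + 588*18 + 28*0 + 4 == 0xD55C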
9ff0e53d8decfc3db74d319fd366595bcac18e5c
3,639,536
def get_feature_set(eq, features):
    """Get features from their strings

    Arguments:
        eq {Equity} -- equity to build around
        features {string array} -- features and params to use

    Returns:
        list -- list of ndarray of floats
    """
    feature_set = []
    for feature in features:
        f = get_feature(eq, feature)
        for feat in f:
            feature_set.append(feat)
    return feature_set
78b4ad05b98ec776e7f07f16736739c909ce1e64
3,639,537
from typing import Optional def class_http_endpoint(methods: METHODS, rule_string: str, side_effect: Optional[HTTP_SIDE_EFFECT] = None, **kwargs): """ Creates an HTTP endpoint template. Declare this as a class variable in your webserver subclass to automatically add the endpoint to all instances. Can be used as a decorator. Args: methods: forwarded to MockHTTPEndpoint rule_string: forwarded to MockHTTPEndpoint side_effect: forwarded to MockHTTPEndpoint **kwargs: forwarded to MockHTTPEndpoint Returns: A new http endpoint template """ def ret(side_effect_method): return HTTPEndpointTemplate(methods, rule_string, side_effect_method=side_effect_method, **kwargs) if side_effect is not None: return ret(side_effect) return ret
62449e088ff66080a7165497d6f2434971818f62
3,639,538
def value(iterable, key=None, position=1):
    """Generic value getter. Returns containing value."""
    if key is None:
        if hasattr(iterable, '__iter__'):
            return iterable[position]
        else:
            return iterable
    else:
        return iterable[key]
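# Behaviour sketch (examples added for illustration only):
# value((10, 20, 30))               -> 20   # default position=1 on a sequence
# value(7)                          -> 7    # non-iterable values pass through
# value({'price': 5}, key='price')  -> 5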
df49496ab8fa4108d0c3d04035ffa318a9c6a035
3,639,539
def find_aa_seqs( aa_seq: str, var_sites: str, n_flanking: int = 7 ): """Grabs the flanking AA sequence around a given location in a protein sequence string. Args: aa_seq: Protein sequence string. var_sites: Integer location of the site of interest (1-indexed, not 0-indexed). n_flanking: The number of flanking AAs to grab around the site of interest. Returns: AA sequence centered around var_site. """ sites = [max(int(v.strip())-1, 0) for v in var_sites.split(var_site_delimiter)] seqs = [] for var_site in sites: n = int(var_site) if len(aa_seq) < n: return '_'*(1+(n_flanking*2)) left_ = '_'*max((n_flanking - n), 0) right_ = '_'*max(((n+n_flanking+1) - len(aa_seq)), 0) aas = aa_seq[max((n-n_flanking), 0):min(len(aa_seq), (n+n_flanking+1))] seqs.append(left_ + aas + right_) return var_site_delimiter.join(seqs)
f6b75215347eb829d2b023138abeff3a44ab1d36
3,639,540
import os


def main(fn, tmp=False):
    """sorting the lines of the file and write the result to a new file"""
    if tmp:
        fnew = os.path.join(TMP, os.path.basename(fn))
    else:
        fnew = '_sorted'.join(os.path.splitext(fn))
    with open(fn) as _in, open(fnew, "w") as _out:
        regels = _in.readlines()
        regels.sort()
        for x in regels:
            _out.write(x)
    return fnew
b1cbffe3d65ba4a7ba8d94d9b7c2f76b71aca226
3,639,541
def compact_interval_string(value_list):
    """Compact a list of integers into a comma-separated string of intervals.

    Args:
        value_list: A list of sortable integers such as a list of numbers

    Returns:
        A compact string representation, such as "1-5,8,12-15"
    """
    if not value_list:
        return ''

    value_list.sort()

    # Start by simply building up a list of separate contiguous intervals
    interval_list = []
    curr = []
    for val in value_list:
        if curr and (val > curr[-1] + 1):
            interval_list.append((curr[0], curr[-1]))
            curr = [val]
        else:
            curr.append(val)

    if curr:
        interval_list.append((curr[0], curr[-1]))

    # For each interval collapse it down to "first-last" or just "first"
    # if first == last.
    return ','.join([
        '{}-{}'.format(pair[0], pair[1]) if pair[0] != pair[1] else str(pair[0])
        for pair in interval_list
    ])
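# Quick check of the interval collapsing (illustrative only):
# compact_interval_string([1, 2, 3, 5, 8, 12, 13, 14, 15])  -> '1-3,5,8,12-15'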
b479b45dc68a0bce9628a19be17185437f3edca6
3,639,542
def _mk_asm() -> str: """ Generate assembly to program all allocated translation tables. """ string = "" for n,t in enumerate(table.Table._allocated): string += _mk_table(n, t) keys = sorted(list(t.entries.keys())) while keys: idx = keys[0] entry = t.entries[idx] if type(entry) is Region: string += _mk_blocks(n, t, idx, entry) for k in range(idx, idx+entry.num_contig): keys.remove(k) else: string += _mk_next_level_table(n, idx, entry) keys.remove(idx) return string
a1e6725b20877c10a400d8f13890e914adf8024b
3,639,543
def getNuitkaModules():
    """ Create a list of all modules known to Nuitka.

    Notes:
        This will be executed at most once: on the first time when a module is
        encountered and cannot be found in the recorded calls (JSON array).

    Returns:
        List of all modules.
    """
    mlist = []
    for m in getRootModules():
        if m not in mlist:
            mlist.append(m)
    for m in done_modules:
        if m not in mlist:
            mlist.append(m)
    for m in uncompiled_modules:
        if m not in mlist:
            mlist.append(m)
    for m in active_modules:
        if m not in mlist:
            mlist.append(m)
    return mlist
7596c18f2b38883f1f8c3201597b57fc6096752b
3,639,544
def run_model(network):
    """
    Runs a model with pre-defined values.
    """
    model = network(vocab_size + 1, EMBEDDING_SIZE)
    model.cuda()
    EPOCHS = 20
    train_model(model, train, epochs=EPOCHS, echo=False)
    return model
a10980b6f8dd5ff9e048b07ee64215187acb8467
3,639,545
def is_prime(num):
    """ Assumes num > 3 """
    if num % 2 == 0:
        return False
    for p in range(3, int(num**0.5) + 1, 2):  # Jumps of 2 to skip even numbers
        if num % p == 0:
            return False
    return True
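# Spot checks (added for illustration; remember the function assumes num > 3):
# is_prime(97)  -> True
# is_prime(91)  -> False   # 91 == 7 * 13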
e898026d0362967400cfee4e70a74ac02a64b6f1
3,639,546
def verify_password(email_or_token, password):
    """
    The email and password are verified by the existing methods on the User model;
    if the login credentials are correct, this verification callback returns True.
    The callback stores the authenticated user in Flask's global object g, so that
    view functions can access it.
    Note: for anonymous logins this function returns True and assigns an instance
    of the AnonymousUser class provided by Flask-Login to g.current_user.

    :param email:
    :param password:
    :return:
    """
    if email_or_token == '':
        # The API blueprint supports anonymous access; in that case the email
        # field sent by the client must be empty, i.e. an empty field is
        # assumed to mean an anonymous user.
        g.current_user = AnoymousUser()
        return True
    if password == '':
        # If the password is empty, assume email_or_token carries a token and
        # authenticate it as a token.
        g.current_user = User.verify_auth_token(email_or_token)
        g.token_used = True
        return g.current_user is not None
    # If neither argument is empty, assume regular email address and password
    # authentication.
    user = User.query.filter_by(email=email_or_token).first()
    if not user:
        return False
    g.current_user = user
    g.token_used = False
    return user.verify_password(password)
09890ad0ae33f7e700148b56098df6e3ecc69d39
3,639,547
def is_valid_orcid_id(orcid_id: str):
    """adapted from stdnum.iso7064.mod_11_2.checksum()"""
    check = 0
    for n in orcid_id:
        check = (2 * check + int(10 if n == "X" else n)) % 11
    return check == 1
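# Example (added): the checksum loop expects the bare 16-character identifier,
# i.e. digits (with a possible trailing 'X') and the hyphens already stripped.
# is_valid_orcid_id("0000000218250097")  -> True    # 0000-0002-1825-0097
# is_valid_orcid_id("0000000218250098")  -> False   # corrupted check digit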
5866e4465a24f46aa4c7015902eac53684da7b04
3,639,548
def extract_entity(entities, entity_type=_PERSON): """ Extract name from the entity specified in entity_type. We use the JSON format to extract the entity information: - :param entities: :param entity_type: :return: """ if not entity_type: raise ValueError('Invalid entity type') if _ENTITIES not in entities: raise ValueError('No entities format') try: extracted_entities = [] log.info('extract_entity() Searching for %s in %r', entity_type, entities) for entity in entities[_ENTITIES]: # Extract entity (PERSON, ORGANIZATION) if _TYPE in entity: if entity[_TYPE] == entity_type: entity_name = entity[_NAME] log.info('extract_entity() Extracting %s from entity %s', entity_type, entity_name) if entity_name[0].isupper(): if entity[_METADATA]: log.info('extract_entity() | Insert %s: %s | %s ', entity_type, entity_name, entity[_METADATA]) extracted_entities.append(entity[_NAME]) else: # Filter entity name by discarding dictionary of # words. if not set(extract_filter()) & set( entity_name.lower().split()): log.info('extract_entity() | Insert %s %s ', entity_type, entity_name) extracted_entities.append(entity[_NAME]) return extracted_entities except KeyError as e: log.exception(e)
c357c0cc5d4365ea59f1b852b5e800fe513dfe3c
3,639,549
import os def ReadBDAFile(bdafile): """ Read BDA file :param str bdafile: file name :return: natm - number of atoms :return: molnam - name of molecule :return: frgdat - [bdadic,frgnamlst,frgatmdic,frgattribdic] bdalst:[[bda#,baa#,bda atmnam,baa atmnam, bda resdat,baa resdat],...] frgnamlst: [frgnam1,frgnam2,...] frgatmdic:{frgnam:atmlst,...} frgattribdic:{frgnam:[charge,layer,active],..} """ def ErrorMessage(line,s): mess='Error at line='+str(line)+'\n' mess=mess+'s='+s lib.MessageBoxOK(mess,'rwfile.ReadBDAFile') bdalst=[]; natm=-1; molnam=''; resnam=''; nbda=-1 frgnamlst=[]; frgatmdic={}; frgattribdic={} if not os.path.exists(bdafile): mess='file not found. file='+bdafile lib.MessageBoxOK(mess,'rwfile.ReadBDAFile') return molnam,resnam,natm,[] head,tail=os.path.split(bdafile) bdanam,ext=os.path.splitext(tail) name=lib.GetResDatFromFileName(bdanam) if name is not None: bdanam=name f=open(bdafile,'r') line=0 for s in f.readlines(): line += 1; ss=s s=s.strip() if len(s) <= 0: continue nc=s.find('#') if nc >= 0: s=s[:nc].strip() if len(s) <= 0: continue if s.startswith('MOLNAM',0,6): key,molnam=lib.GetKeyAndValue(s) continue elif s.startswith('RESNAM',0,6): key,resnam=lib.GetKeyAndValue(s) if resnam[-1] == ':': resnam=resnam+' ' continue elif s.startswith('NATM',0,4): key,natm=lib.GetKeyAndValue(s) continue elif s.startswith('NBDA',0,4): key,nbda=lib.GetKeyAndValue(s) continue elif s.startswith('BDABAA',0,6): key,s0=lib.GetKeyAndValue(s,conv=False) s1=s0; s2=''; nc=s0.find('"') if nc >= 0: s1=s0[:nc]; s2=s0[nc:] items=lib.SplitStringAtSpacesOrCommas(s1) try: bda=int(items[1])-1; baa=int(items[2])-1 except: ErrorMessage(line,ss) return molnam,resnam,natm,[] #if not bdadic.has_key(bdanam): bdadic[bdanam]=[] bdaatm=None; baaatm=None; bdares=None; baares=None items=lib.GetStringBetweenQuotation(s2) if len(items) >= 2: bdaatm=items[0]; baaatm=items[1] if len(items) >= 4: bdares=items[2]; baares=items[3] bdalst.append([bda,baa,bdaatm,baaatm,bdares,baares]) elif s.startswith('FRAGMENT',0,8): key,s1=lib.GetKeyAndValue(s,conv=False) items=s1.split('[',1) try: dat1=items[0]; dat2=items[1] dat2=dat2.strip(); dat2=dat2[:-1] except: ErrorMessage(line,ss) return molnam,resnam,natm,[] items=lib.SplitStringAtSpacesOrCommas(dat1) frgnam=items[1]; charge=int(items[3]); layer=int(items[4]) active=int(items[5]); spin=int(items[6]) frgnamlst.append(frgnam) frgattribdic[frgnam]=[charge,layer,active,spin] try: atmlst=lib.StringToInteger(dat2) except: ErrorMessage(lin,ss) return molnam,resnam,natm,[] atmlst=[x-1 for x in atmlst] #const.CONSOLEMESSAGE('atmlst='+str(atmlst)) frgatmdic[frgnam]=atmlst else: pass f.close() frgdat=[bdalst,frgnamlst,frgatmdic,frgattribdic] return molnam,resnam,natm,frgdat
8c3e439eb6be8b4de5e89c66a5723a950bda20bb
3,639,550
def reconstruct_from_patches(img_arr, org_img_size, stride=None, size=None): """[summary] Args: img_arr (numpy.ndarray): [description] org_img_size (tuple): [description] stride ([type], optional): [description]. Defaults to None. size ([type], optional): [description]. Defaults to None. Raises: ValueError: [description] Returns: numpy.ndarray: [description] """ #print('Img_Arr : ',img_arr.shape) #print('Orig_Img_Size : ',org_img_size) # check parameters if type(org_img_size) is not tuple: raise ValueError("org_image_size must be a tuple") if img_arr.ndim == 3: img_arr = np.expand_dims(img_arr, axis=0) if size is None: size = img_arr.shape[1] if stride is None: stride = size nm_layers = img_arr.shape[3] i_max = org_img_size[0] // stride if i_max*stride < org_img_size[0] : i_max = i_max + 1 j_max = org_img_size[1] // stride if j_max*stride < org_img_size[1] : j_max = j_max + 1 #total_nm_images = img_arr.shape[0] // (i_max ** 2) total_nm_images = img_arr.shape[0] // (i_max * j_max) nm_images = img_arr.shape[0] images_list = [] kk = 0 for img_count in range(total_nm_images): img_r = np.zeros( (i_max*stride, j_max*stride, nm_layers), dtype=img_arr[0].dtype ) for i in range(i_max): for j in range(j_max): for layer in range(nm_layers): img_r[ i * stride : i * stride + size, j * stride : j * stride + size, layer, ] = img_arr[kk, :, :, layer] kk += 1 img_bg = np.zeros( (org_img_size[0], org_img_size[1], nm_layers), dtype=img_arr[0].dtype ) img_bg = img_r[0:org_img_size[0], 0:org_img_size[1], 0:] images_list.append(img_bg) return np.stack(images_list)
1ad80afd7d4f09de1ece4e8ba74844477d2d99be
3,639,551
import requests def process_highlight(entry, img_width): """ Function processing highlights extracted from DOM tree. Downloads image based on its url and scales it. Prettifies text by inserting newlines and shortening author lists. Parameters ---------- entry : dict of str Dictionary created by extract_highlights function. img_width : int Width of image to resize to. Returns ------- dict Highlight dict with downloaded and resized image and prettified text """ # 'https:' is missing in page src links if not entry['img'].startswith("https"): entry['img'] = "https:" + entry['img'] # fetch the image and resize it to common width entry['img'] = resize_img_to_x(Image.open(requests.get(entry['img'], stream=True).raw), img_width) entry['title'] = newline_join(entry['title'], max_letters['title']) entry['authors'] = shorten_authors(entry['authors']) entry['comment'] = newline_join(entry['comment'], max_letters['comment']) return(entry)
6a09cb87971725fa3df9eb8e6e98cee315701af4
3,639,552
import urllib import http import socket import random import time def urlopen_with_tries(url, initial_wait=5, rand_wait_range=(1, 60), max_num_tries=10, timeout=60, read=False): """ Open a URL via urllib with repeated tries. Often calling urllib.request.urlopen() fails with HTTPError, especially if there are multiple processes calling it. The reason is that NCBI has a cap on the number of requests per unit time, and the error raised is 'HTTP Error 429: Too Many Requests'. Args: url: url to open initial_wait: number of seconds to wait in between the first two requests; the wait for each subsequent request doubles in time rand_wait_range: tuple (a, b); in addition to waiting an amount of time that grows exponentially (starting with initial_wait), also wait a random number of seconds between a and b (inclusive). If multiple processes are started simultaneously, this helps to avoid them waiting on the same cycle max_num_tries: maximum number of requests to attempt to make timeout: timeout in sec before retrying read: also try to read the opened URL, and return the results; if this raises an HTTPException, the call will be retried Returns: result of urllib.request.urlopen(); unless read is True, in which case it is the data returned by reading the url """ num_tries = 0 while num_tries < max_num_tries: try: num_tries += 1 logger.debug(("Making request to open url: %s"), url) r = urllib.request.urlopen(url, timeout=timeout) if read: raw_data = r.read() return raw_data else: return r except (urllib.error.HTTPError, http.client.HTTPException, urllib.error.URLError, socket.timeout): if num_tries == max_num_tries: # This was the last allowed try logger.warning(("Encountered HTTPError or HTTPException or " "URLError or timeout %d times (the maximum allowed) when " "opening url: %s"), num_tries, url) raise else: # Pause for a bit and retry wait = initial_wait * 2**(num_tries - 1) rand_wait = random.randint(*rand_wait_range) total_wait = wait + rand_wait logger.info(("Encountered HTTPError or HTTPException or " "URLError or timeout when opening url; sleeping for %d " "seconds, and then trying again"), total_wait) time.sleep(total_wait) except: logger.warning(("Encountered unexpected error while opening " "url: %s"), url) raise
70ea144ac136a9137c8196f1de4228d6791c477f
3,639,553
from geometry_msgs.msg import Pose, Point, Quaternion


def SE3ToROSPose(oMg):
    """Converts SE3 matrix to ROS geometry_msgs/Pose format"""
    xyz_quat = pin.SE3ToXYZQUATtuple(oMg)
    return Pose(position=Point(*xyz_quat[:3]),
                orientation=Quaternion(*xyz_quat[3:]))
ebf806bd52a4b60252a2001fe2de8122bd7cd201
3,639,554
def calib_graph_to_infer_graph(calibration_graph_def, is_dynamic_op=False): """Convert an existing calibration graph to inference graph. Args: calibration_graph_def: the calibration GraphDef object with calibration data is_dynamic_op: whether to create dynamic static engines from calibration Returns: New GraphDef with TRTEngineOps placed in graph replacing calibration nodes. Raises: RuntimeError: if the returned status message is malformed. """ def py2string(inp): return inp def py3string(inp): return inp.decode("utf-8") if _six.PY2: to_string = py2string else: to_string = py3string is_calib_graph = False for n in calibration_graph_def.node: if n.op == "TRTEngineOp": is_calib_graph = is_calib_graph or not n.attr["calibration_data"].s if not is_calib_graph: tf_logging.error( "Not a calib graph. Doesn't seem to contain any calibration nodes.") return None graph_str = calibration_graph_def.SerializeToString() out = calib_convert(graph_str, is_dynamic_op) status = to_string(out[0]) output_graph_def_string = out[1] del graph_str # Save some memory if len(status) < 2: raise _impl.UnknownError(None, None, status) if status[:2] != "OK": msg = status.split(";") if len(msg) == 1: raise RuntimeError("Status message is malformed {}".format(status)) # pylint: disable=protected-access raise _impl._make_specific_exception(None, None, ";".join(msg[1:]), int(msg[0])) # pylint: enable=protected-access output_graph_def = graph_pb2.GraphDef() output_graph_def.ParseFromString(output_graph_def_string) del output_graph_def_string # Save some memory return output_graph_def
a29b556775aff27eed8dad404c3684e452e88c86
3,639,555
def calc_minimum_angular_variance_1d(var_r, phi_c, var_q):
    """Calculate minimum possible angular variance of a beam achievable with a correction lens.

    Args:
        var_r (scalar): real space variance.
        phi_c (scalar): real-space curvature - see above.
        var_q (scalar): angular variance of the beam.

    Returns:
        var_q_min (scalar): minimum possible angular variance of the beam.
    """
    var_q_min = var_q - 4*phi_c**2/var_r
    return var_q_min
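# Numeric sketch of var_q_min = var_q - 4*phi_c**2/var_r with arbitrary
# illustrative values (not from the original source):
# calc_minimum_angular_variance_1d(var_r=1.0, phi_c=0.5, var_q=2.0)  -> 1.0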
c5e2144f44b532acbf8eb9dfb83c991af3756abf
3,639,556
def grid_subsampling(points, features=None, labels=None, ins_labels=None, sampleDl=0.1, verbose=0):
    """
    CPP wrapper for a grid subsampling (method = barycenter for points and features)
    :param points: (N, 3) matrix of input points
    :param features: optional (N, d) matrix of features (floating number)
    :param labels: optional (N,) matrix of integer labels
    :param ins_labels: optional (N,) matrix of integer instance labels
    :param sampleDl: parameter defining the size of grid voxels
    :param verbose: 1 to display
    :return: subsampled points, with features and/or labels depending of the input
    """
    if (features is None) and (labels is None):
        return cpp_subsampling.subsample(points, sampleDl=sampleDl, verbose=verbose)
    elif (labels is None):
        return cpp_subsampling.subsample(points, features=features, sampleDl=sampleDl, verbose=verbose)
    elif (features is None):
        return cpp_subsampling.subsample(points, classes=labels, ins_classes=ins_labels,
                                         sampleDl=sampleDl, verbose=verbose)
    else:
        return cpp_subsampling.subsample(points, features=features, classes=labels,
                                         ins_classes=ins_labels, sampleDl=sampleDl, verbose=verbose)
3aebd24307307344c0ff804b3ee189ff4da98f0d
3,639,557
def delete_all_collections_from_collection(collection, api_key=None):
    """
    Delete *ALL* Collections from a Collection.

    :param collection: The Collection to remove *all* Collections from.
    :type collection: str
    :param api_key: The API key to authorize request against.
    :type api_key: str
    :return
    :rtype
    """
    assertions.datatype_str('collection', collection)
    url = '/collections/{}/collections'.format(collection)
    return utils.request('DELETE', url, api_key=api_key, accept=True)
1a039c850f1bbf82f0c6683081874e1971c40255
3,639,558
import torch


def generate_kbit_random_tensor(size, bitlength=None, **kwargs):
    """Helper function to generate a random k-bit number"""
    if bitlength is None:
        bitlength = torch.iinfo(torch.long).bits
    if bitlength == 64:
        return generate_random_ring_element(size, **kwargs)
    rand_tensor = torch.randint(0, 2 ** bitlength, size, dtype=torch.long, **kwargs)
    if rand_tensor.is_cuda:
        return CUDALongTensor(rand_tensor)
    return rand_tensor
c87fc7f353b15a4dd4b6d980cf2e365f6ac6a4bc
3,639,559
import os import glob def AnimalsWithAttributes2(path: str) -> Dataset: """`Animals with attributes 2 <https://cvml.ist.ac.at/AwA2/>`_ dataset. The file structure should be like:: <path> classes.txt predicates.txt predicate-matrix-binary.txt JPEGImages/ <classname>/ <imagename>.jpg ... ... Arguments: path: The root directory of the dataset. Returns: Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance. """ root_path = os.path.abspath(os.path.expanduser(path)) dataset = Dataset(DATASET_NAME) dataset.load_catalog(os.path.join(os.path.dirname(__file__), "catalog.json")) segment = dataset.create_segment() with open(os.path.join(root_path, "classes.txt"), encoding="utf-8") as fp: class_names = [line[:-1].split("\t", 1)[-1] for line in fp] with open(os.path.join(root_path, "predicates.txt"), encoding="utf-8") as fp: attribute_keys = [line[:-1].split("\t", 1)[-1] for line in fp] with open(os.path.join(root_path, "predicate-matrix-binary.txt"), encoding="utf-8") as fp: attribute_values = [line[:-1].split(" ") for line in fp] attribute_mapping = {} for class_name, values in zip(class_names, attribute_values): attribute_mapping[class_name] = Classification( category=class_name, attributes=dict(zip(attribute_keys, (bool(int(value)) for value in values))), ) for class_name in sorted(os.listdir(os.path.join(root_path, "JPEGImages"))): image_paths = glob(os.path.join(root_path, "JPEGImages", class_name, "*.jpg")) label = attribute_mapping[class_name] for image_path in image_paths: data = Data(image_path) data.label.classification = label segment.append(data) return dataset
4973b0e56d00355bf92bd309b3505c554bd76ba9
3,639,560
import pickle def cache_by_sha(func): """ only downloads fresh file, if we don't have one or we do and the sha has changed """ @wraps(func) def cached_func(*args, **kwargs): cache = {} list_item = args[1] dest_dir = kwargs.get('dest_dir') path_to_file = list_item.get('path', '') file_out = '{}{}'.format(dest_dir, path_to_file) p_file_out = '{}{}.pickle'.format(dest_dir, path_to_file) makedirs(dirname(file_out), exist_ok=True) if exists(p_file_out) and exists(file_out): with open(p_file_out, 'rb') as pf: cache = pickle.load(pf) cache_sha = cache.get('sha', False) input_sha = list_item.get('sha', False) if cache_sha and input_sha and cache_sha == input_sha: # do nothing as we have the up to date file already return None else: with open(p_file_out, mode='wb+') as pf: pickle.dump(list_item, pf, pickle.HIGHEST_PROTOCOL) return func(*args, **kwargs) return cached_func
d95010ba433c9b9f27dcb2f3fe05d3b609cee3fb
3,639,561
def rewrite_by_assertion(tm):
    """
    Rewrite the tm by assertions.

    Currently we only rewrite the absolute boolean variables.
    """
    global atoms
    pt = refl(tm)
    # boolvars = [v for v in tm.get_vars()] + [v for v in tm.get_consts()]
    return pt.on_rhs(*[top_conv(replace_conv(v)) for _, v in atoms.items()]).on_rhs(
        *[top_conv(replace_conv(v)) for _, v in atoms.items()])
51eb546b9b0414091152d018aac6a6eaf2149d39
3,639,562
import json


def hash_cp_stat(fdpath, follow_symlinks=False, hash_function=hash):
    """
    Returns a hash of the file stat that can be used for shallow comparison.

    The default python hash function is used, which returns an integer. This
    can be used to quickly compare files; for comparing directories see
    hash_walk().
    """
    stat = cp_stat(fdpath, follow_symlinks)
    if stat:
        return hash_function(json.dumps(stat, sort_keys=True).encode("utf-8"))
c16e2f00fb278d69e9307b8d528a7807d7c404d6
3,639,563
def multi_label_head(n_classes, label_name=None, weight_column_name=None, enable_centered_bias=False, head_name=None, thresholds=None, metric_class_ids=None, loss_fn=None): """Creates a Head for multi label classification. Multi-label classification handles the case where each example may have zero or more associated labels, from a discrete set. This is distinct from `multi_class_head` which has exactly one label from a discrete set. This head by default uses sigmoid cross entropy loss, which expects as input a multi-hot tensor of shape `(batch_size, num_classes)`. Args: n_classes: Integer, number of classes, must be >= 2 label_name: String, name of the key in label dict. Can be null if label is a tensor (single headed models). weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. enable_centered_bias: A bool. If True, estimator will learn a centered bias variable for each class. Rest of the model structure learns the residual after centered bias. head_name: name of the head. If provided, predictions, summary and metrics keys will be suffixed by `"/" + head_name` and the default variable scope will be `head_name`. thresholds: thresholds for eval metrics, defaults to [.5] metric_class_ids: List of class IDs for which we should report per-class metrics. Must all be in the range `[0, n_classes)`. loss_fn: Optional function that takes (`labels`, `logits`, `weights`) as parameter and returns a weighted scalar loss. `weights` should be optional. See `tf.losses` Returns: An instance of `Head` for multi label classification. Raises: ValueError: If n_classes is < 2 ValueError: If loss_fn does not have expected signature. """ if n_classes < 2: raise ValueError("n_classes must be > 1 for classification.") if loss_fn: _verify_loss_fn_args(loss_fn) return _MultiLabelHead( n_classes=n_classes, label_name=label_name, weight_column_name=weight_column_name, enable_centered_bias=enable_centered_bias, head_name=head_name, thresholds=thresholds, metric_class_ids=metric_class_ids, loss_fn=_wrap_custom_loss_fn(loss_fn) if loss_fn else None)
b1820584de6c4f9f987100c313793f41de0b73fc
3,639,564
def render_macros(line, macros): """Given a line of non-preprocessed code, and a list of macros, process macro expansions until done. NOTE: Ignore comments""" if line.startswith(";"): return line else: while [macro_name for macro_name in macros.keys() if macro_name in line]: for macro_name, macro_info in macros.items(): macro_body, params = macro_info if params and macro_name in line: line = render_parameterised_macro(line, macro_name, macro_body, params) else: line = line.replace(macro_name, macro_body) return line
b78892d5708e9ca2d7e596d4f92ca7e8a6d17a64
3,639,565
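A minimal sketch for `render_macros` above, exercising only the non-parameterised path (the parameterised branch would also need `render_parameterised_macro`); the macro names below are made up for illustration.

# macros maps name -> (body, params); params is None for simple macros.
macros = {"MAX_HP": ("100", None), "START": ("MAX_HP - 1", None)}
print(render_macros("lda START", macros))             # -> "lda 100 - 1" (expanded recursively)
print(render_macros("; START in a comment", macros))  # comment lines pass through untouched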
def init_LR_XR(args, feat_dim=1, class_dim=10, debug=False):
    """
    Build and initialize an LR_XR model.

    :param args: input arguments
    :param feat_dim: dimension of feature
    :param class_dim: dimension of class label
    :param debug: debug option (True: ON)
    :return: initialized LR_XR model built with TensorFlow
    """
    tf.reset_default_graph()
    model = LR_XR(args, featdim=feat_dim, classdim=class_dim)
    return model
0a3ef2de5e6d6eb6deeb7324a4d52fc509005a66
3,639,566
def calc_qpos(x, bit=16):
    """
    Return the largest Q position (number of fractional bits) that can
    represent the given value in a ``bit``-wide fixed-point word.

    :param x: float
    :return: int
    """
    for q in range(bit):
        maxv = (2 ** (q - 1)) - 1
        if x > maxv:
            continue
        return bit - q
    return bit
b85b458989425bc5698547cae93d8729bd452e76
3,639,567
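A quick check of `calc_qpos` above (assuming the function is in scope):

print(calc_qpos(3.2))             # -> 12: the integer part (sign included) needs 4 bits, 12 remain for the fraction
print(calc_qpos(0.25))            # -> 14
print(calc_qpos(1000.0, bit=32))  # -> 21 in a 32-bit word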
import json import os def yamlcheck(python): """Return True if PyYAML has libyaml support, False if it does not and None if it was not found.""" result = json.loads(raw_command([python.path, os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'yamlcheck.py')], capture=True)[0]) if not result['yaml']: return None return result['cloader']
78bb3a86fae725b9b4a7d0c8ecc7c5426c0f472e
3,639,568
def person(request): """ Display information on the specified borrower (person) """ title = "Find a person" if 'person_id' in request.GET: person_id = request.GET['person_id'] try: person = Person.objects.get(id_number=person_id) title = unicode(person) checked_out_items = person.item_set.all() transaction_history = person.transaction_set.all() except Person.DoesNotExist: error_message = "No person with id number %s" % person_id else: message = "Enter or scan the person's ID number" people = Person.objects.enrolled() # For clickable list of names return render_to_response("person.html", locals())
53550d5ebefc6fb36601049f86c227b073d715d5
3,639,569
def _follow_word_from_node(node, word): """Follows the link with given word label from given node. If there is a link from ``node`` with the label ``word``, returns the end node and the log probabilities and transition IDs of the link. If there are null links in between, returns the sum of the log probabilities and the concatenation of the transition IDs. :type node: Lattice.Node :param node: node where to start searching :type word: str :param word: word to search for :rtype: tuple of (Lattice.Node, float, float, str) :returns: the end node of the link with the word label (or ``None`` if the word is not found), and the total acoustic log probability, LM log probability, and transition IDs of the path to the word """ if word not in node.word_to_link: return (None, None, None, None) link = node.word_to_link[word] if link.word is not None: return (link.end_node, link.ac_logprob if link.ac_logprob is not None else 0.0, link.lm_logprob if link.lm_logprob is not None else 0.0, link.transitions if link.transitions is not None else "") end_node, ac_logprob, lm_logprob, transitions = \ _follow_word_from_node(link.end_node, word) if end_node is None: return (None, None, None, None) else: if link.ac_logprob is not None: ac_logprob += link.ac_logprob if link.lm_logprob is not None: lm_logprob += link.lm_logprob if link.transitions is not None: transitions += link.transitions return (end_node, ac_logprob, lm_logprob, transitions)
a21a20ee4ad2d2e90420e30572d41647b3938f4b
3,639,570
def normalize_code(code): """Normalize object codes to avoid duplicates.""" return slugify(code, allow_unicode=False).upper() if code else None
27f3b079c4fb5cc9d87e310282838f77c4aed981
3,639,571
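A quick check of `normalize_code` above; it assumes `slugify` here is Django's `django.utils.text.slugify` (consistent with the `allow_unicode` keyword used).

# Assumes slugify is django.utils.text.slugify and normalize_code is in scope.
print(normalize_code("  Proj Alpha_1 "))   # -> "PROJ-ALPHA_1"
print(normalize_code(""))                  # falsy input -> None
print(normalize_code(None))                # -> None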
def get_weights_for_all(misfit_windows, stations, snr_threshold, cc_threshold, deltat_threshold, calculate_basic, print_info=True): """ get_weights_for_all: calculate weights. """ weights_for_all = {} # * firstly we update the weight of snr,cc,deltat for net_sta in misfit_windows: weights_for_all[net_sta] = {} for category in misfit_windows[net_sta]: weights_for_all[net_sta][category] = [] for each_misfit_window in misfit_windows[net_sta][category].windows: wsnr = cal_snr_weight(each_misfit_window, snr_threshold[0], snr_threshold[1]) wcc = cal_cc_weight(each_misfit_window, cc_threshold[0], cc_threshold[1]) wdeltat = cal_deltat_weight(each_misfit_window, deltat_threshold[0], deltat_threshold[1]) weights_for_all[net_sta][category].append( Weight(wsnr, wcc, wdeltat, None, None)) if(not calculate_basic): # * get the station list for the geographical weighting (remove all 0 cases) used_geographical_net_sta_list = [] for net_sta in weights_for_all: status = False for category in weights_for_all[net_sta]: for each_weight in weights_for_all[net_sta][category]: wsnr_cc_deltat = each_weight.snr * each_weight.cc * each_weight.deltat if (wsnr_cc_deltat > 0): status = True if (status): used_geographical_net_sta_list.append(net_sta) # build stations_mapper stations_mapper = get_stations_mapper(stations) # get geographical weighting and update geographical_weight_dict = cal_geographical_weight( stations_mapper, used_geographical_net_sta_list, list(weights_for_all.keys())) for net_sta in weights_for_all: for category in weights_for_all[net_sta]: for index, each_weight in enumerate(weights_for_all[net_sta][category]): weights_for_all[net_sta][category][index] = each_weight._replace( geographical=geographical_weight_dict[net_sta]) # * get the number of items for each category # firstly we get all the category names rep_net_sta = list(weights_for_all.keys())[0] all_categories = list(weights_for_all[rep_net_sta].keys()) # here we should weight based on number of windows but not the number of usable stations. number_each_category = {} for each_category in all_categories: number_each_category[each_category] = 0 for net_sta in weights_for_all: for each_weight in weights_for_all[net_sta][each_category]: # if this window is usable or not wsnr_cc_deltat = each_weight.snr * each_weight.cc * each_weight.deltat if (wsnr_cc_deltat > 0): number_each_category[each_category] += 1 # get category weighting and update # here we should weight based on number of windows but not the number of usable stations. # * collect all events information number_each_category_all_events = mpi_collect_category_number( number_each_category, print_info=print_info) weight_each_category = {} for each_category in number_each_category_all_events: weight_each_category[each_category] = cal_category_weight( number_each_category_all_events[each_category]) for net_sta in weights_for_all: for category in weights_for_all[net_sta]: # * we will not use the category that not existing in this event for index, each_weight in enumerate(weights_for_all[net_sta][category]): weights_for_all[net_sta][category][index] = each_weight._replace( category=weight_each_category[category]) return weights_for_all
0f64a968d00391ab18e2e91f2453b1ecc6a2a426
3,639,572
def tree_feature_importance(tree_model, X_train):
    """
    Takes in a tree model and a df of training data and prints out a ranking
    of the most important features and a bar graph of the values

    Parameters
    ----------
    tree_model: the trained model instance. Must have feature_importances_ and
                estimators_ attributes
    X_train: DataFrame that the model was trained on

    Returns
    -------
    fig: the matplotlib Figure containing the feature importance bar plot
    """
    importances = tree_model.feature_importances_
    std = np.std([tree.feature_importances_ for tree in tree_model.estimators_],
                 axis=0)
    indices = np.argsort(importances)[::-1]
    features = X_train.columns.to_list()

    # Print the feature ranking
    print("Feature ranking:")
    print()

    ordered_features = []
    for f in range(X_train.shape[1]):
        print(f'{f + 1}. {features[indices[f]]}, {importances[indices[f]]}')
        ordered_features.append(features[indices[f]])
    print()

    # Plot the impurity-based feature importances of the forest
    fig = plt.figure()
    plt.title("Feature importances")
    plt.bar(range(X_train.shape[1]), importances[indices],
            color="r", yerr=std[indices], align="center")
    plt.xticks(range(X_train.shape[1]), ordered_features, rotation=90)
    plt.xlim([-1, X_train.shape[1]])
    plt.show()

    return fig
4c347a7bad8d541c3166942f51efa6f18882bde5
3,639,573
from datetime import timedelta


def chhop_microseconds(delta: timedelta) -> timedelta:
    """
    Chop the microseconds off a timedelta object.

    :param delta: the timedelta to truncate
    :return: the same duration with its microseconds component removed
    """
    return delta - timedelta(microseconds=delta.microseconds)
fac46d727540f607164029d23324e678a276b296
3,639,574
def __renumber(dictionary) : """Renumber the values of the dictionary from 0 to n """ count = 0 ret = dictionary.copy() new_values = dict([]) for key in dictionary.keys() : value = dictionary[key] new_value = new_values.get(value, -1) if new_value == -1 : new_values[value] = count new_value = count count = count + 1 ret[key] = new_value return ret
a4611f04360b2c03ac17f22e349371f58f65ed9b
3,639,575
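A quick illustration of `__renumber` above (called here as a module-level helper, so no name mangling applies): community labels are remapped to consecutive integers in order of first appearance.

# Assumes __renumber above is defined at module level.
partition = {"a": 7, "b": 42, "c": 7, "d": 9}
print(__renumber(partition))   # -> {'a': 0, 'b': 1, 'c': 0, 'd': 2}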
def get_user_list(): """ return user list if the given authenticated user has admin permission :return: """ if requires_perm() is True: return jsonify({'user_list': USER_LIST, 'successful': True}), 200 return jsonify({'message': 'You are not ' 'permitted to access this resource', 'successful': False}), 403
38598ed7d54dd4d93a8b22c6344f22736bc3b805
3,639,576
def rm_words(user_input, stop_words): """Sanitize using intersection and list.remove()""" # Downsides: # - Looping over list while removing from it? # http://stackoverflow.com/questions/1207406/remove-items-from-a-list-while-iterating-in-python stop_words = set(stop_words) for sw in stop_words.intersection(user_input): while sw in user_input: user_input.remove(sw) return user_input
aead9c1cd5586bb20611ea8fbf57aa66aa3f5ede
3,639,577
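A minimal sketch of `rm_words` above: both arguments are plain lists of tokens, and repeated occurrences of a stop word are removed as well.

# Assumes rm_words above is in scope.
tokens = ["the", "cat", "sat", "on", "the", "mat"]
print(rm_words(tokens, ["the", "on"]))   # -> ['cat', 'sat', 'mat']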
import struct


def getValueForCoordinate(inputFile, lon, lat, noDataAsNone):
    """
    Reads the pixel value of a GeoTIFF for a geographic coordinate

    :param inputFile: full path to input GeoTIFF file
    :type inputFile: str
    :param lon: longitude
    :type lon: float
    :param lat: latitude
    :type lat: float
    :param noDataAsNone: switch to decide whether to return NODATA as None or the value stored in the GeoTIFF.
    :type noDataAsNone: bool
    :returns: pixel value of coordinate
    :rtype: float
    """
    inputRaster = gdal.Open(inputFile)
    geotransform = inputRaster.GetGeoTransform()
    rb = inputRaster.GetRasterBand(1)
    noDataVal = rb.GetNoDataValue()

    # this converts from map coordinates to raster coordinates
    # this will only work for CRS without rotation! If this is needed, we have to do some matrix
    # multiplication magic here ;-)
    # geotransform[0]/[1] describe the x axis (longitude), [3]/[5] the y axis (latitude).
    px = int((lon - geotransform[0]) / geotransform[1])  # (pos - origin) / pixelsize
    py = int((lat - geotransform[3]) / geotransform[5])

    structval = rb.ReadRaster(px, py, 1, 1, buf_type=gdal.GDT_Float64)
    val = struct.unpack('d', structval)  # this unpacks a C data structure into a Python value.

    if noDataAsNone and val[0] == noDataVal:
        return None
    else:
        return val[0]
4bfe9bf3de3dc277a84dad9e1fa523b2213b9b6d
3,639,578
def convert_xrandr_to_index(xrandr_val: float): """ :param xrandr_val: usually comes from the config value directly, as a string (it's just the nature of directly retrieving information from a .ini file) :return: an index representation of the current brightness level, useful for switch functions (where we switch based on indexes and not string values) Example: 0.2 is converted to 1 """ return int(xrandr_val * 10 - 1)
eed5f7a6c79f7dcb29c627521d31dc59e5cd430b
3,639,579
def get_message(name, value): """Provides the message for a standard Python exception""" if hasattr(value, "msg"): return f"{name}: {value.msg}\n" return f"{name}: {value}\n"
7755c63cc9a16e70ad9b0196d662ef603d82b5f6
3,639,580
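A small usage sketch for `get_message` above, formatting a caught built-in exception (assumes the function is in scope).

try:
    int("not a number")
except ValueError as exc:
    # ValueError has no .msg attribute, so the generic branch is used:
    # "ValueError: invalid literal for int() with base 10: 'not a number'\n"
    print(get_message(type(exc).__name__, exc), end="")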
import math


def calculate_page_info(offset, total_students):
    """
    Takes care of sanitizing the offset of the current page; also calculates
    offsets for the next and previous pages, plus information like the total
    number of pages and the current page number.

    :param offset: offset for database query
    :param total_students: total number of students, used to validate the offset
    :return: dict with the sanitized offset, next/previous page offsets,
        current page number and total number of pages
    """
    # validate offset.
    if not (isinstance(offset, int) or offset.isdigit()) or int(offset) < 0 or int(offset) >= total_students:
        offset = 0
    else:
        offset = int(offset)

    # calculate offsets for next and previous pages.
    next_offset = offset + MAX_STUDENTS_PER_PAGE_GRADE_BOOK
    previous_offset = offset - MAX_STUDENTS_PER_PAGE_GRADE_BOOK

    # calculate current page number (integer division keeps it a whole number).
    page_num = (offset // MAX_STUDENTS_PER_PAGE_GRADE_BOOK) + 1

    # calculate total number of pages.
    total_pages = int(math.ceil(float(total_students) / MAX_STUDENTS_PER_PAGE_GRADE_BOOK)) or 1

    if previous_offset < 0 or offset == 0:
        # We are at first page, so there's no previous page.
        previous_offset = None

    if next_offset >= total_students:
        # We've reached the last page, so there's no next page.
        next_offset = None

    return {
        "previous_offset": previous_offset,
        "next_offset": next_offset,
        "page_num": page_num,
        "offset": offset,
        "total_pages": total_pages
    }
e9af8bd4f511f42f30f60685d68fb043a54668de
3,639,581
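A worked example for `calculate_page_info` above, assuming for illustration that the module constant `MAX_STUDENTS_PER_PAGE_GRADE_BOOK` is 20.

# Assuming MAX_STUDENTS_PER_PAGE_GRADE_BOOK == 20: offset 20 of 90 students
# is page 2 of 5, with neighbours at offsets 0 and 40.
info = calculate_page_info(offset=20, total_students=90)
# -> {'previous_offset': 0, 'next_offset': 40, 'page_num': 2,
#     'offset': 20, 'total_pages': 5}
print(info)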
import os def find_files(topdirs, py = False): """Lists all python files under any topdir from the topdirs lists. Returns an appropriate list for data_files, with source and destination directories the same""" ret = [] for topdir in topdirs: for r, _ds, fs in os.walk(topdir): ret.append((r, [ os.path.join(r, f) for f in fs if (f.endswith('.py') or not py)])) return ret
b273d067bb6237a8c7bb9950aa7c764854f1124b
3,639,582
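A quick usage sketch for `find_files` above; the directory names are placeholders.

# Collect all .py files under ./src and ./tools as (directory, [files]) pairs,
# the shape setuptools expects for data_files. Directory names are placeholders.
for directory, files in find_files(["src", "tools"], py=True):
    print(directory, files)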
import argparse def _create_argument_parser(): """Creates the command line arg parser.""" parser = argparse.ArgumentParser(description='create a zip file', fromfile_prefix_chars='@') parser.add_argument('-o', '--output', type=str, help='The output zip file path.') parser.add_argument( '-d', '--directory', type=str, default='/', help='An absolute path to use as a prefix for all files in the zip.') parser.add_argument( '-t', '--timestamp', type=int, default=ZIP_EPOCH, help='The unix time to use for files added into the zip. values prior to' ' Jan 1, 1980 are ignored.') parser.add_argument('--stamp_from', default='', help='File to find BUILD_STAMP in') parser.add_argument( '-m', '--mode', help='The file system mode to use for files added into the zip.') parser.add_argument('--manifest', help='manifest of contents to add to the layer.', required=True) parser.add_argument( 'files', type=str, nargs='*', help='Files to be added to the zip, in the form of {srcpath}={dstpath}.') return parser
191824c9e18ecd289ecf308db63c50d427f46b84
3,639,583
def download_office(load=True): # pragma: no cover """Download office dataset. Parameters ---------- load : bool, optional Load the dataset after downloading it when ``True``. Set this to ``False`` and only the filename will be returned. Returns ------- pyvista.StructuredGrid or str DataSet or filename depending on ``load``. Examples -------- >>> from pyvista import examples >>> dataset = examples.download_office() >>> dataset.contour().plot() See :ref:`clip_with_plane_box_example` for an example using this dataset. """ return _download_and_read('office.binary.vtk', load=load)
5dd307bf815e7d7cbaef81b7542728b446b7f2cb
3,639,584
import time


def test_duplicated_topics(host):
    """
    Check that duplicated topic definitions in the same call are rejected
    """
    # Given
    duplicated_topic_name = get_topic_name()

    def get_topic_config():
        topic_configuration = topic_defaut_configuration.copy()
        topic_configuration.update({
            'name': duplicated_topic_name,
            'options': {
                'retention.ms': 66574936,
                'flush.ms': 564939
            }
        })
        return topic_configuration

    topic_configuration = {
        'topics': [
            get_topic_config(),
            get_topic_config()
        ]
    }

    # When
    results = ensure_kafka_topics(
        host,
        topic_configuration
    )
    time.sleep(0.3)

    # Then
    for result in results:
        assert not result['changed']
        assert 'duplicated topics' in result['msg']
68524bf675da4b52f05ee31ae35def7b461572cd
3,639,585
def register(request):
    """
    Account registration view.
    """
    message = ""
    if request.session.get('is_login', None):
        return redirect('/account/')
    if request.method == 'POST':
        username = request.POST.get('username')
        email = request.POST.get('email')
        password1 = request.POST.get('password1')
        password2 = request.POST.get('password2')
        message = "请检查填写的内容!"  # "Please check the information you entered!"
        if _makesure_password(password1, password2):
            message = _makesure_password(password1, password2)
        else:
            same_username = User.objects.filter(username=username)
            same_email = User.objects.filter(email=email)
            if same_username:
                message = '用户名已经存在!'  # "This username already exists!"
            elif same_email:
                message = '该邮箱已经被注册了!'  # "This email address has already been registered!"
            else:
                new_user = User()
                new_user.username = username
                new_user.email = email
                new_user.password = _hash_code(password1)
                new_user.save()

                code = _make_confirm_string(new_user)
                _send_email(email, code)

                message = '请前往邮箱进行确认!'  # "Please check your email to confirm!"
                return render(request, 'account/login.html', {'message': message})
    captcha_form = forms.captchaForm(request.POST)
    content = {'captcha_form': captcha_form, 'message': message, 'page_register': True}
    return render(request, 'account/register.html', content)
2a3963f6549cd20f4d994cbab336f0e0eb91e685
3,639,586
def cosh(x): """Evaluates the hyperbolic cos of an interval""" np = import_module('numpy') if isinstance(x, (int, float)): return interval(np.cosh(x), np.cosh(x)) elif isinstance(x, interval): #both signs if x.start < 0 and x.end > 0: end = max(np.cosh(x.start), np.cosh(x.end)) return interval(1, end, is_valid=x.is_valid) else: #Monotonic start = np.cosh(x.start) end = np.cosh(x.end) return interval(start, end, is_valid=x.is_valid) else: raise NotImplementedError
dd362392cce1aae2d19c589d49559b7b165c9f1e
3,639,587
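A hypothetical usage sketch for the interval `cosh` above, assuming `interval` is the interval-arithmetic class used throughout this module (e.g. sympy's plotting intervalmath class).

# Assumes `interval` is the module's interval-arithmetic class.
# An interval spanning both signs: the minimum of cosh is 1, the maximum is
# taken at whichever endpoint is farther from zero.
print(cosh(interval(-1, 2)))   # roughly [1, 3.7622]
# A plain float degenerates to a point interval.
print(cosh(0.0))               # [1, 1]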
def tree_intersection(tree_one, tree_two): """Checks for duplicate values between two trees and returns those values as a set.""" first_values = [] second_values = [] table = HashTable() dupes = set([]) tree_one.pre_order(first_values.append) tree_two.pre_order(second_values.append) for value in first_values: table.set(value, value) for value in second_values: if table.get(value): dupes.add(value) if len(dupes) == 0: return 'There are no duplicates.' return dupes
06ab08015cb02747fd3fea4055217a1dbeefc4b8
3,639,588
def new(): """Deliver new-question interface.""" return render_template('questionNew.html', question_id='')
bca210aa5661d034256c6ef06209f45ea4923aa9
3,639,589
def merge_dict_recursive(base, other): """Merges the *other* dict into the *base* dict. If any value in other is itself a dict and the base also has a dict for the same key, merge these sub-dicts (and so on, recursively). >>> base = {'a': 1, 'b': {'c': 3}} >>> other = {'x': 4, 'b': {'y': 5}} >>> want = {'a': 1, 'x': 4, 'b': {'c': 3, 'y': 5}} >>> got = merge_dict_recursive(base, other) >>> got == want True >>> base == want True """ for (key, value) in list(other.items()): if (isinstance(value, dict) and (key in base) and (isinstance(base[key], dict))): base[key] = merge_dict_recursive(base[key], value) else: base[key] = value return base
10ea2bbcf7d2ee330c784efff684974339d48b5d
3,639,590
import geojson


def two_points_line(feature):
    """Split a polyline feature into line segments of two points each."""
    features = []
    coords = feature['geometry']['coordinates']
    for i in range(0, len(coords) - 1):
        segment_coords = [coords[i], coords[i+1]]
        geom = geojson.LineString(segment_coords)
        features.append(geojson.Feature(geometry=geom))
    return features
49c0197c6a072c690a2507df4b9a517a95c9919e
3,639,591
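A minimal sketch for `two_points_line` above: a three-point polyline becomes two two-point segments. The coordinates are illustrative.

import geojson

# Illustrative coordinates; assumes two_points_line above is in scope.
feature = geojson.Feature(geometry=geojson.LineString([(0, 0), (1, 0), (1, 1)]))
segments = two_points_line(feature)
print(len(segments))                           # -> 2
print(segments[0]["geometry"]["coordinates"])  # the first two-point segment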
def transform_world_to_camera(poses_set, cams, ncams=4):
    """
    Project 3d poses from world coordinate to camera coordinate system
    Args
        poses_set: dictionary with 3d poses
        cams: dictionary with cameras
        ncams: number of cameras per subject
    Return:
        t3d_camera: dictionary with 3d poses in camera coordinate
    """
    t3d_camera = {}
    for t3dk in sorted(poses_set.keys()):
        subj, action, seqname = t3dk
        t3d_world = poses_set[t3dk]
        # use a dedicated loop variable so it is not shadowed by the camera
        # centre `c` unpacked from the camera parameters below
        for cam_idx in range(ncams):
            R, T, f, c, k, p, name = cams[(subj, cam_idx + 1)]
            camera_coord = world_to_camera_frame(np.reshape(t3d_world, [-1, 3]), R, T)
            camera_coord = np.reshape(camera_coord, [-1, len(H36M_NAMES) * 3])
            sname = seqname[:-3] + "." + name + ".h5"  # e.g.: Waiting 1.58860488.h5
            t3d_camera[(subj, action, sname)] = camera_coord
    return t3d_camera
c67c61e7746fd67ca62a848b3641e27e068348a5
3,639,592
from typing import Dict
import subprocess


def cwebp(input_image: str, output_image: str, option: str, logging: str = "-v", bin_path: str = None) -> Dict:
    """
    Convert an image to the .webp format using the cwebp binary.

    :param input_image: path to the input image (.jpeg, .png, ...)
    :param output_image: path to the output .webp image
    :param option: cwebp command-line options, e.g. the quality given between 0 and 100
    :param logging: verbosity flag passed to cwebp
    :param bin_path: optional path to the cwebp binary
    :return: dict with the exit code, stdout, stderr and the command that was run
    """
    cmd = f"{getcwebp(bin_path=bin_path)} {option} {input_image} -o {output_image} {logging}"
    p = subprocess.Popen(cmd, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    result = {'exit_code': p.returncode, 'stdout': stdout, 'stderr': stderr, 'command': cmd}
    return result
569b5cad6d369c8ce7ca058c92fb245dbcf5fd81
3,639,593
def div66():
    """
    Returns the divider.

    :return: divider66
    """
    return divider66
fbade8a4b3aa445985686c180f3cbad71832498f
3,639,594
def extract_square_from_file(image_number=1): """Given a number of the image file return a cropped sudoku.""" image_string = '/home/james/Documents/projects/sudoku/img/' image_string += str(image_number) + '.jpg' binary = read_binary(image_string) threshold = get_threshold(binary) square = get_square_coordinates(threshold) game = extract_sudoku(square, threshold) return game
def244273d2e329b771a61b5fb4fb980e120831d
3,639,595
import os def CSV2GRID(strPathInCSV, strPathOutASC, intCol): """ Function CSV2GRID args: Command Syntax: CSV2GRID [switches] inputfile column outputfile """ lstCMD = [strPathFuInstall + os.sep + "CSV2GRID", strPathInCSV, str(intCol), strPathOutASC] return ' '.join(lstCMD)
f4ab6e54429de250f036702e6232141b2dece771
3,639,596
def sequence_extractor(graph, path): """ returns the sequence of the path :param graph: a graph object :param path: a list of nodes ordered according to the path :return: sequence of the path """ # check if path exists if len(path) == 1: return graph.nodes[path[0]].seq elif not path_checker(graph, path): return "" if graph.nodes[path[0]].in_direction(graph.nodes[path[1]].id, 0): direction = 0 sequence = reverse_complement(graph.nodes[path[0]].seq) elif graph.nodes[path[0]].in_direction(graph.nodes[path[1]].id, 1): direction = 1 sequence = graph.nodes[path[0]].seq for i in range(len(path) - 1): current_node = graph.nodes[path[i]] next_node = graph.nodes[path[i+1]] if current_node.in_direction(next_node.id, direction): direction, overlap = next_direction(current_node, next_node, direction) # if next direction is one this means current node connects to # next node from 0 so I don't need to take the reverse complement # Otherwise I need to if direction == 1: sequence += next_node.seq[overlap:] else: sequence += reverse_complement(next_node.seq)[overlap:] return sequence
1f83fcbf75add7234f9395e801d22d95d55804a9
3,639,597
from typing import Optional from typing import List from typing import Union from typing import Literal from typing import Tuple from typing import Dict def plot_matplotlib( tree: CassiopeiaTree, depth_key: Optional[str] = None, meta_data: Optional[List[str]] = None, allele_table: Optional[pd.DataFrame] = None, indel_colors: Optional[pd.DataFrame] = None, indel_priors: Optional[pd.DataFrame] = None, orient: Union[Literal["up", "down", "left", "right"], float] = 90.0, extend_branches: bool = True, angled_branches: bool = True, add_root: bool = False, figsize: Tuple[float, float] = (7.0, 7.0), colorstrip_width: Optional[float] = None, colorstrip_spacing: Optional[float] = None, clade_colors: Optional[Dict[str, Tuple[float, float, float]]] = None, internal_node_kwargs: Optional[Dict] = None, leaf_kwargs: Optional[Dict] = None, branch_kwargs: Optional[Dict] = None, colorstrip_kwargs: Optional[Dict] = None, continuous_cmap: Union[str, mpl.colors.Colormap] = "viridis", vmin: Optional[float] = None, vmax: Optional[float] = None, categorical_cmap: Union[str, mpl.colors.Colormap] = "tab10", value_mapping: Optional[Dict[str, int]] = None, ax: Optional[plt.Axes] = None, random_state: Optional[np.random.RandomState] = None, ) -> Tuple[plt.Figure, plt.Axes]: """Generate a static plot of a tree using Matplotlib. Args: tree: The CassiopeiaTree to plot. depth_key: The node attribute to use as the depth of the nodes. If not provided, the distances from the root is used by calling `tree.get_distances`. meta_data: Meta data to plot alongside the tree, which must be columns in the CassiopeiaTree.cell_meta variable. allele_table: Allele table to plot alongside the tree. indel_colors: Color mapping to use for plotting the alleles for each cell. Only necessary if `allele_table` is specified. indel_priors: Prior probabilities for each indel. Only useful if an allele table is to be plotted and `indel_colors` is None. orient: The orientation of the tree. Valid arguments are `left`, `right`, `up`, `down` to display a rectangular plot (indicating the direction of going from root -> leaves) or any number, in which case the tree is placed in polar coordinates with the provided number used as an angle offset. Defaults to 90. extend_branches: Extend branch lengths such that the distance from the root to every node is the same. If `depth_key` is also provided, then only the leaf branches are extended to the deepest leaf. angled_branches: Display branches as angled, instead of as just a line from the parent to a child. add_root: Add a root node so that only one branch connects to the start of the tree. This node will have the name `synthetic_root`. figsize: Size of the plot. Defaults to (7., 7.,) colorstrip_width: Width of the colorstrip. Width is defined as the length in the direction of the leaves. Defaults to 5% of the tree depth. colorstrip_spacing: Space between consecutive colorstrips. Defaults to half of `colorstrip_width`. clade_colors: Dictionary containing internal node-color mappings. These colors will be used to color all the paths from this node to the leaves the provided color. internal_node_kwargs: Keyword arguments to pass to `plt.scatter` when plotting internal nodes. leaf_kwargs: Keyword arguments to pass to `plt.scatter` when plotting leaf nodes. branch_kwargs: Keyword arguments to pass to `plt.plot` when plotting branches. colorstrip_kwargs: Keyword arguments to pass to `plt.fill` when plotting colorstrips. continuous_cmap: Colormap to use for continuous variables. Defaults to `viridis`. 
vmin: Value representing the lower limit of the color scale. Only applied to continuous variables. vmax: Value representing the upper limit of the color scale. Only applied to continuous variables. categorical_cmap: Colormap to use for categorical variables. Defaults to `tab10`. value_mapping: An optional dictionary containing string values to their integer mappings. These mappings are used to assign colors by calling the `cmap` with the designated integer mapping. By default, the values are assigned pseudo-randomly (whatever order the set() operation returns). Only applied for categorical variables. ax: Matplotlib axis to place the tree. If not provided, a new figure is initialized. random_state: A random state for reproducibility Returns: If `ax` is provided, `ax` is returned. Otherwise, a tuple of (fig, ax) of the newly initialized figure and axis. """ is_polar = isinstance(orient, (float, int)) ( node_coords, branch_coords, node_colors, branch_colors, colorstrips, ) = place_tree_and_annotations( tree, depth_key, meta_data, allele_table, indel_colors, indel_priors, orient, extend_branches, angled_branches, add_root, colorstrip_width, colorstrip_spacing, clade_colors, continuous_cmap, vmin, vmax, categorical_cmap, value_mapping, random_state, ) fig = None if ax is None: fig, ax = plt.subplots(figsize=figsize, tight_layout=True) ax.set_axis_off() # Plot all nodes _leaf_kwargs = dict(x=[], y=[], s=5, c="black") _node_kwargs = dict(x=[], y=[], s=0, c="black") _leaf_kwargs.update(leaf_kwargs or {}) _node_kwargs.update(internal_node_kwargs or {}) for node, (x, y) in node_coords.items(): if node in node_colors: continue if is_polar: x, y = utilities.polar_to_cartesian(x, y) if tree.is_leaf(node): _leaf_kwargs["x"].append(x) _leaf_kwargs["y"].append(y) else: _node_kwargs["x"].append(x) _node_kwargs["y"].append(y) ax.scatter(**_leaf_kwargs) ax.scatter(**_node_kwargs) _leaf_colors = [] _node_colors = [] _leaf_kwargs.update({"x": [], "y": []}) _node_kwargs.update({"x": [], "y": []}) for node, color in node_colors.items(): x, y = node_coords[node] if is_polar: x, y = utilities.polar_to_cartesian(x, y) if tree.is_leaf(node): _leaf_kwargs["x"].append(x) _leaf_kwargs["y"].append(y) _leaf_colors.append(color) else: _node_kwargs["x"].append(x) _node_kwargs["y"].append(y) _node_colors.append(color) _leaf_kwargs["c"] = _leaf_colors _node_kwargs["c"] = _node_colors ax.scatter(**_leaf_kwargs) ax.scatter(**_node_kwargs) # Plot all branches _branch_kwargs = dict(linewidth=1, c="black") _branch_kwargs.update(branch_kwargs or {}) for branch, (xs, ys) in branch_coords.items(): if branch in branch_colors: continue if is_polar: xs, ys = utilities.polars_to_cartesians(xs, ys) ax.plot(xs, ys, **_branch_kwargs) for branch, color in branch_colors.items(): _branch_kwargs["c"] = color xs, ys = branch_coords[branch] if is_polar: xs, ys = utilities.polars_to_cartesians(xs, ys) ax.plot(xs, ys, **_branch_kwargs) # Colorstrips _colorstrip_kwargs = dict(linewidth=0) _colorstrip_kwargs.update(colorstrip_kwargs or {}) for colorstrip in colorstrips: # Last element is text, but this can not be shown in static plotting. for xs, ys, c, _ in colorstrip.values(): _colorstrip_kwargs["c"] = c if is_polar: xs, ys = utilities.polars_to_cartesians(xs, ys) ax.fill(xs, ys, **_colorstrip_kwargs) return (fig, ax) if fig is not None else ax
a26a8146f4f5fb3bc3564367774740645e04caf3
3,639,598
import jinja2
import datetime
import time
import hashlib


def print_order(order: Order, user_id: int = 0):
    """
    Print an order receipt.

    :param order: the Order to print
    :param user_id: operator ID; an order log entry is recorded when it is >= 0
    :return: (success, message) tuple from the printer request
    """
    shop_id = order.shop.id
    shop = get_shop_by_shop_id(shop_id)
    receipt_config = get_receipt_by_shop_id(shop_id)
    printer = ylyPrinter()
    template = jinja2.Template(ORDER_TPL_58)
    body = template.render(
        order=order,
        print_time=make_aware(datetime.datetime.now()).strftime("%Y-%m-%d %H:%M:%S"),
        shop=shop,
        receipt_config=receipt_config,
    )
    printer_config = get_printer_by_shop_id(shop_id)
    if not printer_config:
        return False, "请先添加打印机"  # "Please add a printer first"
    partner = "1693"  # user ID
    apikey = "664466347d04d1089a3d373ac3b6d985af65d78e"  # API key
    timenow = str(int(time.time()))  # current timestamp
    machine_code = printer_config.code  # printer terminal code 520
    mkey = printer_config.key  # printer key 110110
    if machine_code and mkey:
        sign = "{}machine_code{}partner{}time{}{}".format(
            apikey, machine_code, partner, timenow, mkey
        )
        sign = hashlib.md5(sign.encode("utf-8")).hexdigest().upper()
    else:
        return False, "打印机配置错误"  # "Printer configuration error"
    data = {
        "partner": partner,
        "machine_code": machine_code,
        "content": body,
        "time": timenow,
        "sign": sign,
    }
    success, msg = printer.send_request(data, receipt_config.copies)
    if success and user_id >= 0:
        log_info = {
            "order_num": order.order_num,
            "shop_id": order.shop.id,
            "operator_id": user_id,
            "operate_type": OrderLogType.PRINT,
        }
        create_order_log(log_info)
    return success, msg
bdfdbe51b854093172f2ab2c4b9d1abd15856847
3,639,599