Columns: content (string, length 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def default_role(name, arguments, options, content, lineno,
                 content_offset, block_text, state, state_machine):
    """Set the default interpreted text role."""
    if not arguments:
        if roles._roles.has_key(''):
            # restore the "default" default role
            del roles._roles['']
        return []
    role_name = arguments[0]
    role, messages = roles.role(
        role_name, state_machine.language, lineno, state.reporter)
    if role is None:
        error = state.reporter.error(
            'Unknown interpreted text role "%s".' % role_name,
            nodes.literal_block(block_text, block_text), line=lineno)
        return messages + [error]
    roles._roles[''] = role
    # @@@ should this be local to the document, not the parser?
    return messages
f1f6ed82da74cd898d4df0753cd1044a6666d216
3,641,200
from typing import Dict
import os
import json


def get_zind_json(server_token, output_folder) -> Dict:
    """
    Returns the dict for the ZInD json.

    Sends a request to the BridgeAPI to get details about the ZInD Dataset
    and stores the response json file in the output folder.

    :param server_token: token for access to the API
    :param output_folder: path to store response
    :return: ZInD Dict
    """
    dest_path = os.path.join(output_folder, "zind_response.json")
    result_dict = {}
    value_key = "value"
    if os.path.exists(dest_path):
        logger.info(f"Loading ZInD json from {dest_path}")
        try:
            result_dict = json.load(open(dest_path))
            logger.info("Loaded ZInD json successfully")
        except Exception as e:
            logger.info(f"ZInD json invalid, re-downloading file: {e}")
    zind_url = BRIDGE_API_URL
    bearer_token = f"Bearer {server_token}"
    payload = {}
    headers = {"Authorization": bearer_token}
    for retry_count in range(1, MAX_NUM_RETRIES + 1):
        if value_key in result_dict:
            break
        logger.info(
            f"Retrieving ZInD json (attempt {retry_count} out of {MAX_NUM_RETRIES})"
        )
        result_dict = download_json_in_chunks(zind_url, headers, payload, dest_path)
        logger.info("Downloaded ZInD json successfully")
    else:
        logger.error(
            "Could not download ZInD json, please check your credentials and internet connection"
        )
        return None
    return result_dict[value_key]
85134d6dcec3a0ad1cdc0d206c2879e99ad22963
3,641,201
def input_fn(evaluate=False) -> tf.data.Dataset:
    """
    Returns the text as char array

    Args:
        evaluate: If True, feed from the evaluation generator instead of the
            training generator
    """
    # The dataset
    g = evaluate_generator if evaluate else train_generator
    ds = tf.data.Dataset.from_generator(
        generator=g,
        output_types=({'character': tf.string}, tf.string),
        output_shapes=({'character': (SEQUENCE_LENGHT,)}, ())
    )
    ds = ds.batch(64)
    ds = ds.prefetch(1)
    return ds
2954eb4b4f657fff3e029096a562516842c615e8
3,641,202
import os
from typing import Text


def make_model_path(model_base_path: Text, model_name: Text, version: int) -> Text:
    """Make a TFS-flavored model path.

    Args:
        model_base_path: A base path containing the directory of model_name.
        model_name: A name of the model.
        version: An integer version of the model.

    Returns:
        `{model_base_path}/{model_name}/{version}`.
    """
    return os.path.join(model_base_path, model_name, str(version))
0c170550d53100f4ac916d26da9afc3be3330691
3,641,203
async def validate_input(data):
    """Validate the user input allows us to connect.

    Data has the keys from DATA_SCHEMA with values provided by the user.
    """
    harmony = await get_harmony_client_if_available(data[CONF_HOST])
    if not harmony:
        raise CannotConnect

    return {
        CONF_NAME: find_best_name_for_remote(data, harmony),
        CONF_HOST: data[CONF_HOST],
        UNIQUE_ID: find_unique_id_for_remote(harmony),
    }
fc174ed28bd80fd8e118094ea21358b9c8f41fa3
3,641,204
import numpy


def window_tukey(M, alpha=0.5):
    """Return a Tukey window, also known as a tapered cosine window.

    The function returns a boxcar window for `alpha=0` and a Hann window
    for `alpha=1`.
    """
    if alpha == 0:
        return window_boxcar(M)
    elif alpha == 1:
        return numpy.hanning(M)
    n = numpy.arange(0, M)
    width = int(numpy.floor(alpha * (M - 1) / 2.0))
    n1 = n[0:width + 1]
    n2 = n[width + 1:M - width - 1]
    n3 = n[M - width - 1:]
    w1 = 0.5 * (1 + numpy.cos(numpy.pi * (-1 + 2.0 * n1 / alpha / (M - 1))))
    w2 = numpy.ones(n2.shape)
    w3 = 0.5 * (1 + numpy.cos(numpy.pi * (-2.0 / alpha + 1 + 2.0 * n3 / alpha / (M - 1))))
    return numpy.concatenate((w1, w2, w3))
f62d216de29e2271270a4fc3c03b0f93930f1275
3,641,205
def workshopsDF(symbol="", **kwargs):
    """This is a meeting or series of meetings at which a group of people engage
    in discussion and activity on a particular subject, product or service to
    gain hands-on experience.

    https://iexcloud.io/docs/api/#workshops

    Args:
        symbol (str): symbol to use
    """
    return _baseDF(id="PREMIUM_WALLSTREETHORIZON_WORKSHOP", symbol=symbol, **kwargs)
7f6417acecdd7a2bd7279e5d0367894458589241
3,641,206
def free_path(temp, diff, m_mol):
    """
    Calculates the free path for a molecule

    Based on free_path.m by Joni Kalliokoski 2014-08-13

    :param temp: temperature (K)
    :param diff: diffusion coefficient (m^2/s)
    :param m_mol: molar mass (kg/mol)
    :return: free path (m)
    """
    return 3*diff*np.sqrt((np.pi*m_mol)/(8*gas_const*temp))
053a2af667e1e9ecc85f1ea6ca898c80e14ec81f
3,641,207
def comp_periodicity_spatial(self):
    """Compute the (anti)-periodicities of the machine in space domain

    Parameters
    ----------
    self : Machine
        A Machine object

    Returns
    -------
    pera : int
        Number of spatial periodicities of the machine over 2*pi
    is_apera : bool
        True if an anti-periodicity is possible after the periodicities
    """
    p = self.get_pole_pair_number()

    # Get stator (anti)-periodicity in spatial domain
    pera_s, is_antipera_s = self.stator.comp_periodicity_spatial()

    # Get rotor (anti)-periodicities in spatial domain
    pera_r, is_antipera_r = self.rotor.comp_periodicity_spatial()

    # Get machine spatial periodicity
    pera = int(gcd(gcd(pera_s, pera_r), p))

    # Get machine time and spatial anti-periodicities
    is_apera = bool(is_antipera_s and is_antipera_r)

    return pera, is_apera
b34a50f0df0bc1bfd3ffaddb5e8a57780e50a6b8
3,641,208
import torch


def shuffle_tensor(input):
    """
    Returns a new tensor whose elements correspond to a randomly shuffled version
    of the elements of the input.

    Args:
        input (`torch.Tensor`): input tensor.

    Returns:
        (`torch.Tensor`): output tensor.
    """
    return input[torch.randperm(input.nelement())]
e7c3ff4180123de1fe6322296ba08863de9766a4
3,641,209
def relaunch_failed_jobs(tasks, spec_file, verbose=False):
    """ Relaunch jobs that are failed from the given list """
    job_cnts = 0  # number of newly launched jobs

    for i, task in enumerate(tasks):
        job_id = str(task[-1])  # the last entry

        # Try to launch until succeed
        while True:
            p = relaunch_failed_job(job_id, spec_file)
            if p is None:  # NOTE: when the job is not failed
                break
            if verbose:
                print("==> Re-launching failed task: {} ...".format(task))
            new_id = get_job_id(p)
            if new_id is not None:
                break

        # If a new process is launched
        if p is not None:
            tasks[i][-1] = new_id
            job_cnts += 1

    return job_cnts
d78c250e1c6f10c60bb81aea077063f6f5b15b12
3,641,210
def intensityTriWave(coeff, L, ang):
    """Simulate the intensity observed a distance L from the grating.
    Standard Zernike coefficients, L, and the diffraction angle ang
    are used as input.
    """
    k = 2*np.pi/405.e-6  # blue wavevector
    x, y = np.meshgrid(np.linspace(-1.1, 1.1, 1000), np.linspace(-1.1, 1.1, 1000))
    m = np.sin(ang)
    coeff = np.array(coeff).astype('float')
    coeff = np.tile(coeff, (3, 1))
    coeff[0][2] = -m/2.
    coeff[1][1] = m/2.*np.sqrt(3)/2
    coeff[1][2] = m/4
    coeff[2][1] = -m/2.*np.sqrt(3)/2
    coeff[2][2] = m/4
    # Construct three phases
    phi1 = zern.zernsurf(x, y-m*L, 0., 0., 1., coeff[0])
    phi2 = zern.zernsurf(x-m*L*np.sqrt(3)/2, y+m*L/2, 0, 0, 1, coeff[1])
    phi3 = zern.zernsurf(x+m*L*np.sqrt(3)/2, y+m*L/2, 0, 0, 1, coeff[2])
    # Transform into complex exponentials and combine
    i = np.abs(np.exp(1j*phi1*k)+np.exp(1j*phi2*k)+np.exp(1j*phi3*k))**2
    return phi1, phi2, phi3, i
3b85a0d437fce65d8f164b31a3fc2e85fca33006
3,641,211
import requests
import time
import ast
from urllib import urlencode


def do_auth_code_grant(fqdn, force_login=False, identity=None):
    """Perform an Oauth2 authorization grant consent flow."""
    code_verifier, code_challenge = _gen_code()
    scope = (SCOPE_FORMAT.format(fqdn=fqdn))
    host = GLOBUS_AUTH_HOST
    creds = _lookup_credentials()

    params = {
        'redirect_uri'         : 'https://' + host + '/v2/web/auth-code',
        'client_id'            : creds['client'],
        'access_type'          : 'offline',
        'state'                : '_default',
        'code_challenge'       : code_challenge,
        'code_challenge_method': 'S256',
        'response_type'        : 'code',
        'scope'                : scope
    }

    if identity is not None:
        params['session_message'] = 'The SSH service requires that you authenticate using this identity:'
        params['session_required_identities'] = str(identity)

    if force_login is True:
        params['prompt'] = 'login'

    url = "https://" + host + '/v2/oauth2/authorize?' + urlencode(params)
    print('Please go to this URL and login: {0}'.format(url))
    auth_code = raw_input(
        'Please enter the code you get after login here: ').strip()

    body = {
        'code'         : auth_code,
        'code_verifier': code_verifier,
        'redirect_uri' : 'https://' + host + '/v2/web/auth-code',
        'grant_type'   : 'authorization_code'
    }

    r = _authenticated_request(requests.post, '/v2/oauth2/token', data=body)
    return Token(authorized_at=int(time.time()), **ast.literal_eval(r.text))
0e5583d1d6a273e165f0d8d9bed82e3c9af491cd
3,641,212
import json


def decode(serialized: str) -> Node:
    """Decode JSON as a `Node`"""
    node = json.loads(serialized)
    return dict_decode(node) if isinstance(node, dict) else node
b608b6c18c09d7061e09d722445ca1f50fd78b3f
3,641,213
def validate_duration_unit(recv_duration_unit):
    """Decapitalize and check in units_list"""
    units_list = DaysAndUnitsList.units_list
    recv_duration_unit = recv_duration_unit.lower()
    if recv_duration_unit in units_list:
        return True
    else:
        return False
784693ea8106c601b884a729ad2afd2a75b94ba2
3,641,214
def make_word_list1():
    """Reads lines from a file and builds a list using append."""
    t = []
    fin = open('words.txt')
    for line in fin:
        word = line.strip()
        t.append(word)
    return t
7af7b0697557e8bba891d73bd8217860350b810e
3,641,215
import logging import json import os def create_app(environment: str = None): """Create the Flask application. Returns: obj: The configured Flask application context. """ app = Flask(__name__) if environment is None: app.config.from_object(ConfigurationFactory.from_env()) else: app.config.from_object(ConfigurationFactory.get_config(environment)) app.config.update( SQLALCHEMY_DATABASE_URI=ConfigurationFactory.get_config( environment).sqlalchemy_database_uri, OAUTH2_TOKEN_EXPIRES_IN={ 'authorization_code': 864000, 'implicit': 3600, 'password': 864000, 'client_credentials': 60 * 5 }, SECRET_KEY=ConfigurationFactory.generate_secret_key() ) is_testing = environment == 'TESTING' logging.basicConfig(format='%(message)s', level=logging.INFO) @app.after_request def after_request(response): """ Logging every request. """ if not is_testing: jsonstr = json.dumps({ "remote_addr": request.remote_addr, "request_time": str(dt.utcnow()), "method": request.method, "path": request.path, "scheme": request.scheme.upper(), "statusCode": response.status_code, "status": response.status, "content_length": response.content_length, "user_agent": str(request.user_agent) }) logging.info(jsonstr) return response def handle_errors(e): logging.info(f"""{e}, app.py, line 83""") response_body = ResponseBody() if isinstance(e, ValidationError): return response_body.custom_response(status="Error", messages=[e.messages]) elif isinstance(e, RecordNotFoundError): return response_body.not_found_response(e.record_id) else: try: error_code = str(e).split(':')[0][:3].strip() error_text = str(e).split(':')[0][3:].strip() if isinstance(error_code, int): return response_body.custom_response(code=error_code, messages={'error': error_text}) else: raise Exception except Exception as e: return response_body.exception_response(str(e)) if not is_testing: apm_enabled = bool(int(os.getenv('APM_ENABLED', '0'))) if apm_enabled: app.config['ELASTIC_APM'] = { 'SERVICE_NAME': 'authserver', 'SECRET_TOKEN': os.getenv('APM_TOKEN', ''), 'SERVER_URL': os.getenv('APM_HOSTNAME', ''), } apm = ElasticAPM(app) db.init_app(app) config_oauth(app) CORS(app) migrate = Migrate(app, db) app.register_blueprint(home_bp) app.register_blueprint(health_api_bp) app.register_blueprint(user_bp) app.register_blueprint(client_bp) app.register_blueprint(oauth2_bp) app.register_blueprint(role_bp) app.register_blueprint(scope_bp) app.register_blueprint(password_recovery_bp) app.register_error_handler(Exception, handle_errors) app.teardown_appcontext(teardown_appcontext) FlaskInjector(app=app, modules=[ ConfigurationModule, GraphDatabaseModule, MailServiceModule]) return app
07b6e899f6992008f072040d445abfb339f1c9a4
3,641,216
def metropolis(data, likelihood, priors, samples=1000, par_init=None, width_prop=.5):
    """
    Returns the posterior function of the parameters given the likelihood and the
    prior functions. Returns also the number of the accepted jumps in the
    Metropolis-Hastings algorithm.

    Notes:
    - <width_prop> should be chosen so to result in about 50% accepted jumps.
    - <posterior> has shape (samples, n_par).
    - priors must be from function "prior_dist".
    - for numerical stability the computation is carried out using logarithms.
    """
    # Current parameters
    n_par = len(priors)
    par_curr = np.zeros(n_par) if (par_init is None) else np.asarray(par_init)

    # Init quantities
    jumps = 0
    par_prop = np.zeros(n_par)
    posterior = np.zeros((samples, n_par))
    posterior[0, :] = par_curr

    # Current priors
    bb = 0.0
    for i in range(n_par):
        bb += np.log(prior_dist(priors[i], par_curr[i]))
    prior_curr = np.exp(bb)

    # Current likelihood
    bb = np.log(likelihood(data, par_curr)).sum()
    likelihood_curr = np.exp(bb)

    # Current posterior probability
    p_curr = likelihood_curr * prior_curr

    # Loop <samples> times
    for sample in range(samples):

        # Randomly pick the proposed parameters
        for i in range(n_par):
            par_prop[i] = stats.norm(par_curr[i], width_prop).rvs()

        # Evaluate priors with the proposed parameters
        bb = 0.0
        for i in range(n_par):
            bb += np.log(prior_dist(priors[i], par_prop[i]))
        prior_prop = np.exp(bb)

        # Evaluate likelihood with the proposed parameters
        bb = np.log(likelihood(data, par_prop)).sum()
        likelihood_prop = np.exp(bb)

        # Proposed posterior probability
        p_prop = likelihood_prop * prior_prop

        # Randomly accept or reject the jump
        p_accept = p_prop / p_curr
        if np.random.uniform() < p_accept:

            # Update quantities if jump accepted
            jumps += 1
            par_curr = par_prop.copy()
            prior_curr = prior_prop
            likelihood_curr = likelihood_prop
            p_curr = p_prop

        # Save (accepted and rejected) parameters
        posterior[sample, :] = par_curr

    return posterior, jumps
3857e237390a8373eecbc575209e96d42b6ff614
3,641,217
def _get_single_spec_df(reference_dict, mapping_dict, spectrum):
    """Primary method for reading and storing information from a single spectrum.

    Args:
        reference_dict (dict): dict with reference columns to be filled in
        mapping_dict (dict): mapping of engine level column names to ursgal unified column names
        spectrum (xml Element): namespace of single spectrum with potentially multiple PSMs

    Returns:
        (pd.DataFrame): dataframe containing spectrum information
    """
    spec_records = []
    spec_level_dict = reference_dict.copy()

    spec_level_dict["spectrum_id"] = spectrum.attrib["spectrumID"].split("scan=")[-1]

    # Iterate children
    for psm in spectrum.findall(".//{*}SpectrumIdentificationItem"):
        psm_level_dict = spec_level_dict.copy()

        psm_level_dict.update(
            {mapping_dict[k]: psm.attrib[k] for k in mapping_dict if k in psm.attrib}
        )

        cv_param_info = {
            c.attrib["name"]: c.attrib["value"] for c in psm.findall(".//{*}cvParam")
        }
        psm_level_dict.update(
            {
                mapping_dict[k]: cv_param_info[k]
                for k in mapping_dict
                if k in cv_param_info
            }
        )

        spec_records.append(psm_level_dict)

    return pd.DataFrame(spec_records)
3d286e9bc206c0b59364cf9ef6d861b5cde9e9d4
3,641,218
import re


def in2func(inp):
    """Function converts input expression to a mathematical expression."""
    # Validate Function
    if inp == "":
        raise ValueError("Enter a function to plot!")

    for char in re.findall("[a-zA-Z_]+", inp):
        if char not in allowed_inputs:
            # Error will communicate over stderr pipeline
            raise ValueError(
                f"'{char}' is not allowed as an input character!")

    # Replace allowed chars with suitable methods for eval compiling.
    for before, after in replaced_inputs.items():
        inp = inp.replace(before, after)

    # Edge Case: When no 'x' presents in the function
    if "x" not in inp:
        inp = f"({inp})*(x**0)"

    # Return a function to be used for y value calculation.
    def func(x):
        return eval(inp)

    return func
d3bf2faaed00f7b57c5bcd5b2681c94846671793
3,641,219
from datetime import datetime


def filter_posts(posts: list, parsing_date: datetime) -> list:
    """Filter out posts that do not fall within the month being parsed"""
    res = []
    for post in posts:
        post_date = datetime.fromtimestamp(post['date'])
        if post_date.month == parsing_date.month:
            res.append(post)
    return res
381d5cb37e4ae3439c335a7962352431ad3ca17c
3,641,220
import requests
import re
from requests.exceptions import RequestException


def parse_bing():
    """
    Parse the Bing homepage for the wallpaper link, using regular expression matching.

    :return: IMG_info, IMG_url
    """
    base_url = 'https://cn.bing.com/'
    language_parameter = '?mkt=zh-CN'
    # base_url = 'https://www.bing.com/?mkt=zh-CN'
    try:
        resp = requests.get(base_url + language_parameter, headers=header).text
    except RequestException:
        send_text(MASTER, "connectionError")
    # print(resp)
    match_url = re.search('id="bgLink".*?href="(.*?)"', resp, re.S)
    info = re.search('class="sc_light" title="(.*?)".*?"主页图片信息"', resp, re.S)
    print(info)
    if not info:
        info = re.search('"copyright":"(.*?)","copyrightlink"', resp, re.S)
        print('-' * 40)
        print(info)
    IMG_info = str(info.groups(1)).strip("(),'")
    IMG_url = base_url + str(match_url.groups(1)).strip("()',")
    print(IMG_info, "----", IMG_url)
    return IMG_info, IMG_url
4a963514f385a931882a75f45be774cbab4428ff
3,641,221
def quadsum(*args, **kwargs): """Sum of array elements in quadrature. This function is identical to numpy.sum except that array elements are squared before summing and then the sqrt of the resulting sums is returned. The docstring from numpy.sum is reproduced below for convenience (copied 2014-12-09) Parameters ---------- a : array_like Elements to sum. axis : None or int or tuple of ints, optional Axis or axes along which a sum is performed. The default (`axis` = `None`) is perform a sum over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.7.0 If this is a tuple of ints, a sum is performed on multiple axes, instead of a single axis or all the axes as before. dtype : dtype, optional The type of the returned array and of the accumulator in which the elements are summed. By default, the dtype of `a` is used. An exception is when `a` has an integer type with less precision than the default platform integer. In that case, the default platform integer is used instead. out : ndarray, optional Array into which the output is placed. By default, a new array is created. If `out` is given, it must be of the appropriate shape (the shape of `a` with `axis` removed, i.e., ``numpy.delete(a.shape, axis)``). Its type is preserved. See `doc.ufuncs` (Section "Output arguments") for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- sum_along_axis : ndarray An array with the same shape as `a`, with the specified axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar is returned. If an output array is specified, a reference to `out` is returned. See Also -------- ndarray.sum : Equivalent method. cumsum : Cumulative sum of array elements. trapz : Integration of array values using the composite trapezoidal rule. mean, average Notes ----- Arithmetic is modular when using integer types, and no error is raised on overflow. Examples -------- >>> np.sum([0.5, 1.5]) 2.0 >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) 1 >>> np.sum([[0, 1], [0, 5]]) 6 >>> np.sum([[0, 1], [0, 5]], axis=0) array([0, 6]) >>> np.sum([[0, 1], [0, 5]], axis=1) array([1, 5]) If the accumulator is too small, overflow occurs: >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) -128 """ args = list(args) args[0] = np.asarray(args[0])**2 return np.sqrt(np.sum(*args, **kwargs))
ef842dab6258dc46b84098098115151a240f767b
3,641,222
import codecs


def open_file(path):
    """open_file."""
    return codecs.open(path, encoding='utf8').read()
f7fd375ea76e8e7872e465e89eea5c02f3396115
3,641,223
def get_api_status():
    """Get API status"""
    return "<h4>API Is Up</h4>"
5c88fc39bc5a970c4d223d8fe87c4fa3ad473b50
3,641,224
import os import shutil def kmeans_anchors(train_path='../datasets/ego-hand/train.txt', k_clusters=9, img_size=416, save_path=None): """Generate anchors for the dataset. Normalised labels: cls id, center x, center y, width, height """ # Get paths of training images and labels ann_paths = [] train_name = os.path.basename(train_path) ds_path = train_path[:-len(train_name)] with open(train_path, 'r') as f: for line in f: line = line[:-1] img_name = os.path.basename(line) ann_path = os.path.join(ds_path + 'labels', img_name[:-3] + 'txt') ann_paths.append(ann_path) # Get NORMALISED widths and heights from annotation files *.txt ws = [] hs = [] for ann_path in ann_paths: with open(ann_path, 'r') as f: for line in f: line = line[:-1].split() w, h = [float(i) for i in line[-2:]] ws.append(w) hs.append(h) # Generate input data as [w, h] pairs ws = np.asarray(ws) hs = np.asarray(hs) x = [ws, hs] x = np.asarray(x).transpose() # Plot the [w, h] pairs in scatter graph if save_path: # New folder if os.path.exists(save_path): shutil.rmtree(save_path) os.makedirs(save_path) plt.figure(dpi=300) plt.scatter(x[:, 0], x[:, 1], label='True position') plt.xlabel('Width') plt.ylabel('Height') plt.savefig(save_path + '/True position.pdf') # Kmeans clustering kmeans = KMeans(n_clusters=k_clusters).fit(x) anchors = kmeans.cluster_centers_ anchors = anchors * img_size # Plot scatter graph of [w, h] pairs if save_path: plt.figure(dpi=300) plt.scatter(x[:, 0], x[:, 1], c=kmeans.labels_, cmap='viridis') plt.scatter(anchors[:, 0]/img_size, anchors[:, 1]/img_size, color='#a23500') # plt.title("Width-height Pair Position") plt.xlabel('Width') plt.ylabel('Height') # plt.xlim((0, 1)) # plt.ylim((0, 1)) plt.savefig(save_path + '/anchor-kmeans-ori.pdf') plt.figure(dpi=300) plt.scatter(x[:, 0]*img_size, x[:, 1]*img_size, c=kmeans.labels_, cmap='viridis') plt.scatter(anchors[:, 0], anchors[:, 1], color='#a23500') # plt.title("Width-height Pair Position (Scaled to {}*{})".format(img_size, img_size)) plt.xlabel('Width') plt.ylabel('Height') # plt.xlim((0, img_size)) # plt.ylim((0, img_size)) plt.savefig(save_path + '/anchor-kmeans.pdf') anchors = np.rint(anchors) # Plot anchors if save_path: fig, ax = plt.subplots(dpi=300) for k in range(k_clusters): rect = patches.Rectangle( (img_size/2 - anchors[k, 0]/2, img_size/2 - anchors[k, 1]/2), anchors[k, 0], anchors[k, 1], linewidth=1, edgecolor='tab:blue', facecolor='tab:blue', fill=None ) ax.add_patch(rect) ax.set_aspect(1.0) plt.axis([0, img_size, 0, img_size]) # plt.title("Anchor Boxes (Scaled to {}*{})".format(img_size, img_size)) plt.xlabel("Width") plt.ylabel("Height") plt.savefig(save_path + "/anchor-boxes-rects.pdf") # Print and save anchors anchors.sort(axis=0) anchors = anchors.astype(int) print("Anchors are: \n{}".format(anchors)) if save_path: with open(os.path.join(ds_path, 'anchors.txt'), 'w') as f: for w, h in anchors: f.write("{}, {}\n".format(w, h)) print("\nAnchors saved to {}".format(os.path.join(ds_path, 'anchors.txt'))) return anchors
7fa736435b4ee444de976ff969e29e4684d735fc
3,641,225
def fgt_set_pressureUnit(pressure_index, unit):
    """Override the default pressure unit for a single pressure channel"""
    unit_array = (c_char * (len(unit) + 1))(*([c_char_converter(c) for c in unit]))
    c_error = c_ubyte(lib.fgt_set_pressureUnit(c_uint(pressure_index), unit_array))
    return c_error.value,
8b7b75ffc598f70e7bbf3e1742a7837bf71f474f
3,641,226
import os
import pkgutil


def get_standard_dev_pkgs() -> set[str]:
    """Check the standard dev package locations for hutch-python"""
    pythonpath = os.environ.get('PYTHONPATH', '')
    if not pythonpath:
        return set()
    paths = pythonpath.split(os.pathsep)
    valid_paths = filter(not_ignored, paths)
    pkg_names = set(n.name for n in pkgutil.iter_modules(path=valid_paths)
                    if n.ispkg)
    return pkg_names
b4f6c37b6a12bc36d5d9a6230d8d2c3a67f96b80
3,641,227
def create_alert_from_slack_message(payload, time):
    """Create a new raw alert (json) from the new alert form in Slack"""
    alert_json = {}
    values = payload['view']['state']['values']
    for value in values:
        for key in values[value]:
            if key == 'severity':
                alert_json[key] = \
                    values[value][key]['selected_option']['text']['text']
            else:
                alert_json[key] = values[value][key]['value']
    alert_json['datetime'] = time
    return alert_json
1ae8b93a6b9f8bd7532ac193cb6dfde58bf8d409
3,641,228
def psd(buf_in, buf_out):
    """
    Perform discrete fourier transforms using the FFTW library and use it to get
    the power spectral density. FFTW optimizes the fft algorithm based on the size
    of the arrays, with SIMD parallelized commands. This optimization requires
    initialization, so this is a factory function that returns a numba gufunc that
    performs the FFT. FFTW works on fixed memory buffers, so you must tell it what
    memory to use ahead of time. When using this with ProcessingChain, to ensure
    the correct buffers are used call ProcessingChain.get_variable('var_name') to
    give it the internal memory buffer directly (with raw_to_dsp, you can just give
    it the name and it will automatically happen!). The possible dtypes for the
    input/outputs are:
    - complex64 (size n) -> float32/float (size n)
    - complex128 (size n) -> float64/double (size n)
    - complex256/clongdouble (size n) -> float128/longdouble (size n)
    - float32/float (size n) -> float32/float (size n/2+1)
    - float64/double (size n) -> float64/double (size n/2+1)
    - float128/longdouble (size n) -> float128/longdouble (size n/2+1)
    """
    # build intermediate array for the dft, which will be abs'd to get the PSD
    buf_dft = np.ndarray(buf_out.shape, np.dtype('complex' + str(buf_out.dtype.itemsize * 16)))
    try:
        dft_fun = FFTW(buf_in, buf_dft, axes=(-1,), direction='FFTW_FORWARD')
    except ValueError:
        raise ValueError("""Incompatible array types/shapes. Allowed:
        - complex64 (size n) -> float32/float (size n)
        - complex128 (size n) -> float64/double (size n)
        - complex256/clongdouble (size n) -> float128/longdouble (size n)
        - float32/float (size n) -> float32/float (size n/2+1)
        - float64/double (size n) -> float64/double (size n/2+1)
        - float128/longdouble (size n) -> float128/longdouble (size n/2+1)""")

    typesig = 'void(' + str(buf_in.dtype) + '[:, :], ' + str(buf_out.dtype) + '[:, :])'
    sizesig = '(m, n)->(m, n)' if buf_in.shape == buf_out.shape else '(m, n),(m, l)'

    @guvectorize([typesig], sizesig, forceobj=True)
    def psd(wf_in, psd_out):
        dft_fun(wf_in, buf_dft)
        np.abs(buf_dft, psd_out)

    return psd
9573935fd0e80e3e1a53237334a46f21d94984ab
3,641,229
def get_cell_ids(num_celltypes=39):
    """get valid cell ids by removing cell types with missing data.

    Return:
        A cell id list.
    """
    missing_ids = [8, 23, 25, 30, 32, 33, 34, 35, 38, 39, 17]
    return [item for item in list(range(1, num_celltypes + 1)) if item not in missing_ids]
a7c8f881ad62af9c4287cd50b9b01118f724c4f8
3,641,230
def limit_data():
    """Slice data by dolphot values and recovered stars in two filters"""
    fmt = '{:s}_{:s}'
    filter1, filter2 = filters.value.split(',')
    selected = data[
        (np.abs(data[fmt.format(filter1, 'VEGA')]) <= 60) &
        (np.abs(data[fmt.format(filter2, 'VEGA')]) <= 60) &
        (data[fmt.format(filter1, 'SNR')] <= snr.value[1]) &
        (data[fmt.format(filter1, 'SNR')] >= snr.value[0]) &
        (data[fmt.format(filter1, 'SHARP')] <= shp.value[1]) &
        (data[fmt.format(filter1, 'SHARP')] >= shp.value[0]) &
        (data[fmt.format(filter1, 'CROWD')] <= cwd.value[1]) &
        (data[fmt.format(filter1, 'CROWD')] >= cwd.value[0]) &
        (data[fmt.format(filter1, 'ROUND')] <= rnd.value[1]) &
        (data[fmt.format(filter1, 'ROUND')] >= rnd.value[0]) &
        (data[fmt.format(filter1, 'ERR')] <= err.value[1]) &
        (data[fmt.format(filter1, 'ERR')] >= err.value[0]) &
        (data[fmt.format(filter1, 'CHI')] <= chi.value[1]) &
        (data[fmt.format(filter1, 'CHI')] >= chi.value[0]) &
        (data[fmt.format(filter2, 'SNR')] <= snr.value[1]) &
        (data[fmt.format(filter2, 'SNR')] >= snr.value[0]) &
        (data[fmt.format(filter2, 'SHARP')] <= shp.value[1]) &
        (data[fmt.format(filter2, 'SHARP')] >= shp.value[0]) &
        (data[fmt.format(filter2, 'CROWD')] <= cwd.value[1]) &
        (data[fmt.format(filter2, 'CROWD')] >= cwd.value[0]) &
        (data[fmt.format(filter2, 'ROUND')] <= rnd.value[1]) &
        (data[fmt.format(filter2, 'ROUND')] >= rnd.value[0]) &
        (data[fmt.format(filter2, 'ERR')] <= err.value[1]) &
        (data[fmt.format(filter2, 'ERR')] >= err.value[0]) &
        (data[fmt.format(filter2, 'CHI')] <= chi.value[1]) &
        (data[fmt.format(filter2, 'CHI')] >= chi.value[0])]
    return selected
785d027c13a05b97f2b98526dd0762e95e4e0fd6
3,641,231
import os
import re
from glob import glob


def purge_versions(path, suffix, num_keep, reverse=False):
    """
    Purge file versions created by get_versioned_path.

    Purge specified quantity in normal or reverse sequence.
    """
    (base, ext) = os.path.splitext(path)
    re_strip_version = re.compile('(.*)-%s(-[0-9]*)?' % suffix)
    matched = re_strip_version.match(base)
    if matched:
        base = matched.group(1)
    versions = [version for version in glob('%s-%s*%s' % (base, suffix, ext))]
    versions.sort(key=_get_version, reverse=reverse)
    num_purge = len(versions) - num_keep
    if num_purge > len(versions):
        num_purge = 0
    if num_purge > 0:
        for version_path in versions[:num_purge]:
            os.remove(version_path)
    return num_purge
bf3c99b4a4dae596fec795f435387269bdfa9261
3,641,232
from typing import Dict from typing import Optional from typing import Callable def make_valance_getter( lexicon: Dict[str, float], lemmatize: bool = True, lowercase: bool = True, cap_differential: Optional[float] = C_INCR, ) -> Callable[[Token], float]: """Creates a token getter which return the valence (sentiment) of a token including the capitalization of the token. Args: lexicon (Dict[str, float]): The valence scores of the tokens. lemmatize (bool, optional): Should it look up in the lexicon (and intensifiers) using the lemma? Defaults to True. lowercase (bool, optional): Should it look up in the lexicon (and intensifiers) using the lowercased word? Defaults to True. cap_differential (Optional[float], optional): Capitalization differential, which is added to the valence of the score it is emphasized using all caps. Defaults to 0.733, an emperically derived constant (Hutto and Gilbert, 2014). If None it will not be used. Returns: Callable[[Token], float]: The getter function """ t_getter = make_txt_getter(lemmatize, lowercase) def lemma_valence_getter(token: Token) -> float: valence = 0 t = t_getter(token) if (t in lexicon) and not ( Token.has_extension("intensifier") and token._.intensifier ): # if token isn't a intensifier return lexicon[t] return 0.0 def cap_diff_valence_getter(token: Token) -> float: valence = token._.raw_valence if token.is_upper and token.sent._.is_cap_diff: if valence > 0: valence += cap_differential elif valence < 0: valence -= cap_differential return valence if cap_differential: if not Token.has_extension("raw_valence"): Token.set_extension("raw_valence", getter=lemma_valence_getter) if not Span.has_extension("is_cap_diff"): Span.set_extension("is_cap_diff", getter=allcap_differential_getter) return cap_diff_valence_getter return lemma_valence_getter
825e8bf624240e3628537dbcfc6a09af2d54cd83
3,641,233
import re


def proper_units(text: str) -> str:
    """
    Function for changing units to a better form.

    Args:
        text (str): text to check.

    Returns:
        str: reformatted text with better units.
    """
    conv = {
        r"degK": r"K",
        r"degC": r"$^{\circ}$C",
        r"degrees\_celsius": r"$^{\circ}$C",
        r"degrees\_north": r"$^{\circ}$N",
        r"degrees\_east": r"$^{\circ}$E",
        r"degrees\_west": r"$^{\circ}$W",
        r"I metric": r"$\mathcal{I}$--metric",
    }
    regex = re.compile(
        "|".join(
            re.escape(key) for key in sorted(conv.keys(), key=lambda item: -len(item))
        )
    )
    return regex.sub(lambda match: conv[match.group()], text)
5113d227db1a75ec8fa407c5f9edd5a897960d82
3,641,234
import re
from datetime import datetime, date


def coerce_number(value, convert=float):
    """Convert a database field value to a numeric type."""
    pattern = re.compile(r'^\d{4}(-\d\d){2}')
    format = '%Y-%m-%d %H:%M:%S'
    if isinstance(value, basestring) and pattern.match(value):
        # Convert the date/time string to a datetime object first
        try:
            mask = format[:len(value) - 2]
            value = datetime.strptime(value, mask)
        except ValueError:
            pass
    if isinstance(value, date):
        value = value.strftime('%s')
    return convert(value)
a36b3b8e814d722d6814a3306c692a8c7cbe28a5
3,641,235
def create_credential_resolver():
    """Create a credentials resolver for Localstack."""
    env_provider = botocore.credentials.EnvProvider()
    default = DefaultCredentialProvider()
    resolver = botocore.credentials.CredentialResolver(
        providers=[env_provider, default]
    )
    return resolver
36426521d5928aec1cb7c01308afe3d60c3f9959
3,641,236
def does_algorithm_implementation_have_capabilities_to_execute_parameter(parameter_kisao_id, algorithm_specs):
    """ Determine if an implementation of an algorithm has the capabilities to execute an algorithm parameter

    Args:
        parameter_kisao_id (:obj:`str`): KiSAO id for an algorithm parameter
        algorithm_specs (:obj:`dict` with schema ``https://api.biosimulators.org/openapi.json#/components/schemas/Algorithm``): specifications of the implementation of an algorithm

    Returns:
        :obj:`bool`: whether the implementation of the algorithm has the capabilities to execute the SED parameter
    """
    for parameter_specs in algorithm_specs['parameters']:
        if parameter_specs['kisaoId']['id'] == parameter_kisao_id:
            return True
    return False
653712ae621bd014547e04009243cefe4c9eb8e1
3,641,237
def main():
    """
    This method allows the script to be run in stand alone mode.

    @return Exit code from running the script
    """
    record = Record()
    result = record.Run()
    return result
5460a32b9202c133da9ca109f5f2784fe21d7ee2
3,641,238
def stamp_pixcov_from_theory(N, cmb2d_TEB, n2d_IQU=0., beam2d=1., iau=False, return_pow=False):
    """Return the pixel covariance for a stamp N pixels across given the 2D IQU CMB power spectrum,
    2D beam template and 2D IQU noise power spectrum.
    """
    n2d = n2d_IQU
    cmb2d = cmb2d_TEB
    assert cmb2d.ndim == 4
    ncomp = cmb2d.shape[0]
    assert cmb2d.shape[1] == ncomp
    assert ncomp == 3 or ncomp == 1

    wcs = cmb2d.wcs
    shape = cmb2d.shape[-2:]

    if ncomp == 3:
        cmb2d = rotate_pol_power(shape, wcs, cmb2d, iau=iau, inverse=True)
    p2d = cmb2d * beam2d**2. + n2d
    if not return_pow:
        return fcov_to_rcorr(shape, wcs, p2d, N)
    return fcov_to_rcorr(shape, wcs, p2d, N), cmb2d
1ad8d5c2925f5e7ab5636348cbedbed1383c2963
3,641,239
def make_data_parallel(module, expose_methods=None):
    """Wraps `nn.Module object` into `nn.DataParallel` and links methods whose
    name is listed in `expose_methods`
    """
    dp_module = nn.DataParallel(module)
    if expose_methods is None:
        if hasattr(module, 'expose_methods'):
            expose_methods = module.expose_methods
    if expose_methods is not None:
        for mt in expose_methods:
            setattr(dp_module, mt, getattr(dp_module.module, mt))
    return dp_module
9992b8980f2cdec22e13f6805b4d02d3694c4b4a
3,641,240
def model_creator(model_dict, X_train, y_train, rd=None, rev=None):
    """Returns a SVM classifier"""
    # Load model based on model_dict
    clf = model_loader(model_dict, rd, rev)
    # If model does not exist, train a new SVM
    if clf is None:
        clf = model_trainer(model_dict, X_train, y_train, rd, rev)
    return clf
6f962c898167d1466b80a074aa7289ff26b0c3e2
3,641,241
import torch
import torch.nn.functional as F


def bert_predict(model, loader):
    """Perform a forward pass on the trained BERT model to predict probabilities
    on the test set.
    """
    # Put the model into the evaluation mode. The dropout layers are disabled
    # during the test time.
    model.eval()

    all_logits = []

    # For each batch in our test set...
    for batch in loader:
        # Load batch to GPU
        b_input_ids, b_attn_mask = tuple(t.to(device) for t in batch)[:2]

        # Compute logits
        with torch.no_grad():
            logits = model(b_input_ids, b_attn_mask)
        all_logits.append(logits)

    # Concatenate logits from each batch
    all_logits = torch.cat(all_logits, dim=0)

    # Apply softmax to calculate probabilities
    probs = F.softmax(all_logits, dim=1).cpu().numpy()

    return probs
602e219ce3fbed8afb86d11daf06ab09efe9c1b3
3,641,242
def eval_input_fn(training_dir, params):
    """Returns input function that feeds the model during evaluation"""
    return _input_fn('eval')
0bb40833dee0e7564d166b7aabb27a54d61cdf2d
3,641,243
def GNIs(features, labels, mode, params, config): """Builds the model function for use in an estimator. Arguments: features: The input features for the estimator. labels: The labels, unused here. mode: Signifies whether it is train or test or predict. params: Some hyperparameters as a dictionary. config: The RunConfig, unused here. Returns: EstimatorSpec: A tf.estimator.EstimatorSpec instance. """ del config N, H = params["N"], params["H"] n_samples = params["n_samples"] params["non_targeted_layers"] = [] if params["input_inject"]: params["non_targeted_layers"] = list(range(1, N + 1)) params["non_targeted_layers"] += [N + 1] image_tile_summary("input", features, rows=1, cols=16) # --- Ensure input data is flat features = tf.reshape(features, (-1, np.prod(params['image_shape']))) features = tf.cast(features, dtype=tf.float32) if labels is not None: labels = tf.cast(labels, dtype=tf.float32) else: labels = tf.ones_like(features[:, :10], dtype=None) B = int_shape(labels)[0] n_output = int_shape(labels)[-1] if params['activation'] != 'linear': activation = getattr(tf.nn, params['activation']) else: activation = None # --- Make discriminator if params["disc_type"] == 'mlp': mlp = make_mlp(activation, np.prod(params['image_shape']), N, H, n_output) if params["disc_type"] == 'convnet': mlp = make_convnet(activation, params['image_shape'], n_output) if params["disc_type"] == 'vgg': mlp = make_vgg13(activation, params['image_shape'], n_output) # --- Retrieve intermediate activations, and layer output # --- we don't want to mask the final layer so activations doesn't include the output layer p_phi_y = mlp(features) sel_layer_shapes = [p_phi_y['layer_shapes'][i] for i in range(N + 1)] # --- Get Predictions using log(p(y|x)) preds = p_phi_y['activations'][-1] # --- Classification loss, log(p(y|x)) if params["loss"] == 'cross_entropy': loss = cross_entropy(labels, preds) pred_class = tf.argmax(input=preds, axis=-1) true_class = tf.argmax(input=labels, axis=-1) acc = tf.cast(tf.equal(pred_class, true_class), tf.float32) tf.compat.v1.summary.scalar("accuracy", tf.reduce_mean(acc)) elif params["loss"] == 'mse': loss = square_error(labels, preds) global_step = tf.compat.v1.train.get_or_create_global_step() p_phi_y_noisy = replace_mask_layer( features, p_phi_y, non_targeted_layers=params['non_targeted_layers'], var=params["var"], n_samples=n_samples, mode=params["noise_mode"]) preds_noisy = p_phi_y_noisy['activations'][-1] # --- Classification loss, log(p(y|x)) if params["loss"] == 'cross_entropy': noisy_loss = cross_entropy(labels, preds_noisy) elif params["loss"] == 'mse': noisy_loss = square_error(labels, preds_noisy) optimizer = tf.compat.v1.train.GradientDescentOptimizer( params["learning_rate"]) gradients, variables = [], [] tf.compat.v1.summary.scalar("learning_rate", params["learning_rate"]) tf.compat.v1.summary.scalar("batch_size", B) # --- Enumerate over activation layers, zip automatically removes final # --- logit layer layers = [ l for l in p_phi_y['net'].layers if ('dense' in l.name or 'conv' in l.name) ] noises = [ tf.reshape(n, (B, n_samples, -1)) for n in p_phi_y_noisy['noise'][:-1] ] weights = [layers[i].trainable_weights[0] for i in range(N + 1)] acts = p_phi_y['activations'][:-1] Js = [ tf.reshape(batch_jacobian(preds, a, use_pfor=True), (B, -1, n_output)) for a in acts ] print(Js) G, C, H = calc_taylor_expansion(Js, loss, preds, noises, B, n_samples) EC = calc_tikhonov_reg(Js, acts, preds, params["noise_mode"], params["var"], params["loss"]) H_sig = heavy_tail_variance(Js, loss, preds) 
l_noise = 0 if params["noise_type"] is None: noisy_loss_estimate = loss elif params["noise_type"] == 'input': noisy_loss_estimate = noisy_loss elif 'full' in params["noise_type"]: # --- This is the Gaussian stuff assert n_samples == 1 l_noise += H + G + C noisy_loss_estimate = loss + l_noise elif 'marginal' in params["noise_type"]: # --- Don't ever noise final layer assert n_samples == 1 l_noise = EC if 'H' in params["noise_type"]: l_noise += H if 'C' in params["noise_type"]: # alpha, beta, sigma, mu = tf.py_func( # estimate_all_params, # inp=[(C - EC)], # Tout=[tf.float32, tf.float32, tf.float32, tf.float32]) # # tf.compat.v1.summary.scalar('C/alpha', alpha) # tf.compat.v1.summary.scalar('C/beta', beta) # tf.compat.v1.summary.scalar('C/sigma', sigma) # tf.compat.v1.summary.scalar('C/mu', mu) # tf.compat.v1.summary.scalar('C', tf.reduce_mean(C - EC)) # tf.compat.v1.summary.histogram('C', C) l_noise += (C - EC) if 'G' in params["noise_type"]: l_noise += G noisy_loss_estimate = loss + l_noise actual_noise = tf.reduce_mean(noisy_loss - loss) estimated_noise = tf.reduce_mean(noisy_loss_estimate - loss) tf.compat.v1.summary.scalar('loss/actual_noise', actual_noise) tf.compat.v1.summary.scalar('loss/estimated_noise', estimated_noise) tf.compat.v1.summary.scalar("loss/noisy_" + params["loss"], tf.reduce_mean(noisy_loss)) tf.compat.v1.summary.scalar("loss/og_" + params["loss"], tf.reduce_mean(loss)) noise_err = tf.reduce_mean(estimated_noise - actual_noise) tf.compat.v1.summary.scalar( 'loss/noise_est_pe', tf.abs(noise_err / tf.reduce_mean(actual_noise + 1e-8))) tf.compat.v1.summary.scalar('loss/noise_est_mse', tf.abs(tf.reduce_mean(noise_err**2))) loss_err = tf.reduce_mean(noisy_loss_estimate - noisy_loss) tf.compat.v1.summary.scalar( 'loss/loss_est_pe', tf.abs(loss_err / tf.reduce_mean(noisy_loss + 1e-8))) tf.compat.v1.summary.scalar('loss/loss_est_mse', tf.abs(tf.reduce_mean(loss_err**2))) if params["L2"] > 0: vars = tf.trainable_variables() l2_reg = tf.add_n([tf.nn.l2_loss(v) for v in vars]) * params["L2"] noisy_loss_estimate += l2_reg tf.compat.v1.summary.scalar("loss/L2_reg", l2_reg) loss_err = tf.reduce_mean(noisy_loss_estimate - noisy_loss) # tf.compat.v1.summary.image('activations_covariance', activation_covariance) # g_noise = for i, w in enumerate(weights): layer_name = "layer_" + str(i) num_params = np.prod(int_shape(w)) a = p_phi_y['activations'][i] noisy_a = p_phi_y_noisy['activations'][i] inj_noise = noisy_a - a print(noisy_a, a) # --- Display in tensorboard -- Injected noise stats tf.compat.v1.summary.histogram(layer_name + '/injected_noise', inj_noise) n_neurons = int_shape(a)[1] tf.compat.v1.summary.histogram(layer_name + '/w', w) corr = tfp.stats.correlation(a) tf.compat.v1.summary.scalar(layer_name + '/corr', tf.reduce_mean(corr)) sparsity = tf.reduce_sum(tf.cast(a <= 1e-6, tf.float32)) # tf.compat.v1.summary.scalar(layer_name + '/lifetime_sparsity', # sparsity / B) tf.compat.v1.summary.scalar(layer_name + '/population_sparsity', sparsity / (B * n_neurons)) # --- Retrieve the noise of the gradient of each layer # --- = noisy gradients - gradients, this corresponds to # --- n_t * gradients where n_t is our noise matrix # --- W gradients og_W_n = tf.gradients([tf.reduce_mean(noisy_loss)], [w])[0] g_W_n = tf.gradients([tf.reduce_mean(noisy_loss_estimate)], [w])[0] g = tf.gradients(tf.reduce_mean(loss), w)[0] err = -g_W_n + og_W_n g_noise = g_W_n - g tf.compat.v1.summary.scalar(layer_name + '/mean_grad_noise', tf.reduce_mean(g_noise)) tf.compat.v1.summary.histogram(layer_name + 
'/grad_noise', g_noise) tf.compat.v1.summary.scalar(layer_name + '/weights_l2/', tf.reduce_mean(tf.norm(w))) tf.compat.v1.summary.scalar(layer_name + '/grad_est_mse', tf.reduce_mean((og_W_n - g_W_n)**2)) tf.compat.v1.summary.scalar(layer_name + '/grad_est_pe', tf.reduce_mean((-og_W_n + g_W_n) / og_W_n)) gradients.extend([g_W_n]) variables.extend([w]) if i > 0 and params['calc_hessian']: # --- Number of parameters does not include batch_size hessians = trace_hessian([noisy_loss], weights) h_trace = tf.reduce_sum(tf.concat(hessians, axis=1)) / (B * n_samples) for i, h in enumerate(hessians): layer_name = "layer_" + str(i) tf.compat.v1.summary.scalar(layer_name + '/H_trace', tf.reduce_sum(h) / (B * n_samples)) tf.compat.v1.summary.scalar('network/H_trace', h_trace) # --- Sum all them losses loss = tf.reduce_mean(loss) noisy_loss = tf.reduce_mean(noisy_loss) train_step = optimizer.apply_gradients(zip(gradients, variables), global_step=global_step) if mode == tf.estimator.ModeKeys.PREDICT: eval_metrics = {} predictions = { 'preds': tf.nn.softmax(p_phi_y['activations'][-1], axis=1) } predictions['GCH'] = G + C + H - EC for i, J in enumerate(Js): predictions['J' + str(i)] = J # for i, w in enumerate(weights): # predictions['dGCH' + str(i)] = tf.gradients( # [predictions['GCH']], [w])[0] if params['calc_hessian']: # --- Number of parameters does not include batch_size hessians = trace_hessian([noisy_loss], weights[1:3]) h_trace = tf.reduce_sum(tf.concat(hessians, axis=1)) / (B * n_samples) predictions['h_trace'] = h_trace else: predictions = {} eval_metrics = { "loss/og": tf.compat.v1.metrics.mean(loss), } if params["loss"] == 'cross_entropy': eval_metrics["accuracy"] = tf.compat.v1.metrics.mean(acc) return tf.estimator.EstimatorSpec(mode=mode, loss=loss, predictions=predictions, train_op=train_step, eval_metric_ops=eval_metrics)
724b32981a3b79c6725e4a7c6add9ab0f5046647
3,641,244
def b58decode(v, length):
    """ decode v into a string of len bytes """
    long_value = 0L
    for (i, c) in enumerate(v[::-1]):
        long_value += __b58chars.find(c) * (__b58base**i)

    result = ''
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        result = chr(mod) + result
        long_value = div
    result = chr(long_value) + result

    nPad = 0
    for c in v:
        if c == __b58chars[0]:
            nPad += 1
        else:
            break

    result = chr(0)*nPad + result
    if length is not None and len(result) != length:
        return None

    return result
3a9d1d5da02c2174bcf0220de705a92a91cd0b18
3,641,245
import os
import sys
import json


def create_sample_meta_info(args):
    """ Load and parse samples json for templating """
    if not os.path.exists(args.samples_json):
        print('could not find file: {}!'.format(args.samples_json))
        sys.exit(1)
    sample_info = json.load(open(args.samples_json))
    for i, sample_data in enumerate(sample_info):
        cols = set(SAMPLE_META_COLS).difference(set(sample_data.keys()))
        for col in cols:
            sample_info[i][col] = ""
    sample_info = {i['cmoSampleName']: i for i in sample_info}
    return sample_info
ed12624f608125d70897954357804f6c6cc099e8
3,641,246
def has_remove_arg(args):
    """
    Checks if remove argument exists

    :param args: Argument list
    :return: True if remove argument is found, False otherwise
    """
    if "remove" in args:
        return True
    return False
9b07fe70cecfbdf6e6e2274e5b3e715f903331c7
3,641,247
def supported_locales(prefix, parsed_args, **kwargs):
    """
    Returns all supported locales.

    :param prefix: The prefix text of the last word before the cursor on the command line.
    :param parsed_args: The result of argument parsing so far.
    :param kwargs: keyword arguments.
    :returns list: list of all supported locales.
    """
    return constants.locales()
db6f73699120dc4b784b1f46ed7c9fbe4a3cc9a9
3,641,248
def generate_tool_panel_dict_for_tool_config(guid, tool_config, tool_sections=None):
    """
    Create a dictionary of the following type for a single tool config file name.
    The intent is to call this method for every tool config in a repository and
    append each of these as entries to a tool panel dictionary for the repository.
    This allows for each tool to be loaded into a different section in the tool panel.

    {<Tool guid> :
       [{ tool_config : <tool_config_file>,
          id: <ToolSection id>,
          version : <ToolSection version>,
          name : <ToolSection name>}]}
    """
    tool_panel_dict = {}
    file_name = suc.strip_path(tool_config)
    tool_section_dicts = generate_tool_section_dicts(tool_config=file_name, tool_sections=tool_sections)
    tool_panel_dict[guid] = tool_section_dicts
    return tool_panel_dict
8e976cf4d54212d0477ef4ae7d4fb1dd532363fa
3,641,249
def get_tmp_dir():
    """get or create the tmp dir corresponding to each process"""
    tmp_dir = result_dir / "tmp"
    tmp_dir.mkdir(exist_ok=True)
    return tmp_dir
406962c5783dff1d23523bd5bd258b7bb18ed149
3,641,250
def get_logs(job_id, user, index):
    """get logs"""
    return instance().get_logs(job_id=job_id, user=user, log_index=int(index))
f2d959835c34ffec475d5e9da18e74feef13b5d9
3,641,251
def repeat_as_list(x: TensorType, n: int):
    """
    :param x: Array/Tensor to be repeated
    :param n: Integer with the number of repetitions
    :return: List of n repetitions of Tensor x
    """
    return [x for _ in range(n)]
cb4924909d93899a555c11bd70950c6cbb77cf85
3,641,252
def transition(x, concat_axis, nb_filter, dropout_rate=None, weight_decay=1E-4):
    """Apply BatchNorm, Relu, 1x1 Conv2D, optional dropout and AveragePooling2D

    :parameter x: keras model
    :parameter concat_axis: int -- index of concatenate axis
    :parameter nb_filter: int -- number of filters
    :parameter dropout_rate: int -- dropout rate
    :parameter weight_decay: int -- weight decay factor
    :returns: model
    :return type: keras model, after applying batch_norm, relu-conv, dropout, avgpool
    """
    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1),
               kernel_initializer="he_uniform",
               padding="same",
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    return x
30d89aca3a330dc6b04b4c9ee21a8620c8ba69f1
3,641,253
def _bug_data_diff_plot(
    project_name: str, project_repo: pygit2.Repository,
    bugs_left: tp.FrozenSet[PygitBug], bugs_right: tp.FrozenSet[PygitBug]
) -> gob.Figure:
    """Creates a chord diagram representing the diff between two sets of bugs as
    relation between introducing/fixing commits."""
    commits_to_nodes_map = _map_commits_to_nodes(project_repo)
    commit_occurrences: tp.Dict[pygit2.Commit, DiffOccurrence] = {}
    commit_count = len(commits_to_nodes_map.keys())
    commit_coordinates = _compute_node_placement(commit_count)

    for commit in project_repo.walk(
        project_repo.head.target.hex, pygit2.GIT_SORT_TIME
    ):
        commit_occurrences[commit] = DiffOccurrence.NONE

    lines: tp.List[gob.Scatter] = _generate_diff_line_data(
        _diff_raw_bugs(bugs_left, bugs_right), commits_to_nodes_map,
        commit_coordinates, commit_occurrences
    )

    commit_types = {
        commit: __DIFF_TO_NODE_TYPE[do] for commit, do in commit_occurrences.items()
    }
    nodes: tp.List[gob.Scatter] = _generate_node_data(
        project_repo, commit_coordinates, commits_to_nodes_map, commit_types
    )

    data = lines + nodes
    layout = _create_layout(f'szz_diff {project_name}')
    return gob.Figure(data=data, layout=layout)
c41684a622e3ff5dc0a43ba49a2e4186073a40e3
3,641,254
import torch def scale_params(cfg): """ Scale: * learning rate, * weight decay, * box_loss_gain, * cls_loss_gain, * obj_loss_gain according to: * effective batch size * DDP world size * image size * num YOLO output layers * num classes """ logger = get_logger(__name__) # Scale LR and weight decay is_ddp = cfg.sg_model.multi_gpu == MultiGPUMode.DISTRIBUTED_DATA_PARALLEL and torch.distributed.is_initialized() world_size = torch.distributed.get_world_size() if is_ddp else 1 # Scale LR and WD for DDP due to gradients being averaged between devices # Equivalent to loss * WORLD_SIZE in ultralytics cfg.training_params.initial_lr *= world_size cfg.training_params.warmup_bias_lr *= world_size cfg.training_params.optimizer_params.weight_decay /= world_size # Scale WD with a factor of [effective batch size]/64. batch_size, batch_accumulate = cfg.dataset_params.batch_size, cfg.training_params.batch_accumulate batch_size_factor = cfg.sg_model.num_devices if is_ddp else cfg.sg_model.dataset_interface.batch_size_factor effective_batch_size = batch_size * batch_size_factor * batch_accumulate cfg.training_params.optimizer_params.weight_decay *= effective_batch_size / 64. # Scale EMA beta to match Ultralytics update cfg.training_params.ema_params.beta = cfg.training_params.max_epochs * len(cfg.sg_model.train_loader) / 2000. log_msg = \ f""" IMPORTANT:\n Training with world size of {world_size}, {'DDP' if is_ddp else 'no DDP'}, effective batch size of {effective_batch_size}, scaled: * initial_lr to {cfg.training_params.initial_lr}; * warmup_bias_lr to {cfg.training_params.warmup_bias_lr}; * weight_decay to {cfg.training_params.optimizer_params.weight_decay}; * EMA beta to {cfg.training_params.ema_params.beta}; """ if cfg.training_params.loss == 'yolo_v5_loss': # Scale loss gains model = cfg.sg_model.net model = model.module if hasattr(model, 'module') else model num_levels = model._head._modules_list[-1].detection_layers_num train_image_size = cfg.dataset_params.train_image_size num_branches_norm = 3. / num_levels num_classes_norm = len(cfg.sg_model.classes) / 80. image_size_norm = train_image_size / 640. cfg.training_params.criterion_params.box_loss_gain *= num_branches_norm cfg.training_params.criterion_params.cls_loss_gain *= num_classes_norm * num_branches_norm cfg.training_params.criterion_params.obj_loss_gain *= image_size_norm ** 2 * num_branches_norm log_msg += \ f""" * box_loss_gain to {cfg.training_params.criterion_params.box_loss_gain}; * cls_loss_gain to {cfg.training_params.criterion_params.cls_loss_gain}; * obj_loss_gain to {cfg.training_params.criterion_params.obj_loss_gain}; """ logger.info(log_msg) return cfg
a74472a5c5ce2a6b83eab0467c66b468226c222d
3,641,255
def get_model(args):
    """
    Load model and move tensors to a given device.
    """
    if args.model == "lstm":
        model = LSTM(args)
    if args.model == "lstmattn":
        model = LSTMATTN(args)
    if args.model == "bert":
        model = Bert(args)
    if args.model == "lqt":
        model = LastQuery(args)

    model.to(args.device)

    return model
131a4e3d8832d9b0aa099c55f7a8851d3a8907ef
3,641,256
def convert_to_boolean(value):
    """Turn strings to bools if they look like them

    Truthy things should be True
    >>> for truthy in ['true', 'on', 'yes', '1']:
    ...   assert convert_to_boolean(truthy) == True

    Falsey things should be False
    >>> for falsey in ['false', 'off', 'no', '0']:
    ...   assert convert_to_boolean(falsey) == False

    Other things should be unchanged
    >>> for value in ['falsey', 'other', True, 0]:
    ...   assert convert_to_boolean(value) == value
    """
    if isinstance(value, str):
        if value.lower() in ['t', 'true', 'on', 'yes', '1']:
            return True
        elif value.lower() in ['f', 'false', 'off', 'no', '0']:
            return False

    return value
7cbf7a8fd601904c7aa8b685f6a3b3f5eaaa5c51
3,641,257
def getSampleBandPoints(image, region, **kwargs):
    """
    Function to perform sampling of an image over a given region,
    using ee.Image.sample(image, region, **kwargs)

    Args:
        image (ee.Image): an image to sample
        region (ee.Geometry): the geometry over which to sample

    Returns:
        An ee.FeatureCollection of sampled points along with coordinates
    """
    dargs = {
        'numPixels': 1000,
        'region': region
    }
    dargs.update(kwargs)
    sample = image.sample(**dargs)
    return sample
4cfbc3c180b805abe52c718f81cc16c409693922
3,641,258
def updateRIPCount(idx, RIPtracker, addRev=0, addFwd=0, addNonRIP=0):
    """Add observed RIP events to tracker by row."""
    TallyRev = RIPtracker[idx].revRIPcount + addRev
    TallyFwd = RIPtracker[idx].RIPcount + addFwd
    TallyNonRIP = RIPtracker[idx].nonRIPcount + addNonRIP
    RIPtracker[idx] = RIPtracker[idx]._replace(
        revRIPcount=TallyRev, RIPcount=TallyFwd, nonRIPcount=TallyNonRIP)
    return RIPtracker
7f83c547d9acd6c697174fffa1ccb3aec6e91a24
3,641,259
def serialize(obj):
    """ Return a JSON-serializable representation of an object """
    cls = obj.__class__
    cls_name = cls.__name__
    module_name = cls.__module__
    serializer = None
    if hasattr(obj, "to_serializable"):
        # The object implements its own serialization
        s = obj.to_serializable()
    elif hasattr(obj, "__dict__"):
        # Use the object's __dict__ if it's there
        s = obj.__dict__
    else:
        # Use a custom serializer
        serializer = _serializers.get((module_name, cls_name))
        # If we don't have one, that's a problem
        assert serializer is not None
        # Apply the serializer to the object
        s = serializer[0](obj)
    # Do some sanity checks: we must be able to recreate
    # an instance of this class during de-serialization
    assert module_name and module_name != "__main__"
    assert serializer is not None or hasattr(cls, "from_serializable")
    # Return a serialization wrapper dict with enough info
    # for deserialization
    return dict(
        __cls__=cls_name,
        __module__=module_name,
        __obj__=s
    )
3fd5449922808a1e1772b3937bca6736c63df9a2
3,641,260
def download_file(url, offset=0, filename='tmp', verbosity=True):
    """
    Intended for simulating the wget linux command

    :param url: The URL for the resource to be downloaded
    :param offset: Number of bytes to be skipped
    :param filename: Name of file where the content downloaded will be stored
    :param verbosity: Boolean value that indicates the verbosity in logger
    :return: None
    """
    logger.setLevel(logging.DEBUG) if verbosity else logger.setLevel(logging.INFO)
    headers = {'Range': "bytes=%s-" % offset,
               'Accept': '*/*',
               'Connection': 'keep-alive',
               'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) '
                             'Chrome/23.0.1271.64 Safari/537.11'}
    logger.debug("Setting Range Header for HTTP Request")
    if offset != 0:
        logger.info("This download is being resumed")
    req = urllib2.Request(url, headers=headers)
    try:
        logger.debug("Opening URL")
        u = urllib2.urlopen(req)
        to_download = int(u.info().getheaders("Content-Length")[0])
        logger.debug("The program will download %s bytes" % to_download)
        f = open(filename, 'ab') if offset != 0 else open(filename, 'wb')
        logger.debug("The file is going to be downloaded with a block size of %s bytes" % BLOCK_SIZE)
        buffer_ = u.read(BLOCK_SIZE)
        downloaded = 0
        while buffer_:
            downloaded += len(buffer_)
            logger.debug("%d %3.2f%%" % (downloaded, downloaded * 100. / to_download))
            f.write(buffer_)
            buffer_ = u.read(BLOCK_SIZE)
        f.close()
        logger.info("The download has finished")
        return True
    except HTTPError, e:
        if e.code == 416:
            logger.info("This file has been downloaded already")
    except ValueError:
        logger.exception("The string %s is not a valid url" % url)
    return False
dd9a3a3c0b0e1b96d39d5929adce53f8f0c8e5c2
3,641,261
async def async_unload_entry(hass, config_entry): """Unload a config entry.""" controller_id = CONTROLLER_ID.format( host=config_entry.data[CONF_CONTROLLER][CONF_HOST], site=config_entry.data[CONF_CONTROLLER][CONF_SITE_ID] ) controller = hass.data[DOMAIN].pop(controller_id) return await controller.async_reset()
2341f49794ecd9f9824330594cf3955bca117455
3,641,262
from collections import OrderedDict import astropy.io.fits as pyfits from .. import utils import os def convert_1D_to_lists(file='j234420m4245_00615.1D.fits'): """ Convert 1D spectral data to lists suitable for putting into dataframes and sending to the databases. """ if not os.path.exists(file): print('Spectrum file not found') return False im = pyfits.open(file) obj_id = im[0].header['ID'] obj_root = im[0].header['TARGET'] if '.R30.' in file: skip_columns = ['line', 'cont'] pref = 'spec1d_r30' else: skip_columns = [] pref = 'spec1d' spectra = OrderedDict() has_spectra = False for gr in ['G102', 'G141', 'G800L']: if gr in im: has_spectra = True sp = utils.GTable.read(file, hdu=gr) prefix = '{0}_{1}_'.format(pref, gr.lower()) spd = {prefix+'id': obj_id, prefix+'root': obj_root} for c in sp.colnames: if c in skip_columns: continue spd[prefix+c] = sp[c].tolist() spectra[gr.lower()] = spd if has_spectra: return spectra else: return False
b507da9251e59e024c6f631aa412778f278afc4f
3,641,263
import operator
from itertools import combinations

import numpy as np


def get_farthest_three_shots(gps_shots):
    """Get the three shots with GPS positions that are farthest apart.

    Note: `area` is assumed to be defined elsewhere and to return the area of
    the triangle spanned by three GPS positions.
    """
    areas = {}
    for (i, j, k) in combinations(gps_shots, 3):
        areas[(i, j, k)] = area(np.array(i.metadata.gps_position),
                                np.array(j.metadata.gps_position),
                                np.array(k.metadata.gps_position))
    return max(areas.items(), key=operator.itemgetter(1))[0]
697d87549bee0a8ff3adee30ceb7b41a24f3d66b
3,641,264
from re import sub


def __parse_entry(entry_line):
    """Parse the SOFT file entry name line that starts with '^', '!' or '#'.

    :param entry_line: str -- line from SOFT file
    :returns: tuple -- type, value
    """
    if entry_line.startswith("!"):
        entry_line = sub(r"!\w*?_", '', entry_line)
    else:
        entry_line = entry_line.strip()[1:]
    try:
        entry_type, entry_name = [i.strip() for i in entry_line.split("=", 1)]
    except ValueError:
        entry_type = [i.strip() for i in entry_line.split("=", 1)][0]
        entry_name = ''
    return entry_type, entry_name
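# Worked examples (not part of the original snippet): the "!<entity>_" prefix
# is stripped before splitting on the first "=".
assert __parse_entry("!Sample_title = Control replicate 1") == ("title", "Control replicate 1")
assert __parse_entry("^SERIES = GSE1563") == ("SERIES", "GSE1563")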
1a645cb4dcaafaa4de1db7011d3ff54931b8123f
3,641,265
def _mut_insert_is_applied(original, mutated): """ Checks if mutation was caused by `mut_insert`. :param original: the pre-mutation individual :param mutated: the post-mutation individual :return: (bool, str). If mutation was caused by function, True. False otherwise. str is a message explaining why mutation is not caused by function. """ if len(list(original.primitives)) >= len(list(mutated.primitives)): return ( False, "Number of primitives should be strictly greater, was {} is {}.".format( len(list(original.primitives)), len(list(mutated.primitives)) ), ) return True, None
f19bb092e1eefc14435f5bb90a030558980fed4c
3,641,266
from typing import Dict from typing import Any def remap_ids( mapping_table: Dict[Any, int] = {}, default: int = 0, dtype: DTypes = "i" ) -> Model[InT, OutT]: """Remap string or integer inputs using a mapping table, usually as a preprocess before embeddings. The mapping table can be passed in on input, or updated after the layer has been created. The mapping table is stored in the "mapping_table" attribute. """ return Model( "remap_ids", forward, attrs={"mapping_table": mapping_table, "dtype": dtype, "default": default}, )
4380b9377930d6affac6703a0a1e656a916b45db
3,641,267
def get_text(cell): """ get stripped text from a BeautifulSoup td object""" return ''.join([x.strip() + ' ' for x in cell.findAll(text=True)]).strip()
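# Illustrative usage sketch (not part of the original snippet): shows how the
# helper flattens nested tags and whitespace inside a td cell.
from bs4 import BeautifulSoup

row = BeautifulSoup("<td>  Alice <b>Smith</b>\n</td>", "html.parser")
print(get_text(row.td))  # -> "Alice Smith"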
08037cbe5d2058206de029417f03d211d350820f
3,641,268
import argparse


def parse_args():
    """
    Parse input arguments
    """
    parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
    parser.add_argument('--dataset', dest='dataset',
                        help='training dataset',
                        default='pascal_voc', type=str)
    parser.add_argument('--net', dest='net',
                        help='vgg16, res101',
                        default='vgg16', type=str)
    parser.add_argument('--start_epoch', dest='start_epoch',
                        help='starting epoch',
                        default=1, type=int)
    parser.add_argument('--epochs', dest='max_epochs',
                        help='number of epochs to train',
                        default=20, type=int)
    parser.add_argument('--disp_interval', dest='disp_interval',
                        help='number of iterations to display',
                        default=100, type=int)
    parser.add_argument('--checkpoint_interval', dest='checkpoint_interval',
                        help='number of iterations between checkpoints',
                        default=10000, type=int)
    parser.add_argument('--save_dir', dest='save_dir',
                        help='directory to save models',
                        default="models", type=str)
    parser.add_argument('--nw', dest='num_workers',
                        help='number of workers to load data',
                        default=0, type=int)
    parser.add_argument('--cuda', dest='cuda',
                        help='whether to use CUDA',
                        action='store_true')
    parser.add_argument('--ls', dest='large_scale',
                        help='whether to use large image scale',
                        action='store_true')
    parser.add_argument('--mGPUs', dest='mGPUs',
                        help='whether to use multiple GPUs',
                        action='store_true')
    parser.add_argument('--bs', dest='batch_size',
                        help='batch_size',
                        default=1, type=int)
    parser.add_argument('--cag', dest='class_agnostic',
                        help='whether to perform class_agnostic bbox regression',
                        action='store_true')

    # config optimization
    parser.add_argument('--o', dest='optimizer',
                        help='training optimizer',
                        default="sgd", type=str)
    parser.add_argument('--lr', dest='lr',
                        help='starting learning rate',
                        default=0.001, type=float)
    parser.add_argument('--lr_decay_step', dest='lr_decay_step',
                        help='step to do learning rate decay, unit is epoch',
                        default=5, type=int)
    parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma',
                        help='learning rate decay ratio',
                        default=0.1, type=float)

    # set training session
    parser.add_argument('--s', dest='session',
                        help='training session',
                        default=1, type=int)

    # resume trained model
    parser.add_argument('--r', dest='resume',
                        help='resume checkpoint or not',
                        action='store_true')
    parser.add_argument('--checksession', dest='checksession',
                        help='checksession to load model',
                        default=1, type=int)
    parser.add_argument('--checkepoch', dest='checkepoch',
                        help='checkepoch to load model',
                        default=1, type=int)
    parser.add_argument('--checkpoint', dest='checkpoint',
                        help='checkpoint to load model',
                        default=0, type=int)

    # log and display
    parser.add_argument('--use_tfb', dest='use_tfboard',
                        help='whether to use tensorboard',
                        action='store_true')
    parser.add_argument('--name', dest='name',
                        help='name of models',
                        default="faster_rcnn_curr.pth", type=str)
    parser.add_argument('--mm', dest='mimic',
                        help='whether to perform mimicking',
                        action='store_true')
    parser.add_argument('--layers', dest='layers',
                        help='tiny network layers',
                        default=101, type=int)
    parser.add_argument('--save_model', dest='save_model',
                        help='name to save',
                        default="my_faster_rcnn_curr.pth", type=str)
    parser.add_argument('--recall', dest='evl_rec',
                        help='whether to evaluate recall',
                        action='store_true')
    parser.add_argument('--decouple', dest='decouple',
                        help='whether to use decoupled roi pooling',
                        action='store_true')
    parser.add_argument('--scale', dest='scale',
                        help='scale of sigma with respect to ROI',
                        default=1.0, type=float)

    args = parser.parse_args()
    return args
03ae21f2150fb309bcf49587588dbfa7496e91a8
3,641,269
import torch


def test_augmentation(text, text_lengths, augmentation_class):
    """
    test_augmentation augments the input text during evaluation.

    :param text: input text
    :param text_lengths: text length
    :param augmentation_class: augmentation class
    :return: tensor of augmented texts plus the original text
    """
    augmentation_text = augmentation_class.test_augment(text, text_lengths)
    augmentation_text.append(text)
    augmentation_text = torch.FloatTensor(augmentation_text).long()
    return augmentation_text
2f83ec9fa0afa110d05f05f52e85cae65a28c6f9
3,641,270
def selfintersection(linear_ring: Points):
    """
    Check whether a linear ring self-intersects.

    Does not support warp polygons.
    """
    validate.linear_ring(linear_ring)
    if len(linear_ring) == 4:
        return (
            abs(
                linear_ring[0][1] * (linear_ring[1][0] - linear_ring[2][0])
                + linear_ring[1][1] * (linear_ring[2][0] - linear_ring[0][0])
                + linear_ring[2][1] * (linear_ring[0][0] - linear_ring[1][0])
            )
            < EPSILON
        )
    lines = [[linear_ring[i], linear_ring[i + 1]] for i in range(len(linear_ring) - 1)]

    def check(lines, start=0):
        if start + 2 >= len(lines):
            return False
        l1 = lines[start]
        endIndex = len(lines) - 1 if start == 0 else len(lines)
        for i in range(start + 2, endIndex):
            l2 = lines[i]
            if intersection(*l1, *l2):
                return True
        return check(lines, start + 1)

    return check(lines)
d0b92d7796a3281a4481071f0b0666fdf79c6952
3,641,271
import math def ToMercPosition(lat_deg, num_tiles): """Calculate position of a given latitude on qt grid. LOD is log2(num_tiles) Args: lat_deg: (float) Latitude in degrees. num_tiles: (integer) Number of tiles in the qt grid. Returns: Floating point position of latitude in tiles relative to equator. """ lat_rad = lat_deg / 180.0 * math.pi y_merc = math.log(math.tan(lat_rad / 2.0 + math.pi / 4.0)) return num_tiles / 2.0 * (1 + y_merc / math.pi)
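# Worked check (not part of the original snippet): the equator sits in the
# middle of the grid; higher latitudes map above it.
assert abs(ToMercPosition(0.0, 256) - 128.0) < 1e-9
assert ToMercPosition(45.0, 256) > 128.0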
1ae7e7b2da9ec3ee20756ef7ffa13d99485aaea7
3,641,272
import torch.nn as nn


def conv3x3(in_planes, out_planes, stride=1, dilation=1, groups=1, bias=False):
    """2D 3x3 convolution.

    Args:
        in_planes (int): number of input channels.
        out_planes (int): number of output channels.
        stride (int): stride of the operation.
        dilation (int): dilation rate of the operation.
        groups (int): number of groups in the operation.
        bias (bool): whether to add learnable bias parameter.

    Returns:
        `nn.Conv2d' instance.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=bias)
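# Illustrative usage sketch (not part of the original snippet): with kernel 3,
# padding 1 and stride 2, spatial size is halved.
import torch

layer = conv3x3(in_planes=16, out_planes=32, stride=2)
x = torch.randn(1, 16, 64, 64)
print(layer(x).shape)  # torch.Size([1, 32, 32, 32])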
d5658d81f5fbc5d196418e4e4b005dbf7d3f20ae
3,641,273
import re


def extract_current_alarm(strValue):
    """Extract alarm information from the output of the `show alarm current` command.

    Args:
        strValue (str): text displayed by `show alarm current`

    Returns:
        list: list of dicts containing the alarm information
    """
    # TODO: FIXME: extraction of the alarm values is not implemented yet;
    # `valueExpr` below is only a placeholder pattern.
    titleExpr = re.compile(r'\s*(Item Description)\s+(Code vOLT)\s+(Object)\s+(Begintime)\s+(Endtime)\s*')
    valueExpr = re.compile('???')
    lines = strValue.splitlines()
    ret = []
    titles = None
    for line in lines:
        match = titleExpr.match(line)
        if match != None:
            titles = match.groups()
        match = valueExpr.match(line)
        if match != None:
            values = match.groups()
            ret.append({})
            for title, value in zip(titles, values):
                ret[-1][title] = value
    return ret
656acd8e25af509594ae28b89110508bc7a17fcd
3,641,274
def parse_hostnames(filename, hostnames): """Parses host names from a comma-separated list or a filename. Fails if neither filename nor hostnames provided. :param filename: filename with host names (one per line) :type filename: string :param hostnames: comma-separated list of host names :type hostnames: string :rtype: list of host names """ if bool(filename) == bool(hostnames): die('Please specify either --filename or --hosts') if filename: hostnames = _parse_hostname_file(filename) elif hostnames: hostnames = _parse_hostname_list(hostnames) if not hostnames: die('No valid hosts found.') return hostnames
b3fce0f3af7f59217fd18bfce53baec87784759f
3,641,275
def ssh(host, command, stdin=None): """Run 'command' (list) on 'host' via ssh. stdin is an string to send.""" return run([*SSH_COMMAND, ssh_user_host(host), *command], stdin=stdin)
9719aef39530e285d27a2e9dd5a7ceab09f3793e
3,641,276
import six import os def _bgzip_file(finput, config, work_dir, needs_bgzip, needs_gunzip, needs_convert, data): """Handle bgzip of input file, potentially gunzipping an existing file. Handles cases where finput might be multiple files and need to be concatenated. """ if isinstance(finput, six.string_types): in_file = finput else: assert not needs_convert, "Do not yet handle quality conversion with multiple inputs" return _bgzip_multiple_files(finput, work_dir, data) out_file = os.path.join(work_dir, os.path.basename(in_file).replace(".bz2", "") + (".gz" if not in_file.endswith(".gz") else "")) if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bgzip = tools.get_bgzip_cmd(config) is_remote = objectstore.is_remote(in_file) in_file = objectstore.cl_input(in_file, unpack=needs_gunzip or needs_convert or needs_bgzip or dd.get_trim_ends(data)) if needs_convert or dd.get_trim_ends(data): in_file = fastq_convert_pipe_cl(in_file, data) if needs_gunzip and not (needs_convert or dd.get_trim_ends(data)): if in_file.endswith(".bz2"): gunzip_cmd = "bunzip2 -c {in_file} |".format(**locals()) else: gunzip_cmd = "gunzip -c {in_file} |".format(**locals()) bgzip_in = "/dev/stdin" else: gunzip_cmd = "" bgzip_in = in_file if needs_bgzip: do.run("{gunzip_cmd} {bgzip} -c {bgzip_in} > {tx_out_file}".format(**locals()), "bgzip input file") elif is_remote: bgzip = "| bgzip -c" if (needs_convert or dd.get_trim_ends(data)) else "" do.run("cat {in_file} {bgzip} > {tx_out_file}".format(**locals()), "Get remote input") else: raise ValueError("Unexpected inputs: %s %s %s %s" % (in_file, needs_bgzip, needs_gunzip, needs_convert)) return out_file
7a5b30a1352c570fb4c7fedaafc916c1e185f5ae
3,641,277
def cart_step1_choose_type_of_order(request): """ This view is not login required because we want to display some summary of ticket prices here as well. """ special_fares = get_available_fares_for_type(TicketType.other) context = {"show_special": bool(special_fares)} return TemplateResponse( request, "conference/cart/step_1_choose_type_of_order.html", context )
869941df96c750c0049f6ab5e50e5fad17679af2
3,641,278
def buildAndTrainModel(model, learningRate, batchSize, epochs, trainingData, validationData, testingData, trainingLabels, validationLabels, testingLabels, MODEL_NAME, isPrintModel=True): """Take the model and model parameters, build and train the model""" # Build and compile model # To use other optimizers, refer to: https://keras.io/optimizers/ # Please do not change the loss function optimizer = tf.keras.optimizers.Adam(lr=learningRate) model.compile(optimizer=optimizer, loss=tf.keras.losses.MeanSquaredError()) if isPrintModel: print(model.summary()) for epoch in range(0, epochs): model.fit(trainingData, trainingLabels, epochs=1, verbose=0, batch_size=batchSize, shuffle=False) # Evaluate model valLoss = model.evaluate(validationData, validationLabels, verbose=False) #model.save('Results/StructuredBinary/{}/epoch_{}'.format(filename,epoch)) ## get metrics predictions = model.predict(testingData) MSE, MAE, MAPE, RMSE, PR = getMetrics(testingLabels,predictions) MeanSquaredError.append(MSE) RootMeanSquaredError.append(RMSE) MeanAbsoluteError.append(MAE) MeanAbsolutePercentageError.append(MAPE) PearsonR.append(PR) ValMSE.append(valLoss) Epoch.append(epoch) if valLoss <= min(ValMSE): max_predictions = predictions return MeanSquaredError, RootMeanSquaredError, MeanAbsoluteError, MeanAbsolutePercentageError, ValMSE, PearsonR, Epoch, max_predictions
9e9170ccb817be6ec3908c16390d1afe4f96b2e7
3,641,279
def vflip():
    """Toggle vertical flipping of camera image."""
    # Catch ajax request with form data
    vflip_val = 'error'
    if request.method == 'POST':
        vflip_val = request.form.get('vflip')
        if vflip_val is not None:
            app.logger.info('Form vflip submitted: %s', vflip_val)
            camera.set_vflip(vflip_val == 'true')
    return {'vflip': vflip_val}
064280aadbdc53783d983caa66a52d294732be9e
3,641,280
def getGoalHistogramData(responses):
    """
    Goal Completion histogram chart on project detail page.

    Return: {obj} Counts and % of each Goal Completion rating across given responses.
    """
    try:
        snapshotResponses = responses.exclude(Q(primary_goal__isnull=True) | Q(primary_goal__name=''))
        respsnapshotResponsesCount = snapshotResponses.count()

        # Get unique list of primary goals and count each primary goal occurrence.
        #  Then clean up names and change counts to percents
        goals = list(snapshotResponses.values(goalName=F('primary_goal__name')).annotate(goalTotal=Count('primary_goal')).order_by('-goalTotal'))

        # For each unique goal and count found:
        for goal in goals:
            goalResponses = snapshotResponses.filter(primary_goal__name=goal['goalName']).select_related('goal_completed')
            responseYesCount = goalResponses.filter(goal_completed__name__iexact='yes').count()
            responsePartiallyCount = goalResponses.filter(goal_completed__name__iexact='partially').count()
            responseNoCount = goalResponses.filter(goal_completed__name__iexact='no').count()

            goal['Yes'] = responseYesCount
            goal['Partially'] = responsePartiallyCount
            goal['No'] = responseNoCount
            goal['YesPercent'] = round((responseYesCount/goal['goalTotal'])*100)
            goal['NoPercent'] = round((responseNoCount/goal['goalTotal'])*100)
            goal['PartiallyPercent'] = round((responsePartiallyCount/goal['goalTotal'])*100)
            goal['goalName'] = goal['goalName'].replace('_',' ').capitalize()
            goal['goalPercent'] = round((goal['goalTotal']/respsnapshotResponsesCount)*100)

    except Exception as ex:
        goals = None

    #print(json.dumps(data, indent=2))
    return goals
782c911f1a751ccf4c441874520f0cbc66b4a89c
3,641,281
def hookes_law(receiver_nodes, sender_nodes, k, x_rest):
    """Applies Hooke's law to springs connecting some nodes.

    Args:
        receiver_nodes: Ex5 tf.Tensor of [x, y, v_x, v_y, is_fixed] features for the
            receiver node of each edge.
        sender_nodes: Ex5 tf.Tensor of [x, y, v_x, v_y, is_fixed] features for the
            sender node of each edge.
        k: Spring constant for each edge.
        x_rest: Rest length of each edge.

    Returns:
        Ex2 Tensor of the force [f_x, f_y] acting on each edge.
    """
    diff = receiver_nodes[..., 0:2] - sender_nodes[..., 0:2]
    x = tf.norm(diff, axis=-1, keepdims=True)
    force_magnitude = tf.multiply(k, (x - x_rest) / x)
    force = -1 * force_magnitude * diff
    return force
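# Worked example (not part of the original snippet): two nodes 2.0 apart on the
# x-axis with rest length 1.0 and k = 1.0, so the spring pulls the receiver back
# towards the sender with force of magnitude k * (x - x_rest) = 1.0.
import tensorflow as tf

receiver = tf.constant([[2.0, 0.0, 0.0, 0.0, 0.0]])
sender = tf.constant([[0.0, 0.0, 0.0, 0.0, 0.0]])
print(hookes_law(receiver, sender, k=1.0, x_rest=1.0))  # [[-1.  0.]]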
30182ed5e91e07affa4db117c9e24a9cf76e3646
3,641,282
def check_output_filepath(filepath): """ Check and return an appropriate output_filepath parameter. Ensures the file is a csv file. Ensures a value is set. If a value is not set or is not a csv, it will return a default value. :param filepath: string filepath name :returns: a string representing a filepath location. """ if filepath.endswith('.csv'): return filepath return "clean_rules_report.csv"
63fcf697dbde9a62cc39311b4d234955520f6394
3,641,283
import re def mock_open_url(url, allow_local=False, timeout=None, verify_ssl=True, http_headers=None): """Open local files instead of URLs. If it's a local file path, leave it alone; otherwise, open as a file under ./files/ This is meant as a side effect for unittest.mock.Mock """ if re.match(r'https?:', url): # Looks like a URL filename = re.sub(r'^.*/([^/]+)$', '\\1', url) path = resolve_path('files/mock/' + filename) else: # Assume it's a file path = url return (open(path, 'rb'), None, None, None)
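# Illustrative usage sketch (not part of the original snippet): the typical
# pattern is to patch the real opener in the module under test so every URL
# resolves to a fixture under ./files/mock/. The patch target below is a
# placeholder, and `resolve_path` is assumed to exist alongside this helper.
from unittest import mock

# with mock.patch("mypackage.http.open_url", side_effect=mock_open_url):
#     run_code_that_fetches("https://example.com/data.csv")  # opens files/mock/data.csv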
28705c7d1785853f99d544967e745a12a58321f0
3,641,284
def concat_chunked_data(jsons, f_src='c', *args, **kwargs):
    """
    Takes chunks of data and combines them into a numpy array of shape
    trial x cells x time, concatenated over trials, and clips the trials at the
    shortest frame number and fewest cells. Args and kwargs are passed to process_data.

    Args:
        jsons (list): list of jsons to process
        f_src (str): key to F data to load ('c' or 'dff'). Defaults to 'c'.

    Returns:
        trial_dat: 3D numpy array, (trials, cells, time)
    """
    # load and format
    c_trials = [load_json(j)[f_src] for j in jsons]
    s_trials = [load_json(j)['splits'] for j in jsons]

    # smoosh all the lists of trials into a big array
    trial_dat = []
    for c,s in zip(c_trials, s_trials):
        out = process_data(c, s, *args, **kwargs)
        trial_dat.append(out)

    # ensure that trials are the same length and have the same number of cells
    shortest = min([s.shape[2] for s in trial_dat]) # shortest trial
    # fewest = min([c.shape[1] for c in trial_dat]) # fewest cells
    # trial_dat = np.concatenate([a[:, :fewest, :shortest] for a in trial_dat])
    try:
        trial_dat = np.concatenate([a[:, :, :shortest] for a in trial_dat])
    except:
        print('WARNING LOST A CELL(S)!!!!')
        fewest = min([c.shape[1] for c in trial_dat]) # fewest cells
        trial_dat = np.concatenate([a[:, :fewest, :shortest] for a in trial_dat])

    return trial_dat
cfd978a1ac74d35d857e152e6051e88b05ccf495
3,641,285
def hmc_update(context, hmc_uuid, values, session=None): """Updates an existing HMC instance in the Database""" return IMPL.hmc_update(context, hmc_uuid, values, session)
943dd2359b2458429d60bb8c68ee20c40651b8fe
3,641,286
def _dense_difference(fun, x0, f0, h, one_sided, method):
    """
    Calculates an approximation of the Jacobian of `fun` at the point `x0`
    in dense matrix form.

    NOTE: Inspired by:
    https://github.com/scipy/scipy/blob/master/scipy/optimize/_numdiff.py

    Parameters
    ----------
    fun : callable
        Function which computes a vector of residuals with call f(x, *args, **kwargs).
    x0 : array_like with shape (n,) or float
        Initial guess of the dependent variable.
    f0 : array_like with shape (m,)
        Function value at `x0`, i.e. ``fun(x0)``.
    h : array_like with shape (n,)
        Absolute step size for each component of `x0`.
    one_sided : array_like of bool with shape (n,)
        Whether to use a one-sided scheme per component (only used by '3-point').
    method : {'2-point', '3-point'}, optional
        Method used for the finite difference scheme.

    Returns
    -------
    J : array_like, shape (m, n)
        Approximation of the Jacobian matrix.
    """
    m = f0.size
    n = x0.size
    Jt = np.empty((n, m))
    hv = np.diag(h)

    for i in range(h.size):
        if method == '2-point':
            x = x0 + hv[i]
            dx = x[i] - x0[i]
            df = fun(x) - f0
        elif (method == '3-point') and one_sided[i]:
            x1 = x0 + hv[i]
            x2 = x0 + 2. * hv[i]
            dx = x2[i] - x0[i]
            f1 = fun(x1)
            f2 = fun(x2)
            df = -3. * f0 + 4. * f1 - f2
        elif (method == '3-point') and (not one_sided[i]):
            x1 = x0 - hv[i]
            x2 = x0 + hv[i]
            dx = x2[i] - x1[i]
            f1 = fun(x1)
            f2 = fun(x2)
            df = f2 - f1
        else:
            raise ValueError("Step-method must be either '2-point' or '3-point'.")
        Jt[i, :] = df / dx

    if m == 1:
        Jt = np.ravel(Jt)

    return Jt.T
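# Worked check (not part of the original snippet): the Jacobian of
# f(x, y) = [x*y, x + y] at (2, 3) is [[3, 2], [1, 1]]; the 2-point scheme
# recovers it to within the step size.
import numpy as np

f = lambda v: np.array([v[0] * v[1], v[0] + v[1]])
x0 = np.array([2.0, 3.0])
J = _dense_difference(f, x0, f(x0), h=np.array([1e-6, 1e-6]),
                      one_sided=np.array([False, False]), method='2-point')
# J ~ [[3., 2.], [1., 1.]]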
47d840a70fe2b8d22bf9fec4fdbb0e5190dec2f2
3,641,287
def alternate( name, *functions ):
    """Construct a callable that acts as the first implementation found of the given set of alternatives.

    If `name` is a function, its name will be used.
    """
    if not isinstance( name, (bytes,unicode)):
        functions = (name,)+functions
        name = name.__name__
    return type( name, (_Alternate,), {} )( name, *functions )
5e751a5332c3e8e9e37f5544e9461c772bc525ac
3,641,288
from typing import Optional from typing import Callable def check_messenger(messenger: Optional[Callable]): """ Check that `messenger` is a `utipy.Messenger` object or `None`. In the latter case a `utipy.Messenger` with `verbose=False` is returned. Parameters ---------- messenger : `utipy.Messenger` or None A Messenger instance to check. Or `None`, in which case a `utipy.Messenger` with `verbose=False` is returned. Returns ------- `utipy.Messenger` """ # Check the messenger function if messenger is None: messenger = Messenger(verbose=False) else: assert isinstance(messenger, Messenger) return messenger
b50bc38d5034e3d4d4d35d4532a504024008361f
3,641,289
import re def convert(s): """Take an input string s, find all things that look like SGML character entities, and replace them with the Unicode equivalent. Function is from: http://stackoverflow.com/questions/1197981/convert-html-entities-to-ascii-in-python/1582036#1582036 """ matches = re.findall("&#\d+;", s) if len(matches) > 0: hits = set(matches) for hit in hits: name = hit[2:-1] try: entnum = int(name) s = s.replace(hit, unichr(entnum)) except ValueError: pass matches = re.findall("&\w+;", s) hits = set(matches) amp = "&" if amp in hits: hits.remove(amp) for hit in hits: name = hit[1:-1] if name in htmlentitydefs.name2codepoint: s = s.replace(hit, unichr(htmlentitydefs.name2codepoint[name])) s = s.replace(amp, "&") return s
0a25ee189ff107e5cd725bba1d1d20d6cb1c0f0c
3,641,290
def check_data_selection(race_id=None, category_index=None, racer_id=None): """Makes sure that we are trying to show data that is in the database.""" errors = [] if not race_id in Races.get_column('race_id'): race_id = Races.get_random_id() errors.append('race') categories = Races.get_categories(race_id) if category_index >= len(categories): category_index = 0 errors.append('category') if not racer_id in Racers.get_column('RacerID'): # Random racer from the currently selected category racer_id = Results.get_random_racer_id(racer_id, categories[category_index]) errors.append('racer') if errors: return redirect(url_for('error'))
6d5b4eeaf1149fdac76e83bb94a6b6d482d0d280
3,641,291
from typing import List def request_access_ticket(pat: str, permission_endpoint: str, resources: List[dict], secure: bool = False) -> str: """ As a Resource Server, request permission to the AS to access a resource, generating a ticket as a result. - CAN THROW EXCEPTIONS - MAKES A CONNECTION TO AN EXTERNAL ENDPOINT Args: - pat = String containing the pat (token) - permission_endpoint = URL of the token permission endpoint in the AS - resources = List of resources to request permission to. Format: [ { "resource_id": <str resource id>, "resource_scopes": [ <scope 1>, <scope 2>, ...] }, ... ] - secure = toggle checking of SSL certificates. Activating this is recommended on production environments Returns: A string containing the ticket for accessing those resources. """ headers = { 'content-type': "application/json", 'authorization': "Bearer "+pat, } if len(resources) == 1: resources = resources[0] # Use a single dict instead of a list for 1 resource disable_warnings_if_debug(secure) response = request("POST", permission_endpoint , json=resources, headers=headers, verify=secure) if not is_ok(response): raise Exception("An error occurred while requesting permission for a resource: "+str(response.status_code)+":"+str(response.reason)+":"+str(response.text)) try: return response.json()["ticket"] except Exception as e: raise Exception("Call to permission endpoint returned unexpected value or error: '"+response.text+"'"+". Error: "+str(e))
ae032fc67c1aed0bd52b808e4d922992aed39ba6
3,641,292
def index(request): """ Root page view. Just shows a list of liveblogs. """ # Get a list of liveblogs, ordered by the date of their most recent # post, descending (so ones with stuff happening are at the top) liveblogs = Liveblog.objects.annotate( max_created=Max("posts__created") ).order_by("-max_created") # Render that in the index template return render(request, "index.html", { "liveblogs": liveblogs, })
518c64db21bd843dc34513c4d5677a18e5eac319
3,641,293
def model_fn_builder( bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings ): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids0 = features["input_ids0"] input_mask0 = features["input_mask0"] segment_ids0 = features["segment_ids0"] input_ids1 = features["input_ids1"] input_mask1 = features["input_mask1"] segment_ids1 = features["segment_ids1"] input_ids2 = features["input_ids2"] input_mask2 = features["input_mask2"] segment_ids2 = features["segment_ids2"] label_ids = features["label_ids"] is_training = (mode == tf.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids0, input_mask0, segment_ids0, input_ids1, input_mask1, segment_ids1, input_ids2, input_mask2, segment_ids2, label_ids, num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy(label_ids, predictions) loss = tf.metrics.mean(per_example_loss) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits]) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions=probabilities, scaffold_fn=scaffold_fn) return output_spec return model_fn
088e636bf21caa316995dbd69b680dedd42ca21a
3,641,294
def strarray(*args): """strarray(strarray_t array, size_t array_size, int code) -> char""" return _idaapi.strarray(*args)
9dfb42d81d307a32f201f1b55a1ef81cbede7c27
3,641,295
def _single_value_set(target_list, value): """ Return true if this constraint has only one value and it is this one. """ return len(target_list) == 1 and target_list[0] == value
472ebe1aa9726c70642423d05fa55723496e9bc5
3,641,296
def get_positive_input(message, float_parse=False, allow_zero=False):
    """
    Obtains and returns a positive number from the user (int by default,
    float if `float_parse` is True).

    Preconditions:
        message: non-empty string
        float_parse: bool defaulted to False
        allow_zero: bool defaulted to False

    Parameters:
        message: The message that is printed when obtaining the input.
        float_parse: Whether to parse input to float or int
        allow_zero: Whether to allow zero as an input

    Postconditions:
        num: The valid inputted number.
    """
    # use ternary operator to determine the sign to use
    sign = ">=" if allow_zero else ">"

    # try to parse input to either a float or int
    try:
        if float_parse:
            num = float(input("(must be " + sign + " 0), " + message).strip())
        else:
            num = int(input("(must be " + sign + " 0), " + message).strip())

        # raise a ValueError if input was invalid
        if (not allow_zero) and (num <= 0):
            raise ValueError()
        elif num < 0:
            raise ValueError()

        return num

    # catch any ValueErrors.
    except ValueError:
        print("Not a valid input.")

        # recurse the method until proper input was found
        return get_positive_input(message, float_parse, allow_zero)
17982ff069907464c70df7b6efb1f42d3811962e
3,641,297
def hellinger(p, q): """Compute Hellinger distance between 2 distributions.""" return np.linalg.norm(np.sqrt(p) - np.sqrt(q)) / np.sqrt(2)
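# Worked check (not part of the original snippet).
import numpy as np

p = np.array([1.0, 0.0])
q = np.array([0.0, 1.0])
assert abs(hellinger(p, q) - 1.0) < 1e-12  # disjoint distributions -> maximal distance
assert hellinger(p, p) == 0.0              # identical distributions -> zero distance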
f976a96af2e4acaf81961b93f2cfe7a868d912e3
3,641,298
import pandas as pd


def usd_currency(currency_df: pd.DataFrame, value: int, date: str) -> float:
    """
    Compute VALUE/(USD/SYMBOL)

    Parameters
    ----------
    currency_df : pd.DataFrame
        USD/SYMBOL df
    value : int
        Value of product
    date : str
        Currency quote day

    Returns
    -------
    float
        Computed value
    """
    return value / currency_df.loc[date].usd
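# Illustrative usage sketch (not part of the original snippet): assumes a `usd`
# column holding the USD/SYMBOL quote, indexed by date.
quotes = pd.DataFrame({"usd": [5.0, 5.2]}, index=["2021-01-04", "2021-01-05"])
print(usd_currency(quotes, value=100, date="2021-01-04"))  # 20.0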
15ce0d8f9db3b5dc1e1a684dc27daa63d163853b
3,641,299