content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def extras_features(*features): """ Decorator used to register extras provided features to a model """ def wrapper(model_class): # Initialize the model_features store if not already defined if "model_features" not in registry: registry["model_features"] = {f: collections.defaultdict(list) for f in EXTRAS_FEATURES} for feature in features: if feature in EXTRAS_FEATURES: app_label, model_name = model_class._meta.label_lower.split(".") registry["model_features"][feature][app_label].append(model_name) else: raise ValueError("{} is not a valid extras feature!".format(feature)) return model_class return wrapper
10,600
def setFeedMoleFraction(H2COxRatio, CO2COxRatio): """ set inlet feed mole fraction """ # feed properties # H2/COx ratio # H2COxRatio = 2.0 # CO2/CO ratio # CO2COxRatio = 0.8 # mole fraction y0_H2O = 0.00001 y0_CH3OH = 0.00001 y0_DME = 0.00001 # total molar fraction tmf0 = 1 - (y0_H2O + y0_CH3OH + y0_DME) # COx COx = tmf0/(H2COxRatio + 1) # mole fraction y0_H2 = H2COxRatio*COx y0_CO2 = CO2COxRatio*COx y0_CO = COx - y0_CO2 # total mole fraction tmf = y0_H2 + y0_CO + y0_CO2 + y0_H2O + y0_CH3OH + y0_DME # CO2/CO2+CO ratio CO2CO2CORatio = y0_CO2/(y0_CO2+y0_CO) # res feedMoFri = np.array([y0_H2, y0_CO2, y0_H2O, y0_CO, y0_CH3OH, y0_DME], dtype=np.float32) # res return feedMoFri
10,601
def ship_hit(ai_settings, stat, screen, sb, ship, aliens, bullets): """Respond to ship being hit by aliens """ if stat.ship_left > 0: # Decrement ship_left stat.ship_left -= 1 # Update scoreboard sb.prep_ship() # Delete bullets and aliens bullets.empty() aliens.empty() # Create a new fleet and initial position of the ship create_fleet(ai_settings, screen, ship, aliens) ship.ship_center() # Pause sleep(0.5) else: stat.game_active = False pygame.mouse.set_visible(True)
10,602
def format(message, *args, **kwargs): """Shortcut for :class:`tossi.Formatter.format` of the default registry. """ return formatter.vformat(message, args, kwargs)
10,603
def reverse_url(url_name,id,request): """ Return to the current page after editing a tag :param url_name: :param id: :param request: :return: """ from django.http.request import QueryDict path = request.get_full_path() query_dict_obj = QueryDict(mutable=True) query_dict_obj['next'] = path encode_url = query_dict_obj.urlencode() prefix_path = reverse(url_name,args=(id,)) full_path = prefix_path + '?' + encode_url return full_path
10,604
def add_default_to_data(data: Dict[str, object], schema: SchemaDictType) -> Dict[str, object]: """Adds the default values present in the schema to the required fields if the values are not provided in the data """ # add non as defaults to the field that is not required and does not have # a default value non_default_values = [i for i in schema if all( j not in schema[i] for j in ["required", "default"])] for val in non_default_values: schema[val]["default"] = None defaults: List[str] = [j for j in [ i for i in schema if "default" in schema[i]] if "default" in schema[j]] if not all(i in data for i in defaults): for i in defaults: if i not in data: data[i] = schema[i]["default"] return data else: return data
10,605
def repeating_chars(text: str, *, chars: str, maxn: int = 1) -> str: """Normalize repeating characters in `text`. Truncating their number of consecutive repetitions to `maxn`. Duplicates Textacy's `utils.normalize_repeating_chars`. Args: text (str): The text to normalize. chars: One or more characters whose consecutive repetitions are to be normalized, e.g. "." or "?!". maxn: Maximum number of consecutive repetitions of `chars` to which longer repetitions will be truncated. Returns: str """ return re.sub(r"({}){{{},}}".format(re.escape(chars), maxn + 1), chars * maxn, text)
10,606
def export_data_csv(): """ Build a CSV file with the Order data from the database :return: The CSV file in StringIO """ result = query_order.get_all_orders() output = io.StringIO() writer = csv.writer(output) line = ['Numéro de commande', 'Date', 'Montant total', 'Numéro client', 'Référence devis'] writer.writerow(line) for row in result: date = format_date_csv(str(row.orderDate)[:10]) line = [str(row.orderNumber), date, row.orderTotalAmount, str(row.clientNumber), str(row.quoteNumber)] writer.writerow(line) output.seek(0) return output
10,607
def findurls(s): """Use a regex to pull URLs from a message""" regex = r"(?i)\b(((https?|ftp|smtp):\/\/)?(www\.)?[a-zA-Z0-9_.-]+\.[a-zA-Z0-9_.-]+(\/[a-zA-Z0-9#]+\/?)*\/*)" url = re.findall(regex,s) return [x[0] for x in url]
10,608
def collide_rect(left, right): """collision detection between two sprites, using rects. pygame.sprite.collide_rect(left, right): return bool Tests for collision between two sprites. Uses the pygame.Rect colliderect function to calculate the collision. It is intended to be passed as a collided callback function to the *collide functions. Sprites must have "rect" attributes. New in pygame 1.8.0 """ return left.rect.colliderect(right.rect)
10,609
def test_maddrs(host_maddrs, expected_maddrs): """Test that the multiple addresses are correctly assigned.""" strategy = CollaborativeStrategy(target_batch_size=1, host_maddrs=host_maddrs) assert strategy.dht.kwargs["host_maddrs"] == expected_maddrs
10,610
def expand_envvars(d): """ Recursively expand values that look like environment vars in a dict This function assumes that environment variables are values that begin with `$` and are evaluated with :func:`os.path.expandvars`. No exception will be raised if an environment variable is not set. Args: d (dict): expand environment variables used in the values of this dictionary Returns: dict: input dictionary with environment variables expanded """ def check_envvar(k, v): """ Warn if value looks un-expanded """ if '$' in v: logger.warning('Config key=value pair might still contain ' 'environment variables: "%s=%s"' % (k, v)) _d = d.copy() for k, v in six.iteritems(_d): if isinstance(v, dict): _d[k] = expand_envvars(v) elif isinstance(v, str): _d[k] = os.path.expandvars(v) check_envvar(k, v) elif isinstance(v, (list, tuple)): n_v = [] for _v in v: if isinstance(_v, str): _v = os.path.expandvars(_v) check_envvar(k, _v) n_v.append(_v) _d[k] = n_v return _d
10,611
def get_minhash( doc: str, normalization_func: Callable, split_method: str, ngram_size: int, ngram_stride: int, num_minhashes: int, random_seed: int, ) -> LeanMinHash: """Returns a minhash fingerprint for the given document. Args: doc (str): The document to create the MinHash object for. normalization_func (Callable): The function to normalize the document with. split_method (str): The method to split the document into shingles. Can be 'word_ngram', 'paragraph', 'none' or None. ngram_size (int): The size of the ngrams to use. ngram_stride (int): The stride of the ngrams to use. num_minhashes (int): The number of minhashes to use. random_seed (int): The random seed to use. Returns: LeanMinHash: The minhash fingerprint for the given document. Raises: ValueError: If `split_method` is not 'word_ngram', 'paragraph', 'none' or None. """ # Extract shingles from the document, depending on the `split_method` shingles = get_shingles( doc, normalization_func=normalization_func, split_method=split_method, ngram_size=ngram_size, ngram_stride=ngram_stride, ) # Initialise the fingerprint minhash = MinHash(num_perm=num_minhashes, seed=random_seed) # Add all the shingles to the fingerprint minhash.update_batch([shingle.encode("utf-8") for shingle in shingles]) # Convert the fingerprint to a LeanMinHash fingerprint, to save memory # and increase performance minhash = LeanMinHash(minhash, seed=random_seed) # Return the fingerprint return minhash
10,612
def test_emphasis_484(): """ Test case 484: (part 3) Rule 17 """ # Arrange source_markdown = """*<img src="foo" title="*"/>""" expected_tokens = [ "[para(1,1):]", "[text(1,1):*:]", '[raw-html(1,2):img src="foo" title="*"/]', "[end-para:::True]", ] expected_gfm = """<p>*<img src="foo" title="*"/></p>""" # Act & Assert act_and_assert(source_markdown, expected_gfm, expected_tokens)
10,613
def create_profile(sender, instance, created, **kwargs): """ A signal handler that automatically creates a profile when a new user is registered. This would allow editing a user module for purchasing codes, as in social networks. The signal to delete a user when their profile is deleted (the inverse of this one) is still missing. """ if created: Profile.objects.create(user=instance)
10,614
def silu(x): """Sigmoid Linear Unit (SiLU) function, also known as the swish function. silu(x) = x * sigmoid(x). """
10,615
def tokenize(text): """ Function: tokenize: This function splits text into words and returns the root form of the words Args: text(str): the message Return: lemm(list of str): a list of the root form of the message words """ # Normalizing text (a-zA-Z0-9 matches all alphanumeric characters) text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower()) # Tokenizing text words = word_tokenize(text) # Removing stop words stop = stopwords.words("english") words = [t for t in words if t not in stop] # Lemmatization lemm = [WordNetLemmatizer().lemmatize(w) for w in words] return lemm
10,616
def displayRandomForest(): """Run displayRandomForest""" executionStartTime = int(time.time()) # status and message success = True message = "ok" plotUrl = '' dataUrl = '' # get model1, var1, pres1, model2, var2, pres2, start time, end time, lon1, lon2, lat1, lat2, nSample center = [] model = [] var = [] pres = [] nVarP = 1 nVar = int(request.args.get('nVar', '')) for i in range( nVar+nVarP ): m1 = request.args.get('model'+str(i+1), '').lower() temp1 = m1.split('_') center.append(temp1[0]) model.append(temp1[1]) var.append(request.args.get('var'+str(i+1), '')) pres.append(request.args.get('pres'+str(i+1), '')) startT = request.args.get('timeS', '') endT = request.args.get('timeE', '') lonS = request.args.get('lonS', '') lonE = request.args.get('lonE', '') latS = request.args.get('latS', '') latE = request.args.get('latE', '') frontend_url = request.args.get('fromPage', '') print 'frontend_url: ', frontend_url userId = request.args.get('userid', '') print 'from url, userId: ', userId if userId != None and userId != '': userId = int(userId) else: userId = 0 json1 = { 'nVar':nVar, 'center':center, 'model':model, 'varName':var, 'pres':pres, 'yearS':startT[:4], 'yearE':endT[:4], 'monthS':startT[4:], 'monthE':endT[4:], 'lon1S':lonS, 'lon1E':lonE, 'lat1S':latS, 'lat1E':latE, } # get where the input file and output file are current_dir = os.getcwd() print 'current_dir: ', current_dir try: seed_str = str(time.time()) tag = md5.new(seed_str).hexdigest() output_dir = current_dir + '/svc/static/randomForest/' + tag print 'output_dir: ', output_dir if not os.path.exists(output_dir): os.makedirs(output_dir) json1['outDir'] = output_dir import pickle pFile = '%s/p.pickle'%output_dir fid = open(pFile,'w') pickle.dump(json1, fid) fid.close() # chdir to where the app is os.chdir(current_dir+'/svc/src/randomForest') # instantiate the app. class c1 = call_randomForest.call_randomForest(pFile) # call the app. function (0 means the image created is scatter plot) ### (message, imgFileName) = c1.displayScatterPlot2V(0) (message, imgFileName, dataFileName) = c1.display() # chdir back os.chdir(current_dir) ind1 = message.find('No Data') if ind1>0: message1 = message[ind1:(ind1+200)] message1a = message1.split('\n') print message1a[0] print message1a[1] hostname, port = get_host_port2("host.cfg") ### userId = 2 if hostname == 'EC2': try: req = urllib2.Request('http://169.254.169.254/latest/meta-data/public-ipv4') response = urllib2.urlopen(req) hostname = response.read() except Exception, e: print 'e: ', e """ try: req2 = urllib2.Request(' http://169.254.169.254/latest/user-data') response2 = urllib2.urlopen(req2) userId = json.loads(response2.read())['username'] except Exception, e: print 'e: ', e userId = 2 """ """ if userIdDict.has_key(userId): userId = userIdDict[userId] else : userId = 'lei' """ print 'userId: ', userId print 'hostname: ', hostname print 'port: ', port purpose = request.args.get('purpose')#"Test .\'\"\\purpose" backend_url, plotUrl, dataUrl, failedImgUrl = assignUrl('randomForest', tag, imgFileName, dataFileName) # backend_url = 'http://' + hostname + ':' + port + '/svc/randomForest' # print 'backend_url: ', backend_url # print 'imgFileName: ', imgFileName # plotUrl = 'http://' + hostname + ':' + port + '/static/randomForest/' + tag + '/' + imgFileName # print 'plotUrl: ', plotUrl # dataUrl = 'http://' + hostname + ':' + port + '/static/randomForest/' + tag + '/' + dataFileName # print 'dataUrl: ', dataUrl # failedImgUrl = 'http://' + hostname + ':' + port + '/static/plottingFailed.png' # print 'failedImgUrl: ', failedImgUrl if imgFileName is '' or not os.path.exists(output_dir+'/'+imgFileName): print '****** Error: %s not exist' % imgFileName plotUrl = failedImgUrl if dataFileName is '' or not os.path.exists(output_dir+'/'+dataFileName): print '****** Error: %s not exist' % dataFileName dataUrl = failedImgUrl print 'message: ', message if len(message) == 0 or message.find('Error') >= 0 or message.find('error:') >= 0 or message.find('No Data') >= 0: success = False plotUrl = '' dataUrl = '' except ValueError, e: # chdir to current_dir in case the dir is changed to where the app is in the try block os.chdir(current_dir) print 'change dir back to: ', current_dir success = False message = str(e) except Exception, e: # chdir to current_dir in case the dir is changed to where the app is in the try block os.chdir(current_dir) print 'change dir back to: ', current_dir success = False ### message = str("Error caught in displayScatterPlot2V()") message = str(e) executionEndTime = int(time.time()) urlLink = request.query_string urlLink = urlLink.strip() + '&image=%s&data_url=%s' % (plotUrl, dataUrl) print 'urlLink: ', urlLink urlLink = urlLink.replace('&fromPage='+frontend_url, '') print 'urlLink: ', urlLink # json dictionary for provenance service request post_json = {'source': 'JPL', 'parameters':urlLink, 'frontend_url': frontend_url, 'backend_url': backend_url, 'userId': long(userId), 'executionStartTime':long(executionStartTime)*1000, 'executionEndTime':long(executionEndTime)*1000} post_json = json.dumps(post_json) if USE_CMU: try: print post_json print requests.post(CMU_PROVENANCE_URL, data=post_json, headers=HEADERS).text print requests.post(CMU_PROVENANCE_URL_2, data=post_json, headers=HEADERS).text ### print requests.post(VIRTUAL_EINSTEIN_URL, data=post_json, headers=HEADERS).text except: print 'Something went wrong with Wei\'s stuff' return jsonify({ 'success': success, 'message': message, 'url': plotUrl, 'dataUrl': dataUrl })
10,617
def get_reports(request): """ Get a list of all :model:`reporting.Report` entries associated with an individual :model:`users.User` via :model:`rolodex.Project` and :model:`rolodex.ProjectAssignment`. """ active_reports = [] active_projects = ( ProjectAssignment.objects.select_related("project") .filter(Q(operator=request.user) & Q(project__complete=False)) .order_by("project__end_date") ) for active_project in active_projects: reports = Report.objects.filter( Q(project=active_project.project) & Q(complete=False) ) for report in reports: active_reports.append(report) return active_reports
10,618
def get_db(): """Database dependency This dependency creates a new SessionLocal used for a single request and closes when request is completed. """ try: db = SessionLocal() yield db finally: db.close()
10,619
def test_get_well_position_with_top_offset( decoy: Decoy, well_plate_def: LabwareDefinition, standard_deck_def: DeckDefinitionV3, labware_view: LabwareView, subject: GeometryView, ) -> None: """It should be able to get the position of a well top in a labware.""" labware_data = LoadedLabware( id="labware-id", loadName="load-name", definitionUri="definition-uri", location=DeckSlotLocation(slotName=DeckSlotName.SLOT_4), offsetId="offset-id", ) calibration_offset = LabwareOffsetVector(x=1, y=-2, z=3) slot_pos = Point(4, 5, 6) well_def = well_plate_def.wells["B2"] decoy.when(labware_view.get("labware-id")).then_return(labware_data) decoy.when(labware_view.get_definition("labware-id")).then_return(well_plate_def) decoy.when(labware_view.get_labware_offset_vector("labware-id")).then_return( calibration_offset ) decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_4)).then_return( slot_pos ) decoy.when(labware_view.get_well_definition("labware-id", "B2")).then_return( well_def ) result = subject.get_well_position( labware_id="labware-id", well_name="B2", well_location=WellLocation( origin=WellOrigin.TOP, offset=WellOffset(x=1, y=2, z=3), ), ) assert result == Point( x=slot_pos[0] + 1 + well_def.x + 1, y=slot_pos[1] - 2 + well_def.y + 2, z=slot_pos[2] + 3 + well_def.z + well_def.depth + 3, )
10,620
def create_response(key, value): """Return generic AWS Lamba proxy response object format.""" return { "statusCode": 200, "headers": {"Content-Type": "application/json"}, "body": json.dumps({key: value}) }
10,621
def load_image_buffer_to_tensor(image_buf, device): """Maps image bytes buffer to tensor Args: image_buf (bytes buffer): The image bytes buffer device (object): The pytorch device object Returns: py_tensor tensor: Pytorch tensor """ image = Image.open(io.BytesIO(image_buf)) preprocess = transforms.Compose([ transforms.ToTensor(), transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ), ]) input_tensor = preprocess(image) input_batch = input_tensor.unsqueeze(0) return input_batch.to(device, torch.float)
10,622
def alpha2tand(freq, a, b, n): """Convert Halpern's 'a' and 'b' from an absorption coefficient of the form `a*freq**b` to a (frequency-dependent) loss tangent. Parameters ---------- freq : numpy array or float The frequency (Hz) (or frequencies) at which to calculate the loss tangent. a : float Halpern's 'a' coefficient b : float Halpern's 'b' coefficient n : float The real part of the material's refractive index Returns ------- tand : numpy array The loss tangent of the material at the given frequency and Halpern coefficients. """ imagn = alpha2imagn(freq, a, b, n) # The complex index of refraction of a material is related to the # complex (relative) permittivity by the relation: # e_r = e' + i*e'' = n^2 = (n + i*k)^2 = n^2 - k^2 + i*2nk # By equating the real and imaginary parts we are left with: # e' = (n^2 - k^2); e'' = 2nk # With this information we can find the loss tangent, which is simply # the ratio of the real and imaginary parts of the relative # permittivity: # tand = (e''/e') ep = n**2 - imagn**2 epp = 2 * n * imagn tand = epp / ep return tand
10,623
def convert_path_to_pixels(path): """ Purpose: --- This function should convert the obtained path (list of tuples) to pixels. Teams are free to choose the number of points and logic for this conversion. Input Arguments: --- `path` : [ list ] Path returned from task_4a.find_path() function. Returns: --- `pixel_path` : [ type can be decided by teams ] Example call: --- pixel_path = convert_path_to_pixels(path) """ ############## ADD YOUR CODE HERE ############## pixel_path = path tmp = 64 for i in range(len(pixel_path)): pixel_path[i][0] = path[i][0] * tmp * 2 + tmp pixel_path[i][1] = path[i][1] * tmp * 2 + tmp ################################################## print("Pixel path is : ", pixel_path) return pixel_path
10,624
def uniform_prob(*args, prob=None, inside=None, pscale=1.): """ Uniform probability function for discrete and continuous vtypes. """ # Detect ptype, default to prob if no values, otherwise detect vtype assert len(args) >= 1, "Minimum of a single positional argument" pscale = eval_pscale(pscale) use_logs = iscomplex(pscale) if prob is None: prob = 0. if use_logs else 1. vals = args[0] if vals is None: return prob vtype = eval_vtype(vals) if callable(inside) else eval_vtype(inside) # Set inside function by vtype if not specified if not callable(inside): if vtype in VTYPES[float]: inside = lambda x: np.logical_and(x >= min(inside), x <= max(inside)) else: inside = lambda x: np.isin(x, inside) # If scalar, check within variable set p_zero = NEARLY_NEGATIVE_INF if use_logs else 0. if isscalar(vals): prob = prob if inside(vals) else p_zero # Otherwise treat as uniform within range else: p_true = prob prob = np.tile(p_zero, vals.shape) prob[inside(vals)] = p_true # This section below is there just to play nicely with conditionals if len(args) > 1: for arg in args[1:]: if use_logs: prob = prob + uniform_prob(arg, inside=inside, pscale=0.j) else: prob = prob * uniform_prob(arg, inside=inside) return prob
10,625
def update_bullets(game_settings, screen, stats, sb, ship, aliens, bullets): """Update position of bullets and get rid of old bullets""" # Update live bullets position on screen (group autocalls for all bullets in sprite) bullets.update() # Remove old bullets out of the screen space for bullet in bullets.copy(): if bullet.rect.bottom <= 0: bullets.remove(bullet) # Check for any bullets hitting aliens check_collisions(game_settings, screen, stats, sb, ship, aliens, bullets)
10,626
def obtenerListaArchivos(path: str): """ Generate a list of the files located under the given path """ lista = glob.glob(path, recursive=True) return lista
10,627
def is_equal_to(amount: float) -> Predicate: """Says that a field is exactly equal to some constant amount.""" return is_nearly_equal_to(amount, tolerance=0, taper=0)
10,628
def get_version(): """Return the current version info. The first call to this function will call version_info.load() and cache the result for later calls. """ global _version if _version is None: _version = version_info.load() return _version
10,629
def instance_power_specs_delete(context, instance_uuid, session=None): """ Removes an existing Server PowerSpecs from the Database """ # If we weren't given a session, then we need to create a new one if not session: session = nova_db_sa_api.get_session() # Create a Transaction around the delete in the Database with session.begin(): query = model_query( context, pvc_models.InstancePowerSpecsDTO, session=session) query = query.filter_by(instance_uuid=instance_uuid) query.soft_delete(synchronize_session=False)
10,630
def copy_file_or_flo(input_, output, buffer_size=64 * 1024, cb=None): """ Copy a file name or file-like-object to another file name or file-like object""" from os import makedirs from os.path import isdir, dirname assert bool(input_) assert bool(output) input_opened = False output_opened = False try: if isinstance(input_, str): if not isdir(dirname(input_)): makedirs(dirname(input_)) input_ = open(input_, 'r') input_opened = True if isinstance(output, str): if not isdir(dirname(output)): makedirs(dirname(output)) output = open(output, 'wb') output_opened = True # shutil.copyfileobj(input_, output, buffer_size) def copyfileobj(fsrc, fdst, length=buffer_size): cumulative = 0 while True: buf = fsrc.read(length) if not buf: break fdst.write(buf) if cb: cumulative += len(buf) cb(len(buf), len(buf), cumulative) copyfileobj(input_, output) finally: if input_opened: input_.close() if output_opened: output.close()
10,631
def load_yaml(fname): """Load a YAML file.""" yaml = YAML(typ="safe") # Compat with HASS yaml.allow_duplicate_keys = True # Stub HASS constructors HassSafeConstructor.name = fname yaml.Constructor = HassSafeConstructor with open(fname, encoding="utf-8") as conf_file: # If configuration file is empty YAML returns None # We convert that to an empty dict return yaml.load(conf_file) or {}
10,632
def unmix_cvxopt(data, endmembers, gammaConst=0, P=None): """ ****************************************************************** unmix finds an accurate estimation of the proportions of each endmember Syntax: P2 = unmix(data, endmembers, gammaConst, P) This product is Copyright (c) 2013 University of Missouri and University of Florida All rights reserved. CVXOPT package is used here. Parameters H,F,L,K,Aeq,beq are corresponding to P,q,G,h,A,B, respectively. lb and ub are element-wise bound constraints which are added to matrix G and h respectively. Inputs: data = DxN matrix of N data points of dimensionality D endmembers = DxM matrix of M endmembers with D spectral bands gammaConst = Gamma Constant for SPT term P = NxM matrix of abundances corresponding to N input pixels and M endmembers Returns: P2 = NxM matrix of new abundances corresponding to N input pixels and M endmembers ****************************************************************** """ solvers.options['show_progress'] = False X = data M = endmembers.shape[1] # number of endmembers # endmembers should be column vectors N = X.shape[1] # number of pixels # Equation constraint Aeq*x = beq # All values must sum to 1 (X1+X2+...+XM = 1) Aeq = np.ones((1, M)) beq = np.ones((1, 1)) # Boundary Constraints ub >= x >= lb # All values must be greater than or equal to 0 (0 <= X1, 0 <= X2, ..., 0 <= XM) lb = 0 ub = 1 g_lb = np.eye(M) * -1 g_ub = np.eye(M) # import pdb; pdb.set_trace() G = np.concatenate((g_lb, g_ub), axis=0) h_lb = np.ones((M, 1)) * lb h_ub = np.ones((M, 1)) * ub h = np.concatenate((h_lb, h_ub), axis=0) if P is None: P = np.ones((M, 1)) / M gammaVecs = np.divide(gammaConst, sum(P)) H = 2 * (endmembers.T @ endmembers) cvxarr = np.zeros((N,M)) for i in range(N): F = ((np.transpose(-2 * X[:, i]) @ endmembers) + gammaVecs).T cvxopt_ans = solvers.qp(P=matrix(H.astype(np.double)), q=matrix(F.astype(np.double)), G=matrix(G.astype(np.double)), h=matrix(h.astype(np.double)), A=matrix(Aeq.astype(np.double)), b=matrix(beq.astype(np.double))) cvxarr[i, :] = np.array(cvxopt_ans['x']).T cvxarr[cvxarr < 0] = 0 return cvxarr
10,633
def _accumulate_reward( timestep: dm_env.TimeStep, episode_return: float) -> float: """Accumulates rewards collected over the course of an episode.""" if timestep.reward and timestep.reward != 0: logging.info('Reward: %s', timestep.reward) episode_return += timestep.reward if timestep.first(): episode_return = 0 elif timestep.last(): logging.info('Episode return: %s', episode_return) return episode_return
10,634
def unsafe_load(stream): """ Parse the first YAML document in a stream and produce the corresponding Python object. Resolve all tags, even those known to be unsafe on untrusted input. """ return load(stream, UnsafeLoader)
10,635
def load_tl_gan_model(): """ Load the linear model (matrix) which maps the feature space to the GAN's latent space. """ with open(FEATURE_DIRECTION_FILE, 'rb') as f: feature_direction_name = pickle.load(f) # Pick apart the feature_direction_name data structure. feature_direction = feature_direction_name['direction'] feature_names = feature_direction_name['name'] num_feature = feature_direction.shape[1] feature_lock_status = np.zeros(num_feature).astype('bool') # Rearrange feature directions using Shaobo's library function. feature_direction_disentangled = \ feature_axis.disentangle_feature_axis_by_idx( feature_direction, idx_base=np.flatnonzero(feature_lock_status)) return feature_direction_disentangled, feature_names
10,636
def _find_test_file_from_report_file(base_path: str, report: str) -> Optional[Path]: """ Find test file from cucumber report file path format e.g) Test-features-foo-hoge.xml -> features/foo/hoge.feature or features/foo-hoge.feature """ report_file = os.path.basename(report) report_file = report_file.lstrip(REPORT_FILE_PREFIX) report_file = os.path.splitext(report_file)[0] list = _create_file_candidate_list(report_file) for l in list: f = Path(base_path, l + ".feature") if f.exists(): return f return None
10,637
def luminance(qcolor): """ Gives the pseudo-equivalent greyscale value of this color """ r,g,b = qcolor.red(), qcolor.green(), qcolor.blue() return int(0.2*r + 0.6*g + 0.2*b)
10,638
def read_info(path, layer=None, encoding=None): """Read information about an OGR data source. `crs` and `geometry` will be `None` and `features` will be 0 for a nonspatial layer. Parameters ---------- path : str or pathlib.Path layer : [type], optional Name or index of layer in data source. Reads the first layer by default. encoding : [type], optional (default: None) If present, will be used as the encoding for reading string values from the data source, unless encoding can be inferred directly from the data source. Returns ------- dict { "crs": "<crs>", "fields": <ndarray of field names>, "encoding": "<encoding>", "geometry": "<geometry type>", "features": <feature count> } """ return ogr_read_info(str(path), layer=layer, encoding=encoding)
10,639
def save_hdf5(path, freq, traces, **kwargs): """Save GWINC budget data to an HDF5 file. The `freq` argument should be the frequency array, and `traces` should be the traces (recursive) dictionary. Keyword arguments are stored in the HDF5 top level 'attrs' key-value store. If an 'ifo' keyword arg is supplied, it is assumed to be a Struct and will be serialized to YAML for storage. See HDF5_SCHEMA. """ with h5py.File(path, 'w') as f: f.attrs['SCHEMA'] = SCHEMA f.attrs['SCHEMA_VERSION'] = SCHEMA_VERSION # FIXME: add budget code hash or something f.attrs['date'] = datetime.datetime.now().isoformat() for key, val in kwargs.items(): if key == 'ifo': f.attrs['ifo'] = val.to_yaml() else: f.attrs[key] = val f.create_dataset('Freq', data=freq) tgrp = f.create_group('traces') _write_trace_recursive(tgrp, traces)
10,640
def _macro_cons_opec_month(): """ OPEC report - monthly, data available from 20170118 to the present. The data returned for a given index date is the previous month's data; since some countries have missing data, only countries with data are returned. :return: pandas.Series 阿尔及利亚 安哥拉 厄瓜多尔 加蓬 伊朗 伊拉克 科威特 利比亚 尼日利亚 \ 2017-01-18 108.0 172.4 54.5 21.3 372.0 463.2 281.2 60.8 154.2 2017-02-13 104.5 165.1 52.7 19.9 377.5 447.6 271.8 67.5 157.6 2017-03-14 105.3 164.1 52.6 19.4 381.4 441.4 270.9 66.9 160.8 2017-04-12 105.6 161.4 52.6 19.8 379.0 440.2 270.2 62.2 154.5 2017-05-11 104.7 169.2 52.4 20.6 375.9 437.3 270.2 55.0 150.8 2017-06-13 105.9 161.3 52.8 20.4 379.5 442.4 270.5 73.0 168.0 2017-07-12 106.0 166.8 52.7 19.7 379.0 450.2 270.9 85.2 173.3 2017-08-10 105.9 164.6 53.6 20.5 382.4 446.8 270.3 100.1 174.8 2017-09-12 106.5 164.6 53.7 17.3 382.8 444.8 270.2 89.0 186.1 2017-10-11 104.6 164.1 53.6 20.1 382.7 449.4 270.0 92.3 185.5 2017-11-13 101.2 171.1 54.1 20.3 382.3 438.3 270.8 96.2 173.8 2017-12-13 101.3 158.1 53.3 19.7 381.8 439.6 270.3 97.3 179.0 2018-01-18 103.7 163.3 52.6 19.7 382.9 440.5 270.0 96.2 186.1 2018-04-12 98.4 152.4 51.8 18.3 381.4 442.6 270.4 96.8 181.0 2018-05-14 99.7 151.5 52.0 18.3 382.3 442.9 270.5 98.2 179.1 2018-06-12 103.1 152.5 51.9 18.9 382.9 445.5 270.1 95.5 171.1 2018-07-11 103.9 143.1 51.9 19.0 379.9 453.3 273.1 70.8 166.0 2018-08-13 106.2 145.6 52.5 18.8 373.7 455.6 279.1 66.4 166.7 2018-09-12 104.5 144.8 52.9 18.7 358.4 464.9 280.2 92.6 172.5 2018-10-11 104.9 151.9 53.1 18.7 344.7 465.0 281.2 105.3 174.8 2018-11-13 105.4 153.3 52.5 18.6 329.6 465.4 276.4 111.4 175.1 2018-12-12 105.2 152.1 52.5 17.6 295.4 463.1 280.9 110.4 173.6 2019-03-14 102.6 145.7 52.2 20.3 274.3 463.3 270.9 90.6 174.1 2019-04-10 101.8 145.4 52.4 21.4 269.8 452.2 270.9 109.8 173.3 2019-06-13 102.9 147.1 52.9 21.1 237.0 472.4 271.0 117.4 173.3 沙特 阿联酋 委内瑞拉 欧佩克产量 2017-01-18 1047.4 307.1 202.1 3308.5 2017-02-13 994.6 293.1 200.4 3213.9 2017-03-14 979.7 292.5 198.7 3195.8 2017-04-12 999.4 289.5 197.2 3192.8 2017-05-11 995.4 284.2 195.6 3173.2 2017-06-13 994.0 288.5 196.3 3213.9 2017-07-12 995.0 289.8 193.8 3261.1 2017-08-10 1006.7 290.5 193.2 3286.9 2017-09-12 1002.2 290.1 191.8 3275.5 2017-10-11 997.5 290.5 189.0 3274.8 2017-11-13 1000.0 291.1 186.3 3258.9 2017-12-13 999.6 288.3 183.4 3244.8 2018-01-18 991.8 287.8 174.5 3241.6 2018-04-12 993.4 286.4 148.8 3195.8 2018-05-14 995.9 287.2 143.6 3193.0 2018-06-12 998.7 286.5 139.2 3186.9 2018-07-11 1042.0 289.7 134.0 3232.7 2018-08-13 1038.7 295.9 127.8 3232.3 2018-09-12 1040.1 297.2 123.5 3256.5 2018-10-11 1051.2 300.4 119.7 3276.1 2018-11-13 1063.0 316.0 117.1 3290.0 2018-12-12 1101.6 324.6 113.7 3296.5 2019-03-14 1008.7 307.2 100.8 3054.9 2019-04-10 979.4 305.9 73.2 3002.2 2019-06-13 969.0 306.1 74.1 2987.6 """ t = time.time() res = requests.get( JS_CONS_OPEC_URL.format( str(int(round(t * 1000))), str(int(round(t * 1000)) + 90) ) ) json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1]) date_list = [item["date"] for item in json_data["list"]] big_df = pd.DataFrame() for country in [item["datas"] for item in json_data["list"]][0].keys(): try: value_list = [item["datas"][country] for item in json_data["list"]] value_df = pd.DataFrame(value_list) value_df.columns = json_data["kinds"] value_df.index = pd.to_datetime(date_list) temp_df = value_df["上个月"] temp_df.name = country big_df = big_df.append(temp_df) except: continue headers = { "accept": "*/*", "accept-encoding": "gzip, deflate, br", "accept-language": "zh-CN,zh;q=0.9,en;q=0.8", "cache-control": "no-cache", "origin": "https://datacenter.jin10.com", "pragma": "no-cache", "referer": "https://datacenter.jin10.com/reportType/dc_opec_report", "sec-fetch-mode": "cors", "sec-fetch-site": "same-site", "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36", "x-app-id": "rU6QIu7JHe2gOUeR", "x-csrf-token": "", "x-version": "1.0.0", } res = requests.get(f"https://datacenter-api.jin10.com/reports/dates?category=opec&_={str(int(round(t * 1000)))}", headers=headers) # date list all_date_list = res.json()["data"] need_date_list = [item for item in all_date_list if item.split("-")[0] + item.split("-")[1] + item.split("-")[2] not in date_list] for item in reversed(need_date_list): res = requests.get( f"https://datacenter-api.jin10.com/reports/list?category=opec&date={item}&_={str(int(round(t * 1000)))}", headers=headers) temp_df = pd.DataFrame(res.json()["data"]["values"], columns=pd.DataFrame(res.json()["data"]["keys"])["name"].tolist()).T temp_df.columns = temp_df.iloc[0, :] temp_df = temp_df[['阿尔及利亚', '安哥拉', '厄瓜多尔', '加蓬', '伊朗', '伊拉克', '科威特', '利比亚', '尼日利亚', '沙特', '阿联酋', '委内瑞拉', '欧佩克产量']].iloc[-2, :] big_df[item] = temp_df return big_df.T
10,641
def p_object_path_expr_1(p): """ object_path_expr : empty """ p[0] = []
10,642
def createparser(): """Create an :class:`argparse.ArgumentParser` instance :return: parser instance :rtype: :class:`argparse.ArgumentParser` """ parser = argparse.ArgumentParser(prog=__package__, description=__doc__) s = parser.add_subparsers() # create compare subcommand parser_compare = s.add_parser("compare", help="Compare two versions" ) parser_compare.set_defaults(which="compare") parser_compare.add_argument("version1", help="First version" ) parser_compare.add_argument("version2", help="Second version" ) # create bump subcommand parser_bump = s.add_parser("bump", help="Bumps a version" ) parser_bump.set_defaults(which="bump") sb = parser_bump.add_subparsers(title="Bump commands", dest="bump") # Create subparsers for the bump subparser: for p in (sb.add_parser("major", help="Bump the major part of the version"), sb.add_parser("minor", help="Bump the minor part of the version"), sb.add_parser("patch", help="Bump the patch part of the version"), sb.add_parser("prerelease", help="Bump the prerelease part of the version"), sb.add_parser("build", help="Bump the build part of the version")): p.add_argument("version", help="Version to raise" ) return parser
10,643
def get_activation(preact_dict, param_name, hook_type): """ Hooks used in sensitivity schedulers (LOBSTER, Neuron-LOBSTER, SERENE). :param preact_dict: Dictionary in which to save the parameters' information. :param param_name: Name of the layer, used as a dictionary key. :param hook_type: Hook type. :return: Returns a forward_hook if $hook_type$ is forward, else a backward_hook. """ def forward_hook(model, inp, output): preact_dict[param_name] = output def backward_hook(module, grad_input, grad_output): preact_dict[param_name] = None preact_dict[param_name] = grad_output[0].detach().cpu() return forward_hook if hook_type == "forward" else backward_hook
10,644
def createAbsorption(cfgstr): """Construct Absorption object based on provided configuration (using available factories)""" return Absorption(cfgstr)
10,645
def add_arrow( ax: plt.Axes, from_square: str, to_square: str, alpha=1.0, color="black", ): """ Adds an arrow from one square to the next Draws an arrow connecting two squares together. Can be used to represent moves. Parameters ---------- ax: plt.Axes Axes containing board. from_square: str String representing the square to start the arrow (i.e. "e1"). to_square: str String representing the square to finish the arrow (i.e. "e4"). alpha: float Alpha for the piece, controls piece visibility. color: str Controls the color of the piece, typically black or white. """ from_x, from_y = _square_to_grid(from_square) to_x, to_y = _square_to_grid(to_square) ax.arrow( from_x, from_y, to_x - from_x, to_y - from_y, alpha=alpha, color=color, zorder=4, head_width=0.15, length_includes_head=True, )
10,646
def move(*args, **kwargs): """ The move command is used to change the positions of geometric objects. Returns: None """ pass
10,647
def data_route(session, df, site_info, file_id, fname, initial_upload=True): """ :param session: SQLAlchemy database session instance :param df: Pandas dataframe containing CODAR data :param site_info: Info for the CODAR site in hfrSites table :param file_id: ID for the file metadata that was updated to hfrWaveFilesMetadata :param fname: Name of the file being processed (used for logging) :param initial_upload: True for initial upload of Wave File to database. False for recurring update :return: """ if not initial_upload: max_time = session.query(func.max(database_tables.WaveData.datetime)).filter_by(file_id=file_id).one() df = df[df.datetime > max_time[0]] inserted = iterate_through_data(session, df, site_info, file_id) if not inserted == 0: logger.info('{} - Inserted {} rows'.format(fname, inserted)) else: logger.info('{} - Database up to date. No rows inserted'.format(fname))
10,648
def get_optimizer_config(): """Gets configuration for optimizer.""" optimizer_config = configdict.ConfigDict() # Learning rate scheduling. One of: ["fixed", "exponential_decay"] optimizer_config.learning_rate_scheduling = "exponential_decay" # Optimization algorithm. One of: ["SGD", "Adam", "RMSprop"]. optimizer_config.optim_type = "Adam" # Adam beta1. optimizer_config.beta1 = 0.9 # Adam beta2. optimizer_config.beta2 = 0.999 # Norm clipping threshold applied for rnn cells (no clip if 0). optimizer_config.norm_clip = 0.0 # Learning rate. optimizer_config.initial_learning_rate = 0.001 # The learning rate decay 'epoch' length. optimizer_config.lr_decay_steps = 12000 # The learning rate decay base, applied per epoch. optimizer_config.lr_decay_base = 0.85 # RMSprop decay. optimizer_config.decay = 0.9 # RMSprop moment. optimizer_config.mom = 0.0 return optimizer_config
10,649
def flat2seq(x: Tensor, num_features: int) -> Tensor: """Reshapes tensor from flat format to sequence format. Flat format: (batch, sequence x features) Sequence format: (batch, sequence, features) Args: x (Tensor): a tensor in the flat format (batch, sequence x features). num_features (int): number of features (last dimension) of the output tensor. Returns: Tensor: the transformed tensor in sequence format (batch, seq, features). """ if not is_flat(x): raise ValueError( 'attempt to reshape tensor from flat format to sequence format failed. ', f'Expected input tensor with 2 dimensions, got {x.ndim}.' ) return x.view(x.shape[0], -1, num_features)
10,650
def run_results(results_data, time_column, pathway_column, table_letters, letters, dataframe_T1, dataframe_T2, dataframe_T3, dataframe_T4, original_transitions, simulation_transitions, intervention_codes, target, individuals, save_location, simulation_name, listed_times, last_arrival, period): """Fill the four results tables.""" Table1_results = T1_results(results_data, time_column, pathway_column, dataframe_T1, original_transitions, simulation_transitions, intervention_codes, target, individuals, save_location, simulation_name, last_arrival, period) Table2_results = T2_results(results_data, pathway_column, letters, dataframe_T2, simulation_name) Table3_results = T3_results(results_data, pathway_column, dataframe_T3, save_location, simulation_name) Table4_results = T4_results(results_data, table_letters, dataframe_T4, listed_times, simulation_name) return(Table1_results, Table2_results, Table3_results, Table4_results)
10,651
def get_spatial_anchors_account(name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSpatialAnchorsAccountResult: """ Get information about an Azure Spatial Anchors Account. ## Example Usage ```python import pulumi import pulumi_azure as azure example = azure.mixedreality.get_spatial_anchors_account(name="example", resource_group_name=azurerm_resource_group["example"]["name"]) pulumi.export("accountDomain", data["azurerm_spatial_anchors_account"]["account_domain"]) ``` :param str name: Specifies the name of the Spatial Anchors Account. Changing this forces a new resource to be created. Must be globally unique. :param str resource_group_name: The name of the resource group in which to create the Spatial Anchors Account. """ __args__ = dict() __args__['name'] = name __args__['resourceGroupName'] = resource_group_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure:mixedreality/getSpatialAnchorsAccount:getSpatialAnchorsAccount', __args__, opts=opts, typ=GetSpatialAnchorsAccountResult).value return AwaitableGetSpatialAnchorsAccountResult( account_domain=__ret__.account_domain, account_id=__ret__.account_id, id=__ret__.id, location=__ret__.location, name=__ret__.name, resource_group_name=__ret__.resource_group_name)
10,652
def test_agilent_3d(): """ 3D time agilent, pipe <-> agilent, pipe """ # prepare Agilent converter vdic, vdata = ng.varian.read(os.path.join(DATA_DIR, "agilent_3d")) uvdic = ng.varian.guess_udic(vdic, vdata) vC = ng.convert.converter() vC.from_varian(vdic, vdata, uvdic) # prepare NMRPipe converter pdic, pdata = ng.pipe.read(os.path.join(DATA_DIR, "agilent_3d", "data", "test%03d.fid")) updic = ng.pipe.guess_udic(pdic, pdata) pC = ng.convert.converter() pC.from_pipe(pdic, pdata, updic) # agilent -> agilent cdic, cdata = vC.to_varian() assert_array_equal(vdata, cdata) check_dic(vdic, cdic, bad_varian_keys) # write and readback td = tempfile.mkdtemp(dir=".") ng.varian.write(td, cdic, cdata) rdic, rdata = ng.varian.read(td) assert_array_equal(vdata, rdata) check_dic(vdic, cdic, bad_varian_keys) shutil.rmtree(td) # agilent -> pipe cdic, cdata = vC.to_pipe() assert_array_equal(pdata, cdata) # check_pdic(pdic,cdic) # XXX don't check dictionary # write and readback tf = tempfile.mktemp(dir=".") + "%03d" ng.pipe.write(tf, cdic, cdata) rdic, rdata = ng.pipe.read(tf) assert_array_equal(pdata, rdata) # check_pdic(pdic,cdic) # XXX don't check dictionary for f in glob.glob(tf[:-4] + "*"): os.remove(f) # pipe -> pipe cdic, cdata = pC.to_pipe() assert_array_equal(pdata, cdata) bpk = list(bad_pipe_keys) bpk.append("FDDISPMAX") # nmrglue doesn't update the MIN/MAX values bpk.append("FDMIN") bpk.append("FDDISPMIN") bpk.append("FDSCALEFLAG") bpk.append("FDMAX") check_pdic(pdic, cdic, bpk, v=True) # write and readback tf = tempfile.mktemp(dir=".") + "%03d" ng.pipe.write(tf, cdic, cdata) rdic, rdata = ng.pipe.read(tf) assert_array_equal(pdata, rdata) check_pdic(pdic, cdic, bpk) for f in glob.glob(tf[:-4] + "*"): os.remove(f) # pipe -> agilent cdic, cdata = pC.to_varian() assert_array_equal(vdata, cdata) check_dic(vdic, cdic, bad_varian_keys) # write and readback td = tempfile.mkdtemp(dir=".") ng.varian.write(td, cdic, cdata) rdic, rdata = ng.varian.read(td) assert_array_equal(vdata, rdata) check_dic(vdic, cdic, bad_varian_keys) shutil.rmtree(td)
10,653
def format_query(str_sql): """Strips all newlines, excess whitespace, and spaces around commas""" stage1 = str_sql.replace("\n", " ") stage2 = re.sub(r"\s+", " ", stage1).strip() stage3 = re.sub(r"(\s*,\s*)", ",", stage2) return stage3
10,654
def make_ytick_labels(current_ticks, n, numstring = ""): """ """ new_ticks = [] for item in current_ticks: if int(item) == item: new_ticks.append(f"{int(item)}{numstring}") else: new_ticks.append(f"{item:.1f}{numstring}") return new_ticks
10,655
def check_token(token): """ Returns `True` if *token* is a valid XML token, as defined by XML Schema Part 2. """ return (token == '' or re.match( "[^\r\n\t ]?([^\r\n\t ]| [^\r\n\t ])*[^\r\n\t ]?$", token) is not None)
10,656
def save_experiment_results(data_dir, prefix, run_time, travel_cost): """ A utility function to save experiment results with time info. Parameters ---------- data_dir: path-like object Path of data folder. prefix: str Name of the experiment. run_time: array List of processing times. travel_cost: array List of travel costs. """ time_dir = os.path.join(data_dir, 'time') cost_dir = os.path.join(data_dir, 'cost') curr_time = datetime.now() postfix = '{0}_{1}_{2}_{3}_{4}_{5}'.format( curr_time.year, curr_time.month, curr_time.day, curr_time.hour, curr_time.minute, curr_time.second) filename = prefix + '_' + postfix + '.npy' time_path = os.path.join(time_dir, filename) cost_path = os.path.join(cost_dir, filename) with open(time_path, 'wb') as f: np.save(f, run_time) with open(cost_path, 'wb') as f: np.save(f, travel_cost)
10,657
def download_audio(url, dir_path): """ Extract audio track from YouTube video and save to given path. :param url: YouTube video URL :param dir_path: Path to save audio file """ opts = { 'format': 'bestaudio/best', 'forcefilename': True, 'outtmpl': str(dir_path), 'cachedir': False, 'noplaylist': True } # Retry mechanism is handled on Huey's side with YoutubeDL(opts) as ydl: info = ydl.extract_info(url, download=False) if info['duration'] > settings.YOUTUBE_LENGTH_LIMIT: raise Exception('Video length too long') ydl.download([url])
10,658
def generate_random_data(n=10): """Generate random data.""" return rand(n)
10,659
def get_basenames(root, path, remove='.py'): """Get file basenames of a folder. Args: root (str): Root path path (str): Path to folder remove (str, optional): Defaults to '.py'. Part to remove from filename. Returns: list: list of names """ regex = re.compile(remove, re.IGNORECASE) files = find_files(root, path, remove=remove) return list(map( lambda file: re.sub(regex, '', os.path.basename(file)), files ))
10,660
def test_json_dumps(): """ Implicitly tests DynamoEncoder """ assert True
10,661
def get_phoible_feature_list(var_to_index): """ Function that takes a var_to_index object and return a list of Phoible segment features :param var_to_index: a dictionary mapping variable name to index(column) number in Phoible data :return : """ return list(var_to_index.keys())[11:]
10,662
def split_data(dataset): """Split pandas dataframe to data and labels.""" data_predictors = [ "Steps_taken", "Minutes_sitting", "Minutes_physical_activity", "HR", "BP", ] X = dataset[data_predictors] y = dataset.Health x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2) return X, y, x_train, x_test, y_train, y_test
10,663
def validate_max_incidents(max_incidents: str) -> None: """ Validates the value of the max_incidents parameter. :param max_incidents: Maximum number of incidents to return in fetch-incident. :raises ValueError: if max incidents parameter is not a positive integer. :return: None """ try: max_incidents_int = int(max_incidents) if max_incidents_int <= 0: raise ValueError except ValueError: raise ValueError(MESSAGES['INVALID_MAX_INCIDENT_ERROR'])
10,664
def test_reservoir_param_type(simulation): """ Verify reservoir param.""" assert isinstance(simulation.res_param, dict)
10,665
def multiple_ticker_tca_aggregated_with_results_example(): """Example of how to do TCa analysis on multiple tickers with TCAResults """ tca_engine = TCAEngineImpl(version=tca_version) # Run a TCA computation for multiple tickers, calculating slippage tca_request = TCARequest(start_date=start_date, finish_date=finish_date, ticker=mult_ticker, tca_type='aggregated', trade_data_store=trade_data_store, market_data_store=market_data_store, results_form=[TimelineResultsForm(metric_name='slippage', by_date='datehour', scalar=10000.0)], metric_calcs=MetricSlippage(), reporting_currency='EUR', summary_display='candlestick') dict_of_df = tca_engine.calculate_tca(tca_request) # Show the output of objects print(dict_of_df.keys()) ### Generate TCA report using high level object # Use higher level TCAResults object to encapsulate results (easier to deal with than a dictionary of DataFrames) tca_results = TCAResults(dict_of_df, tca_request) tca_results.render_computation_charts() print(tca_results.sparse_market_charts.keys()) print(tca_results.sparse_market.keys())
10,666
def add_header(response): """ Add headers to both force latest IE rendering engine or Chrome Frame. """ response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1' return response
10,667
def recursive_isomorphism_counter(smp, matching, *, unspec_cover, verbose, init_changed_cands, tmplt_equivalence=False, world_equivalence=False): """ Recursive routine for solving subgraph isomorphism. Parameters ---------- smp : MatchingProblem A subgraph matching problem matching : list A list of tuples which designate what each template vertex is matched to unspec_cover : np.array Array of the indices of the nodes with more than 1 candidate verbose : bool Flag for verbose output init_changed_cands : np.array A binary array where element i is 1 if vertex i's candidates have changed since the function was last called. The first time it is called, this will be all zeros tmplt_equivalence : bool Flag indicating whether to use template equivalence world_equivalence : bool Flag indicating whether to use world equivalence Returns ------- int The number of isomorphisms """ iterate_to_convergence(smp) candidates = smp.candidates() # If the node cover is empty, the unspec nodes are disconnected. Thus, we # can skip straight to counting solutions to the alldiff constraint problem if len(unspec_cover) == 0: # Elimination filter is not needed here and would be a waste of time node_to_cands = {node: smp.world.nodes[candidates[idx]] for idx, node in enumerate(smp.tmplt.nodes)} return count_alldiffs(node_to_cands) # Since the node cover is not empty, we first choose some valid # assignment of the unspecified nodes one at a time until the remaining # unspecified nodes are disconnected. n_isomorphisms = 0 unspec_cover_cands = candidates[unspec_cover,:] node_idx = pick_minimum_domain_vertex(unspec_cover_cands) cand_idxs = np.argwhere(candidates[node_idx]).flat for i, cand_idx in enumerate(cand_idxs): smp_copy = smp.copy() # candidates_copy[node_idx] = one_hot(cand_idx, world.n_nodes) smp_copy.add_match(node_idx, cand_idx) matching.append((node_idx, cand_idx)) # Remove matched node from the unspecified list new_unspec_cover = unspec_cover[:node_idx] + unspec_cover[node_idx+1:] # recurse to make assignment for the next node in the unspecified cover n_isomorphisms += recursive_isomorphism_counter( smp_copy, matching, unspec_cover=new_unspec_cover, verbose=verbose, init_changed_cands=one_hot(node_idx, smp.tmplt.n_nodes)) # Unmatch template vertex matching.pop() # TODO: more useful progress summary if verbose: print("depth {}: {} of {}".format(len(unspec_cover), i, len(cand_idxs)), n_isomorphisms) # If we are using template equivalence, we can mark for all equivalent # template vertices that cand_idx cannot be a cannot be a candidate. if tmplt_equivalence: for eq_t_vert in smp.tmplt.eq_classes[node_idx]: smp.prevent_match(eq_t_vert, cand_idx) return n_isomorphisms
10,668
def test_zarr_to_anndata(benchmark, tmp_path): """Test loading anndata from zarr""" _, setup_output = setup_anndata(fpath='spots_reduced.csv', out_dir=tmp_path) ann_obj = setup_output['ann_obj'] tmp_out_path = setup_output['out_path'] ann_obj.write_zarr(tmp_out_path) read_func = partial(anndata.read_zarr, tmp_out_path) benchmark.pedantic( read_func, rounds=20 )
10,669
def draw_lane_on_unwarped_frame(frame, left_line, right_line, trsf_mtx_inv): """ Drawing of the unwarped lane lines and lane area to the current frame. Args: left_line: left Line instance right_line: right Line instance trsf_mtx_inv: inverse of the perspective transformation matrix """ # Frame dimensions height, width = frame.shape[0:2] # Generate x and y values for plotting y = np.linspace(0, frame.shape[0] - 1, frame.shape[0]) left_x = left_line.evaluate_average_polynomial(y) right_x = right_line.evaluate_average_polynomial(y) # Create a green lane area between the left and right lane lines warped_lane_area = np.zeros_like(frame) # Warped at first left_points = np.column_stack((left_x, y)).reshape((1, -1, 2)).astype(int) right_points = np.flipud( np.column_stack((right_x, y))).reshape((1, -1, 2)).astype(int) vertices = np.hstack((left_points, right_points)) cv2.fillPoly(warped_lane_area, [vertices], (0, 255, 0)) # Unwarp the lane area unwarped_lane = cv2.warpPerspective( warped_lane_area, trsf_mtx_inv, (width, height)) # Overlay the unwarped lane area onto the frame green_lane_on_frame = cv2.addWeighted(frame, 1., unwarped_lane, 0.3, 0) # Draw the left and right lane polynomials into an empty and warped image warped_lanes = np.zeros_like(frame) left_points = np.column_stack((left_x, y)).reshape(-1, 1, 2) right_points = np.column_stack((right_x, y)).reshape(-1, 1, 2) warped_lanes = cv2.polylines(warped_lanes, [left_points.astype(np.int32)], isClosed=False, color=(255, 0, 0), thickness=30) warped_lanes = cv2.polylines(warped_lanes, [right_points.astype(np.int32)], isClosed=False, color=(0, 0, 255), thickness=30) # Unwarp the lane lines plot lane_lines = cv2.warpPerspective( warped_lanes, trsf_mtx_inv, (width, height)) # Create a mask of the unwarped lane lines to shadow the frame background # a bit gray = cv2.cvtColor(lane_lines, cv2.COLOR_BGR2GRAY) _, mask = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY_INV) # Black-out the area of the lane lines in the frame frame_bg = cv2.bitwise_and( green_lane_on_frame, green_lane_on_frame, mask=mask) # Combine with complete frame to shadow the area of the lane lines a bit shadowed_frame = cv2.addWeighted(frame_bg, 0.6, green_lane_on_frame, 0.4, 0) return cv2.addWeighted(shadowed_frame, 1.0, lane_lines, 1.0, 0)
10,670
def _init_buffer_file() -> str: """Returns file path to the temporary buffer file. Creates the temp directory and temp buffer file. """ if not os.path.exists(".git"): raise NotAGitRepoException(f"No .git folder found. {os.getcwd()} is not a git repo!") file_path = os.path.join(".git", "MKCOMMIT_BUFFER") open(file_path, "w").close() return file_path
10,671
def n_states_of_vec(l, nval): """ Returns the amount of different states a vector of length 'l' can be in, given that each index can be in 'nval' different configurations. """ if type(l) != int or type(nval) != int or l < 1 or nval < 1: raise ValueError("Both arguments must be positive integers.") return nval ** l
10,672
def remove_items_from_dict(a_dict, bad_keys): """ Remove every item from a_dict whose key is in bad_keys. :param a_dict: The dict to have keys removed from. :param bad_keys: The keys to remove from a_dict. :return: A copy of a_dict with the bad_keys items removed. """ new_dict = {} for k in a_dict.keys(): if k not in bad_keys: new_dict[k] = a_dict[k] return new_dict
10,673
def writeObject(img_array, obj_array, bbox): """Writes depression objects to the original image. Args: img_array (np.array): The output image array. obj_array (np.array): The numpy array containing depression objects. bbox (list): The bounding box of the depression object. Returns: np.array: The numpy array containing the depression objects. """ min_row, min_col, max_row, max_col = bbox roi = img_array[min_row:max_row, min_col:max_col] roi[obj_array > 0] = obj_array[obj_array > 0] return img_array
10,674
def create_info_message_files(msg=None, msg_details=None): """ Creates the _alt_msg.txt and _alt_msg_details.txt files for population into the job status json. :param msg: The short info message. Can be a list or a string. Should be shorter than 35 characters. :param msg_details: The message details. :return: """ if msg: with open('_alt_msg.txt', 'w') as f: if isinstance(msg, list): for m in msg: f.write("%s\n" % str(m)) else: f.write("%s\n" % str(msg)) if msg_details: with open('_alt_msg_details.txt', 'w') as f: f.write("%s\n" % msg_details)
10,675
def process_model(current_val): """ :param current_val: model generated by sat solver, atom is satisfied if in the model. :return tuple of sets comprising true and false atoms. """ true_atoms, false_atoms = set(), set() for atom in current_val: if current_val[atom]: true_atoms.add(str(atom)) else: false_atoms.add(str(atom)) return true_atoms, false_atoms
10,676
def filter_strace_output(lines): """ a function to filter QEMU logs returning only the strace entries Parameters ---------- lines : list a list of strings representing the lines from a QEMU log/trace. Returns ------- list a list of strings representing only the strace log entries the entries will also be cleaned up if a page dump occurs in the middle of them """ #we only want the strace lines, so remove/ignore lines that start with the following: line_starts= ['^[\d,a-f]{16}-', # pylint: disable=anomalous-backslash-in-string '^page', '^start', '^host', '^Locating', '^guest_base', '^end_', '^brk', '^entry', '^argv_', '^env_', '^auxv_', '^Trace', '^--- SIGSEGV', '^qemu' ] filter_string = '|'.join(line_starts) filtered = [] prev_line = "" for line in lines: if re.match(filter_string,line): continue # workaround for https://gitlab.com/qemu-project/qemu/-/issues/654 if re.search("page layout changed following target_mmap",line): prev_line = line.replace("page layout changed following target_mmap","") continue if re.match('^ = |^= ', line): line = prev_line+line filtered.append(line) return filtered
10,677
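# Usage sketch for filter_strace_output above, on a small hypothetical QEMU log:
# header noise is dropped, and a return value pushed onto its own line by the
# "page layout changed" message is stitched back onto its syscall entry.
sample = [
    "guest_base  0x7f0000000000",
    "3078555 brk(NULL)page layout changed following target_mmap",
    " = 0x555555554000",
    "3078555 close(3) = 0",
]
print(filter_strace_output(sample))
# ['3078555 brk(NULL) = 0x555555554000', '3078555 close(3) = 0']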
def export_gmf_xml(key, dest, sitecol, imts, ruptures, rlz, investigation_time): """ :param key: output_type and export_type :param dest: name of the exported file :param sitecol: the full site collection :param imts: the list of intensity measure types :param ruptures: an ordered list of ruptures :param rlz: a realization object :param investigation_time: investigation time (None for scenario) """ if hasattr(rlz, 'gsim_rlz'): # event based smltpath = '_'.join(rlz.sm_lt_path) gsimpath = rlz.gsim_rlz.uid else: # scenario smltpath = '' gsimpath = rlz.uid writer = hazard_writers.EventBasedGMFXMLWriter( dest, sm_lt_path=smltpath, gsim_lt_path=gsimpath) writer.serialize( GmfCollection(sitecol, imts, ruptures, investigation_time)) return {key: [dest]}
10,678
def track_state_change(entity_ids, from_state=None, to_state=None): """Decorator factory to track state changes for entity id.""" def track_state_change_decorator(action): """Decorator to track state changes.""" event.track_state_change(HASS, entity_ids, functools.partial(action, HASS), from_state, to_state) return action return track_state_change_decorator
10,679
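# Usage sketch for track_state_change above. HASS and the event helper are
# assumed to be set up elsewhere in this module; the decorated action receives
# HASS first (via functools.partial) followed by the state-change arguments.
@track_state_change("light.kitchen", from_state="off", to_state="on")
def kitchen_light_on(hass, entity_id, old_state, new_state):
    print("{} switched on".format(entity_id))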
def phrase(): """Generate and return random phrase.""" return models.PhraseDescription(text=random_phrase.make_random_text())
10,680
def classify_tweets(text): """ classify tweets for tweets about car accidents and others :param text: tweet text :return: boolean, true if tweet is about car accident, false for others """ return text.startswith(u'בשעה') and ( (u'הולך רגל' in text or u'הולכת רגל' in text or u'נהג' in text or u'אדם' in text) and (u'רכב' in text or u'מכונית' in text or u'אופנוע' in text or u"ג'יפ" in text or u'טרקטור' in text or u'משאית' in text or u'אופניים' in text or u'קורקינט' in text))
10,681
def _build_results(drift_type, raw_metrics):
    """Generate all results for the queried time window or run id of a datadriftdetector.

    :param drift_type: the type of drift the results are reported for.
    :param raw_metrics: raw data drift calculation results.
    :return: a list of result dicts.
    """
    results = []

    for metric in raw_metrics:
        ep = _properties(metric.get_extended_properties())

        # Reset per metric so only qualifying metrics produce a result entry
        # (previously the flag was left unset for non-matching metrics).
        create_new_component = False
        if metric.name == OUTPUT_METRIC_DRIFT_COEFFICIENT:
            # Overall drift coefficient; add to results return object
            create_new_component = True

        if create_new_component:
            res = {KEY_NAME_Drift_TYPE: drift_type}

            # attach result content
            result_list = []
            result_list.append(
                _build_single_result_content(drift_type, metric.value, ep)
            )
            res["result"] = result_list

            results.append(res)

    return results
10,682
def minima():
    """
    Print the minimum gross value for each language in the CSV.
    """
    v = pd.read_csv(r"src\kjpcjs.csv")  # raw string avoids the invalid '\k' escape
    lang = ['Kotlin', 'Java', 'Python', 'C++', 'JavaScript']
    mx = list()
    for i in lang:
        x = min(v[i])
        mx.append(x)
    for i in range(0, 5):
        print(lang[i], "minimum gross is", mx[i])
    data_analyse_menu()
10,683
def no_test_server_credentials(): """ Helper function that returns true when TEST_INTEGRATION_* credentials are undefined or empty. """ client_id = getattr(settings, 'TEST_INTEGRATION_CLIENT_ID', None) username = getattr(settings, 'TEST_INTEGRATION_USERNAME', None) password = getattr(settings, 'TEST_INTEGRATION_PASSWORD', None) app_read = getattr(settings, 'TEST_INTEGRATION_READ_CLIENT_ID', None) app_write = getattr(settings, 'TEST_INTEGRATION_WRITE_CLIENT_ID', None) return not (client_id and username and password and app_read and app_write)
10,684
async def test_veltpvp_status(mcsrvstats_client: Client) -> None:
    """Checks veltpvp returns correct data if status and last played is set."""
    with open("tests/html/test_veltpvp_status.html") as f:
        html = f.read()
    with aioresponses() as m:
        m.get("https://www.veltpvp.com/u/xtreepvps", status=200, body=html)

        client = mcsrvstats_client
        data = await client.veltpvp("xtreepvps")

        assert data.Rank == "Default"
        assert data.LastSeen == "1 day ago"
        assert data.CurrentStatus == "Currently Offline"
        assert data.FirstJoined == "30/05/2021"
        assert data.TimePlayed == "22 hours played"
        assert data.MonthlyViews == 12

        assert data.HCF.Kills == 0
        assert data.HCF.Deaths == 0
        assert data.HCF.KDR == 0.0
        assert data.HCF.Lives == 0
        assert data.HCF.Playtime == "N/A"

        assert data.Practice.Kills == 0
        assert data.Practice.Deaths == 0
        assert data.Practice.Wins == 0
        assert data.Practice.Losses == 0
        assert data.Practice.Fights == 0
        assert data.Practice.GlobalELO == 1000

        assert data.Soup.Kills == 0
        assert data.Soup.Deaths == 0
        assert data.Soup.HighestKillstreak == 0
        assert data.Soup.EventsWon == 0
        assert data.Soup.EventsLost == 0
10,685
def skip_spaces(st: ST) -> Tuple[ST, Any]:
    """
    Skip whitespace characters.
    """
    pos, src = st
    while pos < len(src) and src[pos].isspace():
        pos += 1
    return (pos, src), None
10,686
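# Usage sketch for skip_spaces above, assuming the parser state ST is a
# (position, source) tuple as the function body suggests.
state = (0, "   hello")
new_state, _ = skip_spaces(state)
print(new_state)  # (3, '   hello')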
def mixed_phone_list(): """Return mixed phone number list.""" return _MIXED_PHONE_LIST_
10,687
def bootstrap(config):
    """
    Configure the existing account for subsequent deployer runs.

    Create S3 buckets & folders, upload artifacts required by infrastructure to them.

    Args:
        config: dictionary containing all variable settings required
                to run terraform with

    Returns:
        config dict.
    """
    config['project_config'] = config.get('project_config', s3.get_bucket_name(config, 'data'))
    config['tf_state_bucket'] = config.get('tf_state_bucket', s3.get_bucket_name(config, 'tfstate'))

    logmsg = "{}: Creating S3 project bucket: {}"
    logger.debug(logmsg.format(__name__, config['project_config']))
    s3.create_bucket(config['project_config'])

    logmsg = "{}: Creating S3 Terraform state bucket: {}"
    logger.debug(logmsg.format(__name__, config['tf_state_bucket']))
    s3.create_bucket(config['tf_state_bucket'])

    upload_staged_artifacts(config)

    return config
10,688
def get_report_summary(report): """ Retrieve the docstring summary content for the given report module. :param report: The report module object :returns: the first line of the docstring for the given report module """ summary = None details = get_report_details(report) if not details: return details = details.split('\n') while details and not summary: summary = details.pop(0) return summary
10,689
def transform_toctree(doc: AstDoc, code: Code): """convert toctree to nested <ul> tag""" headers = doc.headers() def get_children(index, node): for sub_index in range(index + 1, len(headers)): child = headers[sub_index] if child.header_level() == node.header_level() + 1: yield sub_index, child else: break def transform_toc(index, node): cls_name = f'toc-{node.name}' target = f'{code.html_name()}' if node.header_level() > 1: target += f'#{node.slug()}' li = f'<a class="{cls_name}" href="{target}">{node.data}</a>' children = list(get_children(index, node)) if children: code.add_toctree('<li>', li, '<ul>') for sub_index, child in children: transform_toc(sub_index, child) code.add_toctree('</ul>', '</li>') else: code.add_toctree('<li>' + li + '</li>') code.add_toctree('<ul>') for index, node in enumerate(headers): if node.name == 'h1': transform_toc(index, node) code.add_toctree('</ul>')
10,690
def load_dataset(dataset_identifier, train_portion='75%', test_portion='25%', partial=None):
    """
    :param dataset_identifier: name of the dataset registered with tensorflow_datasets
    :param train_portion: portion of the data intended for training (unused while the
                          commented-out split below is disabled)
    :param test_portion: portion of the data intended for testing (currently unused)
    :param partial: if given, only take this many examples from the dataset
    :return: dataset with (image, label)
    """
    # splits are not always supported
    # split = ['train[:{0}]'.format(train_portion), 'test[{0}:]'.format(test_portion)]
    ds = tfds.load(dataset_identifier, split='train', shuffle_files=True)

    if partial is not None:
        ds = ds.take(partial)

    return ds
10,691
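# Usage sketch for load_dataset above, assuming tensorflow_datasets is installed
# and "mnist" is available locally or downloadable; only 100 examples are kept.
ds = load_dataset("mnist", partial=100)
for example in ds.take(1):
    print(example.keys())  # e.g. dict_keys(['image', 'label'])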
def stop(ids: List[str]): """Stop one or more instances""" return functools.partial(ec2.stop_instances, InstanceIds=ids)
10,692
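# Usage sketch for stop above: the returned partial defers the call, so the
# instances are only stopped when it is invoked. The ec2 client is assumed to
# be configured elsewhere in this module; the instance id is a placeholder.
stopper = stop(["i-0123456789abcdef0"])
response = stopper()  # calls ec2.stop_instances(InstanceIds=["i-0123456789abcdef0"])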
def add_page_to_index(index, url, content): """ Takes three inputs: - index (dictionary) - url (string) - content (string) Updates the index to include all of the word occurrences found in the page content by adding the url to the word's associated url list. """ words = content.split() for word in words: add_to_index(index, word, url)
10,693
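# Usage sketch for add_page_to_index above, with a minimal stand-in for the
# add_to_index helper that the real crawler module defines elsewhere.
def add_to_index(index, word, url):
    urls = index.setdefault(word, [])
    if url not in urls:
        urls.append(url)

index = {}
add_page_to_index(index, "https://example.com", "hello crawler hello")
print(index)  # {'hello': ['https://example.com'], 'crawler': ['https://example.com']}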
def search_names(word, archive=TAXDB_NAME, name="names.dmp", limit=None): """ Processes the names.dmp component of the taxdump. """ # Needs a taxdump to work. if not os.path.isfile(archive): utils.error("taxdump file not found (download and build it first)") # Open stream into the tarfile. stream = open_tarfile(archive=archive, filename=name, limit=limit) # The pattern may be regular expression. patt = re.compile(word, re.IGNORECASE) # Labels that will be searched. valid = {'scientific name', 'equivalent name', 'genbank common name'} def select(row): taxid, name, label = row[0], row[2], row[6] return label in valid and patt.search(name) # Apply the selector. stream = filter(select, stream) for elems in stream: taxid, name, label = elems[0], elems[2], elems[6] yield taxid, name
10,694
def simple_parse(config_file):
    """
    Do simple parsing and home-brewed type inference.
    """
    config = ConfigObj(config_file, raise_errors=True)
    config.walk(string_to_python_type)
    # Now, parse input and output in the Step definition by hand.
    _step_io_fix(config)
    return config
10,695
def deserialize_columns(headers, frames): """ Construct a list of Columns from a list of headers and frames. """ columns = [] for meta in headers: col_frame_count = meta["frame_count"] col_typ = pickle.loads(meta["type-serialized"]) colobj = col_typ.deserialize(meta, frames[:col_frame_count]) columns.append(colobj) # Advance frames frames = frames[col_frame_count:] return columns
10,696
def server_siteone_socket_udp(parms):
    """ A simple echo server """
    server_host = parms['host']
    server_port = parms['port']
    server_protocol = parms['protocol']
    data_payload = 2048
    # Create a UDP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Bind the socket to the port (the port value is converted to int)
    server_address = (server_host, int(server_port))
    logger.info("Starting up echo server on %s port %s" % server_address)
    sock.bind(server_address)
    while True:
        try:
            logger.info("Waiting to receive message from client")
            data, address = sock.recvfrom(data_payload)
            logger.info("received %s bytes from %s to port: %s" % (len(data), address, server_port))
            logger.info("Data: %s" % data)

            if data:
                sent = sock.sendto(data, address)
                logger.info("sent %s bytes back to %s" % (sent, address))
        except KeyboardInterrupt:
            pass
10,697
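# A minimal client sketch for exercising the echo server above; the host and
# port are placeholders and must match the parms dict passed to the server.
import socket

def echo_once(host="127.0.0.1", port=9999, message=b"ping"):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.sendto(message, (host, port))
        data, _ = sock.recvfrom(2048)
        return data
    finally:
        sock.close()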
def get_site_config(sites_path=None, site_path=None): """Returns `site_config.json` combined with `sites/common_site_config.json`. `site_config` is a set of site wide settings like database name, password, email etc.""" config = {} sites_path = sites_path or getattr(local, "sites_path", None) site_path = site_path or getattr(local, "site_path", None) if sites_path: common_site_config = os.path.join(sites_path, "common_site_config.json") if os.path.exists(common_site_config): config.update(get_file_json(common_site_config)) if site_path: site_config = os.path.join(site_path, "site_config.json") if os.path.exists(site_config): config.update(get_file_json(site_config)) elif local.site and not local.flags.new_site: print("{0} does not exist".format(local.site)) sys.exit(1) #raise IncorrectSitePath, "{0} does not exist".format(site_config) return _dict(config)
10,698
def words_with_joiner(joiner): """Pass through words unchanged, but add a separator between them.""" def formatter_function(i, word, _): return word if i == 0 else joiner + word return (NOSEP, formatter_function)
10,699
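# Usage sketch for words_with_joiner above: the formatter function is applied by
# hand to show how the joiner is inserted between words. NOSEP is part of the
# surrounding formatter machinery and is ignored here.
_, fmt = words_with_joiner("::")
print("".join(fmt(i, w, None) for i, w in enumerate(["foo", "bar", "baz"])))  # foo::bar::baz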