content: string (length 22 to 815k)
id: int64 (0 to 4.91M)
def parse_args(): """parse args for binlog2sql""" parser = argparse.ArgumentParser(description='Parse MySQL binlog to SQL you want', add_help=False) connect_setting = parser.add_argument_group('connect setting') connect_setting.add_argument('-h', '--host', dest='host', type=str, help='Host the MySQL database server located', default='127.0.0.1') connect_setting.add_argument('-u', '--user', dest='user', type=str, help='MySQL Username to log in as', default='root') connect_setting.add_argument('-p', '--password', dest='password', type=str, nargs='*', help='MySQL Password to use', default='') connect_setting.add_argument('-P', '--port', dest='port', type=int, help='MySQL port to use', default=3306) interval = parser.add_argument_group('interval filter') interval.add_argument('--start-file', dest='start_file', type=str, help='Start binlog file to be parsed') interval.add_argument('--start-position', '--start-pos', dest='start_pos', type=int, help='Start position of the --start-file', default=4) interval.add_argument('--stop-file', '--end-file', dest='end_file', type=str, help="Stop binlog file to be parsed. default: '--start-file'", default='') interval.add_argument('--stop-position', '--end-pos', dest='end_pos', type=int, help="Stop position. default: latest position of '--stop-file'", default=0) interval.add_argument('--start-datetime', dest='start_time', type=str, help="Start time. format %%Y-%%m-%%d %%H:%%M:%%S", default='') interval.add_argument('--stop-datetime', dest='stop_time', type=str, help="Stop Time. format %%Y-%%m-%%d %%H:%%M:%%S;", default='') parser.add_argument('--stop-never', dest='stop_never', action='store_true', default=False, help="Continuously parse binlog. default: stop at the latest event when you start.") parser.add_argument('--help', dest='help', action='store_true', help='help information', default=False) schema = parser.add_argument_group('schema filter') schema.add_argument('-d', '--databases', dest='databases', type=str, nargs='*', help='dbs you want to process', default='') schema.add_argument('-t', '--tables', dest='tables', type=str, nargs='*', help='tables you want to process', default='') event = parser.add_argument_group('type filter') event.add_argument('--only-dml', dest='only_dml', action='store_true', default=False, help='only print dml, ignore ddl') event.add_argument('--sql-type', dest='sql_type', type=str, nargs='*', default=['INSERT', 'UPDATE', 'DELETE'], help='Sql type you want to process, support INSERT, UPDATE, DELETE.') # exclusive = parser.add_mutually_exclusive_group() parser.add_argument('-K', '--no-primary-key', dest='no_pk', action='store_true', help='Generate insert sql without primary key if exists', default=False) parser.add_argument('-B', '--flashback', dest='flashback', action='store_true', help='Flashback data to start_position of start_file', default=False) parser.add_argument('--back-interval', dest='back_interval', type=float, default=1.0, help="Sleep time between chunks of 1000 rollback sql. set it to 0 if do not need sleep") return parser
5,338,600
def split(meta_attribute, files, dest, search_dir):
    """split the subtree files into fragments"""
    for file in files:
        with open(file, "r") as fragment_content:
            content = fragment_content.read()
            for fragment in content.split("[id"):
                if not fragment.strip():
                    continue  # make sure fragment not empty
                if meta_attribute in fragment:
                    replaceid = get_replaceid(fragment)
                    file_name = str(find(replaceid, search_dir))
                else:
                    file_name = dest + "/" + str(get_file_name(fragment)) + ".asciidoc"
                all_files.append(file_name)
                if meta_attribute not in fragment:
                    with open(file_name, "w") as f:
                        f.write("[id" + fragment)
5,338,601
def lnLikelihoodDouble(parameters, values, errors, weights=None): """ Calculates the total log-likelihood of an ensemble of values, with uncertainties, for a double Gaussian distribution (two means and two dispersions). INPUTS parameters : model parameters (see below) values : data values errors : data uncertainties OPTIONS weights : weights on each data point [default: None, ie unweighted] PARAMETERS mean1 : model mean 1 dipsersion1 : model dispersion 1 mean2 : model mean 2 dipsersion2 : model dispersion 2 f : fraction of component 1 """ mean1, dispersion1, mean2, dispersion2, f = parameters # insist that mean1 is less than mean2 or solution is degenerate if mean1>mean2: return -np.inf # check for unit consistency if getattr(mean1, "unit", None) is not None \ and getattr(dispersion1, "unit", None) is not None \ and getattr(mean2, "unit", None) is not None \ and getattr(dispersion2, "unit", None) is not None \ and getattr(values, "unit", None) is not None \ and getattr(errors, "unit", None) is not None: mean1 = mean1.to(values.unit) dispersion1 = dispersion1.to(values.unit) mean2 = mean2.to(values.unit) dispersion2 = dispersion2.to(values.unit) errors = errors.to(values.unit) # require positive dispersions dispersion1 = np.abs(dispersion1) dispersion2 = np.abs(dispersion2) # likelihood of each data point conv_dispersion1 = np.sqrt(dispersion1**2+errors**2) conv_dispersion2 = np.sqrt(dispersion2**2+errors**2) likelihoods = f*stats.norm.pdf(values, mean1, conv_dispersion1) \ + (1-f)*stats.norm.pdf(values, mean2, conv_dispersion2) # check that all are positive (should be!) and non-zero if np.all(likelihoods<=0): return -np.inf # set zeros (or negatives) to the lowest non-zero value likelihoods[likelihoods<=0] = likelihoods[likelihoods>0].min()*1e-5 # and take the log ln_likelihoods = np.log(likelihoods) # multiply by weights: if weights is not None: ln_likelihoods *= weights # remove -infinities ln_likelihoods[ln_likelihoods==-np.inf] \ = ln_likelihoods[ln_likelihoods>-np.inf].min() # total likelihood total_ln_likelihood = np.sum(ln_likelihoods) # renormalise by weights if weights is not None: total_ln_likelihood *= np.size(ln_likelihoods)/np.sum(weights) return total_ln_likelihood
5,338,602
def init_routes(): """Register RESTFul API routes""" # health page API api.add_resource(HealthResource, "/", "/health") # system time API api.add_resource(CurrentTimeResource, "/api/currenttime") # APIs for template library api.add_resource(TemplateResource, "/api/template") # query, create or delete template api.add_resource(TemplateCreateByFileResource, "/api/template/file") # create template from file api.add_resource(TemplateListResource, "/api/template/list") # list templates # APIs for hackathon query that not related to user or admin api.add_resource(HackathonResource, "/api/hackathon") # query hackathon api.add_resource(HackathonListResource, "/api/hackathon/list") # list hackathons api.add_resource(HackathonStatResource, "/api/hackathon/stat") # get statistics of hackathon api.add_resource(HackathonRegistrationListResource, "/api/hackathon/registration/list") # list registered users api.add_resource(HackathonGrantedAwardsResource, "/api/hackathon/grantedawards") # list registered users api.add_resource(GranteAwardsResource, "/api/grantedawards") api.add_resource(TalentResource, "/api/talent/list") # list talents(达人) api.add_resource(HackathonTagNamesResource, "/api/tags") # all distinct tag names api.add_resource(HackathonNoticeListResource, "/api/hackathon/notice/list") # list specfic notices # APIs for user(participant) to join hackathon api.add_resource(GuacamoleResource, "/api/user/guacamoleconfig") # get remote paras for guacamole api.add_resource(UserResource, "/api/user") # get current login user api.add_resource(UserLoginResource, "/api/user/login") # user login/logout api.add_resource(UserProfileResource, "/api/user/profile") # update user profile api.add_resource(UserPictureResource, "/api/user/picture") # update user picture api.add_resource(UserTemplateListResource, "/api/hackathon/template") # list templates for specific user api.add_resource(UserHackathonLikeResource, "/api/user/hackathon/like") # like or unlike hackathon api.add_resource(UserRegistrationResource, "/api/user/registration") # register hackathon api.add_resource(UserHackathonListResource, "/api/user/registration/list") # participated hackathon list of user api.add_resource(UserExperimentResource, "/api/user/experiment") # start or stop experiment api.add_resource(UserNoticeReadResource, "/api/user/notice/read") # read the notice api.add_resource(UserFileResource, "/api/user/file") # login-in user can upload files about team, hackathon or user # team APIs api.add_resource(TeamResource, "/api/team") # create, update, dismiss and query team api.add_resource(MyTeamResource, "/api/team/my") # get team of current login user api.add_resource(HackathonTeamListResource, "/api/hackathon/team/list", "/api/admin/team/list") # list teams of hackathon api.add_resource(TeamMemberResource, "/api/team/member") # join or leave team, approve member api.add_resource(TeamScoreResource, "/api/team/score") # query or set score by judge api.add_resource(TeamShowResource, "/api/team/show") # query or add show by leader api.add_resource(HackathonTeamShowResource, "/api/hackathon/show/list") # show list of a hackathon api.add_resource(UserTeamShowResource, "/api/user/show/list") # get all team_shows of a user api.add_resource(TeamMemberListResource, "/api/team/member/list") # list team members api.add_resource(TeamTemplateResource, "/api/team/template") # select or unselect template for team # APIs for admin to manage hackathon and hackathon resources, features and users api.add_resource(AdminHackathonResource, 
"/api/admin/hackathon") # create/update hackathon api.add_resource(AdminHackathonOnLineResource, "/api/admin/hackathon/online") api.add_resource(AdminHackathonApplyOnLineResource, "/api/admin/hackathon/applyonline") api.add_resource(AdminHackathonOffLineResource, "/api/admin/hackathon/offline") api.add_resource(AdminHackathonConfigResource, "/api/admin/hackathon/config") # set hackathon config api.add_resource(AdminHackathonOrganizerResource, "/api/admin/hackathon/organizer") # manage hackathon organizers api.add_resource(HackathonCheckNameResource, "/api/admin/hackathon/checkname") # check hackathon name exists api.add_resource(AdminHackathonListResource, "/api/admin/hackathon/list") # get entitled hackathon list api.add_resource(AdminAzureResource, '/api/admin/azure') # manage azure subscription and certs api.add_resource(AdminAzureCheckSubIdResource, '/api/admin/azure/checksubid') api.add_resource(AdminRegisterListResource, "/api/admin/registration/list") # get registered users api.add_resource(AdminRegisterResource, "/api/admin/registration") # create, delete or query registration api.add_resource(AdminHackathonTemplateListResource, "/api/admin/hackathon/template/list") # get templates of hackathon api.add_resource(AdminHackathonTemplateResource, "/api/admin/hackathon/template") # select template for hackathon api.add_resource(AdminExperimentResource, "/api/admin/experiment") # start expr by admin api.add_resource(AdminExperimentListResource, "/api/admin/experiment/list") # get expr list of hackathon api.add_resource(HackathonAdminListResource, "/api/admin/hackathon/administrator/list") # list admin/judges api.add_resource(HackathonAdminResource, "/api/admin/hackathon/administrator") # add or delete admin/judge api.add_resource(AdminTeamScoreListResource, "/api/admin/team/score/list") # select or unselect template for team api.add_resource(HackathonAwardResource, "/api/admin/hackathon/award") # manage award content for hackathon api.add_resource(HackathonAwardListResource, "/api/admin/hackathon/award/list") # list award content for hackathon api.add_resource(TeamAwardResource, "/api/admin/team/award") # list award content for hackathon api.add_resource(UserListResource, "/api/admin/user/list") # search and get all related users api.add_resource(AdminHostserverListResource, "/api/admin/hostserver/list") # get the list of host server api.add_resource(AdminHostserverResource, "/api/admin/hostserver") # create/update/delete/get a host server api.add_resource(AdminHackathonNoticeResource, "/api/admin/hackathon/notice")
5,338,603
def clear_dir(path):
    """Empty out the image directory."""
    for f in os.listdir(path):
        f_path = os.path.join(path, f)
        if os.path.isfile(f_path) or os.path.islink(f_path):
            os.unlink(f_path)
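# Minimal usage sketch using only the standard library; the directory and
# file names below are illustrative, not taken from the original project.
import os
import tempfile

demo_dir = tempfile.mkdtemp()
for name in ("a.png", "b.png"):
    with open(os.path.join(demo_dir, name), "w") as fh:
        fh.write("placeholder")

clear_dir(demo_dir)
assert os.listdir(demo_dir) == []  # directory itself is kept, its files are gone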
5,338,604
def assert_processor_available(processor: str) -> None: """ Assert that a specific PDF processor is available. Args: processor: a PDF processor type from :class:`Processors` Raises: AssertionError: if bad ``processor`` RuntimeError: if requested processor is unavailable """ if processor not in [Processors.XHTML2PDF, Processors.WEASYPRINT, Processors.PDFKIT]: raise AssertionError("rnc_pdf.set_pdf_processor: invalid PDF processor" " specified") if processor == Processors.WEASYPRINT and not weasyprint: raise RuntimeError("rnc_pdf: Weasyprint requested, but not available") if processor == Processors.XHTML2PDF and not xhtml2pdf: raise RuntimeError("rnc_pdf: xhtml2pdf requested, but not available") if processor == Processors.PDFKIT and not pdfkit: raise RuntimeError("rnc_pdf: pdfkit requested, but not available")
5,338,605
def moleculeEntry(request, adjlist): """ Returns an html page which includes the image of the molecule and its corresponding adjacency list/SMILES/InChI, as well as molecular weight info and a button to retrieve thermo data. Basically works as an equivalent of the molecule search function. """ adjlist = urllib.parse.unquote(adjlist) try: molecule = Molecule().from_adjacency_list(adjlist) except: return HttpResponseBadRequest('<h1>Bad Request (400)</h1><p>Invalid adjacency list.</p>') structure = getStructureInfo(molecule) mol_weight = molecule.get_molecular_weight() old_adjlist = '' try: old_adjlist = molecule.to_adjacency_list(remove_h=True, old_style=True) except: pass smiles = '' try: smiles = molecule.to_smiles() except ValueError: pass inchi = '' try: inchi = molecule.to_inchi() except ValueError: pass return render(request, 'moleculeEntry.html', {'structure': structure, 'smiles': smiles, 'adjlist': adjlist, 'mol_weight': mol_weight, 'old_adjlist': old_adjlist})
5,338,606
def has_user_id(id: int):
    """Checks if the Command Author's ID is the same as the ID passed into the function"""
    def predicate(ctx) -> bool:
        if ctx.author.id == id:
            return True
        raise MissingID(id, "Author")
    return commands.check(predicate)
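# Hedged usage sketch for a discord.py bot command. It assumes `commands`
# above is `discord.ext.commands` and that `MissingID` is a custom check
# exception defined elsewhere in the project; the prefix and numeric ID are
# placeholders only.
import discord
from discord.ext import commands

bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())

@bot.command()
@has_user_id(123456789012345678)  # only this author may invoke the command
async def shutdown(ctx):
    await ctx.send("Shutting down.")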
5,338,607
def _polar_gbps(out, in_args, params, per_iter=False): """ `speed_function` for `benchmark` estimating the effective bandwidth of a polar decomposition in GB/s. The number of elements is estimated as 2 * the size of the input. For a matrix multiplication of dimensions `m, n, k` that took `dt` seconds, we define `GB/s := (GB of input + GB of output) / (1E9 * dt)`. """ out_rows, out_cols, dtype = params[:3] if out_cols is None: out_cols = out_rows dt = out[0] n_elements = 2 * out_rows * out_cols result = benchmark_utils.gbps(n_elements, dtype, dt) header = "GB/s" return benchmark_utils.per_iter(per_iter, out[-1], result, header)
5,338,608
def is_FreeMonoid(x): """ Return True if `x` is a free monoid. EXAMPLES:: sage: from sage.monoids.free_monoid import is_FreeMonoid sage: is_FreeMonoid(5) False sage: is_FreeMonoid(FreeMonoid(7,'a')) True sage: is_FreeMonoid(FreeAbelianMonoid(7,'a')) False sage: is_FreeMonoid(FreeAbelianMonoid(0,'')) False """ return isinstance(x, FreeMonoid_class)
5,338,609
def saveItemImage(item): """Saves the image for the item to disk. """ itemName = item['data']['name'] console('Getting image for: {}'.format(itemName)) fullImgUrl = '{}{}.jpg'.format(mfc_img_base, item['data']['id']) req = urllib.request.Request(fullImgUrl, \ headers={'User-Agent' : "Magic Browser"}) foutName = '{}/{}.jpg'.format(imageFolder, \ re.sub(r'([^\s\w]|_)+', '', itemName)[:32]) with urllib.request.urlopen(req) as response, \ open(foutName, 'wb+') as fout: shutil.copyfileobj(response, fout)
5,338,610
def valida_data(data_ida, data_volta, data_pesquisa, lista_erro):
    """Validate that the departure date is not later than the return date
    and that the departure date is not earlier than the search (current) date."""
    if data_ida > data_volta:
        lista_erro['data_volta'] = 'Return date cannot be earlier than the departure date'
    if data_ida < data_pesquisa:
        lista_erro['data_ida'] = 'Departure date cannot be earlier than today'
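# Illustrative call, assuming datetime.date objects are passed in (the
# original call sites are not shown); the dates here are made up.
from datetime import date

erros = {}
valida_data(
    data_ida=date(2023, 5, 10),
    data_volta=date(2023, 5, 1),   # earlier than departure -> error recorded
    data_pesquisa=date(2023, 5, 2),
    lista_erro=erros,
)
print(erros)  # {'data_volta': 'Return date cannot be earlier than the departure date'}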
5,338,611
def install_hook() -> None:
    """
    Installs the remote debugger as standard debugging method and calls it
    when using the builtin `breakpoint()`
    """
    _previous_breakpoint_hook = sys.breakpointhook
    sys.breakpointhook = set_trace
5,338,612
def get_institutes(url): """Get institutes objects and add them to data""" driver.get(url) institutes = driver.find_elements_by_class_name('item') for institute in institutes: obj = {} obj["initials"] = institute.find_element_by_tag_name('a').text.split( '\n')[0] obj["name"] = institute.find_element_by_tag_name('a').text.split( '\n')[-1] data["institutes"].append(obj)
5,338,613
def get_quote_data(ticker): """Inputs: @ticker Returns a dictionary containing over 70 elements corresponding to the input ticker, including company name, book value, moving average data, pre-market / post-market price (when applicable), and more.""" site = "https://query1.finance.yahoo.com/v7/finance/quote?symbols=" + ticker resp = requests.get(site) if not resp.ok: raise AssertionError( """Invalid response from server. Check if ticker is valid.""" ) json_result = resp.json() info = json_result["quoteResponse"]["result"] return info[0]
5,338,614
def preprocess(network_path, network_dynamics_file = None, number_cores = 1, special_nodes = None): """ computes the preprocessed travel time tables for all different travel time files creates the travel time tables "tt_matrix" and "dis_matrix" and stores them in the network-folder :param network_path: path to the corresponding network file :param number_cores: number of travel_time files that are preprocessed in parallel :param network_dynamics_file: network dynamicsfile :param special_nodes: None: preprocessing between all nodes with the "is_stop_only"-attribute list of node indices: preprocessing between all given nodes Note: in both cases they have to be sorted! """ travel_times_to_compute = get_travel_times_available(network_path, network_dynamics_file) print(travel_times_to_compute) if number_cores == 1: for time in travel_times_to_compute: createStopNodeTravelInfoTables(network_path, network_dynamics_file, sim_time=time, special_nodes=special_nodes) else: p = Pool(number_cores) x = [p.apply_async(createStopNodeTravelInfoTables, args=(network_path, network_dynamics_file, t, special_nodes)) for t in travel_times_to_compute] y = [z.get() for z in x] p.close()
5,338,615
def print_parameter_summary(model) -> None:
    """ Prints the number of trainable and non-trainable parameters in the model. """
    # Note: use model.summary() for a detailed summary of layers.
    trainable_count = np.sum([K.count_params(w) for w in model.trainable_weights])
    non_trainable_count = np.sum([K.count_params(w) for w in model.non_trainable_weights])
    total_count = trainable_count + non_trainable_count
    print(f"Total params: {total_count:,}")
    print(f"Trainable params: {trainable_count:,}")
    print(f"Non-trainable params: {non_trainable_count:,}")
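# Possible usage, assuming `K` above is `tensorflow.keras.backend` and `np`
# is NumPy (a common convention, but not shown in this snippet); the toy
# model is only for illustration.
import numpy as np
from tensorflow import keras
from tensorflow.keras import backend as K

model = keras.Sequential([
    keras.layers.Dense(8, activation="relu", input_shape=(4,)),
    keras.layers.Dense(1),
])
print_parameter_summary(model)  # e.g. "Total params: 49"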
5,338,616
def _import_stack_component( component_type: StackComponentType, component_config: Dict[str, str] ) -> str: """import a single stack component with given type/config""" component_type = StackComponentType(component_type) component_name = component_config.pop("name") component_flavor = component_config.pop("flavor") # make sure component can be registered, otherwise ask for new name while True: # check if component already exists try: other_component = _get_component_as_dict( component_type, component_name ) # component didn't exist yet, so we create it. except KeyError: break # check whether other component has exactly same config as export other_is_same = True for key, value in component_config.items(): if key not in other_component or other_component[key] != value: other_is_same = False break # component already exists and is correctly configured -> done if other_is_same: return component_name # component already exists but with different config -> rename display_name = _component_display_name(component_type) component_name = click.prompt( f"A component of type '{display_name}' with the name " f"'{component_name}' already exists, " f"but is configured differently. " f"Please choose a different name.", type=str, ) _register_stack_component( component_type=component_type, component_name=component_name, component_flavor=component_flavor, **component_config, ) return component_name
5,338,617
def show(*images):
    """Display images on screen. """
    import matplotlib.pyplot as plt
    L = len(images)
    for i in range(L):
        plt.subplot(1, L, i + 1)
        plt.imshow(images[i], cmap=plt.cm.gray, interpolation='nearest')
    plt.show()
5,338,618
def unpack_range(a_range):
    """Extract chromosome, start, end from a string or tuple.

    Examples::

        "chr1" -> ("chr1", None, None)
        "chr1:100-123" -> ("chr1", 99, 123)
        ("chr1", 100, 123) -> ("chr1", 100, 123)
    """
    if not a_range:
        return Region(None, None, None)
    if isinstance(a_range, basestring):
        if ':' in a_range and '-' in a_range:
            return from_label(a_range, keep_gene=False)
        return Region(a_range, None, None)
    if isinstance(a_range, (list, tuple)):
        if len(a_range) == 3:
            return Region(*a_range)
        elif len(a_range) == 4:
            return Region(*a_range[:3])
    raise ValueError("Not a range: %r" % a_range)
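# Hedged example of the non-string-label paths. `Region`, `basestring` and
# `from_label` live elsewhere in the original module; the stubs below exist
# only so the sketch runs on Python 3 and are assumptions, not the real
# definitions.
from collections import namedtuple

Region = namedtuple("Region", "chromosome start end")
basestring = str

print(unpack_range("chr1"))              # Region(chromosome='chr1', start=None, end=None)
print(unpack_range(("chr1", 100, 123)))  # Region(chromosome='chr1', start=100, end=123)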
5,338,619
def _old_extract_roles(x, roles):
    """
    x is [N, B, R, *shape]
    roles is [N, B]
    """
    N, B, R, *shape = x.shape
    assert roles.shape == (N, B)
    parts = []
    for n in range(N):
        parts.append(x[n:n+1, range(B), roles[n]])
    return torch.cat(parts, dim=0)
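# Small self-contained check of the indexing logic (assumes PyTorch is the
# `torch` referenced above): for each (n, b) it picks the role-th R slice.
import torch

N, B, R, D = 2, 3, 4, 5
x = torch.arange(N * B * R * D, dtype=torch.float32).reshape(N, B, R, D)
roles = torch.randint(0, R, (N, B))

out = _old_extract_roles(x, roles)
print(out.shape)  # torch.Size([2, 3, 5])
assert torch.equal(out[1, 2], x[1, 2, roles[1, 2]])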
5,338,620
def read_cam_intr(file_path):
    """Reading camera intrinsic.

    Args:
        file_path (str): File path.
    Return:
        k (numpy.array): Camera intrinsic matrix, dim = (3, 3).
    """
    assert os.path.exists(file_path), '{}: file not found'.format(file_path)
    with open(file_path, 'r') as f:
        k_str = f.readlines()[0].strip()
    k_str_list = k_str.split(',')
    # np.float was removed in NumPy 1.24; the builtin float is equivalent here.
    k = np.array(k_str_list).astype(float)
    return k.reshape((3, 3))
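# Round-trip sketch: write a comma-separated 3x3 intrinsic matrix on one
# line and read it back. The file name and values are illustrative only.
import os
import tempfile
import numpy as np

fd, path = tempfile.mkstemp(suffix=".txt")
with os.fdopen(fd, "w") as fh:
    fh.write("500.0,0,320.0,0,500.0,240.0,0,0,1\n")

K_mat = read_cam_intr(path)
print(K_mat.shape)  # (3, 3)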
5,338,621
def setup(i):
    """
    See "install" API with skip_process=yes
    """
    i['skip_process'] = 'yes'
    return install(i)
5,338,622
def post_merge_request(profile, payload): """Do a POST request to Github's API to merge. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. payload A dict of information to pass to Github's API as the payload for a merge request, something like this:: { "base": <base>, "head": <head>, "commit_message": <mesg>} Returns: The response returned by the ``requests`` library when it does the POST request. """ repo = profile["repo"] url = GITHUB_API_BASE_URL + "repos/" + repo + "/merges" headers = get_headers(profile) response = requests.post(url, json=payload, headers=headers) return response
5,338,623
def is_valid_table_name(cur, table_name):
    """
    Checks whether a name is for a table in the database.

    Note:
        Copied from utils.database for use in testing, to avoid a circular
        dependency between tests and implementation.

    Args:
        cur: sqlite3 database cursor object
        table_name (str): name to check

    Returns:
        True if valid, False otherwise
    """
    query = """
        SELECT 1
        FROM sqlite_master
        WHERE type == 'table'
        AND name == ?
        """
    res = cur.execute(query, (table_name,))
    return res.fetchone() is not None
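# Self-contained check against an in-memory SQLite database; the table name
# is arbitrary and only used for illustration.
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE measurements (id INTEGER PRIMARY KEY, value REAL)")

print(is_valid_table_name(cur, "measurements"))   # True
print(is_valid_table_name(cur, "missing_table"))  # False
conn.close()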
5,338,624
def create_mssql_pyodbc(username, password, host, port, database, **kwargs):  # pragma: no cover
    """
    create an engine connected to a mssql database using pyodbc.
    """
    return create_engine(
        _create_mssql_pyodbc(username, password, host, port, database),
        **kwargs
    )
5,338,625
def write_idl(length_y, file_obj, plot_name, boxes, scores = None): """ Function that writes bounding boxes into an idl file. Inputs: length_y: (int) length of the Y direction of the image (needed to convert coordinate system origin) file_obj: (file obj) file object of the idl file plot_name: (string) plotname of the plot with bounding boxes. boxes: (list) List of bounding boxes scores : (list) Optional, confidence score for each bouding boxes. """ string_prep = '"{plot_name}":'.format(plot_name=plot_name) if scores is None: for box in boxes: string_prep += " ({}, {}, {}, {}),".format(int(np.round(box.x0)),int(length_y-np.round(box.y1)), int(np.round(box.x1)),int(length_y-np.round(box.y0))) else: for box, score in zip(boxes,scores): string_prep += " ({}, {}, {}, {}):{},".format(int(np.round(box.x0)),int(length_y-np.round(box.y1)), int(np.round(box.x1)),int(length_y-np.round(box.y0)),score) string_prep = string_prep[:-1] string_prep+=';' file_obj.write(string_prep) file_obj.write("\n")
5,338,626
def _wait_for_multiple(driver, locator_type, locator, timeout, wait_for_n, visible=False): """Waits until `wait_for_n` matching elements to be present (or visible). Returns located elements when found. Args: driver: Selenium web driver instance locator_type: type of locator (e.g. By.CSS_SELECTOR or By.TAG_NAME) locator: name of tag, class, etc. to wait for timeout: how long to wait for presence/visibility of element wait_for_n: wait until this number of matching elements are present/visible visible: if True, require that elements are not only present, but visible """ wait = WebDriverWait(driver, timeout) def multiple_found(driver): elements = driver.find_elements(locator_type, locator) if visible: elements = [e for e in elements if e.is_displayed()] if len(elements) < wait_for_n: return False return elements return wait.until(multiple_found)
5,338,627
def relu(x):
    """
    x -- Output of the linear layer, of any shape

    Returns:
    Vec -- Post-activation parameter, of the same shape as x
    cash -- for computing the backward pass efficiently
    """
    Vec = np.maximum(0, x)
    assert(Vec.shape == x.shape)
    cash = x
    return Vec, cash
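# Quick sanity check with NumPy (assumes `np` above is NumPy).
import numpy as np

z = np.array([[-1.5, 0.0, 2.0], [3.0, -0.1, 0.5]])
a, cache = relu(z)
print(a)      # negatives clamped to 0, positives unchanged
print(cache)  # the original input, kept for the backward pass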
5,338,628
def _declare_swiftdoc( *, actions, arch, label_name, output_discriminator, swiftdoc): """Declares the swiftdoc for this Swift framework. Args: actions: The actions provider from `ctx.actions`. arch: The cpu architecture that the generated swiftdoc belongs to. label_name: Name of the target being built. output_discriminator: A string to differentiate between different target intermediate files or `None`. swiftdoc: A File referencing the swiftdoc file from a SwiftInfo provider. Returns: A File referencing the intermediate swiftdoc. """ bundle_doc = intermediates.file( actions = actions, target_name = label_name, output_discriminator = output_discriminator, file_name = "{}.swiftdoc".format(arch), ) actions.symlink( target_file = swiftdoc, output = bundle_doc, ) return bundle_doc
5,338,629
def threshold_image(gray_image, name_bw, threshold):
    """
    This computes the binary image of the input image using a threshold
    :param gray_image: input image
    :param threshold: input threshold
    :param name_bw: name of the binary image
    :return: BW image
    """
    # perform Gaussian blurring to remove unwanted noisy components
    blurred = cv2.GaussianBlur(gray_image, (5, 5), 0)
    # convert the smooth image into a bw image
    thresh = cv2.threshold(blurred, threshold, 255, cv2.THRESH_BINARY)[1]
    # perform morphological operation to remove small components
    thresh = cv2.erode(thresh, None, iterations=1)
    thresh = cv2.dilate(thresh, None, iterations=1)
    # store the bw image
    cv2.imwrite("threshold_" + name_bw, thresh)
    return thresh
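# Synthetic example (assumes `cv2` above is opencv-python): build an 8-bit
# grayscale image in memory and threshold it. Note the function writes
# "threshold_demo.png" to the current directory as a side effect.
import cv2
import numpy as np

gray = np.zeros((100, 100), dtype=np.uint8)
gray[30:70, 30:70] = 200  # bright square on a dark background

bw = threshold_image(gray, "demo.png", threshold=127)
print(bw.max(), bw.min())  # 255 0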
5,338,630
def kalman_update( states, upper_chols, loadings, control_params, meas_sd, measurements, controls, log_mixture_weights, debug, ): """Perform a Kalman update with likelihood evaluation. Args: states (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states) with pre-update states estimates. upper_chols (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states, n_states) with the transpose of the lower triangular cholesky factor of the pre-update covariance matrix of the state estimates. loadings (jax.numpy.array): 1d array of length n_states with factor loadings. control_params (jax.numpy.array): 1d array of length n_controls. meas_sd (float): Standard deviation of the measurement error. measurements (jax.numpy.array): 1d array of length n_obs with measurements. May contain NaNs if no measurement was observed. controls (jax.numpy.array): Array of shape (n_obs, n_controls) with data on the control variables. log_mixture_weights (jax.numpy.array): Array of shape (n_obs, n_mixtures) with the natural logarithm of the weights of each element of the mixture of normals distribution. debug (bool): If true, the debug_info contains the residuals of the update and their standard deviations. Otherwise, it is an empty dict. Returns: states (jax.numpy.array): Same format as states. new_states (jax.numpy.array): Same format as states. new_upper_chols (jax.numpy.array): Same format as upper_chols new_log_mixture_weights: (jax.numpy.array): Same format as log_mixture_weights new_loglikes: (jax.numpy.array): 1d array of length n_obs debug_info (dict): Empty or containing residuals and residual_sds """ n_obs, n_mixtures, n_states = states.shape not_missing = jnp.isfinite(measurements) # replace missing measurements and controls by reasonable fill values to avoid NaNs # in the gradient calculation. All values that are influenced by this, are # replaced by other values later. Choosing the average expected # expected measurements without controls as fill value ensures that all numbers # are well defined because the fill values have a reasonable order of magnitude. # See https://github.com/tensorflow/probability/blob/main/discussion/where-nan.pdf # and https://jax.readthedocs.io/en/latest/faq.html # for more details on the issue of NaNs in gradient calculations. _safe_controls = jnp.where(not_missing.reshape(n_obs, 1), controls, 0) _safe_expected_measurements = jnp.dot(states, loadings) + jnp.dot( _safe_controls, control_params ).reshape(n_obs, 1) _safe_measurements = jnp.where( not_missing, measurements, _safe_expected_measurements.mean(axis=1) ) _residuals = _safe_measurements.reshape(n_obs, 1) - _safe_expected_measurements _f_stars = jnp.dot(upper_chols, loadings.reshape(n_states, 1)) _m = jnp.zeros((n_obs, n_mixtures, n_states + 1, n_states + 1)) _m = _m.at[..., 0, 0].set(meas_sd) _m = _m.at[..., 1:, :1].set(_f_stars) _m = _m.at[..., 1:, 1:].set(upper_chols) _r = array_qr_jax(_m)[1] _new_upper_chols = _r[..., 1:, 1:] _root_sigmas = _r[..., 0, 0] _abs_root_sigmas = jnp.abs(_root_sigmas) # it is important not to divide by the absolute value of _root_sigmas in order # to recover the sign of the Kalman gain. 
_kalman_gains = _r[..., 0, 1:] / _root_sigmas.reshape(n_obs, n_mixtures, 1) _new_states = states + _kalman_gains * _residuals.reshape(n_obs, n_mixtures, 1) # calculate log likelihood per individual and update mixture weights _loglikes_per_dist = jax.scipy.stats.norm.logpdf(_residuals, 0, _abs_root_sigmas) if n_mixtures >= 2: _weighted_loglikes_per_dist = _loglikes_per_dist + log_mixture_weights _loglikes = jax.scipy.special.logsumexp(_weighted_loglikes_per_dist, axis=1) _new_log_mixture_weights = _weighted_loglikes_per_dist - _loglikes.reshape( -1, 1 ) else: _loglikes = _loglikes_per_dist.flatten() _new_log_mixture_weights = log_mixture_weights # combine pre-update quantities for missing observations with updated quantities new_states = jnp.where(not_missing.reshape(n_obs, 1, 1), _new_states, states) new_upper_chols = jnp.where( not_missing.reshape(n_obs, 1, 1, 1), _new_upper_chols, upper_chols ) new_loglikes = jnp.where(not_missing, _loglikes, 0) new_log_mixture_weights = jnp.where( not_missing.reshape(n_obs, 1), _new_log_mixture_weights, log_mixture_weights ) debug_info = {} if debug: residuals = jnp.where(not_missing.reshape(n_obs, 1), _residuals, jnp.nan) debug_info["residuals"] = residuals residual_sds = jnp.where( not_missing.reshape(n_obs, 1), _abs_root_sigmas, jnp.nan ) debug_info["residual_sds"] = residual_sds debug_info["log_mixture_weights"] = new_log_mixture_weights return ( new_states, new_upper_chols, new_log_mixture_weights, new_loglikes, debug_info, )
5,338,631
def pkg_lpk(shp):
    """Example:

    \b
    > pkg_lpk in.shp

    creates file in.lpk

    wildcards allowed:

    \b
    > pkg_lpk *.shp
    """
    for shp in glob.glob(shp):
        create_lpk(shp, shp.replace('.shp', '.lpk'))
5,338,632
def sub_repeatedly(pattern, repl, term):
    """apply sub() repeatedly until no change"""
    while True:
        new_term = re.sub(pattern, repl, term)
        if new_term == term:
            return term
        term = new_term
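# Example showing why repeating matters: a single re.sub pass can expose a
# new match that only appears after the first substitutions are made.
import re

print(re.sub("ab", "", "aabbab"))          # 'ab' -> one pass leaves a new match
print(sub_repeatedly("ab", "", "aabbab"))  # ''   -> repeated passes remove it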
5,338,633
def _dtw(distance_matrix: np.ndarray, gully: float = 1., additive_penalty: float = 0.,
         multiplicative_penalty: float = 1.) -> Tuple[np.ndarray, np.ndarray, float]:
    """
    Compute the dynamic time warping distance between two sequences given a distance matrix.
    DTW score of lowest cost path through the distance matrix, including penalties.

    :param distance_matrix: Distances between two sequences
    :param gully: Sequences must match up to this proportion of the shorter sequence.
        Default value is 1, which means that the entirety of the shorter sequence must be
        matched to a part of the longer sequence.
    :param additive_penalty: Additive penalty for non-diagonal moves. Default value is 0,
        which means no penalty.
    :param multiplicative_penalty: Multiplicative penalty for non-diagonal moves.
        Default value is 1, which means no penalty.
    :return: Lowest cost path through the distance matrix. Penalties are included,
        the score is not yet normalized.
    """
    if np.isnan(distance_matrix).any():
        raise ValueError('NaN values found in distance matrix.')
    distance_matrix = distance_matrix.copy()
    # Pre-allocate path length matrix
    traceback = np.empty(distance_matrix.shape, np.uint8)
    # Populate distance matrix with lowest cost path
    _dtw_core(distance_matrix, additive_penalty, multiplicative_penalty, traceback)
    if gully < 1.:
        # Allow the end of the path to start within gully percentage of the smaller
        # distance matrix dimension
        gully = int(gully * min(distance_matrix.shape))
    else:
        # When gully is 1 require matching the entirety of the smaller sequence
        gully = min(distance_matrix.shape) - 1
    # Find the indices of the smallest costs on the bottom and right edges
    i = np.argmin(distance_matrix[gully:, -1]) + gully
    j = np.argmin(distance_matrix[-1, gully:]) + gully
    # Choose the smaller cost on the two edges
    if distance_matrix[-1, j] > distance_matrix[i, -1]:
        j = distance_matrix.shape[1] - 1
    else:
        i = distance_matrix.shape[0] - 1
    # Score is the final score of the best path
    score = float(distance_matrix[i, j])
    # Pre-allocate the x and y path index arrays
    # (np.int was removed from NumPy; the builtin int is the intended dtype)
    x_indices = np.zeros(sum(traceback.shape), dtype=int)
    y_indices = np.zeros(sum(traceback.shape), dtype=int)
    # Start the arrays from the end of the path
    x_indices[0] = i
    y_indices[0] = j
    # Keep track of path length
    n = 1
    # Until we reach an edge
    while i > 0 and j > 0:
        # If the traceback matrix indicates a diagonal move...
        if traceback[i, j] == 0:
            i = i - 1
            j = j - 1
        # Horizontal move...
        elif traceback[i, j] == 1:
            i = i - 1
        # Vertical move...
        elif traceback[i, j] == 2:
            j = j - 1
        # Add these indices into the path arrays
        x_indices[n] = i
        y_indices[n] = j
        n += 1
    # Reverse and crop the path index arrays
    x_indices = x_indices[:n][::-1]
    y_indices = y_indices[:n][::-1]
    return x_indices, y_indices, score
5,338,634
def display_plot(phase: ndarray, magnitude: ndarray,
                 model: Tuple[ndarray, ndarray] = None) -> None:
    """
    Display a phased light curve.

    Parameters
    ----------
    phase : ndarray
        An array which stores a phase vector.
    magnitude : ndarray
        An array which represents a magnitude vector.
    model : tuple
        (x, y) coordinates of the model.
    """
    _draw_phase(phase, magnitude, 6)
    if model is not None:
        _draw_model(*model)
    plt.show()
5,338,635
def test_kgdm_parse_total_proteins_error_first_pag(fam_url, fam_template, args): """Test parse_total_proteins_error() when it's the first failure for the pagination page.""" errors = { "url": fam_url, "format": "error message", } family, failed_scrapes, sql_failures, format_failures = scrape_by_kingdom.parse_kingdom_total_proteins_error( first_pagination_url=fam_url, family=fam_template, kingdom="Bacteria", failed_scrapes=[], sql_failures=[], format_failures=[], errors=errors, args=args["args"], ) # assert format_failures == ['http://www.cazy.org/GH1_bacteria.html\terror message'] # assert failed_scrapes == [] # assert sql_failures == []
5,338,636
async def _ensure_meadowgrid_security_groups() -> str: """ Creates the meadowgrid coordinator security group and meadowgrid agent security group if they doesn't exist. The coordinator security group allows meadowgrid agents and the current ip to access the coordinator, as well as allowing the current ip to ssh. See also _ensure_meadowgrid_agent_security_group. """ current_ip_for_ssh = await _get_current_ip_for_ssh() # allow meadowgrid traffic from the meadowgrid agent security group agent_security_group_id = ensure_security_group( _MEADOWGRID_AGENT_SECURITY_GROUP, [(22, 22, f"{current_ip_for_ssh}/32")], [] ) return ensure_security_group( _MEADOWGRID_COORDINATOR_SECURITY_GROUP, [ (22, 22, f"{current_ip_for_ssh}/32"), ( DEFAULT_COORDINATOR_PORT, DEFAULT_COORDINATOR_PORT, f"{current_ip_for_ssh}/32", ), ], [(DEFAULT_COORDINATOR_PORT, DEFAULT_COORDINATOR_PORT, agent_security_group_id)], )
5,338,637
def _create_pairs_numba( to_match, indexer, first_stage_cum_probs, group_codes_per_individual, seed ): """ Args: to_match (np.ndarry): 2d boolean array with one row per individual and one column sub-contact model. indexer (numba.List): Numba list that maps id of county to a numpy array with the row positions of all individuals from that county. first_stage_cum_probs(numpy.ndarray): Array of shape n_group, n_groups. cum_probs[i, j] is the probability that an individual from group i meets someone from group j or lower. group (np.ndarray): 1d array with assortative matching group ids, coded as integers. Returns: pairs_of_workers (np.ndarray): 2d integer array with meeting ids. """ np.random.seed(seed) unique_group_codes = np.arange(len(first_stage_cum_probs)) to_match = to_match.copy() out = np.full(to_match.shape, -1) n_obs, n_models = to_match.shape for m in range(n_models): meeting_id = 0 for i in range(n_obs): if to_match[i, m]: group_i = group_codes_per_individual[i] group_j = choose_other_group( unique_group_codes, first_stage_cum_probs[group_i] ) group_j_indices = indexer[group_j] weights = to_match[group_j_indices, m].astype(np.float64) j = choose_other_individual(group_j_indices, weights) if j != -1: to_match[i, m] = False to_match[j, m] = False out[i, m] = meeting_id out[j, m] = meeting_id meeting_id += 1 return out
5,338,638
def test_set_attr(): """ Tests that generate_schema returns a schema that has the ability to set instance variables based on keys of different format in the dictionary provided in schema.load(d) """ class TestObject(object): def __init__(self): super().__init__() self.int_a = 0 self.int_b = 0 @classmethod def get_fields(cls): return ["int_a", "int_b"] def __new__(cls, *args, **kwargs): instance = super().__new__(cls, *args, **kwargs) return instance s = schema.generate_schema(TestObject) # print(s) obj = s.load({ "intA": 1, "intB": 2 }) assert obj.int_a == 1 assert obj.int_b == 2 # TODO: test that no extraneous attributes are set
5,338,639
def filter_known_bad(orbit_points):
    """
    Filter some commands that are known to be incorrect.
    """
    ops = orbit_points
    bad = np.zeros(len(orbit_points), dtype=bool)
    bad |= (ops['name'] == 'OORMPEN') & (ops['date'] == '2002:253:10:08:52.239')
    bad |= (ops['name'] == 'OORMPEN') & (ops['date'] == '2004:010:10:00:00.000')
    return orbit_points[~bad]
5,338,640
def test_user_validation_shared(shared_zone_test_context): """ Confirm that test users cannot add/edit/delete records in non-test zones (via shared access) """ client = shared_zone_test_context.ok_vinyldns_client batch_change_input = { "changes": [ get_change_A_AAAA_json("add-test-batch.non.test.shared."), get_change_A_AAAA_json("update-test-batch.non.test.shared.", change_type="DeleteRecordSet"), get_change_A_AAAA_json("update-test-batch.non.test.shared."), get_change_A_AAAA_json("delete-test-batch.non.test.shared.", change_type="DeleteRecordSet") ], "ownerGroupId": shared_zone_test_context.ok_group['id'] } response = client.create_batch_change(batch_change_input, status=400) assert_failed_change_in_error_response(response[0], input_name="add-test-batch.non.test.shared.", record_data="1.1.1.1", error_messages=["User \"ok\" is not authorized."]) assert_failed_change_in_error_response(response[1], input_name="update-test-batch.non.test.shared.", change_type="DeleteRecordSet", error_messages=["User \"ok\" is not authorized."]) assert_failed_change_in_error_response(response[2], input_name="update-test-batch.non.test.shared.", record_data="1.1.1.1", error_messages=["User \"ok\" is not authorized."]) assert_failed_change_in_error_response(response[3], input_name="delete-test-batch.non.test.shared.", change_type="DeleteRecordSet", error_messages=["User \"ok\" is not authorized."])
5,338,641
def create_lexicon(word_tags): """ Create a lexicon in the right format for nltk.CFG.fromString() from a list with tuples with words and their tag. """ # dictionary to filter the double tags word_dict = {} for word, tag in word_tags: if tag not in word_dict: word_dict[tag] = {word} else: word_dict[tag].add(word) # PRO is the tag for 's, but the 's is not removed on nouns. word_dict['NN'] = [x.replace('\'s', '') for x in word_dict['NN']] word_dict['JJ'] = [x.replace('\'s', '') for x in word_dict['JJ']] del word_dict[','] word_dict['PRP'].update(word_dict['PRP$']) del word_dict['PRP$'] word_dict['POS'] = ['"s'] # convert the dictionary to the right NLTK format lexicon = '' for key, val in word_dict.items(): lexicon += key + ' -> ' # add ' ' around every word val = [f'\'{v}\'' for v in val] # the words are seperated by a pipe lexicon += ' | '.join(val) + '\n' return lexicon
5,338,642
def process_datasets_metadata(input_file=None, dryrun=True, staging=True, sample=0, report=False, memory='4g', rmlstreamer_run=False): """Read a RDF metadata file with infos about datasets, check if the dataset exist in the project SPARQL endpoint Download the data if new""" # If no metadata file provided, we search for one in the current folder if not input_file: # Search for ttl metadata file file_list = glob.glob('*.ttl') if len(file_list) > 1: raise Exception("More than 1 metadata file have been found in the current folder: " + ', '.join(file_list)) elif len(file_list) < 1: # Search for jsonld metadata file if no ttl jsonld_file_list = glob.glob('*.jsonld') if len(jsonld_file_list) > 1: raise Exception("More than 1 metadata file have been found in the current folder: " + ', '.join(jsonld_file_list)) elif len(jsonld_file_list) < 1: raise Exception("No ttl or jsonld metadata file has been found in the current folder") else: input_file = jsonld_file_list[0] else: input_file = file_list[0] print("🔎 Reading the metadata file " + input_file) os.makedirs('data/input', exist_ok=True) os.makedirs('output', exist_ok=True) # Retrieve the infos about files to download from the dataset metadata file g = Graph() g.parse(input_file, format=get_parse_format(input_file)) download_file_list = [] datasets_described = set() i = 0 for subject, download_predicate, download_files_uri in g.triples((None, D2S.downloadFiles, None)): datasets_described.add(subject) download_file_list.append({}) download_file_list[i]['downloadUrl'] = str(g.value(download_files_uri, DCAT.downloadURL)) if (download_files_uri, D2S.downloadScript, None) in g: download_file_list[i]['downloadScript'] = str(g.value(download_files_uri, D2S.downloadScript)) if (download_files_uri, D2S.postProcessScript, None) in g: download_file_list[i]['postProcessScript'] = str(g.value(download_files_uri, D2S.postProcessScript)) if (download_files_uri, D2S.processedFilename, None) in g: download_file_list[i]['processedFilename'] = str(g.value(download_files_uri, D2S.processedFilename)) i += 1 # Retrieve the dataset URI and various params in the dataset metadata file if len(datasets_described) < 1: raise Exception("No dataset has been found in the metadata file") elif len(datasets_described) > 1: raise Exception("More than 1 dataset has been found in the metadata file") else: dataset_uri = datasets_described.pop() if (dataset_uri, DC.identifier, None) in g: dataset_id_cap = str(g.value(dataset_uri, DC.identifier)) dataset_id = dataset_id_cap.lower() else: raise Exception("Could not find the dc:identifier property for the dataset in the metadata file") if (dataset_uri, D2S.processor, None) in g: processor = str(g.value(dataset_uri, D2S.processor)) else: processor = 'rmlmapper-java' if processor.lower() == 'rmlstreamer': if not rmlstreamer_run: print('📤 Copying mappings to the RMLStreamer.') # Make sure the YARRRML mappings on the DSRI RMLStreamer are up to date rmlstreamer_dataset_path = '/mnt/datasets/' + dataset_id_cap + '/' oc_cp_cmd = 'oc cp *.yarrr.yml $(oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name):' + rmlstreamer_dataset_path os.system(oc_cp_cmd) oc_cp_cmd = 'oc cp *.jsonld $(oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name):' + rmlstreamer_dataset_path os.system(oc_cp_cmd) oc_cp_cmd = 'oc cp prepare.sh $(oc get pod --selector app=flink --selector component=jobmanager --no-headers 
-o=custom-columns=NAME:.metadata.name):' + rmlstreamer_dataset_path os.system(oc_cp_cmd) # Run this same function directly in the RMLStreamer print('☁️ Running the process in the RMLStreamer.') run_d2s_cmd = '"cd ' + rmlstreamer_dataset_path + ' && d2s run --rmlstreamer"' rmlstreamer_cmd = 'oc exec $(oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name) -- bash -c ' + run_d2s_cmd print(rmlstreamer_cmd) os.system(rmlstreamer_cmd) # return process_datasets_metadata(input_file, dryrun, sample, report, memory, True) return None if (dataset_uri, D2S.rdfSyntax, None) in g: rdfSyntax = str(g.value(dataset_uri, D2S.rdfSyntax)) else: rdfSyntax = 'turtle' if rdfSyntax == 'ntriples': output_file_extension = '.nt' output_file_mimetype = 'application/n-triples' else: output_file_extension = '.ttl' output_file_mimetype = 'text/turtle' if (dataset_uri, D2S.versionRegex, None) in g: versionRegex = str(g.value(dataset_uri, D2S.versionRegex)) else: versionRegex = None prod_endpoint = get_yaml_config('production')['sparql-endpoint'] prod_ldp = get_yaml_config('production')['virtuoso-ldp-url'] staging_endpoint = get_yaml_config('staging')['sparql-endpoint'] if 'virtuoso-ldp-url' in get_yaml_config('staging').keys(): staging_ldp = get_yaml_config('staging')['virtuoso-ldp-url'] endpoint_user = os.getenv('DBA_USER', 'dav') endpoint_password = os.getenv('DBA_PASSWORD') # TODO: Get lastUpdated date and version infos from the production endpoint # date_last_updated = None # if prod_endpoint: # print('Querying the SPARQL endpoint ' + prod_endpoint + ' to retrieve version infos for the dataset ' + dataset_uri) # query = """PREFIX d2s: <https://w3id.org/d2s/vocab/> # PREFIX pav: <http://purl.org/pav/> # SELECT ?lastUpdated WHERE { # <""" + str(dataset_uri) + """> pav:lastUpdateOn ?lastUpdated . # } # """ # # query = """SELECT * WHERE { # # ?s ?p ?o . 
# # } LIMIT 10 # # """ # sparql = SPARQLWrapper(prod_endpoint) # sparql.setReturnFormat(JSON) # sparql.setQuery(query) # results = sparql.query().convert() # print('SPARQLWrapper Results:') # print(results["results"]["bindings"]) # last_updated = results["results"]["bindings"]["lastUpdated"]["value"] # date_last_updated = parsedate(last_updated) # print(results["results"]["bindings"]["lastUpdated"]["value"]) # else: # print('No SPARQL endpoint associated, running the download without checking if the graphs need to be updated') print('\n🗃️ Checking files to download: \n') # Download if last modified date is later than last updated date (or if modified/updated date could not be fetched) # file_time = datetime.fromtimestamp(os.path.getmtime(dstFile)) # if not date_last_modified or not date_last_updated or date_last_modified > date_last_updated: # Download file in the data subfolder os.chdir('data') skip_global_download = True # Check last modified date, then download and post process scripts defined for each file for ddl_file in download_file_list: ddl_url = ddl_file['downloadUrl'] # # Extract filename from URI: # ddl_filename = os.path.basename(urlparse(ddl_url).path) processed_filename = ddl_file['processedFilename'] if versionRegex: # TODO: Extract version, then increment it # and check if new version available version_search = re.search(versionRegex, ddl_url, re.IGNORECASE) if version_search: file_version = version_search.group(1) print(file_version) skip_download = True # Check Last Modified date of the URL to download print('🔎 Checking Last Modified date of file at ' + ddl_url) r = requests.head(ddl_url) if 'last-modified' in r.headers.keys(): url_last_modified = r.headers['last-modified'] ddl_file['lastModified'] = parsedate(url_last_modified) print('📅 File to download last modified on ' + url_last_modified) ## Check if last date updated from SPARQL endpoint is older than the URL Last Modified date # if ddl_file['lastModified'] > date_last_updated: # print('📥 According to Last Modified date, the remote file to download is newer than the existing local file (' + str(local_file_time) + '). Downloading it.') # skip_download = False # skip_global_download = False # elif os.path.exists(processed_filename): # Download only if processed file does not exist, is older than the file to ddl, # or if the file to ddl has no LastModified date if os.path.exists(processed_filename): # If the file to download is newer than existing local file if 'lastModified' in ddl_file.keys(): local_file_time = datetime.fromtimestamp(os.path.getmtime(processed_filename), timezone.utc) if ddl_file['lastModified'] > local_file_time: print('📥 According to Last Modified date, the remote file to download is newer than the existing local file (' + str(local_file_time) + '). Downloading it.') skip_download = False skip_global_download = False else: print('⏩️ According to Last Modified date, the remote file to download is not newer than the existing local file at data/' + processed_filename + ' (' + str(local_file_time) + '). Skipping download.') else: print('📥 No Last Modified date for this file. Downloading it.') skip_download = False skip_global_download = False else: print('📥 No existing local file for this file. 
Downloading it.') skip_download = False skip_global_download = False # Run the download and preprocess scripts if download required if not skip_download: if 'downloadScript' in ddl_file: execute_script(ddl_file['downloadScript']) elif 'downloadUrl' in ddl_file: os.system("wget -qN " + ddl_url) if 'postProcessScript' in ddl_file: execute_script(ddl_file['postProcessScript']) print('') # Run download and post process scripts defined for the whole dataset if at least one file has been downloaded if not skip_global_download: if (dataset_uri, D2S.downloadScript, None) in g: execute_script(str(g.value(dataset_uri, D2S.downloadScript))) if (dataset_uri, D2S.postProcessScript, None) in g: execute_script(str(g.value(dataset_uri, D2S.postProcessScript))) else: print('⏩️ No dataset has been downloaded, skipping global post processing.') print('') # TODO: Create a HTML report about input CSV data with Datapane # import datapane as dp # if report: # print('📋 Produce HTML report for CSV files in data folder with datapane') # for ddl_file in download_file_list: # processed_filename = ddl_file['processedFilename'] # if processed_filename.endswith('.csv'): # df = pd.read_csv(processed_filename) # dp.Report( # dp.Text('## ' + processed_filename), # dp.DataTable(df) # ).save(path='report-' + processed_filename.replace('.csv', '') + '.html', # open=True, formatting=dp.ReportFormatting(width=dp.ReportWidth.FULL)) ## Automatically unzip files, to be done ad-hoc in prepare.sh? # print("""find . -name "*.tar.gz" -exec tar -xzvf {} \;""") # if len(glob.glob('*.zip')) > 0: # print('Unzipping .zip files ' + ', '.join(glob.glob('*.zip'))) # os.system('unzip -o "*.zip"') # if len(glob.glob('*.tar.gz')) > 0: # print('Unzipping .tar.gz files ' + ', '.join(glob.glob('*.tar.gz'))) # os.system("""find . 
-name "*.tar.gz" -exec tar -xzvf {} \;""") # if len(glob.glob('*.gz')) > 0: # print('Unzipping .gz files ' + ', '.join(glob.glob('*.gz'))) # os.system('gzip -f -d *.gz') # Check for .tsv .txt and .tab then convert to CSV (required for most RML engines) # tab_files = glob.glob('*.tsv') + glob.glob('*.txt') + glob.glob('*.tab') # for tsv_file in tab_files: # csv_file = tsv_file[:-4] + '.csv' # print('📄 Converting TSV file '+ tsv_file + ' to CSV ' + csv_file) # try: # tsv_to_csv_cmd = """sed -e 's/"//g' -e 's/\\t/","/g' -e 's/^/"/' -e 's/$/"/' -e 's/\\r//' """ + tsv_file + """ > """ + csv_file # os.system(tsv_to_csv_cmd) # # csv_table=pd.read_table(tsv_file,sep='\t') # # csv_table.to_csv(csv_file, index=False) # except Exception as e: # print('Could not convert the file ' + tsv_file + ' to CSV') # Create sample for CSV files if sample > 0: for csv_file in glob.glob('*.csv'): print('✂️ Creating a sample file with ' + str(sample) + ' lines for ' + csv_file) # if not os.path.exists(filename): full_csv_file = csv_file + '.full' shutil.copy(csv_file, full_csv_file) sample_cmd = 'head -n ' + str(sample) + ' ' + full_csv_file + ' > ' + csv_file os.system(sample_cmd) # Go back to dataset folder to convert YARRML files os.chdir('..') # For each YARRRML mappings: convert to RML and run mapper for file in glob.glob('*.yarrr.yml'): yarrrml_filename = os.fsdecode(file) rml_filename = yarrrml_filename.replace('.yarrr.yml', '.rml.ttl') print('🦜 Converting YARRRML mapping '+ yarrrml_filename + ' to RML ' + rml_filename) output_filepath = '../output/' + yarrrml_filename.replace('.yarrr.yml', output_file_extension) os.system('yarrrml-parser -i ' + yarrrml_filename + ' -o data/' + rml_filename) # Run RML mapper depending on processor given in the metadata file if processor.lower() == 'rmlmapper-java': print('☕️ Running the RML mapper with java to generate the RDF to ' + output_filepath.replace('../', '')) init_d2s_java('rmlmapper') # Change dir to fix issue with rmlmapper requiring to load a .dtd locally when reading DrugBank RML os.chdir('data') # Copy functions jar file in the same folder where we run the rmlmapper to fix issues with finding the functions shutil.copy('../../IdsRmlFunctions.jar', 'IdsRmlFunctions.jar') if 'memory' in get_yaml_config('resources').keys(): memory = get_yaml_config('resources')['memory'] java_opts = "-Xms" + memory + " -Xmx" + memory rml_cmd = 'java ' + java_opts + ' -jar ' + get_base_dir('rmlmapper.jar') + ' -s ' + rdfSyntax + ' -f ../../functions_ids.ttl -m ' + rml_filename + ' -o ' + output_filepath os.system(rml_cmd) os.chdir('..') # if processor.lower() == 'rmlstreamer': if rmlstreamer_run: print('🐿️ Running the RMLStreamer') rmlstreamer_dataset_path = os.getcwd() parallel_cores = str(get_yaml_config('resources')['flink-cores']) os.chdir('data') rmlstreamer_cmd = '/opt/flink/bin/flink run -p ' + parallel_cores + ' -c io.rml.framework.Main /opt/flink/lib/RMLStreamer.jar toFile -m ' + rmlstreamer_dataset_path + '/data/' + rml_filename + ' -o ' + rmlstreamer_dataset_path + '/output/output-' + dataset_id + '.nt --job-name "RMLStreamer Bio2KG - ' + dataset_id + '"' os.system(rmlstreamer_cmd) os.chdir('..') if processor.lower() == 'rocketrml': print('🚀 Running RocketRML with NodeJS to generate the RDF to ' + output_filepath) os.chdir('data') nodejs_memory='2048' if 'nodejs-memory' in get_yaml_config('resources').keys(): nodejs_memory = str(get_yaml_config('resources')['nodejs-memory']) # Try to increase node memory to 2G for large files with --max_old_space_size=2048 
os.system(f'node --max_old_space_size={nodejs_memory} ../../rocketrml.js -m {rml_filename} -o {output_filepath}') os.chdir('..') # TO CHECK: concatenate produced nt files in 1 file if multiple files list_ntriples = glob.glob('output/*.nt') if len(list_ntriples) > 1: print('🗃️ Concatenate ntriples files: ' + ', '.join(list_ntriples)) output_filepath = 'output/' + dataset_id +'.nt' if os.path.exists(output_filepath): os.system('rm ' + output_filepath) os.system('cat output/*.nt > ' + output_filepath) os.system('ls output/*.nt | grep -v ' + output_filepath + ' | xargs rm') # os.system('ls *.nt | grep -v ' + dataset_id + '.nt' + ' | parallel rm') if dryrun: print('✅ Dry run completed: RDF generated, but not published') else: if staging: print('🧪 Publishing to staging endpoint') update_endpoint = staging_endpoint update_ldp = staging_ldp else: print('📰 Publishing the processed file to the production endpoint') update_endpoint = prod_endpoint update_ldp = prod_ldp raise Exception("Publishing not implemented yet") if (dataset_uri, D2S.graph, None) in g: dataset_graph = str(g.value(dataset_uri, D2S.graph)) else: dataset_graph = update_ldp + '/' + dataset_id output_metadata_file = 'output/metadata.ttl' metadata_graph = update_ldp + '/metadata-' + dataset_id metadata_slug = 'metadata-' + dataset_id if os.path.exists(output_metadata_file): os.remove(output_metadata_file) # os.system('rm ' + output_metadata_file) if len(glob.glob('output/*.ttl')) > 1: raise Exception("More than 1 turtle output file found. If you produce multiple files as output, use the rdfSyntax ntriples, so the output can be concatenated in one graph per dataset") # TODO: once RDF ouput files generated, if new version and not dry run: load to production Virtuoso # Otherwise load to staging Virtuoso and generate metadata # TODO: do we want 1 graph per dataset or 1 graph per file? 
I would say 1 per dataset to improve metadata generation per graph # print(update_endpoint) # print(endpoint_user) # print(endpoint_password) # Iterates the output file to upload them to the Virtuoso LDP triplestore # Should be only one turtle or ntriples file because the LDP create 1 graph per file for output_file in glob.glob('output/*'): # Load the RDF output file to the Virtuoso LDP DAV # Existing file is overwritten automatically at upload load_rdf_to_ldp(output_file, output_file_mimetype, update_ldp, dataset_id, endpoint_user, endpoint_password) # TODO: then run d2s metadata to get HCLS metadata and upload it in the dataset metadata graph # And compare new version metadata to the current version in production # generate_hcls_from_sparql(sparql_endpoint, rdf_distribution_uri, metadata_type, graph) g_metadata = generate_hcls_from_sparql(update_endpoint, dataset_graph, 'hcls', dataset_graph) g_metadata.serialize(destination=output_metadata_file, format='turtle', indent=4) load_rdf_to_ldp(output_metadata_file, "Accept: text/turtle", update_ldp, metadata_slug, endpoint_user, endpoint_password) # TODO: handle dataset_version print('✅ Dataset processed and loaded to ' + update_endpoint) # Clear graph SPARQL query # try: # sparql = SPARQLWrapper(update_endpoint) # sparql.setMethod(POST) # # sparql.setHTTPAuth(BASIC) or DIGEST # sparql.setCredentials(endpoint_user, endpoint_password) # query = 'CLEAR GRAPH <' + dataset_graph + '>' # print('🗑️ Clearing previous graph') # sparql.setQuery(query) # query_results = sparql.query() # print(query_results.response.read()) # except: # print('Could not delete the graph (probably it does not exist)') # try: # insert_results = insert_file_in_sparql_endpoint(file_path, sparql_endpoint, username, password, graph_uri, chunks_size) # except Exception as e: # print('Error with INSERT of file: ' + file_path) # print(e)
5,338,643
def get_voltage(directory, calculation="relax", functional=None):
    """
    Calculate the voltage of a battery consisting of a cathode specified by the
    directory versus a metallic Li anode.

    Args:
        directory: Path to the directory that contains the cathode calculation.
        calculation: Type of calculation to consider (e.g. "relax").
        functional: Exchange-correlation functional used for the calculation.

    """
    raise NotImplementedError
5,338,644
def update_fn(model, data_dict: dict,
              optimizers: dict, losses=None,
              ):
    """
    Function which handles prediction from batch, logging, loss calculation
    and optimizer step

    Parameters
    ----------
    model : torch.nn.Module
        model to forward data through
    data_dict : dict
        dictionary containing the data
    optimizers : dict
        dictionary containing all optimizers to perform parameter update
    losses : dict
        Functions or classes to calculate losses
    """

    preds = model(data_dict["data_a"], data_dict["data_b"])

    loss_gen_a = losses["adversarial"](preds["discr_fake_a"], True)
    loss_gen_b = losses["adversarial"](preds["discr_fake_b"], True)
    loss_generator = (loss_gen_a + loss_gen_b) / 2

    optimizers["generator"].zero_grad()
    loss_generator.backward(retain_graph=True)
    optimizers["generator"].step()

    loss_discr_real_a = losses["adversarial"](preds["discr_real_a"], True)
    loss_discr_real_b = losses["adversarial"](preds["discr_real_b"], True)
    loss_discr_fake_a = losses["adversarial"](preds["discr_fake_a"], False)
    loss_discr_fake_b = losses["adversarial"](preds["discr_fake_b"], False)

    loss_discriminator = (loss_discr_real_a + loss_discr_real_b
                          + loss_discr_fake_a + loss_discr_fake_b) / 4

    optimizers["discriminator"].zero_grad()
    loss_discriminator.backward()
    optimizers["discriminator"].step()

    # zero gradients again just to make sure, gradients aren't carried to
    # next iteration (won't affect training since gradients are zeroed
    # before every backprop step, but would result in way higher memory
    # consumption)
    for k, v in optimizers.items():
        v.zero_grad()
5,338,645
def display_time(seconds, granularity=2): """Display time as a nicely formatted string""" result = [] if seconds == 0: return "0 second" for name, count in intervals: value = seconds // count if value: seconds -= value * count if value == 1: name = name.rstrip('s') result.append("{} {}".format(value, name)) return ', '.join(result[:granularity])
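# Hypothetical usage sketch for display_time() above. The function relies on a
# module-level `intervals` sequence that is not shown here; the definition below is
# an assumption for illustration only, not the original.
intervals = (
    ('weeks', 604800),    # 60 * 60 * 24 * 7
    ('days', 86400),      # 60 * 60 * 24
    ('hours', 3600),      # 60 * 60
    ('minutes', 60),
    ('seconds', 1),
)

print(display_time(3700))        # "1 hour, 1 minute"
print(display_time(93784, 4))    # "1 day, 2 hours, 3 minutes, 4 seconds"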
5,338,646
def reciprocal(x): """ Returns the reciprocal of x. Args: x (TensorOp): A tensor. Returns: TensorOp: The reciprocal of x. """ return ReciprocalOp(x)
5,338,647
def get_protocol(remote_debugging_url: str, request_json: Callable[[str], Iterable]): """ The current devtools protocol, as JSON """ return request_json(f"{remote_debugging_url}/json/protocol")
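# Minimal sketch of calling get_protocol() above, using the `requests` library as the
# request_json callable. It assumes a browser is already running with
# --remote-debugging-port=9222; the URL below is an assumption for illustration.
import requests

def request_json(url: str):
    return requests.get(url).json()

protocol = get_protocol("http://localhost:9222", request_json)
print(protocol.get("version"))   # e.g. {'major': '1', 'minor': '3'}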
5,338,648
def csv_saver_parser():
    """
    Csv saver parser.
    Returns tuple with args as dictionary and suffix that needs to be removed.

    :return: tuple
    """
    csv_saver_parser = ArgumentParser(description='Parser for saving data into CSV files.')
    csv_saver_parser.add_argument('--F-csvsave',
                                  help='The field separator to be used. \'\t\' can be used as well. (default: \',\')')
    csv_saver_parser.add_argument('--M-csvsave',
                                  help='The string representing a missing value. (default: ?)')
    csv_saver_parser.add_argument('--N-csvsave', action='store_const', const="",
                                  help='Don\'t write a header row.')
    csv_saver_parser.add_argument('--decimal-csvsave',
                                  help='The maximum number of digits to print after the decimal place for numeric values (default: 6)')
    csv_saver_parser.add_argument('--i-csvsave', help='The input file')
    csv_saver_parser.add_argument('--o-csvsave', help='The output file')

    return vars((csv_saver_parser.parse_known_args())[0]), '-csvsave'
5,338,649
def get_var(expr: Expression) -> Var: """ Warning: this in only true for expressions captured by a match statement. Don't call it from anywhere else """ assert isinstance(expr, NameExpr) node = expr.node assert isinstance(node, Var) return node
5,338,650
def sight(unit_type: int): """Return the sight range of a unit, given its unit type ID :param unit_type: the unit type ID, according to :mod:`pysc2.lib.stats` :type unit_type: int :return: the unit's sight range :rtype: float """ return __data['Sight'][unit_type]
5,338,651
def decode_jwt(encoded_jwt):
    """
    Decode a JWT.
    """
    global key
    # Note: when the payload declares an 'aud' (audience) claim, the audience
    # must also be supplied when decoding.
    decoded_jwt = jwt.decode(encoded_jwt, key, audience='dev', algorithms=["HS256"])
    return decoded_jwt
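# Round-trip sketch for decode_jwt() above using PyJWT. It assumes the module-level
# `key` that the function reads via `global key`; the key and payload below are
# illustrative only.
key = "my-secret"
payload = {"sub": "user-1", "aud": "dev"}   # 'aud' must match audience='dev' in decode_jwt
token = jwt.encode(payload, key, algorithm="HS256")
print(decode_jwt(token))                    # {'sub': 'user-1', 'aud': 'dev'}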
5,338,652
async def bulkget(ip, community, scalar_oids, repeating_oids, max_list_size=1, port=161, timeout=DEFAULT_TIMEOUT): # type: (str, str, List[str], List[str], int, int, int) -> BulkResult """ Delegates to :py:func:`~puresnmp.aio.api.raw.bulkget` but returns simple Python types. See the "raw" equivalent for detailed documentation & examples. """ raw_output = await raw.bulkget(ip, community, scalar_oids, repeating_oids, max_list_size=max_list_size, port=port, timeout=timeout) pythonized_scalars = {oid: value.pythonize() for oid, value in raw_output.scalars.items()} pythonized_list = OrderedDict( [(oid, value.pythonize()) for oid, value in raw_output.listing.items()]) return BulkResult(pythonized_scalars, pythonized_list)
5,338,653
def repo_version_db_key() -> bytes: """The db formated key which version information can be accessed at Returns ------- bytes db formatted key to use to get/set the repository software version. """ db_key = c.K_VERSION.encode() return db_key
5,338,654
def generate_blob_sentiment_database(company_name, client_address): """ Calculate the textblob sentiment scores and put them into the database. :param company_name: the name of the company. Used as the entry in the database. :param client_address: the address of the database. """ client = MongoClient(client_address) db = client.sentrade_db twitter_db = client.twitter_data sentiment_db = client.sentiment_data all_date = twitter_db[company_name].distinct("date") progress_full = len(all_date) progress_count = 0 for date in all_date: news_score = 0 news_count = sys.float_info.epsilon # sum all scores for company_tweet in twitter_db[company_name].find({"date": date}): if "polarity" in company_tweet: # get rid of the neutral results # if company_tweet["polarity"] < -0.3 or company_tweet["polarity"] > 0.3: news_score += company_tweet["polarity"] news_count += 1 # check if the date is not yet in the database if (sentiment_db[company_name].count_documents({"date": date}) == 0): sentiment = {"company": company_name, "date": date, "today_sentiment_score": news_score / news_count, "today_overall_sentiment_score": news_score, "today_news_count": news_count} sentiment_db[company_name].insert_one(sentiment) else: updated_sentiment_score = {"$set": {"today_sentiment_score": news_score / news_count, "today_overall_sentiment_score": news_score, "today_news_count": news_count}} sentiment_db[company_name].update_one(sentiment_db[company_name].find_one({"date": date}), updated_sentiment_score) progress_count += 1 print("summarise", company_name, "progress:", progress_count, "/", progress_full) client.close()
5,338,655
def set_torch_rand_seed(seed: Union[int, str]): """Set rand seed for torch on both cpu, cuda and cudnn Args: seed (Union[int, str]): int seed or str, which will be hashed to get int seed """ if isinstance(seed, str): seed = hash(seed) elif not isinstance(int(seed), int): raise ValueError(f"seed={seed} should be str or int") torch.manual_seed(int(seed)) if torch.cuda.is_available(): torch.cuda.manual_seed_all(int(seed)) torch.backends.cudnn.deterministic = True
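# Quick reproducibility check for set_torch_rand_seed() above. Note that when a string
# seed is passed, Python's hash() is salted per interpreter run unless PYTHONHASHSEED is
# fixed, so integer seeds are the safer choice across runs.
set_torch_rand_seed(42)
a = torch.rand(3)
set_torch_rand_seed(42)
b = torch.rand(3)
assert torch.equal(a, b)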
5,338,656
def _message(string): """Print an informative message to the console.""" sys.stdout.write("clant: %s\n" % string) sys.stdout.flush()
5,338,657
def retrieve_psd_cdf(path): """interact with hdf5 file format for marginal CDFs for a set of PSDs""" with h5py.File(path, 'r') as obj: group = obj['PSD_CDF'] Npsd = group.attrs['num_psds'] freqs = group['frequencies'][...] data = group['CDFs'][...] vals = data[:,0,:] cdfs = data[:,1,:] return freqs, vals, cdfs, Npsd
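# Sketch of the HDF5 layout retrieve_psd_cdf() above expects, written with h5py and
# dummy data; the group/dataset names follow the reader, the values are made up.
import h5py
import numpy as np

n_psd, n_freq = 2, 128
freqs_in = np.linspace(10.0, 2048.0, n_freq)
data_in = np.random.rand(n_psd, 2, n_freq)    # [:, 0, :] = values, [:, 1, :] = CDFs

with h5py.File("psd_cdf.h5", "w") as obj:
    group = obj.create_group("PSD_CDF")
    group.attrs["num_psds"] = n_psd
    group.create_dataset("frequencies", data=freqs_in)
    group.create_dataset("CDFs", data=data_in)

freqs, vals, cdfs, Npsd = retrieve_psd_cdf("psd_cdf.h5")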
5,338,658
def main(): """ Main function """ dblp_path = "dblp2.xml" save_path = "dblp.json" dblp = DBLP() dblp.parse_all(dblp_path, save_path)
5,338,659
def test_broadcastables(): """ Test the "broadcastables" argument when printing symbol-like objects. """ # No restrictions on shape for s in [x, f_t]: for bc in [(), (False,), (True,), (False, False), (True, False)]: assert aesara_code_(s, broadcastables={s: bc}).broadcastable == bc # TODO - matrix broadcasting?
5,338,660
def draw_box(box, color, text=None):
    """
    Draw a bounding box using pyplot, optionally with a text box label.

    Inputs:
    - box: Tensor or list with 4 elements: [x0, y0, x1, y1] in [0, W] x [0, H]
      coordinate system.
    - color: pyplot color to use for the box.
    - text: (Optional) String; if provided then draw a label for this box.
    """
    TEXT_BOX_HEIGHT = 10
    if torch.is_tensor(box) and box.dim() == 2:
        box = box.view(-1)
    assert len(box) == 4  # len() handles both tensors and plain lists, as the docstring allows
    x0, y0, x1, y1 = box
    assert y1 > y0, box
    assert x1 > x0, box
    w, h = x1 - x0, y1 - y0
    rect = Rectangle((x0, y0), w, h, fc='none', lw=2, ec=color)
    plt.gca().add_patch(rect)
    if text is not None:
        text_rect = Rectangle((x0, y0), w, TEXT_BOX_HEIGHT, fc=color, alpha=0.5)
        plt.gca().add_patch(text_rect)
        tx = 0.5 * (x0 + x1)
        ty = y0 + TEXT_BOX_HEIGHT / 2.0
        plt.text(tx, ty, text, va='center', ha='center')
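# Minimal pyplot sketch for draw_box() above. It assumes the module already imports
# torch, matplotlib.pyplot as plt and matplotlib.patches.Rectangle, as the function
# requires; the coordinates and labels are arbitrary.
import matplotlib.pyplot as plt

plt.figure(figsize=(4, 4))
plt.xlim(0, 100)
plt.ylim(100, 0)                           # image-style coordinates: y grows downwards
draw_box([10, 20, 60, 80], 'red', text='cat')
draw_box([55, 40, 90, 90], 'blue', text='dog')
plt.savefig('boxes.png')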
5,338,661
def get(context: mgp.ProcCtx) -> mgp.Record(tracks=list): """Returns a list of track_ids of trendy songs. Calculates recently popular tracks by comparing the popularity of songs using the `followers`, `created_at`, and proximity to other popular songs (pagerank). Example usage: CALL trendy_tracks.get() YIELD tracks Equivalent cypher query: MATCH (track:Track)<--(playlist:Playlist) WITH track, count(playlist) AS popularity RETURN track ORDER BY popularity DESC LIMIT 10 :return: List of track ids that are currently trendy. :rtype: mgp.Record(tracks=list[dict[str][Any]]) """ return mgp.Record( tracks=list( map( lambda vertex: dict(vertex.properties), nlargest( 10, filter( lambda vertex: "Track" in vertex.labels, context.graph.vertices, ), key=lambda vertex: sum(1 for _ in vertex.in_edges), ), ) ) )
5,338,662
def ip_only(value): """ Returns only the IP address string of the value provided. The value could be either an IP address, and IP network or and IP interface as defined by the ipaddress module. Parameters ---------- value : str The value to use Returns ------- str The IP address only value, if the value provided was valid None If the value provided is not an IP thing """ for test in [lambda x: str(ipaddress.ip_address(x)), lambda x: str(ipaddress.ip_interface(x).ip), lambda x: str(ipaddress.ip_network(x).network_address)]: try: return test(value) except: pass return None
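# Usage sketch for ip_only() above: plain addresses, interfaces and networks all
# collapse to the bare address string, anything else yields None.
print(ip_only("192.168.1.10"))      # '192.168.1.10'
print(ip_only("192.168.1.10/24"))   # '192.168.1.10'  (interface form)
print(ip_only("10.0.0.0/8"))        # '10.0.0.0'      (network form)
print(ip_only("not-an-ip"))         # None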
5,338,663
def GetAuth1Token(): """Returns an Auth1Token for use with server authentication.""" if AUTH1_TOKEN: return AUTH1_TOKEN if not OBJC_OK: logging.error('Objective-C bindings not available.') return None pref_value = Foundation.CFPreferencesCopyAppValue( 'AdditionalHttpHeaders', 'ManagedInstalls') if pref_value is None: logging.error('GetAuth1Token(): AdditionalHttpHeaders not present.') return None header = 'Cookie: Auth1Token=' for h in pref_value: if h.startswith(header): logging.debug('GetAuth1Token(): found %s', h) token = h[len(header):] if token.find(';') > -1: token = token[0:token.find(';')] token = str(token) return token logging.error('GetAuth1Token(): AdditionalHttpHeaders lacks a token.') return None
5,338,664
def test_post_request_wrong_token_null_token(mock_app):
    """Test receiving a POST request whose Authorization header carries an empty Bearer token."""

    headers = copy.deepcopy(HEADERS)
    headers["Authorization"] = "Bearer "

    # When a POST request with an empty token is sent
    response = mock_app.test_client().post("/apiv1.0/query?", headers=headers)

    # Then it should return unauthorized error 401
    assert response.status_code == 401
    data = json.loads(response.data)
    assert data["errorMessage"] == MISSING_TOKEN["errorMessage"]
5,338,665
def TemplateInputFilename(context): """Build template file name from config.""" if args.templatedir: filename = config_lib.CONFIG.Get("PyInstaller.template_filename", context=context) return os.path.join(args.templatedir, filename) return None
5,338,666
def get_data_by_isin(isin: str, dates: Tuple[datetime.date], is_etf: bool) -> Tuple[Optional[np.ndarray], str]: """Retrieves stock/ETF prices in EUR by ISIN for the given dates. Cached to make sure this is only queried once for a given currency & date-range.""" from_date = dates[0].strftime("%d/%m/%Y") to_date = (dates[-1] + datetime.timedelta(days=7)).strftime("%d/%m/%Y") # Retrieves stock/etf information based on the ISIN try: if is_etf: data = investpy.search_etfs(by="isin", value=isin) else: data = investpy.search_stocks(by="isin", value=isin) except RuntimeError: print(f"[DGPC] Warning, could not retrieve {'ETF' if is_etf else 'stock'} data for ISIN {isin}.") return None, "" # When a stock/ETF is listed in multiple countries, take one of the preferred countries if found for country in PREFERRED_COUNTRIES: local_data = data[data["country"] == country] if local_data.shape[0] > 0: break else: # Taking the first country from the results if none of the preferred countries is found country = data["country"][0] local_data = data # Retrieves the actual historical prices for the stock/etf currency = list(local_data["currency"])[0] symbol = list(local_data["symbol"])[0] if is_etf: name = list(local_data["name"])[0] history = investpy.get_etf_historical_data(name, country=country, from_date=from_date, to_date=to_date) else: history = investpy.get_stock_historical_data(symbol, country=country, from_date=from_date, to_date=to_date) history = history.reset_index() values = densify_history(history, dates) # Convert the results to euro if currency != "EUR": currency_modifier = to_euro_modifier(currency, tuple(dates)) values *= currency_modifier return values, symbol
5,338,667
def get_result_qiskit() -> Dict[str, Dict[str, Any]]: """Fixture for returning sample experiment result Returns ------- Dict[str, Dict[str, Any]] A dictionary of results for physics simulation and perfect gates A result dictionary which looks something like:: { "name": name of this experiment (obtained from qobj.experiment header) "seed": random seed used for simulation "shots": number of shots used in the simulation "data": { "counts": {'0x9: 5, ...}, "memory": ['0x9', '0xF', '0x1D', ..., '0x9'] }, "status": status string for the simulation "success": boolean "time_taken": simulation time of this single experiment } """ # Result of physics based sim for applying X on qubit 0 in 6 qubits perfect_counts = {"110000": 1000} counts_dict = { "c3_qasm_perfect_simulator": perfect_counts, } return counts_dict
5,338,668
async def test_get_with_no_precision_loss_small_granularity(
    min_level,
    max_level,
    date_from,
    date_to,
    expected,
):
    """Test query with interval granularity not smaller than the minimum layer level.

    In this case we don't have any precision loss.
    """
    indexer = MemoryIndexer(min_level=min_level, max_level=max_level)
    await indexer.load(mock_data_small_granularity)
    actual = indexer.get(date_from, date_to)
    np.testing.assert_array_equal(actual, expected)
5,338,669
def all_budgets_for_student(user_id): """Returns a queryset for all budgets that a student can view/edit i.e. is the submitter, president, or treasurer for any of the organization's budgets""" query = Q(budget__submitter=user_id) | Q(budget__president_crsid=user_id) | Q(budget__treasurer_crsid=user_id) orgs = Organization.objects.filter(query) budgets = Budget.objects.filter(organization__in=orgs) return budgets
5,338,670
def exec_process(cmdline, silent=True, catch_enoent=True, input=None, **kwargs): """Execute a subprocess and returns the returncode, stdout buffer and stderr buffer. Optionally prints stdout and stderr while running.""" try: sub = subprocess.Popen(args=cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) stdout, stderr = sub.communicate(input=input) if type(stdout) != type(""): # decode on Python 3 # do nothing on Python 2 (it just doesn't care about encoding anyway) stdout = stdout.decode(sys.getdefaultencoding(), "replace") stderr = stderr.decode(sys.getdefaultencoding(), "replace") returncode = sub.returncode if not silent: sys.stdout.write(stdout) sys.stderr.write(stderr) except OSError as e: if e.errno == errno.ENOENT and catch_enoent: raise DistutilsError('"%s" is not present on this system' % cmdline[0]) else: raise if returncode != 0: raise DistutilsError('Got return value %d while executing "%s", stderr output was:\n%s' % (returncode, " ".join(cmdline), stderr.rstrip("\n"))) return stdout
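# Usage sketch for exec_process() above on a POSIX system; `echo` is assumed to be on
# the PATH, and the module is assumed to import subprocess, sys, errno and
# DistutilsError as the function requires. Note the function returns only the captured
# stdout and raises DistutilsError on a non-zero exit status.
out = exec_process(["echo", "hello world"])
assert out.strip() == "hello world"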
5,338,671
def uniform_weights(x, x_mask):
    """Return uniform weights over non-masked x (a sequence of vectors).

    Args:
        x: batch * len * hdim
        x_mask: batch * len (1 for padding, 0 for true)
    Output:
        alpha: batch * len, uniform weights over the non-masked positions
    """
    alpha = Variable(torch.ones(x.size(0), x.size(1)))
    if x.data.is_cuda:
        alpha = alpha.cuda()
    alpha = alpha * x_mask.eq(0).float()
    alpha = alpha / alpha.sum(1, keepdim=True).expand(alpha.size())
    return alpha
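# Toy example for uniform_weights() above; it assumes torch and
# torch.autograd.Variable are imported at module level, as the function requires
# (in PyTorch >= 0.4 Variables and Tensors are merged, so plain tensors work).
x = torch.randn(2, 4, 8)                    # batch=2, len=4, hdim=8
x_mask = torch.tensor([[0, 0, 0, 0],
                       [0, 0, 0, 1]])       # 1 marks padding
alpha = uniform_weights(x, x_mask)
print(alpha)   # row 0: four weights of 0.25; row 1: three weights of 1/3 and a 0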
5,338,672
def mixed_estimator_2(T1, T2, verbose=False): """ Based on the Lavancier and Rochet (2016) article. The method combines two series of estimates of the same quantity taking into account their correlations. The individual measureements are assumed independent. The current implementation works only for point estimates. The main result corresponds to Eq. (11) from the article. Its variance is the equation after Eq. (9). [equation not checked] """ B = 1000 # bootstrap repetitions # Drop nans not_nans = np.logical_or(np.isnan(T1), np.isnan(T2)) T1, T2 = T1[~not_nans], T2[~not_nans] n = len(T1) # Return nan if no samples # If one sample, return simple average with no variance if n == 0: return np.nan, np.nan, np.array([np.nan, np.nan]) elif n == 1: # print(T1) return T1[0] / 2 + T2[0] / 2, np.nan, np.array([0.5, 0.5]) # Calculate the estimators for the data set. This is the input data for the rest T1_data_median = np.median(T1) T2_data_median = np.median(T2) # Estimate the covariance sigma matrix with bootstrap (with replacement, as described in the article) sigma = np.zeros((2, 2)) for b in range(B): T1_sample = np.random.choice(T1, size=n, replace=True) T2_sample = np.random.choice(T2, size=n, replace=True) # print('T1', T1_sample) T1_sample_median = np.median(T1_sample) T2_sample_median = np.median(T2_sample) sigma += np.array([[(T1_sample_median - T1_data_median)**2, (T1_sample_median - T1_data_median) * (T2_sample_median - T2_data_median)], [(T1_sample_median - T1_data_median) * (T2_sample_median - T2_data_median), (T2_sample_median - T2_data_median)**2]]) sigma /= B # print(n, sigma) # Calculate the mixed estimator I = np.array([[1, 1]]).T T = np.array([[T1_data_median, T2_data_median]]).T weights = inv(I.T @ inv(sigma) @ I) @ I.T @ inv(sigma) mixed_estimator = (weights @ T)[0, 0] mixedV = (inv(I.T @ inv(sigma) @ I))[0, 0] if verbose: print('weights', weights) print(mixed_estimator, '+-', np.sqrt(mixedV)) return mixed_estimator, mixedV, np.squeeze(weights)
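# Quick numerical sketch for mixed_estimator_2() above; it assumes numpy is imported
# as np and `inv` is numpy.linalg.inv at module level, as the function requires. The
# sample sizes and distributions are arbitrary.
np.random.seed(0)
T1 = np.random.normal(loc=5.0, scale=1.0, size=200)   # precise series
T2 = np.random.normal(loc=5.0, scale=3.0, size=200)   # noisier series
estimate, variance, weights = mixed_estimator_2(T1, T2)
print(estimate, variance, weights)   # the precise series should receive the larger weight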
5,338,673
def test_coincident_indices_0(): """Simple test with one coincident time.""" list0, list1 = setup_0() solution = {5: 5} assert coincident_indices(list0, list1, delta) == solution
5,338,674
def main(api_key, token, board_id): """List out the board lists for our client""" trello_client = TrelloClient( api_key=api_key, token=token, ) board = Board(client=trello_client, board_id=board_id) print('Lists') print('-----') print('Name: Id') for card_list in board.all_lists(): print('{card_list.name}: {card_list.id}'.format(card_list=card_list))
5,338,675
def e_add_const(pub, a, n): """Add constant n to an encrypted integer""" return a * modpow(pub.g, n, pub.n_sq) % pub.n_sq
5,338,676
def area_triangle(base, height):
    """Return the area of a triangle given its base length and height."""
    return (base * height) / 2.0
5,338,677
def _getAtomInvariantsWithRadius(mol, radius): """ Helper function to calculate the atom invariants for each atom with a given radius Arguments: - mol: the molecule of interest - radius: the radius for the Morgan fingerprint Return: list of atom invariants """ inv = [] for i in range(mol.GetNumAtoms()): info = {} fp = rdMolDescriptors.GetMorganFingerprint(mol, radius, fromAtoms=[i], bitInfo=info) for k in info.keys(): if info[k][0][1] == radius: inv.append(k) return inv
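# Small RDKit sketch for _getAtomInvariantsWithRadius() above; it assumes
# rdMolDescriptors is imported from rdkit.Chem at module level, as the helper requires.
from rdkit import Chem

mol = Chem.MolFromSmiles("CCO")                     # ethanol
invariants = _getAtomInvariantsWithRadius(mol, 1)   # radius-1 environment per atom
print(invariants)                                   # three integer invariants, one per atom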
5,338,678
def build_argparser(): """Construct an argument parser for the ``translate_header.py`` script. Returns ------- argparser : `argparse.ArgumentParser` The argument parser that defines the ``translate_header.py`` command-line interface. """ parser = argparse.ArgumentParser(description="Summarize headers from astronomical data files") parser.add_argument("files", metavar="file", type=str, nargs="+", help="File(s) from which headers will be parsed." " If a directory is given it will be scanned for files matching the regular" " expression defined in --regex.") parser.add_argument("-q", "--quiet", action="store_true", help="Do not report the translation content from each header. This forces " "output mode 'none'.") parser.add_argument("-d", "--dumphdr", action="store_true", help="Dump the header in YAML format to standard output rather than translating it." " This is the same as using mode=yaml") parser.add_argument("--traceback", action="store_true", help="Give detailed trace back when any errors encountered") parser.add_argument("-n", "--hdrnum", default=1, help="HDU number to read. If the HDU can not be found, a warning is issued but " "translation is attempted using the primary header. " "The primary header is always read and merged with this header.") parser.add_argument("-m", "--mode", default="auto", choices=OUTPUT_MODES, help="Display mode for translated parameters. 'verbose' displays all the information" " available. 'table' displays important information in tabular form." " 'yaml' dumps the header in YAML format (this is equivalent to -d option)." " 'fixed' dumps the header in YAML after it has had corrections applied." " Add 'native' suffix to dump YAML in PropertyList or Astropy native form." " 'none' displays no translated header information and is an alias for the " " '--quiet' option." " 'auto' mode is 'verbose' for a single file and 'table' for multiple files.") parser.add_argument("-l", "--log", default="warn", help="Python logging level to use.") re_default = r"\.fit[s]?\b" parser.add_argument("-r", "--regex", default=re_default, help="When looking in a directory, regular expression to use to determine whether" f" a file should be examined. Default: '{re_default}'") parser.add_argument("-p", "--packages", action="append", type=str, help="Python packages to import to register additional translators") return parser
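# Usage sketch for build_argparser() above; the file name is illustrative and the
# chosen mode must be one of the module-level OUTPUT_MODES values.
parser = build_argparser()
args = parser.parse_args(["--mode", "table", "raw/exposure_001.fits"])
print(args.files, args.mode, args.hdrnum)   # ['raw/exposure_001.fits'] table 1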
5,338,679
def radcool(temp, zmetal):
    """
    Cooling Function

    This version defines Lambda_sd such that (rho/m_p)^2 * Lambda(T,z) is the
    cooling rate in erg/cm^3/s.

    Args:
        temp  : temperature in the unit of K
        zmetal: metallicity in the unit of solar metallicity
    Return:
        in the unit of erg*s*cm^3
    """
    tshape = temp.shape
    tempflt = temp.flatten()
    qlog0 = np.zeros_like(tempflt)
    qlog1 = np.zeros_like(tempflt)
    for i, t in enumerate(tempflt):
        tlog = np.log10(t)
        # zero metal cooling coefficient Lambda_([Fe/H]=0)
        if tlog>=6.1:
            qlog0[i] = -26.39 + 0.471*(np.log10(t + 3.1623e6))
        elif tlog>=4.9:
            arg = 10.**(-(tlog-4.9)/.5) + 0.077302
            qlog0[i] = -22.16 + np.log10(arg)
        elif tlog>=4.25:
            bump1rhs = -21.98 - ((tlog-4.25)/0.55)
            bump2lhs = -22.16 - ((tlog-4.9)/0.284)**2
            qlog0[i] = max(bump1rhs,bump2lhs)
        else:
            qlog0[i] = -21.98 - ((tlog-4.25)/0.2)**2
        if np.isnan(qlog0[i]):  # '== np.nan' is always False, so use np.isnan
            mylog.warning('There is NaN.')

        # emission from metals alone at solar abundance
        if tlog>=5.65:
            tlogc = 5.65
            qlogc = -21.566
            qloginfty = -23.1
            p = 0.8
            qlog1[i] = qlogc - p*(tlog - tlogc)
            qlog1[i] = max(qlog1[i],qloginfty)
        else:
            tlogm = 5.1
            qlogm = -20.85
            sig = 0.65
            qlog1[i] = qlogm - ((tlog - tlogm)/sig)**2

    qlambda0 = 10.**qlog0
    qlambda1 = 10.**qlog1

    # final cooling coefficient Lambda_sd:
    radcoolsd = qlambda0 + zmetal.flatten()*qlambda1
    radcoolsd = radcoolsd.reshape(tshape)
    return radcoolsd
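# Usage sketch for radcool() above with a small temperature grid; it assumes numpy is
# imported as np (and that `mylog` is a module-level logger, only used for NaN warnings).
temp = np.array([[3.0e4, 2.0e5],
                 [1.5e6, 1.0e7]])      # K
zmetal = np.full_like(temp, 0.3)       # 0.3 solar metallicity
lam = radcool(temp, zmetal)
print(lam)                             # erg*s*cm^3, same shape as temp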
5,338,680
def L_model_forward(X, parameters): """ Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation Arguments: X -- data, numpy array of shape (input size, number of examples) parameters -- output of initialize_parameters_deep() Returns: AL -- last post-activation value caches -- list of caches containing: every cache of linear_activation_forward() (there are L-1 of them, indexed from 0 to L-1) """ caches = [] A = X L = len(parameters) // 2 # number of layers in the neural network # Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list. for l in range(1, L): A_prev = A A, cache = linear_activation_forward(A_prev, parameters["W" + str(l)], parameters["b" + str(l)], activation="relu") caches.append(cache) # Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list. AL, cache = linear_activation_forward(A, parameters["W" + str(L)], parameters["b" + str(L)], activation="sigmoid") caches.append(cache) assert(AL.shape == (1,X.shape[1])) return AL, caches
5,338,681
def linalg_multiply(a):
    """
    Multiply all elements in a vector or matrix

    Parameters:
    * a (array or matrix): The input to multiply

    Return (number): The product of all elements
    """
    return np.prod(a)
5,338,682
def async_parser(word, rules, skip=False, **kwargs): """ Asynchronously parses the pipe content Args: word (str): The string to transform rules (List[obj]): the parsed rules (Objectify instances). skip (bool): Don't parse the content kwargs (dict): Keyword arguments Kwargs: assign (str): Attribute to assign parsed content (default: strtransform) stream (dict): The original item Returns: Deferred: twisted.internet.defer.Deferred item Examples: >>> from riko.bado import react >>> from riko.bado.mock import FakeReactor >>> from meza.fntools import Objectify >>> >>> def run(reactor): ... item = {'content': 'hello world'} ... conf = {'rule': {'transform': 'title'}} ... rule = Objectify(conf['rule']) ... kwargs = {'stream': item, 'conf': conf} ... d = async_parser(item['content'], [rule], **kwargs) ... return d.addCallbacks(print, logger.error) >>> >>> try: ... react(run, _reactor=FakeReactor()) ... except SystemExit: ... pass ... Hello World """ if skip: value = kwargs['stream'] else: value = yield ait.coop_reduce(reducer, rules, word) return_value(value)
5,338,683
def lambda_handler(event, context): """ This method selects 10% of the input manifest as validation and creates an s3 file containing the validation objects. """ label_attribute_name = event['LabelAttributeName'] meta_data = event['meta_data'] s3_input_uri = meta_data['IntermediateManifestS3Uri'] input_total = int(meta_data['counts']['input_total']) # 10% of the total input should be used for validation. validation_set_size = input_total // 10 source = S3Ref.from_uri(s3_input_uri) validation_labeled_query = """select * from s3object[*] s where s."{}-metadata"."human-annotated" IN ('yes') LIMIT {}""".format( label_attribute_name, validation_set_size) dest = create_ref_at_parent_key(source, "validation_input.manifest") copy_with_query(source, dest, validation_labeled_query) logger.info("Uploaded validation set of size {} to {}.".format( validation_set_size, dest.get_uri())) meta_data['counts']['validation'] = validation_set_size meta_data['ValidationS3Uri'] = dest.get_uri() return meta_data
5,338,684
def run_remove_entry_via_id( id_to_remove ): """ Removes id from solr. Triggered by utils.reindex_all_support.run_enqueue_all_index_updates(). """ indexer = Indexer() indexer.remove_index_entry( inscription_id=id_to_remove ) return
5,338,685
def get_table_6():
    """Table 6: whether thermal storage can be adopted ('可' = allowed, '不可' = not allowed)

    Args:

    Returns:
        list: Table 6, availability of thermal storage
    """
    table_6 = [
        ('不可', '不可', '可', '可', '可'),
        ('不可', '不可', '可', '可', '可'),
        ('不可', '不可', '可', '可', '可'),
        ('不可', '不可', '可', '可', '可'),
        ('不可', '不可', '可', '可', '可'),
        ('不可', '不可', '不可', '可', '可'),
        ('不可', '不可', '不可', '可', '可')
    ]
    return table_6
5,338,686
def test_get_filename_not_found(fs: Mock, cwd: Mock) -> None: """It raises `SystemExit` when file is not found.""" with pytest.raises(SystemExit): assert file_handler.get_filenames(".", "*.pdf")
5,338,687
def is_prime(number, num_trials=200): """Determines whether a number is prime. Runs the Miller-Rabin probabilistic primality test many times on the given number. Args: number (int): Number to perform primality test on. num_trials (int): Number of times to perform the Miller-Rabin test. Returns: True if number is prime, False otherwise. """ if number < 2: return False if number != 2 and number % 2 == 0: return False # Find largest odd factor of n-1. exp = number - 1 while exp % 2 == 0: exp //= 2 for _ in range(num_trials): rand_val = int(random.SystemRandom().randrange(1, number)) new_exp = exp power = pow(rand_val, new_exp, number) while new_exp != number - 1 and power != 1 and power != number - 1: power = (power * power) % number new_exp *= 2 if power != number - 1 and new_exp % 2 == 0: return False return True
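# Quick sanity check for is_prime() above.
print([n for n in range(2, 30) if is_prime(n)])
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
print(is_prime(561))   # False: 561 is a Carmichael number, caught by Miller-Rabin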
5,338,688
def extract_labels(filenames): """ Extract class labels of the images from image path list. # Arguments filenames: List of paths to image file. # Returns List of image labels. """ return LabelEncoder().fit_transform([extract_label(filename) for filename in filenames])
5,338,689
def reverseList(head):
    """
    :type head: ListNode
    :rtype: ListNode
    """
    # Standard iterative reversal: walk the list once, flipping each node's
    # `next` pointer back to the previous node.
    prev = None
    current = head
    while current is not None:
        next_node = current.next   # remember the rest of the list
        current.next = prev        # reverse this node's pointer
        prev = current
        current = next_node
    return prev                    # prev is the new head (None for an empty list)
5,338,690
def test_solver(m=20, n=10, k=5): """Test lstsq._tikhonov.solve().""" A = np.random.random((m, n)) B = np.random.random((m, k)) regularizers = 5 + np.random.random(k) Ps = [5 + np.random.random((n, n)) for _ in range(k)] Ps_diag = [5 + np.random.random(n) for _ in range(k)] # Bad number of regularization parameters. with pytest.raises(ValueError) as ex: opinf.lstsq.solver(A, B, regularizers[:k-2]) assert ex.value.args[0] == "invalid or misaligned input P" # Bad number of regularization matrices. with pytest.raises(ValueError) as ex: opinf.lstsq.solver(A, B, Ps[:k-2]) assert ex.value.args[0] == "invalid or misaligned input P" # Try to solve 1D problem with multiple regularizations. with pytest.raises(ValueError) as ex: opinf.lstsq.solver(A, B[:, 0], Ps) assert ex.value.args[0] == "invalid or misaligned input P" # Bad type for regularization. with pytest.raises(ValueError) as ex: opinf.lstsq.solver(A, B, {}) assert ex.value.args[0] == "invalid or misaligned input P" # Correct usage. solver = opinf.lstsq.solver(A, B, 0) assert isinstance(solver, opinf.lstsq.SolverL2) solver = opinf.lstsq.solver(A, B, regularizers[0]) assert isinstance(solver, opinf.lstsq.SolverL2) solver = opinf.lstsq.solver(A, B, regularizers) assert isinstance(solver, opinf.lstsq.SolverL2Decoupled) solver = opinf.lstsq.solver(A, B, Ps[0]) assert isinstance(solver, opinf.lstsq.SolverTikhonov) solver = opinf.lstsq.solver(A, B, Ps) assert isinstance(solver, opinf.lstsq.SolverTikhonovDecoupled) solver = opinf.lstsq.solver(A, B, Ps_diag) assert isinstance(solver, opinf.lstsq.SolverTikhonovDecoupled) opinf.lstsq.solve(A, B, 0)
5,338,691
def test_verify_password_reset_token_invalid(): """It should return None if the token is invalid""" token = "wrong" result = verify_password_reset_token(token) assert result is None
5,338,692
def validate(data: BuildParams):
    """
    Makes sure a valid combination of params has been provided.
    """
    git_repo = bool(data.source.git_repo)
    dockerfile = bool(data.source.dockerfile)
    build_context = bool(data.source.build_context)

    git_valid = git_repo and not dockerfile and not build_context
    dockerfile_valid = dockerfile and not git_repo

    if not (git_valid or dockerfile_valid):
        return False, "Only one of build sources (git_repo, dockerfile) can be used.\n" \
                      "build_context can only be used in combination with dockerfile"
    return True, ""
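# Duck-typed sketch for validate() above; SimpleNamespace stands in for the real
# BuildParams model, whose exact definition is not shown here.
from types import SimpleNamespace

inline = SimpleNamespace(source=SimpleNamespace(
    git_repo="", dockerfile="FROM python:3.11", build_context="."))
from_git = SimpleNamespace(source=SimpleNamespace(
    git_repo="https://example.org/repo.git", dockerfile="", build_context=""))
both = SimpleNamespace(source=SimpleNamespace(
    git_repo="https://example.org/repo.git", dockerfile="FROM python:3.11", build_context=""))

print(validate(inline))    # (True, '')   dockerfile + build_context is allowed
print(validate(from_git))  # (True, '')   a bare git repo is allowed
print(validate(both))      # (False, ...) mixing the two sources is rejected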
5,338,693
def load_model(filepath=FILEPATH) -> TrainingParams:
    """
    Load a pickled TrainingParams model from disk.

    :param filepath: path to the pickle file to load
    :return: the unpickled TrainingParams instance
    """
    with open(filepath, "rb") as handler:
        model = pickle.load(handler)
    return model
5,338,694
def test_add_vertex_data_prop_columns(df_type): """ add_vertex_data() on "merchants" table, subset of properties. """ from cugraph.experimental import PropertyGraph merchants = dataset1["merchants"] merchants_df = df_type(columns=merchants[0], data=merchants[1]) expected_props = ["merchant_name", "merchant_sales", "merchant_location"] pG = PropertyGraph() pG.add_vertex_data(merchants_df, type_name="merchants", vertex_id_column="merchant_id", property_columns=expected_props) assert pG.num_vertices == 5 assert pG.num_edges == 0 assert sorted(pG.vertex_property_names) == sorted(expected_props)
5,338,695
def entry_point(): """ Get station information and departures from public transport operators, and update a departure board with departure information for a station. """
5,338,696
def is_symmetric(m): """Check if a sparse matrix is symmetric https://mail.python.org/pipermail/scipy-dev/2014-October/020117.html Parameters ---------- m : sparse matrix Returns ------- check : bool """ if m.shape[0] != m.shape[1]: raise ValueError('m must be a square matrix') if not isinstance(m, sparse.coo_matrix): m = sparse.coo_matrix(m) r, c, v = m.row, m.col, m.data tril_no_diag = r > c triu_no_diag = c > r if triu_no_diag.sum() != tril_no_diag.sum(): return False, "no_diag_sum", triu_no_diag.sum() - tril_no_diag.sum() rl = r[tril_no_diag] cl = c[tril_no_diag] vl = v[tril_no_diag] ru = r[triu_no_diag] cu = c[triu_no_diag] vu = v[triu_no_diag] sortl = np.lexsort((cl, rl)) sortu = np.lexsort((ru, cu)) vl = vl[sortl] vu = vu[sortu] check = np.allclose(vl, vu) return check
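# Usage sketch for is_symmetric() above; it assumes numpy (as np) and scipy.sparse
# (as sparse) are imported at module level, as the function requires.
sym = sparse.coo_matrix(np.array([[1, 2, 0],
                                  [2, 1, 3],
                                  [0, 3, 1]]))
asym = sparse.coo_matrix(np.array([[1, 2, 0],
                                   [5, 1, 3],
                                   [0, 3, 1]]))
print(is_symmetric(sym))    # True
print(is_symmetric(asym))   # False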
5,338,697
def get_proj_libdirs(proj_dir: Path) -> List[str]: """ This function finds the library directories """ proj_libdir = os.environ.get("PROJ_LIBDIR") libdirs = [] if proj_libdir is None: libdir_search_paths = (proj_dir / "lib", proj_dir / "lib64") for libdir_search_path in libdir_search_paths: if libdir_search_path.exists(): libdirs.append(str(libdir_search_path)) if not libdirs: raise SystemExit( "ERROR: PROJ_LIBDIR dir not found. Please set PROJ_LIBDIR." ) else: libdirs.append(proj_libdir) return libdirs
5,338,698
def render_raster_map(bounds, scale, basemap_image, aoi_image, id, path, colors): """Render raster dataset map based on bounds. Merge this over basemap image and under aoi_image. Parameters ---------- bounds : list-like of [xmin, ymin, xmax, ymax] bounds of map scale : dict map scale info basemap_image : Image object aoi_image : Image object id : str map ID path : str path to raster dataset colors : list-like of colors colors to render map image based on values in raster Returns ------- id, Image object Image object is None if it could not be rendered or does not overlap bounds """ raster_img = render_raster(path, bounds, scale, WIDTH, HEIGHT, colors) map_image = merge_maps([basemap_image, raster_img, aoi_image]) map_image = to_base64(map_image) return id, map_image
5,338,699