Columns: content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
import struct
def collect_js(
        deps,
        closure_library_base = None,
        has_direct_srcs = False,
        no_closure_library = False,
        css = None):
    """Aggregates transitive JavaScript source files from unfurled deps."""
    srcs = []
    direct_srcs = []
    ijs_files = []
    infos = []
    modules = []
    descriptors = []
    stylesheets = []
    js_module_roots = []
    has_closure_library = False
    for dep in deps:
        srcs += [getattr(dep.closure_js_library, "srcs", depset())]
        ijs_files += [getattr(dep.closure_js_library, "ijs_files", depset())]
        infos += [getattr(dep.closure_js_library, "infos", depset())]
        modules += [getattr(dep.closure_js_library, "modules", depset())]
        descriptors += [getattr(dep.closure_js_library, "descriptors", depset())]
        stylesheets += [getattr(dep.closure_js_library, "stylesheets", depset())]
        js_module_roots += [getattr(dep.closure_js_library, "js_module_roots", depset())]
        has_closure_library = (
            has_closure_library or
            getattr(dep.closure_js_library, "has_closure_library", False)
        )
    if no_closure_library:
        if has_closure_library:
            fail("no_closure_library can't be used when Closure Library is " +
                 "already part of the transitive closure")
    elif has_direct_srcs and not has_closure_library:
        direct_srcs += closure_library_base
        has_closure_library = True
    if css:
        direct_srcs += closure_library_base + [css.closure_css_binary.renaming_map]
    return struct(
        srcs = depset(direct_srcs, transitive = srcs),
        js_module_roots = depset(transitive = js_module_roots),
        ijs_files = depset(transitive = ijs_files),
        infos = depset(transitive = infos),
        modules = depset(transitive = modules),
        descriptors = depset(transitive = descriptors),
        stylesheets = depset(transitive = stylesheets),
        has_closure_library = has_closure_library,
    )
7a243401280646103522ed339ff20c35f05e031d
3,646,400
import termios
import struct
import fcntl

def send_control(uuid, type, data):
    """
    Sends control data to the terminal, as for example resize events
    """
    sp = sessions[uuid]
    if type == 'resize':
        winsize = struct.pack("HHHH", data['rows'], data['cols'], 0, 0)
        fcntl.ioctl(sp['ptymaster'].fileno(), termios.TIOCSWINSZ, winsize)
        return True
    else:
        serverboards.warning("Unknown control type: %s" % (type))
        return False
262ef0ccffac80c0293d1446eb0e38e50b2ce687
3,646,401
import os

def get_absolute_path(path):
    """ Returns absolute path. """
    if path.startswith("/"):
        return path
    else:
        return os.path.join(HOME_DIR, path)
6754a947cd1081a760cc529fbb94270a7f867e68
3,646,402
import os

def get_prefix_by_xml_filename(xml_filename):
    """Get the prefix associated with an XML file

    Parameters
    ----------
    xml_filename : str
        Name of the XML file

    Returns
    -------
    str
        Prefix associated with the XML file
    """
    file, ext = os.path.splitext(xml_filename)
    return file
169b5571ae0bfca2a923c4030325002503790f6e
3,646,403
def dgausscdf(x):
    """ Derivative of the cumulative distribution function for the normal distribution. """
    return gausspdf(x)
e968f20ca28555eb50d5766440c5f3f47522c1ff
3,646,404
import os
import sys

def LStatFile(path):
    """
    LStat the file. Do not follow the symlink.
    """
    d = None
    error = None
    try:
        d = os.lstat(path)
    except OSError as error:
        print("Exception lstating file " + path + " Error Code: "
              + str(error.errno) + " Error: " + error.strerror, file=sys.stderr)
        LG().Log('ERROR', "Exception lstating file " + path + " Error Code: "
                 + str(error.errno) + " Error: " + error.strerror)
    except IOError as error:
        print("Exception lstating file " + path + " Error Code: "
              + str(error.errno) + " Error: " + error.strerror, file=sys.stderr)
        LG().Log('ERROR', "Exception lstating file " + path + " Error Code: "
                 + str(error.errno) + " Error: " + error.strerror)
    return d
54ea199de8eef527743a882940140aad7e44add4
3,646,405
import tqdm def model_datasets_to_rch(gwf, model_ds, print_input=False): """convert the recharge data in the model dataset to a recharge package with time series. Parameters ---------- gwf : flopy.mf6.modflow.mfgwf.ModflowGwf groundwater flow model. model_ds : xr.DataSet dataset containing relevant model grid information print_input : bool, optional value is passed to flopy.mf6.ModflowGwfrch() to determine if input should be printed to the lst file. Default is False Returns ------- rch : flopy.mf6.modflow.mfgwfrch.ModflowGwfrch recharge package """ # check for nan values if model_ds['recharge'].isnull().any(): raise ValueError('please remove nan values in recharge data array') # get stress period data if model_ds.steady_state: mask = model_ds['recharge'] != 0 if model_ds.gridtype == 'structured': rch_spd_data = mdims.data_array_2d_to_rec_list( model_ds, mask, col1='recharge', first_active_layer=True, only_active_cells=False) elif model_ds.gridtype == 'vertex': rch_spd_data = mdims.data_array_1d_vertex_to_rec_list( model_ds, mask, col1='recharge', first_active_layer=True, only_active_cells=False) # create rch package rch = flopy.mf6.ModflowGwfrch(gwf, filename=f'{gwf.name}.rch', pname=f'{gwf.name}', fixed_cell=False, maxbound=len(rch_spd_data), print_input=True, stress_period_data={0: rch_spd_data}) return rch # transient recharge if model_ds.gridtype == 'structured': empty_str_array = np.zeros_like(model_ds['idomain'][0], dtype="S13") model_ds['rch_name'] = xr.DataArray(empty_str_array, dims=('y', 'x'), coords={'y': model_ds.y, 'x': model_ds.x}) model_ds['rch_name'] = model_ds['rch_name'].astype(str) # dimension check if model_ds['recharge'].dims == ('time', 'y', 'x'): axis = 0 rch_2d_arr = model_ds['recharge'].data.reshape( (model_ds.dims['time'], model_ds.dims['x'] * model_ds.dims['y'])).T # check if reshaping is correct if not (model_ds['recharge'].values[:, 0, 0] == rch_2d_arr[0]).all(): raise ValueError( 'reshaping recharge to calculate unique time series did not work out as expected') elif model_ds['recharge'].dims == ('y', 'x', 'time'): axis = 2 rch_2d_arr = model_ds['recharge'].data.reshape( (model_ds.dims['x'] * model_ds.dims['y'], model_ds.dims['time'])) # check if reshaping is correct if not (model_ds['recharge'].values[0, 0, :] == rch_2d_arr[0]).all(): raise ValueError( 'reshaping recharge to calculate unique time series did not work out as expected') else: raise ValueError('expected dataarray with 3 dimensions' f'(time, y and x) or (y, x and time), not {model_ds["recharge"].dims}') rch_unique_arr = np.unique(rch_2d_arr, axis=0) rch_unique_dic = {} for i, unique_rch in enumerate(rch_unique_arr): model_ds['rch_name'].data[np.isin( model_ds['recharge'].values, unique_rch).all(axis=axis)] = f'rch_{i}' rch_unique_dic[f'rch_{i}'] = unique_rch mask = model_ds['rch_name'] != '' rch_spd_data = mdims.data_array_2d_to_rec_list(model_ds, mask, col1='rch_name', first_active_layer=True, only_active_cells=False) elif model_ds.gridtype == 'vertex': empty_str_array = np.zeros_like(model_ds['idomain'][0], dtype="S13") model_ds['rch_name'] = xr.DataArray(empty_str_array, dims=('cid'), coords={'cid': model_ds.cid}) model_ds['rch_name'] = model_ds['rch_name'].astype(str) # dimension check if model_ds['recharge'].dims == ('cid', 'time'): rch_2d_arr = model_ds['recharge'].values elif model_ds['recharge'].dims == ('time', 'cid'): rch_2d_arr = model_ds['recharge'].values.T else: raise ValueError('expected dataarray with 2 dimensions' f'(time, cid) or (cid, time), not {model_ds["recharge"].dims}') 
rch_unique_arr = np.unique(rch_2d_arr, axis=0) rch_unique_dic = {} for i, unique_rch in enumerate(rch_unique_arr): model_ds['rch_name'][(rch_2d_arr == unique_rch).all( axis=1)] = f'rch_{i}' rch_unique_dic[f'rch_{i}'] = unique_rch mask = model_ds['rch_name'] != '' rch_spd_data = mdims.data_array_1d_vertex_to_rec_list(model_ds, mask, col1='rch_name', first_active_layer=True, only_active_cells=False) # create rch package rch = flopy.mf6.ModflowGwfrch(gwf, filename=f'{gwf.name}.rch', pname='rch', fixed_cell=False, maxbound=len(rch_spd_data), print_input=print_input, stress_period_data={0: rch_spd_data}) # get timesteps tdis_perioddata = mfpackages.get_tdis_perioddata(model_ds) perlen_arr = [t[0] for t in tdis_perioddata] time_steps_rch = [0.0] + np.array(perlen_arr).cumsum().tolist() # create timeseries packages for i, key in tqdm(enumerate(rch_unique_dic.keys()), total=len(rch_unique_dic.keys()), desc="Building ts packages rch"): # add extra time step to the time series object (otherwise flopy fails) recharge_val = list(rch_unique_dic[key]) + [0.0] recharge = list(zip(time_steps_rch, recharge_val)) if i == 0: rch.ts.initialize(filename=f'{key}.ts', timeseries=recharge, time_series_namerecord=key, interpolation_methodrecord='stepwise') else: rch.ts.append_package(filename=f'{key}.ts', timeseries=recharge, time_series_namerecord=key, interpolation_methodrecord='stepwise') return rch
b32442c508e17205737ddb8168fe323b57cfbb2f
3,646,406
from typing import List
from datetime import datetime

def create_events_to_group(
    search_query: str,
    valid_events: bool,
    group: Group,
    amount: int = 1,
    venue: bool = False,
) -> List[Event]:
    """
    Create random test events and save them to a group

    Arguments:
        search_query {str} -- query term used for the search request
        valid_events {bool} -- whether the events should be searchable by the query term
        group {Group} -- group to which the events are added

    Keyword Arguments:
        amount {int} -- how many events should be created (default: {1})
        venue {bool} -- if a venue should be added to the events (default: {False})

    Returns:
        List[Event] -- created & saved events
    """
    created_events: List[Event] = []
    for i in range(0, amount):
        event_name: str = random_string(search_query=search_query, valid=valid_events)
        event: Event = Event(
            meetup_id=event_name,
            time=datetime.now(),
            name=event_name,
            link="http://none",
            date_in_series_pattern=False,
        )
        if venue:
            event.venue_name = event_name
            event.venue_location = {"lat": i + 1, "lon": i + 1}
        created_events.append(event)
    group.add_events(events=created_events)
    group.save()
    sleep(1)
    return created_events
31045c8f9311d677d766d87ed9fc1d6848cc210d
3,646,407
def alt_stubbed_receiver() -> PublicKey:
    """Arbitrary known public key to be used as receiver."""
    return PublicKey("J3dxNj7nDRRqRRXuEMynDG57DkZK4jYRuv3Garmb1i98")
c07461fc060f9dc637e93cadd32604aae892f924
3,646,408
import base64

def create_api_headers(token):
    """
    Create the API header. This is going to be sent along with the request
    for verification.
    """
    auth_type = 'Basic ' + base64.b64encode(bytes(token + ":")).decode('ascii')
    return {
        'Authorization': auth_type,
        'Accept': 'application/json',
        'Content-Type': 'application/json'
    }
41ba1e22898dab2d42dde52e4458abc40640e957
3,646,409
def _combine(bundle, transaction_managed=False, rollback=False, use_reversion=True): """ Returns one sreg and DHCP output for that SREG. If rollback is True the sreg will be created and then rolleback, but before the rollback all its HWAdapters will be polled for their DHCP output. """ bundle['errors'] = None bundle['old-dhcp-output'] = get_all_dhcp_for_system(bundle['system']) sreg = StaticReg( label=bundle['a'].label, domain=bundle['a'].domain, ip_str=bundle['ip'], system=bundle['system'], description='Migrated SREG', ip_type=bundle['a'].ip_type ) try: bundle['new-dhcp-output'] = ( "<span class='no-dhcp-output'>No new DHCP output</span>" ) view_names = [v.name for v in bundle['a'].views.all()] try: bundle['a'].delete(check_cname=False, call_prune_tree=False) except ValidationError, e: rollback = True bundle['errors'] = 'Error while deleting the A record.' + str(e) return try: bundle['ptr'].delete() except ValidationError, e: rollback = True bundle['errors'] = 'Error while deleting the PTR record.' + str(e) return try: sreg.save() for name in view_names: sreg.views.add(View.objects.get(name=name)) if use_reversion: reversion.set_comment('Migrated via combine()') except ValidationError, e: rollback = True bundle['errors'] = 'Error while creating the SREG record.' + str(e) return for nic in bundle['hwadapters']: hw_info, kvs = nic.emit_hwadapter() if not hw_info['mac']: rollback = True return try: hw, _ = HWAdapter.objects.get_or_create( sreg=sreg, mac=hw_info['mac'] ) # HWAdapter class does this for us. #hw.name = hw_info['name'].replace hw.save() except ValidationError, e: rollback = True bundle['errors'] = 'Error while creating HW Adapter' return try: for kv in kvs: if kv['key'] in ('hostname', 'option_hostname'): # If the option host-name value matches the SREG fqdn # we don't need to add the option, it will be added by # default. all other cases it will be overriden. if kv['value'] == sreg.fqdn: continue else: key = 'host_name' else: key = kv['key'] if HWAdapterKeyValue.objects.filter(key=key, obj=hw).exists(): pass else: kv_ = HWAdapterKeyValue( key=key, value=kv['value'], obj=hw ) kv_.clean() kv_.save() for kv in nic._nic: SystemKeyValue.objects.filter(pk=kv.pk).delete() except ValidationError, e: transaction.rollback() bundle['errors'] = ( 'Error while creating HW Adapter KeyValue. ' + str(e) ) return bundle['new-dhcp-output'] = get_all_dhcp_for_system(bundle['system']) return sreg finally: if not transaction_managed: if rollback: transaction.rollback() else: transaction.commit()
0171e804e4f10167d85e92608a09bca55308edfa
3,646,410
def get_node_session(*args, **kwargs):
    """Creates a NodeSession instance using the provided connection data.

    Args:
        *args: Variable length argument list with the connection data used
               to connect to the database. It can be a dictionary or a
               connection string.
        **kwargs: Arbitrary keyword arguments with connection data used to
                  connect to the database.

    Returns:
        mysqlx.XSession: XSession object.
    """
    settings = _get_connection_settings(*args, **kwargs)
    if "routers" in settings:
        raise InterfaceError("NodeSession expects only one pair of host and port")
    return NodeSession(settings)
bb992b7e49a698dfb7b54b1492616913a6b5df27
3,646,411
import os

def load_aaz_command_table(loader, aaz_pkg_name, args):
    """ This function is used in AzCommandsLoader.load_command_table. It will load commands in module's aaz package. """
    profile_pkg = _get_profile_pkg(aaz_pkg_name, loader.cli_ctx.cloud)

    command_table = {}
    command_group_table = {}
    arg_str = ' '.join(args)
    fully_load = os.environ.get(AAZ_PACKAGE_FULL_LOAD_ENV_NAME, 'False').lower() == 'true'  # used to disable cut logic
    if profile_pkg is not None:
        _load_aaz_pkg(loader, profile_pkg, command_table, command_group_table, arg_str, fully_load)

    for group_name, command_group in command_group_table.items():
        loader.command_group_table[group_name] = command_group
    for command_name, command in command_table.items():
        loader.command_table[command_name] = command
    return command_table, command_group_table
6c75fd9f3e13e8397cf7064fbcf22385c63f2a29
3,646,412
def edit_role(payload, search_term):
    """Find and edit the role."""
    role = Role.query.get(search_term)
    # if edit request == stored value
    if not role:
        return response_builder(dict(status="fail", message="Role does not exist."), 404)
    try:
        if payload["name"] == role.name:
            return response_builder(dict(
                data=dict(path=role.serialize()),
                message="No change specified."
            ), 200)
        else:
            old_role_name = role.name
            role.name = payload["name"]
            role.save()
            return response_builder(dict(
                data=dict(path=role.serialize()),
                message="Role {} has been changed"
                        " to {}.".format(old_role_name, role.name)
            ), 200)
    except KeyError:
        return response_builder(
            dict(status="fail", message="Name to edit to must be provided."), 400)
8690c8fc1c1aea5245d9cef540c355a2903a8484
3,646,413
def use_redis_cache(key, ttl_sec, work_func):
    """Attempts to return value by key, otherwise caches and returns `work_func`"""
    redis = redis_connection.get_redis()
    cached_value = get_pickled_key(redis, key)
    if cached_value:
        return cached_value
    to_cache = work_func()
    pickle_and_set(redis, key, to_cache, ttl_sec)
    return to_cache
a2c631466aef18c7bb640b17e57421e257ad7314
3,646,414
def counting_sort(array, low, high):
    """Counting sort (CountingSort).

    Sort the given array 'array', knowing that it contains only values in
    the interval from 'low' to 'high' (interval bounds included).

    Return the sorted array.
    """
    counts = [0 for i in range(high - low + 1)]
    for elem in array:
        counts[elem - low] += 1
    current = 0
    for i in range(high - low + 1):
        for j in range(current, current + counts[i]):
            array[j] = i + low
        current += counts[i]
    return array
bd4ccccdb24786ec3f3d867afe1adf340c9e53b5
3,646,415
import re

def normalize_archives_url(url):
    """
    Normalize url.

    will try to infer, find or guess the most useful archives URL, given
    a URL.

    Return normalized URL, or the original URL if no improvement is found.
    """
    # change new IETF mailarchive URLs to older, still available text .mail archives
    new_ietf_exp = re.compile(
        "https://mailarchive\\.ietf\\.org/arch/search/"
        "\\?email_list=(?P<list_name>[\\w-]+)"
    )
    ietf_text_archives = (
        r"https://www.ietf.org/mail-archive/text/\g<list_name>/"
    )
    new_ietf_browse_exp = re.compile(
        r"https://mailarchive.ietf.org/arch/browse/(?P<list_name>[\w-]+)/?"
    )

    match = new_ietf_exp.match(url)
    if match:
        return re.sub(new_ietf_exp, ietf_text_archives, url)

    match = new_ietf_browse_exp.match(url)
    if match:
        return re.sub(new_ietf_browse_exp, ietf_text_archives, url)

    return url
e8a5351af28338c77c3e94fdf2b81e22c7a6edfd
3,646,416
import os

def logs():
    """
    :return: The absolute path to the directory that contains Benchmark's log file.
    """
    return os.path.join(benchmark_confdir(), "logs")
a94f2435b705a23b19c5c9da57e0bff0448a9f4b
3,646,417
def getIsolatesFromIndices(indices):
    """
    Extracts the isolates from the indices of a df_X.
    :param pandas.index indices: cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP
    :return dict: keyed by cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP
        values correspond to rows element in the index
    """
    keys = [n for n in indices.names]
    result = {}
    for idx, key in enumerate(keys):
        result[key] = [v[idx] for v in indices.values]
    return result
4e9200c722ce0c478d13eddcc799f4a8f7cab6db
3,646,418
def save_group_geo_org(user_id, group_id, area_id, org_unit_id):
    """Method for attaching org units and sub-counties."""
    try:
        if org_unit_id:
            geo_org_perm, ctd = CPOVCUserRoleGeoOrg.objects.update_or_create(
                user_id=user_id, group_id=group_id,
                org_unit_id=org_unit_id, is_void=False,
                defaults={'area_id': area_id, 'org_unit_id': org_unit_id,
                          'user_id': user_id, 'group_id': group_id,
                          'is_void': False},)
        geo_org_perm, ctd = CPOVCUserRoleGeoOrg.objects.update_or_create(
            user_id=user_id, group_id=group_id, area_id=area_id,
            is_void=False,
            defaults={'area_id': area_id, 'org_unit_id': org_unit_id,
                      'user_id': user_id, 'group_id': group_id,
                      'is_void': False},)
    except Exception, e:
        error = 'Error searching org unit -%s' % (str(e))
        print error
        return None
    else:
        return geo_org_perm, ctd
ed7750760405e12f790454e247e54917184e7044
3,646,419
def tf_efficientnet_lite0(pretrained=False, **kwargs):
    """ EfficientNet-Lite0 """
    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_efficientnet_lite(
        'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0,
        pretrained=pretrained, **kwargs)
    return model
49ea1c68f168ad613222808e2fbb1ead52190243
3,646,420
import os

def ensuredir(dirpath):
    """
    ensure @dirpath exists and return it again

    raises OSError on other error than EEXIST
    """
    try:
        os.makedirs(dirpath, 0o700)
    except FileExistsError:
        pass
    return dirpath
447f4542faa00e8928e30f0a9793436b8377964a
3,646,421
import ast
from typing import Optional

def get_qualname(node: ast.AST) -> Optional[str]:
    """
    If node represents a chain of attribute accesses, return its qualified name.
    """
    parts = []
    while True:
        if isinstance(node, ast.Name):
            parts.append(node.id)
            break
        elif isinstance(node, ast.Attribute):
            parts.append(node.attr)
            node = node.value
        else:
            return None
    return '.'.join(reversed(parts))
0d08b25a50b7d159f5df3b0b17282725eb748f38
3,646,422
def traceUsage(addr, register, steps):
    """
    Given a start address, a register which holds a value and the number of steps,
    this function disassembles forward #steps instructions and traces the value of
    <register> until it is used in a call instruction. It then returns the offset
    added to <register> and the address of the call.

    Note that this tracing is very basic and does neither handle multiple registers
    at the same time nor any other modification than adding constants

    e.g.:
    00401622 mov eax, g_IAT      //start at addr = 0x00401622, register = "eax"
    00401627 mov ecx, [eax+0Ch]  //trace ecx, forget eax from now on. Save offset "0x0C"
    0040162A push edx            //ignore
    0040162B call ecx            //return offset 0x0c and address 0x0040162B
    """
    potentialOffset = -1
    localRegister = register
    for step in range(steps):
        addr = NextHead(addr)
        dis = GetMnem(addr)
        if dis == 'mov' and localRegister in GetOpnd(addr, 1):
            # look for e.g. "mov eax, [<register>+1CCh]"
            potentialOffset = GetOpnd(addr, 1)
            if potentialOffset[0] != '[' or potentialOffset[-1] != ']':  # "[<register>+1CCh]"
                continue
            potentialOffset = potentialOffset[1:-1]  # "<register>+1CCh"
            if '+' in potentialOffset:
                # we might have had "mov ecx, [eax]", so there is no plus
                potentialOffset = potentialOffset.split(register + '+')[1]  # "1CCh"
            else:
                potentialOffset = "0"
            if potentialOffset.endswith('h'):
                potentialOffset = int(potentialOffset[:-1], 16) / 4  # "1cc"
            else:
                potentialOffset = int(potentialOffset) / 4
            localRegister = GetOpnd(addr, 0)  # get new register to search for upcoming call-instruction
        elif dis == 'call' and GetOpnd(addr, 0) == localRegister:
            return potentialOffset, addr
    if potentialOffset != -1:
        print "[-] Error: Got potentialOffset %s but no corresponding call - maybe increase the steps range?" % (str(potentialOffset))
    return -1, -1
78c805af660b5e98348de1bd1ae4b7ce9a57238b
3,646,423
import os

def index_folder(folder, images=[]):
    """
    simple multi threaded recursive function to map folder

    Args:
        @param folder: folder str path to folder
        @param images: images list containing absolute paths of directory images

    Returns:
        List with image paths
    """
    print(f'Entering {folder}')
    folders = []
    for i in os.listdir(folder):
        item_path = os.path.join(folder, i)
        try:
            Image.open(item_path, mode='r')
            images.append(item_path)
        except (PermissionError, IsADirectoryError):
            print(f'found folder {i}')
            print(item_path)
            folders.append(item_path)
    if folders:
        with ThreadPool(cpu_count()) as pool:
            pool.map_async(index_folder, folders).get()
    return images
ef6fb4a1fc9fa9d16756ff3a0c0855fde7d66bc1
3,646,424
def array3d(surface):
    """pygame.surfarray.array3d (Surface): return array

    Copy pixels into a 3d array.

    Copy the pixels from a Surface into a 3D array. The bit depth of the
    surface will control the size of the integer values, and will work
    for any type of pixel format.

    This function will temporarily lock the Surface as pixels are copied
    (see the Surface.lock - lock the Surface memory for pixel access
    method).
    """
    global numpysf
    try:
        return numpysf.array3d(surface)
    except AttributeError:
        return numpysf.array3d(surface)
a2079a540453d5ba69f5b10e292341ef6fcfb972
3,646,425
import torch

def masked_kl_div(input, target, mask):
    """Evaluate masked KL divergence between input activations and target distribution.

    Parameters:
        input (tensor) - NxD batch of D-dimensional activations (un-normalized log distribution).
        target (tensor) - NxD normalized target distribution.
        mask (tensor, torch.bool) - NxD mask of elements to include in calculation.

    Returns:
        Nx1 tensor of cross-entropy calculation results.
    """
    input = input.clone()
    input[~mask] = -float('inf')
    log_q = F.log_softmax(input, dim=1)
    log_q[~mask] = 0
    log_p = torch.log(target)
    log_p[~mask] = 0
    KLi = target * (log_p - log_q)
    KLi[target == 0] = 0
    KL = torch.sum(KLi, dim=1, keepdim=True)
    return KL
afdd704bac7caabd7d0cbbd2599af6c1a440ae1c
3,646,426
import os import random def create_sample_data(input_seqs, sample_size): """ Takes a sample of size 'sample_size' from an input file containing sequences and their associated expression levels, and writes them to a separate file. The format of the first 2 lines of the resulting output file will be of the format: " number_of_seqs_in_file\t<###> length_of_each_sequence\t<$$$> " where '<###>' is the number of sequences in the file, and '<$$$>'is the length to which every sequence in the file is padded. Args: ----- input_seqs (str) -- the absolute path of the input file containing sequence and expression level data to sample. sample_size (int) -- the number of samples to take from the input file. Returns: ----- sample_data (str) -- the absolute path of the output file containing the sample of sequence and expression level data. """ # Assertions assert isinstance(input_seqs, str), 'Input sequences file path must be\ passed as a string.' assert os.path.exists(input_seqs), 'Input file does not exist.' assert isinstance(sample_size, int), 'Number of sequences to sample must\ be passed as an integer.' assert sample_size < get_seq_count(input_seqs), 'Sample size must be\ smaller than the number of sequences in the input file.' # Functionality # Define output file path index = input_seqs.rfind('/') + 1 insert = str(sample_size) + '_from_' sample_seqs = input_seqs[:index] + insert + input_seqs[index:] # Pull sequences to create sample data with smart_open(input_seqs, 'r') as inf: inf.readline() inf.readline() # skip the first 2 info lines all_lines = inf.readlines() for i in range(50): lines = random.sample(all_lines, sample_size) with smart_open(sample_seqs, 'w') as g: for line in lines: g.write(line) # Write number and length of sequence info to top of resulting file write_num_and_len_of_seqs_to_file(sample_seqs) return sample_seqs
e7490ec512472536c5c12c4d229b836220249417
3,646,427
def find_peaks(ts, mindist=100):
    """
    Find peaks in time series
    :param ts:
    :return:
    """
    extreme_value = -np.inf
    extreme_idx = 0
    peakvalues = []
    peaktimes = []
    find_peak = True
    idx = 0
    for r in ts.iteritems():
        # print(r)
        if find_peak:
            # look for maximum
            if r[1] > extreme_value:
                # update current maximum point
                extreme_value = r[1]
                extreme_idx = idx
            elif r[1] + mindist < extreme_value:
                # consider current maximum a peak
                peakvalues.append(extreme_value)
                peaktimes.append(extreme_idx)
                # update current maximum
                extreme_value = r[1]
                extreme_idx = idx
                find_peak = False
        else:
            # look for minimum
            if r[1] < extreme_value:
                # update value
                extreme_value = r[1]
                extreme_idx = idx
            elif r[1] - mindist > extreme_value:
                extreme_value = r[1]
                extreme_idx = idx
                find_peak = True
        idx += 1
    return peakvalues, peaktimes
5f4dbf0b6c9e4e8961c14b1ba255ebcdf210c50b
3,646,428
import os import datasets def load_dataset(name, root, sample="default", **kwargs): """ Default dataset wrapper :param name (string): Name of the dataset (Out of cifar10/100, imagenet, tinyimagenet, CUB200, STANFORD120, MIT67). :param root (string): Path to download the dataset. :param sample (string): Default (random) sampling as the classic pytorch dataloader or Pairwise sampling as mentioned in the paper "Regularizing Class-wise Predictions via Self-knowledge Distillation" """ # Dataset if name in ["imagenet", "tinyimagenet", "CUB200", "STANFORD120", "MIT67"]: if name == "tinyimagenet": transform_train = transforms.Compose( [ transforms.RandomResizedCrop(32), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ] ) transform_test = transforms.Compose( [ transforms.Resize(32), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ] ) train_val_dataset_dir = os.path.join(root, "train") test_dataset_dir = os.path.join(root, "val") trainset = DatasetWrapper( datasets.ImageFolder( root=train_val_dataset_dir, transform=transform_train ) ) valset = DatasetWrapper( datasets.ImageFolder(root=test_dataset_dir, transform=transform_test) ) elif name == "imagenet": transform_train = transforms.Compose( [ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ] ) transform_test = transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ] ) train_val_dataset_dir = os.path.join(root, "train") test_dataset_dir = os.path.join(root, "val") trainset = DatasetWrapper( datasets.ImageFolder( root=train_val_dataset_dir, transform=transform_train ) ) valset = DatasetWrapper( datasets.ImageFolder(root=test_dataset_dir, transform=transform_test) ) else: transform_train = transforms.Compose( [ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ] ) transform_test = transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ] ) train_val_dataset_dir = os.path.join(root, name, "train") test_dataset_dir = os.path.join(root, name, "test") trainset = DatasetWrapper( datasets.ImageFolder( root=train_val_dataset_dir, transform=transform_train ) ) valset = DatasetWrapper( datasets.ImageFolder(root=test_dataset_dir, transform=transform_test) ) elif name.startswith("cifar"): transform_train = transforms.Compose( [ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize( (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010) ), ] ) transform_test = transforms.Compose( [ transforms.ToTensor(), transforms.Normalize( (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010) ), ] ) if name == "cifar10": CIFAR = datasets.CIFAR10 else: CIFAR = datasets.CIFAR100 trainset = DatasetWrapper( CIFAR(root, train=True, download=True, transform=transform_train) ) valset = DatasetWrapper( CIFAR(root, train=False, download=True, transform=transform_test) ) else: raise Exception("Unknown dataset: {}".format(name)) # Sampler if sample == "default": get_train_sampler = lambda d: BatchSampler( RandomSampler(d), kwargs["batch_size"], False ) get_test_sampler = lambda 
d: BatchSampler( SequentialSampler(d), kwargs["batch_size"], False ) elif sample == "pair": get_train_sampler = lambda d: PairBatchSampler(d, kwargs["batch_size"]) get_test_sampler = lambda d: BatchSampler( SequentialSampler(d), kwargs["batch_size"], False ) else: raise Exception("Unknown sampling: {}".format(sampling)) train_loader = DataLoader( trainset, batch_sampler=get_train_sampler(trainset), num_workers=4 ) val_loader = DataLoader( valset, batch_sampler=get_test_sampler(valset), num_workers=4 ) return train_loader, val_loader
b59b427b032c7360d68a3f79d358c6ae938de7bd
3,646,429
def get_flanking_seq(genome, scaffold, start, end, flanking_length):
    """ Get flanking based on Blast hit """
    for rec in SeqIO.parse(genome, "fasta"):
        if rec.id == scaffold:
            return str(
                rec.seq[int(start) - int(flanking_length): int(end) + int(flanking_length)]
            )
509002a7099ad62b0449e1c5de9a1a7dd875bc0c
3,646,430
import re

def d(vars):
    """List of variables starting with string "df" in reverse order.

    Usage: d(dir())
    @vars list of variables output by dir() command
    """
    list_of_dfs = [item for item in vars
                   if (item.find('df') == 0 and item.find('_') == -1 and item != 'dfs')]
    list_of_dfs.sort(key=lambda x: int(re.sub("[^0-9]", "", x.replace('df', ''))) if len(x) > 2 else 0,
                     reverse=True)
    return list_of_dfs
4961ae70a61e45b81e06e55ee9553ff61fd45d18
3,646,431
import inspect

def get_class_namespaces(cls: type) -> tuple[Namespace, Namespace]:
    """
    Return the module a class is defined in and its internal dictionary

    Returns:
        globals, locals
    """
    return inspect.getmodule(cls).__dict__, cls.__dict__ | {cls.__name__: cls}
46f275bcc328d9ca87ffdebf616d42096705d3fb
3,646,432
from .io import select_driver

def write_stream(path, sync=True, *args, **kwargs):
    """Creates a writer object (context manager) to write multiple dataframes into one file.

    Must be used as context manager.

    Parameters
    ----------
    path : str, filename or path to database table
    sync : bool, default True
        Set to `False` to run the writer in the background process.
    args, kwargs : parameters passed to writer driver (see erde.io modules)

    Example:

        with write_stream('/tmp/my_file.gpkg') as write:
            for df in data_generator():
                write(df)
    """
    dr, pm = select_driver(path)
    return dr.write_stream(path, sync=sync, *args, **kwargs)
8e2274e102b60b139b6e40f425682d06268e10a5
3,646,433
from typing import Dict

def diff(
    df: DataFrame,
    columns: Dict[str, str],
    periods: int = 1,
    axis: PandasAxis = PandasAxis.ROW,
) -> DataFrame:
    """
    Calculate row-by-row or column-by-column difference for select columns.

    :param df: DataFrame on which the diff will be based.
    :param columns: columns on which to perform diff, mapping source column to
        target column. For instance, `{'y': 'y'}` will replace the column `y` with
        the diff value in `y`, while `{'y': 'y2'}` will add a column `y2` based
        on diff values calculated from `y`, leaving the original column `y`
        unchanged.
    :param periods: periods to shift for calculating difference.
    :param axis: 0 for row, 1 for column. default 0.
    :return: DataFrame with diffed columns
    :raises QueryObjectValidationError: If the request is incorrect
    """
    df_diff = df[columns.keys()]
    df_diff = df_diff.diff(periods=periods, axis=axis)
    return _append_columns(df, df_diff, columns)
38ed83fc7e1847a2c9e31abb217990becc1bc04f
3,646,434
import base64

def decodeTx(data: bytes) -> Transaction:
    """Function to convert base64 encoded data into a transaction object

    Args:
        data (bytes): the data to convert

    Returns a transaction object
    """
    data = base64.b64decode(data)
    if data[:1] != tx_flag:
        return None
    timestamp = float(data[1:21].decode('utf-8'))
    hash = data[21:53].hex()
    script_sig = data[53:117].hex()
    inputs = []
    outputs = []
    io = data[117:].split(array_flag)
    for x in io:
        if x[:1] == tx_in:
            pub_key = x[1:34].hex()
            sig = x[34:98].hex()
            utxoRef = x[98:].decode('utf-8')
            inputs.append(Input(utxoRef, pub_key, sig))
        elif x[:1] == tx_out:
            addr = x[1:35].decode('utf-8')
            amount = float(x[35:].decode('utf-8'))
            outputs.append(Output(addr, amount))
    tx = Transaction(inputs, outputs)
    tx.timestamp = timestamp
    tx.hash = hash
    tx.script_sig = script_sig
    return tx
da52e9dcb641d2986fa47d15f9da8d1edea28659
3,646,435
def create_package_from_datastep(table):
    """Create an importable model package from a score code table.

    Parameters
    ----------
    table : swat.CASTable
        The CAS table containing the score code.

    Returns
    -------
    BytesIO
        A byte stream representing a ZIP archive which can be imported.

    See Also
    --------
    :meth:`model_repository.import_model_from_zip <.ModelRepository.import_model_from_zip>`
    """
    assert 'DataStepSrc' in table.columns
    sess = table.session.get_connection()
    dscode = table.to_frame().loc[0, 'DataStepSrc']
    file_metadata = [{'role': 'score', 'name': 'dmcas_scorecode.sas'}]
    zip_file = _build_zip_from_files({
        'fileMetadata.json': file_metadata,
        'dmcas_scorecode.sas': dscode
    })
    return zip_file
0874f1a755ed73af09091a7c0f1b3fb3e5e861e4
3,646,436
def _test_diff(diff: list[float]) -> tuple[float, float, float]:
    """Sequential test for the median difference, adjusted for multiple testing.

    The test is one-sided, so the p-value should be multiplied by 2, but it is
    run twice.
    """
    _, upper = seq.median_conf_bound(diff, config.P_VALUE / population.count())
    return float(np.median(diff)), upper, np.max(diff)
024d0eaba612361e4fef39839bfd31474d5be5a6
3,646,437
def get_repo_of_app_or_library(app_or_library_name):
    """ This function takes an app or library name and will return the
    corresponding repo for that app or library"""
    specs = get_specs()
    repo_name = specs.get_app_or_lib(app_or_library_name)['repo']
    if not repo_name:
        return None
    return Repo(repo_name)
72c0349354fdc11da3ff16f2dfa3126eb02fa381
3,646,438
import datetime

def get_index_price_change_by_ticker(fromdate: str, todate: str, market: str = "KOSPI") -> DataFrame:
    """Price change of every index over the given period

    Args:
        fromdate (str): start date of the query (YYMMDD)
        todate   (str): end date of the query (YYMMDD)
        market   (str, optional): market to query (KOSPI/KOSDAQ/RKX/theme)

    Returns:
        DataFrame:

            >> get_index_price_change_by_ticker("20210101", "20210130")

                                     Open     Close     Change      Volume       Trading value
            Index name
            KOSPI                 2873.47   3152.18   9.703125  7162398637     149561467924511
            KOSPI 200              389.29    430.22  10.507812  2221276866     119905899468167
            KOSPI 100             2974.06   3293.96  10.757812  1142234783      95023508273187
            KOSPI 50              2725.20   3031.59  11.242188   742099360      79663247553065
            KOSPI 200 Mid/Small   1151.78   1240.92   7.738281  1079042083      24882391194980
    """
    if isinstance(fromdate, datetime.datetime):
        fromdate = _datetime2string(fromdate)
    if isinstance(todate, datetime.datetime):
        todate = _datetime2string(todate)

    fromdate = fromdate.replace("-", "")
    todate = todate.replace("-", "")

    # business-day check required by the KRX web server
    fromdate = get_nearest_business_day_in_a_week(fromdate, prev=False)
    todate = get_nearest_business_day_in_a_week(todate)
    return krx.get_index_price_change_by_ticker(fromdate, todate, market)
6d65ffeaccd1e5fe307e1e5387e413db3c2eb5fe
3,646,439
def axpy(alpha, x, y, stream=None):
    """y <- alpha*x + y"""
    global _blas
    if not isinstance(alpha, Number):
        raise ValueError('alpha is not a numeric type')
    validate_argument_dtype(x, 'x')
    validate_argument_dtype(y, 'y')
    if not _blas:
        _blas = Blas()
    _blas.stream = stream
    dtype = promote(promote(type(alpha), x.dtype), y.dtype)
    yf = colmajor(y, dtype, 'y')
    _blas.axpy(dtype.type(alpha), x.astype(dtype), yf)
    if y.dtype == yf.dtype and not alias(y, yf):
        y[:] = yf
        return y
    else:
        return yf
10b8c46b1fc160d637241750c408957b8f184ee9
3,646,440
def _unenroll_get_hook(app_context):
    """Add field to unenroll form offering data removal, if policy supports."""
    removal_policy = _get_removal_policy(app_context)
    return removal_policy.add_unenroll_additional_fields(app_context)
6c8e6a06d45fecfa8828ce8a24ca9e1e910b1e9c
3,646,441
from typing import Union

def query_fetch_bom_df(search_key: str, size: int) -> Union[pd.DataFrame, None]:
    """Fetch and return bom dataframe of the article

    Runs recursive query on database to fetch the bom.
    """
    # Recursive query
    raw_query = f"""WITH cte AS (
        SELECT * FROM [{DB_NAME}].[dbo].[{SQL_T_BOM}] WHERE father = '{search_key}'
        UNION ALL
        SELECT p.* FROM [{DB_NAME}].[dbo].[{SQL_T_BOM}] p
        INNER JOIN cte ON cte.child = p.father
        WHERE cte.child Like '%{size}' OR cte.child Like '%l' OR cte.child Like '%g'
            OR cte.child Like '%x' OR cte.child Like '%b' OR cte.child Like '%r'
            OR cte.child Like '%k' OR cte.child Like '%c'
            OR cte.child Like '4-pux%' OR cte.child Like '4-cca-ang%'
        )
        SELECT * FROM cte ORDER BY cte.process_order, cte.father, cte.child
        option (maxrecursion 100);"""
    df = None
    try:
        df = pd.read_sql(raw_query, engine)
    except Exception as e:
        df = None
    return df
753f0378590df1c2b3e50f7bad8d2b15490ae488
3,646,442
def zscore(collection, iteratee=None):
    """Calculate the standard score assuming normal distribution. If iteratee
    is passed, each element of `collection` is passed through a iteratee before
    the standard score is computed.

    Args:
        collection (list|dict): Collection to process.
        iteratee (mixed, optional): Iteratee applied per iteration.

    Returns:
        float: Calculated standard score.

    Example:

        >>> results = zscore([1, 2, 3])
        # [-1.224744871391589, 0.0, 1.224744871391589]

    .. versionadded:: 2.1.0
    """
    array = pyd.map_(collection, iteratee)
    avg = mean(array)
    sig = std_deviation(array)
    return pyd.map_(array, lambda item: (item - avg) / sig)
a813295f6cce309b936b94a9d70f082f435a4b89
3,646,443
from typing import Tuple

def AND(
        *logicals: Tuple[func_xltypes.XlExpr]
) -> func_xltypes.XlBoolean:
    """Determine if all conditions in a test are TRUE

    https://support.office.com/en-us/article/
    and-function-5f19b2e8-e1df-4408-897a-ce285a19e9d9
    """
    if not logicals:
        raise xlerrors.NullExcelError('logical1 is required')

    # Use delayed evaluation to minimize the amount of values to evaluate.
    for logical in logicals:
        val = logical()
        for item in xl.flatten([val]):
            if func_xltypes.Blank.is_blank(item):
                continue
            if not bool(item):
                return False

    return True
ebdc5c4f2c3cab31a78507923eded284eb679fd4
3,646,444
def check_mask(mask):
    """Check if mask is valid by its area"""
    area_ratio = np.sum(mask) / float(mask.shape[0] * mask.shape[1])
    return (area_ratio > MASK_THRES_MIN) and (area_ratio < MASK_THRES_MAX)
a82f415d95ea07571da2aabeeddc6837b0a80f8d
3,646,445
def supported_estimators():
    """Return a `dict` of supported estimators."""
    allowed = {
        'LogisticRegression': LogisticRegression,
        'RandomForestClassifier': RandomForestClassifier,
        'DecisionTreeClassifier': DecisionTreeClassifier,
        'KNeighborsClassifier': KNeighborsClassifier,
        'MultinomialNB': MultinomialNB,
        'GaussianNB': GaussianNB,
        'BernoulliNB': BernoulliNB
    }
    return allowed
1bb76e81252c3b959a376f23f2462d4faef234a9
3,646,446
from hiicart.gateway.base import GatewayError
from hiicart.gateway.amazon.gateway import AmazonGateway
from hiicart.gateway.google.gateway import GoogleGateway
from hiicart.gateway.paypal.gateway import PaypalGateway
from hiicart.gateway.paypal2.gateway import Paypal2Gateway
from hiicart.gateway.paypal_adaptive.gateway import PaypalAPGateway
from hiicart.gateway.braintree.gateway import BraintreeGateway
from hiicart.gateway.authorizenet.gateway import AuthorizeNetGateway
from hiicart.gateway.paypal_express.gateway import PaypalExpressCheckoutGateway
from hiicart.gateway.stripe.gateway import StripeGateway

def validate_gateway(gateway):
    """Test that a gateway is correctly set up.

    Returns True if successful, or an error message."""
    gateways = {
        'amazon': AmazonGateway,
        'google': GoogleGateway,
        'paypal': PaypalGateway,
        'paypal2': Paypal2Gateway,
        'paypal_adaptive': PaypalAPGateway,
        'paypal_express': PaypalExpressCheckoutGateway,
        'braintree': BraintreeGateway,
        'authorizenet': AuthorizeNetGateway,
        'stripe': StripeGateway
    }
    try:
        cls = gateways[gateway]
        obj = cls()
        return obj._is_valid() or "Authentication Error"
    except GatewayError, err:
        return err.message
c60e3e88cf6bb919208821d8ee214368d39dc7f6
3,646,447
import sqlite3

def execute_query(db, query):
    """get data from database"""
    result = []
    with closing(sqlite3.connect(db)) as conn:
        conn.row_factory = sqlite3.Row
        cur = conn.cursor()
        for row in cur.execute(query):
            result.append({name: row[name] for name in row.keys()})
    return result
75476c8a9f14751eb46fc2891ba5e7bddecd3c0e
3,646,448
from zipimport import zipimporter
import os

def module_list(path):
    """
    Return the list containing the names of the modules available in the given
    folder.

    :param path: folder path
    :type path: str
    :returns: modules
    :rtype: list
    """
    if os.path.isdir(path):
        folder_list = os.listdir(path)
    elif path.endswith('.egg'):
        try:
            folder_list = [f for f in zipimporter(path)._files]
        except:
            folder_list = []
    else:
        folder_list = []
    #folder_list = glob.glob(os.path.join(path,'*'))
    folder_list = [
        p for p in folder_list
        if (os.path.exists(os.path.join(path, p, '__init__.py'))
            or p[-3:] in {'.py', '.so'}
            or p[-4:] in {'.pyc', '.pyo', '.pyd'})]
    folder_list = [os.path.basename(p).split('.')[0] for p in folder_list]
    return folder_list
ef0b80a91a350d3909e580dd8a592e09bfaa38ad
3,646,449
def to_mgb_supported_dtype(dtype_):
    """get the dtype supported by megbrain nearest to given dtype"""
    if (
        dtype.is_lowbit(dtype_)
        or dtype.is_quantize(dtype_)
        or dtype.is_bfloat16(dtype_)
    ):
        return dtype_
    return _detail._to_mgb_supported_dtype(dtype_)
864b5bb7099771705ad478e5e89db8f3035f1c4f
3,646,450
def get_reset_state_name(t_fsm):
    """
    Returns the name of the reset state.

    If an .r keyword is specified, that is the name of the reset state.
    If the .r keyword is not present, the first state defined in the
    transition table is the reset state.

    :param t_fsm: blifparser.BlifParser().blif.fsm object
    :return str reset_state: name of the reset state
    """
    reset_state = None
    if t_fsm.r is None:
        if len(t_fsm.transtable) > 0:
            reset_state = t_fsm.transtable[0][1]
    else:
        reset_state = t_fsm.r.name
    return reset_state
c65ea80f94f91b31a179faebc60a97f7260675c4
3,646,451
def gridmake(*arrays):
    """
    Expands one or more vectors (or matrices) into a matrix where rows span the
    cartesian product of combinations of the input arrays. Each column of the
    input arrays will correspond to one column of the output matrix.

    Parameters
    ----------
    *arrays : tuple/list of np.ndarray
        Tuple/list of vectors to be expanded.

    Returns
    -------
    out : np.ndarray
        The cartesian product of combinations of the input arrays.

    Notes
    -----
    Based on the original function ``gridmake`` in the CompEcon toolbox by
    Miranda and Fackler.

    References
    ----------
    Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
    and Finance, MIT Press, 2002.
    """
    if all([i.ndim == 1 for i in arrays]):
        d = len(arrays)
        if d == 2:
            out = _gridmake2(*arrays)
        else:
            out = _gridmake2(arrays[0], arrays[1])
            for arr in arrays[2:]:
                out = _gridmake2(out, arr)
        return out
    else:
        raise NotImplementedError("Come back here")
56c5375024170fbd599500c0603e0e3dcc7f53d4
3,646,452
import logging
import math

def pagerotate(document: vp.Document, clockwise: bool):
    """Rotate the page by 90 degrees.

    This command rotates the page by 90 degrees counter-clockwise. If the
    `--clockwise` option is passed, it rotates the page clockwise instead.

    Note: if the page size is not defined, an error is printed and the page
    is not rotated.
    """
    page_size = document.page_size
    if page_size is None:
        logging.warning("pagerotate: page size is not defined, page not rotated")
        return document
    w, h = page_size
    if clockwise:
        document.rotate(math.pi / 2)
        document.translate(h, 0)
    else:
        document.rotate(-math.pi / 2)
        document.translate(0, w)
    document.page_size = h, w
    return document
37f0a9e726f490c357afb48ace49484cfcae84ce
3,646,453
import argparse
from typing import Tuple
import atexit
import json

def create_new_deployment(runner: Runner,
                          args: argparse.Namespace) -> Tuple[str, str]:
    """Create a new Deployment, return its name and Kubernetes label."""
    run_id = str(uuid4())

    def remove_existing_deployment():
        runner.get_kubectl(
            args.context, args.namespace, [
                "delete",
                "--ignore-not-found",
                "all",
                "--selector=telepresence=" + run_id,
            ]
        )

    atexit.register(remove_existing_deployment)
    remove_existing_deployment()
    command = [
        "run",
        # This will result in using Deployment:
        "--restart=Always",
        "--limits=memory=256Mi",
        "--requests=memory=64Mi",
        args.new_deployment,
        "--image=" + TELEPRESENCE_REMOTE_IMAGE,
        "--labels=telepresence=" + run_id,
    ]
    for port in args.expose.remote():
        command.append("--port={}".format(port))
    if args.expose.remote():
        command.append("--expose")
    # If we're on local VM we need to use different nameserver to prevent
    # infinite loops caused by sshuttle:
    if args.method == "vpn-tcp" and args.in_local_vm:
        command.append(
            "--env=TELEPRESENCE_NAMESERVER=" + get_alternate_nameserver()
        )
    if args.needs_root:
        override = {
            "apiVersion": "extensions/v1beta1",
            "spec": {
                "template": {
                    "spec": {
                        "securityContext": {
                            "runAsUser": 0
                        }
                    }
                }
            }
        }
        command.append("--overrides=" + json.dumps(override))
    runner.get_kubectl(args.context, args.namespace, command)
    return args.new_deployment, run_id
2cf76661fb4ab89ec94efb8648d917abedf70f48
3,646,454
from .org import org_organisation_logo from re import A def inv_send_rheader(r): """ Resource Header for Send """ if r.representation == "html" and r.name == "send": record = r.record if record: db = current.db s3db = current.s3db T = current.T s3 = current.response.s3 settings = current.deployment_settings tabs = [(T("Edit Details"), None), (T("Items"), "track_item"), ] if settings.get_inv_send_packaging(): tabs.append((T("Packaging"), "send_package")) if settings.get_inv_document_filing(): tabs.append((T("Documents"), "document")) rheader_tabs = s3_rheader_tabs(r, tabs) table = r.table stable = s3db.org_site send_id = record.id status = record.status site_id = record.site_id if site_id: site = db(stable.site_id == site_id).select(stable.organisation_id, stable.instance_type, limitby = (0, 1), ).first() logo = org_organisation_logo(site.organisation_id) instance_table = s3db[site.instance_type] if "phone1" in instance_table.fields: site = db(instance_table.site_id == site_id).select(instance_table.phone1, instance_table.phone2, limitby = (0, 1), ).first() phone1 = site.phone1 phone2 = site.phone2 else: phone1 = None phone2 = None else: logo = "" phone1 = None phone2 = None to_site_id = record.to_site_id if to_site_id: site = db(stable.site_id == to_site_id).select(stable.location_id, limitby = (0, 1), ).first() address = s3db.gis_LocationRepresent(address_only = True)(site.location_id) else: address = NONE if settings.get_inv_send_req(): req_ref_label = TH("%s: " % table.req_ref.label) ltable = s3db.inv_send_req rtable = s3db.inv_req query = (ltable.send_id == send_id) & \ (ltable.req_id == rtable.id) rows = db(query).select(rtable.id, rtable.req_ref, ) if len(rows) == 1: row = rows.first() req_ref_value = TD(inv_ReqRefRepresent(show_link = True)(row.req_ref, row)) else: # Cache values in class refs = [row.req_ref for row in rows] represent = inv_ReqRefRepresent(show_link = True) represent.bulk(refs, rows, show_link = True) refs_repr = [s3_str(represent(ref)) for ref in refs] refs_repr = ", ".join(refs_repr) req_ref_value = TD(XML(refs_repr)) elif settings.get_inv_send_req_ref(): req_ref_label = TH("%s: " % table.req_ref.label) #req_ref_value = TD(inv_ReqRefRepresent(show_link = True)(record.req_ref)) req_ref_value = TD(record.req_ref) else: req_ref_label = "" req_ref_value = "" shipment_details = TABLE(TR(TD(T(settings.get_inv_send_form_name().upper()), _colspan = 2, _class = "pdf_title", ), TD(logo, _colspan = 2, ), ), TR(TH("%s: " % table.status.label), table.status.represent(status), ), TR(TH("%s: " % table.send_ref.label), TD(table.send_ref.represent(record.send_ref)), req_ref_label, req_ref_value, ), TR(TH("%s: " % table.date.label), table.date.represent(record.date), TH("%s: " % table.delivery_date.label), table.delivery_date.represent(record.delivery_date), ), TR(TH("%s: " % table.to_site_id.label), table.to_site_id.represent(record.to_site_id), TH("%s: " % table.site_id.label), table.site_id.represent(record.site_id), ), TR(TH("%s: " % T("Address")), TD(address, _colspan=3), ), TR(TH("%s: " % table.transported_by.label), table.transported_by.represent(record.transported_by), TH("%s: " % table.transport_ref.label), table.transport_ref.represent(record.transport_ref), ), TR(TH("%s: " % table.sender_id.label), table.sender_id.represent(record.sender_id), TH("%s: " % table.recipient_id.label), table.recipient_id.represent(record.recipient_id), ), TR(TH("%s: " % T("Complete? Please call")), phone1 or "", TH("%s: " % T("Problems? 
Please call")), phone2 or phone1 or "", ), TR(TH("%s: " % table.comments.label), TD(record.comments or "", _colspan=3) ) ) rfooter = TAG[""]() if status != SHIP_STATUS_CANCEL and \ r.method != "form": if current.auth.s3_has_permission("update", "inv_send", record_id = record.id, ): packaging = None # Don't show buttons unless Items have been added tracktable = s3db.inv_track_item query = (tracktable.send_id == send_id) item = db(query).select(tracktable.id, limitby = (0, 1), ).first() if item: actions = DIV() jappend = s3.js_global.append if s3.debug: s3.scripts.append("/%s/static/scripts/S3/s3.inv_send_rheader.js" % r.application) else: s3.scripts.append("/%s/static/scripts/S3/s3.inv_send_rheader.min.js" % r.application) if status == SHIP_STATUS_IN_PROCESS: actions.append(A(ICON("print"), " ", T("Picking List"), _href = URL(args = [record.id, "pick_list.xls", ] ), _class = "action-btn", ) ) if settings.get_inv_send_packaging(): actions.append(A(ICON("print"), " ", T("Labels"), _href = URL(args = [record.id, "labels.xls", ] ), _class = "action-btn", ) ) actions.append(A(T("Send Shipment"), _href = URL(args = [record.id, "process", ] ), _id = "send-process", _class = "action-btn", ) ) jappend('''i18n.send_process_confirm="%s"''' % \ T("Do you want to send this shipment?")) elif status == SHIP_STATUS_RETURNING: actions.append(A(T("Complete Returns"), _href = URL(c = "inv", f = "send", args = [record.id, "return_complete", ] ), _id = "return-process", _class = "action-btn" ) ) jappend('''i18n.return_process_confirm="%s"''' % \ T("Do you want to complete the return process?")) elif status == SHIP_STATUS_SENT: actions.append(A(T("Manage Returns"), _href = URL(c = "inv", f = "send", args = [record.id, "return", ], vars = None, ), _id = "send-return", _class = "action-btn", _title = T("Only use this button to accept back into stock some items that were returned from a delivery.") ) ) jappend('''i18n.send_return_confirm="%s"''' % \ T("Confirm that some items were returned from a delivery and they will be accepted back into stock.")) actions.append(A(T("Confirm Shipment Received"), _href = URL(f = "send", args = [record.id, "received", ], ), _id = "send-receive", _class = "action-btn", _title = T("Only use this button to confirm that the shipment has been received by a destination which will not record the shipment directly into the system.") ) ) jappend('''i18n.send_receive_confirm="%s"''' % \ T("Confirm that the shipment has been received by a destination which will not record the shipment directly into the system.")) if status != SHIP_STATUS_RECEIVED: if settings.get_inv_send_packaging(): if status == SHIP_STATUS_IN_PROCESS: # Insert in front of 'Send Shipment' index = -1 else: # Append at end index = len(actions) actions.insert(index, A(ICON("print"), " ", T("Packing List"), _href = URL(args = [record.id, "packing_list.xls", ] ), _class = "action-btn", ) ) if settings.get_inv_send_gift_certificate(): if status == SHIP_STATUS_IN_PROCESS: # Insert in front of 'Send Shipment' index = -1 else: # Append at end index = len(actions) actions.insert(index, A(ICON("print"), " ", T("Gift Certificate"), _href = URL(c = "inv", f = "send", args = [record.id, "gift_certificate.xls", ] ), _class = "action-btn" ) ) if status != SHIP_STATUS_IN_PROCESS: actions.append(A(T("Cancel Shipment"), _href = URL(c = "inv", f = "send", args = [record.id, "cancel", ] ), _id = "send-cancel", _class = "delete-btn" ) ) jappend('''i18n.send_cancel_confirm="%s"''' % \ T("Do you want to cancel this sent shipment? 
The items will be returned to the Warehouse. This action CANNOT be undone!")) shipment_details.append(TR(TH(actions, _colspan = 2, ))) s3.rfooter = rfooter rheader = DIV(shipment_details, rheader_tabs, #rSubdata ) return rheader return None
5fddfaeede501531674557a0ecf8e4fb43989bdf
3,646,455
import torch

def gauss_reparametrize(mu, logvar, n_sample=1):
    """Gaussian reparametrization"""
    std = logvar.mul(0.5).exp_()
    size = std.size()
    eps = Variable(std.data.new(size[0], n_sample, size[1]).normal_())
    z = eps.mul(std[:, None, :]).add_(mu[:, None, :])
    z = torch.clamp(z, -4., 4.)
    return z.view(z.size(0) * z.size(1), z.size(2), 1, 1)
5c4fa87c5287aae3727608a003c3c91c2ba5c1a9
3,646,456
import os
import sys
import unicodedata

def run_setup_py(cmd, pypath=None, path=None, data_stream=0, env=None):
    """
    Execution command for tests, separate from those used by the code directly
    to prevent accidental behavior issues
    """
    if env is None:
        env = dict()
        for envname in os.environ:
            env[envname] = os.environ[envname]

    # override the python path if needed
    if pypath is not None:
        env["PYTHONPATH"] = pypath

    # override the execution path if needed
    if path is not None:
        env["PATH"] = path
    if not env.get("PATH", ""):
        env["PATH"] = _which_dirs("tar").union(_which_dirs("gzip"))
        env["PATH"] = os.pathsep.join(env["PATH"])

    cmd = [sys.executable, "setup.py"] + list(cmd)

    # http://bugs.python.org/issue8557
    shell = sys.platform == 'win32'

    try:
        proc = _Popen(
            cmd, stdout=_PIPE, stderr=_PIPE, shell=shell, env=env,
        )

        if isinstance(data_stream, tuple):
            data_stream = slice(*data_stream)
        data = proc.communicate()[data_stream]
    except OSError:
        return 1, ''

    # decode the console string if needed
    if hasattr(data, "decode"):
        # use the default encoding
        data = data.decode()
        data = unicodedata.normalize('NFC', data)

    # communicate calls wait()
    return proc.returncode, data
12fca9f58444f9b1e3ba3c890e8956c9bbed60cc
3,646,457
def forward_pass(img, session, images_placeholder, phase_train_placeholder, embeddings, image_size):
    """Feeds an image to the FaceNet model and returns a 128-dimension embedding
    for facial recognition.

    Args:
        img: image file (numpy array).
        session: The active Tensorflow session.
        images_placeholder: placeholder of the 'input:0' tensor of the pre-trained FaceNet model graph.
        phase_train_placeholder: placeholder of the 'phase_train:0' tensor of the pre-trained FaceNet model graph.
        embeddings: placeholder of the 'embeddings:0' tensor from the pre-trained FaceNet model graph.
        image_size: (int) required square image size.

    Returns:
        embedding: (numpy array) of 128 values after the image is fed to the FaceNet model.
    """
    # If there is a human face
    if img is not None:
        # Normalize the pixel values of the image for noise reduction for better accuracy and resize to desired size
        image = load_img(
            img=img,
            do_random_crop=False,
            do_random_flip=False,
            do_prewhiten=True,
            image_size=image_size
        )
        # Run forward pass on FaceNet model to calculate embedding
        feed_dict = {images_placeholder: image, phase_train_placeholder: False}
        embedding = session.run(embeddings, feed_dict=feed_dict)
        return embedding
    else:
        return None
846c05a167e116ca4efbe3888486a3ee740d33ef
3,646,458
import urllib.request

def check_url(url):
    """Return True if the URL answers with a 2xx success code (200-208 as implemented),
    otherwise return False. Relies on a module-level `headers` dict for the request headers.
    """
    try:
        req = urllib.request.Request(url, headers=headers)
        response = urllib.request.urlopen(req)
        return response.code in range(200, 209)
    except Exception:
        return False
79f20eeb14724b728f020ff4c680e49f6a1a2473
3,646,459
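A usage sketch for check_url above. The module-level `headers` dict is not shown in the snippet, so the one below is an assumption; the URLs are illustrative only.

# Assumption: check_url lives in the same module, so defining headers here satisfies its lookup.
headers = {"User-Agent": "Mozilla/5.0 (compatible; link-checker)"}

print(check_url("https://example.com"))                          # True if the site answers with a 2xx code
print(check_url("https://example.com/this-page-does-not-exist")) # False on HTTP errors or network failures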
def build_permutation_importance( data, data_labels, feature_names, model, metrics, repeats=100, random_seed=42 ): """Calculates permutation feature importance.""" pi_results = {} for metric in metrics: pi = sklearn.inspection.permutation_importance( model, data, data_labels, n_repeats=repeats, scoring=metric, random_state=random_seed) pi_results[metric] = [] for feature_id, feature_name in enumerate(feature_names): pi_results[metric].append(( feature_name, pi.importances_mean[feature_id], pi.importances_std[feature_id] )) # for i in pi.importances_mean.argsort()[::-1]: # if pi.importances_mean[i] - 2 * pi.importances_std[i] > 0: # print(f'{feature_name:<8}' # f'{pi.importances_mean[feature_id]:.3f}' # f' +/- {pi.importances_std[feature_id]:.3f}') return pi_results
3b0b87ddf53446156b20189dad7c3d0b3ae2a1c2
3,646,460
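A sketch of calling build_permutation_importance above, assuming it lives in the same module as this snippet and that `import sklearn.inspection` (omitted from the snippet) is in scope. The dataset and metric are illustrative.

import sklearn.inspection  # the function references sklearn.inspection but the snippet omits the import
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)
feature_names = ["sepal_len", "sepal_wid", "petal_len", "petal_wid"]
model = LogisticRegression(max_iter=1000).fit(X, y)

results = build_permutation_importance(X, y, feature_names, model, metrics=["accuracy"], repeats=10)
for name, mean, std in results["accuracy"]:
    print(f"{name}: {mean:.3f} +/- {std:.3f}")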
def _load_parent(collection, meta): """Determine the parent document for the document that is to be ingested.""" parent = ensure_dict(meta.get("parent")) parent_id = meta.get("parent_id", parent.get("id")) if parent_id is None: return parent = Document.by_id(parent_id, collection=collection) if parent is None: raise BadRequest( response=jsonify( {"status": "error", "message": "Cannot load parent document"}, status=400, ) ) return parent
2f53440fa9610f9e8ca494ec8ec27bf9d6a09273
3,646,461
import requests def get_latest_sensor_reading(sensor_serial, metric): """ Get latest sensor reading from MT sensor metrics: 'temperature', 'humidity', 'water_detection' or 'door' """ headers = { "Content-Type": "application/json", "Accept": "application/json", "X-Cisco-Meraki-API-Key": meraki_api_key } params = { "serials[]": sensor_serial, "metric": metric } try: msg = requests.request('GET', f"{base_url}/networks/{network_id}/sensors/stats/latestBySensor", headers=headers, params=params) if msg.ok: data = msg.json() return data except Exception as e: print("API Connection error: {}".format(e))
88de9d770f3be91700e3c86ff6460e2fdaa35d01
3,646,462
def border_msg(msg: str):
    """ Add a border of dashes above and below the given text """
    row = len(msg)
    h = ''.join(['+'] + ['-' * row] + ['+'])
    return h + "\n" + msg + "\n" + h
cdd9d17ba76014f4c80b9c429aebbc4ca6f959c3
3,646,463
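A quick usage example for border_msg above; it is pure Python with no external dependencies.

print(border_msg("hello"))
# +-----+
# hello
# +-----+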
def create_app(config_name='development'): """Returns flask app based on the configuration""" flask_app = Flask(__name__) flask_app.config.from_object(app_config[config_name]) flask_app.config['JSON_SORT_KEYS'] = False flask_app.url_map.strict_slashes = False flask_app.register_error_handler(400, handle_bad_request) flask_app.register_error_handler(404, handle_not_found) flask_app.register_blueprint(v1_bp) flask_app.register_blueprint(party_bp) flask_app.register_blueprint(office_bp) flask_app.register_blueprint(user_bp) return flask_app
783edefb40c2f3cc0aefa0788b0c1c04d581aa39
3,646,464
def auto_merge_paths(data, auto_merge_distance, auto_close_paths=True): """ This function connects all paths in the given dataset, for which the start or endpoints are closer than auto_merge_distance. :param data: Should be a list or tuple containing paths, attributes, svg_attributes. :param auto_merge_distance: If the start or endpoint of a pair of paths is closer than this distance in units of milli meters, they are automatically merged. If one of the paths has to be reversed to do so, this is automatically done. A line is added to the path to bridge the gap. :param auto_close_paths: If set the paths are automatically closed after the merging operation if the start and end point of one path are closer than the auto_merge_distance. It is closed by a line and it's closed flag is set. :return paths, attributes, svg_attributes, iters, numclosed: Modified paths, modified attributes, svg_attributes, number of pairs connected and number of paths that were closed. """ paths, attributes, svg_attributes = data def fix_first_pair(paths_, attributes_): """ Helper function that fixes the next best pair of paths, if they fulfill the condition :rtype: NoneType in case paths_ is empty. Else fixed paths_ and attributes_. """ for i_ in range(len(paths_)): # Get start end end points start1 = paths_[i_][0].start end1 = paths_[i_][-1].end for j in range(len(paths_)): if i_ != j: start2 = paths_[j][0].start end2 = paths_[j][-1].end # Calculate all relevant distances for this pair distance_ = px2mm(np.abs(start2 - end1)) distance_r1 = px2mm(np.abs(start2 - start1)) distance_r2 = px2mm(np.abs(end2 - end1)) # Perform merger if distance_ < auto_merge_distance or distance_r2 < auto_merge_distance: first = i_ second = j else: first = j second = i_ if distance_r1 < auto_merge_distance or distance_r2 < auto_merge_distance: # Reverse paths_[j] if necessary paths_[j] = svgpathtools.path.Path( *[svgpathtools.path.bpoints2bezier(segment.bpoints()[::-1]) for segment in paths_[j]]) if min([distance_, distance_r1, distance_r2]) < auto_merge_distance: # Merge both paths paths_[first] = svgpathtools.path.Path(*[segment for segment in paths_[first]] + [ svgpathtools.path.Line(paths_[first][-1].end, paths_[second][0].start)] + [segment for segment in paths_[second]]) return paths_[:second] + paths_[second + 1:], attributes_[:second] + attributes_[second + 1:] return None iters = 0 while True: ret = fix_first_pair(paths, attributes) if ret is not None: paths, attributes = ret iters += 1 else: break # Make sure, paths are closed... numclosed = 0 if auto_close_paths: for i, path in enumerate(paths): # Get start end end point distance start = path[0].start end = path[-1].end distance = px2mm(np.abs(start - end)) if distance < auto_merge_distance: # Close the path paths[i] = svgpathtools.path.Path(*[segment for segment in path] + [svgpathtools.path.Line(end, start)]) paths[i].closed = True numclosed += 1 return paths, attributes, svg_attributes, iters, numclosed
34ec7d0b853a70159ebef6244236475375a3ca9d
3,646,465
def is_authorized(secure: AccessRestriction): """Returns authorization status based on the given access restriction. :param secure: access restriction :type secure: AccessRestriction :return: authorization status (``True`` or ``False``) """ if secure == AccessRestriction.ALL: return True elif secure == AccessRestriction.STAFF: return is_staff(get_course()) elif secure == AccessRestriction.STUDENT: return is_enrolled(get_course()) else: raise Exception(f"{secure} is not a valid AccessRestriction")
e070ae5521db1079426b80b6ff8a3fc5c9a9ba09
3,646,466
def create_link_forum(**attrs):
    """Save a new link forum."""
    link = build_link_forum(**attrs)
    link.save()
    return link
e94e1001e42f46cd1c1803fbff35d0eded89858e
3,646,467
from datetime import datetime def open_report(): """Probe Services: Open report --- parameters: - in: body name: open report data required: true schema: type: object properties: data_format_version: type: string format: type: string probe_asn: type: string probe_cc: type: string software_name: type: string software_version: type: string test_name: type: string test_start_time: type: string test_version: type: string responses: '200': description: Open report confirmation content: application/json: schema: type: object properties: backend_version: type: string report_id: type: string supported_formats: type: array items: type: string """ log = current_app.logger try: data = req_json() except Exception as e: log.error(e) return jerror("JSON expected") log.info("Open report %r", data) asn = data.get("probe_asn", "AS0").upper() if len(asn) > 8 or len(asn) < 3 or not asn.startswith("AS"): asn = "AS0" try: asn_i = int(asn[2:]) except: asn_i = 0 cc = data.get("probe_cc", "ZZ").upper().replace("_", "") if len(cc) != 2: cc = "ZZ" test_name = data.get("test_name", "").lower().replace("_", "") ts = datetime.utcnow().strftime("%Y%m%dT%H%M%SZ") cid = "1" # collector id TODO read from conf rand = b64encode(urandom(12), b"oo").decode() rid = f"{ts}_{test_name}_{cc}_{asn_i}_n{cid}_{rand}" return jsonify( backend_version="1.3.5", supported_formats=["yaml", "json"], report_id=rid )
c5e824157ed382267236a5de98f0199b2b5ff23d
3,646,468
def prepare_scan(): """ Returns a lexical scanner for HTSQL grammar. """ # Start a new grammar. grammar = LexicalGrammar() # Regular context. query = grammar.add_rule('query') # Whitespace characters and comments (discarded). query.add_token(r''' SPACE: [\s]+ | [#] [^\0\r\n]* ''', is_junk=True) # A sequence of characters encloses in single quotes. query.add_token(r''' STRING: ['] ( [^'\0] | [']['] )* ['] ''', unquote=(lambda t: t[1:-1].replace("''", "'"))) # An opening quote character without a closing quote. query.add_token(r''' BAD_STRING: ['] ''', error="cannot find a matching quote mark") # A number in exponential notation. query.add_token(r''' FLOAT: ( [0-9]+ ( [.] [0-9]* )? | [.] [0-9]+ ) [eE] [+-]? [0-9]+ ''') # A number with a decimal point. query.add_token(r''' DECIMAL: [0-9]+ [.] [0-9]* | [.] [0-9]+ ''') # An unsigned integer number. query.add_token(r''' INTEGER: [0-9]+ ''') # A sequence of alphanumeric characters (not starting with a digit). query.add_token(r''' NAME: [\w]+ ''') # Operators and punctuation characters. The token code coincides # with the token value. query.add_token(r''' SYMBOL: [~] | [!][~] | [<][=] | [<] | [>][=] | [>] | [=][=] | [=] | [!][=][=] | [!][=] | [\^] | [?] | [-][>] | [@] | [:][=] | [!] | [&] | [|] | [+] | [-] | [*] | [/] | [(] | [)] | [{] | [}] | [.] | [,] | [:] | [;] | [$] ''', is_symbol=True) # The `[` character starts an identity constructor. query.add_token(r''' LBRACKET: [\[] ''', is_symbol=True, push='identity') # An unmatched `]`. query.add_token(r''' BAD_RBRACKET: [\]] ''', error="cannot find a matching '['") # The input end. query.add_token(r''' END: $ ''', is_symbol=True, pop=1) # Identity constructor context. identity = grammar.add_rule('identity') # Whitespace characters (discarded). identity.add_token(r''' SPACE: [\s]+ ''', is_junk=True) # Start of a nested label group. identity.add_token(r''' LBRACKET: [\[] | [(] ''', is_symbol=True, push='identity') # End of a label group or the identity constructor. identity.add_token(r''' RBRACKET: [\]] | [)] ''', is_symbol=True, pop=1) # Label separator. identity.add_token(r''' SYMBOL: [.] ''', is_symbol=True) # Unquoted sequence of alphanumeric characters and dashes. identity.add_token(r''' LABEL: [\w-]+ ''') # A sequence of characters encloses in single quotes. identity.add_token(r''' STRING: ['] ( [^'\0] | [']['] )* ['] ''', unquote=(lambda t: t[1:-1].replace("''", "'"))) # An opening quote character without a closing quote. identity.add_token(r''' BAD_STRING: ['] ''', error="cannot find a matching quote mark") # A reference indicator. identity.add_token(r''' REFERENCE: [$] ''', is_symbol=True, push='name') # Unexpected end of input. identity.add_token(r''' END: $ ''', error="cannot find a matching ']'") # A context for an identifier following the `$` indicator # in an identity constructor. We need a separate rule because # `%NAME` and `%LABEL` productions intersect. name = grammar.add_rule('name') # Whitespace characters (discarded). name.add_token(r''' SPACE: [\s]+ ''', is_junk=True) # An integer number; not expected here, but ensures that the following # `%NAME` production does not start with a digit. name.add_token(r''' INTEGER: [0-9]+ ''', pop=1) # A sequence of alphanumeric characters (not starting with a digit). name.add_token(r''' NAME: [\w]+ ''', pop=1) # Anything else. name.add_token(r''' OTHER: () ''', is_junk=True, pop=1) # Add a `%DIRSIG` token in front of `+` and `-` direction indicators # to distinguish them from addition/subtraction operators. 
grammar.add_signal(''' DIRSIG: ( `+` | `-` )+ ( `:` | `,` | `;` | `)` | `}` ) ''') # Add `%PIPESIG` in front of `/:` pipe indicator to prevent it from # being recognized as a division operator. grammar.add_signal(''' PIPESIG: `/` `:` ''') # Add `%LHSSIG` in front of a left-hand side of an assignment expression. grammar.add_signal(''' LHSSIG: `$`? %NAME ( `.` `$`? %NAME )* ( `(` ( `$`? %NAME ( `,` `$`? %NAME )* `,`? )? `)` )? `:=` ''') # Generate and return the scanner. return grammar()
ffc30354378a03f95be988b7ee62b01708795f41
3,646,469
def get_test_server(ctxt, **kw): """Return a Server object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ kw['object_type'] = 'server' get_db_server_checked = check_keyword_arguments( db_utils.get_test_server) db_server = get_db_server_checked(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_server['id'] server = objects.Server(ctxt, **db_server) return server
03d754223274282b15aeb9b5cf636f6acd90024c
3,646,470
def keras_model(optimizer="Adamax", activation="softplus", units=32):
    """Function to create model, required for KerasClassifier"""
    model = Sequential()
    model.add(Dense(units, activation="relu", input_dim=2500))
    model.add(Dense(2, activation=activation))
    model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
    return model
ccd1cc5652a207e3c4c2bc170d43fe22b4375c0b
3,646,471
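A usage sketch for keras_model above. The snippet omits its imports, so the tensorflow.keras imports below are an assumption about where Sequential and Dense come from.

# Assumption: the builder expects these names at module level (not shown in the snippet).
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

model = keras_model(optimizer="adam", activation="softmax", units=64)
model.summary()  # 2500 -> 64 -> 2, with a softmax output suited to 2-class one-hot labels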
def start_end_key(custom_cmp):
    """ Compare models with start and end dates. """
    class K(object):
        """
        Define comparison operators.
        http://code.activestate.com/recipes/576653-convert-a-cmp-function-to-a-key-function/
        """
        def __init__(self, obj, *args):
            self.obj = obj

        def __lt__(self, other):
            return custom_cmp(self.obj, other.obj) < 0

        def __gt__(self, other):
            return custom_cmp(self.obj, other.obj) > 0

        def __eq__(self, other):
            return custom_cmp(self.obj, other.obj) == 0

        def __le__(self, other):
            return custom_cmp(self.obj, other.obj) <= 0

        def __ge__(self, other):
            return custom_cmp(self.obj, other.obj) >= 0

        def __ne__(self, other):
            return custom_cmp(self.obj, other.obj) != 0

    return K
b1d7b48cc3e9926b6138850ad3b8307adbb4f2f3
3,646,472
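start_end_key works like functools.cmp_to_key: it wraps an old-style comparison function into a key class for sorted(). A small sketch with made-up (start, end) pairs:

# Hypothetical data: (start, end) pairs; sort by start first, then end.
spans = [(5, 9), (1, 4), (1, 2)]

def by_start_then_end(a, b):
    return (a[0] - b[0]) or (a[1] - b[1])

print(sorted(spans, key=start_end_key(by_start_then_end)))
# [(1, 2), (1, 4), (5, 9)]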
def get_previous_release_date(): """ Fetch the previous release date (i.e. the release date of the current live database) """ releases = Release.objects.all().order_by('-date') return str(releases[1].date)
764d90daaf5c60460f22e56063a40c261cb6b45e
3,646,473
def readLensModeParameters(calibfiledir, lensmode='WideAngleMode'): """ Retrieve the calibrated lens correction parameters """ # For wide angle mode if lensmode == 'WideAngleMode': LensModeDefaults, LensParamLines = [], [] with open(calibfiledir, 'r') as fc: # Read the full file as a line-split string block calib = fc.read().splitlines() # Move read cursor back to the beginning fc.seek(0) # Scan through calibration file, find and append line indices # (lind) to specific lens settings for lind, line in enumerate(fc): if '[WideAngleMode defaults' in line: LensModeDefaults.append(lind) elif '[WideAngleMode@' in line: LensParamLines.append(lind) # Specify regular expression pattern for retrieving numbers numpattern = r'[-+]?\d*\.\d+|[-+]?\d+' # Read detector settings at specific lens mode aRange, eShift = [], [] for linum in LensModeDefaults: # Collect the angular range aRange = parsenum( numpattern, calib, aRange, linenumber=linum, offset=2, Range='all') # Collect the eShift eShift = parsenum( numpattern, calib, eShift, linenumber=linum, offset=3, Range='all') # Read list calibrated Da coefficients at all retardation ratios rr, aInner, Da1, Da3, Da5, Da7 = [], [], [], [], [], [] for linum in LensParamLines: # Collect the retardation ratio (rr) rr = parsenum( numpattern, calib, rr, linenumber=linum, offset=0, Range='all') # Collect the aInner coefficient aInner = parsenum( numpattern, calib, aInner, linenumber=linum, offset=1, Range='all') # Collect Da1 coefficients Da1 = parsenum( numpattern, calib, Da1, linenumber=linum, offset=2, Range='1:4') # Collect Da3 coefficients Da3 = parsenum( numpattern, calib, Da3, linenumber=linum, offset=3, Range='1:4') # Collect Da5 coefficients Da5 = parsenum( numpattern, calib, Da5, linenumber=linum, offset=4, Range='1:4') # Collect Da7 coefficients Da7 = parsenum( numpattern, calib, Da7, linenumber=linum, offset=5, Range='1:4') aRange, eShift, rr, aInner = list(map(lambda x: np.asarray( x, dtype='float').ravel(), [aRange, eShift, rr, aInner])) Da1, Da3, Da5, Da7 = list( map(lambda x: np.asarray(x, dtype='float'), [Da1, Da3, Da5, Da7])) return aRange, eShift, rr, aInner, Da1, Da3, Da5, Da7 else: print('This mode is currently not supported!')
51245aa19f32ebb31df5748e0b40022ccae01e24
3,646,474
def scale(boxlist, y_scale, x_scale, scope=None): """scale box coordinates in x and y dimensions. Args: boxlist: BoxList holding N boxes y_scale: (float) scalar tensor x_scale: (float) scalar tensor scope: name scope. Returns: boxlist: BoxList holding N boxes """ with tf.name_scope(scope, 'Scale'): y_scale = tf.cast(y_scale, tf.float32) x_scale = tf.cast(x_scale, tf.float32) y_min, x_min, y_max, x_max = tf.split( value=boxlist.boxes, num_or_size_splits=4, axis=1) y_min = y_scale * y_min y_max = y_scale * y_max x_min = x_scale * x_min x_max = x_scale * x_max scaled_boxlist = BoxList( tf.concat([y_min, x_min, y_max, x_max], 1)) return _copy_extra_datas(scaled_boxlist, boxlist)
adffbdce632470852e0499bb93915f93a7695d5a
3,646,475
import requests def fetch(uri: str, method: str = 'get', token: str = None): """:rtype: (str|None, int)""" uri = 'https://api.github.com/{0}'.format(uri) auth = app.config['GITHUB_AUTH'] headers = {'Accept': 'application/vnd.github.mercy-preview+json'} json = None if token: headers['Authorization'] = 'token {}'.format(token) auth = None try: result = getattr(requests, method.lower())(uri, auth=auth, headers=headers) result.raise_for_status() json = result.json() if result.status_code != 204 else None except requests.HTTPError as e: app.logger.info( "Request to {} is failed ({}, {}): {}\n{}\n" .format(result.url, method, e.strerror, result.status_code, result.text) ) return json, result.status_code
14cde2808108173e6ab86f3eafb4c8e35daf4b40
3,646,476
from typing import OrderedDict from typing import Mapping from typing import Sequence from typing import Container from typing import Iterable from typing import Sized def nested_tuple(container): """Recursively transform a container structure to a nested tuple. The function understands container types inheriting from the selected abstract base classes in `collections.abc`, and performs the following replacements: `Mapping` `tuple` of key-value pair `tuple`s. The order is preserved in the case of an `OrderedDict`, otherwise the key-value pairs are sorted if orderable and otherwise kept in the order of iteration. `Sequence` `tuple` containing the same elements in unchanged order. `Container and Iterable and Sized` (equivalent to `Collection` in python >= 3.6) `tuple` containing the same elements in sorted order if orderable and otherwise kept in the order of iteration. The function recurses into these container types to perform the same replacement, and leaves objects of other types untouched. The returned container is hashable if and only if all the values contained in the original data structure are hashable. Parameters ---------- container Data structure to transform into a nested tuple. Returns ------- tuple Nested tuple containing the same data as `container`. """ if isinstance(container, OrderedDict): return tuple(map(nested_tuple, container.items())) if isinstance(container, Mapping): return tuple(sorted_if_possible(map(nested_tuple, container.items()))) if not isinstance(container, (str, bytes)): if isinstance(container, Sequence): return tuple(map(nested_tuple, container)) if ( isinstance(container, Container) and isinstance(container, Iterable) and isinstance(container, Sized) ): return tuple(sorted_if_possible(map(nested_tuple, container))) return container
60dac69865d753b14558d7156e40703e26fb57a1
3,646,477
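nested_tuple above depends on a sorted_if_possible helper that is not shown in the snippet; the stand-in below is an assumption (sort when the elements are orderable, otherwise keep iteration order). The printed output assumes that stand-in.

# Minimal stand-in for the missing helper.
def sorted_if_possible(iterable):
    items = list(iterable)
    try:
        return sorted(items)
    except TypeError:
        return items

data = {"b": [1, 2], "a": {"x": 3}}
frozen = nested_tuple(data)
print(frozen)        # (('a', (('x', 3),)), ('b', (1, 2)))
print(hash(frozen))  # hashable, so usable as a dict key or cache key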
from typing import OrderedDict def _validate_args(func, args, kwargs): """Validate customer function args and convert them to kwargs.""" # Positional arguments validate all_parameters = [param for _, param in signature(func).parameters.items()] # Implicit parameter are *args and **kwargs if any(param.kind in {param.VAR_KEYWORD, param.VAR_POSITIONAL} for param in all_parameters): raise UnsupportedParameterKindError(func.__name__) all_parameter_keys = [param.name for param in all_parameters] empty_parameters = {param.name: param for param in all_parameters if param.default is Parameter.empty} min_num = len(empty_parameters) max_num = len(all_parameters) if len(args) > max_num: raise TooManyPositionalArgsError(func.__name__, min_num, max_num, len(args)) provided_args = OrderedDict({param.name: args[idx] for idx, param in enumerate(all_parameters) if idx < len(args)}) for _k in kwargs.keys(): if _k not in all_parameter_keys: raise UnexpectedKeywordError(func.__name__, _k, all_parameter_keys) if _k in provided_args.keys(): raise MultipleValueError(func.__name__, _k) provided_args[_k] = kwargs[_k] if len(provided_args) < len(empty_parameters): missing_keys = empty_parameters.keys() - provided_args.keys() raise MissingPositionalArgsError(func.__name__, missing_keys) for pipeline_input_name in provided_args: data = provided_args[pipeline_input_name] if data is not None and not isinstance(data, SUPPORTED_INPUT_TYPES): msg = ( "Pipeline input expected an azure.ai.ml.Input or primitive types (str, bool, int or float), " "but got type {}." ) raise UserErrorException( message=msg.format(type(data)), no_personal_data_message=msg.format("[type(pipeline_input_name)]"), ) return provided_args
51d357d032dc0b26aeb32d1850b1a630bafab508
3,646,478
def _qual_arg(user_value, python_arg_name, gblock_arg_name, allowable): """ Construct and sanity check a qualitative argument to send to gblocks. user_value: value to try to send to gblocks python_arg_name: name of python argument (for error string) gblock_arg_name: name of argument in gblocks allowable: dictionary of allowable values mapping python to whatever should be jammed into gblocks """ if user_value in allowable.keys(): return "-{}={}".format(gblock_arg_name,allowable[user_value]) else: err = "\n\n{} '{}' not recognized\n".format(python_arg_name, user_value) err += "must be one of:\n" allowed = list(allowable) allowed.sort() for a in allowed: err += " {}\n".format(a) raise ValueError(err)
7bf6717ee3dbeb533902773c86316d2bbdcd59a9
3,646,479
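A usage sketch for _qual_arg above. The option names and the allowable mapping are hypothetical, purely to show the formatting and the error path.

allowed = {"all": "a", "half": "h", "none": "n"}
print(_qual_arg("half", "allowed_gaps", "b5", allowed))  # -> "-b5=h"

try:
    _qual_arg("some", "allowed_gaps", "b5", allowed)
except ValueError as err:
    print(err)  # explains that 'some' is not recognized and lists all / half / none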
def is_valid_ip(ip_addr):
    """
    Validate a dotted-quad IPv4 address.

    :param ip_addr: address string such as "192.168.1.1"
    :return: True if the address has four octets in the range 0-255;
             otherwise prints a message and exits the program
    """
    octet_ip = ip_addr.split(".")
    int_octet_ip = [int(i) for i in octet_ip]
    if (len(int_octet_ip) == 4) and \
            (0 <= int_octet_ip[0] <= 255) and \
            (0 <= int_octet_ip[1] <= 255) and \
            (0 <= int_octet_ip[2] <= 255) and \
            (0 <= int_octet_ip[3] <= 255):
        return True
    else:
        print("Invalid IP, closing program... \n")
        exit(0)
7d776107f54e3c27a2a918570cbb267b0e9f419e
3,646,480
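Note that is_valid_ip above exits the process instead of returning False, so it only fits scripts where aborting on bad input is acceptable.

print(is_valid_ip("192.168.1.1"))  # True
# is_valid_ip("192.168.1.999") would print "Invalid IP, closing program..." and call exit(0);
# non-numeric octets raise ValueError in the int() conversion before the range check runs.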
def make_replay_buffer(env: gym.Env, size: int) -> ReplayBuffer: """Make a replay buffer. If not ShinEnv: Returns a ReplayBuffer with ("rew", "done", "obs", "act", "log_prob", "timeout"). If ShinEnv: Returns a ReplayBuffer with ("rew", "done", "obs", "act", "log_prob", "timeout", "state"). """ is_shin_env = hasattr(env, "mdp") if isinstance(env.action_space, gym.spaces.Discrete): act_type, act_shape = int, 1 elif isinstance(env.action_space, gym.spaces.Box): act_type, act_shape = float, env.action_space.shape env_dict = { "rew": {"dtype": float, "shape": 1}, "done": {"dtype": bool, "shape": 1}, "obs": {"dtype": float, "shape": env.observation_space.shape}, "act": {"dtype": act_type, "shape": act_shape}, "log_prob": {"dtype": float, "shape": act_shape}, "timeout": {"dtype": bool, "shape": 1}, } if is_shin_env: env_dict.update({"state": {"dtype": int, "shape": 1}}) return ReplayBuffer(size, env_dict, next_of=("obs", "state")) return ReplayBuffer(size, env_dict, next_of=("obs",))
27f7c0bae37fc1963f4f7c72b42e8da424ab313e
3,646,481
from typing import Callable import decimal def scale_places(places: int) -> Callable[[decimal.Decimal], decimal.Decimal]: """ Returns a function that shifts the decimal point of decimal values to the right by ``places`` places. """ if not isinstance(places, int): raise ValueError( 'Argument `places` must be int. Got value {} of type {}.'. format(places, type(places)), ) with decimal.localcontext(abi_decimal_context): scaling_factor = TEN ** -places def f(x: decimal.Decimal) -> decimal.Decimal: with decimal.localcontext(abi_decimal_context): return x * scaling_factor places_repr = 'Eneg{}'.format(places) if places > 0 else 'Epos{}'.format(-places) func_name = 'scale_by_{}'.format(places_repr) f.__name__ = func_name f.__qualname__ = func_name return f
aaf2d9eb14d7a1b28d169d971011b456e2164000
3,646,482
def create_model(params : model_params): """ Create ReasoNet model Args: params (class:`model_params`): The parameters used to create the model """ logger.log("Create model: dropout_rate: {0}, init:{1}, embedding_init: {2}".format(params.dropout_rate, params.init, params.embedding_init)) # Query and Doc/Context/Paragraph inputs to the model query_seq_axis = Axis('sourceAxis') context_seq_axis = Axis('contextAxis') query_sequence = sequence.input(shape=(params.vocab_dim), is_sparse=True, sequence_axis=query_seq_axis, name='query') context_sequence = sequence.input(shape=(params.vocab_dim), is_sparse=True, sequence_axis=context_seq_axis, name='context') entity_ids_mask = sequence.input(shape=(1,), is_sparse=False, sequence_axis=context_seq_axis, name='entity_ids_mask') # embedding if params.embedding_init is None: embedding_init = create_random_matrix(params.vocab_dim, params.embedding_dim) else: embedding_init = params.embedding_init embedding = parameter(shape=(params.vocab_dim, params.embedding_dim), init=None) embedding.value = embedding_init embedding_matrix = constant(embedding_init, shape=(params.vocab_dim, params.embedding_dim)) if params.dropout_rate is not None: query_embedding = ops.dropout(times(query_sequence , embedding), params.dropout_rate, name='query_embedding') context_embedding = ops.dropout(times(context_sequence, embedding), params.dropout_rate, name='context_embedding') else: query_embedding = times(query_sequence , embedding, name='query_embedding') context_embedding = times(context_sequence, embedding, name='context_embedding') contextGruW = Parameter(_INFERRED + _as_tuple(params.hidden_dim), init=glorot_uniform(), name='gru_params') queryGruW = Parameter(_INFERRED + _as_tuple(params.hidden_dim), init=glorot_uniform(), name='gru_params') entity_embedding = ops.times(context_sequence, embedding_matrix, name='constant_entity_embedding') # Unlike other words in the context, we keep the entity vectors fixed as a random vector so that each vector just means an identifier of different entities in the context and it has no semantic meaning full_context_embedding = ops.element_select(entity_ids_mask, entity_embedding, context_embedding) context_memory = ops.optimized_rnnstack(full_context_embedding, contextGruW, params.hidden_dim, 1, True, recurrent_op='gru', name='context_mem') query_memory = ops.optimized_rnnstack(query_embedding, queryGruW, params.hidden_dim, 1, True, recurrent_op='gru', name='query_mem') qfwd = ops.slice(sequence.last(query_memory), -1, 0, params.hidden_dim, name='fwd') qbwd = ops.slice(sequence.first(query_memory), -1, params.hidden_dim, params.hidden_dim*2, name='bwd') init_status = ops.splice(qfwd, qbwd, name='Init_Status') # get last fwd status and first bwd status return attention_model(context_memory, query_memory, init_status, params.hidden_dim, params.attention_dim, max_steps = params.max_rl_steps)
b175adef530dbbbdb132fed0a6653945ec02fef9
3,646,483
def _process_voucher_data_for_order(cart): """Fetch, process and return voucher/discount data from cart.""" vouchers = Voucher.objects.active(date=date.today()).select_for_update() voucher = get_voucher_for_cart(cart, vouchers) if cart.voucher_code and not voucher: msg = pgettext( 'Voucher not applicable', 'Voucher expired in meantime. Order placement aborted.') raise NotApplicable(msg) if not voucher: return {} increase_voucher_usage(voucher) return { 'voucher': voucher, 'discount_amount': cart.discount_amount, 'discount_name': cart.discount_name, 'translated_discount_name': cart.translated_discount_name}
d89816fc24192d7d2d4ce7d8edaf11ae94e3f171
3,646,484
def transform_batch(images, max_rot_deg, max_shear_deg, max_zoom_diff_pct, max_shift_pct, experimental_tpu_efficiency=True): """Transform a batch of square images with the same randomized affine transformation. """ def clipped_random(): rand = tf.random.normal([1], dtype=tf.float32) rand = tf.clip_by_value(rand, -2., 2.) / 2. return rand batch_size = images.shape[0] tf.debugging.assert_equal( images.shape[1], images.shape[2], "Images should be square") DIM = images.shape[1] channels = images.shape[3] XDIM = DIM % 2 rot = max_rot_deg * clipped_random() shr = max_shear_deg * clipped_random() h_zoom = 1.0 + clipped_random()*max_zoom_diff_pct w_zoom = 1.0 + clipped_random()*max_zoom_diff_pct h_shift = clipped_random()*(DIM*max_shift_pct) w_shift = clipped_random()*(DIM*max_shift_pct) # GET TRANSFORMATION MATRIX m = get_mat(rot,shr,h_zoom,w_zoom,h_shift,w_shift) # LIST DESTINATION PIXEL INDICES x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM) # 10000, y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM]) z = tf.ones([DIM*DIM],tf.int32) idx = tf.stack( [x,y,z] ) # [3, 10000] # ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS idx2 = tf.matmul(m,tf.cast(idx,tf.float32)) idx2 = tf.cast(idx2,tf.int32) idx2 = tf.clip_by_value(idx2,-DIM//2+XDIM+1,DIM//2) # FIND ORIGIN PIXEL VALUES idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] ) idx3 = tf.transpose(idx3) batched_idx3 = tf.tile(idx3[tf.newaxis], [batch_size, 1, 1]) if experimental_tpu_efficiency: # This reduces excessive padding in the original tf.gather_nd op idx4 = idx3[:, 0] * DIM + idx3[:, 1] images = tf.reshape(images, [batch_size, DIM * DIM, channels]) d = tf.gather(images, idx4, axis=1) return tf.reshape(d, [batch_size,DIM,DIM,channels]) else: d = tf.gather_nd(images, batched_idx3, batch_dims=1) return tf.reshape(d,[batch_size,DIM,DIM,channels])
5486b1e9bbaf162e7a188c25517b0c164c8da317
3,646,485
def prep_seven_zip_path(path, talkative=False): """ Print p7zip path on POSIX, or notify if not there. :param path: Path to use. :type path: str :param talkative: Whether to output to screen. False by default. :type talkative: bool """ if path is None: talkaprint("NO 7ZIP\nPLEASE INSTALL p7zip", talkative) sentinel = False else: talkaprint("7ZIP FOUND AT {0}".format(path), talkative) sentinel = True return sentinel
c9d4cc77111c8fc9768c713556fb16e5b8f69ec2
3,646,486
from typing import Dict

async def root() -> Dict[str, str]:
    """ Endpoint for basic connectivity test. """
    logger.debug('root requested')
    return {'message': 'OK'}
97721416c745d460cd60cea486fa5367ff52cffa
3,646,487
def overlapping_community(G, community):
    """Return True if community partitions G into overlapping sets.
    """
    community_size = sum(len(c) for c in community)
    # community size must be larger to be overlapping
    if not len(G) < community_size:
        return False
    # check that the set of nodes in the communities is the same as G
    if not set(G) == set.union(*community):
        return False
    return True
da9e3465c6351df0efd19863e579c49bbc6b9d67
3,646,488
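A quick check of overlapping_community above on a small graph; networkx is assumed only because the function iterates the nodes of G, any node-iterable graph would do.

import networkx as nx

G = nx.path_graph(4)  # nodes 0..3
print(overlapping_community(G, [{0, 1, 2}, {2, 3}]))  # True: node 2 is shared and every node is covered
print(overlapping_community(G, [{0, 1}, {2, 3}]))     # False: a plain, non-overlapping partition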
import json def validate_credential(zone, credential): """ Token is already calculated """ source = DataSource(DataSource.TYPE_DATABASE, CONNECTION_FILE_PATH) canAccess = source.get_or_create_client_access_rights(credential, zone) if canAccess: return json.dumps({'success':True}), 200, {'ContentType':'application/json'} else: return json.dumps({'success':False}), 403, {'ContentType':'application/json'}
083ecc977b53e6f5c5df64b0ed52ad9ebeeee821
3,646,489
def gm(data,g1=0.0,g2=0.0,g3=0.0,inv=False): """ Lorentz-to-Gauss Apodization Functional form: gm(x_i) = exp(e - g*g) Where: e = pi*i*g1 g = 0.6*pi*g2*(g3*(size-1)-i) Parameters: * data Array of spectral data. * g1 Inverse exponential width. * g2 Gaussian broaden width. * g3 Location of gauss maximum. * inv Set True for inverse apodization. """ size = data.shape[-1] e = pi*np.arange(size)*g1 g = 0.6*pi*g2*(g3*(size-1) - np.arange(size)) apod = np.exp(e-g*g, sig = data.dtype) if inv: apod = 1/apod return apod*data
7c6aec6d9a21f9c5b2800aa742e5aaa3ead1ac63
3,646,490
import torch

def exp_t(u, t):
    """Compute exp_t for `u`."""
    if t == 1.0:
        return torch.exp(u)
    else:
        return torch.relu(1.0 + (1.0 - t) * u) ** (1.0 / (1.0 - t))
8b1a8773b8a5159d9332332d6f77d65cacc68d7c
3,646,491
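exp_t above reduces to the ordinary exponential at t = 1 and otherwise clips negative arguments through the relu, as in tempered (Tsallis-style) exponentials. A small numerical check:

import torch

u = torch.tensor([0.0, 0.5, 1.0])
print(exp_t(u, 1.0))  # tensor([1.0000, 1.6487, 2.7183]) -- plain exp
print(exp_t(u, 2.0))  # tensor([1., 2., inf])            -- relu(1 - u) ** -1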
def decode_json_dict(data): # type: (Dict) -> Dict """Converts str to python 2 unicodes in JSON data.""" return _strify(data)
d2512ea50bf5cfca059ca706adc403bea5af1753
3,646,492
from typing import Any

def linear_search(lst: list, x: Any) -> int:
    """Return the index of the first element of `lst` equal to `x`, or -1 if
    no elements of `lst` are equal to `x`.

    Design idea: Scan the list from start to finish.

    Complexity: O(n) time, O(1) space.

    For an improvement on linear search for sorted lists, see the binary search
    function in the decrease_and_conquer module.
    """
    for i, y in enumerate(lst):
        if x == y:
            return i
    return -1
47e73d53ff68954aadc6d0e9e293643717a807d8
3,646,493
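A quick usage example for linear_search above, pure Python with no dependencies.

print(linear_search([3, 1, 4, 1, 5], 4))  # 2
print(linear_search([3, 1, 4, 1, 5], 9))  # -1
print(linear_search([3, 1, 4, 1, 5], 1))  # 1 -- the first match wins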
def get_color_cmap(name, n_colors=6): """ Return discrete colors from a matplotlib palette. :param name: Name of the palette. This should be a named matplotlib colormap. :type: str :param n_colors: Number of discrete colors in the palette. :type: int :return: List-like object of colors as hexadecimal tuples :type: list """ brewer_qual_pals = {"Accent": 8, "Dark2": 8, "Paired": 12, "Pastel1": 9, "Pastel2": 8, "Set1": 9, "Set2": 8, "Set3": 12, 'tab20':20, 'tab20b':20} if name == 'tab20' and n_colors > 19: second = 'tab20b' ncolor2 = n_colors - 19 n_colors = 19 else : second = False cmap = getattr(cm, name) if name in brewer_qual_pals: bins = np.linspace(0, 1, brewer_qual_pals[name]) if 'tab20' == name : len_bins = len(bins) bins = [bins[i] for i in range(len_bins) if i != 14][:n_colors] else : bins = bins[:n_colors] else: bins = np.linspace(0, 1, n_colors + 2)[1:-1] palette = list(map(tuple, cmap(bins)[:, :3])) if second : cmap = getattr(cm, second) bins = np.linspace(0, 1, brewer_qual_pals[second])[:ncolor2] palette += list(map(tuple, cmap(bins)[:, :3])) pal_cycle = cycle(palette) palette = [next(pal_cycle) for _ in range(n_colors+ncolor2)] else : pal_cycle = cycle(palette) palette = [next(pal_cycle) for _ in range(n_colors)] return [colors.rgb2hex(rgb) for rgb in palette]
90550127196bb1841f48d37ed1f304462d165037
3,646,494
import numpy as np

def logkde2entropy(vects, logkde):
    """
    computes the entropy of the kde
    incorporates vects so that kde is properly normalized (transforms into a truly discrete distribution)
    """
    vol = vects2vol(vects)  # cell volume; re-used below instead of recomputing it
    truth = logkde > -np.infty
    return -vol*np.sum(np.exp(logkde[truth])*logkde[truth])
5ce96636607bc3b2160791cda28ef586cb0f29c2
3,646,495
from typing import Optional from typing import Dict import json def get_deployment_json( runner: Runner, deployment_name: str, context: str, namespace: str, deployment_type: str, run_id: Optional[str] = None, ) -> Dict: """Get the decoded JSON for a deployment. If this is a Deployment we created, the run_id is also passed in - this is the uuid we set for the telepresence label. Otherwise run_id is None and the Deployment name must be used to locate the Deployment. """ assert context is not None assert namespace is not None span = runner.span() try: get_deployment = [ "get", deployment_type, "-o", "json", "--export", ] if run_id is None: return json.loads( runner.get_kubectl( context, namespace, get_deployment + [deployment_name], stderr=STDOUT ) ) else: # When using a selector we get a list of objects, not just one: return json.loads( runner.get_kubectl( context, namespace, get_deployment + ["--selector=telepresence=" + run_id], stderr=STDOUT ) )["items"][0] except CalledProcessError as e: raise SystemExit( "Failed to find Deployment '{}': {}".format( deployment_name, str(e.stdout, "utf-8") ) ) finally: span.end()
b9cb4cabea6a506cc33c18803bbe45699cf2b222
3,646,496
import ctypes

def is_admin() -> bool:
    """Check whether the script is running with administrator privileges (Windows only)."""
    try:
        return ctypes.windll.shell32.IsUserAnAdmin()
    except AttributeError:
        # ctypes.windll only exists on Windows
        return None
000fdc8034bf026045af0a5264936c6847489063
3,646,497
import urllib def get_firewall_status(gwMgmtIp, api_key): """ Reruns the status of the firewall. Calls the op command show chassis status Requires an apikey and the IP address of the interface we send the api request :param gwMgmtIp: :param api_key: :return: """ global gcontext # cmd = urllib.request.Request('https://google.com') cmd = urllib.request.Request( "https://" + gwMgmtIp + "/api/?type=op&cmd=<show><chassis-ready></chassis-ready></show>&key=" + api_key) # Send command to fw and see if it times out or we get a response logger.info('[INFO]: Sending command: {}'.format(cmd)) try: response = urllib.request.urlopen(cmd, data=None, context=gcontext, timeout=5).read() logger.info( "[INFO]:Got http 200 response from FW with address {}. So need to check the response".format(gwMgmtIp)) # Now we do stuff to the gw except urllib.error.URLError: logger.info("[INFO]: No response from FW with address {}. So maybe not up!".format(gwMgmtIp)) return 'down' # sleep and check again? else: logger.info("[INFO]: FW is responding!!") logger.info("[RESPONSE]: {}".format(response)) resp_header = et.fromstring(response) if resp_header.tag != 'response': logger.info("[ERROR]: didn't get a valid response from firewall...maybe a timeout") return 'down' if resp_header.attrib['status'] == 'error': logger.info("[ERROR]: Got response header error for the command") return 'down' if resp_header.attrib['status'] == 'success': # The fw responded with a successful command execution for element in resp_header: if element.text.rstrip() == 'yes': # Call config gw command? logger.info("[INFO]: FW with ip {} is ready ".format(gwMgmtIp)) return 'running' else: return 'down'
16d06a5659e98b3d420ab90b21d720367ecde97a
3,646,498
import logging

def create_logger(name, logfile, level):
    """
    Sets up file logger.
    :param name: Logger name
    :param logfile: Location of log file
    :param level: logging level
    :return: Initiated logger
    """
    logger = logging.getLogger(name)
    handler = logging.FileHandler(logfile)
    formatter = logging.Formatter(
        '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(level)
    return logger
83a0614053c558682588c47e641eceee368f88e0
3,646,499
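A usage sketch for create_logger above; the logger name and log-file path are illustrative.

import logging

log = create_logger("worker", "worker.log", logging.DEBUG)
log.info("worker started")    # appended to worker.log with timestamp, name and level
log.debug("verbose details")  # also recorded, since the level is DEBUG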