from statsmodels.tsa.stattools import adfuller


def adfuller_test(series, signif=0.05, name='', verbose=False):
    """Perform the Augmented Dickey-Fuller test for stationarity of the given series,
    print a report and return whether the series is stationary."""
    r = adfuller(series, autolag='AIC')
    output = {'test_statistic': round(r[0], 4), 'pvalue': round(r[1], 4), 'n_lags': round(r[2], 4), 'n_obs': r[3]}
    p_value = output['pvalue']

    def adjust(val, length=6):
        return str(val).ljust(length)

    # Print summary
    if verbose:
        print(' Augmented Dickey-Fuller Test on "{}"'.format(name), "\n ", '-' * 47)
        print(' Null Hypothesis: Data has unit root. Non-Stationary.')
        print(' Significance Level = {}'.format(signif))
        print(' Test Statistic = {}'.format(output["test_statistic"]))
        print(' No. Lags Chosen = {}'.format(output["n_lags"]))
        for key, val in r[4].items():
            print(' Critical value {} = {}'.format(adjust(key), round(val, 3)))

    if p_value <= signif:
        if verbose:
            print(" => P-Value = {}. Rejecting Null Hypothesis.".format(p_value))
            print(" => Series is Stationary.")
        return True
    else:
        if verbose:
            print(" => P-Value = {}. Weak evidence to reject the Null Hypothesis.".format(p_value))
            print(" => Series is Non-Stationary.")
        return False
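A minimal usage sketch for the stationarity check above, assuming statsmodels is installed and adfuller_test is defined as shown. The white-noise series is stationary by construction, while its cumulative sum (a random walk) has a unit root.

import numpy as np

rng = np.random.default_rng(0)
white_noise = rng.normal(size=500)    # stationary by construction
random_walk = np.cumsum(white_noise)  # has a unit root, i.e. non-stationary

print(adfuller_test(white_noise, name='white noise', verbose=True))  # expected: True
print(adfuller_test(random_walk, name='random walk', verbose=True))  # expected: False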
5,341,400
def AddRemoveEndpoint(endpoint_group, endpoint_spec, support_global_scope, support_hybrid_neg, support_l4ilb_neg):
  """Adds remove endpoint argument for updating network endpoint groups."""
  help_text = """\
The network endpoint to detach from the network endpoint group. Allowed keys are:

* instance - Name of instance in same zone as network endpoint group.

* ip - Optional IP address of the network endpoint. If the IP address is not specified then all network endpoints that belong to the instance are removed from the NEG.

* port - Optional port for the network endpoint. Required if the network endpoint type is `GCE_VM_IP_PORT`.
"""
  if support_global_scope or support_hybrid_neg or support_l4ilb_neg:
    help_text = """\
The network endpoint to detach from the network endpoint group. Keys used depend on the endpoint type of the NEG.

`GCE_VM_IP_PORT`

*instance* - Required name of instance whose endpoint(s) to detach. If IP address is unset then all endpoints for the instance in the NEG will be detached.

*ip* - Optional IP address of the network endpoint to detach. If specified port must be provided as well.

*port* - Optional port of the network endpoint to detach.
"""
  if support_global_scope:
    help_text += """\
`INTERNET_IP_PORT`

*ip* - Required IP address of the network endpoint to detach.

*port* - Optional port of the network endpoint to detach if the endpoint has a port specified.

`INTERNET_FQDN_PORT`

*fqdn* - Required fully qualified domain name of the endpoint to detach.

*port* - Optional port of the network endpoint to detach if the endpoint has a port specified.
"""
  if support_hybrid_neg:
    help_text += """\
`NON_GCP_PRIVATE_IP_PORT`

*ip* - Required IP address of the network endpoint to detach.

*port* - Required port of the network endpoint to detach unless NEG default port is set.
"""
  if support_l4ilb_neg:
    help_text += """\
`GCE_VM_PRIMARY_IP`

*ip* - Required IP address of the network endpoint to attach. The IP address must be the primary IP of a VM's primary network interface.
"""
  endpoint_group.add_argument(
      '--remove-endpoint',
      action='append',
      type=arg_parsers.ArgDict(spec=endpoint_spec),
      help=help_text)
5,341,401
import numpy as np
from pyhdf.SD import SD, SDC


def wrhdf(hdf_filename, x, y, z, f):
    """
    Write an HDF4 file. x, y, and z are the scales; f is the data.

    str: hdf_filename HDF4 filename.
    """
    # Create an HDF file
    sd_id = SD(hdf_filename, SDC.WRITE | SDC.CREATE | SDC.TRUNC)

    if f.dtype == np.float32:
        ftype = SDC.FLOAT32
    elif f.dtype == np.float64:
        ftype = SDC.FLOAT64

    # Create the dataset ("Data-Set-2" is the name used by the psi data).
    sds_id = sd_id.create("Data-Set-2", ftype, f.shape)

    # Get number of dimensions:
    ndims = np.ndim(f)

    # Set the scales:
    for i in range(0, ndims):
        dim = sds_id.dim(i)
        if i == 0:
            if x.dtype == np.float32:
                stype = SDC.FLOAT32
            elif x.dtype == np.float64:
                stype = SDC.FLOAT64
            dim.setscale(stype, x)
        elif i == 1:
            if y.dtype == np.float32:
                stype = SDC.FLOAT32
            elif y.dtype == np.float64:
                stype = SDC.FLOAT64
            dim.setscale(stype, y)
        elif i == 2:
            if z.dtype == np.float32:
                stype = SDC.FLOAT32
            elif z.dtype == np.float64:
                stype = SDC.FLOAT64
            dim.setscale(stype, z)

    # Write the data:
    sds_id.set(f)

    # Close the dataset:
    sds_id.endaccess()

    # Flush and close the HDF file:
    sd_id.end()
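A hypothetical usage sketch, assuming pyhdf is installed; the array shapes are illustrative only. Each scale length must match the corresponding dimension of f (x with axis 0, y with axis 1, z with axis 2).

import numpy as np

x = np.linspace(0.0, 1.0, 4, dtype=np.float32)   # scale for axis 0
y = np.linspace(0.0, 1.0, 3, dtype=np.float32)   # scale for axis 1
z = np.linspace(0.0, 1.0, 2, dtype=np.float32)   # scale for axis 2
f = np.zeros((4, 3, 2), dtype=np.float32)        # data with matching shape

wrhdf('example.hdf', x, y, z, f)                 # writes example.hdf to the working directory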
5,341,402
def datetime_to_timestring(dt_):
    """
    Returns a pretty formatted string from a datetime object.

    For example,
    >>> datetime.time(hour=9, minute=10, second=30)
    ... "09:10:30"

    :param dt_: :class:`datetime.datetime` or :class:`datetime.time`
    :returns: :class:`str`
    """
    return pad(dt_.hour) + ':' + pad(dt_.minute) + ':' + pad(dt_.second)
5,341,403
def test_invalid_backtest_metrics(catboost_pipeline: Pipeline, metrics: List[Metric], example_tsdf: TSDataset):
    """Test Pipeline.backtest behavior in case of invalid metrics."""
    with pytest.raises(ValueError):
        _ = catboost_pipeline.backtest(ts=example_tsdf, metrics=metrics, n_folds=2)
5,341,404
def save_eog_model(model):
    """
    Save EOG model to disk using pickle.

    :param model: EOG model to save to disk
    """
    _save_model(model, EOG_MODEL_DIR)
5,341,405
def get_class_for_name(name: str, module_name: str = __name__) -> Type:
    """Gets a class from a module based on its name.

    Tread carefully with this. Personally I feel like it's only safe to use
    with dataclasses with known interfaces.

    Parameters
    ----------
    name : str
        Name of the class we're trying to get the class object for.
    module_name : str, optional
        Which module to get a class from, by default __name__.

    Returns
    -------
    Type
        The class object looked up by name.
    """
    import importlib

    this_module = importlib.import_module(module_name)
    this_class = getattr(this_module, name)
    return this_class
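A small usage sketch. It assumes the dataclass below is defined in the same module (e.g. the same script) as get_class_for_name, since the default module_name is that module's __name__; the Point class is purely illustrative.

from dataclasses import dataclass

@dataclass
class Point:
    x: int
    y: int

cls = get_class_for_name("Point")
print(cls(x=1, y=2))  # Point(x=1, y=2)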
5,341,406
def get_cache_file_static():
    """
    Helper function to get the path to the VCR cache file for requests that
    must be updated by hand in cases where regular refreshing is infeasible,
    i.e. limited access to the real server.

    To update this server recording:
    1) delete the existing recording
    2) re-run all tests (with API keys for telescopes in place)
    3) replace any secret information (such as API keys) with dummy values
    4) commit recording
    """
    return "data/tests/test_server_recordings_static.yaml"
5,341,407
import numpy as np


def intervals_where_mask_is_true(mask):
    """Determine intervals where a 1D boolean mask is True.

    Parameters
    ----------
    mask : numpy.ndarray
        Boolean mask.

    Returns
    -------
    ranges : list
        List of slice intervals [(low, upp), ...] indicating where the mask
        has `True` values.
    """
    indices = np.where(mask == True)[0]  # noqa: E712
    if indices.size == 0:
        return []

    nonzero = np.append(np.zeros(1), (indices[1:] - indices[:-1]) - 1)
    nonzero = nonzero.astype('int')
    indices_nonzero = np.argwhere(nonzero != 0)

    breakpoints = [indices[0]]
    if indices_nonzero.size != 0:
        for i in indices_nonzero:
            breakpoints.append(indices[i[0] - 1] + 1)
            breakpoints.append(indices[i[0]])
    breakpoints.append(indices[-1] + 1)

    ranges = []
    for i in range(int(len(breakpoints) / 2)):
        low, upp = breakpoints[i * 2], breakpoints[i * 2 + 1]
        if low != upp:
            ranges.append([low, upp])
        # if there is one single positive channel at the end
        # TODO: check if this may cause problems
        else:
            ranges.append([low, upp + 1])

    return ranges
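A worked example of the interval extraction above; each returned pair is a half-open [low, upp) slice covering one run of True values.

import numpy as np

mask = np.array([False, True, True, False, False, True, False, True])
for low, upp in intervals_where_mask_is_true(mask):
    print(low, upp)   # prints: 1 3, then 5 6, then 7 8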
5,341,408
def get_FAAM_mineral_dust_calibration(instrument='PCASP', rtn_values=True):
    """
    Retrieve FAAM mineral dust calibration
    """
    # Location and name of calibration files?
    folder = '{}/FAAM/'.format(get_local_folder('ARNA_data'))
    if instrument == 'PCASP':
        # NOTE: range ~0.1-4 microns
        filename = 'PCASP1_faam_20200128_v001_r000_cal.nc'
        # NOTE: dust values are a nc subgroup!
        # group = 'bin_cal'
        group = 'bin_cal/mineral_dust'
        # group = 'flow_cal'
        # The real part of the refractive index was taken as 1.53, which is a
        # common value and is in the OPAC database. It is quite a bit smaller
        # than the 1.547 reported by Weinzierl et al. [2011], but has been
        # shown to have a relatively weak effect on the instrument response.
        # The values of the imaginary part were based on references in Ryder
        # et al. [2019] along with the frequency distribution of k(550nm)
        # presented in fig 9 of Ryder et al. [2013], so the minimum value was
        # extended from 0.0015i to 0.001i. Calculating the bin boundaries with
        # these multiple Mie curves was done with Gaussian centre-weighted
        # averaging, with 0.001i and 0.0024i being +/-2 sigma extreme values.
    elif instrument == 'CDP':
        # NOTE: range ~4-120 microns
        filename = 'CDP1_faam_20200208_v001_r000_cal.nc'
        # NOTE: dust values are a nc subgroup!
        group = 'master_cal/mineral_dust'
    # Open and return the widths and centres
    ds = xr.open_dataset(folder + filename, group=group)
    # Get values for bin centres and widths in microns (1E-6 metres)
    BinWidths = ds['dia_width'].values.flatten()
    BinCentres = ds['dia_centre'].values.flatten()
    d = {'BinWidths': BinWidths, 'BinCentres': BinCentres}
    if rtn_values:
        return d
    else:
        return ds
5,341,409
async def cancel_handler(message: types.Message, state: FSMContext):
    """
    @dp.message_handler(state='*', commands='cancel')
    """
    current_state = await state.get_state()
    if current_state is None:
        return
    await state.finish()
    # Reply (in Russian): "Authorization cancelled." plus a link to the FAQ on why entering credentials is safe.
    await message.reply(
        md.text(
            md.hbold('Авторизация отменена.'),
            md.text('Если ты боишься вводить свои данные, ознакомься со следующей <a href="https://orioks-monitoring.github.io/bot/faq#почему-это-безопасно">информацией</a>'),
            sep='\n',
        ),
        reply_markup=keyboards.main_menu_keyboard(first_btn_text='Авторизация'),
        disable_web_page_preview=True,
    )
5,341,410
def load_classifier(path=False):
    """
    Load the ALLSorts classifier from a pickled file.

    ...

    Parameters
    __________
    path : str
        Path to a pickle object that holds the ALLSorts model.
        Default: "/models/allsorts/allsorts.pkl.gz"

    Returns
    __________
    allsorts_clf : ALLSorts object
        ALLSorts object, unpacked, ready to go.
    """
    if not path:
        path = str(root_dir()) + "/models/allsorts/allsorts.pkl.gz"

    message("Loading classifier...")
    allsorts_clf = joblib.load(path)

    return allsorts_clf
5,341,411
def setup_auth_turing(cluster):
    """
    Set up authentication with the Turing k8s cluster on Azure.
    """
    # Read in auth info
    azure_file = os.path.join(ABSOLUTE_HERE, "secrets", "turing-auth-key-prod.json")
    with open(azure_file, "r") as stream:
        azure = json.load(stream)

    # Log in to Azure
    login_cmd = [
        "az", "login", "--service-principal",
        "--username", azure["sp-app-id"],
        "--password", azure["sp-app-key"],
        "--tenant", azure["tenant-id"],
    ]
    subprocess.check_output(login_cmd)

    # Set kubeconfig
    creds_cmd = [
        "az", "aks", "get-credentials",
        "--name", cluster,
        "--resource-group", "binder-prod",
    ]
    stdout = subprocess.check_output(creds_cmd)
    print(stdout.decode('utf-8'))
5,341,412
import time

import requests


def get_response(url: str, *, max_attempts=5) -> requests.Response:
    """Return the response.

    Tries to get the response max_attempts number of times, otherwise returns None.

    Args:
        url (str): url string to be retrieved
        max_attempts (int): number of request attempts for the same url

    E.g.,
        r = get_response(url)
        r = xmltodict.parse(r.text)  # or r = json.loads(r.text)
    """
    for _ in range(max_attempts):
        try:
            response = requests.get(url, timeout=10)
            return response
        except requests.RequestException:
            time.sleep(0.01)
    # all attempts failed: return None
    return None
5,341,413
def hamming_distance(lhs, rhs):
    """Returns the Hamming Distance of Two Equal Strings

    Usage
    >>> nt.hamming_distance('Pear','Pearls')
    """
    return len([(x, y) for x, y in zip(lhs, rhs) if x != y])
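A couple of checks that make the behaviour explicit. Note that zip() truncates to the shorter argument, so trailing characters of the longer string are ignored rather than counted as differences, which is why the docstring example with unequal-length strings returns 0.

assert hamming_distance('Pear', 'Pearls') == 0      # the 'ls' tail is ignored
assert hamming_distance('karolin', 'kathrin') == 3  # classic equal-length example
print("hamming_distance checks passed")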
5,341,414
def get_uleb128(byte_str):
    """
    Gets an unsigned LEB128 number from a byte string.

    :param byte_str: byte string
    :return: byte string, integer
    """
    uleb_parts = []
    while byte_str[0] >= 0x80:
        uleb_parts.append(byte_str[0] - 0x80)
        byte_str = byte_str[1:]
    uleb_parts.append(byte_str[0])
    byte_str = byte_str[1:]
    uleb_parts = uleb_parts[::-1]

    integer = 0
    for i in range(len(uleb_parts) - 1):
        integer = (integer + uleb_parts[i]) << 7
    integer += uleb_parts[-1]

    return byte_str, integer
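A worked example using the standard LEB128 test vector: 624485 encodes as the bytes 0xE5 0x8E 0x26, and any remaining bytes are handed back to the caller.

rest, value = get_uleb128(b'\xe5\x8e\x26\xff')
assert value == 624485   # 0xE5 0x8E 0x26 decodes to 624485
assert rest == b'\xff'   # the unconsumed tail is returned unchanged
print(value, rest)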
5,341,415
def expired_response():
    """
    Expired token callback.

    Author:
        Lucas Antognoni

    Arguments:
        Response: json
            {
                'error':   (boolean),
                'message': (str)
            }

    Response keys:
        - 'error': True.
        - 'message': Error message.
    """
    return jsonify({
        'error': True,
        'message': 'Token has expired'
    }), 401
5,341,416
def test_correct_technologiesPage(client):
    """Grab the technologies page, check for a 200 code (all OK),
    then check that the response is an HTML page.
    """
    response = client.get("/technologies")
    assert response.status_code == 200
    assert "<!DOCTYPE html>" in response.get_data(True)
5,341,417
def check_shots_vs_bounds(shot_dict, mosaic_bounds, max_out_of_bounds=3):
    """Checks whether all but *max_out_of_bounds* shots are within mosaic bounds.

    Parameters
    ----------
    shot_dict : dict
        A dictionary (see czd_utils.scancsv_to_dict()) with coordinates of all
        shots in a .scancsv file: {shot: [x_coords, y_coords], ...}
    mosaic_bounds : list
        A list of bounds to a .Align file (see get_mos_bounds()):
        [min_x, max_x, min_y, max_y]
    max_out_of_bounds : int, optional
        Max number of out-of-bounds shots allowed for a 'match' between
        mosaic and .scancsv. The default is 3.

    Returns
    -------
    Boolean
        True or False, depending on whether all but *max_out_of_bounds*
        shots are within mosaic bounds.
    """
    total_out_of_bounds = 0
    min_x, max_x, min_y, max_y = mosaic_bounds
    for eachcoords in shot_dict.values():
        if not min_x <= eachcoords[0] <= max_x or not min_y <= eachcoords[1] <= max_y:
            total_out_of_bounds += 1
    return total_out_of_bounds <= max_out_of_bounds
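A small worked example with made-up coordinates (the shot names and bounds are illustrative only): one of the three shots falls outside the bounds, which is within the default allowance of 3 but not within an allowance of 0.

shots = {'Spot 1': [10.0, 20.0], 'Spot 2': [55.0, 80.0], 'Spot 3': [999.0, 999.0]}
bounds = [0.0, 100.0, 0.0, 100.0]  # [min_x, max_x, min_y, max_y]

print(check_shots_vs_bounds(shots, bounds))                        # True  (1 out-of-bounds shot <= 3)
print(check_shots_vs_bounds(shots, bounds, max_out_of_bounds=0))   # False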
5,341,418
def wrap(func, *args, unsqueeze=False):
    """
    Wrap a torch function so it can be called with NumPy arrays.
    Input and return types are seamlessly converted.

    :param func: torch function to call
    :param args: positional arguments; NumPy arrays are converted to tensors
    :param unsqueeze: if True, add a leading batch dimension to array inputs and strip it from tensor outputs
    :return: the result of func, with any tensors converted back to NumPy arrays
    """
    # Convert input types where applicable
    args = list(args)
    for i, arg in enumerate(args):
        if type(arg) == np.ndarray:
            args[i] = torch.from_numpy(arg)
            if unsqueeze:
                args[i] = args[i].unsqueeze(0)

    result = func(*args)

    # Convert output types where applicable
    if isinstance(result, tuple):
        result = list(result)
        for i, res in enumerate(result):
            if type(res) == torch.Tensor:
                if unsqueeze:
                    res = res.squeeze(0)
                result[i] = res.numpy()
        return tuple(result)
    elif type(result) == torch.Tensor:
        if unsqueeze:
            result = result.squeeze(0)
        return result.numpy()
    else:
        return result
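A usage sketch, assuming NumPy and PyTorch are installed: a torch function is applied directly to a NumPy array and the result comes back as a NumPy array.

import numpy as np
import torch

a = np.arange(6, dtype=np.float32).reshape(2, 3)
out = wrap(torch.sigmoid, a)   # NumPy array in, NumPy array out; conversion handled by wrap()
print(type(out), out.shape)    # <class 'numpy.ndarray'> (2, 3)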
5,341,419
def iter_schemas(schema: Schema, strict_enums: bool = True) -> Iterable[Tuple[str, Any]]:
    """
    Build zero or more JSON schemas for a marshmallow schema.

    Generates: name, schema pairs.
    """
    builder = Schemas(build_parameter=build_parameter, strict_enums=strict_enums)
    return builder.iter_schemas(schema)
5,341,420
async def publish_email_message(qsm: QueueServiceManager,  # pylint: disable=redefined-outer-name
                                cloud_event_msg: dict):
    """Publish the email message onto the NATS emailer subject."""
    logger.debug('publish to queue, subject:%s, event:%s', APP_CONFIG.EMAIL_PUBLISH_OPTIONS['subject'], cloud_event_msg)
    await qsm.service.publish(subject=APP_CONFIG.EMAIL_PUBLISH_OPTIONS['subject'], msg=cloud_event_msg)
5,341,421
def test_find_datasets_save(df_datasets_ensembl):
    """Test the available datasets returned by find_datasets(save=True)
    for the default mart (ENSEMBL_MART_ENSEMBL)."""
    expect = (df_datasets_ensembl
              .sort_values(by="Dataset_ID", axis=0)
              .reset_index(drop=True))
    _ = find_datasets(save=True)
    saved = pd.read_csv("apybiomart_datasets.csv")
    result = (saved
              .replace(np.nan, "")
              .sort_values(by="Dataset_ID", axis=0)
              .reset_index(drop=True))

    try:
        assert_frame_equal(result, expect)
    finally:
        os.remove("apybiomart_datasets.csv")
5,341,422
def deserializer(serialized):
    """Example deserializer function with extra sanity checking.

    :param serialized: Serialized byte string.
    :type serialized: bytes
    :return: Deserialized job object.
    :rtype: kq.Job
    """
    assert isinstance(serialized, bytes), "Expecting a bytes"
    return dill.loads(serialized)
5,341,423
def test_max_none(capsys):
    """No output."""
    a = BST()
    assert find_maximum_value(a) is None
5,341,424
def sym_auc_score(X, y):
    """Compute the symmetric auroc score for the provided sample.

    The symmetric auroc score is defined as 2 * abs(auroc - 0.5).

    Parameters
    ----------
    X : {array-like, sparse matrix} shape = [n_samples, n_features]
        The set of regressors that will be tested sequentially.

    y : array of shape(n_samples)
        The target vector.

    Returns
    -------
    F : array, shape = [n_features,]
        The set of symmetric auroc scores, one per feature.
    """
    X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'])
    scores = np.apply_along_axis(_auc_score, 0, X, y)
    return np.abs(scores - 0.5) * 2.0
5,341,425
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


def visibility_of_element_wait(driver, xpath, timeout=10):
    """Checks whether the element specified by xpath is visible on the page.

    :param driver: webdriver instance
    :param xpath: xpath of the web element
    :param timeout: time after which looking for the element is stopped (default: 10)
    :return: the first matching element, once it is visible
    """
    timeout_message = f"Element for xpath: '{xpath}' and url: {driver.current_url} not found in {timeout} seconds"
    locator = (By.XPATH, xpath)
    element_located = EC.visibility_of_element_located(locator)
    wait = WebDriverWait(driver, timeout)
    return wait.until(element_located, timeout_message)
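A hypothetical usage sketch; the URL and XPath are illustrative only, and a Chrome driver is assumed to be available on the machine.

from selenium import webdriver

driver = webdriver.Chrome()
driver.get("https://example.com")
heading = visibility_of_element_wait(driver, "//h1", timeout=5)
print(heading.text)
driver.quit()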
5,341,426
def _read_byte(stream):
    """Read byte from stream"""
    read_byte = stream.read(1)
    if not read_byte:
        raise Exception('No more bytes!')
    return ord(read_byte)
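A quick check with an in-memory stream:

import io

stream = io.BytesIO(b'\x01\x02')
print(_read_byte(stream))  # 1
print(_read_byte(stream))  # 2
# a third call would raise Exception('No more bytes!')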
5,341,427
def statistic_xls(request): """Генерация XLS""" from directions.models import Issledovaniya import xlwt from collections import OrderedDict wb = xlwt.Workbook(encoding='utf-8') response = HttpResponse(content_type='application/ms-excel') request_data = request.POST if request.method == "POST" else request.GET pk = request_data.get("pk", "") tp = request_data.get("type", "") date_start_o = request_data.get("date-start", "") date_end_o = request_data.get("date-end", "") users_o = request_data.get("users", "[]") user_o = request_data.get("user") date_values_o = request_data.get("values", "{}") date_type = request_data.get("date_type", "d") depart_o = request_data.get("department") if tp == 'lab' and pk == '0': tp = 'all-labs' symbols = (u"абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ", u"abvgdeejzijklmnoprstufhzcss_y_euaABVGDEEJZIJKLMNOPRSTUFHZCSS_Y_EUA") # Словарь для транслитерации tr = {ord(a): ord(b) for a, b in zip(*symbols)} # Перевод словаря для транслита borders = xlwt.Borders() borders.left = xlwt.Borders.THIN borders.right = xlwt.Borders.THIN borders.top = xlwt.Borders.THIN borders.bottom = xlwt.Borders.THIN if "-" in date_start_o: date_start_o = normalize_date(date_start_o) date_end_o = normalize_date(date_end_o) date_start, date_end = try_parse_range(date_start_o, date_end_o) if date_start and date_end and tp not in ["lab_sum", "covid_sum", "lab_details"]: delta = date_end - date_start if abs(delta.days) > 60: slog.Log(key=tp, type=101, body=json.dumps({"pk": pk, "date": {"start": date_start_o, "end": date_end_o}}), user=request.user.doctorprofile).save() return JsonResponse({"error": "period max - 60 days"}) if date_start_o != "" and date_end_o != "": slog.Log(key=tp, type=100, body=json.dumps({"pk": pk, "date": {"start": date_start_o, "end": date_end_o}}), user=request.user.doctorprofile).save() # Отчет по динамике анализов if tp == "directions_list_dynamic": pk = json.loads(pk) dn = Napravleniya.objects.filter(pk__in=pk) cards = {} napr_client = set() depart_napr = OrderedDict() depart_fraction = OrderedDict() one_param = "one_param" for d in dn: if d.department() is None or d.department().p_type != 2: continue c = d.client napr_client.add(c.pk) # Проверить, что все направления относятся к одной карте. И тип "Лаборатория" if len(napr_client) > 1: response['Content-Disposition'] = str.translate("attachment; filename=\"Назначения.xls\"", tr) ws = wb.add_sheet("Вакцинация") row_num = 0 row = [ ("Пациент", 7000), ("Карта", 6000), ("Направление", 4000), ("Дата", 4000), ("Назначение", 7000), ] wb.save(response) return response # Распределить направления по подразделениям: "depart_napr" # {БИО:[напр1, напр2, напр3], КДЛ: [напр11, напр21, напр31], ИММ: [напр41, напр42, напр43]} tmp_num_dir = [] department_title = d.department().id department_id = d.department().id if department_title in depart_napr.keys(): tmp_num_dir = depart_napr.get(department_title) tmp_num_dir.append(d.pk) depart_napr[department_title] = tmp_num_dir else: tmp_num_dir.append(d.pk) depart_napr[department_title] = tmp_num_dir # По исследованиям строим структуру "depart_fraction": # Будущие заголовки в Excel. Те исследования у, к-рых по одной фракции в общий подсловарь, # у к-рых больше одного показателя (фракции) в самостоятельные подсловари. Выборка из справочника, НЕ из "Результатов" # пример стр-ра: {биохим: {услуги, имеющие по 1 фракции:[фр1-усл1, фр2-усл2, фр3-усл3], # усл1:[фр1, фр2, фр3],усл2:[фр1, фр2, фр3], # усл2:[фр1, фр2, фр3],усл2:[фр1, фр2, фр3]} # порядок фракций "По весу". 
one_param_temp = OrderedDict() for i in Issledovaniya.objects.filter(napravleniye=d): dict_research_fraction = OrderedDict() research_iss = i.research_id dict_research_fraction = { p: str(t) + ',' + str(u) for p, t, u in directory.Fractions.objects.values_list('pk', 'title', 'units').filter(research=i.research).order_by("sort_weight") } if depart_fraction.get(department_id) is not None: if len(dict_research_fraction.keys()) == 1: one_param_temp = depart_fraction[department_id][one_param] one_param_temp.update(dict_research_fraction) depart_fraction[department_id].update({one_param: one_param_temp}) else: depart_fraction[department_id].update({research_iss: dict_research_fraction}) else: depart_fraction.update({department_id: {}}) if len(dict_research_fraction) == 1: depart_fraction[department_id].update({one_param: dict_research_fraction}) else: depart_fraction[department_id].update({research_iss: dict_research_fraction}) depart_fraction[department_id].update({one_param: {}}) # Все возможные анализы в направлениях - стр-ра А # направления по лабораториям (тип лаборатории, [номера направлений]) obj = [] for type_lab, l_napr in depart_napr.items(): a = [ [p, r, n, datetime.datetime.strftime(utils.localtime(t), "%d.%m.%y")] for p, r, n, t in Issledovaniya.objects.values_list('pk', 'research_id', 'napravleniye_id', 'time_confirmation').filter(napravleniye_id__in=l_napr) ] obj.append(a) for i in obj: for j in i: result_k = {fr_id: val for fr_id, val in Result.objects.values_list('fraction', 'value').filter(issledovaniye_id=j[0])} j.append(result_k) finish_obj = [] for i in obj: for j in i: j.pop(0) finish_obj.append(j) # Строим стр-ру {тип лаборатория: id-анализа:{(направление, дата):{id-фракции:результат,id-фракции:результат}}} finish_ord = OrderedDict() for t_lab, name_iss in depart_fraction.items(): finish_ord[t_lab] = {} for iss_id, fract_dict in name_iss.items(): if fract_dict: frac = True else: frac = False finish_ord[t_lab][iss_id] = {} opinion_dict = { ( 'напр', 'дата', ): fract_dict } val_dict = fract_dict.copy() finish_ord[t_lab][iss_id].update(opinion_dict) for k, v in fract_dict.items(): val_dict[k] = '' # Строим стр-ру {id-анализа:{(направление, дата,):{id-фракции:результат,id-фракции:результат}}} # one_param - это анализы у которых несколько параметров-фракции (ОАК, ОАМ) if iss_id != 'one_param' or iss_id != '' or iss_id is not None: for d in finish_obj: tmp_dict = {} if iss_id == d[0]: for i, j in d[3].items(): val_dict[i] = j tmp_dict[ ( d[1], d[2], ) ] = deepcopy(val_dict) finish_ord[t_lab][iss_id].update(tmp_dict) # Строим стр-ру {one_param:{(направление, дата,):{id-фракции:результат,id-фракции:результат}}} # one_param - это анализы у которых только один параметр-фракции (холестерин, глюкоза и др.) 
key_tuple = ( ( 0, 0, ), ) if iss_id == 'one_param' and frac: tmp_dict = {} for d in finish_obj: if key_tuple != ( d[1], d[2], ): for k, v in fract_dict.items(): val_dict[k] = '' for u, s in val_dict.items(): if d[3].get(u): val_dict[u] = d[3].get(u) tmp_dict[ ( d[1], d[2], ) ] = deepcopy(val_dict) key_tuple = ( d[1], d[2], ) finish_ord[t_lab][iss_id].update(tmp_dict) response['Content-Disposition'] = str.translate("attachment; filename=\"Назначения.xls\"", tr) font_style = xlwt.XFStyle() font_style.alignment.wrap = 1 font_style.borders = borders font_style_b = xlwt.XFStyle() font_style_b.alignment.wrap = 1 font_style_b.font.bold = True font_style_b.borders = borders ws = wb.add_sheet("Динамика") row_num = 0 for k, v in finish_ord.items(): col_num = 0 ws.write(row_num, 0, label=Podrazdeleniya.objects.values_list('title').get(pk=k)) row_num += 1 col_num = 0 for name_iss, fr_id in v.items(): if name_iss != 'one_param': ws.write(row_num, 0, label=Researches.objects.values_list('title').get(pk=name_iss)) else: ws.write(row_num, 0, label=name_iss) row_num += 1 a, b = '', '' for i, j in fr_id.items(): col_num = 0 a, b = i ws.write(row_num, col_num, label=a) col_num += 1 ws.write(row_num, col_num, label=b) ss = '' for g, h in j.items(): col_num += 1 ss = str(h) ws.write(row_num, col_num, label=ss) row_num += 1 col_num += 1 row_num += 1 row_num += 1 if tp == "directions_list": pk = json.loads(pk) dn = Napravleniya.objects.filter(pk__in=pk) cards = {} for d in dn: c = d.client if c.pk not in cards: cards[c.pk] = { "card": c.number_with_type(), "fio": c.individual.fio(), "bd": c.individual.bd(), "hn": d.history_num, "d": {}, } cards[c.pk]["d"][d.pk] = { "r": [], "dn": str(dateformat.format(d.data_sozdaniya.date(), settings.DATE_FORMAT)), } for i in Issledovaniya.objects.filter(napravleniye=d): cards[c.pk]["d"][d.pk]["r"].append( { "title": i.research.title, } ) response['Content-Disposition'] = str.translate("attachment; filename=\"Назначения.xls\"", tr) font_style = xlwt.XFStyle() font_style.alignment.wrap = 1 font_style.borders = borders font_style_b = xlwt.XFStyle() font_style_b.alignment.wrap = 1 font_style_b.font.bold = True font_style_b.borders = borders ws = wb.add_sheet("Вакцинация") row_num = 0 row = [ ("Пациент", 7000), ("Карта", 6000), ("Направление", 4000), ("Дата", 4000), ("Назначение", 7000), ] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num][0], font_style_b) ws.col(col_num).width = row[col_num][1] row_num += 1 for ck in cards.keys(): c = cards[ck] started = False for dk in c["d"].keys(): if not started: row = [ "{} {}".format(c["fio"], c["bd"]), c["card"], ] started = True else: row = ["", ""] s2 = False for r in c["d"][dk]["r"]: if not s2: s2 = True row.append(str(dk)) row.append(c["d"][dk]["dn"]) else: row.append("") row.append("") row.append("") row.append("") row.append(r["title"]) for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) row_num += 1 row = [] if tp == "statistics-visits": date_start, date_end = try_parse_range(date_start_o, date_end_o) t = request.GET.get("t", "sum") fio = request.user.doctorprofile.get_full_fio() dep = request.user.doctorprofile.podrazdeleniye.get_title() dirs = Napravleniya.objects.filter( visit_date__range=( date_start, date_end, ), visit_who_mark=request.user.doctorprofile, ).order_by("visit_date") if t == "sum": response['Content-Disposition'] = str.translate("attachment; filename=\"Суммарный отчёт по посещениям.xls\"", tr) font_style = xlwt.XFStyle() font_style.alignment.wrap = 1 
font_style.borders = borders font_style_b = xlwt.XFStyle() font_style_b.alignment.wrap = 1 font_style_b.font.bold = True font_style_b.borders = borders ws = wb.add_sheet("Посещения") row_num = 0 row = [ (fio, 7000), (dep, 7000), ("", 3000), ] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num][0], font_style) ws.col(col_num).width = row[col_num][1] row_num += 1 row = [ date_start_o + " - " + date_end_o, "", "", ] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) row_num += 1 row = [ "", "", "", ] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style if col_num > 0 else font_style_b) row_num += 1 row = [ "Услуга", "Источник финансирования", "Количество", ] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style_b) row_num += 1 iss = {} for d in dirs: for i in Issledovaniya.objects.filter(napravleniye=d).order_by("research__title").order_by("napravleniye__istochnik_f"): rt = i.research.title istf = i.napravleniye.istochnik_f.base.title + " - " + i.napravleniye.fin_title if rt not in iss: iss[rt] = {} if istf not in iss[rt]: iss[rt][istf] = 0 iss[rt][istf] += 1 for k in iss: for istf in iss[k]: row = [ k, istf, iss[k][istf], ] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) row_num += 1 elif tp == "vac": date_start, date_end = try_parse_range(date_start_o, date_end_o) response['Content-Disposition'] = str.translate("attachment; filename=\"Вакцинация.xls\"", tr) font_style = xlwt.XFStyle() font_style.alignment.wrap = 1 font_style.borders = borders font_style_b = xlwt.XFStyle() font_style_b.alignment.wrap = 1 font_style_b.font.bold = True font_style_b.borders = borders ts = ["Название", "Доза", "Серия", "Срок годности", "Способ введения", "Дата постановки вакцины"] ws = wb.add_sheet("Вакцинация") row_num = 0 row = [("Исполнитель", 6000), ("Подтверждено", 5000), ("RMIS UID", 5000), ("Вакцина", 5000), ("Код", 4000)] for t in ts: row.append((t, 4000)) row.append(("Этап", 2500)) for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num][0], font_style_b) ws.col(col_num).width = row[col_num][1] row_num += 1 for i in Issledovaniya.objects.filter( research__podrazdeleniye__vaccine=True, time_confirmation__range=( date_start, date_end, ), ).order_by("time_confirmation"): if i.napravleniye: row = [ i.doc_confirmation_fio, i.time_confirmation.astimezone(pytz.timezone(settings.TIME_ZONE)).strftime("%d.%m.%Y %X"), i.napravleniye.client.individual.get_rmis_uid_fast(), i.research.title, i.research.code, ] else: continue v = {} for p in ParaclinicResult.objects.filter(issledovaniye=i): field_type = p.get_field_type() if p.field.get_title(force_type=field_type) in ts: if field_type == 1: v_date = p.value.replace("-", ".") v[p.field.get_title(force_type=field_type)] = v_date else: v[p.field.get_title(force_type=field_type)] = p.value for t in ts: row.append(v.get(t, "")) row.append("V") for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) row_num += 1 elif tp == "statistics-tickets-print": data_date = request_data.get("date_values") data_date = json.loads(data_date) if request_data.get("date_type") == 'd': d1 = datetime.datetime.strptime(data_date['date'], '%d.%m.%Y') d2 = datetime.datetime.strptime(data_date['date'], '%d.%m.%Y') month_obj = '' else: month_obj = int(data_date['month']) + 1 _, num_days = calendar.monthrange(int(data_date['year']), month_obj) d1 = datetime.date(int(data_date['year']), month_obj, 1) d2 = 
datetime.date(int(data_date['year']), month_obj, num_days) type_fin = request_data.get("fin") title_fin = IstochnikiFinansirovaniya.objects.filter(pk=type_fin).first() if title_fin.title == 'ОМС' and title_fin.base.internal_type: can_null = 1 else: can_null = 0 users_o = json.loads(user_o) us_o = None if users_o != -1: us = int(users_o) us_o = [DoctorProfile.objects.get(pk=us)] elif depart_o != -1: depart = Podrazdeleniya.objects.get(pk=depart_o) us_o = DoctorProfile.objects.filter(podrazdeleniye=depart) wb = openpyxl.Workbook() wb.remove(wb.get_sheet_by_name('Sheet')) styles_obj = structure_sheet.style_sheet() wb.add_named_style(styles_obj[0]) start_date = datetime.datetime.combine(d1, datetime.time.min) end_date = datetime.datetime.combine(d2, datetime.time.max) # Проверить, что роль у объекта Врач-Лаборант, или Лаборант, или Врач параклиники, или Лечащий врач if us_o: for i in us_o: if i.is_member(["Лечащий врач", "Врач-лаборант", "Врач параклиники", "Лаборант", "Врач консультаций"]): res_oq = sql_func.direct_job_sql(i.pk, start_date, end_date, type_fin, can_null) res_job = sql_func.indirect_job_sql(i.pk, start_date, end_date) if res_job: ws = wb.create_sheet(f'{i.get_fio()}-Косвенные') ws = structure_sheet.inderect_job_base(ws, i, d1, d2) dict_job = {} for r_j in res_job: key_type_job = r_j[1] key_date = utils.strfdatetime(r_j[0], "%d.%m.%Y") value_total = r_j[2] temp_dict = dict_job.get(key_date, {}) temp_dict.update({key_type_job: value_total}) dict_job[key_date] = temp_dict structure_sheet.inderect_job_data(ws, dict_job) ws = wb.create_sheet(i.get_fio()) ws = structure_sheet.statistics_tickets_base(ws, i, type_fin, d1, d2, styles_obj[0], styles_obj[1]) ws = structure_sheet.statistics_tickets_data(ws, res_oq, i, styles_obj[2]) if month_obj: # issledovaniye_id(0), research_id(1), date_confirm(2), doc_confirmation_id(3), def_uet(4), # co_executor_id(5), co_executor_uet(6), co_executor2_id(7), co_executor2_uet(8), research_id(9), # research_title(10), research - co_executor_2_title(11) # строим стр-ру {дата:{наименование анализа:УЕТ за дату, СО2:УЕТ за дату}} total_report_dict = OrderedDict() r_sql = sql_func.total_report_sql(i.pk, start_date, end_date, type_fin) titles_set = OrderedDict() for n in r_sql: titles_set[n[10]] = '' titles_set[n[11]] = '' temp_uet, temp_uet2 = 0, 0 if i.pk == n[3]: temp_uet = n[4] if n[4] else 0 if i.pk == n[5] and n[5] != n[3]: temp_uet = n[6] if n[6] else 0 if i.pk == n[7]: temp_uet2 = n[8] if n[8] else 0 # попытка получить значения за дату if total_report_dict.get(n[2]): temp_d = total_report_dict.get(n[2]) # попытка получить такие же анализы current_uet = temp_d.get(n[10], 0) current_uet2 = temp_d.get(n[11], 0) current_uet = current_uet + temp_uet current_uet2 = current_uet2 + temp_uet2 temp_dict = {n[10]: current_uet, n[11]: current_uet2} total_report_dict[int(n[2])].update(temp_dict) else: total_report_dict[int(n[2])] = {n[10]: temp_uet, n[11]: temp_uet2} titles_list = list(titles_set.keys()) ws = wb.create_sheet(i.get_fio() + ' - Итог') ws = structure_sheet.job_total_base(ws, month_obj, type_fin) ws, cell_research = structure_sheet.jot_total_titles(ws, titles_list) ws = structure_sheet.job_total_data(ws, cell_research, total_report_dict) response['Content-Disposition'] = str.translate("attachment; filename=\"Статталоны.xlsx\"", tr) wb.save(response) return response elif tp == "statistics-passed": d_s = request_data.get("date-start") d_e = request_data.get("date-end") d1 = datetime.datetime.strptime(d_s, '%d.%m.%Y') d2 = datetime.datetime.strptime(d_e, 
'%d.%m.%Y') start_date = datetime.datetime.combine(d1, datetime.time.min) end_date = datetime.datetime.combine(d2, datetime.time.max) passed_oq = sql_func.passed_research(start_date, end_date) wb = openpyxl.Workbook() wb.remove(wb.get_sheet_by_name('Sheet')) ws = wb.create_sheet(f'{d_s}-{d_e}') ws = structure_sheet.passed_research_base(ws, d_s) ws = structure_sheet.passed_research_data(ws, passed_oq) response['Content-Disposition'] = str.translate("attachment; filename=\"Движения.xlsx\"", tr) wb.save(response) return response elif tp == "call-patient": return call_patient.call_patient(request_data, response, tr, COVID_QUESTION_ID) elif tp == "swab-covidt": return swab_covid.swab_covid(request_data, response, tr, COVID_QUESTION_ID) elif tp == "cert-not-workt": return cert_notwork.cert_notwork(request_data, response, tr, COVID_QUESTION_ID) elif tp == "statistics-onco": d_s = request_data.get("date-start") d_e = request_data.get("date-end") d1 = datetime.datetime.strptime(d_s, '%d.%m.%Y') d2 = datetime.datetime.strptime(d_e, '%d.%m.%Y') start_date = datetime.datetime.combine(d1, datetime.time.min) end_date = datetime.datetime.combine(d2, datetime.time.max) onco_query = sql_func.disp_diagnos('U999', start_date, end_date) wb = openpyxl.Workbook() wb.remove(wb.get_sheet_by_name('Sheet')) ws = wb.create_sheet(f'{d_s}-{d_e}') ws = structure_sheet.onco_base(ws, d_s, d_e) ws = structure_sheet.passed_onco_data(ws, onco_query) response['Content-Disposition'] = str.translate("attachment; filename=\"Онкоподозрения.xlsx\"", tr) wb.save(response) return response elif tp == "statistics-research": response['Content-Disposition'] = str.translate("attachment; filename=\"Услуги.xlsx\"", tr) pk = request_data.get("research") user_groups = request.user.groups.values_list('name', flat=True) research_id = int(pk) data_date = request_data.get("date_values") data_date = json.loads(data_date) if request_data.get("date_type") == 'd': d1 = datetime.datetime.strptime(data_date['date'], '%d.%m.%Y') d2 = datetime.datetime.strptime(data_date['date'], '%d.%m.%Y') month_obj = '' else: month_obj = int(data_date['month']) + 1 _, num_days = calendar.monthrange(int(data_date['year']), month_obj) d1 = datetime.date(int(data_date['year']), month_obj, 1) d2 = datetime.date(int(data_date['year']), month_obj, num_days) wb = openpyxl.Workbook() wb.remove(wb.get_sheet_by_name('Sheet')) ws = wb.create_sheet("Отчет") research_title = Researches.objects.values_list('title').get(pk=research_id) start_date = datetime.datetime.combine(d1, datetime.time.min) end_date = datetime.datetime.combine(d2, datetime.time.max) hospital_id = request.user.doctorprofile.hospital_id if 'Статистика-все МО' in user_groups: hospital_id = -1 if research_id == DEATH_RESEARCH_PK: if 'Свидетельство о смерти-доступ' not in user_groups: return JsonResponse({"error": "Нет доступа к данному отчету"}) if 'Статистика свидетельство о смерти-все МО' in user_groups: hospital_id = -1 researches_sql = sql_func.statistics_death_research(research_id, start_date, end_date, hospital_id) unique_issledovaniya = get_unique_directions(researches_sql) child_iss = get_expertis_child_iss_by_issledovaniya(unique_issledovaniya) if unique_issledovaniya else None expertise_final_data = {} if child_iss: data = {i.child_iss: i.parent_id for i in child_iss} child_iss_tuple = tuple(set([i.child_iss for i in child_iss])) result_expertise = get_expertis_results_by_issledovaniya(child_iss_tuple) result_val = {} for i in result_expertise: if not result_val.get(i.issledovaniye_id, ""): 
result_val[i.issledovaniye_id] = "Экспертиза;" if i.value.lower() == "да": result_val[i.issledovaniye_id] = f"{result_val[i.issledovaniye_id]} {i.title};" for k, v in result_val.items(): if not expertise_final_data.get(data.get(k, "")): expertise_final_data[data.get(k)] = "" expertise_final_data[data.get(k)] = f"{expertise_final_data[data.get(k)]} {v}" data_death = death_form_result_parse(researches_sql, reserved=False) wb.remove(wb.get_sheet_by_name('Отчет')) ws = wb.create_sheet("По документам") ws = structure_sheet.statistic_research_death_base(ws, d1, d2, research_title[0]) ws = structure_sheet.statistic_research_death_data(ws, data_death, expertise_final_data) reserved_researches_sql = sql_func.statistics_reserved_number_death_research(research_id, start_date, end_date, hospital_id) data_death_reserved = death_form_result_parse(reserved_researches_sql, reserved=True) ws2 = wb.create_sheet("Номера в резерве") ws2 = structure_sheet.statistic_reserved_research_death_base(ws2, d1, d2, research_title[0]) ws2 = structure_sheet.statistic_reserved_research_death_data(ws2, data_death_reserved) card_has_death_date = sql_func.card_has_death_date(research_id, start_date, end_date) card_tuple = tuple(set([i.id for i in card_has_death_date])) if card_tuple: temp_data = sql_func.statistics_death_research_by_card(research_id, card_tuple, hospital_id) prev_card = None prev_direction = None final_data = [] count = 0 for k in temp_data: if k.client_id == prev_card and prev_direction != k.napravleniye_id and count != 0: continue else: final_data.append(k) prev_card = k.client_id prev_direction = k.napravleniye_id count += 1 data_death_card = death_form_result_parse(final_data, reserved=False) ws3 = wb.create_sheet("По людям") ws3 = structure_sheet.statistic_research_death_base_card(ws3, d1, d2, research_title[0]) ws3 = structure_sheet.statistic_research_death_data_card(ws3, data_death_card) else: ws = structure_sheet.statistic_research_base(ws, d1, d2, research_title[0]) researches_sql = sql_func.statistics_research(research_id, start_date, end_date, hospital_id) ws = structure_sheet.statistic_research_data(ws, researches_sql) elif tp == "journal-get-material": access_to_all = 'Просмотр статистики' in request.user.groups.values_list('name', flat=True) or request.user.is_superuser users = [x for x in json.loads(users_o) if (access_to_all or (x.isdigit() and int(x) == request.user.doctorprofile.pk)) and DoctorProfile.objects.filter(pk=x).exists()] date_values = json.loads(date_values_o) monthes = { "0": "Январь", "1": "Февраль", "2": "Март", "3": "Апрель", "4": "Май", "5": "Июнь", "6": "Июль", "7": "Август", "8": "Сентябрь", "9": "Октябрь", "10": "Ноябрь", "11": "Декабрь", } date_values["month_title"] = monthes[date_values["month"]] response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_Забор_биоматериала.xls\"", tr) font_style = xlwt.XFStyle() font_style.alignment.wrap = 1 font_style.borders = borders font_style_b = xlwt.XFStyle() font_style_b.alignment.wrap = 1 font_style_b.font.bold = True font_style_b.borders = borders for user_pk in users: user_row = DoctorProfile.objects.get(pk=user_pk) ws = wb.add_sheet("{} {}".format(user_row.get_fio(dots=False), user_pk)) row_num = 0 row = [("Исполнитель: ", 4000), (user_row.get_full_fio(), 7600)] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num][0], font_style) ws.col(col_num).width = row[col_num][1] row_num += 1 row = ["Подразделение: ", user_row.podrazdeleniye.title] for col_num in range(len(row)): 
ws.write(row_num, col_num, row[col_num], font_style) row_num += 1 row = ["Дата: ", date_values["date"] if date_type == "d" else "{month_title} {year}".format(**date_values)] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) daterow = row_num row_num += 3 row = [ ("№", 4000), ("ФИО", 7600), ("Возраст", 3000), ("Карта", 6000), ("Число направлений", 5000), ("Номера направлений", 6000), ("Наименования исследований", 20000), ] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num][0], font_style_b) ws.col(col_num).width = row[col_num][1] row_num += 1 if date_type == "d": day = date_values.get("date", "01.01.2015") day1 = datetime.date(int(day.split(".")[2]), int(day.split(".")[1]), int(day.split(".")[0])) day2 = day1 + datetime.timedelta(days=1) elif date_type == "m": month = int(date_values.get("month", "0")) + 1 next_m = month + 1 if month < 12 else 1 year = int(date_values.get("year", "2015")) next_y = year + 1 if next_m == 1 else year day1 = datetime.date(year, month, 1) day2 = datetime.date(next_y, next_m, 1) else: day1 = day2 = timezone.now() iss_list = ( Issledovaniya.objects.filter(tubes__doc_get=user_row, tubes__time_get__isnull=False, tubes__time_get__range=(day1, day2)) .order_by("napravleniye__client__individual__patronymic", "napravleniye__client__individual__name", "napravleniye__client__individual__family") .distinct() ) patients = {} for iss in iss_list: k = iss.napravleniye.client.individual_id if k not in patients: client = iss.napravleniye.client.individual patients[k] = {"fio": client.fio(short=True, dots=True), "age": client.age_s(direction=iss.napravleniye), "directions": [], "researches": [], "cards": []} if iss.napravleniye_id not in patients[k]["directions"]: patients[k]["directions"].append(iss.napravleniye_id) kn = iss.napravleniye.client.number_with_type() if kn not in patients[k]["cards"]: patients[k]["cards"].append(kn) patients[k]["researches"].append(iss.research.title) n = 0 for p_pk in patients: n += 1 row = [ str(n), patients[p_pk]["fio"], patients[p_pk]["age"], ", ".join(patients[p_pk]["cards"]), len(patients[p_pk]["directions"]), ", ".join([str(x) for x in patients[p_pk]["directions"]]), ", ".join(patients[p_pk]["researches"]), ] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) row_num += 1 row = ["Число пациентов: ", str(len(patients))] for col_num in range(len(row)): ws.write(daterow + 1, col_num, row[col_num], font_style) elif tp == "lab": lab = Podrazdeleniya.objects.get(pk=int(pk)) response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_Лаборатория_{}_{}-{}.xls\"".format(lab.title.replace(" ", "_"), date_start_o, date_end_o), tr) import directions.models as d from operator import itemgetter date_start, date_end = try_parse_range(date_start_o, date_end_o) for card_base in list(CardBase.objects.filter(hide=False)) + [None]: cb_title = "Все базы" if not card_base else card_base.short_title for finsource in list(IstochnikiFinansirovaniya.objects.filter(base=card_base)) + [False]: finsource_title = "Все источники" if isinstance(finsource, IstochnikiFinansirovaniya): finsource_title = finsource.title ws = wb.add_sheet(cb_title + " " + finsource_title + " выполн.") font_style = xlwt.XFStyle() font_style.borders = borders row_num = 0 row = ["Период: ", "{0} - {1}".format(date_start_o, date_end_o)] for col_num in range(len(row)): if col_num == 0: ws.write(row_num, col_num, row[col_num], font_style) else: ws.write_merge(row_num, row_num, col_num, 
col_num + 2, row[col_num], style=font_style) row_num += 1 font_style = xlwt.XFStyle() font_style.borders = borders row = [(lab.title, 16000)] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num][0], font_style) ws.col(col_num).width = row[col_num][1] ws.write(row_num, col_num + 1, "", font_style) row_num = 2 row = ["Выполнено исследований", cb_title + " " + finsource_title] for col_num in range(len(row)): if col_num == 0: ws.write(row_num, col_num, row[col_num], font_style) else: ws.write_merge(row_num, row_num, col_num, col_num + 1, row[col_num], style=font_style) font_style = xlwt.XFStyle() font_style.alignment.wrap = 1 font_style.borders = borders pki = int(pk) otds = {pki: defaultdict(lambda: 0)} otds_pat = {pki: defaultdict(lambda: 0)} ns = 0 for obj in directory.Researches.objects.filter(podrazdeleniye__pk=lab.pk): if finsource is not False: iss_list = Issledovaniya.objects.filter( research__pk=obj.pk, time_confirmation__isnull=False, time_confirmation__range=(date_start, date_end), napravleniye__istochnik_f=finsource ) elif card_base: iss_list = Issledovaniya.objects.filter( research__pk=obj.pk, time_confirmation__isnull=False, time_confirmation__range=(date_start, date_end), napravleniye__istochnik_f__base=card_base ) else: iss_list = Issledovaniya.objects.filter(research__pk=obj.pk, time_confirmation__isnull=False, time_confirmation__range=(date_start, date_end)) iss_list = iss_list.filter(napravleniye__isnull=False) for researches in iss_list: n = False for x in d.Result.objects.filter(issledovaniye=researches): x = x.value.lower().strip() n = any([y in x for y in ["забор", "тест", "неправ", "ошибк", "ошибочный", "кров", "брак", "мало", "недостаточно", "реактив"]]) or x == "-" if n: break if n: continue if researches.napravleniye: otd_pk = "external-" + str(researches.napravleniye.imported_org_id) if not researches.napravleniye.doc else researches.napravleniye.doc.podrazdeleniye_id else: otd_pk = "empty" if otd_pk not in otds: otds[otd_pk] = defaultdict(lambda: 0) otds[otd_pk][obj.pk] += 1 otds[pki][obj.pk] += 1 if any([x.get_is_norm()[0] == "normal" for x in researches.result_set.all()]): continue if otd_pk not in otds_pat: otds_pat[otd_pk] = defaultdict(lambda: 0) otds_pat[otd_pk][obj.pk] += 1 otds_pat[pki][obj.pk] += 1 style = xlwt.XFStyle() style.borders = borders font = xlwt.Font() font.bold = True style.font = font otd_local_keys = [x for x in otds.keys() if isinstance(x, int)] otd_external_keys = [int(x.replace("external-", "")) for x in otds.keys() if isinstance(x, str) and "external-" in x and x != "external-None"] for otdd in ( list(Podrazdeleniya.objects.filter(pk=pki)) + list(Podrazdeleniya.objects.filter(pk__in=[x for x in otd_local_keys if x != pki])) + list(RMISOrgs.objects.filter(pk__in=otd_external_keys)) ): row_num += 2 row = [ otdd.title if otdd.pk != pki else "Сумма по всем отделениям", "" if otdd.pk != pki else "Итого", ] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], style=style) rows = [] ok = otds.get(otdd.pk, otds.get("external-{}".format(otdd.pk), {})) for obj in directory.Researches.objects.filter(pk__in=[x for x in ok.keys()]): row = [ obj.title, ok[obj.pk], ] rows.append(row) ns += 1 for row in sorted(rows, key=itemgetter(0)): row_num += 1 for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) ws_pat = wb.add_sheet(cb_title + " " + finsource_title + " паталог.") row_num = 0 row = ["Период: ", "{0} - {1}".format(date_start_o, date_end_o)] for col_num in range(len(row)): if 
col_num == 0: ws_pat.write(row_num, col_num, row[col_num], font_style) else: ws_pat.write_merge(row_num, row_num, col_num, col_num + 2, row[col_num], style=font_style) row_num = 1 row = [ (lab.title, 16000), ] for col_num in range(len(row)): ws_pat.write(row_num, col_num, row[col_num][0], font_style) ws_pat.col(col_num).width = row[col_num][1] ws_pat.write(row_num, col_num + 1, "", font_style) font_style = xlwt.XFStyle() font_style.borders = borders row_num = 2 row = ["Паталогии", cb_title + " " + finsource_title] for col_num in range(len(row)): if col_num == 0: ws_pat.write(row_num, col_num, row[col_num], font_style) else: ws_pat.write_merge(row_num, row_num, col_num, col_num + 1, row[col_num], style=font_style) otd_local_keys = [x for x in otds_pat.keys() if isinstance(x, int)] otd_external_keys = [int(x.replace("external-", "")) for x in otds_pat.keys() if isinstance(x, str) and "external-" in x] for otdd in ( list(Podrazdeleniya.objects.filter(pk=pki)) + list(Podrazdeleniya.objects.filter(pk__in=[x for x in otd_local_keys if x != pki])) + list(RMISOrgs.objects.filter(pk__in=otd_external_keys)) ): row_num += 2 row = [ otdd.title, "" if otdd.pk != pki else "Итого", ] for col_num in range(len(row)): ws_pat.write(row_num, col_num, row[col_num], style=style) rows = [] ok = otds_pat.get(otdd.pk, otds_pat.get("external-{}".format(otdd.pk), {})) for obj in directory.Researches.objects.filter(pk__in=[x for x in otds_pat.get(otdd.pk, ok.keys())]): row = [ obj.title, ok[obj.pk], ] rows.append(row) for row in sorted(rows, key=itemgetter(0)): row_num += 1 for col_num in range(len(row)): ws_pat.write(row_num, col_num, row[col_num], font_style) if ns == 0: ws.sheet_visible = False ws_pat.sheet_visible = False elif tp == "lab_sum": response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_Лаборатория_Колво_{}-{}.xls\"".format(date_start_o, date_end_o), tr) wb = openpyxl.Workbook() wb.remove(wb.get_sheet_by_name('Sheet')) ws = wb.create_sheet("Кол-во по лаборатории") d1 = datetime.datetime.strptime(date_start_o, '%d.%m.%Y') d2 = datetime.datetime.strptime(date_end_o, '%d.%m.%Y') start_date = datetime.datetime.combine(d1, datetime.time.min) end_date = datetime.datetime.combine(d2, datetime.time.max) lab_podr = get_lab_podr() lab_podr = tuple([i[0] for i in lab_podr]) researches_by_sum = sql_func.statistics_sum_research_by_lab(lab_podr, start_date, end_date) ws = structure_sheet.statistic_research_by_sum_lab_base(ws, d1, d2, "Кол-во по лабораториям") ws = structure_sheet.statistic_research_by_sum_lab_data(ws, researches_by_sum) elif tp == "lab_details": response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_Лаборатория_детали_{}-{}.xls\"".format(date_start_o, date_end_o), tr) wb = openpyxl.Workbook() wb.remove(wb.get_sheet_by_name('Sheet')) ws = wb.create_sheet("Детали по лаборатории") d1 = datetime.datetime.strptime(date_start_o, '%d.%m.%Y') d2 = datetime.datetime.strptime(date_end_o, '%d.%m.%Y') start_date = datetime.datetime.combine(d1, datetime.time.min) end_date = datetime.datetime.combine(d2, datetime.time.max) lab_podr = get_lab_podr() lab_podr = tuple([i[0] for i in lab_podr]) researches_deatails = sql_func.statistics_details_research_by_lab(lab_podr, start_date, end_date) ws = structure_sheet.statistic_research_by_details_lab_base(ws, d1, d2, "Детали по лаборатории") ws = structure_sheet.statistic_research_by_details_lab_data(ws, researches_deatails) elif tp == "covid_sum": response['Content-Disposition'] = str.translate("attachment; 
filename=\"Статистика_Лаборатория_Колво_{}-{}.xls\"".format(date_start_o, date_end_o), tr) wb = openpyxl.Workbook() wb.remove(wb.get_sheet_by_name('Sheet')) ws = wb.create_sheet("Кол-во по Ковид") pk = request_data.get("research") d1 = datetime.datetime.strptime(date_start_o, '%d.%m.%Y') d2 = datetime.datetime.strptime(date_end_o, '%d.%m.%Y') start_date = datetime.datetime.combine(d1, datetime.time.min) end_date = datetime.datetime.combine(d2, datetime.time.max) result_patient = sql_get_result_by_direction(pk, start_date, end_date) cards = tuple(set([i.client_id for i in result_patient])) document_card = sql_get_documents_by_card_id(cards) patient_docs = {} document_type = {4: "снилс", 5: "рождение", 1: "паспорт", 3: "полис"} for doc in document_card: data = None if doc.document_type_id in [4, 3]: data = {document_type.get(doc.document_type_id): doc.number} elif doc.document_type_id in [1, 5]: data = {document_type.get(doc.document_type_id): f"{doc.serial}@{doc.number}"} if patient_docs.get(doc.card_id, None): temp_docs = patient_docs.get(doc.card_id) temp_docs.append(data) patient_docs[doc.card_id] = temp_docs else: if data: patient_docs[doc.card_id] = [data] ws = structure_sheet.statistic_research_by_covid_base(ws, d1, d2, "Кол-во по ковид") ws = structure_sheet.statistic_research_by_covid_data(ws, result_patient, patient_docs) elif tp == "lab-staff": lab = Podrazdeleniya.objects.get(pk=int(pk)) researches = list(directory.Researches.objects.filter(podrazdeleniye=lab, hide=False).order_by('title').order_by("sort_weight").order_by("direction_id")) pods = list(Podrazdeleniya.objects.filter(p_type=Podrazdeleniya.DEPARTMENT).order_by("title")) response['Content-Disposition'] = str.translate( "attachment; filename=\"Статистика_Исполнители_Лаборатория_{0}_{1}-{2}.xls\"".format(lab.title.replace(" ", "_"), date_start_o, date_end_o), tr ) import directions.models as d from operator import itemgetter date_start, date_end = try_parse_range(date_start_o, date_end_o) iss = Issledovaniya.objects.filter(research__podrazdeleniye=lab, time_confirmation__isnull=False, time_confirmation__range=(date_start, date_end)) font_style_wrap = xlwt.XFStyle() font_style_wrap.alignment.wrap = 1 font_style_wrap.borders = borders font_style_vertical = xlwt.easyxf('align: rotation 90') font_style_vertical.borders = borders def val(v): return "" if v == 0 else v def nl(v): return v + ("" if len(v) > 19 else "\n") for executor in DoctorProfile.objects.filter(user__groups__name__in=("Врач-лаборант", "Лаборант"), podrazdeleniye__p_type=Podrazdeleniya.LABORATORY).order_by("fio").distinct(): cnt_itogo = {} ws = wb.add_sheet(executor.get_fio(dots=False) + " " + str(executor.pk)) row_num = 0 row = [("Исполнитель", 5500), ("Отделение", 5000)] from django.utils.text import Truncator for research in researches: row.append( ( Truncator(research.title).chars(30), 1300, ) ) for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num][0], font_style_wrap if col_num < 2 else font_style_vertical) ws.col(col_num).width = row[col_num][1] row_num += 1 itogo_row = [executor.get_fio(dots=True), nl("Итого")] empty_row = ["", ""] cnt_local_itogo = {} for pod in pods: row = [executor.get_fio(dots=True), nl(pod.title)] cnt = {} for research in researches: if research.title not in cnt.keys(): cnt[research.title] = 0 if research.title not in cnt_local_itogo.keys(): cnt_local_itogo[research.title] = 0 if research.title not in cnt_itogo.keys(): cnt_itogo[research.title] = 0 for i in iss.filter(doc_confirmation=executor, 
napravleniye__doc__podrazdeleniye=pod, research=research): isadd = False allempty = True for r in Result.objects.filter(issledovaniye=i): value = r.value.lower().strip() if value != "": allempty = False n = any([y in value for y in ["забор", "тест", "неправ", "ошибк", "ошибочный", "кров", "брак", "мало", "недостаточно", "реактив"]]) if not n: isadd = True if not isadd or allempty: continue cnt[research.title] += 1 cnt_itogo[research.title] += 1 cnt_local_itogo[research.title] += 1 for research in researches: row.append(val(cnt[research.title])) # data["otds"][pod.title] += 1 # data["all"][pod.title] += 1 # cnt_all[pod.title] += 1 for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style_wrap) row_num += 1 for research in researches: itogo_row.append(val(cnt_local_itogo[research.title])) empty_row.append("") for col_num in range(len(itogo_row)): ws.write(row_num, col_num, itogo_row[col_num], font_style_wrap) row_num += 1 elif tp == "otd": otd = Podrazdeleniya.objects.get(pk=int(pk)) response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_Отделение_{0}_{1}-{2}.xls\"".format(otd.title.replace(" ", "_"), date_start_o, date_end_o), tr) ws = wb.add_sheet("Выписано направлений") font_style = xlwt.XFStyle() row_num = 0 row = ["За период: ", "{0} - {1}".format(date_start_o, date_end_o)] date_start_o, date_end_o = try_parse_range(date_start_o, date_end_o) for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) row_num += 1 font_style = xlwt.XFStyle() row = [otd.title] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) font_style = xlwt.XFStyle() row_num += 1 row = [ (u"Всего выписано", 6000), (str(Napravleniya.objects.filter(doc__podrazdeleniye=otd, data_sozdaniya__range=(date_start_o, date_end_o)).count()), 3000), ] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num][0], font_style) ws.col(col_num).width = row[col_num][1] row_num += 1 researches = Issledovaniya.objects.filter(napravleniye__doc__podrazdeleniye=otd, napravleniye__data_sozdaniya__range=(date_start_o, date_end_o), time_confirmation__isnull=False) naprs = len(set([v.napravleniye_id for v in researches])) row = [u"Завершенных", str(naprs)] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) elif tp == "list-users": response['Content-Disposition'] = str.translate("attachment; filename=\"Список_пользователей.xls\"", tr) ws = wb.add_sheet("Пользователи") row_num = 0 font_style = xlwt.XFStyle() for p in Podrazdeleniya.objects.filter(hide=False).order_by("title"): has = False for u in DoctorProfile.objects.filter(podrazdeleniye=p).exclude(user__username="admin").order_by("fio"): has = True row = [("ID отделения %s" % p.pk, 9000), (p.title, 9000), ("ID пользователя %s" % u.pk, 9000), (u.user.username, 5000), (u.fio, 10000)] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num][0], font_style) ws.col(col_num).width = row[col_num][1] row_num += 1 if has: row_num += 1 elif tp == "lab-receive": lab = Podrazdeleniya.objects.get(pk=int(pk)) response['Content-Disposition'] = str.translate( "attachment; filename=\"Статистика_Принято_емкостей_{0}_{1}-{2}.xls\"".format(lab.title.replace(" ", "_"), date_start_o, date_end_o), tr ) import directions.models as d from operator import itemgetter date_start, date_end = try_parse_range(date_start_o, date_end_o) ws = wb.add_sheet(lab.title) font_style_wrap = xlwt.XFStyle() font_style_wrap.alignment.wrap = 1 
font_style_wrap.borders = borders font_style = xlwt.XFStyle() font_style.borders = borders row_num = 0 row = [ (lab.title + ", принято емкостей за {0}-{1}".format(date_start_o, date_end_o), 16000), ] replace = [{"from": "-", "to": " "}, {"from": ".", "to": " "}, {"from": " и ", "to": " "}] n = len(row) - 1 pods = Podrazdeleniya.objects.filter(p_type=Podrazdeleniya.DEPARTMENT).order_by("title") for pod in pods: n += 1 title = pod.title for rep in replace: title = title.replace(rep["from"], rep["to"]) tmp = title.split() title = [] nx = 0 for x in tmp: x = x.strip() if len(x) == 0: continue title.append(x if x.isupper() else x[0].upper() + ("" if nx > 0 else x[1:7])) nx += 1 row.append( ( "".join(title), 3700, ) ) for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num][0], font_style) ws.col(col_num).width = row[col_num][1] row_num += 1 for tube in directory.Tubes.objects.filter(releationsft__fractions__research__podrazdeleniye=lab).distinct().order_by("title"): row = [tube.title] for pod in pods: gets = ( d.TubesRegistration.objects.filter(issledovaniya__research__podrazdeleniye=lab, type__tube=tube, time_recive__range=(date_start, date_end), doc_get__podrazdeleniye=pod) .filter(Q(notice="") | Q(notice__isnull=True)) .distinct() ) row.append("" if not gets.exists() else str(gets.count())) for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) row_num += 1 elif tp == "all-labs": labs = Podrazdeleniya.objects.filter(p_type=Podrazdeleniya.LABORATORY).exclude(title="Внешние организации") response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_Все_Лаборатории_{0}-{1}.xls\"".format(date_start_o, date_end_o), tr) ws = wb.add_sheet("Выполненых анализов") font_style = xlwt.XFStyle() row_num = 0 row = ["За период: ", "{0} - {1}".format(date_start_o, date_end_o)] date_start_o, date_end_o = try_parse_range(date_start_o, date_end_o) for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) row_num += 1 font_style = xlwt.XFStyle() font_style.font.bold = True columns = [ (u"Лаборатория", 9000), (u"Выполнено анализов", 8000), ] for col_num in range(len(columns)): ws.write(row_num, col_num, columns[col_num][0], font_style) ws.col(col_num).width = columns[col_num][1] font_style = xlwt.XFStyle() font_style.alignment.wrap = 1 all = 0 for lab in labs: row_num += 1 c = Issledovaniya.objects.filter(research__podrazdeleniye=lab, time_confirmation__isnull=False, time_confirmation__range=(date_start_o, date_end_o)).count() row = [lab.title, c] all += c for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) row_num += 1 row = [ "", "Всего: " + str(all), ] font_style = xlwt.XFStyle() font_style.alignment.wrap = 3 font_style.alignment.horz = 3 for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) elif tp == "tubes-using": response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_Использование_Емкостей_{0}-{1}.xls\"".format(date_start_o, date_end_o), tr) per = "{0} - {1}".format(date_start_o, date_end_o) ws = wb.add_sheet("Общее использование емкостей") font_style = xlwt.XFStyle() row_num = 0 row = ["За период: ", per] date_start_o, date_end_o = try_parse_range(date_start_o, date_end_o) for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) row_num += 1 font_style = xlwt.XFStyle() font_style.font.bold = True columns = [ (u"Тип емкости", 9000), (u"Материал взят в процедурном каб", 9000), (u"Принято 
лабораторией", 8000), (u"Не принято лабораторией", 8000), (u"Потеряны", 4000), ] for col_num in range(len(columns)): ws.write(row_num, col_num, columns[col_num][0], font_style) ws.col(col_num).width = columns[col_num][1] font_style = xlwt.XFStyle() font_style.alignment.wrap = 1 all_get = 0 all_rec = 0 all_nrec = 0 all_lost = 0 for tube in Tubes.objects.all(): row_num += 1 c_get = TubesRegistration.objects.filter(type__tube=tube, time_get__isnull=False, time_get__range=(date_start_o, date_end_o)).count() c_rec = TubesRegistration.objects.filter(type__tube=tube, time_recive__isnull=False, notice="", time_get__range=(date_start_o, date_end_o)).count() c_nrec = TubesRegistration.objects.filter(type__tube=tube, time_get__isnull=False, time_get__range=(date_start_o, date_end_o)).exclude(notice="").count() str1 = "" str2 = "" if c_nrec > 0: str1 = str(c_nrec) if c_get - c_rec - all_nrec > 0: str2 = str(c_get - c_rec - all_nrec) all_lost += c_get - c_rec - all_nrec row = [tube.title, c_get, c_rec, str1, str2] all_get += c_get all_rec += c_rec all_nrec += c_nrec for col_num in range(len(row)): font_style.alignment.wrap = 1 font_style.alignment.horz = 1 if col_num > 0: font_style.alignment.wrap = 3 font_style.alignment.horz = 3 ws.write(row_num, col_num, row[col_num], font_style) labs = Podrazdeleniya.objects.filter(p_type=Podrazdeleniya.LABORATORY).exclude(title="Внешние организации") for lab in labs: ws = wb.add_sheet(lab.title) font_style = xlwt.XFStyle() row_num = 0 row = ["За период: ", per] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) row_num += 1 font_style = xlwt.XFStyle() font_style.font.bold = True columns = [ (u"Тип емкости", 9000), (u"Материал взят в процедурном каб", 9000), (u"Принято лабораторией", 8000), (u"Не принято лабораторией", 8000), (u"Потеряны", 4000), ] for col_num in range(len(columns)): ws.write(row_num, col_num, columns[col_num][0], font_style) ws.col(col_num).width = columns[col_num][1] font_style = xlwt.XFStyle() font_style.alignment.wrap = 1 all_get = 0 all_rec = 0 all_nrec = 0 all_lost = 0 for tube in Tubes.objects.all(): row_num += 1 c_get = TubesRegistration.objects.filter( issledovaniya__research__podrazdeleniye=lab, type__tube=tube, time_get__isnull=False, time_get__range=(date_start_o, date_end_o) ).count() c_rec = TubesRegistration.objects.filter( issledovaniya__research__podrazdeleniye=lab, type__tube=tube, time_recive__isnull=False, notice="", time_get__range=(date_start_o, date_end_o) ).count() c_nrec = ( TubesRegistration.objects.filter(issledovaniya__research__podrazdeleniye=lab, type__tube=tube, time_get__isnull=False, time_get__range=(date_start_o, date_end_o)) .exclude(notice="") .count() ) str1 = "" str2 = "" if c_nrec > 0: str1 = str(c_nrec) if c_get - c_rec - all_nrec > 0: str2 = str(c_get - c_rec - all_nrec) all_lost += c_get - c_rec - all_nrec row = [tube.title, c_get, c_rec, str1, str2] all_get += c_get all_rec += c_rec all_nrec += c_nrec for col_num in range(len(row)): font_style.alignment.wrap = 1 font_style.alignment.horz = 1 if col_num > 0: font_style.alignment.wrap = 3 font_style.alignment.horz = 3 ws.write(row_num, col_num, row[col_num], font_style) elif tp == "uets": usrs = DoctorProfile.objects.filter(podrazdeleniye__p_type=Podrazdeleniya.LABORATORY).order_by("podrazdeleniye__title") response['Content-Disposition'] = str.translate("attachment; filename=\"Статистика_УЕТс_{0}-{1}.xls\"".format(date_start_o, date_end_o), tr) ws = wb.add_sheet("УЕТы") font_style = xlwt.XFStyle() row_num = 0 row = ["За 
период: ", "{0} - {1}".format(date_start_o, date_end_o)] date_start_o, date_end_o = try_parse_range(date_start_o, date_end_o) for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num], font_style) font_style = xlwt.XFStyle() font_style.font.bold = True row_num += 1 row = [ (u"Лаборатория", 8000), (u"ФИО", 8000), (u"УЕТы", 2500), ] for col_num in range(len(row)): ws.write(row_num, col_num, row[col_num][0], font_style) ws.col(col_num).width = row[col_num][1] font_style = xlwt.XFStyle() for usr in usrs: researches_uets = {} researches = Issledovaniya.objects.filter(doc_save=usr, time_save__isnull=False, time_save__range=(date_start_o, date_end_o)) for issledovaniye in researches: if usr.labtype == 1: uet_tmp = sum([v.uet_doc for v in directory.Fractions.objects.filter(research=issledovaniye.research)]) else: uet_tmp = sum([v.uet_lab for v in directory.Fractions.objects.filter(research=issledovaniye.research)]) researches_uets[issledovaniye.pk] = {"uet": uet_tmp} researches = Issledovaniya.objects.filter(doc_confirmation=usr, time_confirmation__isnull=False, time_confirmation__range=(date_start_o, date_end_o)) for issledovaniye in researches: if usr.labtype == 1: uet_tmp = sum([v.uet_doc for v in directory.Fractions.objects.filter(research=issledovaniye.research)]) else: uet_tmp = sum([v.uet_lab for v in directory.Fractions.objects.filter(research=issledovaniye.research)]) researches_uets[issledovaniye.pk] = {"uet": uet_tmp} uets = sum([researches_uets[v]["uet"] for v in researches_uets.keys()]) row_num += 1 row = [ usr.podrazdeleniye.title, usr.get_full_fio(), uets, ] for col_num in range(len(row)): font_style.alignment.wrap = 1 font_style.alignment.horz = 1 if col_num > 2: font_style.alignment.wrap = 3 font_style.alignment.horz = 3 ws.write(row_num, col_num, row[col_num], font_style) elif tp == "message-ticket": filters = {'pk': int(request_data.get("hospital"))} any_hospital = request.user.doctorprofile.all_hospitals_users_control if not any_hospital: filters['pk'] = request.user.doctorprofile.get_hospital_id() response['Content-Disposition'] = str.translate(f"attachment; filename=\"Обращения {date_start_o.replace('.', '')} {date_end_o.replace('.', '')} {filters['pk']}.xlsx\"", tr) wb = openpyxl.Workbook() wb.remove(wb.get_sheet_by_name('Sheet')) ws = wb.create_sheet("Обращения") styles_obj = structure_sheet.style_sheet() wb.add_named_style(styles_obj[0]) if int(filters['pk']) == -1 and any_hospital: filters = {} rows_hosp = list(Hospitals.objects.values_list('pk', flat=True).filter(hide=False, **filters)) d1 = datetime.datetime.strptime(date_start_o, '%d.%m.%Y') d2 = datetime.datetime.strptime(date_end_o, '%d.%m.%Y') ws = structure_sheet.statistic_message_ticket_base(ws, date_start_o, date_end_o, styles_obj[3]) start_date = datetime.datetime.combine(d1, datetime.time.min) end_date = datetime.datetime.combine(d2, datetime.time.max) message_ticket_sql = sql_func.message_ticket(rows_hosp, start_date, end_date) ws = structure_sheet.statistic_message_ticket_data(ws, message_ticket_sql, styles_obj[3]) ws = wb.create_sheet("Итоги-Обращения") message_total_purpose_sql = sql_func.message_ticket_purpose_total(rows_hosp, start_date, end_date) ws = structure_sheet.statistic_message_purpose_total_data(ws, message_total_purpose_sql, date_start_o, date_end_o, styles_obj[3]) wb.save(response) return response
5,341,428
def populate_labels(model_name: str, paths: dict) -> list:
    """Report the full list of object labels corresponding to the chosen detection model

    Args:
        model_name: name of the model to use
        paths: dictionary of paths from yml file

    Returns:
        labels (list(str)): list of object label strings
    """
    model_file_path = paths['local_detection_model']
    labels_file_path = os.path.join(model_file_path, model_name, 'coco.names')
    # use a context manager so the labels file is always closed
    with open(labels_file_path, 'r') as f:
        labels = [line.strip() for line in f.readlines()]
    return labels
5,341,429
def get_checkers():
    """Get default checkers to run on code.

    :returns: List of default checkers to run.
    """
    return [function, readability]
5,341,430
def get_token():
    """
    Get or create token.
    """
    try:
        token = Token.objects.get(name=settings.TOKEN_NAME)
    except Token.DoesNotExist:
        client_id = input("Client id:")
        client_secret = input("Client secret:")
        token = Token.objects.create(
            name=settings.TOKEN_NAME,
            scope=settings.TOKEN_SCOPE,
            client_id=client_id,
            client_secret=client_secret
        )
    if not (token.access_token and token.refresh_token):
        authorize_token(token)
    return token
5,341,431
def test_rmr_set_get(): """ test set functions """ sbuf = rmr.rmr_alloc_msg(MRC_SEND, SIZE) _assert_new_sbuf(sbuf) # test payload pay = b"\x01\x00\x80" rmr.set_payload_and_length(pay, sbuf) summary = rmr.message_summary(sbuf) assert summary["payload"] == pay assert summary["payload length"] == 3 # test transid (note we cant test payload because it's randomly gen) assert summary["transaction id"] == b"" assert len(summary["transaction id"]) == 0 rmr.generate_and_set_transaction_id(sbuf) summary = rmr.message_summary(sbuf) assert summary["transaction id"] != b"" assert len(summary["transaction id"]) == 32 # test meid assert rmr.rmr_get_meid(sbuf) == summary["meid"] == b"" rmr.rmr_set_meid(sbuf, b"666\x01\x00\x01") summary = rmr.message_summary(sbuf) assert rmr.rmr_get_meid(sbuf) == summary["meid"] == b"666\x01" assert (len(summary["meid"])) == 4
5,341,432
def get_labels_from_sample(sample):
    """
    Build the interstice labels for a sample of segmented Chinese words: a
    sample containing N characters in total yields N-1 labels, one per
    interstice between consecutive characters.

    Parameters
    ----------
    sample : list of words containing N characters in total

    Returns
    -------
    list of N-1 values in {0, 1} (0 represents no split at that interstice)
    """
    labels = []
    for word in sample:
        if len(word) > 1:
            for _ in range(len(word) - 1):
                labels.append(0)  # within a word, append a '0' for each interstice
            labels.append(1)  # at the end of a word, append a '1'
        else:
            labels.append(1)
    labels = labels[:-1]  # Throw away the last value, it doesn't represent an interstice
    return labels
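# Usage sketch for get_labels_from_sample (hypothetical input): a sample of two
# segmented words with 2 and 1 characters has two interstices; only the inner
# interstice of the first word gets a 0.
example_sample = ["ab", "c"]
assert get_labels_from_sample(example_sample) == [0, 1]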
5,341,433
async def test_turn_off(opp):
    """Test turning off the device."""
    await common.async_set_hvac_mode(opp, HVAC_MODE_HEAT, ENTITY_CLIMATE)
    state = opp.states.get(ENTITY_CLIMATE)
    assert state.state == HVAC_MODE_HEAT

    await common.async_turn_off(opp, ENTITY_CLIMATE)
    state = opp.states.get(ENTITY_CLIMATE)
    assert state.state == HVAC_MODE_OFF
5,341,434
def _recarray_from_array(arr, names, drop_name_dim=_NoValue): """ Create recarray from input array `arr`, field names `names` """ if not arr.dtype.isbuiltin: # Structured array as input # Rename fields dtype = np.dtype([(n, d[1]) for n, d in zip(names, arr.dtype.descr)]) return arr.view(dtype) # Can drop name axis for > 1D arrays or row vectors (scalar per name). can_name_drop = arr.ndim > 1 or len(names) > 1 if can_name_drop and drop_name_dim is _NoValue: warnings.warn( 'Default behavior of make_recarray and > 1D arrays will ' 'change in next Nipy release. Current default returns\n' 'array with same number of dimensions as input, with ' 'axis corresponding to the field names having length 1\n; ' 'Future default will be to drop this length 1 axis. Please ' 'change your code to use explicit True or False for\n' 'compatibility with future Nipy.', VisibleDeprecationWarning, stacklevel=2) # This default will change to True in next version of Nipy drop_name_dim = False dtype = np.dtype([(n, arr.dtype) for n in names]) # At least for numpy <= 1.7.1, the dimension that numpy applies the names # to depends on the memory layout (C or F). Ensure C layout for consistent # application of names to last dimension. rec_arr = np.ascontiguousarray(arr).view(dtype) if can_name_drop and drop_name_dim: rec_arr.shape = arr.shape[:-1] return rec_arr
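# Usage sketch for _recarray_from_array: view a 2-D float array as a record
# array with named fields and drop the trailing name axis.
arr = np.array([[1.0, 2.0],
                [3.0, 4.0]])
rec = _recarray_from_array(arr, ["x", "y"], drop_name_dim=True)
# rec["x"] -> array([1., 3.]); rec["y"] -> array([2., 4.]); rec.shape == (2,)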
5,341,435
def pad(mesh: TriangleMesh, *, side: str, width: int, opts: str = '', label: int = None) -> TriangleMesh: """Pad a triangle mesh. Parameters ---------- mesh : TriangleMesh The mesh to pad. side : str Side to pad, must be one of `left`, `right`, `top`, `bottom`. width : int Width of the padded area. opts : str, optional Optional arguments passed to `triangle.triangulate`. label : int, optional The label to assign to the padded area. If not defined, generates the next unique label based on the existing ones. Returns ------- new_mesh : TriangleMesh Padded triangle mesh. Raises ------ ValueError When the value of `side` is invalid. """ if label is None: label = mesh.unique_labels.max() + 1 if width == 0: return mesh top_edge, right_edge = mesh.points.max(axis=0) bottom_edge, left_edge = mesh.points.min(axis=0) if side == 'bottom': is_edge = mesh.points[:, 0] == bottom_edge corners = np.array([[bottom_edge - width, right_edge], [bottom_edge - width, left_edge]]) elif side == 'left': is_edge = mesh.points[:, 1] == left_edge corners = np.array([[bottom_edge, left_edge - width], [top_edge, left_edge - width]]) elif side == 'top': is_edge = mesh.points[:, 0] == top_edge corners = np.array([[top_edge + width, right_edge], [top_edge + width, left_edge]]) elif side == 'right': is_edge = mesh.points[:, 1] == right_edge corners = np.array([[bottom_edge, right_edge + width], [top_edge, right_edge + width]]) else: raise ValueError('Side must be one of `right`, `left`, `bottom`' f'`top`. Got {side=}') edge_coords = mesh.points[is_edge] coords = np.vstack([edge_coords, corners]) pad_mesh = simple_triangulate(points=coords, opts=opts) mesh_edge_index = np.argwhere(is_edge).flatten() pad_edge_index = np.arange(len(mesh_edge_index)) edge_mapping = np.vstack([pad_edge_index, mesh_edge_index]) n_verts = len(mesh.points) n_edge_verts = len(edge_coords) n_pad_verts = len(pad_mesh.points) - n_edge_verts mesh_index = np.arange(n_verts, n_verts + n_pad_verts) pad_index = np.arange(n_edge_verts, n_edge_verts + n_pad_verts) pad_mapping = np.vstack([pad_index, mesh_index]) # mapping for the cell indices cells in `pad_mesh` to the source mesh. mapping = np.hstack([edge_mapping, pad_mapping]) shape = pad_mesh.cells.shape pad_cells = pad_mesh.cells.copy().ravel() mask = np.in1d(pad_cells, mapping[0, :]) pad_cells[mask] = mapping[1, np.searchsorted(mapping[0, :], pad_cells[mask])] pad_cells = pad_cells.reshape(shape) pad_verts = pad_mesh.points[n_edge_verts:] pad_labels = np.ones(len(pad_cells)) * label # append values to source mesh points = np.vstack([mesh.points, pad_verts]) cells = np.vstack([mesh.cells, pad_cells]) labels = np.hstack([mesh.labels, pad_labels]) new_mesh = TriangleMesh(points=points, cells=cells, labels=labels) return new_mesh
5,341,436
def make_qa_plots(targs, qadir='.', targdens=None, max_bin_area=1.0, weight=True, imaging_map_file=None, truths=None, objtruths=None, tcnames=None, cmx=False, bit_mask=None, mocks=False): """Make DESI targeting QA plots given a passed set of targets. Parameters ---------- targs : :class:`~numpy.array` or `str` An array of targets in the DESI data model format. If a string is passed then the targets are read from the file with the passed name (supply the full directory path). qadir : :class:`str`, optional, defaults to the current directory The output directory to which to write produced plots. targdens : :class:`dictionary`, optional, set automatically by the code if not passed A dictionary of DESI target classes and the goal density for that class. Used to label the goal density on histogram plots. max_bin_area : :class:`float`, optional, defaults to 1 degree The bin size in the passed coordinates is chosen automatically to be as close as possible to this value without exceeding it. weight : :class:`boolean`, optional, defaults to True If this is set, weight pixels using the ``DESIMODEL`` HEALPix footprint file to ameliorate under dense pixels at the footprint edges. imaging_map_file : :class:`str`, optional, defaults to no weights If `weight` is set, then this file contains the location of the imaging HEALPixel map (e.g. made by :func:` desitarget.randoms.pixmap()` if this is not sent, then the weights default to 1 everywhere (i.e. no weighting). truths : :class:`~numpy.array` or `str` The truth objects from which the targs were derived in the DESI data model format. If a string is passed then read from that file (supply the full directory path). objtruths : :class:`dict` Object type-specific truth metadata. tcnames : :class:`list`, defaults to None A list of strings, e.g. ['QSO','LRG','ALL'] If passed, return only the QA pages for those specific bits. A useful speed-up when testing. cmx : :class:`boolean`, defaults to ``False`` Pass as ``True`` to operate on commissioning bits instead of SV or main survey bits. Commissioning files have no MWS or BGS columns. bit_mask : :class:`~numpy.array`, optional, defaults to ``None`` Load the bit names from this passed mask (with zero density constraints) instead of the main survey bits. mocks : :class:`boolean`, optional, default=False If ``True``, add plots that are only relevant to mocks at the bottom of the webpage. Returns ------- :class:`float` The total area of the survey used to make the QA plots. Notes ----- - The ``DESIMODEL`` environment variable must be set to find the default expected target densities. - On execution, a set of .png plots for target QA are written to `qadir`. """ # ADM set up the default logger from desiutil. log = get_logger() start = time() log.info('Start making targeting QA plots...t = {:.1f}s'.format(time()-start)) if mocks and targs is None and truths is None and objtruths is None: if isinstance(targs, str): targs, truths, objtruths = collect_mock_data(targs) if mockdata == 0: mocks = False else: pass # = mockdata else: log.warning('To make mock-related plots, targs must be a directory+file-location string...') log.warning('...will proceed by only producing the non-mock plots...') else: # ADM if a filename was passed, read in the targets from that file. if isinstance(targs, str): targs = fitsio.read(targs) log.info('Read in targets...t = {:.1f}s'.format(time()-start)) truths, objtruths = None, None # ADM determine the nside for the passed max_bin_area. 
for n in range(1, 25): nside = 2 ** n bin_area = hp.nside2pixarea(nside, degrees=True) if bin_area <= max_bin_area: break # ADM calculate HEALPixel numbers once, here, to avoid repeat calculations # ADM downstream. from desimodel import footprint pix = footprint.radec2pix(nside, targs["RA"], targs["DEC"]) log.info('Calculated HEALPixel for each target...t = {:.1f}s' .format(time()-start)) # ADM set up the weight of each HEALPixel, if requested. weights = np.ones(len(targs)) # ADM a count of the uniq pixels that are covered, useful for area calculations. uniqpixset = np.array(list(set(pix))) # ADM the total pixel weight assuming none of the areas are fractional # ADM or need rewighted (i.e. each pixel's weight is 1). totalpixweight = len(uniqpixset) if weight: # ADM load the imaging weights file. if imaging_map_file is not None: from desitarget import io as dtio pixweight = dtio.load_pixweight_recarray(imaging_map_file, nside)["FRACAREA"] # ADM determine what HEALPixels each target is in, to set the weights. fracarea = pixweight[pix] # ADM weight by 1/(the fraction of each pixel that is in the DESI footprint) # ADM except for zero pixels, which are all outside of the footprint. w = np.where(fracarea == 0) fracarea[w] = 1 # ADM to guard against division by zero warnings. weights = 1./fracarea weights[w] = 0 # ADM if we have weights, then redetermine the total pix weight. totalpixweight = np.sum(pixweight[uniqpixset]) log.info('Assigned weights to pixels based on DESI footprint...t = {:.1f}s' .format(time()-start)) # ADM calculate the total area (useful for determining overall average densities # ADM from the total number of targets/the total area). pixarea = hp.nside2pixarea(nside, degrees=True) totarea = pixarea*totalpixweight # ADM Current goal target densities for DESI. if targdens is None: targdens = _load_targdens(tcnames=tcnames, bit_mask=bit_mask, mocks=mocks) if mocks: dndz = _load_dndz() # ADM clip the target densities at an upper density to improve plot edges # ADM by rejecting highly dense outliers. upclipdict = {k: 5000. for k in targdens} if bit_mask is not None: main_mask = bit_mask else: main_mask = desi_mask upclipdict = {'ELG': 4000, 'LRG': 1200, 'QSO': 400, 'ALL': 8000, 'STD_FAINT': 300, 'STD_BRIGHT': 300, # 'STD_FAINT': 200, 'STD_BRIGHT': 50, 'LRG_1PASS': 1000, 'LRG_2PASS': 500, 'BGS_FAINT': 2500, 'BGS_BRIGHT': 2500, 'BGS_WISE': 2500, 'BGS_ANY': 5000, 'MWS_ANY': 2000, 'MWS_BROAD': 2000, 'MWS_WD': 50, 'MWS_NEARBY': 50, 'MWS_MAIN_RED': 2000, 'MWS_MAIN_BLUE': 2000} for objtype in targdens: if 'ALL' in objtype: w = np.arange(len(targs)) else: if ('BGS' in objtype) and not('ANY' in objtype) and not(cmx): w = np.where(targs["BGS_TARGET"] & bgs_mask[objtype])[0] elif ('MWS' in objtype) and not('ANY' in objtype) and not(cmx): w = np.where(targs["MWS_TARGET"] & mws_mask[objtype])[0] else: w = np.where(targs["DESI_TARGET"] & main_mask[objtype])[0] if len(w) > 0: # ADM make RA/Dec skymaps. qaskymap(targs[w], objtype, qadir=qadir, upclip=upclipdict[objtype], weights=weights[w], max_bin_area=max_bin_area) log.info('Made sky map for {}...t = {:.1f}s' .format(objtype, time()-start)) # ADM make histograms of densities. We already calculated the correctly # ADM ordered HEALPixels and so don't need to repeat that calculation. 
qahisto(pix[w], objtype, qadir=qadir, targdens=targdens, upclip=upclipdict[objtype], weights=weights[w], max_bin_area=max_bin_area, catispix=True) log.info('Made histogram for {}...t = {:.1f}s' .format(objtype, time()-start)) # ADM make color-color plots qacolor(targs[w], objtype, targs[w], qadir=qadir, fileprefix="color") log.info('Made color-color plot for {}...t = {:.1f}s' .format(objtype, time()-start)) # ADM make magnitude histograms qamag(targs[w], objtype, qadir=qadir, fileprefix="nmag", area=totarea) log.info('Made magnitude histogram plot for {}...t = {:.1f}s' .format(objtype, time()-start)) if truths is not None: # ADM make noiseless color-color plots qacolor(truths[w], objtype, targs[w], qadir=qadir, mocks=True, fileprefix="mock-color", nodustcorr=True) log.info('Made (mock) color-color plot for {}...t = {:.1f}s' .format(objtype, time()-start)) # ADM make N(z) plots mock_qanz(truths[w], objtype, qadir=qadir, area=totarea, dndz=dndz, fileprefixz="mock-nz", fileprefixzmag="mock-zvmag") log.info('Made (mock) redshift plots for {}...t = {:.1f}s' .format(objtype, time()-start)) # # ADM plot what fraction of each selected object is actually a contaminant # mock_qafractype(truths[w], objtype, qadir=qadir, fileprefix="mock-fractype") # log.info('Made (mock) classification fraction plots for {}...t = {:.1f}s'.format(objtype, time()-start)) # ADM make Gaia-based plots if we have Gaia columns if "PARALLAX" in targs.dtype.names and np.sum(targs['PARALLAX'] != 0) > 0: qagaia(targs[w], objtype, qadir=qadir, fileprefix="gaia") log.info('Made Gaia-based plots for {}...t = {:.1f}s' .format(objtype, time()-start)) log.info('Made QA plots...t = {:.1f}s'.format(time()-start)) return totarea
5,341,437
def do_validate(): """ Validate function. print out the config, count the users and print the actions. """ try: # Print config logging.info( "\n====== [RTA] Rest Tenant Automation Configuration ======") for k, v in config.items(): if isinstance(v, dict): logging.info("%s:" % (k)) for subk, subv in v.items(): logging.info(" |-%-20s: %-10s" % (subk, subv)) logging.info("\n") else: logging.info("%-10s: %s" % (k, v)) logging.info("\nConfiguration loaded correctly.") load_users_csv() logging.info( "\nCSV file loaded correctly. Printing users with their control flags...") # Print flags logging.info("\n====== [RTA] Rest Tenant Automation Users ======") # Print Headers print_layout = "[%-2d] %-40s %-20s %-8s %-8s" logging.info(print_layout % (0, "USER-ID", "TENANT-ID", "SKIP", "IS_ADMIN")) i = 0 for id in CSV_DATA: data = CSV_DATA[id] is_skip = str_bool(data.get(key_skip)) is_admin = str_bool(data.get(key_isClusterAdminGroup)) # If skip is declared, we go to the next row i = i + 1 logging.info(print_layout % (i, id, data.get(key_tenantId), is_skip, is_admin)) except Exception as e: # catch all exceptions exc_type, exc_value, exc_traceback = sys.exc_info() logging.error( "There is an error validating, please fix it before executing.") logging.error(traceback.print_exc()) traceback.print_exc() return
5,341,438
def krauss_basis(qubits):
    """
    Helper function to return the Krauss operator basis formed by the Cartesian
    product of [I, X, Y, Z] over the given number of qubits.

    :param qubits: number of qubits
    :type qubits: int
    :return: list of Krauss operator tuples, one per element of the basis
    :rtype: list of tuple
    """
    return [i for i in itertools.product([I, X, Y, Z], repeat=qubits)]
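# Minimal usage sketch: assumes the module-level I, X, Y, Z operators that
# krauss_basis relies on are defined (e.g. the 2x2 Pauli matrices).
single_qubit_basis = krauss_basis(1)   # 4 one-element tuples: (I,), (X,), (Y,), (Z,)
two_qubit_basis = krauss_basis(2)      # 4**2 = 16 tuples such as (I, X), (Z, Y), ...
assert len(two_qubit_basis) == 16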
5,341,439
def choose_fun_cov(str_cov: str) -> constants.TYPING_CALLABLE:
    """
    It chooses a covariance function.

    :param str_cov: the name of covariance function.
    :type str_cov: str.

    :returns: covariance function.
    :rtype: callable

    :raises: AssertionError
    """
    assert isinstance(str_cov, str)

    if str_cov in ('eq', 'se'):
        fun_cov = cov_se
    elif str_cov == 'matern32':
        fun_cov = cov_matern32
    elif str_cov == 'matern52':
        fun_cov = cov_matern52
    else:
        raise NotImplementedError('choose_fun_cov: allowed str_cov condition, but it is not implemented.')

    return fun_cov
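# Usage sketch: the returned callable is one of the module's covariance
# functions (cov_se, cov_matern32 or cov_matern52), selected by name.
fun_cov = choose_fun_cov('matern52')
assert fun_cov is cov_matern52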
5,341,440
async def error_500(request, error: HTTPException):
    """
    TODO: Handle the error with our own error handling system.
    """
    log.error(
        "500 - Internal Server Error",
        exc_info=(type(error), error, error.__traceback__),
    )
    return JSONResponse(
        status_code=500,
        content={
            "error": "Internal Server Error",
            "message": "Server got itself in trouble",
        },
    )
5,341,441
def stream_comments(sync_q): """Main task: Starts comment stream from the blockchain""" processed_posts = [] try: blockfile = open(startblock, 'r') starting_point = int(blockfile.read()) blockfile.close() except: try: props = steem.get_dynamic_global_properties() starting_point = props['last_irreversible_block_num'] except: stream_comments(sync_q) try: stream = map(Comment, blockchain.stream(start=starting_point, opNames=["comment"])) logger.info("Stream from blockchain started at block "+str(starting_point)) except Exception as error: logger.warning("Could not start blockchain stream "+repr(error)) stream_comments(starting_point) """Continuously stream comment objects from Blockchain, react to relevant one""" for post in stream: try: post.refresh() tags = post["tags"] author = post["author"] body = post["body"] authorperm = construct_authorperm(author, post['permlink']) if post.is_comment(): if author in curatorlist: """Initiates an action if a curator uses the invocation command in a comment""" parent = post.get_parent() if "!tf50" in body: sync_q.put(Post_Action(parent, "tf50", author, None)) elif "!tf100" in body: sync_q.put(Post_Action(parent, "tf100", author, None)) elif "!coop100" in body: sync_q.put(Post_Action(parent, "coop100", None, None)) elif "!ad10" in body: sync_q.put(Post_Action(parent, "ad10", author, None)) elif "!tfreview" in body: """Users targeted by bot comments can have their posts manually reviewed""" if post.parent_author == postaccount: parent = post.get_parent() parentlink = construct_authorperm(parent['author'], parent['permlink']) logger.info("@{} requests manual review ".format(author)) history = get_history(author) try: sync_q.put(Discord_Message("Author requests manual review: "+history, feedchannel)) sync_q.put(Discord_Message("https://steemit.com/"+parentlink, feedchannel)) except: logger.warning("Could not send message to Discord") try: post.reply("Thanks! We will review your post.", author=postaccount) except: logger.warning("Could not send reply to !tfreview reuester") elif post.is_main_post() and tracktag in tags and not author in whitelist: """Checks for each *post* in #travelfeed if it fits the criteria""" commenttext = "" if post.time_elapsed() > timedelta(minutes=2) or post in processed_posts: #If a post is edited within the first two minutes it would be processed twice without checking for the second condition. The array of processed posts does not need to be saved at exit since it is only relevant for two minutes logger.info("Ignoring updated post") continue elif author in blacklist: commenttext = blacklisttext logger.info("Detected post by blacklisted user @{}".format(author)) else: try: content = re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', ''.join(BeautifulSoup(markdown(body), "html.parser").findAll(text=True))) count = len(content.split(" ")) check_eligible = is_eligible(content, 225, "en") if count < 240: commenttext = shortposttext logger.info("Detected short post by @{} who posted with just {} words".format(author, count)) elif check_eligible == False: commenttext = wronglangtext logger.info("Detected post by @{} who posted not in English".format(author)) else: logger.info("Sending awesome post by @{} to Discord feed".format(author)) try: history = get_history(author) location = get_location(body, None) except: history = "" location = None if location == None: msg = history+". **"+str(count)+"** words." else: msg = history+". **"+str(count)+"** words. 
Location: **"+location+"**" try: sync_q.put(Discord_Message(msg, feedchannel)) sync_q.put(Discord_Message("https://steemit.com/"+authorperm, feedchannel)) except: logger.warning("Could not send message to Discord") except Exception as error: logger.warning("Error during content processing "+repr(error)) continue if not commenttext == "": try: post.reply(commenttext.format(author, count), author=postaccount) logger.info("I sucessfully left a comment for @{}".format(author)) except: logger.warning("There was an error posting the comment.") try: sync_q.put(Discord_Message("Could not leave a comment for bad post https://steemit.com/"+authorperm, logchannel)) except: logger.warning("Could not send message to Discord") continue processed_posts += [authorperm] elif post.is_main_post() and (adtag1 in tags) and not tracktag in tags: """Checks if post is in adtag and eligable for advertisement""" content = re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', ''.join(BeautifulSoup(markdown(body), "html.parser").findAll(text=True))) if is_eligible(content, 400, "en"): adfile = open(autpath, 'a+') adfile.seek(0) author_list = adfile.read().splitlines() if not author in author_list: try: adfile.write("\n"+author) sync_q.put(Discord_Message("https://steemit.com/"+authorperm, adchannel)) logger.info("Found an advertisement post by @{}".format(author)) except: logger.warning("Could not promote advertisement post by @{}".format(author)) adfile.close() adfile = open(autpath, 'a+') except ContentDoesNotExistsException: continue except Exception as error: logger.warning("Exception during post processing: "+repr(error))
5,341,442
def log_sum_exp(x):
    """Numerically stable log-sum-exp over axis 1.

    Shifts by the batch maximum before exponentiating, so the unaveraged
    confidence loss can be computed across all examples in a batch without
    overflow.

    Args:
        x: conf_preds from the conf layers, shape (num_boxes, num_classes)
    """
    x_max = x.max()
    return np.log(np.sum(np.exp(x - x_max), 1, keepdims=True)) + x_max
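# Worked example of the max-shift trick: log(sum(exp(x))) is evaluated as
# log(sum(exp(x - x_max))) + x_max, so large logits do not overflow.
x = np.array([[1000.0, 1000.0]])
# A naive np.log(np.sum(np.exp(x))) would overflow to inf; the shifted form
# returns log(2) + 1000, roughly 1000.693.
print(log_sum_exp(x))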
5,341,443
def session_hook(func):
    """
    Hook that opens a database session, runs `func` (read or write) and closes
    the connection after the run.

    func: function that communicates with the database; it receives the open
          Session as its first argument (e.g. func(db: Session, *args))

    Returns:
        data: the return value of func
    Raises:
        CustomException if the operation fails inside the hook
    """
    def run(*args, **kwargs):
        global db
        try:
            db = SessionLocal()
            data = func(db, *args, **kwargs)
            return data
        except exc.IntegrityError as e:
            print(e._message())
            db.rollback()
            raise CustomException(error='Operation fail')
        except Exception as e:
            raise (Exception(e))
        finally:
            db.close()
    return run
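# Usage sketch with a hypothetical query function: the decorated callable
# receives the open Session as its first argument, and the session is closed
# in the finally block regardless of the outcome. `User` is an assumed model.
@session_hook
def get_user_by_id(db, user_id):
    return db.query(User).filter(User.id == user_id).first()

user = get_user_by_id(42)  # the caller never passes a session explicitly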
5,341,444
def float_index_to_time_index(df):
    """Convert a dataframe's float indices to `datetime64['us']` indices."""
    df.index = df.index.map(datetime.utcfromtimestamp)
    df.index = pd.to_datetime(df.index, unit="us", utc=True)
    return df
5,341,445
def correlated_hybrid_matrix(data_covmat, theory_covmat=None, theory_corr=None, cap=True, cap_off=0.99):
    """
    Given a diagonal matrix data_covmat, and a theory matrix theory_covmat or its
    correlation matrix theory_corr, produce a hybrid non-diagonal matrix that has
    the same diagonals as the data matrix but has correlation coefficients given
    by theory.
    """
    if theory_corr is None:
        assert theory_covmat is not None
        theory_corr = cov2corr(theory_covmat)
    r = theory_corr

    def _cap(imat, cval, csel):
        # clip every coefficient to [-1, 1] and the selected off-diagonal
        # entries to [-cval, cval]
        imat[imat > 1] = 1
        imat[imat < -1] = -1
        vals = imat[csel]
        vals[vals > cval] = cval
        vals[vals < -cval] = -cval
        imat[csel] = vals

    d = data_covmat.copy()
    sel = np.where(~np.eye(d.shape[0], dtype=bool))
    d[sel] = 1
    dcorr = 1. / cov2corr(d)
    if cap:
        _cap(r, cap_off, sel)
    fcorr = dcorr * r
    d[sel] = fcorr[sel]
    return d
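# Usage sketch: keep the data variances on the diagonal and impose the theory
# correlation on the off-diagonals. cov2corr is assumed to be the usual
# covariance-to-correlation helper from the same module.
data_cov = np.diag([4.0, 9.0])
theory_cov = np.array([[1.0, 0.5],
                       [0.5, 1.0]])
hybrid = correlated_hybrid_matrix(data_cov, theory_covmat=theory_cov)
# diagonals stay (4, 9); the off-diagonal becomes 0.5 * sqrt(4 * 9) = 3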
5,341,446
def prepare_config(self, config=None): """Set defaults and check fields. Config is a dictionary of values. Method creates new config using default class config. Result config keys are the same as default config keys. Args: self: object with get_default_config method. config: User-provided config. Returns: Config dictionary with defaults set. """ default_config = self.get_default_config() if config is None: config = {} elif isinstance(config, str): config = read_config(config) elif not isinstance(config, dict): raise ConfigError("Config dictionary or filename expected, got {}".format(type(config))) # Check type. if CONFIG_TYPE in config: cls_name = type(self).__name__ if cls_name != config[CONFIG_TYPE]: raise ConfigError("Type mismatch: expected {}, got {}".format( config[CONFIG_TYPE], cls_name)) # Sample hyperparameters. _propagate_hyper_names(config) if CONFIG_HYPER in config: # Type of config[CONFIG_HYPER] is checked in _propagate_hyper_names. config = config.copy() for key, hopt in config[CONFIG_HYPER].items(): # There can be unexpected hyperparameters for another implementation. # Skip them. if key not in default_config: continue config[key] = Hyper(**hopt).sample() # Merge configs. for key in config: if key in {CONFIG_TYPE, CONFIG_HYPER}: continue if key not in default_config: value = config[key] if isinstance(value, dict) and _is_hyper_only_config(value): # Subconfigs can contain hyper parameters for alternative configurations. pass else: raise ConfigError("Unknown parameter {}".format(key)) new_config = OrderedDict() for key, value in default_config.items(): new_config[key] = config.get(key, value) return new_config
5,341,447
def number_to_block(number, block_number=0): """ Given an address number, normalizes it to the block number. >>> number_to_block(1) '0' >>> number_to_block(10) '0' >>> number_to_block(100) '100' >>> number_to_block(5) '0' >>> number_to_block(53) '0' >>> number_to_block(153) '100' >>> number_to_block(1000) '1000' >>> number_to_block(1030) '1000' >>> number_to_block(1359) '1300' >>> number_to_block(13593) '13500' >>> number_to_block('01') '0' >>> number_to_block('00') '0' >>> number_to_block('foo') 'foo' >>> number_to_block('3xx') '300' >>> number_to_block('3XX') '300' >>> number_to_block('3pp') '3pp' >>> number_to_block('XX') '0' >>> number_to_block('X') 'X' block_number lets you customize the "XX" of "3XX block". >>> number_to_block(234, 99) '299' >>> number_to_block(12345, 99) '12399' """ number = re.sub('(?i)xx', '00', str(number)) try: number = int(number) except (TypeError, ValueError): return number return str(int(math.floor(number / 100.0)) * 100 + block_number)
5,341,448
def pf_active_overlay(ods, ax=None, **kw): """ Plots overlays of active PF coils. INCOMPLETE: only the oblique geometry definition is treated so far. More should be added later. :param ods: OMAS ODS instance :param ax: axes instance into which to plot (default: gca()) :param \**kw: Additional keywords scalex, scaley: passed to ax.autoscale_view() call at the end * Accepts standard omas_plot overlay keywords: mask, labelevery, notesize * Remaining keywords are passed to matplotlib.patches.Polygon call Hint: you may want to set facecolor instead of just color """ # Make sure there is something to plot or else just give up and return import matplotlib from matplotlib import pyplot nc = get_channel_count( ods, 'pf_active', check_loc='pf_active.coil.0.element.0.geometry.geometry_type', channels_name='coil', test_checker='checker > -1') if nc == 0: return if ax is None: ax = pyplot.gca() kw.setdefault('label', 'Active PF coils') kw.setdefault('facecolor', 'gray') kw.setdefault('edgecolor', 'k') kw.setdefault('alpha', 0.7) labelevery = kw.pop('labelevery', 0) notesize = kw.pop('notesize', 'xx-small') mask = kw.pop('mask', numpy.ones(nc, bool)) scalex, scaley = kw.pop('scalex', True), kw.pop('scaley', True) def path_rectangle(rectangle): """ :param rectangle: ODS sub-folder: element.*.geometry.rectangle :return: n x 2 array giving the path around the outline of the coil element, suitable for input to Polygon() """ x = rectangle['r'] y = rectangle['z'] dx = rectangle['width'] dy = rectangle['height'] return numpy.array([ [x - dx / 2., x - dx / 2., x + dx / 2., x + dx / 2.], [y - dy / 2., y + dy / 2., y + dy / 2., y - dy / 2.]]).T def path_outline(outline): """ :param outline: ODS sub-folder: element.*.geometry.outline :return: n x 2 array giving the path around the outline of the coil element, suitable for input to Polygon() """ return numpy.array([outline['r'], outline['z']]).T patches = [] for i in range(nc): # From iris:/fusion/usc/src/idl/efitview/diagnoses/DIII-D/coils.pro , 2018 June 08 D. Eldon if mask[i]: try: geometry_type = geo_type_lookup(ods['pf_active.coil'][i]['element.0.geometry.geometry_type'], 'pf_active', ods.imas_version) except (IndexError, ValueError): geometry_type = 'unrecognized' try: path = eval('path_{}'.format(geometry_type))( ods['pf_active.coil'][i]['element.0.geometry'][geometry_type]) except NameError: print('Warning: unrecognized geometry type for pf_active coil {}: {}'.format(i, geometry_type)) continue patches.append(matplotlib.patches.Polygon(path, closed=True, **kw)) kw.pop('label', None) # Prevent label from being placed on more than one patch try: pf_id = ods['pf_active.coil'][i]['element.0.identifier'] except ValueError: pf_id = None if (labelevery > 0) and ((i % labelevery) == 0) and (pf_id is not None): ax.text(numpy.mean(xarr), numpy.mean(yarr), pf_id, ha='center', va='center', fontsize=notesize) for p in patches: ax.add_patch(p) # Using patch collection breaks auto legend labeling, so add patches individually. ax.autoscale_view(scalex=scalex, scaley=scaley) # add_patch doesn't include this return
5,341,449
def distance_between_vehicles(self_vhc_pos, self_vhc_orientation, self_vhc_front_length, self_vhc_rear_length, self_vhc_width, ext_vhc_pos, ext_vhc_orientation, ext_vhc_width, ext_vhc_rear_length, ext_vhc_front_length): """Only in 2-D space (no z-axis in positions)""" ext_vhc_frnt_left = rotate_point_ccw([-ext_vhc_width, ext_vhc_front_length], -ext_vhc_orientation) + ext_vhc_pos ext_vhc_frnt_right = rotate_point_ccw([ext_vhc_width, ext_vhc_front_length], -ext_vhc_orientation) + ext_vhc_pos ext_vhc_rear_left = rotate_point_ccw([-ext_vhc_width, -ext_vhc_rear_length], -ext_vhc_orientation) + ext_vhc_pos ext_vhc_rear_right = rotate_point_ccw([ext_vhc_width, -ext_vhc_rear_length], -ext_vhc_orientation) + ext_vhc_pos ext_vhc_frnt_left_in_vhc_coord = rotate_point_ccw(ext_vhc_frnt_left - self_vhc_pos, -self_vhc_orientation) ext_vhc_frnt_right_in_vhc_coord = rotate_point_ccw(ext_vhc_frnt_right - self_vhc_pos, -self_vhc_orientation) ext_vhc_rear_left_in_vhc_coord = rotate_point_ccw(ext_vhc_rear_left - self_vhc_pos, -self_vhc_orientation) ext_vhc_rear_right_in_vhc_coord = rotate_point_ccw(ext_vhc_rear_right - self_vhc_pos, -self_vhc_orientation) ext_vehicle_lines = [[ext_vhc_frnt_left_in_vhc_coord, ext_vhc_frnt_right_in_vhc_coord], [ext_vhc_frnt_right_in_vhc_coord, ext_vhc_rear_right_in_vhc_coord], [ext_vhc_rear_right_in_vhc_coord, ext_vhc_rear_left_in_vhc_coord], [ext_vhc_rear_left_in_vhc_coord, ext_vhc_frnt_left_in_vhc_coord]] ext_vehicle_corners = [ext_vhc_frnt_left_in_vhc_coord, ext_vhc_frnt_right_in_vhc_coord, ext_vhc_rear_right_in_vhc_coord, ext_vhc_rear_left_in_vhc_coord] ego_points = [np.array([-self_vhc_width, self_vhc_front_length]), np.array([self_vhc_width, self_vhc_front_length]), np.array([-self_vhc_width, self_vhc_rear_length]), np.array([self_vhc_width, self_vhc_rear_length])] distance = math.inf # Compute the minimum distance from each corner of the external vehicle to the edges of the ego vehicle: # This is easier because the external vehicle is already represented in the ego vehicle's coordinate system. for ext_vehicle_corner in ext_vehicle_corners: if -self_vhc_width < ext_vehicle_corner[0] < self_vhc_width: x_dist = 0.0 elif ext_vehicle_corner[0] > self_vhc_width: x_dist = ext_vehicle_corner[0] - self_vhc_width else: x_dist = -self_vhc_width - ext_vehicle_corner[0] if -self_vhc_rear_length < ext_vehicle_corner[1] < self_vhc_front_length: y_dist = 0.0 elif ext_vehicle_corner[1] > self_vhc_front_length: y_dist = ext_vehicle_corner[1] - self_vhc_front_length else: y_dist = -self_vhc_rear_length - ext_vehicle_corner[1] temp_dist = math.sqrt(x_dist**2 + y_dist**2) distance = min(distance, temp_dist) # Compute the minimum distance from each corner of the ego vehicle to the edges of the external vehicle: for ego_point in ego_points: num_inside_pts = 0 for ext_vehicle_line in ext_vehicle_lines: (temp_dist, t) = line_dist(ext_vehicle_line[0], ext_vehicle_line[1], ego_point) if 0.0001 < t < 0.9999: # NOT (on a line or outside one of the lines). # When the closest point on the line is one end of the line (t==0 or t==1), # then the point is outside the rectangle. num_inside_pts += 1 else: distance = min(distance, temp_dist) if num_inside_pts == len(ext_vehicle_lines): distance = 0.0 if distance == 0.0: break return distance
5,341,450
def _complexity_lyapunov_checklength( n, delay=1, dimension=2, tolerance="default", len_trajectory=20, method="rosenstein1993", matrix_dim=4, min_neighbors="default", ): """Helper function that calculates the minimum number of data points required. """ if method in ["rosenstein", "rosenstein1993"]: # minimum length required to find single orbit vector min_len = (dimension - 1) * delay + 1 # we need len_trajectory orbit vectors to follow a complete trajectory min_len += len_trajectory - 1 # we need tolerance * 2 + 1 orbit vectors to find neighbors for each min_len += tolerance * 2 + 1 # Sanity check if n < min_len: warn( f"for dimension={dimension}, delay={delay}, tolerance={tolerance} and " + f"len_trajectory={len_trajectory}, you need at least {min_len} datapoints in your time series.", category=NeuroKitWarning, ) elif method in ["eckmann", "eckmann1996"]: m = (dimension - 1) // (matrix_dim - 1) # minimum length required to find single orbit vector min_len = dimension # we need to follow each starting point of an orbit vector for m more steps min_len += m # we need tolerance * 2 + 1 orbit vectors to find neighbors for each min_len += tolerance * 2 # we need at least min_nb neighbors for each orbit vector min_len += min_neighbors # Sanity check if n < min_len: warn( f"for dimension={dimension}, delay={delay}, tolerance={tolerance}, " + f"matrix_dim={matrix_dim} and min_neighbors={min_neighbors}, " + f"you need at least {min_len} datapoints in your time series.", category=NeuroKitWarning, )
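# Worked example for the Rosenstein branch: with dimension=2, delay=1,
# a numeric tolerance of 10 and len_trajectory=20, the minimum length is
# (2 - 1) * 1 + 1  +  (20 - 1)  +  (2 * 10 + 1)  =  42 data points,
# so a 30-point series triggers the NeuroKitWarning.
_complexity_lyapunov_checklength(30, delay=1, dimension=2, tolerance=10, len_trajectory=20)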
5,341,451
def compute_hits_importance_scores( hits_bed_path, shap_scores_hdf5_path, peak_bed_path, out_path ): """ For each MOODS hit, computes the hit's importance score as the ratio of the hit's average importance score to the total importance of the sequence. Arguments: `hits_bed_path`: path to BED file output by `collapse_hits` without the p-value column `shap_scores_hdf5_path`: an HDF5 of DeepSHAP scores of peak regions measuring importance `peak_bed_path`: BED file of peaks; we require that these coordinates must match the DeepSHAP score coordinates exactly `out_path`: path to output the resulting table Each of the DeepSHAP score HDF5s must be of the form: `coords_chrom`: N-array of chromosome (string) `coords_start`: N-array `coords_end`: N-array `hyp_scores`: N x L x 4 array of hypothetical importance scores `input_seqs`: N x L x 4 array of one-hot encoded input sequences Outputs an identical hit BED with an extra column for the importance score fraction. """ _, imp_scores, _, coords = util.import_shap_scores( shap_scores_hdf5_path, "hyp_scores", remove_non_acgt=False ) peak_table = pd.read_csv( peak_bed_path, sep="\t", header=None, index_col=False, usecols=[0, 1, 2], names=["peak_chrom", "peak_start", "peak_end"] ) assert np.all(coords == peak_table.values) hit_table = pd.read_csv( hits_bed_path, sep="\t", header=None, index_col=False, names=["chrom", "start", "end", "key", "strand", "score", "peak_index"] ) # Merge in the peak starts/ends to the hit table merged_hits = pd.merge( hit_table, peak_table, left_on="peak_index", right_index=True ) # Important! Reset the indices of `merged_hits` after merging, otherwise # iteration over the rows won't be in order merged_hits = merged_hits.reset_index(drop=True) # Compute start and end of each motif relative to the peak merged_hits["motif_rel_start"] = \ merged_hits["start"] - merged_hits["peak_start"] merged_hits["motif_rel_end"] = \ merged_hits["end"] - merged_hits["peak_start"] # Careful! Because of the merging step that only kept the top peak hit, some # hits might overrun the edge of the peak; we limit the motif hit indices # here so they stay in the peak; this should not be a common occurrence merged_hits["peak_min"] = 0 merged_hits["peak_max"] = \ merged_hits["peak_end"] - merged_hits["peak_start"] merged_hits["motif_rel_start"] = \ merged_hits[["motif_rel_start", "peak_min"]].max(axis=1) merged_hits["motif_rel_end"] = \ merged_hits[["motif_rel_end", "peak_max"]].min(axis=1) del merged_hits["peak_min"] del merged_hits["peak_max"] # Get score of each motif hit as average importance over the hit, divided # by the total score of the sequence scores = np.empty(len(merged_hits)) for peak_index, group in merged_hits.groupby("peak_index"): # Iterate over grouped table by peak score_track = np.sum(np.abs(imp_scores[peak_index]), axis=1) total_score = np.sum(score_track) for i, row in group.iterrows(): scores[i] = np.mean( score_track[row["motif_rel_start"]:row["motif_rel_end"]] ) / total_score merged_hits["imp_frac_score"] = scores new_hit_table = merged_hits[[ "chrom", "start", "end", "key", "strand", "score", "peak_index", "imp_frac_score" ]] new_hit_table.to_csv(out_path, sep="\t", header=False, index=False)
5,341,452
def searchlight_dictdata(faces, nrings, vertex_list): """ Function to generate neighbor vertex relationship for searchlight analysis The format of dictdata is [label]:[vertices] Parameters: ----------- faces: nrings: vertex_list: vertex-index relationship, e.g. vertex_list[29696] = 32492 Returns: -------- output_vx """ output_vx = {} vertex_list = list(vertex_list) index_dict = dict((value, idx) for idx,value in enumerate(vertex_list)) for i, vl in enumerate(vertex_list): print('{0}:{1}'.format(i+1, vl)) neighbor_vxidx = surf_tools.get_n_ring_neighbor(int(vl), faces, n=nrings)[0] neighbor_vxidx.intersection_update(set(index_dict.keys())) neighbor_vx = [index_dict[nv] for nv in neighbor_vxidx] output_vx[i] = neighbor_vx return output_vx
5,341,453
def parse_resource_uri(resource_uri):
    """
    Parse a resource uri (like /api/v1/prestataires/1/) and return the
    resource type and the object id.
    """
    match = resource_pattern.search(resource_uri)
    if not match:
        raise ValueError("Value %s is not a resource uri." % resource_uri)
    return match.group(1), match.group(2)
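# Usage sketch: resource_pattern is a module-level compiled regex assumed to
# look roughly like re.compile(r"/api/v1/(\w+)/(\d+)/").
resource_type, object_id = parse_resource_uri("/api/v1/prestataires/1/")
# resource_type == "prestataires", object_id == "1"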
5,341,454
def foldl(func: tp.Callable, acc, lst: List):
    """
    >>> foldl(lambda x, y: x + y, 0, Nil())
    0
    >>> foldl(lambda x, y: x + y, 2, from_seq([1, 2, 3]))
    8
    >>> foldl(lambda x, y: x - y, 1, from_seq([3, 2, 1]))
    -5
    """
    return acc if null(lst) else foldl(func, func(acc, head(lst)), tail(lst))
5,341,455
def _print_info_dict(info_dict: Dict[str, str]):
    """Print the information dictionary"""
    for key, stat in info_dict.items():
        print(f"{key:>10}: {stat}")
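# Usage sketch: every key is right-aligned to a 10-character column, printing
# "      mean: 0.52" and "    stddev: 0.11" for the input below.
_print_info_dict({"mean": "0.52", "stddev": "0.11"})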
5,341,456
def test_expand_substory_protocol_list_with_null(f): """ We expand protocol of composed story, if substory define protocol with list of strings and parent story does not define protocol. """ class T(f.ChildWithList, f.NormalMethod): pass class Q(f.ParentWithNull, f.NormalParentMethod, T): pass class J(f.ParentWithNull, f.NormalParentMethod): def __init__(self): self.x = T().x # Substory inheritance. assert Q().a.failures == ["foo", "bar", "baz"] result = Q().a() assert result is None result = Q().a.run() assert result.is_success assert result.value is None # Substory DI. assert J().a.failures == ["foo", "bar", "baz"] result = J().a() assert result is None result = J().a.run() assert result.is_success assert result.value is None
5,341,457
def generate_rules(F, support_data, min_confidence=0.5, verbose=True): """Generates a set of candidate rules from a list of frequent itemsets. For each frequent itemset, we calculate the confidence of using a particular item as the rule consequent (right-hand-side of the rule). By testing and merging the remaining rules, we recursively create a list of pruned rules. Parameters ---------- F : list A list of frequent itemsets. support_data : dict The corresponding support data for the frequent itemsets (L). min_confidence : float The minimum confidence threshold. Defaults to 0.5. Returns ------- rules : list The list of candidate rules above the minimum confidence threshold. """ rules = [] for i in range(1, len(F)): for freq_set in F[i]: H1 = [frozenset([itemset]) for itemset in freq_set] if (i > 1): rules_from_conseq(freq_set, H1, support_data, rules, min_confidence, verbose) else: calc_confidence(freq_set, H1, support_data, rules, min_confidence, verbose) return rules
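# Hypothetical usage sketch: F and support_data would normally come from an
# Apriori frequent-itemset pass; calc_confidence and rules_from_conseq are
# assumed to live in the same module. With the supports below, only
# {beer} -> {diapers} (confidence 1.0) should clear min_confidence=0.8.
F = [
    [frozenset({'beer'}), frozenset({'diapers'})],   # frequent 1-itemsets
    [frozenset({'beer', 'diapers'})],                # frequent 2-itemsets
]
support_data = {
    frozenset({'beer'}): 0.5,
    frozenset({'diapers'}): 0.75,
    frozenset({'beer', 'diapers'}): 0.5,
}
rules = generate_rules(F, support_data, min_confidence=0.8, verbose=False)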
5,341,458
def cond_model(model1, model2):
    """Conditional.

    Arguments:
        model1 {MentalModel} -- antecedent
        model2 {MentalModel} -- consequent

    Returns:
        MentalModel -- the conditional model
    """
    mental = and_model(model1, model2)
    mental.ell += 1
    fully = merge_fullex(
        and_model(model1, model2),
        and_model(not_model(model1), not_model(model2)),
        and_model(not_model(model1), model2)
    )
    return merge_mental_and_full(mental, fully)
5,341,459
def get_pages():
    """Select all pages and order them by page_order."""
    pages = query_db("SELECT page_order, name, shortname, available FROM pages ORDER BY page_order")
    return pages
5,341,460
def set_default_interface(etree):
    """
    Sets the default interface that PyAMF will use to deal with XML entities
    (both objects and blobs).
    """
    global types, ET, modules

    t = _get_etree_type(etree)

    _types = set(types or [])
    _types.update([t])

    types = tuple(_types)
    modules[t] = etree

    old, ET = ET, etree

    return old
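# Usage sketch: register the standard library's ElementTree implementation and
# get back whichever etree module was active before.
import xml.etree.ElementTree as stdlib_etree

previous = set_default_interface(stdlib_etree)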
5,341,461
def atom_site(block):
    """Handle ATOM_SITE block.

    Data items in the ATOM_SITE category record details about the atom sites
    in a macromolecular crystal structure, such as the positional coordinates,
    atomic displacement parameters, magnetic moments and directions.

    (source: http://mmcif.wwpdb.org/dictionaries/mmcif_pdbx_v50.dic/Categories/atom_site.html)

    Args:
        block:  Pdbx data block
    Returns:
        pdb_arr:  array of pdb.ATOM/pdb.HETATM (and MODEL/ENDMDL) objects
        err_arr:  array of record names that could not be converted
    """

    def build_line(atoms, i):
        """Format row i of the atom_site category as a fixed-width PDB-style line."""
        line = ""
        # 1 - 6 RECORD NAME (ATOM or HETATM)
        group = atoms.getValue("group_PDB", i)
        line += group + " "*(6 - len(group))
        # 7 - 11 ATOM SERIAL
        serial = str(atoms.getValue("id", i))
        line += " "*(5 - len(serial)) + serial
        # 12 - 13
        line += " "
        # 14 - 16 ATOM NAME
        name = atoms.getValue("label_atom_id", i)
        line += name + " "*(3 - len(name))
        # 17 ALT LOCATION
        alt_loc = atoms.getValue("label_alt_id", i)
        line += " " if alt_loc == "." else alt_loc
        # 18 - 20 RES NAME
        res_name = atoms.getValue("label_comp_id", i)
        line += " "*(3 - len(res_name)) + res_name
        # 21
        line += " "
        # 22 CHAIN ID
        chain_id = atoms.getValue("label_asym_id", i)
        line += " "*(1 - len(chain_id)) + chain_id
        # 23 - 26 RES SEQ ID
        res_seq = str(atoms.getValue("auth_seq_id", i))
        line += " "*(4 - len(res_seq)) + res_seq
        # 27 - 30
        line += " "*3
        # 31 - 38, 39 - 46, 47 - 54  X, Y, Z COORDS
        for key in ("Cartn_x", "Cartn_y", "Cartn_z"):
            coord = str(atoms.getValue(key, i))
            line += " "*(8 - len(coord)) + coord
        # 55 - 60 OCCUPANCY
        occupancy = str(atoms.getValue("occupancy", i))
        line += " "*(6 - len(occupancy)) + occupancy
        # 61 - 66 TEMP FACTOR
        temp_factor = str(atoms.getValue("B_iso_or_equiv", i))
        line += " "*(6 - len(temp_factor)) + temp_factor
        # 67 - 76
        line += " "*10
        # 77 - 78 ELEMENT SYMBOL
        element = atoms.getValue("type_symbol", i)
        line += " "*(2 - len(element)) + element
        # 79 - 80 CHARGE OF ATOM
        charge = atoms.getValue("pdbx_formal_charge", i)
        line += " "*2 if charge == "?" else charge
        return line

    pdb_arr = []
    err_arr = []
    atoms = block.getObj("atom_site")
    num_model_arr = count_models(block)

    if len(num_model_arr) == 1:
        for i in range(atoms.getRowCount()):
            group = atoms.getValue("group_PDB", i)
            if group not in ("ATOM", "HETATM"):
                continue
            try:
                line = build_line(atoms, i)
                if group == "ATOM":
                    pdb_arr.append(pdb.ATOM(line))
                else:
                    pdb_arr.append(pdb.HETATM(line))
            except Exception:
                _LOGGER.error("atom_site: Error reading row %s (%s)", i, group)
                err_arr.append(group)
    else:
        for j in num_model_arr:
            try:
                line = "MODEL "
                line += " "*4
                line += " "*(4 - len(str(j))) + str(j)
                pdb_arr.append(pdb.MODEL(line))
            except Exception:
                _LOGGER.error("atom_site: Error reading line:\n%s", line)
                err_arr.append("MODEL")

            for i in range(atoms.getRowCount()):
                if atoms.getValue("pdbx_PDB_model_num", i) != j:
                    continue
                group = atoms.getValue("group_PDB", i)
                if group not in ("ATOM", "HETATM"):
                    continue
                try:
                    line = build_line(atoms, i)
                    if group == "ATOM":
                        pdb_arr.append(pdb.ATOM(line))
                    else:
                        pdb_arr.append(pdb.HETATM(line))
                except Exception:
                    _LOGGER.error("atom_site: Error reading row %s (%s)", i, group)
                    err_arr.append(group)

            try:
                line = "ENDMDL"
                pdb_arr.append(pdb.ENDMDL(line))
            except Exception:
                _LOGGER.error("atom_site: Error reading line:\n%s", line)
                err_arr.append("ENDMDL")

    return pdb_arr, err_arr
5,341,462
def return_var_plot(result, attr_name, attr_type, option=0): """Method that generates the corresponding plot for each attribute, based on the type and the selection of the user.""" aval = f'{attr_name}_value' if attr_type == 'NUMBER' or attr_type == 'DATE_TIME': if aval not in result[0].keys(): return None vals = [r[aval] for r in result] if option == 0: fig = px.histogram(x=vals) fig.update_yaxes(title='Frequency') elif option == 1: fig = px.box(x=vals) fig.update_xaxes(title=attr_name.capitalize()) return fig elif attr_type == 'GEOLOCATION': #location if aval not in result[0].keys(): return None pois = [tuple(map(float, r[aval][7:-1].split(' '))) for r in result] x, y = zip(*pois) minx, miny, maxx, maxy = min(x), min(y), max(x), max(y) bb = box(minx, miny, maxx, maxy) map_center = [bb.centroid.y, bb.centroid.x] m = folium.Map(location=map_center, tiles='OpenStreetMap', width='100%', height='100%') m.fit_bounds(([bb.bounds[1], bb.bounds[0]], [bb.bounds[3], bb.bounds[2]])) m.add_child(Fullscreen()) if option == 0: coords, popups = [], [] poi_layer = folium.FeatureGroup(name='pois') for r, yy, xx in zip(result, y, x): coords.append([yy, xx]) popups.append(__prepare_popup(r)) poi_layer.add_child(MarkerCluster(locations=coords, popups=popups)) m.add_child(poi_layer) folium.GeoJson(bb).add_to(m) elif option == 1: scores = [r['score'] for r in result] HeatMap(zip(y,x,scores), radius=10).add_to(m) elif option == 2: if 'keywords_value' not in result[0].keys(): return None kwds = [r['keywords_value'] for r in result] scores = [r['score'] for r in result] labels, eps = compute_clusters(pois) pois = [Point(poi) for poi in pois] d = {'geometry': pois, 'kwd': kwds, 'score': scores, 'cluster_id': labels} gdf = GeoDataFrame(d, crs="EPSG:4326") gdf = gdf[gdf.cluster_id >= 0] aois = cluster_shapes(gdf, eps).set_index('cluster_id') means = gdf.groupby('cluster_id').agg({'score': 'mean', 'kwd': lambda x: ' '.join(x)}) clustered_keys = pd.concat([aois, means], axis=1).reset_index(drop=False) bins = list(clustered_keys['score'].quantile([0, 0.25, 0.5, 0.75, 1])) folium.Choropleth(geo_data=clustered_keys, data=clustered_keys, columns=['cluster_id','score'], bins=bins, key_on='feature.properties.cluster_id', fill_color='YlOrRd', fill_opacity=0.6, line_opacity=0.5).add_to(m) wc = WordCloud(width = 200, height = 150, random_state=1, background_color='salmon', colormap='Pastel1', collocations=False, stopwords = STOPWORDS) for index, row in clustered_keys.iterrows(): c = Counter(row['kwd']) s = wc.generate_from_frequencies(c) plt.imshow(s, interpolation='bilinear') plt.axis("off") buf = BytesIO() plt.savefig(buf, format='png', bbox_inches='tight') # Include image popup to the marker html = '<img src="data:image/PNG;base64,{}" style="width:100%; height:100%; display:block">'.format encoded = base64.b64encode(buf.getvalue()).decode() iframe = IFrame(html(encoded), width=300, height=150) popup = folium.Popup(iframe, min_width=300, max_width=300, parse_html=True) # max_width=2650 buf.close() folium.GeoJson(row['geometry']).add_child(popup).add_to(m) return m.get_root().render() elif attr_type == 'KEYWORD_SET': if aval not in result[0].keys(): return None wc = WordCloud(width = 400, height = 300, random_state=1, background_color='salmon', colormap='Pastel1', collocations=False, stopwords = STOPWORDS) c = Counter() for r in result: c.update(r[aval]) s = wc.generate_from_frequencies(c) if option == 0: fig = px.imshow(s, labels={}) fig.update_xaxes(showticklabels=False) fig.update_yaxes(showticklabels=False) 
fig.update_traces(hovertemplate=None, hoverinfo='skip' ) return fig elif option == 1: df = pd.DataFrame(c.most_common(10), columns=['Word', 'Frequency']) df = df.sort_values('Frequency', ascending=True) fig = px.bar(df, x="Frequency", y="Word", orientation='h') fig.update_yaxes(title=None) return fig
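
# Usage sketch (illustrative only): return_var_plot expects records shaped like the
# query results used elsewhere, i.e. dicts keyed by '<attr_name>_value' plus 'score'
# (and 'keywords_value' where relevant). The records and attribute name below are made up.
example_result = [
    {'rating_value': 4.2, 'score': 0.91},
    {'rating_value': 3.7, 'score': 0.85},
    {'rating_value': 4.9, 'score': 0.78},
]
fig = return_var_plot(example_result, 'rating', 'NUMBER', option=0)  # histogram of rating values
# option=1 would return a box plot instead; fig.show() to display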
5,341,463
def get_inequivalent_sites(sub_lattice, lattice):
    """Given a sub lattice, returns symmetry unique sites for substitutions.

    Args:
        sub_lattice (list of lists): array containing Cartesian coordinates
            of the sub-lattice of interest
        lattice (ASE crystal): the total lattice

    Returns:
        List of sites

    """
    sg = get_sg(lattice)
    inequivalent_sites = []
    for site in sub_lattice:
        new_site = True
        # Check against the existing members of the list of inequivalent sites
        for inequiv_site in inequivalent_sites:
            if smact.are_eq(site, inequiv_site):
                new_site = False
            # Check against symmetry related members of the list of inequivalent sites
            equiv_inequiv_sites, _ = sg.equivalent_sites(inequiv_site)
            for equiv_inequiv_site in equiv_inequiv_sites:
                if smact.are_eq(site, equiv_inequiv_site):
                    new_site = False
        if new_site:
            inequivalent_sites.append(site)
    return inequivalent_sites
5,341,464
def test_re_mod_search(verbose=None):
    """ some tests borrowed from cpython testing re.search(), and matobj.group() """
    regex_tests = testsuite.search_regex_tests
    for t in regex_tests:
        pattern = s = outcome = repl = expected = None
        if len(t) == 5:
            pattern, s, outcome, repl, expected = t
        elif len(t) == 3:
            pattern, s, outcome = t
        else:
            raise ValueError('Test tuples should have 3 or 5 fields', t)

        try:
            matobj = re.search(pattern, s)
        except Exception as exc:
            if outcome == SYNTAX_ERROR:
                # This should have been a syntax error; forget it.
                continue
            else:
                print('=== Unexpected exception:', exc, pattern, s)
                continue
        if outcome == FAIL:
            if matobj is None:
                pass   # No match, as expected
            else:
                print('=== Succeeded incorrectly', matobj, pattern, s)
        elif outcome == SUCCEED:
            if matobj is not None:
                # Matched, as expected, so now we compute the
                # result string and compare it to our expected result.
                found = matobj.group(0)
                repl = repl.replace("found", str(found))
                for i in range(1, 11):
                    if "g" + str(i) in repl:
                        gi = str(matobj.group(i))
                        repl = repl.replace("g" + str(i), gi)
                if len(t) == 5:
                    repl = repl.replace('+', '')
                    repl = repl.replace('\"', '')
                    if repl != expected:
                        print('=== grouping error', t,
                              str(repl) + ' should be ' + str(expected))
                        RAISE()
            else:
                print('=== Failed incorrectly', t)
5,341,465
def cone_emline(ra, dec, radius=5, selectcol=['specObjID', 'ra', 'dec', 'z', 'zErr', 'bpt', 'Flux_Ha_6562', 'Flux_NII_6583', 'Flux_Hb_4861', 'Flux_OIII_5006']):
    """ cone search in the emissionLinesPort table
        ra, dec in degrees, radius in arcsec.
        Columns described in http://skyserver.sdss.org/dr16/en/help/browser/browser.aspx?cmd=description+emissionLinesPort+U#&&history=description+emissionLinesPort+U
        TODO: use selectcol in sql query
    """
    # dumb way: box search (unused)
    # query = """SELECT TOP 10 emline.*\nFROM emissionLinesPort AS emline\nWHERE ra > {0} and ra < {1}\nAND dec > {2} and dec < {3}""".format(ra-size/2, ra+size/2, dec-size/2, dec+size/2)
    colstr = ', '
    query = """SELECT TOP 10 G.specobjID, G.ra, G.dec, G.z, G.bpt, G.Flux_Ha_6562, G.Flux_NII_6583, G.Flux_Hb_4861, G.Flux_OIII_5006, N.distance\nFROM emissionLinesPort as G\nJOIN dbo.fGetNearbySpecObjEq({0}, {1}, {2}) AS N\nON G.specobjID = N.specobjID""".format(ra, dec, radius/60)
    jobs = mastcasjobs.MastCasJobs(context="SDSSDR14")
    tab = jobs.quick(query, task_name="python emission line cone search")
    return tab
5,341,466
def kld(means, var): """KL divergence""" mean = torch.zeros_like(means) scale = torch.ones_like(var) return kl_divergence(Normal(means, torch.sqrt(var)), Normal(mean, scale)).sum(dim=1)
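
# Quick check (illustrative, assuming torch is imported at module level as above):
# for a diagonal Gaussian N(mu, var) against N(0, 1), the summed KL should match
# the closed form 0.5 * sum(var + mu^2 - 1 - log(var)).
means = torch.tensor([[0.5, -1.0]])
var = torch.tensor([[1.5, 0.25]])
analytic = 0.5 * (var + means ** 2 - 1.0 - torch.log(var)).sum(dim=1)
assert torch.allclose(kld(means, var), analytic)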
5,341,467
def add(vec_1, vec_2): """ This function performs vector addition. This is a good place to play around with different collection types (list, tuple, set...), :param vec_1: a subscriptable collection of length 3 :param vec_2: a subscriptable collection of length 3 :return vec_3: a subscriptable collection of length 3 """ # add two vectors vec_3 = [float(vec_1[0]) + float(vec_2[0]), float(vec_1[1]) + float(vec_2[1]), float(vec_1[2]) + float(vec_2[2])] return vec_3
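
# Example usage: mixed collection types are fine, since only indexing is required.
print(add([1.0, 2.0, 3.0], (0.5, -2.0, 4.0)))   # [1.5, 0.0, 7.0]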
5,341,468
def generateOrnament(fromMIDINote:int, key:Key, mode:ModeNames, bpm:float) -> Union[List[Any],None]:
    """
    Generate OSC arguments describing ornaments, with the form:
        [ <ornamentName> <BPM> <beatSubdivision> [<listOfOrnamentNoteMIDIOffsets...>] ]
    ASSUME: this function is called on every beat, or with some organic regularity,
    so output over time is roughly consistent with itself.
    Maintain module internal state to govern frequency and type of ornaments produced.
    Random filters to manage internal state are arbitrary, specific and experimental.  YMMV.
    Call generateOrnamentReset() to reset ornament module internal state.
    """
    ornamentChoice :str       = None
    ornamentBlob   :List[Any] = None
    oscArgs        :List[Any] = []
    fourCount      :int       = 4
    global ornamentState
    #
    if ornamentState["sixteenthTripletTurnaround"] > 0:        # Check existing state.
        ornamentState["sixteenthTripletTurnaround"] -= 1
        if ornamentState["sixteenthTripletTurnaround"] == 2:
            if z.percentTrue(35):
                ornamentChoice = "sixteenthPop"
    if not ornamentChoice:
        if z.percentTrue(70):
            return None             # Frequency to bypass ornaments.
        ornamentChoice = random.choice(list(Ornaments.keys()))
    #
    if "sixteenthLeadIn" == ornamentChoice:
        pass
    elif "sixteenthPop" == ornamentChoice:
        if ornamentState["sixteenthTripletTurnaround"] > 0 \
        and ornamentState["sixteenthTripletTurnaround"] != 2:
            return None
    elif "sixteenthTripletTurnaround" == ornamentChoice:
        # Generate no more often than once every fourCount.
        # Optionally generate "sixteenthPop" at half-way (above).
        #
        if ornamentState["sixteenthTripletTurnaround"] > 0:
            return None
        ornamentState["sixteenthTripletTurnaround"] = fourCount
    else:
        log.error(f"UNRECOGNIZED ornament choice. ({ornamentChoice})")
        return None
    #
    ornamentBlob = _translateOrnamentScaleToMIDI(ornamentChoice, fromMIDINote, key, mode)
    if not ornamentBlob:
        return None
    oscArgs  = [ornamentChoice, bpm, ornamentBlob[0]]
    oscArgs += ornamentBlob[1]
    return oscArgs
5,341,469
def ms_to_samples(ms, sampling_rate): """ Convert a duration in milliseconds into samples. Arguments: ms (float): Duration in ms. sampling_rate (int): Sampling rate of of the signal. Returns: int: Duration in samples. """ return int((ms / 1000) * sampling_rate)
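
# Example: a 25 ms window at a 16 kHz sampling rate corresponds to 400 samples.
assert ms_to_samples(25, 16000) == 400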
5,341,470
def flatten(L): """Flatten a list recursively Inspired by this fun discussion: https://stackoverflow.com/questions/12472338/flattening-a-list-recursively np.array.flatten did not work for irregular arrays and itertools.chain.from_iterable cannot handle arbitrarily nested lists :param L: A list to flatten :return: the flattened list """ if L == []: return L if isinstance(L[0], list): return flatten(L[0]) + flatten(L[1:]) return L[:1] + flatten(L[1:])
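
# Example usage with an irregularly nested list:
print(flatten([1, [2, [3, 4]], [], [5]]))   # [1, 2, 3, 4, 5]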
5,341,471
def _has_access_to_course(user, access_level, course_key): """ Returns True if the given user has access_level (= staff or instructor) access to the course with the given course_key. This ensures the user is authenticated and checks if global staff or has staff / instructor access. access_level = string, either "staff" or "instructor" """ if user is None or (not user.is_authenticated): debug("Deny: no user or anon user") return ACCESS_DENIED if is_masquerading_as_student(user, course_key): return ACCESS_DENIED global_staff, staff_access, instructor_access = administrative_accesses_to_course_for_user(user, course_key) if global_staff: debug("Allow: user.is_staff") return ACCESS_GRANTED if access_level not in ('staff', 'instructor'): log.debug("Error in access._has_access_to_course access_level=%s unknown", access_level) debug("Deny: unknown access level") return ACCESS_DENIED if staff_access and access_level == 'staff': debug("Allow: user has course staff access") return ACCESS_GRANTED if instructor_access and access_level in ('staff', 'instructor'): debug("Allow: user has course instructor access") return ACCESS_GRANTED debug("Deny: user did not have correct access") return ACCESS_DENIED
5,341,472
def occupy_region(image, center_x, center_y, buffer, color):
    """ 'Occupy' a region around (x,y) with the given color """
    # Use integer division so the range() bounds stay integers.
    for x in range(center_x - buffer // 2, center_x + buffer // 2):
        for y in range(center_y - buffer // 2, center_y + buffer // 2):
            try:
                image.putpixel((x, y), color)
            except IndexError:
                # We tried to modify a pixel out of bounds (e.g. a negative coordinate)
                continue
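
# Minimal usage sketch (assumes the image is a Pillow RGB image; values are arbitrary):
from PIL import Image

img = Image.new("RGB", (64, 64))
occupy_region(img, center_x=10, center_y=10, buffer=6, color=(255, 0, 0))
# The 6x6 block around (10, 10) is now red; out-of-bounds pixels would be skipped.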
5,341,473
def recreate_instance(source_instance_name, target_instance_name, instance_class):
    """
    Re-create the instance named target_instance_name from the latest snapshot
    of source_instance_name.
    :param str source_instance_name: name of the DB instance whose latest snapshot is used as the source
    :param str target_instance_name: name of the DB instance to re-create
    :param str instance_class: RDS instance class
    :return:
    """
    default_logger.info('start RDS Re Maker.')

    # Give the newly created instance a time-based suffix for now
    tmp_new_instance_name = '{base_name}-{time_suffix}'.format(
        base_name=target_instance_name,
        time_suffix=datetime.datetime.now().strftime('%m%d%H%M')
    )
    # Give the existing DB a tmp suffix when renaming it
    tmp_renamed_instance_name = '{base_name}-tmp'.format(base_name=target_instance_name)

    rds_maker = RdsMaker(
        region_name=REGION_NAME,
        az_name=AZ_NAME,
        aws_access_key=AWS_ACCESS_KEY,
        aws_rds_secret_key=AWS_RDS_SECRET_KEY,
        logger=default_logger
    )
    if not rds_maker.is_db_exist(target_instance_name):
        default_logger.critical('{} does not exist.'.format(target_instance_name))
        sys.exit(1)

    snapshot_identifier = rds_maker.get_latest_snapshot(source_instance_name)

    # Create the DB under the temporary name
    rds_maker.create_db_instance_sync(
        db_identifier=tmp_new_instance_name,
        snapshot_identifier=snapshot_identifier,
        instance_class=instance_class,
    )
    # Change the attributes that cannot be set at creation time
    rds_maker.change_db_instance_attributes_sync(db_identifier=tmp_new_instance_name, attributes={
        'DBParameterGroupName': DB_PARAMETER_GROUP,
        'BackupRetentionPeriod': 7,
        'VpcSecurityGroupIds': [VPC_SECURITY_GROUP_ID],
    })
    # Rename the existing instance
    rds_maker.rename_db_instance_sync(from_identifier=target_instance_name,
                                      to_identifier=tmp_renamed_instance_name)
    # Rename the newly created instance so it takes over the original name
    rds_maker.rename_db_instance_sync(from_identifier=tmp_new_instance_name,
                                      to_identifier=target_instance_name)
    # Delete the original instance
    snapshot_name = rds_maker.delete_db_instance(db_identifier=tmp_renamed_instance_name)

    default_logger.info('end RDS Re Maker.')
5,341,474
def download_and_mosaic_through_ftps(file_list, tmp_path, cds_url, cds_path, cds_sso, cds_pw, bbox, crs, geoTransform): """ Download Copernicus DEM tiles and create mosaic according to satellite imagery tiling scheme file_list : list with strings list of DEM tile filenames tmp_path : string work path where to download and untar DEM tiles cds_url : string Copernicus Data Service url cds_path : string data directory of interest cds_sso : string single sign-in cds_pw : string pass word bbox : list bound box formated as (x_min, y_min, x_max, y_max) crs : string coordinate reference string geoTransform : tuple, size=(6,1) affine transformation coefficients of the DEM tile Returns ------- dem_clip : DataArray object retiled DEM """ with tempfile.TemporaryDirectory(dir=tmp_path) as tmpdir: for file_name in file_list: get_file_from_ftps(cds_url, cds_sso, cds_pw, cds_path, file_name, tmpdir) tar_tiles_filenames = [f for f in os.listdir(tmpdir) if f.endswith('.tar')] for tar_fname in tar_tiles_filenames: tar_file = tarfile.open(os.path.join(tmpdir, tar_fname), mode="r|") tar_file.extractall(members=copDEM_files(tar_file), path=tmpdir) tar_file.close() dem_tiles_filename = pathlib.Path(tmpdir).glob("**/*_DEM.tif") dem_clip = mosaic_tiles(dem_tiles_filename, bbox, crs, geoTransform) # sometimes out of bound tiles are still present, # hence rerun a clip to be sure # dem_clip = dem_clip.rio.clip_box(*bbox) return dem_clip
5,341,475
def chi2(observed, expected): """ Return the chi2 sum of the provided observed and expected values. :param observed: list of floats. :param expected: list of floats. :return: chi2 (float). """ if 0 in expected: return 0.0 return sum((_o - _e) ** 2 / _e ** 2 for _o, _e in zip(observed, expected))
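
# Example (illustrative): note that each term is divided by the *square* of the
# expected value, following the formula used above.
observed = [10.0, 12.0, 9.0]
expected = [10.0, 10.0, 10.0]
print(chi2(observed, expected))   # (0 + 4 + 1) / 100 = 0.05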
5,341,476
def purgeThisMessageFromTheAttachments(aMessage): """ this function is recursive, if the message contains multipart message the function is recursively called to purge all the attachments""" partsToDelete = [] #empty list index = 0 list = aMessage.get_payload() #end of recursion, the payload is a string if type(list) == type(""): return for part in list: maintype = part.get_content_maintype() print maintype #I mark this part for delete, because it is not text if ( maintype != "text" and maintype != "multipart" and maintype != "message"): # Also the message type is a kind of multipart partsToDelete.append(index) if (maintype == "multipart" or maintype == "message"): #recursive call purgeThisMessageFromTheAttachments(part) index = index + 1 #I can now delete the parts listParts = aMessage.get_payload() offset = 0 #DEBUG prints #print "I should delete these parts" #print partsToDelete #print "The message has these parts" #print listParts for indexToDelete in partsToDelete: #print indexToDelete indexToDelete = indexToDelete - offset #let's save the part that we wish to delete. filename = listParts[indexToDelete].get_filename() if not filename: ext = mimetypes.guess_extension(part.get_type()) if not ext: #generic extension ext = ".bin" filename = "part-%03d%s" % (indexToDelete, ext) fp = open (ATTACHMENTS_DIR + filename, "wb") fp.write(listParts[indexToDelete].get_payload(decode=1)) fp.close() del listParts[indexToDelete] offset = offset + 1 #print listParts
5,341,477
async def test_smartapp_sync_subscriptions_handles_exceptions( opp, smartthings_mock, device_factory, subscription_factory ): """Test synchronization does nothing when current.""" smartthings_mock.delete_subscription.side_effect = Exception smartthings_mock.create_subscription.side_effect = Exception smartthings_mock.subscriptions.return_value = [ subscription_factory(Capability.battery), subscription_factory(Capability.switch), subscription_factory(Capability.switch_level), ] devices = [ device_factory("", [Capability.thermostat, "ping"]), device_factory("", [Capability.switch, Capability.switch_level]), device_factory("", [Capability.switch]), ] await smartapp.smartapp_sync_subscriptions( opp, str(uuid4()), str(uuid4()), str(uuid4()), devices ) assert smartthings_mock.subscriptions.call_count == 1 assert smartthings_mock.delete_subscription.call_count == 1 assert smartthings_mock.create_subscription.call_count == 1
5,341,478
def range_bearing(p1: LatLon, p2: LatLon, R: float = NM) -> tuple[float, Angle]: """Rhumb-line course from :py:data:`p1` to :py:data:`p2`. See :ref:`calc.range_bearing`. This is the equirectangular approximation. Without even the minimal corrections for non-spherical Earth. :param p1: a :py:class:`LatLon` starting point :param p2: a :py:class:`LatLon` ending point :param R: radius of the earth in appropriate units; default is nautical miles. Values include :py:data:`KM` for kilometers, :py:data:`MI` for statute miles and :py:data:`NM` for nautical miles. :returns: 2-tuple of range and bearing from p1 to p2. """ d_NS = R * (p2.lat.radians - p1.lat.radians) d_EW = ( R * math.cos((p2.lat.radians + p1.lat.radians) / 2) * (p2.lon.radians - p1.lon.radians) ) d = math.hypot(d_NS, d_EW) tc = math.atan2(d_EW, d_NS) % (2 * math.pi) theta = Angle(tc) return d, theta
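
# Numeric sketch of the equirectangular formula itself, bypassing the LatLon/Angle
# wrappers (the nautical-mile radius below is an assumed constant, not the module's NM):
# one degree of latitude due north is roughly 60 nautical miles with bearing 0.
import math
NM_R = 3440.07
lat1, lon1 = math.radians(50.0), math.radians(-4.0)
lat2, lon2 = math.radians(51.0), math.radians(-4.0)
d_NS = NM_R * (lat2 - lat1)
d_EW = NM_R * math.cos((lat2 + lat1) / 2) * (lon2 - lon1)
print(math.hypot(d_NS, d_EW))                    # ~60.0 NM
print(math.atan2(d_EW, d_NS) % (2 * math.pi))    # 0.0 rad, i.e. due north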
5,341,479
def cf_model_to_life(first_best, update_prod=False, pr_cache=False): """ We simulate the response of several variables to a shock to z and x. We fixed the cross-section distribution of (X,Z) and set rho to rho_start We apply a permanent shock to either X or Z, and fix the employment relationship, as well as (X,Z) We then simulate forward the Rho, and the wage, and report several different variable of interest. """ nt = 20*4 np.random.seed(JMP_CONF['seeds']['model_to_life']) # we load the model model = wd.FullModel.load("res_main_model.pkl") p = model.p p.tax_expost_tau = p.tax_tau p.tax_expost_lambda = p.tax_lambda # we simulate from the model to get a cross-section sim = wd.Simulator(model, p) sdata = sim.simulate().get_sdata() # we construct the different starting values tm = sdata['t'].max() d0 = sdata.query('e==1 & t==@tm')[['x','z','h','r']] # we start at target rho R0 = model.target_rho[ (d0['z'],d0['x']) ] # starting with X and Z shocks def get_z_pos(pr): Z1_pos = np.minimum(sdata['z'].max(), d0['z'] + 1) Z1_pos = np.where(np.random.uniform(size=len(Z1_pos)) > pr, Z1_pos, d0['z'] ) return(Z1_pos) def get_z_neg(pr): Z1_neg = np.maximum(0, d0['z'] - 1) Z1_neg = np.where(np.random.uniform(size=len(Z1_neg)) > pr, Z1_neg, d0['z'] ) return(Z1_neg) def get_x_pos(pr): Xtrans_pos = np.array([1,2,3,4,4,6,7,8,9,9,11,12,13,14,14],int) X1_pos = Xtrans_pos[d0['x']] X1_pos = np.where(np.random.uniform(size=len(X1_pos)) > pr, X1_pos, d0['x'] ) return(X1_pos) def get_x_neg(pr): Xtrans_neg = np.array([0,0,1,2,3, 5,5,6,7,8, 10,10,11,12,13],int) X1_neg = Xtrans_neg[d0['x']] X1_neg = np.where( np.random.uniform(size=len(X1_neg)) > pr, X1_neg, d0['x'] ) return(X1_neg) # simulate a control group var_name = {'x':r'worker productivity $x$', 'w':r'log earnings $\log w$', 'W1':'worker promised value $V$', 'lceq':'worker cons. 
eq.', 'Pi':r'firm present value $J(x,z,V)$', 'y':r'log match output $\log f(x,z)$', 'pr_j2j':'J2J probability', 'pr_e2u':'E2U probability', 'target_wage':r'log of target wage $\log w^*(x,z)$', 'vs':'worker search decision $v_1$', 'effort':'effort cost $c(e)$'} var_list = { k:'mean' for k in var_name.keys() } def sim_agg(dd): # compute consumption equivalent for W1 dd['lceq'] = model.pref.log_consumption_eq(dd['W1']) dd['lpeq'] = model.pref.log_profit_eq(dd['W1']) return(dd.groupby('t').agg(var_list)) if first_best: model_fb = wd.FullModel.load("res_main_model_fb.pkl") for iz in range(model_fb.p.num_z): for ix in range(model_fb.p.num_x): model_fb.rho_star[iz,:,ix] = model_fb.rho_grid sim.model = model_fb # let's find rho_star for the first best model I=range(p.num_v)[::-1] R0_fb = np.zeros((p.num_z,p.num_x)) for ix in range(p.num_x): for iz in range(p.num_z): R0_fb[iz,ix] = np.interp( 0.0, model_fb.Vf_J[iz,I,ix], model_fb.rho_grid[I]) R0 = R0_fb[ (d0['z'],d0['x']) ] sdata0 = sim_agg(sim.simulate_force_ee(d0['x'],d0['z'],d0['h'],R0, nt, update_x=False, update_z=False, pb=True)) # we run for a grid of probabilities if pr_cache: with open("res_cf_pr_fb{}.json".format(first_best)) as f: all = json.load(f) else: all = [] vec = np.linspace(0,1,10) for i in range(len(vec)): logging.info("simulating {}/{}".format(i, len(vec))) res = {} res['pr'] = vec[i] pr = vec[i] res['x_pos'] = sim.simulate_force_ee( get_x_pos(pr), d0['z'],d0['h'],R0, nt, update_x=False, update_z=False, pb=True)['y'].mean() res['x_neg'] = sim.simulate_force_ee( get_x_neg(pr), d0['z'],d0['h'],R0, nt, update_x=False, update_z=False, pb=True)['y'].mean() res['z_pos'] = sim.simulate_force_ee( d0['x'], get_z_pos(pr), d0['h'],R0, nt, update_x=False, update_z=False, pb=True)['y'].mean() res['z_neg'] = sim.simulate_force_ee( d0['x'], get_z_neg(pr), d0['h'],R0, nt, update_x=False, update_z=False, pb=True)['y'].mean() all.append(res) # save to file! # with open("res_cf_pr_fb{}.json".format(first_best), 'w') as fp: # json.dump(all, fp) df = pd.DataFrame(all) df = df.sort_values(['x_pos']) pr_x_pos = np.interp( sdata0['y'].mean() + 0.1, df['x_pos'] , df['pr'] ) df = df.sort_values(['x_neg']) pr_x_neg = np.interp( sdata0['y'].mean() - 0.1, df['x_neg'] , df['pr'] ) df = df.sort_values(['z_pos']) pr_z_pos = np.interp( sdata0['y'].mean() + 0.1, df['z_pos'] , df['pr'] ) df = df.sort_values(['z_neg']) pr_z_neg = np.interp( sdata0['y'].mean() - 0.1, df['z_neg'] , df['pr'] ) logging.info(" chosen probability x pos:{}".format(pr_x_pos)) logging.info(" chosen probability x neg:{}".format(pr_x_neg)) logging.info(" chosen probability z pos:{}".format(pr_z_pos)) logging.info(" chosen probability z neg:{}".format(pr_z_neg)) sdata0 = sim_agg(sim.simulate_force_ee(d0['x'],d0['z'],d0['h'],R0, nt, update_x=update_prod, update_z=update_prod, pb=True)) # finaly we simulate at the probabilities that we have chosen. 
sdata_x_pos = sim_agg(sim.simulate_force_ee( get_x_pos(pr_x_pos),d0['z'],d0['h'],R0, nt, update_x=update_prod, update_z=update_prod,pb=True)) sdata_x_neg = sim_agg(sim.simulate_force_ee( get_x_neg(pr_x_neg),d0['z'],d0['h'],R0, nt, update_x=update_prod, update_z=update_prod,pb=True)) sdata_z_pos = sim_agg(sim.simulate_force_ee( d0['x'],get_z_pos(pr_z_pos),d0['h'],R0, nt, update_x=update_prod, update_z=update_prod,pb=True)) sdata_z_neg = sim_agg(sim.simulate_force_ee( d0['x'],get_z_neg(pr_z_neg),d0['h'],R0, nt, update_x=update_prod, update_z=update_prod,pb=True)) # preparing the lead and lag plots pp0 = lambda v : np.concatenate([ np.zeros(5), v ]) ppt = lambda v : np.concatenate([ [-4,-3,-2,-1,0], v ]) to_plot = {'w','pr_j2j','pr_e2u','vs','effort','Pi','y','W1','target_wage'} to_plot = {k:v for k,v in var_name.items() if k in to_plot} # Z shock response plt.clf() # plt.rcParams["figure.figsize"]=12,12 plt.figure(figsize=(12, 12), dpi=80) for i,name in enumerate(to_plot.keys()): plt.subplot(3, 3, i+1) plt.plot( ppt (sdata0.index/4) , pp0(sdata_z_pos[name] - sdata0[name]) ) plt.plot( ppt (sdata0.index/4) , pp0(sdata_z_neg[name] - sdata0[name]), linestyle='--') #plt.plot( ppt (sdata0.index/4) , pp0(sdata_z_pos_fb[name] - sdata0[name]), linestyle='dashdot') #plt.plot( ppt (dd0.index/4) , pp0(sdata_x_pos[name] - sdata0[name]) ) #plt.plot( ppt (dd0.index/4) , pp0(sdata_x_neg[name] - sdata0[name]) ) plt.axhline(0,linestyle=':',color="black") plt.xlabel(var_name[name]) #plt.xlabel('years') plt.xticks(range(0,21,5)) plt.ticklabel_format(axis="y", style="sci", scilimits=(-3,5)) plt.subplots_adjust(hspace=0.3, wspace=0.3) if first_best: plt.savefig('../figures/figurew6-ir-zshock-fb.pdf', bbox_inches='tight') else: plt.savefig('../figures/figure4-ir-zshock.pdf', bbox_inches='tight') plt.clf() # plt.rcParams["figure.figsize"]=12,12 plt.figure(figsize=(12, 12), dpi=80) for i,name in enumerate(to_plot.keys()): plt.subplot(3, 3, i+1) plt.plot( ppt (sdata0.index/4) , pp0(sdata_x_pos[name] - sdata0[name]) ) plt.plot( ppt (sdata0.index/4) , pp0(sdata_x_neg[name] - sdata0[name]) ,ls='--') #plt.plot( ppt (dd0.index/4) , pp0(sdata_x_pos[name] - sdata0[name]) ) #plt.plot( ppt (dd0.index/4) , pp0(sdata_x_neg[name] - sdata0[name]) ) plt.axhline(0,linestyle=':',color="black") plt.xlabel(var_name[name]) plt.xticks(range(0,21,5)) plt.ticklabel_format(axis="y", style="sci", scilimits=(-3,5)) plt.subplots_adjust(hspace=0.3, wspace=0.3) if first_best: plt.savefig('../figures/figurew5-ir-xshock-fb.pdf', bbox_inches='tight') else: plt.savefig('../figures/figure3-ir-xshock.pdf', bbox_inches='tight')
5,341,480
def kde_interpolation(poi, bw='scott', grid=None, resolution=1, area=None, return_contour_geojson=False): """Applies kernel density estimation to a set points-of-interest measuring the density estimation on a grid of places (arbitrary points regularly spaced). Parameters ---------- poi : GeoDataFrame. Corresponds to input data. bw : 'scott', 'silverman' or float. The bandwidth for kernel density estimation. Check `scipy docs`_ about their bw parameter of gaussian_kde. grid : GeoDataFrame or None, default is None. If a grid is not given, then it is provided according to the area parameter and resolution. resolution : float, default is 1. Space in kilometers between the arbitrary points of resulting grid. area : GeoDataFrame or None, default is None. If area is given, grid will be bounded accordingly with the GeoDataFrame passed. return_contour_geojson : bool, default is False. If True, it returns the result of the kde as a contourplot in the geojson format. Returns ------- GeoDataFrame with a grid of points regularly spaced with the respective density values for the input points-of-interest given. Example ------- >>> import geohunter as gh >>> poi = gh.osm.Eagle().get(bbox='(-5.91,-35.29,-5.70,-35.15)', amenity=['hospital' , 'police'], natural='*') >>> neighborhood = gh.osm.Eagle().get(bbox='(-5.91,-35.29,-5.70,-35.15)', largest_geom=True, name='Ponta Negra') >>> result = kde_interpolation(poi, bw='scott', area=neighborhood, resolution=0.5) >>> ax = area.plot(edgecolor='black', color='white') >>> result.plot(column='density', ax=ax) .. _scipy docs: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.html """ lonv, latv = None, None if grid is None and area is None: raise ValueError('grid or area must be given.') if grid is None and isinstance(area, gpd.GeoDataFrame): grid, lonv, latv = make_gridpoints(area, resolution, return_coords=True) assert isinstance(poi, gpd.GeoDataFrame) kernel = stats.gaussian_kde(np.vstack([poi.centroid.x, poi.centroid.y]), bw_method=bw) grid_ = grid[:] grid_['density'] = kernel(grid[['lon', 'lat']].values.T) if return_contour_geojson: assert lonv is not None and latv is not None, \ "grid should not be passed for this operation. Try to pass area and pick a resolution level." return contour_geojson(grid_['density'], lonv, latv, cmin=grid_['density'].min(), cmax=grid_['density'].max()) else: return grid_
5,341,481
def nums2tcrs(nums): """Converts a list containing lists of numbers to amino acid sequences. Each number is considered to be an index of the alphabet.""" tcrs_letter=[] n=len(nums) for i in range(n): num=nums[i] tcr='' for j in range(len(num)): tcr+=alphabet[num[j]] tcrs_letter.append(tcr) return tcrs_letter
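
# Example usage (assumes the module-level `alphabet` string used above, e.g. the 20
# amino-acid letters in a fixed order such as 'ACDEFGHIKLMNPQRSTVWY'):
example = nums2tcrs([[1, 0, 3], [4]])
print(example)   # ['CAE', 'F'] under the alphabet assumed above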
5,341,482
def extract_columns(data): """ EXTRACTS COLUMNS TO USE IN `DictWriter()` """ columns = [] column_headers = data[0] for key in column_headers: columns.append(key) return columns
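
# Usage sketch with csv.DictWriter, as hinted at in the docstring (filename is arbitrary):
import csv

data = [{"name": "Ada", "year": 1815}, {"name": "Alan", "year": 1912}]
with open("people.csv", "w", newline="") as handle:
    writer = csv.DictWriter(handle, fieldnames=extract_columns(data))
    writer.writeheader()
    writer.writerows(data)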
5,341,483
def cylindric_grid(dr, dz, origin_z=None, layer=False, material="dfalt"): """ Generate a cylindric mesh as a radial XZ structured grid. Parameters ---------- dr : array_like Grid spacing along X axis. dz : array_like Grid spacing along Z axis. origin_z : scalar, optional, default None Depth of origin point. layer : bool, optional, default False If `True`, mesh will be generated by layers. material : str, optional, default 'dfalt' Default material name. Returns ------- toughio.Mesh Output cylindric mesh. """ if not isinstance(dr, (list, tuple, numpy.ndarray)): raise TypeError() if not isinstance(dz, (list, tuple, numpy.ndarray)): raise TypeError() if not (origin_z is None or isinstance(origin_z, (int, float))): raise TypeError() if not isinstance(material, str): raise TypeError() dr = numpy.asarray(dr) dz = numpy.asarray(dz) if not (dr > 0.0).all(): raise ValueError() if not (dz > 0.0).all(): raise ValueError() origin_z = origin_z if origin_z is not None else -dz.sum() mesh = structured_grid( dr, [1.0], dz, origin=[0.0, -0.5, origin_z], layer=layer, material=material, ) return CylindricMesh( dr, dz, layer, points=mesh.points, cells=mesh.cells, point_data=mesh.point_data, cell_data=mesh.cell_data, field_data=mesh.field_data, )
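
# Example usage (illustrative; the material name is arbitrary): a 50 m radial extent
# in 1 m rings with five 2 m thick layers.
import numpy
mesh = cylindric_grid(numpy.ones(50), numpy.full(5, 2.0), material="ROCK1")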
5,341,484
def get_clip_preview_feedback(program, event, classifier, start_time, audio_track, reviewer): """ Gets the feedback provided by a user for a Segment's clip Returns: Feedback if present. Empty Dictionary of no feedback exists. """ event = urllib.parse.unquote(event) program = urllib.parse.unquote(program) classifier = urllib.parse.unquote(classifier) start_time = Decimal(urllib.parse.unquote(start_time)) tracknumber = urllib.parse.unquote(audio_track) clip_preview_table = ddb_resource.Table(CLIP_PREVIEW_FEEDBACK_TABLE_NAME) response = clip_preview_table.query( KeyConditionExpression=Key("PK").eq( f"{program}#{event}#{classifier}#{str(start_time)}#{str(tracknumber)}#{reviewer}") ) if "Items" not in response or len(response["Items"]) == 0: return {} return response["Items"][0]
5,341,485
def interp(specStr, t): """Return the current value of t using linear interpolation. <specStr> is a string containing a list of pairs e.g. '[[0,20],[30,65],[60,50],[90,75]]' The first element of each pair is DAYS. The second is a NUMBER. <t> is time in seconds""" specList = ast.literal_eval(specStr) X = [i[0] for i in specList] Y = [i[1] for i in specList] day = t/(60*60*24.0) return numpy.interp(day,X,Y)
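
# Example (relies on the module-level ast/numpy imports used above): fifteen days in,
# the value is interpolated halfway between the day-0 and day-30 entries.
print(interp('[[0,20],[30,65]]', 15 * 24 * 60 * 60))   # 42.5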
5,341,486
def time_is(location): """ Retrieves the time in a location by parsing the time element in the html from Time.is . :param location: str location of the place you want to find time (works for small towns as well). :return: time str or None on failure. """ if BeautifulSoup: header = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/51.0.2704.106 Safari/537.36', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en-GB,en;q=0.5', 'Accept-Encoding': 'gzip, deflate', 'Connection': 'keep-alive', 'Referrer': 'http://time.is/', } post_url = 'http://time.is/' + str(location) time_data = util.web.http_get(post_url, header=header) time_html = time_data['content'] soup = BeautifulSoup(time_html, "html.parser") time = '' try: for hit in soup.findAll(attrs={'id': 'twd'}): time = hit.contents[0].strip() except KeyError: pass return time else: return None
5,341,487
def _generate_template_context(arguments: PackagingResourceArguments, manifest: OdahuProjectManifest, output_folder: str) -> DockerTemplateContext: """ Generate Docker packager context for templates """ logging.info('Building context for template') return DockerTemplateContext( model_name=manifest.model.name, model_version=manifest.model.version, odahuflow_version=manifest.odahuflowVersion, timeout=arguments.timeout, host=arguments.host, port=arguments.port, workers=arguments.workers, threads=arguments.threads, pythonpath=output_folder, wsgi_handler=f'{HANDLER_MODULE}:{HANDLER_APP}', model_location=ODAHU_SUB_PATH_NAME, entrypoint_target=ENTRYPOINT_TEMPLATE, handler_file=f'{HANDLER_MODULE}.py', base_image=arguments.dockerfileBaseImage, conda_file_name=CONDA_FILE_NAME, conda_server_file_name=CONDA_SERVER_FILE_NAME, entrypoint_docker=ENTRYPOINT_TEMPLATE )
5,341,488
def get_records(fname): """ Read the records of an IRAF database file into a python list Parameters ---------- fname : str name of an IRAF database file Returns ------- A list of records """ f = open(fname) dtb = f.read() f.close() recs = dtb.split('begin')[1:] records = [Record(r) for r in recs] return records
5,341,489
def get_all_clips_matching_filter(fid: int) -> List[Clip]:
    """
    Gets all the clips that are part of the project and match the filter.
    :param fid: The id of the filter the clips should match
    :return: A list of all clips that are part of the project and match the filter
    """
    filter = get_filter_by_id(fid)
    assert filter is not None
    clips = get_all_clips_in_project(filter.project.id)
    res = []
    for clip in clips:
        if clip.clip_match_filter(filter):
            res.append(clip)
    return res
5,341,490
def get_gushim(): """ get gush_id metadata """ detailed = request.args.get('detailed', '') == 'true' gushim = helpers._get_gushim(fields={'gush_id': True, 'last_checked_at': True, '_id': False}) if detailed: # Flatten list of gushim into a dict g_flat = dict((g['gush_id'], {"gush_id": g['gush_id'], "last_checked_at": g['last_checked_at'], "plan_stats": {}}) for g in gushim) # Get plan statistics from DB stats = helpers._get_plan_statistics() # Merge stats into gushim dict for g in stats['result']: try: gush_id = g['_id']['gush_id'] status = g['_id']['status'] g_flat[gush_id]['plan_stats'][status] = g['count'] except KeyError, e: # Gush has plans but is missing from list of gushim? app.logger.warn("Gush #%d has plans but is not listed in the Gushim list", gush_id) app.log_exception(e) # De-flatten our dict gushim = g_flat.values() return helpers._create_response_json(gushim)
5,341,491
def dump_source(buf, id): """Dump BASIC source.""" if id == ID_SP5030: line_end_code = 0x0d src_end_code = 0x0000 kind = "SP-5030" elif id == ID_SBASIC: line_end_code = 0x00 src_end_code = 0x0000 kind = "S-BASIC" elif id == ID_HUBASIC: line_end_code = 0x00 src_end_code = 0x0000 kind = "Hu-BASIC" else: return 1 if not found_word_endcode(buf, src_end_code): print("Not found %s end code (0x%04X)" % (kind, src_end_code)) return 1 p = 0 while True: line_length = get_word(buf, p) if line_length == src_end_code: # Found Source end code break # get 1 line data line = buf[p:p + line_length] if get_last_byte(line) != line_end_code: print("Not found %s line end code (0x%02X)" % (kind, line_end_code)) return 1 line_number = get_word(line, 2) if id == ID_SP5030: lstr = get_line_sp5030(line, 4, line_end_code) elif id == ID_SBASIC: lstr = get_line_sbasic(line, 4, line_end_code) elif id == ID_HUBASIC: lstr = get_line_hubasic(line, 4, line_end_code) if jp_flag: # print("%d %s" % (line_number, lstr.encode('utf-8'))) print("%d %s" % (line_number, lstr.encode('cp932'))) else: print("%d %s" % (line_number, lstr)) p += line_length return 0
5,341,492
def dense_layers(sequences, training, regularizer, initializer, num_layers=3, activation=tf.nn.relu): """ Create a chain of dense (fully-connected) neural network layers. Args: sequences (tf.Tensor): Input sequences. training (bool): Whether the mode is training or not. regularizer: TF weight reqularizer. initializer: TF weight initializer. num_layers (int): activation (function): TF activation function. Returns: tf.Tensor: Output tensor. """ with tf.variable_scope('dense'): output = sequences for _ in range(num_layers): output = tf.layers.dense(output, FLAGS.num_units_dense, activation=activation, kernel_initializer=initializer, kernel_regularizer=regularizer) output = tf.minimum(output, FLAGS.relu_cutoff) output = tf.layers.dropout(output, rate=FLAGS.dense_dropout_rate, training=training) # output = [batch_size, time, num_units_dense] return output
5,341,493
def apply_cst(im, cst):
    """ Applies CST matrix to image.
    Args:
      im: input ndarray image (height x width x channel).
      cst: a 3x3 CST matrix.

    Returns:
      transformed image.
    """
    # Work on a copy so already-transformed channels are not read back while
    # computing the remaining channels.
    result = im.copy()
    for c in range(3):
        result[:, :, c] = (cst[c, 0] * im[:, :, 0] + cst[c, 1] * im[:, :, 1] +
                           cst[c, 2] * im[:, :, 2])
    return result
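
# Quick sanity check (illustrative): the identity CST leaves the image unchanged.
import numpy as np
img = np.random.rand(4, 4, 3)
assert np.allclose(apply_cst(img, np.eye(3)), img)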
5,341,494
def show_binary_classification_accuracy(best_m: nn.Module, local_loader: data_utils.DataLoader, chatty=False) -> Tuple:
    """
    Given the model and dataloader, calculate the classification accuracy.
    Returns accuracy, true_positives, true_negatives, false_positives, false_negatives,
    roc_auc, pr, and the prediction/label lists for use elsewhere.
    :param best_m:
    :param local_loader:
    :param chatty:
    :return:
    """
    correct = 0
    total = 0
    false_positives = 0
    false_negatives = 0
    true_positives = 0
    true_negatives = 0

    pred_list = []
    lab_list = []

    with torch.no_grad():
        for data, labels in local_loader:
            outputs = best_m(data)
            predicted = torch.argmax(outputs, dim=1)
            total += labels.shape[0]
            correct += int((predicted == labels).sum())

            pred_list.extend(predicted.detach().flatten().numpy())
            lab_list.extend(labels.detach().flatten().numpy())

            # Tally confusion-matrix counts; kt[0] is the prediction, kt[1] the label.
            for kt in zip(predicted, labels):
                if kt[0] == kt[1] == 1:
                    true_positives += 1
                elif kt[0] == kt[1] == 0:
                    true_negatives += 1
                elif kt[0] == 1 and kt[1] == 0:
                    false_positives += 1
                elif kt[0] == 0 and kt[1] == 1:
                    false_negatives += 1

    accuracy = correct / total
    print("Accuracy: %f" % (accuracy))

    auc = roc_auc_score(lab_list, pred_list)
    pr = precision_recall_curve(lab_list, pred_list)

    if chatty:
        print("True Positives", true_positives, " False Positives", false_positives,
              f" at {false_positives/(total-correct):.2f}")
        print("True Negatives", true_negatives, " False Negatives", false_negatives,
              f" at {false_negatives/(total-correct):.2f}")

    return accuracy, true_positives, true_negatives, false_positives, false_negatives, auc, pr, pred_list, lab_list
5,341,495
async def test_full_config(opp, mock_client): """Test the full config of component.""" config = { prometheus.DOMAIN: { "namespace": "ns", "default_metric": "m", "override_metric": "m", "component_config": {"fake.test": {"override_metric": "km"}}, "component_config_glob": {"fake.time_*": {"override_metric": "h"}}, "component_config_domain": {"climate": {"override_metric": "°C"}}, "filter": { "include_domains": ["climate"], "include_entity_globs": ["fake.time_*"], "include_entities": ["fake.test"], "exclude_domains": ["script"], "exclude_entity_globs": ["climate.excluded_*"], "exclude_entities": ["fake.time_excluded"], }, } } assert await async_setup_component(opp, prometheus.DOMAIN, config) await opp.async_block_till_done() assert opp.bus.listen.called assert opp.bus.listen.call_args_list[0][0][0] == EVENT_STATE_CHANGED
5,341,496
def validate_accelerator_count(accel: Accelerator, count: int) -> int: """Raises an error if the count isn't valid for the supplied accelerator, else returns the count. """ is_gpu = accel in GPU ucase = accelerator_name(is_gpu) valid_counts = accelerator_counts(accel) if not _AccelCountMT[accel].get(count): raise argparse.ArgumentTypeError( with_advice_suffix( accel, "{} {}s of type {} aren't available \ for any machine type. Try one of the following counts: {}\n".format( count, ucase, accel.name, valid_counts))) return count
5,341,497
def objective(trial: optuna.trial.Trial, log_dir: str, device, backbone) -> Tuple[float, int, float]: """Optuna objective. Args: trial Returns: float: score1(e.g. accuracy) int: score2(e.g. params) """ hyperparams = search_hyperparam(trial) if backbone: model = Efficientnet_b0() model_path = os.path.join(log_dir, "best.pt") # result model will be saved in this path print(f"Model save path: {model_path}") model.to(device) else: model_config: Dict[str, Any] = {} model_config["input_channel"] = 3 img_size = hyperparams["IMG_SIZE"] model_config["INPUT_SIZE"] = [img_size, img_size] model_config["depth_multiple"] = trial.suggest_categorical( "depth_multiple", [0.25, 0.5, 0.75, 1.0] ) model_config["width_multiple"] = trial.suggest_categorical( "width_multiple", [0.25, 0.5, 0.75, 1.0] ) model_config["backbone"], module_info = search_model(trial) model = Model(model_config, verbose=True) model_path = os.path.join(log_dir, "best.pt") # result model will be saved in this path print(f"Model save path: {model_path}") model.to(device) model.model.to(device) # check ./data_configs/data.yaml for config information data_config: Dict[str, Any] = {} data_config["DATA_PATH"] = DATA_PATH data_config["DATASET"] = "TACO" data_config["IMG_SIZE"] = hyperparams["IMG_SIZE"] data_config["AUG_TRAIN"] = "randaugment_train" data_config["AUG_TEST"] = "simple_augment_test" data_config["AUG_TRAIN_PARAMS"] = { "n_select": hyperparams["n_select"], } data_config["AUG_TEST_PARAMS"] = None data_config["BATCH_SIZE"] = hyperparams["BATCH_SIZE"] data_config["EPOCHS"] = hyperparams["EPOCHS"] data_config["VAL_RATIO"] = 0.2 data_config["INIT_LR"] = hyperparams["INIT_LR"] data_config["FP16"] = True data_config["SUBSET_SAMPLING_RATIO"] = 0.5 # 0 means full data data_config["LOSS"] = 'CrossEntropy_Weight' trial.set_user_attr('hyperparams', hyperparams) if backbone: mean_time = check_runtime( model, [3]+[224, 224], device, ) else: trial.set_user_attr('model_config', model_config) mean_time = check_runtime( model.model, [model_config["input_channel"]] + model_config["INPUT_SIZE"], device, ) trial.set_user_attr('data_config', data_config) for key, value in trial.params.items(): print(f" {key}:{value}") model_info(model, verbose=True) train_loader, val_loader, test_loader = create_dataloader(data_config) weights = get_weights(data_config["DATA_PATH"]) criterion = get_loss(data_config["LOSS"], data_config["FP16"], weight=weights, device=device) if hyperparams["OPTIMIZER"] == "SGD": optimizer = torch.optim.SGD(model.parameters(), lr=hyperparams["INIT_LR"]) else: optimizer = getattr(optim, hyperparams["OPTIMIZER"])(model.parameters(), lr=hyperparams["INIT_LR"]) scheduler = torch.optim.lr_scheduler.OneCycleLR( optimizer, max_lr=hyperparams["INIT_LR"], steps_per_epoch=len(train_loader), epochs=hyperparams["EPOCHS"], pct_start=0.05, cycle_momentum=True if hyperparams["OPTIMIZER"] == "SGD" else False ) # Amp loss scaler scaler = ( torch.cuda.amp.GradScaler() if data_config["FP16"] and device != torch.device("cpu") else None ) trainer = TorchTrainer( model=model, criterion=criterion, optimizer=optimizer, scheduler=scheduler, scaler=scaler, model_path=model_path, device=device, verbose=1, ) trainer.train(train_loader, hyperparams["EPOCHS"], val_dataloader=val_loader) loss, f1_score, acc_percent = trainer.test(model, test_dataloader=val_loader) params_nums = count_model_params(model) model_info(model, verbose=True) print('='*50) return f1_score, params_nums, mean_time
5,341,498
def block_device_mapping_destroy(context, bdm_id): """Destroy the block device mapping.""" return IMPL.block_device_mapping_destroy(context, bdm_id)
5,341,499