blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b4455a1a07bbfe0f6b6717376cf81f9d093e2d98
|
0f7a2efe20cff389800b32d1fc4a8791402b7aee
|
/elections_cleaning.py
|
62c5848df26bf0d6baadc878afff99c1c796ab4c
|
[] |
no_license
|
wmceachen/tweetVotes
|
1ab322ce9ac7c616d009aa8d4945d5ccadec70e5
|
1192ac5e415c5335b7d07c2e8da9a5973d315ff8
|
refs/heads/master
| 2022-07-06T00:42:24.634511
| 2020-05-18T21:20:29
| 2020-05-18T21:20:29
| 262,635,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,259
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[212]:
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import re
# get_ipython().run_line_magic('matplotlib', 'inline')
# In[233]:
states = ['Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California', 'Colorado', 'Connecticut',
'Delaware', 'Columbia', 'Florida', 'Georgia', 'Hawaii', 'Idaho', 'Iowa', 'Illinois',
'Indiana', 'Kansas', 'Kentucky', 'Louisiana', 'Maine', 'Maryland', 'Massachusetts', 'Michigan',
'Minnesota', 'Mississippi', 'Missouri', 'Montana', 'Nebraska', 'Nevada', 'New Hampshire',
'New Jersey', 'New Mexico', 'New York', 'North Carolina', 'North Dakota', 'Ohio', 'Oklahoma',
'Oregon', 'Pennsylvania', 'Rhode Island', 'South Carolina', 'South Dakota', 'Tennessee',
'Texas', 'Utah', 'Vermont', 'Virginia', 'Washington', 'West Virginia', 'Wisconsin', 'Wyoming']
# In[234]:
earlier_data = pd.read_json("bp_accounts/2012_14.json")
later_data = pd.read_json("bp_accounts/2016_18.json")
tweet_df = pd.concat([earlier_data, later_data])
tweet_df
# In[235]:
tweet_df = tweet_df[['State', 'Body', 'Party', 'Year', 'Name', 'Campaign Twitter',
'Personal Twitter', 'Official Twitter']]
tweet_df.plot.hist('Year')
# In[236]:
def party_diff(election_df: pd.DataFrame) -> float:
"""[summary]
Arguments:
election_df {pd.DataFrame} -- DF with just a single election
Returns:
float -- p-value for difference between those who ran and those with twitter accounts
"""
party_counts = pd.value_counts(election_df['Party'])
election_df.dropna(how='all', inplace=True, subset=['Campaign Twitter',
'Personal Twitter', 'Official Twitter'])
account_count = pd.value_counts(election_df.Party)
ax = party_counts.plot.bar(title='Candidate Distribution')
account_count.plot.bar(title='Account Distribution', ax=ax)
plt.show()
return stats.chisquare(account_count, party_counts)
tweet_df.groupby('Year').describe()
# In[237]:
party_counts = pd.value_counts(tweet_df['Party'])
pd.value_counts(tweet_df['Party']).plot.bar()
# tweet_df.dropna(how='all', inplace=True, subset=['Campaign Twitter',
# 'Personal Twitter', 'Official Twitter'])
tweet_df.Name = tweet_df.Name.apply(
lambda name: re.sub("\(.*\)", "", name).strip())
# pd.value_counts(df['Year']).plot.bar()
# In[238]:
pd.value_counts(tweet_df['Party']).plot.bar()
stats.chisquare(pd.value_counts(tweet_df.Party), party_counts)
# In[239]:
# 2012
def get_district_2012(race_row):
region_info = race_row['State'].split(",")
state, district = region_info[0].split(
',')[0], "".join(region_info[1:]).strip()
return state.strip(), district.strip()
house12, senate12 = pd.read_csv(
'elections/2012/2012_house.csv'), pd.read_csv('elections/2012/2012_senate.csv')
house12['State'], house12['District'] = zip(
*house12.apply(get_district_2012, axis=1))
senate12['District'] = 'Senate'
elections12 = pd.concat([house12, senate12])
assert all(elections12.State.apply(lambda state: state in states))
elections12['Year'] = 2012
elections12.District.unique()
# In[240]:
# 2014
def get_district_2014(race_row):
# print(race_row)
if 'at-large' in race_row['District'].lower():
state_district = race_row.District.lower().partition('at-large')
return state_district[0].split('\'')[0].strip().title(), "".join(state_district[1:]).strip().title()
else:
state_district = race_row.District.partition('District')
return state_district[0].strip(), "".join(state_district[1:]).strip()
house14, senate14 = pd.read_csv(
'elections/2014/2014_house.csv'), pd.read_csv('elections/2014/2014_senate.csv')
house14['State'], house14['District'] = zip(
*house14.apply(get_district_2014, axis=1))
senate14['District'] = 'Senate'
elections14 = pd.concat([house14, senate14])
elections14.rename(columns={'Total Vote': 'Total Votes'}, inplace=True)
elections14.replace({"West Virginia,": 'West Virginia', 'Louisiana Runoff Election': 'Louisiana',
'Oklahoma Special Election': 'Oklahoma', 'South Carolina Special Election': 'South Carolina'}, inplace=True)
assert all(elections14.State.apply(lambda state: state in states))
elections14['Year'] = 2014
def remove_special(dist_str):
if dist_str not in ['Senate', 'At-Large District'] and not re.match('^\w{8} \d{1,2}$', dist_str):
return re.search('^\w{8} \d{1,2}', dist_str).group()
return dist_str
elections14.District = elections14.District.apply(remove_special)
elections14
# In[241]:
# 2016 Individual
house16, senate16 = pd.read_csv(
'elections/2016/2016_house.csv'), pd.read_csv('elections/2016/2016_senate.csv')
house16['State'], house16['District'] = zip(
*house16.apply(get_district_2014, axis=1))
senate16.reset_index(inplace=True)
new_header = senate16.iloc[0] # grab the first row for the header
senate16 = senate16[1:] # take the data less the header row
senate16.columns = new_header # set the header row as the df header
senate16['District'] = 'Senate'
# In[242]:
# 2016 Ratings NOT USING
# def get_ratings_district(dist_data):
# if 'at-large' in dist_data.lower():
# state_district = dist_data.lower().partition('at-large')
# return state_district[0].split('\'')[0].strip().title(), 'At-Large District'
# else:
# dist_num = re.search('\d+', dist_data).group()
# return re.split('\d+', dist_data)[0].split('\'')[0], 'District '+dist_num
# house16_ratings = pd.read_csv('elections/2016/2016_house_ratings.csv')
# senate16_ratings = pd.read_csv('elections/2016/2016_senate_ratings.csv')
# senate16_with_ratings = senate16.merge(senate16_ratings, on='State')
# house16_ratings['State'], house16_ratings['District'] = zip(*house16_ratings.District.apply(get_ratings_district))
# # house16.merge(house16_ratings, on=['State', 'District'])
# # house16_ratings
# In[243]:
# 2016 All Elections
elections16 = pd.concat([house16, senate16])
elections16.rename(columns={'Total Vote': 'Total Votes'}, inplace=True)
assert all(elections16.State.apply(lambda state: state in states))
elections16['Year'] = 2016
elections16
# In[244]:
# 2018
def get_district_2018(dist_data):
if 'at-large' in dist_data:
state_district = dist_data.lower().partition('at-large')
return state_district[0].split('\'')[0].strip().title(), 'At-Large District'
else:
dist_num = re.search('\d+', dist_data).group()
return re.split('\d+', dist_data)[0].split('\'')[0], 'District '+dist_num
house18, senate18 = pd.read_csv(
'elections/2018/2018_house.csv'), pd.read_csv('elections/2018/2018_senate.csv')
house18['State'], house18['District'] = zip(
*house18.District.apply(get_district_2018))
senate18['State'] = senate18['District'].apply(lambda x: x.split(',')[1])
senate18['District'] = 'Senate'
elections18 = pd.concat([house18, senate18])
elections18.State = elections18.State.str.strip()
assert all(elections18.State.apply(lambda state: state in states))
elections18.rename(columns={'Runner-up': 'Top Opponent',
'Margin of victory': 'Margin of Victory', 'Total votes': 'Total Votes'}, inplace=True)
elections18 = elections18[['District', 'Winner',
'Margin of Victory', 'Total Votes', 'Top Opponent', 'State']]
elections18['Year'] = 2018
elections18
# In[245]:
elections = pd.concat([eval(f'elections{year}')for year in range(12, 20, 2)])
elections['Margin of Victory'] = elections['Margin of Victory'].str.strip(
'%').astype(float)
assert all(elections.State.apply(lambda state: state in states))
# In[246]:
def candidate_gen(dist_row):
region_dict = dist_row[['State', 'District', 'Year']]
winner_dict, loser_dict = region_dict.copy(), region_dict.copy()
winner_dict['Name'], loser_dict['Name'] = dist_row['Winner'], dist_row['Top Opponent']
margin = dist_row['Margin of Victory']
winner_dict['Vote Share'], loser_dict['Vote Share'] = round(
(margin+100)/2), round(100-(margin+100)/2)
winner, loser = pd.DataFrame(winner_dict), pd.DataFrame(loser_dict)
return winner, loser
# winner = pd.DataFrame({State, District,Year})
district_candidates = elections.apply(candidate_gen, axis=1).tolist()
# In[247]:
candidates = []
for district in range(len(district_candidates)):
for i in range(2):
candidates.append(district_candidates[district][i].transpose())
# In[248]:
candidate_df = pd.concat(candidates).reset_index(drop=True).dropna()
candidate_df = candidate_df[~candidate_df.Name.str.contains("Write|nopposed")]
candidate_df.sort_values(by=['Year', 'State', 'Name'], inplace=True)
candidate_df
# In[255]:
candidates_accounts_df = pd.merge(candidate_df, tweet_df, how='outer', on=[
'Year', 'State', 'Name'], indicator=True)
candidates_accounts_df = candidates_accounts_df[candidates_accounts_df._merge == 'both'].dropna(
how='all', subset=['Campaign Twitter', 'Personal Twitter', 'Official Twitter'])
candidates_accounts_df.to_csv('accounts.csv', index=False)
|
[
"wmceachen@berkeley.edu"
] |
wmceachen@berkeley.edu
|
192dba6f7212426e97860967092928411f81219c
|
0e86ab6a7906f00b809275cabbc39b1746964cb2
|
/Virus Spread Sim_0/SoC-Assignment-1-Abhishek.py
|
28a617c60e028e8118d1f28cce87a894849f34e9
|
[] |
no_license
|
abhipaiangle/Intelligent_Agent_AbhishekPaiAngle
|
d8919d45590e0b68f890cea77167b99d5730bbcc
|
a51d3511bea9633b17fc3fc627163ed2c3c564c5
|
refs/heads/master
| 2022-12-31T07:24:26.134252
| 2020-10-22T11:00:01
| 2020-10-22T11:00:01
| 268,830,602
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
def swap(l):
for i in range(5):
from random import choice
a=choice(list(range(0,10000)))
b=choice(list(range(0,10000)))
s=[l[a],l[b]]
l[a]=s[1]
l[b]=s[0]
p1=[]
for i in range(1000):
a=[]
for i in range(10000):
a.append(0)
a[4999]=1
prob=[0]*65
for i in range(35):
prob.append(1)
p=[]
import matplotlib.pyplot as plt
from random import choice
k=0
while(k!=10000):
i=0
swap(a)
while(i<9999):
if(a[i]==1):
if((choice(prob)==1) and (i<9999) and (a[i+1]!=1)):
a[i+1]=1
i=i+1
if((choice(prob)==1) and (i<9999) and (a[i-1]!=1)):
a[i-1]=1
i=i+1
k=0
for i in range(10000):
if(a[i]==1):
k=k+1
p.append(k)
plt.plot(p)
plt.xlabel('iteration')
plt.ylabel('number of ones')
plt.show()
p1.append(len(p));
plt.plot(p1)
plt.xlabel('number of runs')
plt.ylabel('number of iterations')
plt.show()
import numpy as np
b=np.array(p1)
print("average of number of iterations ",b.mean())
p2=[]
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
for i in range(0,len(p1)-1):
p2.append(p1[i+1]-p1[i])
plt.plot(p2);
yhat=savgol_filter(p2,101,5)
plt.xlabel('x')
plt.ylabel('dy/dx')
plt.plot(yhat,c='red')
plt.show()
print("maximum of dy/dx: ",max(yhat))
|
[
"abhishekangle6gmail.com"
] |
abhishekangle6gmail.com
|
dc4466d8625431c97d761145a1d498accb32cc44
|
a232988fe8f247fbd56f7a91748ccfbf73326265
|
/thinkful/TBay/DBRelationships.py
|
5e21b196bc4d8b1d014133ad73849bdeeb7d4655
|
[] |
no_license
|
Vigs16/hello-world
|
32236f6258ce42e6d8f5ef5139ecd84090be37bd
|
4212e0669944f22d0d3148516b97bf9fec95b72d
|
refs/heads/master
| 2021-01-20T13:03:51.843003
| 2017-05-30T04:52:13
| 2017-05-30T04:52:13
| 90,442,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,861
|
py
|
from sqlalchemy import Column, Integer, String, Date, ForeignKey,create_engine
from sqlalchemy.orm import relationship,sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from datetime import datetime
engine=create_engine('postgresql://ubuntu:thinkful@localhost:5432/tbay')
Session=sessionmaker(bind=engine)
session=Session()
Base=declarative_base()
class Manufacturer(Base):
__tablename__="manufacturer"
id=Column(Integer,primary_key=True)
name=Column(String,nullable=False)
guitars=relationship("Guitar",backref="manufacturer")
class Guitar(Base):
__tablename__="guitar"
id=Column(Integer,primary_key=True)
name=Column(String,nullable=False)
manufacturer_id=Column(Integer,ForeignKey('manufacturer.id'),nullable=False)
class Pizza(Base):
__tablename__="pizza"
id=Column(Integer,primary_key=True)
name=Column(String,nullable=False)
toppings=relationship("Topping", secondary="pizzatoppingtable",backref="pizzas")
class Topping(Base):
__tablename__="topping"
id=Column(Integer,primary_key=True)
name=Column(String,nullable=False)
class PizzaToppingTable(Base):
__tablename__="pizzatoppingtable"
pizza_id= Column(Integer,ForeignKey('pizza.id'),primary_key=True)
topping_id=Column(Integer,ForeignKey('topping.id'),primary_key=True)
peppers = Topping(name="Peppers")
garlic = Topping(name="Garlic")
chilli = Topping(name="Chilli")
spicy_pepper = Pizza(name="Spicy Pepper")
spicy_pepper.toppings = [peppers, chilli]
vampire_weekend = Pizza(name="Vampire Weekend")
vampire_weekend.toppings = [garlic, chilli]
session.add_all([garlic, peppers, chilli, spicy_pepper, vampire_weekend])
session.commit()
for topping in vampire_weekend.toppings:
print(topping.name)
for pizza in chilli.pizzas:
print(pizza.name)
|
[
"vigs16@outlook.com"
] |
vigs16@outlook.com
|
6ef5bd8a575932a3a353cc02c902815a09541620
|
dd7c22891de26e4c90c3ba7e71c0dd2b65e83740
|
/alogrithm.py
|
7d9ab8e3a66a15f2657e10bdd3459a1193d4ea84
|
[] |
no_license
|
Leogaogithub/pythonUtils
|
572dbdf45bfa18d5b2ad258ab045918a32dc56ce
|
464877f9b2a50434587fe0abfdf4b5218368047c
|
refs/heads/master
| 2021-07-03T03:20:10.812998
| 2019-03-31T22:55:18
| 2019-03-31T22:55:18
| 146,221,515
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
x = [1,2,3,4,1,2]
y = [6,5,4,3,2,1]
indexs = []
indexs.append(0)
for i in range(0, len(x)):
pre = indexs[-1]
if x[pre] < x[i] and y[pre] > y[i]:
indexs.append(i)
print('\nx:\n')
for i in indexs:
print str(x[i]), ''
print('\ny:\n')
for i in indexs:
print str(y[i]), ''
|
[
"longhaogao@gmail.com"
] |
longhaogao@gmail.com
|
dc05449b1b6b8decce88de7a50c4979e35579a3f
|
3d0fca9603cd54d0dea24370e48eb6c043ce398f
|
/1-й курс/Программы с пар/зашифруй.py
|
3ada209d60e5b09a4a7b57d2ac09201a34c02588
|
[] |
no_license
|
bungabakung/my_homeworks
|
4357f272bd47469293b93846c6bcf4d474bc71ec
|
b1f6aedd41f5d8b24fd2e2abd49443be0e1907c6
|
refs/heads/master
| 2021-06-24T05:23:32.251831
| 2018-06-14T20:52:09
| 2018-06-14T20:52:09
| 67,619,420
| 0
| 1
| null | 2021-06-01T22:19:27
| 2016-09-07T15:25:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,647
|
py
|
ss=[]
string=''
latinica='abcdefghijklmnopqrstuvwxyz'
LATINICA='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
kirillica='абвгдеёжзийклмнопрстуфхцчшщъыьэюя'
KIRILLICA='АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ'
s=input('Введите что-нибудь ')
while len(s)!=0:
for i in range(len(s)):
if s[i] in latinica:
for j in range(len(latinica)):
if s[i]==latinica[j]:
if j==len(latinica)-1:
string+=latinica[1]
else:
string+=latinica[j+1]
elif s[i] in LATINICA:
for j in range(len(LATINICA)):
if s[i]==LATINICA[j]:
if j==len(LATINICA)-1:
string+=LATINICA[1]
else:
string+=LATINICA[j+1]
elif s[i] in kirillica:
for j in range(len(kirillica)):
if s[i]==kirillica[j]:
if j==len(kirillica)-1:
string+=kirillica[1]
else:
string+=kirillica[j+1]
elif s[i] in KIRILLICA:
for j in range(len(KIRILLICA)):
if s[i]==KIRILLICA[j]:
if j==len(KIRILLICA)-1:
string+=KIRILLICA[1]
else:
string+=KIRILLICA[j+1]
else:
string+=(s[i])
print(string,' ')
string=''
s=input('Введите что-нибудь ')
print('Вы ничего не ввели, программа идет спать')
|
[
"lilia.blyumina@gmail.com"
] |
lilia.blyumina@gmail.com
|
8505d4a70065822b96e2bb52f3c8e44ab696d6f6
|
f0657bd269d63317aba8bef38c579ed4d57f2d14
|
/models/layers/unet_gn_layer.py
|
d5369cf769eeb864d4aace0bcd6f46e55b91816b
|
[] |
no_license
|
kehuantiantang/ExFuse
|
b632a9f6b856fa86487af98e0a442443104a3809
|
b8e029e8213ac58689ca63c16f52c85ff6077c62
|
refs/heads/master
| 2020-05-23T10:57:44.331900
| 2019-05-15T01:49:55
| 2019-05-15T01:49:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,449
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def weights_init_kaiming(m):
classname = m.__class__.__name__
#print(classname)
if classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0.0)
class ConvGNReLU(nn.Module):
def __init__(self, in_size, out_size, group, kernel_size=3, stride=1, padding=1):
super(ConvGNReLU, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(in_size, out_size, kernel_size, stride, padding),
nn.GroupNorm(group, out_size),
nn.ReLU(inplace=True),)
# initialise the blocks
for m in self.children():
m.apply(weights_init_kaiming)
def forward(self, inputs):
return self.conv1(inputs)
class UnetGNConv2D(nn.Module):
def __init__(self, in_size, out_size, group, kernel_size=3, stride=1, padding=1):
super(UnetGNConv2D, self).__init__()
self.conv1 = ConvGNReLU(in_size, out_size, group, kernel_size, stride, padding)
self.conv2 = ConvGNReLU(out_size, out_size, group, kernel_size, 1, padding)
def forward(self, inputs):
x = self.conv1(inputs)
return self.conv2(x)
class UnetUpGNConv2D(nn.Module):
def __init__(self, in_size, out_size, group, is_deconv=True):
super(UnetUpGNConv2D, self).__init__()
self.conv = UnetGNConv2D(in_size, out_size, group)
if is_deconv:
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=4, stride=2, padding=1)
else:
self.up = nn.UpsamplingBilinear2d(scale_factor=2)
# initialise the blocks
for m in self.children():
if m.__class__.__name__.find('UnetGNConv2D') != -1:
continue
m.apply(weights_init_kaiming)
def forward(self, input1, input2):
output2 = self.up(input2)
offset = output2.size()[2] - input1.size()[2]
padding = [offset // 2] * 4
output1 = F.pad(input1, padding)
output = torch.cat([output1, output2], 1)
return self.conv(output)
|
[
"zsef123@gmail.com"
] |
zsef123@gmail.com
|
cf51c0cc7167bbbeda4735d54fbf5b33b8042846
|
857237167c71b398c7f9924ce9a4c0d2ce274728
|
/testone.py
|
5930a8a9ec66ff9e9d5d7e14fab66a297b048805
|
[] |
no_license
|
navaneethknair/WeHealth
|
821def00a0bbd6d1f00a95457a4d931265c16a29
|
6be7790f16f204bb02c28b3bead45e752fad70ad
|
refs/heads/master
| 2020-03-30T21:34:27.264398
| 2017-08-13T01:05:02
| 2017-08-13T01:05:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
import socket
if __name__ == '__main__':
address = ('127.0.0.1', 8000)
client = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
client.connect(address)
client.send('hello-world')
data = client.recv(1024)
print data
client.close()
|
[
"597445175@qq.com"
] |
597445175@qq.com
|
49658d0045d2a7230bb89dd775406d26d43ad7bc
|
fffefcbfa7fa08daf740abf49cccb21efbc01eb3
|
/src/data_collection/processing.py
|
a7c9d08f3fc75014c12e12855b2d2969ffdae678
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
GrigalashviliT/spoilerBlocker
|
2b4260549ad3d80fbd7a229313228c591222f5a4
|
18a5e9689099d3b631a15ed20cc84a043f324055
|
refs/heads/master
| 2022-11-25T07:44:15.388691
| 2020-07-25T18:20:04
| 2020-07-25T18:20:04
| 256,992,634
| 6
| 0
|
MIT
| 2020-07-25T18:20:05
| 2020-04-19T12:23:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,437
|
py
|
import spacy
import neuralcoref
class dataprocessing:
"""
dataprocessing class is to process film or serial data
"""
def __init__(self):
"""
@logic:
initialization models and etc.
@param:
"""
self.nlp = spacy.load('en_core_web_lg')
neuralcoref.add_to_pipe(self.nlp)
self.pronouns = ['I', 'me', 'you', 'he', 'him', 'she', 'her', 'it', 'we', 'us', 'they', 'them', 'Mine', 'My', 'Your', 'Yours', 'Its', 'Hers', 'His', 'our', 'ours', 'your', 'yours', 'their', 'theirs']
self.pronouns = [pronoun.lower() for pronoun in self.pronouns]
def process_data(self, text):
"""
@logic:
processing text, remove pronouns
@param:
"""
doc = self.nlp(text)
processed_text = ''
for line in doc:
if str(line).replace("'", '').lower() in self.pronouns:
if len(line._.coref_clusters) == 0:
continue
replace = line._.coref_clusters[0][0]
if str(replace).replace("'", '').lower() in self.pronouns:
continue
processed_text += str(replace) + ' '
else:
processed_text += str(line) + ' '
return processed_text
|
[
"noreply@github.com"
] |
noreply@github.com
|
6b3dbbcb6756b64d57391c14e942620926fb347f
|
4c60293476c437d3b91eb909e60cf46669d7adc4
|
/dataset/DeepFakes/faceswap-master/lib/FaceFilter.py
|
0dfc23d6baa5e7e6437d1aae157d06c994a2a72a
|
[
"MIT"
] |
permissive
|
flynn-chen/faceforensics_benchmark
|
5d9726cae414cd5e76fbbcb0e13d9084bb2466a0
|
897e474a12e90954318a5e2fce810cde9a824d99
|
refs/heads/master
| 2022-12-10T15:22:05.122525
| 2019-12-13T23:29:00
| 2019-12-13T23:29:00
| 227,938,897
| 2
| 0
|
NOASSERTION
| 2022-12-08T01:50:26
| 2019-12-13T23:30:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,863
|
py
|
# import dlib
# import numpy as np
import face_recognition
# import face_recognition_models
def avg(arr):
return sum(arr)*1.0/len(arr)
class FaceFilter():
def __init__(self, reference_file_paths, nreference_file_paths, threshold = 0.6):
images = list(map(face_recognition.load_image_file, reference_file_paths))
nimages = list(map(face_recognition.load_image_file, nreference_file_paths))
# Note: we take only first face, so the reference file should only contain one face.
self.encodings = list(map(lambda im: face_recognition.face_encodings(im)[0], images))
self.nencodings = list(map(lambda im: face_recognition.face_encodings(im)[0], nimages))
self.threshold = threshold
def check(self, detected_face):
# we could use detected landmarks, but I did not manage to do so. TODO The copy/paste below should help
encodings = face_recognition.face_encodings(detected_face.image)
if encodings is not None and len(encodings) > 0:
distances = list(face_recognition.face_distance(self.encodings, encodings[0]))
distance = avg(distances)
mindistance = min(distances)
maxdistance = max(distances)
if distance > self.threshold:
print("Distance above threshold: %f < %f" % (distance, self.threshold))
return False
if len(self.nencodings) > 0:
ndistances = list(face_recognition.face_distance(self.nencodings, encodings[0]))
ndistance = avg(ndistances)
nmindistance = min(ndistances)
nmaxdistance = max(ndistances)
if (mindistance > nmindistance):
print("Distance to negative sample is smaller")
return False
if (distance > ndistance):
print("Average distance to negative sample is smaller")
return False
# k-nn classifier
K=min(5, min(len(distances), len(ndistances)) + 1)
N=sum(list(map(lambda x: x[0],
list(sorted([(1,d) for d in distances] + [(0,d) for d in ndistances],
key=lambda x: x[1]))[:K])))
ratio = N/K
if (ratio < 0.5):
print("K-nn is %.2f" % ratio)
return False
return True
else:
print("No face encodings found")
return False
# # Copy/Paste (mostly) from private method in face_recognition
# face_recognition_model = face_recognition_models.face_recognition_model_location()
# face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
# def convert(detected_face):
# return np.array(face_encoder.compute_face_descriptor(detected_face.image, detected_face.landmarks, 1))
# # end of Copy/Paste
|
[
"andreas.roessler@tum.de"
] |
andreas.roessler@tum.de
|
0bf78b5a94b1e07dee662b8e341ee34aea435e03
|
54857571461a579bed30cee27871aaa5fe396bcc
|
/nltk-0.9.7/src/nltk/inference/inference.py
|
0b6d64c2a35e50e6cfaa2627aae6c30fe56517a5
|
[] |
no_license
|
ahmedBazaz/affective-text-classification
|
78375182e800b39e0e309e8b469e273c0d9590f0
|
719e9b26e60863c620662564fb9cfeafc004777f
|
refs/heads/master
| 2021-01-10T14:50:01.100274
| 2009-01-09T03:59:01
| 2009-01-09T03:59:01
| 48,296,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,176
|
py
|
# Natural Language Toolkit: Interface to Theorem Provers
#
# Author: Dan Garrette <dhgarrette@gmail.com>
# Ewan Klein <ewan@inf.ed.ac.uk>
#
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
from nltk.sem import logic
import api
import tableau
import prover9
import mace
import resolution
"""
A wrapper module that calls theorem provers and model builders.
"""
def get_prover(goal=None, assumptions=None, prover_name=None):
"""
@param goal: Input expression to prove
@type goal: L{logic.Expression}
@param assumptions: Input expressions to use as assumptions in the proof
@type assumptions: L{list} of logic.Expression objects
"""
if not prover_name:
prover_name = 'Prover9'
if prover_name.lower() == 'tableau':
return api.BaseProverCommand(tableau.Tableau(), goal, assumptions)
elif prover_name.lower() == 'prover9':
return prover9.Prover9Command(goal, assumptions)
elif prover_name.lower() == 'resolution':
return resolution.ResolutionCommand(goal, assumptions)
raise Exception('\'%s\' is not a valid prover name' % prover_name)
def get_model_builder(goal=None, assumptions=None, model_builder_name=None):
"""
@param goal: Input expression to prove
@type goal: L{logic.Expression}
@param assumptions: Input expressions to use as assumptions in the proof
@type assumptions: L{list} of logic.Expression objects
"""
if not model_builder_name:
model_builder_name = 'Mace'
if model_builder_name.lower() == 'mace':
return mace.MaceCommand(goal, assumptions)
def get_parallel_prover_builder(goal=None, assumptions=None,
prover_name='', model_builder_name=''):
prover = get_prover(prover_name=prover_name)
model_builder = get_model_builder(model_builder_name=model_builder_name)
return api.ParallelProverBuilderCommand(prover.get_prover(),
model_builder.get_model_builder(),
goal, assumptions)
def demo():
lp = logic.LogicParser()
a = lp.parse(r'some x.(man(x) and walks(x))')
b = lp.parse(r'some x.(walks(x) and man(x))')
bicond = logic.IffExpression(a, b)
print "Trying to prove:\n '%s <-> %s'" % (a, b)
print 'tableau: %s' % get_prover(bicond, prover_name='tableau').prove()
print 'Prover9: %s' % get_prover(bicond, prover_name='Prover9').prove()
print '\n'
lp = logic.LogicParser()
a = lp.parse(r'all x.(man(x) -> mortal(x))')
b = lp.parse(r'man(socrates)')
c1 = lp.parse(r'mortal(socrates)')
c2 = lp.parse(r'-mortal(socrates)')
print get_prover(c1, [a,b], 'prover9').prove()
print get_prover(c2, [a,b], 'prover9').prove()
print get_model_builder(c1, [a,b], 'mace').build_model()
print get_model_builder(c2, [a,b], 'mace').build_model()
print get_parallel_prover_builder(c1, [a,b]).prove(True)
print get_parallel_prover_builder(c1, [a,b]).build_model(True)
if __name__ == '__main__':
demo()
|
[
"tytung@6129d76e-ddfe-11dd-a37d-c9d1c40e0883"
] |
tytung@6129d76e-ddfe-11dd-a37d-c9d1c40e0883
|
f36f09b4e05bbc16b9f5367879c5ca25aebf7d66
|
bd55c7d73a95caed5f47b0031264ec05fd6ff60a
|
/apps/qa/migrations/0006_coupon_vendor_branch.py
|
35b01a8e9fea5a66447f1b448de6613892793c36
|
[] |
no_license
|
phonehtetpaing/ebdjango
|
3c8610e2d96318aff3b1db89480b2f298ad91b57
|
1b77d7662ec2bce9a6377690082a656c8e46608c
|
refs/heads/main
| 2023-06-26T13:14:55.319687
| 2021-07-21T06:04:58
| 2021-07-21T06:04:58
| 381,564,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
# Generated by Django 2.0.5 on 2019-03-12 08:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0123_auto_20190312_1736'),
('qa', '0005_auto_20190312_1732'),
]
operations = [
migrations.AddField(
model_name='coupon',
name='vendor_branch',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='coupon_vendor_branch', to='core.VendorBranch', verbose_name='vendor_branch'),
),
]
|
[
"phonehtetpaing1221@gmail.com"
] |
phonehtetpaing1221@gmail.com
|
0ed4d8ca65b096d70ac92f93671e165389e63b8b
|
9f03e4d754b21df4e6681dd65f7f59f09fd4178d
|
/oneleftfootapi/models/dance_type.py
|
6864f8211a60de7ab9b4655c7f8d7c62437b8be3
|
[] |
no_license
|
morriscodez/one-left-foot-server
|
6ea1ee751bddff7c206ea8e4eefc2ed16fc2ef53
|
1221e87569d1fdd1401829bbe2dcf50d13aa09d3
|
refs/heads/main
| 2023-06-03T14:50:27.232339
| 2021-06-24T20:26:16
| 2021-06-24T20:26:16
| 375,131,673
| 0
| 0
| null | 2021-06-24T19:53:39
| 2021-06-08T20:07:18
|
Python
|
UTF-8
|
Python
| false
| false
| 105
|
py
|
from django.db import models
class DanceType(models.Model):
label = models.CharField(max_length=25)
|
[
"dylanrobertmorris@gmail.com"
] |
dylanrobertmorris@gmail.com
|
2f05050ea1ccc4477c77e75b7a36881dd9aa9857
|
d1b15ac88ac517d4ccf78298bccabfe7b6bd1505
|
/combine/jinja/exceptions.py
|
d211f601eff1050c5f7ac78647152aa2ff51efa1
|
[
"MIT"
] |
permissive
|
syllogy/combine
|
5a6cdb4bd79ca8f61a7d95895bd92bb8a9583fbe
|
b24e13a46adea1860a23abb6346bea309d46f35c
|
refs/heads/master
| 2023-08-14T13:51:30.096540
| 2021-10-06T02:28:20
| 2021-10-06T02:28:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
class MissingVariableError(Exception):
def __init__(self, name):
self.name = name
self.message = f'The required variable "{self.name}" is missing'
super().__init__(self.message)
class ReservedVariableError(Exception):
def __init__(self, name):
self.name = name
self.message = (
f'The variable"{self.name}" is reserved and should only be set by combine'
)
super().__init__(self.message)
|
[
"dave.gaeddert@gmail.com"
] |
dave.gaeddert@gmail.com
|
606e7c9ee0d79351ab7e217de6f5110a63244578
|
e0ab809c11cfd4df37c5c4cf9d6fc29abc018796
|
/Homework/HW2/HW2_Ryden.py
|
5a20546493bb53c3732c4dd2fa97e07531bd50a3
|
[] |
no_license
|
RydenButler/PythonCourse2016
|
807283e81c71568bf4930b02e037a463745dcd21
|
888bda9a500f9ea50c9d97fa7cfb3b383e077c46
|
refs/heads/master
| 2021-01-14T09:20:07.651554
| 2016-08-28T11:41:02
| 2016-08-28T11:41:02
| 64,665,310
| 0
| 0
| null | 2016-08-01T12:34:09
| 2016-08-01T12:34:09
| null |
UTF-8
|
Python
| false
| false
| 2,265
|
py
|
from bs4 import BeautifulSoup
from django.utils.encoding import smart_str, smart_unicode
from urlparse import *
import urllib2
import random
import time
import os
import re
import csv
address = 'https://petitions.whitehouse.gov/petitions'
#Save_Petitions(address)
def Save_Petitions(website):
with open('HW2_Output.csv', 'wb') as f:
writer = csv.DictWriter(f, fieldnames = ("PetitionTitle", "UploadDate", "IssueTags", "Signatures"))
writer.writeheader()
Titles, Dates, Tags, Sigs = Scrape_Petitions(website)
for i in range(len(Titles)):
writer.writerow({'PetitionTitle':Titles[i], 'UploadDate': Dates[i], 'IssueTags': Tags[i], 'Signatures': Sigs[i]})
def Scrape_Petitions(website):
Headers, Tags, Sigs = get_Headers_Tags_Sigs(website)
Links, Titles = get_Links_Titles(Headers)
Dates = get_Dates(Links)
return Titles, Dates, Tags, Sigs
def get_Headers_Tags_Sigs(website):
Headers = []
Tag_Lists = []
Tags = []
Signatures = []
for i in range(4):
web_page = urljoin(str(website), '?page=%d' % i)
web_text = urllib2.urlopen(web_page)
soup = BeautifulSoup(web_text.read())
Headers.extend(soup.find_all('h3'))
sigs = soup.find_all('span', {'class': 'signatures-number'})
for sig in sigs:
Signatures.append(str(sig.get_text()))
tag_list = soup.find_all('div', {'class': "field field-name-field-petition-issues field-type-taxonomy-term-reference field-label-hidden tags"})
for item in tag_list:
Tag_Lists.append(item.find_all('h6'))
for List in Tag_Lists:
tags = []
for tag in List:
tags.append(str(tag.get_text()))
Tags.append(tags)
Tags = Tags[-74:]
return Headers, Tags, Signatures
def get_Links_Titles(petitions):
Links = []
Titles = []
for petition in petitions:
try:
extension = petition.a['href']
except:
continue
else:
Titles.append(smart_str(petition.a.get_text()))
Links.append(urljoin("https://petitions.whitehouse.gov", str(extension)))
return Links, Titles
def get_Dates(Links):
Dates = []
for link in Links:
web_text = urllib2.urlopen(link)
soup = BeautifulSoup(web_text.read())
attrib = soup.find_all('h4', {'class': 'petition-attribution'})
text_att = str(attrib[0].get_text())
words = text_att.split()
Dates.append(' '.join(words[-3:]))
return Dates
|
[
"ryden.winfield@gmail.com"
] |
ryden.winfield@gmail.com
|
8253f43e416783f00b1ec25c50be53ea186aef46
|
3931a54177d8c4798fa231450b1bde557e27df7a
|
/catkin_ws/build/Hardware/motor_node/catkin_generated/pkg.develspace.context.pc.py
|
0861c9c34d17e69515613ea7b650852d537fc6e6
|
[] |
no_license
|
RobotJustina/Azcatl
|
458d676711b7109f056a4c8671e6f82086ba7176
|
b80bdb4e5bd3f02141b0bb38ccfa7f2907509893
|
refs/heads/master
| 2020-04-09T14:50:06.011296
| 2019-02-08T00:10:42
| 2019-02-08T00:10:42
| 160,408,580
| 0
| 1
| null | 2019-02-08T00:10:43
| 2018-12-04T19:31:39
|
Makefile
|
UTF-8
|
Python
| false
| false
| 368
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "motor_node"
PROJECT_SPACE_DIR = "/home/pi/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
|
[
"hugin.sanchez@gmail.com"
] |
hugin.sanchez@gmail.com
|
a38219cf230e02a4b51f77acdf5bb58c8c66cc5d
|
c2ae65792af1fab2e7843303ef90790819f872e8
|
/SampleCodes/Webview/v3/lib/python3.5/copyreg.py
|
3ba6ec1ff37b3f84594bbbe694537c660c2574bd
|
[] |
no_license
|
behappyyoung/PythonSampleCodes
|
47c224ca76ce509a03c8b75ef6b4bf7f49ebdd7f
|
f7640467273fa8ea3c7e443e798737ca5bcea6f9
|
refs/heads/master
| 2023-03-15T00:53:21.034605
| 2023-02-13T17:12:32
| 2023-02-13T17:12:32
| 26,919,763
| 3
| 3
| null | 2023-03-07T12:45:21
| 2014-11-20T15:57:16
|
Python
|
UTF-8
|
Python
| false
| false
| 97
|
py
|
/usr/local/Cellar/python3/3.5.2/Frameworks/Python.framework/Versions/3.5/lib/python3.5/copyreg.py
|
[
"ypark@stanfordhealthcare.org"
] |
ypark@stanfordhealthcare.org
|
100f564a4960c5d738f67fda6983c5a2359d5135
|
9366543de077bb66dda6da8ad772abc595d69877
|
/player/neural_network/neural_net.py
|
e6f5c49dea1dcc3ef7ef90da64c739cab9f2ad80
|
[] |
no_license
|
joshualee/cs181-finalproject
|
057b567a7dd02a6bf3d60eea80dcf0a4c7465244
|
4499e04bc520c8c1fe94169de42221e876b35ad8
|
refs/heads/master
| 2021-01-25T10:14:12.975113
| 2013-05-06T02:24:23
| 2013-05-06T02:24:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,091
|
py
|
import neural_net_pickle as nnp
import math
class Weight:
def __init__(self, value):
self.value = value
class Node:
"""
Attributes:
----------
inputs : a list of node who are inputs for this node
weights : a list of weight objects, for links with input nodes
fixed_weight : w0 in the lecture notes and slides
forward_neighbors : a list of nodes who are output for this node
raw_value : the linear combination of weights and input signals, that is w'x
transformed_value : the signal emitted by this node, that is g(w'x)
Description:
------------
The situation can be summarized as follow:
weights[i] forward_weights[i]
inputs[i] -----------> self ------------------> forward_neighbors[i]
AND:
inputs \
=> raw_value => transformed value =>
weights & fixed_weight /
"""
def __init__(self):
self.inputs = []
self.weights = []
self.fixed_weight = None
self.forward_neighbors = []
self.forward_weights = []
self.raw_value = 0
self.transformed_value = 0
def AddInput(self, node, weight, network):
self.inputs.append(node)
if not weight:
weight = network.GetNewWeight()
self.weights.append(weight)
node.forward_neighbors.append(self)
node.forward_weights.append(weight)
if not self.fixed_weight:
self.fixed_weight = network.GetNewWeight()
class Input:
def __init__(self):
self.values = []
class Target:
def __init__(self):
self.values = []
class NeuralNetwork:
INPUT = 1
HIDDEN = 2
OUTPUT = 3
def __init__(self):
self.complete = False
self.inputs = []
self.hidden_nodes = []
self.outputs = []
self.node_set = {}
self.weights = []
def GetNewWeight(self):
weight = Weight(0.0)
self.weights.append(weight)
return weight
def AddNode(self, node, node_type):
self.CheckIncomplete()
if node_type == self.INPUT:
assert len(node.inputs) == 0, 'Input node cannot have inputs'
# Check that we only reference inputs already in the network
for input in node.inputs:
assert input in self.node_set, 'Cannot reference input that is not already in the network'
self.node_set[node] = True
if node_type == self.INPUT:
self.inputs.append(node)
elif node_type == self.HIDDEN:
self.hidden_nodes.append(node)
else:
assert node_type == self.OUTPUT, 'Unexpected node_type: ' % node_type
self.outputs.append(node)
def MarkAsComplete(self):
seen_nodes = {}
for input in self.inputs:
seen_nodes[input] = True
assert len(input.inputs) == 0, 'Inputs should not have inputs of their own.'
for node in self.hidden_nodes:
seen_nodes[node] = True
for input in node.inputs:
assert input in seen_nodes, ('Node refers to input that was added to the network later than'
'it.')
for node in self.outputs:
assert len(node.forward_neighbors) == 0, 'Output node cannot have forward neighbors.'
for input in node.inputs:
assert input in seen_nodes, ('Node refers to input that was added to the network later than'
'it.')
self.complete = True
def CheckComplete(self):
if self.complete:
return
self.MarkAsComplete()
def CheckIncomplete(self):
assert not self.complete, ('Tried to modify the network when it has already been marked as'
'complete')
@staticmethod
def ComputeRawValue(node):
total_weight = 0
for i in range(len(node.inputs)):
total_weight += node.weights[i].value * node.inputs[i].transformed_value
total_weight += node.fixed_weight.value
return total_weight
@staticmethod
def Sigmoid(value):
try:
return 1.0 / (1 + math.exp(-value))
except:
if value < 0:
return 0.0
else:
return 1.0
@staticmethod
def SigmoidPrime(value):
try:
return math.exp(-value) / math.pow(1 + math.exp(-value), 2)
except:
return 0
def InitFromWeights(self, weights):
assert len(self.weights) == len(weights), (
'Trying to initialize from a different sized weight vector.')
for i in range(len(weights)):
self.weights[i].value = weights[i]
class NetworkFramework(object):
def __init__(self):
self.network = NeuralNetwork()
# Don't worry about these functions, you
# will be asked to implement them in another
# file. You should not modify them here
self.FeedForwardFn = None
self.TrainFn = None
def EncodeLabel(self, label):
raise NotImplementedError("This function has not been implemented")
def GetNetworkLabel(self, label):
raise NotImplementedError("This function has not been implemented")
def Convert(self, image):
raise NotImplementedError("This function has not been implemented")
def InitializeWeights(self):
for weight in self.network.weights:
weight.value = 0
def Classify(self, image):
input = self.Convert(image)
self.FeedForwardFn(self.network, input)
return self.GetNetworkLabel()
def ClassifySoft(self, image):
input = self.Convert(image)
self.FeedForwardFn(self.network, input)
return self.GetNetworkLabelSoft()
def Performance(self, images):
# Loop over the set of images and count the number correct.
correct = 0
for image in images:
if self.Classify(image) == image.label:
correct += 1
return correct * 1.0 / len(images)
def Train(self, images, validation_images, learning_rate, epochs):
# Convert the images and labels into a format the network can understand.
inputs = []
targets = []
for image in images:
inputs.append(self.Convert(image))
targets.append(self.EncodeLabel(image.label))
# Initializes performance log
performance_log = []
performance_log.append((self.Performance(images), self.Performance(validation_images)))
max_performance = 0
iteration_without_update = 0
loops = 0
# Loop through the specified number of training epochs.
for i in range(epochs):
# This calls your function in neural_net_impl.py.
self.TrainFn(self.network, inputs, targets, learning_rate, 1)
# Print out the current training and validation performance.
perf_train = self.Performance(images)
perf_validate = self.Performance(validation_images)
print '%d Performance: %.8f %.3f' % (
i + 1, perf_train, perf_validate)
# updates log
performance_log.append((perf_train, perf_validate))
loops += 1
# if we haven't increased the max validation in 10 iterations, then we stop
if perf_validate > max_performance:
max_performance = perf_validate
iteration_without_update = 0
else:
iteration_without_update += 1
if iteration_without_update == 5:
break;
return(loops, performance_log)
def RegisterFeedForwardFunction(self, fn):
self.FeedForwardFn = fn
def RegisterTrainFunction(self, fn):
self.TrainFn = fn
|
[
"LeeJoshuaK@gmail.com"
] |
LeeJoshuaK@gmail.com
|
9e9e5ead1c12e034531e9ee64226900db7b7bbbe
|
ce33b3cbd5ac044095c6baac2191b23bd8ed5531
|
/blog/models.py
|
45a134f3ecd1da09b2178000f871d8c4892bcd84
|
[] |
no_license
|
Chaitanyate/django3-portfolio
|
011e13d4c6d2162c4196395c262c484991c9ba7f
|
92235de9b74ad02b2eb32054305652ffea1f16a4
|
refs/heads/master
| 2023-01-12T06:08:03.679171
| 2020-11-18T17:10:24
| 2020-11-18T17:10:24
| 313,672,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
from django.db import models
class blog(models.Model):
title = models.CharField(max_length=100)
description = models.TextField(max_length=250)
date=models.DateField()
def __str__(self):
return self.title
|
[
"Chaitanya.Tejas@gds.ey.com"
] |
Chaitanya.Tejas@gds.ey.com
|
5c15afa29895acb8165f67f96d1744092f542d33
|
ed269e9a4d9d6bfbb833381b7aef65a23f391fe2
|
/比赛/5479. 千位分隔数.py
|
f95b565d2780cc9f0cda6a36ec21c68410b1d997
|
[] |
no_license
|
Comyn-Echo/leeCode
|
fcff0d4c4c10209a47bd7c3204e3f64565674c91
|
67e9daecb7ffd8f7bcb2f120ad892498b1219327
|
refs/heads/master
| 2023-04-28T17:35:52.963069
| 2021-05-19T01:52:16
| 2021-05-19T01:52:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
class Solution(object):
def thousandSeparator(self, n):
"""
:type n: int
:rtype: str
"""
ans =""
res= n % 1000
n = n // 1000
ans = str(res) + ans
if n ==0:
return ans
if len(str(res)) == 2:
ans = "0" + ans
elif len(str(res)) ==1:
ans = "00" + ans
while n !=0:
res = n % 1000
n = n // 1000
ans = str(res) +"." + ans
if n == 0:
return ans
if len(str(res)) == 2:
ans = "0" + ans
elif len(str(res)) ==1:
ans = "00" + ans
return ans
ans = Solution.thousandSeparator(None, 7)
print(ans)
|
[
"2892211452aa@gmail.com"
] |
2892211452aa@gmail.com
|
5ff6922415e890ad248793d54a5878ab2061d1a3
|
7f7b64bbc1c8fccb337b3279b24f325da96d2dc4
|
/tests/test_user_model.py
|
5cb1289e5585d6285630c83a5e689eef62a69a49
|
[] |
no_license
|
wenliangsun/Flasky
|
7d28cb0c7797062f11f4b1055f943a343e116b0b
|
8d43ae322480c7ce47f06d1daaaf7b33581ffde9
|
refs/heads/master
| 2021-06-16T14:15:05.609286
| 2018-11-22T12:00:18
| 2018-11-22T12:00:18
| 146,570,738
| 0
| 0
| null | 2021-02-02T21:50:21
| 2018-08-29T08:38:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,043
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'swl'
__mtime__ = '8/6/18'
"""
import unittest
from ..app.models import User, Role, AnonymousUser, Permission
class UserModelTestCase(unittest.TestCase):
def test_password_setter(self):
u = User(password='cat')
self.assertTrue(u.password_hash is not None)
def test_no_password_getter(self):
u = User(password='cat')
with self.assertRaises(AttributeError):
u.password
def test_password_salts_are_random(self):
u = User(password='cat')
u2 = User(password='cat')
self.assertTrue(u.password_hash != u2.password_hash)
def test_roles_and_permissions(self):
Role.insert_roles()
u = User(email='john@example.com', password='cat')
self.assertTrue(u.can(Permission.WRITE_ARTICLES))
self.assertFalse(u.can(Permission.MODERATE_COMMENTS))
def test_anonymous_user(self):
u = AnonymousUser()
self.assertFalse(u.can(Permission.FOLLOW))
|
[
"2572907084@qq.com"
] |
2572907084@qq.com
|
3794ad4e6c4c29f51277e6c3db63938934199c94
|
912b3b5321c7e26887af94cf2f97e4892c8c956a
|
/Day6/1_os模块.py
|
e6c1d55e65f6ce1f83dbdb50a2a2369a9e7f34ed
|
[] |
no_license
|
nmap1208/2016-python-oldboy
|
a3a614694aead518b86bcb75127e1ed2ef94604a
|
873820e30aeb834b6a95bae66259506955436097
|
refs/heads/master
| 2021-05-31T04:43:14.636250
| 2016-05-06T01:24:39
| 2016-05-06T01:24:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
# -*- coding:utf-8 -*-
import os
a = os.popen('ls')
print(type(a))
print(a.read())
b = os.system('ls')
print(b)
c = os.stat('1_os模块.py')
print(c)
print(os.path.isfile('.'))
print(os.path.isdir('.'))
print()
print(os.path.isfile('1_os模块.py'))
print(os.path.isdir('1_os模块.py'))
|
[
"yangk@ersoft.cn"
] |
yangk@ersoft.cn
|
90856229f731ecc7f365cd9bfefd3bb1642d00b2
|
1fed8a3e683ac7762214977a889bc02940d05ef7
|
/da1.py
|
daeccc0967d7a69e3dd0abf40bab8bb43cbab8b0
|
[] |
no_license
|
Sjsatishjaiswal/DA2
|
1eeff78bf2abe3a39e41f356ddac3f26eef0e859
|
b8a36b877e4e02c526198b1eb8f7c6d8f71c3494
|
refs/heads/master
| 2022-12-04T22:23:01.363217
| 2020-08-10T11:59:32
| 2020-08-10T11:59:32
| 286,460,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,707
|
py
|
Today we will discuss the Numpy library
import numpy as np
np.__version__
The basic data structure in numpy is an n-dimensional array.
a = np.array([1, 2, 3, 4, 5])
b = np.array([1.1, 2.2, 3.3, 4.4, 5.5])
c = np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype = 'float32')
d = np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype = 'int32')
e = np.array([1])
f = np.array([[1]])
g = np.array([[[1]]])
e.ndim
f.ndim
g.ndim
h = np.array([range(i, i+3) for i in [2, 4, 6]])
i = np.zeros(10, dtype = int)
np.zeros((2, 5))
np.ones(10)
j = np.ones((3, 5), dtype = int)
k = np.full((3, 5), 3.14)
range ----> arange
np.arange(0, 10)
np.arange(0, 20, 3)
l = np.arange(0, 20)
m = np.linspace(0, 20)
np.linspace(0, 10, 5)
n = np.random.randn(4, 4)
o = np.random.rand(4, 4)
p = np.random.randint(4)
q = np.random.randint(0, 10, (4, 4))
r = np.eye(4)
s = np.empty((3, 3))
The common operations on numpy arrays are:
1. Determining attributes of array
2. Indexing
3. Slicing & Dicing
4. Reshaping
5. Joining and Splitting
x1 = np.random.randint(10, size = 6)
x2 = np.random.randint(10, size = (3, 4))
x3 = np.random.randint(10, size = (3, 4, 5))
x3.ndim
x3.shape
x3.size
x3.dtype
x3.itemsize
x3.nbytes
x1
x1[0]
x1[4]
x1[5]
x1[-1]
x2
x2[0]
x2[0, 0]
x2[-1, -1]
x2[0, 0] = 12
x2[0, 0] = 12.34
x = np.arange(10)
x
x[0:5]
x[:5]
x[5:]
x[4:7]
x[:]
x[::2]
x[::3]
x[::-1]
x[::-3]
x2
x2[rows, cols]
x2[:2, :3]
x2[1, :]
x2[:, 3]
x2[2, 1]
Note : When we invoke the slice of an array, it returns a
view and not a copy.
print(x2)
x2_sub = x2[:2, :3]
x2_sub[0, 0] = 101
print(x2)
x2_s = x2[:2, :2]
x2_s[:] = 100
print(x2)
x2_sub_copy = x2[:2, :2].copy()
x2_sub_copy[:] = 7
grid = np.arange(1, 10)
grid = grid.reshape((3, 3))
x = np.array([1, 2, 3])
y = np.array([3, 2, 1])
np.concatenate([x, y])
z = np.array([99, 99, 99])
np.concatenate([x, y, z])
grid = np.array([[1, 2, 3],
[4, 5, 6]])
grid
np.concatenate([grid, grid])
np.concatenate([grid, grid], axis = 1)
x = [1, 4, 5, 99, 99, 4, 8, 7]
x1, x2, x3 = np.split(x, [3, 5])
x1
x2
x3
my_list = list(range(1000000))
my_arr = np.array(range(1000000))
%time for i in range(10): my_list2 = my_list * 2
%time for i in range(10): my_arr2 = my_arr * 2
[1, 2, 3] * 3
np.array([1, 2, 3]) * 3
mat = np.array([[1, 2], [3, 4]])
mat * mat
mat @ mat
|
[
"noreply@github.com"
] |
noreply@github.com
|
69c203aa42e95ec1b2b2ecaa3d7ba0d6c6f3fbef
|
f15aae2b0039037f6b49c318e8a619ab98575769
|
/university_chatbot/models.py
|
7cb5cee91ae23b4c0dc8b3c0d5c6d30e37bb7d2d
|
[] |
no_license
|
bethanw10/university-chatbot
|
75d1838d593425b713ecdb9fb9672fb5db6ee646
|
b32390b97bac8fc29c4194459d5eecd72ad8e77f
|
refs/heads/master
| 2021-03-16T09:57:13.102375
| 2018-05-15T18:39:26
| 2018-05-15T18:39:26
| 107,578,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,133
|
py
|
import os
from peewee import *
package_dir = os.path.dirname(os.path.abspath(__file__))
database_path = os.path.join(package_dir, 'data/db.sqlite')
db = SqliteDatabase(database_path)
class BaseModel(Model):
class Meta:
database = db
class Course(BaseModel):
name = CharField(unique=True)
class Module(BaseModel):
name = CharField()
code = CharField(unique=True)
semester = CharField()
class Meta:
database = db
class Course_Module(BaseModel):
course = ForeignKeyField(Course)
module = ForeignKeyField(Module)
class Timetable(BaseModel):
module = ForeignKeyField(Module)
activity = CharField()
day = CharField()
start = CharField()
finishes = CharField()
campus = CharField()
room = CharField()
lecturer = CharField()
group_details = CharField()
class Week_Range(BaseModel):
timetable = ForeignKeyField(Timetable)
start_week = CharField()
end_week = CharField()
def create_tables():
db.connect()
db.create_tables([Course, Course_Module, Module, Timetable, Week_Range])
try:
create_tables()
except:
pass
|
[
"bethanw10@gmail.com"
] |
bethanw10@gmail.com
|
8ca1f76025a6c70f3e1501bb42a2497806635dcd
|
bb150497a05203a718fb3630941231be9e3b6a32
|
/framework/e2e/jit/test_TransformerDecoderLayer_base.py
|
a05bd02f5bfde173c39458efb57dde220590c836
|
[] |
no_license
|
PaddlePaddle/PaddleTest
|
4fb3dec677f0f13f7f1003fd30df748bf0b5940d
|
bd3790ce72a2a26611b5eda3901651b5a809348f
|
refs/heads/develop
| 2023-09-06T04:23:39.181903
| 2023-09-04T11:17:50
| 2023-09-04T11:17:50
| 383,138,186
| 42
| 312
| null | 2023-09-13T11:13:35
| 2021-07-05T12:44:59
|
Python
|
UTF-8
|
Python
| false
| false
| 668
|
py
|
#!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_TransformerDecoderLayer_base():
"""test TransformerDecoderLayer_base"""
jit_case = JitTrans(case=yml.get_case_info("TransformerDecoderLayer_base"))
jit_case.jit_run()
|
[
"825276847@qq.com"
] |
825276847@qq.com
|
07cd76beba0fa6e4a8bb8ffbd676f72a58405923
|
8ae0b7faa92ba3bcb7d18274d51753e7512438cb
|
/lib/github/issue.py
|
caac788a73897d25812d1f4d6e69277aa8d5f407
|
[] |
no_license
|
YumaInaura/api
|
073f608b55678fee08ca605e2f263b98e697e6b0
|
3b0831993e70c11b1960aef9f521b33d3073e5f0
|
refs/heads/master
| 2020-05-23T19:20:58.674855
| 2019-06-18T00:55:04
| 2019-06-18T00:55:04
| 186,910,527
| 0
| 0
| null | 2019-06-18T00:55:06
| 2019-05-15T22:20:44
|
Python
|
UTF-8
|
Python
| false
| false
| 473
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests, os, json
owner = os.environ.get('OWNER')
repository = os.environ.get('REPOSITORY')
results = []
round = int(os.environ.get('ROUND')) if os.environ.get('ROUND') else 3
for i in range(0, round):
api_url = 'https://api.github.com/repos/' + owner + '/' + repository + '/issues?page=' + str(i+1)
res = requests.get(api_url)
json_result = res.json()
results += json_result
print(json.dumps(results))
|
[
"yuma.inaura@gmail.com"
] |
yuma.inaura@gmail.com
|
ca02e6bd20b6e7e827873b82f15572b04e8ec3d9
|
ccedce69e00c22982170bcdb7f2cdab0d1090f84
|
/making_pizzas.py
|
841e447635accf49be63c73418a3b38467c87307
|
[] |
no_license
|
itry1997/python_work_ol
|
b52a42d840d50a1e5ac58a2781707a2be3144596
|
ea49cf6caeb2b64b104912df83caa5b41ba75896
|
refs/heads/master
| 2020-03-25T23:05:17.769989
| 2018-08-10T09:18:49
| 2018-08-10T09:18:49
| 144,258,663
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108
|
py
|
from pizza2 import *
make_pizza(16,'pepperoni')
make_pizza(12,'mushrooms','green peppers','extra cheese')
|
[
"1290475797@qq.com"
] |
1290475797@qq.com
|
7779863b118dff78c2699620fdef4105a1714c2c
|
ad2777c974326177b7036f023301c19e7ecbf4e8
|
/rolld.py
|
c25440d5a316afa3b13b57ddfd90b978c8491674
|
[] |
no_license
|
lraulin/bin
|
c67f5f52667a4d63e4ceace8837750e0e5dc2287
|
a67026b920fea5d8731c47bad448f977f245a58d
|
refs/heads/master
| 2021-01-22T04:15:08.948736
| 2018-06-21T00:20:10
| 2018-06-21T00:20:10
| 92,446,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 64
|
py
|
import random
def roll(max):
roll1 = max * random.random()
|
[
"leeraulin@gmail.com"
] |
leeraulin@gmail.com
|
172a86f3c38e5011aa0bf1ac25cc91867d724c2f
|
9e5353ba6e50f77a40a765bd494d8bfb990c8922
|
/stream_backend/api/serializers.py
|
d5946df70464fd9e7b8cffcfad2c351823f30c86
|
[] |
no_license
|
admiralbolt/stream-stuff
|
d9e24f1d78ac142416525b9b42cc53ef0bc4712a
|
29cfa96f9e8d40c531362aced47ebacadccbe759
|
refs/heads/master
| 2023-08-05T00:02:17.812991
| 2021-09-23T05:47:16
| 2021-09-23T05:47:16
| 261,022,447
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,357
|
py
|
import json
from rest_framework import serializers
from api import models
class JsonSerializer(serializers.Field):
"""Custom serializer for json fields.
Internally json fields are represented as a string.
Externally it's json. What the fuck did you expect?
"""
def to_representation(self, value):
return json.loads(value) if value else []
def to_internal_value(self, data):
return json.dumps(data)
class CustomEmoteSerializer(serializers.ModelSerializer):
class Meta:
model = models.CustomEmote
fields = "__all__"
class KeyValueSerializer(serializers.ModelSerializer):
"""Serialize a key value pair.
In theory we could use a json serialized field here but I've found that just
doing the translation by hand works better.
"""
class Meta:
model = models.KeyValue
fields = "__all__"
class ScriptSerializer(serializers.ModelSerializer):
"""Serialize a script model."""
class Meta:
model = models.Script
fields = "__all__"
class SoundSerializer(serializers.ModelSerializer):
"""Serialize a sound model."""
class Meta:
model = models.Sound
fields = "__all__"
class TwitchClipSerializer(serializers.ModelSerializer):
"""Serialize dat boi."""
class Meta:
model = models.TwitchClip
fields = "__all__"
|
[
"aviknecht@gmail.com"
] |
aviknecht@gmail.com
|
e845a0954b654b82c32c2ec6f0c531f6024d0ec1
|
73156408f17f0fe1b5203694bcb4564b2f105aff
|
/exercises/ex22.py
|
ca827e53fad7e87d77fc28609201b9d408f4f78d
|
[] |
no_license
|
jasonwaters/Learn-Python
|
7431d0b943da23fc3ab48aa06a28a721367c5456
|
bb6d0aa9cf9b95e12c019d8d06b6c41c9da28342
|
refs/heads/master
| 2020-05-25T09:33:19.765430
| 2013-02-23T19:08:24
| 2013-02-23T19:08:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 79
|
py
|
#!/usr/bin/env python
#http://learnpythonthehardway.org/book/ex22.html
# meh.
|
[
"waters@attask.com"
] |
waters@attask.com
|
e01e2b05fabcddca2a5a6ff51953f8e148933344
|
34ddec647d6ad357c1527cf713eaeaee4eb575aa
|
/2020/24/part1.py
|
15437d944874572ab3349f6d824f88d3d20bf217
|
[
"Unlicense"
] |
permissive
|
cheshyre/advent-of-code
|
98327c564f6b401244778aaf9a16043000b4d85e
|
7ecb827745bd59e6ad249707bd976888006f935c
|
refs/heads/master
| 2022-12-21T15:53:38.789228
| 2022-12-20T20:07:28
| 2022-12-20T20:07:28
| 75,426,961
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
import os
import hex_grid
cur_dir = os.path.dirname(os.path.abspath(__file__))
tiles_count = {}
with open(f"{cur_dir}/input") as f:
for line in f:
instrs = hex_grid.parse_instructions(line)
point = (0, 0)
for i in instrs:
point = hex_grid.apply_instruction_to_point(point, i)
if point in tiles_count:
del tiles_count[point]
else:
tiles_count[point] = 1
print(f"There are {len(tiles_count)} black tiles.")
|
[
"heinz.matthias.3@gmail.com"
] |
heinz.matthias.3@gmail.com
|
685c72d8daa0b4eb0c1e3ab5e5c5a6ab6186cfb3
|
c422c8fa80f95165b106f0f65ad63faad27ba078
|
/hci/PySide/speech-coding-project/uiLoader-and-gluing/fali.pyw
|
6068f93c53e70f18f38d97b2a097e5d574eeb719
|
[
"Apache-2.0"
] |
permissive
|
rziwr/in-vicinity-python
|
f26fefb930b033d13b6ed5eb802d6d4ed626aec8
|
32524767af2459e23aad916f75cb0065e40edc66
|
refs/heads/master
| 2021-01-21T03:30:14.626241
| 2014-11-30T18:00:24
| 2014-11-30T18:00:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
pyw
|
# -*- coding: utf-8 -*-
from PySide.QtCore import *
from PySide.QtGui import *
import sys
app = QApplication(sys.argv)
def callback_int(value_as_int):
print 'int value changed:', repr(value_as_int)
def callback_unicode(value_as_unicode):
print 'unicode value changed:', repr(value_as_unicode)
spinbox = QSpinBox()
spinbox.connect(SIGNAL('valueChanged(int)'), callback_int)
spinbox.valueChanged.connect(callback_unicode)
spinbox.show()
sys.exit(app.exec_())
|
[
"igor.a.lugansky@gmail.com"
] |
igor.a.lugansky@gmail.com
|
9f96bd3e842b17ffff0232b9c3744b778aa03a07
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_None/trend_MovingAverage/cycle_0/ar_/test_artificial_32_None_MovingAverage_0__20.py
|
4d34289521e410c56e4feef0b93dd14485083f72
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 262
|
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 0, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 0);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
718c00a4b5ef47295634ca7326f93aee9b7c71cd
|
df1556f5b6fd39770b283c9396ba785139979729
|
/Sailor/venv/Scripts/pip3.7-script.py
|
79b9e6436a5f1ac3439185e33d92ce43b0864f92
|
[] |
no_license
|
jay11234/algorithm
|
bd44fdfb7e08e3225aff2cf1252f8c43d8b209f0
|
7d1adafa2a622835df7dc6e105196db6dbd87d64
|
refs/heads/master
| 2022-01-07T05:46:46.464237
| 2019-07-22T00:09:41
| 2019-07-22T00:09:41
| 197,166,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
#!C:\Users\algo\PycharmProjects\Sailor\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
|
[
"dptmdkdlxl1@gmail.com"
] |
dptmdkdlxl1@gmail.com
|
421b4ba13abf2d8f6b23e28835ddc48b36ab8f02
|
be213289ec15160d2d29378bde21e4a0debe8eaf
|
/test.py
|
793431fe98a6725a09b759ef0cd9f01a7a76aa30
|
[] |
no_license
|
johndpope/SRCNN
|
b0df3e028021a1d71fd418452a864d6fabc0566f
|
f31da7c08d78f6a25d4009733c61352938566b23
|
refs/heads/master
| 2020-04-24T15:04:06.027853
| 2019-02-20T09:15:09
| 2019-02-20T09:15:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,097
|
py
|
import torch
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torchvision.utils import save_image
from tensorboardX import SummaryWriter
from pathlib import Path
from math import log10
from model import SRCNN
from dataset import DatasetFromFolder, DatasetFromFolderEval
import argparse
parser = argparse.ArgumentParser(description='predictionCNN Example')
parser.add_argument('--cuda', action='store_true', default=False)
parser.add_argument('--weight_path', type=str, default=None)
parser.add_argument('--save_dir', type=str, default=None)
opt = parser.parse_args()
test_set = DatasetFromFolderEval(image_dir='./data/General-100/test', scale_factor=4)
test_loader = DataLoader(dataset=test_set, batch_size=1, shuffle=False)
model = SRCNN()
criterion = nn.MSELoss()
if opt.cuda:
model = model.cuda()
criterion = criterion.cuda()
model.load_state_dict(torch.load(opt.weight_path, map_location='cuda' if opt.cuda else 'cpu'))
model.eval()
total_loss, total_psnr = 0, 0
total_loss_b, total_psnr_b = 0, 0
with torch.no_grad():
for batch in test_loader:
inputs, targets = batch[0], batch[1]
if opt.cuda:
inputs = inputs.cuda()
targets = targets.cuda()
prediction = model(inputs)
loss = criterion(prediction, targets)
total_loss += loss.data
total_psnr += 10 * log10(1 / loss.data)
loss = criterion(inputs, targets)
total_loss_b += loss.data
total_psnr_b += 10 * log10(1 / loss.data)
save_image(prediction, Path(opt.save_dir) / '{}_sr.png'.format(batch[2][0]), nrow=1)
save_image(inputs, Path(opt.save_dir) / '{}_lr.png'.format(batch[2][0]), nrow=1)
save_image(targets, Path(opt.save_dir) / '{}_hr.png'.format(batch[2][0]), nrow=1)
print("===> Avg. Loss: {:.4f}, PSNR: {:.4f} dB".format(total_loss / len(test_loader), total_psnr / len(test_loader)))
print("===> Avg. Loss: {:.4f}, PSNR: {:.4f} dB".format(total_loss_b / len(test_loader), total_psnr_b / len(test_loader)))
|
[
"uchida301045@sansan.com"
] |
uchida301045@sansan.com
|
516663114078ef350830e0ea442b8a2a2d334484
|
9dc6dee6ac0e575014aa6e0fd8cad0b6e56c1512
|
/api.py
|
3a3c5ff396db5606ada247c7ca525f32da29e94b
|
[] |
no_license
|
BEYoun/backEnd
|
36dd72c1e2bd618e7aa97098f375df30b6004cfa
|
ee5360e0f2a297a18ffa466db07c142e883121df
|
refs/heads/main
| 2023-01-05T20:48:15.472492
| 2020-10-23T03:09:34
| 2020-10-23T03:09:34
| 306,515,933
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
from flask import Flask,request
from speechToText.index import speechToText
from flask_cors import CORS
from scipy.io.wavfile import write
import base64
app = Flask(__name__)
CORS(app)
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
extention =""
data = request.get_json()["formData"]['file']
if "wav" in data:
extention = "wav"
elif "mpeg" in data:
extention = "mp3"
elif "x-panasonic-rw" in data:
extention = "raw"
else:
return "non Accepter"
with open("./speechToText/resources/exemple."+extention, 'wb') as wav_file:
wav_file.write(base64.b64decode(data.replace("data:audio/wav;base64,", "").replace("data:audio/mpeg;base64","")))
return speechToText(extention)
return "Get appel"
if __name__ == "__main__":
app.run()
|
[
"younesbe@pop-os.localdomain"
] |
younesbe@pop-os.localdomain
|
320fdd977350ab8995a33cbdc553428c2739212a
|
c9848ecdf60a12b8aca4f832aeffe7413a7c369b
|
/collections.Counter()_Sol.py
|
0797fad9af76b5dd3ea6d466f0ea723ce3804e44
|
[] |
no_license
|
tunghim/HackerRank_Python_Solutions
|
10e61402a66a233abb76ef5a7b56b28ae419abd9
|
6cd47a64e1006176e0b131f66a474f440527b01c
|
refs/heads/master
| 2020-06-23T20:27:33.334728
| 2016-12-27T03:37:09
| 2016-12-27T03:37:09
| 74,636,508
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
from collections import Counter
X = int(input())
sizeList = Counter(map(int, input().split()))
N = int(input())
money = 0
for _ in range(N):
size, price = map(int, input().split())
if sizeList[size]:
money += price
sizeList[size] -= 1
print(money)
|
[
"tunghim@gmail.com"
] |
tunghim@gmail.com
|
27b08bf753b3e7b11a571f0fed928273c7e0ca40
|
0826c6b4e96b03b9c5b0ca29ee11519f64c8abd8
|
/Assignment-1/klucb.py
|
b87ebe35ebd4055ea38bdd68f0db8b5dca90c67a
|
[] |
no_license
|
Suchetaaa/CS747-Assignments
|
7d187ccffecabd6aafd950888687a5cdff48deed
|
350bbe82dd28d94392d234496db89fc099d7b3e6
|
refs/heads/master
| 2020-07-15T09:26:30.348621
| 2019-11-08T14:21:49
| 2019-11-08T14:21:49
| 205,532,330
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,740
|
py
|
import numpy as np
import sys
import numpy.random as random
import os
def KL(p, q):
if p == 1:
return p*np.log(p/q)
elif p == 0:
return (1-p)*np.log((1-p)/(1-q))
else:
return p*np.log(p/q) + (1-p)*np.log((1-p)/(1-q))
def solve_q(rhs, p_a):
if p_a == 1:
return 1
q = np.arange(p_a, 1, 0.01)
lhs = []
for el in q:
lhs.append(KL(p_a, el))
lhs_array = np.array(lhs)
lhs_rhs = lhs_array - rhs
lhs_rhs[lhs_rhs <= 0] = np.inf
min_index = lhs_rhs.argmin()
return q[min_index]
def ucb_func(pulls, arm_rewards, time_steps, num_bandits):
ucb_arms = np.zeros(num_bandits, dtype=float)
for x in xrange(0,num_bandits):
p_a = arm_rewards[x]/pulls[x]
rhs = (np.log(time_steps) + 3*np.log(np.log(time_steps)))/pulls[x]
ucb_arms[x] = solve_q(rhs, p_a)
# print ucb_arms
return ucb_arms
def kl_ucb(num_bandits, bandit_probs, epsilon, horizon, seed):
random.seed(seed)
rewards = np.zeros((num_bandits, horizon), dtype=int)
for y in xrange(0,num_bandits):
s = np.random.binomial(1, bandit_probs[y], horizon)
rewards[y, :] = s
cum_reward = 0
# cum_reward_horizons = np.array([0, 0, 0, 0, 0, 0])
curr_arm = 0
curr_reward = 0
pulls = np.zeros(num_bandits, dtype=int)
arm_rewards = np.zeros(num_bandits, dtype=int)
ucb_arms = np.zeros(num_bandits, dtype=float)
for x in xrange(0,min(num_bandits,horizon)):
# print x
curr_arm = x
curr_reward = rewards[curr_arm, pulls[curr_arm]]
pulls[curr_arm] += 1
cum_reward += curr_reward
arm_rewards[curr_arm] += curr_reward
if horizon > num_bandits:
for y in xrange(num_bandits,horizon):
ucb_arms = ucb_func(pulls, arm_rewards, y, num_bandits)
max_ucb = np.amax(ucb_arms)
indices = np.where(ucb_arms == max_ucb)
curr_arm = np.amax(indices)
curr_reward = rewards[curr_arm, pulls[curr_arm]]
pulls[curr_arm] += 1
cum_reward += curr_reward
arm_rewards[curr_arm] += curr_reward
# print "{}".format(curr_arm)
# print y
# if y == 49:
# cum_reward_horizons[0] = cum_reward
# elif y == 199:
# cum_reward_horizons[1] = cum_reward
# elif y == 799:
# cum_reward_horizons[2] = cum_reward
# elif y == 3199:
# cum_reward_horizons[3] = cum_reward
# elif y == 12799:
# cum_reward_horizons[4] = cum_reward
# elif y == 51199:
# cum_reward_horizons[5] = cum_reward
# else: continue
# print cum_reward
return cum_reward
if __name__ == '__main__':
instance = sys.argv[1]
epsilon = float(sys.argv[2])
horizon = int(sys.argv[3])
seed = int(sys.argv[4])
instance_path = instance[3:]
abs_path = os.path.abspath(__file__)
present_dir = os.path.dirname(abs_path)
parent_dir = os.path.dirname(present_dir)
path = os.path.join(parent_dir, instance_path)
file_instance = open(path, "r")
f_lines = file_instance.readlines()
a = []
for h in f_lines:
a.append(float(h.strip()))
bandit_probs = np.array(a)
num_bandits = (bandit_probs.shape)[0]
# print num_bandits
max_p = np.amax(bandit_probs)
# print max_p
curr_reward = kl_ucb(num_bandits,bandit_probs, epsilon, horizon, seed)
regret = max_p*horizon - curr_reward
# print regret
# file_obj = open("KLData.txt", "a")
# horizons = np.array([50, 200, 800, 3200, 12800, 51200])
# regrets_horizons = max_p*horizons - curr_reward_horizons
# for x in xrange(0,6):
# file_obj.write("../instances/i-{}.txt, kl-ucb, {}, {}, {}, {}\n".format(instance+1, seed, epsilon, horizons[x], regrets_horizons[x]))
# file_obj.write("../instances/i-{}.txt, kl-ucb, {}, {}, {}, {}\n".format(instance+1, seed, epsilon, horizon, regret))
# file_obj.close()
# file_obj = open("outputs1.txt", "a")
print "{}, kl-ucb, {}, {}, {}, {}\n".format(instance, seed, epsilon, horizon, regret)
# file_obj.close()
|
[
"sucheta317@gmail.com"
] |
sucheta317@gmail.com
|
53686554e0bf31163f55281131166ba0cd7e3277
|
92a28bcb36194895676b56e3d5e224ddc6c0bb78
|
/__init__.py
|
e89a55c9060e5782d3e74942aa0962c87e68fc8c
|
[] |
no_license
|
incredibleone/projectt
|
1d08d9a22013c609168e8208eedb2d92cc95dbc4
|
da00f7ddd31acbbf19e1cece539766da4d703f88
|
refs/heads/master
| 2023-06-23T23:14:34.893396
| 2021-07-26T17:53:38
| 2021-07-26T17:53:38
| 389,722,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46
|
py
|
from . import *
from tradaboost.TrAdaboost.py
|
[
"anshulsingh52001@gmail.com"
] |
anshulsingh52001@gmail.com
|
e78ceb65a33560de313fff47b36551461777b97a
|
bbb7f48c5c53c6d09bc420bfe9e29d211bc028b0
|
/src/main.py
|
c43ad50e723e76b685dca64803a4d70cbfa874e9
|
[] |
no_license
|
Sujan242/Dynamic-Graphs
|
8f3d2dd15fb5bf43e846df3ccdcce76851687538
|
4edbd7b41d747e18d526ad9e81333edb9caa85b2
|
refs/heads/master
| 2022-11-06T21:43:20.663779
| 2020-06-21T04:32:00
| 2020-06-21T04:32:00
| 247,033,679
| 0
| 4
| null | 2020-06-01T06:28:48
| 2020-03-13T09:28:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,064
|
py
|
from connectivity import Connect
# from connectivity_by_dfs import Graph
from connectivity_by_dfs import Graph
from EulerTourTree import EulerTourTree
import random
f=open('graph_input.txt','r')
k = f.readline()
l=k.split()
n=int(l[0])
m=int(l[1])
o=Connect()
g=Graph()
for i in range(n+1):
o.add_vertex(i)
g.add_vertex(i)
flag=1
i=0
for k in f:
if flag==1:
flag=0
continue
# print(k)
i+=1
# print(i)
l=k.split()
# print(l)
u=int(l[0])
v=int(l[1])
o.add_edge(u,v)
g.add_edge(u,v)
# print(o.delete_edge(58,15))
o.delete_edge(58,15)
g.delete_edge(58,15)
# o.delete_edge(6,10)
# g.delete_edge(6,10)
# o.delete_edge(52,27)
# g.delete_edge(52,27)
# o.delete_edge(65,17)
# g.delete_edge(65,17)
# o.delete_edge(6,10)
# g.delete_edge(6,10)
# o.delete_edge(81,9)
# g.delete_edge(81,9)
del_edges = set()
def del_edge(e):
global del_edges
if e in del_edges:
return
o.delete_edge(e[0],e[1])
g.delete_edge(e[0],e[1])
def del_rand_edge():
e1 = random.randint(1,n+1)
e2 = random.randint(1,n+1)
while e1 == e2:
e2 = random.randint(1,n+1)
del_edge((e1, e2))
return (e1, e2)
# o.is_connected(2,99)
for i in range(m//10):
del_rand_edge()
import time
tr=0
fl=0
print(f"Graph is present in graph_input.txt file, first line contains no.of vertices no.of edges. Following lines contain the edges")
print(f"WE ARE DELETING {m//10} RANDOM EDGES TO PROVE THE CORRECTNESS OF OUR IMPLEMENTATION.\nSO RESULTS WILL BE VARIED EVERYTIME THE PROGRAM RUNS")
print(f"\nNo. of verices in the graph - {n}")
print(f"No. of edges in the graph - {m}")
print()
print('Running is_connected query on all vertices with one another using DFS(Naive algorithm)')
start=time.time()
for i in range(1,n+1):
for j in range(1,n+1):
if g.is_connected(i,j) ==True:
tr+=1
else:
fl+=1
print(f"Time taken by DFS - {time.time() - start} seconds")
print(f"No. of connected and disconnected vertices - {tr} , {fl}")
print()
tr=0
fl=0
print('Running is_connected query on all vertices with one another using Eulertour tree method')
start=time.time()
for i in range(1,n+1):
for j in range(1,n+1):
if o.is_connected(i,j) ==True:
tr+=1
else:
fl+=1
print(f"Time taken by our implementation - {time.time() - start} seconds")
print(f"No. of connected and disconnected vertices - {tr} , {fl}")
g_man = []
dc = Connect()
def create_g(n):
global g_man, dc
g_man = [[0]*n for i in range(n)]
dc = Connect()
for i in range(1,n+1):
dc.add_vertex(i)
def insert_e(u,v):
global g_man, dc
if g_man[u-1][v-1] == 1:
print('Edge already exists')
return
g_man[u-1][v-1] = 1
g_man[v-1][u-1] = 1
dc.add_edge(u,v)
def del_e(u,v):
global g_man, dc
if g_man[u-1][v-1] == 0:
print('Edge doesn\'t exists')
return
g_man[u-1][v-1] = 0
g_man[v-1][u-1] = 0
dc.delete_edge(u,v)
print('\nMANUAL TESTING')
c = 2
n = 0
choice_str = '\nEnter choice\n0 - QUIT\n1 - CREATE new Graph\n2 - add edge\n3 - del edge\n4 - is connected\n'
while c!=0:
c = int(input(choice_str))
if c == 0:
exit()
elif c == 1:
n = int(input('Enter no. of vertices\n'))
create_g(n)
print(f"Created Graph with {n} vertices")
elif c == 2:
u = int(input('Enter vertex 1 of the edge to be added\n'))
v = int(input('Enter vertex 2 of the edge to be added\n'))
if u < 1 or u > n or v < 1 or v > n:
print("Vertices should be between",1,n)
continue
insert_e(u,v)
print(f"Added edge({u}, {v}) to the graph")
elif c == 3:
u = int(input('Enter vertex 1 of the edge to be deleted\n'))
v = int(input('Enter vertex 2 of the edge to be deleted\n'))
if u < 1 or u > n or v < 1 or v > n:
print("Vertices should be between",1,n)
continue
del_e(u,v)
print(f"Deleted edge({u}, {v}) from the graph")
elif c == 4:
u = int(input('Enter vertex 1 of the edge to check\n'))
v = int(input('Enter vertex 2 of the edge to check\n'))
if u < 1 or u > n or v < 1 or v > n:
print("Vertices should be between",1,n)
continue
if dc.is_connected(u,v):
print(f"Vertices {u} and {v} are connected")
else:
print(f"Vertices {u} and {v} are not connected")
|
[
"akasdeeps19@gmail.com"
] |
akasdeeps19@gmail.com
|
7bdaf0803d4586e4c0ad47ce15d47ffa9ad0b31b
|
2ca063506433cbf29fc80acdc292acb66fa472f0
|
/Facial_Expression_Recognition/6_server_demo/static/face_detector_trained/emotion.py
|
cfc59eacb368f31c0b1e920c15a9c99084bfa484
|
[
"Apache-2.0"
] |
permissive
|
zhangdaifu67/Computer_Vision_Project
|
97c4c308cce64b3f5b9bc73f97cf84b919df87d2
|
f8ee6275c5e5490f2263d890f65931975fb7b29e
|
refs/heads/master
| 2020-08-10T15:26:07.356129
| 2019-10-11T01:45:18
| 2019-10-11T01:45:18
| 214,367,945
| 2
| 0
|
Apache-2.0
| 2019-10-11T07:09:02
| 2019-10-11T07:09:00
| null |
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
from flask import Flask, request
from flask import render_template
import time
from expression_demo import expression_predict # 表情预测项目
system_path = "/home/leong/emotion/"
app = Flask(__name__) # 创建一个 Flask 实例,使用单一模块,应该使用 __name__
@app.route('/')
def hello(imgPath=None):
return render_template('index.html', imgPath="/static/image/logo.jpg")
@app.route('/upload', methods=['POST'])
def upload(imgPath=None, result="None"):
"""
:param imgPath: 上传的图片会保存在服务器里
:param result: 预测的结果
:return:
"""
file = request.files['file']
fileName = file.filename
filePath = system_path+"static/image/"+fileName #"/tmp/flask_img_predict/static/image/" + fileName # 图片路径
# print(filePath)
if file:
file.save(filePath)
result = expression_predict(filePath)
if result is None:
result = "could not found your beauty face"
return render_template('index.html', imgPath="/static/image/"+fileName, result=result)
else:
return render_template('index.html', imgPath="/static/image/logo.jpg")
if __name__ == '__main__':
# 使你的服务器公开可用,WIN+R-> cmd -> ipconfig/all 可以看见主机名:在任意电脑输入 主机名:5000 即可看到效果
app.run(host="0.0.0.0", debug=True) #
|
[
"944830401@qq.com"
] |
944830401@qq.com
|
b9642de93661865bab78a7709c913727dd0be712
|
334fc2f741c1194ec5a0faa76f38155e153cd3bb
|
/examples/fourFn_test.py
|
0fb8c60225634f51518569842c031fc9d130ab2d
|
[
"MIT"
] |
permissive
|
klahnakoski/mo-parsing
|
af6a82040f489da2d56ba92173769c19b2f4e716
|
f4d2dc8e4ff1b1269f94585530192cb72bb6a732
|
refs/heads/master
| 2023-08-30T17:00:04.505856
| 2023-08-22T03:10:18
| 2023-08-22T03:10:18
| 241,972,972
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,679
|
py
|
# fourFn.py
#
# Demonstration of the mo_parsing module, implementing a simple 4-function expression parser,
# with support for scientific notation, and symbols for e and pi.
# Extended to add exponentiation and simple built-in functions.
# Extended test cases, simplified pushFirst method.
# Removed unnecessary expr.suppress() call (thanks Nathaniel Peterson!), and added Group
# Changed fnumber to use a Regex, which is now the preferred method
# Reformatted to latest pypyparsing features, support multiple and variable args to functions
#
# Copyright 2003-2019 by Paul McGuire
#
import math
from examples.fourFn import exprStack, bnf, evaluate_stack
from mo_parsing.utils import Log
def test(s, expected):
try:
exprStack[:] = []
bnf.parse_string(s, parse_all=True)
val = evaluate_stack(exprStack[:])
if val != expected:
Log.error("wrong")
except Exception:
if expected:
Log.error("wrong")
test("9", 9)
test("-9", -9)
test("--9", 9)
test("-E", -math.e)
test("9 + 3 + 6", 9 + 3 + 6)
test("9 + 3 / 11", 9 + 3.0 / 11)
test("(9 + 3)", (9 + 3))
test("(9+3) / 11", (9 + 3.0) / 11)
test("9 - 12 - 6", 9 - 12 - 6)
test("9 - (12 - 6)", 9 - (12 - 6))
test("2*3.14159", 2 * 3.14159)
test("3.1415926535*3.1415926535 / 10", 3.1415926535 * 3.1415926535 / 10)
test("PI * PI / 10", math.pi * math.pi / 10)
test("PI*PI/10", math.pi * math.pi / 10)
test("PI^2", math.pi ** 2)
test("round(PI^2)", round(math.pi ** 2))
test("6.02E23 * 8.048", 6.02e23 * 8.048)
test("e / 3", math.e / 3)
test("sin(PI/2)", math.sin(math.pi / 2))
test("10+sin(PI/4)^2", 10 + math.sin(math.pi / 4) ** 2)
test("trunc(E)", int(math.e))
test("trunc(-E)", int(-math.e))
test("round(E)", round(math.e))
test("round(-E)", round(-math.e))
test("E^PI", math.e ** math.pi)
test("exp(0)", 1)
test("exp(1)", math.e)
test("2^3^2", 2 ** 3 ** 2)
test("(2^3)^2", (2 ** 3) ** 2)
test("2^3+2", 2 ** 3 + 2)
test("2^3+5", 2 ** 3 + 5)
test("2^9", 2 ** 9)
test("sgn(-2)", -1)
test("sgn(0)", 0)
test("sgn(0.1)", 1)
test("foo(0.1)", None)
test("round(E, 3)", round(math.e, 3))
test("round(PI^2, 3)", round(math.pi ** 2, 3))
test("sgn(cos(PI/4))", 1)
test("sgn(cos(PI/2))", 0)
test("sgn(cos(PI*3/4))", -1)
test("+(sgn(cos(PI/4)))", 1)
test("-(sgn(cos(PI/4)))", -1)
"""
Test output:
>python fourFn.py
9 = 9 ['9'] => ['9']
-9 = -9 ['-', '9'] => ['9', 'unary -']
--9 = 9 ['-', '-', '9'] => ['9', 'unary -', 'unary -']
-E = -2.718281828459045 ['-', 'E'] => ['E', 'unary -']
9 + 3 + 6 = 18 ['9', '+', '3', '+', '6'] => ['9', '3', '+', '6', '+']
9 + 3 / 11 = 9.272727272727273 ['9', '+', '3', '/', '11'] => ['9', '3', '11', '/', '+']
(9 + 3) = 12 [['9', '+', '3']] => ['9', '3', '+']
(9+3) / 11 = 1.0909090909090908 [['9', '+', '3'], '/', '11'] => ['9', '3', '+', '11', '/']
9 - 12 - 6 = -9 ['9', '-', '12', '-', '6'] => ['9', '12', '-', '6', '-']
9 - (12 - 6) = 3 ['9', '-', ['12', '-', '6']] => ['9', '12', '6', '-', '-']
2*3.14159 = 6.28318 ['2', '*', '3.14159'] => ['2', '3.14159', '*']
3.1415926535*3.1415926535 / 10 = 0.9869604400525172 ['3.1415926535', '*', '3.1415926535', '/', '10'] => ['3.1415926535', '3.1415926535', '*', '10', '/']
PI * PI / 10 = 0.9869604401089358 ['PI', '*', 'PI', '/', '10'] => ['PI', 'PI', '*', '10', '/']
PI*PI/10 = 0.9869604401089358 ['PI', '*', 'PI', '/', '10'] => ['PI', 'PI', '*', '10', '/']
PI^2 = 9.869604401089358 ['PI', '^', '2'] => ['PI', '2', '^']
round(PI^2) = 10 [('round', 1), [['PI', '^', '2']]] => ['PI', '2', '^', ('round', 1)]
6.02E23 * 8.048 = 4.844896e+24 ['6.02E23', '*', '8.048'] => ['6.02E23', '8.048', '*']
e / 3 = 0.9060939428196817 ['E', '/', '3'] => ['E', '3', '/']
sin(PI/2) = 1.0 [('sin', 1), [['PI', '/', '2']]] => ['PI', '2', '/', ('sin', 1)]
10+sin(PI/4)^2 = 10.5 ['10', '+', ('sin', 1), [['PI', '/', '4']], '^', '2'] => ['10', 'PI', '4', '/', ('sin', 1), '2', '^', '+']
trunc(E) = 2 [('trunc', 1), [['E']]] => ['E', ('trunc', 1)]
trunc(-E) = -2 [('trunc', 1), [['-', 'E']]] => ['E', 'unary -', ('trunc', 1)]
round(E) = 3 [('round', 1), [['E']]] => ['E', ('round', 1)]
round(-E) = -3 [('round', 1), [['-', 'E']]] => ['E', 'unary -', ('round', 1)]
E^PI = 23.140692632779263 ['E', '^', 'PI'] => ['E', 'PI', '^']
exp(0) = 1.0 [('exp', 1), [['0']]] => ['0', ('exp', 1)]
exp(1) = 2.718281828459045 [('exp', 1), [['1']]] => ['1', ('exp', 1)]
2^3^2 = 512 ['2', '^', '3', '^', '2'] => ['2', '3', '2', '^', '^']
(2^3)^2 = 64 [['2', '^', '3'], '^', '2'] => ['2', '3', '^', '2', '^']
2^3+2 = 10 ['2', '^', '3', '+', '2'] => ['2', '3', '^', '2', '+']
2^3+5 = 13 ['2', '^', '3', '+', '5'] => ['2', '3', '^', '5', '+']
2^9 = 512 ['2', '^', '9'] => ['2', '9', '^']
sgn(-2) = -1 [('sgn', 1), [['-', '2']]] => ['2', 'unary -', ('sgn', 1)]
sgn(0) = 0 [('sgn', 1), [['0']]] => ['0', ('sgn', 1)]
sgn(0.1) = 1 [('sgn', 1), [['0.1']]] => ['0.1', ('sgn', 1)]
foo(0.1) failed eval: invalid identifier 'foo' ['0.1', ('foo', 1)]
round(E, 3) = 2.718 [('round', 2), [['E'], ['3']]] => ['E', '3', ('round', 2)]
round(PI^2, 3) = 9.87 [('round', 2), [['PI', '^', '2'], ['3']]] => ['PI', '2', '^', '3', ('round', 2)]
sgn(cos(PI/4)) = 1 [('sgn', 1), [[('cos', 1), [['PI', '/', '4']]]]] => ['PI', '4', '/', ('cos', 1), ('sgn', 1)]
sgn(cos(PI/2)) = 0 [('sgn', 1), [[('cos', 1), [['PI', '/', '2']]]]] => ['PI', '2', '/', ('cos', 1), ('sgn', 1)]
sgn(cos(PI*3/4)) = -1 [('sgn', 1), [[('cos', 1), [['PI', '*', '3', '/', '4']]]]] => ['PI', '3', '*', '4', '/', ('cos', 1), ('sgn', 1)]
+(sgn(cos(PI/4))) = 1 ['+', [('sgn', 1), [[('cos', 1), [['PI', '/', '4']]]]]] => ['PI', '4', '/', ('cos', 1), ('sgn', 1)]
-(sgn(cos(PI/4))) = -1 ['-', [('sgn', 1), [[('cos', 1), [['PI', '/', '4']]]]]] => ['PI', '4', '/', ('cos', 1), ('sgn', 1), 'unary -']
"""
|
[
"kyle@lahnakoski.com"
] |
kyle@lahnakoski.com
|
00054f224feac895bdeb59caf0cd9aa1a4ec7ba7
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r8/Gen/DecFiles/options/13102401.py
|
51ba130c83268a3466ef39a7a7bdf749d0a89dca
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,746
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/13102401.py generated: Fri, 27 Mar 2015 15:48:14
#
# Event Type: 13102401
#
# ASCII decay Descriptor: [B_s0 -> rho+ K-]cc
#
from Configurables import Generation
Generation().EventType = 13102401
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bs_rho+K-=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 531,-531 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 531
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 531,-531 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_531.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 13102401
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
24b7c56a4431a04a5a5bd4b5942dda7c680b3846
|
2de05b2984d04b12713bb805b370009b4d8c8eb0
|
/venv/Lib/site-packages/spatialmath/geom3d.py
|
91a5a99732ee3ef2832b9b9dd7741491475e153b
|
[] |
no_license
|
prasun-biswas/robot2link
|
5a759fe1750729d3b85b8bae3dd2434031264dc5
|
15a90bc18c61fe964c103dcf8b0a3700ee8aa65f
|
refs/heads/master
| 2023-08-16T14:25:14.207578
| 2021-10-14T13:09:57
| 2021-10-14T13:09:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39,003
|
py
|
# Part of Spatial Math Toolbox for Python
# Copyright (c) 2000 Peter Corke
# MIT Licence, see details in top-level file: LICENCE
import numpy as np
import math
from collections import namedtuple
import matplotlib.pyplot as plt
import spatialmath.base as base
from spatialmath import SE3
from spatialmath.baseposelist import BasePoseList
_eps = np.finfo(np.float64).eps
# ======================================================================== #
class Plane3:
r"""
Create a plane object from linear coefficients
:param c: Plane coefficients
:type c: 4-element array_like
:return: a Plane object
:rtype: Plane
Planes are represented by the 4-vector :math:`[a, b, c, d]` which describes
the plane :math:`\pi: ax + by + cz + d=0`.
"""
def __init__(self, c):
self.plane = base.getvector(c, 4)
# point and normal
@classmethod
def PN(cls, p, n):
"""
Create a plane object from point and normal
:param p: Point in the plane
:type p: 3-element array_like
:param n: Normal to the plane
:type n: 3-element array_like
:return: a Plane object
:rtype: Plane
"""
n = base.getvector(n, 3) # normal to the plane
p = base.getvector(p, 3) # point on the plane
return cls(np.r_[n, -np.dot(n, p)])
# point and normal
@classmethod
def P3(cls, p):
"""
Create a plane object from three points
:param p: Three points in the plane
:type p: numpy.ndarray, shape=(3,3)
:return: a Plane object
:rtype: Plane
"""
p = base.ismatrix(p, (3,3))
v1 = p[:,0]
v2 = p[:,1]
v3 = p[:,2]
# compute a normal
n = np.cross(v2-v1, v3-v1)
return cls(n, v1)
# line and point
# 3 points
@property
def n(self):
r"""
Normal to the plane
:return: Normal to the plane
:rtype: 3-element array_like
For a plane :math:`\pi: ax + by + cz + d=0` this is the vector
:math:`[a,b,c]`.
"""
# normal
return self.plane[:3]
@property
def d(self):
r"""
Plane offset
:return: Offset of the plane
:rtype: float
For a plane :math:`\pi: ax + by + cz + d=0` this is the scalar
:math:`d`.
"""
return self.plane[3]
def contains(self, p, tol=10*_eps):
"""
:param p: A 3D point
:type p: 3-element array_like
:param tol: Tolerance, defaults to 10*_eps
:type tol: float, optional
:return: if the point is in the plane
:rtype: bool
"""
return abs(np.dot(self.n, p) - self.d) < tol
def plot(self, bounds=None, ax=None, **kwargs):
ax = base.axes_logic(ax, 3)
if bounds is None:
bounds = np.r_[ax.get_xlim(), ax.get_ylim(), ax.get_zlim()]
# X, Y = np.meshgrid(bounds[0: 2], bounds[2: 4])
# Z = -(X * self.plane[0] + Y * self.plane[1] + self.plane[3]) / self.plane[2]
X, Y = np.meshgrid(np.linspace(bounds[0], bounds[1], 50),
np.linspace(bounds[2], bounds[3], 50))
Z = -(X * self.plane[0] + Y * self.plane[1] + self.plane[3]) / self.plane[2]
Z[Z < bounds[4]] = np.nan
Z[Z > bounds[5]] = np.nan
ax.plot_surface(X, Y, Z, **kwargs)
def __str__(self):
"""
:return: String representation of plane
:rtype: str
"""
return str(self.plane)
# ======================================================================== #
class Line3(BasePoseList):
"""
Plucker coordinate class
Concrete class to represent a 3D line using Plucker coordinates.
Methods:
Plucker Contructor from points
Plucker.planes Constructor from planes
Plucker.pointdir Constructor from point and direction
Information and test methods::
closest closest point on line
commonperp common perpendicular for two lines
contains test if point is on line
distance minimum distance between two lines
intersects intersection point for two lines
intersect_plane intersection points with a plane
intersect_volume intersection points with a volume
pp principal point
ppd principal point distance from origin
point generate point on line
Conversion methods::
char convert to human readable string
double convert to 6-vector
skew convert to 4x4 skew symmetric matrix
Display and print methods::
display display in human readable form
plot plot line
Operators:
* multiply Plucker matrix by a general matrix
| test if lines are parallel
^ test if lines intersect
== test if two lines are equivalent
~= test if lines are not equivalent
Notes:
- This is reference (handle) class object
- Plucker objects can be used in vectors and arrays
References:
- Ken Shoemake, "Ray Tracing News", Volume 11, Number 1
http://www.realtimerendering.com/resources/RTNews/html/rtnv11n1.html#art3
- Matt Mason lecture notes http://www.cs.cmu.edu/afs/cs/academic/class/16741-s07/www/lectures/lecture9.pdf
- Robotics, Vision & Control: Second Edition, P. Corke, Springer 2016; p596-7.
Implementation notes:
- The internal representation is a 6-vector [v, w] where v (moment), w (direction).
- There is a huge variety of notation used across the literature, as well as the ordering
of the direction and moment components in the 6-vector.
Copyright (C) 1993-2019 Peter I. Corke
"""
# w # direction vector
# v # moment vector (normal of plane containing line and origin)
def __init__(self, v=None, w=None):
"""
Create a Plucker 3D line object
:param v: Plucker vector, Plucker object, Plucker moment
:type v: 6-element array_like, Plucker instance, 3-element array_like
:param w: Plucker direction, optional
:type w: 3-element array_like, optional
:raises ValueError: bad arguments
:return: Plucker line
:rtype: Plucker
- ``L = Plucker(X)`` creates a Plucker object from the Plucker coordinate vector
``X`` = [V,W] where V (3-vector) is the moment and W (3-vector) is the line direction.
- ``L = Plucker(L)`` creates a copy of the Plucker object ``L``.
- ``L = Plucker(V, W)`` creates a Plucker object from moment ``V`` (3-vector) and
line direction ``W`` (3-vector).
Notes:
- The Plucker object inherits from ``collections.UserList`` and has list-like
behaviours.
- A single Plucker object contains a 1D array of Plucker coordinates.
- The elements of the array are guaranteed to be Plucker coordinates.
- The number of elements is given by ``len(L)``
- The elements can be accessed using index and slice notation, eg. ``L[1]`` or
``L[2:3]``
- The Plucker instance can be used as an iterator in a for loop or list comprehension.
- Some methods support operations on the internal list.
:seealso: Plucker.PQ, Plucker.Planes, Plucker.PointDir
"""
super().__init__() # enable list powers
if w is None:
# zero or one arguments passed
if super().arghandler(v, convertfrom=(SE3,)):
return
else:
# additional arguments
assert base.isvector(v, 3) and base.isvector(w, 3), 'expecting two 3-vectors'
self.data = [np.r_[v, w]]
# needed to allow __rmul__ to work if left multiplied by ndarray
#self.__array_priority__ = 100
@property
def shape(self):
return (6,)
@staticmethod
def _identity():
return np.zeros((6,))
@staticmethod
def isvalid(x, check=False):
return x.shape == (6,)
@classmethod
def TwoPoints(cls, P=None, Q=None):
"""
Create Plucker line object from two 3D points
:param P: First 3D point
:type P: 3-element array_like
:param Q: Second 3D point
:type Q: 3-element array_like
:return: Plucker line
:rtype: Plucker
``L = Plucker(P, Q)`` create a Plucker object that represents
the line joining the 3D points ``P`` (3-vector) and ``Q`` (3-vector). The direction
is from ``Q`` to ``P``.
:seealso: Plucker, Plucker.Planes, Plucker.PointDir
"""
P = base.getvector(P, 3)
Q = base.getvector(Q, 3)
# compute direction and moment
w = P - Q
v = np.cross(w, P)
return cls(np.r_[v, w])
@classmethod
def TwoPlanes(cls, pi1, pi2):
r"""
Create Plucker line from two planes
:param pi1: First plane
:type pi1: 4-element array_like, or Plane
:param pi2: Second plane
:type pi2: 4-element array_like, or Plane
:return: Plucker line
:rtype: Plucker
``L = Plucker.planes(PI1, PI2)`` is a Plucker object that represents
the line formed by the intersection of two planes ``PI1`` and ``PI2``.
Planes are represented by the 4-vector :math:`[a, b, c, d]` which describes
the plane :math:`\pi: ax + by + cz + d=0`.
:seealso: Plucker, Plucker.PQ, Plucker.PointDir
"""
# TODO inefficient to create 2 temporary planes
if not isinstance(pi1, Plane3):
pi1 = Plane3(base.getvector(pi1, 4))
if not isinstance(pi2, Plane3):
pi2 = Plane3(base.getvector(pi2, 4))
w = np.cross(pi1.n, pi2.n)
v = pi2.d * pi1.n - pi1.d * pi2.n
return cls(np.r_[v, w])
@classmethod
def PointDir(cls, point, dir):
"""
Create Plucker line from point and direction
:param point: A 3D point
:type point: 3-element array_like
:param dir: Direction vector
:type dir: 3-element array_like
:return: Plucker line
:rtype: Plucker
``L = Plucker.pointdir(P, W)`` is a Plucker object that represents the
line containing the point ``P`` and parallel to the direction vector ``W``.
:seealso: Plucker, Plucker.Planes, Plucker.PQ
"""
p = base.getvector(point, 3)
w = base.getvector(dir, 3)
v = np.cross(w, p)
return cls(np.r_[v, w])
def append(self, x):
"""
:param x: Plucker object
:type x: Plucker
:raises ValueError: Attempt to append a non Plucker object
:return: Plucker object with new Plucker line appended
:rtype: Plucker
"""
#print('in append method')
if not type(self) == type(x):
raise ValueError("can pnly append Plucker object")
if len(x) > 1:
raise ValueError("cant append a Plucker sequence - use extend")
super().append(x.A)
@property
def A(self):
# get the underlying numpy array
if len(self.data) == 1:
return self.data[0]
else:
return self.data
def __getitem__(self, i):
# print('getitem', i, 'class', self.__class__)
return self.__class__(self.data[i])
@property
def v(self):
"""
Moment vector
:return: the moment vector
:rtype: numpy.ndarray, shape=(3,)
"""
return self.data[0][0:3]
@property
def w(self):
"""
Direction vector
:return: the direction vector
:rtype: numpy.ndarray, shape=(3,)
:seealso: Plucker.uw
"""
return self.data[0][3:6]
@property
def uw(self):
"""
Line direction as a unit vector
:return: Line direction
:rtype: numpy.ndarray, shape=(3,)
``line.uw`` is a unit-vector parallel to the line.
"""
return base.unitvec(self.w)
@property
def vec(self):
"""
Line as a Plucker coordinate vector
:return: Coordinate vector
:rtype: numpy.ndarray, shape=(6,)
``line.vec`` is the Plucker coordinate vector ``X`` = [V,W] where V (3-vector)
is the moment and W (3-vector) is the line direction.
"""
return np.r_[self.v, self.w]
@property
def skew(self):
r"""
Line as a Plucker skew-matrix
:return: Skew-symmetric matrix form of Plucker coordinates
:rtype: numpy.ndarray, shape=(4,4)
``M = line.skew()`` is the Plucker matrix, a 4x4 skew-symmetric matrix
representation of the line.
.. math::
\sk{L} = \begin{bmatrix} 0 & v_z & -v_y & \omega_x \\
-v_z & 0 & v_x & \omega_y \\
v_y & -v_x & 0 & \omega_z \\
-\omega_x & -\omega_y & -\omega_z & 0 \end{bmatrix}
.. note::
- For two homogeneous points P and Q on the line, :math:`PQ^T-QP^T` is
also skew symmetric.
- The projection of Plucker line by a perspective camera is a
homogeneous line (3x1) given by :math:`\vee C M C^T` where :math:`C
\in \mathbf{R}^{3 \times 4}` is the camera matrix.
"""
v = self.v
w = self.w
# the following matrix is at odds with H&Z pg. 72
return np.array([
[ 0, v[2], -v[1], w[0]],
[-v[2], 0 , v[0], w[1]],
[ v[1], -v[0], 0, w[2]],
[-w[0], -w[1], -w[2], 0 ]
])
@property
def pp(self):
"""
Principal point of the line
``line.pp`` is the point on the line that is closest to the origin.
Notes:
- Same as Plucker.point(0)
:seealso: Plucker.ppd, Plucker.point
"""
return np.cross(self.v, self.w) / np.dot(self.w, self.w)
@property
def ppd(self):
"""
Distance from principal point to the origin
:return: Distance from principal point to the origin
:rtype: float
``line.ppd`` is the distance from the principal point to the origin.
This is the smallest distance of any point on the line
to the origin.
:seealso: Plucker.pp
"""
return math.sqrt(np.dot(self.v, self.v) / np.dot(self.w, self.w) )
def point(self, lam):
r"""
Generate point on line
:param lam: Scalar distance from principal point
:type lam: float
:return: Distance from principal point to the origin
:rtype: float
``line.point(LAMBDA)`` is a point on the line, where ``LAMBDA`` is the parametric
distance along the line from the principal point of the line such
that :math:`P = P_p + \lambda \hat{d}` and :math:`\hat{d}` is the line
direction given by ``line.uw``.
:seealso: Plucker.pp, Plucker.closest, Plucker.uw
"""
lam = base.getvector(lam, out='row')
return self.pp.reshape((3,1)) + self.uw.reshape((3,1)) * lam
def lam(self, point):
return np.dot( point.flatten() - self.pp, self.uw)
# ------------------------------------------------------------------------- #
# TESTS ON PLUCKER OBJECTS
# ------------------------------------------------------------------------- #
def contains(self, x, tol=50*_eps):
"""
Test if points are on the line
:param x: 3D point
:type x: 3-element array_like, or numpy.ndarray, shape=(3,N)
:param tol: Tolerance, defaults to 50*_eps
:type tol: float, optional
:raises ValueError: Bad argument
:return: Whether point is on the line
:rtype: bool or numpy.ndarray(N) of bool
``line.contains(X)`` is true if the point ``X`` lies on the line defined by
the Plucker object self.
If ``X`` is an array with 3 rows, the test is performed on every column and
an array of booleans is returned.
"""
if base.isvector(x, 3):
x = base.getvector(x)
return np.linalg.norm( np.cross(x - self.pp, self.w) ) < tol
elif base.ismatrix(x, (3,None)):
return [np.linalg.norm(np.cross(_ - self.pp, self.w)) < tol for _ in x.T]
else:
raise ValueError('bad argument')
def __eq__(self, l2): # pylint: disable=no-self-argument
"""
Test if two lines are equivalent
:param l1: First line
:type l1: Plucker
:param l2: Second line
:type l2: Plucker
:return: Plucker
:return: line equivalence
:rtype: bool
``L1 == L2`` is true if the Plucker objects describe the same line in
space. Note that because of the over parameterization, lines can be
equivalent even if their coordinate vectors are different.
"""
l1 = self
return abs( 1 - np.dot(base.unitvec(l1.vec), base.unitvec(l2.vec))) < 10*_eps
def __ne__(self, l2): # pylint: disable=no-self-argument
"""
Test if two lines are not equivalent
:param l1: First line
:type l1: Plucker
:param l2: Second line
:type l2: Plucker
:return: line inequivalence
:rtype: bool
``L1 != L2`` is true if the Plucker objects describe different lines in
space. Note that because of the over parameterization, lines can be
equivalent even if their coordinate vectors are different.
"""
l1 = self
return not l1.__eq__(l2)
def isparallel(self, l2, tol=10*_eps): # pylint: disable=no-self-argument
"""
Test if lines are parallel
:param l1: First line
:type l1: Plucker
:param l2: Second line
:type l2: Plucker
:return: lines are parallel
:rtype: bool
``l1.isparallel(l2)`` is true if the two lines are parallel.
``l1 | l2`` as above but in binary operator form
:seealso: Plucker.or, Plucker.intersects
"""
l1 = self
return np.linalg.norm(np.cross(l1.w, l2.w) ) < tol
def __or__(self, l2): # pylint: disable=no-self-argument
"""
Overloaded ``|`` operator tests for parallelism
:param l1: First line
:type l1: Plucker
:param l2: Second line
:type l2: Plucker
:return: lines are parallel
:rtype: bool
``l1 | l2`` is an operator which is true if the two lines are parallel.
.. note:: The ``|`` operator has low precendence.
:seealso: Plucker.isparallel, Plucker.__xor__
"""
l1 = self
return l1.isparallel(l2)
def __xor__(self, l2): # pylint: disable=no-self-argument
"""
Overloaded ``^`` operator tests for intersection
:param l1: First line
:type l1: Plucker
:param l2: Second line
:type l2: Plucker
:return: lines intersect
:rtype: bool
``l1 ^ l2`` is an operator which is true if the two lines intersect at a point.
.. note::
- The ``^`` operator has low precendence.
- Is ``False`` if the lines are equivalent since they would intersect at
an infinite number of points.
:seealso: Plucker.intersects, Plucker.parallel
"""
l1 = self
return not l1.isparallel(l2) and (abs(l1 * l2) < 10*_eps )
# ------------------------------------------------------------------------- #
# PLUCKER LINE DISTANCE AND INTERSECTION
# ------------------------------------------------------------------------- #
def intersects(self, l2): # pylint: disable=no-self-argument
"""
Intersection point of two lines
:param l1: First line
:type l1: Plucker
:param l2: Second line
:type l2: Plucker
:return: 3D intersection point
:rtype: numpy.ndarray, shape=(3,) or None
``l1.intersects(l2)`` is the point of intersection of the two lines, or
``None`` if the lines do not intersect or are equivalent.
:seealso: Plucker.commonperp, Plucker.eq, Plucker.__xor__
"""
l1 = self
if l1^l2:
# lines do intersect
return -(np.dot(l1.v, l2.w) * np.eye(3, 3) + \
l1.w.reshape((3,1)) @ l2.v.reshape((1,3)) - \
l2.w.reshape((3,1)) @ l1.v.reshape((1,3))) * base.unitvec(np.cross(l1.w, l2.w))
else:
# lines don't intersect
return None
def distance(self, l2): # pylint: disable=no-self-argument
"""
Minimum distance between lines
:param l1: First line
:type l1: Plucker
:param l2: Second line
:type l2: Plucker
:return: Closest distance
:rtype: float
``l1.distance(l2) is the minimum distance between two lines.
Notes:
- Works for parallel, skew and intersecting lines.
"""
l1 = self
if l1 | l2:
# lines are parallel
l = np.cross(l1.w, l1.v - l2.v * np.dot(l1.w, l2.w) / dot(l2.w, l2.w)) / np.linalg.norm(l1.w)
else:
# lines are not parallel
if abs(l1 * l2) < 10*_eps:
# lines intersect at a point
l = 0
else:
# lines don't intersect, find closest distance
l = abs(l1 * l2) / np.linalg.norm(np.cross(l1.w, l2.w))**2
return l
def closest_to_line(self, line):
"""
Closest point between two lines
:param line: second line
:type line: Plucker
:return: nearest points and distance between lines at those points
:rtype: ndarray(3,N), ndarray(N)
Finds the point on the first line closest to the second line, as well
as the minimum distance between the lines.
For two sets of lines, of equal size, return an array of closest points
and distances.
Example:
.. runblock:: pycon
>>> from spatialmath import Plucker
>>> line1 = Plucker.TwoPoints([1, 1, 0], [1, 1, 1])
>>> line2 = Plucker.TwoPoints([0, 0, 0], [2, 3, 5])
>>> line1.closest_to_line(line2)
:reference: `Plucker coordinates <https://web.cs.iastate.edu/~cs577/handouts/plucker-coordinates.pdf>`_
"""
# point on line closest to another line
# https://web.cs.iastate.edu/~cs577/handouts/plucker-coordinates.pdf
# but (20) (21) is the negative of correct answer
p = []
dist = []
for line1, line2 in zip(self, line):
v1 = line1.v
w1 = line1.w
v2 = line2.v
w2 = line2.w
with np.errstate(divide='ignore', invalid='ignore'):
p1 = (np.cross(v1, np.cross(w2, np.cross(w1, w2))) - np.dot(v2, np.cross(w1, w2)) * w1) \
/ np.sum(np.cross(w1, w2) ** 2)
p2 = (np.cross(-v2, np.cross(w1, np.cross(w1, w2))) + np.dot(v1, np.cross(w1, w2)) * w2) \
/ np.sum(np.cross(w1, w2) ** 2)
p.append(p1)
dist.append(np.linalg.norm(p1 - p2))
if len(p) == 1:
return p[0], dist[0]
else:
return np.array(p).T, np.array(dist)
def closest_to_point(self, x):
"""
Point on line closest to given point
:param line: A line
:type l1: Plucker
:param l2: An arbitrary 3D point
:type l2: 3-element array_like
:return: Point on the line and distance to line
:rtype: ndarray(3), float
Find the point on the line closest to ``x`` as well as the distance
at that closest point.
Example:
.. runblock:: pycon
>>> from spatialmath import Plucker
>>> line1 = Plucker.TwoPoints([0, 0, 0], [2, 2, 3])
>>> line1.closest_to_point([1, 1, 1])
:seealso: Plucker.point
"""
# http://www.ahinson.com/algorithms_general/Sections/Geometry/PluckerLine.pdf
# has different equation for moment, the negative
x = base.getvector(x, 3)
lam = np.dot(x - self.pp, self.uw)
p = self.point(lam).flatten() # is the closest point on the line
d = np.linalg.norm( x - p)
return p, d
def commonperp(self, l2): # pylint: disable=no-self-argument
"""
Common perpendicular to two lines
:param l1: First line
:type l1: Plucker
:param l2: Second line
:type l2: Plucker
:return: Perpendicular line
:rtype: Plucker or None
``l1.commonperp(l2)`` is the common perpendicular line between the two lines.
Returns ``None`` if the lines are parallel.
:seealso: Plucker.intersect
"""
l1 = self
if l1 | l2:
# no common perpendicular if lines are parallel
return None
else:
# lines are skew or intersecting
w = np.cross(l1.w, l2.w)
v = np.cross(l1.v, l2.w) - np.cross(l2.v, l1.w) + \
(l1 * l2) * np.dot(l1.w, l2.w) * base.unitvec(np.cross(l1.w, l2.w))
return self.__class__(v, w)
def __mul__(self, right): # pylint: disable=no-self-argument
r"""
Reciprocal product
:param left: Left operand
:type left: Plucker
:param right: Right operand
:type right: Plucker
:return: reciprocal product
:rtype: float
``left * right`` is the scalar reciprocal product :math:`\hat{w}_L \dot m_R + \hat{w}_R \dot m_R`.
Notes:
- Multiplication or composition of Plucker lines is not defined.
- Pre-multiplication by an SE3 object is supported, see ``__rmul__``.
:seealso: Plucker.__rmul__
"""
left = self
if isinstance(right, Line3):
# reciprocal product
return np.dot(left.uw, right.v) + np.dot(right.uw, left.v)
else:
raise ValueError('bad arguments')
def __rmul__(self, left): # pylint: disable=no-self-argument
"""
Line transformation
:param left: Rigid-body transform
:type left: SE3
:param right: Right operand
:type right: Plucker
:return: transformed line
:rtype: Plucker
``T * line`` is the line transformed by the rigid body transformation ``T``.
:seealso: Plucker.__mul__
"""
right = self
if isinstance(left, SE3):
A = np.r_[ np.c_[left.R, base.skew(-left.t) @ left.R],
np.c_[np.zeros((3,3)), left.R]
]
return self.__class__( A @ right.vec) # premultiply by SE3
else:
raise ValueError('bad arguments')
# ------------------------------------------------------------------------- #
# PLUCKER LINE DISTANCE AND INTERSECTION
# ------------------------------------------------------------------------- #
def intersect_plane(self, plane): # pylint: disable=no-self-argument
r"""
Line intersection with a plane
:param line: A line
:type line: Plucker
:param plane: A plane
:type plane: 4-element array_like or Plane
:return: Intersection point
:rtype: collections.namedtuple
- ``line.intersect_plane(plane).p`` is the point where the line
intersects the plane, or None if no intersection.
- ``line.intersect_plane(plane).lam`` is the `lambda` value for the point on the line
that intersects the plane.
The plane can be specified as:
- a 4-vector :math:`[a, b, c, d]` which describes the plane :math:`\pi: ax + by + cz + d=0`.
- a ``Plane`` object
The return value is a named tuple with elements:
- ``.p`` for the point on the line as a numpy.ndarray, shape=(3,)
- ``.lam`` the `lambda` value for the point on the line.
See also Plucker.point.
"""
# Line U, V
# Plane N n
# (VxN-nU:U.N)
# Note that this is in homogeneous coordinates.
# intersection of plane (n,p) with the line (v,p)
# returns point and line parameter
if not isinstance(plane, Plane3):
plane = Plane3(base.getvector(plane, 4))
den = np.dot(self.w, plane.n)
if abs(den) > (100*_eps):
# P = -(np.cross(line.v, plane.n) + plane.d * line.w) / den
p = (np.cross(self.v, plane.n) - plane.d * self.w) / den
t = self.lam(p)
return namedtuple('intersect_plane', 'p lam')(p, t)
else:
return None
def intersect_volume(self, bounds):
"""
Line intersection with a volume
:param line: A line
:type line: Plucker
:param bounds: Bounds of an axis-aligned rectangular cuboid
:type plane: 6-element array_like
:return: Intersection point
:rtype: collections.namedtuple
``line.intersect_volume(bounds).p`` is a matrix (3xN) with columns
that indicate where the line intersects the faces of the volume
specified by ``bounds`` = [xmin xmax ymin ymax zmin zmax]. The number of
columns N is either:
- 0, when the line is outside the plot volume or,
- 2 when the line pierces the bounding volume.
``line.intersect_volume(bounds).lam`` is an array of shape=(N,) where
N is as above.
The return value is a named tuple with elements:
- ``.p`` for the points on the line as a numpy.ndarray, shape=(3,N)
- ``.lam`` for the `lambda` values for the intersection points as a
numpy.ndarray, shape=(N,).
See also Plucker.plot, Plucker.point.
"""
intersections = []
# reshape, top row is minimum, bottom row is maximum
bounds23 = bounds.reshape((3, 2))
for face in range(0, 6):
# for each face of the bounding volume
# x=xmin, x=xmax, y=ymin, y=ymax, z=zmin, z=zmax
# planes are:
# 0 normal in x direction, xmin
# 1 normal in x direction, xmax
# 2 normal in y direction, ymin
# 3 normal in y direction, ymax
# 4 normal in z direction, zmin
# 5 normal in z direction, zmax
i = face // 2 # 0, 1, 2
I = np.eye(3,3)
p = [0, 0, 0]
p[i] = bounds[face]
plane = Plane3.PN(n=I[:,i], p=p)
# find where line pierces the plane
try:
p, lam = self.intersect_plane(plane)
except TypeError:
continue # no intersection with this plane
# print('face %d: n=(%f, %f, %f)' % (face, plane.n[0], plane.n[1], plane.n[2]))
# print(' : p=(%f, %f, %f) ' % (p[0], p[1], p[2]))
# print('face', face, ' point ', p, ' plane ', plane)
# print('lamda', lam, self.point(lam))
# find if intersection point is within the cube face
# test x,y,z simultaneously
k = (p >= bounds23[:,0]) & (p <= bounds23[:,1])
k = np.delete(k, i) # remove the boolean corresponding to current face
if all(k):
# if within bounds, add
intersections.append(lam)
# print(' HIT');
# put them in ascending order
intersections.sort()
p = self.point(intersections)
return namedtuple('intersect_volume', 'p lam')(p, intersections)
# ------------------------------------------------------------------------- #
# PLOT AND DISPLAY
# ------------------------------------------------------------------------- #
def plot(self, *pos, bounds=None, axis=None, **kwargs):
"""
Plot a line
:param line: A line
:type line: Plucker
:param bounds: Bounds of an axis-aligned rectangular cuboid as [xmin xmax ymin ymax zmin zmax], optional
:type plane: 6-element array_like
:param **kwargs: Extra arguents passed to `Line2D <https://matplotlib.org/3.2.2/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D>`_
:return: Plotted line
:rtype: Line3D or None
- ``line.plot(bounds)`` adds a line segment to the current axes, and the handle of the line is returned.
The line segment is defined by the intersection of the line and the given rectangular cuboid.
If the line does not intersect the plotting volume None is returned.
- ``line.plot()`` as above but the bounds are taken from the axis limits of the current axes.
The line color or style is specified by:
- a MATLAB-style linestyle like 'k--'
- additional arguments passed to `Line2D <https://matplotlib.org/3.2.2/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D>`_
:seealso: Plucker.intersect_volume
"""
if axis is None:
ax = plt.gca()
else:
ax = axis
if bounds is None:
bounds = np.r_[ax.get_xlim(), ax.get_ylim(), ax.get_zlim()]
else:
bounds = base.getvector(bounds, 6)
ax.set_xlim(bounds[:2])
ax.set_ylim(bounds[2:4])
ax.set_zlim(bounds[4:6])
# print(bounds)
#U = self.Q - self.P;
#line.p = self.P; line.v = unit(U);
lines = []
for line in self:
P, lam = line.intersect_volume(bounds)
if len(lam) > 0:
l = ax.plot(tuple(P[0,:]), tuple(P[1,:]), tuple(P[2,:]), *pos, **kwargs)
lines.append(l)
return lines
def __str__(self):
"""
Convert to a string
:return: String representation of line parameters
:rtype: str
``str(line)`` is a string showing Plucker parameters in a compact single
line format like::
{ 0 0 0; -1 -2 -3}
where the first three numbers are the moment, and the last three are the
direction vector.
"""
return '\n'.join(['{{ {:.5g} {:.5g} {:.5g}; {:.5g} {:.5g} {:.5g}}}'.format(*list(base.removesmall(x.vec))) for x in self])
def __repr__(self):
"""
%Twist.display Display parameters
%
L.display() displays the twist parameters in compact single line format. If L is a
vector of Twist objects displays one line per element.
%
Notes::
- This method is invoked implicitly at the command line when the result
of an expression is a Twist object and the command has no trailing
semicolon.
%
See also Twist.char.
"""
if len(self) == 1:
return "Plucker([{:.5g}, {:.5g}, {:.5g}, {:.5g}, {:.5g}, {:.5g}])".format(*list(self.A))
else:
return "Plucker([\n" + \
',\n'.join([" [{:.5g}, {:.5g}, {:.5g}, {:.5g}, {:.5g}, {:.5g}]".format(*list(tw)) for tw in self.data]) +\
"\n])"
def _repr_pretty_(self, p, cycle):
"""
Pretty string for IPython
:param p: pretty printer handle (ignored)
:param cycle: pretty printer flag (ignored)
Print colorized output when variable is displayed in IPython, ie. on a line by
itself.
Example::
In [1]: x
"""
if len(self) == 1:
p.text(str(self))
else:
for i, x in enumerate(self):
if i > 0:
p.break_()
p.text(f"{i:3d}: {str(x)}")
# function z = side(self1, pl2)
# Plucker.side Plucker side operator
#
# # X = SIDE(P1, P2) is the side operator which is zero whenever
# # the lines P1 and P2 intersect or are parallel.
#
# # See also Plucker.or.
#
# if ~isa(self2, 'Plucker')
# error('SMTB:Plucker:badarg', 'both arguments to | must be Plucker objects');
# end
# L1 = pl1.line(); L2 = pl2.line();
#
# z = L1([1 5 2 6 3 4]) * L2([5 1 6 2 4 3])';
# end
#
# function z = intersect(self1, pl2)
# Plucker.intersect Line intersection
#
# PL1.intersect(self2) is zero if the lines intersect. It is positive if PL2
# passes counterclockwise and negative if PL2 passes clockwise. Defined as
# looking in direction of PL1
#
# ---------->
# o o
# ---------->
# counterclockwise clockwise
#
# z = dot(self1.w, pl1.v) + dot(self2.w, pl2.v);
# end
# Static factory methods for constructors from exotic representations
class Plucker(Line3):
def __init__(self, v=None, w=None):
import warnings
warnings.warn('use Line class instead', DeprecationWarning)
super().__init__(v, w)
if __name__ == '__main__': # pragma: no cover
import pathlib
import os.path
a = Plane3([0.1, -1, -1, 2])
base.plotvol3(5)
a.plot(color='r', alpha=0.3)
plt.show(block=True)
# a = SE3.Exp([2,0,0,0,0,0])
# exec(open(pathlib.Path(__file__).parent.parent.absolute() / "tests" / "test_geom3d.py").read()) # pylint: disable=exec-used
|
[
"Rpblikescodes@gmail.com"
] |
Rpblikescodes@gmail.com
|
55731c532edc27437393021d7c5142c7a0582820
|
d393841cb0a781cfcf11ad934f2b1a06efbca291
|
/factory/tools/analyze_frontends
|
174afc21f875bff660a61d97b33ccda0b3037524
|
[] |
no_license
|
djw8605/glideinWMS
|
50c8d5228a5e5b4a30bd46fa3932ab4caf53446f
|
8c562c63dde2884aeed04599998fa6608ecdee4c
|
refs/heads/master
| 2021-01-16T20:54:53.730437
| 2012-05-16T15:45:26
| 2012-05-16T15:45:26
| 1,834,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,466
|
#!/usr/bin/env python
# analyze_frontends
# Focuses mostly on Client Info
import os, sys, getopt, re
import datetime
import urllib
STARTUP_DIR=sys.path[0]
sys.path.append(os.path.join(STARTUP_DIR,"../../lib"))
sys.path.append(os.path.join(STARTUP_DIR,"lib"))
import xmlParse, analyze
def list_print(frontend,zero_supp,entry_data,sorting,attr_list,sort_attribute,div):
to_be_printed = []
sum2 = 0
for entry_name, entry in entry_data.iteritems():
entry['RunDiff'] = entry['StatusRunning']-entry['ClientGlideTotal']
# avoid division by zero
unmatched_percent = 0
rundiff_percent = 0
if entry['StatusRunning']!=0:
rundiff_percent = float(entry['RunDiff'])/float(entry['StatusRunning'])*100
if entry['ClientGlideTotal']!=0:
unmatched_percent = float(entry['ClientGlideIdle'])/float(entry['ClientGlideTotal'])*100
entry['UM'] = unmatched_percent
entry['RD'] = rundiff_percent
if zero_supp==1:
sum = 0
for a in attr_list:
sum += entry[a]
if sum==0:
continue
sum2 += sum
to_be_printed.append((entry[attr_list[sort_attribute]],
("%-40s %7s %7s %7s | %7s | %7s %7s %7s | %7s %4d%% %4d%%"
% (entry_name.lstrip("entry_"),
analyze.km(float(entry['ClientGlideTotal'])/div),
analyze.km(float(entry['ClientGlideRunning'])/div),
analyze.km(float(entry['ClientGlideIdle'])/div),
analyze.km(float(entry['ReqIdle'])/div),
analyze.km(float(entry['ClientJobsRunning'])/div),
analyze.km(float(entry['ClientJobsRunHere'])/div),
analyze.km(float(entry['ClientJobsIdle'])/div),
analyze.km(float(entry['RunDiff'])/div),
entry['UM'],
entry['RD']))))
columns = "%-40s %7s %7s %7s | %7s | %7s %7s %7s | %7s %5s %5s\n" % (
frontend, "Regd","Claimd","Unmtchd","ReqIdle","JobRun","JobHere","JobIdle","RunDiff","%UM","%RD")
if sorting == 1:
if zero_supp==1 and sum2==0:
return
to_be_printed.sort()
to_be_printed.reverse()
to_be_printed.insert(0,(0,columns))
for a in to_be_printed:
print a[1]
print
else:
if zero_supp==1 and sum2==0:
return
to_be_printed.insert(0,(0,columns))
for a in to_be_printed:
print a[1]
print
##########################################################################
def main():
usage="""
USAGE:
-x [#] : interval to do verbose stats (default 24)
--source [ABSPATH or http addr] : factory base (default current directory)
-s [attribute]: sort by attribute
(-s x to see list of choices)
-f [frontend] : filter by a single frontend
(can omit "frontend_" before name)
-z : zero suppression (don't show entries with 0s across all attributes)
-p : show all periods (default off: just show 24 hours)
-m : frontend mode - emphasize frontend data (don't show entry data)
default unit: slots
--ms : frontend mode, using seconds instead of slots
--mh : frontend mode, using hours instead of slots
-h : this usage message
"""
# flags
x = 24
dir = os.getcwd()
sorting = 0
sort_attribute = 0
filter_frontend = 0
frontend_mode = 0
show_all_periods = 0
zero_supp = 0
try:
opts, args = getopt.getopt(sys.argv[1:], 'x:hwf:s:zmp', ['source=','ms','mh'])
except getopt.GetoptError:
print("\n Option not recognized or missing parameters.")
print(" Use -h for usage.\n")
sys.exit(0)
for o, a in opts:
if o == "-x":
x = a
elif o == "--source":
dir = a
elif o in ("-h", "-help"):
print usage
return
elif o == "-s":
if a=='%UM' or a=='UM': a='%unmatched'
if a=='%RD' or a=='RD': a='%rundiff'
sort_attribute = a.lower()
sorting = 1
elif o == "-z":
zero_supp = 1
elif o == "-p":
show_all_periods = 1
elif o == "-m":
frontend_mode = 1
elif o == "--ms":
frontend_mode = 2
elif o == "--mh":
frontend_mode = 3
elif o == "-f":
filter_frontend = a
if 'frontend_' not in filter_frontend:
filter_frontend = 'frontend_' + filter_frontend
attributes = {'ClientGlideTotal':0,'ClientGlideIdle':0,'ClientGlideRunning':0,
'ClientJobsRunning':0,'ClientJobsRunHere':0,'ClientJobsIdle':0,
'ReqIdle':0,'StatusRunning':0,'StatusHeld':0}
sort_attributes = ['registered', 'claimed', 'unmatched',
'jobsrunning','jobsrunhere','jobsidle',
'reqidle','rundiff',
'%unmatched','%rundiff']
attr_list = ['ClientGlideTotal','ClientGlideRunning','ClientGlideIdle',
'ClientJobsRunning','ClientJobsRunHere','ClientJobsIdle',
'ReqIdle','RunDiff','UM','RD']
if sorting !=0:
if sort_attribute not in sort_attributes:
print("%s not in list of attributes. Choices are:\n" % sort_attribute)
for a in sort_attributes:
print a
print
return
sort_attribute = sort_attributes.index(sort_attribute)
data = {} #sorted data
rrd_data = {}
rrd = "rrd_Status_Attributes.xml"
if "http" in dir:
file_dir = os.path.join(dir, rrd)
else:
file_dir = os.path.join(dir, "monitor", rrd)
try:
u = urllib.urlopen(file_dir)
rrd_data = xmlParse.xmlfile2dict(u)
except:
print "\nCannot open", file_dir,"\n\tor",rrd,"was not found there.\n"
raise
u.close()
# rrd_data[updated,total,entries[entry[total[periods], frontends[periods]]]]
# rrd_data numbers verified by hand
##############################################################################
# Rearranges rrd_data from data[entries][frontends][periods]
# into data = [periods][frontends][entries][elements]
# (periods are integers and in seconds)
###############################################################################
frontend_list = []
for entry in rrd_data['entries']:
for frontend in rrd_data['entries'][entry]['frontends']:
if frontend not in frontend_list:
frontend_list.append(frontend)
if filter_frontend != 0:
if filter_frontend not in frontend_list:
print "\nFrontend", filter_frontend, "not found at source.\n"
print "Choices are:\n "
for frontend in frontend_list:
print frontend
print
sys.exit(1)
for entry in rrd_data['entries']:
for frontend in rrd_data['entries'][entry]['frontends']:
#if filtering, only choose selected frontend
if filter_frontend != 0:
if frontend != filter_frontend:
continue
for period, elements in rrd_data['entries'][entry]['frontends'][frontend]['periods'].iteritems():
if int(period) not in data:
data[int(period)] = {}
if frontend not in data[int(period)]:
data[int(period)][frontend] = {}
if entry not in data[int(period)][frontend]:
data[int(period)][frontend][entry] = {}
for a in attributes.keys():
if a not in data[int(period)][frontend][entry]:
data[int(period)][frontend][entry][a] = 0
try: data[int(period)][frontend][entry][a] += int(float(elements[a])*int(period))
except: pass
# data[period[frontend[entry[element[value]]]]]
#'data' numbers verified by hand
#debug_print_dict(data)
#####################################################################
# Organize totals/stats for each period, frontend, and entry independantly
######################################################################
if filter_frontend == 0:
print("""
Status Attributes (Clients) analysis for All Entries - %s
""" % datetime.datetime.now().strftime("%d-%m-%Y_%H:%M:%S"))
else: print("""
Status Attributes (Clients) analysis for %s - %s
""" % (filter_frontend, datetime.datetime.now().strftime("%d-%m-%Y_%H:%M:%S")))
period_data = {}
frontend_data = {}
entry_data = {}
entry_data_all_frontends = {}
for period, frontends in data.iteritems():
period = int(period)
period_data[period] = {}
frontend_data[period] = {}
entry_data[period] = {}
entry_data_all_frontends[period] = {}
for a in attributes.keys(): period_data[period][a]=0
for frontend, entries in frontends.iteritems():
frontend_data[period][frontend]={}
entry_data[period][frontend] = {}
for a in attributes.keys(): frontend_data[period][frontend][a]=0
for entry, elements in entries.iteritems():
entry_data[period][frontend][entry] = {}
entry_data_all_frontends[period][entry] = {}
for a in attributes.keys():
entry_data[period][frontend][entry][a]=0
entry_data_all_frontends[period][entry][a]=0
for a in attributes.keys():
entry_data[period][frontend][entry][a] += elements[a]
frontend_data[period][frontend][a] += elements[a]
period_data[period][a] += elements[a]
entry_data_all_frontends[period][entry][a] += elements[a]
######################################################################
# Print
######################################################################
# sort periods from least to greatest, with 24 hours at the top
period_list = period_data.keys()
period_list.sort()
period_list.remove(86400)
period_list.insert(0,86400)
period = int(x)*3600
# if filtering by period, make sure it's in the data
if period not in period_list:
print "Interval",x,"does not exist in data.\n Choices are:"
for a in period_list:
print a/3600
print
return
if show_all_periods==0:
period_list=[period]
for period in period_list:
title = ("Past %.1f hours" % (float(period)/3600))
print(
"""----------------------------------------
%s:
Registered: %s
Claimed: %s
Unmatched : %s
Requested Idle: %s
Jobs Running: %s
Jobs Run Here: %s
Jobs Idle: %s
RunDiff (Running-ClientRegistered): %s
"""
% (title,
analyze.printline(period_data[period]['ClientGlideTotal'],1,period),
analyze.printline(period_data[period]['ClientGlideRunning'],period_data[period]['ClientGlideTotal'],period),
analyze.printline(period_data[period]['ClientGlideIdle'],period_data[period]['ClientGlideTotal'],period),
analyze.printline(period_data[period]['ReqIdle'],1,period),
analyze.printline(period_data[period]['ClientJobsRunning'],1,period),
analyze.printline(period_data[period]['ClientJobsRunHere'],1,period),
analyze.printline(period_data[period]['ClientJobsIdle'],1,period),
analyze.printline(period_data[period]['StatusRunning']-period_data[period]['ClientGlideTotal'],period_data[period]['StatusRunning'],period)))
################################################################################
# Print list-style stats
################################################################################
period = int(x)*3600
if filter_frontend == 0 and frontend_mode == 0:
print """
---------------------------------------
---------------------------------------
Per Entry (all frontends) stats for the past %s hours.\n""" % x
list_print("",zero_supp, entry_data_all_frontends[period],sorting, attr_list, sort_attribute,1)
if frontend_mode == 0:
print """
---------------------------------------
---------------------------------------
Per Entry (per frontend) stats for the past %s hours.\n""" % x
if frontend_mode==0:
for frontend, entries in data[period].iteritems():
list_print(frontend, zero_supp, entry_data[period][frontend], sorting, attr_list, sort_attribute,1)
else: # print frontend like entries, and omit entries
units = ["Slots","Seconds","Hours"]
divs = [period,1.0,3600.0]
print """
---------------------------------------
---------------------------------------
Frontend stats for the past %s hours, units = %s.\n""" % (x,units[frontend_mode-1])
list_print("", zero_supp, frontend_data[period],sorting,attr_list,sort_attribute,divs[frontend_mode-1])
################################################################################
# Print Key
################################################################################
print("""-----------------------------------
LEGEND:
K = x 1,000
M = x 100,000
Regd - Registered (ClientGlideTotal)
Claimd - Claimed (ClientGlideRunning)
Unmtchd - Unmatched (ClientGlideIdle)
ReqIdle - Requested Idle
JobRun - Client Jobs Running
JobHere - Client Jobs Run Here
JobIdle - Client Jobs Idle
RunDiff - StatusRunning - ClientRegistered (ClientGlideTotal)
%UM - Percent Unmatched (Unmatched/Registered)
%RD - Percent RunDiff over Running (RunDiff/StatusRunning)
-------------------------------------
\n""")
if __name__ == "__main__":
main()
|
[
"sfiligoi"
] |
sfiligoi
|
|
2a95a869e7d772ab128482d441931e4fa0c543aa
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/FGzWE8vNyxtTrw3Qg_9.py
|
5ad6edc044c49e09b4fc47b751fcc79350dfb72e
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 725
|
py
|
def get_nbrs(grid, r, c):
nbrs = [[r+dr, c+dc] for dr, dc in [[-1,0],[0,1],[1,0],[0,-1]]]
return [[nr, nc] for nr, nc in nbrs if 0<=nr<len(grid) and 0<=nc<len(grid[0]) and grid[nr][nc]==1]
def is_region(grid, r, c):
if grid[r][c] != 1: return False
# set all interconnected cells in region to 0
# using backtracking to cells with multiple neighbours
stack = []
while True:
grid[r][c] = 0
nbrs = get_nbrs(grid, r, c)
if not nbrs:
if not stack: break
r, c = stack.pop()
else:
if len(nbrs) > 1: stack.append([r, c])
r, c = nbrs[0]
return True
def num_regions(grid):
return sum(1 for r in range(len(grid)) for c in range(len(grid[0])) if is_region(grid, r, c))
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
0ea8adfd5aaa807ca297b5e326a09a94de77fb47
|
ab6ea034c31627ae33996495e71274d7befdcd2e
|
/selfdrive/controls/controlsd.py
|
9918ff0bd8e7e7425b852588bc0bc85d48e96a83
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
declarus/open-pilot
|
484bade78316395ad83e5ce3af694fdfc9a177f8
|
6f26bbcc90222280ab0f2c41889024eb67fae7d6
|
refs/heads/master
| 2022-07-08T08:39:13.354359
| 2020-02-06T12:10:22
| 2020-02-06T12:10:22
| 237,997,130
| 0
| 0
|
MIT
| 2022-06-22T01:00:58
| 2020-02-03T15:27:05
|
C
|
UTF-8
|
Python
| false
| false
| 22,821
|
py
|
#!/usr/bin/env python3
import os
import gc
import json
import capnp
import zmq
from cereal import car, log
from common.numpy_fast import clip
from common.realtime import sec_since_boot, set_realtime_priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import selfdrive.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.services import service_list
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_alert, get_one_can
from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET
from selfdrive.controls.lib.drive_helpers import get_events, \
create_event, \
EventTypes as ET, \
update_v_cruise, \
initialize_v_cruise
from selfdrive.controls.lib.longcontrol import LongControl, STARTING_TARGET_SPEED
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.alertmanager import AlertManager
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.controls.lib.driver_monitor import DriverStatus, MAX_TERMINAL_ALERTS
from selfdrive.controls.lib.planner import LON_MPC_STEP
from selfdrive.controls.lib.gps_helpers import is_rhd_region
from selfdrive.locationd.calibration_helpers import Calibration, Filter
ThermalStatus = log.ThermalData.ThermalStatus
State = log.ControlsState.OpenpilotState
HwType = log.HealthData.HwType
def isActive(state):
"""Check if the actuators are enabled"""
return state in [State.enabled, State.softDisabling]
def isEnabled(state):
"""Check if openpilot is engaged"""
return (isActive(state) or state == State.preEnabled)
def events_to_bytes(events):
# optimization when comparing capnp structs: str() or tree traverse are much slower
ret = []
for e in events:
if isinstance(e, capnp.lib.capnp._DynamicStructReader):
e = e.as_builder()
ret.append(e.to_bytes())
return ret
def data_sample(CI, CC, sm, can_poller, can_sock, cal_status, cal_perc, overtemp, free_space, low_battery,
driver_status, state, mismatch_counter, params):
"""Receive data from sockets and create events for battery, temperature and disk space"""
# Update carstate from CAN and create events
can_strs = messaging.drain_sock_raw_poller(can_poller, can_sock, wait_for_one=True)
CS = CI.update(CC, can_strs)
sm.update(0)
events = list(CS.events)
enabled = isEnabled(state)
# Check for CAN timeout
if not can_strs:
events.append(create_event('canError', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
if sm.updated['thermal']:
overtemp = sm['thermal'].thermalStatus >= ThermalStatus.red
free_space = sm['thermal'].freeSpace < 0.07 # under 7% of space free no enable allowed
low_battery = sm['thermal'].batteryPercent < 1 and sm['thermal'].chargingError # at zero percent battery, while discharging, OP should not allowed
# Create events for battery, temperature and disk space
if low_battery:
events.append(create_event('lowBattery', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if overtemp:
events.append(create_event('overheat', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if free_space:
events.append(create_event('outOfSpace', [ET.NO_ENTRY]))
# GPS coords RHD parsing, once every restart
if sm.updated['gpsLocation'] and not driver_status.is_rhd_region_checked:
is_rhd = is_rhd_region(sm['gpsLocation'].latitude, sm['gpsLocation'].longitude)
driver_status.is_rhd_region = is_rhd
driver_status.is_rhd_region_checked = True
put_nonblocking("IsRHD", "1" if is_rhd else "0")
# Handle calibration
if sm.updated['liveCalibration']:
cal_status = sm['liveCalibration'].calStatus
cal_perc = sm['liveCalibration'].calPerc
cal_rpy = [0,0,0]
if cal_status != Calibration.CALIBRATED:
if cal_status == Calibration.UNCALIBRATED:
events.append(create_event('calibrationIncomplete', [ET.NO_ENTRY, ET.SOFT_DISABLE, ET.PERMANENT]))
else:
events.append(create_event('calibrationInvalid', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
else:
rpy = sm['liveCalibration'].rpyCalib
if len(rpy) == 3:
cal_rpy = rpy
# When the panda and controlsd do not agree on controls_allowed
# we want to disengage openpilot. However the status from the panda goes through
# another socket other than the CAN messages and one can arrive earlier than the other.
# Therefore we allow a mismatch for two samples, then we trigger the disengagement.
if not enabled:
mismatch_counter = 0
if sm.updated['health']:
controls_allowed = sm['health'].controlsAllowed
if not controls_allowed and enabled:
mismatch_counter += 1
if mismatch_counter >= 2:
events.append(create_event('controlsMismatch', [ET.IMMEDIATE_DISABLE]))
# Driver monitoring
if sm.updated['driverMonitoring']:
driver_status.get_pose(sm['driverMonitoring'], cal_rpy, CS.vEgo, enabled)
if driver_status.terminal_alert_cnt >= MAX_TERMINAL_ALERTS:
events.append(create_event("tooDistracted", [ET.NO_ENTRY]))
return CS, events, cal_status, cal_perc, overtemp, free_space, low_battery, mismatch_counter
def state_transition(frame, CS, CP, state, events, soft_disable_timer, v_cruise_kph, AM):
"""Compute conditional state transitions and execute actions on state transitions"""
enabled = isEnabled(state)
v_cruise_kph_last = v_cruise_kph
# if stock cruise is completely disabled, then we can use our own set speed logic
if not CP.enableCruise:
v_cruise_kph = update_v_cruise(v_cruise_kph, CS.buttonEvents, enabled)
elif CP.enableCruise and CS.cruiseState.enabled:
v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
# decrease the soft disable timer at every step, as it's reset on
# entrance in SOFT_DISABLING state
soft_disable_timer = max(0, soft_disable_timer - 1)
# DISABLED
if state == State.disabled:
if get_events(events, [ET.ENABLE]):
if get_events(events, [ET.NO_ENTRY]):
for e in get_events(events, [ET.NO_ENTRY]):
AM.add(frame, str(e) + "NoEntry", enabled)
else:
if get_events(events, [ET.PRE_ENABLE]):
state = State.preEnabled
else:
state = State.enabled
AM.add(frame, "enable", enabled)
v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, v_cruise_kph_last)
# ENABLED
elif state == State.enabled:
if get_events(events, [ET.USER_DISABLE]):
state = State.disabled
AM.add(frame, "disable", enabled)
elif get_events(events, [ET.IMMEDIATE_DISABLE]):
state = State.disabled
for e in get_events(events, [ET.IMMEDIATE_DISABLE]):
AM.add(frame, e, enabled)
elif get_events(events, [ET.SOFT_DISABLE]):
state = State.softDisabling
soft_disable_timer = 300 # 3s
for e in get_events(events, [ET.SOFT_DISABLE]):
AM.add(frame, e, enabled)
# SOFT DISABLING
elif state == State.softDisabling:
if get_events(events, [ET.USER_DISABLE]):
state = State.disabled
AM.add(frame, "disable", enabled)
elif get_events(events, [ET.IMMEDIATE_DISABLE]):
state = State.disabled
for e in get_events(events, [ET.IMMEDIATE_DISABLE]):
AM.add(frame, e, enabled)
elif not get_events(events, [ET.SOFT_DISABLE]):
# no more soft disabling condition, so go back to ENABLED
state = State.enabled
elif get_events(events, [ET.SOFT_DISABLE]) and soft_disable_timer > 0:
for e in get_events(events, [ET.SOFT_DISABLE]):
AM.add(frame, e, enabled)
elif soft_disable_timer <= 0:
state = State.disabled
# PRE ENABLING
elif state == State.preEnabled:
if get_events(events, [ET.USER_DISABLE]):
state = State.disabled
AM.add(frame, "disable", enabled)
elif get_events(events, [ET.IMMEDIATE_DISABLE, ET.SOFT_DISABLE]):
state = State.disabled
for e in get_events(events, [ET.IMMEDIATE_DISABLE, ET.SOFT_DISABLE]):
AM.add(frame, e, enabled)
elif not get_events(events, [ET.PRE_ENABLE]):
state = State.enabled
return state, soft_disable_timer, v_cruise_kph, v_cruise_kph_last
def state_control(frame, rcv_frame, plan, path_plan, CS, CP, state, events, v_cruise_kph, v_cruise_kph_last,
AM, rk, driver_status, LaC, LoC, read_only, is_metric, cal_perc):
"""Given the state, this function returns an actuators packet"""
actuators = car.CarControl.Actuators.new_message()
enabled = isEnabled(state)
active = isActive(state)
# check if user has interacted with the car
driver_engaged = len(CS.buttonEvents) > 0 or \
v_cruise_kph != v_cruise_kph_last or \
CS.steeringPressed
# add eventual driver distracted events
events = driver_status.update(events, driver_engaged, isActive(state), CS.standstill)
# send FCW alert if triggered by planner
if plan.fcw:
AM.add(frame, "fcw", enabled)
# State specific actions
if state in [State.preEnabled, State.disabled]:
LaC.reset()
LoC.reset(v_pid=CS.vEgo)
elif state in [State.enabled, State.softDisabling]:
# parse warnings from car specific interface
for e in get_events(events, [ET.WARNING]):
extra_text = ""
if e == "belowSteerSpeed":
if is_metric:
extra_text = str(int(round(CP.minSteerSpeed * CV.MS_TO_KPH))) + " kph"
else:
extra_text = str(int(round(CP.minSteerSpeed * CV.MS_TO_MPH))) + " mph"
AM.add(frame, e, enabled, extra_text_2=extra_text)
plan_age = DT_CTRL * (frame - rcv_frame['plan'])
dt = min(plan_age, LON_MPC_STEP + DT_CTRL) + DT_CTRL # no greater than dt mpc + dt, to prevent too high extraps
a_acc_sol = plan.aStart + (dt / LON_MPC_STEP) * (plan.aTarget - plan.aStart)
v_acc_sol = plan.vStart + dt * (a_acc_sol + plan.aStart) / 2.0
# Gas/Brake PID loop
actuators.gas, actuators.brake = LoC.update(active, CS.vEgo, CS.brakePressed, CS.standstill, CS.cruiseState.standstill,
v_cruise_kph, v_acc_sol, plan.vTargetFuture, a_acc_sol, CP)
# Steering PID loop and lateral MPC
actuators.steer, actuators.steerAngle, lac_log = LaC.update(active, CS.vEgo, CS.steeringAngle, CS.steeringRate, CS.steeringTorqueEps, CS.steeringPressed, CP, path_plan)
# Send a "steering required alert" if saturation count has reached the limit
if LaC.sat_flag and CP.steerLimitAlert:
AM.add(frame, "steerSaturated", enabled)
# Parse permanent warnings to display constantly
for e in get_events(events, [ET.PERMANENT]):
extra_text_1, extra_text_2 = "", ""
if e == "calibrationIncomplete":
extra_text_1 = str(cal_perc) + "%"
if is_metric:
extra_text_2 = str(int(round(Filter.MIN_SPEED * CV.MS_TO_KPH))) + " kph"
else:
extra_text_2 = str(int(round(Filter.MIN_SPEED * CV.MS_TO_MPH))) + " mph"
AM.add(frame, str(e) + "Permanent", enabled, extra_text_1=extra_text_1, extra_text_2=extra_text_2)
AM.process_alerts(frame)
return actuators, v_cruise_kph, driver_status, v_acc_sol, a_acc_sol, lac_log
def data_send(sm, pm, CS, CI, CP, VM, state, events, actuators, v_cruise_kph, rk, AM,
driver_status, LaC, LoC, read_only, start_time, v_acc, a_acc, lac_log, events_prev):
"""Send actuators and hud commands to the car, send controlsstate and MPC logging"""
CC = car.CarControl.new_message()
CC.enabled = isEnabled(state)
CC.actuators = actuators
CC.cruiseControl.override = True
CC.cruiseControl.cancel = not CP.enableCruise or (not isEnabled(state) and CS.cruiseState.enabled)
# Some override values for Honda
brake_discount = (1.0 - clip(actuators.brake * 3., 0.0, 1.0)) # brake discount removes a sharp nonlinearity
CC.cruiseControl.speedOverride = float(max(0.0, (LoC.v_pid + CS.cruiseState.speedOffset) * brake_discount) if CP.enableCruise else 0.0)
CC.cruiseControl.accelOverride = CI.calc_accel_override(CS.aEgo, sm['plan'].aTarget, CS.vEgo, sm['plan'].vTarget)
CC.hudControl.setSpeed = float(v_cruise_kph * CV.KPH_TO_MS)
CC.hudControl.speedVisible = isEnabled(state)
CC.hudControl.lanesVisible = isEnabled(state)
CC.hudControl.leadVisible = sm['plan'].hasLead
right_lane_visible = sm['pathPlan'].rProb > 0.5
left_lane_visible = sm['pathPlan'].lProb > 0.5
CC.hudControl.rightLaneVisible = bool(right_lane_visible)
CC.hudControl.leftLaneVisible = bool(left_lane_visible)
blinker = CS.leftBlinker or CS.rightBlinker
ldw_allowed = CS.vEgo > 12.5 and not blinker
if len(list(sm['pathPlan'].rPoly)) == 4:
CC.hudControl.rightLaneDepart = bool(ldw_allowed and sm['pathPlan'].rPoly[3] > -(1.08 + CAMERA_OFFSET) and right_lane_visible)
if len(list(sm['pathPlan'].lPoly)) == 4:
CC.hudControl.leftLaneDepart = bool(ldw_allowed and sm['pathPlan'].lPoly[3] < (1.08 - CAMERA_OFFSET) and left_lane_visible)
CC.hudControl.visualAlert = AM.visual_alert
if not read_only:
# send car controls over can
can_sends = CI.apply(CC)
pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
force_decel = driver_status.awareness < 0.
# controlsState
dat = messaging.new_message()
dat.init('controlsState')
dat.valid = CS.canValid
dat.controlsState = {
"alertText1": AM.alert_text_1,
"alertText2": AM.alert_text_2,
"alertSize": AM.alert_size,
"alertStatus": AM.alert_status,
"alertBlinkingRate": AM.alert_rate,
"alertType": AM.alert_type,
"alertSound": AM.audible_alert,
"awarenessStatus": max(driver_status.awareness, -0.1) if isEnabled(state) else 1.0,
"driverMonitoringOn": bool(driver_status.face_detected),
"canMonoTimes": list(CS.canMonoTimes),
"planMonoTime": sm.logMonoTime['plan'],
"pathPlanMonoTime": sm.logMonoTime['pathPlan'],
"enabled": isEnabled(state),
"active": isActive(state),
"vEgo": CS.vEgo,
"vEgoRaw": CS.vEgoRaw,
"angleSteers": CS.steeringAngle,
"curvature": VM.calc_curvature((CS.steeringAngle - sm['pathPlan'].angleOffset) * CV.DEG_TO_RAD, CS.vEgo),
"steerOverride": CS.steeringPressed,
"state": state,
"engageable": not bool(get_events(events, [ET.NO_ENTRY])),
"longControlState": LoC.long_control_state,
"vPid": float(LoC.v_pid),
"vCruise": float(v_cruise_kph),
"upAccelCmd": float(LoC.pid.p),
"uiAccelCmd": float(LoC.pid.i),
"ufAccelCmd": float(LoC.pid.f),
"angleSteersDes": float(LaC.angle_steers_des),
"vTargetLead": float(v_acc),
"aTarget": float(a_acc),
"jerkFactor": float(sm['plan'].jerkFactor),
"gpsPlannerActive": sm['plan'].gpsPlannerActive,
"vCurvature": sm['plan'].vCurvature,
"decelForModel": sm['plan'].longitudinalPlanSource == log.Plan.LongitudinalPlanSource.model,
"cumLagMs": -rk.remaining * 1000.,
"startMonoTime": int(start_time * 1e9),
"mapValid": sm['plan'].mapValid,
"forceDecel": bool(force_decel),
}
if CP.lateralTuning.which() == 'pid':
dat.controlsState.lateralControlState.pidState = lac_log
elif CP.lateralTuning.which() == 'lqr':
dat.controlsState.lateralControlState.lqrState = lac_log
elif CP.lateralTuning.which() == 'indi':
dat.controlsState.lateralControlState.indiState = lac_log
pm.send('controlsState', dat)
# carState
cs_send = messaging.new_message()
cs_send.init('carState')
cs_send.valid = CS.canValid
cs_send.carState = CS
cs_send.carState.events = events
pm.send('carState', cs_send)
# carEvents - logged every second or on change
events_bytes = events_to_bytes(events)
if (sm.frame % int(1. / DT_CTRL) == 0) or (events_bytes != events_prev):
ce_send = messaging.new_message()
ce_send.init('carEvents', len(events))
ce_send.carEvents = events
pm.send('carEvents', ce_send)
# carParams - logged every 50 seconds (> 1 per segment)
if (sm.frame % int(50. / DT_CTRL) == 0):
cp_send = messaging.new_message()
cp_send.init('carParams')
cp_send.carParams = CP
pm.send('carParams', cp_send)
# carControl
cc_send = messaging.new_message()
cc_send.init('carControl')
cc_send.valid = CS.canValid
cc_send.carControl = CC
pm.send('carControl', cc_send)
return CC, events_bytes
def controlsd_thread(sm=None, pm=None, can_sock=None):
gc.disable()
# start the loop
set_realtime_priority(3)
params = Params()
is_metric = params.get("IsMetric", encoding='utf8') == "1"
passive = params.get("Passive", encoding='utf8') == "1"
openpilot_enabled_toggle = params.get("OpenpilotEnabledToggle", encoding='utf8') == "1"
passive = passive or not openpilot_enabled_toggle
# Pub/Sub Sockets
if pm is None:
pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState', 'carControl', 'carEvents', 'carParams'])
if sm is None:
sm = messaging.SubMaster(['thermal', 'health', 'liveCalibration', 'driverMonitoring', 'plan', 'pathPlan', \
'gpsLocation'], ignore_alive=['gpsLocation'])
can_poller = zmq.Poller()
if can_sock is None:
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
can_sock = messaging.sub_sock(service_list['can'].port, timeout=can_timeout)
can_poller.register(can_sock)
# wait for health and CAN packets
hw_type = messaging.recv_one(sm.sock['health']).health.hwType
has_relay = hw_type in [HwType.blackPanda, HwType.uno]
print("Waiting for CAN messages...")
get_one_can(can_sock)
CI, CP = get_car(can_sock, pm.sock['sendcan'], has_relay)
car_recognized = CP.carName != 'mock'
# If stock camera is disconnected, we loaded car controls and it's not chffrplus
controller_available = CP.enableCamera and CI.CC is not None and not passive
read_only = not car_recognized or not controller_available or CP.dashcamOnly
if read_only:
CP.safetyModel = CP.safetyModelPassive
# Write CarParams for radard and boardd safety mode
params.put("CarParams", CP.to_bytes())
params.put("LongitudinalControl", "1" if CP.openpilotLongitudinalControl else "0")
CC = car.CarControl.new_message()
AM = AlertManager()
startup_alert = get_startup_alert(car_recognized, controller_available)
AM.add(sm.frame, startup_alert, False)
LoC = LongControl(CP, CI.compute_gb)
VM = VehicleModel(CP)
if CP.lateralTuning.which() == 'pid':
LaC = LatControlPID(CP)
elif CP.lateralTuning.which() == 'indi':
LaC = LatControlINDI(CP)
elif CP.lateralTuning.which() == 'lqr':
LaC = LatControlLQR(CP)
driver_status = DriverStatus()
is_rhd = params.get("IsRHD")
if is_rhd is not None:
driver_status.is_rhd = bool(int(is_rhd))
state = State.disabled
soft_disable_timer = 0
v_cruise_kph = 255
v_cruise_kph_last = 0
overtemp = False
free_space = False
cal_status = Calibration.INVALID
cal_perc = 0
mismatch_counter = 0
low_battery = False
events_prev = []
sm['pathPlan'].sensorValid = True
sm['pathPlan'].posenetValid = True
# detect sound card presence
sounds_available = not os.path.isfile('/EON') or (os.path.isdir('/proc/asound/card0') and open('/proc/asound/card0/state').read().strip() == 'ONLINE')
# controlsd is driven by can recv, expected at 100Hz
rk = Ratekeeper(100, print_delay_threshold=None)
# FIXME: offroad alerts should not be created with negative severity
connectivity_alert = params.get("Offroad_ConnectivityNeeded", encoding='utf8')
internet_needed = connectivity_alert is not None and json.loads(connectivity_alert.replace("'", "\""))["severity"] >= 0
prof = Profiler(False) # off by default
while True:
start_time = sec_since_boot()
prof.checkpoint("Ratekeeper", ignore=True)
# Sample data and compute car events
CS, events, cal_status, cal_perc, overtemp, free_space, low_battery, mismatch_counter =\
data_sample(CI, CC, sm, can_poller, can_sock, cal_status, cal_perc, overtemp, free_space, low_battery,
driver_status, state, mismatch_counter, params)
prof.checkpoint("Sample")
# Create alerts
if not sm.all_alive_and_valid():
events.append(create_event('commIssue', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if not sm['pathPlan'].mpcSolutionValid:
events.append(create_event('plannerError', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
if not sm['pathPlan'].sensorValid:
events.append(create_event('sensorDataInvalid', [ET.NO_ENTRY, ET.PERMANENT]))
if not sm['pathPlan'].paramsValid:
events.append(create_event('vehicleModelInvalid', [ET.WARNING]))
if not sm['pathPlan'].posenetValid:
events.append(create_event('posenetInvalid', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if not sm['plan'].radarValid:
events.append(create_event('radarFault', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if sm['plan'].radarCanError:
events.append(create_event('radarCanError', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if not CS.canValid:
events.append(create_event('canError', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
if not sounds_available:
events.append(create_event('soundsUnavailable', [ET.NO_ENTRY, ET.PERMANENT]))
if internet_needed:
events.append(create_event('internetConnectivityNeeded', [ET.NO_ENTRY, ET.PERMANENT]))
# Only allow engagement with brake pressed when stopped behind another stopped car
if CS.brakePressed and sm['plan'].vTargetFuture >= STARTING_TARGET_SPEED and not CP.radarOffCan and CS.vEgo < 0.3:
events.append(create_event('noTarget', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
if not read_only:
# update control state
state, soft_disable_timer, v_cruise_kph, v_cruise_kph_last = \
state_transition(sm.frame, CS, CP, state, events, soft_disable_timer, v_cruise_kph, AM)
prof.checkpoint("State transition")
# Compute actuators (runs PID loops and lateral MPC)
actuators, v_cruise_kph, driver_status, v_acc, a_acc, lac_log = \
state_control(sm.frame, sm.rcv_frame, sm['plan'], sm['pathPlan'], CS, CP, state, events, v_cruise_kph, v_cruise_kph_last, AM, rk,
driver_status, LaC, LoC, read_only, is_metric, cal_perc)
prof.checkpoint("State Control")
# Publish data
CC, events_prev = data_send(sm, pm, CS, CI, CP, VM, state, events, actuators, v_cruise_kph, rk, AM, driver_status, LaC,
LoC, read_only, start_time, v_acc, a_acc, lac_log, events_prev)
prof.checkpoint("Sent")
rk.monitor_time()
prof.display()
print(CC)
def main(sm=None, pm=None, logcan=None):
controlsd_thread(sm, pm, logcan)
if __name__ == "__main__":
main()
|
[
"avbdeclarus@gmail.com"
] |
avbdeclarus@gmail.com
|
d97bbd64cdadcd753f5d3f9854425e04fe873e2f
|
0195f64e47ed23ad58c960bb3807961a0f3bee5c
|
/rep0st/background_job.py
|
2a59c1bd82dccec61173e77dd6436dd0890fc645
|
[
"MIT"
] |
permissive
|
Vanilla-Chan/rep0st
|
42fedb019263d2080e9c64f84f64e281e998f0da
|
65cf934b2841b4564940ae2a7f68c6f312dd3a3d
|
refs/heads/master
| 2020-06-21T13:10:52.701269
| 2019-08-01T18:47:51
| 2019-08-01T18:47:51
| 197,454,028
| 0
| 0
|
MIT
| 2019-07-17T19:58:08
| 2019-07-17T19:58:07
| null |
UTF-8
|
Python
| false
| false
| 4,354
|
py
|
import datetime
import time
import msgpack
import numpy as np
import schedule
from annoy import AnnoyIndex
from logbook import Logger
import config
from rep0st import api, analyze
from rep0st.database import PostStatus, Feature, FeatureType, PostType
from rep0st.rep0st import get_rep0st
config.load()
log = Logger('background-job')
rep = get_rep0st()
current_index = 1
def update(index_id):
latest_id = rep.database.latest_post_id()
log.info("getting new posts. latest post {}", latest_id)
counter = 0
posts = []
ids = []
features_FEATURE_VECTOR = []
for post in api.iterate_posts(latest_id):
counter += 1
posts.append(post)
ids.append(post.id)
rep.database.get_session().add(post)
if post.type == PostType.IMAGE:
image = rep.read_image(post)
if image is not None:
result = analyze.analyze_image(image)
post.status = PostStatus.INDEXED
for type, data in result.items():
rep.database.session.merge(Feature.from_analyzeresult(post, type, data))
if type == FeatureType.FEATURE_VECTOR:
features_FEATURE_VECTOR.append(msgpack.packb({
'id': post.id,
'data': data
}))
rep.database.get_session().commit()
if len(features_FEATURE_VECTOR) > 0:
rep.redis.lpush('rep0st-latest-feature-vectors-index-' + str(index_id), *features_FEATURE_VECTOR)
log.info("finished getting new posts. added {} posts to database", counter)
def build_index(index_id):
n_trees = config.index_config['tree_count']
log.info("started index build")
count = rep.database.session.query(Feature).filter(Feature.type == FeatureType.FEATURE_VECTOR).count()
index = AnnoyIndex(108, metric='euclidean')
cnt = 0
log.info("adding {} features to index", count)
start = time.time()
for feature in rep.database.session.query(Feature).filter(Feature.type == FeatureType.FEATURE_VECTOR).yield_per(
1000):
arr = np.asarray(bytearray(feature.data)).astype(np.float32)
index.add_item(feature.post_id, arr)
cnt += 1
if cnt % 10000 == 0:
log.debug("added {}/{} features to the index", cnt, count)
stop = time.time()
log.info("added all {} features to the index in {}", count, str(datetime.timedelta(seconds=stop - start)))
log.info("building index with {} trees. this will take a while...", n_trees)
start = time.time()
index.build(20)
index_file = config.index_config['index_path'] + "index_" + str(index_id) + ".ann"
log.info("saving index to file {}", index_file)
index.save(index_file)
stop = time.time()
log.debug("finished building of index. it took {}", str(datetime.timedelta(seconds=stop - start)))
def job_build_index():
try:
global current_index
next_index = 2 if current_index == 1 else 1
log.info("current index is {}, next will be {}", current_index, next_index)
rep.update_database()
rep.update_features()
build_index(next_index)
rep.redis.delete('rep0st-latest-feature-vectors-index-' + str(next_index))
rep.redis.set('rep0st-current-index', next_index)
rep.redis.publish('rep0st-index-change', next_index)
current_index = next_index
except:
log.error('Error executing job_build_index', exc_info=True)
def job_update():
try:
update(current_index)
except:
log.error('Error executing job_update', exc_info=True)
if __name__ == '__main__':
if rep.redis.exists('rep0st-current-index'):
current_index = int(rep.redis.get('rep0st-current-index'))
log.info("reusing index id {} for cycling", current_index)
else:
current_index = 2
log.info("starting fresh index cycle with id 1", current_index)
job_build_index()
if config.IS_PRODUCTION:
schedule.every().day.at(config.backgroundjob_config['rebuild_index_time']).do(job_build_index)
else:
schedule.every(5).minutes.do(job_build_index)
schedule.every(config.backgroundjob_config['update_index_every_seconds']).seconds.do(job_update)
while True:
schedule.run_pending()
time.sleep(1)
|
[
"mail@renehollander.at"
] |
mail@renehollander.at
|
e4eac2a306fbd0203025d23dd16432556a71ab94
|
0e281f33cc1ff6f8d8fbeb4ad14396a1a0441e86
|
/tests/test_inss.py
|
8e0495e07bf12739e267e242c48518f55fb17e19
|
[] |
no_license
|
rafaelgoncalvesbarreira/net_salary
|
3e177c8ff7c8a5f3fb3716103f6588dcc0635659
|
0d7a812b761296659812084dc1261f287765006f
|
refs/heads/master
| 2020-04-05T01:55:18.789055
| 2018-11-06T22:19:45
| 2018-11-06T22:19:45
| 156,456,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
import unittest
import inss
from inss import InssRate
class TestInss(unittest.TestCase):
def setUp(self):
self.inss = inss.InssRate(0,3000,10)
def test_inssInsideLimit(self):
result = self.inss.calculate(2000)
self.assertEqual(result, 200)
def test_inssEdgeLimit(self):
result = self.inss.calculate(3000)
self.assertEqual(result, 300)
def test_abovelimit(self):
result = self.inss.calculate(4000)
self.assertEqual(result, 300)
class TestSelectionInss(unittest.TestCase):
def setUp(self):
self.table = [
InssRate(0,1000,9),
InssRate(1001,2000,10),
InssRate(2001,3000, 11)
]
def test_loadAnyone(self):
inssRate = inss.get_Inss_by_value(1000, self.table)
self.assertEqual(9, inssRate.rate)
def test_loadAboveLimit(self):
inssRate = inss.get_Inss_by_value(5000, self.table)
self.assertEqual(11, inssRate.rate)
class TestReadingInss(unittest.TestCase):
def test_loadFromDefault(self):
table = inss.load_inss()
self.assertEqual(len(table), 3)
if __name__ == "__main__":
unittest.main()
|
[
"rafaelgoncalvesbarreira@gmail.com"
] |
rafaelgoncalvesbarreira@gmail.com
|
cb25e10541dcf9198b30e71e14743e5665027d02
|
a1f5ac2713469bd63d6d05778cd4ec9651d35a86
|
/create_StrainFinderInput.py
|
88b0b042aec5232234e20127d96880715161a088
|
[] |
no_license
|
benjaminhgood/highres_microbiome_timecourse
|
00c2c41bb84f85585ed35bab2d38ba0e573253e9
|
6cb7bf99dc96c2a573116604760b2c67e575aa6c
|
refs/heads/master
| 2021-01-20T05:12:42.586510
| 2018-12-14T20:04:12
| 2018-12-14T20:04:12
| 89,760,706
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,695
|
py
|
import tarfile, bz2, cPickle
# MIDAS STuff
import parse_midas_data
import parse_timecourse_data as parse_sample_data
import parse_timecourse_data
import numpy
from numpy.random import shuffle
import stats_utils
import diversity_utils
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="Loads only a subset of SNPs for speed", action="store_true")
parser.add_argument("--chunk-size", type=int, help="max number of records to load", default=1000000000)
parser.add_argument("--species", help="Name of specific species to run code on")
parser.add_argument("-o", "--outdir", help="Where to write output file",metavar="DIR")
parser.add_argument("-Lmax", "--downsample", type=int, help="Where to write output file",default=1e08)
parser.add_argument("-fstar", "--freq-threshold", type=float, help="Frequency has to exceed to be included",default=0.2)
parser.add_argument("--fraction-covered", type=float, help="Fraction of timepoints with sufficient coverage",default=0.5)
args = parser.parse_args()
debug = args.debug
chunk_size = args.chunk_size
species_name = args.species
outdir = args.outdir
max_snps = args.downsample
fstar = args.freq_threshold
fraction_covered = args.fraction_covered
snp_file = outdir+"/"+species_name+".strainfinder.p"
snp_samples = parse_timecourse_data.morteza_samples
sample_size = len(snp_samples)
sys.stderr.write("Proceeding with %d temporal samples!\n" % sample_size)
snp_alignment = [] # (construct a # sites x # samples x # bases (4) array)
final_line_number = 0
while final_line_number >= 0:
sys.stderr.write("Loading chunk starting @ %d...\n" % final_line_number)
dummy_samples, allele_counts_map, passed_sites_map, final_line_number = parse_midas_data.parse_snps(species_name, debug=debug, allowed_samples=snp_samples, chunk_size=chunk_size,initial_line_number=final_line_number)
sys.stderr.write("Done! Loaded %d genes\n" % len(allele_counts_map.keys()))
snp_samples = dummy_samples
for gene_name in allele_counts_map.keys():
#if gene_name!='435590.9.peg.242':
# continue
for var_type in allele_counts_map[gene_name].keys():
locations = allele_counts_map[gene_name][var_type]['locations']
allele_counts = allele_counts_map[gene_name][var_type]['alleles']
if len(allele_counts)==0:
continue
depths = allele_counts.sum(axis=2)
freqs = allele_counts[:,:,0]*1.0/(depths+(depths==0))
for snp_idx in xrange(0,len(locations)):
insufficient_coverage = ((depths[snp_idx,:]>0).sum() < fraction_covered*depths.shape[1])
low_frequency = ((freqs[snp_idx]<fstar).all() or (freqs[snp_idx]>(1-fstar)).all())
if insufficient_coverage or low_frequency:
continue
four_base_counts = [numpy.hstack([allele_counts[snp_idx,sample_idx,:], [0,0]]) for sample_idx in xrange(0,depths.shape[1])]
snp_alignment.append( four_base_counts )
print "Shuffling!"
shuffle(snp_alignment)
if len(snp_alignment) > max_snps:
snp_alignment = snp_alignment[0:max_snps]
print len(snp_alignment)
snp_alignment = numpy.array(snp_alignment)
#sys.stderr.write("Original aligmmetn: %s \n" % (str(snp_alignment.shape)))
snp_alignment = numpy.swapaxes(snp_alignment,0,1)
sys.stderr.write("Saving %s alignment file \n" % (str(snp_alignment.shape)))
cPickle.dump(snp_alignment, open(snp_file, 'wb'))
|
[
"bgood@260j-desktop.dyn.berkeley.edu"
] |
bgood@260j-desktop.dyn.berkeley.edu
|
4051fcf132f0a23bff36c6698b99f05df3ffe149
|
392dd3ec9aab9a01eb660f180dad83cb687c09cb
|
/tests/unit/test_div.py
|
6fd9f4908aca61c42605b0f5b9c40d5900b835f0
|
[] |
no_license
|
nboumlaik/nb_project_for_pcotte
|
7a296a7fcedd32442c382cf5925adcd6b9c233b2
|
0d10503f390ad2a4276caaf0341591b03f75c3bd
|
refs/heads/master
| 2022-12-28T17:54:49.864311
| 2020-10-18T19:53:13
| 2020-10-18T19:53:13
| 305,183,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
from complex_nb.complex_nb import ComplexOper as cmpx
def test_mul():
var1 = cmpx(3, -2)
var2 = cmpx(-45, 1)
res1 = var1 / var2
assert res1.preel == -0.0676
assert res1.pimag == 0.0429
|
[
"nboumlaik@advestis.com"
] |
nboumlaik@advestis.com
|
6abab70ba9053fec823a14d7f7ce505943a9a753
|
085f29c31fa171d0e772bded090141a45c32a099
|
/WL/Lattices/latticeConstructor.py
|
7d5e23b34d455c846b67bbbe43fff5f49350ef70
|
[] |
no_license
|
JouziP/WL
|
764466ae169333ef75a57f599ac9c4295d925bd3
|
13698718b5be0b43be150b6bb7ca6ccae228d9df
|
refs/heads/master
| 2021-04-06T02:27:01.703767
| 2018-03-23T19:56:00
| 2018-03-23T19:56:00
| 125,282,972
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,679
|
py
|
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import numpy as np
#from numpy import linalg as lg
################
from WL.BasicFunctions.cleanNeighbors import cleanNeighbors
from latticeLinkRemover import linkRemover
from rankNeighbs import getUpToRankNeighbs
def getFirstneighbors(neighb_table):
pass
def getCartesianFromCoords(coords, **kwgs):
a1_x=kwgs['a1_x']
a1_y=kwgs['a1_y']
#
a2_x=kwgs['a2_x']
a2_y=kwgs['a2_y']
#
q1=coords[0]
q2=coords[1]
x = a1_x*q1 + a2_x*q2
y = a1_y*q1 + a2_y*q2
return x,y
def getIndexFromCoords(coords, **kwgs):
N1 = kwgs['N1']
q1 = coords[0]
q2 = coords[1]
idx = q2*N1 + q1
return idx
def getCoordsFromIndex(idx, **kwgs):
N1 = kwgs['N1']
q2=idx/N1
q1=idx - q2*N1
return q1, q2
def getDistance(deltaX, deltaY):
distance = ( (deltaX)**2 + (deltaY)**2)**(0.5)
return distance
def constructLattice(**kwgs):
N1 = kwgs['N1']
N2 = kwgs['N2']
num_sites = N1* N2
neighbors_array = []
for n in range(num_sites):
neighbors_array.append(getNeighborTableGeneralized(n, **kwgs))
######
neighbors_array = linkRemover(neighbors_array, **kwgs)
###### refine
neighbors_array_refined=[]
for n in range(len(neighbors_array)):
refined=[]
for j in range(neighbors_array[n].shape[0]):
if np.round(neighbors_array[n][j, 1], 8)!=np.round(0, 8):
refined.append(neighbors_array[n][j, :])
neighbors_array_refined.append(np.array(refined))
return neighbors_array_refined
def getNeighborTableGeneralized(idx, **kwgs):
###
power = kwgs['power']
a1_x=kwgs['a1_x']
a1_y=kwgs['a1_y']
#
a2_x=kwgs['a2_x']
a2_y=kwgs['a2_y']
#
###
neighbs=[]
q1, q2 = getCoordsFromIndex(idx, **kwgs)
#
N1 = kwgs['N1']
#
N2 = kwgs['N2']
#
N = N1 * N2
for i in range(0, N, 1):
q1_n, q2_n = getCoordsFromIndex(i, **kwgs)
### direct
if q1==q1_n and q2==q2_n:
pass
else:
x_n, y_n = getCartesianFromCoords(np.array([q1_n, q2_n]),
**kwgs)
# cartesian coords
x0,y0 = getCartesianFromCoords(np.array([q1,q2]), **kwgs)
distance = getDistance( (x_n-(x0)) , (y_n-y0) )
### periodic 1
# cartesian coords
x=x0+N2*a1_x
y=y0+N2*a1_y
distance10 = getDistance( (x_n-x), (y_n-y) )
#
x=x0-N2*a1_x
y=y0-N2*a1_y
distance11 = getDistance( (x_n-x), (y_n-y) )
distance1=np.min([distance10, distance11])
### peridic in 2
x=x0+N1*a2_x
y=y0+N1*a2_y
distance20 = getDistance( (x_n-x), (y_n-y) )
x=x0-N1*a2_x
y=y0-N1*a2_y
distance21 = getDistance( (x_n-x), (y_n-y) )
distance2=np.min([distance20, distance21])
#### peridic in 1 and 2
# cartesian coords
x=x0+N1*a2_x+N2*a1_x
y=y0+N1*a2_y+N2*a1_y
distance300 = getDistance( (x_n-x), (y_n-y) )
x=x0+N1*a2_x-N2*a1_x
y=y0+N1*a2_y-N2*a1_y
distance301 = getDistance( (x_n-x), (y_n-y) )
x=x0-N1*a2_x+N2*a1_x
y=y0-N1*a2_y+N2*a1_y
distance310 = getDistance( (x_n-x), (y_n-y) )
x=x0-N1*a2_x-N2*a1_x
y=y0-N1*a2_y-N2*a1_y
distance311 = getDistance( (x_n-x), (y_n-y) )
distance3=np.min([distance300,
distance310,
distance301,
distance311])
distance = np.min([distance,
distance1,
distance2,
distance3,
])
#
strength = 1./distance**(power)
#
neighbs.append([i, strength])
#
#####
if kwgs['first_neighb']==True:
# print neighbs
# print '=========='
all_neighbors = cleanNeighbors(neighbs)
neighbs=all_neighbors[0]
#####
if kwgs['first_neighb']==False:
if 'up_to_rank' in kwgs:
up_to_rank=kwgs['up_to_rank']
# print neighbs
# print '=========='
neighbs=getUpToRankNeighbs(up_to_rank,
neighbs
)
###
return np.array(neighbs)
|
[
"p.jouzdani@knights.ucf.edu"
] |
p.jouzdani@knights.ucf.edu
|
6679f5e96dba34b1e1bb39a7be3d39dfeacb29a6
|
3fb00269f32b627ef42360e16c5538ab7d8756c3
|
/venv/lib/python3.6/encodings/iso2022_jp.py
|
ff14137b3ac365c8edbac7aca0e39ba68773fcea
|
[] |
no_license
|
ThisIsJorgeLima/DashFinance
|
6a58987a1930b61358b607a875ec6ee60f717759
|
6b07353d91a386b63e290f17cffd79b8cbfb643c
|
refs/heads/master
| 2021-10-19T18:03:08.846652
| 2019-02-22T21:21:49
| 2019-02-22T21:21:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 70
|
py
|
/Users/sterlingbutters/anaconda3/lib/python3.6/encodings/iso2022_jp.py
|
[
"sterlingbutters@gmail.com"
] |
sterlingbutters@gmail.com
|
315b0aa2d94ec9decb4b1fa17ccb63b59e20b7d7
|
cb5bf5526a2e5e33d92367f615e9c0b7ab0c290c
|
/policies.py
|
3240ce91b4edc69d9db45d2dd581ce84979287b0
|
[] |
no_license
|
kevinduh/neural_language_model
|
6a1a46654664de7b6fb2c3474d6baeeeed72b54c
|
f785a4b33f27a5e7cbefb6fb281a56c07ba27d27
|
refs/heads/master
| 2021-01-21T03:14:29.775949
| 2014-08-28T22:11:35
| 2014-08-28T22:11:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,861
|
py
|
import theano
import theano.tensor as T
import numpy as np
from picklable import Picklable
import pprint
from hessian import hessian_diagonal
class Policy(Picklable):
"""
implements a gradient descent policy for a given parameter
"""
def __init__(self, learning_rate, param):
learning_rate = np.cast[theano.config.floatX](learning_rate)
self._set_attrs(learning_rate=learning_rate, param=param)
def updates(self, cost):
"""
return the update for this param
"""
return []
def updates_indexed(self, cost, index_list, slice_list):
"""
used by methods that only update a subset of parameters
"""
return []
def burn_in_updates(self, cost):
"""
if this policy requires initialization (i.e. vSGD), do it with these updates
"""
return []
def burn_in_updates_indexed(self, cost, index_list, slice_list):
"""
if this policy requires initialization (i.e. vSGD), do it with these updates,
used by methods that only update a subset of parameters
"""
return []
def afterburn(self):
"""
initialize parameters after the burn-in period
"""
pass
class SGD(Policy):
"""
stochastic gradient descent, non-annealing
"""
def _shared_attrs(self):
return ['learning_rate']
def _nonshared_attrs(self):
return ['param']
def updates(self, cost):
grad = T.grad(cost, self.param)
return [(self.param, self.param - self.learning_rate * grad)]
def updates_indexed(self, cost, index_list, slice_list):
indices = T.stack(*index_list)
grad = T.stack(*T.grad(cost, slice_list))
return [(self.param, T.inc_subtensor(self.param[indices],
-1 * self.learning_rate * grad))]
class VSGD(Policy):
"""
unfinished implementation of variance-based SGD, from Schaul et al ``No More Pesky Learning Rates''
not used, symbolic method of finding hessians with Theano is too slow!
better to use finite differences method?
"""
def _shared_attrs(self):
return [
'learning_rate',
'tau',
'h_avg',
'g_avg',
'v_avg',
'last_grad',
'last_grad2',
'last_rate',
'N',
]
def _nonshared_attrs(self):
return ['param', ('n0', 100), ('C', 10), ('epsilon', 1e-20), ('burned', False)]
def __repr__(self):
return pprint.pformat({
'h': self.h_avg.get_value(),
'g': self.g_avg.get_value(),
'v': self.v_avg.get_value(),
'tau': self.tau.get_value(),
'last_grad': self.last_grad.get_value(),
'last_grad2': self.last_grad2.get_value(),
'last_rate': self.last_rate.get_value(),
'N': self.N.get_value(),
})
def __init__(self, learning_rate, param, n0=100, C=10, epsilon=1e-20):
learning_rate = np.cast[theano.config.floatX](learning_rate)
param_value = param.get_value()
h_avg = np.zeros_like(param_value, dtype=theano.config.floatX)
g_avg = np.zeros_like(param_value, dtype=theano.config.floatX)
v_avg = np.zeros_like(param_value, dtype=theano.config.floatX)
last_grad = np.zeros_like(param_value, dtype=theano.config.floatX)
last_grad2 = np.zeros_like(param_value, dtype=theano.config.floatX)
last_rate = np.zeros_like(param_value, dtype=theano.config.floatX)
tau = np.zeros_like(param_value, dtype=theano.config.floatX)
self._set_attrs(
param=param,
learning_rate=learning_rate,
h_avg=h_avg,
g_avg=g_avg,
v_avg=v_avg,
last_grad=last_grad,
last_grad2=last_grad2,
last_rate=last_rate,
tau=tau,
N=0,
n0=n0,
C=C,
epsilon=epsilon,
burned=False,
)
def burn_in_updates(self, cost):
grad = T.grad(cost, self.param)
grad2 = hessian_diagonal(cost, self.param, grad=grad)
print 'burn in updates for %s' % self.param
return [
(self.g_avg, self.g_avg + grad),
(self.h_avg, self.h_avg + T.abs_(grad2)),
(self.v_avg, self.v_avg + grad**2),
(self.N, self.N + 1)
]
def afterburn(self):
if self.burned:
print 'already afterburned!'
return
else:
print 'afterburning %s' % self.param
self.burned = True
self.g_avg.set_value(self.g_avg.get_value() / self.N.get_value())
hess = self.h_avg.get_value() / self.N.get_value() * self.C
self.h_avg.set_value(np.where(hess < self.epsilon, self.epsilon, hess))
self.v_avg.set_value(self.v_avg.get_value() / self.N.get_value() * self.C)
self.tau.set_value(np.ones_like(self.tau.get_value()) * self.N.get_value())
def updates(self, cost):
grad = T.grad(cost, self.param)
grad2 = hessian_diagonal(cost, self.param, grad=grad)
# calculate memory constants
tau_rec = 1.0 / self.tau
tau_inv_rec = 1.0 - tau_rec
# new moving average of gradient
g_avg_new = tau_inv_rec * self.g_avg + tau_rec * grad
# new moving average of squared gradient
v_avg_new = tau_inv_rec * self.v_avg + tau_rec * grad**2
# new moving average of hessian diagonal
h_avg_new = tau_inv_rec * self.h_avg + tau_rec * T.abs_(grad2)
rate_unsafe = (g_avg_new ** 2) / (v_avg_new * h_avg_new)
rate = T.switch(T.isinf(rate_unsafe) | T.isnan(rate_unsafe), self.learning_rate, rate_unsafe)
tau_unsafe = (1 - (g_avg_new ** 2) / v_avg_new) * self.tau + 1
tau_new = T.switch(T.isnan(tau_unsafe) | T.isinf(tau_unsafe), self.tau, tau_unsafe)
return [(self.g_avg, g_avg_new),
(self.v_avg, v_avg_new),
(self.h_avg, h_avg_new),
(self.tau, tau_new),
(self.last_grad, grad),
(self.last_grad2, grad2),
(self.last_rate, rate),
(self.param, self.param - rate * grad)]
def updates_indexed(self, cost, index_list, slice_list):
# TODO: finish this
return self.updates(cost)
# slice_grads = T.grad(cost, slice_list)
# slice_hessians = []
# for slice_, slice_grad in zip(slice_list, slice_grads):
# slice_grad = T.grad(cost, slice_)
# slice_hessians.append(hessian_diagonal(cost, slice_, grad=slice_grad))
# grad = T.stack(*slice_grads)
# grad2 = T.stack(*slice_hessians)
# indices = T.stack(*index_list)
# # calculate memory constants
# tau_rec = 1.0 / self.tau[indices]
# tau_inv_rec = 1.0 - tau_rec
# # new moving average of gradient
# g_avg_new = tau_inv_rec * self.g_avg[indices] + tau_rec * grad
# # new moving average of squared gradient
# v_avg_new = tau_inv_rec * self.v_avg[indices] + tau_rec * grad**2
# # new moving average of hessian diagonal
# h_avg_new = tau_inv_rec * self.h_avg[indices] + tau_rec * grad2
# rate = (g_avg_new ** 2) / (v_avg_new * h_avg_new)
# tau_new = (1 - (g_avg_new ** 2) / v_avg_new) * self.tau[indices] + 1
# return [(self.g_avg, T.set_subtensor(self.g_avg[indices], g_avg_new)),
# (self.v_avg, T.set_subtensor(self.v_avg[indices], v_avg_new)),
# (self.h_avg, T.set_subtensor(self.h_avg[indices], h_avg_new)),
# (self.tau, T.set_subtensor(self.tau[indices], tau_new)),
# (self.param, T.inc_subtensor(self.param[indices], - rate * grad))]
|
[
"dfried@email.arizona.edu"
] |
dfried@email.arizona.edu
|
bf12e2ab5e0fbd7d790933df7ab2e92d03ddf273
|
23d3834e4e6bb6112ac2a83dee655e4b18715ede
|
/migrations/versions/3041931c8df7_.py
|
f8b32517053e94a3bd110bff28ad220f5330ea21
|
[] |
no_license
|
intuij/Flasky-Blog
|
fa556da8ea491f19320b0083e9cbac9170939bed
|
8e63c9780cc52f42af23b533e077f1a349c8a19f
|
refs/heads/master
| 2020-03-20T03:20:17.127425
| 2018-06-14T06:22:39
| 2018-06-14T06:22:39
| 137,142,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
"""empty message
Revision ID: 3041931c8df7
Revises: b4f39920300e
Create Date: 2018-06-12 00:22:02.183782
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3041931c8df7'
down_revision = 'b4f39920300e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('posts',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=64), nullable=True),
sa.Column('content', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_posts_timestamp'), 'posts', ['timestamp'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_posts_timestamp'), table_name='posts')
op.drop_table('posts')
# ### end Alembic commands ###
|
[
"jiang.weiyi@outlook.com"
] |
jiang.weiyi@outlook.com
|
e1edf3832bd3800e899a31af38c7c139b2481ae8
|
a1811bf48ef2ba360482d8cfea42a01679ffd772
|
/SystemCode/proj1Django-master/venv/bin/easy_install-3.7
|
e9566d28621ec73ed31056edde2cc3db02ecaaf7
|
[] |
no_license
|
ocplease/IRS-PM-2020-11-01-IS02FT-GRP4-SchoolRecommender
|
529c9d3bf4ea82413e0f7da5d04ad9a9fe6a4de7
|
2ff8abb12376748f0d5da0a66ff4c77745ec0130
|
refs/heads/main
| 2023-01-04T19:01:38.286682
| 2020-11-01T03:37:03
| 2020-11-01T03:37:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
7
|
#!/Users/shuulin/PycharmProjects/ShchoolRecommender/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
|
[
"linxi@yxinsur.com"
] |
linxi@yxinsur.com
|
c39b6a0de1a3c4c82a2cda09bdd8a47dac81b9dc
|
10f0ca897d117e7cda0b32a9d55351b7b8a2b8bf
|
/boppy/plugin/uptime.py
|
b41f72b195c90900160d1a3b4a1d4827e7f0a121
|
[
"MIT"
] |
permissive
|
kakakaya/boppy
|
fa9cb1711ebe37c9fec4495fed0fa59917045c52
|
f8b2bfb9aeab72f1512562e40024f87fab276880
|
refs/heads/master
| 2021-01-19T00:11:46.068879
| 2016-12-15T10:20:42
| 2016-12-15T10:20:42
| 72,923,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: kakakaya, Date: Sun Nov 6 00:47:26 2016
# from pprint import pprint as p
def resp(robot, msg):
robot.dst.respond(msg, robot.uptime())
def register(robot):
robot.listen("uptime", resp)
|
[
"kakakaya+git@gmail.com"
] |
kakakaya+git@gmail.com
|
036588999b7708789204c355ed1140500e2850fa
|
057a69bf18d9b1c36b99e092c2a88b4223fd13f7
|
/pythontestingexample/testing/test_employee.py
|
1fb512daa4fb835d29f885acd903de25f56a9db5
|
[] |
no_license
|
JK696X/PythonTestFile
|
18074266fe053bab7dd33a5f4b0fa84fe7d0f7a6
|
42c55853bea6c140ce3000c6048c59ef95a73eec
|
refs/heads/main
| 2023-04-07T17:32:33.104173
| 2021-04-16T20:58:10
| 2021-04-16T20:58:10
| 358,047,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,315
|
py
|
# Part of standard libary
import unittest
# Use this for testing absolute/relative path
import sys
import os
import requests
# Use this for importing mock with our unit testing
from unittest.mock import patch
# NOT SURE WHY THIS WORKS BUT IT DOES
absfilepath = os.path.abspath(__file__) # absolute path of the module
# print("here is the absolute file path: " + absfilepath)
filedir = os.path.dirname(os.path.abspath(__file__)) # directory of the module
# print("here is the file directory path: " + filedir)
parentdir = os.path.dirname(filedir) # directory of the module directory
# print("here is the parent directory path: " + parentdir)
newpath = os.path.join(
parentdir, "application"
) # get the directory for stringfunctions
# print("here is the newpath: " + newpath)
sys.path.append(newpath) # add path into pythonpath
from employee import Employee
class TestEmployee(unittest.TestCase):
# runs before all of testing
@classmethod
def setUpClass(cls):
# Used for resource intensive stuff
print("setupClass")
# runs before all of testing
@classmethod
def tearDownClass(cls):
# Used for resource intensive stuff
print("teardownClass")
# runs before every test case
def setUp(self):
print("setUp\n")
self.emp_1 = Employee("Corey", "Schafer", 50000)
self.emp_2 = Employee("Sue", "Smith", 60000)
pass
# runs AFTER every test case
def tearDown(self):
print("tearDown\n")
pass
def test_email(self):
print("test_email\n")
self.assertEqual(self.emp_1.email, "Corey.Schafer@email.com")
self.assertEqual(self.emp_2.email, "Sue.Smith@email.com")
self.emp_1.first = "John"
self.emp_2.first = "Jane"
self.assertEqual(self.emp_1.email, "John.Schafer@email.com")
self.assertEqual(self.emp_2.email, "Jane.Smith@email.com")
def test_fullname(self):
print("test_fullname\n")
self.assertEqual(self.emp_1.fullname, "Corey Schafer")
self.assertEqual(self.emp_2.fullname, "Sue Smith")
self.emp_1.first = "John"
self.emp_2.first = "Jane"
self.assertEqual(self.emp_1.fullname, "John Schafer")
self.assertEqual(self.emp_2.fullname, "Jane Smith")
def test_apply_raise(self):
print("test_apply_raise\n")
self.emp_1.apply_raise()
self.emp_2.apply_raise()
self.assertEqual(self.emp_1.pay, 52500)
self.assertEqual(self.emp_2.pay, 63000)
def test_monthly_schedule(self):
print("Printing test_monthly_schedule with patch")
with patch("employee.requests.get") as mocked_get:
# Successful Response
mocked_get.return_value.ok = True
mocked_get.return_value.text = "Success"
schedule = self.emp_1.monthly_schedule("May")
mocked_get.assert_called_with("http://company.com/Schafer/May")
self.assertEqual(schedule, "Success")
# Failed Response
mocked_get.return_value.ok = False
schedule = self.emp_2.monthly_schedule("June")
mocked_get.assert_called_with("http://company.com/Smith/June")
self.assertEqual(schedule, "Bad Response!")
if __name__ == "__main__":
unittest.main()
|
[
"jk696x@att.com"
] |
jk696x@att.com
|
6e265c83ace2cd7ffbec2d40e565a64a8876679c
|
f5ae92d3f3e4aeb99927416856008bc92fa3050d
|
/main.py
|
ba9c914e91770175437fa41825c7c3d0cd7f62b7
|
[] |
no_license
|
vigorweijia/MLHuaweiBigData
|
a1aef5007aed69604a357893620ae12a3c79617a
|
559e1fa01c2d1c1ca1e05a60188b9870060c2327
|
refs/heads/master
| 2022-10-30T20:46:16.990643
| 2020-06-19T04:19:48
| 2020-06-19T04:19:48
| 273,401,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,484
|
py
|
import pandas as pd
from tqdm import tqdm
import numpy as np
from sklearn.metrics import mean_squared_error,explained_variance_score
from sklearn.model_selection import KFold
from sklearn.ensemble import GradientBoostingRegressor
import lightgbm as lgb
#lightgbm简单理解就是一种很好用的决策树集成算法;
from math import radians, cos, sin, asin, sqrt
import warnings
warnings.filterwarnings('ignore')
# baseline只用到gps定位数据,即train_gps_path
EARTH_REDIUS = 6378.137
train_gps_path = 'event_port/train0523.csv'
test_data_path = 'event_port/A_testData0531.csv'
order_data_path = 'event_port/loadingOrderEvent.csv'
port_data_path = 'event_port/port.csv'
def geodistance(lng1,lat1,lng2,lat2):
#print(type(lng1),type(lng2),type(lat1),type(lat2))
#print(lng1,lng2,lat1,lat2)
lng1, lat1, lng2, lat2 = map(radians, [float(lng1), float(lat1), float(lng2), float(lat2)]) # 经纬度转换成弧度
dlon=lng2-lng1
dlat=lat2-lat1
a=sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
distance=2*asin(sqrt(a))*6371*1000 # 地球平均半径,6371km
distance=round(distance/1000,3)
return distance
def get_data(data, mode='train'):
assert mode == 'train' or mode == 'test'
#类型转换
if mode == 'train':
data['vesselNextportETA'] = pd.to_datetime(data['vesselNextportETA'],infer_datetime_format=True)
elif mode == 'test':
data['temp_timestamp'] = data['timestamp']
data['onboardDate'] = pd.to_datetime(data['onboardDate'], infer_datetime_format=True)
data['timestamp'] = pd.to_datetime(data['timestamp'], infer_datetime_format=True)
data['longitude'] = data['longitude'].astype(float)
data['loadingOrder'] = data['loadingOrder'].astype(str)
data['latitude'] = data['latitude'].astype(float)
data['speed'] = data['speed'].astype(float)
data['direction'] = data['direction'].astype(float)
return data
def mean_skip_zero(arr):
number=0
mysum=0
for i,v in arr.iteritems():
if v!=0:
number+=1
mysum+=v
if number==0:
return 0
else:
return mysum/number
def MY_MSE_skip_zero(arr):
number = 0
mysum = 0
for i, v in arr.iteritems():
if v != 0:
number += 1
mysum += v
if number == 0:
average=0
else:
average=mysum / number
res=0
for i,v in arr.iteritems():
if v!=0:
res+=np.square(average-v)
if number==0:
return 0
else:
return res/number
def get_time(arr):
return (arr.max()-arr.min()).total_seconds()
#下面这些表示该函数返回后的结果中的列名,其中的loadingOrder,label,count不是训练特征,其他的都是特征
#loadingOrder,distance,mean_speed,speed_mse,mean_speed_skip0,speed_mse_skip0
#anchor_0_6,anchor_7_15,label(时间、label),count,anchor_ratio_0_6,anchor_ratio_7_15,
def get_feature_train(df):
df.sort_values(['loadingOrder', 'timestamp'], inplace=True)
#首先按照订单号进行排序,然后按照时间进行排序
df['lat_diff']=df.groupby('loadingOrder')['latitude'].diff(1)#计算相邻两个时间点上的经纬度差
df['lon_diff']=df.groupby('loadingOrder')['longitude'].diff(1)
df['point_to_point']=df.apply(lambda x:geodistance(x['latitude'],x['longitude'],x['latitude']-
x['lat_diff'],x['longitude']-x['lon_diff']) if True else 0,axis=1) #计算当前这一点与上一点之间的距离
dis=df.groupby('loadingOrder')['point_to_point'].agg('sum').reset_index()#dis表示每一个订单中对应的总距离
dis.columns=['loadingOrder','distance']
mean_speed=df.groupby('loadingOrder')['speed'].agg(['mean','var',mean_skip_zero,MY_MSE_skip_zero]).reset_index()#求出速度,速度的方差,有0,无0的
mean_speed.columns=['loadingOrder','mean_speed','speed_mse','mean_speed_skip0','speed_mse_skip0']
df['anchor_0_6']=df.apply(lambda x: 1 if x['speed']<=6 else 0,axis=1)#抛锚次数
df['anchor_7_15']=df.apply(lambda x:1 if x['speed']>6 and x['speed']<=15 else 0,axis=1)
res_df=df.groupby('loadingOrder').agg({'anchor_0_6':['sum'],'anchor_7_15':['sum']}).reset_index()
res_df.columns=['loadingOrder','anchor_0_6','anchor_7_15']#hhhhhhhhhhhh
a=df.groupby('loadingOrder')['timestamp'].agg(['count',get_time]).reset_index()
a.columns=('loadingOrder','count','label')
res_df=res_df.merge(a,on='loadingOrder')
#res_df['label']=df.groupby('loadingOrder')['timestamp'].agg(get_time).reset_index()#时间
res_df['anchor_ratio_0_6']=res_df['anchor_0_6']/res_df['count']
res_df['anchor_ratio_7_15']=res_df['anchor_7_15']/res_df['count']
res_df=res_df.merge(dis,on='loadingOrder')
res_df=res_df.merge(mean_speed,on='loadingOrder')
first_df = df.sort_values('timestamp').groupby('loadingOrder', as_index=False).first() # 找出最近的时间戳
first_df = first_df[['loadingOrder', 'longitude', 'latitude']]
first_df.columns = ['loadingOrder', 'first_longitude', 'first_latitude']
last_df = df.sort_values('timestamp', ascending=False).groupby('loadingOrder', as_index=False).first()
last_df = last_df[['loadingOrder', 'longitude', 'latitude']]
last_df.columns = ['loadingOrder', 'last_longitude', 'last_latitude']
first_df = first_df.merge(last_df, on='loadingOrder') # 存储的是第一个经纬度和最后一个经纬度
res_df = res_df.merge(first_df, on='loadingOrder')
res_df.reset_index(drop=True)
#应该把count这一列删去?,count是GPS的检测次数
return res_df
#loadingOrder,distance,mean_speed,speed_mse,mean_speed_skip0,speed_mse_skip0
#anchor_0_6,anchor_7_15,label(时间、label),count,anchor_ratio_0_6,anchor_ratio_7_15,
def get_feature_test(df,port_data_path):
df.sort_values(['loadingOrder', 'timestamp'], inplace=True)
# 首先按照订单号进行排序,然后按照时间进行排序
df['lat_diff'] = df.groupby('loadingOrder')['latitude'].diff(1) # 计算相邻两个时间点上的经纬度差
df['lon_diff'] = df.groupby('loadingOrder')['longitude'].diff(1)
df['point_to_point'] = df.apply(lambda x: geodistance(x['latitude'], x['longitude'], x['latitude'] -
x['lat_diff'], x['longitude'] - x['lon_diff']) if True else 0,axis=1) # 计算当前这一点与上一点之间的距离
dis = df.groupby('loadingOrder')['point_to_point'].agg('sum').reset_index() # dis表示每一个订单中对应的总距离
dis.columns=['loadingOrder','previous_dis']
#接下来计算后半段的距离
back_dis=df.sort_values('timestamp',ascending=False).groupby('loadingOrder',as_index=False).first()#找出最远的那个时间戳
back_dis['dest']=back_dis.apply(lambda x:x['TRANSPORT_TRACE'][x['TRANSPORT_TRACE'].rfind('-')+1:] if True else '',axis=1)#提取出终点港口
ports=pd.read_csv(port_data_path)#读取港口文件
#print(ports.columns)
m=0
for index,row in ports.iterrows():
#print(index,row)
m+=1
if m>=600:
break
ports['LONGITUDE']=ports['LONGITUDE'].astype(float)
ports['LATITUDE']=ports['LATITUDE'].astype(float)
dict_ports={}#存到一个字典里
for index,row in ports.iterrows():
dict_ports[row['TRANS_NODE_NAME']]=(row['LONGITUDE'],row['LATITUDE'])#港口名是key,经纬度是value
#已经获得了终点港口的经纬度,接下来可以计算距离
back_dis['dest_lon']=back_dis.apply(lambda x:dict_ports[x['dest']][0],axis=1)
back_dis['dest_lat']=back_dis.apply(lambda x:dict_ports[x['dest']][1],axis=1)
back_dis['back_dis']=back_dis.apply(lambda x:geodistance(x['longitude'],x['latitude'],x['dest_lon'],x['dest_lat']) if True else 0,axis=1)
temp=back_dis[['loadingOrder','back_dis']]
dis=dis.merge(temp,on='loadingOrder')
dis['distance']=dis['back_dis']+dis['previous_dis']
#dis['distance']=dis.apply(lambda x:dis['back_dis']+dis['previous_dis'] if True else 0,axis=1)#dis中的列名有loadingOrder,previous_dis,back_dis,distance
dis=dis[['loadingOrder','distance']]
mean_speed = df.groupby('loadingOrder')['speed'].agg(
['mean', 'var', mean_skip_zero, MY_MSE_skip_zero]).reset_index() # 求出速度,速度的方差,有0,无0的
mean_speed.columns = ['loadingOrder', 'mean_speed', 'speed_mse', 'mean_speed_skip0', 'speed_mse_skip0']
df['anchor_0_6'] = df.apply(lambda x: 1 if x['speed'] <= 6 else 0, axis=1) # 抛锚次数
df['anchor_7_15'] = df.apply(lambda x: 1 if x['speed'] > 6 and x['speed'] <= 15 else 0, axis=1)
res_df = df.groupby('loadingOrder').agg({'anchor_0_6': ['sum'], 'anchor_7_15': ['sum']}).reset_index()
res_df.columns = ['loadingOrder', 'anchor_0_6', 'anchor_7_15'] # hhhhhhhhhhhh
a = df.groupby('loadingOrder')['timestamp'].agg(['count', get_time]).reset_index()
a.columns = ('loadingOrder', 'count', 'label')
res_df = res_df.merge(a, on='loadingOrder')
# res_df['label']=df.groupby('loadingOrder')['timestamp'].agg(get_time).reset_index()#时间
res_df['anchor_ratio_0_6'] = res_df['anchor_0_6'] / res_df['count']
res_df['anchor_ratio_7_15'] = res_df['anchor_7_15'] / res_df['count']
res_df = res_df.merge(dis, on='loadingOrder')
res_df = res_df.merge(mean_speed, on='loadingOrder')
res_df.reset_index(drop=True)
# 应该把count这一列删去?,count是GPS的检测次数
return res_df
def mse_score_eval(preds, valid):
labels = valid.get_label()
scores = mean_squared_error(y_true=labels, y_pred=preds)
return 'mse_score', scores, True
def build_model(train, test, pred, label, seed=1080, is_shuffle=True):
train_pred = np.zeros((train.shape[0],))
test_pred = np.zeros((test.shape[0],))
n_splits = 10
# Kfold
fold = KFold(n_splits=n_splits, shuffle=is_shuffle, random_state=seed)
kf_way = fold.split(train[pred])
# params
params = {
'learning_rate': 0.01,
'boosting_type': 'gbdt',
'objective': 'regression',
'num_leaves': 36,
'feature_fraction': 0.6,
'bagging_fraction': 0.7,
'bagging_freq': 6,
'seed': 8,
'bagging_seed': 1,
'feature_fraction_seed': 7,
'min_data_in_leaf': 20,
'nthread': 8,
'verbose': 1,
}
# train
for n_fold, (train_idx, valid_idx) in enumerate(kf_way, start=1):
train_x, train_y = train[pred].iloc[train_idx], train[label].iloc[train_idx]
valid_x, valid_y = train[pred].iloc[valid_idx], train[label].iloc[valid_idx]
# 数据加载
n_train = lgb.Dataset(train_x, label=train_y)
n_valid = lgb.Dataset(valid_x, label=valid_y)
clf = lgb.train(
params=params,
train_set=n_train,
num_boost_round=3000,
valid_sets=[n_valid],
early_stopping_rounds=100,
verbose_eval=100,
feval=mse_score_eval
)
train_pred[valid_idx] = clf.predict(valid_x, num_iteration=clf.best_iteration)
test_pred += clf.predict(test[pred], num_iteration=clf.best_iteration) / fold.n_splits
test['label'] = test_pred
return test[['loadingOrder', 'label']]
def main():
#NROWS = 20000000
train_data = pd.read_csv('del_not_suit_dest_data_2.csv',header=None)
#del train_data[0]
train_data.columns = ['loadingOrder', 'carrierName', 'timestamp', 'longitude',
'latitude', 'vesselMMSI', 'speed', 'direction', 'vesselNextport',
'vesselNextportETA', 'vesselStatus', 'vesselDatasource', 'TRANSPORT_TRACE']
train_data.drop([0],inplace=True)
test_data = pd.read_csv(test_data_path)
#print(test_data.columns)
train_data = get_data(train_data, mode='train')
test_data = get_data(test_data, mode='test')
print('get data done')
train = get_feature_train(train_data)
test = get_feature_test(test_data, port_data_path)
#print(train.columns)
#print(test.columns)
features = [c for c in train.columns if c not in ['count', 'label', 'loadingOrder',
'anchor_7_15', 'anchor_ratio_7_15', 'mean_speed', 'speed_mse',
'first_longitude', 'first_latitude', 'last_longitude', 'last_latitude']]
#train.to_csv('train.csv')
#test.to_csv('test.csv')
print('FEATURES:'+str(features))
train['anchor_0_6'] = train['anchor_0_6'].astype(float)
train['anchor_7_15'] = train['anchor_7_15'].astype(float)
train['anchor_ratio_0_6'] = train['anchor_ratio_0_6'].astype(float)
train['anchor_ratio_7_15'] = train['anchor_ratio_7_15'].astype(float)
train['distance'] = train['distance'].astype(float)
train['mean_speed'] = train['mean_speed'].astype(float)
train['speed_mse'] = train['speed_mse'].astype(float)
train['mean_speed_skip0'] = train['mean_speed_skip0'].astype(float)
train['speed_mse_skip0'] = train['speed_mse_skip0'].astype(float)
#train['first_longitude'] = train['first_longitude'].astype(float)
#train['first_latitude'] = train['first_latitude'].astype(float)
#train['last_longitude'] = train['last_longitude'].astype(float)
#train['last_latitude'] = train['last_latitude'].astype(float)
test['anchor_0_6'] = test['anchor_0_6'].astype(float)
test['anchor_7_15'] = test['anchor_7_15'].astype(float)
# train['count'] = train['count'].astype(float)
# train['label'] = train['label'].astype(float)
test['anchor_ratio_0_6'] = test['anchor_ratio_0_6'].astype(float)
test['anchor_ratio_7_15'] = test['anchor_ratio_7_15'].astype(float)
test['distance'] = test['distance'].astype(float)
test['mean_speed'] = test['mean_speed'].astype(float)
test['speed_mse'] = test['speed_mse'].astype(float)
test['mean_speed_skip0'] = test['mean_speed_skip0'].astype(float)
test['speed_mse_skip0'] = test['speed_mse_skip0'].astype(float)
#test['first_longitude'] = test['first_longitude'].astype(float)
#test['first_latitude'] = test['first_latitude'].astype(float)
#test['last_longitude'] = test['last_longitude'].astype(float)
#test['last_latitude'] = test['last_latitude'].astype(float)
result = build_model(train, test, features, 'label', is_shuffle=True)
result.to_csv('result-061622.csv')
#构建并训练模型,result就是预测出的消耗的时间,再加上起始时间就是ETA;
test_data = test_data.merge(result, on='loadingOrder', how='left')
test_data['ETA'] = (test_data['onboardDate'] + test_data['label'].apply(lambda x:pd.Timedelta(seconds=x))).apply(lambda x:x.strftime('%Y/%m/%d %H:%M:%S'))
test_data.drop(['direction', 'TRANSPORT_TRACE'], axis=1, inplace=True)
test_data['onboardDate'] = test_data['onboardDate'].apply(lambda x:x.strftime('%Y/%m/%d %H:%M:%S'))
test_data['creatDate'] = pd.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
test_data['timestamp'] = test_data['temp_timestamp']
# 整理columns顺序
result = test_data[['loadingOrder', 'timestamp', 'longitude', 'latitude', 'carrierName', 'vesselMMSI', 'onboardDate', 'ETA', 'creatDate']]
result.to_csv('testout-061622.csv',index=False)
if __name__ == '__main__':
main()
|
[
"vigorwei@tencent.com"
] |
vigorwei@tencent.com
|
78a84cfe9643483d2d8675e2fdb4694a62bf1674
|
6ffb674d9575e265afd0e5424bb69f1ad7e985e1
|
/api-test/app.py
|
e76159e14b93f4bc293b40fb4b330f1cf3b30972
|
[] |
no_license
|
n3ih7/Forest-Wildland-Fire-Prediction
|
6c511945779d324c2f7f93aee8373dc9d391c9e3
|
3cd9480372a492b6315059dddad1c9ed7f3d073f
|
refs/heads/master
| 2023-01-25T00:39:12.207796
| 2020-11-20T13:13:33
| 2020-11-20T13:13:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 586
|
py
|
#!flask/bin/python
from flask import Flask, jsonify
app = Flask(__name__)
tasks = [
{
'id': 1,
'title': u'Buy groceries',
'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',
'done': False
},
{
'id': 2,
'title': u'Learn Python',
'description': u'Need to find a good Python tutorial on the web',
'done': False
}
]
@app.route('/todo/api/v1.0/tasks', methods=['GET'])
def get_tasks():
return jsonify({'tasks': tasks})
if __name__ == '__main__':
app.run(debug=False, host='0.0.0.0', port=8090)
|
[
"nono@Nonos-Mac-mini.local"
] |
nono@Nonos-Mac-mini.local
|
f8d41521db5df36c79739acad35a1175915ce5b4
|
21a9947d9060c89d369038c376b7fe75b890a81b
|
/download.py
|
4be9f919bf4f7f8a67fc0678cf44305ef331b24d
|
[] |
no_license
|
Tanishka1997/Learning_PY
|
6705eaed2d6da3bbc37f4e3d3acc33dc5310e332
|
b9b0de28d1e0fd44b4774c20d3c7c419025c6b04
|
refs/heads/master
| 2021-01-13T05:34:45.073251
| 2017-01-29T07:06:24
| 2017-01-29T07:06:24
| 79,948,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
import requests
res = requests.get('https://automatetheboringstuff.com/files/rj.txt')
res.raise_for_status
playfile=open("Romeo and Juliet","wb")
for dt in res.iter_content(100000):
playfile.write(dt)
playfile.close()
|
[
"u15co059@coed.svnit.ac.in"
] |
u15co059@coed.svnit.ac.in
|
d0851d1752901a65deaef8878707401154b2f416
|
816232db2f21e193612eaa60eda0d5897d31caaf
|
/Baek/Hyun/1697.py
|
89117cb190b8f1eca099d774d0f485dc3bb5878c
|
[] |
no_license
|
Juyoung4/StudyAlgorithm
|
a60bfa7657eac57f59200bfa204aff1ad27c79f8
|
4b190e0bfeb268bef4be00ae9bedd9ca8946fbd6
|
refs/heads/master
| 2023-08-31T04:37:07.422641
| 2021-09-27T08:38:09
| 2021-09-27T08:38:09
| 282,757,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 888
|
py
|
# 숨바꼭질
from collections import deque
if __name__ == "__main__":
N, K = map(int, input().split())
queue = deque()
queue.append([N, 0])
visitied = [-1]*100001
while queue:
cur, time = queue.popleft()
if cur == K:
print(time)
break
next_time = time+1
# x-1
if cur-1 >= 0 and (visitied[cur-1] == -1 or visitied[cur-1] > next_time):
queue.append([cur-1, next_time])
visitied[cur-1] = next_time
# x+1
if cur+1 <= 100000 and (visitied[cur+1] == -1 or visitied[cur+1] > next_time):
queue.append([cur+1, next_time])
visitied[cur+1] = next_time
# x*2
if 0 < cur*2 <= 100000 and (visitied[cur*2] == -1 or visitied[cur*2] > next_time):
queue.append([cur*2, next_time])
visitied[cur*2] = next_time
|
[
"47167335+Juyoung4@users.noreply.github.com"
] |
47167335+Juyoung4@users.noreply.github.com
|
8f24bbdaa6aeaab0528c1a32ec5b61952bee8c16
|
fe7e2886e53b70e147474010f892113bfc538d21
|
/CMS/Admin/cmsem.py
|
85bfbed2a84ff4ca07213a03635d735c3caf24a8
|
[] |
no_license
|
BUAA007/CMS
|
cc1af165911417b860d4719606684b8363349dd7
|
ea205cfa04382ba3678dacea8070a416a930be52
|
refs/heads/master
| 2020-03-21T19:37:36.544482
| 2018-07-08T06:04:30
| 2018-07-08T06:04:30
| 138,960,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
import smtplib
from email.mime.text import MIMEText
mailto_list=['572260394@qq.com'] #收件人(列表)
mail_host="smtp.163.com" #使用的邮箱的smtp服务器地址,这里是163的smtp地址
mail_user="h529987" #用户名
mail_pass="15211009hlz" #密码
mail_postfix="163.com" #邮箱的后缀,网易就是163.com
def send_mail(to_list,sub,content):
me="CMS团队"+"<"+mail_user+"@"+mail_postfix+">"
msg = MIMEText(content,_subtype='plain')
msg['Subject'] = sub
msg['From'] = me
for to in to_list:
msg['To'] = to #将收件人列表以‘;’分隔
try:
server = smtplib.SMTP()
server.connect(mail_host) #连接服务器
server.login(mail_user,mail_pass) #登录操作
server.sendmail(me, to, msg.as_string())
server.close()
except:
print("email error")
'''
for i in range(1): #发送1封,上面的列表是几个人,这个就填几
if send_mail(mailto_list,"电话","电话是lalal"): #邮件主题和邮件内容
#这是最好写点中文,如果随便写,可能会被网易当做垃圾邮件退信
print ("done!")
else:
print ("failed!")
'''
|
[
"572260394@qq.com"
] |
572260394@qq.com
|
77392a7107db2ca4ec67311e5605b226d4ad64b0
|
761d331e9ea19f5b3735be46285751beda8033ee
|
/results/store.py
|
aede1f87e9d835e970bbd7b4e4bb7d184f792882
|
[] |
no_license
|
zhonggx80/python-iperf
|
22d45f5924ea4b36e8de2dad2eb0855c63812485
|
49001ba0ac5dc522d112fdd0b02e0b0d07aea705
|
refs/heads/master
| 2022-04-09T07:37:17.739442
| 2015-04-10T20:44:11
| 2015-04-10T20:44:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
from Parser import *
import os, sys
import csv
if len(sys.argv) != 3:
print "usage: python store.py <source.json> <target.csv>"
sys.exit()
inFile = sys.argv[1]
outFile = sys.argv[2]
data = []
legend = ['Bits Per Second', 'Bytes', 'Interval 1', 'Interval 2', 'Interval 3', 'Interval 4', 'Interval 5', 'Interval 6']
par = Parser()
par.extract(inFile)
data.append([par.bps, par.bytes, par.intervalBPS[0], par.intervalBPS[1], par.intervalBPS[2], par.intervalBPS[3], par.intervalBPS[4], par.intervalBPS[5]], )
# test if file already exists before we open it
exists = os.path.isfile(outFile)
with open(outFile, 'a') as file:
writer = csv.writer(file, delimiter=',')
if not exists:
writer.writerows([legend])
writer.writerows(data)
|
[
"adam.r.drescher@gmail.com"
] |
adam.r.drescher@gmail.com
|
4361aff03bd68cad0b825bfa8375bdfe5f9ed063
|
0faac0c32c47715b7b6c3c96264a0cc4cbe9bfca
|
/Hello/test/Step04_list.py
|
710244fe5ed64a7a350d1eb0ff02ee77a7c96331
|
[] |
no_license
|
hyunhee7/python_work
|
339a61c2361a8b37bd7c2c04b7ae5b92c63fb5b2
|
31adb3105fcad6254327f3c383d905b7592cd4af
|
refs/heads/master
| 2021-01-15T13:29:25.231833
| 2017-08-22T09:38:11
| 2017-08-22T09:38:11
| 99,674,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,837
|
py
|
#-*- coding: utf-8 -*-
'''
- list type
1. 순서가 있다
2. 여러 type 의 데이터를 저장할 수 있다.
3. 값 변경 가능
'''
a=[1,2,3]
b=[10, True, "abcd"]
c=[10, 20, 30]
d=a
# 같은 참조값 확인!
print "a id:", id(a)
print "d id:", id(d)
print "a[0] :", a[0]
print "a[1] :", a[1]
print "a[2] :", a[2]
family=[u"엄마",u"아빠",u"나"]
print u"가족 구성원 목록:", family[0], family[1], family[2]
print u"가족 구성원 수 :", len(family)
# list type 에 데이터 추가
family.append(u"남동생")
family.append(u"여동생")
print "추가된 구성원 :", family[3], family[4]
print "가족 구성원 수:", len(family)
# 값에 의한 삭제
family.remove(u"남동생")
# 인덱스에 의한 삭제
del family[0]
print u"삭제후 구성원 목록:", family[0], family[1], family[2]
print u"삭제후 구성원 수:", len(family)
# 빈 list type 객체 만들고
numbers=[]
# 데이터 추가
numbers.append(10)
numbers.append(40)
numbers.append(50)
numbers.append(20)
numbers.append(30)
# list 의 구조 확인
print numbers
# 오름 차순 정렬
numbers.sort()
print u"오름 차순 정렬 후 numbers:", numbers
# 내림 차순 정렬
numbers.sort(reverse=True) # keyword argument 전달
print u"내림차순 정렬 후 numbers:", numbers
# slice 연습
numbers2=[1,2,3,4,5,6,7,8,9,10]
print "hie" ,numbers2[-3:-1]
print numbers2[0:2]
print numbers2
print numbers2[3:5]
print numbers2[-5:-1]
print range(10)
print range(20)
a = range(5)
print u"a type:", type(a)
# list 에 들어 있는 값을 하나씩 순서대로 참조해서
for i in range(10):
# 출력해보기
print i
print "---------------"
friends=["cat","dog","elephant","snake","frog"]
for item in friends:
print item
print u"Step04_list 모듈의 실행순서가 종료 됩니다"
|
[
"hyunhi7@naver.com"
] |
hyunhi7@naver.com
|
d8c4f1108dd0b65bc65e560aa0b611fe37d062a0
|
04e7d6915c5a607d4100730f6b55e7feb658d4ab
|
/src/tests/test_k_primes.py
|
5c4823a1f9a306d7eb1c3ac2e5ec2f525fef38a4
|
[
"MIT"
] |
permissive
|
AveryPratt/code-katas
|
c41edd4c159c66cb1bc782b75df1d2a80c76450f
|
ffd05517b8e825e0741c8cbc7e0b2c5b2a867b01
|
refs/heads/master
| 2021-01-10T18:07:04.006827
| 2017-03-08T07:47:53
| 2017-03-08T07:47:53
| 76,058,436
| 0
| 0
| null | 2017-03-08T07:47:54
| 2016-12-09T18:00:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
"""Tests for k-primes kata."""
import pytest
PRIMES = [
[2, 3],
[3, 5],
[5, 7],
[7, 11],
[11, 13]
]
TESTS = [
[2, 0, 100, [4, 6, 9, 10, 14, 15, 21, 22, 25, 26, 33, 34, 35, 38, 39, 46, 49, 51, 55, 57, 58, 62, 65, 69, 74, 77, 82, 85, 86, 87, 91, 93, 94, 95]],
[3, 0, 100, [8, 12, 18, 20, 27, 28, 30, 42, 44, 45, 50, 52, 63, 66, 68, 70, 75, 76, 78, 92, 98, 99]],
[5, 1000, 1100, [1020, 1026, 1032, 1044, 1050, 1053, 1064, 1072, 1092, 1100]],
[5, 500, 600, [500, 520, 552, 567, 588, 592, 594]],
]
PUZZLES = [
[0, 0],
[138, 1],
[139, 1],
[140, 0],
[143, 2],
[1000, 244]
]
@pytest.mark.parametrize("prime, nxt", PRIMES)
def test_nxt_prime(prime, nxt):
"""Checks that nxt prime of a number returns the nxt lowest value prime."""
from k_primes import nxt_prime
assert nxt_prime(prime) == nxt
@pytest.mark.parametrize("k, start, end, result", TESTS)
def test_k_primes(k, start, end, result):
"""Checks lists of all k-primes in a range of start to end."""
from k_primes import count_Kprimes
assert count_Kprimes(k, start, end) == result
@pytest.mark.parametrize("target, result", PUZZLES)
def test_puzzles(target, result):
"""Checks that puzzle returns correct number of solutions"""
from k_primes import puzzle
assert puzzle(target) == result
|
[
"apratt91@gmail.com"
] |
apratt91@gmail.com
|
7e618e38824b5016cc3f2a51fcaa867cf87c2493
|
a6f7b9c9cdfbc44af3c1c332abc94450cbd0e61b
|
/binpack/apps.py
|
cfdc666405675fd847c0dec63cbed9680510946a
|
[] |
no_license
|
igorpejic/visapi
|
fe2e03a22d2e55e9fe7b31a2b21f098a83743c4d
|
29564eb69efb691f7c27e45a4265dc803efcac8b
|
refs/heads/master
| 2022-12-11T09:21:21.459796
| 2020-12-18T05:48:57
| 2020-12-18T05:48:57
| 224,496,779
| 9
| 1
| null | 2022-12-08T04:22:42
| 2019-11-27T18:51:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 89
|
py
|
from django.apps import AppConfig
class BinpackConfig(AppConfig):
name = 'binpack'
|
[
"igor.pejic@dr.com"
] |
igor.pejic@dr.com
|
1af7478a5ccc39c7e8958468814792161a1bd6df
|
70c3cf5f0c58b0074b33f653500604b5f4f7e198
|
/rm_scraping/scrape.py
|
4de5cfa3e045ca42c9b60e3faf2c82cac2d44c8e
|
[] |
no_license
|
colinmorris/wiki-controversial-titles
|
659a7264c7fe652b696e20414acbd74a4cb1b3f6
|
b089c08655527e10624ecd912a0058fd1f150778
|
refs/heads/master
| 2020-06-03T05:04:07.017575
| 2019-07-27T22:32:34
| 2019-07-27T22:32:34
| 191,450,697
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,391
|
py
|
import csv
import os
import mwclient
import argparse
import pandas as pd
from RM import RM
from constants import *
FLUSH_EVERY = 50
LIMIT = 0
NEXT_ID = 0
def scrape_rms_for_title(title, f_fail, debug=0):
global NEXT_ID
pg = wiki.pages[title]
section_ix = 1
while 1:
try:
section = pg.text(section=section_ix)
except KeyError:
break
if RM.section_is_rm(section):
try:
yield RM(section, title, debug=debug, id=NEXT_ID)
except Exception as e:
row = '{}\t{}\n'.format(title, section_ix)
f_fail.write(row)
print('Exception:', e)
else:
NEXT_ID += 1
section_ix += 1
def flush_rms(rms, rm_w, votes_w, pols_w):
rm_w.writerows(rm.row for rm in rms)
vote_rows = []
pol_rows = []
for rm in rms:
for vote in rm.votes:
vote['rm_id'] = rm.id
vote_rows.extend(rm.votes)
for user, counts in rm.user_to_policies.items():
for pol, n in counts.items():
row = dict(user=user, pol=pol, n=n, rm_id=rm.id)
pol_rows.append(row)
votes_w.writerows(vote_rows)
pols_w.writerows(pol_rows)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--clobber', action='store_true', help='Overwrite existing csv files')
parser.add_argument('-r', '--title-re',
help='Regex to add as an intitle filter to search query')
parser.add_argument('--invert-titlematch', action='store_true',
help='Invert the intitle filter')
args = parser.parse_args()
if args.clobber:
fresh = True
else:
try:
st = os.stat('rms.csv')
except FileNotFoundError:
fresh = True
else:
fresh = st.st_size == 0
extant_pages = set()
if not fresh:
df = pd.read_csv('rms.csv')
NEXT_ID = df['id'].max() + 1
print("Found existing files. Appending. Ids starting at {}".format(NEXT_ID))
extant_pages = set(df['talkpage'].values)
oflag = 'w' if fresh else 'a'
frm = open('rms.csv', oflag)
fvotes = open('votes.csv', oflag)
fpols = open('pols.csv', oflag)
out_rm = csv.DictWriter(frm, RM.COLS)
out_votes = csv.DictWriter(fvotes, RM.VOTE_COLS)
out_pols = csv.DictWriter(fpols, RM.POL_COLS)
writers = [out_rm, out_votes, out_pols]
if fresh:
for wr in writers:
wr.writeheader()
wiki = mwclient.Site(('https', 'en.wikipedia.org'))
query = 'insource:/"{}"/'.format(RMTOP)
if args.title_re:
query += ' {}intitle:/{}/'.format(
('-' if args.invert_titlematch else ''),
args.title_re
)
results = wiki.search(query, namespace=1)
rms = []
failures = []
f_fail = open('failures.tsv', oflag)
i_pg = 0
i_rm = 0
skipped = 0
for result in results:
# Don't rescrape pages we've already done.
if result['title'] in extant_pages:
skipped += 1
continue
for rm in scrape_rms_for_title(result['title'], f_fail):
rms.append(rm)
i_rm += 1
if len(rms) >= FLUSH_EVERY:
flush_rms(rms, out_rm, out_votes, out_pols)
rms = []
if LIMIT and i_rm >= LIMIT:
print("Reached limit. rms={}. Stopping".format(i_rm))
break
i_pg += 1
if i_pg % 100 == 0:
print("i_pg = {}; skipped = {}".format(i_pg, skipped))
if rms:
flush_rms(rms, out_rm, out_votes, out_pols)
for f in [frm, fvotes, fpols, f_fail]:
f.close()
print("Skipped {} pages".format(skipped))
|
[
"colin.morris2@gmail.com"
] |
colin.morris2@gmail.com
|
7cae65bd64fbcb449d3d6c319aa30a205b8fe418
|
681943710088a2b3bae34ae5cf1ad56ab5d1e6d8
|
/Python/Tests/MatchGetter.py
|
23f6bc1f6b223bc7d141902f2767c5bf060525ee
|
[] |
no_license
|
lumoura/DotaTwoHighFive
|
dfe770caef53b371af5211d68fbd0af68d6ccc4a
|
9316c6eba746ad6f20b086d81a3f8ff3aba3cd74
|
refs/heads/master
| 2022-04-01T09:46:07.841526
| 2020-01-18T01:41:18
| 2020-01-18T01:41:18
| 58,897,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,297
|
py
|
import dota2api
import numpy as np
from enumerator import enumerate
# Initialise the API module
api = dota2api.Initialise("2179FDF11489B59187A1AD12B4B1E2C9", raw_mode=True)
total_matches = int(open("total.txt", "r").readline())
how_much_more_str = input()
if how_much_more_str == "":
wanted_matches = total_matches + 500
else:
wanted_matches = total_matches + int(how_much_more_str)
print("Wanted matches: " + str(wanted_matches))
# How many heroes?
T = 114
# How numbers are going to be stored?
dt = np.dtype(np.int32)
dtsize = dt.itemsize
while (total_matches < wanted_matches):
# Read the last parsed match number
oldnum = int(open("newnum.txt", "r").readline())
# Get new matchlist beginning at last parsed match number
match_list = api.get_match_history_by_seq_num(oldnum)
newnum = oldnum
matches = match_list.get("matches")
if (len(matches) > 10):
for match in matches:
valid = True
match_seq_num = match.get("match_seq_num")
# If this match is older than the oldest parsed match, ignore it
if(match_seq_num <= oldnum):
valid = False
continue
# Check if it is a valid lobby type
lobby_type = match.get("lobby_type")
if (lobby_type == 0) or (lobby_type == 7):
for player in match.get("players"):
leaver_status = player.get("leaver_status")
# Check for players' leaver status
if (leaver_status != 0) and (leaver_status != 1):
valid = False
break
else:
valid = False
if valid:
if (match_seq_num > newnum):
newnum = match_seq_num
radiant_ids = np.array([], dtype=dt)
dire_ids = np.array([], dtype=dt)
for player in match.get("players"):
if (player.get("player_slot") < 128):
radiant_ids = np.append(radiant_ids, int(player.get("hero_id")))
else:
dire_ids = np.append(dire_ids, int(player.get("hero_id")))
radiant_ids = np.sort(radiant_ids)
dire_ids = np.sort(dire_ids)
radiant_win = match.get("radiant_win")
# Single Heroes:
with open("1.wr", "rb+") as wrfile:
for id1 in range(0, 5):
position = enumerate(h5=radiant_ids[id1], T=T)
if radiant_win:
wrfile.seek(position*2*dtsize)
wins = np.fromfile(wrfile, dtype=dt, count=1)
wins += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(wins).tobytes())
wrfile.seek((position*2*dtsize)+dtsize)
total = np.fromfile(wrfile, dtype=dt, count=1)
total +=1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(total).tobytes())
for id1 in range(0, 5):
position = enumerate(h5=dire_ids[id1], T=T)
if not radiant_win:
wrfile.seek(position*2*dtsize)
wins = np.fromfile(wrfile, dtype=dt, count=1)
wins += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(wins).tobytes())
wrfile.seek((position*2*dtsize)+dtsize)
total = np.fromfile(wrfile, dtype=dt, count=1)
total +=1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(total).tobytes())
# Dual Heroes
with open("2.wr", "rb+") as wrfile:
for id1 in range(0, 4):
for id2 in range(id1+1, 5):
position = enumerate(h4=radiant_ids[id1], h5=radiant_ids[id2], T=T)
if radiant_win:
wrfile.seek(position*2*dtsize)
wins = np.fromfile(wrfile, dtype=dt, count=1)
wins += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(wins).tobytes())
wrfile.seek((position*2*dtsize)+dtsize)
total = np.fromfile(wrfile, dtype=dt, count=1)
total += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(total).tobytes())
for id1 in range(0, 4):
for id2 in range(id1+1, 5):
position = enumerate(h4=dire_ids[id1], h5=dire_ids[id2], T=T)
if not radiant_win:
wrfile.seek(position*2*dtsize)
wins = np.fromfile(wrfile, dtype=dt, count=1)
wins += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(wins).tobytes())
wrfile.seek((position*2*dtsize)+dtsize)
total = np.fromfile(wrfile, dtype=dt, count=1)
total += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(total).tobytes())
# Tri Heroes
with open("3.wr", "rb+") as wrfile:
for id1 in range(0, 3):
for id2 in range(id1+1, 4):
for id3 in range(id2+1, 5):
position = enumerate(h3=radiant_ids[id1], h4=radiant_ids[id2], h5=radiant_ids[id3], T=T)
if radiant_win:
wrfile.seek(position*2*dtsize)
wins = np.fromfile(wrfile, dtype=dt, count=1)
wins += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(wins).tobytes())
wrfile.seek((position*2*dtsize)+dtsize)
total = np.fromfile(wrfile, dtype=dt, count=1)
total += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(total).tobytes())
for id1 in range(0, 3):
for id2 in range(id1+1, 4):
for id3 in range(id2+1, 5):
position = enumerate(h3=dire_ids[id1], h4=dire_ids[id2], h5=dire_ids[id3], T=T)
if not radiant_win:
wrfile.seek(position*2*dtsize)
wins = np.fromfile(wrfile, dtype=dt, count=1)
wins += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(wins).tobytes())
wrfile.seek((position*2*dtsize)+dtsize)
total = np.fromfile(wrfile, dtype=dt, count=1)
total += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(total).tobytes())
# Quad Heroes
with open("4.wr", "rb+") as wrfile:
for id1 in range(0, 2):
for id2 in range(id1+1, 3):
for id3 in range(id2+1, 4):
for id4 in range(id3+1, 5):
position = enumerate(h2=radiant_ids[id1], h3=radiant_ids[id2], h4=radiant_ids[id3], h5=radiant_ids[id4], T=T)
if radiant_win:
wrfile.seek(position*2*dtsize)
wins = np.fromfile(wrfile, dtype=dt, count=1)
wins += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(wins).tobytes())
wrfile.seek((position*2*dtsize)+dtsize)
total = np.fromfile(wrfile, dtype=dt, count=1)
total += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(total).tobytes())
for id1 in range(0, 2):
for id2 in range(id1+1, 3):
for id3 in range(id2+1, 4):
for id4 in range(id3+1, 5):
position = enumerate(h2=dire_ids[id1], h3=dire_ids[id2], h4=dire_ids[id3], h5=dire_ids[id4], T=T)
if not radiant_win:
wrfile.seek(position*2*dtsize)
wins = np.fromfile(wrfile, dtype=dt, count=1)
wins += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(wins).tobytes())
wrfile.seek((position*2*dtsize)+dtsize)
total = np.fromfile(wrfile, dtype=dt, count=1)
total += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(total).tobytes())
# Quid Heroes
with open("5.wr", "rb+") as wrfile:
position = enumerate(h1=radiant_ids[0], h2=radiant_ids[1], h3=radiant_ids[2], h4=radiant_ids[3], h5=radiant_ids[4], T=T)
if radiant_win:
wrfile.seek(position*2*dtsize)
wins = np.fromfile(wrfile, dtype=dt, count=1)
wins += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(wins).tobytes())
wrfile.seek((position*2*dtsize)+dtsize)
total = np.fromfile(wrfile, dtype=dt, count=1)
total += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(total).tobytes())
position = enumerate(h1=dire_ids[0], h2=dire_ids[1], h3=dire_ids[2], h4=dire_ids[3], h5=dire_ids[4], T=T)
if not radiant_win:
wrfile.seek(position*2*dtsize)
wins = np.fromfile(wrfile, dtype=dt, count=1)
wins += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(wins).tobytes())
wrfile.seek((position*2*dtsize)+dtsize)
total = np.fromfile(wrfile, dtype=dt, count=1)
total += 1
wrfile.seek(-dtsize, 1)
wrfile.write(np.array(total).tobytes())
total_matches += 1
with open("newnum.txt", "w") as newnum_file:
newnum_file.write(str(newnum))
with open("total.txt", "w") as total_file:
total_file.write(str(total_matches))
print(str(total_matches) + " matches parsed!")
else:
print("Insufficient matches! Abort!")
break
print("Ended job with " + str(total_matches) + " matches parsed.")
|
[
"lucasmour4@gmail.com"
] |
lucasmour4@gmail.com
|
8e3a8f3f6e64298e4915a92b2635127720a39df9
|
144d892b508f441fe2fb46feeff91ee4f8e85a13
|
/venv/bin/twistd
|
66d2b8c2f7e0d1949d44ee5967e51df5b2eecccd
|
[] |
no_license
|
Raspunt/WebSocket_test
|
490a1a848c7a4355d253b5bae5a4f8083c7242da
|
71fb93601d1920dcb3d2b16ea2ed5d39cf3b978a
|
refs/heads/master
| 2023-08-18T01:10:22.909672
| 2021-09-30T05:53:46
| 2021-09-30T05:53:46
| 411,939,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
#!/home/maxim/JustPython/RealTime_pr/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from twisted.scripts.twistd import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"m_ryabinin@inbox.ru"
] |
m_ryabinin@inbox.ru
|
|
03b9bbb19c5ef10a43487d477f47249ab946cb74
|
2f3c21a1c3eee658357514c4c09c3f28a4db955c
|
/source/miniworldmaker/containers/container.py
|
50aea71e3c639912a42be739ec6e551d0c8d8175
|
[
"MIT"
] |
permissive
|
zormit/miniworldmaker
|
8625552a7c4b3bf2513f189586fc9891f1db1764
|
8003aece905b0cffec9850af3805b03372f3dc97
|
refs/heads/master
| 2023-03-07T11:36:18.595075
| 2020-04-29T10:06:28
| 2020-04-29T10:06:28
| 259,937,089
| 0
| 0
|
MIT
| 2020-04-29T13:46:08
| 2020-04-29T13:46:08
| null |
UTF-8
|
Python
| false
| false
| 3,030
|
py
|
import logging
import pygame
class Container:
"""
Base class for containers
"""
clog = logging.getLogger("Container")
def __init__(self):
self.dirty = 1
self.surface = pygame.Surface((1, 1))
self.background_color = (255, 255, 255)
self.default_size = 100
self.registered_events = {"mouse_left", "mouse_right"}
# private
self._window = None # Set in add_to_window
self._container_width = 0 # Set in add_to_window
self._container_height = 0 # Set in add_to_window
self.container_top_left_x = 0 # Set in add_to_window
self.container_top_left_y = 0 # Set in add_to_window
self.docking_position = None # Set in add_to_windows
self._image = None
@property
def container_width(self):
return self._container_width
@property
def container_height(self):
return self._container_height
@property
def window(self):
return self._window
def _add_to_window(self, window, dock, size=None):
self._window = window
if size== None:
size = self.default_size
if dock == "top_left":
self.container_top_left_x = 0
self.container_top_left_y = 0
self.docking_position = dock
elif dock == "right":
self.container_top_left_x = self._window.window_width
self.container_top_left_y = 0
self.docking_position = dock
self._container_height = self._window.window_height
self._container_width = size
elif dock == "bottom":
self.container_top_left_x = 0
self.container_top_left_y = self._window.window_height
self.docking_position = dock
self._container_width = self._window.window_width
self._container_height = size
self.clog.info("Added Container {0} with width: {1} and height {2}".format(self, self.width, self.height))
self._image = pygame.Surface((self.width, self.height))
@property
def size(self):
return self._container_width, self._container_height
def repaint(self):
pass
def blit_surface_to_window_surface(self):
self._window.window_surface.blit(self.surface, self.rect)
def remove(self):
pass
def handle_event(self, event, data):
self.get_event(event, data)
def get_event(self, event, data):
pass
def is_in_container(self, x, y):
if self.rect.collidepoint((x, y)):
return True
else:
return False
@property
def rect(self):
return pygame.Rect(self.container_top_left_x, self.container_top_left_y, self.width, self.height)
@property
def window_docking_position(self):
return self.docking_position
def update(self):
pass
@property
def width(self):
return self._container_width
@property
def height(self):
return self._container_height
|
[
"andreas.siebelqit-teaching.de"
] |
andreas.siebelqit-teaching.de
|
a1eae57e9904cfd83eab09b30879ac329e1f5432
|
df085616ab9d61535ba4bc95c17ee75f70d9140b
|
/scraper/scraper/spiders/a27_ua.py
|
438e67416b93eae84fb7e016dde66bd418deb151
|
[] |
no_license
|
andrewFisherUa/coax-test
|
07a8884e86fd6a1685d3608ad87f5feb38a04f3d
|
31b7691191fbdaa7c85dfdebffe074036ddf2324
|
refs/heads/master
| 2020-09-30T23:41:09.030172
| 2018-11-10T12:48:33
| 2018-11-10T12:48:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,438
|
py
|
# -*- coding: utf-8 -*-
import scrapy
import re
class Tile(scrapy.Item):
name = scrapy.Field()
height = scrapy.Field()
width = scrapy.Field()
class A27UaSpider(scrapy.Spider):
name = 'a27_ua'
allowed_domains = ['27.ua']
start_urls = ['https://27.ua/ua/shop/keramicheskaya-plitka-i-keramogranit/fs/otdelochnaya-poverhnost-stena/']
base = 'https://27.ua{}'
counter = 0
def parse(self, response):
next_page = response.xpath('//a[@rel="next"]/@href').extract()
next_page = self.base.format(next_page[0]) if next_page else None
# if next_page:
self.counter += 1
if self.counter < 2:
yield from self.parse_tiles(response)
yield scrapy.Request(next_page, callback=self.parse,
dont_filter=True)
def parse_tiles(self, response):
tiles = response.xpath('//b[@class="nc"]/text()').extract()
for tile in tiles:
ser = re.search(r'(?P<height>\d+,*?\d*?)[\*xх]+(?P<width>\d+,*?\d*?)', tile)
if not ser:
continue
height = ser.group('height')
width = ser.group('width')
height = float(height.replace(',', '.')) * 10 if height else None
width = float(width.replace(',', '.')) * 10 if width else None
if height and width:
yield Tile(height=height, width=width, name=tile.strip())
|
[
"rryabec@gmail.com"
] |
rryabec@gmail.com
|
d6cbdb0585782c2794ba7450f08232c03959e33d
|
eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429
|
/data/input/alan-hicks/django-dmarc/dmarc/views.py
|
9a3cac700e01af8df8f7ac8922d8369c5b52f135
|
[] |
no_license
|
bopopescu/pythonanalyzer
|
db839453bde13bf9157b76e54735f11c2262593a
|
8390a0139137574ab237b3ff5fe8ea61e8a0b76b
|
refs/heads/master
| 2022-11-22T02:13:52.949119
| 2019-05-07T18:42:52
| 2019-05-07T18:42:52
| 282,079,884
| 0
| 0
| null | 2020-07-23T23:46:09
| 2020-07-23T23:46:08
| null |
UTF-8
|
Python
| false
| false
| 1,280
|
py
|
#----------------------------------------------------------------------
# Copyright (c) 2015, Persistent Objects Ltd http://p-o.co.uk/
#
# License: BSD
#----------------------------------------------------------------------
"""
DMARC views
http://dmarc.org/resources/specification/
"""
from django.contrib.admin.views.decorators import staff_member_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render
from dmarc.models import Report
# Create your views here.
@staff_member_required
def dmarc_report(request):
report_list = Report.objects.select_related(
'reporter',
).prefetch_related(
'records__results'
).order_by('-date_begin', 'reporter__org_name').all()
paginator = Paginator(report_list, 2)
page = request.GET.get('page')
try:
reports = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
reports = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
reports = paginator.page(paginator.num_pages)
context = {
"reports": reports,
}
return render(request, 'dmarc/report.html', context)
|
[
"rares.begu@gmail.com"
] |
rares.begu@gmail.com
|
9384bab2ba86503352ed5be8fe965b7eafe59e48
|
9376bcf3c67a67b102f3206f7cf74d025feb464f
|
/simNe_functions_analyze.py
|
78590bc0549417320ae19845767c26e0720b7590
|
[] |
no_license
|
rwaples/simNe
|
6b6ae2f9429b2c9ffa3605071f4b0a5c60ecb562
|
cc09aaae3da04dd9f2cef8edbd9c8f6d223a64c9
|
refs/heads/master
| 2020-03-24T07:11:26.465346
| 2018-07-27T09:19:17
| 2018-07-27T09:19:17
| 142,555,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,587
|
py
|
# coding: utf-8
# # Post - Sim Analysis
# In[7]:
#from pyplink import PyPlink
import numpy as np
import pandas as pd
import scipy as sp
import itertools
import scipy.spatial.distance
import matplotlib.pyplot as plt
import seaborn as sns
import collections
import glob
import random
from random import shuffle
import os.path
# In[8]:
import msprime
import allel
# In[9]:
import simuPOP
import simuPOP.utils
# In[10]:
from pylab import rcParams
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
# # Functions to
# In[6]:
def get_rsq(geno_mat):
"""returns the squared pearson r for each pair of loci in a condensed distance matrix"""
return scipy.spatial.distance.pdist(geno_mat.T, lambda x, y: scipy.stats.pearsonr(x, y)[0])**2
# In[ ]:
def get_r2_fast(geno_mat):
norm_snps = (geno_mat - sp.mean(geno_mat, 0)) / sp.std(geno_mat, 0, ddof=1)
norm_snps = norm_snps.T
num_snps, num_indivs = norm_snps.shape
ld_mat = (sp.dot(norm_snps, norm_snps.T) / float(num_indivs-1))**2
return(ld_mat)
def get_overall_mean(ld_mat):
return(ld_mat[np.triu_indices_from(ld_mat, k=1)].mean())
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return itertools.izip(a, a)
def get_nonoverlapping_mean(ld_mat):
# randomly selects non-overlapping pairs of loci and gets their mean
n_loci = ld_mat.shape[1]
indexes = range(n_loci)
random.shuffle(indexes)
a = indexes[0 : n_loci/2]
b = indexes[n_loci/2 : n_loci]
dat = ld_mat[a,b]
return (dat.mean())
def shuffle_mat(mat):
# shuffles a square matrix by rows and columns, while keeping the association
shape1, shape2 = mat.shape
assert(shape1 == shape2)
new_order = range(shape1)
random.shuffle(new_order)
return(mat[new_order][:,new_order])
def get_block_r2(ld_mat, nblocks):
# shuffles the matrix, then divides it into nblocks and takes the average of the meanr r2 within each block
ld_mat = shuffle_mat(ld_mat)
blocks = np.array_split(range(ld_mat.shape[1]), nblocks)
means = []
for block in blocks:
submat = ld_mat[block][:,block]
means.append(get_overall_mean(submat))
sum_squares = np.sum(map(lambda x: x**2, means))
# print submat
#print means
return(np.mean(means), sum_squares)
# In[ ]:
def get_weirFst(twopop_geno_mat, popsize):
ga = to_Genotype_array(twopop_geno_mat)
a, b, c = allel.weir_cockerham_fst(ga, subpops=[range(popsize), range(popsize, popsize*2)])
fst = np.sum(a) / (np.sum(a) + np.sum(b) + np.sum(c))
return fst
# In[ ]:
def to_Genotype_array(geno_mat):
a = np.where(geno_mat>0, 1, 0)
b = np.where(geno_mat>1, 1, 0)
return (allel.GenotypeArray(np.stack([a,b], axis =2)))
# In[ ]:
#allel.GenotypeArray(np.stack([a,b], axis =2))
# In[ ]:
# function to calculate temporal F given an ancestral and target pop
def get_temporalF(ancestral, current):
"""
Nei and Tajima 1981 temporal F
"""
#make sure the loci in each pop line up!
P1 = ancestral.mean(0)/2.
P2 = current.mean(0)/2.
PP = (P1+P2)/2.0
TF_num = (P2-P1)**2
TF_denom = (PP - P1*P2)
TF_unweighted = (TF_num/TF_denom).mean() # mean of the ratios
TF_weighted = TF_num.sum()/TF_denom.sum() # ratio of the sums
return(TF_unweighted, TF_weighted)
# In[ ]:
def get_Jorde_Ryman_F(ancestral, current):
xi = ancestral.mean(0)/2.
yi = current.mean(0)/2.
zi = (xi+yi)/2.
num = ((xi - yi)**2).sum()
denom = (zi*(1.0-zi)).sum()
return(num/denom)
# In[ ]:
def get_Nei_Gst(pop1, pop2):
P1 = pop1.mean(0)/2.
P2 = pop2.mean(0)/2.
sP1 = 1.0 - (P1**2 + (1-P1)**2)
sP2 = 1.0 - (P2**2 + (1-P2)**2)
Hexp = (sP1+sP2)/2.0
Pbar = (P1 + P2)/2.0
Htot = 1 - Pbar**2 - (1-Pbar)**2
F = 1.0 - Hexp/Htot
Fst_u = F.mean() # unweighted
G_num = Htot - Hexp
G_denom = Htot
Fst_w = G_num.sum()/G_denom.sum() # weighted
#return(P1, P2)
#return(F)
#return(F, G)
return(Fst_u, Fst_w)
# In[ ]:
#test_pop1 = np.loadtxt('./share/feb25a/Ne-400_Chr-1/Ne-400_Chr-1_Frep-0.geno', dtype = 'int8', delimiter='\t')
#test_pop2 = np.loadtxt('./share/feb25a/Ne-400_Chr-1/Ne-400_Chr-1_Frep-1.geno', dtype = 'int8', delimiter='\t')
#test_ancestral_pop = np.loadtxt('./share/feb25a/Ne-400_Chr-1/Ne-400_Chr-1.inital.txt', dtype = 'int8', delimiter='\t')
# In[ ]:
def get_allele_counts(geno_array):
"""Used for calcualting Fst and the (2d) SFS
rows are loci,
columns are counts of specific alleles, sum of each row = sample size
"""
n_samples = geno_array.shape[0]
#print n_samples
derived_counts = geno_array.sum(axis = 0) # sum alleles over indiviudals
ancestral_counts = (n_samples*2) - derived_counts
#return derived_counts
return np.stack((ancestral_counts, derived_counts)).T
# In[ ]:
def get_Hudson_Fst(pop1, pop2):
ac1 = get_allele_counts(pop1)
ac2 = get_allele_counts(pop2)
num, denom = allel.stats.hudson_fst(ac1, ac2)
fst_overall = np.sum(num)/np.sum(denom)
#print fst_overall
#return(num, denom)
return(fst_overall)
# In[ ]:
def get_Patterson_f2(pop1, pop2):
"""numerator from hudsons Fst"""
ac1 = get_allele_counts(pop1)
ac2 = get_allele_counts(pop2)
return allel.stats.admixture.patterson_f2(ac1, ac2).mean()
# In[ ]:
def get_2dSFS(pop1, pop2):
ac1 = get_allele_counts(pop1)
ac2 = get_allele_counts(pop2)
return(allel.stats.sf.joint_sfs_folded(ac1, ac2))
# In[ ]:
def do_analysis(target_dir, L = [16, 64, 256], S = [25, 50, 100, 200], min_AC = 2, replicates = 1):
"""do_analysis should be pointed at a directory,
this will make it easier to manage the target replicate, ancestral pop, and pairs of populations"""
results_file = os.path.join(target_dir, 'results.txt')
with open(results_file, 'w') as RESULTS:
#RESULTS.write('file\tL\tS\tSrep\tmean_r2\tLocus_pairs\n')
RESULTS.write('\t' .join(['file','L','S','Srep', 'actual_pairs_r2', 'overall_r2', 'block_r2', 'block_sum_squares', 'nonoverlap_r2',
'temporalF_u', 'temporalF_w', 'Patterson_f2', 'Hudson_fst', 'Jorde_Ryman_F', 'weir_Fst', 'actual_nloci_temporal'
]))
RESULTS.write('\n')
ancestral_file = glob.glob(os.path.join(target_dir, '*.inital.txt'))
assert(len(ancestral_file)==1)
ancestral_file = ancestral_file[0]
ancestral_data = np.loadtxt(ancestral_file, dtype = 'int8', delimiter='\t')
ancestral_nind = ancestral_data.shape[0]
Frep_file_list = glob.glob(os.path.join(target_dir, '*.geno'))
print('found {} *.geno files'.format(len(Frep_file_list)))
cnt = 0
for infile in Frep_file_list:
cnt += 1
if cnt % 20 == 0:
print ("working on file: {}".format(infile))
base_data = np.loadtxt(infile, dtype = 'int8', delimiter='\t')
rep_nind = base_data.shape[0]
for n_loci in L:
for n_sample in S:
for Srep in range(replicates):
# pick individuals for the rep and ancestral
# each rep file has up to 200 inds
if n_sample <= rep_nind:
pick_ind_rep = np.sort(np.random.choice(a = rep_nind, size = n_sample, replace = False))
pick_ind_anc = np.sort(np.random.choice(a = ancestral_nind, size = n_sample, replace = False))
subset_data = base_data[pick_ind_rep,:]
# do the ancestral data matching the sample size, and as a awhole
#ancestral_full = ancestral_data.copy()
ancestral_subset = ancestral_data[pick_ind_anc,:]
# filter for Allele count (AC) based on the sample
rep_AC = subset_data.sum(0)
both_AC = rep_AC + ancestral_subset.sum(0) # sum of AC in the acestral and base
max_rep_AC = 2 * n_sample - min_AC
max_both_AC = 4 * n_sample - min_AC
lower_rep_test = min_AC <= rep_AC
upper_rep_test = max_rep_AC >= rep_AC
lower_both_test = min_AC <= both_AC
upper_both_test = max_both_AC >= both_AC
passed_AC_rep = np.logical_and(lower_rep_test, upper_rep_test)
passed_AC_both = np.logical_and(lower_both_test, upper_both_test)
# Pick the set of loci passing MAC filters
# also subset the ancestral data in the same way!!
r2_input = subset_data[:, passed_AC_rep]
temporal_rep = subset_data[:, passed_AC_both]
temporal_anc = ancestral_subset[:, passed_AC_both]
#ancestral_full = ancestral_full[:,passed]
remaining_loci_r2 = r2_input.shape[1]
remaining_loci_temporal = temporal_rep.shape[1]
if remaining_loci_r2 > n_loci:
pick_loci_r2 = np.sort(np.random.choice(a = remaining_loci_r2, size = n_loci, replace = False))
r2_input = r2_input[:, pick_loci_r2]
actual_nloci_r2 = r2_input.shape[1]
actual_pairs_r2 = (actual_nloci_r2 * (actual_nloci_r2-1))/2
r2_mat = get_r2_fast(r2_input)
block_r2, block_sum_squares = get_block_r2(r2_mat, 16)
nonoverlap_r2 = get_nonoverlapping_mean(r2_mat)
overall_r2 = get_overall_mean(r2_mat)
if remaining_loci_temporal > n_loci:
pick_loci_temporal = np.sort(np.random.choice(a = remaining_loci_temporal, size = n_loci, replace = False))
temporal_rep = temporal_rep[:, pick_loci_temporal]
temporal_anc = temporal_anc[:, pick_loci_temporal]
actual_nloci_temporal = temporal_anc.shape[1]
# actually do the temporal calculations
temporalF_u, temporalF_w = get_temporalF(temporal_anc, temporal_rep)
Pf2 = get_Patterson_f2(temporal_anc, temporal_rep)
Hfst = get_Hudson_Fst(temporal_anc, temporal_rep)
Jorde_Ryman_F = get_Jorde_Ryman_F(temporal_anc, temporal_rep)
forWeir = np.concatenate([temporal_anc, temporal_rep], axis =0).T
weir_Fst = get_weirFst(forWeir, popsize = n_sample)
#SFS = get_2dSFS(ancestral_full, subset_data)
#SFS = SFS.flatten()
with open(results_file, 'a') as RESULTS:
RESULTS.write('{}\t'.format(infile))
for xx in [n_loci, n_sample, Srep, actual_pairs_r2, overall_r2, block_r2, block_sum_squares, nonoverlap_r2]:
RESULTS.write('{}\t'.format(xx))
for xx in [temporalF_u, temporalF_w, Pf2, Hfst, Jorde_Ryman_F, weir_Fst]:
RESULTS.write('{}\t'.format(xx))
for xx in [actual_nloci_temporal]:
RESULTS.write('{}'.format(xx))
#for xx in [SFS]:
# RESULTS.write(','.join([str(x) for x in SFS]))
# RESULTS.write('\t')
RESULTS.write('\n')
# pairwise analysis of populations
pair_results_file = os.path.join(target_dir, 'results.pairs.txt')
with open(pair_results_file, 'w') as PAIR_RESULTS:
PAIR_RESULTS.write('\t' .join(['file1','file2', 'S', 'L', 'Srep',
'Patterson_f2', 'Hudson_fst', 'weir_Fst', 'NeiGst_unweghted', 'NeiGst_weghted', 'actual_loci'
]))
PAIR_RESULTS.write('\n')
for file1, file2 in zip(Frep_file_list,Frep_file_list[1:])[::2]:
pop1_data = np.loadtxt(file1, dtype = 'int8', delimiter='\t')
pop2_data = np.loadtxt(file2, dtype = 'int8', delimiter='\t')
pop1_nind = pop1_data.shape[0]
pop2_nind = pop2_data.shape[0]
#print file1, file2
for n_loci in L:
for n_sample in S:
for Srep in range(replicates):
if n_sample <= rep_nind:
pick_inds = np.sort(np.random.choice(a = pop1_nind, size = n_sample, replace = False))
subset_pop1 = pop1_data[pick_inds, :]
pick_inds = np.sort(np.random.choice(a = pop2_nind, size = n_sample, replace = False))
subset_pop2 = pop2_data[pick_inds, :]
# filter for combined MAC
both_AC = subset_pop1.sum(0) + subset_pop2.sum(0)
max_both_AC = 4 * n_sample - min_AC
lower_both_test = min_AC <= both_AC
upper_both_test = max_both_AC >= both_AC
passed_AC_both = np.logical_and(lower_both_test, upper_both_test)
subset_pop1 = subset_pop1[:, passed_AC_both]
subset_pop2 = subset_pop2[:, passed_AC_both]
# random subset of loci
remaining_loci = subset_pop1.shape[1]
if remaining_loci > n_loci:
pick_loci = np.sort(np.random.choice(a = remaining_loci, size = n_loci, replace = False))
subset_pop1 = subset_pop1[:,pick_loci]
subset_pop2 = subset_pop2[:,pick_loci]
actual_loci = subset_pop1.shape[1]
# statistics
Pf2 = get_Patterson_f2(subset_pop1, subset_pop2)
Hfst = get_Hudson_Fst(subset_pop1, subset_pop2)
forWeir = np.concatenate([subset_pop1, subset_pop2], axis =0).T
weir_Fst = get_weirFst(forWeir, popsize = n_sample)
NeiGst_unweighted, NeiGst_weighted = get_Nei_Gst(subset_pop1, subset_pop2)
# TODO add Hudson Fst back in
with open(pair_results_file, 'a') as PAIR_RESULTS:
for xx in [file1, file2, n_sample, n_loci, Srep,
Pf2, Hfst, weir_Fst, NeiGst_unweighted, NeiGst_weighted]:
PAIR_RESULTS.write('{}\t'.format(xx))
for xx in [actual_loci]:
PAIR_RESULTS.write('{}'.format(xx))
PAIR_RESULTS.write('\n')
# # Other
# In[ ]:
def get_genotype_ld(sim_data):
first_r2_mat = allel.stats.ld.rogers_huff_r_between(get_diploid_genotypes(sim_data), get_diploid_genotypes(sim_data), fill=np.nan)
plt.imshow(first_r2_mat, interpolation="none", vmin=0, vmax=1, cmap="Blues")
plt.xticks([])
plt.yticks([])
plt.show()
# In[ ]:
# Not used !!!
def wrap_r2(file_list, output_file, MAF, L = [16, 64, 256, 1024, 4096], S = [50, 100], replicates = 1):
with open(output_file, 'w') as OUTFILE:
OUTFILE.write('file\tL\tS\tSrep\tmean_r2\tLocus_pairs\n') # write header
for infile in file_list:
print "working on file: {}".format(infile)
base_data = np.loadtxt(infile, dtype = 'int8', delimiter='\t')
for n_loci in L:
for n_sample in S:
for Srep in range(replicates):
# sample inds
pick_inds = np.sort(np.random.choice(a = 200, size = n_sample, replace = False))
subset_data = base_data[pick_inds,:]
# filter for MAF based on the freqs in the sample
subset_data = filter_MAF(subset_data, MAF = MAF)
# select n_loci from among those loci passing MAF filters
remaining_loci = subset_data.shape[1]
pick_loci = np.sort(np.random.choice(a = remaining_loci, size = n_loci, replace = False))
subset_data = subset_data[:,pick_loci]
actual_loci = subset_data.shape[1]
n_locus_pairs = (actual_loci * (actual_loci-1))/2
#print pick_inds, pick_loci
#return(subset_data)
mean_rsq = get_r2_fast(subset_data).mean()
with open(output_file, 'a') as OUTFILE:
OUTFILE.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(infile, n_loci, n_sample, Srep, mean_rsq, n_locus_pairs))
# ## Robin's code
# GetFst <- function(yy) {
# OldP = yy[1,]
# P2 = yy[2,]
# Fst = seq(1:length(L))
# SumP1 = 1 - (P2^2 + (1-P2)^2)
# SumP2 = 1 - (OldP^2 + (1-OldP)^2)
# Hexp = (SumP1+SumP2)/2
# Pbar = (P2 + OldP)/2
# Htot = 1 - Pbar^2 - (1-Pbar)^2
# F = 1 - Hexp/Htot
# Fst[1] = mean(F,na.rm=TRUE)
# for (k in 2:length(L)) {
# FF = F[1:L[k]]
# Fst[k] = mean(FF) } # end for k
# return(Fst) } # end function
#
# is.odd <- function(x) x %% 2 != 0
|
[
"ryan.waples@gmail.com"
] |
ryan.waples@gmail.com
|
19d593479108dded5500550fe92c72827816daa4
|
3a638154ca084dba8733612eac324777073a2a99
|
/src/python/pants/backend/awslambda/python/target_types_test.py
|
d850a8ce0b56d9b7dbf23fd8febef46bc197bea9
|
[
"Apache-2.0"
] |
permissive
|
orbital-transfer-survey/pants
|
e3de6f7f30510bf8e691e4b76ee411ac6a82f047
|
478110e155e00c14119f966a289223d6910e4b02
|
refs/heads/master
| 2022-12-22T06:45:52.714055
| 2020-10-07T02:36:54
| 2020-10-07T02:36:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from pants.backend.awslambda.python.target_types import PythonAwsLambdaRuntime
from pants.build_graph.address import Address
from pants.engine.target import InvalidFieldException
@pytest.mark.parametrize(
["runtime", "expected_major", "expected_minor"],
(
# The available runtimes at the time of writing.
# See https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html.
["python2.7", 2, 7],
["python3.6", 3, 6],
["python3.7", 3, 7],
["python3.8", 3, 8],
),
)
def test_to_interpreter_version(runtime, expected_major, expected_minor):
assert (expected_major, expected_minor) == PythonAwsLambdaRuntime(
raw_value=runtime, address=Address.parse("foo/bar:baz")
).to_interpreter_version()
@pytest.mark.parametrize(["invalid_runtime"], (["python88.99"], ["fooobar"]))
def test_runtime_validation(invalid_runtime):
with pytest.raises(InvalidFieldException):
PythonAwsLambdaRuntime(raw_value=invalid_runtime, address=Address.parse("foo/bar:baz"))
|
[
"noreply@github.com"
] |
noreply@github.com
|
62e29a0dd4b8581a50a28cf117c82e4fc35a3597
|
55d3f77fa214901b59a1dfd7e66c52369da1d948
|
/hubi/models/wiz_inherited_sale_advance_payment_inv.py
|
fe815ef4d5942621b91c8e7381e74b5abc318d60
|
[] |
no_license
|
arsenepoutsi/Projet_HUBI
|
22f5fa87579dc801aa8f46f6ce420d67f43398f2
|
a614cfae2535dba86442659a9b6baf49215ef2d4
|
refs/heads/master
| 2020-11-25T08:08:05.122700
| 2019-12-17T08:32:13
| 2019-12-17T08:32:13
| 228,568,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,660
|
py
|
# -*- coding: utf-8 -*-
from odoo import api, fields, models, _
from datetime import date, timedelta, datetime
import time
import calendar
from dateutil.relativedelta import relativedelta
class HubiSaleAdvancePaymentInv(models.TransientModel):
_inherit = "sale.advance.payment.inv"
date_invoice = fields.Date(string="Invoice Date", default=lambda self: fields.Date.today())
@api.multi
def create_invoices(self):
context = dict(self.env.context)
sale_orders = self.env['sale.order'].browse(self._context.get('active_ids', []))
if self.advance_payment_method == 'delivered':
sale_orders.action_invoice_create(dateInvoice=self.date_invoice)
elif self.advance_payment_method == 'all':
sale_orders.action_invoice_create(final=True, dateInvoice=self.date_invoice)
else:
res = super(HubiSaleAdvancePaymentInv, self.with_context(context)).create_invoices()
self.env.cr.commit()
if not self.date_invoice:
date_invoice = time.strftime('%Y-%m-%d')
else:
date_invoice = self.date_invoice
date_due = False
for order in sale_orders:
# Search the invoice
invoices=self.env['account.invoice'].search([('origin','=', order.name)])
for invoice in invoices:
if date_invoice:
invoice.write({'date_invoice':date_invoice})
#invoice.write({'date_invoice':date_invoice})
if invoice.payment_term_id:
pterm = invoice.payment_term_id
pterm_list = pterm.with_context(currency_id=invoice.company_id.currency_id.id).compute(value=1, date_ref=date_invoice)[0]
date_due = max(line[0] for line in pterm_list)
elif invoice.date_due and (date_invoice > invoice.date_due):
date_due = date_invoice
if date_due and not invoice.date_due:
invoice.write({'date_due':date_due})
if order.sending_date and not invoice.sending_date:
invoice.write({'sending_date':order.sending_date})
if order.packaging_date and not invoice.packaging_date:
invoice.write({'packaging_date':order.packaging_date})
if order.pallet_number and not invoice.pallet_number:
invoice.write({'pallet_number':order.pallet_number})
if order.comment and not invoice.comment:
invoice.write({'comment':order.comment})
if order.carrier_id.id and not invoice.carrier_id:
invoice.write({'carrier_id':order.carrier_id.id})
if self._context.get('open_invoices', False):
return sale_orders.action_view_invoice()
return {'type': 'ir.actions.act_window_close'}
#return res
class Wizard_create_invoice_period(models.TransientModel):
_name = "wiz.invoiceperiod"
_description = "Wizard creation of invoice from period"
def add_months(sourcedate,months):
month = sourcedate.month - 1 + months
year = sourcedate.year + month // 12
month = month % 12 + 1
day = min(sourcedate.day,calendar.monthrange(year,month)[1])
return datetime.date(year,month,day)
@api.model
def _default_start(self):
return fields.Date.context_today(self)
@api.model
def _default_finish(self):
finish = datetime.today() + timedelta(days=7)
return fields.Date.context_today(self, timestamp=finish)
@api.model
@api.onchange('periodicity_invoice')
def onchange_periodicity_invoice(self):
finish = datetime.today()
if self.periodicity_invoice == "Weekly":
finish = datetime.today() + timedelta(days=-7)
if self.periodicity_invoice == "Decade":
finish = datetime.today() + timedelta(days=-10)
if self.periodicity_invoice == "Fortnight":
finish = datetime.today() + timedelta(days=-14)
if self.periodicity_invoice == "Monthly":
#start_date = datetime.today()
#days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#finish = start_date + timedelta(days=-days_in_month)
finish = datetime.today()+ relativedelta(months=-1)
self.date_start = finish
periodicity_invoice = fields.Selection([("Daily", "Daily"),("Weekly", "Weekly"),("Decade", "Decade"),
("Fortnight", "Fortnight"),("Monthly", "Monthly")], string="Invoice Period", default='Daily')
#date_start = fields.Date('Start Date', help="Starting date for the creation of invoices",default=_default_finish)
date_start = fields.Date('Start Date', help="Starting date for the creation of invoices",default=lambda self: fields.Date.today())
date_end = fields.Date('End Date', help="Ending valid for the the creation of invoices", default=lambda self: fields.Date.today())
date_invoice = fields.Date(string="Invoice Date", default=lambda self: fields.Date.today())
sale_order_ids = fields.Many2many("sale.order")
message = fields.Text(string="Information")
@api.multi
def create_invoice_period(self):
date_fin = datetime.strptime(self.date_end, "%Y-%m-%d") + timedelta(hours=25) + timedelta(minutes=59) + timedelta(seconds=59)
query_args = {'periodicity_invoice': self.periodicity_invoice,'date_start' : self.date_start,'date_end' : date_fin}
query = """ SELECT sale_order.id
FROM sale_order
INNER JOIN res_partner on res_partner.id = sale_order.partner_id
WHERE invoice_status = 'to invoice'
AND date_order between %(date_start)s AND %(date_end)s
AND periodicity_invoice=%(periodicity_invoice)s """
self.env.cr.execute(query, query_args)
ids = [r[0] for r in self.env.cr.fetchall()]
sale_orders = self.env['sale.order'].search([('id', 'in', ids)])
sale_orders.action_invoice_create(dateInvoice=self.date_invoice)
for order in sale_orders:
# Search the invoice
invoices=self.env['account.invoice'].search([('origin','=', order.name)])
for invoice in invoices:
if order.sending_date and not invoice.sending_date:
invoice.write({'sending_date':order.sending_date})
if order.packaging_date and not invoice.packaging_date:
invoice.write({'packaging_date':order.packaging_date})
if order.pallet_number and not invoice.pallet_number:
invoice.write({'pallet_number':order.pallet_number})
if order.comment and not invoice.comment:
invoice.write({'comment':order.comment})
if order.carrier_id.id and not invoice.carrier_id:
invoice.write({'carrier_id':order.carrier_id.id})
#return {'type': 'ir.actions.act_window_close'}
return sale_orders.action_view_invoice()
|
[
"arsenepoutsi@gmail.com"
] |
arsenepoutsi@gmail.com
|
f653ad44883019ebd1939a890f0678ec06b403af
|
a60179c08931f11a5706f50803120be946eec460
|
/core/migrations/0007_auto_20201214_1459.py
|
def00dc6607e3fdc4d87579ba2bf54f548e3d086
|
[] |
no_license
|
emilbakov/eshop
|
0394cfbd8ee746eff4cdf08db0e44100c1c0169c
|
b7d9c99b0ba729f39bb5d1ef9d3d56ad8e03cf6d
|
refs/heads/master
| 2023-02-12T09:05:49.134110
| 2021-01-13T16:31:18
| 2021-01-13T16:31:18
| 321,112,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
# Generated by Django 2.2.13 on 2020-12-14 12:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0006_item_quantity'),
]
operations = [
migrations.RemoveField(
model_name='item',
name='quantity',
),
migrations.AddField(
model_name='orderitem',
name='quantity',
field=models.IntegerField(default=1),
),
]
|
[
"eb.bakov@gmail.com"
] |
eb.bakov@gmail.com
|
e237cb1926d6eec39d8e1a58d2ba4f1c5d85a9d4
|
a943bb95d76862a8f9f902acf99c2c38e65305d4
|
/codevita2020/cvpq/f.py
|
8276bb741b200d6ec888d5846cd253a9f9ea23da
|
[] |
no_license
|
init-13/codevita
|
bd742e7007023645063d673adf84825268de6495
|
4c96ab5106ecee12ba251a2d95626328772208b9
|
refs/heads/main
| 2023-01-21T08:57:36.512163
| 2020-11-29T15:29:12
| 2020-11-29T15:29:12
| 316,981,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
def ifp(s):
for i in range(len(s)//2):
if s[i]!=s[-(i+1)]:
return False
return True
#print(ifp('nayan'))
x=list()
y=list()
s=input()+'#'
#ml=list()
c1=s[0]
for i in range(len(s)):
if s[i]==c1:
x.append(i+1)
#print(x)
for i in x:
c2=s[i]
#print(c2)
for j in range(i,len(s)-1):
#print(c2,s[j],s[j+1],s[-2])
if s[j]==c2 and s[j+1]==s[-2]:
y.append([i,j+1])
for i in y:
a=i[0]
b=i[1]
if(ifp(s[:a]) and ifp(s[a:b]) and ifp(s[b:-1])):
print(s[:a])
print(s[a:b])
print(s[b:-1])
break
|
[
"noreply@github.com"
] |
noreply@github.com
|
a9fe9efe456c2cceb50748088e219aef5fb4799f
|
0ce91fb1b25ec0c740284e4a9dd2da21cbc6ec2a
|
/cogs/utils/db.py
|
64f87802a6ebf89a90bdaa541334ff2b92fd3a13
|
[] |
no_license
|
kkourtis/bowiebot-discord-bot
|
be4c50162cdf52dfba1f5c90364306e34e61f00e
|
2e62586695d384b671ff1ed6da8aaf76946d643b
|
refs/heads/main
| 2023-06-21T16:42:48.064036
| 2021-07-21T22:28:49
| 2021-07-21T22:28:49
| 303,024,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,984
|
py
|
import asyncio
import config
import asyncpg
async def connect():
return await asyncpg.connect(user=config.DB_USER,
password=config.DB_PASSWORD, database=config.DB,
host=config.DB_HOST)
async def create_pool(db_name):
return await asyncpg.create_pool(user=config.DB_USER, password=config.DB_PASSWORD, database=db_name, host=config.DB_HOST, min_size=10, max_size = 20)
async def insert_guild(guild_id, conn):
guild_id = str(guild_id)
q = f"""INSERT INTO guilds (guild_id, sound, roles, images, tags, logs) VALUES ({guild_id}, True, True, True, True, True);"""
await conn.execute(q)
q = f"insert into logs (guild_id, member_join, member_leave, channel_update, message_delete, member_ban, member_unban) " \
f"VALUES ({guild_id}, False, False, False, False, False, False)"
await conn.execute(q)
q = f"""insert into roles (guild_id) VALUES ({guild_id});"""
await conn.execute(q)
q = f"""insert into sound (guild_id, cooldown_enabled) VALUES ({guild_id}, False);"""
await conn.execute(q)
async def remove_guild(guild_id, conn):
guild_id = str(guild_id)
await conn.execute(f"""DELETE FROM guilds WHERE guild_id = '{guild_id}';""")
await conn.execute(f"""DELETE FROM logs where guild_id = '{guild_id}';""")
await conn.execute(f"""DELETE FROM roles where guild_id = '{guild_id}';""")
await conn.execute(f"""DELETE FROM sound where guild_id = '{guild_id}';""")
async def add_remove_missing_guilds(discord_guilds, conn=None):
if conn is None:
conn = await connect()
q = "select guild_id from guilds"
db_guilds = await conn.fetch(q)
db_ids = [guild['guild_id'] for guild in db_guilds]
discord_ids = [str(guild.id) for guild in discord_guilds]
added_guilds = 0
removed_guilds = 0
for guild_id in db_ids:
if guild_id not in discord_ids:
await remove_guild(guild_id, conn)
removed_guilds += 1
for guild_id in discord_ids:
if guild_id not in db_ids:
await insert_guild(guild_id, conn)
added_guilds += 1
await conn.close()
print(f'Guilds removed from Postgres: {removed_guilds}')
print(f'Guilds added to Postgres: {added_guilds}')
async def add_missing_guilds(discord_guilds, conn, db_guilds=None, db_ids=None,discord_ids=None):
if db_guilds is None:
db_guilds = await conn.fetch("select guild_id from guilds")
if db_ids is None:
db_ids = [guild['guild_id'] for guild in db_guilds]
if discord_ids is None:
discord_ids = [str(guild.id) for guild in discord_guilds]
added_guilds = 0
for guild_id in discord_ids:
if guild_id not in db_ids:
await insert_guild(guild_id, conn)
added_guilds += 1
print(f'{added_guilds} guilds added to Postgres')
return added_guilds
|
[
"noreply@github.com"
] |
noreply@github.com
|
ce9aa44b85bdc8d90fa959bf45fb94eac2f67229
|
76c78356a002bef01e162d8e2ed5592497f27c57
|
/imooc/test/test_urllib_request.py
|
3e9870fc2c643af0210fb6725f2af88b938ce0cd
|
[] |
no_license
|
Happy2012/Python
|
177b2c6e1854bcae059486b077a43ace95ac50a4
|
7c11ad4a7bb50d260c97b87dbe2536ce526d03e2
|
refs/heads/master
| 2020-04-13T15:32:17.443228
| 2018-12-27T13:46:20
| 2018-12-27T13:46:20
| 163,294,526
| 0
| 0
| null | 2018-12-27T13:46:21
| 2018-12-27T12:54:03
|
Python
|
UTF-8
|
Python
| false
| false
| 921
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = 'test_urllib_request.py'
__author__ = 'Amanda'
__mtime__ = '2018-12-24 11:58'
__summary__ = '网页下载的三种方法'
"""
import urllib.request
import http.cookiejar
url = "http://www.baidu.com"
print("第一种方法")
response1 = urllib.request.urlopen(url)
print(response1.getcode())
print(len(response1.read()))
print("第二种方法")
request = urllib.request.Request(url)
request.add_header("user-agent","Mozilla/5.0")
response2 = urllib.request.urlopen(request)
print(response2.getcode())
print(len(response2.read()))
print("第三种方法")
cj = http.cookiejar.CookieJar()
pro = urllib.request.HTTPCookieProcessor(cj)
opener = urllib.request.build_opener(pro)
urllib.request.install_opener(opener)
response3 = urllib.request.urlopen(url)
print(response3.getcode())
print(cj)
print('cookie对象打印完毕\n')
print(response3.read())
|
[
"38401010+seeaman@users.noreply.github.com"
] |
38401010+seeaman@users.noreply.github.com
|
20d34b2ce443116beb8f6b4bbc0a2d85826ec258
|
1949e30c64d04f0da4fc483e4cef552c58e0c8ba
|
/pyrothrottle/reqrate_base.py
|
a1b9d07836d56cf209722eed26430b2926dfde44
|
[
"MIT"
] |
permissive
|
TrendingTechnology/pyrothrottle
|
d13929479f7d8d80c90843a3e40a10474f035db9
|
89550a97c809ead0cda9d932a5ac87237fcc5032
|
refs/heads/main
| 2023-06-30T03:03:55.753144
| 2021-08-07T11:52:45
| 2021-08-07T11:52:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
from typing import Optional, Union, Callable, Any, List
from pyrogram import Client
from .base import Base
from .misc import Event, get_modulename
class ReqrateBase(Base):
def __init__(
self,
interval: Union[float, int, Callable[[int], Union[int, float]]],
amount: Union[int, Callable[[int], int]],
fallback: Optional[Callable[[Client, Event, List[float]], Any]] = None
):
super().__init__(interval, fallback)
self.amount = amount
self.last_processed = [] if 'global' in get_modulename(__name__) else {}
def get_amount(self, uid: Optional[int] = None) -> int:
if uid and callable(self.amount):
return self.amount(uid)
return self.amount
|
[
"denbartolomee@gmail.com"
] |
denbartolomee@gmail.com
|
b6ccd35c7399ff11fdcb2a5614e2653c1f7c18fa
|
f3c42fc2480c7abf350a5c9bb785c5990a11a864
|
/loader.py
|
2d5776494acbd34dfa5be4028a9743e9f9a92733
|
[] |
no_license
|
rromildosf/dental_radiograph_augmentation
|
c40a4503f15bd6ee004a798b1f88e567e627dda5
|
1f49d6439647211460f6a615f45d5d4b4bfb47bc
|
refs/heads/master
| 2020-03-30T13:25:09.716464
| 2018-10-09T18:13:53
| 2018-10-09T18:13:53
| 151,271,939
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
import os
import numpy as np
imgs = []
def get_names( array, ext ):
newarr = []
for i in [i.split('.') for i in array]:
if( i[1].lower() == ext.lower() ):
newarr.append( i[0] )
return newarr
def get_coords( imgs_path, annotations_path, img_ext='JPEG', ann_ext='txt' ):
imgs = get_names( os.listdir( imgs_path ), img_ext )
ants = get_names( os.listdir( annotations_path ), ann_ext )
pairs = []
for a in ants:
for i in imgs:
if( a == i ):
ann = load_file( '{}/{}.{}'.format(annotations_path, a, ann_ext ) )
pairs.append( (i +'.'+img_ext, ann) )
# print( pairs )
return pairs
def load_file( path ):
coords = []
with open( path, 'r' ) as f:
l = 'l'
while l != '':
l = f.readline().strip()
if( l == '' ): break
l = l.split(' ')
if( len(l) > 1 ): # cordenada válida
coords.append( [ int(i) for i in l] )
return coords
# coords = get_coords( './caries/imgs/', './caries/annotations/' )
# # print( load_file( './caries/annotations/1.txt') )
# for i in coords:
# print( i )
|
[
"rromildosf@gmail.com"
] |
rromildosf@gmail.com
|
c38bfa420c5312c6d22d880b91c8f4ebc6a9c36c
|
7ea394345fe36ef0574ee0eeaa6dca5030c2f80e
|
/crypto_pals/set1/build_subtable.py
|
02d3fc7cfae9c5015e793f3a85cdbebcbcce11fa
|
[] |
no_license
|
becgabri/CryptoPals
|
97c90c47365fcbe57a3a5d9fd7b7b78f2341ed99
|
c80f76b65c746e6b1b677fb7ddc91b116bb2c220
|
refs/heads/master
| 2020-03-26T10:17:33.447193
| 2019-09-06T02:41:08
| 2019-09-06T02:41:08
| 144,790,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,023
|
py
|
import os
import json
import math
from crypto_pals.set1 import GF28
filename = "AES_sub_bytes.txt"
def create_table():
with open(filename, 'w') as holder:
subst = {}
inverse = {}
for num in range(256):
num = GF28.GF28(num, bypass_modcheck=True)
# works with 0 b/c of hack in GF28.py
byte_stuff = num.inverse()
res = GF28.GF28()
for i in range(5):
res = res + byte_stuff
byte_stuff.rotate_bit()
res = res + GF28.GF28(0x63)
if num.number > 256 or res.number > 256:
raise ValueError("Num or res out of range")
subst[num.number] = res.number
inverse[res.number] = num.number
holder.write(json.dumps([subst, inverse]))
def main():
if not os.path.exists(filename):
create_table()
else:
print("Table already exists. Quitting...")
return
if __name__ == "__main__":
main()
|
[
"becgabri@umich.edu"
] |
becgabri@umich.edu
|
f17721de52e351a91edb7c7d3919db5f41100e00
|
377eee8d6e25d3ce358639dffc1151a865de3bd7
|
/customer/migrations/0011_auto_20201206_0247.py
|
8c33f6e35e6c44a13c2bee3c4e1898d3a572a307
|
[] |
no_license
|
bushmusi/DBS-WEB-DJANGO
|
2f7c88362eed7927b91b9ac93ee01e8cf26de1f9
|
93ef44a0c8ba16404723c9cd85de044a9e9eb092
|
refs/heads/master
| 2023-06-25T08:21:53.755474
| 2021-07-22T11:09:03
| 2021-07-22T11:09:03
| 312,900,013
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 607
|
py
|
# Generated by Django 3.1.3 on 2020-12-05 23:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customer', '0010_venue'),
]
operations = [
migrations.AddField(
model_name='house',
name='lat',
field=models.DecimalField(blank=True, decimal_places=14, max_digits=14, null=True),
),
migrations.AddField(
model_name='house',
name='longt',
field=models.DecimalField(blank=True, decimal_places=14, max_digits=14, null=True),
),
]
|
[
"bushera.mestofa@gmail.com"
] |
bushera.mestofa@gmail.com
|
0e83bba30b60601ca6e672349167f4444604c75d
|
42a7702986793423ce6a0ad301982f5d8677d271
|
/assignments/line/draw.py
|
a4db693dfa1f860acf414f2a68354f687bc203f7
|
[] |
no_license
|
kazijamal/computer-graphics
|
63936664dde9dfdd0d97c82e1de4ad2a8596c691
|
1eaf8fa746a9115be6eeeda41f8ee3a5d515dbd7
|
refs/heads/master
| 2022-11-18T21:43:35.450847
| 2020-06-25T22:27:48
| 2020-06-25T22:27:48
| 274,300,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,759
|
py
|
from display import *
def draw_line( x0, y0, x1, y1, screen, color ):
x = int(x0)
y = int(y0)
a = y1 - y0
b = (x1 - x0) * -1
if b == 0:
if y1 > y0:
while y <= y1:
plot(screen, color, x, y)
y += 1
else:
while y1 <= y:
plot(screen, color, x, y)
y -= 1
else:
m = a / (-1 * b)
d = 2 * a + b
if 0 <= m and m <= 1:
if x0 < x1:
while x <= x1:
plot(screen, color, x, y)
if d > 0:
y += 1
d += 2 * b
x += 1
d += 2 * a
else:
d = -2 * a - b
while x > x1:
plot(screen, color, x, y)
if d > 0:
y -= 1
d -= 2 * b
x -= 1
d -= 2 * a
elif m > 1:
if x0 < x1:
d = 2 * b + a
while y <= y1:
plot(screen, color, x, y)
if d < 0:
x += 1
d += 2 * a
y += 1
d += 2 * b
else:
d = -2 * b + -1 * a
while y > y1:
plot(screen, color, x, y)
if d < 0:
x -= 1
d -= 2 * a
y -= 1
d -= 2 * b
elif m < -1:
if y0 < y1:
d = -1 * a + 2 * b
while y <= y1:
plot(screen, color, x, y)
if d > 0:
x -= 1
d -= 2 * a
y += 1
d += 2 * b
else:
d = a - 2 * b
while y > y1:
plot(screen, color, x, y)
if d > 0:
x += 1
d += 2 * a
y -= 1
d -= 2 * b
elif m >= -1:
if x0 > x1:
d = -2 * a + b
while x > x1:
plot(screen, color, x, y)
if d < 0:
y += 1
d += 2 * b
x -= 1
d -= 2 * a
else:
d = 2 * a - b
while x <= x1:
plot(screen, color, x, y)
if d < 0:
y -= 1
d -= 2 * b
x += 1
d += 2 * a
|
[
"jamalkazi02@gmail.com"
] |
jamalkazi02@gmail.com
|
836e82e88a9493600c42a0ea873c9ff7f293adb0
|
c0d7269d96c2d70bc14d1fea8e2d3b0504675d7e
|
/day7_remove_duplicates.py
|
008188d8ca290050c63f1c003fe4ccd6f7d5aafe
|
[] |
no_license
|
ksamrish/30-day-challenge
|
b061109acc8cb5d4259e1f55a80f2ebb62ca4431
|
1a3073616ff7d4f2284004a33d1418bff162d6aa
|
refs/heads/master
| 2021-05-20T15:53:53.295912
| 2020-04-13T10:57:09
| 2020-04-13T10:57:09
| 252,355,940
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
inp = list(input())
l = []
for i in range(len(inp)):
if(l.count(inp[i])<1):
l.append(inp[i])
print(*l,sep='')
//another method::
inp = input()
l = []
l.append(inp[0])
for i in range(len(inp)):
for j in range(i):
if(inp[i]==inp[j]):
break
if(j==i-1):
l.append(inp[i])
print(*l,sep='')
|
[
"noreply@github.com"
] |
noreply@github.com
|
cf9c581862c21bd2f3f69e85f597ea1360230968
|
7b4c6db994210422b45cebba2b6e75154e0701a7
|
/DigitalCallCenterPlatform/NewBusinessScripts/newbusiness.py
|
d7aea351a021aed4082124a36c6f1dc0cd8b6af2
|
[
"MIT"
] |
permissive
|
OpreaSergiu/DigitalCallCenterPlatform
|
2cb3c9ebd328850c7183feabe4100895172f6ece
|
3d37e42e722a8e7b58b1b0fa3ce8b665d10008b5
|
refs/heads/master
| 2020-04-03T20:47:04.998673
| 2018-12-18T10:10:47
| 2018-12-18T10:10:47
| 155,558,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,170
|
py
|
from module import *
""" Global sql string definitions
"""
sql_insert_account = """
INSERT INTO [dbo].[WorkPlatformModels] ([ClientReference], [ClientID], [Name], [AssignAmount], [TotalReceived], [OtherDue], [TotalDue], [Desk], [Status], [PalacementDate], [LastWorkDate])
VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')
"""
sql_get_account_number = """
SELECT Id, AssignAmount FROM WorkPlatformModels WHERE ClientReference = '%s' AND ClientID = '%s' AND Name = '%s' AND Status = 'NEW'
"""
sql_insert_invoice = """
INSERT INTO [dbo].[InvoiceModels] ([AccountNumber], [Invoice], [Status], [Amount], [Due], [InvoiceDate], [DueDate], [PaymentRequestFlag], [PostedFlag]) VALUES ('%s', '%s', 'OPEN','%s', '%s','%s', '%s', '0', '0')
"""
sql_insert_phone = """
INSERT INTO [dbo].[PhoneModels] ([AccountNumber], [Prefix], [PhoneNumber], [Extension]) VALUES ('%s', '%s', '%s', '%s')
"""
sql_insert_address = """
INSERT INTO [dbo].[AddressModels] ([AccountNumber], [FullName], [Contact], [Address], [Email], [Country], [TimeZone]) VALUES ('%s', '%s', '%s', '%s','%s', '%s', '%s')
"""
sql_insert_note = """
INSERT INTO [dbo].[NotesModels] ([AccountNumber], [SeqNumber], [UserCode], [ActionCode], [Status], [Desk], [Note], [NoteDate]) VALUES ('%s', '%s', '%s', '%s','%s', '%s', '%s', '%s')
"""
sql_update_account = """
UPDATE WorkPlatformModels SET AssignAmount = '%s', TotalDue = '%s' WHERE Id = '%s'
"""
if __name__ == '__main__':
try:
totalAmountProcessed = 0
import datetime
dt = datetime.datetime.today()
now = dt.strftime("%Y-%m-%d %H:%M:%S")
if len(sys.argv) > 1:
args = sys.argv[1]
arguments = args.split("\t")
file = arguments[0]
flag = arguments[1]
file_inventory = []
c, trust_list = parseFile(file)
for elem in trust_list:
# [cli_ref, client_id, name, desk, prefix_1, phone_no_1, extenstion_1, prefix_2, phone_no_2, extenstion_2, invoice_no, invoice_amount, invoice_due, invoice_date, invoice_due_date, full_name, contct, address, email, contry, timezone]
if elem[0] not in file_inventory:
#[ClientReference], [ClientID], [Name], [AssignAmount], [TotalReceived], [OtherDue], [TotalDue], [Desk], [Status], [PalacementDate], [LastWorkDate]
cursor.execute(sql_insert_account % (elem[0], elem[1], elem[2], 0, 0, 0, 0, elem[3], 'NEW', now, now))
if flag == "Y":
connection.commit()
else:
connection.rollback()
file_inventory.append(elem[0])
cursor.execute(sql_get_account_number % (elem[0], elem[1], elem[2]))
results = cursor.fetchone()
acc_number = results[0]
assign_amount = results[1]
#[AccountNumber], [Prefix], [PhoneNumber], [Extension]
cursor.execute(sql_insert_phone % (acc_number, elem[4], elem[5], elem[6]))
cursor.execute(sql_insert_phone % (acc_number, elem[7], elem[8], elem[9]))
#[AccountNumber], [FullName], [Contact], [Address], [Email], [Country], [TimeZone]
cursor.execute(sql_insert_address % (acc_number, elem[15], elem[16], elem[17], elem[18], elem[19], elem[20]))
#[AccountNumber], [SeqNumber], [UserCode], [ActionCode], [Status], [Desk], [Note], [NoteDate]
cursor.execute(sql_insert_note % (acc_number, 1, 'SYS', 'NEW', 'NEW', elem[3], 'System: New account placed!',now))
if flag == "Y":
connection.commit()
else:
connection.rollback()
cursor.execute(sql_get_account_number % (elem[0], elem[1], elem[2]))
results = cursor.fetchone()
acc_number = results[0]
assign_amount = results[1]
#[AccountNumber], [Invoice], [Status], [Amount], [Due], [InvoiceDate], [DueDate]
cursor.execute(sql_insert_invoice % (acc_number, elem[10], elem[11], elem[12], elem[13], elem[14]))
totalAmountProcessed += elem[12]
new_assign_amount = assign_amount + elem[12]
cursor.execute(sql_update_account % (new_assign_amount, new_assign_amount, acc_number))
if flag == "Y":
connection.commit()
else:
connection.rollback()
print(len(file_inventory), c, totalAmountProcessed)
connection.close()
except:
connection.rollback()
print ("Exception: ", sys.exc_info()[0])
raise
|
[
"sergiuandreioprea@gmail.com"
] |
sergiuandreioprea@gmail.com
|
6133de21acc69badb689577af432bce59a5def07
|
14cef240063145bba81d7ac4bd25ed671585527c
|
/core/database/crud/bottify_user.py
|
4433dceb8bad52c68591d531e46bc649e45080ee
|
[] |
no_license
|
Kroonjay/Bottify
|
f139d3cf6753c36b85ec061888a88c9f82dfd715
|
c30c9cf924d19d053b0f678eb9d69143398ea83a
|
refs/heads/main
| 2023-07-30T02:10:08.878698
| 2021-09-29T16:30:35
| 2021-09-29T16:30:35
| 411,117,108
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,716
|
py
|
import logging
from databases import Database
from uuid import UUID
from core.security.password import get_password_hash
from core.models.user import BottifyUserInModel, BottifyUserModel
from core.database.tables.bottify_user import get_bottify_user_table
from core.database.helpers import build_model_from_row
user_table = get_bottify_user_table()
async def read_user_by_id(database: Database, user_id: int):
query = user_table.select().where(user_table.c.id == user_id).limit(1)
row = await database.fetch_one(query)
return build_model_from_row(row, BottifyUserModel)
async def read_user_by_guid(database: Database, guid_in: UUID):
if isinstance(guid_in, UUID):
user_guid = guid_in
elif isinstance(guid_in, str):
try:
user_guid = UUID(guid_in)
except ValueError as e:
logging.error(f"Read User by Guid:Failed to Parse UUID from String")
return None
else:
logging.error(
f"Read User by Guid:User GUID must be either UUID or String:Got: {type(guid_in)}"
)
return None
query = user_table.select().where(user_table.c.guid == user_guid).limit(1)
row = await database.fetch_one(query)
return build_model_from_row(row, BottifyUserModel)
async def read_user_by_username(database: Database, username: str):
if not isinstance(username, str):
logging.error(
f"Read User by Username:Username Must be type String:Got: {type(username)}"
)
query = user_table.select().where(user_table.c.username == username).limit(1)
row = await database.fetch_one(query)
return build_model_from_row(row, BottifyUserModel)
async def create_user(database: Database, user_in: BottifyUserInModel):
query = user_table.insert()
hashed_password = get_password_hash(user_in.password)
success = False
if not hashed_password:
logging.error(
f"Create User Error:Failed to Hash Password:User Data: {user_in.json()}"
)
return success
user_data = user_in.dict(exclude={"password"})
user_data.update({"hashed_password": hashed_password})
await database.execute(query, values=user_data)
success = True
return success
async def read_users(database: Database, limit: int):
if not isinstance(limit, int):
logging.error(
f"Read Users Error:Limit Param Must be an Integer:Got: {type(limit)}"
)
query = user_table.select().limit(limit)
users = []
async for row in database.iterate(query):
users.append(build_model_from_row(row, BottifyUserModel))
if not users:
logging.error(f"Read Users Error:Failed to Read Any Users")
return users
|
[
"scoutfullner96@gmail.com"
] |
scoutfullner96@gmail.com
|
0e58b2eb4476360bd160080cb9a03e7fcad7a6e2
|
490ffe1023a601760ae7288e86723f0c6e366bba
|
/kolla-docker/zun-ui/zun_ui/enabled/_2340_admin_container_providervms_panel.py
|
4812c633141e50e75d7f5283c994e5efb453fe51
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/Cloud-User-Management
|
89696a5ea5d2f95191327fbeab6c3e400bbfb2b8
|
390988bf4915a276c7bf8d96b62c3051c17d9e6e
|
refs/heads/master
| 2022-11-19T10:09:36.662906
| 2018-11-07T20:28:31
| 2018-11-07T20:28:31
| 281,786,345
| 0
| 0
| null | 2020-07-22T21:26:07
| 2020-07-22T21:26:06
| null |
UTF-8
|
Python
| false
| false
| 964
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# The slug of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'container.providervms'
# The slug of the panel group the PANEL is associated with.
PANEL_GROUP = 'container'
# The slug of the dashboard the PANEL associated with. Required.
PANEL_DASHBOARD = 'admin'
# Python panel class of the PANEL to be added.
ADD_PANEL = 'zun_ui.content.container.providervms.panel.Providervms'
|
[
"Mr.Qinlichao@hotmail.com"
] |
Mr.Qinlichao@hotmail.com
|
44db840123ae75c28b1835be712d45ca9bf7511a
|
5f645c0549e0287a826b2fb89af8bd74da889dfa
|
/bj11438_sol.py
|
b46a4b1699448ad27cac1264dd3336fd7bc47bf3
|
[] |
no_license
|
mino-park7/algorithm_study
|
f250e768af67942b41ebd4ad002581a46317713b
|
b448563cdb11759e0bd3f320e703a0fe6799dd7b
|
refs/heads/main
| 2023-01-24T05:45:12.224107
| 2020-12-08T04:25:53
| 2020-12-08T04:25:53
| 317,139,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
import sys
from math import log2
from collections import deque
import time
now = time.time()
sys.stdin = open("bj11438.txt", "r")
N=int(sys.stdin.readline())
logN=int(log2(N)+1)
tree=[[] for _ in range(N+1)]#각 노드의 부모노드 저장
for _ in range(N-1):
p,c=map(int,sys.stdin.readline().split())
tree[c].append(p)
tree[p].append(c)
p_list=[0 for _ in range(N+1)]#부모노드 저장
depth=[0 for _ in range(N+1)]#부모노드 개수
p_check=[True for _ in range(N+1)]#DFS를 위해 사용
#dfs로 모든 노드의 부모노드 찾기
q=deque()
q.append(1)
while q:
p=q.popleft()
p_check[p]=False
for i in tree[p]:
if p_check[i]:
q.append(i)
p_list[i]=p
depth[i]=depth[p]+1#깊이도 같이 저장해준다.
#2^k번째 부모노드 저장
#log2 1000000=16.6096...
DP=[[0 for _ in range(logN)] for i in range(N+1)]
#초기화
for i in range(N+1):
DP[i][0]=p_list[i]
for j in range(1,logN):
for i in range(1,N+1):
# if DP[DP[i][j-1]][j-1]!=0:
DP[i][j]=DP[DP[i][j-1]][j-1]
M=int(sys.stdin.readline())
for _ in range(M):
a, b = map(int, sys.stdin.readline().split())
if depth[a]>depth[b]:
a,b=b,a
#둘의 차이를 구하여 레벨 맞춰주기
dif=depth[b]-depth[a]
#b의 dif조상 찾기
for i in range(logN):
if dif & 1<<i: #ex dif의 11번째 부모노드를 구할 경우 경우 dif = 1011(2) b=DP[DP[DP[b][0]][1]][3]
b=DP[b][i]
if a==b:
print(a)
continue
for i in range(logN-1,-1,-1):
if DP[a][i]!=DP[b][i]:#처음으로 같지않은 부분으로가 가서 다시 검색
a=DP[a][i]
b=DP[b][i]
print(DP[b][0])
print(time.time() - now)
|
[
"minho.park2115@gmail.com"
] |
minho.park2115@gmail.com
|
bb4b53307e8d8ff92e50622171f4014c5623675c
|
a51b4aed04783d805e78b9db0935eb32b7a42c6d
|
/launcher/shopfloord.py
|
c0d7814643abf78eda2bdf26b283dfde286010d0
|
[] |
no_license
|
caoquan/learngit
|
0866f8c183e34d6faed0f98a70dac9874054e071
|
fe31e00f56aee179276ba37d83504a8d2ffd6838
|
refs/heads/master
| 2021-01-24T06:31:27.161954
| 2015-04-30T04:55:11
| 2015-04-30T04:55:11
| 34,826,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,314
|
py
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Python twisted's module creates definition dynamically 7
# pylint: disable=E1101
"""Shopfloor daemon.
The launcher is a daemon that manages underlying services. Including HTTPD
frontend, Shopfloor server FastCGI, update server and log minitor service.
Example:
# Run the daemon in user mode with test data set
./shopfloord.py -t
"""
import logging
import optparse
import os
import signal
from twisted.internet import error
from twisted.internet import reactor
import factory_common # pylint: disable=W0611
from cros.factory.shopfloor.launcher import constants
from cros.factory.shopfloor.launcher import env
from cros.factory.shopfloor.launcher import ShopFloorLauncherException
from cros.factory.shopfloor.launcher import utils
from cros.factory.shopfloor.launcher.commands import LauncherCommandFactory
# The string for detecting if we're inside a .par file
_RESOURCE_FACTORY_PAR = '/resources/factory.par'
def Run(config_file):
"""ShopFloor daemon loop."""
utils.UpdateConfig(config_file)
logging.info('Command port: %d', constants.COMMAND_PORT)
reactor.listenTCP(constants.COMMAND_PORT, LauncherCommandFactory())
# Start twisted, and prevent reactor from install signal handlers.
reactor.run(installSignalHandlers=0)
def main():
parser = optparse.OptionParser()
parser.add_option('-c', '--config', dest='yaml_config',
default='shopfloor.yaml')
parser.add_option('-t', '--test', dest='test_run', action='store_true',
default=False)
parser.add_option('-l', '--local', dest='local_dir', action='store_true',
default=False)
(options, args) = parser.parse_args()
if args:
parser.error('Invalid args: %s' % ' '.join(args))
log_format = '%(asctime)s %(levelname)s '
log_verbosity = logging.INFO
if options.test_run:
log_format += '(%(filename)s:%(lineno)d) '
log_verbosity = logging.DEBUG
log_format += '%(message)s'
logging.basicConfig(level=log_verbosity, format=log_format)
server_path = os.path.realpath(__file__)
search_dirs = []
# Set runtime_dir when running locally.
if options.test_run and not server_path.startswith(
constants.SHOPFLOOR_INSTALL_DIR):
if _RESOURCE_FACTORY_PAR in server_path:
env.runtime_dir = server_path[0:server_path.index(_RESOURCE_FACTORY_PAR)]
else:
env.runtime_dir = os.path.join(os.path.dirname(server_path), 'testdata')
search_dirs.append(os.path.dirname(server_path))
search_dirs += [env.runtime_dir, env.GetResourcesDir()]
config_file = utils.SearchFile(options.yaml_config, search_dirs)
if config_file and os.path.isfile(config_file):
Run(config_file)
else:
raise ShopFloorLauncherException('Launcher YAML config file not found: %s' %
options.yaml_config)
def ReactorStop():
"""Forces reactor to stop."""
logging.info('Stopping reactor.')
try:
reactor.stop()
except error.ReactorNotRunning:
pass
def DelayedStop(count_down):
"""Waits for services to end and stops the reactor.
Args:
count_down: seconds to wait before force shutdown.
"""
# Forces stop when count to zero
if count_down <= 0:
ReactorStop()
for svc in env.launcher_services:
if svc.subprocess:
logging.info('Wait for %s ... %d', svc.name, count_down)
reactor.callLater(1, DelayedStop, count_down - 1)
ReactorStop()
def SignalHandler(sig, dummy_frame):
"""Initiates stopping sequence.
Launcher holds multiple subprocess, runs the event loop in twisted reactor,
hence it could not stop gracefully with system before shutdown handler. The
correct sequence is:
SIG[TERM|INT]
--> stop subprocesses (call utils.StopServices())
--> wait for subprocesses end (reactor.callLater())
--> stop reactor and ignore not running error.
"""
logging.info('Received signal %d', sig)
logging.info('Stopping system...')
utils.StopServices()
reactor.callLater(3, DelayedStop, 60)
if __name__ == '__main__':
signal.signal(signal.SIGTERM, SignalHandler)
signal.signal(signal.SIGINT, SignalHandler)
main()
|
[
"jack.cao2@quantacn.com"
] |
jack.cao2@quantacn.com
|
726c1e415164f7e03ee100461f5f34b0c23e53e5
|
5d26d88c25713c0ddeac3b67c681753fb50a6570
|
/djsphinx/wrapper.py
|
7d124a7ee3db8fdc846c4e23b8a3a7b063594ee1
|
[
"MIT"
] |
permissive
|
PixxxeL/djsphinx
|
9faefdcaae45cdb233196a7356c86c25bcd27630
|
c3855e4950b2641fa46197e0c423441b0d4ba047
|
refs/heads/master
| 2021-12-09T17:31:52.418502
| 2021-12-02T06:49:34
| 2021-12-02T06:49:34
| 64,293,525
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
import sys
try:
from django.conf import settings
except ImportError:
settings = object()
py_ver = sys.version_info
if py_ver[0] > 2:
from .sphinxapi_py3 import *
else:
from .sphinxapi import *
TOTAL_LIMIT = 10000
def sphinx_search(query_str, index=None, offset=0, limit=TOTAL_LIMIT):
host = getattr(settings, 'SPHINX_HOST', '127.0.0.1')
port = getattr(settings, 'SPHINX_PORT', 9312)
index = index or getattr(settings, 'SPHINX_DEFAULT_INDEX', 'index_name')
cl = SphinxClient()
cl.SetServer(host, port)
cl.SetMatchMode(SPH_MATCH_ALL)
cl.SetLimits(offset, limit)
res = cl.Query(query_str, index)
if res and 'matches' in res:
return list(map(lambda m: m['id'], res['matches']))
else:
return []
|
[
"ivan.n.sergeev@gmail.com"
] |
ivan.n.sergeev@gmail.com
|
9d389c90d9298defc9873a7c3bf805c545897b58
|
82c8e4ed6dc6a2ca4a72007788e4a40fa72cad87
|
/plugin.video.1channel/waldo/indexes/1Channel_index.py
|
7e9315a148daada1022a5e0308ea438a51f53d5a
|
[] |
no_license
|
burhantural/pampereo-xbmc-plugins
|
691bad78b88ee0afd37158bc229024007d028382
|
b3f51c09574258b31c4b52176cce66d0a3a5ba0c
|
refs/heads/master
| 2016-08-11T19:09:31.859073
| 2013-08-17T17:17:02
| 2013-08-17T17:17:02
| 44,773,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,068
|
py
|
import os
import re
import sys
import urllib2
import xbmcgui
import xbmcplugin
import HTMLParser
from t0mm0.common.addon import Addon
addon = Addon('plugin.video.waldo', sys.argv)
BASE_URL = 'http://www.1channel.ch'
display_name = '1Channel'
#Label that will be displayed to the user representing this index
tag = '1Channel'
#MUST be implemented. Unique 3 or 4 character string that will be used to
#identify this index
required_addons = []
#MUST be implemented. A list of strings indicating which addons are required to
#be installed for this index to be used.
#For example: required_addons = ['script.module.beautifulsoup', 'plugin.video.youtube']
#Currently, xbmc does not provide a way to require a specific version of an addon
def get_settings_xml():
'''
Must be defined. This method should return XML which describes any Waldo
specific settings you would like for your plugin. You should make sure that
the ``id`` starts with your tag followed by an underscore.
For example:
xml = '<setting id="ExI_priority" '
xml += 'type="number" label="Priority" default="100"/>\\n'
return xml
The settings category will be your plugin's :attr:`display_name`.
Returns:
A string containing XML which would be valid in
``resources/settings.xml`` or boolean False if none are required
'''
return False
def get_browsing_options():#MUST be defined
'''
Returns a list of dicts. Each dict represents a different method of browsing
this index. The following keys MUST be provided:
'name': Label to display to the user to represent this browsing method
'function': A function (defined in this index) which will be executed when
the user selects this browsing method. This function should describe
and add the list items to the directory, and assume flow control from
this point on.
Once the user indicates the content they would like to search the providers
for (usually via selecting a list item), plugin.video.waldo should be called
with the following parameters (again usually via listitem):
mode = 'GetAllResults'
type = either 'movie', 'tvshow', 'season', or 'episode'
title = The title string to look for
year = The release year of the desired movie, or premiere date of the
desired tv show.
imdb = The imdb id of the movie or tvshow to find sources for
tvdb = The tvdb id of the movie or tvshow to find sources for
season = The season number for which to return results.
If season is supplied, but not episode, all results for that season
should be returned
episode: The episode number for which to return results
'''
option_1 = {}
option_1['name'] = 'Tv Shows'
option_1['function'] = 'BrowseListMenu'
option_1['kwargs'] = {'section':'tv'}
option_2 = {}
option_2['name'] = 'Movies'
option_2['function'] = 'BrowseListMenu'
option_2['kwargs'] = {'section':'movies'}
return [option_1,option_2]
def callback(params):
'''
MUST be implemented. This method will be called when the user selects a
listitem you created. It will be passed a dict of parameters you passed to
the listitem's url.
For example, the following listitem url:
plugin://plugin.video.waldo/?mode=main§ion=tv&api_key=1234
Will call this function with:
{'mode':'main', 'section':'tv', 'api_key':'1234'}
'''
addon.log('%s was called with the following parameters: %s' %(params.get('receiver',''), params))
sort_by = params.get('sort', None)
section = params.get('section')
if sort_by: GetFilteredResults(section, sort=sort_by)
def BrowseListMenu(section): #This must match the 'function' key of an option from get_browsing_options
addon.add_directory({'section': section, 'sort': 'featured'}, {'title': 'Featured'}, img=art('featured.png'), fanart=art('fanart.png'))
addon.add_directory({'section': section, 'sort': 'views'}, {'title': 'Most Popular'}, img=art('most_popular.png'), fanart=art('fanart.png'))
addon.add_directory({'section': section, 'sort': 'ratings'}, {'title': 'Highly rated'}, img=art('highly_rated.png'), fanart=art('fanart.png'))
addon.add_directory({'section': section, 'sort': 'release'}, {'title': 'Date released'}, img=art('date_released.png'), fanart=art('fanart.png'))
addon.add_directory({'section': section, 'sort': 'date'}, {'title': 'Date added'}, img=art('date_added.png'), fanart=art('fanart.png'))
addon.end_of_directory()
def art(file):
adn = Addon('plugin.video.1channel', sys.argv)
THEME_LIST = ['mikey1234','Glossy_Black']
THEME = THEME_LIST[int(adn.get_setting('theme'))]
THEME_PATH = os.path.join(adn.get_path(), 'art', 'themes', THEME)
img = os.path.join(THEME_PATH, file)
return img
def GetFilteredResults(section=None, genre=None, letter=None, sort='alphabet', page=None): #3000
addon.log('Filtered results for Section: %s Genre: %s Letter: %s Sort: %s Page: %s' %(section, genre, letter, sort, page))
pageurl = BASE_URL + '/?'
if section == 'tv': pageurl += 'tv'
if genre : pageurl += '&genre=' + genre
if letter : pageurl += '&letter=' + letter
if sort : pageurl += '&sort=' + sort
if page : pageurl += '&page=%s' % page
if page: page = int(page) + 1
else: page = 2
html = GetURL(pageurl)
r = re.search('number_movies_result">([0-9,]+)', html)
if r: total = int(r.group(1).replace(',', ''))
else: total = 0
total_pages = total/24
total = min(total,24)
r = 'class="index_item.+?href="(.+?)" title="Watch (.+?)"?\(?([0-9]{4})?\)?"?>.+?src="(.+?)"'
regex = re.finditer(r, html, re.DOTALL)
resurls = []
for s in regex:
resurl,title,year,thumb = s.groups()
if resurl not in resurls:
resurls.append(resurl)
li_title = '%s (%s)' %(title, year)
li = xbmcgui.ListItem(li_title, iconImage=thumb, thumbnailImage=thumb)
if section == 'tv': section = 'tvshow'
else: section = 'movie'
queries = {'waldo_mode':'GetAllResults', 'title':title, 'vid_type':section}
li_url = addon.build_plugin_url(queries)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), li_url, li,
isFolder=True, totalItems=total)
if html.find('> >> <') > -1:
label = 'Skip to Page...'
command = addon.build_plugin_url({'mode':'PageSelect', 'pages':total_pages, 'section':section, 'genre':genre, 'letter':letter, 'sort':sort})
command = 'RunPlugin(%s)' %command
cm = [(label, command)]
meta = {'title':'Next Page >>'}
addon.add_directory({'mode': 'CallModule', 'receiver':'1Channel', 'ind_path':os.path.dirname(__file__), 'section':section, 'genre':genre, 'letter':letter, 'sort':sort, 'page':page},
meta, cm, True, art('nextpage.png'), art('fanart.png'), is_folder=True)
addon.end_of_directory()
def GetURL(url, params=None, referrer=BASE_URL):
addon.log('Fetching URL: %s' % url)
USER_AGENT = 'User-Agent:Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.56'
if params: req = urllib2.Request(url, params)
else: req = urllib2.Request(url)
req.add_header('User-Agent', USER_AGENT)
req.add_header('Host', 'www.1channel.ch')
req.add_header('Referer', referrer)
try:
response = urllib2.urlopen(req, timeout=10)
body = response.read()
body = unicode(body,'iso-8859-1')
h = HTMLParser.HTMLParser()
body = h.unescape(body)
except Exception, e:
addon.log('Failed to connect to %s: %s' %(url, e))
return ''
return body.encode('utf-8')
|
[
"aymensaid82@gmail.com"
] |
aymensaid82@gmail.com
|
6ec3308ca74aee29ace51e8fb3b39a143120e86f
|
29eacf3b29753d65d8ec0ab4a60ea1f7ddecbd68
|
/tests/api_workflow/test_api_workflow_selection.py
|
73147fd03b6bbd08ffaf12ded248a6f812a0cb81
|
[
"MIT"
] |
permissive
|
lightly-ai/lightly
|
5b655fe283b7cc2ddf1d7f5bd098603fc1cce627
|
5650ee8d4057139acf8aa10c884d5d5cdc2ccb17
|
refs/heads/master
| 2023-08-17T11:08:00.135920
| 2023-08-16T12:43:02
| 2023-08-16T12:43:02
| 303,705,119
| 2,473
| 229
|
MIT
| 2023-09-14T14:47:16
| 2020-10-13T13:02:56
|
Python
|
UTF-8
|
Python
| false
| false
| 7,897
|
py
|
from typing import List
import pytest
from pytest_mock import MockerFixture
from lightly.active_learning.config.selection_config import SelectionConfig
from lightly.api import ApiWorkflowClient, api_workflow_selection
from lightly.openapi_generated.swagger_client.models import (
JobResultType,
JobState,
JobStatusData,
JobStatusDataResult,
SamplingCreateRequest,
SamplingMethod,
TagData,
)
from tests.api_workflow import utils
def _get_tags(dataset_id: str, tag_name: str = "just-a-tag") -> List[TagData]:
return [
TagData(
id=utils.generate_id(),
dataset_id=dataset_id,
prev_tag_id=None,
bit_mask_data="0x1",
name=tag_name,
tot_size=4,
created_at=1577836800,
changes=[],
)
]
def _get_sampling_create_request(tag_name: str = "new-tag") -> SamplingCreateRequest:
return SamplingCreateRequest(
new_tag_name=tag_name,
method=SamplingMethod.RANDOM,
config={},
)
def test_selection__tag_exists(mocker: MockerFixture) -> None:
tag_name = "some-tag"
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(
ApiWorkflowClient,
"get_all_tags",
return_value=_get_tags(dataset_id=utils.generate_id(), tag_name=tag_name),
)
client = ApiWorkflowClient()
with pytest.raises(RuntimeError) as exception:
client.selection(selection_config=SelectionConfig(name=tag_name))
assert (
str(exception.value) == "There already exists a tag with tag_name some-tag"
)
def test_selection__no_tags(mocker: MockerFixture) -> None:
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(ApiWorkflowClient, "get_all_tags", return_value=[])
client = ApiWorkflowClient()
with pytest.raises(RuntimeError) as exception:
client.selection(selection_config=SelectionConfig(name="some-tag"))
assert str(exception.value) == "There exists no initial-tag for this dataset."
def test_selection(mocker: MockerFixture) -> None:
tag_name = "some-tag"
dataset_id = utils.generate_id()
mocker.patch("time.sleep")
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(
ApiWorkflowClient, "get_all_tags", return_value=_get_tags(dataset_id=dataset_id)
)
mocker.patch.object(
ApiWorkflowClient,
"_create_selection_create_request",
return_value=_get_sampling_create_request(),
)
mocked_selection_api = mocker.MagicMock()
mocked_sampling_response = mocker.MagicMock()
mocked_sampling_response.job_id = utils.generate_id()
mocked_selection_api.trigger_sampling_by_id.return_value = mocked_sampling_response
mocked_jobs_api = mocker.MagicMock()
mocked_get_job_status = mocker.MagicMock(
return_value=JobStatusData(
id=utils.generate_id(),
wait_time_till_next_poll=1,
created_at=0,
status=JobState.FINISHED,
result=JobStatusDataResult(type=JobResultType.SAMPLING, data="new-tag-id"),
)
)
mocked_jobs_api.get_job_status_by_id = mocked_get_job_status
mocked_tags_api = mocker.MagicMock()
client = ApiWorkflowClient()
client._selection_api = mocked_selection_api
client._jobs_api = mocked_jobs_api
client._tags_api = mocked_tags_api
client._dataset_id = dataset_id
client.embedding_id = "embedding-id"
client.selection(selection_config=SelectionConfig(name=tag_name))
mocked_get_job_status.assert_called_once()
mocked_tags_api.get_tag_by_tag_id.assert_called_once_with(
dataset_id=dataset_id, tag_id="new-tag-id"
)
def test_selection__job_failed(mocker: MockerFixture) -> None:
dataset_id = utils.generate_id()
job_id = "some-job-id"
mocker.patch("time.sleep")
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(
ApiWorkflowClient, "get_all_tags", return_value=_get_tags(dataset_id=dataset_id)
)
mocker.patch.object(
ApiWorkflowClient,
"_create_selection_create_request",
return_value=_get_sampling_create_request(),
)
mocked_selection_api = mocker.MagicMock()
mocked_sampling_response = mocker.MagicMock()
mocked_sampling_response.job_id = job_id
mocked_selection_api.trigger_sampling_by_id.return_value = mocked_sampling_response
mocked_jobs_api = mocker.MagicMock()
mocked_get_job_status = mocker.MagicMock(
return_value=JobStatusData(
id=utils.generate_id(),
wait_time_till_next_poll=1,
created_at=0,
status=JobState.FAILED,
error="bad job",
)
)
mocked_jobs_api.get_job_status_by_id = mocked_get_job_status
client = ApiWorkflowClient()
client._selection_api = mocked_selection_api
client._jobs_api = mocked_jobs_api
client._dataset_id = dataset_id
client.embedding_id = "embedding-id"
with pytest.raises(RuntimeError) as exception:
client.selection(selection_config=SelectionConfig(name="some-tag"))
assert str(exception.value) == (
"Selection job with job_id some-job-id failed with error bad job"
)
def test_selection__too_many_errors(mocker: MockerFixture) -> None:
dataset_id = utils.generate_id()
job_id = "some-job-id"
mocker.patch("time.sleep")
mocked_print = mocker.patch("builtins.print")
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(
ApiWorkflowClient, "get_all_tags", return_value=_get_tags(dataset_id=dataset_id)
)
mocker.patch.object(
ApiWorkflowClient,
"_create_selection_create_request",
return_value=_get_sampling_create_request(),
)
mocked_selection_api = mocker.MagicMock()
mocked_sampling_response = mocker.MagicMock()
mocked_sampling_response.job_id = job_id
mocked_selection_api.trigger_sampling_by_id.return_value = mocked_sampling_response
mocked_jobs_api = mocker.MagicMock()
mocked_get_job_status = mocker.MagicMock(
side_effect=[Exception("surprise!") for _ in range(20)]
)
mocked_jobs_api.get_job_status_by_id = mocked_get_job_status
client = ApiWorkflowClient()
client._selection_api = mocked_selection_api
client._jobs_api = mocked_jobs_api
client._dataset_id = dataset_id
client.embedding_id = "embedding-id"
with pytest.raises(Exception) as exception:
client.selection(selection_config=SelectionConfig(name="some-tag"))
assert str(exception.value) == "surprise!"
mocked_print.assert_called_once_with(
"Selection job with job_id some-job-id could not be started "
"because of error: surprise!"
)
def test_upload_scores(mocker: MockerFixture) -> None:
dataset_id = utils.generate_id()
tags = _get_tags(dataset_id=dataset_id, tag_name="initial-tag")
tag_id = tags[0].id
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(
ApiWorkflowClient,
"get_all_tags",
return_value=tags,
)
mocker.patch.object(
api_workflow_selection, "_parse_active_learning_scores", return_value=[1]
)
mocked_api = mocker.MagicMock()
mocked_create_score = mocked_api.create_or_update_active_learning_score_by_tag_id
client = ApiWorkflowClient()
client._scores_api = mocked_api
client._dataset_id = dataset_id
mocked_create_score.reset_mock()
client.upload_scores(al_scores={"score_type": [1, 2, 3]}, query_tag_id=tag_id)
mocked_create_score.assert_called_once()
kwargs = mocked_create_score.call_args[1]
assert kwargs.get("tag_id") == tag_id
|
[
"noreply@github.com"
] |
noreply@github.com
|
be63e32b7244190dd61164a9683413bd9b2ef5dc
|
356b61e3d236e8c4991a9cc7224e7b5c8f202abe
|
/nova/tests/fake_instance.py
|
a8004364aac6871eb536f6ae2e852f2324e46683
|
[
"Apache-2.0"
] |
permissive
|
Brocade-OpenSource/OpenStack-DNRM-Nova
|
88c1d446d9963cac069803ead46ce196c3654aca
|
7354f378398850113ac93b511547ed05218dc770
|
refs/heads/master
| 2021-01-22T13:54:47.428996
| 2013-10-10T15:44:28
| 2013-10-11T08:51:05
| 20,933,415
| 0
| 1
|
Apache-2.0
| 2020-07-24T09:12:31
| 2014-06-17T17:41:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,300
|
py
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from nova.objects import instance as instance_obj
def fake_db_instance(**updates):
db_instance = {
'id': 1,
'deleted': False,
'uuid': str(uuid.uuid4()),
'user_id': 'fake-user',
'project_id': 'fake-project',
'host': 'fake-host',
'created_at': datetime.datetime(1955, 11, 5),
}
for field, typefn in instance_obj.Instance.fields.items():
if field in db_instance:
continue
try:
db_instance[field] = typefn(None)
except TypeError:
db_instance[field] = typefn()
if updates:
db_instance.update(updates)
return db_instance
|
[
"danms@us.ibm.com"
] |
danms@us.ibm.com
|
b2e26be61b1f0bc2d9ca76fe897708f5e740d8b8
|
f3163e8590dc75220ab51de79f68a1862d06da84
|
/pCombineLAS.py
|
1783ddb651fb9155b3c358c98bdb9577e245faa3
|
[] |
no_license
|
kbhusain/loggingFormats
|
39b1815b4624b798221466ac71de315963488f61
|
5d258a325ec1b276e58b218eeed5c8c0d3e0de0f
|
refs/heads/master
| 2021-02-26T11:54:33.916557
| 2020-03-06T22:19:57
| 2020-03-06T22:19:57
| 245,523,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,746
|
py
|
#
#
#
import os, sys, shutil
from pLASutils import p_lasReader, p_simpleLASwriter, p_lasObject, p_lasWriter
##
#
class pCombineLAS:
def __init__(self):
self.option=None
pass
def combineFiles(self,las1,las2,lasMerged,wellname=None,setName='QUICKLOOK'):
self.debug = 1
self.nm_las1 = las1
self.nm_las2 = las2
self.nm_lasMerged = lasMerged
# Take care of null conditions first
if las1 == None and las2 == None: return
if las1 == None and las2:
shutil.copyfile(las2,lasMerged)
return
if las2 == None and las1:
shutil.copyfile(las1,lasMerged)
return
# file name is valid but and empty one! Where's xor?
if os.path.exists(las1) and not os.path.exists(las2):
shutil.copyfile(las1,lasMerged)
return
if not os.path.exists(las1) and os.path.exists(las2):
shutil.copyfile(las2,lasMerged)
return
# Okay, both exist
self.f_las1 = p_lasReader(las1)
self.f_las2 = p_lasReader(las2)
self.f_merged = p_simpleLASwriter()
d1min,d1max = self.f_las1.getDepthRange()
d2min,d2max = self.f_las2.getDepthRange()
totalRange = min(d1min,d2min), max(d1max,d2max)
allCurves = { }
if self.debug > 0:
print "Range of depth for las1 = ", d1min, d1max
print "Range of depth for las2 = ", d2min, d2max
print "Total Range of depth = ", totalRange
print "Curves las1 = ", self.f_las1.curveNames, len(self.f_las1.curveNames)
print "Curves las2 = ", self.f_las2.curveNames, len(self.f_las2.curveNames)
allCurveObjects = { }
allCurveNames = [ ]
allUnitNames = [ ]
for nm in self.f_las1.curveNames:
if not nm in allCurveNames:
obj = self.f_las1.curves[nm]
allCurveObjects[nm] = p_lasObject(obj.cname,obj.units,obj.dvalue,obj.desc)
allCurveNames.append(nm)
allUnitNames.append(obj.units)
for nm in self.f_las2.curveNames:
if not nm in allCurveNames:
obj = self.f_las2.curves[nm]
allCurveObjects[nm] = p_lasObject(obj.cname,obj.units,obj.dvalue,obj.desc)
allCurveNames.append(nm)
allUnitNames.append(obj.units)
if self.debug > 0:
print "All curves = ", allCurveNames, len(allCurveNames) , len(allCurveObjects.keys())
print "All curves = ", allUnitNames, len(allUnitNames)
# Now create the new depth array at the required intervals
lasout = p_lasWriter()
#----------------------------------------------------------------------------------
# Create depth array for the full range at 0.5 foot intervals
#----------------------------------------------------------------------------------
dataBlock = { }
depth = totalRange[0]
dpName = allCurveNames[0]
justCurveNames = allCurveNames[1:] # Get curve names to work with.
while depth < totalRange[1]:
dataBlock[depth] = {}
block = dataBlock[depth]
block[dpName] = depth # Set Depth to first column
for nm in justCurveNames: block[nm] = -999.25
depth += 0.5
#----------------------------------------------------------------------------------
#----------------------------------------------------------------------------------
# Create data array for the full range at all depth intervals
#----------------------------------------------------------------------------------
depth = totalRange[0]
while depth < totalRange[1]:
#print "Processing Depth", depth , totalRange
block = dataBlock[depth]
for nm in justCurveNames:
if nm in self.f_las1.curveNames:
block[nm] = self.f_las1.getCurveDataValue(nm,depth)
if nm in self.f_las2.curveNames:
block[nm] = self.f_las2.getCurveDataValue(nm,depth)
if block[nm] > 100 and nm == 'MT_PHIAX' : print nm, depth, block[nm]
depth += 0.5
#----------------------------------------------------------------------------------
skeys = dataBlock.keys()
skeys.sort()
lasout.setSetName(setName)
for nm in justCurveNames:
if nm in self.f_las1.curveNames:
lasout.setCurveObject(nm,self.f_las1.curves[nm])
if nm in self.f_las2.curveNames:
lasout.setCurveObject(nm,self.f_las2.curves[nm])
lasout.setSetName(setName)
lasout.setCurveNames(allCurveNames)
lasout.setUnitNames(allUnitNames)
# Now add some data lines
#print "------------Adding lines"
for dp in skeys:
#print "------------Adding line at", dp
block = dataBlock[dp]
dblock = [ block[nm] for nm in allCurveNames ]
dblock = map(str,dblock)
lasout.addCurveDataLine(" ".join(dblock))
#print "------------Done with lines"
#------------------------------------------------------------------------------------
for section in [ '~W', '~WELL', '~P', '~PARAMETER' ] :
lasout.addRecordsForSection(section,self.f_las1.records)
lasout.addRecordsForSection(section,self.f_las2.records)
lasout.adjustWellSection()
if self.option == '-filled': self.fillInColumns(lasout)
lasout.writeFile(lasMerged,wellname,wrap="YES")
def fillInColumns(self,lasout):
v0 = lasout.datalines[0].split() # Get initial Vector
newdataLines = [" ".join(v0),]
for vn in lasout.datalines[1:]:
v1 = vn.split()
i = 0
nv = []
for vr in v1:
if vr != '-999.25': v0[i] = vr
nv.append(v0[i])
i += 1
newdataLines.append(" ".join(nv))
lasout.datalines = newdataLines
if __name__ == '__main__':
print sys.argv
if len(sys.argv) > 3:
combined = pCombineLAS()
combined.combineFiles(sys.argv[1],sys.argv[2],sys.argv[3])
if len(sys.argv) > 4:
combined = pCombineLAS()
if sys.argv[4] == '-filled':
wellname= None
combined.option = sys.argv[4]
else:
wellname= sys.argv[4]
combined.option = None
combined.combineFiles(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4])
|
[
"noreply@github.com"
] |
noreply@github.com
|
bfd4a1f848e76f01b7d90f734c45b368dfd32e3e
|
6e91ad505da417a6345b84ea02534deb3ec0427a
|
/src/tools/test_net.py
|
344b28577a7d294398bde276af56229b3763ca91
|
[] |
no_license
|
cocoaaa/Tenenbaum2000
|
257a7a0376b670a0896ecd09ace848cb276d5813
|
6f6e8ee74bc9f597def9559f74a6841c37c0a214
|
refs/heads/master
| 2023-04-25T23:36:13.566521
| 2021-06-06T00:49:20
| 2021-06-06T00:49:20
| 312,381,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,729
|
py
|
# encoding: utf-8
"""
@author: sherlock
@contact: sherlockliao01@gmail.com
"""
import argparse
import os
import sys
from os import mkdir
import torch
sys.path.append('.')
from config import cfg
from data import make_data_loader
from engine.example_inference import inference
from modeling import build_model
from utils.logger import setup_logger
from ipdb import set_trace
def main():
parser = argparse.ArgumentParser(description="PyTorch Template MNIST Inference")
parser.add_argument(
"--config_file", default="", help="path to config file", type=str
)
parser.add_argument("opts", help="Modify config options using the command-line", default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
if args.config_file != "":
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
output_dir = cfg.OUTPUT_DIR
if output_dir and not os.path.exists(output_dir):
mkdir(output_dir)
logger = setup_logger("template_model", output_dir, 0)
logger.info("Using {} GPUS".format(num_gpus))
logger.info(args)
if args.config_file != "":
logger.info("Loaded configuration file {}".format(args.config_file))
with open(args.config_file, 'r') as cf:
config_str = "\n" + cf.read()
logger.info(config_str)
logger.info("Running with config:\n{}".format(cfg))
model = build_model(cfg)
model.load_state_dict(torch.load(cfg.TEST.WEIGHT))
val_loader = make_data_loader(cfg, is_train=False)
inference(cfg, model, val_loader)
if __name__ == '__main__':
main()
|
[
"haejinso@usc.edu"
] |
haejinso@usc.edu
|
5b060e15caed59f1055322105d504e5cb0b285a2
|
8b7ceef38588d3ec9cc122143f7cdcc1870cac22
|
/cuentaPalabras.py
|
26b2e7e1bc018d78fa3a5fcd878180059594722c
|
[] |
no_license
|
achenriques/EjerciciosALS
|
b64f06dd524fe6630b5eaebbd59cc57bb61a6fc0
|
5901137e34bdf6a8e9bfc8c390c0c14be1bca71b
|
refs/heads/master
| 2020-12-10T03:18:51.265729
| 2017-05-29T19:11:15
| 2017-05-29T19:11:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
f = open("text", "rU")
count = 0
todo = set()
for line in f:
s = line.lower().split(" ")
todo = (todo | set(s))
for aux in s:
if(aux != "\n"):
count += 1
print(count)
f.close
|
[
"marvazto@gmail.com"
] |
marvazto@gmail.com
|
ac3e9ad5cf690b51f7175b1316a4436b568389c7
|
9a72763c1cfaea4b882f39b8b137fff4b3a0c9e7
|
/main.py
|
5dde28deffb4c62fb35a0764c1d0ec8aca3c20e7
|
[
"MIT"
] |
permissive
|
Darkzarich/dzpolltelegrambot
|
cb26296ec4637de0f30c45cf6923a9d9dac6a366
|
caa7a854686e82211dcc526bfab472ab1d500708
|
refs/heads/master
| 2020-05-25T00:03:41.643009
| 2019-08-22T00:28:33
| 2019-08-22T00:28:33
| 187,527,332
| 0
| 0
|
MIT
| 2019-08-22T00:28:34
| 2019-05-19T20:57:34
| null |
UTF-8
|
Python
| false
| false
| 4,867
|
py
|
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import MessageHandler, Filters
from telegram import ReplyKeyboardMarkup
from telegram import ReplyKeyboardRemove
import configparser
import logging
import threading
import json
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
config = configparser.ConfigParser()
config.read('config.ini')
usersProgress = {}
jsonData = {}
maxQuestions = 0
def shutdown():
updater.stop()
updater.is_idle = False
def stop(bot, update, args):
if "".join(args) == config['DEFAULT']['SHUTDOWN_KEY']:
bot.send_message(chat_id=update.message.chat_id, text="right code")
threading.Thread(target=shutdown).start()
else:
bot.send_message(chat_id=update.message.chat_id, text="wrong code")
def startInit(update):
global usersProgress
username = update.effective_user.username
if usersProgress.get(username) == None or usersProgress.get(username) == 0:
usersProgress[update.effective_user.username] = {}
return 0
else:
return usersProgress[username]['currentStage']
def start(bot, update):
global usersProgress
currentStage = startInit(update)
if currentStage == 0:
usersProgress[update.effective_user.username]['currentStage'] = 0
bot.send_message(chat_id=update.message.chat_id, text=jsonData['settings']['greeting'], reply_markup=ReplyKeyboardMarkup([['Yes']], resize_keyboard=True))
def progressRouter(bot, update):
global usersProgress
global maxQuestions
currentUser = update.effective_user.username
userMsg = update.message.text.lower().strip()
if userMsg == "/start":
return
elif userMsg == "yes" and usersProgress[currentUser]['currentStage'] == 0:
bot.send_message(chat_id=update.message.chat_id, text=jsonData['settings']['begin'])
usersProgress[currentUser]['currentStage'] = usersProgress[currentUser]['currentStage'] + 1
custom_keyboard = [[i] for i in jsonData['questions']['question_' + str(usersProgress[currentUser]['currentStage'])]['answers'].values()]
usersProgress[currentUser]['possibleAnswers'] = [i.lower().strip() for i in jsonData['questions']['question_' + str(usersProgress[currentUser]['currentStage'])]['answers'].values()]
print(usersProgress)
bot.send_message(
chat_id=update.message.chat_id,
text=str(usersProgress[currentUser]['currentStage'])+ ') ' + jsonData['questions']['question_' + str(usersProgress[currentUser]['currentStage'])]['question']['full'],
reply_markup=ReplyKeyboardMarkup(custom_keyboard, resize_keyboard=False)
)
elif usersProgress[currentUser]['currentStage'] == maxQuestions:
usersProgress[currentUser]['currentStage'] = usersProgress[currentUser]['currentStage'] + 1
usersProgress[currentUser]['possibleAnswers'] = []
bot.send_message(chat_id=update.message.chat_id, text=jsonData['settings']['thanks'], reply_markup=ReplyKeyboardRemove())
bot.send_message(chat_id=update.message.chat_id, text=jsonData['settings']['farewell'])
print(usersProgress)
elif userMsg in usersProgress[currentUser]['possibleAnswers']:
bot.send_message(chat_id=update.message.chat_id, text=jsonData['settings']['thanks'])
usersProgress[currentUser]['currentStage'] = usersProgress[currentUser]['currentStage'] + 1
custom_keyboard = [[i] for i in jsonData['questions']['question_' + str(usersProgress[currentUser]['currentStage'])]['answers'].values()]
usersProgress[currentUser]['possibleAnswers'] = [i.lower().strip() for i in jsonData['questions']['question_' + str(usersProgress[currentUser]['currentStage'])]['answers'].values()]
print(usersProgress)
bot.send_message(
chat_id=update.message.chat_id,
text=str(usersProgress[currentUser]['currentStage'])+ ') ' + jsonData['questions']['question_' + str(usersProgress[currentUser]['currentStage'])]['question']['full'],
reply_markup=ReplyKeyboardMarkup(custom_keyboard, resize_keyboard=False)
)
elif usersProgress[currentUser]['currentStage'] == maxQuestions + 1:
bot.send_message(chat_id=update.message.chat_id, text=jsonData['settings']['stop'], reply_markup=ReplyKeyboardRemove())
else:
bot.send_message(chat_id=update.message.chat_id, text=jsonData['settings']['wrong'])
updater = Updater(token=config['DEFAULT']['API_KEY'])
def main():
global jsonData
global maxQuestions
dispatcher = updater.dispatcher
start_handler = CommandHandler('start', start)
end_handler = CommandHandler('stop', stop, pass_args=True)
progress_handler = MessageHandler(Filters.text, progressRouter)
dispatcher.add_handler(start_handler)
dispatcher.add_handler(end_handler)
dispatcher.add_handler(progress_handler)
with open("QA.json", "r") as read_file:
jsonData = json.load(read_file)
maxQuestions = len(jsonData['questions'])
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
|
[
"darkzarichv2@gmail.com"
] |
darkzarichv2@gmail.com
|
1ae1aee696db2eb97e8b9ee69f1fd0016a3e0acc
|
89ffbb32676738b71c51149dd1f49a2f821934b2
|
/Tests/ML/runners/test_submit_for_inference.py
|
b667a315bc1a119983f0a21a39a6249ee4e7569d
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
MaxCodeXTC/InnerEye-DeepLearning
|
8d2877ed86afcdb5b7f4f902577d87d7a909b626
|
12b496093097ef48d5ac8880985c04918d7f76fe
|
refs/heads/master
| 2022-12-11T06:53:48.102364
| 2020-09-22T18:38:26
| 2020-09-22T18:38:26
| 297,888,308
| 1
| 0
|
MIT
| 2020-09-23T07:26:26
| 2020-09-23T07:26:25
| null |
UTF-8
|
Python
| false
| false
| 1,271
|
py
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from pathlib import Path
import pytest
from InnerEye.Common import common_util
from InnerEye.Common.fixed_paths import DEFAULT_RESULT_IMAGE_NAME
from InnerEye.Scripts.submit_for_inference import main
from Tests.Common.test_util import DEFAULT_MODEL_ID_NUMERIC
@pytest.mark.skipif(common_util.is_windows(), reason="Testing on Linux is enough")
# Test does not actually need GPU, but we put it in the GPU set as it takes a while to run
# and we don't want to make the main set even longer.
@pytest.mark.gpu
def test_submit_for_inference() -> None:
args = ["--image_file", "Tests/ML/test_data/train_and_test_data/id1_channel1.nii.gz",
"--model_id", DEFAULT_MODEL_ID_NUMERIC,
"--yaml_file", "InnerEye/settings.yml",
"--download_folder", "."]
seg_path = Path(DEFAULT_RESULT_IMAGE_NAME)
if seg_path.exists():
seg_path.unlink()
main(args)
assert seg_path.exists()
|
[
"noreply@github.com"
] |
noreply@github.com
|
4740b95a986580cb4e749f68ff49f6edc0d46aed
|
05acbbccd60853bff32d480b3032e32f725e42e8
|
/4/airplane_models/airplane.py
|
5bc486a3e79efa500a9e72673788292ed7d6705c
|
[] |
no_license
|
qingqingye/Aircraft-approach
|
81f252730686f35a8ea8ab85002dc6a86a441c8f
|
72f5cafb4a03e103df122702ce8f3cbf652cceeb
|
refs/heads/master
| 2020-12-01T17:07:28.936966
| 2019-12-29T05:43:49
| 2019-12-29T05:43:49
| 230,706,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,725
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2018 Diao Zihao <hi@ericdiao.com>. All right reserved.
import simplejson
from math import sin, cos, tan
class baseAirplane:
"""
Base airplane type for CS 181 Project. Provides basic interfaces for creating a new airplane.
Please DO NOT use this in practice. See the comments below for more information.
"""
def __init__(self, flightType=None, flight=None, registration=None, depature=None, destination=None, eta=None, etd=None,
heading=0.0,longitude = 0.0,latitude = 0.0, altitude = 0.0,groundSpeed = 0.0,squawk = '7700',prior = 0):
self._type = flightType.upper()
#There are cases that the flight information remain None for an airplane. Hang
if flight != None:
self._flight = flight.upper()
# The flight number. Consists of two-character airline designator and a 1 to 4 digit number. For example, flight number CA981 stands for Air China flight number 981 (it departs Beijing's PEK and goes to New York's JFK). See https://en.wikipedia.org/wiki/Airline_codes#IATA_airline_designator for list of airline designator.
# If there is no flight number for this flight (e.g. for private flight), use its egistration (of self._registrayion).
self._registration = registration.upper()
# In the ICAO format. For example, all chinese airplanes' registration has prefix `B` (with an optional dash)
# and 4 characters (number and english letter) (for mainland China). e.g. B-123A.
#There are cases that the depature or destination information remain None for an airplane. Hang
if depature != None:
self._depatureCity = depature.upper()
if destination != None:
self._destination = destination.upper()
# Above two are in the ICAO airport code format. For example, ZUUU for Chengdu/Shuangliu, ZSPD for Shanghai/Pudong.
# see: https://en.wikipedia.org/wiki/List_of_airports_by_IATA_and_ICAO_code
self._ETA = eta
self._ETD = etd
# Above two are in the UNIX timestamp format (We are CS student, right? And also we do not needs to deal with events before 1970-01-01 12:00, right?). e.g. (int) 15700000
self._spec = {"maxSpeed": 500, "ceiling": 6000}
# below are `dynamic` parameters that could change through time.
# self._heading = 0.0
self.heading = heading
# range from 0.0 to 360.0 (in degree)
# self._longitude = 0.0
# self._latitude = 0.0
self.position = [longitude,latitude]
# In float format. e.g 31.12345
# self._altitude = 0.0
self.altitude = altitude
# In meters.
# self._groundSpeed = 0.0
self.groundSpeed = groundSpeed
# hm
self._squawk = squawk # NOTE: this is a str.
# If you wonder what is a `Squawk`, refer to https://en.wikipedia.org/wiki/Transponder_(aeronautics).
self.priority = prior
def __repr__(self):
# return "[{}] {} {} @ ({}, {}) - {}".format(self._squawk, self._registration, self._type, self._longitude, self._latitude, self._altitude)
return "{}\n{}-{}\n{}\n{}\n{}".format(self._flight, self._depatureCity, self.destination, self.type, self._registration, self.squawk)
@property
def registration(self):
return self._registration
@property
def type(self):
return self._type
def getSpec(self, arg=None):
if arg:
return self._spec[arg]
return self._spec
@property
def flight(self):
return self._flight
@property
def depatureCity(self):
return self._depatureCity
@property
def destination(self):
return self._destination
@property
def squawk(self):
return self._squawk
@property
def ETD(self):
return self._ETD
@property
def ATD(self):
pass
@property
def ETA(self):
return self._ETA
@property
def groundSpeed(self):
return self._groundSpeed
@groundSpeed.setter
def groundSpeed(self, value):
if value > self._spec['maxSpeed'] or value < 0:
raise ValueError
self._groundSpeed = value
def getAirSpeed(self, airSpeed, airSpeedHeading):
pass
# TODO: calculate speed.
@property
def position(self):
return (self._longitude, self._latitude)
@position.setter
def position(self, value):
longitude = value[0]
latitude = value[1]
if abs(longitude) > 180.0 and abs(latitude) > 90.0:
raise ValueError
self._longitude = longitude
self._latitude = latitude
@property
def heading(self):
return self._heading
@heading.setter
def heading(self, value):
if value < 0.0 or value > 360.0:
raise ValueError
self._heading = value
@property
def altitude(self):
return self._altitude
@altitude.setter
def altitude(self, value):
if value > self._spec['ceiling']:
raise ValueError
# TODO: specify a certain error for that.
self._altitude = value
def takeAction(self, actionParam):
pass
# TODO: waiting for the environment.
class genericAirplane(baseAirplane):
"""
`genericAirplane(flightType=None, flight=None, registration=None, depature=None, destination=None, eta=None, etd=None)`
This is the default consturctor of an Airplane model.
The `flightType` specify the model of the consturcted airplane. The
constuctor will look for the corresponding `json` file in this directory
and load it to self._spec.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._getSpecFromFile()
def _getSpecFromFile(self):
fileName = "{}{}.json".format("./models/", self._type)
with open(fileName) as f:
self._spec = simplejson.load(f)
def baseAirplaneTest():
print("Testing class `baseAirplane`.")
test = baseAirplane(flightType="A330", flight="Ca1999",
registration="b-6878", depature="PVG", destination="ctu")
print(test)
print(test.__dict__)
old_val = test.__dict__
try:
test.postion = (181.0, 91.0)
print("Postion test failed.")
except ValueError:
print("Postion test passed.")
try:
test.heading = 361.0
print("Heading test failed.")
except ValueError:
print("Heading test passed.")
try:
test.altitude = 12501
print("Ceiling test failed.")
except ValueError:
print("Ceiling test passed.")
assert test.__dict__ == old_val
print("All test passed.")
def genericAirplaneTest():
print("Testing class `genericAirplane`.")
test = genericAirplane(flightType="A320", flight="Ca1999",
registration="b-6878", depature="PVG", destination="ctu")
print(test)
print(test.__dict__)
old_val = test.__dict__
try:
test.postion = (181.0, 91.0)
print("Postion test failed.")
except ValueError:
print("Postion test passed.")
try:
test.heading = 361.0
print("Heading test failed.")
except ValueError:
print("Heading test passed.")
try:
test.altitude = 12501
print("Ceiling test failed.")
except ValueError:
print("Ceiling test passed.")
assert test.__dict__ == old_val
print("All test passed.")
if __name__ == "__main__":
baseAirplaneTest()
genericAirplaneTest()
|
[
"yejj@shanghaitech.edu.cn"
] |
yejj@shanghaitech.edu.cn
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.