blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
83029dfbcfd153132b7af86b01d074ab6eb42a3f | Python | denliehoo/expense-tracker-python | /tsting.py | UTF-8 | 619 | 3.0625 | 3 | [] | no_license | import pandas as pd
# df = pd.read_csv("database/individual_data/testing.csv", index_col="Date" )
# print(df)
# print(df.shape)
# # list_val = list(df.columns.values)
# # list_val = df['Date'].tolist()
# # print(list_val)
# # print(df)
# # initial_amount = (df.iloc[2,1]) #have to convery the numpyin64 object to a float
# # print("initial amt",initial_amount)
# df.loc["2021-01-02","Books"] +=1000 #adds the amount to an exisiting value # if we use the column as a date, we can use the column name (e.g. the date as the val)
# in other cases, we use the index because the index is the col name.
# print(df)
| true |
667f76efd4c5595f4ff1f65d12f3cf15e6c3ab4a | Python | adriacb/BigChem_LMW | /Filtering/Data_analysis/rotBonds.py | UTF-8 | 1,317 | 2.546875 | 3 | [] | no_license | import os
import sys
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import rdMolDescriptors
from rdkit.Chem import Descriptors3D
from rdkit.Chem import AllChem
from progress.bar import IncrementalBar
sns.set_style("white")
total = 4123967
def getMolsPMIs(pathfile):
    """Read SMILES strings (one per line) from *pathfile* and return a
    DataFrame with a single 'RBonds' column holding the rotatable-bond
    count of every molecule RDKit can parse.

    Unparseable entries are printed and skipped. Progress is shown with
    an IncrementalBar sized by the module-level `total`.
    """
    # Collect counts in a plain list and build the DataFrame once at the
    # end: DataFrame.append() inside a loop is quadratic and was removed
    # in pandas 2.0.
    counts = []
    bar = IncrementalBar('Progress:', max = total)
    # Bug fix: the original opened the global `file_DB` instead of the
    # `pathfile` argument it was given.
    with open(pathfile) as f:
        for line in f:
            bar.next()
            # The original called re.match(r"(.*)", line).group(0) just to
            # take the line text up to the newline (and `re` was never
            # imported); stripping the newline and tabs directly is
            # equivalent.
            smile = line.rstrip("\n").replace("\t", "")
            mol = Chem.MolFromSmiles(smile)
            try:
                counts.append(rdMolDescriptors.CalcNumRotatableBonds(mol))
            except Exception:
                # Unparseable molecule: report the offending SMILES and move on.
                print(smile)
    bar.finish()
    return pd.DataFrame({'RBonds': counts})
if __name__ == "__main__":
    start_time = datetime.now()
    # Get the names of the compressed files
    file_DB = "" # MUST BE CHANGED
    mols = getMolsPMIs(file_DB)
    # Histogram of rotatable-bond counts over the whole database.
    sns.histplot(mols, x = "RBonds", color='#404080' ,bins= 6).set_title('Rotatable Bonds')
    plt.savefig('RotBonds.png', dpi=300)
    #plt.show()
    end_time = datetime.now()
    # Report total wall-clock runtime of the scan.
    print("Total time: {}".format(end_time-start_time))
| true |
8bfb6881836276c178caaee8943fca30cd120152 | Python | YoungestSalon/Outputs | /Book/Everybody Python/08B-ifelse.py | UTF-8 | 107 | 3.078125 | 3 | [] | no_license | a=3
# Compare `a` (set to 3 above) against several candidate values.
if a==2:
    print ("A")
if a==3:
    print ("B")
# NOTE: the `else` below pairs only with this last `if`, so for a == 3
# the program prints both "B" and "D".
if a==4:
    print ("C")
else:
    print ("D")
| true |
ccc7784fb4f2a7c8064fb222eb7973cbf8be39b0 | Python | thorgeirt19/Forritun | /Project 1/voyager1.py | UTF-8 | 1,677 | 3.3125 | 3 | [] | no_license | #Byrja á að stilla upp upphafsgildunum
# Initial values
dist_byrjun_m = 16637000000  # miles from the sun on 9/25/09
hradi_solarhring = 38241*24  # miles Voyager 1 travels per day (speed * 24 h)
# Calculations start below.
# Bug fix: the original rebound the name `input` to the user's answer
# (`input = input(...)`), shadowing the builtin for the rest of the run.
user_answer = input('Number of days after 9/25/09: ')  # days requested by the user
dagar = int(user_answer)  # convert the answer to an integer
dist_m = (dagar * hradi_solarhring) + dist_byrjun_m  # distance travelled plus the starting distance
dist_km = dist_m * 1.609344  # convert miles to kilometres
dist_AU = dist_m / 92955887.6  # convert miles to astronomical units
radio_time = dist_km / 1079252848.8  # signal travel time in hours: distance / speed of light in km/h
round_dist_km = round(dist_km)  # round the km distance to the nearest integer
round_dist_AU = round(dist_AU)  # round the AU distance to the nearest integer
# Print all values computed above
print("Miles from the sun:", dist_m)
print("Kilometers from the sun:", round_dist_km)
print("AU from the sun:", round_dist_AU)
#print("Samskiptatími =", radio_time)  # (disabled) print the communication time
af5203ee0afe63c3349ef6a0e632313de0d6f1b6 | Python | haple97/Machine-Learning-Models | /Naive Bayes Classifier/5_2.py | UTF-8 | 6,812 | 3.125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[17]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# In[18]:
'''Discretize continuous values into bins'''
def Discretize (b):
    """Read ./dating.csv and bin every continuous column into *b*
    equal-width bins; binned values become bin indices 1..b. Returns
    the binned DataFrame."""
    dating = pd.read_csv('./dating.csv')
    all_columns = list(dating.columns.values)
    continuous_valued_columns = [col for col in all_columns
                                 if col not in ('gender', 'race', 'race_o', 'samerace', 'field', 'decision')]
    preference_scores_of_participant = ['attractive_important', 'sincere_important', 'intelligence_important',
                                        'funny_important', 'ambition_important', 'shared_interests_important']
    preference_scores_of_partner = ['pref_o_attractive', 'pref_o_sincere', 'pref_o_intelligence', 'pref_o_funny',
                                    'pref_o_ambitious', 'pref_o_shared_interests']
    # Assign bin based on value of item; replace old column with new column of binned values
    # Provide min and max values of each column to divide bins
    for column in continuous_valued_columns:
        if (column in preference_scores_of_participant) or (column in preference_scores_of_partner):
            min_val = 0 # normalized columns
            max_val = 1
        elif column in ('age', 'age_o'):
            min_val = 18
            max_val = 58
        elif column == 'interests_correlate':
            min_val = -1
            max_val = 1
        else:
            min_val = 0
            max_val = 10
        bin_num = b
        # The tiny epsilon makes np.arange include max_val itself as the
        # final bin edge despite float rounding.
        bins = np.arange(min_val, max_val + 0.000000000000001, (max_val-min_val)/bin_num)
        # Wrap outliers inside the defined range (min, max)
        dating.loc[dating[column] < min_val, column] = min_val
        dating.loc[dating[column] > max_val, column] = max_val
        # Bin, including the right value (a <= x < b)
        dating[column] = np.digitize(dating[column], bins)
        # Assign x = b as belonging to the last bin
        dating.loc[dating[column] > bin_num, column] = bin_num
        # Count number of entries for each bin
        num_items_by_bin = []
        for i in range(bin_num):
            count = len(dating[dating[column] == i+1].index)
            num_items_by_bin.append(count)
        # print(column+':', num_items_by_bin)
    # dating.to_csv('./dating-binned.csv', index = False)
    return dating
# In[19]:
'''Split'''
def Split(b):
    """Split the binned dating data into an 80/20 train/test split.

    Reads ./dating-binned-<b>.csv and writes testSet-<b>.csv plus
    trainingSet-<b>.csv; random_state is fixed for reproducibility.
    """
    binned = pd.read_csv('./dating-binned-' + str(b) + '.csv')
    holdout = binned.sample(frac=0.2, random_state=47)
    holdout.to_csv('./testSet-' + str(b) + '.csv', index=False)
    remainder = binned.drop(holdout.index)
    remainder.to_csv('./trainingSet-' + str(b) + '.csv', index=False)
# In[20]:
'''Train model'''
def nbc(t_frac, b):
    """Train a Laplace-smoothed naive Bayes classifier on a *t_frac*
    sample of ./trainingSet-<b>.csv.

    Returns a dict mapping '<column><value>' -> [P(value|yes), P(value|no)]
    for every attribute value, plus 'priors' -> [P(yes), P(no)].
    *b* is the number of bins used for continuous columns.
    """
    training_set = pd.read_csv('./trainingSet-' + str(b) + '.csv')
    data = training_set.sample(frac=t_frac, random_state = 47)
    # Priors with add-one smoothing over the two decision outcomes.
    all_yes = data[data['decision'] == 1]
    all_yes_ratio = (len(all_yes.index)+1)/(len(data.index)+2)
    all_no = data[data['decision'] == 0]
    all_no_ratio = (len(all_no.index)+1)/(len(data.index)+2)
    # Conditional probabilities for each value of each attribute given the decision.
    all_columns = list(data.columns.values)
    # Bug fix: the original tested `col not in ('decision')`, a substring
    # check on the string 'decision'; use a tuple for exact matching.
    attribute_columns = [col for col in all_columns if col not in ('decision',)]
    conditional_prob = {}
    for column in attribute_columns:
        # num_vals is the size of the attribute's value domain, used as the
        # Laplace-smoothing denominator term.
        if column in ('gender', 'samerace'):
            min_val, max_val, num_vals = 0, 1, 2
        elif column in ('race', 'race_o'):
            min_val, max_val, num_vals = 0, 4, 5
        elif column == 'field':
            # Bug fix: the original assigned `num_val = 210` (typo), so the
            # smoothing denominator for 'field' silently reused the previous
            # column's num_vals.
            min_val, max_val, num_vals = 0, 209, 210
        else:
            # Continuous columns were discretized into bins 1..b.
            min_val, max_val, num_vals = 1, b, b
        for i in range(min_val, max_val+1, 1):
            key = column + str(i)  # key to access dictionary is an attribute and one of its values
            attribute_yes = data[(data[column] == i) & (data['decision'] == 1)]
            yes_prob = (len(attribute_yes.index)+1)/(len(all_yes.index)+num_vals)
            attribute_no = data[(data[column] == i) & (data['decision'] == 0)]
            no_prob = (len(attribute_no.index)+1)/(len(all_no.index)+num_vals)
            conditional_prob[key] = [yes_prob, no_prob]
    conditional_prob['priors'] = [all_yes_ratio, all_no_ratio]
    return (conditional_prob)
# In[21]:
'''Predict decision with model & Return accuracy of model on a dataset'''
def Prediction(data, model):
    """Classify every row of *data* with naive Bayes *model* and return
    the fraction of rows whose prediction matches the 'decision' column.

    *model* maps '<column><value>' -> [P(value|yes), P(value|no)] and
    'priors' -> [P(yes), P(no)]. Assumes *data* has a default RangeIndex
    (0..n-1), as produced by pd.read_csv.
    """
    all_columns = list(data.columns.values)
    # Bug fix: `col not in ('decision')` was a substring test on the string
    # 'decision'; use a tuple for exact matching. Also dropped the dead
    # locals pred_decision / p_yes_list / p_no_list from the original.
    attribute_columns = [col for col in all_columns if col not in ('decision',)]
    correct = 0
    for i in range(len(data.index)):
        # Accumulate P(attributes|class) for both classes, then fold in the priors.
        p_yes = 1
        p_no = 1
        for column in attribute_columns:
            value = data.loc[i, column]
            p_yes *= model[column + str(value)][0]
            p_no *= model[column + str(value)][1]
        p_yes = p_yes * model['priors'][0]
        p_no = p_no * model['priors'][1]
        # Ties go to "yes" (decision 1), matching the original behavior.
        decision = 1 if p_yes >= p_no else 0
        if decision == data.loc[i, 'decision']:
            correct += 1
    return correct / len(data.index)
# In[22]:
B = [2, 5, 10, 50, 100, 200]  # bin counts to evaluate
training_B = []  # training accuracy per bin count
test_B = []      # test accuracy per bin count
for b in B:
    # Discretize
    data = Discretize(b)
    data.to_csv('./dating-binned-'+str(b)+'.csv', index = False)
    # Split train-test
    Split(b)
    # Obtain model
    model = nbc(1, b)
    # Obtain dataset to test model on
    training_set = pd.read_csv('./trainingSet-'+str(b)+'.csv')
    test_set = pd.read_csv('./testSet-'+str(b)+'.csv')
    print('Bin size: ', b)
    # Predict model on training set
    training_accuracy = Prediction(training_set, model)
    print ('Training Accuracy: ', round(training_accuracy,2))
    training_B.append(training_accuracy)
    # Predict model on test set
    test_accuracy = Prediction(test_set, model)
    print ('Test Accuracy: ', round(test_accuracy,2))
    test_B.append(test_accuracy)
# Plot accuracy as a function of the number of bins.
plt.plot(B, training_B, label = "Training Accuracy")
plt.plot(B, test_B, label = "Test Accuracy")
plt.xlabel('Number of Bins')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
| true |
ca469ef9577f5f96e66fca915920bdba1889ebb1 | Python | dev-arthur-g20r/how-to-code-together-using-python | /How to Code Together using Python/Python again XD/tribonacci.py | UTF-8 | 298 | 3.578125 | 4 | [] | no_license | import os
def tribonacci(n):
    """Print the first *n* terms of the tribonacci sequence 0, 1, 1, 2, 4, ...

    Each term after the third seeds is the sum of the previous three.
    """
    a, b, c = 0, 1, 1
    for _ in range(n):
        print(a)
        a, b, c = b, c, a + b + c
# Ask how many terms to print, run the sequence, then pause.
terms = int(input("Number of terms in Tribonacci sequence: "))
tribonacci(terms)
os.system("pause")  # Windows-only: keep the console window open
5809562fdd322d66bb3ffdc9e801f05b58a0775c | Python | skoett/de-assignment | /code/task_4.py | UTF-8 | 1,765 | 3.515625 | 4 | [] | no_license | #!/usr/bin/env python3
"""
This module contains the functions for solving task 4.
Task description:
Convert all column names to lowercase
"""
import glob
import csv
from types import SimpleNamespace
from code.utils.utils import get_project_root, time_execution
def header_to_lower(file_path: str, target_path: str) -> None:
    """
    Copy the .csv at *file_path* into *target_path* (same base name)
    with the header row lower-cased; data rows are copied unchanged.
    :param file_path: The input file.
    :param target_path: The target destination.
    :return: None
    """
    base_name = file_path.split('/')[-1][:-4]
    destination = get_project_root() + '/' + target_path + base_name + '.csv'
    with open(file_path, 'r') as src, open(destination, 'w') as dst:
        rows = csv.reader(src)
        out = csv.writer(dst)
        # First row is the header: lower-case every column title.
        out.writerow([column.lower() for column in next(rows, None)])
        for record in rows:
            out.writerow(record)
def clean_headers_to_lower(source_path: str, target_path: str) -> None:
    """
    Cleans all headers in all .csv files located in source path.
    :param source_path: The source location of files
    :param target_path: The target location of files
    :return: None as we create new files
    """
    files = glob.glob(get_project_root() + '/' + source_path + '*.csv')
    # Idiom fix: the original ran a list comprehension purely for its side
    # effects, building a throwaway list of Nones; a plain loop is clearer.
    for f in files:
        header_to_lower(f, target_path)
@time_execution
def task4(config: SimpleNamespace) -> None:
    """
    Executes the solution of the fourth task: lower-case every CSV header.
    :param config: The configuration file from the initial .yaml
    :type config: SimpleNamespace
    :return: None
    """
    source = config.get('source')
    sink = config.get('sink')
    clean_headers_to_lower(source, sink)
    return None
| true |
893ed761c1253a34d9c77dde69bca4b7ec31d979 | Python | DataHubBocconi/Natural-Language-Analysis | /LASSO/01_createdf.py | UTF-8 | 955 | 2.578125 | 3 | [] | no_license | import os, glob
import pandas as pd
from Cleaner import *
from collections import defaultdict
# Resolve the grandparent of the working directory: sources live in
# <grandparent>/Sources and output goes to <grandparent>/Data.
cwd = os.getcwd()
par0 = os.path.abspath(os.path.join(cwd, os.pardir))
par = os.path.abspath(os.path.join(par0, os.pardir))
src = os.path.join(par, 'Sources')
#zero_path = os.path.join(src, 'neutral')
#one_path = os.path.join(src, 'racist')
# Collect [path, label, filename] triples; label is 1 when the file sits
# under a directory whose path contains 'racist', else 0.
oD = []
for path, subdirs, files in os.walk(src):
    for name in files:
        v = 0
        if 'racist' in path:
            v=1
        oD.append([os.path.join(path, name),v,name])
#stem each book and create dictionary
# clean_book comes from the project-local Cleaner module -- presumably it
# returns the tokenized/stemmed word list of a book; verify there.
D = []
for d in oD:
    D.append([clean_book(d[0]), d[1],d[2]])
# Build one word-frequency dict per book, tagging on the label and name.
C = []
for d in D:
    C.append({})
    for w in d[0]:
        try:
            C[-1][w] += 1
        except KeyError:
            C[-1][w] = 1
    C[-1]['racist'] = d[1]
    C[-1]['name'] = d[2]
df = pd.DataFrame(C)
print(df.head())
# Words absent from a given book show up as NaN; treat them as count 0.
df.fillna(0, inplace=True)
print(df.head())
df.to_csv(os.path.join(par, 'Data','rawdata.csv'))
f1754a292dfb45135995cfc76b962a9be295a1f6 | Python | Sooho-Kim/Python_algorithm | /01.baekjoon/baekjoon_5063.py | UTF-8 | 374 | 3.09375 | 3 | [] | no_license | # 5063 TGN
Case = int(input())
for i in range(Case):
non_ad_income, ad_income, ad_cost = map(int, input().split())
if non_ad_income < (ad_income-ad_cost):
result = "advertise"
elif non_ad_income == (ad_income-ad_cost):
result = "does not matter"
elif non_ad_income > (ad_income-ad_cost):
result = "do not advertise"
print(result)
| true |
e00d1adee3cb04912c82f9f434d595fc0c83f155 | Python | Zhansayaas/ICTLAB1 | /27.py | UTF-8 | 229 | 3.59375 | 4 | [] | no_license | height = float(input("Please enter a height in metres:"))
# Read the weight, compute BMI from the height entered above, and report it.
weight = float(input("Please enter a weight in kilograms: "))
body_mass_index = weight / (height * height)
print("The BMI of this person's height and weight is {}.".format(body_mass_index))
019a4196fde116ad85a636b6dfac89affcff7e73 | Python | vrrohan/Topcoder | /medium/Day1/SRM292/abacus.py | UTF-8 | 3,689 | 4.3125 | 4 | [] | no_license | """
Problem Statement for Abacus
Problem Statement
An abacus can be used to do arithmetic. The version that we have has 6 horizontal threads, each with nine beads on it. The beads on each thread are always arranged with just one gap, possibly at one of the ends.
However many beads are adjacent and at the right end of the thread is the digit value of the thread.
The value on the abacus is read by taking the digits in order from top thread to bottom thread and arranging them from left to right (so the top thread is the one that contains the most significant digit).
Create a class Abacus that contains a method add that is given a String[] original and a number val and that returns a String[] showing the abacus after val has been added to the original abacus.
Both in original and in the return, the String[] will contain exactly 6 elements representing the 6 threads in order from top thread to bottom thread.
Each element will contain a lowercase 'o' to represent each bead and three consecutive hyphens '-' to indicate the empty part of the thread.
Each element will thus contain exactly 12 characters.
Definition
Class: Abacus
Method: add
Parameters: String[], int
Returns: String[]
Method signature: String[] add(String[] original, int val)
(be sure your method is public)
Constraints
- original will contain exactly 6 elements.
- Each element of original will contain exactly 12 characters, 9 lowercase 'o's and 3 consecutive '-'s.
- val will be between 0 and 999,999 inclusive.
- val added to the original abacus will result in a value that can be shown on the abacus.
Examples
0) {"ooo---oooooo",
"---ooooooooo",
"---ooooooooo",
"---ooooooooo",
"oo---ooooooo",
"---ooooooooo"}
5
Returns:
{"ooo---oooooo",
"---ooooooooo",
"---ooooooooo",
"---ooooooooo",
"o---oooooooo",
"ooooo---oooo" }
When we add 5 to the original, it is necessary to "carry" 1 to the next thread up. This shows the arithmetic 699979 + 5 = 699984
1) {"ooo---oooooo",
"---ooooooooo",
"---ooooooooo",
"---ooooooooo",
"oo---ooooooo",
"---ooooooooo"}
21
Returns:
{"oo---ooooooo",
"ooooooooo---",
"ooooooooo---",
"ooooooooo---",
"ooooooooo---",
"ooooooooo---" }
This shows 699979 + 21 = 700000
2) {"ooooooooo---",
"---ooooooooo",
"ooooooooo---",
"---ooooooooo",
"oo---ooooooo",
"---ooooooooo"}
100000
Returns:
{"oooooooo---o",
"---ooooooooo",
"ooooooooo---",
"---ooooooooo",
"oo---ooooooo",
"---ooooooooo" }
3) {"o---oooooooo",
"---ooooooooo",
"---ooooooooo",
"---ooooooooo",
"---ooooooooo",
"---ooooooooo" }
1
Returns:
{"---ooooooooo",
"ooooooooo---",
"ooooooooo---",
"ooooooooo---",
"ooooooooo---",
"ooooooooo---" }
"""
def add(original, val):
    """Return the abacus *original* after adding *val* to its value.

    *original* is a list of thread strings (top = most significant digit),
    each holding 9 'o' beads and one '---' gap; a thread's digit is the
    number of beads to the right of the gap.

    Fixes over the original: beads are built with string multiplication
    instead of quadratic '+=' loops, and padding uses str.zfill -- the old
    `while len != 6` loop both hard-coded the width and would spin forever
    if the sum ever exceeded six digits.
    """
    # Read the current value: one digit per thread, top to bottom.
    digits = ''.join(str(len(thread.split('---')[1])) for thread in original)
    # Add val and re-pad with leading zeros to one digit per thread.
    total = str(int(digits) + val).zfill(len(original))
    # Rebuild each thread: (9 - d) beads, the gap, then d beads.
    return ['o' * (9 - int(d)) + '---' + 'o' * int(d) for d in total]
# Demo runs mirroring the problem-statement examples.
demo_mixed = ["o---oooooooo", "---ooooooooo", "---ooooooooo", "---ooooooooo", "---ooooooooo", "---ooooooooo"]
demo_partial = ["ooooooooo---", "---ooooooooo", "ooooooooo---", "---ooooooooo", "oo---ooooooo", "---ooooooooo"]
demo_zeros = ["ooooooooo---", "ooooooooo---", "ooooooooo---", "ooooooooo---", "ooooooooo---", "ooooooooo---"]
print(add(demo_partial, 100000))
print(add(demo_zeros, 5))
| true |
35a64364495f64d8baa25c677e25e7c694aaab7f | Python | sizhuoli/Weighted-Tversky-loss | /Weighted_Tversky_loss.py | UTF-8 | 1,025 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
#weighted_tversky_loss
def tversky(y_true, y_pred, alpha=0.6, beta=0.4):
    """compute the weighted Tversky loss with weight maps

    y_true packs two maps along its last axis: channel 0 is the binary
    annotation, channel 1 is the per-pixel weight map.
    assumes y_true is (..., 2) and y_pred is (..., 1) -- TODO confirm
    against the training pipeline.
    """
    #annotation
    y_t = y_true[...,0]
    y_t = y_t[...,np.newaxis]
    #weights
    y_weights = y_true[...,1]
    y_weights = y_weights[...,np.newaxis]
    ones = K.ones(K.shape(y_t))
    #p0: prob that the pixel is of class 1
    p0 = y_pred
    #p1: prob that the pixel is of class 0
    p1 = ones - y_pred
    g0 = y_t
    g1 = ones - y_t
    #terms in the Tversky loss function combined with weights
    # alpha scales false positives and beta false negatives; with
    # alpha + beta = 1 this generalizes the Dice coefficient.
    tp = tf.reduce_sum(y_weights * p0 * g0)
    fp = alpha * tf.reduce_sum(y_weights * p0 * g1)
    fn = beta * tf.reduce_sum(y_weights * p1 * g0)
    #add to the denominator a small epsilon to prevent the value from being undefined
    EPS = 1e-5
    num = tp
    den = tp + fp + fn + EPS
    result = num / den
    # Loss = 1 - Tversky index, so a perfect overlap gives 0.
    return 1 - tf.reduce_mean(result)
e0da656b481a6f51c06496d1f2c3d8c76e12c73f | Python | JoseVillagranE/atari-RL | /DQN.py | UTF-8 | 1,642 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 9 02:04:10 2021
@author: josev
"""
import torch
import torch.nn as nn
def conv2d_size_out(size, kernels_size, strides, paddings, dilations):
    """Return the spatial size after a stack of Conv2d layers.

    Applies the standard Conv2d output-size formula once per layer,
    consuming the per-layer hyper-parameter lists in lockstep.
    """
    out = size
    for k, s, p, d in zip(kernels_size, strides, paddings, dilations):
        effective_kernel = d * (k - 1) + 1
        out = (out + 2 * p - effective_kernel) // s + 1
    return out
class DQN(nn.Module):
    """Convolutional Q-network mapping an 84x84 frame stack to a
    per-action output."""
    def __init__(self, action_space, n_channel):
        super().__init__()
        self.action_space = action_space
        # 1x1 conv adapts n_channel inputs to 32, then the classic DQN stack.
        self.conv = nn.Sequential(nn.Conv2d(n_channel, 32, kernel_size=1),
                                  nn.Conv2d(32, 64, kernel_size=8, stride=4),
                                  nn.ReLU(),
                                  nn.Conv2d(64, 64, kernel_size=4, stride=2),
                                  nn.ReLU(),
                                  nn.Conv2d(64, 64, kernel_size=3, stride=1),
                                  nn.ReLU())
        # Spatial size after the strided convs (the 1x1 conv keeps 84x84),
        # times 64 channels, gives the flattened feature length.
        outp_size = conv2d_size_out(84, [8, 4, 3], [4, 2, 1], [0, 0, 0], [1, 1, 1])
        outp_size= 64*outp_size**2
        # NOTE(review): a Softmax over the outputs is unusual for DQN --
        # Q-values are not probabilities; confirm this is intended.
        self.linear = nn.Sequential(nn.Linear(outp_size, 512),
                                    nn.ReLU(),
                                    nn.Linear(512, action_space),
                                    nn.Softmax(-1))
    def forward(self, x):
        # Flatten conv features per sample before the dense head.
        action = self.linear(torch.flatten(self.conv(x), start_dim=1))
        return action
if __name__=="__main__":
    # Smoke test: one random 3-channel 84x84 frame through an 18-action net.
    # NOTE(review): `input` shadows the builtin of the same name here.
    input = torch.rand((1, 3, 84, 84))
    model = DQN(18, 3)
    outp = model(input)
aed48e238d4d289af5bcdd884a249a0c2e40f9f1 | Python | RHDZMOTA/lambda_credit | /financial_functions.py | UTF-8 | 2,299 | 3.265625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# PEP 8 (E731) fix: these were lambdas assigned to names; plain defs keep
# the same callable names and behavior while gaining docstrings.

def futureValue(capital, interest, periods):
    """Return *capital* compounded at *interest* per period for *periods* periods."""
    return capital * (1 + interest) ** periods


def presetValue(capital, interest, periods):
    """Return the present value of *capital* discounted at *interest* per period.

    (Name kept as-is for backward compatibility; 'present value' is meant.)
    """
    return capital * (1 + interest) ** (-periods)


def annualInterest(initial_capital, end_capital, years):
    """Return the compound annual rate that grows *initial_capital* to *end_capital* in *years*."""
    return (end_capital / initial_capital) ** (1 / years) - 1


def equivalentAnnualInterest(rate, cap):
    """Return the effective annual rate for nominal *rate* compounded *cap* times per year."""
    return (1 + rate / cap) ** cap - 1
def getRiskFreeRate():
    '''getRiskFreeRate function.
    description: This function returns the risk-free interest rate (cetes) for 28,91,182 days.
    ---- inputs
    ---- outputs
        dict mapping tenor in days (28, 91, 182) -> latest rate as a float
    '''
    # import libraries
    import urllib.request
    # open url to get source (Banxico SIE page with the CETES yield table)
    with urllib.request.urlopen('http://www.banxico.org.mx/SieInternet/'+
                                'consultarDirectorioInternetAction.do?a'+
                                'ccion=consultarCuadro&idCuadro=CF107&s'+
                                'ector=22&locale=es') as response:
        # read source and save as string
        html_source = response.read().decode('latin-1')
    # identify target
    def getTarget(source):
        '''getTarget function
        description: function adapted to retrieve the value of cetes interest rate.
        ---- inputs
        source: page HTML (or a suffix of it) to scan
        ---- outputs
        position_index: offset of the numeric value found in *source*
        value: the rate parsed as a float
        '''
        # The value sits after the third hidden-span marker following the
        # "Tasa de rendimiento" label. The 34 / 10 offsets are tied to the
        # current page markup and will break if Banxico changes the layout.
        tasa_de_rendimiento = source.find('Tasa de rendimiento')
        visibility_hidden = 0
        for i in range(3):
            visibility_hidden += source[tasa_de_rendimiento:][visibility_hidden:].find('<span style="visibility:hidden">')+34
        position_index = tasa_de_rendimiento + visibility_hidden - 10 - 34
        value = float(source[position_index:position_index+10].strip(' '))
        return position_index, value
    # get key,values and save in dictionary
    cetes_dictionary = {}
    reference_index = 0
    for i in [28, 91, 182]:
        # Scan forward from the previous hit so each tenor reads the next value.
        html_source = html_source[reference_index:]
        reference_index, value = getTarget(html_source)
        cetes_dictionary[i] = value
    return cetes_dictionary
| true |
69921523e140b99a961005a19852c26d6d9045fe | Python | caoshen/leet-code | /python/Maximal Rectangle.py | UTF-8 | 861 | 2.9375 | 3 | [] | no_license | class Solution:
# @param matrix, a list of lists of 1 length string
# @return an integer
def maximalRectangle(self, matrix):
if len(matrix) == 0:
return 0
m, n = len(matrix), len(matrix[0])
H, L, R = [0] * n, [0] * n, [n] * n
result = 0
for i in range(m):
left, right = 0, n
for j in range(n):
if matrix[i][j] == '1':
L[j] = max(L[j], left)
H[j] += 1
else:
left = j + 1
L[j] , R[j], H[j] = 0, n, 0
for j in range(n - 1, -1, -1):
if matrix[i][j] == '1':
R[j] = min(R[j], right)
result = max(result, H[j] * (R[j] - L[j]))
else:
right = j
return result | true |
2d0dda6fd6a36f9319f75aca4177dd32bdc88878 | Python | martindzida/tkinter | /dialogs.py | UTF-8 | 1,842 | 3.0625 | 3 | [] | no_license | from tkinter import *
class CityDialog:
    """Modal Tk dialog for editing a city's population (in thousands)."""
    def __init__(self, parent, shape):
        # shape.d holds the population value this dialog edits.
        self.shape = shape
        top = self.top = Toplevel(parent)
        top.title("Počet obyvatel")
        top.transient(parent)
        # Block interaction with the main application window - modal dialog.
        top.grab_set()
        # Give the dialog keyboard focus.
        top.focus_set()
        x = parent.winfo_x()
        y = parent.winfo_y()
        # Fixed 400x100 dialog, offset from the parent window's position.
        top.geometry("%dx%d+%d+%d" % (400, 100, x + 100, y + 50))
        # Tk variable backing the spinbox, seeded with the current value.
        spin_d_value = StringVar()
        spin_d_value.set(self.shape.d)
        # Container frame for the label + spinbox row.
        container1 = Frame(top, width=400, pady=10, padx=10)
        label_pozice = Label(container1, text="Počet obyvatel", pady=5)
        label_pozice.pack()
        label_d = Label(container1, text="Počet obyvatel v tis. (max. 500) :")
        label_d.pack(side=LEFT)
        self.spinbox_d = Spinbox(container1, from_=0, to=parent.winfo_width(), textvariable=spin_d_value)
        self.spinbox_d.pack(side=LEFT, padx=30)
        container1.pack(fill=BOTH)
        button_ok = Button(top, text="OK", command=self.ok)
        button_ok.pack(side=LEFT, padx=10, pady=5, fill=BOTH, expand=True)
        button_cancel = Button(top, text="Zrušit", command=self.cancel)
        button_cancel.pack(side=LEFT, padx=10, pady=5, fill=BOTH, expand=True)
    def ok(self, event=None):
        # A "city" may not exceed 500 thousand inhabitants (it would take up
        # quite a lot of space when drawn), so values above are discarded.
        if float(self.spinbox_d.get()) < 501:
            self.shape.d = float(self.spinbox_d.get())
        self.top.destroy()
    def cancel(self, event=None):
        # Close without saving any change.
        self.top.destroy()
da91d7dd7c05373d2b04655a539e808503408046 | Python | JoaoBueno/estudos-python | /fullcontrol-menu/funcoes/isEmail.py | UTF-8 | 525 | 3.4375 | 3 | [] | no_license | import sys
import re
def isEmail(email):
    """Return True when *email* matches a basic lowercase e-mail pattern.

    Anything 7 characters or shorter is rejected outright.
    """
    if len(email) <= 7:
        return False
    pattern = r'^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$'
    return re.match(pattern, email) is not None
if __name__ == '__main__':
    # CLI entry point: validate the single address passed on the command line.
    if len(sys.argv) < 2:
        print("""
isEmail is an e-mail validator
Use: isEmail example@example.com
""")
        exit(-1)
    if isEmail(sys.argv[1]) == True:
        print('This is a valid e-mail address')
    else:
        print('This is not a valid e-mail address')
| true |
cca75ac62b3b0b4874a436be0f49f963151c4be0 | Python | heddle317/coding-exercises | /factorial.py | UTF-8 | 631 | 3.171875 | 3 | [] | no_license | """
go-left Software Rotating Header Image
100 Little Programming Exercises
https://go-left.com/blog/programming/100-little-programming-exercises/
A.1 Factorial
Write a program which takes a single argument, computes the factorial and prints the value on the screen.
$ factorial 5
5! = 120
Ext 1: Change the program so that it computes the factorial iteratively and recursively.
Ext 2: Change the program so that it is interactive instead of terminating after each computation and that it uses a table of previous values to improve the performance.
"""
def factorial(n):
    """Return n! computed iteratively; *n* must be a non-negative integer."""
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result


if __name__ == '__main__':
    # Implements the exercise the placeholder `pass` left open:
    # `factorial 5` prints `5! = 120`.
    import sys
    n = int(sys.argv[1])
    print('%d! = %d' % (n, factorial(n)))
| true |
6c98bd975f2c6da9687771fbfb36bda27fb47ad3 | Python | sabin-thapa/Corona-Update | /scraper.py | UTF-8 | 2,970 | 3.4375 | 3 | [] | no_license | """WEB SCRAPING FOR CORONA UPDATES FROM THE WORLDOMETER WEBSITE USING REQUESTS, BEAUTIFUL SOUP AND PANDAS"""
from bs4 import BeautifulSoup
import pandas as pd
import requests
"""The Worldometer URL for corona updates"""
URL = "https://www.worldometers.info/coronavirus/"
print("Getting page info...")
# sending a GET request for the URL
page = requests.get(URL)
# parsing into the html source code using beautiful soup
soup = BeautifulSoup(page.content, 'html.parser')
print("Scraping page data...")
"""---------------------------------------------TABLE---------------------------------------------"""
# extracting the html of table of data using soup
table = soup.find(id='main_table_countries_today')
"""---------------------------------------------TABLE HEAD---------------------------------------------"""
# Getting the table head HTML
thead = table.find('thead')
# getting the html columns from the table head
thead_cols = thead.find_all('th')
# List for storing the table header titles
head = []
# iterating head columns to store seach value in list
for col in thead_cols:
head.append(col.text)
"""---------------------------------------------TABLE BODY---------------------------------------------"""
# Getting the table body HTML from table
tbody = soup.find('tbody')
# Getting all the rows in the table body
rows = tbody.find_all('tr')
# list for storing data of each row
row_data = []
# iterating each row to extract data
for row in rows:
# extracting each column from rows
cols = row.find_all('td')
# extracting the text of each column and saving into a list row_data
row = [i.text.strip() for i in cols]
row_data.append(row)
# Lists for storing each colums
Country = []
TotalCases = []
NewCases = []
TotalDeaths = []
NewDeaths = []
TotalRecovered = []
ActiveCases = []
SeriousCritical = []
TotCases1Mpop = []
TotDeaths1Mpop = []
# Iterating each row data and adding to the column lists
for row in row_data:
Country.append(row[0])
TotalCases.append(row[1])
NewCases.append(row[2])
TotalDeaths.append(row[3])
NewDeaths.append(row[4])
TotalRecovered.append(row[5])
ActiveCases.append(row[6])
SeriousCritical.append(row[7])
TotCases1Mpop.append(row[8])
TotDeaths1Mpop.append(row[9])
"""---------------------------------------------EXTRACTED DATA TABULATION---------------------------------------------"""
print("Exporting scraped data...")
# creating a dataframe with each columnlist
df = pd.DataFrame({'Country/Other': Country, 'Total Cases': TotalCases, 'New Cases': NewCases, 'Total Death': TotalDeaths, 'New Deaths': NewDeaths, 'Total Recovered': TotalRecovered,
'Active Cases': ActiveCases, 'Serious/Critical': SeriousCritical, 'Tot Cases/ 1M pop': TotCases1Mpop, 'Tot Deaths/ 1M pop': TotDeaths1Mpop})
# converting dataframe to csv file
df.to_csv('CoronaUpdate.csv', index=False, encoding='utf-8')
print("Data exported to: 'CoronaUpdate.csv' file")
| true |
854658cf13f4ecfb539041685c704bf876be67dd | Python | HuangCongQing/Spider | /package/3xpath/05xpath解析基础.py | UTF-8 | 1,018 | 2.765625 | 3 | [
"MIT"
] | permissive | '''
Description:
Author: HCQ
Company(School): UCAS
Email: 1756260160@qq.com
Date: 2021-01-01 13:56:16
LastEditTime: 2021-01-04 10:34:44
FilePath: /Spider/package/3xpath/05xpath解析基础.py
'''
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from lxml import etree
if __name__ == "__main__":
#实例化好了一个etree对象,且将被解析的源码加载到了该对象中
tree = etree.parse('test.html') # 在线网页 用 : etree.HTML(page_text)
r = tree.xpath('/html/body/div') # 3个Element对象 [<Element div at 0x7fcb3819b4c8>, <Element div at 0x7fcb3819b5c8>, <Element div at 0x7fcb3819b608>]
# r = tree.xpath('/html//div') # 等价于上面/html/body/div
# r = tree.xpath('//div') # # 等价于上面
# r = tree.xpath('//div[@class="song"]')
# r = tree.xpath('//div[@class="tang"]//li[5]/a/text()')[0]
r = tree.xpath('//li[7]//text()') # ['度蜜月']
# r = tree.xpath('//div[@class="tang"]//text()')
# r = tree.xpath('//div[@class="song"]/img/@src')
print(r)
| true |
ee84b580ee2524c773a897ef371ac05bd09eb727 | Python | ewintergames/sj-simulator | /competition_manager.py | UTF-8 | 2,816 | 2.765625 | 3 | [
"MIT"
] | permissive | from competition_results import CompetitionResults
from jump_simulator import JumpSimulator
import random
import matplotlib.pyplot as plt
import numpy as np
class CompetitionManager:
    """Runs a ski-jumping competition on a hill: simulates every jump,
    accumulates results, and can print or plot them."""
    def __init__(self, hill, competition_data, jumpers):
        self.hill = hill
        self.rounds = int(competition_data['rounds'])
        self.jumpers = jumpers
        # Master seed drives both per-jump seeds and the wind model.
        self.seed = int(competition_data['seed'])
        self.gate = int(competition_data['gate'])
        self.results = CompetitionResults(hill, competition_data['date'], jumpers)
        self.simulator = JumpSimulator(self.hill)
        self.jump_seed_gen = random.Random(self.seed)
        self.wind_init()
    def run_jump(self, jumper, debug=False):
        """Simulate one jump for *jumper*; optionally plot the flight path."""
        jump_seed = self.next_jump_seed()
        wind = self.get_wind(jump_seed)
        jump_res, flight_path = self.simulator.simulate_jump(
            jumper, wind, self.gate, jump_seed)
        if debug:
            self.render_jump(
                flight_path[0], flight_path[1], jump_res.distance, self.hill.profile)
        return jump_res
    def run_competition(self, debug=False):
        """Run every round for every jumper, then sort the standings."""
        for round in range(self.rounds):
            for ind, jumper in enumerate(self.jumpers):
                x = self.run_jump(jumper, debug)
                self.results.add_jump_result(ind, x)
                # print(x)
        self.results.sort_results()
        # for x in self.results.ordered_results():
        #     print(x)
    def next_jump_seed(self):
        # Deterministic per-jump seed stream derived from the master seed.
        return self.jump_seed_gen.randint(0, 6574036)
    def wind_init(self):
        # Competition-wide wind: a base level in [-3, 3) plus a gust
        # amplitude (bias) in [0, 2), both fixed by the master seed.
        self.wind_gen = random.Random(self.seed)
        self.wind_base = -3 + 6 * self.wind_gen.random()
        self.wind_bias = self.wind_gen.random() * 2
    def get_wind(self, jump_seed):
        # Per-jump wind: deterministic gust on top of the base level.
        gen = random.Random(jump_seed)
        return gen.random() * self.wind_bias + self.wind_base
    def present_results(self):
        """Print the final standings with every jump of every athlete."""
        print('Ski Jumping Competition', self.hill.name, self.results.date)
        for res in self.results.ordered_results():
            athlete = res.athlete
            jumps = res.jump_results
            print(res.rank, athlete.name, athlete.country, res.total_points)
            for ind, jmp in enumerate(jumps):
                print(f'\t{ind+1}. {jmp}')
    def render_jump(self, fly_x, fly_y, dist, hill):
        """Plot the flight path over the hill profile.

        presumably A/P/K/L/U are the standard hill-profile reference
        points ((x, y) pairs) -- verify against the hill profile class.
        """
        plt.title(f'{dist} m')
        plt.plot(fly_x, fly_y, '--')
        x0 = np.linspace(hill.A[0], 0, 100)
        y0 = list(map(hill.inrun, x0))
        x1 = np.linspace(0, hill.U[0])
        y1 = list(map(hill.landing_area, x1))
        xd = [hill.P[0], hill.K[0], hill.L[0]]
        yd = [hill.P[1], hill.K[1], hill.L[1]]
        plt.plot(x0, y0)
        plt.plot(x1, y1)
        plt.plot(xd, yd, 'o')
        xt = []
        yt = []
        plt.axis('equal')
        plt.show()
| true |
17bfc4b20370c6310ba0adeeee1c4bafb6b377ad | Python | omatveyuk/interview | /Hackbright_whiteboard_hard/josephus.py | UTF-8 | 5,027 | 3.890625 | 4 | [] | no_license | """Given num_item in circle, erase [_every]th person, return survivor.
Imagine a group of 10 in a circle, numbered 1 to 10.
If we started at the first item (#1) and erased every three item
This continues, though, looping around again, starting with where we left of at #10.
etc.
1 2 3 4 5 6 7 8 9 10
x x x !
1 2 3 4 5 6 7 8 9 10
x x x x ! x
1 2 3 4 5 6 7 8 9 10
x x x x x x x !
1 2 3 4 5 6 7 8 9 10
x x x x x x x x !
1 2 3 4 5 6 7 8 9 10
x x x x x x x x x
>>> find_survivor(4, 2)
1
>>> find_survivor(41, 3)
31
As a sanity case, if never skip anyone, the last person will be our survivor:
>>> find_survivor(10, 1)
10
"""
class Node(object):
    """Singly-linked node: a payload (``data``) and a ``next`` pointer."""
    def __init__(self, data, next=None):
        self.data = data
        self.next = next
    def __repr__(self):
        return "<Node data={0}>".format(self.data)
class Circle_ll(object):
    """Circular singly-linked list: the last node points back to ``head``.

    NOTE: written for Python 2 (``print`` statements in ``display``).
    """
    def __init__(self):
        self.head = None
    def insert(self, node):
        """Append *node* at the end of the circle (i.e. just before head)."""
        if self.head == None:
            self.head = node
            node.next = self.head  # a single node points at itself
        else:
            curr = self.head
            while curr.next != self.head:  # walk to the last node
                curr=curr.next
            node.next = curr.next  # new node closes the circle back to head
            curr.next = node
    def remove(self, data):
        """Remove the first node whose payload equals *data*.

        Assumes *data* is present; a missing value would loop forever in
        the inner-node branch below.
        """
        # data is in head and circle linked list has only 1 node.
        # Remove head. Empty circle linked list"
        if self.head.data == data and self.head.next == self.head:
            self.head = None
            return
        curr = self.head
        # data is in head. Remove node which is head, move head to the next node
        if self.head.data == data:
            new_head = self.head.next
            while curr.next != self.head:  # find the last node...
                curr = curr.next
            self.head = new_head
            curr.next = self.head  # ...and re-close the circle at the new head
        else:
            # data is in inner node. Remove this node
            while curr.data != data:
                prev = curr
                curr = curr.next
            prev.next = curr.next
    def display(self):
        """Print each payload once around the circle (Python 2 syntax)."""
        if self.head == None:
            print "Empty circle linked llst"
        else:
            curr = self.head
            while curr.next != self.head:
                print curr.data,
                curr = curr.next
            print curr.data
            print curr.next.data, '...'
def find_survivor(num_people, kill_every):
    """Given num_people in circle, kill [kill_every]th person, return survivor.

    Uses the O(n) Josephus recurrence instead of simulating a circular
    linked list (the original was O(n^2) because every removal rescanned
    the circle).  With 0-based positions: J(1) = 0 and
    J(i) = (J(i-1) + kill_every) % i; the survivor's 1-based number is
    J(num_people) + 1.  Results match the module doctests
    (find_survivor(4, 2) == 1, find_survivor(41, 3) == 31,
    find_survivor(10, 1) == 10).
    """
    survivor = 0  # J(1): with a single person, position 0 survives
    for circle_size in range(2, num_people + 1):
        survivor = (survivor + kill_every) % circle_size
    return survivor + 1
# Solution Hackbright
# class Node(object):
# """Doubly-linked node."""
# def __init__(self, data, prev=None, next=None):
# self.data = data
# self.prev = prev
# self.next = next
# def __repr__(self):
# return "<Node prev=%s data=%s next=%s>" % (
# self.prev.data, self.data, self.next.data)
# @classmethod
# def make_list(cls, n):
# """Construct a circular doubly-linked list of n items. Returns head node.
# >>> node = Node.make_list(3)
# >>> node.data
# 1
# >>> node.next.data
# 2
# >>> node.next.next.next.data
# 1
# >>> node.prev.data
# 3
# >>> node.prev.prev.prev.data
# 1
# """
# # Make the first node (and remember that it's the first)
# first = node = prev = cls(1)
# # Make every other node
# for i in range(2, n + 1):
# node = Node(i, prev=prev)
# prev.next = node
# prev = node
# # Fix the last and first node's prev/next
# node.next = first
# first.prev = node
# return first
# def find_survivor(num_people, kill_every):
# """Given num_people in circle, kill [kill_every]th person, return survivor."""
# node = Node.make_list(num_people)
# # Loop until we're the only item in the list (last survivor)
# while node.next != node:
# for i in range(kill_every - 1):
# # If we will every 3rd person, we'll skip over two
# node = node.next
# # We're on the node to kill. Remove it from our doubly-linked list
# node.prev.next = node.next
# node.next.prev = node.prev
# node = node.next
# return node.data
if __name__ == '__main__':
    # Run the module-level doctests (Python 2 print statement below).
    import doctest
    if doctest.testmod().failed == 0:
        print "\n*** ALL TEST PASSED. W00T! ***\n"
| true |
5ec634fdd3a89ee783ff3e307c49c8b179835590 | Python | rominf/Tatoeba-anki-deckgeneration | /tatoeba_anki.py | UTF-8 | 6,902 | 2.75 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf8 -*-
"""A tool do download taboeba sentences and to put them into Anki
Usage:
Taboeba_anki.py [--audio] [--tags] [--author] [--src-lang <lang>]... [--audio-lang <lang>]... [--target-lang <lang>] [--copy-media] [--anki-media-dir <dir>] [--all] <url>
Options:
--audio Grab audio if sentences in a source language have it.
--tags Copy the tags if they exist.
--author Put author name as an extra tag.
--src-lang <lang> Languages of source sentences (may be 1 or more that will appear on the question field). This should be 2-letter code ISO 639-1 https://en.wikipedia.org/wiki/List_of_ISO_639-2_codes.
--audio-lang <lang> These codes are used in Tatoeba's url when you browse audio by language, e.g. cmn for Chinese in https://tatoeba.org/eng/sentences/with_audio/cmn.
--target-lang <lang> Target language code (will be on the answer's fiels).
--copy-media Copy media to Anki media folder.
--anki-media-dir <dir> This is "collection.media" folder which is normally located in your documents folder. [Default: ~/Documents/Anki/User\ 1/collection.media/].
-a, --all Combines --audio, --tags, --author, --copy-media.
"""
from future import standard_library
standard_library.install_aliases()
from builtins import input
from builtins import str
from builtins import range
from docopt import docopt
import csv
import os
import re
import shutil
import sys
import urllib.request, urllib.parse, urllib.error
import requests
import logging
logging.basicConfig(level=logging.INFO)
# ---- command-line parsing and working-directory setup ----
args = docopt(__doc__)
UrlListOfSentences = args['<url>']
# the last URL segment doubles as the name of the temporary work dir
output_dir = UrlListOfSentences.rpartition('/')[-1]
getAudio = args['--audio']
getTags = args['--tags']
getAuthor = args['--author']
srclang = args['--src-lang']
audio3letterslangcodes = args['--audio-lang']
targetlang = args['--target-lang']
copymediafilestoankifolder = args['--copy-media']
ankimediafolder = args['--anki-media-dir']
if args['--all']:
    # --all switches on every optional feature
    getAudio = True
    getTags = True
    getAuthor = True
    copymediafilestoankifolder = True
if os.path.exists(output_dir):
    key = input(f"'{output_dir}' folder already exists. Press Enter to clean it or close this window")
    if not key:
        shutil.rmtree(output_dir)
try:
    os.mkdir(output_dir)
except OSError:  # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
    logging.error(f"The script couldn't create a temporary workdir {output_dir}.")
    sys.exit(1)
cfile = open(f"{output_dir}/exampledeck.csv", "w")
def procstring(string):
res = string
res = res.replace("'","'")
res = res.replace(""",'"')
return res
# process the link, open it and grab all we need
def proclink(num):
    """Fetch Tatoeba sentence *num*, scrape it, and write one CSV row.

    Side effects: appends a row to the module-level ``cfile`` CSV, may
    download an mp3 into ``output_dir``, and exits the process on a
    non-200 HTTP response.  NOTE(review): parsing is regex-based and
    tied to Tatoeba's current HTML — it will silently break if the
    markup changes.
    """
    taglist = []
    url = 'https://tatoeba.org/eng/sentences/show/' + num
    curaudio = ''
    resp = requests.get(url)
    if resp.status_code != 200:
        logging.error("Error response for search")
        sys.exit(1)
    if getTags:
        tagname = re.findall('class="tagName".+?\>(.+?)\<', resp.text, re.DOTALL)
        for i in tagname:
            taglist.append(i.strip().replace(" ", "_"))
    if getAuthor:
        authorname = re.findall('title="belongs\sto\s(.+?)"', resp.text)
        if len(authorname) > 0:
            taglist.append('by_' + authorname[0])
        else:
            taglist.append('orphan_sentence')
    srcsentence = ''
    mainlang = ''
    # try each requested source language in order; first match wins
    for i,item in enumerate(srclang):
        srcsentence = re.findall('mainSentence.+?<div lang="' + item + '" dir="\w{3}" ng-non-bindable="" class="text correctnessZero">(.+?)<\/div><\/div><\/div>', resp.text)
        if len(srcsentence) > 0:
            srcsentence = srcsentence[0]
            mainlang = audio3letterslangcodes[i]
            break
        else:
            srcsentence = ''
            continue
    if srcsentence == '':
        logging.error("Error while trying to get the source sentence")
        return
    audiourl = 'https://audio.tatoeba.org/sentences/' + mainlang + '/' + num + '.mp3'
    if getAudio:
        laudio = re.findall("https\:\/\/audio\.tatoeba\.org\/sentences\/(\w{3})\/" + num + ".mp3", resp.text)
        if laudio != []:
            # grab audio
            urllib.request.urlretrieve(audiourl, f"{output_dir}/" + num + ".mp3")
            curaudio = '[sound:' + num + '.mp3]'
    targetsentence = re.findall('directTranslation".+?<div lang="' + targetlang + '" dir="\w{3}"\s+class="text correctnessZero">(.+?)<\/div>', resp.text.replace('ng-non-bindable=""',''))
    if len(targetsentence) > 0:
        targetsentence = targetsentence[0]
    else:
        targetsentence = ''
    csv_writer = csv.writer(cfile, delimiter='\t', lineterminator='\n')
    logging.info(" ".join([srcsentence + curaudio, targetsentence, " ".join(taglist)]))
    csv_writer.writerow([procstring(srcsentence) + curaudio, procstring(targetsentence), " ".join(taglist)])
def mainproc():
    """Walk every result page of the sentence list and process each link.

    Afterwards, optionally copy all downloaded mp3 files into the Anki
    media folder.
    """
    # 1. get the list of sentences from the first page
    global UrlListOfSentences
    UrlListOfSentences = UrlListOfSentences.replace('/page:1','').rstrip("/")
    resp = requests.get(UrlListOfSentences + '/page:1')
    if resp.status_code != 200:
        logging.error("Failed to open " + UrlListOfSentences)
        sys.exit(1)
    # how many pages there are in this list
    pagescount = re.findall('/page\:(\d+?)\D', resp.text)
    if pagescount != []:
        pagescount = max([int(x) for x in pagescount])
    else:
        pagescount = 0 # there is no pagination
    logging.debug(resp.text)
    links = re.findall('class="md-icon-button" href="/\w\w\w/sentences/show/(.+?)\"\>', resp.text, re.DOTALL)
    for i in range(len(links)):
        proclink(links[i])
    prCnt = 1 # this is a progress counter (not really necessary but kind of convenient feature)
    for pagescounter in range(2,pagescount + 1):
        urlloop = UrlListOfSentences.rstrip("/") + "/page:" + str(pagescounter)
        resp = requests.get(urlloop)
        if resp.status_code != 200:
            logging.error("Failed to open " + urlloop)
            sys.exit(1)
        links = re.findall('class="md-icon-button" href="/\w\w\w/sentences/show/(.+?)\"\>', resp.text, re.DOTALL)
        for i in range(len(links)):
            proclink(links[i])
        prCnt += 1
        curPrcnt = (100.0*prCnt) / pagescount
        current_percent_completed = str(round(curPrcnt, 3)) + '% completed'
        # NOTE(review): 'title' is a cmd.exe builtin, not an executable, so
        # shutil.which('title') is probably None everywhere — verify on Windows.
        if shutil.which('title'):
            os.system('title ' + current_percent_completed)
        else:
            logging.info(current_percent_completed)
    # copy media files to anki media folder
    for root, dirs, files in os.walk(output_dir):
        for f in files:
            filename = os.path.join(root, f)
            if filename.endswith('.mp3'):
                if copymediafilestoankifolder:
                    shutil.copy2(filename, ankimediafolder)
mainproc()
cfile.close()
| true |
99429e852a0a21e21e5d4bfbe6b6782bd184adf1 | Python | zzg-971030/Learn_ML_in_Python | /算法设计基础/LeetCodeBook/test.py | UTF-8 | 653 | 2.78125 | 3 | [] | no_license | # ! /usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
# Frequency counts and relative frequencies of the sample labels.
ls = ['a', 'a', 'a', 'b', 'b', 'c']
se = pd.Series(ls)
countDcit = dict(se.value_counts())  # NOTE: identifier typo ("Dcit") kept as-is
proprotitionDcit = dict(se.value_counts(normalize=True))  # proportions summing to 1
print(countDcit)
print(proprotitionDcit)
class FenwickTree:
    """Fenwick (binary indexed) tree over 1-based indices 1..n.

    Supports point updates and prefix-sum queries in O(log n).
    Fixes four bugs in the original: ``rnage`` typo, ``=`` instead of
    ``+=`` in update, ``i & -1`` (a no-op mask) instead of ``i & -i``
    in both traversals, and the ``sefl`` typo in query.
    """

    def __init__(self, n):
        """Create a tree covering indices 1..n, all values zero."""
        self._num = [0] * (n + 1)  # slot 0 is unused (1-based tree)

    def update(self, i, delta):
        """Add *delta* to the value stored at index *i*."""
        while i < len(self._num):
            self._num[i] += delta
            i += i & -i  # jump to the next node whose range covers i

    def query(self, i):
        """Return the prefix sum of values at indices 1..i."""
        s = 0
        while i > 0:
            s += self._num[i]
            i -= i & -i  # drop the lowest set bit
        return s
| true |
852a7c70ff7d0e6519b2718eacec7ad94f7ee7a0 | Python | AhmadMamduhh/Data-Mining-Project | /main.py | UTF-8 | 4,956 | 3.640625 | 4 | [] | no_license | import numpy as np
import pandas as pd
from visualizer import Visualizer
# Interactive driver: asks which task (classification / regression /
# clustering) and which algorithm to run, then loads, preprocesses,
# fits, evaluates and visualizes via the project's helper modules.
# ------------Loading the data-------------------
# 1 = classification, 2 = regression and any other number = clustering
method_identifier = int(input('Enter 1 to choose Classification, 2 to choose Regression' +
                              ' or 3 to choose Clustering: '))
if method_identifier == 1:
    dataset = pd.read_csv('wisconsin_breast_cancer.csv')
elif method_identifier == 2:
    dataset = pd.read_csv('diamonds.csv')
elif method_identifier == 3:
    from sklearn.datasets import load_iris
    X = load_iris(return_X_y=False)['data']
    iris = load_iris()  # NOTE(review): second load_iris() call; `iris` looks unused here
# diamonds.csv on kaggle.com for regression
# wisonsin breast cancer.cvs on kaggle.com for classification
# iris.csv for clustering (drop class column)
# --------------Choosing the desired algorithm--------------------
# Choosing classification algorithm
if method_identifier == 1:
    identifier = int(input('Enter 1 to choose KNN, 2 to choose Decision Tree, 3 to choose Naive Bayes' +
                           ', 4 to choose Random Forest or 5 to choose Neural Network: '))
    if identifier == 1:
        algorithm_name = 'KNN'
    elif identifier == 2:
        algorithm_name = 'Decision Tree'
    elif identifier == 3:
        algorithm_name = 'Naive Bayes'
    elif identifier == 4:
        algorithm_name = 'Random Forest'
    else:
        algorithm_name = 'Neural Network'
# Choosing Regression algorithm
elif method_identifier == 2:
    identifier = int(input('Enter 1 to choose Linear Regression, 2 to choose Polynomial Regression,' +
                           ' 3 to choose Decision Tree, 4 to choose KNN Regression, 5 to choose' +
                           ' Random Forest Regressor or 6 to choose Neural Network: '))
    if identifier == 1:
        algorithm_name = 'Linear Regression'
    elif identifier == 2:
        algorithm_name = 'Polynomial Regression'
    elif identifier == 3:
        algorithm_name = 'Decision Tree'
    elif identifier == 4:
        algorithm_name = 'KNN Regression'
    elif identifier == 5:
        algorithm_name = 'Random Forest'
    else:
        algorithm_name = 'Neural Network'
# Choosing Clustering algorithm
else:
    algorithm_name = 'K-Means'
# ---------------------Preprocessing the data---------------------------------
from preprocessor import Preprocessor
preprocess = Preprocessor()
# Cleaning the data
if method_identifier == 3: # Drop missing rows in iris dataset
    X = preprocess.drop_missing(X)
else: # Drop missing rows in diamond or wisconsin breast cancer datasets
    dataset = preprocess.drop_missing(dataset)
if method_identifier == 1:
    X, y = preprocess.dataframe_to_numpy(dataset, 'breast cancer')
    X, y = preprocess.encoding(X, y, 'breast cancer')
elif method_identifier == 2:
    X, y = preprocess.dataframe_to_numpy(dataset, 'diamonds')
    X, y = preprocess.encoding(X, y, 'diamonds')
# Splitting the data into train and test sets
if method_identifier == 1 or method_identifier == 2:
    X_train, X_test, y_train, y_test = preprocess.split_data(X, 0.19, y)
elif method_identifier == 3:
    X_train, X_test = preprocess.split_data(X, test_ratio=0.3)
# Scaling the data
X_train, X_test = preprocess.scaling(X_train, X_test, scale_type='Standard Scaler')
# ----------------------------Classifying the data----------------------------
if method_identifier == 1:
    from classifier import Classifier
    classifier = Classifier(algorithm_name)
    y_predicted = classifier.classify(X_train, y_train, X_test, y_test)
    classifier_accuracy = classifier.get_accuracy(y_test, y_predicted)
    # Visualizing the results
    visualizer = Visualizer()
    visualizer.plot_classifier_regressor(y_test, y_predicted, method_identifier)
    print('The accuracy is: ' + str(classifier_accuracy) + ' %')
    print(algorithm_name)
# ---------------------Applying Regression to the data--------------------------
elif method_identifier == 2:
    from regressor import Regressor
    regressor = Regressor(algorithm_name)
    y_predicted = regressor.predict(X_train, y_train, X_test)
    regressor_score = regressor.get_score(y_test, y_predicted)
    # Visualizing the results
    visualizer = Visualizer()
    visualizer.plot_classifier_regressor(y_test, y_predicted, method_identifier)
    print('The coefficient of determination is: ' + str(regressor_score))
    print(algorithm_name)
# ---------------------Clustering the data------------------------------------
elif method_identifier == 3:
    from clustering import Clustering
    clustering = Clustering(algorithm_name)
    n_clusters, inertia = clustering.tune_parameters(X_train)
    clusters = clustering.cluster(X_train, X_test, n_clusters)
    # Visualizing the results
    visualizer = Visualizer()
    visualizer.plot_clustering(X_test, clusters)
    print("The clustering model's inertia: " + str(inertia))
    print(str(algorithm_name))
| true |
595edd6daa9549db52092be1461b978729d91032 | Python | donboyd5/weighting | /experimental_code/geoweight.py | UTF-8 | 6,780 | 3.078125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
geoweight class.
Created on Sat Aug 29 04:55:45 2020
@author: donbo
"""
import numpy as np
from scipy.optimize import least_squares
from timeit import default_timer as timer
class Geoweight:
    """Class with data and methods for geographic weighting.

    Common terms and definitions:
    h: number of households (tax records, etc.)
    k: number of characteristics each household has (wages, pensions, etc.)
    s: number of states or geographic areas

    xmat: h x k matrix of characteristics for each household
    wh: 1 x h vector of national weights for households
    whs: h x s matrix of state weights for households (to be solved for)
        for each household, the sum of state weights must equal the
        total household weight

    beta: s x k matrix of poisson model coefficients
        (same for all households)
    delta: 1 x h vector of poisson model constants, 1 per household
        these values are uniquely determined by a given set of beta
        coefficients and the wh values
    """

    def __init__(self, wh, xmat, targets=None):
        self.wh = wh
        self.xmat = xmat
        self.targets = targets

    def geosolve(self):
        """Solve for beta coefficients that reproduce ``self.targets``.

        Runs scipy's trust-region least squares on the weighted target
        differences, records elapsed minutes, and derives the optimal
        weights/targets via :meth:`retrieve_values`.
        """
        start = timer()
        # TODO: input checks (targets must be s x k; removed the unused
        # h/k/s locals the original computed here)
        betavec0 = np.zeros(self.targets.size)  # flat start: all coefficients 0
        dw = get_diff_weights(self.targets)
        result = least_squares(targets_diff, betavec0,
                               method='trf', jac='2-point', verbose=2,
                               ftol=1e-10, xtol=1e-10,
                               args=(self.wh, self.xmat, self.targets, dw))
        self.result = result
        end = timer()
        self.elapsed_minutes = (end - start) / 60
        self.retrieve_values()

    def retrieve_values(self):
        """Unpack the optimizer result into beta/delta/weights/targets."""
        self.beta_opt = self.result.x.reshape(self.targets.shape)
        self.delta_opt = get_delta(self.wh, self.beta_opt, self.xmat)
        self.whs_opt = get_weights(self.beta_opt,
                                   self.delta_opt, self.xmat)
        self.targets_opt = get_targets(self.beta_opt,
                                       self.wh, self.xmat)
        self.targets_diff = self.targets_opt - self.targets

    @staticmethod
    def help():
        """Print usage notes.

        (Added @staticmethod: the original took no ``self``, so calling
        ``instance.help()`` raised TypeError.)
        """
        print("\nThe Geoweight class requires the following arguments:",
              "\twh:\t\t\th-length vector of national weights for households",
              "\txmat:\t\th x k matrix of characteristices (data) for households",
              "\ttargets:\ts x k matrix of targets", sep='\n')
        print("\nThe goal of the method geosolve is to find state weights" +
              " that will",
              "hit the targets while ensuring that each household's state",
              "weights sum to its national weight.\n", sep='\n')
def get_delta(wh, beta, xmat):
    """Constants of the poisson weighting model, one per household.

    delta_h = log(wh_h / sum_s exp(beta_s . x_h));
    see (Khitatrakun, Mermin, Francis, 2016, p.5).  Note that
    beta . xmat.T must stay small enough that exp() does not overflow —
    either large beta or large xmat entries can cause that.
    """
    state_sums = np.exp(np.dot(beta, xmat.T)).sum(axis=0)  # length-h column sums
    return np.log(wh / state_sums)
def get_diff_weights(targets, goal=100):
    """Per-target weights for the difference function.

    Each weight scales its target difference so that a matched target
    scores *goal*; zero-valued targets get weight 1 so we never divide
    by zero.  Fully vectorized.
    """
    with np.errstate(divide='ignore'):  # silence the divide-by-zero warning
        scaled = np.full(targets.shape, goal) / targets
    return np.where(targets != 0, scaled, 1)
def get_targets(beta, wh, xmat):
    """Return the s x k matrix of targets implied by *beta*.

    Derives per-household constants with get_delta, turns them into the
    h x s state-weight matrix with get_weights, and aggregates the
    household characteristics with those weights.
    """
    state_weights = get_weights(beta, get_delta(wh, beta, xmat), xmat)
    return np.dot(state_weights.T, xmat)
def get_weights(beta, delta, xmat):
    """Calculate state-specific weights for each household.

    weight[h, s] = exp(beta[s] . xmat[h] + delta[h]);
    see (Khitatrakun, Mermin, Francis, 2016, p.4).

    Parameters
    ----------
    beta : s x k matrix of poisson coefficients.
    delta : h-length vector of per-household constants.
    xmat : h x k matrix of household characteristics.

    Returns
    -------
    h x s matrix of weights.
    """
    # s x h matrix of linear predictors: row i holds sum_k beta[i,k]*x[j,k]
    # for every household j
    linpred = np.dot(beta, xmat.T)
    # add each household's constant to its column, flip to h x s, exponentiate
    return np.exp((linpred + delta).T)
def targets_diff(beta_object, wh, xmat, targets, diff_weights):
    """Weighted difference between calculated and desired targets.

    Parameters
    ----------
    beta_object : either an s*k vector or an s x k matrix of coefficients.
    wh : h-length vector of national household weights.
    xmat : h x k matrix of household characteristics.
    targets : s x k matrix of desired targets.
    diff_weights : s x k matrix of per-target scaling weights.

    Returns
    -------
    Weighted differences, shaped like *beta_object* (flattened when a
    vector was passed in, matrix otherwise).
    """
    # beta must be a matrix, so reshape a flat vector input
    if beta_object.ndim == 1:
        beta = beta_object.reshape(targets.shape)
    elif beta_object.ndim == 2:
        beta = beta_object
    weighted = (get_targets(beta, wh, xmat) - targets) * diff_weights
    return weighted.flatten() if beta_object.ndim == 1 else weighted
| true |
65dc5290938f88bb9c70dc6201fa8efec8ac5dc7 | Python | bhanu566/8-puzzle-A-star | /astar.py | UTF-8 | 7,087 | 3.15625 | 3 | [] | no_license | # ;==================================================================
# ; Title: A* Algorithm using manhattan distance and misplaced tiles
# ; Author: Bhanu Prakash Reddy ,Satabdhi Reddy
# ; Date: 10 Feb 2019
# ;==================================================================
import numpy as np
from copy import deepcopy
import collections
print("enter input by giving spaces example:1 2 3 4 5 6 7 8 0 ")
initial_node = list(map(int,input("Enter Input node:").split())) #input initial node
initial_node = np.array(initial_node)
final_node = list(map(int,input("Enter Output node:").split())) #input final node
final_node = np.array(final_node)
Astarmethod=0 #flag for manhatton or misplaced tiles Astarmethod=0 manhattan 1 for misplaced tiles
# definition to calculate hn using manhattan distance
def manhattan_distance(mlist):
    """Sum of Manhattan distances of each tile from its goal position.

    The blank (0) is ignored.  Goal positions are looked up once in a
    dict instead of rescanning the global ``final_node`` for every tile
    (the original inner loop made this O(n^2)).
    """
    goal_pos = {tile: idx for idx, tile in enumerate(final_node)}
    total = 0
    for idx, tile in enumerate(mlist):
        if tile != 0:
            g = goal_pos[tile]
            total += abs(idx // 3 - g // 3) + abs(idx % 3 - g % 3)
    return total
#definition to calculate hn using misplaced tiles
def misplaced_tiles(mlist):
    """Count tiles (blank excluded) that are not in their goal slot.

    Compares each position of *mlist* against the global ``final_node``.
    """
    return sum(
        1
        for pos, tile in enumerate(mlist)
        if tile != 0 and final_node[pos] != tile
    )
#definition to calculate successor nodes of the node to be expanded
def successorNodes(board):
    """Expand *board* (an open-list record) and append unseen successors.

    One successor is produced per legal slide of the blank; boards already
    present in the open or closed structured arrays are skipped.
    """
    global open_struct_array
    global closed_struct_array
    global nodeid
    # 'pos' lists the blank positions where the move is illegal,
    # 'ind' is the index offset of the tile the blank swaps with
    moves = np.array(
        [
            ([0, 1, 2], -3),
            ([6, 7, 8], 3),
            ([0, 3, 6], -1),
            ([2, 5, 8], 1)
        ],
        dtype=[
            ('pos', list),
            ('ind', int)
        ]
    )
    gn=board[1]+1
    state = board[0]
    loc = int(np.where(state == 0)[0])
    parentid=board[4]
    for m in moves:
        if loc not in m['pos']:
            nodepresent = 0
            succ = deepcopy(state)
            delta_loc = loc + m['ind']
            succ[loc], succ[delta_loc] = succ[delta_loc], succ[loc]
            for i in closed_struct_array: #checking if successor nodes are duplicates
                if(i[0]==succ).all():
                    nodepresent = 1
            for i in open_struct_array: #checking if successor nodes are duplicates
                if (i[0] == succ).all():
                    nodepresent = 1
            if nodepresent == 0:
                #print("inloop")
                if (Astarmethod == 0):
                    hn = manhattan_distance(succ)
                else:
                    hn = misplaced_tiles(succ)
                fn=gn + hn
                nodeid=nodeid+1 #increment value of nodeid for each node genereated
                #appending successor nodes to open_struct_array
                open_struct_array=np.append(open_struct_array, np.array([(succ, gn, hn, fn, nodeid, parentid)], STATE), 0)
#definition to check if the node is final node
def solution(board):
    """Run A* from *board* toward the global ``final_node``.

    Returns (nodes_expanded, nodes_generated) on success, or (0, 0) when
    more than 3000 nodes would be created (treated as "no solution").
    """
    global STATE
    # dtype of one search record: board, g(n), h(n), f(n), id, parent id
    STATE = [
        ('board', list),
        ('gn', int),
        ('hn', int),
        ('fn', int),
        ('nodeid',int),
        ('parentid',int)
    ]
    global open_struct_array
    global closed_struct_array
    global nodeid
    nodeid = 0
    if(Astarmethod==0):
        hn=manhattan_distance(board)
    else:
        hn=misplaced_tiles(board)
    open_struct_array = np.array([(board, 0, hn, 0 + hn, 0, -1)], STATE)
    varran=np.array([0,0,0,0,0,0,0,0,0]) #closed struct array 1 time initialization
    closed_struct_array=np.array([(varran, 0, 0, 0, 0, 0)], STATE)
    closed_struct_array=np.delete(closed_struct_array, 0, 0)
    while True:
        length_queques = len(open_struct_array) + len(closed_struct_array) #checking if total nodes are crossing the threshold value
        if length_queques >3000:
            break
        a=open_struct_array[0]
        s=a[0]
        if (s == final_node).all(): #comparing with final node
            return len(closed_struct_array), nodeid
        open_struct_array = np.delete(open_struct_array, 0, 0)
        closed_struct_array=np.append(closed_struct_array, np.array([(a[0], a[1], a[2], a[3], a[4], a[5])], STATE), 0) #appending expanded node to closed node
        successorNodes(a)
        open_struct_array = np.sort(open_struct_array, kind='mergesort', order=['fn', 'nodeid']) #sorting by f(n), ties broken by node id
    return 0,0
#definition to find the path of the final node
def solutionpath(open_structured_array, closedNode):
    """Reconstruct and print the path from the start board to the goal.

    Follows parent ids backwards from the goal record (front of the open
    list) through the concatenation of the open and closed records.
    """
    storelastelement = open_structured_array[0][0]
    parentidd=open_structured_array[0][5]
    con = np.concatenate((open_structured_array, closedNode), axis=0)
    de = collections.deque([])
    de.append(storelastelement)
    while(parentidd != -1):  # -1 marks the root's parent id
        for i in con:
            if i[4] == parentidd:
                de.appendleft(i[0])
                parentidd = i[5]
                break
    print('cost to reach final_node:',len(de)-1)
    for i in de:
        print(np.reshape(i,(3,3)),'\n')
#definintion to print output using both manhattan distance and misplaced tiles as hn
def main():
    """Validate the boards, then solve with both heuristics and print stats."""
    global open_struct_array
    global closed_struct_array
    global Astarmethod
    # a goal is reachable only if both boards use the same multiset of tiles
    comparearrays = (np.sort(initial_node) == np.sort(final_node)).all() #checking if input is correct
    if not comparearrays:
        print('incorrect input')
        return
    else:
        nodes_expanded,nodes_generated=solution(initial_node) #if correct input find path
        if(nodes_expanded==0 and nodes_generated ==0):
            print('no solution')
            return
        print("--------------------------------A* Manhattan DIstance-------------------------------------------")
        # print(open_struct_array)
        # print(closed_struct_array)
        print('nodes_generated:',nodes_generated)
        print('nodes_expanded',nodes_expanded)
        solutionpath(open_struct_array, closed_struct_array) #finding solution path hn= manhattan distance
        print("-------------------------------A* Misplaced Tiles-------------------------------------------------")
        Astarmethod=1 #set Astarmethod=1
        open_struct_array=[] #empty both open and closed
        closed_struct_array=[]
        nodes_expanded, nodes_generated = solution(initial_node)
        if (nodes_expanded == 0 and nodes_generated == 0): #return 0,0 if no solution
            print('no solution')
            return
        # print(open_struct_array)
        # print(closedNode)
        print('nodes_generated:',nodes_generated)
        print('nodes_expanded',nodes_expanded)
        solutionpath(open_struct_array, closed_struct_array) #finding solution path hn=misplaced tiles

if __name__ == "__main__":
    main()
| true |
0760bb59c8c0e66cedb8343197752d1addd25357 | Python | SpicyKong/problems | /BOJ/Q_11967.py | UTF-8 | 1,554 | 3.0625 | 3 | [] | no_license | # https://www.acmicpc.net/problem/11967 문제 제목 : 불켜기 , 언어 : Python, 날짜 : 2020-01-28, 결과 : 성공
# Post-mortem note: kept failing because of a wrong condition in the
# second while loop (translated from the original Korean comment).
import sys
from collections import deque
# N: grid size, M: number of (room -> light switch) pairs
N, M = map(int, sys.stdin.readline().split())
list_map = [[0]*(N+1) for _ in range(N+1)]     # list_map[y][x]: switches in room (x, y)
list_lights = [[0]*(N+1) for _ in range(N+1)]  # 1 when room (x, y) is lit
count_light = 1  # room (1, 1) starts lit
for _ in range(M):
    x,y,a,b = map(int,sys.stdin.readline().split())
    if not list_map[y][x]:
        list_map[y][x] = [[a,b]]
    else:
        list_map[y][x].append([a,b])
dx = [1, -1, 0, 0]
dy = [0, 0, 1, -1]
list_lights[1][1] = 1
# Repeat BFS from (1, 1) over lit rooms, flipping every reachable switch,
# until a full pass lights no new room (fixed point).
while True:
    list_visit = [[0]*(N+1) for _ in range(N+1)]
    list_visit[1][1] = 1
    save_count = count_light
    list_queue = deque([[1,1]])
    while list_queue:
        now_x, now_y = list_queue.popleft()
        if list_map[now_y][now_x]:
            while list_map[now_y][now_x]:
                light_x, light_y = list_map[now_y][now_x].pop()
                if not list_lights[light_y][light_x]:
                    list_lights[light_y][light_x] = 1
                    count_light+=1
        for i in range(4):
            test_x = dx[i] + now_x
            test_y = dy[i] + now_y
            if 0 < test_x <= N and 0 < test_y <= N:
                # only lit, unvisited neighbors can be walked through
                if list_lights[test_y][test_x] and not list_visit[test_y][test_x]:
                    list_visit[test_y][test_x] = 1
                    list_queue.append([test_x, test_y])
    if save_count == count_light:  # no progress this pass -> done
        break
print(count_light)
| true |
1a0f46ef6f3b3ddf7c27183ce1da53afaef8e80b | Python | rafaelperazzo/programacao-web | /moodledata/vpl_data/117/usersdata/231/26244/submittedfiles/al2.py | UTF-8 | 88 | 2.65625 | 3 | [] | no_license | from __future__ import division
# Split a float into its integer part and fractional remainder.
n = float(input('5.8709'))
i = int(n)
r = n - i  # fixed: original read ``r = n - int`` (subtracting the builtin type)
print('%.2f' % r)  # fixed: original ``print('%.2f'i)`` was a syntax error
4ae70bd8a919c24e36960d74f7703ef63140c1c8 | Python | jickw/my-project | /python/lnh_learn/201811/1103/test.py | UTF-8 | 328 | 3.28125 | 3 | [] | no_license | # -*- coding:utf-8 -*-
"""
__title__ = ''
__author__ = 'wux'
__mtime__ = '2018/10/24'
"""
# lit = [1, "花生", "山药"]
# print(type(lit))
# tu = ("字符串",) # 元素少于一个,需要添加一个逗号
# tu = tuple() # 空元组
# tu = (1, )
# print(type(tu))
tu = ('DNF', 'LOL', 'CF', "斗地主", "消消乐") | true |
67bda472259d4ed203b6830078ab80170dc49842 | Python | Lguyogiro/fst-nahuatl | /tools/generate_verb_bases_from_stem.py | UTF-8 | 4,255 | 2.84375 | 3 | [] | no_license | import re
class VerbalStem(object):
    """A Nahuatl verbal stem plus its derived bases.

    ``present`` is the stem itself, ``imperfect``/``base2``/``base3`` are
    derived by the orthographic rules encoded in the properties below.
    """

    def __init__(self, stem, transitive=False, es=False):
        self.stem = stem
        self.vowels = ["a", "e", "i", "o", "u"]
        self.cons = "bcdfghjklmnpqrstvwxz"
        self.multi_char_cons = ["ch", "tl", "tz"]
        # NOTE(review): compiled pattern appears unused within this class
        self.numVPattern = re.compile("|".join(self.vowels))
        self.n_syl = self.count_syllables()
        self.is_spanish = es      # True when the stem is a Spanish loan
        self.trans = transitive   # affects the '-hua' branch of base2

    def count_syllables(self):
        """Count syllables in the stem (minimum 1).

        NOTE(review): ``find_next_vowel`` returns None as soon as the first
        character of the remaining string is not a vowel, so consonant-initial
        remainders stop the count early — confirm this is intentional.
        """
        num_syl = 0
        search_stem = self.stem
        def find_next_vowel(s):
            # Return the substring after the next vowel (treating 'u'+i/a/e
            # as one nucleus), '' at end of string, or None if none found.
            for i, ch in enumerate(s):
                next_ch = s[i + 1] if i+1 < len(s) else ''
                if ch in self.vowels:
                    vowlen = 1
                    if ch == 'u' and next_ch in 'iae':
                        vowlen += 1
                    char_incr = i + vowlen
                    if char_incr <= len(s):
                        return s[char_incr:]
                    else:
                        return ''
                else:
                    return None
        while True:
            search_stem = find_next_vowel(search_stem)
            if search_stem is not None:
                num_syl += 1
            else:
                return max([1, num_syl])

    @property
    def present(self) -> str:
        """Present-tense base: the stem unchanged."""
        return self.stem

    @property
    def imperfect(self):
        """Imperfect base: '-ia' stems drop the 'i'; otherwise the stem."""
        if self.stem.endswith('ia'):
            return self.stem[:-2] + "a"
        else:
            return self.present

    @property
    def base2(self) -> str:
        """Base 2 (preterite) stem, derived by stem-final shape."""
        if self.n_syl == 1:
            if self.stem.endswith('a'):
                return "{}h".format(self.stem)
            else:
                #
                # for '-i' verbs, the vowel lengthens here, but since neither
                # IDIEZ nor SEP orthographies doesn't represent vowel length,
                # this is just an identity.
                #
                return self.stem
        elif self.stem.endswith('ia'):
            #
            # In Classical Nahuatl, this verbs' base 2 stem would end in 'ih'.
            # In nhi the preterite they go from "...ih#" -> "...e#". I use
            # the multichar symbols {i} and {H} in order to enable this
            # phonological process.
            #
            return format(self.stem[:-2]) + "%{i%}%{H%}"
        elif self.stem.endswith('oa'):
            return self.stem[:-1] + "h"
        elif self.stem.endswith('ca'):
            return self.stem
        elif self.stem.endswith('hua'):
            if self.trans:
                if self.stem[-4] in self.vowels:
                    return "{}uh".format(self.stem[:-3])
                else:
                    return self.stem[:-3]
            else:
                return self.stem
        elif self.stem[-2:] in ('ma', 'mi'):
            return "{}n".format(self.stem[:-2])
        elif self.stem.endswith('ya'):
            return "{}x".format(self.stem[:-2])
        elif self.stem[-1] in ('a', 'i'):
            # check it is preceded by a single consonant
            if self.stem[-3:-1] in self.multi_char_cons:
                return self.stem[:-1]
            elif self.stem[-2] in self.cons and self.stem[-3] in self.vowels:
                return self.stem[:-1]
            else:
                return self.stem
        else:
            return self.stem

    @property
    def base3(self):
        """Base 3 stem: '-oa'/'-ia' stems drop the final vowel."""
        if self.stem[-2:] in ('oa', 'ia'):
            return self.stem[:-1]
        else:
            return self.stem
def generate_stem_lexical_entries(canonical, transitivity='iv'):
    """Build the four lexc-style entries (present, imperfect, base 2, base 3)
    for one verb, tagged with its transitivity."""
    if transitivity not in ('iv', 'tv', 'tv2'):
        raise KeyError("`transitivity` must be either iv (intransitive), tv "
                       "(transitive), or tv2 (bitransitive)")
    verb = VerbalStem(canonical)
    stems = [verb.present, verb.imperfect, verb.base2, verb.base3]
    continuations = ["PresentTense", "Imperfect", "Base2Suffixes", "Base3Suffixes"]
    entries = []
    for stem, continuation in zip(stems, continuations):
        entries.append("{}%<v%>%<{}%>:{}%> {};".format(canonical,
                                                       transitivity,
                                                       stem,
                                                       continuation))
    return entries
| true |
5001e2c0de30e0e6db1705b115fcc6f6d3d754bb | Python | hashin22/pythonProject | /week4/checkEven.py | UTF-8 | 866 | 4.03125 | 4 | [] | no_license | #p.126
def checkEven0dd(n):
    """Return "짝수" (even) when n is divisible by 2, otherwise "홀수" (odd)."""
    return "짝수" if n % 2 == 0 else "홀수"
# Read integers until the user enters 0; classify each one as even or odd.
while True:
    number = int(input("정수를 입력하시오 =")) # prompt: "enter an integer ="
    if number != 0000: # 0000 is just the integer literal 0; any non-zero value is classified
        print(f'{number}는',checkEven0dd(number)) # "는" is the Korean topic marker ("<n> is ...")
        # alternative using str.format:
        # print('{0}은 {1}입니다'.format(number, checkEven0dd(number)))
    else: # the user entered 0
        print('bye bye') # say goodbye and stop looping
        break
    # alternative structure kept from the original author:
    # if number == 0000:
    #     print('bye bye')
    #     break
    # else:
    #     result = checkEven0dd(number)
6047d98bd420e6c390211ed9144e792cc3089036 | Python | BorysekOndrej/bakalarka3 | /app/utils/certificate.py | UTF-8 | 388 | 2.828125 | 3 | [
"MIT"
] | permissive | from OpenSSL import crypto
def certificate_thumbprint(crt_string, digest_type="sha1"):
    """Return the hex fingerprint (e.g. "AB:CD:...") of a PEM certificate string.

    digest_type is any digest name accepted by OpenSSL, e.g. "sha1" or "sha256".
    """
    pem_bytes = crt_string.encode('utf-8')
    certificate = crypto.load_certificate(crypto.FILETYPE_PEM, pem_bytes)
    return certificate.digest(digest_type).decode("utf-8")
5670c59010af79754f2d4ecbab32c2677bc5c99e | Python | kratenko/HIOB-stale | /hiob/hiob_gui.py | UTF-8 | 11,290 | 2.6875 | 3 | [] | no_license | import logging
import transitions
from PIL import ImageTk, Image
import queue
import threading
import argparse
import collections
from PIL.ImageDraw import Draw
# Set up logging
logging.getLogger().setLevel(logging.INFO)
transitions.logger.setLevel(logging.WARN)
logger = logging.getLogger(__name__)
import tkinter as tk
class SGraph(object):
    """Fixed-length scrolling line graph rendered to a PIL image.

    Samples are appended over time; ``None`` entries produce gaps.  The
    image is re-rendered lazily whenever the data changed (``dirty`` flag).
    """
    def __init__(self, min_y=0.0, max_y=1.0, length=10, height=20):
        # buffer of the last `length` samples (None = no sample yet)
        self.store = collections.deque([None] * length)
        self.length = length
        self.min_y = min_y
        self.max_y = max_y
        self.height = height
        self.size = (self.length, height)  # one pixel column per sample
        self.image = None
        self.dirty = True
        self.ylines = []  # y-values drawn as horizontal reference lines
    def append(self, value):
        """Append a sample, dropping the oldest so at most `length` remain."""
        self.store.append(value)
        while len(self.store) > self.length:
            self.store.popleft()
        self.dirty = True
    def create_image(self):
        """(Re-)render the graph into ``self.image`` and clear the dirty flag."""
        im = Image.new("RGB", self.size, "white")
        draw = Draw(im)
        # add horizontal lines to show limits:
        for v in self.ylines:
            ry = 1 - (v - self.min_y) / (self.max_y - self.min_y)
            ry = ry * self.size[1]
            draw.line(((0, ry), (self.size[0], ry)), "green", 1)
        # draw values as connected dots to create a graph
        last_pos = None
        for n, v in enumerate(self.store):
            if v is None:
                last_pos = None
                continue
            ry = 1 - (v - self.min_y) / (self.max_y - self.min_y)
            pos = (n, ry * self.size[1])
            if last_pos is None:
                draw.point(pos, "black")
            else:
                draw.line([last_pos, pos], "black", 1)
            last_pos = pos
        self.image = im
        self.dirty = False
    def get_image(self):
        """Return the rendered image, re-rendering first if data changed."""
        if self.dirty:
            self.create_image()
        return self.image
class ImageLabel(tk.Label):
    """tk.Label that shows a PIL image, keeping a reference to the
    PhotoImage so it is not garbage-collected while displayed."""
    def __init__(self, *args, **kwargs):
        tk.Label.__init__(self, *args, **kwargs)
        self._image = None  # strong reference to the current PhotoImage
    def set_image(self, image):
        """Display *image* (a PIL image) or clear the label when None."""
        if image is None:
            self._image = None
            self['image'] = None
        else:
            self._image = ImageTk.PhotoImage(image)
            self['image'] = self._image
class AppTerminated(Exception):
    """Raised inside the tracker thread when the GUI has been closed."""
    pass
class App:
    """Tk GUI that runs a HIOB tracker in a worker thread.

    The worker never touches Tk directly: it puts ``{widget_key: value}``
    entries on ``self.queue`` and the Tk timer loop applies them.
    """
    def __init__(self, conf):
        self.conf = conf
        self.root = tk.Tk()
        self.root.title("Hiob")
        self.dead = False  # set True when the window closes; stops the worker
        self.queue = queue.Queue()  # worker -> GUI update entries
        self.images = {}  # key -> ImageLabel, updated from queue entries
        self.texts = {}  # key -> tk.Label, updated from queue entries
        self.build_widgets()
        logger.info("starting tracker")
        self.start_tracker()
        logger.info("starting consumer for queue")
        self.consume_loop()
    def build_widgets(self):
        """Create all text labels, image panes and plot panes."""
        self.sample_text = tk.Label(self.root)
        self.sample_text.pack()
        self.texts['sample_text'] = self.sample_text
        self.video_text = tk.Label(self.root)
        self.video_text.pack()
        self.texts['video_text'] = self.video_text
        self.capture_frame = tk.Frame(self.root)
        self.capture_frame.pack()
        self.capture_image = ImageLabel(
            self.capture_frame, text="Capture", compound=tk.BOTTOM)
        self.capture_image.pack(side=tk.LEFT)
        self.images['capture_image'] = self.capture_image
        self.sroi_image = ImageLabel(
            self.capture_frame, text="SROI", compound=tk.BOTTOM)
        self.sroi_image.pack(side=tk.RIGHT)
        self.images['sroi_image'] = self.sroi_image
        self.consolidation_image = ImageLabel(self.root)
        self.consolidation_image.pack()
        self.images['consolidation_image'] = self.consolidation_image
        self.figure_frame = tk.Frame(self.root)
        self.figure_frame.pack()
        self.center_distance_figure = ImageLabel(self.figure_frame)
        self.center_distance_figure.pack(side=tk.LEFT)
        self.images['center_distance_figure'] = self.center_distance_figure
        self.overlap_score_figure = ImageLabel(self.figure_frame)
        self.overlap_score_figure.pack(side=tk.RIGHT)
        self.images['overlap_score_figure'] = self.overlap_score_figure
        self.lost_figure = ImageLabel(self.figure_frame)
        self.lost_figure.pack(side=tk.RIGHT)
        self.images['lost_figure'] = self.lost_figure
        # BUG FIX: a throwaway confidence plotter/plot pair used to be
        # created and packed here and then immediately overwritten below,
        # leaving an orphaned empty label in the layout.
        self.confidence_plotter = SGraph(
            min_y=0, max_y=1.0, length=100, height=100)
        self.confidence_plot = ImageLabel(
            self.figure_frame, text="Confidence", compound=tk.BOTTOM,)
        self.confidence_plot.pack(side=tk.LEFT)
        self.images['confidence_plot'] = self.confidence_plot
        self.distance_plotter = SGraph(
            min_y=0, max_y=100, length=100, height=100)
        self.distance_plotter.ylines = [20]
        self.distance_plot = ImageLabel(
            self.figure_frame, text="Distance", compound=tk.BOTTOM,)
        self.distance_plot.pack(side=tk.LEFT)
        self.images['distance_plot'] = self.distance_plot
        self.overlap_plotter = SGraph(
            min_y=0, max_y=1.0, length=100, height=100)
        self.overlap_plot = ImageLabel(
            self.figure_frame, text="Overlap", compound=tk.BOTTOM,)
        self.overlap_plot.pack(side=tk.LEFT)
        self.images['overlap_plot'] = self.overlap_plot
        self.lost_plotter = SGraph(
            min_y=0.0, max_y=3.0, length=100, height=100)
        self.lost_plot = ImageLabel(
            self.figure_frame, text="Lost", compound=tk.BOTTOM,)
        self.lost_plot.pack(side=tk.LEFT)
        self.images['lost_plot'] = self.lost_plot
    def consume_entry(self, entry):
        """Apply one queue entry: route values to image/text widgets by key."""
        for k, v in entry.items():
            if k in self.images:
                self.images[k].set_image(v)
            elif k in self.texts:
                self.texts[k]['text'] = v
    def consume_loop(self):
        """Drain all pending queue entries, then re-schedule on the Tk timer."""
        while True:
            try:
                entry = self.queue.get_nowait()
                self.consume_entry(entry)
            except queue.Empty:
                break
        self.root.after(10, self.consume_loop)
    def feed_queue(self, entry):
        """Hand a GUI update entry from the worker thread to the Tk loop."""
        self.queue.put(entry)
    def start_tracker(self):
        """Launch the tracking worker thread."""
        self.tracker_thread = threading.Thread(target=self.tracker_fun)
        self.tracker_thread.start()
    def verify_running(self):
        """Raise AppTerminated once the GUI has been closed."""
        if self.dead:
            raise AppTerminated()
    def tracker_one(self, tracker, sample):
        """Run feature selection, consolidator training and tracking for one
        sample, feeding progress images/plots to the GUI queue."""
        tracking = tracker.start_tracking_sample(
            sample)
        # feature selection:
        tracking.start_feature_selection()
        sample = tracking.sample
        self.feed_queue(
            {'sroi_image': tracking.get_frame_sroi_image(decorations=True),
             'capture_image': tracking.get_frame_capture_image(),
             'sample_text': "Sample %s/%s, Attributes: %s" % (
                 sample.set_name, sample.name, ', '.join(sample.attributes)),
             'video_text': "Frame #%04d/%04d" % (1, sample.actual_frames),
             })
        while not tracking.feature_selection_done():
            self.verify_running()
            tracking.feature_selection_step()
        tracking.finish_feature_selection()
        # consolidator training:
        tracking.start_consolidator_training()
        while not tracking.consolidator_training_done():
            self.verify_running()
            tracking.consolidator_training_step()
            logger.info("COST: %f", tracking.consolidator_training_cost())
        tracking.finish_consolidator_training()
        # add threshold lines to confidence plotter:
        confidence_lines = []
        if tracking.tracker.consolidator.update_threshold:
            confidence_lines.append(
                tracking.tracker.consolidator.update_threshold)
        if tracking.tracker.consolidator.update_lower_threshold:
            confidence_lines.append(
                tracking.tracker.consolidator.update_lower_threshold)
        self.confidence_plotter.ylines = confidence_lines
        # tracking:
        tracking.start_tracking()
        while tracking.frames_left():
            self.verify_running()
            tracking.tracking_step()
            sample = tracking.sample
            cf = tracking.current_frame_number
            fr = tracking.current_frame.result
            self.confidence_plotter.append(
                tracking.current_frame.prediction_quality)
            self.distance_plotter.append(fr['center_distance'])
            self.overlap_plotter.append(fr['overlap_score'])
            self.lost_plotter.append(fr['lost'])
            entry = {
                'capture_image': tracking.get_frame_capture_image(),
                'sroi_image': tracking.get_frame_sroi_image(),
                'sample_text': "Sample %s/%s, Attributes: %s" % (
                    sample.set_name, sample.name, ', '.join(sample.attributes)),
                'video_text': "Frame #%04d/%04d" % (cf, sample.actual_frames),
                'consolidation_image': tracking.get_frame_consolidation_images()['single'],
                'confidence_plot': self.confidence_plotter.get_image(),
                'distance_plot': self.distance_plotter.get_image(),
                'overlap_plot': self.overlap_plotter.get_image(),
                'lost_plot': self.lost_plotter.get_image(),
            }
            self.feed_queue(entry)
        tracking.finish_tracking()
        return tracking
    def tracker_fun(self):
        """Worker-thread body: track every sample, stopping early when the
        GUI is closed (AppTerminated)."""
        from hiob.tracker import Tracker
        tracker = Tracker(self.conf)
        tracker.setup_environment()
        try:
            with tracker.setup_session():
                for sample in tracker.samples:
                    sample.load()
                    tracking = self.tracker_one(tracker, sample)
                    tracker.evaluate_tracking(tracking)
                    sample.unload()
                tracker.evaluate_tracker()
        except AppTerminated:
            logger.info("App terminated, ending tracker thread early.")
        logger.info("Leaving tracker thread")
    def run(self):
        """Run the Tk main loop; on exit, signal and join the worker thread."""
        self.root.mainloop()
        self.dead = True
        if self.tracker_thread:
            self.tracker_thread.join()
# Entry point: parse CLI paths, build the configuration, launch the Tk app.
if __name__ == '__main__':
    # parse arguments:
    logger.info("Parsing command line arguments")
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--environment')
    parser.add_argument('-t', '--tracker')
    args = parser.parse_args()
    # create Configurator (note: imported here, not at module top)
    from hiob.configuration import Configurator
    logger.info("Creating configurator object")
    conf = Configurator(
        environment_path=args.environment,
        tracker_path=args.tracker,
    )
    # execute gui app and run tracking
    logger.info("Initiate tracking process in gui app")
    app = App(conf)
    app.run()
| true |
66ed1bf56cba96874ca8a1d76fc606e08fde83d7 | Python | jorgemira/codewars | /katas/4kyu/kata22.py | UTF-8 | 395 | 3.1875 | 3 | [] | no_license | """
Codewars 4 kyu kata: Pyramid Slide Down
URL: https://www.codewars.com/kata/551f23362ff852e2ab000037/python
"""
def longest_slide_down(pyramid):
    """Return the maximal top-to-bottom sum through the pyramid.

    Classic bottom-up dynamic programming: fold each row into the best
    achievable totals of the row below it.

    Fixes over the original: works on Python 3 (the original used
    ``xrange``) and handles a single-row pyramid (the original referenced
    an unbound variable in that case).
    """
    best = list(pyramid[-1])
    for row in reversed(pyramid[:-1]):
        best = [value + max(best[i], best[i + 1])
                for i, value in enumerate(row)]
    return best[0]
| true |
82d664fc3cc7da5a069be3e62d1f6d8d9f704074 | Python | dly667/PythonSmallModule | /separationExcel/new.py | UTF-8 | 4,135 | 2.671875 | 3 | [] | no_license | import xlrd
import time
import xlwt
from xlutils.filter import process, XLRDReader, XLWTWriter
import win32com.client
class Transform(object):
    """Split a multi-sheet Excel workbook (数据格式表.xlsx) into one workbook
    per sales person, keeping only that person's rows in each sheet."""
    def __init__(self):
        # {sale_name: {sheet_name: [1-based Excel rows NOT belonging to that sale]}}
        self.row_col_dict = dict()
        pass
    def read(self):
        """Load every sheet into self.temp_book and collect the distinct
        sales-person names from the monthly summary sheet."""
        workbook = xlrd.open_workbook(u'数据格式表.xlsx',on_demand=True)
        sheet_names = workbook.sheet_names()
        self.temp_book = list()
        for sheet_name in sheet_names:
            sheet2 = workbook.sheet_by_name(sheet_name)
            print(sheet_name)
            # total number of rows in this sheet
            nrows = sheet2.nrows
            # collect the sales-person column (index 12) of the summary sheet
            if sheet_name == "交易月综合表":
                cols = sheet2.col_values(12)
                self.saleNameList = list(set(cols))
                self.saleNameList.remove("所属销售")
                self.saleNameList.remove("")
            temp_sheet = list()
            for row_num in range(2,nrows-1):
                rows = sheet2.row_values(row_num) # contents of one data row
                temp_sheet.append(rows)
            # cols = sheet2.col_values(1) # (unused) contents of the second column
            self.temp_book.append({"sheet_name":sheet_name,
                                   "second_line":sheet2.row_values(1),
                                   "last_line": sheet2.row_values(nrows-1),
                                   "data":temp_sheet})
    def deal_with(self):
        # placeholder, not implemented
        pass
    def comprehensive_table(self):
        # placeholder, not implemented
        new_file = dict()
        for saleName in self.saleNameList:
            pass
    def detail_table(self):
        pass
    # Extract all rows belonging to the given sales person, deleting the
    # other rows from a copy of the workbook saved as <saleName>.xlsx.
    def getBookBySaleName(self,saleName):
        new_book = list()
        self.row_col_dict[saleName] = {}
        # NOTE(review): absolute Windows path is hard-coded here.
        book = win32com.client.Dispatch('Excel.Application').Workbooks.Open(r'G:\python\PythonSmallModule\separationExcel\数据格式表.xlsx')
        for sheet in self.temp_book:
            sht = book.Worksheets(sheet["sheet_name"])
            print(sht)
            self.row_col_dict[saleName][sheet["sheet_name"]] = []
            if sheet["sheet_name"] == "交易月综合表":
                new_data = list()
                temp = None
                # Repeatedly delete the first non-matching Excel row; +3 maps
                # the 0-based data index to the 1-based sheet row (2 header
                # rows were skipped by read()).
                # NOTE(review): `temp` is never reset to None inside the
                # while-loop, so after the first deletion every pass breaks
                # at index 0 and deletes the same row number again — verify
                # this terminates as intended on real data.
                while True:
                    for index,row in enumerate( sheet["data"]):
                        if row[12] == saleName:
                            new_data.append(row)
                        else:
                            # record the row numbers that do NOT match this sale
                            self.row_col_dict[saleName][sheet["sheet_name"]].append(index + 3)
                            # delete a single row
                            # print("Rows",sht.Rows(i))
                            # time.sleep(0.1)
                            temp = index+3
                        if temp :
                            break
                    if temp is None:
                        break
                    sht.Rows(temp).Delete()
                new_book.append(
                    {'sheet_name': sheet["sheet_name"], 'second_line': sheet["second_line"], "data": new_data})
                #break
            else:
                new_data = list()
                for index,row in enumerate( sheet["data"]):
                    if row[13] == saleName:
                        new_data.append(row)
                    # record the row numbers that do NOT match this sale
                    else:
                        self.row_col_dict[saleName][sheet["sheet_name"]].append(index+3)
                new_book.append(
                    {'sheet_name': sheet["sheet_name"], 'second_line': sheet["second_line"], "data": new_data})
        book.SaveAs(r'G:\python\PythonSmallModule\separationExcel\{}.xlsx'.format(saleName))
        return new_book
    def deleteRow(self,sn,sheet,number):
        # placeholder, not implemented
        pass
    def main(self):
        """Read the source workbook, then emit one workbook per sales person."""
        self.read()
        # print(self.saleNameList)
        for sn in self.saleNameList:
            self.getBookBySaleName(sn)
            # self.deleteRow(sn)
        # print(self.row_col_dict)
        # self.deal_with()
# Script entry point.
if __name__ == '__main__':
    Transform().main()
ff9f1b4931f14d4ec693a2985ba2f95ec2b87e7f | Python | JettChenT/realthink | /MidC/board/clock.py | UTF-8 | 4,736 | 2.609375 | 3 | [
"MIT"
] | permissive | import pygame,time,sys,datetime
# Window and digit geometry (pixels).
SCREEN_WIDTH,SCREEN_HEIGHT=136,44
ETLED_WIDTH, ETLED_HEIGHT = 34,44
DOT_R = 3
STROKE_WIDTH,STROKE_HEIGHT = 16,12
# Seven-segment bitmasks: indexes 0-9 are the digits; bit 7 (0x80) lights
# the colon dots, and Clock reads digit-with-colon glyphs via `digit + 14`
# (indexes 14-23 repeat 0-9 with bit 7 set).  Indexes 10-13 appear to be
# extra glyphs that are unused here — confirm before removing.
DIGITAL_MASK=[0b00111111,0b00000110,0b01011011,0b01001111,0b01100110,0b01101101,0b01111101,0b01111111,\
              0b00000111,0b01101111,0b01110111,0b01111100,0b00111001,0b01011110,0b10111111,0b10000110,\
              0b11011011,0b11001111,0b11100110,0b11101101,0b11111101,0b11111111,0b10000111,0b11101111,0b11110111,0b11111100,\
              0b10111001,0b11011110,0b11111001,0b11110001,0b11011110]
# Today's date split into year / month / day integers.
# NOTE(review): these module-level values are never used below — Clock.draw
# re-queries datetime on every frame.
date = str(datetime.date.today()).split('-')
y = int(date[0])
month = int(date[1])
day = int(date[2])
class CLS_dgtled(object):
    """One seven-segment LED digit (plus colon dots) drawn with pygame."""
    def __init__(self,x,y):
        # position and size of the digit tile
        self.x,self.y = x,y
        self.w,self.h = ETLED_WIDTH,ETLED_HEIGHT
        # Top-left offsets of the seven segments and the colon-dot anchor.
        self.posList = [(6,2),(21,7),(21,24),(6,36),(2,24),(2,7),(6,19),(29,39)]
    def draw(self,scr,mark):
        """Draw the digit; *mark* is a bitmask — bit i lights posList[i]."""
        pygame.draw.rect(scr,(0,0,180),(self.x,self.y,self.w,self.h))
        bit= 1
        for i in range(8):
            c = (0,0,240)  # colour of an unlit segment
            x0,y0 = self.x+self.posList[i][0],self.y+self.posList[i][1]
            x1, y1 = x0+STROKE_WIDTH,y0+STROKE_HEIGHT
            if mark & bit == bit:
                c = (240,240,240)  # colour of a lit segment
            bit *= 2
            if i in (0,3,6):
                # horizontal segments (top, bottom, middle)
                pygame.draw.polygon(scr,c,[(x0,y0+2),(x0+2,y0),(x1-2,y0),(x1,y0+2),\
                                           (x1,y0+3),(x1-2,y0+5),(x0+2,y0+5),(x0,y0+3)])
            elif i==7:
                # colon: two dots right of the digit
                pygame.draw.circle(scr,c,(x0,y0-5),DOT_R,0)
                pygame.draw.circle(scr,c,(x0,y0-30),DOT_R,0)
            else:
                # vertical segments
                pygame.draw.polygon(scr,c,
                                    [(x0+3,y0),(x0+5,y0+2),\
                                     (x0+2,y1),(x0,y1-10),\
                                     (x0,y0+2),(x0+2,y0)])
# Four-digit clock cycling between month/day, hour/minute, year and seconds views.
class Clock(object):
    def __init__(self,clock,x,y):
        self.clock = clock  # pygame.time.Clock used to cap the frame rate
        # Four seven-segment digits laid out left to right from (x, y).
        self.dgt0 = CLS_dgtled(x,y)
        self.dgt1 = CLS_dgtled(x+ETLED_WIDTH,y)
        self.dgt2 = CLS_dgtled(x+ETLED_WIDTH*2,y)
        self.dgt3 = CLS_dgtled(x+ETLED_WIDTH*3,y)
        self.startsec = datetime.datetime.now().second
        # pFlag: decimal-point bit (0 or 128); switch: display mode 0-3;
        # shine: per-second blink state for the colon digit.
        self.pFlag,self.num,self.switch,self.shine = 0,0,0,1
        self.lastKT=self.startsec  # second at which the mode last changed
    def draw(self,scr):
        """Render the current mode onto *scr*; also flips the display and caps FPS."""
        pygame.display.update()
        pFlag = self.pFlag
        now = datetime.datetime.now()
        y = now.year
        m = now.month
        d = now.day
        h = now.hour
        s = now.second
        minute = now.minute
        # Blink once per second: colon on during even seconds, off during odd.
        if (s-self.startsec)%2==0:
            self.shine=1
        else:
            self.shine=0
        if self.switch == 0:
            # Month/day view; falls back to the time view after 5 seconds.
            self.dgt0.draw(scr,DIGITAL_MASK[m//10%10]+pFlag)
            self.dgt1.draw(scr,DIGITAL_MASK[m%10+self.shine*14]+pFlag)
            self.dgt2.draw(scr,DIGITAL_MASK[d//10%10]+pFlag)
            self.dgt3.draw(scr,DIGITAL_MASK[d%10]+pFlag)
            if (s-self.lastKT)!=0 and (s-self.lastKT)%5==0:
                self.switch=1
        elif self.switch == 1:
            # Hour/minute view (the default mode).
            self.dgt0.draw(scr,DIGITAL_MASK[h//10%10]+pFlag)
            self.dgt1.draw(scr,DIGITAL_MASK[h%10+self.shine*14]+pFlag)
            self.dgt2.draw(scr,DIGITAL_MASK[minute//10%10]+pFlag)
            self.dgt3.draw(scr,DIGITAL_MASK[minute%10]+pFlag)
        elif self.switch == 2:
            # Year view; falls back to the time view after 5 seconds.
            self.dgt0.draw(scr,DIGITAL_MASK[y//1000%10]+pFlag)
            self.dgt1.draw(scr,DIGITAL_MASK[y//100%10+self.shine*14]+pFlag)
            self.dgt2.draw(scr,DIGITAL_MASK[y//10%10]+pFlag)
            self.dgt3.draw(scr,DIGITAL_MASK[y%10]+pFlag)
            if (s-self.lastKT)!=0 and (s-self.lastKT)%5==0:
                self.switch=1
        elif self.switch == 3:
            # Seconds view ("00:SS"); falls back to the time view after 5 seconds.
            self.dgt0.draw(scr,DIGITAL_MASK[0]+pFlag)
            self.dgt1.draw(scr,DIGITAL_MASK[0+self.shine*14]+pFlag)
            self.dgt2.draw(scr,DIGITAL_MASK[s//10%10]+pFlag)
            self.dgt3.draw(scr,DIGITAL_MASK[s%10]+pFlag)
            if (s-self.lastKT)!=0 and (s-self.lastKT)%5==0:
                self.switch=1
        # NOTE(review): uses the module-level `clock`, not self.clock (the
        # main loop below passes the same object, so they coincide).
        clock.tick(60)
    def event_key(self,event):
        """Handle keyboard events ('.' toggles dots, 's' cycles modes) and quit."""
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_PERIOD:
                # BUG FIX: was `pFlag = 128-pFlag`, which raised
                # UnboundLocalError instead of toggling the instance state.
                self.pFlag = 128 - self.pFlag
            elif event.key == pygame.K_s:
                self.switch= (self.switch+1)%4
                self.lastKT = datetime.datetime.now().second
        if event.type == pygame.QUIT:
            pygame.quit()
            # BUG FIX: `sys.quit()` does not exist; exit the process properly.
            sys.exit()
# main: open the window and run the draw/event loop forever.
pygame.init()
screen = pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))
pygame.display.set_caption('DIGITAL LED')
clock = pygame.time.Clock()
clk = Clock(clock,0,0)
while True:
    clk.draw(screen)
    for event in pygame.event.get():
        clk.event_key(event)
8090147108307b6b51e2c238eb99c95733ebc444 | Python | RomekRJM/Playground | /assembly/6502/nes/mario_moving/mem_map.py | UTF-8 | 1,682 | 3.078125 | 3 | [] | no_license | import os
class VariableEnumerator():
    """Assigns consecutive zero-page addresses (starting at $10) to
    ``.define`` symbols and rewrites the source files accordingly."""

    def __init__(self):
        self.counter = 0x10  # next free address to hand out

    def enumerate_variables_in_file(self, file_name):
        """Rewrite every ``.define NAME ADDR`` line in *file_name*, replacing
        ADDR with the next free address, and return [(hex_address, name), ...].

        BUG FIX: the original seeked backwards on a text-mode file
        (``seek(-len(line), 1)``), which raises ``io.UnsupportedOperation``
        on Python 3 and silently assumed equal-length replacements.  The
        file is now read, patched in memory, and written back whole.
        """
        variables = []
        with open(file_name, 'r') as source:
            lines = source.readlines()
        modified = False
        for index, line in enumerate(lines):
            if line.startswith('.define'):
                _, name, addr = line.split()
                # Substitute the old address with the next free one.
                lines[index] = line.replace(addr, self.__hex_counter_in6502format())
                variables.append((self.__hex_counter(), name))
                self.counter += 1
                modified = True
        if modified:
            with open(file_name, 'w') as source:
                source.writelines(lines)
        return variables

    def __hex_counter(self):
        # Hex digits of the counter without the leading '0x'.
        return hex(self.counter).split('x')[1]

    def __hex_counter_in6502format(self):
        # 6502 assembler notation: '$' plus at least two hex digits.
        return '${}'.format(self.__hex_counter().zfill(2))
def enumerate_all_variables(dir):
    """Renumber the variables of every .asm file under *dir* (bottom-up walk)
    and return the combined [(hex_address, name), ...] list."""
    enumerator = VariableEnumerator()
    collected = []
    for root, _dirs, names in os.walk(dir, topdown=False):
        for name in names:
            if name.endswith(".asm"):
                path = os.path.join(root, name)
                collected.extend(enumerator.enumerate_variables_in_file(path))
    return collected
def save_to_map_file(file_name, variables, pad_to=24):
    """Write one ``<address> <name>`` line per variable to *file_name*,
    padding with ``| |`` filler lines until at least *pad_to* lines exist.

    ``pad_to`` generalizes the previously hard-coded 24-row layout and
    defaults to the old value, so existing callers are unaffected.
    """
    with open(file_name, 'w') as destination:
        written = 0
        for address, name in variables:
            destination.write('{} {}\n'.format(address, name))
            written += 1
        # Pad the remainder so the file always has at least pad_to rows.
        while written < pad_to:
            destination.write('| |\n')
            written += 1
if __name__ == '__main__':
    # Keep only the newest 24 variables — the map file holds 24 rows.
    num_to_display = 24
    variables = enumerate_all_variables('.')
    save_to_map_file('nesdemia.mem.txt', variables[-num_to_display:])
| true |
fb7c00a666c61bd71219a9286e1094e41eb10a00 | Python | ayy-em/psd-chatbot | /lots_of_strings.py | UTF-8 | 4,694 | 3.265625 | 3 | [] | no_license | import random
import string
# lots of text here, functions at the bottom
# Greetings recognized by check_contains_hi() and echoed by get_hi().
hi_msg = [
    "hi",
    "hello",
    "yo",
    "wassup",
    "what's up",
    "how you doin'?",
    "bonjour",
    "greetings",
    "wassup yoo",
    "good morning",
    "hey",
    "heya",
    "sup",
    "wazzup",
    "yoo",
    "yooo"
]
# Reply to the /start command, returned by get_start_msg().
reply_start = "Hello there! I can help you do a lot of things.\n\n/weather - shows up-to-date weather in Amsterdam.\n/fact - get a random fun fact!\n/batavia - get Cafe Batavia menu.\nYou can also just talk to me, but i'm pretty stupid.\n\nMake sure to check out the channels i run: @vice_news (English), @adam24live (Russian), @ayy_maps (Russian)\n\nAnd definitely visit somethingreally.fun (permanent work in progress)! \nCheers."
# Fallback replies used by get_reply_string() for unrecognized messages.
reply_unknown = [
    "I have no idea what you mean.",
    "What?",
    "I don't get it",
    "I must be stupid because I don't understand you",
    "I'm sorry, what?",
    "wut",
    "You're confusing me.. What am I supposed to do?"
]
# Fun facts served by get_reply_fact().
reply_fun_fact = [
    "Most elephants weigh less than a blue whale's tongue!",
    "Pineapples used to be so expensive that people would rent them as a centrepiece for their party.",
    "Scotland's national animal is a unicorn.",
    "A single strand of spaghetti is called a spaghetto.",
    "At birth, a baby panda is smaller than a mouse.",
    "Violin bows are usually made from horse hair.",
    "The colour red doesn't make bulls angry; they are colourblind.",
    "It snows metal on planet Venus.",
    "Bees tell their friends about good nearby flowers by dancing.",
    "Kangaroos can't walk backwards.",
    "In Switzerland, it's illegal to own just one guinea pig; if you have any, you have to have at least two. They get lonely!",
    "Otters have skin pockets for their favorite rocks.",
    "When a bee is chosen to be the new queen, they are given a special type of honey that completely changes their bodies. Kind of like how a Pokemon evolves.",
    "Butterflies smell with their feet.",
    "There are more stars than there are grains of sand on all the beaches in the world!",
    "Cows can walk up stairs, but they can't walk down.",
    "The surface of Mars is covered in rust, making the planet appear red.",
    "Cows have best friends and get stressed when separated.",
    "It takes a little over 8 minutes for the light from the Sun to get to earth.",
    "Hippopotamus milk is pink.",
    "Don't eat too many carrots or your skin will turn orange.",
    "Humans are bioluminescent and glow in the dark, but the light that we emit is 1,000 times weaker than our human eyes are able to pick up.",
    "Owls cannot be choked.",
    "The filling in a Kit Kat is broken up Kit Kat's.",
    "Giraffe tongues are black.",
    "Dogs can tell when you're coming home by how much of your scent is left in the house if you have a daily routine.",
    "Making pennies cost more than their actual value.",
    "Lobsters were considered disgusting and low-class food, to the point that feeding them to prisoners too often was considered cruel and unusual punishment.",
    "There are more ways to arrange a deck of cards than there are stars in our galaxy!",
    "If you keep a goldfish in a dark room it will turn white.",
    "There were wooly mammoths on the planet when the Pyramids were being built.",
    "J.K. Rowling is richer than the Queen.",
    "There is only one string in a tennis racquet.",
    "A blue whale's heart is as big as a Volkswagen Beetle.",
    "Oxford University is older than the Aztec empire."
]
# Process an incoming message (called from reply.py) and build the reply text.
def get_reply_string(reply_to_check):
    """Greet back when the message is a greeting, otherwise answer with a
    random "I don't understand" line."""
    if check_contains_hi(reply_to_check):
        return get_hi()
    return random.choice(reply_unknown)
def get_reply_fact():
    """Return one randomly chosen fun fact string."""
    return random.choice(reply_fun_fact)
def get_start_msg():
    """Return this bot's /start welcome message."""
    return str(reply_start)
def check_contains_hi(msg):
    """True when msg — minus surrounding punctuation, case-insensitive —
    is one of the known greetings."""
    normalized = msg.strip(string.punctuation).lower()
    return normalized in hi_msg
def get_hi():
    """Return a random greeting from hi_msg, capitalized."""
    return random.choice(hi_msg).capitalize()
| true |
7b35a4f75a561be663e57e196e1cd8fb7c7b51b5 | Python | EelcoHoogendoorn/Numpy_arraysetops_EP | /numpy_indexed/semantics.py | UTF-8 | 687 | 2.71875 | 3 | [
"MIT"
] | permissive | """
This toggle switches between preferred or backwards compatible semantics for dealing with key objects.
The behavior of numpy with respect to arguments to functions like np.unique is to flatten any input arrays.
Arguably, a more unified semantics is achieved by interpreting all key arguments as sequences of key objects,
whereby multi-dimensional arrays are simply interpreted as sequences of (complex) keys.
For reasons of backwards compatibility, one may prefer the same semantics as numpy 1.x though
"""
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
# Toggle for numpy-1.x-compatible flattening semantics (see module docstring).
backwards_compatible = False

# Preferred semantics treat a key argument as a sequence along axis 0;
# the backwards-compatible mode flattens inputs, as numpy does (axis=None).
axis_default = None if backwards_compatible else 0
| true |
0fc850d94af0d046ec9c307de3d0a7827638f8eb | Python | nats-io/nats.py | /examples/jetstream.py | UTF-8 | 1,887 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | import asyncio
import nats
from nats.errors import TimeoutError
async def main():
    """End-to-end JetStream demo: publish, pull/push consume, queue groups
    and an ordered consumer.  Requires a nats-server with JetStream enabled
    on localhost."""
    nc = await nats.connect("localhost")
    # Create JetStream context.
    js = nc.jetstream()
    # Persist messages on 'foo's subject.
    await js.add_stream(name="sample-stream", subjects=["foo"])
    for i in range(0, 10):
        ack = await js.publish("foo", f"hello world: {i}".encode())
        print(ack)
    # Create pull based consumer on 'foo'.
    psub = await js.pull_subscribe("foo", "psub")
    # Fetch and ack messagess from consumer.
    for i in range(0, 10):
        msgs = await psub.fetch(1)
        for msg in msgs:
            print(msg)
    # Create single ephemeral push based subscriber.
    sub = await js.subscribe("foo")
    msg = await sub.next_msg()
    await msg.ack()
    # Create single push based subscriber that is durable across restarts.
    sub = await js.subscribe("foo", durable="myapp")
    msg = await sub.next_msg()
    await msg.ack()
    # Create deliver group that will be have load balanced messages.
    async def qsub_a(msg):
        print("QSUB A:", msg)
        await msg.ack()
    async def qsub_b(msg):
        print("QSUB B:", msg)
        await msg.ack()
    await js.subscribe("foo", "workers", cb=qsub_a)
    await js.subscribe("foo", "workers", cb=qsub_b)
    for i in range(0, 10):
        ack = await js.publish("foo", f"hello world: {i}".encode())
        print("\t", ack)
    # Create ordered consumer with flow control and heartbeats
    # that auto resumes on failures.
    osub = await js.subscribe("foo", ordered_consumer=True)
    data = bytearray()
    while True:
        try:
            msg = await osub.next_msg()
            data.extend(msg.data)
        except TimeoutError:
            # No message arrived within the timeout: the stream is drained.
            break
    print("All data in stream:", len(data))
    await nc.close()
# Run the example inside a fresh asyncio event loop.
if __name__ == '__main__':
    asyncio.run(main())
| true |
69440ed6d5eff5355ec91aa87af7d140d54cd4b0 | Python | nabeen/AtCoder | /abc/abc016/a.py | UTF-8 | 324 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# https://atcoder.jp/contests/abc016/tasks/abc016_1
def main() -> None:
    """Read M and D from stdin and print whether M is divisible by D."""
    m, d = map(int, input().split())
    print(calc(m, d))
def calc(m: int, d: int) -> str:
    """Return "YES" when m is a multiple of d, otherwise "NO"."""
    return "YES" if m % d == 0 else "NO"
# Script entry point.
if __name__ == '__main__':
    main()
| true |
7175d070aca9bdcef0de736e6415a61b6005dad6 | Python | stoiver/anuga_core | /anuga/operators/mannings_operator.py | UTF-8 | 2,123 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive |
__author__="steve"
__date__ ="$11/11/2011 1:52:17 PM$"
from anuga.operators.base_operator import Operator
import numpy as num
class Mannings_operator(Operator):
    """
    Operator applying Manning friction to the momentum quantities via the
    semi-implicit update

    d/dt uh = -g nu^2 uh sqrt( uh^2 + vh^2 ) / h^{7/3}
    d/dt vh = -g nu^2 vh sqrt( uh^2 + vh^2 ) / h^{7/3}

    where nu is the Manning friction coefficient and h the water height.
    The centroid arrays (stage_c, elev_c, xmom_c, ymom_c) are presumably
    provided by the Operator base class — confirm against base_operator.
    """
    def __init__(self, domain, verbose=False):
        # NOTE(review): `log` is not imported in this module, so
        # verbose=True would raise NameError here.
        if verbose: log.critical('Mannings Operator: Beginning Initialisation')
        Operator.__init__(self,domain)
        # Scratch arrays shaped like the centroid stage values.
        self.gamma_c = num.zeros_like(self.stage_c)
        self.height_c = num.zeros_like(self.stage_c)
        self.friction_c = self.domain.quantities['friction'].centroid_values
        self.g = self.domain.g
        # Track the extreme decay factors seen between statistics calls.
        self.exp_gamma_max = 0.0
        self.exp_gamma_min = 1.0
        if verbose: log.critical('Mannings Operator: Initialisation Done')
    def __call__(self):
        """Apply one semi-implicit friction step: scale momenta by exp(gamma*dt)."""
        timestep = self.domain.get_timestep()
        self.height_c[:] = self.stage_c - self.elev_c
        self.gamma_c[:] = -self.g * self.friction_c**2 * num.sqrt( self.xmom_c**2 + self.ymom_c**2 )
        # Guard against non-positive heights: use a large negative decay rate.
        self.gamma_c[:] = num.where(self.height_c > 0.0, self.gamma_c/num.power(self.height_c,7.0/3.0),-100.0)
        exp_gamma = num.exp(self.gamma_c*timestep)
        self.exp_gamma_max = max(self.exp_gamma_max,num.max(exp_gamma))
        self.exp_gamma_min = min(self.exp_gamma_min,num.min(exp_gamma))
        #print "Mannings: ",exp_gamma_max,exp_gamma_min
        self.xmom_c[:] = exp_gamma*self.xmom_c
        self.ymom_c[:] = exp_gamma*self.ymom_c
    def parallel_safe(self):
        """
        This operator only works with centroid values of quantities
        and so is parallel safe.
        """
        return True
    def statistics(self):
        message = 'Manning Operator'
        return message
    def timestepping_statistics(self):
        """Report and reset the extreme decay factors of the recent steps."""
        message = ' Manning Operator: Max and Min factors %f %f ' % (self.exp_gamma_max ,self.exp_gamma_min)
        self.exp_gamma_max = 0.0
        self.exp_gamma_min = 1.0
        return message
| true |
9b78f4c2b9be157e5481d18c191b077283026f0d | Python | pangyouzhen/data-structure | /contest/findRelativeRanks.py | UTF-8 | 771 | 3.265625 | 3 | [] | no_license | from typing import List
import heapq
class Solution:
    def findRelativeRanks(self, score: List[int]) -> List[str]:
        """Replace every score by its rank label and return the list.

        The top three scores become "Gold Medal" / "Silver Medal" /
        "Bronze Medal"; the rest become their 1-based rank as a string.
        The list is modified in place (as before) and also returned.

        Improvement: the original negated all scores and called
        ``score.index(...)`` per element (O(n^2)); an index argsort is
        O(n log n).
        """
        medals = ["Gold Medal", "Silver Medal", "Bronze Medal"]
        # Positions of the scores, best score first.
        order = sorted(range(len(score)), key=score.__getitem__, reverse=True)
        for rank, position in enumerate(order):
            score[position] = medals[rank] if rank < 3 else str(rank + 1)
        return score
# Quick manual check of the solution.
if __name__ == '__main__':
    nums = [5, 4, 3, 2, 1]
    func = Solution().findRelativeRanks
    print(func(nums))  # -> ['Gold Medal', 'Silver Medal', 'Bronze Medal', '4', '5']
| true |
33386156031f0b0709ca8c19bfce7c0ad95b9b2a | Python | Stingray2020/SphinxAdaptionScript | /lineWriter.py | UTF-8 | 1,701 | 3.109375 | 3 | [] | no_license |
# - add some transcription lines from file
#filename = input("what is the name of the file")
#if filename == "y":
filename = "arctic20.transcription"
# arctic_a0121
# need to find last line and take note of the number
# need to make sure you add the 0 before the output number
# use this number as the start pointer for the line to be transcribed
# alternativly Enter it manually
#line = input("enter current fileline")
file = open("transcript", "r") ##write functions
# NOTE(review): `file == ''` compares a file object to a string and is
# therefore always False; the branch below (apparently meant to create the
# transcript file when it is missing or empty) is dead code.
if (file == ''):
    file = open("transcript", "w")
    file.close()
    file = open("transcript", "r") ##write functions
# Strip newlines, then remove punctuation the transcription format must not
# contain (quotes, commas, periods, dashes become spaces, apostrophes, ...).
lines = [line.rstrip('\n') for line in file]
lines2 = []
for line in lines:
    #line.rstrip(('.'))
    line = line.replace('"', "")
    line = line.replace(",", "")
    line = line.replace(".", "")
    line = line.replace("-", " ")
    line = line.replace("—", " ")
    line = line.replace("'", "")
    line = line.replace(";", "")
    lines2.append(line)
file.close()
print (lines2)
fileA = open(filename, "w")
# NOTE(review): these two placeholder lines end up inside the
# .transcription output file — confirm they are really wanted.
fileA.write("this is out new text file \n")
fileA.write("and this is a new ewew line \n")
#<s> Text </s> (arctic_a0089)
# p / i: first utterance number to emit (arctic_a0126); one id per line.
p = 126
i = 126
newid =0
limit = p + len(lines2)
# Write one "<s> TEXT </s> (arctic_aNNNN)" line per cleaned transcription.
while p < limit:
    text = "<s> "
    ln = '{0:04}'.format(p)
    newLine = lines2[newid]
    transcriptline = f"{newLine}"
    text += transcriptline
    text += " </s>"
    text += f" (arctic_a{ln}) \n"
    fileA.write(text)
    p += 1
    newid+=1
fileA.close()
# Emit the matching fileids list (one arctic_aNNNN id per line).
file2 = open("arctic20.fileids","w")##write functions
while i < limit:
    ln2 = '{0:04}'.format(i)
    text2 = f"arctic_a{ln2}\n"
    file2.write(text2)
    i += 1
file2.close()
#for i in range(10):
#    nu = '{0:04}'.format(i)
#    print(nu)
| true |
877addc2795a096c2b1108479476949fb3d20b6e | Python | ryhanahmedtamim/pattern_recognition | /lab1/pattern.py | UTF-8 | 1,484 | 3.78125 | 4 | [] | no_license | import math
training_data_list = []
number_of_training_data = int(input("Number of training data :"))
print()
print("Input the training data:")
print("Height(inc) - Weight(kg) - Class")
for i in range(number_of_training_data):
data = input()
height, weight ,class_name = data.split(' ')
data_dictionary = {'height' : height,
'weight' : weight,
'class' : class_name}
training_data_list.append(data_dictionary)
print()
print("Enter the query")
print("Height - Weight")
query_data = input()
query_data2 = query_data.split(" ")
test_height = int(query_data2[0])
test_weight = int(query_data2[1])
training_item = training_data_list[0]
training_item_height = int(training_item['height'])
training_item_weight = int(training_item['weight'])
training_item_class = training_item['class']
e_distance = math.sqrt((test_height-training_item_height)**2+
(test_weight-training_item_weight)**2)
output_class_name = training_item_class
for item in training_data_list:
training_item_height = int(item['height'])
training_item_weight = int(item['weight'])
training_item_class = item['class']
temp_e_distance = math.sqrt((test_height-training_item_height)**2+
(test_weight-training_item_weight)**2)
if temp_e_distance < e_distance:
e_distance = temp_e_distance
output_class_name = training_item_class
print("test data class is : ",output_class_name) | true |
711f20032de4894016647d8c89105551fda29d7a | Python | SebaArn/Ausbildung | /Effizienz Evaluierer/Effizienz_Evaluierer.py | UTF-8 | 9,981 | 2.765625 | 3 | [] | no_license | # import sys
import numpy as np # used to handle numbers, data structures and mathematical functions
import datetime # Used to convert our ascii dates into unix-seconds
import argparse # used to interpret parameters
import math
import sys
# import re
import os
def translate_date_to_sec(ymdhms):
"""
:param ymdhms: the year-month-day-hour-minute-second data (datetime.datetime) to be translated into unix-seconds.
:return: the amount of seconds passed since the first of january 1970 00:00 UTC, if invalid: "-1".
"""
x_ = str(ymdhms, 'utf-8')
if x_ == 'Unknown':
return -1
else:
temp_time = datetime.datetime.strptime(str(ymdhms, 'utf-8'), "%Y-%m-%d-%H-%M-%S") # convert into datetime
return temp_time.timestamp() # then convert into unix-seconds (timestamp)
def essential_par(parameters):
if (len(parameters)) < 1:
sys.stderr.write("needs at least one file (input)")
sys.exit()
sources_key, sources_nok, opt_para = [], [], []
while parameters:
if "-src=" == parameters[0][:5]:
sources_key.append(parameters[0][5:])
parameters = parameters[1:]
else:
if "-" in parameters[0][0:2]:
opt_para.append(parameters[0])
parameters = parameters[1:]
else:
sources_nok.append(parameters[0])
parameters = parameters[1:]
if not (sources_key or sources_nok):
sys.stderr.write("needs input file")
sys.exit()
sources = sources_key+sources_nok
return [sources, opt_para]
def translate_time_to_sec(time):
flag_days = False
if '-' in time:
flag_days = True
time = time.split('.')[0]
if len(time) < 2:
return 0
sub_splits = time.split('-')
seconds = 0
if flag_days:
seconds += 24 * 3600 * int(''.join(c for c in sub_splits[0] if c.isdigit()))
time_split_seconds = sub_splits[-1].split(':')
for i_ in range(len(time_split_seconds)):
seconds += int(''.join(c for c in (time_split_seconds[-(i_ + 1)]) if c.isdigit())) * int(math.pow(60, int(i_)))
return seconds
parser = argparse.ArgumentParser()
# Creating different parameters to allow the user to specify, what data is to be evaluated.
parser.add_argument("-src", nargs='*')
parser.add_argument('-p', dest='ProjectName', default="", type=str, nargs='?')
parser.add_argument('-l', dest='LowerLimit', default=360, type=int, nargs='?')
parser.add_argument('-s', dest='StartPoint', default="None", type=str, nargs='?')
parser.add_argument('--start', dest='StartPoint', default="None", type=str, nargs='?')
parser.add_argument('--project', dest='ProjectName', default="", type=str, nargs='?')
parser.add_argument('-max', dest='Maximum', default=0.3, type=float, nargs='?')
parser.add_argument('--Maximum', default=0.3, type=float, nargs='?')
parser.add_argument('-min', dest='Minimum', default=0.0, type=float, nargs='?')
parser.add_argument('--Minimum', default=0.0, type=float, nargs='?')
parser.add_argument('--separator', dest="Separator", default="\n", type=str, nargs='?')
parser.add_argument('-sep', dest="Separator", default="\n", type=str, nargs='?')
parser.add_argument('rest', type=str, nargs='*')
comma_sep = False
e_parameters = essential_par((sys.argv[1:]))
parameter = parser.parse_args()
LowerLimit = parameter.LowerLimit
if LowerLimit < 60:
LowerLimit = 60
# TODO: change csv semicolon separated file to default, make print case the special parameterized case instead.
if ";" in parameter.Separator:
comma_sep = True
if not parameter.Minimum:
mini = 0
else:
mini = parameter.Minimum
maxi = parameter.Maximum
project_name = parameter.ProjectName
start_point = parameter.StartPoint
if len(start_point) == 10: # appends hours, minutes and seconds if only date given
start_point += "-00-00-00"
if len(start_point) > 10:
start_datetime = datetime.datetime.strptime(start_point, "%Y-%m-%d-%H-%M-%S").timestamp()
else:
if not comma_sep:
print("no valid start_point given, using default")
originals = e_parameters[0]
originals = sorted(originals)
data_type = np.dtype(
[('JobID', '|S256'), ('Account', '|S256'), ('ReqCPUS', 'i4'), ('ReqMem', '|S256'), ('ReqNodes', 'i4'),
('AllocNodes', 'i4'), ('AllocCPUS', 'i4'), ('NNodes', 'i4'), ('NCPUS', 'i4'), ('CPUTimeRAW', 'uint64'),
('ElapsedRaw', 'uint64'), ('Start', '|S256'), ('End', '|S256'), ('TotalCPU', '|S256'), ('UserCPU', '|S256'),
('SystemCPU', '|S256')])
Data = np.loadtxt(originals[0], dtype=data_type, delimiter='|', skiprows=0,
usecols=(0, 1, 3, 4, 5, 6, 7, 8, 9, 12, 13, 26, 27, 14, 16, 15))
data_temp2 = []
counter = 0
if project_name:
filter_ = project_name
else:
filter_ = ""
for j in Data:
if 'Unknown' not in str(j['End']) and '.' not in str(j['JobID']) and filter_ in str(j['Account']):
data_temp2.append(j)
break
init = 0
for i in range(1, len(originals)):
Data_temp = np.loadtxt(originals[i], dtype=data_type, delimiter='|',
skiprows=0, usecols=(0, 1, 3, 4, 5, 6, 7, 8, 9, 12, 13, 26, 27, 14, 16, 15))
data_temp2 = []
for j in Data_temp:
if 'Unknown' not in str(j['End']) and '.' not in str(j['JobID']) and filter_ in str(j['Account']) and \
Data[i]["ElapsedRaw"] > LowerLimit:
data_temp2.append(j)
if data_temp2:
if len(Data) > 0:
if init:
Data = np.append(Data, data_temp2)
else:
Data = np.append(Data[0], data_temp2)
Data = Data[1:]
init = 1
else:
Data = data_temp2
Data_temp_2 = []
Data = Data[(Data[::]['End']).argsort()]
if len(Data) < 1:
sys.stderr.write("No data in file.")
sys.exit()
first_data = min(Data[::]['End'])
first_data = str(first_data)
first_data = first_data[2:-1]
if len(start_point) < 11:
start_point = first_data
start_datetime = datetime.datetime.strptime(start_point, "%Y-%m-%d-%H-%M-%S").timestamp()
if not comma_sep:
print("invalid start_datetime, using first occurrence.")
highest_data = max(Data[::]['End'])
highest_data = str(highest_data)
highest_data = highest_data[2:-1]
if highest_data < start_point:
sys.stderr.write('The start_point is after the latest date in the file')
sys.exit()
sp = 0
job_list = []
text_list = []
table_list = [";".join(["Job nr.", "Account (project name)", "efficiency (%)", "totalCPU (hours)",
'ElapsedRaw (Seconds)', "number of CPUs:", 'number of nodes:', "Memory", " Mn/Mc:",
"Corehours", "Parameter: efficiency >= " + str(mini*100) + "% and efficiency <= " +
str(maxi*100) + "%.", "Runtime >= " + str((LowerLimit//6)/10) + " minutes"])]
text_list.append("Parameter: efficiency >= " + str(mini * 100) + "% and efficiency >= " + str(maxi * 100) + "%.")
text_list.append("Runtime >= " + str((LowerLimit / 6) // 10) + " minutes")
for i in range(len(Data)):
if translate_date_to_sec(Data[i]['End']) > 0 and '.' not in str(Data[i]['JobID']) and filter_ in \
str(Data[i]['Account'])and str(Data[i]['End'])[2:-1] >= start_point and Data[i]["ElapsedRaw"] > LowerLimit:
if Data[i]["ElapsedRaw"] < 1:
continue
formated = Data[i]['TotalCPU']
formated = str(formated)[2:]
efficiency = translate_time_to_sec(formated) / (Data[i]['AllocCPUS']*Data[i]["ElapsedRaw"])
if mini <= efficiency <= maxi:
id_ = str(Data[i]['JobID'])
id_ = id_[2:-1:]
job_list.append(id_)
acc = str(Data[i]['Account'])
acc = acc[2:-1:]
seq_of_data = [str(id_), str(acc), str(int(abs(efficiency * 100000)) / 1000),
str((translate_time_to_sec(formated) // 36) / 100), str((Data[i]['ElapsedRaw'] // 36) / 100),
str(Data[i]['NCPUS']), str(Data[i]['NNodes']), (str(Data[i]['ReqMem'])[2:-3]),
(str(Data[i]['ReqMem'])[-3:-1]), str((int(Data[i]['CPUTimeRAW'] // 36) / 100))]
excl = ";".join(seq_of_data)
s = "Job nr. " + str(id_).ljust(8) + " (account = " + acc + ") has the efficiency " + \
str(int(abs(efficiency*1000))/10).ljust(5) + "% with a totalCPU of " \
+ str((translate_time_to_sec(formated)//360)/10).ljust(8) + " hours. Runtime in hours:" + \
str((Data[i]['ElapsedRaw']//360)/10).ljust(6) + " number of CPUs:" + str(Data[i]['NCPUS']).ljust(4) + \
' number of nodes:' + str(Data[i]['NNodes']).ljust(4)
if "Mc" in str(Data[i]['ReqMem'])[2:-1]:
s += ". Memory (Mc):"+(str(Data[i]['ReqMem'])[2:-1]).ljust(9)
else:
s += ". Memory (Mn):"+(str(Data[i]['ReqMem'])[2:-1]).ljust(9)
s += ". Corehours:"+str(int(Data[i]['CPUTimeRAW']//360)/10)
text_list.append(s)
table_list.append(excl)
if len(text_list) == 0:
sys.stderr.write("No data fitting the project_name within the specified borders within the specified time")
sys.exit()
if comma_sep:
pathname = os.path.dirname(originals[0])
if len(originals) < 2:
path_file = os.path.join(pathname, os.path.basename(originals[0].split('.')[0]) +
"_mi" + str(mini).replace(".", "_") + "_ma" + str(maxi).replace('.', '_') + ".csv")
else:
path_file = os.path.join(pathname, os.path.basename(originals[0].split('.')[0]) + '_bis_' +
os.path.basename(originals[-1].split('_')[1].split('.')[0] + "_mi" +
str(mini).replace(".", "_") + "_ma"+str(maxi).replace('.', '_') + ".csv"))
with open(path_file, 'w')as a:
a.write("\n".join(table_list))
print('wrote in: '+path_file)
else:
print("\n".join(text_list))
| true |
24a1939c8884d0282cb38f78e837ff9139a16ee3 | Python | mmikol/Simulating-Low-Density-Parity-Check-Codes | /code/decoder.py | UTF-8 | 2,591 | 2.796875 | 3 | [] | no_license | from tanner_graph import TannerGraph
# Decodes a corrupted codeword using belief propagation techniques
class Decoder:
def __init__(self, parity_matrix, decoding_method, max_iterations):
self.TG = TannerGraph(parity_matrix)
self.MAX_ITERATIONS = max_iterations
self.decoding_method = decoding_method
# Method for a Binary Erasure Channel
def bec_decode(self, Y):
# Initialization
I = 0
M = Y.copy()
E = [{} for c in self.TG.check_nodes]
while True:
# Check Messages
for check in self.TG.check_nodes:
for i in range(len(check.neighbors)):
bit = check.neighbors.pop(i)
if check.all_msgs_known(M):
E[check.index][bit] = check.sum_bits(M)
else:
E[check.index][bit] = -1
check.neighbors.insert(i, bit)
# Bit Messages
for bit in self.TG.bit_nodes:
if M[bit.index] == -1:
for check in bit.neighbors:
if E[check][bit.index] != -1:
M[bit.index] = E[check][bit.index]
# Test
if (not -1 in M) or (I == self.MAX_ITERATIONS):
return (M, I, f'QUIT: {I == self.MAX_ITERATIONS}')
else:
I += 1
# Method for a Binary Symmetric Channel
def bsc_decode(self, Y):
# Initialization
I = 0
M = Y.copy()
E = [{} for c in self.TG.check_nodes]
while True:
# Check Messages
for check in self.TG.check_nodes:
for i in range(len(check.neighbors)):
bit = check.neighbors.pop(i)
E[check.index][bit] = check.sum_bits(M)
check.neighbors.insert(i, bit)
# Bit Messages
for bit in self.TG.bit_nodes:
if bit.majority_checks_disagree(E, Y):
M[bit.index] = (M[bit.index] + 1) % 2
equations_satisfied = all(check.sum_bits(M) == 0 for check in self.TG.check_nodes)
if equations_satisfied or (I == self.MAX_ITERATIONS):
return (M, I, f'QUIT: {I == self.MAX_ITERATIONS}')
else:
I += 1
def run(self, arg):
return {
'erasure': self.bec_decode,
'flipping': self.bsc_decode,
}[self.decoding_method](arg) | true |
a3dc43e6cddfdc7d4318be30926e7b50c4cedb3f | Python | ahartloper/RESSPyLab | /RESSPyLab/uvc_constraints.py | UTF-8 | 8,031 | 3.125 | 3 | [
"MIT"
] | permissive | """@package vcu_constraints
Constraints for the updated Voce-Chaboche model to maintain a positive tangent modulus and positive parameters.
"""
import numpy as np
from numdifftools import nd_algopy as nda
def g_constraint(x, ep):
""" Returns the constraint that the tangent modulus is hardening (g function) in standard form.
:param np.array x: Updated Voce-Chaboche model parameters.
:param float ep: Plastic strain value.
:return float: Value of g.
"""
n_backstresses = int(len(x) - 6) / 2
g = x[4] * x[5] * np.exp(-x[5] * ep) - x[2] * x[3] * np.exp(-x[3] * ep)
for i in range(0, n_backstresses):
ck_ind = 6 + 2 * i
gk_ind = 7 + 2 * i
g += -x[ck_ind] * np.exp(-x[gk_ind] * ep)
return g
def g_gradient(x, ep):
""" Returns the gradient of the constraint function g.
:param np.array x: Updated Voce-Chaboche model parameters.
:param float ep: Plastic strain value.
:return np.array: (n, 1) Gradient of g, n = len(x).
"""
n_backstresses = int(len(x) - 6) / 2
grad = np.zeros((len(x), 1))
grad[2] = -x[3] * np.exp(-x[3] * ep)
grad[3] = (-x[2] + x[2] * x[3] * ep) * np.exp(-x[3] * ep)
grad[4] = x[5] * np.exp(-x[5] * ep)
grad[5] = (x[4] - x[4] * x[5] * ep) * np.exp(-x[5] * ep)
for i in range(0, n_backstresses):
ck_ind = 6 + 2 * i
gk_ind = 7 + 2 * i
grad[ck_ind] = -np.exp(-x[gk_ind] * ep)
grad[gk_ind] = x[ck_ind] * ep * np.exp(-x[gk_ind] * ep)
return grad
def g_hessian(x, ep):
""" Returns the Hessian matrix of the constraint g.
:param np.array x: Updated Voce-Chaboche model parameters.
:param float ep: Plastic strain value.
:return np.array: (n, n) Hessian of g, n = len(x).
"""
n_backstresses = int(len(x) - 6) / 2
hess = np.zeros((len(x), len(x)))
# row 2
hess[2, 3] = (-1. + x[3] * ep) * np.exp(-x[3] * ep)
# row 3
hess[3, 2] = (-1. + x[3] * ep) * np.exp(-x[3] * ep)
hess[3, 3] = (2. * x[2] * ep - x[2] * x[3] * ep ** 2) * np.exp(-x[3] * ep)
# row 4
hess[4, 5] = (1. - x[5] * ep) * np.exp(-x[5] * ep)
# row 5
hess[5, 4] = (1. - x[5] * ep) * np.exp(-x[5] * ep)
hess[5, 5] = (-2. * x[4] * ep + x[4] * x[5] * ep ** 2) * np.exp(-x[5] * ep)
for i in range(0, n_backstresses):
ck_ind = 6 + 2 * i
gk_ind = 7 + 2 * i
# row 6 + 2k
hess[ck_ind, gk_ind] = ep * np.exp(-x[gk_ind] * ep)
# row 7 + 2k
hess[gk_ind, ck_ind] = ep * np.exp(-x[gk_ind] * ep)
hess[gk_ind, gk_ind] = x[ck_ind] * ep ** 2 * np.exp(-x[gk_ind] * ep)
return hess
def g1_constraint(x, constants, variables):
""" Constraint that the initial value of tangent modulus > 0 at ep=0.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
g2 = g_constraint(x, 0.)
return g2
def g1_gradient(x, constants, variables):
""" Gradient of constraint that the initial value of tangent modulus > 0 at ep=0.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
grad_g2 = g_gradient(x, 0.)
return grad_g2
def g1_hessian(x, constants, variables):
""" Hessian of constraint that the initial value of tangent modulus > 0 at ep=0.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
hess_g2 = g_hessian(x, 0.)
return hess_g2
def g2_constraint(x, constants, variables):
""" Constraint for a positive derivative of tangent modulus at ep=0.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
n_backstresses = int((len(x) - 6) / 2)
g = x[3] ** 2 * x[2] - x[5] ** 2 * x[4]
for i in range(0, n_backstresses):
gk_ind = 7 + 2 * i
ck_ind = 6 + 2 * i
g += x[ck_ind] * x[gk_ind]
return g
def g2_gradient(x, constants, variables):
""" Gradient of constraint for a positive derivative of tangent modulus at ep=0.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
n_backstresses = int(len(x) - 6) / 2
grad = np.zeros((len(x), 1))
grad[2] = x[3] ** 2
grad[3] = 2. * x[2] * x[3]
grad[4] = -x[5] ** 2
grad[5] = -2. * x[4] * x[5]
for i in range(0, n_backstresses):
ck_ind = 6 + 2 * i
gk_ind = 7 + 2 * i
grad[ck_ind] = x[gk_ind]
grad[gk_ind] = x[ck_ind]
return grad
def g2_hessian(x, constants, variables):
""" Hessian of constraint for a positive derivative of tangent modulus at ep=0.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
n_backstresses = int(len(x) - 6) / 2
hess = np.zeros((len(x), len(x)))
# 2nd row
hess[2, 3] = 2. * x[3]
# 3rd row
hess[3, 2] = 2. * x[3]
hess[3, 3] = 2. * x[2]
# 4th row
hess[4, 5] = -2. * x[5]
# 5th row
hess[5, 4] = -2. * x[5]
hess[5, 5] = -2. * x[4]
# cKth and gKth rows
for i in range(0, n_backstresses):
ck_ind = 6 + 2 * i
gk_ind = 7 + 2 * i
hess[ck_ind, gk_ind] = 1.
hess[gk_ind, ck_ind] = 1.
return hess
def positive_x_constraint(x, constants, variables):
""" Returns the constraints that specify the model parameters are positive.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
Notes:
- The constants dictionary must contain the entry 'min_x': min_val, where min_val >= 0 is the minimum value that
any x[i] should be able to take (e.g., min_val = 0 specifies that all x[i] should be positive).
"""
min_val = constants['min_x']
g = [-xi + min_val for xi in x]
return g
def positive_x_gradient(x, constants, variables):
""" Returns gradients of the positive x constraints.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
dg = []
for i in range(0, len(x)):
v = np.zeros((len(x), 1))
v[i] = -1.0
dg.append(v)
return dg
def positive_x_hessian(x, constants, variables):
""" Returns the Hessians of the positive x constraints.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
hg = []
for i in range(0, len(x)):
m = np.zeros((len(x), len(x)))
hg.append(m)
return hg
| true |
64deb56abe08b5480466a6083db4b6ba4b47c7a0 | Python | shannonay0103/Unit-4-Lesson-5 | /Lesson 5/problem3.py | UTF-8 | 173 | 3.265625 | 3 | [] | no_license | from turtle import*
toht = Turtle()
toht.color('blue')
toht.pensize(5)
toht.speed(9)
toht.shape('turtle')
for x in range(6):
toht.forward(50)
toht.left(60)
mainloop()
| true |
36e5f7278c129b5c79965d0add6db390f84aad92 | Python | minxuanjun/tensorflow | /tutorial_project/data_generate.py | UTF-8 | 532 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 16:49:55 2018
@author: minxuan
"""
import numpy as np
import matplotlib.pyplot as plt
SEED = 2
def generateData():
rng = np.random.RandomState(SEED)
X = rng.randn(300,2)
Y_ = [int(x0*x0 + x1*x1 <2) for (x0, x1) in X]
color = ['red' if y else 'blue' for y in Y_]
X = np.vstack(X).reshape(-1,2)
Y_ = np.vstack(Y_).reshape(-1,1)
return X,Y_,color
if __name__ =='__main__':
X,Y_,color = generateData()
plt.scatter(X[:,0],X[:,1],c=color)
| true |
7d14d45b7045d50997ad4868a1ea85b49c0ded57 | Python | wanwanaa/autoencoder | /AE_cnn.py | UTF-8 | 2,434 | 2.53125 | 3 | [] | no_license | import torch
import torchvision
import torch.nn as nn
import torch.utils.data as Data
import matplotlib.pyplot as plt
# hyper Parameters
EPOCH = 1
BATCH_SIZE = 32
LR = 0.001
DOWNLOAD_MNIST = False
train_data = torchvision.datasets.MNIST(
root='\mnist',
train=True,
transform=torchvision.transforms.ToTensor(),
download=DOWNLOAD_MNIST
)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
test_data = torchvision.datasets.MNIST(root='/mnist', train=False)
test_x = torch.unsqueeze(test_data.test_data, dim=1).type(torch.FloatTensor)[:2]/255
class AE(nn.Module):
def __init__(self):
super(AE, self).__init__()
self.encoder1 = nn.Sequential( # (1, 28, 28)
nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2), # (16, 28, 28)
nn.ReLU(),
nn.MaxPool2d(2), # (16, 14, 14)
)
self.encoder2 = nn.Sequential(
nn.Conv2d(16, 32, 5, 1, 2), # (32, 14, 14)
nn.ReLU(),
nn.MaxPool2d(2), # (32, 7, 7)
)
self.decoder = nn.Sequential(
# nn.MaxUnpool2d(2, stride=2), # (32, 14, 14) # indices
# nn.ConvTranspose2d(in_channels=32, out_channels=16, kernel_size=5, stride=1, padding=2), # (16, 14, 14)
# nn.ReLU(),
# nn.MaxUnpool2d(2, stride=2), # (16, 28, 28)
# nn.ConvTranspose2d(16, 1, 5, 1, 2), # (1, 28, 28)
# nn.Sigmoid()
nn.Linear(32 * 7 * 7, 512),
nn.Linear(512, 28*28),
nn.Sigmoid()
)
def forward(self, x):
x = self.encoder1(x)
x = self.encoder2(x)
x = x.view(-1, 32*7*7)
x = self.decoder(x)
x = x.view(-1, 1, 28, 28)
return x
autoencoder = AE()
optimizer = torch.optim.Adam(autoencoder.parameters(), lr=LR)
loss_func = nn.MSELoss()
if __name__ == '__main__':
fig = plt.figure()
for epoch in range(EPOCH):
for step, (b_x, _) in enumerate(train_loader):
output = autoencoder(b_x)
loss = loss_func(output, b_x)
optimizer.zero_grad()
loss.backward()
optimizer.step()
result = autoencoder(test_x[0].view(1, 1, 28, 28))
plt.subplot(121)
plt.imshow(test_x[0].view(28, 28))
plt.subplot(122)
plt.imshow(result.data.view(28, 28))
plt.show() | true |
af62072f3dc4dcca99f88346452220310fd7b068 | Python | AaronBitman/TextFormatter | /output_buffer.py | UTF-8 | 3,289 | 3.734375 | 4 | [] | no_license | from justifiable_str import JustifiableStr
class OutputBuffer:
"""Class to hold and write the output"""
def __init__(self, output_file_name):
"""Create the file object and initialize the buffer."""
self.file_object = open(output_file_name, 'w')
self.buffer = ""
def write(self, text = "", new_paragraph = False):
""" Add the 'text' parameter to the output buffer
and maybe write some of that output."""
def send_to_file():
""" Determine what part, if any, of the text should
be written and write it to the output file."""
OUTPUT_LINE_LENGTH = 79
# While there's enough text to fill a line...
while len(self.buffer) >= OUTPUT_LINE_LENGTH:
# If there's EXACTLY enough text...
if len(self.buffer) == OUTPUT_LINE_LENGTH:
# ... we can simply write it
# without bothering to justify it...
self.file_object.write(self.buffer + '\n')
# ... and clear the buffer.
self.buffer = ""
else: # It must be greater than the output line length.
# So find the last point where we can break.
break_point = self.buffer.rfind(
" ", 0, OUTPUT_LINE_LENGTH + 1)
# The above line looked for a space.
# But a hyphen would do as well.
break_point_2 = self.buffer.rfind(
"-", 0, OUTPUT_LINE_LENGTH + 1) + 1
if break_point_2 > break_point:
break_point = break_point_2
# Justify the portion of the output
# we can handle and write it.
self.file_object.write(JustifiableStr(self.buffer[
:break_point]).full_justify(OUTPUT_LINE_LENGTH) + '\n')
# Now keep only the portion of
# the output we didn't write yet.
if break_point == break_point_2:
self.buffer = self.buffer[break_point:]
else:
self.buffer = self.buffer[break_point + 1:]
INDENT_LENGTH = 2
# Trim off the final '\n'.
text = str.rstrip(text)
# A blank line indicates the end of a paragraph...
if text == "":
# ... so if there's text to write...
if self.buffer != "":
# ...then we can just write it without justifying it.
self.file_object.write(self.buffer + "\n")
self.buffer = ""
else:
if new_paragraph:
# ... then indent.
self.buffer = " " * INDENT_LENGTH
# At the end of a line, add a space to separate
# it from the first word of the next line.
elif self.buffer != "":
self.buffer += " "
# Now we can add the new text.
self.buffer += text
send_to_file()
def close(self):
"""Close the file."""
self.file_object.close()
| true |
9d067bceb01b03d4ff17afe685058ae5ba088370 | Python | miguel1996/Ambilight | /PC/ColorCollector.py | UTF-8 | 1,527 | 2.96875 | 3 | [] | no_license | from PIL import ImageGrab
class Modes:
screen = 0
keyboard = 1
static = 2
class ColorCollector:
_sampleRes = None
_color = (255, 0, 0)
_counter = 0
_factor = 20
_width = 1920
_height = 1080
def __init__(self, sample_width, sample_height):
self.set_sample(sample_width, sample_height)
@classmethod
def set_sample(cls, width, height):
if not cls._sampleRes:
cls._width = width
cls._height = height
cls._sampleRes = (int(width * 0.4), int(height * 0.4), int(width * 0.8), int(height * 0.8))
@classmethod
def update_color(cls, mode=0):
if mode == Modes.screen:
cls._color = cls.__screen_color()
elif mode == Modes.keyboard:
cls._color = cls.__keyboard_color()
elif mode == Modes.static:
cls._color = cls.__static_color()
else:
cls._color = cls.__error_color()
@classmethod
def get_color(cls):
return cls._color
@classmethod
def __screen_color(cls):
image = ImageGrab.grab(bbox=cls._sampleRes).resize((cls._factor, int(cls._factor / (cls._width/cls._height))))
color = image.quantize(1).convert("RGB").getpixel((0, 0))
return color
@staticmethod
def __keyboard_color():
raise NotImplementedError
@staticmethod
def __static_color():
raise NotImplementedError
@staticmethod
def __error_color(error_color=(255, 0, 0)):
return error_color
| true |
dbb92b37f7ecfca8386c142b87b83fe8613abb42 | Python | shuaib7860/GraphHierarchy | /GraphHierarchy/GraphHierarchy.py | UTF-8 | 26,988 | 3 | 3 | [
"MIT"
] | permissive | from numpy import zeros, ones, ndarray, average
from networkx import adjacency_matrix, Graph
from scipy.sparse import diags, lil_matrix, spmatrix
from scipy.sparse.linalg import lsqr
def forward_hierarchical_levels(graph, weight=None):
"""Returns the forward hierarchical levels of the nodes of a network as an array.
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
forward hierarchical levels : array
A Nx1 dimensional array indexed by the nodes, in the same order as graph.nodes if graph object, otherwise indexed in the same the numpy/sparse array, holding the value of their forward hierarchical levels.
References
----------
.. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph.transpose()
k_in = A.sum(axis=1)
elif isinstance(graph, spmatrix):
A = graph.transpose()
k_in = A.sum(axis=1).A1
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight).transpose()
k_in = A.sum(axis=1).A1
D_in = diags(k_in, 0)
L_in = D_in - A
return lsqr(L_in, k_in)[0]
def backward_hierarchical_levels(graph, weight=None):
"""Returns the backward hierarchical levels of the nodes of a network as an array. This is the transpose of the original graph, so out-edges now become in-edges.
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
backward hierarchical levels : array
A Nx1 dimensional array indexed by the nodes, in the same order as graph.nodes if graph object, otherwise indexed in the same the numpy/sparse array, holding the value of their forward hierarchical levels.
References
----------
.. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph
k_in = A.sum(axis=1)
elif isinstance(graph, spmatrix):
A = graph
k_in = A.sum(axis=1).A1
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight)
k_in = A.sum(axis=1).A1
D_in = diags(k_in, 0)
L_in = D_in - A
return lsqr(L_in, k_in)[0]
def hierarchical_levels(graph, weight=None):
"""Returns the hierarchical levels of the nodes of a network as an array which aids visualisation of the hierarchical structure in the network.
Parameters
----------
graph : Graph
A NetworkX graph or numpy/sparse array
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
hierarchical levels : array
A Nx1 dimensional array indexed by the nodes, in the same order as graph.nodes, holding the value of their hierarchical levels.
References
----------
.. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
return 0.5*(forward_hierarchical_levels(graph, weight=weight) - backward_hierarchical_levels(graph, weight=weight))
def sparse_forward_hierarchical_differences(graph, weight=None):
''' Just a copy of the forward hierarchical differences function that returns the sparse matrix, instead of the dense representation, in lil format'''
if isinstance(graph, (ndarray, spmatrix)):
A = graph.transpose()
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight).transpose()
s = forward_hierarchical_levels(graph, weight=weight)
TD = lil_matrix(A.shape, dtype=float)
for i, j in zip(A.nonzero()[0], A.nonzero()[1]):
TD[i,j] = s[i] - s[j]
return TD
def forward_hierarchical_differences(graph, weight=None):
"""Returns the forward hierarchical differences over the edges of a network in the form of a weighted adjacency matrix
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
forward hierarchical differences : array
A NxN dimensional array representing a weighted adjacency matrix, with the edge weights corresponding to the forward hierarchical differences.
The column index represents the source node of the edge and the row index represents the destination node of the edge.
References
----------
.. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
TD = sparse_forward_hierarchical_differences(graph, weight=weight)
return TD.toarray()
def sparse_backward_hierarchical_differences(graph, weight=None):
    """Backward hierarchical differences as a scipy sparse matrix (lil format).

    Same contract as backward_hierarchical_differences(), but the result is
    kept sparse instead of being densified.

    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array.
    weight : string or None
        Edge-weight attribute name; only relevant for networkx graphs.

    Returns
    -------
    scipy.sparse.lil_matrix
        Entry (i, j) holds level[i] - level[j] for every stored edge.

    Raises
    ------
    TypeError
        If `graph` is not a networkx Graph, numpy array or scipy sparse matrix.
    """
    if isinstance(graph, (ndarray, spmatrix)):
        # Note: unlike the forward variant, no transpose here.
        A = graph
    elif isinstance(graph, Graph):
        A = adjacency_matrix(graph, weight=weight)
    else:
        # The original fell through with `A` unbound and crashed with a
        # confusing NameError; fail fast with a clear message instead.
        raise TypeError("graph must be a networkx Graph, numpy ndarray or scipy sparse matrix")
    s = backward_hierarchical_levels(graph, weight=weight)
    TD = lil_matrix(A.shape, dtype=float)
    rows, cols = A.nonzero()
    for i, j in zip(rows, cols):
        TD[i, j] = s[i] - s[j]
    return TD
def backward_hierarchical_differences(graph, weight=None):
    """Dense NxN matrix of backward hierarchical differences.

    The column index is the source node of an edge and the row index the
    destination node; each stored entry is the backward hierarchical-level
    difference across that edge.

    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array.
    weight : string or None
        Edge-weight attribute name; only relevant for networkx graphs.

    Returns
    -------
    numpy.ndarray
        Weighted adjacency matrix of backward hierarchical differences.

    References
    ----------
    .. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
       Graph hierarchy and spread of infections. arXiv:1908.04358.
    """
    # Build the matrix sparsely, then densify once at the end.
    return sparse_backward_hierarchical_differences(graph, weight=weight).toarray()
def forward_hierarchical_incoherence(graph, weight=None):
    """Forward hierarchical differences, their mean and standard deviation.

    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array.
    weight : string or None
        Edge-weight attribute name; only relevant for networkx graphs.

    Returns
    -------
    TD : array or sparse array
        NxN weighted adjacency matrix of forward hierarchical differences
        (dense for ndarray input, sparse otherwise).
    m : float
        Mean of the edge-weighted distribution of forward differences.
    std : float
        Standard deviation of that distribution (the forward hierarchical
        incoherence).

    Raises
    ------
    TypeError
        If `graph` is not a networkx Graph, numpy array or scipy sparse matrix.

    References
    ----------
    .. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
       Graph hierarchy and spread of infections. arXiv:1908.04358.
    """
    if isinstance(graph, ndarray):
        A = graph.transpose()
        TD = forward_hierarchical_differences(graph, weight=weight)
        m = average(TD, weights=A)
        m2 = average(TD**2, weights=A)
    elif isinstance(graph, (spmatrix, Graph)):
        # The spmatrix and Graph branches were duplicates apart from how A
        # is obtained; merge them so the moment computation exists once.
        if isinstance(graph, spmatrix):
            A = graph.transpose()
        else:
            A = adjacency_matrix(graph, weight=weight).transpose()
        TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
        total = A.sum()
        m = (A.multiply(TD)).sum() / total
        m2 = (A.multiply(TD.power(2))).sum() / total
    else:
        # Originally fell through with m/m2/TD unbound -> NameError.
        raise TypeError("graph must be a networkx Graph, numpy ndarray or scipy sparse matrix")
    # std via the variance identity Var(X) = E[X^2] - E[X]^2.
    std = (m2 - m**2)**0.5
    return TD, m, std
def backward_hierarchical_incoherence(graph, weight=None):
    """Backward hierarchical differences, their mean and standard deviation.

    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array.
    weight : string or None
        Edge-weight attribute name; only relevant for networkx graphs.

    Returns
    -------
    TD : array or sparse array
        NxN weighted adjacency matrix of backward hierarchical differences
        (dense for ndarray input, sparse otherwise).
    m : float
        Mean of the edge-weighted distribution of backward differences.
    std : float
        Standard deviation of that distribution (the backward hierarchical
        incoherence).

    Raises
    ------
    TypeError
        If `graph` is not a networkx Graph, numpy array or scipy sparse matrix.

    References
    ----------
    .. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
       Graph hierarchy and spread of infections. arXiv:1908.04358.
    """
    if isinstance(graph, ndarray):
        A = graph
        TD = backward_hierarchical_differences(graph, weight=weight)
        m = average(TD, weights=A)
        m2 = average(TD**2, weights=A)
    elif isinstance(graph, (spmatrix, Graph)):
        # Merged duplicate spmatrix/Graph branches; only A differs.
        if isinstance(graph, spmatrix):
            A = graph
        else:
            A = adjacency_matrix(graph, weight=weight)
        TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
        total = A.sum()
        m = (A.multiply(TD)).sum() / total
        m2 = (A.multiply(TD.power(2))).sum() / total
    else:
        # Originally fell through with m/m2/TD unbound -> NameError.
        raise TypeError("graph must be a networkx Graph, numpy ndarray or scipy sparse matrix")
    std = (m2 - m**2)**0.5
    return TD, m, std
def forward_democracy_coefficient(graph, weight=None):
    """Forward democracy coefficient: a measure of equitable controllability
    over the whole graph/network.

    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array.
    weight : string or None
        Edge-weight attribute name; only relevant for networkx graphs.

    Returns
    -------
    float
        1 minus the mean forward hierarchical difference over all edges.

    Raises
    ------
    TypeError
        If `graph` is not a networkx Graph, numpy array or scipy sparse matrix.

    References
    ----------
    .. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
       Graph hierarchy and spread of infections. arXiv:1908.04358.
    """
    if isinstance(graph, ndarray):
        A = graph.transpose()
        TD = forward_hierarchical_differences(graph, weight=weight)
        m = average(TD, weights=A)
    elif isinstance(graph, (spmatrix, Graph)):
        # Merged duplicate branches; only the adjacency source differs.
        if isinstance(graph, spmatrix):
            A = graph.transpose()
        else:
            A = adjacency_matrix(graph, weight=weight).transpose()
        TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
        m = (A.multiply(TD)).sum() / A.sum()
    else:
        # Originally fell through with `m` unbound -> NameError.
        raise TypeError("graph must be a networkx Graph, numpy ndarray or scipy sparse matrix")
    return 1 - m
def backward_democracy_coefficient(graph, weight=None):
    """Backward democracy coefficient: a measure of equitable controllability
    over the whole graph/network.

    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array.
    weight : string or None
        Edge-weight attribute name; only relevant for networkx graphs.

    Returns
    -------
    float
        1 minus the mean backward hierarchical difference over all edges.

    Raises
    ------
    TypeError
        If `graph` is not a networkx Graph, numpy array or scipy sparse matrix.

    References
    ----------
    .. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
       Graph hierarchy and spread of infections. arXiv:1908.04358.
    """
    if isinstance(graph, ndarray):
        A = graph
        TD = backward_hierarchical_differences(graph, weight=weight)
        m = average(TD, weights=A)
    elif isinstance(graph, (spmatrix, Graph)):
        # Merged duplicate branches; only the adjacency source differs.
        if isinstance(graph, spmatrix):
            A = graph
        else:
            A = adjacency_matrix(graph, weight=weight)
        TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
        m = (A.multiply(TD)).sum() / A.sum()
    else:
        # Originally fell through with `m` unbound -> NameError.
        raise TypeError("graph must be a networkx Graph, numpy ndarray or scipy sparse matrix")
    return 1 - m
def node_forward_influence_centrality(graph, node, weight=None):
    """Returns the forward influence centrality of the given node in the network.

    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array
    node : number
        Label of the node as determined by the indexing of the graph.nodes() call
        or the index of the numpy/sparse array.
    weight : string
        If you have weighted edges insert weight='string', where string is your
        underlying weight attribute. Only relevant if graph object is a networkx
        graph instance. Otherwise the default is None.

    Returns
    -------
    forward influence centrality : float
        A node's forward influence centrality.

    References
    ----------
    .. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
       Graph hierarchy and spread of infections.
       arXiv preprint arXiv:1908.04358."""
    if isinstance(graph, ndarray):
        # Transpose so row `index` holds the weights of the node's incident
        # edges in the forward orientation.
        A = graph.transpose()
        index = node
        TD = forward_hierarchical_differences(graph, weight=weight)
        if A[index].sum() == 0:
            # No incident edges in this orientation: mean difference is 0.
            m = 0
        else:
            m = average(TD[index], weights=A[index])
    elif isinstance(graph, spmatrix):
        A = graph.transpose()
        index = node
        TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
        if A[index].sum() == 0:
            m = 0
        else:
            # Weighted mean of the node's row of hierarchical differences.
            m = (A[index].multiply(TD[index])).sum() / A[index].sum()
    elif isinstance(graph, Graph):
        A = adjacency_matrix(graph, weight=weight).transpose()
        # Map the node label to its positional index within graph.nodes.
        index = list(graph.nodes).index(node)
        TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
        if A[index].sum() == 0:
            m = 0
        else:
            m = (A[index].multiply(TD[index])).sum() / A[index].sum()
    # NOTE(review): if `graph` is none of the types above, `m` is unbound and
    # the return below raises NameError.
    return 1 - m
def node_backward_influence_centrality(graph, node, weight=None):
    """Returns the backward influence centrality of the given node in the network.

    Parameters
    ----------
    graph : Graph array
        A NetworkX graph or numpy/sparse array
    node : number
        Label of the node as determined by the indexing of the graph.nodes() call
        or the index of the numpy/sparse array.
    weight : string or None
        If you have weighted edges insert weight='string', where string is your
        underlying weight attribute. Only relevant if graph object is a networkx
        graph instance, otherwise the default is None.

    Returns
    -------
    backward influence centrality : float
        A node's backward influence centrality.

    References
    ----------
    .. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
       Graph hierarchy and spread of infections.
       arXiv preprint arXiv:1908.04358."""
    if isinstance(graph, ndarray):
        # Unlike the forward variant, no transpose in the backward orientation.
        A = graph
        index = node
        TD = backward_hierarchical_differences(graph, weight=weight)
        if A[index].sum() == 0:
            # No incident edges in this orientation: mean difference is 0.
            m = 0
        else:
            m = average(TD[index], weights=A[index])
    elif isinstance(graph, spmatrix):
        A = graph
        index = node
        TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
        if A[index].sum() == 0:
            m = 0
        else:
            # Weighted mean of the node's row of hierarchical differences.
            m = (A[index].multiply(TD[index])).sum() / A[index].sum()
    elif isinstance(graph, Graph):
        A = adjacency_matrix(graph, weight=weight)
        # Map the node label to its positional index within graph.nodes.
        index = list(graph.nodes).index(node)
        TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
        if A[index].sum() == 0:
            m = 0
        else:
            m = (A[index].multiply(TD[index])).sum() / A[index].sum()
    # NOTE(review): if `graph` is none of the types above, `m` is unbound and
    # the return below raises NameError.
    return 1 - m
def forward_influence_centrality(graph, weight=None):
    """Returns the forward influence centrality of the nodes in a network as an array.

    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array
    weight : string or None
        If you have weighted edges insert weight='string', where string is your
        underlying weight attribute. Only relevant if graph object is a networkx
        graph instance, otherwise the default is None.

    Returns
    -------
    forward influence centrality : array
        A Nx1 dimensional array indexed by the nodes, in the same order as
        graph.nodes, holding the value of their forward influence centralities.

    References
    ----------
    .. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
       Graph hierarchy and spread of infections.
       arXiv preprint arXiv:1908.04358."""
    if isinstance(graph, ndarray):
        A = graph.transpose()
        TD = forward_hierarchical_differences(graph, weight=weight)
        # One weighted row-mean per node, accumulated into a column vector.
        m = zeros((TD.shape[0], 1))
        for i in range(m.shape[0]):
            if A[i].sum() == 0:
                # Node without incident edges in this orientation: mean is 0.
                m[i] = 0
            else:
                m[i] = average(TD[i], weights=A[i])
    elif isinstance(graph, spmatrix):
        A = graph.transpose()
        TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
        m = zeros((TD.shape[0], 1))
        for i in range(m.shape[0]):
            if A[i].sum() == 0:
                m[i] = 0
            else:
                m[i] = (A[i].multiply(TD[i])).sum() / A[i].sum()
    elif isinstance(graph, Graph):
        A = adjacency_matrix(graph, weight=weight).transpose()
        TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
        m = zeros((TD.shape[0], 1))
        for i in range(m.shape[0]):
            if A[i].sum() == 0:
                m[i] = 0
            else:
                m[i] = (A[i].multiply(TD[i])).sum() / A[i].sum()
    # Centrality is 1 - mean difference, element-wise over all nodes.
    # NOTE(review): unsupported graph types leave `m` unbound -> NameError.
    return ones((m.shape[0], 1)) - m
def backward_influence_centrality(graph, weight=None):
    """Returns the backward influence centrality of the nodes in a network as an array.

    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array
    weight : string
        If you have weighted edges insert weight='string', where string is your
        underlying weight attribute. Only relevant if graph object is a networkx
        graph instance, otherwise the default is None.

    Returns
    -------
    backward influence centrality : array
        A Nx1 dimensional array indexed by the nodes, in the same order as
        graph.nodes, holding the value of their backward influence centralities.

    References
    ----------
    .. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
       Graph hierarchy and spread of infections.
       arXiv preprint arXiv:1908.04358."""
    if isinstance(graph, ndarray):
        # Backward orientation: adjacency is used untransposed.
        A = graph
        TD = backward_hierarchical_differences(graph, weight=weight)
        # One weighted row-mean per node, accumulated into a column vector.
        m = zeros((TD.shape[0], 1))
        for i in range(m.shape[0]):
            if A[i].sum() == 0:
                # Node without incident edges in this orientation: mean is 0.
                m[i] = 0
            else:
                m[i] = average(TD[i], weights=A[i])
    elif isinstance(graph, spmatrix):
        A = graph
        TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
        m = zeros((TD.shape[0], 1))
        for i in range(m.shape[0]):
            if A[i].sum() == 0:
                m[i] = 0
            else:
                m[i] = (A[i].multiply(TD[i])).sum() / A[i].sum()
    elif isinstance(graph, Graph):
        A = adjacency_matrix(graph, weight=weight)
        TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
        m = zeros((TD.shape[0], 1))
        for i in range(m.shape[0]):
            if A[i].sum() == 0:
                m[i] = 0
            else:
                m[i] = (A[i].multiply(TD[i])).sum() / A[i].sum()
    # Centrality is 1 - mean difference, element-wise over all nodes.
    # NOTE(review): unsupported graph types leave `m` unbound -> NameError.
    return ones((m.shape[0], 1)) - m
def forward_hierarchical_metrics(graph, weight=None):
    """Bundle of all foundational forward-hierarchy metrics for a network.

    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array.
    weight : string or None
        Edge-weight attribute name; only relevant for networkx graphs.

    Returns
    -------
    tuple
        (forward hierarchical levels (Nx1 array),
         forward influence centralities (Nx1 array),
         forward hierarchical differences (NxN matrix),
         forward democracy coefficient (float),
         forward hierarchical incoherence (float)).

    References
    ----------
    .. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
       Graph hierarchy and spread of infections. arXiv:1908.04358.
    """
    levels = forward_hierarchical_levels(graph, weight=weight)
    centrality = forward_influence_centrality(graph, weight=weight)
    differences, mean_diff, incoherence = forward_hierarchical_incoherence(graph, weight=weight)
    # The democracy coefficient is one minus the mean hierarchical difference.
    return levels, centrality, differences, 1 - mean_diff, incoherence
def backward_hierarchical_metrics(graph, weight=None):
    """Bundle of all foundational backward-hierarchy metrics for a network.

    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array.
    weight : string or None
        Edge-weight attribute name; only relevant for networkx graphs.

    Returns
    -------
    tuple
        (backward hierarchical levels (Nx1 array),
         backward influence centralities (Nx1 array),
         backward hierarchical differences (NxN matrix),
         backward democracy coefficient (float),
         backward hierarchical incoherence (float)).

    References
    ----------
    .. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
       Graph hierarchy and spread of infections. arXiv:1908.04358.
    """
    levels = backward_hierarchical_levels(graph, weight=weight)
    centrality = backward_influence_centrality(graph, weight=weight)
    differences, mean_diff, incoherence = backward_hierarchical_incoherence(graph, weight=weight)
    # The democracy coefficient is one minus the mean hierarchical difference.
    return levels, centrality, differences, 1 - mean_diff, incoherence
| true |
faf0f161102bd26dd3d8d1b6fc954cc38af07309 | Python | marcelomagri/py-scraper | /py-scraper/py-scraper/my_sql.py | UTF-8 | 544 | 2.6875 | 3 | [] | no_license | import sqlite3
import datetime
def openConn():
    """Open (creating if missing) the scraper's SQLite database and return the connection.

    NOTE(review): the relative path means a 'data' directory must already exist
    under the current working directory, otherwise connect() raises
    sqlite3.OperationalError.
    """
    conn = sqlite3.connect('data/dados.db')
    return conn
def closeConn(conn):
    """Close a connection previously returned by openConn()."""
    conn.close()
# Records one captured item in the database.
def WriteItem(conn, descricao, titulo, classificacao, country_code):
    """Insert one scraped item into RAW_DATA and commit.

    Parameters
    ----------
    conn : sqlite3.Connection
        Open database connection (see openConn()).
    descricao : str
        Stored in the PACKAGE column (column/argument names are swapped in
        the original schema mapping; kept as-is for compatibility).
    titulo : str
        Stored in the DESCRIPTION column.
    classificacao : int
        Stored in the POSITION column.
    country_code : str
        Stored in the ORIGIN column.
    """
    cursor = conn.cursor()
    # Parameterized placeholders instead of %-string formatting: the original
    # was vulnerable to SQL injection and broke on values containing quotes.
    cursor.execute(
        "INSERT INTO RAW_DATA (PACKAGE, DESCRIPTION, POSITION, ORIGIN, DATAHORA) "
        "VALUES (?, ?, ?, ?, ?)",
        (descricao, titulo, classificacao, country_code,
         str(datetime.datetime.utcnow())),
    )
    conn.commit()
| true |
ee43c7688d414c53057ffca8832a3548ef6c80b6 | Python | stzvst/HWGeek6 | /1.py | UTF-8 | 655 | 3.703125 | 4 | [] | no_license | #Задание 1
from time import sleep
def in_sleep(time_out):
    """Count from 1 up to time_out, printing each number and pausing one second."""
    for tick in range(1, time_out + 1):
        print(tick)
        sleep(1)
class Svetofor:
    """Traffic light that cycles RED -> YELLOW -> GREEN with fixed phase times."""

    __color = ['RED', 'YELLOW', 'GREEN']
    # Seconds each phase stays lit, index-aligned with __color.
    __durations = (7, 5, 3)

    def running(self, times):
        """Run the light through the full colour cycle `times` times."""
        for _ in range(times):
            for name, duration in zip(Svetofor.__color, Svetofor.__durations):
                print('Светофор горит ->', name)
                in_sleep(duration)
# NOTE(review): rebinding the name `Svetofor` to an instance shadows the class,
# so no further Svetofor objects can be created after this line.
Svetofor = Svetofor()
Svetofor.running(1)  # one full cycle; each phase counts off its duration in 1 s steps
69bf57a9059b5b6961e186f2c490b27f10535843 | Python | KedoKudo/ISAW | /PythonSources/Lib/spectrum2.py | UTF-8 | 2,002 | 2.71875 | 3 | [] | no_license | #--------------------------------------------------------
# function spectrum2
#--------------------------------------------------------
#! Obtain spectral correction from counts vs. time data
#! in a Bankxx_spectrum.asc file.
#! Fortran version: A. J. Schultz, July, 2009
#! Jython version: A. J. Schultz, March, 2010
# Also returns the relative sigma of the spectral correction.
# A. J. Schultz, April, 2011
# spectrum2 does not average over a +/- averageRange.
# This is because TOPAZ_spectrum now includes
# a Savitzky-Golay smoothing Filter.
# A. J. Schultz, September, 2010
# Parameters:
# wavelength = wavelength in Angstroms
# xtof = (L1 + detD)/hom; TOF = wl * xtof
# spect1 = spectrum at normalization wavlength, usually 1 Angstrom
# xtime = spectrum TOF array
# xcounts = spectrum counts array
from math import *
def spectrum2( wavelength, xtof, spect1, xtime, xcounts ):
    """Return the relative spectrum correction and its relative sigma.

    Parameters
    ----------
    wavelength : float
        Wavelength in Angstroms.
    xtof : float
        (L1 + detD)/hom, so that TOF = wavelength * xtof (microseconds).
    spect1 : float
        Spectrum value at the normalization wavelength (usually 1 Angstrom).
    xtime : sequence of float
        Spectrum TOF array (ascending).
    xcounts : sequence of float
        Spectrum counts array, same length as xtime.

    Returns
    -------
    (spect, relSigSpect) : tuple of float
        Relative spectrum correction and its relative sigma; (0.0, 0.0) when
        the correction cannot be computed.
    """
    # TOF = WL * XTOF in units of microseconds
    TOF = wavelength * xtof
    # Bug fixes vs. the original:
    #  * start at j=1 so xcounts[j-1] never wraps to xcounts[-1] when the very
    #    first channel already exceeds TOF;
    #  * initialize spectx so a TOF beyond the last channel no longer raises
    #    NameError -- it now falls into the "no correction" branch.
    spectx = 0.0
    for j in range(1, len(xtime)):
        if xtime[j] > TOF:
            # Linear interpolation between the two bracketing time channels.
            deltaCounts = xcounts[j] - xcounts[j-1]
            deltaTime = xtime[j] - xtime[j-1]
            fraction = (TOF - xtime[j-1]) / deltaTime
            spectx = xcounts[j-1] + deltaCounts * fraction
            break
    if spect1 == 0.0 or spectx <= 0.0:
        # No valid normalization or no counts at this TOF.
        return 0.0, 0.0
    spect = spectx / spect1
    # relative sigma for spect:
    # relSigSpect**2 = (sqrt(spectx)/spectx)**2 + (sqrt(spect1)/spect1)**2
    relSigSpect = sqrt((1.0 / spectx) + (1.0 / spect1))
    return spect, relSigSpect
| true |
51e46f981c7a2ddd28ae1d47df4df18e6f804be9 | Python | LucianaRepetto/pythonbootcamp | /23functions_parametros.py | UTF-8 | 72 | 3.265625 | 3 | [] | no_license | def square(param):
return param**2
print(square(8))
print(square(6)) | true |
682563337990fc2bf81b702068ec5bf41c5f1dec | Python | udhayprakash/PythonMaterial | /python3/19_Concurrency_and_Parallel_Programming/01_MultiThreading/threading/Ch9.py | UTF-8 | 6,059 | 3.40625 | 3 | [] | no_license | import base64
import os
import random
import sys
from functools import reduce
# These examples are not in individual functions in the chapter, but
# to isolate them, they are separated into individual functions here
def lambda_sample():
    """Demonstrate filter() with an inline lambda that keeps even numbers."""
    # use lambda with filter
    filter_me = [1, 2, 3, 4, 6, 7, 8, 11, 12, 14, 15, 19, 22]
    # The lambda is True only for even numbers (x % 2 is 0 for them).
    result = filter(lambda x: x % 2 == 0, filter_me)
    # filter() returns a lazy iterator on Python 3; materialize it so the even
    # numbers are printed instead of "<filter object at 0x...>".
    print(list(result))
def lambda_named_sample():
    """Demonstrate filter() with a lambda bound to a name first."""
    # use lambda with filter, but bind it to a name
    filter_me = [1, 2, 3, 4, 6, 7, 8, 11, 12, 14, 15, 19, 22]
    # True only for even numbers (x % 2 is 0 for them).
    func = lambda x: x % 2 == 0
    result = filter(func, filter_me)
    # filter() returns a lazy iterator on Python 3; materialize it so the even
    # numbers are printed instead of "<filter object at 0x...>".
    print(list(result))
def reduce_sample():
    """Collapse a list into one value with functools.reduce and a lambda."""
    reduce_me = [2, 4, 4, 2, 6]
    # Each step raises the running value to the next element's power:
    # ((((2 ** 4) ** 4) ** 2) ** 6) == 2 ** 192, a very big number.
    result = reduce(lambda acc, exponent: acc ** exponent, reduce_me)
    print("The result of reduce is: %d" % result)
def map_sample():
    """Demonstrate map() applying a formatting lambda to each letter."""
    # Now map gets to be run in the simple case
    map_me = ["a", "b", "c", "d", "e", "f", "g"]
    result = map(lambda x: "The letter is %s" % x, map_me)
    # map() returns a lazy iterator on Python 3; materialize it so the mapped
    # strings are printed instead of "<map object at 0x...>".
    print(list(result))
def list_comprehension_sample():
    """Filter a list down to its even members with a list comprehension."""
    everything = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    evens = [number for number in everything if number % 2 == 0]
    print(evens)
def range_sample():
    """Show the one-, two- and three-argument forms of range()."""
    # range(stop) counts 0 .. stop-1.
    for value in range(10):
        print("Number is now %d" % value)
    # range(start, stop, step): every fourth number from 5 up to (excl.) 55.
    for value in range(5, 55, 4):
        print("Number from 5 to 55, by fours: %d" % value)
    # range(start, stop) with an explicit start of 0 behaves like range(stop).
    for value in range(0, 10):
        print(value)
def string_substitution_sample():
    """Contrast %-style mapping substitution with string.Template."""
    import string

    person = {
        "name": "James",
        "camera": "nikon",
        "handedness": "lefty",
        "baseball_team": "angels",
        "instrument": "guitar",
    }
    # Named %-substitution pulls values straight out of the mapping.
    print("%(name)s, %(camera)s, %(baseball_team)s" % person)
    person["height"] = 1.6
    person["weight"] = 80
    # %2.2f formats the numeric entries with two decimal places.
    print(
        "%(name)s, %(camera)s, %(baseball_team)s, %(height)2.2f, %(weight)2.2f" % person
    )
    # string.Template uses $name placeholders instead of %(name)s.
    template = string.Template("$name is $height m high and $weight kilos")
    print(template.substitute(person))
def getopt_sample():
    """Parse sys.argv with getopt.getopt and react to -h/--help and -c/--config.

    NOTE(review): getopt.getopt stops at the first non-option argument and
    raises getopt.GetoptError on unrecognized options.
    """
    import getopt
    import sys
    # Remember, the first thing in the sys.argv list is the name of the command
    # You don't need that.
    cmdline_params = sys.argv[1:]
    # "hc:" -> -h takes no value, -c requires one; long forms mirror them.
    opts, args = getopt.getopt(cmdline_params, "hc:", ["help", "config="])
    print(opts, args)
    for option, parameter in opts:
        if option == "-h" or option == "--help":
            print("This program can be run with either -h or --help for this message,")
            print(
                "or with -c or --config=<file> to specify a different configuration file"
            )
            print()
        if option in ("-c", "--config"):  # this means the same as the above
            print("Using configuration file %s" % parameter)
def gnu_getopt_sample():
    """Parse sys.argv with getopt.gnu_getopt, which allows options after positionals.

    Identical to getopt_sample() except gnu_getopt() keeps scanning for options
    after the first non-option argument (GNU-style intermixing).
    """
    import getopt
    import sys
    # Remember, the first thing in the sys.argv list is the name of the command
    # You don't need that.
    cmdline_params = sys.argv[1:]
    opts, args = getopt.gnu_getopt(cmdline_params, "hc:", ["help", "config="])
    print(opts, args)
    for option, parameter in opts:
        if option == "-h" or option == "--help":
            print("This program can be run with either -h or --help for this message")
            print(
                "or with -c or --config=<file> to specify a different configuration file"
            )
            print()
        if option in ("-c", "--config"):  # this means the same as the above
            print("Using configuration file %s" % parameter)
def fork_sample():
    """POSIX-only demo: fork the process, then exec /bin/cat in the child.

    NOTE(review): os.fork/os.execl/os.wait are unavailable on Windows, so this
    raises AttributeError there; it also assumes /etc/motd exists.
    """
    import os
    pid = os.fork()
    # fork and exec together
    print("second test")
    if pid == 0:  # This is the child
        print("this is the child")
        print("I'm going to exec another program now")
        # execl replaces the child's image with /bin/cat; it never returns.
        os.execl("/bin/cat", "cat", "/etc/motd")
    else:
        print("the child is pid %d" % pid)
        # Parent blocks until the child terminates, avoiding a zombie.
        os.wait()
def determine_platform_sample():
    """Pick a platform-appropriate command and spawn it with os.spawnv."""
    import os
    import sys
    if sys.platform == "win32":
        print("Running on a windows platform")
        command = "C:\\winnt\\system32\\cmd.exe"
        params = []
    elif sys.platform.startswith("linux"):
        # sys.platform is "linux" on Python 3 ("linux2" on Python 2), so the
        # original exact comparison to "linux2" never matched and the spawn
        # below crashed with NameError on modern Linux.
        print("Running on a Linux system, identified by %s" % sys.platform)
        command = "/bin/uname"
        params = ["uname", "-a"]
    else:
        # Original code left `command` unbound here and crashed; bail out
        # cleanly on unrecognized platforms instead.
        print("Unsupported platform %s; nothing to run" % sys.platform)
        return
    print("Running %s" % command)
    os.spawnv(os.P_WAIT, command, params)
def os_system_sample():
    """Run a platform-appropriate command through os.system."""
    # Now system
    if sys.platform == "win32":
        print("Running on a windows platform")
        command = "cmd.exe"
    elif sys.platform.startswith("linux"):
        # sys.platform is "linux" on Python 3 ("linux2" on Python 2); the
        # original exact comparison never matched, leaving `command` unbound
        # and raising NameError at os.system() on modern Linux.
        print("Running Linux")
        command = "uname -a"
    else:
        print("Unsupported platform %s; nothing to run" % sys.platform)
        return
    os.system(command)
def _gen_salt():
salt = [chr(random.randint(0, 255)) for i in range(4)]
return "".join(salt)
def make_pass(cleartext):
    """Return a base64 blob of salt + SHA-1(salt + password).

    Python 3 port of the original: the long-removed `sha` module is replaced
    by hashlib, str/bytes are no longer interchangeable so the salt and the
    cleartext are hashed as bytes, and base64.encodestring (removed in 3.9)
    becomes base64.encodebytes. The blob layout (4 salt bytes followed by the
    20-byte SHA-1 digest) matches what check_pass() expects.
    """
    import hashlib
    # _gen_salt returns a str of 4 code points 0-255; latin-1 round-trips it.
    salt = _gen_salt().encode("latin-1")
    digest = hashlib.sha1(salt + cleartext.encode("utf-8")).digest()
    return base64.encodebytes(salt + digest)
def check_pass(cipher, cleartext):
    """Return True if `cleartext` matches the make_pass()-style blob `cipher`.

    Python 3 port: hashlib replaces the removed `sha` module and
    base64.decodebytes replaces base64.decodestring. Accepts the cipher as
    bytes or str for convenience.
    """
    import hashlib
    import hmac
    if isinstance(cipher, str):
        cipher = cipher.encode("ascii")
    raw = base64.decodebytes(cipher)
    # Blob layout: 4 salt bytes, then the 20-byte SHA-1 digest.
    salt, digest = raw[:4], raw[4:]
    expected = hashlib.sha1(salt + cleartext.encode("utf-8")).digest()
    # Constant-time comparison avoids a timing side channel.
    return hmac.compare_digest(expected, digest)
if __name__ == "__main__":
    # Smoke test: only the exact string used to build the cipher verifies.
    cipher = make_pass("TEST")
    for word in "spam", "TEST", "Test", "omelette":
        passwd = check_pass(cipher, word)
        # bool formats as 1/0 under %d, so "TEST: 1" and "spam: 0" etc.
        print("%s: %d" % (word, passwd))
| true |
d0beb1a105c3b1ba12b9117bb93012aa7c6067b0 | Python | Ravi-Seth/Python-Scripts | /OddEven.py | UTF-8 | 582 | 4.21875 | 4 | [] | no_license | # Make a large List
# Read n numbers, split them into sorted even and odd lists, and report the
# largest member of each (the last element after ascending sort).
# Ported to Python 3: the original mixed py2 print statements
# ("print EvenList[-1]") with py3 calls and was a SyntaxError on Python 3.
n = int(input("Enter How many numbers :"))
a = []
for i in range(0, n):
    print(i + 1)
    x = int(input("Enter number : "))
    a.append(x)

EvenList = sorted(value for value in a if value % 2 == 0)
OddList = sorted(value for value in a if value % 2 != 0)

print(" The Contents of Even List are :")
print(EvenList)
print(" Greatest number in EvenList :")
# Guard: [-1] on an empty list raises IndexError when no evens were entered.
print(EvenList[-1] if EvenList else "EvenList is empty")
print(" The Contents of Odd List are : :")
print(OddList)
print(" Greatest number in OddList :")
print(OddList[-1] if OddList else "OddList is empty")
c9a32cfe1c4f1ee68852c07843116a0851b21cbc | Python | nangnh/PhotoScan_Scripts | /PS_get_gcp_checkpoint_errors.py | UTF-8 | 3,655 | 2.578125 | 3 | [
"MIT"
] | permissive | # Script for finding the error at each GCP in a PhotoScan model.
# This script only runs inside Agisoft PhotoScan Pro.
# This script iterates through all enabled markers (with valid coordinates),
# turning off one at a time, optimizing the cameras, and checking the errors.
# The errors are written to a text file in the same directory as the .psx file.
import PhotoScan
# get the current file name minus the .psx part
# (slices off the 4-character extension from the open project's path)
fName = PhotoScan.app.document.path
fName = fName[:len(fName) - 4]
# open a text file to save the errors (overwrites any previous run's output)
outfile = open('{}{}'.format(fName, '_ERRORS.txt'), 'w')
outfile.write('PhotoScan file: {}\n\n'.format(fName))
print('\nrunning:\n PS_get_gcp_checkpoint_errors.py\n')
def getOptimizeParams(chunk):
    """Report which optimizeCameras parameters are enabled for a chunk.

    Reads the space-separated flag list stored in
    chunk.meta['optimize/fit_flags'] and returns a dict mapping
    'fit_<param>' -> bool for every parameter optimizeCameras() accepts,
    suitable for chunk.optimizeCameras(**result).
    """
    all_params = [
        'f', 'cx', 'cy', 'b1', 'b2', 'k1', 'k2', 'k3', 'k4', 'p1', 'p2', 'p3',
        'p4', 'shutter'
    ]
    enabled = set(chunk.meta['optimize/fit_flags'].split())
    return {'fit_{}'.format(param): param in enabled for param in all_params}
# Leave-one-out cross-validation over all enabled GCP markers:
# for each marker, disable it, re-optimize the cameras, and measure the
# 3D error at that marker against its surveyed coordinates.
for chunk in PhotoScan.app.document.chunks:
    print('Processing chunk: {}'.format(chunk.label))
    outfile.write('chunk = {}\n'.format(chunk.label))
    # get the last optimizeCameras parameters used
    optimizeParamsDict = getOptimizeParams(chunk)
    errorList = []
    errorList2 = []
    markerList = []
    for marker in chunk.markers:
        # Only markers that are enabled AND have image projections can be
        # checked; a marker with no projections has no estimated position.
        if marker.reference.enabled and marker.projections.values():
            print('processing marker:', marker.label)
            markerList.append(marker.label)
            # turn off the marker to find the error
            marker.reference.enabled = False
            # re-optimize the cameras without the marker
            chunk.optimizeCameras(**optimizeParamsDict)
            # measured marker locations in geocentric coordinates
            source = chunk.crs.unproject(marker.reference.location)
            # estimated coordinates in geocentric coordinates
            estim = chunk.transform.matrix.mulp(marker.position)
            local = chunk.crs.localframe(
                chunk.transform.matrix.mulp(
                    marker.position))  # local LSE coordinates
            error = local.mulv(estim - source)  # error at the GCP marker
            errorList.append(error.norm())  # list of errors
            errorList2.append(error.norm()**2)  # list of squared errors
            # turn the marker back on
            marker.reference.enabled = True
            outfile.write(' marker {}: error = {}\n'.format(
                marker.label, error.norm()))
            outfile.flush()
        elif marker.reference.enabled and not marker.projections.values():
            print(
                'Marker {} not processed, no projections'.format(marker.label))
    # RMS over all checked markers in this chunk.
    # NOTE(review): if a chunk has no usable markers, n is 0 and the division
    # below raises ZeroDivisionError.
    suma = sum(errorList2)
    n = len(errorList)
    rmsError = (suma / n)**0.5
    # write the RMS error
    outfile.write('\n RMS = {}\n\n\n'.format(rmsError))
    outfile.flush()
    # re-optimize the cameras with all markers enabled
    chunk.optimizeCameras(**optimizeParamsDict)
    # write the errors to the console
    print('\n')
    print('\nErrors for chunk {}:'.format(chunk.label))
    for ii, nm in enumerate(markerList):
        print('marker {}: error = {}'.format(nm, errorList[ii]))
    print(
        '\n total rms error in chunk {} = {}\n'.format(chunk.label, rmsError))
outfile.close()
print('Errors written to file: {}{}'.format(fName, '_ERRORS.txt'))
| true |
62f137bdd0722fabc8776594717e480a87905098 | Python | billhu422/python_stdlib | /builtin_functions/builtin_functions.py | UTF-8 | 8,779 | 3.875 | 4 | [] | no_license | #encoding:utf-8
'''
Created on 2014年7月22日
@author: Administrator
'''
#encoding:utf-8
'''
Created on 2014年7月21日
@author: Administrator
'''
'''
abs绝对值
abs(x)
Return the absolute value of a number. The argument may be a plain or long integer or a floating point number. If the
argument is a complex number, its magnitude is returned.
'''
# print abs(-12)
'''
any(iterable) 如果有一个是真 返回真 否则是假
Return True if any element of the iterable is true. If the iterable is empty, return False. Equivalent to:
def any(iterable):
for element in iterable:
if element:
return True
return False
'''
# a=(False,False,False,True)
# print any(a)
'''
basestring() 不能被实例化,str和unicode的父类,用来测试是否是str或unicode的实例
This abstract type is the superclass for str and unicode. It cannot be called or instantiated, but it can be used to
test whether an object is an instance of str or unicode. isinstance(obj, basestring) is equivalent to
isinstance(obj, (str, unicode)).
'''
# basestring()#报错,不能实例化
# print isinstance('song', basestring)
# print isinstance(u'song', basestring)
'''
callable(object)
Return True if the object argument appears callable, False if not. If this returns true, it is still possible that a
call fails, but if it is false, calling object will never succeed. Note that classes are callable (calling a class
returns a new instance); class instances are callable if they have a __call__() method.
'''
# class a:
# print 'bbb'
# print callable(a)
'''
chr(i) int=>ascII
Return a string of one character whose ASCII code is the integer i. For example, chr(97) returns the string 'a'.
This is the inverse of ord(). The argument must be in the range [0..255], inclusive; ValueError will be raised if i is
outside that range. See also unichr().
'''
# print chr(126)
'''
unichr(i) 类似chr 范围更大
Return the Unicode string of one character whose Unicode code is the integer i. For example, unichr(97) returns the
string u'a'. This is the inverse of ord() for Unicode strings. The valid range for the argument depends how Python
was configured – it may be either UCS2 [0..0xFFFF] or UCS4 [0..0x10FFFF]. ValueError is raised otherwise. For ASCII
and 8-bit strings see chr().
'''
# print unichr(97)
'''
ord(c) unicode字符=>unicode 8bitstring=>value
Given a string of length one, return an integer representing the Unicode code point of the character when the argument
is a unicode object, or the value of the byte when the argument is an 8-bit string. For example, ord('a') returns the
integer 97, ord(u'\u2020') returns 8224. This is the inverse of chr() for 8-bit strings and of unichr() for unicode
objects. If a unicode argument is given and Python was built with UCS2 Unicode, then the character’s code point must be
in the range [0..65535] inclusive; otherwise the string length is two, and a TypeError will be raised.
'''
# print ord(u'\u2020')
'''
pow(x, y[, z]) x**y. pow(x, y) % z
Return x to the power y; if z is present, return x to the power y, modulo z (computed more efficiently than
pow(x, y) % z). The two-argument form pow(x, y) is equivalent to using the power operator: x**y.
'''
# print pow(2,3)
# print pow(2,3,3)
'''
??????????
property([fget[, fset[, fdel[, doc]]]])
如果要使用property函数,首先定义class的时候必须是object的子类。通过property的定义,当获取成员x的值时,就会调用getx函数,
当给成员x赋值时,就会调用setx函数,当删除x时,就会调用delx函数。使用属性的好处就是因为在调用函数,可以做一些检查。如果没有严
格的要求,直接使用实例属性可能更方便。
同时,还可以通过指定doc的值来为类成员定义docstring。
Return a property attribute for new-style classes (classes that derive from object).
'''
# class C(object):
# def __init__(self):
# self._x = None
#
# def getx(self):
# return self._x
# def setx(self, value):
# self._x = value
# def delx(self):
# del self._x
# # x = property(getx, setx, delx, "I'm the 'x' property.")
'''
range() return a list
'''
# print range(18)
'''
classmethod(function)
Return a class method for function.
A class method receives the class as implicit first argument, just like an instance method receives the instance. To declare a class method, use this idiom:
class C(object):
@classmethod
def f(cls, arg1, arg2, ...):
...
The @classmethod form is a function decorator – see the description of function definitions in Function definitions for details.
It can be called either on the class (such as C.f()) or on an instance (such as C().f()). The instance is ignored except for its class. If a class method is called for a derived class, the derived class object is passed as the implied first argument.
Class methods are different than C++ or Java static methods. If you want those, see staticmethod() in this section.
'''
# class a:
# @classmethod
# def a1(self):
# pass
'''
staticmethod(function)
Return a static method for function.
A static method does not receive an implicit first argument. To declare a static method, use this idiom:
class C(object):
@staticmethod
def f(arg1, arg2, ...):
...
The @staticmethod form is a function decorator – see the description of function definitions in Function definitions for details.
It can be called either on the class (such as C.f()) or on an instance (such as C().f()). The instance is ignored except for its class.
Static methods in Python are similar to those found in Java or C++. Also see classmethod() for a variant that is useful for creating alternate class constructors.
'''
# class a:
# @staticmethod
# def a1(self):
# pass
'''
cmp(x, y)
Compare the two objects x and y and return an integer according to the outcome. The return value is negative if x < y, zero if x == y and strictly positive if x > y.
'''
# print cmp(2,4)
'''
compile(source, filename, mode[, flags[, dont_inherit]]) ????
中文说明:将source编译为代码或者AST对象。代码对象能够通过exec语句来执行或者eval()进行求值。
参数source:字符串或者AST(Abstract Syntax Trees)对象。
参数 filename:代码文件名称,如果不是从文件读取代码则传递一些可辨认的值。
参数model:指定编译代码的种类。可以指定为 ‘exec’,’eval’,’single’。
参数flag和dont_inherit:这两个参数暂不介绍,可选参数。
版本:在python2.3、2.6、2.7、3.2中均有不同,使用时要引起注意,兼容python3
'''
##操作类属性
'''
delattr(object, name)
This is a relative of setattr(). The arguments are an object and a string. The string must be the name of one of the object’s attributes. The function deletes the named attribute, provided the object allows it. For example, delattr(x, 'foobar') is equivalent to del x.foobar.
'''
'''
setattr(object, name, value)
This is the counterpart of getattr(). The arguments are an object, a string and an arbitrary value. The string may name an existing attribute or a new attribute. The function assigns the value to the attribute, provided the object allows it. For example, setattr(x, 'foobar', 123) is equivalent to x.foobar = 123.
'''
'''
getattr(object, name[, default])
Return the value of the named attribute of object. name must be a string. If the string is the name of one of the object’s attributes, the result is the value of that attribute. For example, getattr(x, 'foobar') is equivalent to x.foobar. If the named attribute does not exist, default is returned if provided, otherwise AttributeError is raised
'''
'''
divmod(a, b)
Take two (non complex) numbers as arguments and return a pair of numbers consisting of their quotient and remainder when using long division. With mixed operand types, the rules for binary arithmetic operators apply. For plain and long integers, the result is the same as (a // b, a % b). For floating point numbers the result is (q, a % b), where q is usually math.floor(a / b) but may be 1 less than that. In any case q * b + a % b is very close to a, if a % b is non-zero it has the same sign as b, and 0 <= abs(a % b) < abs(b).
'''
# print divmod(3,2)#返回一个元组
| true |
9efc34a47abb0d540b141fe79f0f118c0c58fc9a | Python | JayanthMouli/fwi-NET | /regression/fwiNET.py | UTF-8 | 1,637 | 2.953125 | 3 | [
"MIT"
] | permissive | #fwiNET multilayer perceptron regressor
#created by Jayanth Mouli 2019
###########################################################################################################################################
import numpy as np
import pandas
from keras.layers import Dense, Activation
from keras.models import Sequential
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# ----- Load and clean the dataset -----
# Reads the normalized FWI / fire-size CSV from the working directory,
# drops incomplete rows and casts to float32 for Keras.
dataframe = pandas.read_csv("fwi2015firesnormalized.csv").dropna().astype(np.float32)
dataset = dataframe.values
# Column 1 is the regression target; columns 2 onward are the features.
# NOTE(review): input_dim=6 below assumes the CSV has exactly 8 columns -- confirm.
X = dataset[:, 2:]
y = dataset[:, 1]
# ----- Train/test split (tiny held-out set, fixed seed for reproducibility) -----
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.008, random_state=0)
# ----- Build the multilayer perceptron regressor -----
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=6))  # input + first hidden layer
model.add(Dense(units=32, activation='relu'))         # second hidden layer
model.add(Dense(units=32, activation='relu'))         # third hidden layer
model.add(Dense(units=1))                             # single-value regression output
# ----- Train -----
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, y_train, batch_size=128, epochs=500)
# ----- Visual evaluation on the held-out set -----
y_pred = model.predict(X_test)
plt.plot(y_test, color='red', label='Real data')
plt.plot(y_pred, color='blue', label='Predicted data')
plt.title('Prediction')
plt.legend()
plt.savefig('traintest1.png')
plt.show()
| true |
dee6a132081b9780a6c8def25f2706b71cdab04e | Python | Aasthaengg/IBMdataset | /Python_codes/p02262/s669334396.py | UTF-8 | 667 | 3.46875 | 3 | [] | no_license | def insertion_sort(A, N, gap):
    """Gapped insertion sort: the inner step of Shell sort.

    Sorts A[0:N] in place considering only elements `gap` apart and
    returns the number of element shifts performed (the move count the
    judge expects).
    """
    cnt = 0
    for i in range(gap, N):
        tmp = A[i]
        j = i - gap
        # Shift gap-spaced predecessors right until tmp's slot is found.
        while j>=0 and A[j] > tmp:
            A[j+gap] = A[j]
            j = j - gap
            cnt = cnt + 1
        A[j+gap] = tmp
    return cnt
def shell_sort(A, N):
    """Shell-sort A[0:N] in place and print the AOJ ALDS1_2_D output.

    Uses the gap sequence (3**k - 1) / 2 = 1, 4, 13, 40, ... capped at N,
    applied largest-first. Prints: the number of gaps, the gaps themselves,
    the total shift count, then one sorted element per line.
    """
    candidate_gaps = (int((3 ** item - 1) / 2) for item in range(1, 100))
    gaps = [g for g in candidate_gaps if g <= N][::-1]
    cnt = 0
    for gap in gaps:
        cnt += insertion_sort(A, N, gap)
    print(len(gaps))
    print(' '.join(str(gap) for gap in gaps))
    print(cnt)
    for index in range(0, N):
        print(A[index])
# Read the element count, then one integer per line (AOJ ALDS1_2_D input format).
N = int(input())
A = [int(input()) for i in range(N)]
shell_sort(A, N) | true |
4865500160e1b3c27fa19fe29f83a735253fd135 | Python | vandycknick/resume | /infra/deploy.py | UTF-8 | 2,179 | 2.609375 | 3 | [
"MIT"
] | permissive | import os
import sys
import stat
import argparse
from pathlib import Path
from azure.storage.blob import BlobServiceClient, ContentSettings
def is_input_redirected() -> bool:
    """Report whether this process's stdin is redirected.

    Returns False when stdin is an interactive terminal, True when file
    descriptor 0 is a FIFO (pipe) or a regular file, and False for any
    other kind of stdin.
    """
    if os.isatty(sys.stdin.fileno()):
        return False
    mode = os.fstat(0).st_mode
    return stat.S_ISFIFO(mode) or stat.S_ISREG(mode)
# File-suffix -> MIME type table for uploaded blobs; suffixes not listed
# fall back to the caller-supplied default.
content_types_map = {
    ".html": "text/html",
    ".pdf": "application/pdf"
}


def get_content_type_for_file(filename: str, fallback="text/plain") -> str:
    """Return the MIME content type for *filename*, keyed on its suffix."""
    return content_types_map.get(Path(filename).suffix, fallback)
if __name__ == "__main__":
    # CLI: upload every regular file found directly inside --cwd
    # (non-recursive) to an Azure Blob Storage container. The connection
    # string comes either from a redirected stdin or from --connection-string.
    parser = argparse.ArgumentParser(
        prog='az-uploader',
        description='Upload static files to an azure blob storage account.')
    parser.add_argument("--cwd", action="store", type=str)
    parser.add_argument("--container", action="store", type=str, required=True)
    parser.add_argument("--connection-string", action="store", type=str)
    args = parser.parse_args(sys.argv[1:])

    if is_input_redirected():
        # Consume a single line from the piped/redirected stdin.
        connection_string = next(sys.stdin).strip()
    else:
        # Bug fix: args.connection_string is None when the flag is omitted,
        # which used to raise AttributeError on .strip(). Fall back to ""
        # so the explicit "connection string is required!" check fires.
        connection_string = (args.connection_string or "").strip()

    cwd = Path(args.cwd or os.getcwd())
    container = args.container

    if not connection_string:
        raise Exception("connection string is required!")
    if not cwd.is_dir():
        raise Exception("cwd should point to an existing directory!")

    # Only top-level regular files are uploaded (directories are skipped).
    files = [file for file in cwd.iterdir() if file.is_file()]

    blob_service_client = BlobServiceClient.from_connection_string(connection_string)
    blob_container = blob_service_client.get_container_client(container)

    for file in files:
        relative = os.path.relpath(file, cwd)
        print("Uploading file: {0}".format(file))
        content_type = get_content_type_for_file(relative)
        content_settings = ContentSettings(content_type=content_type)
        blob_container.upload_blob(
            relative,
            file.read_bytes(),
            overwrite=True,
            content_settings=content_settings,
        )
| true |
bda4370a1c11b63eb3c77396a8e7e691a1820837 | Python | wylkson/TCC-Ci-ncia-de-dados-e-Big-Data | /TCC.py | UTF-8 | 2,920 | 2.828125 | 3 | [] | no_license | from surprise import Dataset, Reader, KNNBasic, KNNWithMeans, accuracy
from surprise.model_selection import train_test_split
from collections import defaultdict
import pandas as pd
import numpy as np
def carregar_filmes(path):
    """Load the movie catalogue CSV (MovieLens style) into a DataFrame."""
    return pd.read_csv(path)
def carregar_avaliacoes(path):
    """Load the ratings CSV into a DataFrame."""
    return pd.read_csv(path)
#carregar um arquivo padrão Movielens definido na variavel PATH
def carregar_dataset(path):
    """Load a MovieLens-style ratings file into a Surprise Dataset.

    Expects comma-separated `user item rating timestamp` rows with a header
    line (skipped) and ratings on a 0.5 - 5 scale.
    """
    reader = Reader(line_format='user item rating timestamp', sep=',',
                    rating_scale=(0.5, 5), skip_lines=1)
    return Dataset.load_from_file(path, reader=reader)
#Configura um modelo de recomendação, executa o treinamento e realiza a validação
def rodar_modelo(data, teste_tamanho, sim_opcoes, k):
    """Train a KNNBasic recommender and report its RMSE on a held-out split.

    Args:
        data: Surprise Dataset.
        teste_tamanho: test split size (fraction or count, as accepted by
            surprise's train_test_split).
        sim_opcoes: similarity options dict for KNNBasic.
        k: number of neighbours.
    Returns:
        The fitted KNNBasic model.
    """
    conjunto_treino, conjunto_teste = train_test_split(data, teste_tamanho)
    modelo = KNNBasic(k=k, sim_options=sim_opcoes)
    modelo.fit(conjunto_treino)
    # Side effect: prints the RMSE of the test-set predictions.
    accuracy.rmse(modelo.test(conjunto_teste))
    return modelo
#Configura um modelo de recomendação, executa o treinamento e realiza a validação
def rodar_modelo_sem_teste(data, sim_opcoes, k):
    """Train a KNNBasic recommender on the full dataset (no held-out split)."""
    modelo = KNNBasic(k=k, sim_options=sim_opcoes)
    modelo.fit(data.build_full_trainset())
    return modelo
#Realiza uma previsão de avaliação para um usuario com base no modelo treinado
def prever_avaliacao(modelo, id_usuario, id_filme, mostrar_tela):
    """Predict the rating of one (user, movie) pair with a trained model.

    Ids are stringified because Surprise expects raw ids as strings;
    `mostrar_tela` toggles Surprise's verbose printout of the prediction.
    """
    uid, iid = str(id_usuario), str(id_filme)
    return modelo.predict(uid, iid, verbose=mostrar_tela)
#Retorna os n vizinhos de um usuário com base no modele treinado
def encontrar_vizinhos(modelo, id_usuario, n):
    """Return the n nearest neighbours of a user according to the model.

    NOTE: Surprise's get_neighbors expects an *inner* id, not a raw id.
    """
    vizinhos = modelo.get_neighbors(id_usuario, n)
    return vizinhos
#Realiza a recomendação dos filmes não vistos que tem as maiores previsoes de avaliação com base no modelo treinado
def recomendar_filmes(modelo, data, id_usuario, n):
    """Recommend the n unseen movies with the highest predicted rating.

    Args:
        modelo: trained Surprise algorithm (e.g. from rodar_modelo*).
        data: the Surprise Dataset the model was trained on.
        id_usuario: raw id of the user to recommend for.
        n: number of movies to return.
    Returns:
        np.ndarray of [movie_id, predicted_rating] rows sorted by rating,
        descending. NOTE: np.array coerces the mixed [str, float] rows to a
        common dtype, so both columns come back as strings.
    """
    print('Montando base...')
    data_treina_prever = data.build_full_trainset()
    print('Encontrando filmes não avaliados...')
    # build_anti_testset() yields every (user, item, default_estimate) triple
    # for pairs NOT rated in the trainset; the default estimate is ignored.
    data_treina_prever = data_treina_prever.build_anti_testset()
    # Keep only the unrated movies belonging to the requested user.
    lista_filmes_nao_assistidos = []
    for id_usu, id_filme, estimativa_default in data_treina_prever:
        if str(id_usu) == str(id_usuario):
            lista_filmes_nao_assistidos.append(id_filme)
    #print (sorted(lista_filmes))
    #return 1
    top_filmes = []
    print('Avaliando filmes...')
    # Predict a rating for every unseen movie of this user (.est is the
    # numeric estimate on the Prediction namedtuple).
    for id_filme in lista_filmes_nao_assistidos:
        top_filmes.append([id_filme, prever_avaliacao(modelo, id_usuario, id_filme, False).est])
    # Keep only the n highest-rated predictions.
    top_filmes = sorted(top_filmes, key=lambda x: x[1], reverse=True)[:n]
    print('Previsões realizadas. Recomendados '+str(n)+' filmes.')
    filmes_recomendados = np.array(top_filmes)
    return filmes_recomendados
#retorna um dataframe com os dados de uma lista de filmes
def encontrar_detalhes_filmes(lista_filmes, id_filmes):
    """Return the rows of *lista_filmes* whose movieId appears in *id_filmes*."""
    mask = lista_filmes['movieId'].isin(id_filmes)
    return lista_filmes[mask]
| true |
544829bb4ccda15c7e895f9b6b39a8791cae77e9 | Python | tackme/exercism | /python/isogram/isogram.py | UTF-8 | 147 | 3.140625 | 3 | [] | no_license | import re
def is_isogram(string):
    """Return True when *string* has no repeating characters.

    Comparison is case-insensitive; spaces and hyphens are ignored.
    """
    seen = set()
    for ch in string.lower():
        if ch in (" ", "-"):
            continue
        if ch in seen:
            return False
        seen.add(ch)
    return True
| true |
591f583acb235cb52af111815ec6b9309e33598b | Python | songkai237/algorithm | /sort/merge_sort.py | UTF-8 | 840 | 3.953125 | 4 | [] | no_license | def mergeSort(alist):
    """Top-down merge sort of `alist`, done in place.

    Prints the sub-list at each recursion step to trace the algorithm.
    """
    print(alist)
    if len(alist) > 1:
        mid = len(alist) // 2
        leftlist = alist[:mid]
        rightlist = alist[mid:]
        # Sort both halves recursively (they are copies; the merge below
        # writes the result back into alist).
        mergeSort(leftlist)
        mergeSort(rightlist)
        i, j, k = 0, 0, 0
        # Merge step: repeatedly take the smaller head of the two halves.
        while i < len(leftlist) and j < len(rightlist):
            if leftlist[i] < rightlist[j]:
                alist[k] = leftlist[i]
                i = i + 1
                k = k + 1
            else:
                alist[k] = rightlist[j]
                j = j + 1
                k = k + 1
        # Copy any remaining tail of the left half ...
        while i < len(leftlist):
            alist[k] = leftlist[i]
            i = i + 1
            k = k + 1
        # ... and of the right half.
        while j < len(rightlist):
            alist[k] = rightlist[j]
            j = j + 1
            k = k + 1
print(alist)
# Demo: sort a sample list in place. The variable was renamed from `list`,
# which shadowed the `list` builtin for the rest of the module.
values = [1, 5, 6, 2, 8, 9, 3, 7, 4]
mergeSort(values)
| true |
d645f8307ab101d884150e6a427ae247b779f389 | Python | orifake/leetcode-python | /src/Check If It Is a Straight Line.py | UTF-8 | 617 | 3.609375 | 4 | [] | no_license | from typing import List
class Solution:
    def checkStraightLine(self, coordinates: List[List[int]]) -> bool:
        """Return True when every coordinate lies on a single straight line.

        Two or fewer points are always collinear; otherwise every further
        point is checked against the line through the first two.
        """
        if len(coordinates) <= 2:
            return True
        anchor, second = coordinates[0], coordinates[1]
        return all(self.isLine(anchor, second, point) for point in coordinates[2:])
    def isLine(self, p0, p1, p2):
        """Collinearity test via the cross product -- exact for integer
        coordinates and free of division/vertical-line special cases."""
        return (p1[1] - p0[1]) * (p2[0] - p0[0]) == (p1[0] - p0[0]) * (p2[1] - p0[1])
# Smoke test: the six points lie on the line y = x + 1, so this prints True.
t = Solution()
print(t.checkStraightLine([[1,2],[2,3],[3,4],[4,5],[5,6],[6,7]]))
d602109f1440474cafb12f051f803fb2155d04af | Python | dasfaha/AdjointFashematians | /free-style/drunken_beer_reviewer.py | UTF-8 | 1,309 | 2.71875 | 3 | [] | no_license | #!/usr/bin/python
""""
EXAMPLES
--------
Drunken Robo Reviewer 5000 Says...
Review: very light belgian yeasts and green bottled beers. taste either. but primarily grapefruit , and does n't smack taking a smooth experience
Drunken Robo Reviewer 5000 Says...
thick syrupy smells like a slightly buttery malt. i will continue to the smoke is wet on me , and easy to. tangerine highlights when it in a finger off with some grassy , and 1 % rating
Drunken Robo Reviewer 5000 Says...
Review: pours a bit of citrus and pirckly , with the mouth it up front. i can a super high , with a tad away. pours fresh air
"""
from nltk.tokenize import word_tokenize
from pymarkovchain import MarkovChain
def process(line):
    """Strip the 'review/text:' field tag, lowercase, and tokenize the line.

    Returns the list of word tokens produced by nltk's word_tokenize.
    """
    cleaned = line.replace('review/text:', '').lower()
    return word_tokenize(cleaned)
# --- Data loading (Python 2 script) ---
f = open('beeradvocate.txt', 'r')  # NOTE(review): handle is never closed; a with-block would be safer
text_data = []
name_data = []  # NOTE(review): never populated -- appears to be unused
line_count = 0
print 'Please wait, Robo reviewing 5000 is consuming beer..'
# Collect tokenized review text from the first ~2000 lines of the dump.
for line in f.readlines():
    if line.startswith('review/text'):
        text_data.append(process(line))
    line_count += 1
    if line_count > 2000:
        break
print 'Drunken Robo Reviewer 5000 Says...'
# Train a Markov chain on the flattened token stream, then generate three
# sentences. NOTE(review): sum(text_data, []) flattens in O(n^2); separate
# MarkovChain() instances work here presumably because pymarkovchain persists
# its database to disk between instantiations -- confirm.
MarkovChain().generateDatabase(' '.join(sum(text_data, [])))
print 'Review:', '. '.join([MarkovChain().generateString() for i in range(3)])
| true |
44cb02d2777e4872a6e90d1d23d19b85a5d89417 | Python | vevanonarain/Automation | /automation.py | UTF-8 | 911 | 5 | 5 | [] | no_license | def add (x, y):
    # Return the sum of the two operands.
    return x + y
def subtract(x, y):
    """Return the difference x - y."""
    return x - y
def multiply(x, y):
    """Return the product of x and y."""
    return x * y
def divide(x, y):
    """Return the true-division quotient x / y.

    Raises ZeroDivisionError when y == 0 (unchanged from the original).
    """
    return x / y
def modulo(x, y):
    """Return the remainder of x divided by y (Python semantics: the
    result's sign follows y)."""
    return x % y
# --- Simple interactive calculator menu ---
print("Select Operation: \n 1) Addition \n 2) Subtraction \n 3) Multiplication \n 4) Division \n 5) Modulo")
choice = input("\nEnter choice 1/2/3/4/5: ")
# Both operands are read up front, regardless of the chosen operation.
number1 = int(input("\nEnter first number: "))
number2 = int(input("Enter second number: "))
# Dispatch on the menu choice (note: option 4 may raise ZeroDivisionError).
if choice == "1":
    print("\nSum is --> " + str(add(number1, number2)))
elif choice == "2":
    print("\nDifference is --> " + str(subtract(number1, number2)))
elif choice == "3":
    print("\nProduct is --> " + str(multiply(number1, number2)))
elif choice == "4":
    print("\nQuotient is --> " + str(divide(number1, number2)))
elif choice == "5":
    print("\nRemainder is --> " + str(modulo(number1, number2)))
else:
print("Invalid input") | true |
bd7a22e751a48eacbe73b4809eea469134ce7514 | Python | idsc-frazzoli/kartsim | /src/simulator/model/kinematic_mpc_model.py | UTF-8 | 5,373 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created 28.05.19 09:38
@author: mvb
"""
import numpy as np
from simulator.model.dynamic_model_input_converter import MotorFunction, BrakeFunction, SteeringFunction
class KinematicVehicleMPC:
    """Kinematic bicycle model of the go-kart for the MPC simulator.

    State indexing used in get_state_changes:
        X[1:4] = (pos_x, pos_y, heading), X[4] = longitudinal velocity,
        X[6] = yaw rate. X[0] and X[5] are ignored here (presumably time
        and lateral velocity -- TODO confirm against the caller).
    Inputs U = (BETA steering angle, AB rear-axle acceleration, TV torque).
    """
    def __init__(self, wheel_base=1.19, dist_cog_rear_axle= 0.46, direct_input=True):
        """Configure geometry and input mode.

        Args:
            wheel_base: front-to-rear axle distance (presumably meters).
            dist_cog_rear_axle: distance from the center of gravity to the
                rear axle (stored but unused in this class).
            direct_input: True -> inputs already are (steering angle,
                acceleration, TV torque); False -> raw actuator commands
                that are first mapped through transform_inputs().
        """
        self.name = "mpc_kinematic"
        self.wheel_base = wheel_base
        self.dist_cog_rear_axle = dist_cog_rear_axle
        self.direct_input = direct_input
        # Vectorized actuator characteristic lookups from the converter module.
        self.motor_function = MotorFunction().get_vectorized_motor_function()
        self.brake_function = BrakeFunction().get_vectorized_brake_function()
        # NOTE(review): "vecortized" spelling mirrors the provider's API name.
        self.steering_function = SteeringFunction().get_vecortized_steering_function()
    def get_name(self):
        """Return the model identifier string ("mpc_kinematic")."""
        return self.name
    def get_direct_input_mode(self):
        """Return True when inputs are already physical quantities."""
        return self.direct_input
    def get_state_changes(self, X, U):
        """Kinematic state derivative for a single sample.

        Returns [dx, dy, dtheta, dvx, 0, 0]: world-frame position rates,
        the bicycle-model yaw rate, and the commanded acceleration.
        """
        wheel_base = 1.19 # length of vehicle -- NOTE(review): hard-coded; self.wheel_base is ignored here
        P = [X[1], X[2], X[3]]
        V = [X[4], 0, X[6]]
        # VELX, VELY, VELROTZ = V
        BETA, AB, TV = U
        # Rotate the body-frame velocity into the world frame.
        c, s = np.cos(float(P[2])), np.sin(float(P[2]))
        R = np.array(((c, -s), (s, c)))
        Vabs = np.matmul(V[:2], R.transpose())
        # Kinematic bicycle model: yaw rate follows speed and steering angle.
        VELROTZ = V[0] / wheel_base * np.tan(BETA)
        return [Vabs[0], Vabs[1], VELROTZ, AB, 0, 0]
    def get_accelerations(self, velocities, system_inputs):
        """Compute (ax, ay=0, yaw acceleration) for one sample or a batch.

        A `list` argument is treated as a single sample; an ndarray as a
        batch with one row per sample. When direct_input is False the raw
        actuator commands are first mapped via transform_inputs().
        """
        if not self.direct_input:
            if isinstance(velocities, list):
                velx, vely, velrotz = velocities
                steering_angle, brake_position, motor_current_l, motor_current_r = system_inputs
            else:
                # Batch path: slice columns from the (n, d) arrays.
                velx = velocities[:, 0]
                # vely = velocities[:, 1]
                velrotz = velocities[:, 2]
                steering_angle = system_inputs[:, 0]
                brake_position = system_inputs[:, 1]
                motor_current_l = system_inputs[:, 2]
                motor_current_r = system_inputs[:, 3]
                # turning_rate = system_inputs[:, 4]
            turning_angle, acceleration_rear_axle, torque_tv = self.transform_inputs(steering_angle,
                                                                                     brake_position,
                                                                                     motor_current_l,
                                                                                     motor_current_r,
                                                                                     velx)
        else:
            if isinstance(velocities, list):
                velx, vely, velrotz = velocities
                if isinstance(velx, np.float64) or isinstance(velx, float):
                    turning_angle, acceleration_rear_axle, torque_tv = system_inputs
                    acceleration_rear_axle = float(acceleration_rear_axle)
                    # Taper commanded deceleration near standstill
                    # (presumably to keep braking from driving the kart backwards).
                    if abs(velx) < 0.25 and acceleration_rear_axle < 0:
                        acceleration_rear_axle *= velx * 4.0
            else:
                velx = velocities[:, 0]
                # vely = velocities[:, 1]
                velrotz = velocities[:, 2]
                turning_angle = system_inputs[:, 0]
                acceleration_rear_axle = system_inputs[:, 1]
                torque_tv = system_inputs[:, 2]
                # turning_rate = system_inputs[:, 3]
        if isinstance(velocities, list):
            if turning_angle != 0:
                # Turn radius from the bicycle model.
                turn_circle_midpoint_steer = self.wheel_base / np.tan(turning_angle)
            else:
                # Straight steering: approximate an infinite turn radius.
                turn_circle_midpoint_steer = 1000000
        else:
            # Avoid division by zero in the batch path.
            # NOTE(review): this assigns into a view of the caller's
            # system_inputs array, mutating it in place.
            turning_angle[turning_angle == 0] = 0.000001
            turn_circle_midpoint_steer = np.divide(self.wheel_base, np.tan(turning_angle))
        velrotz_target = velx/turn_circle_midpoint_steer
        # Proportional controller pulling the yaw rate toward the kinematic target.
        k = 2.2
        dVELROTZ = k * (velrotz_target - velrotz)
        # Return shape mirrors the input: list for scalars, (n, 3) array for batches.
        if isinstance(acceleration_rear_axle, float):
            return [acceleration_rear_axle, 0, dVELROTZ]
        elif len(acceleration_rear_axle) == 1:
            return [float(acceleration_rear_axle), 0, float(dVELROTZ)]
        else:
            return np.array([acceleration_rear_axle, np.zeros((len(acceleration_rear_axle))), dVELROTZ]).transpose()
    def transform_inputs(self, steering_angle, brake_position, motor_current_l, motor_current_r, velx):
        """Map raw actuator commands to (turning angle, rear-axle accel, TV torque).

        Works on scalars (float/np.float64) or arrays for batch use.
        """
        brake_acceleration_factor = 1
        # Fade brake effect out near standstill so braking cannot reverse the
        # kart. NOTE(review): the scalar path tapers below 0.25 while the
        # batch path suppresses below 0.05 -- the thresholds differ; confirm.
        if isinstance(velx, np.float64) or isinstance(velx, float):
            if abs(velx) < 0.25:
                brake_acceleration_factor = abs(velx) * 4.0
                # pass
        else:
            brake_acceleration_factor = np.add(np.multiply(np.array([abs(vx) < 0.05 for vx in velx]), -0.9), 1)
        brake_acceleration = self.brake_function(brake_position)
        # Per-wheel acceleration: motor contribution minus braking opposing motion.
        acceleration_left_wheel = self.motor_function(velx, motor_current_l) - np.sign(
            velx) * brake_acceleration * brake_acceleration_factor
        acceleration_right_wheel = self.motor_function(velx, motor_current_r) - np.sign(
            velx) * brake_acceleration * brake_acceleration_factor
        # Mean drives the axle; the half-difference is the torque-vectoring term.
        acceleration_rear_axle = (acceleration_left_wheel + acceleration_right_wheel) / 2.0
        torque_tv = (acceleration_right_wheel - acceleration_left_wheel) / 2.0
        # Cubic polynomial map from steering-column position to wheel angle.
        turning_angle = -0.63 * np.power(steering_angle, 3) + 0.94 * steering_angle
        return turning_angle, acceleration_rear_axle, torque_tv
| true |
623de3d97f4de1174221480d55e78397c39b4cdc | Python | silky/bell-ppls | /env/lib/python2.7/site-packages/observations/r/uk_house_of_commons.py | UTF-8 | 2,505 | 2.65625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def uk_house_of_commons(path):
  """1992 United Kingdom electoral returns.

  Electoral returns for selected constituencies in the 1992 general
  election for the British House of Commons: 521 observations on 12
  variables.

  `constituency`, `county`
      name and county of the House of Commons constituency
  `y1`, `y2`
      log-odds of Conservative (resp. Labor) to LibDem vote share
  `y1lag`, `y2lag`
      the same quantities from the previous election
  `coninc`, `labinc`, `libinc`
      1 if the incumbent is Conservative / Labor / LibDem, 0 otherwise
  `v1`, `v2`, `v3`
      Conservative / Labor / LibDem share of the three-party vote

  Jonathan Katz; Gary King. 1999. "Replication data for: A Statistical
  Model of Multiparty Electoral Data",
  http://hdl.handle.net/1902.1/QIGTWZYTLZ

  Args:
    path: str.
      Path to a directory which either already stores the file or into
      which the file will be downloaded and extracted.
      Filename is `uk_house_of_commons.csv`.

  Returns:
    Tuple of np.ndarray `x_train` with 521 rows and 12 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'uk_house_of_commons.csv'
  csv_path = os.path.join(path, filename)
  if not os.path.exists(csv_path):
    url = 'http://dustintran.com/data/r/pscl/UKHouseOfCommons.csv'
    maybe_download_and_extract(path, url, save_file_name=filename, resume=False)
  data = pd.read_csv(csv_path, index_col=0, parse_dates=True)
  return data.values, {'columns': data.columns}
| true |
4fd7132b4549043c78faf0342e00526743cf3b76 | Python | antooa/familyFinanceTracker | /iBudget/income/views.py | UTF-8 | 4,194 | 2.640625 | 3 | [] | no_license | """
This module provides functions for income specifying.
"""
import json
from django.http import HttpResponse, JsonResponse
from django.views.decorators.http import require_http_methods
from group.models import Group
from income_history.models import IncomeHistory
from utils.aws_helper import AwsService
from utils.universal_category_methods import total_value_for_category
from utils.validators import is_valid_data_new_income
from .models import IncomeCategories
@require_http_methods(["POST"])
def create_category(request):
    """Handle a request for creating a new income category.

    Args:
        request (HttpRequest): POST whose JSON body carries the new
            category's name, icon, date and value.
    Returns:
        HttpResponse: 201 on creation, 202 when a category with the same
        name already exists, 400 on invalid payload, 406 when construction
        fails.
    """
    user = request.user
    data = json.loads(request.body)
    # Bug fix: validate the payload *before* reading individual fields.
    # Previously the field reads ran first, so a malformed body raised
    # KeyError/ValueError (HTTP 500) instead of the intended 400 response.
    if not is_valid_data_new_income(data):
        return HttpResponse("Bad request", status=400)
    name = data['name']
    icon = data['icon']
    date = data["date"]
    value = int(data["value"])
    owner = user
    income = IncomeCategories.filter_by_owner_name(owner=owner, name=name)
    if income:
        return HttpResponse("Sorry, but such category exists...\n OK", status=202)
    income = IncomeCategories(name=name, icon=icon, date=date, value=value, owner=owner)
    # NOTE(review): a freshly constructed model instance is normally truthy,
    # so this guard is effectively dead code; kept for behavioural parity.
    if not income:
        return HttpResponse(status=406)
    income.save()
    return HttpResponse("You've just created category '{}'. \n OK".format(name), status=201)
@require_http_methods(["GET"])
def show_income_ind(request):
    """Return the user's income categories as a JSON list.

    Args:
        request (HttpRequest): the incoming GET request.
    Returns:
        JsonResponse: list of {'id', 'name'} dicts with status 200, or an
        empty object with status 400 when no user is attached.
    """
    user = request.user
    if not user:
        return JsonResponse({}, status=400)
    user_categories = [
        {'id': entry.id, 'name': entry.name}
        for entry in IncomeCategories.filter_by_user(user)
    ]
    return JsonResponse(user_categories, status=200, safe=False)
@require_http_methods(["GET"])
def show_income_group(request):
    """Return the shared funds of every group the user belongs to.

    Args:
        request (HttpRequest): the incoming GET request.
    Returns:
        JsonResponse: list of fund descriptors (fund id/name plus group
        id/name) with status 200, or an empty object with status 400 when
        no user is attached.
    """
    user = request.user
    if not user:
        return JsonResponse({}, status=400)
    users_funds = []
    for group in Group.filter_groups_by_user_id(user):
        for shared_fund in Group.filter_funds_by_group(group):
            users_funds.append({
                'id_fund': shared_fund['id'],
                'name_fund': shared_fund['name'],
                'id_group': group.id,
                'group_name': group.name,
            })
    return JsonResponse(users_funds, status=200, safe=False)
@require_http_methods(["DELETE"])
def delete_income(request, income_category_id):
    """Soft-delete an income category (mark it inactive, keep the row).

    Args:
        request (HttpRequest): the incoming DELETE request.
        income_category_id: primary key of the IncomeCategories row.
    Returns:
        HttpResponse: 200 on success, 406 when the category does not
        exist, 400 when saving fails. Returns None (as the original did,
        implicitly) when no user is attached to the request.
    """
    if not request.user:
        return None  # matches the original's implicit fall-through
    income = IncomeCategories.get_by_id(income_category_id)
    if not income:
        return HttpResponse(status=406)
    income.is_active = False
    try:
        income.save()
    except (ValueError, AttributeError):
        return HttpResponse(status=400)
    return HttpResponse(f"You've just deleted income: {income.name}", status=200)
@require_http_methods(["POST"])
def income_summary(request):
    """Return summary info (totals, icon URL, name) for one income category.

    Args:
        request (HttpRequest): POST whose JSON body contains 'income_id'.
    Returns:
        JsonResponse with the category's aggregated totals merged with its
        icon URL and name.
    """
    income_id = json.loads(request.body)['income_id']
    income = IncomeCategories.get_by_id(income_id)
    history = IncomeHistory.objects.filter(income=income_id)
    # Identity fields win over any colliding keys from the totals dict,
    # matching the original {**totals, **info} merge order.
    summary = dict(total_value_for_category(history, True))
    summary.update({'icon': AwsService.get_image_url(income.icon), 'name': income.name})
    return JsonResponse(summary)
| true |
ab62b17628e6568285b6bd516fd85192037c6eb3 | Python | charleskausihanuni/B2-Group-Project | /Main Program FINAL.py | UTF-8 | 11,861 | 3 | 3 | [] | no_license | #GUI
import Tkinter
from Tkinter import *
import tkMessageBox
class Labels:
    """Static text labels for the car-search GUI, laid out on the Tk grid.

    NOTE: each `Label(...).grid(...)` expression evaluates to None (grid()
    returns None), so self.label always ends up None; the widgets stay
    alive because their parent `root` references them.
    """
    def __init__(self, root):
        self.root = root
        # Section heading plus one label per search criterion / result area.
        self.label=Label(root, text='Search Criteria ',height=2).grid(row=1,column=1,sticky=W)
        self.label=Label(root, text='Location: ').grid(row=2,column=1,sticky=E)
        self.label=Label(root, text='Number of Seats: ').grid(row=3,column=1,sticky=E)
        self.label=Label(root, text='Number of Doors: ').grid(row=4,column=1,sticky=E)
        self.label=Label(root, text='Colour: ').grid(row=5,column=1,sticky=E)
        self.label=Label(root, text='Minimum Price: ').grid(row=6,column=1,sticky=E)
        self.label=Label(root, text='Maximum Price: ').grid(row=6,column=3,sticky=E)
        self.label=Label(root, text='Sort by: ', height=5).grid(row=16,column=2,sticky=E)
        self.label=Label(root, text='Results: ', height=5).grid(row=16,column=1,sticky=W)
class DropDownMenu:
    """Option menus for location, colour and sort order.

    The selections are exposed through the StringVar objects passed in
    (loc, col, sort). As in Labels, self.OptionMenu is always None because
    grid() returns None.
    """
    def __init__(self, root, loc, col, sort):
        self.root = root
        self.loc = loc
        self.col = col
        self.sort = sort
        # Location choices
        self.OptionMenu=OptionMenu(self.root, self.loc, "Any","", "Birmingham", "Cardiff", "Dublin", "Glasgow", "London", "Manchester").grid(row=2,column=2,sticky=W+E)
        # Colour choices
        self.OptionMenu=OptionMenu(self.root, self.col, "Any","", "Black", "Blue", "Green", "Red", "Silver", "White").grid(row=5,column=2,sticky=W+E)
        # Result sort order
        self.OptionMenu=OptionMenu(self.root, self.sort, "Any","", "High to Low", "Low to High").grid(row=16,column=3,sticky=W+E)
class Entrys:
    """Minimum/maximum price entry boxes, pre-filled with a pound sign.

    unichr(163) is u'£' (Python 2). NOTE: both boxes are assigned to
    self.entry in turn, so only the maximum-price Entry stays referenced.
    """
    def __init__(self,root):
        self.root = root
        # Minimum price
        self.entry = Entry(self.root)
        self.entry.grid(row=6,column=2)
        self.entry.insert(END, unichr(163))
        # Maximum price
        self.entry = Entry(self.root)
        self.entry.grid(row=6,column=4)
        self.entry.insert(END, unichr(163))
class Checkbuttons:
    """Seat-count and door-count checkbuttons.

    NOTE(review): no Tk variable is attached to any Checkbutton, so the
    checked states cannot be read back -- display-only as written. The
    text="..." buttons also omit a parent and therefore default to the
    root window rather than self.root.
    """
    def __init__(self, root):
        self.root = root
        self.checkbutton = Checkbutton(self.root)
        # Number of seats
        self.checkbutton = Checkbutton(text="2").grid(row=3,column=2,sticky=W)
        self.checkbutton = Checkbutton(text="5").grid(row=3,column=2,sticky=N)
        self.checkbutton = Checkbutton(text="7").grid(row=3,column=2,sticky=E)
        self.checkbutton = Checkbutton(text="Any").grid(row=3,column=3,sticky=E)
        # Number of doors
        self.checkbutton = Checkbutton(text="3").grid(row=4,column=2,sticky=W)
        self.checkbutton = Checkbutton(text="5").grid(row=4,column=2,sticky=N)
        self.checkbutton = Checkbutton(text="Any").grid(row=4,column=3,sticky=E)
class Buttons:
    """Search and more-information buttons.

    `popup` is expected to be a tkMessageBox.showinfo-style callable taking
    (title, message).
    """
    def __init__(self, root, popup):
        self.root = root
        self.button = Button(self.root)
        self.popup = popup
        # Search button -- NOTE(review): no command bound, so clicking it does nothing yet.
        self.button = Button(text="Search", height=5).grid(row=15,column=3,sticky=W+E)
        # More-info button: opens the info popup.
        self.button = Button(text="Click for more information", command = self.info).grid(row=1,column=5,sticky=W+E)
    def info(self):
        # Show usage instructions. NOTE(review): "desied" typo in the
        # user-facing text (left untouched here; changing it alters output).
        self.popup("Click for more information", "Please fill in the desied search criteria sections in order to conduct a search")
def main():
    """Build the search window widgets and run the Tk event loop."""
    root = Tk()
    label = Labels(root)
    # StringVars are created after Tk() so they bind to this root window.
    loc = StringVar(root)
    col = StringVar(root)
    sort = StringVar(root)
    drop = DropDownMenu(root, loc, col, sort)
    entry = Entrys(root)
    checkbutton = Checkbuttons(root)
    popup = tkMessageBox.showinfo
    button = Buttons(root, popup)
    root.mainloop()
if __name__ == '__main__':
    # Robustness fix: `sys` was never imported explicitly in this file; it
    # only leaked into the namespace via `from Tkinter import *` (the py2
    # Tkinter module defines no __all__). Import it explicitly before use.
    import sys
    sys.exit(main())
#Map Animation
from Tkinter import *
import time
class Gui:
    """Map window that animates a sprite travelling to a chosen garage city.

    The destination is read from stdin when the object is constructed; the
    "Move" button then slides the sprite from its start position to the
    city's pixel coordinates on the map image.
    """

    # Pixel coordinates of each supported city on Map.gif, keyed by
    # lower-cased city name.
    _DESTINATIONS = {
        "birmingham": (360, 410),
        "manchester": (325, 350),
        "cardiff": (275, 450),
        "dublin": (150, 360),
        "london": (380, 475),
        "glasgow": (260, 220),
    }

    def __init__(self, root):
        self.root = root
        self.frame1 = Frame(self.root)
        self.frame1.pack()
        self.button = Button(self.frame1, text="Move", command=self.movement)
        self.button.pack(side=LEFT)
        self.frame2 = Frame(self.root)
        self.frame2.pack()
        self.canvas = Canvas(self.frame2, width=500, height=640, bg='red')
        self.canvas.pack()
        self.gif1 = PhotoImage(file='Map.gif')       # background map
        self.gif2 = PhotoImage(file='Stickman.gif')  # the travelling sprite
        # Both images are anchored by their top-left (NW) corner.
        self.canvas.create_image(0, 0, image=self.gif1, anchor=NW)
        self.robot = self.canvas.create_image(10, 590, image=self.gif2, anchor=NW)
        # NOTE(review): blocks on stdin during construction.
        self.location = str(input("Please Specify the location of the garage."))

    def animation(self, destX, destY):
        """Slide the sprite one pixel per tick until it reaches (destX, destY)."""
        canvas = self.canvas
        robotX, robotY = canvas.coords(self.robot)
        while robotX != destX or robotY != destY:
            # Step each axis by -1, 0 or +1 towards the destination.
            stepX = (destX > robotX) - (destX < robotX)
            stepY = (destY > robotY) - (destY < robotY)
            canvas.coords(self.robot, robotX + stepX, robotY + stepY)
            canvas.update()
            robotX, robotY = canvas.coords(self.robot)
            time.sleep(0.005)

    def movement(self):
        """Move the sprite to the city chosen at start-up.

        Matching is case-insensitive (the original only accepted two exact
        capitalisations per city).  Unknown cities print a message; the
        original "Not A Valid Location" branch was unreachable because the
        outer guard already filtered on the same city list.
        """
        destination = self._DESTINATIONS.get(self.location.lower())
        if destination is not None:
            self.animation(*destination)
        else:
            print("Not A Valid Location")
if __name__ == '__main__':
    # Build the Tk root, attach the map GUI (which prompts on stdin for a
    # destination city) and enter the event loop.
    root = Tk()
    g = Gui(root)
    root.mainloop()
#Searching
import sqlite3 as sql
# Interactively collect the search criteria from stdin.
colour_id = input("Colour of the car? :")
location_id = input("Location of the car?: ")
seat_id = int(input("How many seats in the car?: "))
door_id = int(input("How many doors in the car?: "))
minPrice = float(input("Min price of the car?: "))
maxPrice = float(input("Max price of the car?: "))
#Search algorithm for all criteria
# Parameterised query (placeholders), so the user input cannot inject SQL.
db = sql.connect('Car_Database.sqlite')
cursor = db.cursor()
cursor.execute('''SELECT * FROM Cars WHERE Colour=? and Location=? and Seats=? and Doors=? and Price BETWEEN ? AND ?''', (colour_id, location_id, seat_id, door_id, minPrice, maxPrice,))
user = cursor.fetchall()
print(user)
db.close()
#list of results from searching
resultList1=user
#list for prices to be sorted
priceList1=[]
#final sorted list
Sorted_Results_List1=[]
#putting prices for individual car in seperate list
# Price is column index 6 of each result row.
for n in resultList1:
    priceList1.append(n[6])
# Shared buffer sorted in place by heapSort(); the surrounding script rebinds
# it to the price list before calling heapSort().
rList = []
def swap(i, j):
    """Exchange elements i and j of the module-level ``rList`` in place."""
    rList[i], rList[j] = rList[j], rList[i]
def heaping(end, i):
    """Sift ``rList[i]`` down within ``rList[:end]`` to restore the max-heap."""
    l = 2 * i + 1      # left child
    r = 2 * (i + 1)    # right child
    largest = i
    if l < end and rList[i] < rList[l]:
        largest = l
    if r < end and rList[largest] < rList[r]:
        largest = r
    if largest != i:
        swap(i, largest)
        heaping(end, largest)
def heapSort():
    """In-place ascending heap sort of the module-level ``rList``."""
    end = len(rList)
    start = end // 2 - 1
    # Build the initial max-heap.
    for i in range(start, -1, -1):
        heaping(end, i)  # BUG FIX: previously called ``heapify``, undefined here
    # Repeatedly move the current maximum to the end and re-heap the prefix.
    for i in range(end-1, 0, -1):
        swap(i, 0)
        heaping(i, 0)
# Sort the prices in place (heapSort operates on the global ``rList``).
rList = priceList1
heapSort()
'''comparing resultList and priceList and adding results in asending order to
sorted_Results_List'''
# O(n^2) join of sorted prices back to their full rows; rows with equal
# prices are appended once per matching price entry.
for x in priceList1:
    for y in resultList1:
        if x!=y[6]:
            pass
        else:
            Sorted_Results_List1.append(y)
# Print the rows in ascending price order.
for n in Sorted_Results_List1:
    print(n)
#Sorting high to low
#list of result from searching
resultList=user
#list for prices to be sorted
priceList=[]
#final sorted list
Sorted_Results_List=[]
# Collect each row's price (column index 6) for sorting.
for n in resultList:
    priceList.append(n[6])
def insertion_sort( seq ):
    """Sort ``seq`` in place, ascending, via insertion sort (stable, O(n^2))."""
    for pos in range(1, len(seq)):
        current = seq[pos]
        gap = pos
        # Shift larger elements one slot right until current's place is found.
        while gap and seq[gap - 1] > current:
            seq[gap] = seq[gap - 1]
            gap -= 1
        seq[gap] = current
# Sort the prices ascending in place, ...
seq = priceList
insertion_sort( seq )
# ... join them back to their full rows (O(n^2); equal prices duplicate rows),
for x in priceList:
    for y in resultList:
        if x!=y[6]:
            pass
        else:
            Sorted_Results_List.append(y)
# ... then reverse to get high-to-low order and print.
Sorted_Results_List.reverse()
for n in Sorted_Results_List:
    print(n)
import sys
def main():
    """Sort the search results (global ``user``) by price, high to low, and print them."""
    #list of result from searching
    resultList=user
    #list for prices to be sorted
    priceList=[]
    #final sorted list
    Sorted_Results_List=[]
    # Price is column index 6 of each result row.
    for n in resultList:
        priceList.append(n[6])
    def insertionsort( aList ):
        """Sort ``aList`` in place, ascending, via insertion sort."""
        for i in range( 1, len( aList ) ):
            tmp = aList[i]
            k = i
            while k > 0 and tmp < aList[k - 1]:
                aList[k] = aList[k - 1]
                k -= 1
            aList[k] = tmp
    aList = priceList
    insertionsort( aList )
    # Join sorted prices back to full rows (O(n^2); equal prices duplicate rows).
    for x in priceList:
        for y in resultList:
            if x!=y[6]:
                pass
            else:
                Sorted_Results_List.append(y)
    # Ascending -> descending for the high-to-low listing.
    Sorted_Results_List.reverse()
    print(Sorted_Results_List)
if __name__ == '__main__':
    sys.exit(main())
#Sorting low to high
import sys
def main():
    """Sort the search results (global ``user``) by price, low to high, and print them.

    Heap-sorts the price column (index 6) in place, then maps the sorted
    prices back to their full result rows.
    """
    #list of results from searching
    resultList1 = user
    #list for prices to be sorted
    priceList1 = []
    #final sorted list
    Sorted_Results_List1 = []
    #putting prices for individual car in seperate list
    # BUG FIX: previously iterated ``resultList``, which is undefined in this
    # scope (NameError at runtime); the local list is ``resultList1``.
    for n in resultList1:
        priceList1.append(n[6])
    def swap(i, j):
        # Exchange two elements of the enclosing ``sqc`` list in place.
        sqc[i], sqc[j] = sqc[j], sqc[i]
    def heapify(end, i):
        # Sift sqc[i] down within sqc[:end] to restore the max-heap property.
        l = 2 * i + 1
        r = 2 * (i + 1)
        largest = i
        if l < end and sqc[i] < sqc[l]:
            largest = l
        if r < end and sqc[largest] < sqc[r]:
            largest = r
        if largest != i:
            swap(i, largest)
            heapify(end, largest)
    def heap_sort():
        # Classic in-place ascending heap sort over ``sqc``.
        end = len(sqc)
        start = end // 2 - 1
        for i in range(start, -1, -1):
            heapify(end, i)
        for i in range(end-1, 0, -1):
            swap(i, 0)
            heapify(i, 0)
    sqc = priceList1
    heap_sort()
    '''comparing resultList and priceList and adding results in ascending order to
    sorted_Results_List'''
    # O(n^2) join; rows with equal prices are appended once per price entry.
    for x in priceList1:
        for y in resultList1:
            if x != y[6]:
                pass
            else:
                Sorted_Results_List1.append(y)
    print(Sorted_Results_List1)
if __name__ == '__main__':
    sys.exit(main())
| true |
c11ea0a1c9d9713cb3ef043aa1e53642c15f6a68 | Python | mrillusi0n/compete | /koba.py | UTF-8 | 158 | 3.515625 | 4 | [
"MIT"
] | permissive | digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
tens_digit = '7'
for digit in digits:
print('{}{}'.format(tens_digit, digit))
print('80')
| true |
784f113ef31a3d352f0093ec476232518f103126 | Python | boonchu/python3lab | /coursera.org/python3/quiz/homework2/quiz9-1.py | UTF-8 | 673 | 4.3125 | 4 | [] | no_license | #! /usr/bin/env python
"""
Create a rectagular grid and iterate through
a subset of its cells in a specified direction
If running this code snippet prints 13 in the console,
what are the non-negative values of row and col?
"""
GRID_WIDTH = 6
GRID_HEIGHT = 4
# Create a rectangular grid using nested list comprehension
# Inner comprehension creates a single row
EXAMPLE_GRID = [ [row + col for col in range(GRID_WIDTH)] for row in range(GRID_HEIGHT)]
def run_example():
"""
Run several example calls of traverse_grid()
"""
print "Print out values in grid"
for row in range(GRID_HEIGHT):
print EXAMPLE_GRID[row]
print
run_example()
| true |
fb9d54392d38221690c2ea6c1a12ef9fcdabac60 | Python | NordMan10/OptimalMotion3.1-py- | /Model.py | UTF-8 | 29,852 | 3.015625 | 3 | [] | no_license | import copy
from AircraftInputDataGenerator import AircraftInputDataGenerator
from static.ProgramConstants import ProgramConstants
from Runway import Runway
from SpecialPlace import SpecialPlace
from TableRow import TableRow
from TakingOffAircraft import TakingOffAircraft
from Interval import Interval
from static.CommonInputData import CommonInputData
class Model(object):
    """Top-level take-off scheduling model.

    Owns the runways and the special (treatment) places, builds taking-off
    aircraft from planned take-off moments and computes for each aircraft its
    possible / permitted take-off moments, its start moment, its reserve
    status and its waiting time at the PS point.  ``get_output_data`` drives
    the whole computation and returns the results as ``TableRow`` instances
    for display.

    Terminology (inferred from attribute names - confirm against the domain
    docs): SP = special place (treatment pad); PS / ES appear to be the
    preliminary / executive start points of the taxi route.
    """

    def __init__(self, runway_count, special_place_count):
        """Create the model with the requested numbers of runways and special places."""
        self._runways = []
        self._special_places = []
        self.init_runways(runway_count)
        self.init_special_places(special_place_count)

    @property
    def runways(self):
        """List of ``Runway`` instances managed by the model."""
        return self._runways

    @property
    def special_places(self):
        """List of ``SpecialPlace`` instances managed by the model."""
        return self._special_places

    def init_runways(self, runway_count):
        """Create ``runway_count`` runways with sequential string ids.

        NOTE(review): runway ids are strings while special-place ids are ints;
        ``TakingOffAircraft.runway_id`` comparisons rely on this asymmetry -
        confirm it is intentional.
        """
        for i in range(ProgramConstants.start_id_value, runway_count + ProgramConstants.start_id_value):
            self.runways.append(Runway(str(i)))

    def init_special_places(self, special_place_count):
        """Create ``special_place_count`` special places with sequential int ids."""
        for i in range(ProgramConstants.start_id_value, special_place_count + ProgramConstants.start_id_value):
            self.special_places.append(SpecialPlace(i))

    def get_output_data(self, unused_planned_taking_off_moments):
        """Run the full scheduling pipeline for the given planned take-off moments.

        :param unused_planned_taking_off_moments: planned take-off moments that
            have not yet been consumed; the caller's list is not modified.
        :return: list of ``TableRow`` instances ordered by permitted take-off moment.
        """
        # Build aircraft with possible/start moments, ordered by possible moment.
        ordered_configured_taking_off_aircrafts = self._get_ordered_configured_taking_off_aircrafts(
            unused_planned_taking_off_moments)
        # Assign permitted moments and designate reserve aircraft.
        self._reconfigure_aircrafts_with_reserve(ordered_configured_taking_off_aircrafts)
        # Compute the waiting time at PS for every aircraft.
        self._set_PS_waiting_time(ordered_configured_taking_off_aircrafts)
        # Present the results ordered by permitted take-off moment.
        ordered_configured_taking_off_aircrafts.sort(key=TakingOffAircraft.sort_by_permitted_moments)
        return [self._get_table_row(aircraft) for aircraft in ordered_configured_taking_off_aircrafts]

    def _get_ordered_configured_taking_off_aircrafts(self, unused_planned_taking_off_moments):
        """Create one aircraft per planned moment and compute its possible
        take-off moment and the matching start moment.

        :return: aircraft list ordered by possible take-off moment.
        """
        taking_off_aircrafts = []
        # Work on a sorted copy so the caller's list stays untouched.
        ordered_planned_taking_off_moments = copy.deepcopy(unused_planned_taking_off_moments)
        ordered_planned_taking_off_moments.sort()
        for planned_moment in ordered_planned_taking_off_moments:
            # Generate the input data for a new aircraft bound to this moment.
            aircraft_input_data = AircraftInputDataGenerator. \
                get_aircraft_input_data(planned_moment, self.runways)
            taking_off_aircraft = TakingOffAircraft(aircraft_input_data)
            # The take-off itself occupies the runway during this interval.
            taking_off_interval = Interval(taking_off_aircraft.creation_moments.planned_taking_off -
                                           taking_off_aircraft.creation_intervals.taking_off,
                                           taking_off_aircraft.creation_moments.planned_taking_off)
            # Delay caused by the runway being busy (also reserves the slot).
            start_delay = self._get_runway_start_delay(taking_off_aircraft, taking_off_interval)
            if taking_off_aircraft.processing_necessity:
                # Route: parking -> SP (treatment) -> PS -> ES.  The special
                # place may add its own delay on top of the runway delay.
                SP_arrive_moment = taking_off_interval.start_moment - taking_off_aircraft.creation_intervals.motion_from_PS_to_ES - \
                                   taking_off_aircraft.creation_intervals.motion_from_SP_to_PS - \
                                   taking_off_aircraft.creation_intervals.processing
                start_delay += self._get_special_place_start_delay(taking_off_aircraft, SP_arrive_moment)
                start_moment = SP_arrive_moment - taking_off_aircraft.creation_intervals.motion_from_parking_to_SP + \
                               start_delay - CommonInputData.spare_arrival_time_interval.end_moment
            else:
                # Route: parking -> PS -> ES (no treatment needed).
                start_moment = taking_off_interval.start_moment - taking_off_aircraft.creation_intervals.motion_from_PS_to_ES - \
                               taking_off_aircraft.creation_intervals.motion_from_parking_to_PS + start_delay - \
                               CommonInputData.spare_arrival_time_interval.end_moment
            taking_off_aircraft.calculating_moments.start = start_moment
            # The possible take-off moment is the planned one pushed by the delay.
            taking_off_aircraft.calculating_moments.possible_taking_off = \
                taking_off_aircraft.creation_moments.planned_taking_off + start_delay
            taking_off_aircrafts.append(taking_off_aircraft)
        taking_off_aircrafts.sort(key=TakingOffAircraft.sort_by_possible_moments)
        return taking_off_aircrafts

    def _get_runway_start_delay(self, taking_off_aircraft, taking_off_interval):
        """Return the start delay (seconds) imposed by runway occupancy.

        Side effect: reserves the granted interval on the aircraft's runway.

        :param taking_off_aircraft: aircraft whose delay is computed.
        :param taking_off_interval: interval during which it occupies the runway.
        """
        this_runway = next(runway for runway in self.runways if runway.id == taking_off_aircraft.runway_id)
        # Ask the runway for the nearest free slot of the required length ...
        free_runway_interval = this_runway.get_free_interval(taking_off_interval)
        # ... and claim it for this aircraft.
        this_runway.add_aircraft_interval(taking_off_aircraft.id, free_runway_interval)
        return free_runway_interval.start_moment - taking_off_interval.start_moment

    def _get_special_place_start_delay(self, taking_off_aircraft, SP_arrive_moment):
        """Return the start delay (seconds) imposed by special-place occupancy.

        Side effect: reserves the granted interval on the aircraft's special place.

        :param taking_off_aircraft: aircraft whose delay is computed.
        :param SP_arrive_moment: moment the aircraft reaches the special place.
        """
        this_special_place = next(special_place for special_place in self.special_places if special_place.id ==
                                  taking_off_aircraft.special_place_id)
        processing_interval = Interval(SP_arrive_moment,
                                       SP_arrive_moment + taking_off_aircraft.creation_intervals.processing)
        free_SP_interval = this_special_place.get_free_interval(processing_interval)
        this_special_place.add_aircraft_interval(taking_off_aircraft.id, free_SP_interval)
        return free_SP_interval.start_moment - processing_interval.start_moment

    def _reconfigure_aircrafts_with_reserve(self, ordered_taking_off_aircrafts):
        """Assign permitted take-off moments and designate reserve aircraft.

        For every aircraft not already consumed as a reserve: find the nearest
        permitted moment, shift the start moments of the aircraft and of the
        admissible reserves behind it accordingly, and mark everyone except
        the highest-priority aircraft of the group as reserve.
        """
        used_indexes = set()  # indexes already handled (as main or reserve)
        for i in range(len(ordered_taking_off_aircrafts)):
            if i in used_indexes:
                continue
            possible_moment = ordered_taking_off_aircrafts[i].calculating_moments.possible_taking_off
            nearest_permitted_moment = CommonInputData.input_taking_off_moments.get_nearest_permitted_moment(possible_moment)
            if nearest_permitted_moment is None:
                # No permitted moment available: flag with the -1 sentinels
                # (rendered as "not found" in the output table).
                ordered_taking_off_aircrafts[i].calculating_moments.start = -1
                ordered_taking_off_aircrafts[i].calculating_moments.permitted_taking_off = -1
                continue
            verified_permitted_moment = nearest_permitted_moment
            # Shift the current aircraft's start by the permitted-moment delay.
            start_delay = verified_permitted_moment - possible_moment
            current_aircraft_start_moment = ordered_taking_off_aircrafts[i].calculating_moments.start + start_delay
            # Start moments for the reserves tied to the same permitted moment.
            reserve_aircraft_start_moments = self._get_reserve_aircraft_start_moments(
                verified_permitted_moment, i, ordered_taking_off_aircrafts)
            # index -> start moment for the main aircraft and its reserves.
            all_aircraft_start_moments_data = {i: current_aircraft_start_moment}
            all_aircraft_start_moments_data.update(reserve_aircraft_start_moments)
            self._set_prepared_start_moments(all_aircraft_start_moments_data, ordered_taking_off_aircrafts)
            most_priority_aircraft_index = self._get_most_priority_aircraft_index(
                all_aircraft_start_moments_data, ordered_taking_off_aircrafts)
            for aircraft_index in all_aircraft_start_moments_data:
                ordered_taking_off_aircrafts[aircraft_index].calculating_moments.permitted_taking_off = verified_permitted_moment
                if aircraft_index != most_priority_aircraft_index:
                    # Everyone except the highest-priority aircraft becomes a
                    # reserve and gets the next permitted moment as fallback.
                    ordered_taking_off_aircrafts[aircraft_index].is_reserve = True
                    ordered_taking_off_aircrafts[aircraft_index].calculating_moments.reserve_permitted_taking_off = \
                        CommonInputData.input_taking_off_moments.get_next_permitted_moment()
                used_indexes.add(aircraft_index)

    def _get_reserve_aircraft_start_moments(self, permitted_moment, main_aircraft_index, ordered_taking_off_aircrafts):
        """Compute start moments for the reserves behind the main aircraft.

        :param permitted_moment: the permitted moment granted to the group.
        :param main_aircraft_index: index of the main (reference) aircraft.
        :param ordered_taking_off_aircrafts: the full aircraft list.
        :return: dict mapping aircraft index -> start moment (possibly empty).
        """
        reserve_start_moments_data = {}
        possible_taking_off_moments = [aircraft.calculating_moments.possible_taking_off
                                       for aircraft in ordered_taking_off_aircrafts]
        # Reserves only exist if there are aircraft after the main one.
        if main_aircraft_index < len(possible_taking_off_moments) - 1:
            reserve_aircraft_count = self._get_reserve_aircraft_count(
                permitted_moment, main_aircraft_index, possible_taking_off_moments)
            for offset in range(1, reserve_aircraft_count + 1):
                # A reserve must exist and use the same runway as the main aircraft.
                if main_aircraft_index + offset < len(possible_taking_off_moments) and \
                        ordered_taking_off_aircrafts[main_aircraft_index].runway_id == ordered_taking_off_aircrafts[
                            main_aircraft_index + offset].runway_id:
                    reserve_aircraft_possible_moment = possible_taking_off_moments[main_aircraft_index + offset]
                    start_delay = permitted_moment - reserve_aircraft_possible_moment
                    reserve_start_moments_data[main_aircraft_index + offset] = \
                        ordered_taking_off_aircrafts[main_aircraft_index + offset].calculating_moments.start + start_delay
        return reserve_start_moments_data

    def _get_reserve_aircraft_count(self, permitted_moment, main_aircraft_index, possible_taking_off_moments):
        """Determine how many reserve aircraft may back up the main one.

        First counts every following aircraft whose possible moment still fits
        into the spare-arrival window before ``permitted_moment``, then shrinks
        that count until it satisfies the waiting-time criterion from
        ``CommonInputData.permissible_reserve_aircraft_count``.
        """
        reserve_aircraft_count = 0
        index = 1
        # Upper bound: possible moments that fit into the spare-arrival window.
        while main_aircraft_index + index < len(possible_taking_off_moments) - 1 and \
                permitted_moment - CommonInputData.spare_arrival_time_interval.start_moment >= \
                possible_taking_off_moments[main_aircraft_index + index]:
            reserve_aircraft_count += 1
            index += 1
        permitted_time = 0
        while True:
            # Look up the allowed idle time for the current reserve count.
            # NOTE(review): relies on the criterion dict iterating in ascending
            # aircraft-count order - confirm for CommonInputData.
            permissible_reserve_aircraft_count_list = CommonInputData.permissible_reserve_aircraft_count
            for aircraft_count, waiting_time in permissible_reserve_aircraft_count_list.items():
                if reserve_aircraft_count <= aircraft_count:
                    permitted_time = waiting_time
                    break
            # Idle time between the main take-off and the last reserve take-off.
            time_to_last_taking_off_moment = possible_taking_off_moments[main_aircraft_index + reserve_aircraft_count] - \
                                             possible_taking_off_moments[main_aircraft_index]
            if time_to_last_taking_off_moment > permitted_time:
                reserve_aircraft_count -= 1
            if time_to_last_taking_off_moment <= permitted_time:
                break
        return reserve_aircraft_count

    def _set_prepared_start_moments(self, all_aircraft_start_moments_data, ordered_taking_off_aircrafts):
        """Store the precomputed start moments on the corresponding aircraft."""
        for aircraft_index, start_moment in all_aircraft_start_moments_data.items():
            ordered_taking_off_aircrafts[aircraft_index].calculating_moments.start = start_moment

    def _get_most_priority_aircraft_index(self, all_aircraft_start_moments_data, ordered_taking_off_aircrafts):
        """Return the aircraft index (a key of the data dict) with the highest
        ``priority``; ties keep the earliest examined index."""
        most_priority_aircraft_index = list(all_aircraft_start_moments_data)[0]
        for aircraft_index in all_aircraft_start_moments_data:
            if ordered_taking_off_aircrafts[aircraft_index].priority > \
                    ordered_taking_off_aircrafts[most_priority_aircraft_index].priority:
                most_priority_aircraft_index = aircraft_index
        return most_priority_aircraft_index

    def _set_PS_waiting_time(self, ordered_configured_taking_off_aircrafts):
        """Compute and store each aircraft's waiting time at the PS point."""
        for aircraft in ordered_configured_taking_off_aircrafts:
            # Moment the aircraft reaches PS, depending on its route.
            if aircraft.processing_necessity:
                arrival_to_PS_moment = aircraft.calculating_moments.start + aircraft.creation_intervals.motion_from_parking_to_SP + \
                                       aircraft.creation_intervals.processing + aircraft.creation_intervals.motion_from_SP_to_PS
            else:
                arrival_to_PS_moment = aircraft.calculating_moments.start + aircraft.creation_intervals.motion_from_parking_to_PS
            # Reserve aircraft wait for their fallback permitted moment instead.
            if aircraft.is_reserve:
                aircraft.calculating_intervals.PS_delay = aircraft.calculating_moments.reserve_permitted_taking_off - \
                                                          arrival_to_PS_moment - aircraft.creation_intervals.motion_from_PS_to_ES - \
                                                          aircraft.creation_intervals.taking_off
            else:
                aircraft.calculating_intervals.PS_delay = aircraft.calculating_moments.permitted_taking_off - \
                                                          arrival_to_PS_moment - aircraft.creation_intervals.motion_from_PS_to_ES - \
                                                          aircraft.creation_intervals.taking_off

    def _get_table_row(self, aircraft):
        """Pack the aircraft's computed data into a ``TableRow`` for display."""
        # Total motion time excludes the treatment (processing) interval itself.
        aircraft_total_motion_time = aircraft.creation_intervals.taking_off + aircraft.creation_intervals.motion_from_PS_to_ES
        if aircraft.processing_necessity:
            aircraft_total_motion_time += aircraft.creation_intervals.motion_from_SP_to_PS + aircraft.creation_intervals.motion_from_parking_to_SP
        else:
            aircraft_total_motion_time += aircraft.creation_intervals.motion_from_parking_to_PS
        # -1 is the "no permitted moment found" sentinel set earlier.
        permitted_moment = str(aircraft.calculating_moments.permitted_taking_off) \
            if aircraft.calculating_moments.permitted_taking_off != -1 else "Не найден"
        processing_time = str(aircraft.creation_intervals.processing) if aircraft.processing_necessity else "-"
        special_place_id = str(aircraft.special_place_id) if aircraft.processing_necessity else "-"
        return TableRow(str(aircraft.id), str(aircraft.creation_moments.planned_taking_off), str(aircraft.calculating_moments.possible_taking_off),
                        permitted_moment, str(aircraft.calculating_moments.start), str(aircraft_total_motion_time), str(processing_time),
                        aircraft.processing_necessity, str(int(aircraft.priority)), aircraft.is_reserve, str(aircraft.calculating_intervals.PS_delay),
                        aircraft.runway_id, special_place_id)

    def reset_runways(self):
        """Clear the occupied intervals of every runway."""
        for runway in self.runways:
            runway.reset()

    def reset_special_places(self):
        """Clear the occupied intervals of every special place."""
        for special_place in self.special_places:
            special_place.reset()
| true |
96dcb0551e29e267c82f5cb5d72b6b66e8b8b7e4 | Python | TheSlimvReal/PSE---LA-meets-ML | /modules/test/unittest/view/test_cli_output_service.py | UTF-8 | 2,259 | 2.515625 | 3 | [
"BSD-2-Clause"
] | permissive | from mock import patch, call
from modules.exception.exceptions import IllegalArgumentException
from modules.view.cli_output_service import CLIOutputService
from modules.view.observable import Observable
@patch("modules.view.command_line_interface.CommandLineInterface")
def test_create_observable_to_print_three_values(mocked_cli):
values = [
0,
25,
50,
75,
100
]
expected_calls = [call.print_overriding("downloading %s values" % str(i)) for i in values]
obs = Observable()
output_service = CLIOutputService(mocked_cli)
output_service.print_stream("downloading %s values", obs)
[obs.next(str(i)) for i in values]
mocked_cli.assert_has_calls(expected_calls)
@patch("modules.view.command_line_interface.CommandLineInterface")
def test_print_overriding_with_incomplete_input(mocked_cli):
    """A template without a placeholder still gets the value appended after a space."""
    values = [0, 25, 50, 75, 100]
    expected_calls = []
    for value in values:
        expected_calls.append(call.print_overriding("downloading " + str(value)))
    obs = Observable()
    service = CLIOutputService(mocked_cli)
    service.print_stream("downloading", obs)
    for value in values:
        obs.next(str(value))
    mocked_cli.assert_has_calls(expected_calls)
@patch("modules.view.command_line_interface.CommandLineInterface")
def test_print_line(mocked_cli):
    """print_line forwards its text to the CLI's print method unchanged."""
    service = CLIOutputService(mocked_cli)
    service.print_line("Hallo")
    mocked_cli.assert_has_calls([call.print("Hallo")])
@patch("modules.view.command_line_interface.CommandLineInterface")
def test_print_error(mocked_cli):
    """print_error prints '<type>: <message>' for the given exception."""
    error = IllegalArgumentException("Error")
    service = CLIOutputService(mocked_cli)
    service.print_error(error)
    expected_message = error.get_type() + ": " + "Error"
    mocked_cli.assert_has_calls([call.print(expected_message)])
@patch("modules.view.cli_output_service.CLIOutputService")
def test_removing_subscribers_from_observable_works(mocked_output_service):
    """After remove_subscriber, further next() calls no longer reach the subscriber."""
    obs = Observable()
    obs.add_subscriber(mocked_output_service)
    obs.next("printed")
    obs.remove_subscriber(mocked_output_service)
    obs.next("not displayed")
    mocked_output_service.assert_has_calls([
        call.update("printed"),
        call.finished(),
    ])
| true |
3357ef51d96e6844849fa8242905a804c39aea6b | Python | VikasSingh-DS/Mechanisms-of-Action-Prediction-Kaggle | /train.py | UTF-8 | 3,542 | 2.640625 | 3 | [] | no_license |
def run_training(fold, seed):
    """Train one CV fold and return its out-of-fold and test predictions.

    Relies on module-level state: the ``folds``/``test`` dataframes,
    ``processed``, ``num_features``, ``target_cols``, the hyper-parameter
    constants, ``Model``, ``MoADataset``/``TestDataset`` and the
    ``train_fn``/``valid_fn``/``inference_fn`` helpers.

    :param fold: which ``kfold`` value to hold out for validation.
    :param seed: RNG seed applied before any data handling.
    :return: ``(oof, predictions)`` arrays of shapes
             ``(len(train), n_targets)`` and ``(len(test), n_targets)``.
    """
    seed_everything(seed)
    train = processed(folds)
    test_ = processed(test)
    val_idx = train[train['kfold'] == fold].index
    train_df = train[train['kfold'] != fold].reset_index(drop=True)
    valid_df = train[train['kfold'] == fold].reset_index(drop=True)
    x_train, y_train = train_df[num_features].values, train_df[target_cols].values
    x_valid, y_valid = valid_df[num_features].values, valid_df[target_cols].values
    train_dataset = MoADataset(x_train, y_train)
    train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
    valid_dataset = MoADataset(x_valid, y_valid)
    valid_data_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=BATCH_SIZE, shuffle=False)
    model = Model(
        len_features=len_features,
        len_targets=len_targets,
        hidden_size=hidden_size,
    )
    model.to(DEVICE)
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
    scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.1, div_factor=1e3,
                                              max_lr=1e-2, epochs=EPOCHS, steps_per_epoch=len(train_data_loader))
    loss_fn = nn.BCEWithLogitsLoss()
    early_stopping_steps = EARLY_STOPPING_STEPS
    early_step = 0
    oof = np.zeros((len(train), len(target_cols)))
    best_loss = np.inf
    for epoch in range(EPOCHS):
        train_loss = train_fn(model, optimizer, scheduler, loss_fn, train_data_loader, DEVICE)
        print(f"FOLD: {fold}, EPOCH: {epoch}, train_loss: {train_loss}")
        valid_loss, valid_preds = valid_fn(model, loss_fn, valid_data_loader, DEVICE)
        # BUG FIX: this line previously labelled the validation loss "train_loss".
        print(f"FOLD: {fold}, EPOCH: {epoch}, valid_loss: {valid_loss}")
        if valid_loss < best_loss:
            # New best model for this fold: keep its OOF preds and checkpoint it.
            best_loss = valid_loss
            oof[val_idx] = valid_preds
            torch.save(model.state_dict(), f"FOLD{fold}_.pth")
        elif EARLY_STOP:
            early_step += 1
            if early_step >= early_stopping_steps:
                break
    # Prediction: run inference on the test set with this fold's best checkpoint.
    x_test = test_[num_features].values
    testdataset = TestDataset(x_test)
    test_data_loader = torch.utils.data.DataLoader(testdataset, batch_size=BATCH_SIZE, shuffle=False)
    model = Model(
        len_features=len_features,
        len_targets=len_targets,
        hidden_size=hidden_size,
    )
    model.load_state_dict(torch.load(f"FOLD{fold}_.pth"))
    model.to(DEVICE)
    predictions = inference_fn(model, test_data_loader, DEVICE)
    return oof, predictions
def run_k_fold(NFOLDS, seed):
oof = np.zeros((len(train), len(target_cols)))
predictions = np.zeros((len(test), len(target_cols)))
for fold in range(NFOLDS):
oof_, pred_ = run_training(fold, seed)
predictions += pred_ / NFOLDS
oof += oof_
return oof, predictions
#Averaging on multiple SEEDS
SEED = [0, 1, 2, 3, 4, 5, 6]
oof = np.zeros((len(train), len(target_cols)))
predictions = np.zeros((len(test), len(target_cols)))
for seed in SEED:
oof_, predictions_ = run_k_fold(NFOLDS, seed)
oof += oof_ / len(SEED)
predictions += predictions_ / len(SEED)
train[target_cols] = oof
test[target_cols] = predictions | true |
e965a1263d085e18b7979b5da3cd034b42ef8796 | Python | khaleeque-ansari/Online-Coding-Problems-Solutions-Python | /ProjectEulerCode/prob38.py | UTF-8 | 1,555 | 3.28125 | 3 | [
"Apache-2.0"
] | permissive | def ispandigital(x):
s = set()
for c in str(x):
s = s.union([int(c)])
if s == set([1,2,3,4,5,6,7,8,9]) :
return True
else:
return False
max_concat_prod = 918273645
for x in range(90,100):
temp = ''
stop = False
i = 0
while(stop != True):
i +=1
temp = temp + str(x*i)
if len(temp) >9:
stop = True
elif len(temp) == 9:
if ispandigital(int(temp)):
if int(temp) > max_concat_prod :
max_concat_prod = int(temp)
print int(temp)
for x in range(900,1000):
temp = ''
stop = False
i = 0
while(stop != True):
i +=1
temp = temp + str(x*i)
if len(temp) >9:
stop = True
elif len(temp) == 9:
if ispandigital(int(temp)):
if int(temp) > max_concat_prod :
max_concat_prod = int(temp)
print int(temp)
for x in range(9000,10000):
temp = ''
stop = False
i = 0
while(stop != True):
i +=1
temp = temp + str(x*i)
if len(temp) >9:
stop = True
elif len(temp) == 9:
if ispandigital(int(temp)):
if int(temp) > max_concat_prod :
max_concat_prod = int(temp)
print int(temp)
print "the answer is : " + str(max_concat_prod)
| true |
77f8753af2356e7e748d01c09ae626b6e1ab1ee2 | Python | nazaninsbr/Stack | /get_min.py | UTF-8 | 1,003 | 3.890625 | 4 | [] | no_license | class Stack:
def __init__(self):
self.values = []
self.min = ''
def push(self, x):
self.values.append(x)
if self.min=='':
self.min=x
elif self.min > x:
self.min = x
def top(self):
if len(self.values)==0:
return -1
return self.values[-1]
def isEmpty(self):
if len(self.values)==0:
return True
return False
def pop(self):
if not self.isEmpty():
x = self.values[-1]
del self.values[-1]
return x
return -1
def insertAtBottom(self, item):
if self.isEmpty():
self.push(item)
else:
temp = self.pop()
self.insertAtBottom(item)
self.push(temp)
def seeLastElement(self):
tempStack = Stack()
while not self.isEmpty():
x = self.pop()
tempStack.push(x)
returnVal = tempStack.top()
while not tempStack.isEmpty():
x = tempStack.pop()
self.push(x)
return returnVal
def getMin(self):
return self.min
if __name__ == '__main__':
s = Stack()
s.push(12)
s.push(1)
s.push(8)
s.push(122)
s.push(-12)
print(s.getMin()) | true |
67e9e440e0fcc86c47758e57f47cce98b2bc651b | Python | EugeneBudzinskiy/Dino_RL | /NN.py | UTF-8 | 3,478 | 2.578125 | 3 | [] | no_license | from config import *
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.framework.errors_impl import NotFoundError
from CustomException import LoadNNException
class NeuralNetwork:
def __init__(self, input_size, output_size):
self.input_size = input_size
self.output_size = output_size
self.hidden_size_1 = 256
self.hidden_size_2 = 256
inputs = keras.layers.Input(shape=(input_size,))
layer1 = keras.layers.Dense(self.hidden_size_1, activation="relu")(inputs)
layer2 = keras.layers.Dense(self.hidden_size_2, activation="relu")(layer1)
outputs = keras.layers.Dense(self.output_size, activation="linear")(layer2)
self.model = keras.Model(inputs=inputs, outputs=outputs)
self.optimizer = keras.optimizers.Adam(learning_rate=LEARNING_RATE, clipnorm=1.0)
self.loss_function = keras.losses.Huber()
def save_weights(self, file_prefix=500):
path = FILE_PATH + '_' + str(file_prefix)
self.model.save(path)
def load_weights(self):
path = FILE_PATH
try:
self.model = keras.models.load_model(path, compile=False)
except NotFoundError:
raise LoadNNException(path) from None
class Memory:
def __init__(self, buffer_size):
self.buffer_size = buffer_size
self.__action_array = None
self.__state_array = None
self.__next_state_array = None
self.__reward_array = None
self.__done_array = None
def __len__(self):
return len(self.__done_array)
def add(self, action, state, next_state, reward, done):
"""Add a new experience to memory."""
if self.__done_array is None:
self.__action_array = np.array([action])
self.__state_array = np.array([state])
self.__next_state_array = np.array([next_state])
self.__reward_array = np.array([reward])
self.__done_array = np.array([done])
else:
if self.__len__() >= self.buffer_size:
self.__action_array = np.delete(self.__action_array, 0, axis=0)
self.__state_array = np.delete(self.__state_array, 0, axis=0)
self.__next_state_array = np.delete(self.__next_state_array, 0, axis=0)
self.__reward_array = np.delete(self.__reward_array, 0, axis=0)
self.__done_array = np.delete(self.__done_array, 0, axis=0)
self.__action_array = np.append(self.__action_array, action)
self.__state_array = np.append(self.__state_array, [state], axis=0)
self.__next_state_array = np.append(self.__next_state_array, [next_state], axis=0)
self.__reward_array = np.append(self.__reward_array, reward)
self.__done_array = np.append(self.__done_array, done)
def sample(self, batch_size):
"""Randomly sample a batch of experiences from memory."""
indices = np.random.choice(self.__len__(), batch_size)
actions = [self.__action_array[x] for x in indices]
states = np.array([self.__state_array[x] for x in indices])
next_states = np.array([self.__next_state_array[x] for x in indices])
rewards = [self.__reward_array[x] for x in indices]
dones = tf.convert_to_tensor(
[float(self.__done_array[x]) for x in indices]
)
return actions, states, next_states, rewards, dones
| true |
6bd56107666055fd8bdcc34bd857586000d255c8 | Python | z-x-z/rl | /src/environments/GridworldEnv.py | UTF-8 | 5,477 | 2.859375 | 3 | [] | no_license | # coding: utf-8
import numpy as np
import sys
from time import sleep
from gym import utils
from gym.envs.toy_text import discrete
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
ACTION_MAP = ["up", "right", "down", "left"]
ARROW_MAP = ["↑", "→", "↓", "←"]
MAPS = {
'5*5': ["OOOOO", "OSOOO", "OOOXO", "OOXGO", "OOOOO"],
'4*4': ['OOOS', 'XOOX', 'OXOO', 'OOGO'],
"6*6": ['OXOOXO', 'OGOXXO', 'OXXXOO', 'OOOOOO', 'OXOOXO', 'OOOOXS'],
"8*6": ['OOOOSO', 'OOOXXX', 'OOOOOO', 'OXOOOO', 'OXOOOO', 'OOXOXX', 'XXOOOO', 'OXOXGO'],
"10*10": [
'OOOOOOOOOO', 'SXOOOOXXXO', 'OOOOOXOOXO', 'OOOXOOXXOX', 'OOXOXOXOOO',\
'OXOXOXXOXX', 'OOOOOXOOOO', 'OXXXOOOOOO', 'XXXOXOXOOO', 'OOXOXOOOOG']
}
REWARD_MAP = {b'O': 0, b'S': 0, b'X': -1, b'G': 1}
class GridworldEnv(discrete.DiscreteEnv):
"""
FrozenLakeEnv1 is a copy environment from GYM toy_text FrozenLake-01
You are an agent on an 4x4 grid and your goal is to reach the terminal
state at the bottom right corner.
For example, a 4x4 grid looks as follows:
S O O O
O X O X
O O O X
X O O G
S : starting point, safe
O : frozen surface, safe
X : hole, fall to your doom
G : goal, where the frisbee is located
The episode ends when you reach the goal or fall in a hole.
You receive a reward of 1 if you reach the goal, and zero otherwise.
You can take actions in each direction (UP=0, RIGHT=1, DOWN=2, LEFT=3).
Actions going off the edge leave you in your current state.
step()
return next_state, reward, done, {'prob': p}
"""
metadata = {'render.modes': ['human', 'ansi']}
def __init__(self, desc=None, map_name='4*4'):
self.desc = desc = np.asarray(MAPS[map_name], dtype='c')
self.nrow, self.ncol = nrow, ncol = desc.shape
self.shape = desc.shape
nA = 4 # 动作集个数
nS = np.prod(desc.shape) # 状态集个数 = desc.shape[0] * desc.shape[1] * ...
MAP_ROWS = desc.shape[0]
MAP_COLS = desc.shape[1]
MAP_CELLS = MAP_ROWS * MAP_COLS
# Differ state nums for action.
ACTIONS_DS = [-MAP_COLS, 1, MAP_COLS, -1]
# initial state distribution [ 1. 0. 0. ...]
isd = np.array(desc == b'S').astype('float64').ravel()
isd /= isd.sum()
P = {}
state_grid = np.arange(nS).reshape(self.shape)
it = np.nditer(state_grid, flags=['multi_index'])
def is_done(s):
if (s < 0 or s >= MAP_CELLS):
return True
else:
# / 单纯的出发返回的是浮点数,//返回的则是整数
return desc[s // MAP_COLS][s % MAP_COLS] in b'GX'
def get_reward(s):
if (s < 0 or s >= MAP_CELLS):
return -1
else:
return REWARD_MAP[desc[s // MAP_COLS][s % MAP_COLS]]
while not it.finished:
s = it.iterindex
r, c = it.multi_index
# P[s][a] == [(probability, nextstate, reward, done), ...]
P[s] = {a: [] for a in range(nA)}
s_latter = desc[r][c]
# is_done = lambda letter: letter in b'GX'
# get_char = lambda p_d1: desc[p_d1 // MAP_COLS][p_d1 % MAP_COLS]
if is_done(s):
reward = REWARD_MAP[s_latter]
P[s][UP] = [(1.0, s, reward, True)]
P[s][RIGHT] = [(1.0, s, reward, True)]
P[s][DOWN] = [(1.0, s, reward, True)]
P[s][LEFT] = [(1.0, s, reward, True)]
else:
ns_up = s if r == 0 else s - MAP_COLS
ns_right = s if c == (MAP_COLS - 1) else s + 1
ns_down = s if r == (MAP_ROWS - 1) else s + MAP_COLS
ns_left = s if c == 0 else s - 1
P[s][UP] = [(1.0, ns_up, get_reward(s - MAP_COLS), is_done(s - MAP_COLS))]
P[s][RIGHT] = [(1.0, ns_right, get_reward(s + 1), is_done(s + 1))]
P[s][DOWN] = [(1.0, ns_down, get_reward(s + MAP_COLS), is_done(s + MAP_COLS))]
P[s][LEFT] = [(1.0, ns_left, get_reward(s - 1), is_done(s - 1))]
it.iternext()
self.P = P
super(GridworldEnv, self).__init__(nS, nA, P, isd)
def store_cursor(self):
print("\033[s", end="")
def recover_cursor(self):
print("\033[u", end="")
def animate_render(self, episode_i, sleep_seconds=None):
self.store_cursor()
print("Episode: {}".format(episode_i))
self.render()
if sleep_seconds is not None:
sleep(sleep_seconds)
self.recover_cursor()
def render(self, mode='human', close=False):
if close: # 初始化环境Environment的时候不显示
return
outfile = StringIO() if mode == 'ansi' else sys.stdout
desc = self.desc.tolist()
desc = [[c.decode('utf-8') for c in line] for line in desc]
state_grid = np.arange(self.nS).reshape(self.shape)
it = np.nditer(state_grid, flags=['multi_index'])
while not it.finished:
s = it.iterindex
y, x = it.multi_index
# 对于当前状态用红色标注
if self.s == s:
desc[y][x] = utils.colorize(desc[y][x], "red", highlight=True)
it.iternext()
outfile.write("\n".join(' '.join(line) for line in desc) + "\n")
if mode != 'human':
return outfile
| true |
c33e3859014f0ebb5f176177c35f9ec18b7121ef | Python | afcarl/isa | /code/transforms/radialgaussianization.py | UTF-8 | 4,420 | 2.9375 | 3 | [
"MIT"
] | permissive | __license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <lucas@theis.io>'
__docformat__ = 'epytext'
from transform import Transform
from scipy.stats import chi
from scipy.special import gamma, erf, erfinv
from scipy.optimize import bisect
from tools import gammaincinv, logsumexp
from numpy import sqrt, sum, square, multiply, zeros_like, zeros, log
class RadialGaussianization(Transform):
def __init__(self, gsm):
"""
@type gsm: L{GSM}
@param gsm: Gaussian scale mixture used for Gaussianization
"""
self.gsm = gsm
def apply(self, data):
"""
Radially Gaussianizes the given data.
@type data: array_like
@param data: data points stored in columns
"""
def rcdf(norm):
"""
Radial cumulative distribution function (CDF).
"""
# allocate memory
result = zeros_like(norm)
for j in range(self.gsm.num_scales):
result += grcdf(norm / self.gsm.scales[j], self.gsm.dim)
result /= self.gsm.num_scales
result[result > 1.] = 1.
return result
# radially Gaussianize data
norm = sqrt(sum(square(data), 0))
return multiply(igrcdf(rcdf(norm), self.gsm.dim) / norm, data)
def inverse(self, data, max_iter=100):
"""
Applies the inverse transformation to the given set of data points.
@type data: array_like
@param data: data points stored in columns
"""
def rcdf(norm):
"""
Radial cumulative distribution function for real values.
@type norm: float
@param norm: one-dimensional, positive input
"""
return sum(grcdf(norm / self.gsm.scales, self.gsm.dim)) / self.gsm.num_scales
# compute norm
norm = sqrt(sum(square(data), 0))
# normalize data
data = data / norm
# apply Gaussian radial CDF
norm = grcdf(norm, self.gsm.dim)
# apply inverse radial CDF
norm_max = 1.
for t in range(len(norm)):
# make sure root lies between zero and norm_max
while rcdf(norm_max) < norm[t]:
norm_max += 1.
# find root numerically
norm[t] = bisect(
f=lambda x: rcdf(x) - norm[t],
a=0.,
b=norm_max,
maxiter=max_iter,
disp=False)
# inverse radial Gaussianization
data = multiply(norm, data)
return data
def logjacobian(self, data):
"""
Returns the log-determinant of the Jacobian of radial Gaussianization
evaluated at the given data points.
@type data: array_like
@param data: data points stored in columns
@rtype: ndarray
@return: the logarithm of the Jacobian determinants
"""
def rcdf(norm):
"""
Radial cumulative distribution function (CDF).
"""
# allocate memory
result = zeros_like(norm)
for j in range(self.gsm.num_scales):
result += grcdf(norm / self.gsm.scales[j], self.gsm.dim)
result /= self.gsm.num_scales
result[result > 1.] = 1.
return result
def logdrcdf(norm):
"""
Logarithm of the derivative of the radial CDF.
"""
# allocate memory
result = zeros([self.gsm.num_scales, len(norm)])
for j in range(self.gsm.num_scales):
result[j, :] = logdgrcdf(norm / self.gsm.scales[j], self.gsm.dim) - log(self.gsm.scales[j])
result -= log(self.gsm.num_scales)
return logsumexp(result, 0)
# data norm
norm = sqrt(sum(square(data), 0))
# radial gaussianization function applied to the norm
norm_rg = igrcdf(rcdf(norm), self.gsm.dim)
logj = logdrcdf(norm) - \
logdgrcdf(norm_rg, self.gsm.dim) + (self.gsm.dim - 1) * log(norm_rg / norm)
return logj.reshape(1, -1)
def grcdf(norm, dim):
"""
Gaussian radial CDF.
@type norm: array_like
@param norm: norms of the data points
@type dim: integer
@param dim: dimensionality of the Gaussian
"""
if dim < 2:
return erf(norm / sqrt(2.))
else:
return chi.cdf(norm, dim)
def igrcdf(norm, dim):
"""
Inverse Gaussian radial CDF.
@type norm: array_like
@param norm: norms of the data points
@type dim: integer
@param dim: dimensionality of the Gaussian
"""
if dim < 2:
result = erfinv(norm)
result[result > 6.] = 6.
return sqrt(2.) * result
else:
return sqrt(2.) * sqrt(gammaincinv(dim / 2., norm))
def logdgrcdf(norm, dim):
"""
Logarithm of the derivative of the Gaussian radial CDF.
@type norm: array_like
@param norm: norms of the data points
@type dim: integer
@param dim: dimensionality of the Gaussian
"""
tmp = square(norm) / 2.
return (dim / 2. - 1.) * log(tmp) - tmp - log(gamma(dim / 2)) + log(norm)
| true |
c9d08e23d4cb4c0ec32f63e0b5a93775d03b2435 | Python | Sarthakg91/PythonMiniProjects | /Sentiment Analysis/LIWC Analysis of Comments/LIWCMeta.py | UTF-8 | 1,578 | 2.796875 | 3 | [] | no_license |
"""
Created on Fri Mar 01 13:35:34 2016
@author: Sarthak Ghosh
"""
import re
def getLex(post, liwc_lexicons):
liwc_lexicon_to_count = {}
for liwc_lexicon_name in set(liwc_lexicons.keys()):
liwc_lexicon_to_count[liwc_lexicon_name] = 0
for name, items in liwc_lexicons.iteritems():
for item in items:
pattern = re.compile(item)
count = len(pattern.findall(post))
liwc_lexicon_to_count[name] += count
liwc_lexicon_to_count[name] /= float(len(post.split(' ')))
if liwc_lexicon_to_count[name]< 0.0:
print "negative :", post
if name=="positive_affect" and liwc_lexicon_to_count[name] == 1.0:
print "hi: ", post
return liwc_lexicon_to_count
def get_liwc_dictionary():
liwc_lex_dict = {}
liwc_lex_dict["positive_affect"] = get_liwc_lexicons("positive_affect")
liwc_lex_dict["negative_affect"] = get_liwc_lexicons("negative_affect")
liwc_lex_dict["anger"] = get_liwc_lexicons("anger")
liwc_lex_dict["anxiety"] = get_liwc_lexicons("anxiety")
liwc_lex_dict["sadness"] = get_liwc_lexicons("sadness")
liwc_lex_dict["swear"] = get_liwc_lexicons("swear")
return liwc_lex_dict
def get_liwc_lexicons(category):
liwc_lexicons_directory = "LIWC_lexicons"
lexicon = []
lexicon_str = "("
with open("{0}/{1}".format(liwc_lexicons_directory, category), "r") as file_handle:
for line in file_handle:
item = line.strip()
if "*" in item:
item = r"\b{0}\b".format(item.replace("*", ".*?"))
else:
item = r"\b{0}\b".format(item)
lexicon_str += item + "|"
lexicon_str = lexicon_str[:-1] + ")"
lexicon.append(lexicon_str)
return lexicon
| true |
2d84c5dff92041ba4b89ef6415b47e496953378d | Python | Brian-nyabuto12/contacts-app | /contact.py | UTF-8 | 706 | 3.34375 | 3 | [] | no_license | class Contact:
"""
Class that generates new instance of contacts
"""
contact_list =[]
def __init__(self, first_name, last_name, number):
self.first_name= first_name
self.last_name= last_name
self.number= number
def save(self):
self.contact_list.append(self)
@staticmethod
def displays_all_contacts():
"""
displays all the contacts stored in the
contact_list
"""
return Contact.contact_list
@classmethod
def delete_contact(cls, contact):
"""deletes the contact from the list"""
Contact.contact_list.remove(contact)
| true |
990de5c4582a84055d0da79e067ccda0ac45fa56 | Python | ImogenHay/Graphs | /Depth-first Search (traversing graph).py | UTF-8 | 1,819 | 4.03125 | 4 | [] | no_license | ### Depth-first Search using Node Class ###
class Node(object):
# Constructor
def __init__(self, name):
print("A Node has been created")
# Attributes
self.name = name
self.children_amount = 0
self.childList = []
# String Method called on print
def __str__(self):
report = "\nNode Name: " + str(self.name) + "\n"
report = report + "Number of Children: " + str(self.children_amount) + "\n"
return report
# Other Methods
def getChildren(self):
return self.childList
def getName(self):
return self.name
def setName(self,n):
self.name = n
def addChild(self,item):
self.childList.append(item)
self.children_amount = self.children_amount + 1
## Main Code ##
print("\nCreating Nodes")
a = Node("A")
b = Node("B")
c = Node("C")
d = Node("D")
e = Node("E")
f = Node("F")
g = Node("G")
h = Node("H")
j = Node("J")
k = Node("K")
a.addChild(c)
a.addChild(b)
b.addChild(k)
b.addChild(a)
c.addChild(f)
c.addChild(d)
c.addChild(a)
d.addChild(g)
d.addChild(h)
d.addChild(e)
d.addChild(c)
e.addChild(d)
f.addChild(j)
f.addChild(g)
f.addChild(c)
g.addChild(f)
g.addChild(d)
h.addChild(d)
j.addChild(h)
j.addChild(f)
k.addChild(b)
done = []
node = a
print(node)
done.append(node)
x = 1
while x > 0:
if (node.getChildren())[0] in done:
node
x = 0
else:
node = (node.getChildren())[0]
done.append(node)
print(node)
done.remove(done[-2])
x = 1
i = 0
while x > 0:
while (node.getChildren())[int(i)] in done:
i = i + 1
node = (node.getChildren())[int(i)]
done.append(node)
print(node)
x = 0
input("\n\nPress the enter key to exit.")
| true |
e98a22f9f6cdc8439e59ccf64533185e69b36dac | Python | YoshimitsuMatsutaIe/car_ctr | /sotsuron/make_figs.py | UTF-8 | 18,986 | 2.59375 | 3 | [
"MIT"
] | permissive | """グラフ作成ツール"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anm
import matplotlib.patches as patches
import matplotlib.cm as cm
import math
from math import pi, sin, cos, tan
import time
import datetime
#import scipy as sy
import pathlib
from matplotlib.font_manager import FontProperties
fp = FontProperties(fname=r'C:\WINDOWS\Fonts\Arial.ttf', size=30)
def make_youshi_fig(path_conventional, path_proposed, path_obs):
"""要旨に使う図を作成"""
save_path = pathlib.Path(r'D:\control_study_2020\RMPver18\卒論\提出版卒論に使用') # 家
#save_path = pathlib.Path(r'C:\Users\Elis\Documents\制御2020\リーマン制御2020\Exp_Data_scgool\卒論用') # 大学
state_history_con = np.loadtxt(path_conventional, delimiter=',', dtype='float32')
state_history_pro = np.loadtxt(path_proposed, delimiter=',', dtype='float32')
obs_history = np.loadtxt(path_obs, delimiter=',', dtype='float32')
print("データ読み込み完了")
posi_g = np.array([[10, 0.5]])
## グラフ化 ###
# グラフサイズ決定
posi_all = np.concatenate([posi_g, state_history_con[:, 1:3], state_history_pro[:, 1:3]], axis = 0)
fig_xmin = np.amin(posi_all[:, 0:1])
fig_xmax = np.amax(posi_all[:, 0:1])
fig_ymin = np.amin(posi_all[:, 1:2])
fig_ymax = np.amax(posi_all[:, 1:2])
fig_max_length = 13
x_scale = fig_xmax - fig_xmin
y_scale = fig_ymax - fig_ymin
if x_scale >= y_scale:
fig_W = fig_max_length
fig_L = fig_max_length * y_scale / x_scale
else:
fig_W = fig_max_length * x_scale / y_scale
fig_L = fig_max_length
fig_W = 8
fig_L = 8
obs_init = obs_history[0:1, :].reshape(int(obs_history[0:1, :].size/2), 2)
print("グラフ化中...")
##軌跡(グラデーション付き)
fig_Tra, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (fig_W, fig_L))
ax.scatter(posi_g[0, 0], posi_g[0, 1],
s = 350, label = 'goal point', marker = '*', color = '#ff7f00', alpha = 1, linewidths = 1.5, edgecolors = 'red')
ax.scatter(obs_init[:, 0], obs_init[:, 1],
s = 200, label = 'obstacle points', marker = '+', color = 'k', alpha = 1, linewidths = 1)
ax.scatter(0, 0,
s = 100, label = 'start point', marker = ',', color = 'k', alpha = 1)
## グラデーション付き
#v_max = np.amax(np.concatenate([state_history_con[:, 7], state_history_pro[:, 7]], axis = 0))
#v_min = np.amin(np.concatenate([state_history_con[:, 7], state_history_pro[:, 7]], axis = 0))
#for i in np.arange(0,637, 1):
# ax.plot(state_history_con[i:i+2, 1].T,
# state_history_con[i:i+2, 2].T,
# color = cm.winter((state_history_con[i, 7] - v_min) / (v_max - v_min)),
# markersize = 1.5)
# ax.plot(state_history_pro[i:i+2, 1].T,
# state_history_pro[i:i+2, 2].T,
# color = cm.winter((state_history_pro[i, 7] - v_min) / (v_max - v_min)),
# markersize = 1.5)
# グラデーション無し
ax.plot(state_history_con[:, 1].T,
state_history_con[:, 2].T,
label = 'Conventional', linestyle = 'dashed', color = 'k', linewidth = 2.0)
ax.plot(state_history_pro[:, 1].T,
state_history_pro[:, 2].T,
label = 'Proposed', linestyle = 'solid', color = 'k', linewidth = 2.0)
ax.set_xlabel('$\it{X(m)}$', fontsize = 10)
ax.set_ylabel('$\it{Y(m)}$', fontsize = 10)
ax.grid(True)
## データより自動で決定
#ax.set_xlim(fig_xmin - 1, fig_xmax + 1)
#ax.set_ylim(fig_ymin - 1, fig_ymax + 1)
# 手動
ax.set_xlim(-1, 12)
ax.set_ylim(-1.7, 4.3)
ax.legend(loc='best', fontsize = 10)
ax.set_aspect('equal')
fig_Tra_name = "重ね合わせ動的.png"
fig_Tra.savefig(save_path / fig_Tra_name)
plt.show()
def make_soturon_fig():
"""卒論の実験結果グラフを作成"""
#save_path = pathlib.Path(r'D:\control2020\RMP_exp\Exp_data_home\2021_01_27') # 家
save_path = pathlib.Path(r'C:\Users\Elis\Documents\制御2020\リーマン制御2020\Exp_Data_scgool\卒論用') # 大学
path_conventional = r'C:\Users\Elis\Documents\制御2020\リーマン制御2020\Exp_Data_scgool\卒論用\succes__提案手法ためし動的2021-02-15--06-56-55__State_history_temp.csv'
path_obs = r'C:\Users\Elis\Documents\制御2020\リーマン制御2020\Exp_Data_scgool\卒論用\succes__従来手法ためし静的2021-02-15--06-19-55__obs_history_temp.csv'
path_proposed = r'C:\Users\Elis\Documents\制御2020\リーマン制御2020\Exp_Data_scgool\卒論用\succes__従来手法ためし静的2021-02-15--06-19-55__State_history_temp.csv'
state_history_con = np.loadtxt(path_conventional, delimiter=',', dtype='float32')
state_history_pro = np.loadtxt(path_proposed, delimiter=',', dtype='float32')
obs_history = np.loadtxt(path_obs, delimiter=',', dtype='float32')
print("データ読み込み完了")
posi_g = np.array([[10, 0.5]])
## グラフ化 ###
# グラフサイズ決定
#fig_xmin = -1
#fig_xmax = 11
#fig_ymin = -2
#fig_ymax = 6
#fig_max_length = 13
#x_scale = fig_xmax - fig_xmin
#y_scale = fig_ymax - fig_ymin
#if x_scale >= y_scale:
# fig_W = fig_max_length
# fig_L = fig_max_length * y_scale / x_scale
#else:
# fig_W = fig_max_length * x_scale / y_scale
# fig_L = fig_max_length
fig_W = 15
fig_L = 10
obs_init = obs_history[9:10, :].reshape(int(obs_history[9:10, :].size/2), 2)
print("グラフ化中...")
##軌跡(グラデーション付き)
plt.rcParams["font.size"] = 30
fig_Tra, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (fig_W, fig_L))
ax.scatter(posi_g[0, 0], posi_g[0, 1],
s = 800, label = 'goal point', marker = '*', color = '#ff7f00', alpha = 1, linewidths = 1.5, edgecolors = 'red')
ax.scatter(obs_init[:, 0], obs_init[:, 1],
s = 400, label = 'obstacle points', marker = '+', color = 'k', alpha = 1, linewidths = 1)
ax.scatter(0, 0,
s = 200, label = 'start point', marker = ',', color = 'k', alpha = 1)
#ax.scatter(posi_g[0, 0], posi_g[0, 1],
# label = 'goal point', marker = '*', color = '#ff7f00', alpha = 1, linewidths = 1.5, edgecolors = 'red')
#ax.scatter(obs_init[:, 0], obs_init[:, 1],
# label = 'obstacle points', marker = '+', color = 'k', alpha = 1, linewidths = 1)
#ax.scatter(0, 0,
# label = 'start point', marker = ',', color = 'k', alpha = 1)
## グラデーション付き
#v_max = np.amax(np.concatenate([state_history_con[:, 7], state_history_pro[:, 7]], axis = 0))
#v_min = np.amin(np.concatenate([state_history_con[:, 7], state_history_pro[:, 7]], axis = 0))
#for i in np.arange(0, state_history_con.shape[0], 1):
# ax.plot(state_history_con[i:i+2, 1].T,
# state_history_con[i:i+2, 2].T,
# color = cm.winter((state_history_con[i, 7] - v_min) / (v_max - v_min)),
# linewidth = 5)
#ax.plot(state_history_pro[i:i+2, 1].T,
# state_history_pro[i:i+2, 2].T,
# color = cm.winter((state_history_pro[i, 7] - v_min) / (v_max - v_min)),
# markersize = 1.5)
## グラデーション無し
#ax.plot(state_history_con[:, 1].T,
# state_history_con[:, 2].T,
# label = 'Conventional', linestyle = 'dashed', color = 'k', linewidth = 2.0)
#ax.plot(state_history_pro[:, 1].T,
# state_history_pro[:, 2].T,
# label = 'Proposed', linestyle = 'solid', color = 'k', linewidth = 2.0)
#ax.set_xlabel('$\it{X}$(m)', fontsize = 30)
#ax.set_ylabel('$\it{Y}$(m)', fontsize = 30)
ax.set_xlabel('$\it{X}$(m)')
ax.set_ylabel('$\it{Y}$(m)')
ax.grid(True)
## データより自動で決定
#ax.set_xlim(fig_xmin - 1, fig_xmax + 1)
#ax.set_ylim(fig_ymin - 1, fig_ymax + 1)
# 手動
ax.set_xlim(-1, 12)
ax.set_ylim(-2, 6)
#ax.legend(loc='best', fontsize = 30)
ax.legend(loc='best')
ax.set_aspect('equal')
fig_Tra_name = "実験環境.png"
fig_Tra.savefig(save_path / fig_Tra_name)
plt.show()
def make_soturon_fig_iroiro():
"""卒論の実験結果グラフ(状態,入力)を作成"""
#save_path = pathlib.Path(r'D:\control2020\RMP_exp\Exp_data_home\2021_01_27') # 家
save_path = pathlib.Path(r'C:\Users\Elis\Documents\制御2020\リーマン制御2020\Exp_Data_scgool\卒論用') # 大学
#path = r'C:\Users\Elis\Documents\制御2020\リーマン制御2020\Exp_Data_scgool\卒論用\succes__提案手法ためし動的2021-02-15--06-56-55__State_history_temp.csv'
#path = r'C:\Users\Elis\Documents\制御2020\リーマン制御2020\Exp_Data_scgool\卒論用\timeover__提案手法ためし静的2021-02-15--06-24-43__State_history_temp.csv'
#path = r'C:\Users\Elis\Documents\制御2020\リーマン制御2020\Exp_Data_scgool\卒論用\collision__従来手法ためし動的2021-02-15--06-55-17__State_history_temp.csv'
path = r'C:\Users\Elis\Documents\制御2020\リーマン制御2020\Exp_Data_scgool\卒論用\succes__従来手法ためし静的2021-02-15--06-19-55__State_history_temp.csv'
state_history = np.loadtxt(path, delimiter=',', dtype='float32')
print(state_history.shape)
tend = state_history[state_history.shape[0]-1, 0]
print("データ読み込み完了")
fig_W = 20
fig_L = 13
print("グラフ化中...")
comment = "従来静的"
plt.rcParams["font.size"] = 30
fig_x, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (fig_W, fig_L))
ax.plot(state_history[:, 0], state_history[:, 1], linewidth = 5)
ax.set_xlabel('Time $\it{t}$ [s]')
ax.set_ylabel('Position $\it{x}$ [m]')
ax.grid(True)
ax.set_xlim(0, tend)
#ax.set_ylim(-2, 6)
fig_x_name = comment + "x.png"
fig_x.savefig(save_path / fig_x_name)
fig_y, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (fig_W, fig_L))
ax.plot(state_history[:, 0], state_history[:, 2], linewidth = 5)
ax.set_xlabel('Time $\it{t}$ [s]')
ax.set_ylabel('Position $\it{y}$ [m]')
ax.grid(True)
ax.set_xlim(0, tend)
#ax.set_ylim(-2, 6)
fig_y_name = comment + "y.png"
fig_y.savefig(save_path / fig_y_name)
fig_theta, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (fig_W, fig_L))
ax.plot(state_history[:, 0], state_history[:, 3], linewidth = 5)
ax.set_xlabel('Time $\it{t}$ [s]')
ax.set_ylabel(r'Posture $\theta$ [rad]')
ax.grid(True)
ax.set_xlim(0, tend)
#ax.set_ylim(-pi, pi)
fig_theta_name = comment + "theta.png"
fig_theta.savefig(save_path / fig_theta_name)
fig_dxdt, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (fig_W, fig_L))
ax.plot(state_history[:, 0], state_history[:, 4], linewidth = 5)
ax.set_xlabel('Time $\it{t}$ [s]')
ax.set_ylabel('Velocity-x $\.{x}$ [m/s]')
ax.grid(True)
ax.set_xlim(0, tend)
#ax.set_ylim(-2, 6)
fig_dxdt_name = comment + "dxdt.png"
fig_dxdt.savefig(save_path / fig_dxdt_name)
fig_dydt, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (fig_W, fig_L))
ax.plot(state_history[:, 0], state_history[:, 5], linewidth = 5)
ax.set_xlabel('Time $\it{t}$ [s]')
ax.set_ylabel('Velocity-y $\.{y}$ [m/s]')
ax.grid(True)
ax.set_xlim(0, tend)
#ax.set_ylim(-2, 6)
fig_dydt_name = comment + "dydt.png"
fig_dydt.savefig(save_path / fig_dydt_name)
fig_dthetadt, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (fig_W, fig_L))
ax.plot(state_history[:, 0], state_history[:, 6], linewidth = 5)
ax.set_xlabel('Time $\it{t}$ [s]')
ax.set_ylabel(r'Angular Velocity $\.{θ}$ [rad/s]')
ax.grid(True)
ax.set_xlim(0, tend)
#ax.set_ylim(-2, 6)
fig_dthetadt_name = comment + "dthetadt.png"
fig_dthetadt.savefig(save_path / fig_dthetadt_name)
fig_v, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (fig_W, fig_L))
ax.plot(state_history[:, 0], state_history[:, 7], linewidth = 5)
ax.set_xlabel('Time $\it{t}$ [s]')
ax.set_ylabel('Forward Velocity $\it{v}$ [m/s]')
ax.grid(True)
ax.set_xlim(0, tend)
#ax.set_ylim(-2, 6)
fig_v_name = comment + "v.png"
fig_v.savefig(save_path / fig_v_name)
fig_xi, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (fig_W, fig_L))
ax.plot(state_history[:, 0], state_history[:, 8], linewidth = 5)
ax.set_xlabel('Time $\it{t}$ [s]')
ax.set_ylabel(r'Steering Angle $\it{ξ}$ [rad]')
ax.grid(True)
ax.set_xlim(0, tend)
#ax.set_ylim(-2, 6)
fig_xi_name = comment + "xi.png"
fig_xi.savefig(save_path / fig_xi_name)
fig_dvdt, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (fig_W, fig_L))
ax.plot(state_history[:, 0], state_history[:, 10], linewidth = 5)
ax.set_xlabel('Time $\it{t}$ [s]')
ax.set_ylabel(r'Forward Acceleration $\.{v} [m/s^{2}]$')
ax.grid(True)
ax.set_xlim(0, tend)
#ax.set_ylim(-2, 6)
fig_dvdt_name = comment + "dvdt.png"
fig_dvdt.savefig(save_path / fig_dvdt_name)
fig_dxidt, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (fig_W, fig_L))
ax.plot(state_history[:, 0], state_history[:, 11], linewidth = 5)
ax.set_xlabel('Time $\it{t}$ [s]')
ax.set_ylabel(r'Steering Angular Velocity $\.{ξ}$ [rad/s]')
ax.grid(True)
ax.set_xlim(0, tend)
#ax.set_ylim(-2, 6)
fig_dxidt_name = comment + "dxidt.png"
fig_dxidt.savefig(save_path / fig_dxidt_name)
def make_soturon_gif():
    """Animate conventional vs. proposed controller runs side by side and save a GIF.

    Loads pre-recorded state/obstacle histories (CSV) from hard-coded local
    paths, draws both vehicle centre trajectories together with their 12
    control points and the moving obstacle, and writes the animation into
    ``save_path``.

    NOTE(review): all paths are absolute Windows paths — this only runs on
    the author's machine.
    """
    save_path = pathlib.Path(r'D:\control_study_2020\RMPver18\卒論\提出版卒論に使用') # home PC
    #save_path = pathlib.Path(r'C:\Users\Elis\Documents\制御2020\リーマン制御2020\Exp_Data_scgool\卒論用') # university PC
    path_conventional = r'D:\control_study_2020\RMPver18\卒論\提出版卒論に使用\collision__従来手法ためし動的2021-02-15--06-55-17__State_history_temp.csv'
    path_obs = r'D:\control_study_2020\RMPver18\卒論\提出版卒論に使用\succes__提案手法ためし動的2021-02-15--06-56-55__obs_history_temp.csv'
    path_proposed = r'D:\control_study_2020\RMPver18\卒論\提出版卒論に使用\succes__提案手法ためし動的2021-02-15--06-56-55__State_history_temp.csv'
    state_history_con = np.loadtxt(path_conventional, delimiter=',', dtype='float32')
    state_history_pro = np.loadtxt(path_proposed, delimiter=',', dtype='float32')
    obs_history = np.loadtxt(path_obs, delimiter=',', dtype='float32')
    print("データ読み込み完了")
    posi_g = np.array([[10, 0.5]])  # goal position [x, y]
    fig_W = 15
    fig_L = 10
    # Initial obstacle layout: row 9 reshaped to (n_obstacles, 2). Currently unused.
    obs_init = obs_history[9:10, :].reshape(int(obs_history[9:10, :].size/2), 2)
    print("グラフ化中...")
    plt.rcParams["font.size"] = 30
    fig_ani = plt.figure(figsize = (fig_W, fig_L))
    ax = fig_ani.add_subplot(111, xlim=(-1, 12), ylim=(-2, 4))
    ax.set_aspect('equal')
    ax.grid(True)
    ax.set_xlabel('X [m]')
    ax.set_ylabel('Y [m]')
    #ax.legend("best")
    ax.scatter(posi_g[0,0], posi_g[0,1],
               s = 300, label = 'goal point', marker = '*', color = '#ff7f00', alpha = 1, linewidths = 1.5, edgecolors = 'red')
    center, = ax.plot([], [], '-m', lw=2) # centre trajectory (conventional run)
    # Control points of the conventional run (index 0 is the nose tip).
    cpoint0, = ax.plot([], [], '.-.r', lw=3) # nose tip
    cpoint1, = ax.plot([], [], '.-.k', lw=1)
    cpoint2, = ax.plot([], [], '.-.k', lw=1)
    cpoint3, = ax.plot([], [], '.-.k', lw=1)
    cpoint4, = ax.plot([], [], '.-.k', lw=1)
    cpoint5, = ax.plot([], [], '.-.k', lw=1)
    cpoint6, = ax.plot([], [], '.-.k', lw=1)
    cpoint7, = ax.plot([], [], '.-.k', lw=1)
    cpoint8, = ax.plot([], [], '.-.k', lw=1)
    cpoint9, = ax.plot([], [], '.-.k', lw=1)
    cpoint10, = ax.plot([], [], '.-.k', lw=1)
    cpoint11, = ax.plot([], [], '.-.k', lw=1)
    cpoints = [cpoint0, cpoint1, cpoint2, cpoint3, cpoint4, cpoint5, cpoint6, cpoint7, cpoint8, cpoint9, cpoint10, cpoint11] # collected into a list
    c2enter, = ax.plot([], [], '-b', lw=2) # centre trajectory (proposed run)
    # Control points of the proposed run.
    c2point0, = ax.plot([], [], '.-.r', lw=3) # nose tip
    c2point1, = ax.plot([], [], '.-.k', lw=1)
    c2point2, = ax.plot([], [], '.-.k', lw=1)
    c2point3, = ax.plot([], [], '.-.k', lw=1)
    c2point4, = ax.plot([], [], '.-.k', lw=1)
    c2point5, = ax.plot([], [], '.-.k', lw=1)
    c2point6, = ax.plot([], [], '.-.k', lw=1)
    c2point7, = ax.plot([], [], '.-.k', lw=1)
    c2point8, = ax.plot([], [], '.-.k', lw=1)
    c2point9, = ax.plot([], [], '.-.k', lw=1)
    c2point10, = ax.plot([], [], '.-.k', lw=1)
    c2point11, = ax.plot([], [], '.-.k', lw=1)
    c2points = [c2point0, c2point1, c2point2, c2point3, c2point4, c2point5, c2point6, c2point7, c2point8, c2point9, c2point10, c2point11] # collected into a list
    od, = ax.plot([], [], '+k', lw = 3) # moving obstacle
    # elapsed-time label
    time_template = 'time = %s' + '[s]'
    time_text = ax.text(0.05, 0.9, '', transform = ax.transAxes)
    def animate_exist_move_obs(i):
        """Animation callback for frame *i* (run with moving obstacles)."""
        center.set_data(state_history_con[0:i, 1].T, state_history_con[0:i, 2].T)
        c2enter.set_data(state_history_pro[0:i, 1].T, state_history_pro[0:i, 2].T)
        obs_history_u = obs_history[i:i+1, :].reshape(int(obs_history[i:i+1, :].size/2), 2)
        od.set_data(obs_history_u[:, 0], obs_history_u[:, 1])
        for k in np.arange(0, 12, 1): # draw each control point's position
            cpoints[k].set_data(state_history_con[i:i+1, 12 + 2 * k], state_history_con[i:i+1, 13 + 2 * k])
            c2points[k].set_data(state_history_pro[i:i+1, 12 + 2 * k], state_history_pro[i:i+1, 13 + 2 * k])
        time_text.set_text(time_template % (i * 0.1))
        return center, cpoints[0], cpoints[1], cpoints[2], cpoints[3], cpoints[4], cpoints[5], cpoints[6], cpoints[7], cpoints[8], cpoints[9], cpoints[10], cpoints[11], od, time_text,\
            c2enter, c2points[0], c2points[1], c2points[2], c2points[3], c2points[4], c2points[5], c2points[6], c2points[7], c2points[8], c2points[9], c2points[10], c2points[11]
    ani = anm.FuncAnimation(fig = fig_ani,
                            func = animate_exist_move_obs,
                            frames = np.arange(0, 790, 10), # frame indices; rendering every single frame is very slow
                            init_func = None,
                            fargs = None,
                            interval = 0.1 * 10e-3, # frame interval [ms] — NOTE(review): 0.1*10e-3 = 0.001 ms; possibly 100 ms was intended
                            blit = True)
    ani_name = "スライド用.gif"
    ani.save(filename = save_path / ani_name,
             fps = 1 / 0.1,
             writer='pillow')
    print("アニメ化完了")
    plt.show()
| true |
634d13b1e5f356d670ed50f6ce3c41b3443904af | Python | pypr-2021/w02-workshop | /fibonacci.py | UTF-8 | 532 | 3.859375 | 4 | [] | no_license | # Task 1: Generalised Fibonacci
# Define your function here:
def p_q_fibo(p, q, n):
    """Return the first *n* terms of the generalised Fibonacci sequence.

    The sequence starts 1, 1 and each later term is
    a(k) = p * a(k-1) + q * a(k-2).

    Args:
        p: weight on the previous term.
        q: weight on the term before that.
        n: number of terms to return (n >= 0).

    Returns:
        A list of the first n terms (empty for n == 0).
    """
    seq = [1, 1][:n]  # handles n == 0 and n == 1 as well
    for _ in range(max(0, n - 2)):
        seq.append(p * seq[-1] + q * seq[-2])
    return seq

# Tests
print(p_q_fibo(1, 1, 15))
# Expected result: [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
print(p_q_fibo(1, 3, 15))
# Expected result: [1, 1, 4, 7, 19, 40, 97, 217, 508, 1159, 2683, 6160, 14209, 32689, 75316]
print(p_q_fibo(2, 2, 15))
# Expected result: [1, 1, 4, 10, 28, 76, 208, 568, 1552, 4240, 11584, 31648, 86464, 236224, 645376]
print(p_q_fibo(6, 4, 10))
# Expected result: [1, 1, 10, 64, 424, 2800, 18496, 122176, 807040, 5330944]
| true |
15a3d9071fb5dae0e0ce26f170034092d564a1cb | Python | Gencid/practice-python | /22 Read From File.py | UTF-8 | 915 | 3.5625 | 4 | [] | no_license | """https://www.practicepython.org/exercise/2014/12/06/22-read-from-file.html"""
import re
dictnames = dict()
textnames = open("nameslist.txt", "r")
for linenames in textnames:
names = linenames.split()
for name in names:
if name not in dictnames:
dictnames[name] = 1
else:
dictnames[name] += 1
for itemnames in dictnames:
print(itemnames, "is repeated", dictnames[itemnames], "times.")
dictsun = dict()
sunnyplace = ""
sunnynumber = 0
textsun = open("Training_01.txt", "r")
for linesun in textsun:
if re.search("sun_", linesun):
sunnynumber = linesun.find("sun_")
sunnyplace = (linesun[:sunnynumber])
if sunnyplace not in dictsun:
dictsun[sunnyplace] = 1
else:
dictsun[sunnyplace] += 1
for itemsun in dictsun:
print(itemsun, "is repeated", dictsun[itemsun], "times.")
| true |
83d1c56690f91007533132ef5868e63fc4fea139 | Python | natachabourg/Bluebottles | /observation_data/analysis_observation_data/analysis_sydney_obs.py | UTF-8 | 29,927 | 2.578125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Thu June 06 12:10:20 2019
@author : Natacha
"""
import datetime
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
import pandas as pd
import numpy as np
import math
import glob
from windrose import plot_windrose
class time:
    """Lightweight day/month/year record parsed from the lifeguard reports.

    Fields are stored as strings exactly as given. NOTE: the class name
    would shadow the stdlib ``time`` module if that were imported here.
    """

    def __init__(self, day, month, year):
        self.day = day
        self.month = month
        self.year = year

    def jan_to_01(self):
        """Replace a three-letter month name ('Jan'..'Dec') with its number as a string.

        Months already given in another form are left unchanged.
        """
        month_numbers = {'Jan': '1', 'Feb': '2', 'Mar': '3', 'Apr': '4',
                         'May': '5', 'Jun': '6', 'Jul': '7', 'Aug': '8',
                         'Sep': '9', 'Oct': '10', 'Nov': '11', 'Dec': '12'}
        self.month = month_numbers.get(self.month, self.month)
def GetDateSomeLikelyNone(beach_nb, bluebottle_nb):
    """Dates at beach *beach_nb* whose bluebottle flag equals *bluebottle_nb*.

    Flag values: 0 none, 0.5 likely, 1 observed. Reads the module-level
    ``date`` and ``bluebottles`` lists, which are parallel per beach.
    """
    return [day for day, flag in zip(date[beach_nb], bluebottles[beach_nb])
            if flag == bluebottle_nb]
def DayEqual(object1, object2):
    """Return True when the two date-like objects fall on the same calendar day.

    Compares the ``day``, ``month`` and ``year`` attributes directly, so it
    works for both the local ``time`` class and ``datetime.date`` instances.
    """
    # Return the comparison itself instead of the if/else True/False anti-idiom.
    return (object1.day == object2.day
            and object1.month == object2.month
            and object1.year == object2.year)
def BoxPlot(nb, date_plot, BOMdaily):
    """Box-plot the previous day's wind direction for none/likely/some days.

    For each bluebottle category at beach *nb*, collects the wind direction
    of the day BEFORE each report and draws the three distributions as
    box plots (whiskers at the 5th/95th percentiles).

    Args:
        nb: beach index (0 Clovelly, 1 Coogee, 2 Maroubra).
        date_plot: sequence of dates aligned with *BOMdaily*.
        BOMdaily: daily wind directions in degrees, NaN where missing.
    """
    location = ['Clovelly', 'Coogee', 'Maroubra']
    one_day = datetime.timedelta(days=1)

    def directions_before(category):
        # Wind directions one day before each report of this category
        # (0 none, 1 likely, 2 some); NaN entries are skipped.
        out = []
        for report_day in date_box[nb][category]:
            for j in range(len(date_plot)):
                if report_day == (date_plot[j] + one_day) and not np.isnan(BOMdaily[j]):
                    out.append(BOMdaily[j])
        return out

    # The original repeated this loop verbatim for each category.
    x = [directions_before(0), directions_before(1), directions_before(2)]
    fig = plt.figure(figsize=(12, 9))
    plt.title(location[nb] + " 1 day before")
    plt.ylabel('Wind direction (degrees)')
    plt.boxplot(x, whis=[5, 95])
    plt.xticks([1, 2, 3], ['None', 'Likely', 'Some'])
#    fig.savefig("../outputs_observation_data/sydney_obs/box_plots/oneday_"+str(location[nb])+".png",dpi=300)
def GetVariables(filename):
    """
    Parse one beach's lifeguard-report spreadsheet (a pandas DataFrame).

    Extracts the date from the 'Name' column, the water temperature, a
    numeric bluebottle flag (0 none, 0.5 likely, 1 some/many), the free-text
    description and the wave height. Rows with Water_temp == 14 are dropped,
    and consecutive same-day reports are collapsed to one record per day.

    Returns:
        (dates, water_temps, bluebottle_flags, descriptions, wave_heights)
        as parallel lists, with dates as datetime.date objects.
    """
    date, datee, water_temp, bluebottles, description, wave_height = [], [], [], [], [], []
    for i in range(0,len(filename)):
        day=''
        month=''
        year=''
        # 'Name' looks like 'd/m/yyyy' plus 12 trailing characters; strip the
        # tail, then collect the digits of each positional slot, skipping '/'.
        date.append(str(filename.Name[i][:-12]))
        for j in range(0,2):
            if(date[i][j]!='/'):
                day+=date[i][j]
        for j in range(2,len(date[i])-4):
            if(date[i][j]!='/'):
                month+=date[i][j]
        for j in range(len(date[i])-4,len(date[i])):
            if(date[i][j]!='/'):
                year+=date[i][j]
        if filename.Water_temp[i]!=14: #dont take values for water_temp=14C
            datee.append(time(str(day),str(month),str(year)))
            water_temp.append(filename.Water_temp[i])
            description.append(filename.Description[i])
            wave_height.append(filename.Wave_height[i])
            # 'some' and 'many' are deliberately merged into one flag (1.).
            if filename.Bluebottles[i]=='none':
                bluebottles.append(0.)
            elif filename.Bluebottles[i]=='some' or filename.Bluebottles[i]=='many':
                bluebottles.append(1.)
#            elif filename.Bluebottles[i]=='many':
#                bluebottles.append(2.)
            elif filename.Bluebottles[i]=='likely':
                bluebottles.append(0.5)
    middle_date = []
    final_date, final_water_temp, final_bluebottles, final_description, final_wave_height = [], [], [], [], []
    for l in range(len(datee)):
        middle_date.append(datetime.date(int(datee[l].year), int(datee[l].month), int(datee[l].day)))
    # NOTE(review): raises IndexError if every row was filtered out above.
    final_date.append(middle_date[0])
    final_water_temp.append(water_temp[0])
    final_bluebottles.append(bluebottles[0])
    final_description.append(description[0])
    final_wave_height.append(wave_height[0])
    # Keep only the first report of each day; duplicates are assumed to be
    # consecutive, so comparing neighbours suffices.
    for l in range(1,len(middle_date)):
        if middle_date[l]!=middle_date[l-1]: #to only have one value per day
            final_date.append(middle_date[l])
            final_water_temp.append(water_temp[l])
            final_bluebottles.append(bluebottles[l])
            final_description.append(description[l])
            final_wave_height.append(wave_height[l])
    return final_date, final_water_temp, final_bluebottles, final_description, final_wave_height
# Load every beach spreadsheet; glob order gives 0 Clovelly, 1 Coogee, 2 Maroubra.
files_name = glob.glob('../raw_observation_data/bluebottle_lifeguard_reports/*2.xlsx') #0Clovelly 1Coogee 2Maroubra
beach=[]
# Placeholder lists with one slot per beach; every slot is overwritten below.
date_bb=[0,1,2]
date=[0,1,2]
water_temp=[0,1,2]
bluebottles=[0,1,2]
description=[0,1,2]
wave_height=[0,1,2]
date_box=[0,1,2]
for i in range(0,len(files_name)):
    beach.append(pd.read_excel(files_name[i]))
for i in range(0,len(water_temp)):
    date_bb[i], water_temp[i], bluebottles[i], description[i], wave_height[i] = GetVariables(beach[i])
# Truncate Coogee and Maroubra so all three series cover the same period.
date[0]=date_bb[0]
date[1]=date_bb[1][:1036] #delete data before 05/2016
date[2]=date_bb[2][:1025] #delete data before 05/2016
water_temp[1]=water_temp[1][:1036]
water_temp[2]=water_temp[2][:1025] #delete data before 05/2016
bluebottles[1]=bluebottles[1][:1036]
bluebottles[2]=bluebottles[2][:1025]
description[1]=description[1][:1036]
description[2]=description[2][:1025]
wave_height[1]=wave_height[1][:1036]
wave_height[2]=wave_height[2][:1025]
# date_box[i] = [none dates, likely dates, observed dates] for beach i.
for i in range(0,len(water_temp)):
    date_box[i]=[GetDateSomeLikelyNone(i,0.),GetDateSomeLikelyNone(i,0.5),GetDateSomeLikelyNone(i,1.)]
# Map free-text wave-height descriptions to a numeric scale (0 .. 3) by
# testing a characteristic character of each phrase.
for beachnb in (0,1,2):
    for i in range(len(wave_height[beachnb])):
        if wave_height[beachnb][i][0]=='b':
            wave_height[beachnb][i]=0
        elif wave_height[beachnb][i][1]=='o':
            wave_height[beachnb][i]=0.5
        elif wave_height[beachnb][i][-1]=='l':
            wave_height[beachnb][i]=1
        elif wave_height[beachnb][i][4]=='p':
            wave_height[beachnb][i]=1.5
        elif wave_height[beachnb][i][0]=='t':
            wave_height[beachnb][i]=2
        elif wave_height[beachnb][i][1]=='l':
            wave_height[beachnb][i]=3
def nonans(array):
    """Return the input 1-D numpy array with all NaN entries dropped.

    (author: Dr. Schaeffer)
    """
    keep = np.logical_not(np.isnan(array))
    return array[keep]
def pol2cart(rho, phi):
    """Convert polar coordinates (rho, phi in radians) to Cartesian (x, y).

    (author: Dr. Schaeffer)
    """
    return (rho * np.cos(phi), rho * np.sin(phi))
def cart2pol(x, y):
    """Convert Cartesian coordinates (x, y) to polar (radius, angle in radians).

    (author: Dr. Schaeffer)
    """
    radius = np.sqrt(x ** 2 + y ** 2)
    angle = np.arctan2(y, x)
    return (radius, angle)
def GetU(speed, direction):
    """Eastward (u) wind component from speed and compass direction [deg].

    The bearing is rotated into a math-convention angle and flipped 180°
    (the input is a "blowing from" direction) before projecting.
    """
    blow_to_deg = 90 - direction + 180
    return -speed * np.sin(np.pi / 180 * blow_to_deg)
def GetV(speed, direction):
    """Northward (v) wind component from speed and compass direction [deg].

    Same convention as GetU: the bearing is rotated into a math-convention
    angle and flipped 180° before projecting.
    """
    blow_to_deg = 90 - direction + 180
    return -speed * np.cos(np.pi / 180 * blow_to_deg)
def GetData(file):
    """Unpack the Kurnell wind table into date, fractional-day time and wind arrays.

    Args:
        file: DataFrame with DD/MM/YYYY/HH24/MI_local_time timestamp columns
              plus Wind_speed_ms, Wind_direction_degrees, Windgust_speed_ms.

    Returns:
        (dates, times, speed, direction, gust_speed) as numpy arrays, where
        *times* is the proleptic-Gregorian day ordinal plus the day fraction.
    """
    dates = []
    times = []
    for day, month, year, hh, mi in zip(file.DD, file.MM, file.YYYY,
                                        file.HH24, file.MI_local_time):
        d = datetime.date(int(year), int(month), int(day))
        dates.append(d)
        times.append(d.toordinal() + hh / 24 + mi / (24 * 60))
    return (np.asarray(dates), np.asarray(times),
            np.asarray(file.Wind_speed_ms),
            np.asarray(file.Wind_direction_degrees),
            np.asarray(file.Windgust_speed_ms))
def PolarPlot(nb,direction,speed):
    """Polar scatter of daily wind direction, coloured by the NEXT day's bluebottle flag.

    Each wind day is matched to the beach report of the following day; colour
    encodes none/likely/observed, marker shape the season of the wind day and
    marker area the squared wind speed.

    Args:
        nb: beach index (0 Clovelly, 1 Coogee, 2 Maroubra).
        direction: daily wind directions [deg], aligned with date_obs.
        speed: daily wind speeds, aligned with date_obs.

    NOTE(review): markersize/marker get one entry per date_obs day, while
    daily/blueb only grow on matched days — when some days do not match, the
    scatter loop below pairs theta[i] with the WRONG marker/size. Verify.
    """
    blueb=[]
    daily=[]
    markersize=[]
    marker=[]
    # One marker per month, Jan..Dec: s=summer, D=autumn, o=winter, ^=spring.
    list_marker=["s","s","D","D","D","o","o","o","^","^","^","s"]
    fig=plt.figure(figsize=(12,9))
    location=['Clovelly','Coogee','Maroubra']
    for i in range(len(direction)): #start in 2017
        markersize.append(speed[i]*speed[i])
        marker.append(list_marker[int(date_obs[i].month-1)])
        for j in range(len(date[nb])):
            if (date_obs[i]+datetime.timedelta(days=1))==date[nb][j]:
                daily.append(direction[i]*np.pi/180)
                if bluebottles[nb][j]==0.:
                    blueb.append('hotpink')
                elif bluebottles[nb][j]==0.5:
                    blueb.append('palegreen')
                elif bluebottles[nb][j]==1.:
                    blueb.append('dodgerblue')
    ax = plt.subplot(111, projection='polar')
    theta = daily
    # Radius is random jitter so coincident directions remain visible.
    r=8.*np.random.rand(len(daily))+1
    colors = blueb
    markz=marker
    size=markersize
    for i in range(len(theta)):
        ax.scatter(theta[i], r[i], c=colors[i], cmap='hsv', alpha=0.75,s=size[i],marker=markz[i])
    ax.set_rorigin(-2.5)
    ax.set_theta_zero_location('W', offset=10)
    plt.title("Daily averaged wind direction at "+str(location[nb]))
    legend_elements = [Line2D([0],[0],marker='s',label='Summer', color='w',markerfacecolor='dodgerblue', markersize=10),
                       Line2D([0],[0],marker='D',label='Autumn', color='w',markerfacecolor='dodgerblue', markersize=10),
                       Line2D([0],[0],marker='o',label='Winter', color='w',markerfacecolor='dodgerblue', markersize=10),
                       Line2D([0],[0],marker='^',label='Spring', color='w',markerfacecolor='dodgerblue', markersize=10)]
    legend_elements_two = [Patch(facecolor='hotpink', edgecolor='hotpink',label='None'),
                           Patch(facecolor='palegreen', edgecolor='palegreen',label='Likely'),
                           Patch(facecolor='dodgerblue', edgecolor='dodgerblue',label='Observed')]
    legend1=plt.legend(handles=legend_elements, loc='lower right')
    legend2=plt.legend(handles=legend_elements_two, loc='upper right')
    ax.add_artist(legend1)
    ax.add_artist(legend2)
    plt.show()
#    fig.savefig("../outputs_observation_data/sydney_obs/daily_averaged/polar_plot_"+str(location[nb])+"_pastday.png",dpi=300)
def RosePlot(beachnb,bluebnb,date_obs,direction_obs,speed_obs):
    """
    Draw and save a wind-rose of the day-before wind for one beach/category.

    Collects the wind speed/direction of the day preceding every report in
    category *bluebnb* (0 none, 1 likely, 2 some) at beach *beachnb*, then
    plots it with windrose.plot_windrose and saves a PNG.

    Note: the date_obs/direction_obs/speed_obs parameters shadow the
    module-level variables of the same names.
    """
    location=['Clovelly','Coogee','Maroubra']
    blueb=['none','likely','some']
    wind_speed=[]
    wind_direction=[]
    one_day = datetime.timedelta(days=1)
    for i in range(len(date_obs)):
        for j in range(len(date_box[beachnb][bluebnb])):
            if (date_obs[i]+one_day)==date_box[beachnb][bluebnb][j]:
                wind_speed.append(speed_obs[i])
                wind_direction.append(direction_obs[i])
    df = pd.DataFrame({"speed": wind_speed, "direction": wind_direction})
    # Speed bins for the rose colour classes (m/s).
    bins = np.arange(0.01, 24, 4)
    kind = "bar"
#    fig=plt.figure()
    plot_windrose(df, kind=kind, normed=True, opening=0.8, edgecolor="white",bins=bins)
    plt.title("Daily averaged wind direction 1 day before at "+str(location[beachnb])+" "+str(blueb[bluebnb]))
    plt.legend('wind speed (m/s)')
#    fig2=plt.figure()
#    plt.hist(wind_direction,bins_new)
    plt.savefig("../outputs_observation_data/sydney_obs/daily_averaged/rose"+str(location[beachnb])+"_"+str(blueb[bluebnb])+"_pastday.png",dpi=300)
def TimeSeriesPlot():
    """Five stacked time series: Maroubra observed-bluebottle flag, daily wind
    direction, speed, and the u/v daily-mean components.

    Reads only module-level variables (date_obs, date_box, wind_*_daily,
    u_daily, v_daily); takes no arguments and returns nothing.
    """
    # 1 on days with observed bluebottles at Maroubra (date_box[2][2]), else 0.
    color=np.zeros(len(date_obs))
    for j in range(len(date_box[2][2])):
        for i in range(len(date_obs)):
            if date_obs[i]==date_box[2][2][j]:
                color[i]=1
    fig=plt.figure()
    plt.subplot(511)
    plt.plot(date_obs,color)
    plt.ylabel('1 : Bluebottles')
    plt.subplot(512)
    plt.plot(date_obs,wind_direction_daily)
    plt.ylabel('daily averaged direction')
    plt.subplot(513)
    plt.plot(date_obs,wind_speed_daily)
    plt.ylabel('daily averaged speed')
    plt.subplot(514)
    plt.plot(date_obs,u_daily)
    plt.ylabel('daily averaged U')
    plt.subplot(515)
    plt.plot(date_obs,v_daily)
    plt.ylabel('daily averaged V')
    plt.show()
#    fig.savefig("../outputs_observation_data/sydney_obs/timeseries_5.png",dpi=300)
# Load the Kurnell wind record and coerce every column to numeric
# (non-numeric cells become NaN).
file_name = '../raw_observation_data/wind_kurnell_sydney_observatory/wind_66043_local_time.csv'
filename=pd.read_csv(file_name)
df = filename.apply(pd.to_numeric, args=('coerce',)) # inserts NaNs where empty cell!!! grrrr
date_obs_full, time_obs, speed_obs, direction_obs, gust_speed=GetData(df)
# Drop everything before row 276838 so the wind record starts in 2016,
# matching the beach observations.
date_obs_full=date_obs_full[276838:] #take data from 2016
date_obs=list(dict.fromkeys(date_obs_full)) #remove repetition
time_obs=time_obs[276838:]
speed_obs=speed_obs[276838:]
direction_obs=direction_obs[276838:]
gust_speed=gust_speed[276838:]#[450500:] = forsydney obs
u_obs=GetU(speed_obs,direction_obs)
v_obs=GetV(speed_obs,direction_obs)
def ToOceano(meteo_direction):
    """Convert meteorological wind directions [deg] to the oceanographic convention.

    Elementwise: d <= 270 maps to 270 - d, otherwise to 360 + 270 - d.
    Returns a float numpy array.
    """
    met = np.asarray(meteo_direction, dtype=float)
    return np.where(met <= 270, 270 - met, 360 + 270 - met)
def ToMeteo(oceano_direction):
    """Convert oceanographic directions [deg] back to the meteorological convention.

    The mapping is its own inverse: d <= 270 maps to 270 - d, otherwise to
    360 + 270 - d. Returns a float numpy array.
    """
    oce = np.asarray(oceano_direction, dtype=float)
    return np.where(oce <= 270, 270 - oce, 360 + 270 - oce)
def ToNormal(from_u_direction):
    """Map angles from the -180..+180 range onto 0..360.

    Negative angles get 360 added; non-negative ones pass through.
    Returns a float numpy array.
    """
    ang = np.asarray(from_u_direction, dtype=float)
    return np.where(ang < 0, 360 + ang, ang)
"""
day from midnight to 9
"""
# Shift timestamps back by 9 h (0.375 day) so the day boundary used for the
# daily averages matches the "midnight to 9" note above.
time_obs=np.asarray(time_obs)-0.375
direction_obs_new=ToOceano(direction_obs)
u_all, v_all = pol2cart(speed_obs,direction_obs_new*np.pi/180) #seem correct
t=[]
for i in range(len(time_obs)):
    t.append(time_obs[i].astype('int')) #list of days in time format
t=list(dict.fromkeys(t[:]))#remove repetition
#t=t[:-1] #remove last day bc not in date_obs
u_daily = np.zeros((len(t)))
v_daily = np.zeros((len(t)))
LENN = np.zeros((len(t)))
time_new=[]
for i in range (len(time_obs)):
    time_new.append(int(time_obs[i]))
for i in range(len(t)):
    # t[i] is a numpy integer scalar (from .astype('int')), so the comparison
    # broadcasts over the Python list and yields a boolean array; with a
    # builtin int it would evaluate to plain False and match nothing.
    tt0 = np.where(time_new==t[i]) #find all items from the same day
    LENN[i] = sum(np.isfinite(u_all[tt0]))
    if LENN[i]>0:
        u_daily[i] = np.mean(nonans(u_all[tt0])) #daily mean of wind direction
        v_daily[i] = np.mean(nonans(v_all[tt0])) #daily mean of wind speed
# Recombine the daily-mean components into speed and meteorological direction.
wind_speed_daily, direction_daily_o=cart2pol(u_daily,v_daily)
direction_daily_o=direction_daily_o*180/np.pi #rad to deg
direction_daily_step=ToNormal(direction_daily_o)
wind_direction_daily=ToMeteo(direction_daily_step)
#TimeSeriesPlot()
def GetMonthIndex(monthnb, beachnb):
    """Print the none/likely/observed tallies for one calendar month at one beach.

    Despite its name this function returns nothing: it counts the bluebottle
    flags (0 / 0.5 / 1) of all reports whose month equals *monthnb* at beach
    *beachnb* and writes the tallies to stdout.
    """
    flags = [b for d, b in zip(date[beachnb], bluebottles[beachnb])
             if d.month == monthnb]
    n_none = sum(1 for b in flags if b == 0)
    n_likely = sum(1 for b in flags if b == 0.5)
    n_observed = sum(1 for b in flags if b == 1)
    print(str(beachnb)+" "+str(monthnb)+"None : "+str(n_none)+", Likely : "+str(n_likely)+", Observed : "+str(n_observed))
""""
Histogram plots for each season
"""
date_obs_array=np.asarray(date_obs)
# Southern-hemisphere seasons by calendar month (Dec-Feb = summer, etc.).
summer=[d for d in date_obs if d.month == 12 or d.month == 1 or d.month == 2]
autumn=[d for d in date_obs if d.month == 3 or d.month == 4 or d.month == 5]
winter=[d for d in date_obs if d.month == 6 or d.month == 7 or d.month == 8]
spring=[d for d in date_obs if d.month == 9 or d.month == 10 or d.month == 11]
location=['Clovelly','Coogee','Maroubra']
# For each season, collect the matching dates, their positions in date_obs,
# and the daily wind direction on those days.
# NOTE(review): set intersection does not preserve order, so each seasonal
# series is unordered — fine for histograms, not for time-series plots.
both_summer=set(date_obs_array).intersection(summer)
index_summer = [date_obs.index(x) for x in both_summer]
direction_daily_summer=wind_direction_daily[index_summer]
both_autumn=set(date_obs_array).intersection(autumn)
index_autumn = [date_obs.index(x) for x in both_autumn]
direction_daily_autumn=wind_direction_daily[index_autumn]
both_winter=set(date_obs_array).intersection(winter)
index_winter = [date_obs.index(x) for x in both_winter]
direction_daily_winter=wind_direction_daily[index_winter]
both_spring=set(date_obs_array).intersection(spring)
index_spring = [date_obs.index(x) for x in both_spring]
direction_daily_spring=wind_direction_daily[index_spring]
# Indexed 0 spring, 1 summer, 2 autumn, 3 winter (order used by ColorHist/Sth).
direction_season=[direction_daily_spring,direction_daily_summer,direction_daily_autumn,direction_daily_winter]
index_season=[index_spring,index_summer,index_autumn,index_winter]
def ColorHist(nb,seas):
    """Bar-plot, per wind quadrant, the fraction of none vs. observed days.

    Args:
        nb: beach index (0 Clovelly, 1 Coogee, 2 Maroubra).
        seas: index into direction_season/index_season
              (0 spring, 1 summer, 2 autumn, 3 winter).

    NOTE(review): observed+none == 0 for an empty quadrant raises
    ZeroDivisionError below.
    """
    direction_daily=direction_season[seas]
    index=index_season[seas]
    date_obs_new=date_obs_array[index]
    # 90-degree sectors centred on the intercardinal directions.
    NE=np.where(np.logical_and(direction_daily>11.25, direction_daily<=101.25))
    SE=np.where(np.logical_and(direction_daily>101.25, direction_daily<=191.25))
    SW=np.where(np.logical_and(direction_daily>191.25, direction_daily<=281.25))
    NW=np.where(np.logical_or(direction_daily>281.25, direction_daily<=11.25))
    season=['spring', 'summer', 'autumn', 'winter']
    date=np.asarray(date_obs_new)
    date=[date[NE],date[SE],date[SW],date[NW]]
    observed_list, none_list = [], []
    # Per quadrant: count wind days matching an observed / a none report day,
    # then normalise within the quadrant.
    for l in range(len(date)):
        observed=0
        none=0
        for i in range(len(date[l])):
            for j in range(len(date_box[nb][2])):
                if date[l][i]==date_box[nb][2][j]:
                    observed+=1
        for i in range(len(date[l])):
            for j in range(len(date_box[nb][0])):
                if date[l][i]==date_box[nb][0][j]:
                    none+=1
        observed_list.append(observed/(observed+none))
        none_list.append(none/(observed+none))
    ind = np.arange(4)
    width=0.2
    fig=plt.figure()
    ax = fig.add_subplot(111)
    plt.xticks(ind, ('NE','SE','SW','NW'))
    ax.bar(ind-width/2, none_list, width=width, color='lightgrey', align='center',label='None')
    ax.bar(ind+width/2, observed_list, width=width, color='dodgerblue', align='center',label='Observed')
    plt.legend()
    plt.title(location[nb]+' '+str(season[seas]))
    plt.show()
    fig.savefig('../outputs_observation_data/kurnell/histograms_observation/seasonal_histograms/direction_'+str(location[nb])+'_'+str(season[seas])+'.png',dpi=300)
def Sth(nb,seas):
    """Bar-plot, per bluebottle situation, the quadrant mix of wind days.

    Inverse view of ColorHist: for 'None' and 'Some' days it shows which
    fraction of them had NE/SE/SW/NW winds (each group normalised to 1).

    Args:
        nb: beach index (0 Clovelly, 1 Coogee, 2 Maroubra).
        seas: index into direction_season/index_season
              (0 spring, 1 summer, 2 autumn, 3 winter).

    NOTE(review): sum_none == 0 or sum_observed == 0 (no matching days at
    all) raises ZeroDivisionError in the normalisation loop below.
    """
    direction_daily=direction_season[seas]
    index=index_season[seas]
    date_obs_new=date_obs_array[index]
    # 90-degree sectors centred on the intercardinal directions.
    NE=np.where(np.logical_and(direction_daily>11.25, direction_daily<=101.25))
    SE=np.where(np.logical_and(direction_daily>101.25, direction_daily<=191.25))
    SW=np.where(np.logical_and(direction_daily>191.25, direction_daily<=281.25))
    NW=np.where(np.logical_or(direction_daily>281.25, direction_daily<=11.25))
    season=['spring', 'summer', 'autumn', 'winter']
    date=np.asarray(date_obs_new)
    date=[date[NE],date[SE],date[SW],date[NW]]
    # Each quadrant holds [none_count, observed_count].
    NE_list, SE_list, SW_list, NW_list =[0,0], [0,0], [0,0], [0,0]
    liste=[NE_list, SE_list, SW_list, NW_list]
    sum_none=0.
    sum_observed=0.
    for l in range(len(date)):
        observed=0
        none=0
        for i in range(len(date[l])):
            for j in range(len(date_box[nb][2])): #nb
                if date[l][i]==date_box[nb][2][j]:
                    observed+=1
        for i in range(len(date[l])):
            for j in range(len(date_box[nb][0])):
                if date[l][i]==date_box[nb][0][j]:
                    none+=1
        liste[l][0]=none
        liste[l][1]=observed
        sum_none+=none
        sum_observed+=observed
    # Normalise: each column of bars sums to 1 across the four quadrants.
    for l in range(len(date)):
        liste[l][0]=liste[l][0]/sum_none
        liste[l][1]=liste[l][1]/sum_observed
    xbar=np.arange(2)
    width=0.2
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.xticks(xbar, ('None', 'Some'))
    ax.bar(xbar-3*width/2, liste[0], width=0.2, color='olivedrab', align='center',label='NE')
    ax.bar(xbar-width/2, liste[1], width=0.2, color='skyblue', align='center',label='SE')
    ax.bar(xbar+width/2, liste[2], width=0.2, color='plum', align='center',label='SW')
    ax.bar(xbar+3*width/2, liste[3], width=0.2, color='orange', align='center',label='NW')
    plt.legend()
    plt.title(location[nb]+' '+str(season[seas]))
    plt.show()
    fig.savefig('../outputs_observation_data/kurnell/histograms_observation/seasonal_histograms/situation_'+str(location[nb])+'_'+str(season[seas])+'.png',dpi=300)
def WaveHeightPlot():
    """Plot normalised wave-height histograms for observed vs. no-bluebottle days.

    One subplot per beach, comparing the wave-height distribution on days
    with observed bluebottles ('obs') against days with none ('none').
    Uses the module-level ``date``, ``date_box`` and ``wave_height`` lists.
    """
    we = [0, 1, 2]    # per-beach wave heights on 'observed' days
    we_0 = [0, 1, 2]  # per-beach wave heights on 'none' days
    for beachnb in (0, 1, 2):
        # Bug fix: the original bounded the inner loop by the *observed* date
        # list while also indexing the *none* date list with the same j,
        # silently truncating (or over-running) the 'none' sample. Membership
        # tests against each full list are both correct and O(n).
        observed_days = set(date_box[beachnb][2])
        none_days = set(date_box[beachnb][0])
        we[beachnb] = [wave_height[beachnb][i] for i in range(len(date[beachnb]))
                       if date[beachnb][i] in observed_days]
        we_0[beachnb] = [wave_height[beachnb][i] for i in range(len(date[beachnb]))
                         if date[beachnb][i] in none_days]
    fig = plt.figure(figsize=(15, 7))
    plt.suptitle('Wave height')
    plt.subplot(2, 2, 3)
    # 'density' replaces the 'normed' keyword removed in matplotlib >= 3.1.
    plt.hist(we[0], alpha=0.3, label='obs', density=True)
    plt.hist(we_0[0], alpha=0.3, label='none', density=True)
    plt.title('Clovelly')
    plt.grid()
    plt.legend()
    plt.subplot(2, 2, 2)
    plt.hist(we[1], alpha=0.3, label='obs', density=True)
    plt.hist(we_0[1], alpha=0.3, label='none', density=True)
    plt.title('Coogee')
    plt.grid()
    plt.legend()
    plt.subplot(2, 2, 1)
    plt.hist(we[2], alpha=0.3, label='obs', density=True)
    plt.hist(we_0[2], alpha=0.3, label='none', density=True)
    plt.title('Maroubra')
    plt.grid()
    plt.legend()
def ToRotateShelf(Wind_u, Wind_v):
    """Rotate wind components into the shelf coordinate frame (-25 degrees).

    Args:
        Wind_u: eastward component(s), scalar or numpy array.
        Wind_v: northward component(s), scalar or numpy array.

    Returns:
        (u_rot, v_rot): across-shelf and along-shelf components.
    """
    rot_deg_angle = -25
    theta = rot_deg_angle * np.pi / 180
    # Compute the trig terms once instead of twice each (also drops the
    # stray C-style semicolons of the original).
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    Wind_u_rot = cos_t * Wind_u + sin_t * Wind_v    # across-shelf
    Wind_v_rot = -sin_t * Wind_u + cos_t * Wind_v   # along-shelf
    return Wind_u_rot, Wind_v_rot
"""
UV data
file=pd.read_csv('../raw_observation_data/file_adcp_SYD_2016_2019.csv')
uv_datetime=[datetime.datetime.strptime(day, '%Y-%m-%d') for day in file.DATE]
u_syd_int=file['UCUR_ROT_int'].values.astype('float')
v_syd_int=file['VCUR_ROT_int'].values.astype('float')
u_syd_17=file['UCURrot_17m'].values.astype('float')
v_syd_17=file['VCURrot_17m'].values.astype('float')
uv_date=[datetime.date() for datetime in uv_datetime]
speed_current, direction_current_weird = cart2pol(u_syd_int, v_syd_int)
direction_current_oceano = ToNormal(direction_current_weird*180/np.pi)
#Get the index of observed BB days at maroubra
index=[]
for i in range(0,len(uv_date)):
if np.any(np.asarray(uv_date[i])==np.asarray(date_box[2][2])):
index.append(i)
uv_date_mar_obs=np.asarray(uv_date)[index]
def PlotHistUV(beachnb):
index=[]
index0=[]
for i in range(0,len(uv_date)):
if np.any(np.asarray(uv_date[i])==np.asarray(date_box[beachnb][2])):
index.append(i)
if np.any(np.asarray(uv_date[i])==np.asarray(date_box[beachnb][0])):
index0.append(i)
bins=np.linspace(-0.15,0.15,40)
fig=plt.figure(figsize=(15,7))
plt.suptitle(str(location[beachnb]))
plt.subplot(2,2,1)
plt.hist(u_syd_17[index],alpha=0.3,label='obs',bins=bins,normed=True)
plt.hist(u_syd_17[index0],alpha=0.3,label='none',bins=bins,normed=True)
plt.title('u ors 17')
plt.grid()
plt.legend()
plt.subplot(2,2,2)
plt.hist(u_syd_int[index],alpha=0.3,label='obs',bins=bins,normed=True)
plt.hist(u_syd_int[index0],alpha=0.3,label='none',bins=bins,normed=True)
plt.title('u ors int')
plt.grid()
plt.legend()
plt.subplot(2,2,3)
bins_v=np.linspace(-1,1,40)
plt.hist(v_syd_17[index],alpha=0.3,label='obs',bins=bins_v,normed=True)
plt.hist(v_syd_17[index0],alpha=0.3,label='none',bins=bins_v,normed=True)
plt.title('v ors 17')
plt.grid()
plt.legend()
plt.subplot(2,2,4)
plt.hist(v_syd_int[index],alpha=0.3,label='obs',bins=bins_v,normed=True)
plt.hist(v_syd_int[index0],alpha=0.3,label='none',bins=bins_v,normed=True)
plt.title('v ors int')
plt.grid()
plt.legend()
fig.savefig('u_v_current_hist'+str(location[beachnb])+'.png',dpi=300)
def RosePlotCurrent():
df = pd.DataFrame({"speed": speed_current, "direction": ToMeteo(direction_current_oceano)})
bins = np.arange(0.01, 1, 0.2)
kind = "bar"
# fig=plt.figure()
plot_windrose(df, kind=kind, normed=True, opening=0.8, edgecolor="white",bins=bins,blowto=True)
plt.title('Daily averaged current SYD100, oceano')
RosePlotCurrent()
"""
"""
NE=np.where(np.logical_and(wind_direction_daily>11.25, wind_direction_daily<=101.25))
SE=np.where(np.logical_and(wind_direction_daily>101.25, wind_direction_daily<=191.25))
SW=np.where(np.logical_and(wind_direction_daily>191.25, wind_direction_daily<=281.25))
NW=np.where(np.logical_or(wind_direction_daily>281.25, wind_direction_daily<=11.25))
date=np.asarray(date_obs)
date=[date[NE],date[SE],date[SW],date[NW]]
observed_list, none_list = [], []
for l in range(len(date)):
observed=0
none=0
for i in range(len(date[l])):
for j in range(len(date_box[1][2])): #Coogee
if date[l][i]==date_box[1][2][j]:
observed+=1
for i in range(len(date[l])):
for j in range(len(date_box[1][0])):
if date[l][i]==date_box[1][0][j]:
none+=1
observed_list.append(observed/(observed+none))
none_list.append(none/(observed+none))
ind = np.arange(4)
width=0.2
plt.xticks(ind, ('NE','SE','SW','NW'))
ax = plt.subplot(111)
ax.bar(ind-width/2, none_list, width=width, color='lightgrey', align='center',label='None')
ax.bar(ind+width/2, observed_list, width=width, color='dodgerblue', align='center',label='Observed')
plt.legend()
plt.title('Coogee')
plt.show()
fig.savefig('../outputs_observation_data/kurnell/histograms_observation/direction_coogee.png',dpi=300)
date=np.asarray(date_obs)
date=[date[NE],date[SE],date[SW],date[NW]]
NE_list, SE_list, SW_list, NW_list =[0,0], [0,0], [0,0], [0,0]
liste=[NE_list, SE_list, SW_list, NW_list]
sum_none=0.
sum_observed=0.
for l in range(len(date)):
observed=0
none=0
for i in range(len(date[l])):
for j in range(len(date_box[1][2])): #Coogee
if date[l][i]==date_box[1][2][j]:
observed+=1
for i in range(len(date[l])):
for j in range(len(date_box[1][0])):
if date[l][i]==date_box[1][0][j]:
none+=1
liste[l][0]=none
liste[l][1]=observed
sum_none+=none
sum_observed+=observed
for l in range(len(date)):
liste[l][0]=liste[l][0]/sum_none
liste[l][1]=liste[l][1]/sum_observed
xbar=np.arange(2)
ax = plt.subplot(111)
plt.xticks(xbar, ('None', 'Some'))
ax.bar(xbar-3*width/2, liste[0], width=0.2, color='olivedrab', align='center',label='NE')
ax.bar(xbar-width/2, liste[1], width=0.2, color='skyblue', align='center',label='SE')
ax.bar(xbar+width/2, liste[2], width=0.2, color='plum', align='center',label='SW')
ax.bar(xbar+3*width/2, liste[3], width=0.2, color='orange', align='center',label='NW')
plt.legend()
plt.title('Coogee')
plt.show()
fig.savefig('../outputs_observation_data/kurnell/histograms_observation/situation_coogee.png',dpi=300)
TimeSeriesPlot()
BoxPlot(0,date_obs,wind_direction_daily)
BoxPlot(1,date_obs,wind_direction_daily)
BoxPlot(2,date_obs,wind_direction_daily)
PolarPlot(0, wind_direction_daily, wind_speed_daily)
PolarPlot(1, wind_direction_daily, wind_speed_daily)
PolarPlot(2, wind_direction_daily, wind_speed_daily)
BoxPlot(0,date_obs,wind_direction_daily)
BoxPlot(1,date_obs,wind_direction_daily)
BoxPlot(2,date_obs,wind_direction_daily)
RosePlot(0,0,date_obs,wind_direction_daily,wind_speed_daily)
RosePlot(0,1,date_obs,wind_direction_daily,wind_speed_daily)
RosePlot(0,2,date_obs,wind_direction_daily,wind_speed_daily)
RosePlot(1,0,date_obs,wind_direction_daily,wind_speed_daily)
RosePlot(1,1,date_obs,wind_direction_daily,wind_speed_daily)
RosePlot(1,2,date_obs,wind_direction_daily,wind_speed_daily)
RosePlot(2,0,date_obs,wind_direction_daily,wind_speed_daily)
RosePlot(2,1,date_obs,wind_direction_daily,wind_speed_daily)
RosePlot(2,2,date_obs,wind_direction_daily,wind_speed_daily)
timeseries plot
import cmocean
fig=plt.figure()
ax=plt.axes()
years = mdates.YearLocator() # every year
months = mdates.MonthLocator(range(0, 12), interval=2) # every 2month
years_fmt = mdates.DateFormatter('%Y')
month_fmt = mdates.DateFormatter('%m')
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
ax.xaxis.set_minor_formatter(month_fmt)
fig.autofmt_xdate()
try_date=[]
try_speed=[]
try_direction=[]
ax.set_title('Dots for BB at Coogee')
ax.set_ylabel('Wind speed in m/s')
for i in range(len(date_obs)-50000,len(date_obs)-1):
for j in range(len(date[2])):
if date_obs[i+1]==date[2][j]:
if bluebottles[2][j]==1:
ax.scatter(date_obs[i],20,marker='+',s=10,c='skyblue')
try_date.append(date_obs[i])
try_speed.append(speed_obs[i])
try_direction.append(direction_obs[i])
sc=ax.scatter(try_date[:],try_speed[:],c=try_direction[:],marker='+',cmap=cmocean.cm.phase)
cbar=plt.colorbar(sc)
cbar.set_label('Wind direction in degree')
fig.savefig("../outputs_observation_data/sydney_obs/timeseries_bb_only_maroubra_past.png",dpi=300)
"""
| true |
e0474cc8deaeb1a20bc142d1cacbda49b8db3158 | Python | velagalasailaja/Become_coder_python | /primecount_in_lists.py | UTF-8 | 464 | 3.15625 | 3 | [] | no_license | import math as m
def countprime(n,data):
def isprime(n):
if n==1:
return 0
s=int(m.sqrt(n))
for i in range(2,s+1):
if n%i==0:
return 0
return 1
for i in data:
pc=0
if isprime(i):
pc+=1
return pc
# Read the element count and the whitespace-separated integers from stdin,
# then report how many of them are prime.
n=int(input())
data=list(map(int,input().split()))
pc=countprime(n,data)
print(pc)
| true |
485dcdb73f6c3463f35f83de5f1ded1f2bfa4757 | Python | florisvb/ArduinoStepper | /python/examples/basics.py | UTF-8 | 2,065 | 3.078125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import time
import numpy as np
import arduino_stepper.arduino_stepper as arduino_stepper
if __name__ == '__main__':
    # Demo for the Arduino_Stepper driver (Python 2: bare ``print`` statements).
    # Exercises absolute moves, constant-velocity runs and position polling
    # over a serial link; requires the stepper hardware on the port below.
    # set up variables for serial communication
    port = '/dev/ttyACM0'
    baudrate = 19200
    timeout = 1
    # instantiate stepper class
    print 'Initiating arduino, allow a few seconds'
    astep = arduino_stepper.Arduino_Stepper(port=port,timeout=timeout, baudrate=baudrate)
    print
    # reset position to zero
    astep.reset_step_counter()
    # go forward 500 steps at 200 Hz
    print 'Moving forward 500 steps at 200 Hz'
    astep.go_to_pos(500, 200, wait_until_done=True)
    # get position, and print it to the console - it had better be 500. Also, show roundtrip latency
    t = time.time()
    print 'Stepper Position: ', astep.get_pos()
    print 'Roundtrip Latency: ', time.time()-t, ' sec'
    print
    # pause for 1 second
    print 'pausing'
    time.sleep(1)
    print
    # move at 500 Hz for 2 seconds. Then stop. Then move at 500 Hz in the opposite direction for 2 seconds.
    print 'Moving in square wave'
    t_start = time.time()
    t_elapsed = time.time()-t_start
    astep.set_vel(500)
    # busy-wait until 2 seconds have elapsed (keeps the timing logic simple)
    while t_elapsed < 2:
        t_elapsed = time.time()-t_start
    astep.set_vel(0)
    t_start = time.time()
    t_elapsed = time.time()-t_start
    astep.set_vel(-500)
    while t_elapsed < 2:
        t_elapsed = time.time()-t_start
    astep.set_vel(0)
    print
    # pause for 1 second
    print 'pausing'
    time.sleep(1)
    print
    # move at 440 Hz for 1 second, and print out the position every 0.05 seconds
    print 'Moving at 440 Hz and sending position data'
    t_start = time.time()
    t_prev = t_start
    t_elapsed = time.time()-t_start
    astep.set_vel(440)
    while t_elapsed < 1:
        t_now = time.time()
        t_elapsed = t_now-t_start
        # throttle position queries to at most one every 50 ms
        if t_now-t_prev >= 0.05:
            print 'Stepper Position: ', astep.get_pos()
            t_prev = t_now
    astep.set_vel(0)
    print
    print 'Done!'
90d3937b3f14b3217128e7020ab4155b0a62522f | Python | venkat-oss/SPOJ | /BITPLAY.py | UTF-8 | 303 | 2.65625 | 3 | [] | no_license | T = int(input())
# Handle T independent queries read from stdin.
for _ in range(T):
    N, K = map(int, input().split())
    # No set bits requested, or no candidate below N exists: no answer.
    if K == 0 or N == 1:
        print(-1)
        continue
    # Start from 1 (low bit set), then greedily add the highest remaining
    # bits, most significant first, while staying strictly below N.
    answer = 1
    remaining = K - 1
    for shift in range(30, -1, -1):
        candidate = answer + (1 << shift)
        if remaining > 0 and candidate < N:
            remaining -= 1
            answer = candidate
    print(answer)
| true |
708ea2057bbcd91ab83d57346e343c41901f4158 | Python | Falmouth-Games-Academy/bsc-course-materials | /COMP110/05/graphs.py | UTF-8 | 3,017 | 3.109375 | 3 | [] | no_license | __author__ = 'Ed'
import matplotlib.pyplot as plt
plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams.update({'font.size': 22})
# Data for the "marking time vs. number of students" example plots.
x = np.linspace(1, 100)
# Constant growth: a flat 5 hours regardless of class size.
y_const = x*0 + 5
plt.ylim(0, 10)
plt.plot(x, y_const)
plt.xlabel('Students')
plt.ylabel('Time (hours)')
plt.savefig('plot_constant.pdf', bbox_inches='tight')
plt.clf()
# Linear growth: one hour per student.
y_linear = x
plt.plot(x, y_linear)
plt.xlabel('Students')
plt.ylabel('Time (hours)')
plt.savefig('plot_linear.pdf', bbox_inches='tight')
plt.clf()
# Quadratic growth: n*(n-1)/2 pairings at 10 minutes each, in hours.
y_quad = 0.5 * x * (x-1) * (10.0 / 60.0)
plt.plot(x, y_quad)
plt.xlabel('Students')
plt.ylabel('Time (hours)')
plt.savefig('plot_quadratic.pdf', bbox_inches='tight')
plt.clf()
# All three growth rates on shared axes for comparison.
plt.ylim(0, 200)
plt.plot(x, y_const)
plt.plot(x, y_linear)
plt.plot(x, y_quad)
plt.xlabel('Students')
plt.ylabel('Time (hours)')
plt.savefig('plot_all.pdf', bbox_inches='tight')
plt.clf()
# From here on the curves are abstract complexity sketches, so tick marks
# and labels are suppressed on both axes before each plot.
# NOTE(review): passing the strings 'on'/'off' to tick_params is a legacy
# matplotlib idiom; current releases expect booleans - confirm against the
# pinned matplotlib version before upgrading.
plt.tick_params(axis='y', which='both', left='off', right='off', labelleft='off')
plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
# O(n): straight line.
plt.plot(x, y_linear)
plt.xlabel('Elements')
plt.ylabel('Time')
plt.savefig('plot2_linear.pdf', bbox_inches='tight')
plt.clf()
plt.tick_params(axis='y', which='both', left='off', right='off', labelleft='off')
plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
# O(1): flat line.
plt.plot(x, y_const)
plt.xlabel('Elements')
plt.ylabel('Time')
plt.savefig('plot2_constant.pdf', bbox_inches='tight')
plt.clf()
# O(log n) curve.
y_log = np.log(x)
plt.tick_params(axis='y', which='both', left='off', right='off', labelleft='off')
plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
plt.plot(x, y_log)
plt.xlabel('Elements')
plt.ylabel('Time')
plt.savefig('plot2_log.pdf', bbox_inches='tight')
plt.clf()
# Linear versus (scaled) logarithmic growth on the same axes.
y_log = np.log(x)
plt.tick_params(axis='y', which='both', left='off', right='off', labelleft='off')
plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
plt.plot(x, y_linear)
plt.plot(x, y_log * 5)
plt.xlabel('Elements')
plt.ylabel('Time')
plt.savefig('plot2_linear_log.pdf', bbox_inches='tight')
plt.clf()
# O(n log n) curve; last figure, so no clf() afterwards.
y_nlogn = x * np.log(x)
plt.tick_params(axis='y', which='both', left='off', right='off', labelleft='off')
plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
plt.plot(x, y_nlogn)
plt.xlabel('Elements')
plt.ylabel('Time')
plt.savefig('plot2_nlogn.pdf', bbox_inches='tight')
"""plt.subplot(2,4,1)
plt.ylim(0, 1.2)
plt.plot(x, x*0+1)
plt.title("Constant")
plt.subplot(2,4,2)
plt.plot(x, np.log(x))
plt.title("Logarithmic")
plt.subplot(2,4,3)
plt.plot(x, x**0.5)
plt.title("Fractional power")
plt.subplot(2,4,4)
plt.plot(x, x)
plt.title("Linear")
plt.subplot(2,4,5)
plt.plot(x, x**2)
plt.title("Quadratic")
plt.subplot(2,4,6)
plt.plot(x, x**4)
plt.title("Polynomial")
plt.subplot(2,4,7)
plt.plot(x, 2**x)
plt.title("Exponential")
plt.subplot(2,4,8)
n = range(100+1)
plt.plot(n, [np.math.factorial(i) for i in n])
plt.title("Factorial")
plt.show()
"""
| true |
60c682e0ff926dcfca0727559f5cc78ee033272e | Python | zOOGal/Computer-Vision | /HW6 Image Segmentation/funcs.py | UTF-8 | 4,090 | 2.890625 | 3 | [] | no_license | import numpy as np
import cv2
import copy
def Otsu(ch_info):
    '''
    Find a global threshold for one image channel by maximising the
    between-class separation over all 256 candidate grey levels.

    :param ch_info: flat sequence of pixel intensities in [0, 255]
    :return: threshold level in [0, 255], or -1 when no split yields two
             non-empty classes (e.g. a constant channel)
    '''
    img_size = len(ch_info)
    histRange = (0, 256)
    hist, bin_edge = np.histogram(ch_info, bins=256, range=histRange)
    # Total intensity sum: histogram counts weighted by bin start values.
    weight_total = sum(hist * bin_edge[:-1])
    weight_back = 0
    num_back = 0
    threshold = -1
    max_btw_var = 0
    for i in range(256):
        num_back = num_back + hist[i]
        num_fore = img_size - num_back
        weight_back = weight_back + i * hist[i]  # intensity sum of background class
        # Bug fix: ``np.int`` was removed in NumPy 1.24; the builtin int()
        # performs the identical truncation.
        weight_fore = int(weight_total - weight_back)  # intensity sum of foreground class
        prob_fore = num_fore / img_size
        if prob_fore == 0 or prob_fore == 1:  # one class empty: no valid split here
            continue
        prob_back = 1 - prob_fore
        # NOTE(review): this criterion compares class intensity *sums*, not
        # class means, so it is a variant of the textbook Otsu measure; kept
        # as-is apart from the np.int fix.
        btw_var = prob_fore * prob_back * (weight_back - weight_fore) ** 2
        # Strict '>' keeps the first level attaining the maximum separation.
        if btw_var > max_btw_var:
            max_btw_var = btw_var
            threshold = i
    if threshold == -1:
        print('\nFinding threshold failed!!!!\n')
    return threshold
def segmentation_rgb(img, iteration, img_num, invert=None, tag=True):
    """Segment a colour image by iterated Otsu thresholding per channel.

    Each of the three channels is thresholded ``iteration`` times (every
    pass re-runs Otsu on only the pixels kept by the previous pass), the
    per-channel masks are written to disk for inspection, and all three
    are AND-combined into a single binary mask.

    :param img: colour image of shape (H, W, 3)
    :param iteration: number of Otsu refinement passes per channel
    :param img_num: index used to name the per-channel debug images
    :param invert: three flags; 1 inverts the corresponding channel mask
                   before it is combined (defaults to no inversion)
    :param tag: True keeps pixels at or below the threshold (class C0),
                False keeps pixels above it
    :return: uint8 mask of shape (H, W)
    """
    if invert is None:
        invert = [0, 0, 0]
    rows, cols = img.shape[0:2]
    combined = np.full((rows, cols), 255, dtype='uint8')
    for ch in range(3):
        channel = img[:, :, ch]
        # Work on an independent flat copy so refinement never touches img.
        samples = copy.deepcopy(channel).ravel()
        for _ in range(iteration):
            thresh = Otsu(samples)
            mask = np.zeros((rows, cols), dtype='uint8')
            if tag is True:
                mask[channel <= thresh] = 255
                kept = [v for v in samples if v <= thresh]
            else:
                mask[channel > thresh] = 255
                kept = [v for v in samples if v > thresh]
            # Next pass only re-examines the pixels kept by this pass.
            samples = np.asarray(kept)
        cv2.imwrite('img%(num)d_channel%(ch)d.jpg' % {"num": img_num, "ch": ch}, mask)
        if invert[ch] == 1:
            combined = cv2.bitwise_and(combined, cv2.bitwise_not(mask))
        else:
            combined = cv2.bitwise_and(combined, mask)
    return combined
def texture_segmentation(img, win_list, img_num):
    """Build one texture map per window size from local grey-level variance.

    For each window size the grey-level variance of the surrounding
    (size x size) patch is computed at every interior pixel, the map is
    normalised to [0, 255] and written to disk for inspection.

    :param img: colour input image (converted to grey internally)
    :param win_list: odd window sizes to evaluate
    :param img_num: index used to name the saved texture images
    :return: uint8 array of shape (H, W, len(win_list))
    """
    grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    rows, cols = grey.shape[0], grey.shape[1]
    maps = np.zeros((rows, cols, len(win_list)), dtype='uint8')
    for idx, size in enumerate(win_list):
        margin = int(size / 2)
        # Border pixels (closer than margin to an edge) stay zero.
        for r in range(margin, rows - margin):
            for c in range(margin, cols - margin):
                patch = grey[r - margin:r + margin + 1, c - margin:c + margin + 1]
                maps[r, c, idx] = np.var(patch)
        # Stretch the variance map to the full 0..255 range.
        maps[:, :, idx] = (255 * (maps[:, :, idx] / maps[:, :, idx].max())).astype(np.uint8)
        cv2.imwrite('img%(img_num)d_texture_win%(win_size)d_texture.jpg' % {"img_num": img_num, "win_size": size},
                    maps[:, :, idx])
    return maps
def find_contour(img):
    """Return a mask marking foreground pixels that touch the background.

    A non-zero pixel whose 3x3 (8-connected) neighbourhood contains any
    zero is marked 255; everything else is 0.  The outermost row/column
    of the image is never examined and therefore stays 0.

    :param img: 2D binary image (0 = background, non-zero = foreground)
    :return: uint8 contour mask of the same shape
    """
    rows, cols = img.shape[0], img.shape[1]
    contour = np.zeros((rows, cols), dtype='uint8')
    for r in range(1, rows - 1):
        for c in range(1, cols - 1):
            if img[r, c] == 0:
                continue
            window = img[r - 1:r + 2, c - 1:c + 2]
            # Interior pixels (all neighbours set) are not contour.
            contour[r, c] = 0 if np.all(window) else 255
    return contour
def draw_contour(img, img_contour):
    """Burn a contour into a copy of ``img`` by zeroing contour pixels.

    :param img: source image (left unmodified)
    :param img_contour: mask whose 255-valued pixels mark the contour
    :return: copy of ``img`` with contour pixels set to 0
    """
    overlaid = copy.deepcopy(img)
    on_contour = img_contour == 255
    overlaid[on_contour] = 0
    return overlaid
| true |
d3eae88cfb1e39767b7479ea81438d0168e0f0e2 | Python | kevinguy28/Finding-the-Minimum-Spanning-Tree-of-a-graph | /find_mst.py | UTF-8 | 3,378 | 4.375 | 4 | [] | no_license | # Graphs will be input as an list of edges. Each edge will be a tuple of the form (u,v,w)
# Where u and v represent the endpoints of an edge, and w represents the cost of that edge
# The function will also take as input n, the number of vertices in the graph
# The graph is undirected(you can travel from u to v or from v to u)
# Your function should output a list of edges from the original graph which form the minumum spanning tree
def minimum_spanning_tree(graph, n):
    """Return the edge list of a minimum spanning tree of ``graph``.

    Seeds the tree with the globally cheapest edge, then delegates to
    ``findMinSpanTree`` to grow it one frontier edge at a time.

    :param graph: list of undirected edges as (u, v, w) tuples
    :param n: number of vertices in the graph
    :return: list of (u, v, w) edges forming the MST
    """
    ordered = sortGraphByWeight(graph)
    seed = ordered[0]
    visited = addToTraversedNodes(seed, [])
    return findMinSpanTree(ordered, n, visited, [seed], seed[2])
def findMinSpanTree(graph, n, traversedNodes, traversedPaths, totalWeight):
    """Recursively grow the spanning tree by one cheapest frontier edge.

    A frontier edge has exactly one endpoint already in ``traversedNodes``.
    Recursion stops once all ``n`` vertices have been reached.

    Bug fix: the original candidate filter read
    ``traversedNodes[i] == graph[a][0] or graph[a][1]`` - an operator
    precedence mistake.  Because a non-zero second endpoint is truthy, the
    test was almost always True, but any edge whose *second* endpoint was
    vertex 0 was silently skipped whenever its first endpoint was not yet
    in the tree, which could yield a non-minimal tree.  The scan is now a
    single pass over the edge list with an explicit one-endpoint-inside
    test.

    :param graph: list of (u, v, w) edges (any order; weights are compared)
    :param n: total number of vertices to span
    :param traversedNodes: vertices already in the tree (mutated in place)
    :param traversedPaths: edges already in the tree (mutated in place)
    :param totalWeight: running weight accumulator (carried but not returned)
    :return: ``traversedPaths`` once it spans all ``n`` vertices
    :raises ValueError: if the graph is disconnected and cannot be spanned
    """
    if n == len(traversedNodes):
        return traversedPaths
    cheapest = None
    for edge in graph:
        if edge in traversedPaths:
            continue
        u, v, weight = edge
        # A usable edge joins exactly one tree vertex to one outside vertex.
        if (u in traversedNodes) != (v in traversedNodes):
            if cheapest is None or weight < cheapest[2]:
                cheapest = edge
    if cheapest is None:
        # No frontier edge exists: the remaining vertices are unreachable.
        raise ValueError("graph is not connected; cannot span all vertices")
    for endpoint in cheapest[:2]:
        if endpoint not in traversedNodes:
            traversedNodes.append(endpoint)
    traversedPaths.append(cheapest)
    return findMinSpanTree(graph, n, traversedNodes, traversedPaths,
                           totalWeight + cheapest[2])
# Sorts the edge list by ascending weight.
def sortGraphByWeight(graph):
    """Sort ``graph`` in place by ascending edge weight and return it.

    Replaces the original O(n^2) bubble sort with the built-in stable
    Timsort; like the original, the list is mutated in place and also
    returned for convenience, and equal-weight edges keep their relative
    order.

    :param graph: list of (u, v, w) edge tuples
    :return: the same list, sorted by w
    """
    graph.sort(key=lambda edge: edge[2])
    return graph
def addToTraversedNodes(theTuple, traversedNodes):
    """Append the two endpoints of an edge to the visited-vertex list.

    Endpoints already present are left alone; the (mutated) list is
    returned for convenience.

    :param theTuple: edge as a (u, v, w) tuple
    :param traversedNodes: list of vertices visited so far
    :return: the same list, possibly extended
    """
    for endpoint in theTuple[:2]:
        if endpoint not in traversedNodes:
            traversedNodes.append(endpoint)
    return traversedNodes
def print_graph(graph):
    """Pretty-print an MST edge list together with its total weight.

    :param graph: list of (u, v, w) edge tuples
    """
    total = sum(edge[2] for edge in graph)
    print("Minimum Spanning Tree")
    print("---------------------")
    print("Weight:\t" + str(total))
    print("Edges:")
    print(graph)
    print("#####################\n\n")
# --- Demo / smoke test: build MSTs for three sample graphs ---
# Each graph is an edge list of (u, v, w) tuples for an undirected graph.
# g1: a triangle on 3 vertices; the MST drops the heaviest edge.
g1 = [
(0,1,2),
(0,2,2),
(1,2,1)
]
# g2: five vertices, six edges.
g2 = [
(0,1,4),
(0,2,3),
(0,3,2),
(0,4,7),
(1,3,3),
(2,3,1),
]
# g3: ten vertices, fourteen edges.
g3 = [
(0,1,4),
(0,2,3),
(0,5,7),
(2,3,4),
(1,3,2),
(2,4,7),
(4,5,4),
(5,9,1),
(8,9,3),
(5,8,2),
(1,7,10),
(5,6,5),
(6,7,2),
(7,8,1),
]
mst1 = minimum_spanning_tree(g1, 3)
print_graph(mst1) # Expected Weight: 3
mst2 = minimum_spanning_tree(g2, 5)
print_graph(mst2) # Expected Weight: 13
mst3 = minimum_spanning_tree(g3, 10)
print_graph(mst3) # Expected Weight: 26
| true |
9481b349cdf3bb5b283b2b1010787cb0ac6c9618 | Python | ehdgua01/Algorithms | /coding_test/codility/cyclic_rotation/test.py | UTF-8 | 521 | 3.015625 | 3 | [] | no_license | import unittest
from .solution import solution
class TestCase(unittest.TestCase):
    """Unit tests for ``solution`` imported from the sibling module.

    The expected values show that ``solution(A, k)`` rotates list ``A``
    to the right ``k`` times, with indices wrapping around cyclically.
    """
    def test_case_1(self) -> None:
        # Typical inputs: a proper rotation, an all-equal list, and a
        # rotation by exactly len(A), which leaves the list unchanged.
        self.assertEqual(solution([3, 8, 9, 7, 6], 3), [9, 7, 6, 3, 8])
        self.assertEqual(solution([0, 0, 0], 1), [0, 0, 0])
        self.assertEqual(solution([1, 2, 3, 4], 4), [1, 2, 3, 4])
    def test_case_2(self) -> None:
        # Edge cases: empty list, single element, and k > len(A)
        # (the effective shift is k modulo len(A)).
        self.assertEqual(solution([], 39), [])
        self.assertEqual(solution([1], 5), [1])
        self.assertEqual(solution([1, 1, 2, 3, 5], 7), [3, 5, 1, 1, 2])
| true |