| blob_id (string, 40 chars) | language (string, 1 class) | repo_name (string, 5-133 chars) | path (string, 2-333 chars) | src_encoding (string, 30 classes) | length_bytes (int64, 18-5.47M) | score (float64, 2.52-5.81) | int_score (int64, 3-5) | detected_licenses (list, 0-67 items) | license_type (string, 2 classes) | text (string, 12-5.47M chars) | download_success (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
af2dad5cd4bc92bfcd3b5067a7117018b11e6fc3
|
Python
|
darshilthakore/catalogue-back
|
/catalogue/models.py
|
UTF-8
| 780
| 2.53125
| 3
|
[] |
no_license
|
from django.db import models
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=32)
def __str__(self):
return f"{self.name}"
class Subcategory(models.Model):
name = models.CharField(max_length=32)
category = models.ForeignKey(Category, on_delete=models.CASCADE, related_name="subcategory")
def __str__(self):
return f"{self.name}"
class Product(models.Model):
name = models.CharField(max_length=64)
category = models.ForeignKey(Category, on_delete=models.CASCADE, related_name="productcat")
subcategory = models.ForeignKey(Subcategory, on_delete=models.CASCADE, related_name="productsubcat")
def __str__(self):
return f"{self.name} | {self.subcategory} | {self.category}"
| true
|
59a8b41888a2e057025b4ffd683f97335ba3da7c
|
Python
|
lolamathematician/AOC_2019
|
/2/2.1.py
|
UTF-8
| 718
| 3.5625
| 4
|
[] |
no_license
|
with open("input.txt", "r") as f:
codes = [int(i) for i in f.read().split(",")]
def main():
for opcode_position in range(0, len(codes)-1, 4):
if codes[opcode_position] == 99:
return codes[0]
else:
input_position_1 = codes[opcode_position+1]
input_position_2 = codes[opcode_position+2]
output_position = codes[opcode_position+3]
if codes[opcode_position] == 1:
codes[output_position] = codes[input_position_1] + codes[input_position_2]
elif codes[opcode_position] == 2:
codes[output_position] = codes[input_position_1] * codes[input_position_2]
else:
print("An error has occurred. Opcode: {}. Opcode position: {}".format(codes[output_position], output_position))
print(main())
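# Worked example (added, not part of the original file): for the program
# [1, 0, 0, 0, 99] the single add instruction stores codes[0] + codes[0] at
# position 0, so memory becomes [2, 0, 0, 0, 99] and main() returns 2.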
| true
|
bb539656cdc7eb538768299822dbd751a04d75ce
|
Python
|
voidlessVoid/advent_of_code_2020
|
/day_15/mischa/solution.py
|
UTF-8
| 1,426
| 2.859375
| 3
|
[] |
no_license
|
import os
import sys
import pandas as pd
import numpy as np
import math
import datetime
import operator
from copy import deepcopy
from collections import Counter, ChainMap, defaultdict, deque
from itertools import cycle
from more_itertools import locate
from functools import reduce
CURRENT_DIRECTORY = os.path.dirname(__file__)
os.chdir(CURRENT_DIRECTORY)
def read_input_lines():
with open('input.txt', 'r') as fh:
return [x.strip() for x in fh.readlines()]
def read_input_text():
with open('input.txt', 'r') as fh:
return fh.read().strip()
data = [13,0,10,12,1,5,8]
def part_a():
while len(data) != 2020:
last_num = data[-1]
if last_num not in data[:-1]:
data.append(0)
else:
last_occ = list(locate(data, lambda x: x == last_num))
next_num = last_occ[-1]-last_occ[-2]
data.append(next_num)
print(data[-1])
part_a()
data = [13,0,10,12,1,5,8]
def part_b():
dict_of_nums = {13:1, 0:2, 10:3, 12:4, 1:5, 5:6, 8:7}
last_num = data[-1]
next_num = 0
step = len(data)+1
while len(data) != 30000000:
if last_num not in dict_of_nums:
next_num = 0
data.append(0)
else:
next_num = (step-dict_of_nums[last_num])-1
data.append(next_num)
dict_of_nums[last_num]=step-1
last_num = next_num
step+=1
print(next_num)
part_b()
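# Sanity check (added, not part of the original file): this is the AoC 2020 day 15
# "memory game"; with the example seed [0, 3, 6] the 2020th number spoken is 436
# and the 30,000,000th is 175594, which the same last-seen-turn bookkeeping as
# part_b reproduces.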
| true
|
ecab57d896892b91b4352049e57bb4bd4b8d986e
|
Python
|
sebastiandres/mat281_2018S2
|
/m01_introduccion/02_data_science_toolkit/labFunctions.py
|
UTF-8
| 1,108
| 3.609375
| 4
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
def tribonacci(n):
if n in (1, 2, 3):
return 1
else:
return tribonacci(n - 1) + tribonacci(n - 2) + tribonacci(n - 3)
def tallest_player(nba_player_data):
height_dict = {}
for player, value in nba_player_data.items():
ft, inch = value[3].split('-')
tmp_height = int(ft) * 0.3048 + int(inch) * 0.0254
height_dict[player] = tmp_height
height_max = max(height_dict.values())
tallest_list = [player for player in height_dict.keys() if height_dict[player] == height_max]
return tallest_list
def more_time_position_player(nba_player_data, position):
if position not in ['F-C', 'C-F', 'C', 'G', 'F', 'G-F', 'F-G']:
print('Ingrese una posición válida.')
return
else:
time_dict = {}
for player, value in nba_player_data.items():
if value[2] == position:
time_dict[player] = value[1] - value[0]
time_max = max(time_dict.values())
more_time_list = [player for player in time_dict.keys() if time_dict[player] == time_max]
return more_time_list
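# Optional memoised variant (added, not part of the original file): the naive
# recursion above recomputes subproblems exponentially; caching keeps the same
# interface while making large n practical.
from functools import lru_cache

@lru_cache(maxsize=None)
def tribonacci_memo(n):
    if n in (1, 2, 3):
        return 1
    return tribonacci_memo(n - 1) + tribonacci_memo(n - 2) + tribonacci_memo(n - 3)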
| true
|
8d24e5fda74320b877045ccc78b27c6e7de361a2
|
Python
|
xiao2912008572/Appium
|
/StoneUIFramework/public/common/readconfig.py
|
UTF-8
| 408
| 2.546875
| 3
|
[] |
no_license
|
__author__ = 'xiaoj'
import configparser
class Config:
def __init__(self,configPath):
self.configPath = configPath
def get_PATH(self,path_Section,path_NO):
cf = configparser.ConfigParser()
cf.read(self.configPath)
# path_Section is filled in with "PATH_YUNKU"
# here path_config = "path_001", and so on
path = cf.get(path_Section,path_NO)
return path
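# Usage sketch (added, not part of the original file; assumes a config file with a
# [PATH_YUNKU] section containing a path_001 key):
#   cfg = Config("config.ini")
#   yunku_path = cfg.get_PATH("PATH_YUNKU", "path_001")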
| true
|
969f525ec7e65683a9abd6e9448de56a780031f7
|
Python
|
NandaGopal56/Programming
|
/PROGRAMMING/python practice/oops-3.py
|
UTF-8
| 355
| 3.421875
| 3
|
[] |
no_license
|
class test:
def m1(self):
print('i am non static method')
@classmethod
def m2(cls):
print('i am class method')
@staticmethod
def m3():
print('i am static method')
def main():
obj=test()
test.m1(obj)
obj.m1()
test.m2()
obj.m2()
test.m3()
obj.m3()
main()
| true
|
f904d1b7d7b7b0e997884274f33a4918e96584ad
|
Python
|
astreltsov/firstproject
|
/Eric_Matthes_BOOK/DICTIONARY/poll.py
|
UTF-8
| 335
| 2.765625
| 3
|
[] |
no_license
|
favorite_languages = {
'jen': 'python',
'sarah': 'c',
'edward': 'ruby',
'phil': 'python'
}
coders = ['jen', 'edward', 'peter']
for coder in coders:
if coder in favorite_languages.keys():
print(f"{coder.title()}, thank you for taking poll!")
else:
print(f"{coder.title()}, need to take a poll.")
| true
|
5ad511514dcf2df93c6c27fc7c1a9471e196fd38
|
Python
|
martintb/typyCreator
|
/molecules/stickyBead.py
|
UTF-8
| 788
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
from molecule import molecule
import numpy as np
def create(*args,**kwargs):
return bead(*args,**kwargs)
class bead(molecule):
def __init__(self,
bigDiameter=1.0,
bigType='A',
stickyDiameter=1.0,
stickyType='B',
stickyBondType='bondA',
stickyBondLength=1.0/3.0,
):
super(bead,self).__init__()
self.name='bead'
self.placed=False
self.natoms=2
self.positions=np.array([[0.,0.,0.],[0.,0.,stickyBondLength]])
self.types=[bigType,stickyType]
self.diameters=[bigDiameter,stickyDiameter]
self.bonds=[[stickyBondType, 0, 1]]
self.dihedrals=[]
self.angles=[]
self.bodies=[-1,-1]
self.beadVol=(4.0/3.0) * np.pi *(bigDiameter/2.0)**(3.0)
| true
|
7a97aaca4afb1e5e23a19ba19307a4f8e8f4ce7e
|
Python
|
Zillow-SJ/Cluster_Zillow
|
/explore_final.py
|
UTF-8
| 10,683
| 2.734375
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
import pandas_profiling
import prep
import seaborn as sns
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
# df = prep.prep_df()
# df_2 = df.drop(columns = ["fips", "latitude", "longitude", "regionidcity", "regionidcounty", "regionidzip"])
# explore_df = pd.Series(df_2.corrwith(df["logerror"]))
# explore_df.nlargest(n=5)
# explore_df.nsmallest(n=5)
# #Seeing 5 largest and 5 smallest correlations.
# profile = df.profile_report()
# rejected_variables = profile.get_rejected_variables(threshold=0.9)
# profile
# X_train, y_train, X_test, y_test = prep.get_train_test_split(df_2)
# from sklearn.linear_model import LinearRegression
# from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
# lm = LinearRegression()
# efs = EFS(lm, min_features=4, max_features=7, \
# scoring='MSE', cv=10, n_jobs=-1)
# efs.fit(X_train, y_train)
# print('Best subset:', efs.best_idx_)
# df_2.columns
# Best Params = Bathrooms, Tax_value, lotsizesquarefeet
#Scaling and refitting.
# from sklearn import preprocessing
# scaler = preprocessing.StandardScaler()
# scaled_log = scaler.fit_transform(df_2[["logerror"]])
# df_3 = df_2[["bathrooms", "tax_value", "lotsizesquarefeet", "logerror"]]
# train_2, test_2 = train_test_split(df_3, train_size = .75, random_state = 123)
# X_train_2 = train_2.drop(columns=["logerror"])
# y_train_2 = train_2["logerror"]
# X_test_2 = test_2.drop(columns=["logerror"])
# y_test_2 = test_2["logerror"]
# reg = lm.fit()
def scatter_plot(feature, target):
import matplotlib.pyplot as plt
plt.figure(figsize=(16,8))
# NOTE: relies on a module-level dataframe df_3 (built in the commented-out prep above)
plt.scatter(df_3[feature], df_3[target], c="black")
plt.xlabel(f"{feature}")
plt.ylabel(f"{target}")
plt.show()
# scatter_plot("tax_value", "logerror")
# scatter_plot("bathrooms", "logerror")
# scatter_plot("lotsizesquarefeet", "logerror")
# scatter_plot("yearbuilt", "logerror")
# function to cluster on y_train and merge back to the train dataframe.
def target_cluster(y_train,X_train,num_clusters):
kmeans =KMeans(n_clusters=num_clusters)
kmeans.fit(y_train)
y_train['cluster'] = kmeans.predict(y_train)
train = X_train.merge(y_train,left_index=True,right_index=True)
return train
def elbow_plot(target):
ks = range(1,10)
sse = []
for k in ks:
kmeans = KMeans(n_clusters=k)
kmeans.fit(target)
# inertia: Sum of squared distances of samples to their closest cluster center.
sse.append(kmeans.inertia_)
print(pd.DataFrame(dict(k=ks, sse=sse)))
plt.plot(ks, sse, 'bx-')
plt.xlabel('k')
plt.ylabel('SSE')
plt.title('The Elbow Method to find the optimal k')
plt.show()
#function to cluster on X_train and merge back with train dataframe
def x_cluster(X_train,X_test,num_clusters):
kmeans = KMeans(n_clusters=num_clusters)
kmeans.fit(X_train)
X_train['x_cluster'] = kmeans.predict(X_train)
X_test['x_cluster'] = kmeans.predict(X_test)
return X_train, X_test, kmeans
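# Usage sketch (added, not part of the original file): a typical flow with the
# helpers above would be to pick k from the elbow plot and then cluster, e.g.
#   elbow_plot(y_train)
#   X_train, X_test, km = x_cluster(X_train, X_test, num_clusters=4)
#   train = target_cluster(y_train, X_train, num_clusters=4)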
def bad_dist():
import pandas as pd
from sklearn.cluster import KMeans
from sklearn import preprocessing
import matplotlib.pyplot as plt
import seaborn as sns
import prep
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
df = prep.prep_df()
df["tax_per_sqft"] = df.tax_value/df.sqft
train, test = prep.get_train_and_test(df)
train.drop(columns=["tax_value", "sqft"], inplace=True)
test.drop(columns=["tax_value", "sqft"], inplace=True)
scaler = preprocessing.MinMaxScaler()
train.drop(columns=["latitude", "longitude"], inplace=True)
test.drop(columns=["latitude", "longitude"], inplace=True)
scaled_train = scaler.fit_transform(train[["logerror"]])
scaled_test = scaler.fit_transform(test[["logerror"]])
train["logerror"] = scaled_train
test["logerror"] = scaled_test
X_train, y_train, X_test, y_test = prep.get_train_test_split(train, test)
def uneven_dist_chart_train():
sns.distplot(y_train)
plt.xlim(.4, .8)
plt.suptitle("Train Logerror Distribution")
plt.show()
def uneven_dist_chart_test():
sns.distplot(y_test)
plt.xlim(.4,.8)
plt.suptitle("Test Logerror Distribution")
plt.show()
x = uneven_dist_chart_train()
y = uneven_dist_chart_test()
return x, y
def logerror_outliers():
df = prep.prep_df_initial()
train, test = prep.get_train_and_test(df)
logerror_outliers = train[(train.logerror < -1)]
logerrors_below = logerror_outliers.mean()
logerrors_below = pd.DataFrame(logerrors_below)
logerrors_below = logerrors_below.T
logerrors_below
from statistics import stdev
logerrors_normal = train[(train.logerror < 0.03) & (train.logerror > -0.02)]
logerrors_normal = logerrors_normal.mean()
logerrors_normal = pd.DataFrame(logerrors_normal)
logerrors_normal = logerrors_normal.T
logerrors_normal
logerror_outliers_above = train[(train.logerror > 1)]
logerrors_above = logerror_outliers_above.mean()
logerrors_above = pd.DataFrame(logerrors_above)
logerrors_above = logerrors_above.T
logerrors_above["price_sqft"] = logerrors_above.tax_value/logerrors_above.sqft
logerrors_below["price_sqft"] = logerrors_below.tax_value/logerrors_below.sqft
logerrors_normal["price_sqft"] = logerrors_normal.tax_value/logerrors_normal.sqft
df = logerrors_above.append(logerrors_normal)
df = df.append(logerrors_below)
df.drop(columns='tax_per_sqft',inplace=True)
df.index = ['logerrors>1', 'logerrors~0', 'logerrors<-1']
return df
def strat():
import pandas as pd
from sklearn.cluster import KMeans
from sklearn import preprocessing
import matplotlib.pyplot as plt
import seaborn as sns
import prep
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedShuffleSplit
import numpy as np
df = prep.prep_df()
df["tax_per_sqft"] = df.tax_value/df.sqft
df["logbin"] = df.logerror.round(decimals=1)
df = df[df.logbin != 2.8]
df = df[df.logbin != -3.7]
df = df[df.logbin != -4.7]
df = df[df.logbin != 5.3]
df.logbin.value_counts()
X = df.drop(columns=["logerror"])
y = df[["logerror"]]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.30, random_state=42, stratify=df.logbin)
train = X_train.join(y_train)
test = X_test.join(y_test)
def dist_chart_train():
sns.distplot(y_train)
plt.suptitle("Train Logerror Distribution")
plt.show()
def dist_chart_test():
sns.distplot(y_test)
plt.suptitle("Test Logerror Distribution")
plt.show()
x = dist_chart_train()
y = dist_chart_test()
return x, y
def strat_test_train():
import pandas as pd
from sklearn.cluster import KMeans
from sklearn import preprocessing
import matplotlib.pyplot as plt
import seaborn as sns
import prep
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedShuffleSplit
import numpy as np
df = prep.prep_df()
df["tax_per_sqft"] = df.tax_value/df.sqft
df["logbin"] = df.logerror.round(decimals=1)
df = df[df.logbin != 2.8]
df = df[df.logbin != -3.7]
df = df[df.logbin != -4.7]
df = df[df.logbin != 5.3]
df.logbin.value_counts()
X = df.drop(columns=["logerror"])
y = df[["logerror"]]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.30, random_state=42, stratify=df.logbin)
train = X_train.join(y_train)
test = X_test.join(y_test)
return train, test
def final_model():
import pandas as pd
from sklearn.cluster import KMeans
from sklearn import preprocessing
import matplotlib.pyplot as plt
import seaborn as sns
import prep
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedShuffleSplit
import numpy as np
df = prep.prep_df()
df["tax_per_sqft"] = df.tax_value/df.sqft
df["logbin"] = df.logerror.round(decimals=1)
df = df[df.logbin != 2.8]
df = df[df.logbin != -3.7]
df = df[df.logbin != -4.7]
df = df[df.logbin != 5.3]
df.logbin.value_counts()
X = df.drop(columns=["logerror"])
y = df[["logerror"]]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.30, random_state=42, stratify=df.logbin)
train = X_train.join(y_train)
test = X_test.join(y_test)
train.drop(columns=["tax_value", "sqft", "logbin"], inplace=True)
test.drop(columns=["tax_value", "sqft", "logbin"], inplace=True)
scaler = preprocessing.MinMaxScaler()
train.drop(columns=["latitude", "longitude"], inplace=True)
test.drop(columns=["latitude", "longitude"], inplace=True)
scaled_train = scaler.fit_transform(train[["logerror"]])
scaled_test = scaler.transform(test[["logerror"]])
train["logerror"] = scaled_train
test["logerror"] = scaled_test
X_train, y_train, X_test, y_test = prep.get_train_test_split(train, test)
#MinMaxScale Logerror, drop lat and long, split to train and test data on logerror.
lm = LinearRegression()
regr = lm.fit(X_train, y_train)
ypred_train = regr.predict(X_train)
ypred_test = regr.predict(X_test)
x = mean_squared_error(y_train, ypred_train)
#output MSE for Train model is: 0.02817864224808966
y = mean_squared_error(y_test, ypred_test)
print (f"MSE on Train:{x}, MSE on Test {y}")
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
fig, axes = plt.subplots(1,1, figsize=(5,3))
axes.plot(test.logerror, y_test, "bo", label = "actuals", alpha=0.5)
axes.plot(test.logerror, ypred_test, "ro", label="predictions", alpha=0.5)
plt.xlabel("train.logerror")
plt.ylabel("Logerror")
plt.legend()
plt.suptitle("Linear Regression")
plt.show()
fig, axes = plt.subplots(1,1, figsize=(5,3))
axes.plot(train.logerror, y_train, "bo", label = "actuals", alpha=0.5)
axes.plot(train.logerror, ypred_train, "ro", label="predictions", alpha=0.5)
plt.xlabel("train.logerror")
plt.ylabel("Logerror")
plt.legend()
plt.suptitle("Linear Regression")
plt.show()
| true
|
db691d0b4552ff99d0da63fa2617f073bb265eb0
|
Python
|
StevenChen8759/DSAI_2021_HW04
|
/trainer.py
|
UTF-8
| 8,753
| 2.65625
| 3
|
[] |
no_license
|
import time
import argparse
from loguru import logger
import pandas as pd
from utils import csvIO, modelIO
from preprocessor import (
sales_feature,
data_operation,
data_integrator,
data_cleaner,
data_normalizer,
)
from predictor import DTR, XGBoost, kMeans
def main_old():
total_month_count = 34
item_count = 22170
shop_count = 60
logger.info("Reading dataset, file name: ./dataset/train_monthly_sales.csv")
monthly_sales_train = csvIO.read_csv_to_pddf("./dataset/train_monthly_sales.csv")
# print(monthly_sales_train)
logger.debug(f"Expected: {total_month_count * item_count * shop_count}, Real: {len(monthly_sales_train)} ({len(monthly_sales_train) * 100 / (total_month_count * item_count * shop_count):.2f}%)")
# # Do feature extraction (Deprecated)
# logger.info("Do feature extraction")
# feature_collection = data_operation.feature_extract(monthly_sales_train)
# # Encode Data - Input with Item ID, Shop ID and Month ID (Deprecated)
# logger.info("[Data Composing] Build Data for model fitting")
# data_in, data_out = data_operation.compose_data(monthly_sales_train.query("item_cnt_day > 0"), feature_collection) # Filter sales < 0
# Do Time Series Data Encoding
logger.info("[Time Series Data] Encode Data for model fitting")
data_in, data_out = data_operation.encode_time_series_data(monthly_sales_train)
# decision_tree_model = DTR.train(data_in, data_out)
# modelIO.save(decision_tree_model, "dtr_8_all_ts")
xgbr, xgbr_rmse = XGBoost.train(data_in, data_out.drop("shop_id", axis=1))
modelIO.save(xgbr, f"xgbRegressor_all_ts")
# xgbr_list = []
# xgbr_rmse_list = []
# shop_list = range(60) # 6, 9, 12, 15, 18, 20, 22, 25, 27, 28, 31, 42, 43, 54, 55, 57
# for i in shop_list:
# logger.info(f"Individual shop ID: {i}")
# train_in = data_in.query(f"shop_id == {i}").reset_index(drop=True)
# train_out = data_out.query(f"shop_id == {i}").reset_index(drop=True)
# train_in = train_in.drop("shop_id", axis=1)
# train_out = train_out.drop("shop_id", axis=1)
# print(train_in)
# print(train_out)
# # decision_tree_model = DTR.train(data_in, data_out)
# # modelIO.save(decision_tree_model, "dtr_8")
# xgbr, xgbr_rmse = XGBoost.train(train_in, train_out)
# xgbr_list.append(xgbr)
# xgbr_rmse_list.append(xgbr_rmse)
# list_avg = lambda x: sum(x) / len(x)
# logger.info(f"Overall RMSE average: {list_avg(xgbr_rmse_list)}")
# print(xgbr_rmse_list)
# for i in range(len(xgbr_list)):
# modelIO.save(xgbr_list[i], f"xgbRegressor_shop_{i}")
def main():
#-----------------------------------------------------------------------------------
# Phase 1. Read Data
# a. Read original training data
logger.info("Reading dataset, file name: ./dataset/sales_train.csv")
original_train_data = csvIO.read_csv_to_pddf("./dataset/sales_train.csv")
# b. Read item information - including category info
logger.info("Reading dataset, file name: ./dataset/items.csv")
item_info = csvIO.read_csv_to_pddf("./dataset/items.csv")
#-----------------------------------------------------------------------------------
# Phase 2. Integrate Training Data and Do Feature Extraction
# a. Integrate train data to monthly information, then join category information
logger.info("Aggregate original training data to monthly sales.")
agg_sales_train_data = data_integrator.integrate_monthly_sales(original_train_data)
# b. Join Category Info
logger.info("Join category information.")
agg_sales_train_data = data_integrator.join_category_info(agg_sales_train_data, item_info)
# c. Permutate agg_sales_train_data
agg_sales_train_data = agg_sales_train_data[
['date_block_num', 'shop_id', 'item_id', 'item_category_id',
'avg_sales_price', 'total_sales', 'total_record_count']
]
# d. Do statistics - Monthly sales of specific category on specific shop
logger.info("Do statistics - Monthly sales of specific category on specific shop")
category_sales_on_shop_per_month = sales_feature.shop_seasonal_sales_of_category(agg_sales_train_data)
# e. Do statistics - Monthly sales of all item without distinguishing shop
logger.info("Do statistics - Monthly sales of all item without distinguishing shop")
monthly_total_item_sales = sales_feature.item_total_sales(agg_sales_train_data)
logger.info("Do statistics - Monthly sales of all item category without distinguishing shop")
monthly_total_item_cat_sales = sales_feature.item_category_total_sales(agg_sales_train_data)
#-----------------------------------------------------------------------------------
# Phase 3. Data Cleaning and Normalization
# a. Clean outlier of training data
logger.info("Clean Outliers of Training Data")
clean_outlier_train_data = data_cleaner.remove_outlier(agg_sales_train_data)
#-----------------------------------------------------------------------------------
# Phase 4. k-Means clustering for extract feature of popularity
logger.info("Do sales heat auto clustering")
category_heat_value = kMeans.extract_sales_heat(category_sales_on_shop_per_month)
csvIO.write_pd_to_csv(
category_heat_value,
"category_heat_value.csv",
False,
)
# category_heat_value = csvIO.read_csv_to_pddf(
# "./output/category_heat_value.csv"
# )
# print(category_heat_value)
monthly_item_heat_value = kMeans.extract_monthly_item_sales_heat(
monthly_total_item_sales
)
csvIO.write_pd_to_csv(
monthly_item_heat_value,
"monthly_item_heat_value.csv",
False
)
# monthly_item_heat_value = csvIO.read_csv_to_pddf(
# "./output/monthly_item_heat_value.csv"
# )
monthly_item_cat_heat_value = kMeans.extract_monthly_item_cat_sales_heat(
monthly_total_item_cat_sales
)
csvIO.write_pd_to_csv(
monthly_item_cat_heat_value,
"monthly_item_cat_heat_value.csv",
False
)
# monthly_item_heat_value = csvIO.read_csv_to_pddf(
# "./output/monthly_item_heat_value.csv"
# )
#-----------------------------------------------------------------------------------
# Phase 5. Do Input Data Normalization
# Step 1. Integrate features
logger.info("Join sales heat feature")
train_data_with_feature = data_integrator.feature_join(
clean_outlier_train_data,
category_heat_value,
monthly_item_heat_value,
monthly_item_cat_heat_value,
)
assert len(clean_outlier_train_data) == len(train_data_with_feature), "Inconsistent length of data before/after join operation"
train_data_with_feature.drop(
columns=[
'cat_shop_total_sales_sum',
'monthly_total_item_sales_sum',
'monthly_total_item_cat_sales_sum',
'cat_shop_total_sales_mean',
'monthly_total_item_sales_mean',
'monthly_total_item_cat_sales_mean',
# 'avg_sales_price',
'total_record_count',
],
inplace=True
)
# Step 2. Normalization
# logger.info("Data Normalization")
norm_train_data = (
data_normalizer.train_norm(train_data_with_feature)
)
logger.debug("Use Original value on fitting target - total_sales")
# Cancel Normalization on Output Data and Specific Columns
norm_train_data["date_block_num"] = train_data_with_feature["date_block_num"]
norm_train_data["total_sales"] = train_data_with_feature["total_sales"]
# norm_train_data = train_data_with_feature
print(norm_train_data.columns)
# ts_train_data_norm = ts_train_data
#-----------------------------------------------------------------------------------
# Phase 6. Encode Time Series Data
# Step 1. Encode all features as time series data
logger.info("Encode Time Series Data")
ts_train_data = data_integrator.encode_time_series_data(
norm_train_data,
24,
)
# Step 2. Do normalization for month ID
ts_train_data["date_block_num"] = ts_train_data["date_block_num"].apply(
lambda x: (x % 12) / 11  # scale the month index to [0, 1]
)
print(len(ts_train_data))
#-----------------------------------------------------------------------------------
# Phase 7. XGBoostRegressor Training and Evaluate Performance
logger.info("Fit XGBoost Regressor")
xgbr = XGBoost.train(ts_train_data, 0.20)
modelIO.save(xgbr, "xgbr_new_feature")
#-----------------------------------------------------------------------------------
# Phase 8. Do Inference Based on Training Result
if __name__ == "__main__":
main()
| true
|
7c8906d675445eef4d3a953dcc28893c456b30f6
|
Python
|
riklauder/ProjectMokman
|
/src/astartwo.py
|
UTF-8
| 4,905
| 3.453125
| 3
|
[] |
no_license
|
#Currently in use by Ghosts in ghosts.py
import numpy as np
import heapq
class Node:
"""
A node class for A* Pathfinding
parent is parent of the current Node
position is current position of the Node in the maze
g is cost from start to current Node
h is heuristic based estimated cost for current Node to end Node
f is total cost of present node i.e. : f = g + h
"""
def __init__(self, parent=None, position=None):
self.parent = parent
self.position = position
self.g = 0
self.h = 0
self.f = 0
def __eq__(self, other):
return self.position == other.position
#This function return the path of the search
def return_path(current_node,maze):
path = []
no_rows, no_columns = np.shape(maze)
# here we create the initialized result maze with -1 in every position
result = [[-1 for i in range(no_columns)] for j in range(no_rows)]
current = current_node
while current is not None:
path.append(current.position)
current = current.parent
# Return reversed path as we need to show from start to end path
path = path[::-1]
start_value = 0
# we update the path from start to end found by A-star search, with every step incremented by 1
for i in range(len(path)):
result[path[i][0]][path[i][1]] = start_value
start_value += 1
return result
def search(maze, cost, start, end):
"""
currently switches start and end to optimize for ghosts path TO player
:param maze: map grid with walls marked 1 and walkable cells 0
:param cost
:param start:
:param end:
:return:
"""
# Create start and end nodes with initialized values for g, h and f
start_node = Node(None, tuple(start))
start_node.g = start_node.h = start_node.f = 0
end_node = Node(None, tuple(end))
end_node.g = end_node.h = end_node.f = 0
yet_to_visit_list = []
visited_list = []
# Add the start node
yet_to_visit_list.append(start_node)
# Sentinel after some reasonable number of steps
outer_iterations = 0
max_iterations = (len(maze) // 2) ** 10
# what squares to search - direction in this rotation
move = [[-1, 0 ], # go up
[ 0, -1], # go left
[ 1, 0 ], # go down
[ 0, 1 ]] # go right
#find maze has got how many rows and columns
no_rows, no_columns = np.shape(maze)
# Loop until end
while len(yet_to_visit_list) > 0:
outer_iterations += 1
# Get the current node
current_node = yet_to_visit_list[0]
current_index = 0
for index, item in enumerate(yet_to_visit_list):
if item.f < current_node.f:
current_node = item
current_index = index
# return the partial path if the computation cost gets too high
if outer_iterations > max_iterations:
print ("giving up on pathfinding too many iterations")
return return_path(current_node,maze)
# Pop current node
yet_to_visit_list.pop(current_index)
visited_list.append(current_node)
# goal is reached or not
if current_node == end_node:
return return_path(current_node,maze)
# Generate children from adj squares
children = []
for new_position in move:
node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])
# Make sure within range of maze
if (node_position[0] > (no_rows - 1) or
node_position[0] < 0 or
node_position[1] > (no_columns -1) or
node_position[1] < 0):
continue
# Verify terrain / frontier (non-zero cells are walls)
if maze[node_position[0]][node_position[1]] != 0:
continue
new_node = Node(current_node, node_position)
children.append(new_node)
# Loop through children
for child in children:
# child on the visited list (search visited list)
if len([visited_child for visited_child in visited_list if visited_child == child]) > 0:
continue
# Compute heuristic costs
child.g = current_node.g + cost
## Heuristic cost calculated here using squared Euclidean distance
child.h = (((child.position[0] - end_node.position[0]) ** 2) +
((child.position[1] - end_node.position[1]) ** 2))
child.f = child.g + child.h
# Child is already in the yet_to_visit list and g cost is already lower
if len([i for i in yet_to_visit_list if child == i and child.g > i.g]) > 0:
continue
# Add the child to the yet_to_visit list
yet_to_visit_list.append(child)
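# Minimal usage sketch (added, not part of the original file): 0 cells are walkable
# and non-zero cells are walls; the returned grid holds -1 for cells off the path
# and the step index along the found route otherwise.
if __name__ == "__main__":
    demo_maze = [[0, 1, 0, 0],
                 [0, 1, 0, 1],
                 [0, 0, 0, 1],
                 [1, 0, 0, 0]]
    for row in search(demo_maze, cost=1, start=[0, 0], end=[3, 3]):
        print(row)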
| true
|
b4f1f4eea9ce750cde5a609c0db3196932a6e89b
|
Python
|
antoinebeck/Codingame
|
/Puzzles/Python/Easy/horse_racing_duals.py
|
UTF-8
| 459
| 3.203125
| 3
|
[] |
no_license
|
## Array optimisation from codingame "Horse Racing Duals" puzzle
## https://www.codingame.com/training/easy/horse-racing-duals
## solution by Antoine BECK 03-15-2017
import sys
import math
n = int(input())
pi = []
diff = 10000000
tmp = 0
for i in range(n):
pi.append(int(input()))
pi.sort() # Using the sort function (optimized)
for i in range(n-1):
tmp = pi[i+1] - pi[i]
if tmp < diff:
diff = tmp
if diff == 0:
break
print(diff)
| true
|
3883852ad50bbb1b1279aac8f484af200ce4e7d6
|
Python
|
LuanReinheimer/Work_Space-Python
|
/CursoPython/Ex084 anotacoes.py
|
UTF-8
| 733
| 3.921875
| 4
|
[] |
no_license
|
pessoas = [['lucas',23], ['luan',23], ['bebeto', 25]]
for nome in pessoas:
print(f' {nome[0]} tem {nome[1]} anos de idade. ')
#-----------------------------------------------------------------------------
galera = []
dado = []
totalmaior = 0
totalmenor = 0
for c in range(5):
dado.append(str(input('NOME: ')))
dado.append(int(input('IDADE: ')))
galera.append(dado.copy())
dado.clear()
print(f'A Galera da lista é {galera}')
for p in galera:
if p[1] >= 21:
print(f'{p[0]} é maior de idade.')
totalmaior = totalmaior + 1
else:
print(f'{p[0]} é menor de idade')
totalmenor = totalmenor + 1
print(f'Temos {totalmaior} maiores de idade e {totalmenor} menores de idade.')
| true
|
195d7c2004dac773b8f569dcc61865a09e0edcbd
|
Python
|
mkdvice/Python-Iniciante-
|
/ReajusteSalarial.py
|
UTF-8
| 353
| 3.65625
| 4
|
[] |
no_license
|
def salario_reajuste(salario, reajuste): # define the function
return salario * reajuste // 100 + salario # compute the adjusted salary
reajuste = salario_reajuste(float(input("Digite o valor do salário: ")), float(input("Digite o valor do reajuste: "))) # read the input values
print("Seu salário agora é R${}".format(reajuste)) # print the adjusted salary
| true
|
2f532d4365a8dd7ad59f53cfb0a9567c4f8b96e9
|
Python
|
jfpio/TKOM-Interpreter
|
/interpreter/models/constants.py
|
UTF-8
| 3,285
| 2.890625
| 3
|
[] |
no_license
|
from dataclasses import dataclass
from enum import Enum
from typing import Union, Type
from interpreter.token.token_type import TokenType
@dataclass
class CurrencyType:
name: str
@dataclass
class CurrencyValue(CurrencyType):
value: float
def __add__(self, other):
return CurrencyValue(self.name, self.value + other.value)
def __sub__(self, other):
return CurrencyValue(self.name, self.value - other.value)
def __mul__(self, other):
return CurrencyValue(self.name, self.value * other)
def __truediv__(self, other):
return CurrencyValue(self.name, self.value / other)
def __mod__(self, other):
return CurrencyValue(self.name, self.value % other)
def __str__(self):
return f"{self.value}{self.name}"
def __int__(self):
return int(self.value)
def __float__(self):
return self.value
def __bool__(self):
if self.value == 0:
return False
else:
return True
CustomTypeOfTypes = Union[Type, CurrencyType]
PossibleTypes = Union[int, float, str, bool, CurrencyValue]
TOKEN_TYPES_INTO_TYPES = {
TokenType.INT: int,
TokenType.FLOAT: float,
TokenType.STRING: str,
TokenType.BOOL: bool,
}
POSSIBLE_TOKEN_TYPES = list(TOKEN_TYPES_INTO_TYPES.keys()) + [TokenType.CURRENCY]
class RelationshipOperator(Enum):
EQUAL_OPERATOR = '=='
NOT_EQUAL_OPERATOR = '/='
LESS_THAN_OPERATOR = '<'
GREATER_THAN_OPERATOR = '>'
LESS_THAN_OR_EQUAL_OPERATOR = '<='
GREATER_THAN_OPERATOR_OR_EQUAL_OPERATOR = '>='
TOKEN_TYPE_INTO_RELATIONSHIP_OPERAND = {
TokenType.EQUAL_OPERATOR: RelationshipOperator.EQUAL_OPERATOR,
TokenType.NOT_EQUAL_OPERATOR: RelationshipOperator.NOT_EQUAL_OPERATOR,
TokenType.LESS_THAN_OPERATOR: RelationshipOperator.LESS_THAN_OPERATOR,
TokenType.GREATER_THAN_OPERATOR: RelationshipOperator.GREATER_THAN_OPERATOR,
TokenType.LESS_THAN_OR_EQUAL_OPERATOR: RelationshipOperator.LESS_THAN_OR_EQUAL_OPERATOR,
TokenType.GREATER_THAN_OPERATOR_OR_EQUAL: RelationshipOperator.GREATER_THAN_OPERATOR_OR_EQUAL_OPERATOR
}
RELATIONSHIP_OPERAND_INTO_LAMBDA_EXPRESSION = {
RelationshipOperator.EQUAL_OPERATOR: lambda x, y: x == y,
RelationshipOperator.NOT_EQUAL_OPERATOR: lambda x, y: x != y,
RelationshipOperator.LESS_THAN_OPERATOR: lambda x, y: x < y,
RelationshipOperator.GREATER_THAN_OPERATOR: lambda x, y: x > y,
RelationshipOperator.LESS_THAN_OR_EQUAL_OPERATOR: lambda x, y: x <= y,
RelationshipOperator.GREATER_THAN_OPERATOR_OR_EQUAL_OPERATOR: lambda x, y: x >= y
}
class SumOperator(Enum):
ADD = '+'
SUB = '-'
TOKEN_TYPE_INTO_SUM_OPERATOR = {
TokenType.ADD_OPERATOR: SumOperator.ADD,
TokenType.SUB_OPERATOR: SumOperator.SUB
}
class MulOperator(Enum):
MUL = '*'
DIV = '/'
MODULO = '%'
token_type_into_mul_operator = {
TokenType.MUL_OPERATOR: MulOperator.MUL,
TokenType.DIV_OPERATOR: MulOperator.DIV,
TokenType.MODULO_OPERATOR: MulOperator.MODULO
}
ARITHMETIC_OPERATOR_INTO_LAMBDA_EXPRESSION = {
SumOperator.ADD: lambda x, y: x + y,
SumOperator.SUB: lambda x, y: x - y,
MulOperator.MUL: lambda x, y: x * y,
MulOperator.DIV: lambda x, y: x / y,
MulOperator.MODULO: lambda x, y: x % y
}
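# Usage sketch (added, not part of the original file): CurrencyValue supports
# arithmetic against values of the same currency and scaling by plain numbers,
# and the operator tables map parsed token types onto small lambdas, e.g.
#   CurrencyValue('USD', 10.0) + CurrencyValue('USD', 2.5)                 # 12.5USD
#   ARITHMETIC_OPERATOR_INTO_LAMBDA_EXPRESSION[SumOperator.ADD](1, 2)      # 3
#   RELATIONSHIP_OPERAND_INTO_LAMBDA_EXPRESSION[RelationshipOperator.LESS_THAN_OPERATOR](1, 2)  # True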
| true
|
dbd88e6b83d319de373314f4645b7061456a49e4
|
Python
|
Satwik95/Coding-101
|
/LeetCode/Top 100/sub_array_sum.py
|
UTF-8
| 707
| 3.21875
| 3
|
[] |
no_license
|
class Solution(object):
def subarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
#return sum(sum(nums[j:i]) == k for i in range(len(nums)+1) for j in range(i))
# keep track of how many times each running (prefix) sum has occurred so far;
# a plain dp array is not enough, so use a hash map from prefix sum to its count
# and add dp[cur_sum - k] matching subarrays at every step
cur_sum, count, dp = 0, 0, {0:1}
for num in nums:
cur_sum += num
count += dp.get(cur_sum-k, 0)
dp[cur_sum] = dp.get(cur_sum, 0) + 1
return count
print(Solution().subarraySum([1, 3, 4, 8], 7))
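# Worked trace (added, not part of the original file) for nums=[1, 3, 4, 8], k=7:
# running prefix sums are 1, 4, 8, 16; only 8 - 7 = 1 has been seen before,
# so the single subarray [3, 4] sums to 7 and the call above prints 1.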
| true
|
79a15354b49f44d3a715e41ad0609820e3d8a4c9
|
Python
|
dixantmittal/image-clustering-using-expectation-maximization
|
/exp_max.py
|
UTF-8
| 2,343
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
from matplotlib import image
import matplotlib.pyplot as plt
from k_means import *
import scipy.stats as st
from tqdm import tqdm
def initialize_params(k, d):
pi = np.random.rand(k)
# normalize pi
pi = pi / np.sum(pi)
mew = np.random.randn(k, d) * 100
# identity matrix
sigma = np.identity(d) * 100
sigma = np.asarray([sigma] * k)
return pi, mew, sigma
def cal_expectation(data, pi, mew, sigma, k):
n, d = data.shape
gamma = np.zeros((k, n))
for i in range(k):
gamma[i] = pi[i] * st.multivariate_normal.pdf(data, mew[i], sigma[i])
sum_gamma = np.sum(gamma, axis=0)
gamma = gamma / sum_gamma
return gamma
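# Note (added, not part of the original file): gamma[i, n] is the responsibility
# of mixture component i for pixel n, i.e. pi_i * N(x_n | mew_i, sigma_i)
# normalised over all components - the standard E-step of a Gaussian mixture model.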
def cal_maximization(data, gamma, k):
n, d = data.shape
Nk = np.sum(gamma, axis=1)
pi = Nk / n
mew = np.zeros((k, d))
for i in range(k):
mew[i] = np.sum(gamma[i] * data.transpose()) / Nk[i]
sigma = np.empty((k, d, d))
for i in range(k):
diff = data - mew[i]
sigma[i] = np.dot(gamma[i] * diff.T, diff) / Nk[i]
return pi, mew, sigma
def main():
img_mat = image.imread('images/zebra.jpg')
k = 2
h, w, d = img_mat.shape
img_mat = img_mat.reshape((h * w, d))
pi, mew, sigma = initialize_params(k, d)
mew = calculate_k_means(img_mat, k)
pi_old, mew_old, sigma_old = pi, mew, sigma
print('starting EM')
for i in tqdm(range(100)):
gamma = cal_expectation(img_mat, pi, mew, sigma, k)
pi, mew, sigma = cal_maximization(img_mat, gamma, k)
# until convergence
if np.sum((mew - mew_old) ** 2) < 1e-10:
break
mew_old = mew
prob = np.zeros((k, h * w))
for i in range(k):
prob[i] = pi[i] * st.multivariate_normal.pdf(img_mat, mew[i], sigma[i])
assignment = np.argmax(prob, axis=0)
# assignment = np.asarray(np.dstack((assignment, assignment, assignment)), dtype=np.float32) / (k - 1 + 1e-20)
img_mat = img_mat.reshape((h, w, d))
mask1 = np.asarray(img_mat * 255 * assignment.reshape((h, w, 1)), dtype=np.float32)
plt.imshow(mask1)
plt.show()
assignment = -assignment + 1
mask2 = np.asarray(img_mat * 255 * assignment.reshape((h, w, 1)), dtype=np.float32)
plt.imshow(mask2, cmap=plt.cm.gray)
plt.show()
if __name__ == '__main__':
main()
| true
|
e6a32147efb576d8b7348c2ac71009e6a2d8e49f
|
Python
|
vainotuisk/valikkursuse_materjalid
|
/Pygame/kliinik2.py
|
UTF-8
| 251
| 3.234375
| 3
|
[] |
no_license
|
## Väino Tuisk
## Kliinik 2 - division by zero and result precision
jagatav = float(input("sisesta jagatav: "))
jagaja = float(input("sisesta jagaja: "))
if (jagaja == 0):
print ("viga!")
else:
print ("Jagatis on: " + str(float(jagatav/jagaja)))
| true
|
841f326894e90fd82ad008ca98bdbfe53315fc97
|
Python
|
Energy1190/railroads-maps
|
/railmap.py
|
UTF-8
| 46,249
| 2.703125
| 3
|
[] |
no_license
|
import math
import pickle
from parserus import *
from database import *
class Station():
def __init__(self, tuple_obj):
assert len(tuple_obj) == 9
self.name = tuple_obj[0]
self.coordX = tuple_obj[-2]
self.coordY = tuple_obj[-1]
self.coords = (self.coordX, self.coordY)
self.neighbors = []
self.neighbors_num = 0
self.neighbors_coords = []
def set_neighbors(self):
x = get_one_entry('neighbors', self.name)
if x:
self.neighbors = list(filter(lambda x: False if x == 'NULL' else True, x[2:]))
self.neighbors_num = int(x[1])
for i in self.neighbors:
y = get_one_entry('stations', i, extend=True)
if y:
cx,cy = y[-2:]
self.neighbors_coords.append((cx,cy))
def get_over_station(self, name):
return get_one_entry('stations', name, fild='name', extend=True)
def get_over_station_coords(self, coords):
return get_one_entry('stations', coords[0], fild='coordinateX', name2=coords[1], fild2='coordinateY')
def check_neighbors(self, list_obj, coords_list):
def math_check(self, neighbors_coords, coords_list, coordX=self.coordX, coordY=self.coordY, result=[], count=0):
count += 1
if count == 500:
return result
areaX = (coordX-0.00001, coordX+0.00001)
areaY = (coordY-0.00001, coordY+0.00001)
for i in neighbors_coords:
if areaX[1] > i[0] > areaX[0] and areaY[1] > i[1] > areaY[0] and len(result) <4 and (i[0], i[1]) != self.coords:
result.append(i)
for i in coords_list:
if areaX[1] > i[0] > areaX[0] and areaY[1] > i[1] > areaY[0] and len(result) < 4 and (i[0], i[1]) != self.coords:
result.append(i)
if len(result) == 4 or len(result) == len(neighbors_coords):
return result
else:
return math_check(self, neighbors_coords, coords_list, coordX=coordX, coordY=coordY, result=result, count=count)
x = []
for i in list_obj:
if i in self.neighbors_coords:
x.append(i)
if self.neighbors_num == len(list_obj):
x = []
for i in list_obj:
x.append(i)
print("List is: ", list_obj)
self.parent = x
elif self.neighbors_num != len(list_obj):
alls = []
coords = []
for i in self.neighbors:
x = self.get_over_station(i)
if x and (x[0], x[-2], x[-1]) not in alls and (x[-2], x[-1]) not in coords:
alls.append((x[0], x[-2], x[-1]))
coords.append((x[-2], x[-1]))
for i in list_obj:
x = self.get_over_station_coords(i)
if x and (x[0], x[-2], x[-1]) not in alls and (x[-2], x[-1]) not in coords:
alls.append((x[0], x[-2], x[-1]))
coords.append((x[-2], x[-1]))
self.parent = math_check(self, coords, coords_list)
# print(self.name, self.neighbors_num, x)
def bild_stations():
def fail_parser():
# For some reason this is not added; apparently the error happens because the link points to an area rather than to coordinates.
insert_to_table('stations', ('Щелково', 'Щёлково', 'NULL', 'щелково', 'http://www.openstreetmap.org/node/4085266440#map=18/55.90939/38.01063&layers=N', 'NULL','NULL', '55.9093905', '38.0087521'), size='one')
# Apparently renamed to Boldino; the station has no coordinates on the map
insert_to_table('stations', ('Сушнево', 'Сушнево', 'NULL', 'сушнево', 'http://www.openstreetmap.org/#map=18/55.96049/39.77948&layers=N', 'NULL','NULL', '55.96049', '39.77948'), size='one')
size='many'
base_len = 9
drop_table('stations')
drop_table('neighbors')
drop_table('error_stations')
create_table('stations', params='(name TEXT, second_name TEXT, third_name TEXT, check_name TEXT, link TEXT, location TEXT, number REAL, coordinateX REAL, coordinateY REAL)')
create_table('neighbors', params='(name TEXT, count REAL, neighbor1 TEXT, neighbor2 TEXT, neighbor3 TEXT, neighbor4 TEXT, neighbor5 TEXT)')
create_table('error_stations', params='(name TEXT, second_name TEXT, third_name TEXT, check_name TEXT, link TEXT, location TEXT, number REAL)')
for i in ['http://osm.sbin.ru/esr/region:mosobl:l', 'http://osm.sbin.ru/esr/region:ryazan:l', 'http://osm.sbin.ru/esr/region:tul:l',
'http://osm.sbin.ru/esr/region:kaluzh:l', 'http://osm.sbin.ru/esr/region:smol:l', 'http://osm.sbin.ru/esr/region:tver:l',
'http://osm.sbin.ru/esr/region:yarosl:l', 'http://osm.sbin.ru/esr/region:ivanov:l', 'http://osm.sbin.ru/esr/region:vladimir:l']:
x = get_stations2(url=i)
datas = list(filter(None, prepare_data(x, size=size, ver=2, base_len=base_len)))
insert_to_table('stations', datas, size=size, len='({0})'.format(str('?,'*base_len)[:-1]))
fail_parser()
def bild_schedule():
def generation_of_dates(list_object):
result = []
mouth_list = [0, 'Январь', 'Февраль', 'Март', 'Апрель', 'Май', 'Июнь', 'Июль', 'Август', 'Сентябрь', 'Октябрь', 'Ноябрь', 'Декабрь']
for i in list_object:
x = i['mouth'].split(sep='(')
year = x[1][:-1]
if x[0] in mouth_list:
mouth = mouth_list.index(x[0])
else:
assert False
for j in i['days']:
y = (year, mouth, j)
result.append(y)
return result
def generation_of_times(time_list):
result = []
if len(time_list) != 4:
return False
if time_list[0] and len(time_list[0]) and ':' in time_list[0]:
x = time_list[0].split(sep=':')
result.append((int(x[0]), int(x[1])))
else:
result.append(None)
if time_list[1] and len(time_list[1]) and 'м' in time_list[1]:
x = re.findall(r'(\d*)\s?ч?м?\s?(\d*)\s?м?', time_list[1])
x = x[0]
if len(x) == 2 and x[1]:
result.append((int(x[0]), int(x[1])))
else:
result.append((None, int(x[0])))
else:
result.append(None)
if time_list[2] and len(time_list[2]) and ':' in time_list[2]:
x = time_list[2].split(sep=':')
result.append((int(x[0]), int(x[1])))
else:
result.append(None)
if time_list[3] and len(time_list[3]) and 'м' in time_list[3]:
x = re.findall(r'(\d*)\s?ч?м?\s?(\d*)\s?м?', time_list[3])
x = x[0]
if len(x) == 2 and x[1]:
result.append((int(x[0]), int(x[1])))
else:
result.append((None, int(x[0])))
else:
result.append(None)
return result
drop_table('trains')
drop_table('schedule')
drop_table('working_days')
create_table('trains', params='(number TEXT, link TEXT, departure TEXT, arrival TEXT, periodicity TEXT)')
create_table('schedule', params='(train TEXT, departure TEXT, arrival TEXT, station_link TEXT, station_name TEXT, station_number REAL, coming_time BLOB,'
' waiting_time BLOB, out_time BLOB, total_time BLOB)')
create_table('working_days', params='(train TEXT, departure TEXT, arrival TEXT, days BLOB)')
lines, trains = get_schedule2('http://moskva.elektrichki.net/raspisanie/')
trains_insert = []
schedule_insert = []
working_days_insert = []
for i in trains:
i['schedule'] = get_schedule_station(i['main_link'])
da = i['path'].split(sep=' → ')
for j in i['schedule']:
times = generation_of_times([j['coming_time'], j['waiting_time'], j['out_time'], j['total_time']])
times = list(map(pickle.dumps, times))
schedule_insert.append((i['train_number'], da[0], da[1], j['link'], j['name'],
j['station_number'], times[0],
times[1], times[2], times[3]))
if i.get('periodicity_link'):
days = get_days_of_work(i['periodicity_link'])
days = generation_of_dates(days)
days = pickle.dumps(days)
else:
days = None
trains_insert.append((i['train_number'], i['main_link'], da[0], da[1], i['periodicity']))
working_days_insert.append((i['train_number'], da[0], da[1], days))
insert_to_table('trains', trains_insert, size='many', len='(?,?,?,?,?)')
insert_to_table('schedule', schedule_insert, size='many', len='(?,?,?,?,?,?,?,?,?,?)')
insert_to_table('working_days', working_days_insert, size='many', len='(?,?,?,?)')
drop_table('lines')
create_table('lines', params='(list BLOB, empty TEXT)')
insert_to_table('lines', [(pickle.dumps(lines), None)], size='many', len='(?,?)')
def check_regexp(name):
def check(name):
count = 0
x = get_one_entry('stations', name, extend=True)
if x:
count = 1
y = get_many_entry('stations', x[0], limit=10, fild='name')
if len(y) > 1:
count = len(y)
x = y
return (x, count)
def logic(symbol, name, checks, symbol2=None):
if symbol in name and (symbol2 in name if symbol2 else True):
x = check(checks)
if x[1]:
return checks
return None
if check(name)[1]:
return check(name)
tests = []
tests.append(logic('-', name, 'Аэропорт', symbol2='Внуково'))
tests.append(logic('Аэропорт Внуково', name, 'Аэропорт'))
tests.append(logic('Аэропорт', name, name.replace(' ', '-')))
tests.append(logic('Остановочный Пункт', name, ' '.join(name.split(sep= ' ')[0:2]).lower()))
tests.append(logic('Платформа', name, ' '.join(name.split(sep=' ')[-2:]).lower(), symbol2='Км'))
tests.append(logic('Пост', name, ' '.join(name.split(sep=' ')[-2:]).lower(), symbol2='Км'))
tests.append(logic('1', name, name.replace(' 1', '-I')))
tests.append(logic('1', name, name.replace(' 1', '-1')))
tests.append(logic('1', name, name.replace(' 1', ' I')))
tests.append(logic('2', name, name.replace(' 2', '-II')))
tests.append(logic('2', name, name.replace(' 2', '-2')))
tests.append(logic('2', name, name.replace(' 2', ' II')))
tests.append(logic('3', name, name.replace(' 3', '-III')))
tests.append(logic('3', name, name.replace(' 3', '-3')))
tests.append(logic('3', name, name.replace(' 3', ' III')))
tests.append(logic('1', name, name.replace(' 1', '')))
tests.append(logic('2', name, name.replace(' 2', '')))
tests.append(logic('3', name, name.replace(' 3', '')))
tests.append(logic('Платформа', name, name.replace('Платформа ', '')))
tests.append(logic(' ', name, name.replace(' ', '-')))
tests.append(logic('е', name, name.replace('е', 'ё', 1)))
tests.append(logic('ё', name, name.replace('ё', 'е', 1).lower()))
tests.append(logic('И', name, name.replace('И', 'и').replace('М', 'м')))
tests.append(logic('и', name, name.upper()))
tests.append(logic('Депо', name, 'Депо'))
tests.append(logic('Березки Дачные', name, 'Берёзки-Дачные'))
tests.append(logic('оо', name, name.replace('оо', 'о')))
tests.append(logic('ая', name, name.replace('ая', 'ое')))
tests.append(logic('й', name, name.replace('й', 'и')))
tests.append(logic('Пл.', name, name.replace('Пл. ', '')))
tests.append(logic('Пл.', name, ' '.join(name.split(sep=' ')[1:3])))
tests.append(logic('о.п.', name, ' '.join(name.split(sep=' ')[1:3])))
tests.append(logic(' (по треб.)', name, name.replace(' (по треб.)', '')))
tests.append(logic('(', name, ' '.join(name.split(sep=' ')[0])))
tests.append(logic('(', name, ' '.join(name.split(sep=' ')[:1])))
tests.append(logic('(', name, ' '.join(name.split(sep=' ')[:2]).lower().replace('Пл. ', '')))
tests.append(logic('(', name, ' '.join(name.split(sep=' ')[:3]).lower().replace('Пл. ', '')))
tests.append(logic('Москва ', name, name.replace('Москва ', 'Москва-Пассажирская-')))
tests.append(logic('(', name, ' '.join(name.split(sep=' ')[1:3])))
tests.append(logic('(', name, ' '.join(name.split(sep=' ')[2:4]).replace('(', '').replace(')', '')))
tests.append(logic(' Тов.', name, name.replace(' Тов.', '-II')))
tests.append(logic(' Пасс.', name, name.replace(' Пасс.', '-I')))
tests.append(logic(' Пасс.', name, name.replace(' Пасс.', '-Пассажирская')))
tests.append(logic(' Тов.', name, name.replace(' Тов.', '')))
tests.append(logic(' Пасс.', name, name.replace(' Пасс.', '')))
tests.append(logic(' Тов.', name, name.replace(' Тов.', '-Товарная')))
tests.append(logic(' Сорт.', name, name.replace(' Сорт.', '-Сортировочное')))
tests.append(logic(' Сорт.', name, name.replace(' Сорт.', '-Сортировочная')))
tests.append(logic(' Центр.', name, name.replace(' Центр.', '-Центральное')))
tests.append(logic('Москва Савёловская', name, name.replace('Москва Савёловская', 'Москва-Бутырская')))
tests.append(logic(' Белорусская', name, name.replace(' Белорусская', '-Пассажирская-Смоленская')))
tests.append(logic(' Ленинградская', name, name.replace(' Ленинградская', '-Пассажирская')))
tests.append(logic('Пос.', name, name.replace('Пос.', 'Посёлок')))
tests.append(logic('Кашира', name, name.replace('Кашира', 'Кашира-Пассажирская')))
tests.append(logic('Рязань', name, 'Рязань-I'))
tests.append(logic('Бекасово Сорт.', name, 'Бекасово-Сортировочное'))
tests.append(logic('Малые Вязёмы', name, 'Малая Вязёма'))
tests.append(logic('Москва Сорт.', name, 'Москва-Сортировочная'))
tests.append(logic('Каланчёвская', name, name.replace('Каланчёвская', 'Москва-Каланчёвская')))
tests.append(logic('К', name, name + ' I'))
tests.append(logic('О', name, name + ' I'))
tests.append(logic('Е', name, name + ' I'))
try:
tests.append(logic(' ', name, ' '.join([name.split(sep=' ')[0], name.split(sep=' ')[1].upper()])))
except:
pass
for i in tests:
if i:
return check(i)
return False
def build_graph():
result = []
lines_list = ['https://www.tutu.ru/06.php', 'https://www.tutu.ru/01.php', 'https://www.tutu.ru/02.php', 'https://www.tutu.ru/05.php',
'https://www.tutu.ru/04.php', 'https://www.tutu.ru/08.php', 'https://www.tutu.ru/03.php', 'https://www.tutu.ru/07.php',
'https://www.tutu.ru/09.php', 'https://www.tutu.ru/10.php']
for i in lines_list:
result.append(get_line_map(i))
pickle.dump(result, map_file(action='wb',filename='graph.db'))
def bild_coord_graph():
def check_duplicate_coord(obj_list, num):
r = []
x = obj_list[0][-2:]
r.append(x)
for i in range(num):
if x != obj_list[i][-2:]:
r.append(obj_list[i][-2:])
return r
def filter_station():
nonlocal stations
for i in stations:
for j in stations[i]:
while stations[i].count(j) > 1:
stations[i].remove(j)
def pre_check_bild_stations():
nonlocal fails, stations
for i in pickle.load(map_file(filename='graph.db')):
for j in i:
flag = False
g = []
x = check_regexp(j)
if x:
if x[1] > 1:
g = check_duplicate_coord(x[0], x[1])
if len(g) > 1:
flag = True
z = tuple(x[0])
if type(z[0]) == tuple:
z = z[0]
if not stations.get((j, z[-2:], tuple(g), flag)):
stations[(j, z[-2:], tuple(g), flag)] = i[j]
else:
stations[(j, z[-2:], tuple(g), flag)] = stations[(j, z[-2:], tuple(g), flag)] + i[j]
else:
fails[j] = i[j]
def correct_fail():
nonlocal fails, stations
# 'Пл. Аэропорт (Внуково) (старая платф.)' is an exception - just delete it
try:
del fails['Пл. Аэропорт (Внуково) (старая платф.)']
except:
pass
for i in fails:
if fails[i] and len(fails[i]) == 2:
for n in stations:
if fails[i][0] == n[0]:
stations[n].append(fails[i][1])
elif fails[i][1] == n[0]:
stations[n].append(fails[i][0])
def get_graph_coord():
nonlocal stations
for i in stations:
for j in range(len(stations[i])):
g = []
flag = False
x = check_regexp(stations[i][j])
if x:
if x[1] > 1:
g = check_duplicate_coord(x[0], x[1])
if len(g) > 1:
flag = True
z = tuple(x[0])
if type(z[0]) == tuple:
z = z[0]
stations[i][j] = (stations[i][j], z[-2:], tuple(g), flag)
else:
print(stations[i][j])
fails = {}
stations = {}
pre_check_bild_stations()
filter_station()
correct_fail()
get_graph_coord()
pickle.dump(stations, map_file(action='wb', filename='full_map.db'))
def bild_short_map(file):
def build_sqad(obj, obj_list):
x,y = obj
for i in range(len(obj_list)):
if i:
l = []
a, b = obj_list[i-1]
a2, b2 = obj_list[i]
for j in [(min(a,a2), max(b,b2), 4), (max(a,a2), min(b,b2), 2), (min(a,a2), min(b,b2), 1), (max(a,a2), max(b,b2), 3)]:
if j not in l:
l.append(j)
l.sort(key=lambda x:x[-1])
if l[0][0] < x < l[1][0] and l[0][1] < y < l[-1][1]:
return ((obj_list[i], obj_list[i-1]))
def detect_coord(coord_list, over_coord_lists):
check_list = []
exit_list = []
for i in over_coord_lists:
if not i[-1]:
check_list.append(i[1])
if len(check_list) < 2:
return (False, 0)
else:
result = []
for i in coord_list:
x = build_sqad(i, check_list)
result.append(x)
for i in range(len(result)):
if result[i]:
exit_list.append((coord_list[i], result[i]))
if exit_list and len(exit_list) == 1:
return (exit_list[0][0],0)
elif exit_list and len(exit_list) > 1:
return (exit_list,len(exit_list))
else:
return (False, 0)
def recusion_build(station_dict, num=1):
exit_dict = {}
for i in station_dict:
if i[-num]:
x,y = detect_coord(i[-2], [j for j in station_dict[i]])
if x and not y:
exit_dict[(i[0], x, (), False, 0)] = station_dict[i]
elif y:
for b in range(y):
exit_dict[(i[0], x[b][0], (), False, b)] = station_dict[i]
else:
t = list(i)
t.append(0)
exit_dict[tuple(t)] = station_dict[i]
else:
t = list(i)
t.append(0)
exit_dict[tuple(t)] = station_dict[i]
for i in exit_dict:
for j in range(len(exit_dict[i])):
for n in exit_dict:
if exit_dict[i][j][0] == n[0] and not n[-2]:
exit_dict[i][j] = n
return exit_dict
stations = pickle.load(map_file(filename=file))
short_map = recusion_build(stations)
for i in short_map:
remove_elems = []
obj = i[1]
g = 1
t = 1
for j in range(len(short_map[i])):
g = t
if j:
x,y = (short_map[i][j][1], short_map[i][j-1][1])
if type(x) == tuple and type(y) == tuple:
t = build_sqad(obj,[x,y])
if not t and not g:
remove_elems.append(short_map[i][j-1])
for j in remove_elems:
short_map[i].remove(j)
pickle.dump(short_map, map_file(action='wb', filename='short_map.db'))
def bild_coords_map(file):
def remove_exeption(exeption_name, exeption_val):
nonlocal regenerate_maps
nonlocal maps
if regenerate_maps[exeption_name].count(exeption_val):
regenerate_maps[exeption_name].remove(exeption_val)
if maps.count((exeption_name, exeption_val)):
maps.remove((exeption_name, exeption_val))
if regenerate_maps[exeption_val].count(exeption_name):
regenerate_maps[exeption_val].remove(exeption_name)
if maps.count((exeption_val, exeption_name)):
maps.remove((exeption_val, exeption_name))
def check_objects(obj, obj2):
def x(o):
if type(o[0]) == tuple:
return o[0]
else:
return o
for i in range(10):
r1 = x(obj)
r2 = x(obj2)
return (r1,r2)
# The distance between stations must not be too large - 0.2 degrees at most
def simple_len_check(obj, list_obj):
def check_result(list_obj):
r = []
for i in range(len(list_obj)):
if list_obj[i] > 0.2:
r.append(i)
return r
resultX = []
resultY = []
x,y = obj
for i in list_obj:
a,b = i
l1 = max(a,x) - min(a,x)
l2 = max(b,y) - min(b,y)
resultX.append(l1)
resultY.append(l2)
x = check_result(resultX)
y = check_result(resultY)
if x or y:
rem = list(set.union(set(x),set(y)))
r = []
for i in rem:
r.append(list_obj[i])
for i in r:
list_obj.remove(i)
return list_obj
# A station cannot have more than 4 neighbouring stations
def simple_count_check(stations):
nonlocal station_count
if not station_count.get(stations):
station_count[stations] = 1
else:
station_count[stations] = station_count[stations] + 1
# A path to a station must not cross another path - certainly not twice
def simple_collision_check(line_obj, over_line):
if line_obj[0] == over_line[0] or line_obj[0] == over_line[1]:
return False
elif line_obj[1] == over_line[0] or line_obj[1] == over_line[1]:
return False
line_obj = tuple(map(lambda x: tuple(map(lambda y: int(y * 10000000),x)), line_obj))
over_line = tuple(map(lambda x: tuple(map(lambda y: int(y * 10000000),x)), over_line))
x,y = line_obj[0]
x1,y1 = line_obj[1]
a,b = over_line[0]
a1,b1 = over_line[1]
ma = max(over_line[0][0], over_line[1][0])
mia = min(over_line[0][0], over_line[1][0])
mb = max(over_line[0][1], over_line[1][1])
mib = min(over_line[0][1], over_line[1][1])
mx = max(line_obj[0][0], line_obj[1][0])
mix = min(line_obj[0][0], line_obj[1][0])
my = max(line_obj[0][1], line_obj[1][1])
miy = min(line_obj[0][1], line_obj[1][1])
n = ((a*b1 - a1*b)*(x1-x) - (a1-a)*(x*y1 - x1*y)) / ((a1 - a)*(y-y1) - (x1-x)*(b-b1))
if mia < n < ma and mix < n < mx:
check = ((y-y1)*n + (x*y1 - x1*y)) / (x1-x)
if mib < -check < mb and miy < -check < my:
return True
stations = pickle.load(map_file(filename=file))
station_count = {}
coords = {}
maps = []
for i in stations:
coords[i[1]] = []
for j in stations[i]:
if type(j[1]) == tuple:
coords[i[1]].append(j[1])
for i in coords:
coords[i] = simple_len_check(i, coords[i])
for i in coords:
for j in coords[i]:
simple_count_check(j)
for i in station_count:
if station_count[i] > 4:
for j in coords:
for jj in coords[j]:
if i == jj:
coords[j].remove(jj)
for i in coords:
for j in coords[i]:
x,y = check_objects(i,j)
if (x,y) not in maps:
maps.append((x,y))
collision_fails = []
for i in maps:
for j in maps:
if i != j:
if simple_collision_check(i, j): collision_fails.append((i,j))
collision_fails_count = {}
for i in collision_fails:
for j in i:
if collision_fails_count.get(j):
collision_fails_count[j] = collision_fails_count[j] + 1
else:
collision_fails_count[j] = 1
for i in collision_fails_count:
if collision_fails_count[i] > 4:
maps.remove(i)
regenerate_maps = {}
for i in maps:
if not regenerate_maps.get(i[0]):
regenerate_maps[i[0]] = []
if not regenerate_maps.get(i[1]):
regenerate_maps[i[1]] = []
if i[1] not in regenerate_maps[i[0]]: regenerate_maps[i[0]].append(i[1])
if i[0] not in regenerate_maps[i[1]]: regenerate_maps[i[1]].append(i[0])
    # Spurious (parasitic) lines: [55.7285836, 37.640936],[55.745199, 37.6893076]
# [55.7045344, 37.6238334],[55.745199, 37.6893076]
# [55.7237247, 37.3974261],[55.6100116, 37.2660261]
exeption_list = [((55.7285836, 37.640936),(55.745199, 37.6893076)),((55.7045344, 37.6238334),(55.745199, 37.6893076)),
((55.7237247, 37.3974261),(55.6100116, 37.2660261))]
for i in exeption_list:
remove_exeption(i[0],i[1])
pickle.dump(regenerate_maps, map_file(action='wb', filename='coords_maps.db'))
pickle.dump(maps, map_file(action='wb', filename='maps.db'))
def correct_coords_map(file):
def simple_len_check(obj1, obj2):
x,y = obj1
x1,y1 = obj2
l1 = max(x1,x) - min(x1,x)
l2 = max(y1,y) - min(y1,y)
if l1 < 0.3 and l2 < 0.3:
return True
def return_neighbors(neighbors_list, exeption):
result = []
if not neighbors_list:
return False
for i in neighbors_list[2:]:
            if i != "NULL" and i != exeption:  # 'and': skip both the NULL placeholder and the excluded station
x = get_one_entry('stations', i)
if x: result.append(x)
if result:
return result
def get_parent():
nonlocal stations, stations_end
for i in stations:
if len(stations[i]) <= 1:
y = get_one_entry('stations', i[0], fild='coordinateX', name2=i[1], fild2='coordinateY')
if len(stations[i]) == 1:
e = get_one_entry('stations', stations[i][0][0], fild='coordinateX', name2=stations[i][0][1],
fild2='coordinateY')
if e: e = e[0]
else:
e = "NULL"
n = get_one_entry('neighbors', y[0])
parent = return_neighbors(n, e)
stations_end[y] = parent
stations_end = {}
stations = pickle.load(map_file(filename=file))
get_parent()
for i in stations_end:
for j in stations_end:
if stations_end[i] == stations_end[j] and i != j and stations_end[i] and stations_end[j]:
if simple_len_check(i[-2:], stations_end[j][0][-2:]):
if stations_end[j][0][-2:] not in stations[i[-2:]]: stations[i[-2:]].append(stations_end[j][0][-2:])
if simple_len_check(j[-2:], stations_end[j][0][-2:]):
if stations_end[j][0][-2:] not in stations[j[-2:]]: stations[j[-2:]].append(stations_end[j][0][-2:])
stations_end = {}
get_parent()
for i in stations_end:
if not stations[i[-2:]]:
stations[i[-2:]].append(stations_end[i][0][-2:])
stations_end = {}
get_parent()
for i in stations_end:
for j in stations_end:
if stations_end[j] and i[0] == stations_end[j][0][0]:
if simple_len_check(i[-2:], j[-2:]):
if j[-2:] not in stations[i[-2:]]: stations[i[-2:]].append(j[-2:])
if i[-2:] not in stations[j[-2:]]: stations[j[-2:]].append(i[-2:])
stations_end = {}
get_parent()
maps = []
for i in stations:
for j in stations[i]:
if (i,j) not in maps:
maps.append((i,j))
pickle.dump(stations, map_file(action='wb', filename=file))
pickle.dump(maps, map_file(action='wb', filename='maps.db'))
def build_stations_coord(file):
addict = pickle.load(map_file(filename=file))
add = []
for i in addict:
x = get_one_entry('stations', i[0], fild='coordinateX', name2=i[1], fild2='coordinateY')
if x and (x[0], x[-2], x[-1]) not in add:
add.append((x[0], x[-2], x[-1]))
stations = []
stations_for_schedule = []
for i in get_table('schedule', fild='station_name'):
if i[4] not in stations_for_schedule:
stations_for_schedule.append(i[4])
for i in stations_for_schedule:
x = check_regexp(i)[0]
if type(x) == list:
for j in check_regexp(i)[0]:
if (j[0], j[-2], j[-1]) not in stations:
stations.append((j[0], j[-2], j[-1]))
else:
if (x[0], x[-2], x[-1]) not in stations:
stations.append((x[0], x[-2], x[-1]))
for i in add:
if i not in stations:
stations.append(i)
pickle.dump(stations, map_file(action='wb', filename='stations.db'))
def correct_map_and_stations(file_stations, file_coordinate, file_map):
stations_name = pickle.load(map_file(filename=file_stations))
stations_coords = pickle.load(map_file(filename=file_coordinate))
stations_map = pickle.load(map_file(filename=file_map))
combinate = {}
removeble = []
for i in stations_name:
if i[-2:] in stations_coords:
combinate[i] = stations_coords[i[-2:]]
removeble.append(i)
for i in removeble:
stations_name.remove(i)
del stations_coords[i[-2:]]
for i in stations_coords:
for j in stations_coords[i]:
if (i,j) in stations_map: stations_map.remove((i, j))
for i in stations_name:
combinate[i] = []
pickle.dump(combinate, map_file(action='wb', filename='combinate_maps.db'))
pickle.dump(stations_map, map_file(action='wb', filename=file_map))
def correct_checks(file):
def generate_ends():
nonlocal ends, alone, combinate
ends = {}
alone = []
for i in combinate:
if len(combinate[i]) == 1:
ends[i] = combinate[i]
if len(combinate[i]) == 0:
alone.append(i)
def ends_simple_detect(ends):
def get_lines(main, parent):
r = [0,0]
x,y = main
x1,y1 = parent
mX,mY,miX,miY=(max(x,x1),max(y,y1),min(x,x1),min(y,y1))
if mX == x:
r[0] = lambda main,b: True if main<b else False
else:
r[0] = lambda main,b: True if main>b else False
if mY == y:
r[1] = lambda main,b: True if main<b else False
else:
r[1] = lambda main,b: True if main>b else False
return r
def get_sqad(obj,obj2):
x,y = obj
x1,y1 = obj2
mX, mY, miX, miY = (max(x, x1), max(y, y1), min(x, x1), min(y, y1))
sqad = (mX, mY, miX, miY)
return ((mX-miX), (mY-miY), sqad)
def get_perimeter(sqad):
l1 = sqad[0] - sqad[2]
l2 = sqad[1] - sqad[3]
return (l1,l2)
def get_diag(obj,obj2):
x = get_sqad(obj,obj2)
l1,l2 = get_perimeter(x[-1])
line = math.sqrt(l1*l1 + l2*l2)
return line
def recursive(num=0.15, noconnect=False):
nonlocal ends, connect
endings = []
for i in ends:
for j in ends:
if i != j:
x, y = get_lines(i[-2:], ends[i][0])
if x(i[1], j[1]) and y(i[2], j[2]):
xl, yl, sqad = get_sqad(i[-2:], j[-2:])
if xl < num and yl < num and 'Москва-Пассажирская' not in i[
0] and 'Москва-Пассажирская' not in j[0]:
if 'Москва-Рижская' not in i[0] and 'Москва-Рижская' not in j[0]:
if not noconnect:
for n in alone:
if sqad[2] < n[1] < sqad[0] and sqad[3] < n[2] < sqad[1]:
if not connect.get(n):
connect[n] = [i, j]
else:
endings.append((i,j))
if noconnect:
return endings
nonlocal alone,combinate
while True:
connect = {}
recursive()
if any(connect):
for i in connect:
l1 = get_diag(i[-2:], connect[i][0][-2:])
l2 = get_diag(i[-2:], connect[i][1][-2:])
if l1 > l2 and connect[i][1] not in combinate[i]:
combinate[i].append(connect[i][1])
elif l1 < l2 and connect[i][0] not in combinate[i]:
combinate[i].append(connect[i][0])
generate_ends()
else:
x = recursive(num=0.1,noconnect=True)
for i in x:
if i[1] not in combinate[i[0]]: combinate[i[0]].append(i[1])
if i[0] not in combinate[i[1]]: combinate[i[1]].append(i[0])
break
combinate = pickle.load(map_file(filename=file))
maps = pickle.load(map_file(filename='maps.db'))
stations = pickle.load(map_file(filename='stations.db'))
maxX = max(combinate, key=lambda x:x[1] if combinate[x] else 0)[1]
minX = min(combinate, key=lambda x:x[1] if combinate[x] else 1000)[1]
maxY = max(combinate, key=lambda x:x[2] if combinate[x] else 0)[2]
minY = min(combinate, key=lambda x:x[2] if combinate[x] else 1000)[2]
sqad = (maxX, minX, maxY, minY)
removeble = []
for i in combinate:
if not minX < i[1] < maxX or not minY < i[2] < maxY:
removeble.append(i)
for i in removeble:
del combinate[i]
ends = {}
alone = []
generate_ends()
ends_simple_detect(ends)
for i in combinate:
for j in combinate[i]:
if (i[-2:],j[-2:]) not in maps:
maps.append((i[-2:],j[-2:]))
pickle.dump(combinate, map_file(action='wb', filename=file))
pickle.dump(stations, map_file(action='wb', filename='stations.db'))
pickle.dump(maps, map_file(action='wb', filename='maps.db'))
def correct_exeptions():
def get_coord(name, count=0):
nonlocal combinate, stations
x = 0
for i in combinate:
if i[0] == name:
x = i[-2:]
if not count:
return x
else:
count -= 1
for i in stations:
if i[0] == name:
x = i[-2:]
if not count:
return x
else:
count -= 1
if x:
return x
def maps_operator(obj, operator=None):
nonlocal maps
if operator == 'add' and obj not in maps:
maps.append(obj)
elif operator == 'rem':
if obj in maps:
maps.remove(obj)
if (obj[1], obj[0]) in maps:
maps.remove((obj[1], obj[0]))
def len_check(x,y):
a,b = x
a1,b1 = y
if max(a,a1) - min(a,a1) < 0.3 and max(b,b1) - min(b,b1) < 0.3:
return True
def added(station_name, station_name2, combinate):
x = get_coord(station_name)
y = get_coord(station_name2)
count = 0
while not len_check(x,y) and count < 10:
count += 1
x = get_coord(station_name, count)
y = get_coord(station_name2, count)
if x and y:
for i in combinate:
if i[-2:] == x and y not in combinate[i]:
combinate[i].append(y)
if i[-2:] == y and x not in combinate[i]:
combinate[i].append(x)
maps_operator((x,y), 'add')
else:
print('Error')
print(station_name, station_name2)
print(x,y)
return combinate
def removed(station_name, station_name2, combinate):
x = get_coord(station_name)
y = get_coord(station_name2)
for i in combinate:
for j in range(len(combinate[i])):
if len(combinate[i][j]) == 3: combinate[i][j] = combinate[i][j][-2:]
if i[-2:] == x and y in combinate[i]:
combinate[i].remove(y)
if i[-2:] == y and x in combinate[i]:
combinate[i].remove(x)
maps_operator((x, y), 'rem')
return combinate
combinate = pickle.load(map_file(filename='combinate_maps.db'))
stations = pickle.load(map_file(filename='stations.db'))
maps = pickle.load(map_file(filename='maps.db'))
combinate = added('Москва-Товарная-Павелецкая', 'ЗИЛ', combinate)
combinate = removed('Новопеределкино', 'Ромашково', combinate)
combinate = removed('Захарово', 'Сушкинская', combinate)
combinate = added('Голицыно', 'Захарово', combinate)
combinate = added('199 км', 'Кубинка I', combinate)
combinate = added('Туманово', 'Мещёрская', combinate)
combinate = added('Азарово', 'Садовая (154 км)', combinate)
combinate = added('Муратовка', 'Садовая (154 км)', combinate)
combinate = added('Калуга II', 'Горенская', combinate)
combinate = added('Тихонова Пустынь', 'Сляднево', combinate)
combinate = added('Нара', 'Зосимова Пустынь', combinate)
combinate = added('Бекасово I', 'Ожигово', combinate)
combinate = removed('Латышская', 'Пожитково', combinate)
combinate = removed('Зосимова Пустынь', 'Пожитково', combinate)
combinate = removed('Зосимова Пустынь', 'Ожигово', combinate)
combinate = removed('Ожигово', 'Пожитково', combinate)
combinate = removed('Зосимова Пустынь', 'Посёлок Киевский', combinate)
combinate = added('Бекасово I', 'Посёлок Киевский', combinate)
combinate = added('Бекасово I', 'Пожитково', combinate)
combinate = added('Сандарово', 'Столбовая', combinate)
combinate = added('Космос', 'Аэропорт-Домодедово', combinate)
combinate = added('Ленинская', 'Домодедово', combinate)
combinate = added('Лагерный', 'Рязань-II', combinate)
combinate = added('Рязань-I', 'Рязань-II', combinate)
combinate = added('Осаново', 'Пожога', combinate)
combinate = added('Ундол', 'Сушнево', combinate)
combinate = added('Металлург', 'Фрязево', combinate)
combinate = added('Наугольный', '81 км', combinate)
combinate = added('Бужаниново', '90 км', combinate)
combinate = added('Арсаки', '90 км', combinate)
combinate = added('Струнино', 'Александров', combinate)
combinate = added('Струнино', 'Александров-2', combinate)
combinate = added('Александров', 'Александров-2', combinate)
combinate = added('Александров', 'Мошнино', combinate)
combinate = added('71 км', 'Костино', combinate)
combinate = removed('Жилино', 'Поваровка', combinate)
combinate = removed('Депо', 'Поваровка', combinate)
combinate = added('Депо', 'Жилино', combinate)
combinate = removed('Поваровка', 'Берёзки-Дачные', combinate)
combinate = added('Поварово I', 'Берёзки-Дачные', combinate)
combinate = added('Шереметьевская', 'Аэропорт Шереметьево', combinate)
combinate = added('Манихино I', '50 км', combinate)
combinate = added('Ромашково', 'Рабочий Посёлок', combinate)
combinate = removed('Депо', '142 км', combinate)
pickle.dump(combinate, map_file(action='wb', filename='combinate_maps.db'))
pickle.dump(stations, map_file(action='wb', filename='stations.db'))
pickle.dump(maps, map_file(action='wb', filename='maps.db'))
# --------------------------------- main function ----------------------------------------------------------------------
# If the data is missing, it collects information about stations, trains and lines via the parserus.py module and then
# processes it to build the map of lines and stations. It returns the list of all lines that need to be drawn and the
# list of all stations that need to be drawn. The large number of helper files is due to the long running time of these steps.
def main():
x = os.path.dirname(os.path.abspath(__file__))
def parser_work():
if not check_exist_table('stations'):
bild_stations()
if not check_exist_table('trains'):
bild_schedule()
def maps_work(x):
if not os.path.exists(os.path.join(x, 'data', 'graph.db')):
build_graph()
if not os.path.exists(os.path.join(x, 'data', 'full_map.db')):
bild_coord_graph()
if not os.path.exists(os.path.join(x, 'data', 'short_map.db')):
bild_short_map('full_map.db')
if not os.path.exists(os.path.join(x, 'data', 'coords_maps.db')):
bild_coords_map('short_map.db')
def stations_work(x):
if not os.path.exists(os.path.join(x, 'data', 'stations.db')):
build_stations_coord('coords_maps.db')
def correction_work(x):
correct_coords_map('coords_maps.db')
correct_map_and_stations('stations.db', 'coords_maps.db', 'maps.db')
correct_checks('combinate_maps.db')
correct_exeptions()
def final_correct(x):
y = pickle.load(map_file(filename='combinate_maps.db'))
for i in ['maps.db', 'stations.db', 'full_map.db', 'graph.db', 'short_map.db', 'coords_maps.db',
'combinate_maps.db']:
map_file(filename=i).close()
if os.path.exists(os.path.join(x, 'data', i)):
os.remove(os.path.join(x, 'data', i))
removeble = []
for i in y:
if not y[i]:
removeble.append(i)
for j in range(len(y[i])):
if len(y[i][j]) == 3: y[i][j] = y[i][j][-2:]
for i in removeble:
del y[i]
pickle.dump(y, map_file(action='wb', filename='fmaps.db'))
def generate_maps(dict_obj):
maps = []
for i in xd:
for j in xd[i]:
if (i[-2:], j) not in maps and (j, i[-2:]) not in maps:
maps.append((i[-2:], j))
maps = list(map(list, maps))
for i in range(len(maps)):
maps[i] = list(map(list, maps[i]))
return maps
if not os.path.exists(os.path.join(x, 'data', 'data.db')):
parser_work()
if not os.path.exists(os.path.join(x, 'data', 'fmaps.db')):
maps_work(x)
stations_work(x)
correction_work(x)
final_correct(x)
x = map_file(filename='fmaps.db')
xd = pickle.load(x)
stations = [[i[0], [i[-2],i[-1]]] for i in xd]
maps = generate_maps(xd)
return (maps, stations)
# ----------------------------------------------------------------------------------------------------------------------
main()
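# Note on the return value (see generate_maps() and the stations list above): main() yields (maps, stations),
# where maps is a list of line segments [[x1, y1], [x2, y2]] to draw and stations is a list of
# [station_name, [x, y]] entries.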
| true
|
76d4e8a0c297775cece57c9453ef381abe4ab549
|
Python
|
martwo/ndhist
|
/test/constant_bin_width_axis_test.py
|
UTF-8
| 2,407
| 3.015625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
import unittest
import numpy as np
import ndhist
class Test(unittest.TestCase):
def test_constant_bin_width_axis(self):
"""Tests if the constant_bin_width_axis class works properly.
"""
import math
stop = 10
start = 0
width = 1
axis = ndhist.axes.linear(start,stop,width, label='MyLabel', name='MyAxis')
def _check_axis(axis):
self.assertTrue(axis.name == 'MyAxis')
self.assertTrue(axis.label == 'MyLabel')
# The name and label is changeable. So lets try to change it.
axis.name = 'MyNewAxis'
self.assertTrue(axis.name == 'MyNewAxis')
axis.label = 'MyNewLabel'
self.assertTrue(axis.label == 'MyNewLabel')
# Change back the label and name.
axis.name = 'MyAxis'
self.assertTrue(axis.name == 'MyAxis')
axis.label = 'MyLabel'
self.assertTrue(axis.label == 'MyLabel')
# The dtype of the axis is choosen automatically by the numpy.linspace
# function.
nbins = int(math.ceil((stop - start) / width)) + 1
edges_dtype = np.linspace(start, stop, num=nbins, endpoint=True).dtype
self.assertTrue(axis.dtype == edges_dtype)
self.assertTrue(axis.has_underflow_bin)
self.assertTrue(axis.has_overflow_bin)
self.assertFalse(axis.is_extendable)
self.assertTrue(axis.nbins == 12)
self.assertTrue(np.all(axis.binedges == np.array(
[-np.inf,0,1,2,3,4,5,6,7,8,9,10,+np.inf]
)))
self.assertTrue(np.all(axis.lower_binedges == np.array(
[-np.inf,0,1,2,3,4,5,6,7,8,9,10]
)))
self.assertTrue(np.all(axis.upper_binedges == np.array(
[0,1,2,3,4,5,6,7,8,9,10,+np.inf]
)))
self.assertTrue(np.all(axis.bincenters == np.array(
[-np.inf,0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,+np.inf]
)))
self.assertTrue(np.all(axis.binwidths == np.array(
[np.inf,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,np.inf]
)))
_check_axis(axis)
# Put the axis into a ndhist object and check the values again.
h = ndhist.ndhist((axis,))
_check_axis(h.axes[0])
if(__name__ == "__main__"):
unittest.main()
| true
|
473c9ae4b2a8a2a10642546c1f77384ab49f2027
|
Python
|
AlvinJS/Python-practice
|
/caesar.py
|
UTF-8
| 445
| 3.84375
| 4
|
[] |
no_license
|
# alphabet = [a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z]
def encrypt(text, s):
    result = ""
    for i in range(len(text)):
        char = text[i]
        if char.isupper():
            result += chr((ord(char) + s - 65) % 26 + 65)
        else:
            result += chr((ord(char) + s - 97) % 26 + 97)
    return result
word= str(input("Enter a word to encrypt: "))
s = int(input("Enter the key: "))
print("Cipher text is :" + encrypt(word, s))
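# Worked example (shift of 3): encrypt("abc", 3) -> "def", encrypt("XYZ", 3) -> "ABC".
# Decryption is the same operation with the negated key, e.g. encrypt("def", -3) -> "abc".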
| true
|
99dbea9918329739c5a198efcab44586136770d0
|
Python
|
cris-cs/Titanic
|
/Main3.py
|
UTF-8
| 1,191
| 3.515625
| 4
|
[] |
no_license
|
from Titanic01 import sexe, survided, name, age
nbPassagers = len(sexe)
def analyseTitanic(totalPassagers, sexePassager):
nbSurvivants = 0
nbPassagersCritere = 0
for passager in range(nbPassagers):
if survided[passager] == 1:
nbSurvivants += 1
if sexe[passager] == sexePassager:
                nbPassagersCritere += 1
    taux = nbPassagersCritere / nbSurvivants * 100
    return taux
def analyseTitanic2(totalPassagers, agemin, agemax):
nbSurvivants = 0
nbPassagersCritere = 0
for passager in range(nbPassagers):
if survided[passager] == 1:
nbSurvivants += 1
if agemax >= age[passager] >= agemin:
nbPassagersCritere += 1
taux = nbPassagersCritere / nbSurvivants * 100
return taux
# print(analyseTitanic(nbPassagers))
# tauxSurvie = analyseTitanic(nbPassagers) / nbPassagers * 100
# print(f'{tauxSurvie:.1f}%')
print(analyseTitanic(nbPassagers, 'male'))
print(analyseTitanic(nbPassagers, 'female'))
print('0-19')
print(analyseTitanic2(nbPassagers, 0, 19))
print('20-39')
print(analyseTitanic2(nbPassagers, 20, 39))
print('40-59')
print(analyseTitanic2(nbPassagers, 40, 59))
print('60->')
print(analyseTitanic2(nbPassagers, 60, 100000))
| true
|
2a93e72ffb964b6672667b3c8b6f223bc024069d
|
Python
|
mathewdgardner/sklearn-porter
|
/sklearn_porter/utils/Shell.py
|
UTF-8
| 607
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
import subprocess as subp
class Shell(object):
@staticmethod
def call(command, cwd=None):
if isinstance(command, str):
command = command.split()
if isinstance(command, list):
return subp.call(command, cwd=cwd)
return None
@staticmethod
def check_output(command, cwd=None, shell=True, stderr=subp.STDOUT):
if isinstance(command, str):
command = command.split()
if isinstance(command, list):
subp.check_output(command, shell=shell, cwd=cwd, stderr=stderr)
return None
| true
|
3e056101d6d0d12cbec35bd70a0e8e5d07629e2f
|
Python
|
messierspheroid/instruction
|
/D26 - NATO alphbet/pandas_for_loop.py
|
UTF-8
| 829
| 3.3125
| 3
|
[] |
no_license
|
student_dict = {
"student": ["Angela", "James", "Lily"],
"score": [56, 76, 98],
}
# # looping through dictionaries
# for (key, value) in student_dict.items():
# print(key)
# print(value)
import pandas
student_data_frame = pandas.DataFrame(student_dict)
# print(student_data_frame)
# # loop through a data frame is not time efficient bc pandas has its own built in for loop func
# for (key, value) in student_data_frame.items():
# print(key)
# print(value)
# # loop through rows of a data frame
# for (index, row) in student_data_frame.iterrows():
# # print(index)
# # this provides an overview of the DataFrame
# # each row is a pandas series object
# if row.student == "Angela":
# print(row.score)
# new_dict = {new_key: new_value for (key, value) in dict.items() if test}
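# Concrete example of the comprehension pattern above (kept as a comment, like the rest of this file):
# passed = {name: score for (name, score) in zip(student_dict["student"], student_dict["score"]) if score > 60}
# -> {"James": 76, "Lily": 98}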
| true
|
067398217e8d02458390a83533edfadd741b6cf5
|
Python
|
mglavitsch/jumbler-python
|
/tests/test_jumble.py
|
UTF-8
| 1,667
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
import unittest
from jumblepkg.jumble import Jumbler
class TestJumbler(unittest.TestCase):
def test_indices(self):
# print(sys.getdefaultencoding())
jumbler = Jumbler(" ")
indices = jumbler.get_indices()
self.assertEqual(indices, [])
jumbler.text = "Zaphod Beeblebrox"
indices = jumbler.get_indices()
self.assertEqual(indices, [[0, 5], [7, 16]])
jumbler.text = " Zaphod __äöü___ $ Beeble123brox !?+ "
indices = jumbler.get_indices()
self.assertEqual(indices, [[3, 8], [12, 14], [21, 26], [30, 33]])
jumbler.text = "Arthur Dent\nFord Prefect\nTricia McMillan\nZaphod Beeblebrox"
indices = jumbler.get_indices()
self.assertEqual(indices, [[0, 5], [7, 10], [12, 15], [17, 23], [
25, 30], [32, 39], [41, 46], [48, 57]])
def test_jumble(self):
jumbler = Jumbler(" ")
jumbled_text = jumbler.get_jumbled_text(True, 1000)
self.assertEqual(jumbled_text, " ")
jumbler.text = "Zaphod Beeblebrox"
jumbled_text = jumbler.get_jumbled_text(True, 1000)
self.assertEqual(jumbled_text, "Zohapd Bbeeboelrx")
jumbler.text = " Zaphod __äöü___ $ Beeble123brox !?+ "
jumbled_text = jumbler.get_jumbled_text(True, 1000)
self.assertEqual(
jumbled_text, " Zohapd __äöü___ $ Bbelee123borx !?+ ")
jumbler.text = "Arthur Dent\nFord Prefect\nTricia McMillan\nZaphod Beeblebrox"
jumbled_text = jumbler.get_jumbled_text(True, 1000)
self.assertEqual(
jumbled_text, "Auhrtr Dnet\nFrod Pcereft\nTciira MaiMlcln\nZoahpd Blerbboeex")
if __name__ == '__main__':
unittest.main()
| true
|
343f78f1f7c9a42d18e89f618abf203cb4ee4dd3
|
Python
|
Vikas-KM/python-programming
|
/partial_func.py
|
UTF-8
| 112
| 2.796875
| 3
|
[] |
no_license
|
from functools import partial
def multiply(x, y):
return x * y
db1 = partial(multiply, 2)
print(db1(3))
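# db1 fixes the first positional argument of multiply at 2, so db1(3) prints 6.
# partial also accepts keyword arguments, e.g. partial(multiply, y=10)(4) -> 40.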
| true
|
00f15580c91354e88e889f14c97bc492ba560d80
|
Python
|
sethhardik/face-recognition-
|
/train.py
|
UTF-8
| 1,462
| 2.734375
| 3
|
[] |
no_license
|
import numpy as np
from sklearn.model_selection import train_test_split
from keras_vggface.vggface import VGGFace
from keras.engine import Model
from keras.layers import Input
import numpy as np
import keras
from keras.layers import Dense
# extracting file saved by data_prep.py
data = np.load('face_data.npz')
x , y = data['x'], data['y']
#categorical conversion of data label
y = keras.utils.to_categorical(y, 6)
# using transfer learning to reduce the time required to train the algo
resnet = VGGFace(model='resnet50',input_shape=(224, 224, 3))
layer_name = resnet.layers[-2].name
#adding our own custom layers to make the model work on our datatset
out = resnet.get_layer(layer_name).output
out = Dense(6,activation='softmax')(out)
resnet_4 = Model(resnet.input, out)
# removing last layer of the model and adding my own layer to it
for layer in resnet_4.layers[:-1]:
layer.trainable = False
resnet_4.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
#checking the final created dataset
print (resnet_4.summary())
# training the model we have created with our own dataset
resnet_4.fit(x, y,batch_size=10,epochs=10,shuffle=True)
#saving the trained model so that it can be used afterwards
resnet_4.save("/home/hardik/Desktop/model_save_face.h5")
# checking the accuracy of the model on training data only as i used a very small dataset
scores = resnet_4.evaluate(x, y, verbose=1)
print('Test accuracy:', scores[1])
| true
|
b8bf56e9fd3c760a5f46de01bfecfd4baf23c1cb
|
Python
|
yszpatt/PythonStart
|
/pythonlearn/train/prac9.py
|
UTF-8
| 153
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# coding:utf-8
# Pause for one second before output (here the pause length is read from input).
import time
j = int(input("输入暂停时间:"))
time.sleep(j)
print("计时时间到")
| true
|
da05558ba14a3f086a6c05fb454e7d5b3e450a57
|
Python
|
DaHuO/Supergraph
|
/codes/CodeJamCrawler/16_0_1/rbonvall/sheep.py
|
UTF-8
| 389
| 3.484375
| 3
|
[] |
no_license
|
#!python3
def main():
T = int(input())
for t in range(T):
n = int(input())
print("Case #{}: {}".format(t + 1, solve(n)))
def solve(n):
if n == 0:
return 'INSOMNIA'
digits = set(range(10))
i = 0
while digits:
i += 1
m = i * n
while m:
digits.discard(m % 10)
m //= 10
return i * n
main()
| true
|
318daa2aceee700891dff06bad61ebd07270ac47
|
Python
|
mburq/dynamic_optimization_benchmarks
|
/src/envs/matching/matching_env.py
|
UTF-8
| 6,226
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
import networkx as nx
from src.envs.matching.vertex import basic_vertex_generator
from src.envs.matching.taxi_vertex import taxi_vertex_generator
from src.envs.matching.kidney_vertex import unweighted_kidney_vertex_generator
class matching_env(object):
"""
Implements a simple dynamic matching environment,
where arrivals are generated by a ``vertex_generator''.
Assumes that departing unmatched yields non-negative value.
This allows us to remove negative-weight edges and sparsify the graph.
"""
def __init__(self, p):
"""
Input: parameter object p used for the vertex_generator
"""
self.vertex_generator = self.get_vertex_generator(p.vertex_generator_name,
offset=p.offset,
r_seed=p.r_seed,
iterations=p.T,
dep_rate=p.dep_rate,
dep_mode=p.dep_mode)
self.reset()
def reset(self):
"""
Resets the environment to an empty graph.
"""
self.state = nx.Graph() # The state is a networkx graph object
self.offline_graph = nx.Graph()
self.present_vertices = [] # Keeps track of the offline graph.
self.total_reward = 0
self.last_reward = 0
self.time = 0 # time is discrete.
arriving_vertex = self.vertex_generator.new_vertex(self.time)
self.arrival(arriving_vertex)
return self.state
def step(self, action):
"""
Main function of the matching environment.
Input: - An action in the form of a list of pairs of vertices
to be matched.
It proceeds in 4 steps:
- Removes matched vertices from the graph,
- Computes the value of matched edges,
- Computes departures among unmatched vertices,
- Samples new vertex arrivals.
Output: - A state in the form of a networkx graph.
- The reward associated with the action.
"""
self.time += 1
matched_vertices = action
reward = self.match(matched_vertices)
self.departures(self.time)
arriving_vertex = self.vertex_generator.new_vertex(self.time)
self.arrival(arriving_vertex)
return self.state, reward
def arrival(self, new_vertex):
"""
Arrival of a new vertex to the system
Authorizes self loops, it is up to the vertex class to declare 0 weights for self loops
Edges are associated two weights:
- ``true_w'' corresponds to the true value of matching that edge.
- ``weight'' corresponds to an auxiliary value that a matching algorithm
may use to compute perturbed max-weight-matchings. This is used by the
re-optimization algorithm.
"""
# update state space
self.state.add_node(new_vertex)
for v in self.state.nodes():
match_weight = new_vertex.match_value(v)
if match_weight > 0 and v != new_vertex:
self.state.add_edge(new_vertex, v, weight=match_weight, true_w=match_weight)
# Update offline graph
self.offline_graph.add_node(new_vertex)
self.present_vertices.append(new_vertex)
for v in self.present_vertices:
match_weight = new_vertex.match_value(v)
self.offline_graph.add_edge(new_vertex, v, weight=match_weight, true_w=match_weight)
def departures(self, time):
"""
Departure of all vertices that have been waiting too long.
"""
reward = 0
to_remove = []
for v in self.state.nodes():
if v.departure(time):
to_remove.append(v)
reward += v.unmatched_value()
self.state.remove_nodes_from(to_remove)
self.total_reward += reward
self.last_reward += reward
self.present_vertices[:] = [v for v in self.present_vertices if not v.departure(time)]
return reward
def match(self, matched_vertices):
"""
Removes the matched_vertices, and computes the reward.
Input: list of vertex pairs
"""
reward = 0
for (v1, v2) in matched_vertices:
reward += self.state[v1][v2]['true_w']
if v1 == v2:
self.state.remove_node(v1)
else:
self.state.remove_node(v1)
self.state.remove_node(v2)
self.last_reward += reward
self.total_reward += reward
return reward
def get_vertex_generator(self, arr, offset=0, r_seed=1, iterations=10000, dep_rate=10,
dep_mode='deterministic'):
"""
Decides whether to generate vertices:
- Randomly: ``basic''.
- By re-sampling vertices from a dataset of kidney exchange patient-donor
pairs: ``kidney_unweighted''.
- By sampling from a 1-hour period of the New-York taxi dataset: ``taxi''.
The departure process is stochastic, and depends on the ``dep_mode'' variable.
The departure rate ``dep_rate'' corresponds to the expected waiting time
of a vertex in the system.
"""
if arr == 'basic':
return basic_vertex_generator()
elif arr == 'taxi':
return taxi_vertex_generator("data/taxi/rides.csv",
mode="deterministic",
dep_mode=dep_mode,
shift_arrivals=offset,
dep_rate=dep_rate)
elif arr == 'kidney_unweighted':
return unweighted_kidney_vertex_generator('data/kidney/',
mode='random',
dep_mode=dep_mode,
dep_rate=dep_rate,
iterations=iterations)
else:
assert False, "Arrival type {} not supported".format(arr)
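# Minimal usage sketch (hypothetical parameter object; the attribute names below are the ones read in
# __init__/get_vertex_generator, everything else about P is made up for illustration):
#
#   class P:
#       vertex_generator_name = 'basic'
#       offset = 0
#       r_seed = 1
#       T = 1000
#       dep_rate = 10
#       dep_mode = 'deterministic'
#
#   env = matching_env(P())
#   state, reward = env.step([])   # an empty action matches no pairs this step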
| true
|
f436dae491ed6cfb16212c1bbbe2e4fdb3e044be
|
Python
|
guoshan45/guoshan-pyschool
|
/Conditionals/02.py
|
UTF-8
| 106
| 2.75
| 3
|
[] |
no_license
|
def isIsosceles(x, y, z):
a = x > 0 and y > 0 and z > 0
return a and (x == y or y == z or z == x)
| true
|
664ae376e61201be721af6804da20565b123521b
|
Python
|
rifkhan95/karsten
|
/generalRunFiles/MpiTest.py
|
UTF-8
| 804
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
from mpi4py import MPI
import numpy as np
import pandas as pd
def fixDataframe(array):
array = comm.gather(array, root=0)
array2 = np.sum(array, axis=0)
return array2
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
temp = np.zeros((30,5))
data = pd.DataFrame(temp)
rows = [rank + size * i for i in range(int(30/size)+1) if comm.rank + comm.size*i < 30]
for i in rows:
    print(i)
j = np.ones((1,5))
temp[i,:]=i*j
data.iloc[i] = i*j
comm.Barrier()
#print temp
#print data
#data = comm.gather(data.values, root=0)
#data2 = np.sum(data, axis=0)
#print data2
temp2 = fixDataframe(data)
print(temp2)
#print 'before gather'
#temp = comm.gather(temp,root=0)
#
#print temp
#
#if rank ==0:
# temp2 = np.sum(temp,axis=0)
# print 'temp2'
# print temp2
| true
|
1eb8f3e5dfa786454bd28102e444cee67605b47f
|
Python
|
mustafakadi/pr_watcher
|
/app/exception_definitions/reg_key_cannot_be_read_error.py
|
UTF-8
| 372
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
class RegKeyCannotBeReadError(Exception):
"""
Custom exception definition, that will be raised in case of an error in reading process of a registry key
:param msg: The custom message to be shown.
"""
def __init__(self, msg, key_name):
super().__init__("Registry Key Cannot be Read! Msg: " + str(msg))
self.key_name = key_name
| true
|
f3148bde324cf1c76bc36e64a4480d1bed8df230
|
Python
|
Giovanacarmazio/Projeto-operadora-e-regiao
|
/codigo.py
|
UTF-8
| 415
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
import phonenumbers
from phonenumbers import geocoder , carrier
# Enter the number with the country code and the area code (DDD)
phoneNumer = phonenumbers.parse("+5551999999999")
# Look up the carrier
operadora = carrier.name_for_number(phoneNumer, 'pt-br')
# Look up the region
regiao = geocoder.description_for_number(phoneNumer, 'pt-br')
# Print the results
print("A Operadora é: " + operadora)
print("O estado é: " + regiao)
| true
|
2f5e412ac36573c31d841d4cc80f4f29bed1a737
|
Python
|
miracleave-ltd/mirameetVol24
|
/src/UpdateDeleteBigQuery03.py
|
UTF-8
| 809
| 2.515625
| 3
|
[] |
no_license
|
import os
import OperationObject # settings that describe the target of the operation
from google.cloud import bigquery
# GCP authentication setup
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = OperationObject.GOOGLE_APPLICATION_CREDENTIALS
# Create the BigQuery client API object
client = bigquery.Client()
# Build the UPDATE statement
updateQuery = "UPDATE `{0}.{1}.{2}` SET mira_text = '更新' WHERE id = 2".\
format(OperationObject.project_id, OperationObject.dataset_id, OperationObject.table_id)
# Run the query
client.query(updateQuery).result()
print("Updated ID=2.")
# Build the DELETE statement
deleteQuery = "DELETE `{0}.{1}.{2}` WHERE id = 3".\
format(OperationObject.project_id, OperationObject.dataset_id, OperationObject.table_id)
# Run the query
client.query(deleteQuery).result()
print("Deleted ID=3.")
| true
|
f4fd27a7355d5253d3208cd6f684adbf9b0a7ce0
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_199/2160.py
|
UTF-8
| 593
| 3.359375
| 3
|
[] |
no_license
|
def solve(pancakes, k):
n = 0
# find a pancake (-)
for i in range(len(pancakes)):
if pancakes[i] == '-':
if i + k > len(pancakes):
return None
new_block = ''.join(['-' if c == '+' else '+' for p in pancakes[i:i+k] for c in p])
pancakes = pancakes[:i] + new_block + pancakes[i+k:]
n += 1
return n
num_cases = int(input())
for i in range(num_cases):
pancakes, k = input().split(' ')
k = int(k)
n = solve(pancakes, k)
print("Case #%d: %s" % (i+1, n if n is not None else 'IMPOSSIBLE'))
| true
|
7a7e1da61ca78d3f132879a9f4015466ff373c8a
|
Python
|
amrayach/pml_streamlit
|
/app.py
|
UTF-8
| 3,536
| 2.5625
| 3
|
[] |
no_license
|
from predictExplain import ModelsDeploy
import numpy as np
import pandas as pd
import streamlit as st
from spacy.tokens import Doc, Span
from spacy_streamlit import visualize_ner
def to_rgba(hex, val):
val = int(val) * 10
val = abs(val)
val = 255 if val > 255 else val
hex = hex + "{:02x}".format(val)
return hex
deploy = ModelsDeploy()
st.set_page_config(page_title='Character-Level CNN Predict & Explain:', page_icon='random', layout='wide', initial_sidebar_state='collapsed')
st.title('Character-Level CNN Predict & Explain:')
st.text('Select Model:')
model_in = st.selectbox('Models:', ['Yelp-Review-Polarity', 'AG-News-Category-Classifier'], index=0)
# widget slider for choosing number of top n important words in making decision
slider_range=50
slider_start_value=5
top_n_words = 5
sentence = st.text_input('Enter Sentence:', value="Like any Barnes & Noble, it has a nice comfy cafe, and a large selection of books. The staff is very friendly and helpful. They stock a decent selection, and the prices are pretty reasonable. Obviously it's hard for them to compete with Amazon. However since all the small shop bookstores are gone, it's nice to walk into one every once in a while.")
col_library = {'positive': '#FF0000', 'negative': '#0000FF'}
if model_in == 'Yelp-Review-Polarity':
prediction, probs, heatmap = deploy.explain(sentence, model='yelp')
st.text('--------------------------------')
if prediction == 0:
st.text("The Prediction is Negative")
else:
st.text("The Prediction is Positive")
st.text('--------------------------------')
st.text('Class Probabilities:')
dataframe = pd.DataFrame(
np.array([probs]),
columns=('Negative', 'Positive'))
st.dataframe(dataframe.style.highlight_max(axis=0))
else:
prediction, probs, heatmap = deploy.explain(sentence, model='ag_news')
st.text('--------------------------------')
if prediction == 0:
st.text("The Prediction is World")
elif prediction == 1:
st.text("The Prediction is Sports")
elif prediction == 2:
st.text("The Prediction is Business")
else:
st.text("The Prediction is Sci/Tech")
st.text('--------------------------------')
st.text('Class Probabilities:')
dataframe = pd.DataFrame(
np.array([probs]),
columns=('World', 'Sports', 'Business', 'Sci/Tech'))
st.dataframe(dataframe.style.highlight_max(axis=0))
words = [i[0] for i in heatmap]
vals = [i[1] for i in heatmap]
spaces = [True] * (len(words) - 1)
spaces.append(False)
doc = Doc(deploy.nlp.vocab, words=words, spaces=spaces)
ents = []
tags = []
for j, i in enumerate(doc):
new_ent = Span(doc, j, j + 1, label=str(j))
ents.append(new_ent)
tags.append(str(j))
doc.ents = []
doc.ents = ents
col_library = {'positive': '#FF0000', 'negative': '#0000FF'}
colors = [to_rgba(col_library['positive'], x) if x >= 0 else to_rgba(col_library['negative'], x) for x in vals]
tags = tuple(list(map(lambda x: ''.join(list(map(lambda y: y.upper(), x.split('_')))), tags)))
col_dict = {}
for i in range(len(tags)):
col_dict[tags[i]] = colors[i]
for i in range(len(heatmap)):
heatmap[i] += (str(i),)
heatmap_neg = list(sorted(list(filter(lambda x: x[1] < 0, heatmap)), key=lambda x: x[1]))
heatmap_pos = list(sorted(list(filter(lambda x: x[1] >= 0, heatmap)), key=lambda x: x[1], reverse=True))
visualize_ner(doc, labels=tags, colors=col_dict, show_table=True, title='Character2Word Attention Heatmap:')
| true
|
380e290674a1b6fdc635af6215a6fba1ef250671
|
Python
|
CO18325/UNIVARIATE-LINEAR-REGRESSION
|
/script.py
|
UTF-8
| 9,231
| 3.546875
| 4
|
[] |
no_license
|
import matplotlib.pyplot as plt
plt.style.use('ggplot')
''' %matplotlib inline
%matplotlib inline sets the backend of matplotlib to the 'inline' backend:
With this backend, the output of plotting commands is displayed inline within
frontends like the Jupyter notebook, directly below the code cell that produced
it. The resulting plots will then also be stored in the notebook document.
'''
import numpy as np
import pandas as pd
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
# THIS FUNCTION DISPLAYS THE RAW DATA GIVEN TO US
# IN THE FORM OF A SCATTER PLOT
# THIS WILL HELP US TO UNDERSTAND THE DATA BETTER
def visualize_raw_data(data):
# X-AXIS WILL CONTAIN THE POPULATION
# Y-AXIS WILL CONTAIN THE PROFIT
x = 'Population'
y = 'Profit'
title = 'POPULATION(in 10000) V/S PROFIT(in $10000)'
# NOW WE CALL THE FUNCTION FROM SNS LIBRARY
graph = sns.scatterplot(x=x,y=y,data=data)
graph.set_title(title)
# MATPLOTLIB FUNCTION TO SHOW ALL THE GRAPHS
plt.show()
#FUNCTION COMPUTE COST
# X,y,y_pred ARE ALL MATRICES AND ALL OPERATIONS ARE MATRICE OPERATIONS
# EVEN THETA IS ALSO A MATRICE
#COST J(theta) IS PARAMETRIZED BY THETA MATRICE AND NOT X or y!!!
def cost_function(X,y,theta):
# I.E. NO. OF ENTRES IN THE DATA SET
m = len(y)
# THIS IS THE MATHEMATICAL DOT PRODUCT
# TO OBTAIN PREDICTION WRT TO THETA AND X
y_pred = X.dot(theta)
# FIND THE SQUARED ERROR
sq_error = (y_pred - y) ** 2
# AND THEN WE FINALLY RETURN THE COST FUNCTION CALCULATED
return 1/(m * 2) * np.sum(sq_error)
# GRADIENT DESCENT FUNCTION
# TO CALCULATE THE MINIMUM COST
# WE WILL USE AN ALGORITHM CALLED BATCH GRADIENT DESCENT
# WITH EACH ITERATION IN THIS ALGO THE PARAMETERS I.E. THETA COMES CLOSER TO THEIR OPTIMAL VALUE
def gradient_descent(X,y,theta,alpha,iterations):
m = len(y)
# WE WILL STORE THE COST FUNCTIONS CALCULATED EACH TIME HERE
# THIS IS JUST TO VISUALIZE OUR CONVERGENCE TOWARDS OPTIMAL COST FUNCTION
# WRT THE ITERATIONS DONE
costs = []
for i in range(0,iterations):
# CALCULATE THE PREDICTION WRT TO theta
y_pred = X.dot(theta)
# CALCULATE THE SUBMASSION PART OF THE GRADIENT DESENCT
# GO BACK TO SLIDE
error = np.dot(X.transpose(),(y_pred - y))
# CHANGE THE THETA FOR ACHEIVING THE OPTIMAL COST FUNCTION
theta -= alpha * error * (1/m)
# STORE THE COST FUNCTION OBTAINED IN OUR costs VARIABLE
# FOR FUTURE VISULAIZATIONS
costs.append(cost_function(X,y,theta))
# AND FINALLY RETURN THE OPTIMAL THETA FOUND
# AND THE COSTS STORED OVER ITERATIONS
return theta,costs
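# IN MATRIX FORM THE UPDATE ABOVE IS: theta := theta - (alpha/m) * X^T (X theta - y),
# WHICH IS EXACTLY error = X^T (y_pred - y) FOLLOWED BY theta -= alpha * error * (1/m).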
# VISUALIZING THE COST FUNCTION ON A 3D GRAPH
# IN THIS FUNCTION WE ARE EXPLICITLY GIVING THETA VALUES TO THE COST FUNCTION
# FOR THESE THETA VALUES THE COST FUNCTION GIVES US THE COST
# WHICH IS STORED IN AN ARRAY (cost_values)
# THESE VALUES ARE NOW SENT TO THE graph_formation() FUNCTION
# IN graph_formation() THE ARRAY OF COST STORED IS USED TO CONSTRUCT A 3D GRAPH
# THE PURPOSE OF VISUALIZATION IS TO SEE HOW GRADIENT FUNCTION WILL MOVE
def visualize_cost_function(X, y):
# THE BELOW FUNCTION np.linespace DEFNES THAT
# RETURN 100 EQUISACED VALUES B/W -10 AND 10
theta_0 = np.linspace(-10,10,100)
theta_1 = np.linspace(-1,4,100)
# DEFINE A MATRICE TO STORE THETA VALUES
#MATRICE OF SIZE THETA_0 X THETA_1
# THIS IS DONE TO INCORPORATE EACH AND EVERY COMBINATION OF THETAS
cost_values = np.zeros((len(theta_0),len(theta_1)))
for i in range(len(theta_0)):
for j in range(len(theta_1)):
# GET CURRENT THETA VALUES
specific_theta = np.array([theta_0[i], theta_1[j]])
# CALCULATE THE COST BASED ON THE ABOVE DUMMY THETA VALUES
cost_values[i,j] = cost_function(X, y, specific_theta)
# NOW WE CAN FORM THE GRAPH
graph_formation(theta_0,theta_1,cost_values)
def graph_formation(theta_0,theta_1,cost_values):
fig = plt.figure(figsize=(12,8))
# DEFINIG GRAPH TYPE
graph = fig.gca(projection='3d')
surf = graph.plot_surface(theta_0,theta_1,cost_values,cmap='viridis')
# COLORBAR OR THE LEGEND
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.xlabel('THETA_0')
plt.ylabel('THETA_1')
graph.set_zlabel('J OF THETA')
# TO GIVE INITIAL ANGLE TO THE GRAPH FOR BETTER VIEW
graph.view_init(30,330)
plt.show()
# PLOT THE CONVERGENCE
# IN THIS GRAPH WE ARE GOING TO PLOT J(THETA) AGAINST NO. OF ITERATIONS
# THE OBJECTIVE IS TO VIEW HOW J(THETA) IS MOVING TOWARDS MINIMUM VALUE IN EACH ITERATION
# THE Costs(i.e. J(THETA)) HAVE BEEN ALREADY CALCULATED USING gradient_descent FUNCTION
# ARGUMENTS : Cost AND No. of Iterations
def convergence_grah(costs, iterations):
plt.plot(costs)
plt.xlabel('ITERATIONS')
plt.ylabel('J OF THETA')
plt.title('COST FUNCTION VALUES VS ITERATIONS OF GRADIENT DESCENT')
plt.show()
# THIS FUNCTION CONSTRUCTS TH BEST FIT LINEAR REGRESSION LINE ON OUR DATA
# IT USES THE FINAL VALUES OF THE THETA
def regression_fit(data, theta):
# FIRST WE WILL CONSTRUCT THE SCATTER PLOT WITH OUR DATA
x = 'Population'
y = 'Profit'
title = 'REGRESSION FIT'
graph = sns.scatterplot(x=x,y=y,data=data)
graph.set_title(title)
# NOW WE WILL OVERLAY THE REGRESSION LINE
#THETA IS A 2*1 ARRAY WHICH IS NOT FIT FOR MATRIC MULTIPLICATION
# SO, WE NEED TO SQUEEZE THE THETA MATRICE
theta = np.squeeze(theta) # NOW THETA IS 1*2 MATRICE OR WE CAN SAY AN ARRAY
# NOW GETTING THE POINTS FOR THE LINEAR REGRESSION LINE
x_value = [x for x in range(5,25)] # 5-25 AS OUR POPULATION IS BETWEEN THIS RANGE
y_value = [(x*theta[1] + theta[0]) for x in x_value] # PREDICTED VALUES FOR TRAINING DATA
#SEABORN FUNCTION TO CONSTRUCT THE LINE
sns.lineplot(x_value, y_value)
plt.show()
# THIS FUNCTION WILL PREDCT THE PROFIT FOR UNKNOWN POPULATION
# USING THE FINAL THETA VALUES
# IT ALSO TAKES THE 2*1 X MATRICE AS ARGUMENT
# 2*1 MATRICE FOR SUCCESSFUL DOT PRODUCT WITH THETA
def predict_data(theta, X):
theta = np.squeeze(theta)
#print(theta.transpose())
#print(X)
y_pred = np.dot(theta.transpose(), X)
return y_pred
#############################################################################
def main():
# THIS IS THE SIZE OF THE WINDOW THAT WILL OPEN TO SHOW ALL THE GRAPHS
plt.rcParams['figure.figsize'] = (12,8)
# DATA RECEIVED
# DATA RECEIVED IS M0STLY IN THE FORM OF DATA FRAME
# DATA FRAME IS VERY USEFUL DATASTRUCTURE IN PANDAS LIBRARY IN TERMS OF DATA SET MANAGEMENT
data = pd.read_csv('bike_sharing_data.txt')
# TO PRINT FIRST FIVE ENTRIES OF THE DATASET TO UNDERSTAND THE DATA BETTER
print(data.head())
#POPULATION OF CITIES IN 10000s
#PROFIT IN UNITS 10000 DOLLARS
#TO GET MORE INFO ABOUT THE CSV FILE:-
# THIS WILL HELP US UNDERSTAND THE DATA TYPES OF THE DATA
# AND ALSO CHECK IF THERE IS ANY NULL VALUES PRESENT HERE
print(data.info())
#VISUALIZATION OF DATA
visualize_raw_data(data)
#######################################################################################
#SETTING UP REGRESSION VARIABLES:
#I.E. NO. OF ENTRIES
m = data.Population.values.size
#X is a Matrice of 1's and the Population Column
# 1's is to accomodate the Theta0 i.e. the Interept
# X is a m * 2 Matrice
X = np.append(np.ones((m,1)), data.Population.values.reshape(m,1), axis=1)
# y is the data set of the profits. It is also m*1 Matrice
y = data.Profit.values.reshape(m,1)
#theta is a 2*1 Matrice
#Initializing the values of theta with ZERO!
# Although it is not a very good practice
theta = np.zeros((2,1))
# JUST TO TEST THE COST FUNCTION
print(cost_function(X, y, theta))
#######################################################################################
#LEARNING RATE
alpha = 0.01
#NO. OF ITERATION FOR CONVERGENCE
iterations = 2000
theta, costs = gradient_descent(X, y, theta, alpha, iterations)
# TO CHECK THE VALUE OF h(x) AND FINAL VALUE OF THE THETA MATRICE
print("h(x) = {} + {}x1".format(str(round(theta[0,0],2)), str(round(theta[1,0],2))))
#VISUALIZE THE COST FUNCTION WITH EXPLICIT THETA VALUES IN A 3D GRAPH
visualize_cost_function(X, y)
# VISUALIZE THE COST WITH RESPECT TO NUMBER OF ITERATIONS OF GRADIENT DESCENT
convergence_grah(costs, iterations)
# TO VISUALIZE THE REGRESSION LINE
regression_fit(data, theta)
print(theta)
# PREDICT THE RESULTS FOR UNKNOWN VALUES
input_population = float(input('ENTER THE POPULATION IN 10000: '))
# CONVERTING THE INPUT TO 2*1 MATRICE FOR DOT PRODUCT
input_population_matrice = np.append( 1, input_population)
#print(input_population_matrice)
predicted_profit = predict_data(theta, input_population_matrice)
print(predicted_profit)
print('PROFIT FOR POPULATION OF {} WOULD BE Rs{}'.format(str(round(input_population*10000)), str(round(predicted_profit*1000))))
main()
| true
|
34baa5bf047df6437a509d3617678b56fdd1ab14
|
Python
|
kymy86/machine-learning
|
/nb_trainer.py
|
UTF-8
| 5,026
| 3.234375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/python3
import re
import pickle
import os
from random import randrange
from pathlib import Path
from math import log10
from logger import Logger
class Trainer(Logger):
_DATASET = 'dataset/SMSSpamCollection'
STOREDATA = 'dataset/MemoryTrainingData'
STORETESTDATA = 'dataset/MemoryTestData'
#rejection treshold
_TRESHOLD = 100
# document dataset
_dataset = []
# list of test data
test_data = []
#occurences of each word in each document
words_counter = []
# dictionary with all words in dataset and spam/ham probability
dictionary = []
# tot number of spam
m_spam = 0
# tot number of ham
m_ham = 0
# tot number of documents
m_tot = 0
n_words = 0
offset = 0
w_spam = 0
#tot number of ham
w_ham = 0
def __init__(self, dataset_size=0.7):
super(Trainer, self).__init__()
if not self._is_memory():
self._load_data(dataset_size)
self._compute_dict()
self._init_params()
def _load_data(self, split_ratio):
"""
Load the sample data to train the
algorithm
"""
with open(self._DATASET, 'r') as dataset_filename:
lines = dataset_filename.readlines()
self._dataset = [line.strip("\n").split("\t") for line in lines]
self._compute_train_test_dataset(split_ratio)
def _compute_train_test_dataset(self, split_ratio):
dataset_size = int(len(self._dataset)*split_ratio)
train_set = []
test_set = list(self._dataset)
while len(train_set) < dataset_size:
index = randrange(len(test_set))
train_set.append(test_set.pop(index))
self._dataset = train_set
self.test_data = test_set
def _compute_dict(self):
for document in self._dataset:
words = self.get_list_words(document[1])
# count the occurences of each word in each document
self.words_counter.append({i:words.count(i) for i in words})
for word in words:
if word not in self.dictionary:
#add words in the dictionary, by initializing them with score of 1
self.dictionary.append({'p_spam':1, 'p_ham':1, 'word':word})
def _init_params(self):
#count the number of spam/ham documents
for item in self._dataset:
if item[0] == 'spam':
self.m_spam += 1
else:
self.m_ham += 1
self.m_tot = len(self._dataset)
self.n_words = len(self.dictionary)
self.w_spam = self.n_words
self.w_ham = self.n_words
# offset the rejection treshold
self.offset = log10(self._TRESHOLD)+log10(self.m_ham)-log10(self.m_spam)
@staticmethod
def get_list_words(document):
"""
From the given document, create a list of
allow words. Remove the single char words
an the most frequent words
"""
regex = '(the|to|you|he|she|only|if|it|[.,#!?]|)'
words = re.sub(regex, '', document, flags=re.IGNORECASE).lower().split()
for word in words:
if re.match('[a-z0-9]{1}', word, flags=re.IGNORECASE):
words.remove(word)
return words
def train_agent(self):
"""
From the dictionary loaded,
start to train the agent
"""
if not self._is_memory():
for idx, line in enumerate(self._dataset):
#assign a score to each word
if line[0] == 'spam':
self._count_occurences('p_spam', idx)
else:
self._count_occurences('p_ham', idx)
self._normalization()
self._store_samples_in_memory()
def _normalization(self):
#normalize count to get the probabilities
for word in self.dictionary:
word['p_ham'] = word['p_ham']/self.w_ham
word['p_spam'] = word['p_spam']/self.w_spam
def _count_occurences(self, key, idx):
for word in self.dictionary:
if word['word'] in self.words_counter[idx]:
# counter is the number of times a given word appear in the given document
counter = self.words_counter[idx][word['word']]
word[key] = word[key] + counter
if key == 'p_spam':
self.w_spam = self.w_spam + counter
else:
self.w_ham = self.w_ham + counter
def _store_samples_in_memory(self):
pickle.dump(self.dictionary, open(self.STOREDATA, 'wb'))
pickle.dump(self.test_data, open(self.STORETESTDATA, 'wb'))
def _is_memory(self):
memory_name = Path(self.STOREDATA)
return memory_name.is_file()
def reset_memory(self):
"""
Reset memory and re-train the agent
"""
if self._is_memory():
os.remove(self.STOREDATA)
os.remove(self.STORETESTDATA)
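# Usage sketch (assumes the SMSSpamCollection dataset file and the Logger base class are available):
#   trainer = Trainer(dataset_size=0.7)
#   trainer.train_agent()   # fills trainer.dictionary with per-word p_spam / p_ham scores and pickles them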
| true
|
76f69e43d157bd6b7a8cfba0dbba871d90533482
|
Python
|
brandonkim0511/Python
|
/class/loop.py
|
UTF-8
| 1,817
| 3.3125
| 3
|
[] |
no_license
|
# while 1 == 2 : print("Chaeyoung isnot the most beautiful in Twice ")
# adj = ["red", "big", "tasty"]
# fruits = ["apple", "banana", "cherry"]
#
# for x in adj:
# print(x)
# for y in fruits:
# print(y)
# number1 = [1, 2, 3, 4]
# number2 = [1, 2, 3, 4]
# number3 = [1, 2, 3, 4]
# number4 = [1, 2, 3, 4]
#
# cnt = 0
#
#
# for a in number1:
# for b in number2:
# for c in number3:
# for d in number4:
# cnt += 1
# print(a, b, c, d, cnt)
#
#
# print(cnt)
#
#
# while True :
# input("sdgfdf\n")
#
# oddNumberList = [1]
#
# limit = int(input(" set boundary : "))
#
# while True:
# nextval = (oddNumberList[len(oddNumberList)- 1] + 2)
# if nextval > limit:
# break
# oddNumberList.append(nextval)
#
# print(oddNumberList)
fibonacciList = [1, 1]
#
limit = int(input(" set boundary : "))
while True:
newVal = fibonacciList[len(fibonacciList) - 1] + fibonacciList[len(fibonacciList) - 2]
if newVal > limit:
break
fibonacciList.append(newVal)
print(fibonacciList)
# prepUpgraded = range(limit) # [0, 1, 2, 3, 4 , ...]
# finalArr = []
#
#
# for x in prepUpgraded :
# temp = 0
# for idx in range(prepUpgraded.index(x)): #[0,1]
# temp += prepUpgraded[idx]
# finalArr.append(temp)
#
# print(finalArr)
# fibonacciListUpgraded = []
# result = 0
#
# for x in fibonacciList:
# print(x)
# print('||||||||')
# for idx in range(fibonacciList.index(x)):
# print(range(fibonacciList.index(x)))
# print(idx)
# print(fibonacciList[idx])
# print("--------")
# # result = result + x
# result += fibonacciList[idx]
# fibonacciListUpgraded.append(result)
#
# print(fibonacciListUpgraded)
#
#
#
# x = 1
# while x < 1000:
# print(x)
# x += x
| true
|
ee8a8a3920b21e40d425ef7e940bb7b24835cdb4
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03993/s210070602.py
|
UTF-8
| 445
| 3.078125
| 3
|
[] |
no_license
|
import sys
import collections
def swap(t):
if t[1] < t[0] :
return (t[1],t[0])
else:
return t
n = int(sys.stdin.readline().rstrip())
a = [int(x) for x in sys.stdin.readline().rstrip().split()]
zippeda = list(map(swap,list(zip(range(1,n+1),a))))
zippeda.sort()
c = collections.Counter(zippeda)
counter = 0
for a in c.most_common():
if a[1] < 2:
break
else:
counter = counter + 1
print(counter)
| true
|
64aeb331d52223e2b3dce26416ad6fede258ef57
|
Python
|
hacklinshell/learn-python
|
/进程和线程/do_threadLocal.py
|
UTF-8
| 1,144
| 3.75
| 4
|
[] |
no_license
|
import threading
# A ThreadLocal variable is a global variable, but each thread can only read and write its own independent copy, so threads never interfere with each other. ThreadLocal solves the problem of passing values between the functions of a single thread.
loca_school = threading.local() # the global variable loca_school is a ThreadLocal object
def process_student():
    std = loca_school.student # every thread may read and write the student attribute; each attribute such as loca_school.student is local to the current thread, so no locking is needed - ThreadLocal handles that internally
    print('hello , %s (in %s )' % (std,threading.current_thread().name))
def prcess_thread(name):
    loca_school.student = name # loca_school behaves like a per-thread dict: besides loca_school.student you can bind other attributes, e.g. loca_school.teacher
    process_student()
t1 = threading.Thread(target=prcess_thread,args=('Alice',),name='thread-1')
t2 = threading.Thread(target=prcess_thread,args=('BOB',),name='thread-2')
t1.start()
t2.start()
t1.join()
t2.join()
#hello , Alice (in thread-1 )
#hello , BOB (in thread-2 )
| true
|
8e743394c2379246bb2b70f092802d3d23dd1709
|
Python
|
kulkarniharsha/my_code
|
/FAC_SVM.py
|
UTF-8
| 2,330
| 3.6875
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
"""Playing with Harshvardhan's SVM"""
""" Lets see what we have."""
import scipy
import pandas as pd
import matplotlib.pyplot as plt
import sklearn, sklearn.svm
import random
"""We will import data from "final.xlsx" into the Pandas dataframe df."""
df = pd.read_excel("final.xlsx", sheetname="Sheet1", skiprows=1)
"""We want to use "Temperature", "pH", "CrContent", "DO" and "FluidVelocity"
to predict "MassTransferCoefficient".The former
we club in a list called Xvars and the latter is Yvar."""
Xvars = ["temperature","pH","CrContent","DO","FluidVelocity"]
Yvar = "MassTransferCoefficient"
"""We will have to divide the dataset into training and validation sets.
The training set is 70% of the data and the validation is the rest.
We will sample randomly from the data for the training set."""
list_indices = scipy.array(list(df.index))
frac_train = 0.9
n_train = int(len(df)*frac_train)
training_indices = random.sample(list(list_indices), n_train) #Sample n_train items from list_indices without replacement
validation_indices = scipy.array(list(set(list_indices) - set(training_indices))) #Subtracted two sets
len(validation_indices), len(training_indices)
"""Now that we have the indices, lets split the dataset into training and validation sections.
Fit the SVM on the training section and test the fit on the validation section."""
df_training = df.iloc[training_indices]
df_validation = df.iloc[validation_indices]
Xvars_training = df_training[Xvars].as_matrix()
Yvar_training = df_training[Yvar].as_matrix()
Xvars_validation = df_validation[Xvars].as_matrix()
Yvar_validation = df_validation[Yvar].as_matrix()
"""Now lets get the SVM """
clf = sklearn.svm.SVR(C = 100.0, gamma = 0.0009, epsilon = 0.00089, kernel='rbf') #make our SVM object. Call it whatever you want
"""Lets train it on the training set """
clf.fit(Xvars_training, Yvar_training)
"""And now lets see how well it fits."""
Yvar_pred = clf.predict(Xvars_validation)
plt.plot(Yvar_validation, Yvar_pred, 'ro')
plt.plot([0, max(Yvar_validation)],[0, max(Yvar_validation)], 'b')
plt.show()
"""Let's get a correlation:"""
r, p = scipy.stats.pearsonr(Yvar_validation, Yvar_pred)
"""Percent correlation is:"""
r_sqr=r**2*100
print (r)
print(r_sqr)
| true
|
3ee525fda461634088fc291225738ac33458a6a9
|
Python
|
abu-bakarr/holbertonschool-web_back_end
|
/0x00-python_variable_annotations/3-to_str.py
|
UTF-8
| 171
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
"""convert to string
transform to string a value
"""
def to_str(n: float) -> str:
"""takes n and returns his string form"""
return str(n)
| true
|
5040cbd883bdd87066fb9ed7aa00a4748bb6428c
|
Python
|
PaulaG9/MSFCalc
|
/calc/functions.py
|
UTF-8
| 756
| 2.8125
| 3
|
[] |
no_license
|
import pandas as pd
def getNetPatients(numpatients, duration, monincrease):
if duration/30>1:
final_mth_patients=numpatients+(round(duration/30)-1)*monincrease
net_patients=((numpatients+final_mth_patients)*round(duration/30))/2
else:
net_patients=numpatients+monincrease
return net_patients
def getEstimate(net_patients, duration, frequency, units, packaging, packaging_size):
if packaging=='bottle':
estimate=(net_patients * duration * frequency * units* 5)/packaging_size
else:
estimate=(net_patients * duration * frequency * units)
return estimate
| true
|
c39cc4fc83896753683d71ceb2b4c07b16d67eac
|
Python
|
JackTJC/LeetCode
|
/sort_alg/HeapSort.py
|
UTF-8
| 969
| 3.25
| 3
|
[] |
no_license
|
from typing import List
class Solution:
def smallestK(self, arr: List[int], k: int) -> List[int]:
        # Top-K problem, solved with a max-heap (heap-sort idea)
def adjustHead(heap, i, length):
temp = heap[i]
k = 2 * i + 1
while k < length:
if k + 1 < length and heap[k] < heap[k + 1]:
k += 1
if heap[k] > temp:
heap[i] = heap[k]
i = k
else:
break
k = 2 * k + 1
heap[i] = temp
        # Maintain a max-heap of size k
if k == 0:
return []
if k >= len(arr):
return arr
heap = arr[:k]
for i in range(int(k / 2) - 1, -1, -1):
adjustHead(heap, i, k)
for i in range(k, len(arr)):
if arr[i] < heap[0]:
heap[0] = arr[i]
adjustHead(heap, 0, k)
return heap
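# Example: Solution().smallestK([1, 3, 5, 7, 2, 4, 6, 8], 4) returns the four smallest
# values {1, 2, 3, 4}, in max-heap order rather than sorted order.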
| true
|
ed48571077750543079a11c8452aa1eb7b362d43
|
Python
|
SonicMadushi/Week-01
|
/codes/7.0 Numpy.py
|
UTF-8
| 333
| 3
| 3
|
[] |
no_license
|
import numpy as np
a=np.array(([1,2,3],[4,5,6]))
#print(a.shape)
b=np.ones((5,2),dtype=np.int)
#b=np.zeros((5,2),dtype=np.int)
#print(b)
c=np.random.randint(0,5,(4,10))
#print(c)
x=np.random.randint(0,10,(1000,500))
y=np.random.randint(0,10,(500,1000))
#print(x)
#print(y)
z=np.matmul(x,y)
print(z)
| true
|
231c588aebe7f66eae4c9556c27eccaa8fcdc46e
|
Python
|
huidou74/CMDB-01
|
/hc_auth/auth_data.py
|
UTF-8
| 3,998
| 2.890625
| 3
|
[] |
no_license
|
#!/usr/bin/python
#-*- coding:utf8 -*-
#BY: H.c
def menu_auth(host_obj,request):
    obj_all = host_obj.values('name',  # the object must be a queryset when calling values()
                              'pos__name',
                              'pos__auth__url',   # URL pattern of the permission
                              'pos__auth__name',  # display name of the permission
                              'pos__auth__to_display__url',   # URL of the related "display/list" permission
                              'pos__auth__to_display__name',  # name of the related "display/list" permission
                              'pos__auth__group__name',       # permission group name
                              'pos__auth__group__ti__title',  # menu title
                              )
    menu_dict = {}  # custom dict that will hold the menu data
    for i in obj_all:
        menu_auth_dict = {  # temporary dict
            'url': i.get('pos__auth__url'),  # copy the values pulled from the database into the dict
            'name': i.get('pos__auth__name'),
            'display_url': i.get('pos__auth__to_display__url'),  # to_display holds the ID of the single "list" permission this one maps to,
            'display_name': i.get('pos__auth__to_display__name'),  # and that ID carries the corresponding url + name
        }  # e.g. {'url': '/auth/update/(\\d+)/', 'name': 'edit permission', 'display_url': '/auth/list/', 'display_name': 'list permissions'}
        # print (i.get('pos__auth__group__ti__title'),menu_dict.keys())  # menu title ( host , dict_keys([]) )
        if i.get('pos__auth__group__ti__title') in menu_dict.keys():  # the dict is empty on the first pass, so the else branch runs first
            if not i.get('pos__auth__to_display__name'):  # on later passes the dict has entries; check the to_display one-to-many relation
                # "not None" is truthy: an empty value means a parent menu, a value means a child of that parent (hierarchy)
                menu_dict[i.get('pos__auth__group__ti__title')]['lower'].append(menu_auth_dict)
                # 'lower' holds a list [{},{},{}]; append the new dict to it -> append(dict)
        else:
            menu_dict[i.get('pos__auth__group__ti__title')] = {}  # create the new dict entry  # {'host': {}}
            menu_dict[i.get('pos__auth__group__ti__title')]['title'] = i.get('pos__auth__group__ti__title')
            # print (menu_dict[i.get('pos__auth__group__ti__title')])  # {'title': 'host'}
            # print (menu_dict[i.get('pos__auth__group__ti__title')]['title'] )  # title -> host
            # print (i.get('pos__auth__to_display__name'))  # -> None, nothing to fetch
            if not i.get('pos__auth__to_display__name'):  # "not None" is truthy
                menu_dict[i.get('pos__auth__group__ti__title')]['lower'] = [menu_auth_dict, ]
                # create lower -> {'lower': [{}]}
            else:
                menu_dict[i.get('pos__auth__group__ti__title')]['lower'] = []
                # i.get('pos__auth__to_display__name') has a value, so start with an empty list
    # print('menu --- ', menu_dict)
    request.session['menu_dict']=menu_dict  # store in the session
    request.session.set_expiry(0)  # 0 means the session is cleared when the browser closes; (10) would mean 10 seconds
auth_dict = {}
for i in obj_all:
if i.get('pos__auth__group__name') in auth_dict.keys():
auth_dict.get(i.get('pos__auth__group__name')).get('url').append(i.get('pos__auth__url'))
            # append each url to the list, building the value for that permission group: {'user table': {url: ['a','b','c',]}, }
        else:
            auth_dict[i.get('pos__auth__group__name')] = {'url': [i.get('pos__auth__url'), ], }
            # permission group names become the keys of auth_dict; the group's urls become the values
    # print('permissions --- ', auth_dict)
    request.session['auth_dict']=auth_dict  # store in the session
    request.session.set_expiry(0)  # 0 means the session is cleared when the browser closes; (10) would mean 10 seconds
| true
|
9420cdae068bd89c05621749680673c187e4c3ef
|
Python
|
Shank2358/Loistic-Regression
|
/Logistic Regression/.idea/Logistic Regression.py
|
UTF-8
| 2,793
| 3.5625
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
from numpy import *
from os import listdir
# data=[]
# label=[]
def loadData(direction):
print(direction)
dataArray = []
labelArray = []
trainfileList = listdir(direction)
m = len(trainfileList)
for i in range(m):
filename = trainfileList[i]
fr = open('%s/%s' % (direction, filename))
for line in fr.readlines():
lineArr = line.strip().split()
dataArray.append([float(lineArr[0]),float(lineArr[1])])
labelArray.append([int(lineArr[2])])
fr.close()
# data=transpose(dataArray)
# label = transpose(labelArray)
# print(data)
# print(label)
return dataArray, labelArray
# the sigmoid(inX) function
def sigmoid(inX):
return 1.0 / (1 + exp(-inX))
# Compute the regression coefficients by gradient descent; alpha is the step size, maxCycles the number of iterations.
# def gradAscent(dataArray, labelArray, alpha, maxCycles):
# dataMat = mat(dataArray) # size:m*n
# labelMat = mat(labelArray) # size:m*1
# m, n = shape(dataMat)
# weigh = ones((n, 1))
# for i in range(maxCycles):
# h = sigmoid(dataMat * weigh)
# error = labelMat - h # size:m*1
# weigh = weigh + alpha * dataMat.transpose() * error
# return weigh
def gradAscent(dataArray, labelArray, alpha, maxCycles):
dataMat = mat(dataArray) # size:m*n
labelMat = mat(labelArray) # size:m*1
m, n = shape(dataMat)
weigh = mat(ones((n, 1)))
for i in range(maxCycles):
h = sigmoid(dataMat * weigh)
error = labelMat - h # size:m*1
weigh = weigh + alpha * dataMat.transpose() * error
return weigh
# Classification function: predict the test samples with the parameters weigh and compute the error rate
def classfy(testdir, weigh):
dataArray, labelArray = loadData(testdir)
dataMat = mat(dataArray)
labelMat = mat(labelArray)
h = sigmoid(dataMat * weigh) # size:m*1
print(h)
m = len(h)
error = 0.0
for i in range(m):
if int(h[i]) > 0.5:
print(int(labelMat[i]), 'is classfied as: 1')
if int(labelMat[i]) != 1:
error += 1
print('error')
else:
print(int(labelMat[i]), 'is classfied as: 0')
if int(labelMat[i]) != 0:
error += 1
print('error')
print('error rate is:', '%.4f' % (error / m))
"""
用loadData函数从train里面读取训练数据,接着根据这些数据,用gradAscent函数得出参数weigh,最后就可以用拟
合参数weigh来分类了。
"""
def digitRecognition(trainDir, testDir, alpha, maxCycles):
data, label = loadData(trainDir)
weigh = gradAscent(data, label, alpha, maxCycles)
classfy(testDir, weigh)
# run it
digitRecognition('train', 'test', 0.01, 50)
| true
|
47637d0ff5c637740250c1650bbafd61f4ca8192
|
Python
|
brunner-itb/masters
|
/classes_backup.py
|
UTF-8
| 14,160
| 2.546875
| 3
|
[] |
no_license
|
class InitialCondition(Expression):
def eval_cell(self, value, x, ufc_cell):
value[0] = np.random.rand(1)
u_D = Expression("rand()/100000", degree=1)
class FEMMesh:
'A class which should be able to incorporate all meshes, created or given, and provides all necessary parameters and values'
def __init__(self, Mesh, Name, Gender, Source, h_real, u_max_start, SaveFile): #createMesh = True):
#if createMesh == False:
#read in the given mesh
self.mesh = Mesh
#define name for FileName purposes
self.name = Name
#define the gender
self.gender = Gender
#get the h value
self.h_real = h_real
#make it a dolfin Constant:
self.h = Constant(h_real)
#define the u_max_start value, the max concentration of Cdc42, used for h calculation:
self.u_max_start = u_max_start
#create the File to save the calculated data in
try:
self.saveFile = File(SaveFile)
except RuntimeError:
self.saveFile = XDMFFile(SaveFile)
self.mesh = Mesh
#Make it an only surface mesh, unordered, meaning every normal vector points outwards
self.boundaryMesh = BoundaryMesh(Mesh, 'exterior', False)
#write it to File
#self.fileName = 'mesh_%s_unordered.xml' % self.name
#File(self.fileName) << self.boundaryMesh
#parse it back in to extract the Orientation
#self.tree = ET.parse(self.fileName)
#self.triangles = self.tree.findall('mesh/cells/triangle')
#order the mesh so it can be iterated over
self.boundaryMesh.order()
#get vertex coordinates for growing purposes
self.coordinates = self.boundaryMesh.coordinates()
#initialize vertex edge connectivity
self.boundaryMesh.init(0,1)
#splice the mesh into x splices and determine which vertex to put into which splice_
#self.classArrayHoldingSlices = meshSlicing(self, amountOfSlices)
#save every cells orientation as an array
self.orientation = meshClassOrientation(self, straightLengthFactor)
#self.orientation = myCellOrientation(self, amountOfSlices, straightLengthFactor)
#get normalvector for every cell:
self.normalVectors = cellNormals(self.boundaryMesh)
#get the starting rmax() (inner radius of all cells, max) value to compare against to trigger Refinement
self.start_hmax = self.boundaryMesh.hmax()
#create a functionSpace, for future use
self.functionSpace = FunctionSpace(self.boundaryMesh, 'CG', 1)
#create trial and test-functions:
if activeSurfaceSource == True:
self.trialFunction = interpolate(Constant(0.0), self.functionSpace) #(u)
else:
self.trialFunction = interpolate(u_D, self.functionSpace) #interpolate(Constant(0.0), self.functionSpace)
self.testFunction = TestFunction(self.functionSpace) #(v)
#create function for solutions at current time-step: (u_1_n)
self.currentSolutionFunction = Function(self.functionSpace)
#define the meshes Source
self.source = Source
#define the meshes Stimulus, refers to global parameters as of now, should be changed in the future
#Element is given so dolfin evaluates the optimal quadrature degree according to the given Expression.
#testStimulus is for plotting in 2D
#self.stimulus = Expression('(1.0-h)*Ka/std::sqrt(pow(x[0] - source0, 2) + pow(x[1] - source1, 2) + pow(x[2] - source2, 2)) * exp(-std::sqrt(pow(x[0] - source0, 2) + pow(x[1] - source1, 2) + pow(x[2] - source2, 2))/std::sqrt(Ds/kb))',\
# element = self.functionSpace.ufl_element(), Ka = Ka, Ds = Ds, kb = kb, h=Constant(self.h_real), source0=self.source[0], source1=self.source[1], source2=self.source[2])
self.twoDStimulus = Expression('(1.0-h)*Ka/std::sqrt(pow(x[0] - source0, 2) + pow(x[1] - source1, 2)) * exp(-std::sqrt(pow(x[0] - source0, 2) + pow(x[1] - source1, 2))/std::sqrt(Ds/(kb * Ka/std::sqrt(pow(x[0] - source0, 2) + pow(x[1] - source1, 2)))))',\
element = self.functionSpace.ufl_element(), Ka = Ka, Ds = Ds, kb = kb, h=Constant(self.h_real), source0=self.source[0], source1=self.source[1])
self.stimulus = Expression('(1.0-h)*Ka/std::sqrt(pow(x[0] - source0, 2) + pow(x[1] - source1, 2) + pow(x[2] - source2, 2)) * exp(-std::sqrt(pow(x[0] - source0, 2) + pow(x[1] - source1, 2) + pow(x[2] - source2, 2))/std::sqrt(Ds/(kb * Ka/std::sqrt(pow(x[0] - source0, 2) + pow(x[1] - source1, 2) + pow(x[2] - source2, 2)))))',\
element = self.functionSpace.ufl_element(), Ka = Ka, Ds = Ds, kb = kb, h=Constant(self.h_real), source0=self.source[0], source1=self.source[1], source2=self.source[2])
#get the stimulusString, required for the user defined add function
self.stimulusString = self.getStimulusString()
#get the starting vertex and the corresponding cell:
if activeSurfaceSource == True:
self.stimulusCell = correspondingCell(self.boundaryMesh, self.coordinates[closestVertex(self.coordinates, self.source)])
        #init the PDE. Remember that the myRefinement function creates a new PDE so the PDE and the post-refinement mesh match.
        #For lack of a better approach these lines are simply duplicated there. Always change both if you want to make changes!
self.PDE = inner((self.currentSolutionFunction - self.trialFunction) / k, self.testFunction)*dx - Dm*inner(nabla_grad(self.trialFunction), nabla_grad(self.testFunction))*dx \
- (1.0-self.h)*(nu*k0 + (nu*K*self.trialFunction**2)/(Km**2 + self.trialFunction**2))*self.testFunction*dx + eta*self.trialFunction*self.testFunction*dx - self.stimulus*self.testFunction*dx
#create a variable that saves the inital u_sum, should be assigned only at n=0 of course
self.u_sum_initial = None
#get the needed maps:
#get a vertex to cells map, so which vertex is part of which cells
self.vertex_to_cells_map = vertex_to_cells_map(self)
#create array with vertices and corresponding normals:
self.vertex_normal_map = vertex_normal_map(self, self.vertex_to_cells_map, self.normalVectors)
#calculate the map to get the vertices to be grown. Which cell has which vertices
self.cell_to_vertices_map = cell_to_vertices_map(self)
#initialize the gradient, for later use
self.gradient = None
#calculate the startingvolumes of the cells to scale u_max later on. Since the volume is expanding more Cdc42 should be available
self.startVolume = self.getVolume()
#if activeSurfaceSource == False, a growthThreshold is needed
self.growthThreshold = None
#initialize the cellGrowthDeterminingArray, is used in the growth
self.cellGrowthDeterminingArray = None
#init list of vertices to Grow
self.verticesToGrow = None
#create CellFunction
self.cell_markers_boundary = MeshFunction('bool', self.boundaryMesh, self.boundaryMesh.topology().dim(), False) #CellFunction("bool", self.boundaryMesh)
#self.cell_markers_boundary.set_all(True)
self.isThereRefinementNecessary = False
self.hmin = self.boundaryMesh.hmin()
#force on vertex:
self.vertex_to_edges_map = vertex_to_edges_map(self)
self.initial_cell_edges_and_opposite_angle_map = cell_edges_and_opposite_angle_map(self)
#at creation these should be equal, so no need to calculate twice
self.cell_edges_and_opposite_angle_map = None #self.initial_cell_edges_and_opposite_angle_map #cell_edges_and_opposite_angle_map(self)
self.force_on_vertex_list = [None]*self.boundaryMesh.num_vertices()
self.Estar = None
self.turgor_pressure_on_vertex_list = [None]*self.boundaryMesh.num_vertices()
self.cell_volumes = cell_volumes(self)
#calculate the volume of the mesh
def getVolume(self):
return assemble(Constant(1)*Measure("dx", domain=self.boundaryMesh))
# helper function, the stimulus string is needed to add Stimuli and create fitting Expressions
def getStimulusString(self):
tempStimulusString = '(1.0-%s)*Ka/std::sqrt(pow(x[0] - %f, 2) + pow(x[1] - %f, 2) + pow(x[2] - %f, 2)) * exp(-std::sqrt(pow(x[0] - %f, 2) + pow(x[1] - %f, 2) + pow(x[2] - %f, 2))/std::sqrt(Ds/kb))' %(self.name ,self.source[0],self.source[1],self.source[2],self.source[0],self.source[1],self.source[2])
return tempStimulusString
def getStimulusStringWithH(self):
tempStimulusString = '(1.0-h)*Ka/std::sqrt(pow(x[0] - %f, 2) + pow(x[1] - %f, 2) + pow(x[2] - %f, 2)) * exp(-std::sqrt(pow(x[0] - %f, 2) + pow(x[1] - %f, 2) + pow(x[2] - %f, 2))/std::sqrt(Ds/kb))' %(self.source[0],self.source[1],self.source[2],self.source[0],self.source[1],self.source[2])
return tempStimulusString
# ONLY USED TO ADD THE STIMULI, NOT THE WHOLE CLASS!
def __add__(self, other):
if isinstance(self, FEMMesh):
if isinstance(other, FEMMesh):
#print 'self and other are FEMMesh'
newExpressionString = self.getStimulusString() + ' + ' + other.getStimulusString()
#print(newExpressionString)
#print("first source: ", self.source[:], 'second source: ', other.source[:])
kwargs = {'element' : self.functionSpace.ufl_element() ,str(self.name) : Constant(self.h_real), str(other.name) : Constant(other.h_real), "Ka" : Ka, 'Ds' : Ds, 'kb' : kb}
return Expression(newExpressionString, **kwargs)
elif isinstance(other, Expression):
#print 'self is FEMMesh, other is Expression'
newExpressionString = self.getStimulusString()
kwargs = {'element' : self.functionSpace.ufl_element() ,str(self.name) : Constant(self.h_real), "Ka" : Ka, 'Ds' : Ds, 'kb' : kb}
return Expression(newExpressionString, **kwargs) + other
# if the other is already a Sum of two Expressions, needed an extra case:
elif str(type(other)) == "<class 'ufl.algebra.Sum'>":
newExpressionString = self.getStimulusString()
kwargs = {'element' : self.functionSpace.ufl_element() ,str(self.name) : Constant(self.h_real), "Ka" : Ka, 'Ds' : Ds, 'kb' : kb}
return Expression(newExpressionString, **kwargs) + other
# elif isinstance(self, Expression):
# if isinstance(other, FEMMesh):
# #print 'self is Expression, other is FEMMesh'
# newExpressionString = other.getStimulusString()
# kwargs = {'element' : other.functionSpace.ufl_element() ,str(other.name) : Constant(other.h_real), "Ka" : Ka, 'Ds' : Ds, 'kb' : kb}
# return Expression(newExpressionString, **kwargs) + self
# elif isinstance(other, Expression):
# #print 'self and other are Expressions'
# return self + other
# get the gradient of all relevant Stimuli on my mesh
def getGradient(self, usedMeshesList):
tempCumulatedStimuli = None
for Mesh in usedMeshesList:
if Mesh != self: #everyone but myself
if Mesh.gender != self.gender: #everyone of the opposite gender
#print self.name, self.gender,'s opposite gender:', Mesh.name, Mesh.gender
if tempCumulatedStimuli == None:
tempCumulatedStimuli = Mesh
else:
tempCumulatedStimuli = Mesh + tempCumulatedStimuli
#if no other gender is detected, take your "own" stimulus. Should mean an artifical stimulus has been applied
if tempCumulatedStimuli == None:
tempCumulatedStimuli = self.stimulus
#print tempCumulatedStimuli
#check if the overloaded add function has been used, if not make an Expression:
try:
self.gradient = gradient(project(tempCumulatedStimuli, self.functionSpace))
return self.gradient
except TypeError:
if isinstance(tempCumulatedStimuli, Expression):
print( 'gradient creation: tempCumulatedStimuli is an Expression')
self.gradient = gradient(interpolate(tempCumulatedStimuli, self.functionSpace))
return self.gradient
elif isinstance(tempCumulatedStimuli, self.__class__):
print( 'gradient creation: tempCumulatedStimuli is a Sum')
self.gradient = gradient(interpolate(tempCumulatedStimuli, self.functionSpace))
return self.gradient
# elif isinstance(tempCumulatedStimuli, FEMMesh): #create an Expression which can be used in the gradient function. Similar to the __add__ function, just with one class
# print 'gradient creation: tempCumulatedStimuli is a FEMMesh'
# kwargs = {'element' : self.functionSpace.ufl_element() ,str(tempCumulatedStimuli.name) : Constant(tempCumulatedStimuli.h_real), "Ka" : Ka, 'Ds' : Ds, 'kb' : kb}
# tempOnlyOneStimulusExpression = Expression(tempCumulatedStimuli.getStimulusString(), **kwargs)
# self.gradient = gradient(project(tempOnlyOneStimulusExpression, self.functionSpace))
# return self.gradient
return self.gradient
# if there is only one Mesh for the gradient to consider:
except:
#kwargs = {'element' : self.functionSpace.ufl_element() ,str(tempCumulatedStimuli.name) : Constant(tempCumulatedStimuli.h_real), "Ka" : Ka, 'Ds' : Ds, 'kb' : kb}
#tempOnlyOneStimulusExpression = Expression(tempCumulatedStimuli.getStimulusString(), **kwargs)
self.gradient = gradient(interpolate(tempCumulatedStimuli.stimulus, self.functionSpace))
return self.gradient
#if there are no activeSurfaceAreas the stimulus for each mesh has to be precalculated after initializing all meshes.
#this is achieved by adding the stimuli strings of all other FEMMeshes and compiling it into an Expression
def initSources(self, usedMeshesList):
#i have to create a new self.stimulus which is basically all stimuli but my own added
tempNewStimulusString = None
for FEMMeshes in usedMeshesList:
if FEMMeshes != self and FEMMeshes.gender != self.gender:
if tempNewStimulusString == None:
tempNewStimulusString = FEMMeshes.getStimulusStringWithH()
else:
tempNewStimulusString = tempNewStimulusString + ' + ' + FEMMeshes.getStimulusStringWithH()
#if no other gender is detected, take your "own" stimulus. Should mean an artifical stimulus has been applied
if tempNewStimulusString == None:
tempNewStimulusString = self.getStimulusStringWithH()
print('i am here:', tempNewStimulusString)
kwargs = {'element' : self.functionSpace.ufl_element(), 'h' : Constant(self.h_real), "Ka" : Ka, 'Ds' : Ds, 'kb' : kb}
self.stimulus = Expression(tempNewStimulusString, **kwargs)
# used to reinitialize the PDE after initSources() was run. Updates the PDE function with the latest stimuli
def initPDE(self):
self.PDE = inner((self.currentSolutionFunction - self.trialFunction) / k, self.testFunction)*dx - Dm*inner(nabla_grad(self.trialFunction), nabla_grad(self.testFunction))*dx \
- (1.0-self.h)*(nu*k0 + (nu*K*self.trialFunction**2)/(Km**2 + self.trialFunction**2))*self.testFunction*dx + eta*self.trialFunction*self.testFunction*dx - self.stimulus*self.testFunction*dx
| true
|
59388671cf47001a1cc0abb8149a42083e380ed5
|
Python
|
LangII/GoCalc
|
/obsolete/taxicabinflcalc.py
|
UTF-8
| 5,326
| 2.828125
| 3
|
[] |
no_license
|
from kivy.app import App
""" This is stupid... Not sure why, but this import line needs to be commented out if
running from main_console.py. """
from gamelogic.stone import Stone
# def getStoneRawInfluenceGrid(self, pos, opponent=False):
def getStoneRawInfluenceGrid(pos, opponent=False, board_grid=None):
board_grid = App.get_running_app().data['board'].grid
# pos = [4, 4]
raw_infl_max_steps = 36
raw_infl_bias = 5
raw_infl_ceiling = 10
""" Returns a list-of-lists parallel to board.grid where the individual values are floats
representing the influence of an individual stone. """
if isinstance(pos, Stone): pos = pos.pos
influence_grid = []
for y, row in enumerate(board_grid):
influence_row = []
for x, stone in enumerate(row):
# influence_grid ignores grid positions with stones (sets influence to 0).
if board_grid[y][x] != None or (y == pos[0] and x == pos[1]):
raw_influence = 0
else:
# Total (taxicab geometry) steps from pos to [y, x].
total_steps = abs(pos[0] - y) + abs(pos[1] - x)
# Invert steps to make value assignment on grid applicable.
# inverted_steps = self.RAW_INFLUENCE_MAX_STEPS - total_steps
inverted_steps = raw_infl_max_steps - total_steps
# Apply bias (to make points closer to stone more valuable).
# raw_influence = inverted_steps ** self.RAW_INFLUENCE_BIAS
raw_influence = inverted_steps ** raw_infl_bias
influence_row += [ raw_influence ]
influence_grid += [ influence_row ]
# Calculate ceiling_transition value:
max_infl = max([ i for row in influence_grid for i in row ])
# ceiling_transition = self.RAW_INFLUENCE_CEILING / max_infl
ceiling_transition = raw_infl_ceiling / max_infl
for y in range(len(influence_grid)):
for x in range(len(influence_grid[0])):
# Apply ceiling_transition to each value in influence_grid.
influence_grid[y][x] = influence_grid[y][x] * ceiling_transition
# If calculating for opponent, make all values negative.
if opponent: influence_grid[y][x] = -influence_grid[y][x]
return influence_grid
# def getAllStonesRawInfluenceGrid(self, opponent=False):
def getAllStonesRawInfluenceGrid(opponent=False):
board = App.get_running_app().data['board']
# influence_grids is a list of individual influence grids for each individual stone.
if opponent:
# opponent_color = 'white' if self.color == 'black' else 'black'
opponent_color = 'white'
influence_grids = []
# for stone in self.board.stones[opponent_color]:
for stone in board.stones[opponent_color]:
stone_infl = getStoneRawInfluenceGrid(stone, True)
influence_grids += [ stone_infl ]
else:
influence_grids = []
# for stone in self.stones:
for stone in board.players['black'].stones:
stone_infl = getStoneRawInfluenceGrid(stone)
influence_grids += [ stone_infl ]
# Generate the base all_influence_grid.
all_influence_grid = []
# for _ in range(self.board.size[0]):
for _ in range(board.size[0]):
# all_influence_grid += [[ 0 for _ in range(self.board.size[1]) ]]
all_influence_grid += [[ 0 for _ in range(board.size[1]) ]]
# Populate all_influence_grid with influence_grids.
for y in range(len(all_influence_grid)):
for x in range(len(all_influence_grid[0])):
all_influence_grid[y][x] = sum([ grid[y][x] for grid in influence_grids ])
return all_influence_grid
# def getWholeBoardRawInfluenceGrid(self, to_print=False):
def getWholeBoardRawInfluenceGrid(to_print=False):
print_justify_by = 7
print_round_dec_by = 2
board = App.get_running_app().data['board']
bot_influence = getAllStonesRawInfluenceGrid()
opponent_influence = getAllStonesRawInfluenceGrid(opponent=True)
# Generate the base all_influence_grid.
whole_board_influence = []
for _ in range(board.size[0]):
whole_board_influence += [[ 0 for _ in range(board.size[1]) ]]
# Populate whole_board_influence with influence_grids.
for y in range(len(whole_board_influence)):
for x in range(len(whole_board_influence[0])):
whole_board_influence[y][x] = bot_influence[y][x] + opponent_influence[y][x]
# If requested, convert to a single pretty-print string.
if to_print:
to_print = []
for y, row in enumerate(whole_board_influence):
new_row = []
for x, i in enumerate(row):
if isinstance(board.grid[y][x], Stone):
i = '(%s)' % board.grid[y][x].print_char
new_row += [ i.center(print_justify_by) ]
elif i > 0:
i = '+%.*f' % (print_round_dec_by, i)
new_row += [ i.rjust(print_justify_by) ]
elif i <= 0:
i = '%.*f' % (print_round_dec_by, i)
new_row += [ i.rjust(print_justify_by) ]
to_print += [ ' '.join(new_row) ]
whole_board_influence = '\n\n'.join(to_print)
return whole_board_influence
| true
|
3655311243e4b23054ea1aaa198ef0739b1db7c8
|
Python
|
feczo/pythonclass
|
/2048/main_7.py
|
UTF-8
| 1,246
| 2.8125
| 3
|
[] |
no_license
|
from numpy import random, array
a = array([[None for i in range(4)] for i in range(4)])
def addblock():
col = random.randint(4)
row = random.randint(4)
if not a[row, col]:
a[row, col] = 2
else:
addblock()
def move(way):
change = False
if way in ['down', 'right']:
rows = list(reversed(range(1, 4)))
if way in ['up', 'left']:
rows = range(3)
for col in range(4):
for row in rows:
if way == 'down':
curr = (row, col)
prev = (row-1, col)
if way == 'up':
curr = (row, col)
prev = (row+1, col)
if way == 'left':
curr = (col, row)
prev = (col, row+1)
if way == 'right':
curr = (col, row)
prev = (col, row-1)
if a[prev]:
if a[prev] == a[curr]:
a[curr] *= 2
a[prev] = None
change = True
if not a[curr]:
a[curr] = a[prev]
a[prev] = None
change = True
if change:
move(way)
else:
addblock()
addblock()
addblock()
| true
|
2be99bbb0b5e033bdb1df4cc2036d481ea8ecc9b
|
Python
|
mohan-sharan/python-programming
|
/List/list_1.py
|
UTF-8
| 126
| 3.703125
| 4
|
[] |
no_license
|
#CREATE A LIST TO STORE ANY 5 EVEN NUMBERS
evenNumbers = [2, 4, 6, 8, 10]
print(evenNumbers)
#OUTPUT
#[2, 4, 6, 8, 10]
| true
|
ec10969a35c55617e100cb701921e112474fe2ac
|
Python
|
jiseungshin/pm4py-source
|
/pm4py/log/exporter/csv.py
|
UTF-8
| 1,239
| 2.71875
| 3
|
[
"Apache-2.0"
] |
permissive
|
from lxml import etree
from pm4py.log import log as log_instance
from pm4py.log import transform as log_transform
import pandas as pd
def get_dataframe_from_log(log):
"""
Return a Pandas dataframe from a given log
Parameters
-----------
log: :class:`pm4py.log.log.EventLog`
Event log. Also, can take a trace log and convert it to event log
Returns
-----------
df
Pandas dataframe
"""
if type(log) is log_instance.TraceLog:
log = log_transform.transform_trace_log_to_event_log(log)
transfLog = [dict(x) for x in log]
df = pd.DataFrame.from_dict(transfLog)
return df
def export_log_as_string(log):
"""
Exports the given log to string format
Parameters
-----------
log: :class:`pm4py.log.log.EventLog`
Event log. Also, can take a trace log and convert it to event log
Returns
-----------
string
String representing the CSV log
"""
df = get_dataframe_from_log(log)
return df.to_string()
def export_log(log, outputFilePath):
"""
Exports the given log to CSV format
Parameters
----------
log: :class:`pm4py.log.log.EventLog`
Event log. Also, can take a trace log and convert it to event log
outputFilePath:
Output file path
"""
df = get_dataframe_from_log(log)
df.to_csv(outputFilePath)
| true
|
93258b947347cf231d5b38148090071c91a39dbf
|
Python
|
bigalex95/tkinterExamples
|
/tkinter/tkinterExamples/whiteboard/tm copy.py
|
UTF-8
| 4,994
| 3.0625
| 3
|
[] |
no_license
|
# Easy Machine Learning & Object Detection with Teachable Machine
#
# Michael D'Argenio
# mjdargen@gmail.com
# https://dargenio.dev
# https://github.com/mjdargen
# Created: February 6, 2020
# Last Modified: February 6, 2020
#
# This program uses Tensorflow and OpenCV to detect objects in the video
# captured from your webcam. This program is meant to be used with machine
# learning models generated with Teachable Machine.
#
# Teachable Machine is a great machine learning model trainer and generator
# created by Google. You can use Teachable Machine to create models to detect
# objects in images, sounds in audio, or poses in images. For more info, go to:
# https://teachablemachine.withgoogle.com/
#
# For this project, you will be generating a image object detection model. Go
# to the website, click "Get Started" then go to "Image Project". Follow the
# steps to create a model. Export the model as a "Tensorflow->Keras" model.
#
# To run this code in your environment, you will need to:
# * Install Python 3 & library dependencies
# * Follow instructions for your setup
# * Export your teachable machine tensorflow keras model and unzip it.
# * You need both the .h5 file and labels.txt
# * Update model_path to point to location of your keras model
# * Update labels_path to point to location of your labels.txt
# * Adjust width and height of your webcam for your system
# * Adjust frameWidth with your video feed width in pixels
# * Adjust frameHeight with your video feed height in pixels
# * Set your confidence threshold
# * conf_threshold by default is 90
# * If video does not show up properly, use the matplotlib implementation
# * Uncomment "import matplotlib...."
# * Comment out "cv2.imshow" and "cv2.waitKey" lines
# * Uncomment plt lines of code below
# * Run "python3 tm_obj_det.py"
import multiprocessing
import numpy as np
import cv2
import tensorflow.keras as tf
import pyttsx3
import math
# main line code
# if statement to circumvent issue in windows
if __name__ == '__main__':
# read .txt file to get labels
labels_path = "converted_keras/labels.txt"
# open input file label.txt
labelsfile = open(labels_path, 'r')
# initialize classes and read in lines until there are no more
classes = []
line = labelsfile.readline()
while line:
# retrieve just class name and append to classes
classes.append(line.split(' ', 1)[1].rstrip())
line = labelsfile.readline()
# close label file
labelsfile.close()
# load the teachable machine model
model_path = 'converted_keras/keras_model.h5'
model = tf.models.load_model(model_path, compile=False)
# initialize webcam video object
cap = cv2.VideoCapture(0)
# width & height of webcam video in pixels -> adjust to your size
# adjust values if you see black bars on the sides of capture window
frameWidth = 1280
frameHeight = 720
# set width and height in pixels
cap.set(cv2.CAP_PROP_FRAME_WIDTH, frameWidth)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frameHeight)
# enable auto gain
cap.set(cv2.CAP_PROP_GAIN, 0)
# keeps program running forever until ctrl+c or window is closed
while True:
# disable scientific notation for clarity
np.set_printoptions(suppress=True)
# Create the array of the right shape to feed into the keras model.
# We are inputting 1x 224x224 pixel RGB image.
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
# capture image
check, frame = cap.read()
# mirror image - mirrored by default in Teachable Machine
# depending upon your computer/webcam, you may have to flip the video
# frame = cv2.flip(frame, 1)
# crop to square for use with TM model
margin = int(((frameWidth-frameHeight)/2))
square_frame = frame[0:frameHeight, margin:margin + frameHeight]
# resize to 224x224 for use with TM model
resized_img = cv2.resize(square_frame, (224, 224))
# convert image color to go to model
model_img = cv2.cvtColor(resized_img, cv2.COLOR_BGR2RGB)
# turn the image into a numpy array
image_array = np.asarray(model_img)
# normalize the image
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# load the image into the array
data[0] = normalized_image_array
# run the prediction
predictions = model.predict(data)
# confidence threshold is 90%.
conf_threshold = 90
confidence = []
conf_label = ""
threshold_class = ""
# for each one of the classes
for i in range(0, len(classes)):
            # scale prediction confidence to % and append to 1-D list
confidence.append(int(predictions[0][i]*100))
print(confidence[0])
# original video feed implementation
#cv2.imshow("Capturing", data)
# cv2.waitKey(10)
| true
|
d1a535bf4ab30adabce392c6fa34d16b363d1b6c
|
Python
|
namnt1410/stock_yfinance
|
/main.py
|
UTF-8
| 2,462
| 3.25
| 3
|
[] |
no_license
|
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import yfinance as yf
import pandas as pd
import requests
from bs4 import BeautifulSoup
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def make_graph(stock_data, revenue_data, stock):
fig = make_subplots(rows=2, cols=1, shared_xaxes=True, subplot_titles=("Historical Share Price", "Historical Revenue"), vertical_spacing = .3)
fig.add_trace(go.Scatter(x=pd.to_datetime(stock_data.Date, infer_datetime_format=True), y=stock_data.Close.astype("float"), name="Share Price"), row=1, col=1)
fig.add_trace(go.Scatter(x=pd.to_datetime(revenue_data.Date, infer_datetime_format=True), y=revenue_data.Revenue.astype("float"), name="Revenue"), row=2, col=1)
fig.update_xaxes(title_text="Date", row=1, col=1)
fig.update_xaxes(title_text="Date", row=2, col=1)
fig.update_yaxes(title_text="Price ($US)", row=1, col=1)
fig.update_yaxes(title_text="Revenue ($US Millions)", row=2, col=1)
fig.update_layout(showlegend=False,
height=900,
title=stock,
xaxis_rangeslider_visible=True)
fig.show()
def print_hi(name):
# Use a breakpoint in the code line below to debug your script.
print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
# print_hi('PyCharm')
Tesla = yf.Ticker('TSLA')
tesla_data = Tesla.history(period="max")
tesla_data.reset_index(inplace=True)
print(tesla_data.head())
url = "https://www.macrotrends.net/stocks/charts/TSLA/tesla/revenue"
html_data = requests.get(url).text
soup = BeautifulSoup(html_data, "html.parser")
soup.find_all('title')
    # DataFrame.append was removed in pandas 2.0; collect the rows first and build the frame once
    revenue_rows = []
    for row in soup.find_all("tbody")[1].find_all("tr"):
        col = row.find_all("td")
        date = col[0].text
        revenue = col[1].text.replace("$", "").replace(",", "")
        revenue_rows.append({"Date": date, "Revenue": revenue})
    tesla_revenue = pd.DataFrame(revenue_rows, columns=['Date', 'Revenue'])
tesla_revenue.dropna(inplace=True)
tesla_revenue = tesla_revenue[tesla_revenue['Revenue'] != ""]
print(tesla_revenue.tail())
make_graph(tesla_data, tesla_revenue, 'Tesla')
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| true
|
2bd1582de4c96c7298709790ca2f2be8e1843617
|
Python
|
madhuriagrawal/python_assignment
|
/ouputTask2.py
|
UTF-8
| 364
| 3.8125
| 4
|
[] |
no_license
|
x=123
i = 0
count = 0
for i in x:
print(i)
#it will give the error :'int' object is not iterable
while i < 5:
print(i)
i += 1
if i == 3:
break
else:
print("error")
# output will be (once the failing for-loop above is removed; the while's else
# clause is skipped because the loop exits via break)
# 0
# 1
# 2
while True:
print(count)
count += 1
if count >= 5:
break
#output will be
# 0
# 1
# 2
# 3
# 4
| true
|
886f09114d627aabfb18a6fcbdf7af8873332b03
|
Python
|
Deci-AI/super-gradients
|
/src/super_gradients/training/losses/cwd_loss.py
|
UTF-8
| 2,374
| 2.609375
| 3
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
from typing import Optional
import torch.nn as nn
import torch
class ChannelWiseKnowledgeDistillationLoss(nn.Module):
"""
Implementation of Channel-wise Knowledge distillation loss.
paper: "Channel-wise Knowledge Distillation for Dense Prediction", https://arxiv.org/abs/2011.13256
Official implementation: https://github.com/irfanICMLL/TorchDistiller/tree/main/SemSeg-distill
"""
def __init__(self, normalization_mode: str = "channel_wise", temperature: float = 4.0, ignore_index: Optional[int] = None):
"""
:param normalization_mode: default is for `channel-wise` normalization as implemented in the original paper,
softmax is applied upon the spatial dimensions. For vanilla normalization, to apply softmax upon the channel
dimension, set this value as `spatial_wise`.
:param temperature: temperature relaxation value applied upon the logits before the normalization. default value
is set to `4.0` as the original implementation.
"""
super().__init__()
self.T = temperature
self.ignore_index = ignore_index
self.kl_div = nn.KLDivLoss(reduction="sum" if ignore_index is None else "none")
if normalization_mode not in ["channel_wise", "spatial_wise"]:
raise ValueError(f"Unsupported normalization mode: {normalization_mode}")
self.normalization_mode = normalization_mode
def forward(self, student_preds: torch.Tensor, teacher_preds: torch.Tensor, target: Optional[torch.Tensor] = None):
B, C, H, W = student_preds.size()
# set the normalization axis and the averaging scalar.
norm_axis = -1 if self.normalization_mode == "channel_wise" else 1
averaging_scalar = (B * C) if self.normalization_mode == "channel_wise" else (B * H * W)
# Softmax normalization
softmax_teacher = torch.softmax(teacher_preds.view(B, C, -1) / self.T, dim=norm_axis)
log_softmax_student = torch.log_softmax(student_preds.view(B, C, -1) / self.T, dim=norm_axis)
loss = self.kl_div(log_softmax_student, softmax_teacher)
if self.ignore_index is not None:
valid_mask = target.view(B, -1).ne(self.ignore_index).unsqueeze(1).expand_as(loss)
loss = (loss * valid_mask).sum()
loss = loss * (self.T**2) / averaging_scalar
return loss
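# Minimal usage sketch (not part of the library): random logits shaped (B, C, H, W); the target
# argument only matters when ignore_index is set.
if __name__ == "__main__":
    student_logits = torch.randn(2, 19, 32, 32)
    teacher_logits = torch.randn(2, 19, 32, 32)
    criterion = ChannelWiseKnowledgeDistillationLoss(temperature=4.0)
    print(criterion(student_logits, teacher_logits))  # scalar distillation loss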
| true
|
29b8be49e416f26ea2ce60edccb04068c22a0128
|
Python
|
aquinzi/tdf-actividades
|
/_admin-scripts/jsontocsv(activities-name).py
|
UTF-8
| 814
| 2.53125
| 3
|
[
"CC0-1.0"
] |
permissive
|
'''
run where the files are
'''
import json
import os
final_file = "tipo,nombre,nombre_alt\n"
for root, subFolders, files in os.walk(os.getcwd()):
for filename in files:
filePath = os.path.join(root, filename)
if not filePath.endswith(".json") or filename.startswith("_"):
continue
print (" processing " + filePath)
current_text = ""
with open(filePath, 'r', encoding='utf-8-sig') as readme:
current_text = readme.read()
tmp_file = json.loads(current_text)
nombre_alt = "\"\""
if "nombre_alt" in tmp_file:
nombre_alt = tmp_file["nombre_alt"]
final_file += tmp_file["tipo"] + "," + tmp_file["nombre"] + "," + nombre_alt + "\n"
with open(os.path.join(os.getcwd(),"actividades_merged.csv"), 'w', encoding='utf-8-sig') as saveme:
saveme.writelines(final_file)
| true
|
839ca2222455c92e04d24a70d3d999f1e8f24360
|
Python
|
flipelunico/WestWorld
|
/Miner.py
|
UTF-8
| 3,091
| 2.671875
| 3
|
[] |
no_license
|
from BaseGameEntity import BaseGameEntityClass
import EntityNames
from location_type import location_type
from MinerOwnedStates.GoHomeAndSleepTilRested import GoHomeAndSleepTilRested
class Miner(BaseGameEntityClass):
    ComFortLevel = 5
    ComfortLevel = 5
    MaxNuggets = 3
    ThirstLevel = 5
    TirednessThreshold = 5
    m_pCurrentState = None
    m_Location = None
    m_iGoldCarried = None
    m_iMoneyInBank = None
    m_iThirst = None
    m_iFatigue = None
def __init__(self, EntityNames):
# super(EntityNames)
global m_pCurrentState, m_Location, m_iGoldCarried, m_iMoneyInBank, m_iThirst, m_iFatigue
m_Location = location_type.shack
m_iGoldCarried = 0
m_iMoneyInBank = 0
m_iThirst = 0
m_iFatigue = 0
m_pCurrentState = GoHomeAndSleepTilRested.getInstance()
def ChangeState(self, State):
global m_pCurrentState
# make sure both states are both valid before attempting to
# call their methods
# call the exit method of the existing state
m_pCurrentState.Exit(self)
# change state to the new state
m_pCurrentState = State
# call the entry method of the new state
m_pCurrentState.Enter(self)
def Update(self):
global m_iThirst
m_iThirst += 1
if (m_pCurrentState):
m_pCurrentState.Execute(self)
def AddToGoldCarried(self, val):
global m_iGoldCarried
m_iGoldCarried += val
if (m_iGoldCarried < 0):
m_iGoldCarried = 0
def AddToWealth(self, val):
global m_iMoneyInBank
m_iMoneyInBank += val
if (m_iMoneyInBank < 0):
m_iMoneyInBank = 0
def Thirsty(self):
global m_iThirst
global ThirstLevel
ThirstLevel = 5
if (m_iThirst >= ThirstLevel):
return True
else:
return False
def Fatigued(self):
global m_iFatigue, TirednessThreshold
if (m_iFatigue > 5):
return True
else:
return False
def Location(self):
global m_Location
return m_Location
def ChangeLocation(self, location_type):
global m_Location
m_Location = location_type
def IncreaseFatigue(self):
global m_iFatigue
m_iFatigue += 1
def DecreaseFatigue(self):
global m_iFatigue
m_iFatigue -= 1
def PocketsFull(self):
global m_iGoldCarried, MaxNuggets
MaxNuggets = 3
if (m_iGoldCarried >= MaxNuggets):
return True
else:
return False
def GoldCarried(self):
global m_iGoldCarried
return m_iGoldCarried
def SetGoldCarried(self, val):
global m_iGoldCarried
m_iGoldCarried = val
def Wealth(self):
global m_iMoneyInBank
return m_iMoneyInBank
def BuyAndDrinkAWhiskey(self):
global m_iThirst, m_iMoneyInBank
m_iThirst = 0
m_iMoneyInBank -= 2
| true
|
cf0e7f4feb2924a1f252b1b4108f2ea0622d68fb
|
Python
|
sajandc/Python-Tutorial
|
/python8.py
|
UTF-8
| 68
| 2.734375
| 3
|
[] |
no_license
|
l=[]
l=[i for i in input().split(',')]
l.sort()
print(','.join(l))
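# Example (illustrative): entering "banana,apple,cherry" prints "apple,banana,cherry"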
| true
|
213878e0b157e5dd22e6cfb6ff4407903899646c
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_135/4118.py
|
UTF-8
| 1,724
| 3.46875
| 3
|
[] |
no_license
|
def get_matrix(filename):
"""This function reads a file and returns a matrix """
line = []
try:
handler = open(filename, 'r')
line = [ map(int, line.split(' ')) for line in handler]
return line
except Exception, e:
pass
def get_row(n,matrix):
"""this function takes row selected and the matrix and returns a list for that row """
return matrix[n-1]
def find_number(ls1, ls2):
count = 0
answer = 0
for i in ls1:
for j in ls2:
if i == j:
count +=1
answer = i
if answer == 0:
return 'Volunteer cheated!'
if count !=1:
return 'Bad magician!'
if answer !=0 and count==1:
return answer
def write_to_file(output, filename):
""" """
try:
handler = open(filename, 'w+')
handler.write(output+"\n")
handler.close()
except Exception, e:
pass
matrix = get_matrix('list.txt')
first = matrix[0]
del matrix[0]
counter = 0
row = []
ls = []
for i in range(0,len(matrix),5):
row.append(matrix.pop(counter))
counter = counter+4
# print ls
for i in range(0, len(matrix),4):
ls.append(matrix[i:i+4])
def magic(first_matrix, second_matrix, row1, row2):
first_list, second_list = first_matrix[row1-1], second_matrix[row2-1]
return find_number(first_list, second_list)
counter = 0
handler = open('output.out','w+')
for i in range(0,len(ls),2):
result = magic(ls[i],ls[i+1], row[i][0], row[i+1][0])
counter = counter+1
handler.write('Case #%s: %s\n'%(counter, result))
handler.close()
| true
|
8619dae93878e0eb42da3e9658e8987a249782cf
|
Python
|
KevinZZZZ1/machinelearning
|
/logistic_regression.py
|
UTF-8
| 4,555
| 2.9375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 15 16:17:31 2018
斜率没有什么问题,但是偏置b始终存在问题,而且没找到 = =
补充:b不对的问题好像找到了,问题似乎是出在前期数据处理时进行的特征缩放,把特征缩放去掉之后,经过100000次的迭代得到了正确的解,至于原因还没弄清楚
@author: keivn
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
import scipy.optimize as op
def sigmoid(x):
    # the sigmoid function (nonlinear)
    # its derivative is sigmoid(x)*(1-sigmoid(x))
y = 1/(1 + np.exp(-x))
return y
def sigmoidGradient(w,x,y):
    n = np.shape(x)[1]  # number of samples
dw = (np.dot(x,(sigmoid(np.dot(w.T,x))-y).T))/n
return dw
def feature_scaling(x,y):
    # perform feature scaling
    m = np.shape(x)[0]
    x_max = x.max(axis=1).reshape((m,1))  # x.max(axis=1) returns an array of shape (m,)
x_min = x.min(axis=1).reshape((m,1))
y_max = y.max(axis=1).reshape((1,1))
y_min = y.min(axis=1).reshape((1,1))
y_ = (y - y_min)/(y_max - y_min)
x_ = (x - x_min)/(x_max - x_min)
return x_,y_
def gradient_descent(x,y,epsilon,learningrate=0.001):
    m = np.shape(x)[0]  # number of features (bias b already included)
    n = np.shape(x)[1]  # number of samples
    # initialise w and b to a zero vector and 0
    w = np.zeros((m,1))
    # initial value of the cost function
    cost = costfunction(w,x,y)
    print(cost)
    dw = np.zeros((m,1))
    # partial derivatives of the cost function with respect to w and b
    #dw = (np.dot(x,(sigmoid(np.dot(w.T,x))-y).T))/n
#for i in range(n):
# dw = (sigmoid(np.dot(w.T,x[:,i].reshape(m,1)))-y[:,i])*x[:,i].reshape(m,1)
#dw = dw / n
#print("1 dw:")
#print(dw)
dw = (np.dot(x,(sigmoid(np.dot(w.T,x))-y).T))/n
k = 0
while(k < epsilon):
w = w - learningrate*dw
cost = costfunction(w,x,y)
print('w:')
print(w)
print('dw:')
print(dw)
print('cost:')
print(cost)
k = k + 1
dw = (np.dot(x,(sigmoid(np.dot(w.T,x))-y).T))/n
#dw = (np.dot(x,(y*(1-sigmoid(np.dot(w.T,x)))).T) + np.dot(x,((1-y)*sigmoid(np.dot(w.T,x))).T))/n
return w
def costfunction(w,x,y):
    # * is the element-wise product, np.dot() is the matrix product; axis=0 (the default) is horizontal, axis=1 is vertical
n = np.shape(x)[1]
j = np.sum(-y*np.log(sigmoid(np.dot(w.T,x)))-(1-y)*np.log(1-sigmoid(np.dot(w.T,x))))
return j/n
def loadDataSet(fileName):
numFeat = len(open(fileName).readline().split(',')) - 1
x = []; y = []
fr = open(fileName)
for line in fr.readlines():
lineArrX =[]
lineArrY =[]
curLine = line.strip().split(',')
for i in range(numFeat):
lineArrX.append(float(curLine[i]))
lineArrY.append(float(curLine[-1]))
x.append(lineArrX)
y.append(lineArrY)
return x,y
def train(fileurl):
file = fileurl
x_,y_ = loadDataSet(file)
x = np.array(x_).T
y = np.array(y_).T
    bias = np.ones((1,x.shape[1]))  # handle the bias term
x = np.concatenate((x,bias))
w = gradient_descent(x,y,100000)
return w
x,y = loadDataSet('ex2data1.txt')
m = len(x)
for i in range(m):
if(y[i][0]==0.0): plt.plot(x[i][0],x[i][1],'ro')
if(y[i][0]==1.0): plt.plot(x[i][0],x[i][1],'go')
plt.show()
w1,w2,b1 = train('ex2data1.txt')
w = -w1/w2
b = -b1/w2
x1 = np.linspace(30,100,90)
for i in range(m):
if(y[i][0]==0.0): plt.plot(x[i][0],x[i][1],'ro')
if(y[i][0]==1.0): plt.plot(x[i][0],x[i][1],'go')
def y1(x1):
y1 = w*x1+b
return y1
plt.plot(x1, y1(x1), 'r-',linewidth=1,label='f(x)')
plt.show()
X = np.array(x).T
Y = np.array(y).T
bias = np.ones((1,X.shape[1]))  # handle the bias term
X = np.concatenate((bias,X))
test_theta = np.array([[-24], [0.2], [0.2]])
cost = costfunction(test_theta, X, Y)
grad = sigmoidGradient(test_theta, X, Y)
print('Cost at test theta: {}'.format(cost))
print('Expected cost (approx): 0.218')
print('Gradient at test theta: {}'.format(grad))
print('Expected gradients (approx): 0.043 2.566 2.647')
options = {'full_output': True, 'maxiter': 400}
initial_theta = np.zeros((X.shape[0],1))
theta, cost, _, _, _ = op.fmin(lambda t: costfunction(t, X, Y), initial_theta, **options)
| true
|
958a29dccb7693995fff1a65b569c636c0eb626b
|
Python
|
nel215/color-clustering
|
/clustering.py
|
UTF-8
| 877
| 2.703125
| 3
|
[] |
no_license
|
import argparse
import numpy as np
import cv2
class ColorClustering:
def __init__(self):
self.K = 16
def run(self, src, dst):
src_img = cv2.imread(src)
samples = np.float32(src_img.reshape((-1, 3)))
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
attempts = 3
compactness, labels, centers = cv2.kmeans(samples, self.K, criteria, attempts, cv2.KMEANS_PP_CENTERS)
centers = np.uint8(centers)
dst_img = centers[labels.flatten()].reshape(src_img.shape)
cv2.imwrite(dst, dst_img)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--src', required=True)
parser.add_argument('-d', '--dst', required=True)
args = parser.parse_args()
color_clustering = ColorClustering()
color_clustering.run(args.src, args.dst)
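# Example invocation (illustrative; assumes an image named input.jpg exists):
#   python clustering.py -s input.jpg -d quantized.png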
| true
|
c57aac802105b24c5749f46cc34ab770e50e7549
|
Python
|
jgi302/IoT
|
/MQTT_Coffee/coffeeGUI-BT.py
|
UTF-8
| 2,492
| 3.078125
| 3
|
[] |
no_license
|
import tkinter as tk
from PIL import ImageTk
import socket
# -----------------------------------------------------------------------------
#
# -----------------------------------------------------------------------------
class MainWindow():
def __init__(self, main):
# canvas for image
self.canvas = tk.Canvas(main, width=800, height=600)
self.canvas.grid(row=0, column=0)
# images
self.my_images = []
self.my_images.append(ImageTk.PhotoImage(file = "Coffee_1.jpg"))
self.my_images.append(ImageTk.PhotoImage(file = "Coffee_2.jpg"))
self.my_image_number = 0
# set first image on canvas
self.image_on_canvas = self.canvas.create_image(
0, 0, anchor = tk.NW, image = self.my_images[self.my_image_number])
# button to change image and turn on/off coffee maker
self.button_txt = tk.StringVar()
self.button_txt.set('OFF')
self.button = tk.Button(main, textvariable=self.button_txt, command=self.onButton, height= 1, width=5)
self.button.place(x=375,y=500)
# socket for bluetooth
#self.s = socket.socket(socket.AF_BLUETOOTH,
# socket.SOCK_STREAM,
# socket.BTPROTO_RFCOMM)
#self.s.connect(('B8:27:EB:BD:1F:E4', 3))
#self.s.send(bytes('OFF', 'UTF-8'))
# -------------------------------------------------------------------------
#
# -------------------------------------------------------------------------
def onButton(self):
if self.my_image_number == 0:
self.my_image_number = 1
self.button_txt.set('ON')
# self.s.send(bytes('ON', 'UTF-8'))
else:
self.my_image_number = 0
self.button_txt.set('OFF')
# self.s.send(bytes('OFF', 'UTF-8'))
# change image
self.canvas.itemconfig(self.image_on_canvas,
image = self.my_images[self.my_image_number])
# -----------------------------------------------------------------------------
#
# -----------------------------------------------------------------------------
def main():
root = tk.Tk()
root.title("Coffee Maker")
MainWindow(root)
root.mainloop()
# -----------------------------------------------------------------------------
#
# -----------------------------------------------------------------------------
if __name__ == "__main__":
main()
| true
|
d11138f8c239b3a7700ea4ab59c96dbb3524d920
|
Python
|
btrif/Python_dev_repo
|
/Algorithms/backtracking/Hamiltonian Cycle.py
|
UTF-8
| 4,394
| 4.28125
| 4
|
[] |
no_license
|
# Created by Bogdan Trif on 26-10-2017 , 11:24 AM.
'''
https://en.wikipedia.org/wiki/Hamiltonian_path
Hamiltonian Path in an undirected graph is a path that visits each vertex exactly once.
A Hamiltonian cycle (or Hamiltonian circuit) is a Hamiltonian Path such that there is an edge (in graph)
from the last vertex to the first vertex of the Hamiltonian Path.
Determine whether a given graph contains Hamiltonian Cycle or not.
If it contains, then print the path. Following are the input and output of the required function.
Input:
A 2D array graph[V][V] where V is the number of vertices in graph and graph[V][V] is adjacency matrix
representation of the graph. A value graph[i][j] is 1 if there is a direct edge from i to j, otherwise graph[i][j] is 0.
Output:
An array path[V] that should contain the Hamiltonian Path.
path[i] should represent the ith vertex in the Hamiltonian Path.
The code should also return false if there is no Hamiltonian Cycle in the graph.
For example, a Hamiltonian Cycle in the following graph is {0, 1, 2, 4, 3, 0}.
There are more Hamiltonian Cycles in the graph like {0, 3, 4, 2, 1, 0}
(0)--(1)--(2)
| / \ |
| / \ |
| / \ |
(3)-------(4)
And the following graph doesn’t contain any Hamiltonian Cycle.
(0)--(1)--(2)
| / \ |
| / \ |
| / \ |
(3) (4)
'''
# Python program for solution of
# hamiltonian cycle problem
class Graph():
def __init__(self, vertices):
self.graph = [[0 for column in range(vertices)]\
for row in range(vertices)]
self.V = vertices
''' Check if this vertex is an adjacent vertex
of the previously added vertex and is not
included in the path earlier '''
def isSafe(self, v, pos, path):
# Check if current vertex and last vertex
# in path are adjacent
if self.graph[ path[pos-1] ][v] == 0:
return False
# Check if current vertex not already in path
for vertex in path:
if vertex == v:
return False
return True
# A recursive utility function to solve
# hamiltonian cycle problem
def hamCycleUtil(self, path, pos):
# base case: if all vertices are
# included in the path
if pos == self.V:
# Last vertex must be adjacent to the
            # first vertex in path to make a cycle
if self.graph[ path[pos-1] ][ path[0] ] == 1:
return True
else:
return False
# Try different vertices as a next candidate
# in Hamiltonian Cycle. We don't try for 0 as
# we included 0 as starting point in in hamCycle()
for v in range(1,self.V):
if self.isSafe(v, pos, path) == True:
path[pos] = v
if self.hamCycleUtil(path, pos+1) == True:
return True
# Remove current vertex if it doesn't
# lead to a solution
path[pos] = -1
return False
def hamCycle(self):
path = [-1] * self.V
''' Let us put vertex 0 as the first vertex
in the path. If there is a Hamiltonian Cycle,
then the path can be started from any point
of the cycle as the graph is undirected '''
path[0] = 0
if self.hamCycleUtil(path,1) == False:
print ("Solution does not exist\n")
return False
self.printSolution(path)
return True
def printSolution(self, path):
print( "Solution Exists: Following is one Hamiltonian Cycle")
for vertex in path:
print( vertex,)
print (path[0], "\n")
# Driver Code
''' Let us create the following graph
(0)--(1)--(2)
| / \ |
| / \ |
| / \ |
(3)-------(4) '''
g1 = Graph(5)
g1.graph = [ [0, 1, 0, 1, 0], [1, 0, 1, 1, 1],
[0, 1, 0, 0, 1,],[1, 1, 0, 0, 1],
[0, 1, 1, 1, 0], ]
# Print the solution
g1.hamCycle();
''' Let us create the following graph
(0)--(1)--(2)
| / \ |
| / \ |
| / \ |
(3) (4) '''
g2 = Graph(5)
g2.graph = [ [0, 1, 0, 1, 0], [1, 0, 1, 1, 1],
[0, 1, 0, 0, 1,], [1, 1, 0, 0, 0],
[0, 1, 1, 0, 0], ]
# Print the solution
g2.hamCycle();
# This code is contributed by Divyanshu Mehta
| true
|
65cc9b0f1d3a313f35e277378dba4f872184c66c
|
Python
|
Etheri/bioproject_py
|
/unique_genes/bi_task_6.py
|
UTF-8
| 500
| 3.015625
| 3
|
[] |
no_license
|
from collections import Counter
def readListFF(name):
    # Read list of genes from file
    f = open(name, 'r')
    out = [line.strip() for line in f]
    f.close()  # must come before the return, otherwise it is unreachable
    return out
def outInFile(name, l):
# Write list of genes into file
f = open(name, 'w')
for index in l:
f.write(index + '\n')
f.close()
def main():
name = 'geneList.txt'
name_w = 'geneList_out.txt'
outInFile(name_w, set(readListFF(name)))
if __name__=='__main__':
main()
| true
|
72477ca383c50a535745f4024c41f67f33a02045
|
Python
|
VibhorKukreja/refuel
|
/vehicles/models.py
|
UTF-8
| 1,367
| 2.5625
| 3
|
[] |
no_license
|
from django.db import models
# Create your models here.
VEHICLE_TYPE = (
('BIKE', 'Bike'),
('CAR', 'Car'),
)
FUEL_TYPE = (
('PETROL', 'Petrol'),
('DIESEL', 'Diesel'),
)
class Vehicle(models.Model):
brand = models.CharField(max_length=255)
model = models.CharField(max_length=255)
registration_number = models.CharField(max_length=15)
engine_capacity = models.IntegerField()
user_profile = models.ForeignKey('users.UserProfile', on_delete=models.CASCADE)
vehicle_type = models.CharField(
max_length=10,
choices=VEHICLE_TYPE,
default='BIKE',
)
fuel_type = models.CharField(
max_length=10,
choices=FUEL_TYPE,
default='PETROL',
)
def __str__(self):
return '{} - {} - {}'.format(self.brand, self.model, self.registration_number)
class Fuel(models.Model):
vehicle_id = models.ForeignKey('Vehicle', on_delete=models.CASCADE)
quantity = models.IntegerField()
amount = models.IntegerField()
transaction_date = models.DateTimeField(auto_now_add=True)
fuel_type = models.CharField(
max_length=10,
choices=FUEL_TYPE,
default='PETROL',
)
autocomplete_fields = ['fuel_type']
def __str__(self):
return '{} - {} - {}'.format(self.fuel_type, self.vehicle_id, self.quantity)
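# ORM usage sketch (illustrative; assumes a saved users.UserProfile instance `profile` exists):
#   v = Vehicle.objects.create(brand='Honda', model='CB350', registration_number='KA01AB1234',
#                              engine_capacity=350, user_profile=profile)
#   Fuel.objects.create(vehicle_id=v, quantity=10, amount=1100, fuel_type='PETROL')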
| true
|
3924900e83fb73185f4af0fec5ae2b10920e9db8
|
Python
|
varanasisrikar/Programs
|
/Python/LinAlg_EigenValues,Vectors.py
|
UTF-8
| 337
| 2.96875
| 3
|
[] |
no_license
|
import numpy as np
import numpy.linalg as alg
l1 = []
rows = int(input("enter rows:"))
cols = int(input("enter cols:"))
for i in range(rows):
for j in range(cols):
l1.append(int(input()))
print(l1)
m = np.reshape(l1, (rows, cols))
print(m)
Values, Vectors = alg.eig(m)
print(Values)
print(Vectors[:, 0])
print(Vectors[:, 1])
| true
|
9c4adc1944249d8c6d100fab3e090345906d93cd
|
Python
|
ngoldbaum/unyt
|
/unyt/exceptions.py
|
UTF-8
| 8,679
| 3.34375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
"""
Exception classes defined by unyt
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2018, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the LICENSE file, distributed with this software.
# -----------------------------------------------------------------------------
class UnitOperationError(ValueError):
"""An exception that is raised when unit operations are not allowed
Example
-------
>>> import unyt as u
>>> 3*u.g + 4*u.m\
# doctest: +IGNORE_EXCEPTION_DETAIL +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
unyt.exceptions.UnitOperationError: The <ufunc 'add'> operator
for unyt_arrays with units "g" (dimensions "(mass)") and
"m" (dimensions "(length)") is not well defined.
"""
def __init__(self, operation, unit1, unit2=None):
self.operation = operation
self.unit1 = unit1
self.unit2 = unit2
ValueError.__init__(self)
def __str__(self):
err = (
'The %s operator for unyt_arrays with units "%s" '
'(dimensions "%s") ' % (self.operation, self.unit1, self.unit1.dimensions)
)
if self.unit2 is not None:
err += f'and "{self.unit2}" (dimensions "{self.unit2.dimensions}") '
err += "is not well defined."
return err
class UnitConversionError(Exception):
"""An error raised when converting to a unit with different dimensions.
Example
-------
>>> import unyt as u
>>> data = 3*u.g
>>> data.to('m') # doctest: +IGNORE_EXCEPTION_DETAIL +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
unyt.exceptions.UnitConversionError: Cannot convert between 'g'
(dim '(mass)') and 'm' (dim '(length)').
"""
def __init__(self, unit1, dimension1, unit2, dimension2):
self.unit1 = unit1
self.unit2 = unit2
self.dimension1 = dimension1
self.dimension2 = dimension2
Exception.__init__(self)
def __str__(self):
err = "Cannot convert between '%s' (dim '%s') and '%s' " "(dim '%s')." % (
self.unit1,
self.dimension1,
self.unit2,
self.dimension2,
)
return err
class MissingMKSCurrent(Exception):
"""Raised when querying a unit system for MKS current dimensions
Since current is a base dimension for SI or SI-like unit systems but not in
CGS or CGS-like unit systems, dimensions that include the MKS current
dimension (the dimension of ampere) are not representable in CGS-like unit
systems. When a CGS-like unit system is queried for such a dimension, this
error is raised.
Example
-------
>>> from unyt.unit_systems import cgs_unit_system as us
>>> from unyt import ampere
>>> us[ampere.dimensions]\
# doctest: +IGNORE_EXCEPTION_DETAIL +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
unyt.exceptions.MissingMKSCurrent: The cgs unit system does not
have a MKS current base unit
"""
def __init__(self, unit_system_name):
self.unit_system_name = unit_system_name
def __str__(self):
err = (
"The %s unit system does not have a MKS current base unit"
% self.unit_system_name
)
return err
class MKSCGSConversionError(Exception):
"""Raised when conversion between MKS and CGS units cannot be performed
This error is raised and caught internally and will expose itself
to the user as part of a chained exception leading to a
UnitConversionError.
"""
pass
class UnitsNotReducible(Exception):
"""Raised when a unit cannot be safely represented in a unit system
Example
-------
>>> from unyt import A, cm
>>> data = 12*A/cm
>>> data.in_cgs()\
# doctest: +IGNORE_EXCEPTION_DETAIL +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
unyt.exceptions.UnitsNotReducible: The unit "A/cm" (dimensions
"(current_mks)/(length)") cannot be reduced to an expression
within the cgs system of units.
"""
def __init__(self, unit, units_base):
self.unit = unit
self.units_base = units_base
Exception.__init__(self)
def __str__(self):
err = (
'The unit "%s" (dimensions "%s") cannot be reduced to an '
"expression within the %s system of units."
% (self.unit, self.unit.dimensions, self.units_base)
)
return err
class IterableUnitCoercionError(Exception):
"""Raised when an iterable cannot be converted to a unyt_array
Example
-------
>>> from unyt import g, cm, unyt_array
>>> data = [2*cm, 3*g]
>>> unyt_array(data)\
# doctest: +IGNORE_EXCEPTION_DETAIL +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
unyt.exceptions.IterableUnitCoercionError: Received a list or
tuple of quantities with nonuniform units:
[unyt_quantity(2., 'cm'), unyt_quantity(3., 'g')]
"""
def __init__(self, quantity_list):
self.quantity_list = quantity_list
def __str__(self):
err = (
"Received a list or tuple of quantities with nonuniform units: "
"%s" % self.quantity_list
)
return err
class InvalidUnitEquivalence(Exception):
"""Raised an equivalence does not apply to a unit conversion
Example
-------
>>> import unyt as u
>>> data = 12*u.g
>>> data.to('erg', equivalence='thermal')\
# doctest: +IGNORE_EXCEPTION_DETAIL +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
unyt.exceptions.InvalidUnitEquivalence: The unit equivalence
'thermal' does not exist for the units 'g' and 'erg'.
"""
def __init__(self, equiv, unit1, unit2):
self.equiv = equiv
self.unit1 = unit1
self.unit2 = unit2
def __str__(self):
from unyt.unit_object import Unit
if isinstance(self.unit2, Unit):
msg = (
"The unit equivalence '%s' does not exist for the units "
"'%s' and '%s'."
)
else:
msg = (
"The unit equivalence '%s' does not exist for units '%s' "
"to convert to a new unit with dimensions '%s'."
)
return msg % (self.equiv, self.unit1, self.unit2)
class InvalidUnitOperation(Exception):
"""Raised when an operation on a unit object is not allowed
Example
-------
>>> from unyt import cm, g
>>> cm + g # doctest: +IGNORE_EXCEPTION_DETAIL +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
unyt.exceptions.InvalidUnitOperation: addition with unit objects
is not allowed
"""
pass
class SymbolNotFoundError(Exception):
"""Raised when a unit name is not available in a unit registry
Example
-------
>>> from unyt.unit_registry import default_unit_registry
>>> default_unit_registry['made_up_unit']\
# doctest: +IGNORE_EXCEPTION_DETAIL +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
unyt.exceptions.SymbolNotFoundError: The symbol 'made_up_unit'
does not exist in this registry.
"""
pass
class UnitParseError(Exception):
"""Raised when a string unit name is not parseable as a valid unit
Example
-------
>>> from unyt import Unit
>>> Unit('hello')\
# doctest: +IGNORE_EXCEPTION_DETAIL +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
unyt.exceptions.UnitParseError: Could not find unit symbol
'hello' in the provided symbols.
"""
pass
class IllDefinedUnitSystem(Exception):
"""Raised when the dimensions of the base units of a unit system are
inconsistent.
Example
-------
>>> from unyt.unit_systems import UnitSystem
>>> UnitSystem('atomic', 'nm', 'fs', 'nK', 'rad')\
# doctest: +IGNORE_EXCEPTION_DETAIL +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
unyt.exceptions.IllDefinedUnitSystem: Cannot create unit system
with inconsistent mapping from
dimensions to units. Received:
OrderedDict([((length), nm), ((mass), fs), ((time), nK),
((temperature), rad), ((angle), rad),
((current_mks), A), ((luminous_intensity), cd)])
"""
def __init__(self, units_map):
self.units_map = units_map
def __str__(self):
return (
"Cannot create unit system with inconsistent mapping from "
"dimensions to units. Received:\n%s" % self.units_map
)
| true
|
71dd7fbc4e7c66357b48c98b81eaa298829a5dd3
|
Python
|
kho903/python_algorithms
|
/programmers/level2/타겟 넘버.py
|
UTF-8
| 529
| 3.1875
| 3
|
[] |
no_license
|
answer = 0
def dfs(numbers, num, target, length):
global answer
if length == len(numbers):
if num == target:
answer += 1
return
else:
return
else:
dfs(numbers, num + numbers[length], target, length + 1)
dfs(numbers, num - numbers[length], target, length + 1)
def solution(numbers, target):
global answer
dfs(numbers, numbers[0], target, 1)
dfs(numbers, -numbers[0], target, 1)
return answer
print(solution([1, 1, 1, 1, 1], 3))
| true
|
9b51deb8bbdf63ebbf0121ba94472e999968b61e
|
Python
|
edelcorcoran/PandS-Project-2019
|
/boxplot.py
|
UTF-8
| 409
| 3.484375
| 3
|
[] |
no_license
|
#Boxplot Iris Dataset - looks at the 4 attributes
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#read csv file
iris = pd.read_csv('iris.csv')
sns.set()
#Generates a Boxplot for each column [SL, SW, PL, PW)
iris.boxplot()
#Assign a title to the Boxplot
plt.title('Iris Dataset Boxplot')
#Labels the Y-axis
plt.ylabel('Length in CM')
#Show plot.
plt.show()
| true
|
07c1594632f5f5b3afa85e49cb319f5478ff4673
|
Python
|
modalsoul0226/LeetcodeRepo
|
/easy/Pascal's Triangle.py
|
UTF-8
| 1,074
| 3.421875
| 3
|
[] |
no_license
|
class Solution:
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
res = [[1], [1, 1]]
if numRows == 0:
return []
elif numRows == 1:
return [[1]]
elif numRows == 2:
return [[1], [1,1]]
else:
for i in range(3, numRows + 1, 1):
temp = [1 for _ in range(i)]
j = 1
for i in range(len(res[-1]) - 1):
temp[j] = res[-1][i] + res[-1][i + 1]
j += 1
res.append(temp)
return res
# Alternative solution:
# class Solution:
# def generate(self, numRows):
# """
# :type numRows: int
# :rtype: List[List[int]]
# """
# res = [[1]]
# for i in range(1,numRows):
# res.append(list(map(lambda x, y : x + y, res[-1]+[0], [0]+res[-1])))
# return res[:numRows]
if __name__ == '__main__':
sol = Solution()
print(sol.generate(5))
| true
|
828069d3a9354f848d7e85862891d210faaa6600
|
Python
|
StaticNoiseLog/python
|
/ffhs/heron_sqrt.py
|
UTF-8
| 200
| 3
| 3
|
[] |
no_license
|
epsilon = 0.0000001
x = float(input("Square root of: "))
h_previous = 1
h = 2
while abs(h - h_previous) > epsilon:
h_previous = h
h = (h_previous + x/h_previous)/2
print(h)
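# A quick worked example of the iteration above (not part of the original script): for
# x = 2 the successive estimates are 2 -> 1.5 -> 1.41667 -> 1.41422 -> ..., i.e. Heron's
# update h = (h + x/h) / 2 converges to sqrt(x) until |h - h_previous| <= epsilon.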
| true
|
a23a7f3f059226b7af92f221433a7dcf057a1a1e
|
Python
|
RoboticsLabURJC/2014-pfc-JoseAntonio-Fernandez
|
/MapClient/tools/WayPoint.py
|
UTF-8
| 551
| 2.546875
| 3
|
[] |
no_license
|
from MapClient.classes import Pose3DI
class WayPoint:
def __init__(self, x=0, y=0, lat=0, lon=0, h=0):
self.x = x
self.y = y
self.h = h
self.lat= lat
self.lon = lon
# TODO: improve so these are computed automatically
@staticmethod
def waypoint_to_pose(self, awaypoint):
pose = Pose3DI(0,0,0,0,0,0,0,0)
data = pose.getPose3DData()
data.x = awaypoint.lon
data.y = awaypoint.lat
data.h = awaypoint.h
pose.setPose3DData(data)
return pose
| true
|
976ff05664c723c330b39b6888363f58be6521a2
|
Python
|
nharini27/harini
|
/count.py
|
UTF-8
| 75
| 3.53125
| 4
|
[] |
no_license
|
num=int(input())
count=0
while(num>0):
num=num//10
count+=1
print(count)
| true
|
d24b19895a18d0307b9a191780685dd2631d1659
|
Python
|
theY4Kman/yaknowman
|
/yakbot/ext.py
|
UTF-8
| 1,531
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
CMDNAME_ATTR = '__cmdname__'
ALIASES_ATTR = '__aliases__'
def command(name=None, aliases=()):
""" Decorator to register a command handler in a Plugin. """
fn = None
if callable(name):
fn = name
name = None
def _command(fn):
setattr(fn, CMDNAME_ATTR, fn.__name__ if name is None else name)
setattr(fn, ALIASES_ATTR, aliases)
return fn
if fn:
return _command(fn)
return _command
class PluginMeta(type):
def __new__(cls, name, bases, attrs):
plugin = type.__new__(cls, name, bases, attrs)
if bases == (object,):
# Skip metaclass magic for Plugin base class
return plugin
if plugin.name is None:
setattr(plugin, 'name', name)
commands = []
for name, value in attrs.iteritems():
if callable(value) and hasattr(value, CMDNAME_ATTR):
cmdname = getattr(value, CMDNAME_ATTR)
aliases = getattr(value, ALIASES_ATTR, ())
commands.append((cmdname, value, aliases))
plugin._commands = commands
return plugin
class Plugin(object):
__metaclass__ = PluginMeta
name = None # Populated with the class name if None
private = False # Whether the plug-in should be hidden in !list
_commands = None
def __init__(self, yakbot, irc):
self.yakbot = yakbot
self.irc = irc
def __hash__(self):
return hash(self.name)
def on_unload(self):
pass
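# A minimal usage sketch (a hypothetical plugin; the yakbot/irc objects are assumed to be
# supplied by the bot when it loads the plugin):
#   class Greeter(Plugin):
#       @command(aliases=('hi',))
#       def hello(self, *args):
#           return 'hello!'
# PluginMeta would then record ('hello', <function hello>, ('hi',)) in Greeter._commands.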
| true
|
ff6923510ad01f8deb4170816490ecf325c54043
|
Python
|
pwdemars/projecteuler
|
/josh/Problems/54.py
|
UTF-8
| 2,581
| 2.90625
| 3
|
[] |
no_license
|
hands_file = open('/Users/joshuajacob/Downloads/p054_poker.txt', 'r').read().split()
from operator import itemgetter
def num_func(num):
if num == 'T':
return(10)
if num == 'J':
return(11)
if num == 'Q':
return(12)
if num == 'K':
return(13)
if num == 'A':
return(14)
if num == 'C':
return(1)
if num == 'D':
return(2)
if num == 'H':
return(3)
if num == 'S':
return(4)
else:
return(int(num))
hands_file = [sorted([[num_func(x[0]),num_func(x[1])] for x in hands_file[5*i:5*i+5]]) for i in range(2000)]
'''print(hands_file[0])
print(sorted(hands_file[0], key = itemgetter(1)))'''
def frequency_checker(hand):
z = [0 for m in range(14)]
x = 0
while x<5:
z[hand[x][0]-1] += 1
x += 1
return(z)
def flush(hand):
if hand[0][1] == hand[1][1] == hand[2][1] == hand[3][1] == hand[4][1]:
return(True)
def hand_checker(hand):
freak = frequency_checker(hand)
if max(freak) == 1:
if hand[0][0]+4==hand[4][0] or (hand[3][0] == 5 and hand[4][0] == 14):
if flush(hand):
print(hand,'straight flush')
return([8,0,0])
else:
print(hand,'straight')
return([4,0,0])
elif flush(hand):
print(hand,'flush')
return([5,0,0])
else:
return([0,0,0])
elif max(freak) == 2:
if freak.count(2) == 2:
return([2,13-freak[::-1].index(2),freak.index(2)])
exit()
else:
return([1,freak.index(2),0])
elif max(freak) == 3:
if freak.count(2) == 1:
return([6,freak.index(3),freak.index(2)])
else:
return([3,freak.index(3),0])
elif max(freak) == 4:
return([7,freak.index(4),0])
a = 0
b = 0
c = 0
r = 0
while r <1999:
if hand_checker(hands_file[r]) == hand_checker(hands_file[r+1]):
for x in range(5):
if hands_file[r][-x-1][0] > hands_file[r+1][-x-1][0]:
a += 1
break
elif hands_file[r][-x-1][0] < hands_file[r+1][-x-1][0]:
b += 1
break
elif x == 4:
c += 1
print(hands_file[r], hands_file[r+1][-x][0],'happy')
elif hand_checker(hands_file[r]) > hand_checker(hands_file[r+1]):
a += 1
else:
b += 1
r += 2
| true
|
635cbcb9d69589f8f05c2bb703a1f90908e1a8f5
|
Python
|
gowshalinirajalingam/Advanced-regression-modeling
|
/House_Prices_Advanced_Regression_Techniques.py
|
UTF-8
| 15,607
| 2.890625
| 3
|
[] |
no_license
|
# coding: utf-8
# In[1]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
from sklearn.preprocessing import LabelEncoder  # for encoding categorical values
from sklearn.model_selection import train_test_split  # for splitting the data
# from lightgbm import LGBMRegressor ## for import our model
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt # Matlab-style plotting
import seaborn as sns
from scipy.stats import norm, skew #for some statistics
# In[2]:
train_dataset = pd.read_csv('G:\\sliit DS\\4th year 1st seme\\thawes kaggle\\house-prices-advanced-regression-techniques\\train.csv')
test_dataset = pd.read_csv('G:\\sliit DS\\4th year 1st seme\\thawes kaggle\\house-prices-advanced-regression-techniques\\test.csv')
# In[3]:
train_dataset.head()
# In[4]:
train_dataset.shape
# In[5]:
train_dataset.describe()
# In[6]:
test_dataset.shape
# In[7]:
test_dataset.head()
# In[8]:
train_dataset.columns
# In[9]:
test_dataset.columns
# Feature Engineering
# In[10]:
train_dataset.dtypes
# In[11]:
#change salesPrice int64 type to float64
train_dataset["SalePrice"]=train_dataset["SalePrice"].astype("float64")
# In[12]:
test_dataset.dtypes
# In[13]:
#Save the 'Id' column
train_ID = train_dataset['Id']
test_ID = test_dataset['Id']
# In[14]:
#Now drop the 'Id' colum since it's unnecessary for the prediction process.
train_dataset.drop("Id", axis = 1, inplace = True)
test_dataset.drop("Id", axis = 1, inplace = True)
# In[15]:
#Check for null values
train_dataset.isnull().sum()
# In[16]:
#drop major missing value columns in train_dataset
train_dataset.drop("Alley", axis = 1, inplace = True)
train_dataset.drop("PoolQC", axis = 1, inplace = True)
train_dataset.drop("Fence", axis = 1, inplace = True)
train_dataset.drop("MiscFeature", axis = 1, inplace = True)
# In[17]:
train_dataset.isnull().sum()
# In[18]:
# Fill missing values in numerical columns with the median, and in character columns with the most frequent value
col_miss_val_train = [col for col in train_dataset.columns if train_dataset[col].isnull().any()]
print(col_miss_val_train)
for col in col_miss_val_train:
if(train_dataset[col].dtype == np.dtype('O')):
train_dataset[col]=train_dataset[col].fillna(train_dataset[col].value_counts().index[0]) #replace NaN values with most frequent value
else:
train_dataset[col] = train_dataset[col].fillna(train_dataset[col].median())
# In[19]:
test_dataset.isnull().sum()
# In[20]:
#drop major missing value columns in test_dataset
test_dataset.drop("Alley", axis = 1, inplace = True)
test_dataset.drop("PoolQC", axis = 1, inplace = True)
test_dataset.drop("Fence", axis = 1, inplace = True)
test_dataset.drop("MiscFeature", axis = 1, inplace = True)
# In[21]:
test_dataset.isnull().sum()
# In[22]:
# Fill missing values in numerical columns with the median, and in character columns with the most frequent value
col_miss_val_test = [col for col in test_dataset.columns if test_dataset[col].isnull().any()]
print(col_miss_val_test)
for col in col_miss_val_test:
if(test_dataset[col].dtype == np.dtype('O')):
test_dataset[col]=test_dataset[col].fillna(test_dataset[col].value_counts().index[0]) #replace NaN values with most frequent value
else:
test_dataset[col] = test_dataset[col].fillna(test_dataset[col].median())
# In[23]:
#Outliers
fig, ax = plt.subplots()
ax.scatter(x = train_dataset['GrLivArea'], y = train_dataset['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
# In[24]:
#We can see at the bottom right two points with extremely large GrLivArea but a low price. These values are huge outliers, so we can safely delete them.
#Deleting outliers
train_dataset = train_dataset.drop(train_dataset[(train_dataset['GrLivArea']>4000) & (train_dataset['SalePrice']<300000)].index)
# In[25]:
#Check the graphic again
fig, ax = plt.subplots()
ax.scatter(x = train_dataset['GrLivArea'], y = train_dataset['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
# In[26]:
#Encode categorical values into numerical values using LabelEncoder
train_dataset.select_dtypes(include=['object'])
LE = LabelEncoder()
for col in train_dataset.select_dtypes(include=['object']):
train_dataset[col] = LE.fit_transform(train_dataset[col])
train_dataset.head()
# In[27]:
test_dataset.select_dtypes(include=['object'])
LE = LabelEncoder()
for col in test_dataset.select_dtypes(include=['object']):
test_dataset[col] = LE.fit_transform(test_dataset[col])
test_dataset.head()
# Target Variable
# In[28]:
#Check the y variable for a normal distribution. Models love normally distributed data.
sns.distplot(train_dataset['SalePrice'] , fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(train_dataset['SalePrice'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# In[29]:
#Get also the QQ-plot
from scipy import stats
fig = plt.figure()
res = stats.probplot(train_dataset['SalePrice'], plot=plt)
plt.show()
# In[30]:
#The target variable is right skewed. As (linear) models love normally distributed data,
#we need to transform this variable to make it more normally distributed
# Log-transformation of the target variable
#We use the numpy fuction log1p which applies log(1+x) to all elements of the column
train_dataset["SalePrice"] = np.log1p(train_dataset["SalePrice"])
#Check the new distribution
sns.distplot(train_dataset['SalePrice'] , fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(train_dataset['SalePrice'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
#Now plot the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')
#Get also the QQ-plot
fig = plt.figure()
res = stats.probplot(train_dataset['SalePrice'], plot=plt)
plt.show()
# In[31]:
#Data correlation
#Correlation map to see how features are correlated with SalePrice
corrmat = train_dataset.corr()
plt.subplots(figsize=(12,9))
sns.heatmap(corrmat, vmax=0.9, square=True)
# Predictive model building
# In[32]:
#Split train_dataset
x = train_dataset.iloc[:,0:-1]
y = train_dataset.iloc[:,-1]
# In[33]:
#split train,test
x_train , x_test , y_train , y_test = train_test_split(x , y ,test_size = 0.1,random_state = 1)
# In[34]:
x_train
# In[35]:
#Import libraries
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb
# In[36]:
#Base models
#LASSO Regression
# This model may be very sensitive to outliers, so we need to make it more robust to them.
# For that we use the sklearn's Robustscaler() method on pipeline
lasso = make_pipeline(RobustScaler(), Lasso(alpha =0.0005, random_state=1))
# Elastic Net Regression
# again made robust to outliers
ENet = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3))
# Kernel Ridge Regression
KRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
# Gradient Boosting Regression :
# With huber loss that makes it robust to outliers
GBoost = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,
max_depth=4, max_features='sqrt',
min_samples_leaf=15, min_samples_split=10,
loss='huber', random_state =5)
# XGBoost :
model_xgb = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468,
learning_rate=0.05, max_depth=3,
min_child_weight=1.7817, n_estimators=2200,
reg_alpha=0.4640, reg_lambda=0.8571,
subsample=0.5213, silent=1,
random_state =7, nthread = -1)
# LightGBM :
model_lgb = lgb.LGBMRegressor(objective='regression',num_leaves=5,
learning_rate=0.05, n_estimators=720,
max_bin = 55, bagging_fraction = 0.8,
bagging_freq = 5, feature_fraction = 0.2319,
feature_fraction_seed=9, bagging_seed=9,
min_data_in_leaf =6, min_sum_hessian_in_leaf = 11)
# => Select Best models
# In[37]:
#Base model scores
#We use cross validation score for neg_mean_squared_error
#Validation function
# If the y variable is continuous we use an RMSE measure, not accuracy; accuracy score is used in classification problems
n_folds = 5
# RMSLE (Root Mean Square Logarithmic Error)
# This helper is used to apply the RMSE measure inside the cross-validation method
def rmsle_cv(model):
kf = KFold(n_folds, shuffle=True, random_state=42).get_n_splits(x_train.values)
rmse= np.sqrt(-cross_val_score(model, x_train.values, y_train.values, scoring="neg_mean_squared_error", cv = kf))
return(rmse)
# In[38]:
score = rmsle_cv(lasso)
print("Lasso score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
# In[39]:
score = rmsle_cv(ENet)
print("ElasticNet score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
# In[40]:
# score = rmsle_cv(KRR)
# print("Kernel Ridge score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
# In[41]:
score = rmsle_cv(GBoost)
print("Gradient Boosting score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
# In[42]:
score = rmsle_cv(model_xgb)
print("Xgboost score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
# In[43]:
score = rmsle_cv(model_lgb)
print("LGBM score: {:.4f} ({:.4f})\n" .format(score.mean(), score.std()))
# In[44]:
#Average base models score
class AveragingModels(BaseEstimator, RegressorMixin, TransformerMixin):
def __init__(self, models):
self.models = models
# we define clones of the original models to fit the data in
def fit(self, X, y):
self.models_ = [clone(x) for x in self.models]
# Train cloned base models
for model in self.models_:
model.fit(X, y)
return self
#Now we do the predictions for cloned models and average them
def predict(self, X):
predictions = np.column_stack([
model.predict(X) for model in self.models_
])
return np.mean(predictions, axis=1)
averaged_models = AveragingModels(models = (ENet, GBoost, lasso, model_xgb, model_lgb))
score = rmsle_cv(averaged_models)
print(" Averaged base models score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
# => Fit the model to test_dataset and make predictions
# In[45]:
# Ensemble Technique
# Stacking
# In[46]:
class StackingAveragedModels(BaseEstimator, RegressorMixin, TransformerMixin):
def __init__(self, base_models, meta_model, n_folds=5):
self.base_models = base_models
self.meta_model = meta_model
self.n_folds = n_folds
# We again fit the data on clones of the original models
def fit(self, X, y):
self.base_models_ = [list() for x in self.base_models]
self.meta_model_ = clone(self.meta_model)
kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156)
# Train cloned base models then create out-of-fold predictions
# that are needed to train the cloned meta-model
out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))
for i, model in enumerate(self.base_models):
for train_index, holdout_index in kfold.split(X, y):
instance = clone(model)
self.base_models_[i].append(instance)
instance.fit(X[train_index], y[train_index])
y_pred = instance.predict(X[holdout_index])
out_of_fold_predictions[holdout_index, i] = y_pred
# Now train the cloned meta-model using the out-of-fold predictions as new feature
self.meta_model_.fit(out_of_fold_predictions, y)
return self
#Do the predictions of all base models on the test data and use the averaged predictions as
#meta-features for the final prediction which is done by the meta-model
def predict(self, X):
meta_features = np.column_stack([
np.column_stack([model.predict(X) for model in base_models]).mean(axis=1)
for base_models in self.base_models_ ])
return self.meta_model_.predict(meta_features)
# In[47]:
# The meta-model uses the out-of-fold predictions of the base models as inputs, and the correct responses
# (target variable) as outputs, to train a higher-level model.
stacked_averaged_models = StackingAveragedModels(base_models = (ENet, GBoost, lasso, model_lgb), meta_model = model_xgb)
# In[48]:
# RMSLE (Root Mean Square Logarithmic Error)
# This helper calculates the RMSE between the predicted y values and the actual y values
def rmsle(y, y_pred):
return np.sqrt(mean_squared_error(y, y_pred))
# In[49]:
# Stacked Regressor
stacked_averaged_models.fit(x_train.values, y_train.values)
stacked_x_test_pred = stacked_averaged_models.predict(x_test.values)
print("RMSLE Score for Stacked Regressor :{}".format(rmsle(y_test.values, stacked_x_test_pred)))
# In[50]:
# XGBoost
model_xgb.fit(x_train, y_train)
xgb_x_test_pred = model_xgb.predict(x_test)
print("RMSLE Score for XGBoost :{}".format(rmsle(y_test, xgb_x_test_pred)))
# In[51]:
# GradientBoost:
GBoost.fit(x_train, y_train)
gb_x_test_pred = GBoost.predict(x_test)
print("RMSLE Score for GradientBoost :{}".format(rmsle(y_test, gb_x_test_pred)))
# In[52]:
stacked_test_dataset_pred = np.expm1(stacked_averaged_models.predict(test_dataset)) # np.expm1(x) computes exp(x) - 1,
# i.e. it inverts the np.log1p transform that was applied to SalePrice earlier.
xgb_test_dataset_pred = np.expm1(model_xgb.predict(test_dataset))
gb_test_dataset_pred = np.expm1(GBoost.predict(test_dataset))
# In[53]:
'''RMSE on the entire Train data when averaging'''
print('RMSLE score on x_test data:')
print(rmsle(y_test,stacked_x_test_pred*0.7 +
xgb_x_test_pred*0.15 + gb_x_test_pred*0.15 ))
# In[56]:
#Weighted Average
# This is an extension of the averaging method.
# All models are assigned different weights defining the importance of each model for prediction.
#The weights were chosen to give a low RMSE on the x_test data
#Esemble prediction
ensemble = stacked_test_dataset_pred*0.7 + xgb_test_dataset_pred*0.15 + gb_test_dataset_pred*0.15
ensemble
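# For illustration only (hypothetical numbers): if for one house the stacked model
# predicts 200000, XGBoost 210000 and GradientBoost 190000, the blend is
# 0.7*200000 + 0.15*210000 + 0.15*190000 = 200000, with the weights summing to 1.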
# In[57]:
sub = pd.DataFrame()
sub['Id'] = test_ID
sub['SalePrice'] = ensemble
sub.to_csv('submission.csv',index=False)
| true
|
d31f6836dd4cc4abbf6cee63ef4db80b569ebb67
|
Python
|
bruno-antonio-pinho/Projeto1_Kernel
|
/demo_selector.py
|
UTF-8
| 512
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/python3
import selectors
import sys
Timeout = 5 # 5 seconds
# a callback that reads from fileobj
def handle(fileobj):
s = fileobj.readline()
print('Lido:', s)
sched = selectors.DefaultSelector()
sched.register(sys.stdin, selectors.EVENT_READ, handle)
while True:
eventos = sched.select(Timeout)
if not eventos: # timeout !
print(eventos)
print('Timeout !')
else:
print(eventos)
for key,mask in eventos:
cb = key.data # this is the callback!
cb(key.fileobj)
| true
|
984297216a6d99d92bf82a745db908e3dbefd396
|
Python
|
ConnorJSmith2/First-Pitch-Analyzer
|
/firstPitchAnalyzer.py
|
UTF-8
| 2,153
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
import sys
import csv
import copy
#If a filename was passed on the command line, return it; otherwise exit
def getCommandLineArg():
if (len(sys.argv) == 2):
return sys.argv[1]
else:
print ("Error: No file inputted. \nUsage is: python firstPitchAnalyzer.py <filename.csv>")
exit()
def printCSV(filename):
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
print (row)
def readCSV(filename):
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
FAILURES = ["GIDP", "STRIKEOUT", "OUT", "FIDP", "FC"]
SUCCESSES = ["HIT", "BB"]
FAILURE_DICT = { i : 0 for i in FAILURES }
SUCCESS_DICT = { i : 0 for i in SUCCESSES }
PITCH_DICT = {"SUCCESS": copy.deepcopy(SUCCESS_DICT), "FAILURE": copy.deepcopy(FAILURE_DICT)}
firstPitchData = {"STRIKE": copy.deepcopy(PITCH_DICT), "BALL": copy.deepcopy(PITCH_DICT)}
firstPitchType = None
# Skips header row
next(csv_reader, None)
for row in csv_reader:
if (row[1] == "0-0"):
# Resets first pitch, useful if data incomplete and new at-bat starts before event type found
firstPitchType = None
continue
elif (row[1] == "0-1"):
firstPitchType = "STRIKE"
elif (row[1] == "1-0"):
firstPitchType = "BALL"
if (firstPitchType and row[4]):
# if event, register in dict
event = row[4].upper()
if (event in SUCCESSES):
firstPitchData[firstPitchType]["SUCCESS"][event] += 1
elif (event in FAILURES):
firstPitchData[firstPitchType]["FAILURE"][event] += 1
# At-bat event, reset first pitch
firstPitchType = None
return firstPitchData
def anaylzeData(pitchData):
pass
#return dict from notes
def printPitchData(atBatData):
for pitchType, pitchData in atBatData.items():
print("********", pitchType, "********")
for statusType, statusData in pitchData.items():
print("/", statusType, "\\")
for eventType, eventData in statusData.items():
print(eventType, ": ", eventData)
def main():
filename = getCommandLineArg()
atBatData = readCSV(filename)
printPitchData(atBatData)
if __name__ == "__main__":
main()
| true
|
352348e53951c63fd7353a21cf6783b9ab4ecb7b
|
Python
|
efikalti/File-Parsing
|
/mapper.py
|
UTF-8
| 1,078
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import sys
import re, string
from operator import itemgetter
reg_ex = '|'
#open files
file2 = open(sys.argv[1], "r")
#read fields of file1
fields1 = sys.stdin.readline().strip().split(reg_ex)
#read fields of file 2
fields2 = file2.readline().strip().split(reg_ex)
#read every line of file2 into one variable
file2_lines = file2.readlines()
#parse every line of file1
num1 = 2
for line in sys.stdin:
line = line.strip()
#split the lines into parts
parts1 = line.split(reg_ex)
#parse every line of file2
num2 = 2
for line2 in file2_lines:
output = ''
parts2 = line2.strip().split(reg_ex)
for part1 in parts1:
for part2 in parts2:
words = part2.strip().split(' ')
for word in words:
if word in part1.split(' '):
output = output + word + ' '
output = re.sub(r'\W[^A-Za-z0-9|]\W', "", output)
if output != '':
output = output + ' | line_in_file_1: ' + str(num1) + ' | line_in_file_2: ' + str(num2) + '\n'
print output
num2 = num2 + 1
num1 = num1 + 1
file2.close()
| true
|
3d83bbc502b68539559b60050f40dc64d43152af
|
Python
|
yuki-uchida/Competitive_programming
|
/AtcoderBeginnerContest/162/d.py
|
UTF-8
| 2,426
| 3.03125
| 3
|
[] |
no_license
|
import bisect
N = int(input()) # N <= 4000; a naive triple loop is ~6*10^10 operations, so the work must be reduced
S = list(input())
# Si, Sj and Sk must all be different characters, with i < j < k
# and additionally j - i != k - j
# e.g. indices 1,2,3 are not allowed, but 1,2,4 are OK
# 1,3,5 is also not allowed (equally spaced)
# count the number of such triples
# count = 0
# for i in range(N):
# for j in range(i + 1, N):
# for k in range(j + 1, N):
# if j - i != k - j:
# if S[i] != S[j] and S[j] != S[k] and S[i] != S[k]:
# count += 1
indexes = {'R': [], "G": [], "B": []}
for i, char in enumerate(S):
indexes[char].append(i + 1)
# print(permutation_hash)
count = 0
R_indexes_count = len(indexes['R'])
G_indexes_count = len(indexes['G'])
B_indexes_count = len(indexes['B'])
indexes_count = {'R': R_indexes_count,
'G': G_indexes_count, 'B': B_indexes_count}
# idea: iterate over the shorter index lists to cut the work?
if R_indexes_count <= G_indexes_count and R_indexes_count <= B_indexes_count:
min_type = 'R'
if G_indexes_count <= B_indexes_count:
second_min_type = 'G'
last_type = 'B'
else:
second_min_type = 'B'
last_type = 'G'
elif G_indexes_count <= R_indexes_count and G_indexes_count <= B_indexes_count:
min_type = 'G'
if R_indexes_count <= B_indexes_count:
second_min_type = 'R'
last_type = 'B'
else:
second_min_type = 'B'
last_type = 'R'
else:
min_type = 'B'
if G_indexes_count <= R_indexes_count:
second_min_type = 'G'
last_type = 'R'
else:
second_min_type = 'R'
last_type = 'G'
# print(min_type, second_min_type, last_type)
for R_index in indexes[min_type]:
for G_index in indexes[second_min_type]:
middle_index = (R_index + G_index) / 2
remove_conut = 0
if middle_index != R_index and middle_index != G_index:
if middle_index in indexes[last_type]:
remove_conut += 1
div = abs(G_index - R_index)
if G_index < R_index:
if G_index - div in indexes[last_type]:
remove_conut += 1
if R_index + div in indexes[last_type]:
remove_conut += 1
else:
if G_index + div in indexes[last_type]:
remove_conut += 1
if R_index - div in indexes[last_type]:
remove_conut += 1
count += (indexes_count[last_type] - remove_conut)
print(count)
| true
|
174632ac0ff7e15c818051c7fa0cfd0604be9b90
|
Python
|
mingxoxo/Algorithm
|
/baekjoon/3053.py
|
UTF-8
| 225
| 3.03125
| 3
|
[] |
no_license
|
#Taxicab geometry
#https://www.acmicpc.net/problem/3053
import math
R = int(input())
#A circle has a different shape under Euclidean geometry than under Manhattan (taxicab) distance
print("{:.6f}".format(R*R*math.pi))
print("{:.6f}".format(R*R*2))
| true
|
184539ad02b03c70c9347e4f3132c05c60e6c6eb
|
Python
|
haochengz/superlists
|
/functional_test/base.py
|
UTF-8
| 1,288
| 2.515625
| 3
|
[] |
no_license
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
import time
class FunctionalTest(StaticLiveServerTestCase):
def setUp(self):
self.browser = self.open_a_browser()
def reset_browser(self):
self.browser.quit()
self.browser = self.open_a_browser()
def open_a_browser(self):
firefox_options = webdriver.FirefoxOptions()
firefox_options.set_headless()
browser = webdriver.Firefox(options=firefox_options)
browser.implicitly_wait(3)
return browser
def check_for_row_of_table_contains_item(self, row_text):
# html文件再如不完整将引发selenium查找节点失败的错误
time.sleep(3)
table = self.browser.find_element_by_id('id_list_table')
self.assertIn(row_text,
"".join([row.text for row in table.find_elements_by_tag_name('tr')]))
def submit_a_item_at_index_page(self, item_text):
inputbox = self.get_input_box()
inputbox.send_keys(item_text)
inputbox.send_keys(Keys.ENTER)
def get_input_box(self):
return self.browser.find_element_by_id('id_text')
def tearDown(self):
self.browser.close()
| true
|
f875542359851f332040d8038a8ba7ef9ea0a6cf
|
Python
|
BedirT/games-puzzles-algorithms
|
/old/lib/games_puzzles_algorithms/puzzles/solvable_sliding_tile_puzzle.py
|
UTF-8
| 1,587
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
from games_puzzles_algorithms.puzzles.sliding_tile_puzzle import SlidingTilePuzzle
from games_puzzles_algorithms.twod_array import TwoDArray
import random
class SolvableSlidingTilePuzzle(SlidingTilePuzzle):
"""A representation of a sliding tile puzzle guaranteed to be solvable."""
def __init__(self, size1=3, seed=None, size2=None):
"""
Initialize a rectangular sliding tile puzzle with size1 * size2 tiles.
size1 gives the number of rows. The number of columns is given by size2
or size1 if size2 is None.
The optional seed argument is used to seed the random number generator
for randomizing the initial puzzle layout if it is set,
so the same puzzle can be generated repeatedly by setting the same seed.
"""
if seed:
random.seed(seed)
self.size1 = size1
self.size2 = size2
if size2 is None:
self.size2 = size1
self.puzzle = list(range(self.size1 * self.size2))
self.puzzle = TwoDArray((self.size1, self.size2), self.puzzle)
self.num_correct_tiles = 0
for i in range(self.size1):
for j in range(self.size2):
if self.puzzle[(i, j)] == self.BLANK:
self.blank_index = (i, j)
if self.puzzle[(i, j)] == self.correct_num((i, j)):
self.num_correct_tiles += 1
for i in range(self.size1 * self.size2 * 10):
moves = self.valid_moves()
self.apply_move(random.choice(moves))
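# A minimal usage sketch (assuming the package is importable as above):
#   puzzle = SolvableSlidingTilePuzzle(3, seed=42)
# Because the layout is produced by applying random valid moves to the solved state,
# every generated puzzle is guaranteed to be solvable.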
| true
|
757d3b358b58127d74723e996aebba19e8ce5d44
|
Python
|
varenius/oso
|
/VGOS/VGOS_prep.py
|
UTF-8
| 8,774
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
import sys, os
import datetime
print("Welcome to the OTT VGOS_prep script. Please answer the following questions:")
###########################
exp = input("QUESTION: Experiment, e.g. b22082 ? ").strip().lower()
print("INFO: OK, try to process experiment "+exp)
print()
###########################
dlans = input("QUESTION: Download schedule from IVS (yes/no) ? ").strip().lower()
if dlans =="yes" or dlans =="y":
print("INFO: Will download schedule from IVS")
dl = True
else:
if os.path.exists("/usr2/sched/"+exp+".skd") or os.path.exists("/usr2/sched/"+exp+".vex"):
print("INFO: Will using already existing file in /usr2/sched/")
dl = False
else:
print("ERROR: Download not requested, but no matching skd/vex file found in /usr2/sched/. Did you forget to put it here?")
print("ABORTING!")
sys.exit(1)
print()
###########################
prcs = {"1":"vo-default",
"2": "vt2077-alternative",
"3": "on1323",
"4": "on1324",
"5": "on1325",
"6": "X-band from S/X R11091 (8x8 MHz overlap)",
"7": "X-band from S/X RV157 (4x16 MHz overlap)",
"8": "X-band from S/X T2 (8x4 MHz overlap)"
}
prcks = " ".join(["\n" + i+")"+prcs[i] for i in sorted(prcs.keys())])
print("INFO: Available frequency setup default PRC files are:\n{}\n".format(prcks))
prc = input("QUESTION: Please select setup using digit 1, 2, ...: ").strip().lower()
selprc = prcs[prc]
print("INFO: OK, selected setup is "+selprc)
print()
###########################
# get hostname of this FS machine, fulla or freja
host = os.uname()[1]
# Translate hostname to telescope 2 letter code for drudg
tels = {"fulla":"oe", "freja":"ow"}
tel = tels[host]
ant = input("QUESTION: This is machine " + host + " so I assume you will use antenna " + tel + " (yes/no) ? ")
if not (ant =="yes" or ant =="y"):
tel = input("QUESTION: Then which antenna (oe/ow)? ").strip().lower()
print("INFO: OK, using " + tel)
print("")
###########################
recs = {"fulla":"gyller","freja":"skirner"}
rec = recs[host]
ans = input("QUESTION: This is machine " + host + " so I assume you will use recorder " + rec + " (yes/no) ? ")
if not (ans =="yes" or ans =="y"):
rec = input("QUESTION: Then which recorder (gyller/skirner/kare)?").strip().lower()
print("INFO: OK, using " + rec)
print("")
###########################
mirrorans = input("QUESTION: Mirror other station snap file - useful to tag-along Oe/Ow with a On S/X experiment - (yes/no) ? ")
mirror = False
if (mirrorans =="yes" or mirrorans =="y"):
mirror = True
tagtel = input("QUESTION: Then which antenna to mirror (normally On)? ").strip().lower()
print("INFO: OK, will mirror SNP file for " + tagtel)
print("")
###########################
print("QUESTION: Which experiment to start after this one?")
nextexp = input(" Type experiment name (e.g. b22087 without oe/ow), else leave blank: ").lower().strip()
print("")
###########################
a_offans = input("QUESTION: Do you want to add a antenna=off after prepant (yes/no) ? ").lower().strip()
a_off = False
if (a_offans =="yes" or a_offans =="y"):
a_off = True
print("")
###########################
# PRINT SUMMARY
print("")
print("###########################")
print("")
print("YOU HAVE MADE THE FOLLOWING CHOICES:")
print("Experiment: "+exp)
print("Download from IVS: "+dlans)
print("Frequency setup: "+selprc)
print("Telescope: " + tel)
print("Mirror other station snap file: " + mirrorans)
if mirror:
print(" ... will mirror SNP file for " + tagtel)
print("Recorder: " + rec)
print("Next exp after sched_end:" + nextexp)
print("Put 'antenna=off' after prepant: " + a_offans)
print("")
print("###########################")
print("")
check = input("FINAL QUESTION: Ready to prepare, and possibly overwrite, experiment files for exp " + exp + ". Proceed (yes/no) ? " ).strip().lower()
if not (check == "yes" or check == "y"):
print("ABORTING!")
sys.exit(0)
###########################
# START ACTUAL SCRIPT!
# Get path of script
scriptpath = os.path.dirname(os.path.realpath(__file__))
if dl:
# Get schedule via wget, saving it in /usr2/sched/, e.g. /usr2/sched/vt9248.skd
print("INFO: Downloading sked file...")
wgetcmd = "fesh -f " + exp
os.system(wgetcmd)
# drudg skd/vex file for SNP.
if mirror:
print("Mirroring, so will run DRUGG for antenna " + tagtel + " and then replace with actual antenna " + tel + " ...")
drudgtel = tagtel
else:
drudgtel = tel
print("INFO: Running DRUDG for telescope " + drudgtel + " ...")
drudgcmd = "drudg /usr2/sched/" + exp + " " + drudgtel + " 3 0"
os.system(drudgcmd)
if mirror:
movecmd = "mv /usr2/sched/" + exp + drudgtel + ".snp /usr2/sched/" + exp + tel + ".snp"
os.system(movecmd)
replacecmd = "sed -i 's/"+exp+","+tagtel+",/"+exp+","+tel+",/g' /usr2/sched/" + exp + tel + ".snp"
os.system(replacecmd)
# change setupsx to setupbb in SNP file
#print("INFO: Changing setupsx to setupbb and commenting out in snp file...")
sedcmd = "sed -i 's/setupsx/\"setupbb/g' /usr2/sched/"+exp+tel+".snp"
os.system(sedcmd)
#print("INFO: Commenting out any setupxx-calls in snp file...")
sedcmd = "sed -i 's/setupxx/\"setupxx/g' /usr2/sched/"+exp+tel+".snp"
os.system(sedcmd)
#print("INFO: Commenting out any setup01-calls in snp file...")
sedcmd = "sed -i 's/setup01/\"setup01/g' /usr2/sched/"+exp+tel+".snp"
os.system(sedcmd)
# copy template PRC file to /usr2/proc/expST.prc where ST is oe or ow
print("INFO: Instead of drudging for PRC, copy template PRC for "+ selprc)
if selprc=="vo-default":
cpcmd = "cp " + scriptpath + "/PRC/VGOS_default_prc." + tel + " /usr2/proc/" + exp + tel + ".prc"
# Templates below are for Ow, but Oe is identical since differences are absorbed in station.prc
elif selprc=="vt2077-alternative":
cpcmd = "cp " + scriptpath + "/PRC/vt2077ow.prc /usr2/proc/" + exp + tel + ".prc"
elif selprc=="on1323":
cpcmd = "cp " + scriptpath + "/PRC/on1323ow.prc /usr2/proc/" + exp + tel + ".prc"
elif selprc=="on1324":
cpcmd = "cp " + scriptpath + "/PRC/on1324ow.prc /usr2/proc/" + exp + tel + ".prc"
elif selprc=="on1325":
cpcmd = "cp " + scriptpath + "/PRC/on1325ow.prc /usr2/proc/" + exp + tel + ".prc"
elif selprc=="X-band from S/X R11091 (8x8 MHz overlap)":
cpcmd = "cp " + scriptpath + "/PRC/r11091_8x8MHz_xband_centered_32MHz_ow.prc /usr2/proc/" + exp + tel + ".prc"
elif selprc=="X-band from S/X RV157 (4x16 MHz overlap)":
cpcmd = "cp " + scriptpath + "/PRC/rv157_4x16MHz_xband_centered_32MHz_ow.prc /usr2/proc/" + exp + tel + ".prc"
elif selprc=="X-band from S/X T2 (8x4 MHz overlap)":
cpcmd = "cp " + scriptpath + "/PRC/t2_8x4MHz_xband_centered_32MHz_ow.prc /usr2/proc/" + exp + tel + ".prc"
os.system(cpcmd)
snpf = "/usr2/sched/"+exp+tel+".snp"
# Store lines in array
lines = []
for line in open(snpf):
lines.append(line)
# Find first timetag
for line in lines:
if line.startswith("!20"):
starttime = datetime.datetime.strptime(line.strip()[1:], "%Y.%j.%H:%M:%S")
break
preptime = (starttime+datetime.timedelta(minutes=-10)).strftime("%Y.%j.%H:%M:%S")
#print("starttime=", starttime, "preptime=", preptime)
wf = open(snpf, "w")
for line in lines:
wf.write(line)
if "Rack=DBBC" in line:
#Do not set recorded here, too late, instead set in PRC, see code later in this script
wf.write("prepant\n")
if a_off:
wf.write("antenna=off\n")
wf.write("!"+preptime + "\n")
wf.write("antenna=run\n")
if exp.startswith("b2") or exp.startswith("c2"):
#VGOSB/C session, include auto-transfer to ishioka
print("INFO: This is a VGOSB or VGOSC experiment, so adding automatic data transfer to GSI after exp finish.")
wf.write("sy=etransfer.py {0} gsi {1} {2}\n".format(rec, exp, tel))
if not nextexp=="":
print("INFO: Adding schedule={0}{1},#1 as last line of SNP file.".format(nextexp, tel))
wf.write("schedule={0}{1},#1\n".format(nextexp, tel))
else:
wf.write("antenna=off\n")
wf.close()
# Also edit PRC file to insert flexbuff_config command at start of exper_initi.
# Previously we had this at start of SNP file, but this is after exper_initi,
# so FS will set the "fb_mode" and similar commands first and THEN change
# recorder, which means the recording would fail to record properly (since no mode set).
prcf = "/usr2/proc/"+exp+tel+".prc"
# Store lines in array
plines = []
for line in open(prcf):
plines.append(line)
pf = open(prcf, "w")
for line in plines:
pf.write(line)
if "define exper_initi" in line:
pf.write("init_{0}\n".format(rec))
print("INFO: All done. You may want to check the resulting /usr2/sched/{0}{1}.snp and /usr2/proc/{0}{1}.prc files.".format(exp,tel))
| true
|
67d06e23cf161abe692fae0421b02323445fe788
|
Python
|
parzibyte/crud-mysql-python
|
/eliminar.py
|
UTF-8
| 643
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
"""
CRUD tutorial with MySQL and Python 3
parzibyte.me/blog
"""
import pymysql
try:
conexion = pymysql.connect(host='localhost',
user='root',
password='',
db='peliculas')
try:
with conexion.cursor() as cursor:
consulta = "DELETE FROM peliculas WHERE anio < %s;"
anio = 2000
cursor.execute(consulta, (anio,))
# Don't forget to commit whenever we make a change to the database
conexion.commit()
finally:
conexion.close()
except (pymysql.err.OperationalError, pymysql.err.InternalError) as e:
print("Ocurrió un error al conectar: ", e)
| true
|
7f0c8544da84b7c67bd2a897a7911e5cb1ae4752
|
Python
|
zachselin/TrafficSim_TeamDangerous
|
/bufferbuildercar.py
|
UTF-8
| 3,127
| 2.625
| 3
|
[] |
no_license
|
from car import Car
import math
import numpy as np
import random
import shared as g
class BufferBuilder(Car):
def __init__(self, sim, lane, speed, maxspeed, id, carAhead, carUpAhead, carDownAhead, laneidx, size, canvasheight,
lanes, slowdown):
super(BufferBuilder, self).__init__(sim, lane, speed, maxspeed, id, carAhead, carUpAhead, carDownAhead, laneidx, size,
canvasheight, lanes, slowdown)
self.aheadbufmin = 2
self.aheadbufmax = 8
self.accel = 0
self.delay = 1500
self.reaction = 0
self.color = g.buffercolor
self.max_accel = 0.002
self.name = "bufferbuilder"
def general_behavior(self):
# use different acceleration functions depending on the difference between v0 and vF
# currently just linearly accelerates (or decelerates) to the speed of the front car
if (self.ahead != None):
if(self.ahead.posx - self.posx - (self.length * 1.2) < 0):
self.accel = -self.max_accel * 10
if(self.ahead.posx - self.posx - (self.length) - self.speedx < 0.05):
self.accel = -self.speedx
elif (self.aheadbufmax * self.length >= self.ahead.posx - self.posx): # if at max buffer
prevAccel = self.accel # save previous accel
# find distance between the two cars
dist = self.ahead.posx - self.posx - (self.aheadbufmin * self.length)
if (self.reaction == 0): #if not reacting
# find accel so that after dist, this car will be matching the speed of front car
safespeed = 0.000001
self.accel = (math.pow(self.ahead.speedx + safespeed, 2) - math.pow(self.speedx + safespeed, 2)) / (dist * 2)
#check accel changed from + to - or vice versa
if ((prevAccel < 0 and self.accel > 0) or (prevAccel > 0 and self.accel < 0)):
self.accel = 0 #dont move car
#self.reaction = 100 #set delay
else:
pass #self.reaction -= 1
if (prevAccel <= 0 and self.accel > 0 and self.speedx < 1 and self.aheadbufmin * self.length >= self.ahead.posx - self.posx):
self.accel = 0
else:
if (self.speedx < self.maxspeed):
self.accel = self.speedx * .005
else: # front car
# for delay amount of time make car come to a stop
if (self.SLOWDOWN and self.delay > g.TICKS):
self.accel = -self.speedx * .003
# after delay accel to maxspeed
else:
if (self.speedx < self.maxspeed):
#self.accel = self.speedx * .003
self.accel = self.max_accel
# limit accel to max
self.accel = min(self.accel, self.max_accel)
# update speed to reflect accel change
self.speedx = self.speedx + self.accel
self.speedx = min(self.inst_max, self.speedx)
| true
|
d154f9979dfdf2994a0a8a4bc0c7e3df2f6ce289
|
Python
|
a-w/astyle
|
/AStyleDev/src-p/ExampleByte.py
|
UTF-8
| 11,436
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/python
""" ExampleByte.py
This program calls the Artistic Style DLL to format the AStyle source files.
The Artistic Style DLL must be in the same directory as this script.
The Artistic Style DLL must have the same bit size (32 or 64) as the Python executable.
It will work with either Python version 2 or 3 (unicode).
For Python 3 the files are retained in byte format and not converted to Unicode.
"""
# to disable the print statement and use the print() function (version 3 format)
from __future__ import print_function
import os
import platform
import sys
from ctypes import *
# global variables ------------------------------------------------------------
# will be updated from the platform properties by initialize_platform()
__is_iron_python__ = False
__is_unicode__ = False
# -----------------------------------------------------------------------------
def main():
""" Main processing function.
"""
files = ["ASBeautifier.cpp", "ASFormatter.cpp", "astyle.h"]
# options are byte code for AStyle input
option_bytes = b"-A2tOP"
#initialization
print("ExampleByte",
platform.python_implementation(),
platform.python_version(),
platform.architecture()[0])
initialize_platform()
libc = initialize_library()
version_bytes = get_astyle_version_bytes(libc)
print("Artistic Style Version " + version_bytes.decode('utf-8'))
# process the input files
for file_path in files:
file_path = get_project_directory(file_path)
bytes_in = get_source_code_bytes(file_path)
formatted_bytes = format_source_bytes(libc, bytes_in, option_bytes)
# if an error occurs, the return is a type(None) object
if type(formatted_bytes) is type(None):
print("Error in formatting", file_path)
os._exit(1)
save_source_code_bytes(formatted_bytes, file_path)
# allocated memory is deleted here, not in the allocation function
del formatted_bytes
print("Formatted", file_path)
# -----------------------------------------------------------------------------
def format_source_bytes(libc, bytes_in, option_bytes):
""" Format the bytes_in by calling the AStyle shared object (DLL).
The variable bytes_in is expected to be a byte string.
The return value is a byte string.
If an error occurs, the return value is a NoneType object.
"""
astyle_main = libc.AStyleMain
astyle_main.restype = c_char_p
formatted_bytes = astyle_main(bytes_in,
option_bytes,
ERROR_HANDLER,
MEMORY_ALLOCATION)
return formatted_bytes
# -----------------------------------------------------------------------------
def get_astyle_version_bytes(libc):
""" Get the version number from the AStyle shared object (DLL).
The function return value is a byte string.
"""
astyle_version = libc.AStyleGetVersion
astyle_version.restype = c_char_p
version_bytes = astyle_version()
return version_bytes
# -----------------------------------------------------------------------------
def get_project_directory(file_name):
""" Find the directory path and prepend it to the file name.
The source is expected to be in the "src-p" directory.
This may need to be changed for your directory structure.
"""
file_path = sys.path[0]
end = file_path.find("src-p")
if end == -1:
print("Cannot find source directory", file_path)
os._exit(1)
file_path = file_path[0:end]
file_path = file_path + "test-data" + os.sep + file_name
return file_path
# -----------------------------------------------------------------------------
def get_source_code_bytes(file_path):
""" Get the source code as bytes.
Opening the file as binary will read it as a byte string.
The return value is a byte string.
"""
# read the file as a byte string by declaring it as binary
# version 3 will read unicode if not declared as binary
try:
file_in = open(file_path, 'rb')
bytes_in = file_in.read()
except IOError as err:
# "No such file or directory: <file>"
print(err)
print("Cannot open", file_path)
os._exit(1)
file_in.close()
return bytes_in
# -----------------------------------------------------------------------------
def initialize_library():
""" Set the file path and load the shared object (DLL).
Return the handle to the shared object (DLL).
"""
# change directory to the path where this script is located
pydir = sys.path[0]
# remove the file name for Iron Python
if pydir[-3:] == ".py":
pydir = os.path.dirname(sys.path[0])
os.chdir(pydir)
# return the handle to the shared object
if os.name == "nt":
libc = load_windows_dll()
else:
libc = load_linux_so()
return libc
# -----------------------------------------------------------------------------
def initialize_platform():
""" Check the python_implementation and the python_version.
Update the global variables __is_iron_python__ and __is_unicode__.
"""
global __is_iron_python__, __is_unicode__
if platform.python_implementation() == "CPython":
if platform.python_version_tuple()[0] >= '3':
__is_unicode__ = True
elif platform.python_implementation() == "IronPython":
__is_iron_python__ = True
__is_unicode__ = True
# -----------------------------------------------------------------------------
def load_linux_so():
""" Load the shared object for Linux platforms.
The shared object must be in the same folder as this python script.
"""
shared = os.path.join(sys.path[0], "libastyle-2.06.so")
# os.name does not always work for mac
if sys.platform == "darwin":
shared = shared.replace(".so", ".dylib")
try:
libc = cdll.LoadLibrary(shared)
except OSError as err:
# "cannot open shared object file: No such file or directory"
print(err)
print("Cannot find", shared)
os._exit(1)
return libc
# -----------------------------------------------------------------------------
def load_windows_dll():
""" Load the dll for Windows platforms.
The shared object must be in the same folder as this python script.
An exception is handled if the dll bits do not match the Python
executable bits (32 vs 64).
"""
dll = "AStyle-2.06.dll"
try:
libc = windll.LoadLibrary(dll)
# exception for CPython
except WindowsError as err:
# print(err)
print("Cannot load library", dll)
if err.winerror == 126: # "The specified module could not be found"
print("Cannot find", dll)
elif err.winerror == 193: # "%1 is not a valid Win32 application"
print("You may be mixing 32 and 64 bit code")
else:
print(err.strerror)
os._exit(1)
# exception for IronPython
except OSError as err:
print("Cannot load library", dll)
print("If the library is available you may be mixing 32 and 64 bit code")
os._exit(1)
# exception for IronPython
# this sometimes occurs with IronPython during debug
# rerunning will probably fix
except TypeError as err:
print("TypeError - rerunning will probably fix")
os._exit(1)
return libc
# -----------------------------------------------------------------------------
def save_source_code_bytes(bytes_out, file_path):
""" Save the source code as bytes.
The variable bytes_in is expected to be a byte string.
Opening the file as binary will save it as a byte string.
"""
# remove old .orig, if any
backup_path = file_path + ".orig"
if os.path.isfile(backup_path):
os.remove(backup_path)
# rename original to backup
os.rename(file_path, backup_path)
# save the file as a byte string by opening it as binary
# version 3 will attempt to write unicode if not declared as binary
file_out = open(file_path, 'wb')
file_out.write(bytes_out)
file_out.close()
# -----------------------------------------------------------------------------
# // astyle ASTYLE_LIB declarations
# typedef void (STDCALL *fpError)(int, char*); // pointer to callback error handler
# typedef char* (STDCALL *fpAlloc)(unsigned long); // pointer to callback memory allocation
# extern "C" EXPORT char* STDCALL AStyleMain(const char*, const char*, fpError, fpAlloc);
# extern "C" EXPORT const char* STDCALL AStyleGetVersion (void);
# -----------------------------------------------------------------------------
# AStyle Error Handler Callback
def error_handler(num, err):
""" AStyle callback error handler.
The return error string (err) is always byte type.
It is converted to unicode for Python 3.
"""
print("Error in input {0}".format(num))
if __is_unicode__:
err = err.decode()
print(err)
os._exit(1)
# -----------------------------------------------------------------------------
# global to create the error handler callback function
if os.name == "nt":
ERROR_HANDLER_CALLBACK = WINFUNCTYPE(None, c_int, c_char_p)
else:
ERROR_HANDLER_CALLBACK = CFUNCTYPE(None, c_int, c_char_p)
ERROR_HANDLER = ERROR_HANDLER_CALLBACK(error_handler)
# global allocation variable --------------------------------------------------
# global memory allocation returned to artistic style
# must be global for CPython
# IronPython doesn't need global, but it doesn't hurt
__allocated__ = c_char_p
# -----------------------------------------------------------------------------
# AStyle Memory Allocation Callback
def memory_allocation(size):
""" AStyle callback memory allocation.
The size to allocate is always byte type.
The allocated memory MUST BE FREED by the calling function.
"""
# ctypes are different for CPython and IronPython
global __allocated__
# ctypes for IronPython do NOT seem to be mutable
# using ctype variables in IronPython results in a
# "System.AccessViolationException: Attempted to read or write protected memory"
# IronPython must use create_string_buffer()
if __is_iron_python__:
__allocated__ = create_string_buffer(size)
return __allocated__
# ctypes for CPython ARE mutable and can be used for input
# using create_string_buffer() in CPython results in a
# "TypeError: string or integer address expected instead of c_char_Array"
# CPython must use c_char_Array object
else:
arr_type = c_char * size # create a c_char array
__allocated__ = arr_type() # create an array object
return addressof(__allocated__)
# -----------------------------------------------------------------------------
# global to create the memory allocation callback function
if os.name == "nt":
MEMORY_ALLOCATION_CALLBACK = WINFUNCTYPE(c_char_p, c_ulong)
else:
MEMORY_ALLOCATION_CALLBACK = CFUNCTYPE(c_char_p, c_ulong)
MEMORY_ALLOCATION = MEMORY_ALLOCATION_CALLBACK(memory_allocation)
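# -----------------------------------------------------------------------------
# Hedged sketch (assumption, not part of the original module): one way the
# AStyleMain declaration and the two callbacks above could be combined to
# format a byte string.  The function name and the option string are
# illustrative only; 'libc' is assumed to be the shared-library handle
# returned by the loader above.
def format_byte_string(libc, bytes_in, options=b"--style=allman"):
    """ Format a byte string by calling AStyleMain with the callbacks above.
        Both bytes_in and options must be byte strings.
    """
    libc.AStyleMain.restype = c_char_p
    return libc.AStyleMain(bytes_in, options, ERROR_HANDLER, MEMORY_ALLOCATION)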
# -----------------------------------------------------------------------------
# make the module executable
if __name__ == "__main__":
main()
    os._exit(0)
| true
|
e1753a9b57a697ecc0d4fd06df813330e9c4dbee
|
Python
|
DDDDDaryl/guidance_line_extraction
|
/cam_accelerate.py
|
UTF-8
| 993
| 3.03125
| 3
|
[] |
no_license
|
import threading
import cv2
class camCapture:
def __init__(self, dev):
self.Frame = 0
self.status = False
self.isstop = False
        # Connect to the camera.
self.capture = cv2.VideoCapture(dev)
# self.capture.set(3, 1280)
# self.capture.set(4, 720)
def isOpened(self):
return self.capture.isOpened()
def start(self):
        # Run the capture loop in a child thread; daemon=True means the thread closes together with the main thread.
print('cam started!')
threading.Thread(target=self.queryframe, daemon=True, args=()).start()
def stop(self):
        # Remember to provide a switch that stops the infinite loop.
self.isstop = True
print('cam stopped!')
def getframe(self):
        # Return the latest frame whenever an image is needed.
return self.Frame
def queryframe(self):
while (not self.isstop):
self.status, self.Frame = self.capture.read()
self.capture.release()
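# --- Hedged usage sketch (assumption, not part of the original file) ---
# Device index 0 and the one-second warm-up are illustrative only.
if __name__ == '__main__':
    import time
    cam = camCapture(0)
    if cam.isOpened():
        cam.start()
        time.sleep(1)  # give the capture thread time to grab the first frame
        frame = cam.getframe()
        cam.stop()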
| true
|
874cfaaa3a2c67a0699fb8949c1e75aded0456b5
|
Python
|
kweird/githubintro
|
/4_Building_Tools/buildingtools.py
|
UTF-8
| 6,907
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
# Import the modules we use in our code
import random
import operator
import matplotlib.pyplot
import time
# We set the random seed to a certain value so we have reproducible results
# for testing. This can be commented out when not testing.
random.seed(0)
# We create a variable called start_time that stores the current time in
# seconds. We do this with the intention of recording the end time when the
# entire code has finished and to calculate the running time.
start_time = time.time()
# Set up the empty agents list
agents = []
# Create variables that will determine how many agents there are, and how
# many iterations or steps they move
num_of_agents = 500
num_of_iterations = 3
# We create a function called distance_between to work out the distance
# between any pair of [y, x] coords using Pythagoras' theorem.
# We drop a dimension when passing in the pair of coords because we are only
# going to pass in two separate [y, x] coords, not a list of lists containing
# two sets of [y, x] coords.
# e.g. we refer to agents_row_a[0] instead of agents_row_a[0][0]
def distance_between(agents_row_a, agents_row_b):
"""
This function calculates and returns the distance between two sets of
points on the x, y plane.
Parameters
----------
agents_row_a : list with two elements that are numbers
A list representing 2D coordinates.
agents_row_b : list with two elements that are numbers
A list representing 2D coordinates.
Returns
-------
Float
Returns the calculated Pythagorean distance between two sets of points
(agents_row_a and agents_row_b) on the x, y plane.
"""
return (((agents_row_a[0] - agents_row_b[0])**2) + \
((agents_row_a[1] - agents_row_b[1])**2))**0.5
# We use a for loop to create the number of agents as specified in the
# num_of_agents variable, still with random [y, x] coords, and append to the
# initially empty agents list
for i in range(num_of_agents):
agents.append([random.randint(0,99),random.randint(0,99)])
# Print each agent coords individually using another for loop
#for i in range(len(agents)):
#print("Agent", i, "starting coords:", agents[i])
# For every iteration specified randomly move each agent in the agents list
# once on the y and x axis
# We generate a new random_number inside both for loops so that the probability
# is individual for each agent during each iteration
# We use the modulus operator to effectively turn our 100 x 100 environment
# into a torus, where if an agent moves off either axis (either positively or
# negatively) it appears at the other extreme of the axis
for j in range(num_of_iterations):
for i in range(num_of_agents):
random_number = random.random()
if random_number < 0.5:
agents[i][0] = (agents[i][0] + 1) % 100
else:
agents[i][0] = (agents[i][0] - 1) % 100
random_number = random.random()
if random_number < 0.5:
agents[i][1] = (agents[i][1] + 1) % 100
else:
agents[i][1] = (agents[i][1] - 1) % 100
# These are test values to check that the Pythagorean formula below is
# calculating the distance between agents correctly. Uncomment the lines
# below and the answer should be 5, a well known solution that we can test.
# distance = distance_between([0, 0], [4, 3])
# print(distance)
# We create two variables to store the max / min distances between two agents
# We manually calculate the max distance between the first two agents using
# Pythagoras' theorem, and set the min distance to be the same, then we update
# these values in the for loop below if any distance is greater for max
# distance, or smaller for min distance
# max_distance = (((agents[0][1] - agents[1][1])**2) \
# + ((agents[0][0] - agents[1][0])**2))**0.5
max_distance = distance_between(agents[0], agents[1])
min_distance = max_distance
# We create a for loop to calculate the distance between every agent and every
# other agent
for i in agents:
for j in agents:
if i < j:
distance = distance_between(i, j)
max_distance = max(max_distance, distance)
min_distance = min (min_distance, distance)
#print("Distance between", i, "and", j, "is:", distance)
#print("Max distance:", max_distance)
#print("Min distance:", min_distance)
# This is an alternative way of calculating the distance between every agent
# for i in range(0, num_of_agents, 1):
# for j in range(i, num_of_agents, 1):
# distance = distance_between(agents[i], agents[j])
# max_distance = max(max_distance, distance)
# min_distance = min (min_distance, distance)
# #print("Distance between", i, "and", j, "is:", distance)
# #print("Max distance:", max_distance)
# #print("Min distance:", min_distance)
# This line of code prints the [y, x] coords of the agent that is furthest
# east (has the largest x value). It does this by comparing the second
# element of each element within the agents list of lists.
# print(max(agents, key=operator.itemgetter(1)))
# We now create a variable with the [y, x] coords of the agent that is
# furthest east, so that we can easily reference the individual coords
# for use with matplotlib.pyplot.scatter
most_easterly = max(agents, key=operator.itemgetter(1))
print("The coords of the most easterly agent are:", most_easterly)
# most_westerly = min(agents, key=operator.itemgetter(1))
# print("The coords of the most westerly agent are:", most_westerly)
# most_northerly = max(agents, key=operator.itemgetter(0))
# print("The coords of the most northerly agent are:", most_northerly)
# most_southerly = min(agents, key=operator.itemgetter(0))
# print("The coords of the most southerly agent are:", most_southerly)
# Using matplotlib we set the y and x axes, we scatter plot the coordinates
# of our two agents onto the plot, then we show the plot
matplotlib.pyplot.ylim(0, 99)
matplotlib.pyplot.xlim(0, 99)
# matplotlib.pyplot.scatter(agents[0][1],agents[0][0])
# matplotlib.pyplot.scatter(agents[1][1],agents[1][0])
# We are now using a for loop to scatter plot all the agents in the agents
# list, no matter how many agents there are
for i in range(num_of_agents):
matplotlib.pyplot.scatter(agents[i][1],agents[i][0])
# We re-plot our most easterly agent, but this time we set it red for
# identification. Because we plot this last it overwrites any previous colour
matplotlib.pyplot.scatter(most_easterly[1], most_easterly[0], color = 'red')
matplotlib.pyplot.show()
# We create a variable called end_time that stores the current time in
# seconds. We then subtract the end time from the starting time and print the
# total running time of the code.
end_time = time.time()
print("Total running time = " + str(end_time - start_time) + " seconds")
| true
|
c0d34c9d305b546ad501660f812066c5c6753bdb
|
Python
|
denizozger/coffee
|
/alert_slack_when_button_is_pressed.py
|
UTF-8
| 2,004
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/python
# Recommended usage: $ nohup python3 this_file.py >this_file.py.log 2>&1 </dev/null &
import os
import sys
import requests
import time
from datetime import datetime
import RPi.GPIO as GPIO
url = os.getenv('SLACK_CHANNEL_URL')
response = None
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# LED setup
RED = 18
YELLOW = 23
GREEN = 24
GPIO.setup(RED, GPIO.OUT)
GPIO.setup(YELLOW, GPIO.OUT)
GPIO.setup(GREEN, GPIO.OUT)
# Button setup
ButtonPin = 25 # Set pin 25 as an input pin
GPIO.setup(ButtonPin, GPIO.IN)
# Buzzer setup
BUZZER = 22
GPIO.setup(BUZZER, GPIO.OUT)
def main():
reset_devices()
GPIO.output(RED, GPIO.HIGH) # Default state is red
while True:
if GPIO.input(ButtonPin) == False: # If the button is pressed, ButtonPin will be "false"
print('Button pressed', str(datetime.now()))
GPIO.output(RED, GPIO.LOW)
GPIO.output(YELLOW, GPIO.HIGH)
message = { 'text': 'Ready!'}
response = requests.post(url, json=message, allow_redirects=True)
print(response.status_code, str(datetime.now()))
if response.status_code == 200:
buzz()
GPIO.output(YELLOW, GPIO.LOW)
GPIO.output(GREEN, GPIO.HIGH)
time.sleep(5) # Coffee stays fresh for 40 minutes - I think
else:
GPIO.output(YELLOW, GPIO.LOW)
for x in range(0, 3):
GPIO.output(RED, GPIO.HIGH)
buzz()
time.sleep(1)
GPIO.output(RED, GPIO.LOW)
time.sleep(1)
else:
reset_devices()
GPIO.output(RED, GPIO.HIGH)
time.sleep(0.5)
def buzz(duration: float = 0.3):
GPIO.output(BUZZER, GPIO.HIGH)
time.sleep(duration)
GPIO.output(BUZZER, GPIO.LOW)
def reset_devices():
GPIO.output(BUZZER, GPIO.LOW)
GPIO.output(RED, GPIO.LOW)
GPIO.output(YELLOW, GPIO.LOW)
GPIO.output(GREEN, GPIO.LOW)
try:
main()
except Exception as e:
print(e)
GPIO.output(BUZZER, GPIO.LOW)
GPIO.output(RED, GPIO.LOW)
GPIO.output(YELLOW, GPIO.LOW)
GPIO.output(GREEN, GPIO.LOW)
GPIO.cleanup()
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| true
|
90e1c8ed8a7881304e0cde04c68732745860cb94
|
Python
|
rpw505/aoc_2020
|
/day_03/q1.py
|
UTF-8
| 1,769
| 3.421875
| 3
|
[] |
no_license
|
from itertools import cycle
from dataclasses import dataclass
from pprint import pprint
from typing import List
from functools import reduce
import operator
TEST_INPUT = [
'..##.......',
'#...#...#..',
'.#....#..#.',
'..#.#...#.#',
'.#...##..#.',
'..#.##.....',
'.#.#.#....#',
'.#........#',
'#.##...#...',
'#...##....#',
'.#..#...#.#'
]
class TreeRow:
@staticmethod
def from_input(lines):
return [TreeRow(line) for line in lines]
def __init__(self, line):
#'..##.......'
self.text = line.strip()
self.tree_array = [t == '#' for t in self.text]
def __getitem__(self, index):
row = cycle(self.tree_array)
if index < 0:
raise IndexError()
for _ in range(index):
next(row)
return next(row)
def __repr__(self):
return f'<{self.text}>'
def slope(rows: List[TreeRow], x, y):
max_y = len(rows)
tally = 0
i = 0
for j in range(0, max_y, y):
tally += rows[j][i]
i += x
print(f'tally ({x}, {y}): {tally}')
return tally
def check_slopes(rows):
slopes = [
(1, 1),
(3, 1),
(5, 1),
(7, 1),
(1, 2)
]
result = reduce(operator.mul, [slope(rows, *grad) for grad in slopes])
print('Check slopes: ', result)
return result
def tests():
print('Tests\n-----\n')
rows = TreeRow.from_input(TEST_INPUT)
print(rows)
check_slopes(rows)
def main():
print('Main\n----\n')
with open("input_1.txt", 'r') as input_file:
rows = TreeRow.from_input(input_file.readlines())
slope(rows, 3, 1)
check_slopes(rows)
if __name__ == '__main__':
tests()
print('')
main()
| true
|
8c61220653fa86b86cb77a37e704ea4d8be2cb61
|
Python
|
Leo-Wang-JL/force-riscv
|
/utils/regression/common/threads.py
|
UTF-8
| 10,681
| 2.546875
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import threading
import sys
from threading import Thread
from common.sys_utils import SysUtils
from common.msg_utils import Msg
class HiThread( threading.Thread ):
def __init__( self, arg_options ):
        # The HiThread has two states. State 1 has no thread loop, thus there is no need for control to be halted
        # to allow other thread processes to continue. State 2 executes a loop until the shutdown sequence is invoked,
        # which determines whether or not the thread a) has more work to do, or b) will wait for another process to
        # finish. A thread will remain in the shutdown sequence until the terminate flag is set. When the terminate
        # flag is set then the thread will execute the terminate sequence.
my_target = SysUtils.ifthen( arg_options.get( "noloop", False ), self.run_once, self.run_loop )
super().__init__( name = arg_options.get( "name", None ), target = my_target )
Msg.dbg( "Created Thread[%s] Processing...." % ( self.name ))
# useful Thread events
self.on_start = arg_options.get( "on-start" , None )
self.on_execute = arg_options.get( "on-execute" , None )
self.on_done = arg_options.get( "on-done" , None )
self.on_shutdown = arg_options.get( "on-shutdown", None )
self.on_finished = arg_options.get( "on-finished", None )
# thread flags
self.shutdown = False # shutdown request has been received
self.finished = False # thread has completed all work and has exited
# the thread heartbeat is set to write a debug message every 30 seconds if the thread
# is waiting. I do not like the term blocking because waiting is a more controlled event.
        # The waiting thread asks for permission to proceed rather than the thread that is busy having
        # to create a block. Blocks are notorious for causing deadlocks that force a less than graceful
        # shutdown
self.heartbeat_rate = arg_options.get( "heartbeat-rate", 30 )
self.sleep_period = arg_options.get( "sleep-period" , 1000 )
self.current_tick = 0
# not a good idea to use daemons as a blanket rule, makes bad designs seem to function
# but there are always underlying issues with these. The daemon attribute exists in the
# abstract as does the handler. Setting it here will ensure the thread acts as we expect
self.daemon = arg_options.get( "daemon", False )
if arg_options.get( "active", False ):
self.start_thread()
# perform any remaining initialization outside the thread space
# using a callback handler if initialized then starts the thread
def start_thread( self ):
if self.on_start is not None:
self.on_start( )
self.start()
# waits for the thread to exit then executes a notify callback if initialized
def wait_for( self ):
Msg.dbg( "Before Thread[%s] Join" % ( self.name ))
self.join()
Msg.dbg( "After Thread[%s] Join" % ( self.name ))
        # thread has finished its work, trigger notify thread done
if self.on_finished is not None:
self.on_finished( )
# general thread loop
def run_loop( self ):
Msg.info( "Entering HIThread[%s] Loop Processing...." % ( self.name ) )
while not self.terminated():
# initialize iteration for work performed in execute
Msg.dbg( "HiThread[%s]::run_loop(1)" % ( self.name ) )
if self.on_execute is not None:
Msg.dbg( "HiThread[%s]::run_loop(2)" % ( self.name ) )
self.on_execute( )
# perform the iteration work
Msg.dbg( "HiThread[%s]::run_loop(3)" % ( self.name ) )
self.execute()
            # finish work prior to the next iteration
Msg.dbg( "HiThread[%s]::run_loop(4)" % ( self.name ) )
if self.on_done is not None:
Msg.dbg( "HiThread[%s]::run_loop(5)" % ( self.name ) )
self.on_done( )
Msg.dbg( "HiThread[%s]::run_loop(6)" % ( self.name ) )
Msg.info( "Leaving HIThread[%s] Loop Processing...." % ( self.name ) )
# general thread execute
def run_once( self ):
Msg.dbg( "Entering Thread[%s] Processing...." % ( self.name ))
# initialize thread for work performed in execute
if self.on_execute is not None:
self.on_execute( )
# perform the thread work
self.execute()
# perform any remaining work prior to exit thread space
if self.on_done is not None:
self.on_done( )
Msg.dbg( "Leaving Thread[%s] Processing...." % ( self.name ))
# returns True once Thread has exited
def terminated( self ):
# Msg.info( "HiThread::terminated() - self.finished: %s, self.is_alive(): %s, returns: [%s]" % (str(self.finished),str(self.is_alive())),str(self.finished or ( not self.is_alive() )))
Msg.info( "HiThread[%s]::terminated() - self.finished: %s, self.is_alive(): %s" % (self.name, str(self.finished),str(self.is_alive())) )
my_retval = self.finished or ( not self.is_alive())
Msg.info( "HiThread[%s]::terminated() - returns: [%s]" % ( self.name, str( my_retval )))
return self.finished or ( not self.is_alive() )
def heartbeat( self ):
        # increment the heartbeat; when debug messages are enabled a heartbeat message will be
        # posted every self.heartbeat_rate ticks. Whenever the heartbeat method is called the current_tick
        # is updated
self.current_tick += 1
if not bool( self.current_tick % self.heartbeat_rate ):
Msg.dbg( "HiThread[%s] Still Working" % ( self.name ) )
        # the sleep in SysUtils uses milliseconds as does the rest of the computing world instead of
        # fractions of a second. Thus this will pause the thread for self.sleep_period milliseconds,
        # allowing the process thread some processor time
SysUtils.sleep(self.sleep_period)
return False
def trigger_shutdown( self ):
Msg.dbg( "HiThread[%s]::trigger_shutdown() - enter " % ( self.name ))
if self.on_shutdown is not None:
self.on_shutdown(self)
else:
self.shutdown = True
Msg.dbg( "HiThread[%s]::trigger_shutdown() - exit " % ( self.name ))
def execute( self ):
raise NotImplementedError( "Thread::execute() must be implemented" )
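# Hedged usage sketch (assumption, not part of the original module): a minimal
# subclass that implements execute() and runs once ("noloop" mode).  The option
# keys follow the dictionary reads in HiThread.__init__ above.
class ExampleWorkThread( HiThread ):
    def execute( self ):
        Msg.info( "ExampleWorkThread[%s] doing one unit of work" % ( self.name ))
# my_thread = ExampleWorkThread( { "name": "example-once", "noloop": True, "active": True } )
# my_thread.wait_for()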
class HiOldThread( threading.Thread ):
def __init__( self, arg_create_active = False ):
super().__init__( name = "HiThread-01" )
# Ensure that all threads are killed when master thread is exited for any reason by marking it as a daemon
self.daemon = True
if arg_create_active:
self.start()
#pass
def run( self ):
pass
def HeartBeat( self ):
# Enable this if it's a good idea to have a periodic printing heartbeat
#Msg.dbg("[Thread %s]: Heartbeat" % (self.threadName))
pass
class HiEvent( threading.Event ):
def __init__( self, arg_options ):
super().__init__()
# default to return immediately
self.timeout = arg_options.get( "timeout" , None )
self.before_signal = arg_options.get( "before-sig" , None ) # use this to perform some action prior to setting event
self.after_signal = arg_options.get( "after-sig" , None ) # use this to perform some action after to setting event
self.before_unsignal = arg_options.get( "before-unsig" , None ) # use this to perform some action prior to unsetting event
self.after_unsignal = arg_options.get( "after-unsig" , None ) # use this to perform some action after to unsetting event
def Signal( self, arg_sender = None ):
# perform any activities prior to notification, this could include finishing some work
# that could make the system unstable. This is a callback that is part of the dictionary
# used to initialize
if self.before_signal is not None:
            self.before_signal( self )
# signal the event
self.set()
# perform any activities once notification has been dispatched, this can be used to notify the caller the even has
# been signaled
# This is a callback that is part of the dictionary used to initialize
if self.after_signal is not None:
            self.after_signal( self )
def Unsignal( self, arg_sender = None ):
# perform any activities prior to notification the event has been cleared and will block, this could include initializing to
# prevent system instability. This is a callback that is part of the dictionary used to initialize the Event
if self.before_unsignal is not None:
self.before_unsignal( self )
self.clear( )
if self.after_unsignal is not None:
self.after_unsignal( self )
def Signaled( self, arg_sender = None ):
return self.isSet()
def Reset( self, arg_sender = None ):
self.clear()
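# Hedged usage sketch (assumption, not part of the original module): wiring a
# callback through the dictionary keys read in HiEvent.__init__ above.
def example_event_usage():
    my_event = HiEvent( { "after-unsig": lambda arg_sender: Msg.dbg( "event cleared" ) } )
    my_event.Signal()     # sets the underlying threading.Event
    my_event.Unsignal()   # clears it and fires the after-unsig callback
    return my_event.Signaled()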
## {{{{ TTTTTTTT OOOOOO DDDDDD OOOOOO }}}}
## {{ TTTTTTTT OOOOOOOO DDDDDDD OOOOOOOO }}
## {{ TT OO OO DD DDD OO OO }}
## {{ TT OO OO DD DD OO OO }}
## {{ TT OO OO DD DDD OO OO }}
## {{ TT OOOOOOOO DDDDDDD OOOOOOOO }}
## {{{{ TT OOOOOO DDDDDD OOOOOO }}}}
##
## GET RID OF THIS INSANITY, replace with proper thread management
# This event is signalled when all the worker threads are completed.
workers_done_event = HiEvent({})
# Summary thread signals master thread that it's done
summary_done_event = HiEvent({})
class HiSemaphore( threading.Semaphore ):
def test( self ):
my_lock = threading.Lock()
# class HiMutex( threading.Lock ):
# def test( self ):
# pass
#
#
# class HiCriticalSection( threading.Lock ):
# pass
#
#
| true
|
5236846a1e7234ed817ca17e9086eb7f3a0df9f0
|
Python
|
zxqfengdi/graduation-project
|
/project_code/camera.py
|
UTF-8
| 3,707
| 2.578125
| 3
|
[] |
no_license
|
# coding:utf-8
"""
@author: fengdi
@file: camera.py
@time: 2018-04-21 22:12
"""
import cv2
import face_recognition
class CameraRecognize(object):
def __init__(self):
super().__init__()
def camera_recognize(self):
video_capture = cv2.VideoCapture(1)
jobs_image = face_recognition.load_image_file("/media/fengdi/数据/graduation-project/facedata/jobs.png")
curry_image = face_recognition.load_image_file("/media/fengdi/数据/graduation-project/facedata/curry.jpg")
putin_image = face_recognition.load_image_file("/media/fengdi/数据/graduation-project/facedata/putin.jpeg")
# xidada_image = face_recognition.load_image_file("/media/fengdi/数据/graduation-project/facedata/xidada.jpg")
# jobs_image = face_recognition.load_image_file(r"E:\graduation-project\facedata\jobs.png")
# curry_image = face_recognition.load_image_file(r"E:\graduation-project\facedata\curry.jpg")
# putin_image = face_recognition.load_image_file(r"E:\graduation-project\facedata\putin.jpeg")
# xidada_image = face_recognition.load_image_file(r"E:\graduation-project\facedata\xidada.jpg")
jobs_face_encoding = face_recognition.face_encodings(jobs_image)[0]
curry_face_encoding = face_recognition.face_encodings(curry_image)[0]
putin_encoding = face_recognition.face_encodings(putin_image)[0]
# xidada_encoding = face_recognition.face_encodings(xidada_image)[0]
known_face_encodings = [jobs_face_encoding, curry_face_encoding, putin_encoding]
known_face_names = ["jobs", "curry", "putin"]
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while video_capture.isOpened():
ret, frame = video_capture.read()
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
rgb_small_frame = small_frame[:, :, ::-1]
if process_this_frame:
face_locations = face_recognition.face_locations(
rgb_small_frame)
face_encodings = face_recognition.face_encodings(
rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
matches = face_recognition.compare_faces(
known_face_encodings, face_encoding)
name = "unknown"
if True in matches:
first_match_index = matches.index(True)
name = known_face_names[first_match_index]
face_names.append(name)
process_this_frame = not process_this_frame
for (top, right, bottom, left), name in zip(
face_locations, face_names):
top *= 4
right *= 4
bottom *= 4
left *= 4
start = (left, top)
end = (right, bottom)
color = (0, 255, 0)
thickness = 1
cv2.rectangle(frame, start, end, color, thickness)
cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
(128, 128, 128), cv2.FILLED)
font = cv2.FONT_HERSHEY_COMPLEX
cv2.putText(frame, name, (left + 30, bottom - 10), font, 1.0,
(0, 0, 0), 1)
cv2.imshow('camera', frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
camera = CameraRecognize()
camera.camera_recognize()
| true
|
0574e1aadc0b48372a74e9b11afa6588dd8130a2
|
Python
|
arnavdas88/qiskit_helper_functions
|
/qcg/Dynamics/quantum_dynamics.py
|
UTF-8
| 4,783
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
from qiskit import QuantumCircuit, QuantumRegister
import sys
import math
import numpy as np
class Dynamics:
"""
Class to implement the simulation of quantum dynamics as described
in Section 4.7 of Nielsen & Chuang (Quantum computation and quantum
information (10th anniv. version), 2010.)
A circuit implementing the quantum simulation can be generated for a given
problem Hamiltonian parameterized by calling the gen_circuit() method.
Attributes
----------
    H : list of str
        The given Hamiltonian whose dynamics we want to simulate; each term
        is given as a Pauli string (e.g. 'XXZZ')
barriers : bool
should barriers be included in the generated circuit
measure : bool
should a ClassicalRegister and measurements be added to the circuit
regname : str
optional string to name the quantum and classical registers. This
allows for the easy concatenation of multiple QuantumCircuits.
qr : QuantumRegister
Qiskit QuantumRegister holding all of the quantum bits
circ : QuantumCircuit
        Qiskit QuantumCircuit that represents the dynamics simulation circuit
"""
def __init__(self, H, barriers=False, measure=False, regname=None):
# Hamiltonian
self.H = H
# set flags for circuit generation
self.barriers = barriers
self.nq = self.get_num_qubits()
# create a QuantumCircuit object
if regname is None:
self.qr = QuantumRegister(self.nq)
else:
self.qr = QuantumRegister(self.nq, name=regname)
self.circ = QuantumCircuit(self.qr)
# Create and add an ancilla register to the circuit
self.ancQ = QuantumRegister(1, 'ancQ')
self.circ.add_register(self.ancQ)
def get_num_qubits(self):
"""
Given the problem Hamiltonian, return the appropriate number of qubits
needed to simulate its dynamics.
This number does not include the single ancilla qubit that is added
to the circuit.
"""
numq = 0
for term in self.H:
if len(term) > numq:
numq = len(term)
return numq
def compute_to_Z_basis(self, pauli_str):
"""
Take the given pauli_str of the form ABCD and apply operations to the
circuit which will take it from the ABCD basis to the ZZZZ basis
Parameters
----------
pauli_str : str
string of the form 'p1p2p3...pN' where pK is a Pauli matrix
"""
for i, pauli in enumerate(pauli_str):
            if pauli == 'X':
                self.circ.h(self.qr[i])
            elif pauli == 'Y':
self.circ.h(self.qr[i])
self.circ.s(self.qr[i])
def uncompute_to_Z_basis(self, pauli_str):
"""
Take the given pauli_str of the form ABCD and apply operations to the
circuit which will take it from the ZZZZ basis to the ABCD basis
Parameters
----------
pauli_str : str
string of the form 'p1p2p3...pN' where pK is a Pauli matrix
"""
for i, pauli in enumerate(pauli_str):
            if pauli == 'X':
                self.circ.h(self.qr[i])
            elif pauli == 'Y':
self.circ.sdg(self.qr[i])
self.circ.h(self.qr[i])
def apply_phase_shift(self, delta_t):
"""
Simulate the evolution of exp(-i(dt)Z)
"""
# apply CNOT ladder -> compute parity
for i in range(self.nq):
self.circ.cx(self.qr[i], self.ancQ[0])
# apply phase shift to the ancilla
# rz applies the unitary: exp(-i*theta*Z/2)
self.circ.rz(2*delta_t, self.ancQ[0])
# apply CNOT ladder -> uncompute parity
for i in range(self.nq-1, -1, -1):
self.circ.cx(self.qr[i], self.ancQ[0])
def gen_circuit(self):
"""
Create a circuit implementing the quantum dynamics simulation
Returns
-------
QuantumCircuit
QuantumCircuit object of size nq with no ClassicalRegister and
no measurements
"""
# generate a naive version of a simulation circuit
for term in self.H:
self.compute_to_Z_basis(term)
if self.barriers:
self.circ.barrier()
self.apply_phase_shift(1)
if self.barriers:
self.circ.barrier()
self.uncompute_to_Z_basis(term)
if self.barriers:
self.circ.barrier()
# generate a commutation aware version of a simulation circuit
# simulate all commuting terms simulataneously by using 1 ancilla per
# term that will encode the phase shift based on the parity of the term.
return self.circ
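# Hedged usage sketch (assumption, not part of the original module): the
# two-term Hamiltonian below is illustrative only.
if __name__ == "__main__":
    toy_hamiltonian = ["XX", "ZZ"]  # one Pauli string per term
    sim = Dynamics(toy_hamiltonian, barriers=True)
    print(sim.gen_circuit())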
| true
|
6a974c556298f24c8f03c12e19bafea5271b40a5
|
Python
|
iradukundas/TFT-WebScrapper
|
/main.py
|
UTF-8
| 5,599
| 2.53125
| 3
|
[] |
no_license
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from discord.ext import commands
from discord import Color
import discord
import os
#Setting up for webscrapping
driver = webdriver.Chrome(executable_path = "/app/.chromedriver/bin/chromedriver")
#discord side
Client = commands.Bot(command_prefix = '!')
@Client.event
async def on_ready():
print("We have logged in as {0.user}".format(Client))
#an event is a piece of code that runs when a bot detects certain activity
driver.get("https://lolchess.gg")
@Client.command()
async def check(ctx, *, username):
channel = Client.get_channel(778311945476374570)
try:
#webscraping
driver.find_element_by_xpath('//*[@id="gnb-search-box"]/form/input').clear()
sBox = driver.find_element_by_xpath('//*[@id="gnb-search-box"]/form/input') #looking for search box based on the xpath(obtained via inspect). stored in searchBox variable
sBox.send_keys(username) #entering something in the box
searchButton = driver.find_element_by_xpath('/html/body/div[1]/header/div[3]/div/form/button')
searchButton.click() #clicking the search button
#changing colors for various elements on the website
script = 'document.getElementsByClassName("profile__tier")[0].style.backgroundColor = "#2f3136";' #top part of stats
script2 = 'document.getElementsByClassName("profile__tier__stats")[0].style.backgroundColor = "#2f3136";' #bottom part of stats
script3 = 'document.getElementsByClassName("profile__tier__stats")[0].style.color = "#FFFFFF";' #text color
script4 = 'document.getElementsByClassName("profile__tier")[0].style.border = "1px solid #2f3136";' #border color
script5 = 'document.getElementsByClassName("profile__tier__info")[0].style.color = "#FFFFFF";' #LP Color
script6 = 'for (i = 0; i < 5; i++) {document.getElementsByClassName("profile__tier__stat__text")[i].style.color = "#FFFFFF";}' #"top" color
script7 = 'for (i = 0; i < 3; i++) {document.getElementsByClassName("profile__tier__stat__value float-right")[i].style.color = "#FFFFFF"};' #"top" color
script8 = 'document.getElementsByClassName("text-dark-gray d-none d-md-block")[0].style.color = "#FFFFFF";' #Tier Color
script9 = 'document.getElementsByClassName("top-percent")[0].style.color = "#FFFFFF";' #top % color
script10 = 'document.getElementsByClassName("rank-region")[0].style.color = "#FFFFFF";' #rank region color
#executing all the scripts
driver.execute_script(script)
driver.execute_script(script2)
driver.execute_script(script3)
driver.execute_script(script4)
driver.execute_script(script5)
driver.execute_script(script6)
driver.execute_script(script7)
driver.execute_script(script8)
driver.execute_script(script9)
driver.execute_script(script10)
#taking screenshot on the stats page
profileTierInfo = driver.find_element_by_class_name('profile__tier')
profileTierInfo.screenshot('./stats.png')
#locating player icon the page
profileIcon = driver.find_element_by_class_name('profile__icon').find_element_by_tag_name('img').get_attribute('src')
#embed
embed = discord.Embed()
file = discord.File('./stats.png')
embed.set_image(url = "attachment://stats.png" )
embed.set_author(name = username, icon_url = profileIcon)
embed.set_footer(text ='src: lolchess.gg')
await ctx.send(embed = embed, file = file)
driver.get("https://lolchess.gg")
except Exception as E:
print(E) #printing the exception
        notFound = discord.Embed(title = '{} not found. Check the spelling and try again'.format(username))
await ctx.send(embed=notFound)
driver.get("https://lolchess.gg")
@Client.command()
async def lb(ctx):
ranks = driver.find_elements_by_class_name('rank')
ranksT = ''
for rank in ranks:
ranksT += rank.text + '\n'
summoners = driver.find_elements_by_class_name('summoner')
summonersT = ''
for summoner in summoners:
summonersT += summoner.text + '\n'
regions = driver.find_elements_by_class_name('region')
regionsT = ''
for region in regions:
regionsT += region.text + '\n'
tiers = driver.find_elements_by_class_name('tier')
tiersT = ''
for tier in tiers:
tiersT += tier.text + '\n'
#not yet added
lps = driver.find_elements_by_class_name('lp')
lpsT = ''
for lp in lps:
lpsT += lp.text + '\n'
winrates = driver.find_elements_by_class_name('winrate')
winratesT = ''
for winrate in winrates:
winratesT += winrate.text + '\n'
wins = driver.find_elements_by_class_name('wins')
winsT = ''
for win in wins:
winsT += win.text + '\n'
losses = driver.find_elements_by_class_name('losses')
lossesT = ''
for loss in losses:
lossesT += loss.text + '\n'
embed = discord.Embed(title = 'Leaderboard Data')
embed.add_field(name = '#', value = ranksT , inline=True)
embed.add_field(name = 'Location', value = regionsT, inline=True)
embed.add_field(name = 'Name', value = summonersT, inline=True)
await ctx.send(embed = embed)
driver.get("https://lolchess.gg")
Client.run('')
# git add ., git commit -m "msg", git push heroku <--- for changes
#heroku logs -a discord-tft-stats --tail
| true
|
50a745345dae7439ae93218a26aa9c44562698d2
|
Python
|
Eulleraang12/Inspection-robot
|
/Move/movimentação.py
|
UTF-8
| 2,338
| 2.984375
| 3
|
[] |
no_license
|
import RPi.GPIO as GPIO
import time
from MotorDireito import *
from MotorEsquerdo import *
from multiprocessing import Process
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
def frente(tempo, voltas):
for i in range(voltas):
for j in passo:
GPIO.output(bobinas1[0],int(j[0]))
GPIO.output(bobinas1[1],int(j[1]))
GPIO.output(bobinas1[2],int(j[2]))
GPIO.output(bobinas1[3],int(j[3]))
GPIO.output(bobinas2[0],int(j[0]))
GPIO.output(bobinas2[1],int(j[1]))
GPIO.output(bobinas2[2],int(j[2]))
GPIO.output(bobinas2[3],int(j[3]))
time.sleep(tempo/1000)
def re(tempo, voltas):
for i in range(voltas):
for j in passo_inversa:
GPIO.output(bobinas1[0],int(j[0]))
GPIO.output(bobinas1[1],int(j[1]))
GPIO.output(bobinas1[2],int(j[2]))
GPIO.output(bobinas1[3],int(j[3]))
GPIO.output(bobinas2[0],int(j[0]))
GPIO.output(bobinas2[1],int(j[1]))
GPIO.output(bobinas2[2],int(j[2]))
GPIO.output(bobinas2[3],int(j[3]))
time.sleep(tempo/1000)
while True:
try:
direcao = str(input("f -> Frente ou r -> Ré e c -> Curva: "))
if direcao == 'f':
tempo = int(input("Intervalo de passo (2 ms recomendado): "))
voltas = 50* int(input("Quantas voltas? "))
frente(tempo, voltas)
if direcao == 'r':
tempo = int(input("Intervalo de passo (2 ms recomendado): "))
voltas = 50* int(input("Quantas voltas? "))
re(tempo, voltas)
if direcao == 'c':
direita = int(input("Intervalo de passo do motor DIREITO (2 ms recomendado): "))
esquerda = int(input("Intervalo de passo do motor ESQUERDO (2 ms recomendado): "))
voltas= 50* int(input("Quantas voltas motor? "))
processo_d = Process(target=motor_direita, args=(direita,voltas))
processo_e = Process(target=motor_esquerda, args=(esquerda,voltas))
processo_d.start()
processo_e.start()
            # processo_d.join()  # wait for the program to finish
# processo_e.join()
except KeyboardInterrupt:
GPIO.cleanup()
| true
|
43b76ff31f463de16f07ff768d7341bdb608a53c
|
Python
|
WZQ1397/kickstart
|
/my220927-2.py
|
UTF-8
| 3,110
| 2.859375
| 3
|
[] |
no_license
|
# Author: Zach.Wang
# Module: fileEnc.py
import pickle
from sys import argv
from datetime import datetime
filename=None
class initFileName(object):
def __init__(self,filename=filename) -> None:
self.__filename = filename
def get_filename(self) -> str:
return "".join(self.__filename.split('.')[:-1])
def get_extension(self) -> str:
return self.__filename.split('.')[-1]
def gen_time_subfix(self) -> str:
return datetime.strftime(datetime.now(),"%Y-%m-%d-%H-%M-%S")
def gen_pickle_file_name(self) -> str:
return f"{self.get_filename()}.pickle"
def gen_save_file_name(self) -> str:
return f"{self.get_filename()}." \
f"{self.gen_time_subfix()}." \
f"{self.get_extension()}"
class genEncFile(object):
def __init__(self,filename=filename) -> None:
self.__filename = filename
self.__FileNameOps = initFileName(self.__filename)
self.__pickleFile = self.__FileNameOps.gen_pickle_file_name()
self.__saveFile = self.__FileNameOps.gen_save_file_name()
def picklize_file(self) -> None:
with open(self.__filename, "rb") as fread,\
open(self.__pickleFile, 'wb') as fpickle:
print(f"{self.__filename} ==> {self.__pickleFile}")
for data in fread.readlines():
pickle.dump(data,fpickle, pickle.HIGHEST_PROTOCOL)
def unpack_file(self,filename:str=None,extension: str = None) -> None:
if filename is not None:
self.__pickleFile = filename
if extension is not None:
self.__saveFile = f"{self.__saveFile}.{extension}"
with open(self.__saveFile, "wb") as fsave,\
open(self.__pickleFile, 'rb') as fpickle:
print(f"{self.__pickleFile} ==> {self.__saveFile}")
while True:
try:
item=pickle.load(fpickle)
fsave.write(item)
except EOFError as _:
break
import base64
class Pic2Base64:
    # Author: Zach.Wang
    # Module: Pic2Base64.py
    def __init__(self, binfilename=None):
        self.filename = "MicrosoftTeams-image.png"
        self.savebinfile = self.filename + ".new.txt"
        self.savepicfile = self.filename + ".new.png"
        # self.saveFiletoBase64()
        self.readBase64code(binfilename)
    def saveFiletoBase64(self, onScr: bool = True):
        # encode the picture file and save it as base64 text
        with open(self.filename, "rb") as fread, open(self.savebinfile, "wb") as fsave:
            data = base64.b64encode(fread.read())
            fsave.write(data)
        if onScr:
            print(data)
    def readBase64code(self, savebinfile=None):
        # decode the saved base64 text back into a picture file
        if savebinfile is not None:
            self.savebinfile = savebinfile
        with open(self.savebinfile, "rb") as fread, open(self.savepicfile, "wb") as fsave:
            data = base64.b64decode(fread.read())
            fsave.write(data)
if __name__ == "__main__":
    if len(argv) > 1:
        filename = argv[1]
    while filename is None:
        filename = input("Input A filename: ")
    fileOps = genEncFile(filename)
    # fileOps.picklize_file()
    fileOps.unpack_file(filename, "7z")
| true
|