blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bfe512be9ddced55c4065aeac9ec5b40b2c9dd8a | Python | thdchang/Biodiversity-Dashboard | /app.py | UTF-8 | 4,154 | 2.640625 | 3 | [] | no_license | ## import dependencies
from flask import Flask, render_template, jsonify
#from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect, column
import os
import pandas as pd
from flask_sqlalchemy import SQLAlchemy
#---------------------------------------
# Flask App Setup
app = Flask(__name__)
# SQLAlchemy engine bound to the bundled SQLite belly-button dataset.
engine = create_engine("sqlite:///bellybutton.sqlite")
## Database setup: reflect the existing schema into mapped ORM classes.
Base = automap_base()
# Reflect the tables in the database (automap builds classes from them).
Base.prepare(engine, reflect=True)
# Mapped ORM classes for the two reflected tables.
Samples = Base.classes.samples
Samples_metadata = Base.classes.sample_metadata
# -------------------------------------------
# Below is script of a function to get values, labels for creating pie chart
def pieData(sample):
    """Build the pie-chart trace dict for one belly-button sample.

    Queries the ``samples`` table for the 10 OTUs with the highest counts in
    the given sample column and returns a dict with ``values`` (counts),
    ``labels`` (otu ids) and ``hoverinfo`` (otu label strings).

    NOTE(review): *sample* is interpolated directly into the SQL text because
    column names cannot be bound as parameters.  It must only ever come from
    the known sample-column names, otherwise this query is injectable.
    """
    # Top-10 rows for this sample column, largest counts first.
    top_10_samples_values = engine.execute(f"SELECT otu_id, otu_label, \"{sample}\" FROM samples ORDER BY \"{sample}\" DESC LIMIT 10;")
    pie_value = []
    pie_label = []
    pie_hovertext = []
    # Unpack each (otu_id, otu_label, count) row into the parallel trace lists.
    for otu_id, otu_label, count in top_10_samples_values:
        pie_value.append(count)
        pie_hovertext.append(otu_label)
        pie_label.append(otu_id)
    # (The dead ``pie_dict = {}`` pre-initialisation of the original was removed.)
    return {"values": pie_value, "labels": pie_label, "hoverinfo": pie_hovertext}
# -------------------------------------------
# below is the script of a function to get the bubble chart trace and layout
def bubbleData(sample):
    """Assemble the bubble-chart trace dict for one belly-button sample.

    Bubble x/color come from the OTU ids, y/size from the sample's counts,
    and the hover text from the OTU label strings.
    """
    rows = engine.execute(f"SELECT otu_id, otu_label, \"{sample}\" FROM samples;")
    x_values = []
    y_values = []
    marker_sizes = []
    marker_colors = []
    hover_texts = []
    for otu_id, otu_label, count in rows:
        x_values.append(otu_id)
        y_values.append(count)
        marker_sizes.append(count)
        marker_colors.append(otu_id)
        hover_texts.append(otu_label)
    return {"x": x_values, "y": y_values, "text": hover_texts,
            "color": marker_colors, "size": marker_sizes}
#-------------------------------------
# flask app routes
@app.route("/")
def index():
    """Render the landing page with the sample ids for the dropdown menu."""
    df = pd.read_sql("SELECT * FROM samples", engine)
    # Every column after the first two is a belly-button sample id
    # (presumably otu_id / otu_label come first -- verify against the schema).
    belly_button_samples = df.columns[2:].tolist()
    # index.html uses the list to create <option> tags for the dropdown.
    return render_template("index.html", belly_button_samples=belly_button_samples)
#route for retrieving sample data
@app.route("/samples/<sample>")
def samples(sample):
    """Return [pie_trace, bubble_trace] for *sample* as a JSON array.

    The array is consumed by the plotting code in app.js.
    """
    return jsonify([pieData(sample), bubbleData(sample)])
#route to retrieve metadata for sample
@app.route("/metadata/<sample>")
def metadata(sample):
    """Return the demographic metadata for *sample* as JSON.

    BUG FIX: the original only bound the result inside the row loop, so a
    sample with no metadata row raised UnboundLocalError (HTTP 500).  An
    empty JSON object is now returned in that case.

    NOTE(review): *sample* is interpolated into the SQL text; it must come
    from trusted sample ids, otherwise this is injectable.
    """
    metadata_query = engine.execute(f"SELECT * FROM sample_metadata WHERE sample = \"{sample}\";")
    result = {}
    # At most one row is expected per sample; the last row wins if several.
    for row in metadata_query:
        result = {"AGE": row.AGE, "BBTYPE": row.BBTYPE,
                  "ETHNICITY": row.ETHNICITY, "GENDER": row.GENDER,
                  "LOCATION": row.LOCATION, "SAMPLEID": row.sample}
    return jsonify(result)
#initialize flask app; set debug=True, unable to deploy on Heroku
if __name__ == "__main__":
    # Local development server only; debug=True is not suitable for
    # deployment (original note: unable to deploy on Heroku with it).
    app.run(debug=True)
d24ae522c6f7ca4c8f9c100e2b30c37aaeacd3e6 | Python | EnzoZuniga/Python | /Seance 1/Exo02.py | UTF-8 | 126 | 3.609375 | 4 | [] | no_license | a=int(input('Entrez votre valeur a: '))
b = int(input('Entrez votre valeur b: '))
# Swap the two values with Python's tuple assignment (no temp variable).
a, b = b, a
print('a=', a)
print('b=', b)
| true |
8f12a088af02afb940530ba89cbe246fa97d6958 | Python | martiansideofthemoon/Photometric-Redshifts | /test_codes/tf_redshifts.py | UTF-8 | 5,390 | 2.734375 | 3 | [] | no_license | import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer
import math
from math import isnan
import numpy as np
#COMMENT: np.random.seed(1337)
import time
import matplotlib.pyplot as plt
#COMMENT: tf.set_random_seed(1337)
def gloret(name, shape):
    """Create (or fetch) a TF variable using Xavier/Glorot initialization."""
    initializer = xavier_initializer()
    return tf.get_variable(name, shape=shape, initializer=initializer)
# Network input width (5 photometric magnitudes) and regression output width.
INPUT_SIZE = 5
NUM_CLASSES = 1
random_seed = 20
# Basic model parameters as external flags.
# NOTE: this is a Python 2 script using pre-1.0 TensorFlow flag APIs.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
flags.DEFINE_integer('max_steps', 2000, 'Number of steps to run trainer.')
flags.DEFINE_integer('hidden1', 100, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 30, 'Number of units in hidden layer 2.')
flags.DEFINE_integer('batch_size', 100, 'Batch size. '
                     'Must divide evenly into the dataset sizes.')
flags.DEFINE_integer('num_epoch', 10, 'Epoch size')
flags.DEFINE_string('train_dir', 'data', 'Directory to put the training data.')
flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data '
                     'for unit testing.')
# Bind the flag values to short local names used below.
batch_size = FLAGS.batch_size
hidden_size1 = FLAGS.hidden1
hidden_size2 = FLAGS.hidden2
learning_rate = FLAGS.learning_rate
num_epoch = FLAGS.num_epoch
print "Reading data..."
# Drop the 2 header rows and the first (index) column of the CSV.
data = np.genfromtxt("data/psf2.csv",delimiter=",")[2:,1:]
print "Data loaded..."
print "Filtering data..."
# Keep rows with a valid third column and no -9999 sentinel in the features.
data = np.array(filter(lambda x:((not isnan(x[2])) and min(x[2:])!=-9999), data))
print "Data filtered..."
print "Generating I/O vectors..."
# the parameters (photometric features)
X = data[:,2:]
# the following is TMP
assert X.shape[1] == 5
# the output (redshift target, first column)
Y = data[:,:1]
print "Normalizing data..."
# Min-max scale both X and Y into [0, 1].
minx, maxx = np.min(X), np.max(X)
miny, maxy = np.min(Y), np.max(Y)
X = (X-minx)/(maxx-minx)
Y = (Y-miny)/(maxy-miny)
print "Data normalized..."
# Fixed split: first 100k rows for training, next 10k for testing.
training_X = X[:100000,:]
training_Y = Y[:100000,:]
test_X = X[100000:110000,:]
test_Y = Y[100000:110000,:]
# Build the graph (2-hidden-layer MLP regressor on 5 photometric inputs),
# train it with RMSProp, then scatter-plot predictions vs. true redshifts.
# NOTE: uses pre-1.0 TensorFlow APIs (scalar_summary, initialize_all_variables,
# SummaryWriter) and Python 2 print statements.
with tf.Graph().as_default():
    tf.set_random_seed(random_seed)
    with tf.variable_scope('inputs'):
        # Fixed-size batch placeholders for the features and the target.
        photo_placeholder = tf.placeholder(tf.float32, shape=(batch_size, INPUT_SIZE), name='photometric_data')
        z_placeholder = tf.placeholder(tf.float32, shape=(batch_size), name='redshift')
    # COMMENT: Use Xavier/Glorot initialization; An implementation is available here
    # http://deliprao.com/archives/100
    # TensorFlow also has a xavier initializer:
    # https://www.tensorflow.org/versions/r0.8/api_docs/python/contrib.layers.html#xavier_initializer
    with tf.variable_scope('hidden1'):
        # NOTE(review): stddev is unused now that gloret() (Xavier) is used.
        stddev = 1.0 / math.sqrt(float(INPUT_SIZE))
        weights = tf.Variable(gloret('weights', [INPUT_SIZE, hidden_size1]).initialized_value())
        biases = tf.Variable(tf.zeros([hidden_size1]), name='biases')
        hidden1 = tf.matmul(photo_placeholder, weights) + biases
    with tf.variable_scope('hidden2'):
        # NOTE(review): stddev is unused here as well.
        stddev = 1.0 / math.sqrt(float(hidden_size1))
        weights = tf.Variable(gloret('weights', [hidden_size1, hidden_size2]).initialized_value())
        biases = tf.Variable(tf.zeros([hidden_size2]), name='biases')
        hidden2 = tf.matmul(hidden1, weights) + biases
    with tf.variable_scope('softmax_linear'):
        # Final linear projection down to the single regression output.
        weights = tf.Variable(gloret('weights', [hidden_size2, NUM_CLASSES]).initialized_value())
        biases = tf.Variable(tf.zeros([NUM_CLASSES]),
                             name='biases')
        logits = tf.matmul(hidden2, weights) + biases
    with tf.variable_scope('sigmoid'):
        # Squash the output into (0, 1) to match the min-max scaled targets.
        logits = tf.sigmoid(logits)
    with tf.variable_scope('loss'):
        # Mean squared error between predictions and scaled redshifts.
        loss = tf.reduce_mean(tf.squared_difference(logits, z_placeholder))
        tf.scalar_summary(loss.op.name, loss)
    with tf.variable_scope('train'):
        # Create the rmsprop optimizer with the given learning rate.
        optimizer = tf.train.RMSPropOptimizer(learning_rate)
        # Create a variable to track the global step.
        #global_step = tf.Variable(0, name='global_step', trainable=False)
        # Use the optimizer to apply the gradients that minimize the loss
        # (and also increment the global step counter) as a single training step.
        train_op = optimizer.minimize(loss)
    init = tf.initialize_all_variables()
    sess = tf.Session()
    merged = tf.merge_all_summaries()
    writer = tf.train.SummaryWriter("logs/", sess.graph)
    sess.run(init)
    # Integer division on purpose (Python 2): batch_size must divide the set.
    steps_per_epoch = len(training_X) / batch_size
    print str(steps_per_epoch) + " steps per epoch"
    for epoch in range(0, num_epoch):
        start_time = time.time()
        for step in range(0, steps_per_epoch):
            # Feed one contiguous mini-batch of features and targets.
            feed_dict = {
                photo_placeholder: training_X[step*batch_size:(step+1)*batch_size],
                z_placeholder: training_Y[step*batch_size:(step+1)*batch_size,0]
            }
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
            result = sess.run(merged, feed_dict=feed_dict)
            writer.add_summary(result, epoch*steps_per_epoch + step)
        duration = time.time() - start_time
        print('Epoch %d: loss = %.5f (%.3f sec)' % (epoch, loss_value, duration))
    # Evaluate: collect predictions over the whole test split.
    steps_per_epoch = len(test_X) / batch_size
    total = []
    for step in range(0, steps_per_epoch):
        feed_dict = {
            photo_placeholder: test_X[step*batch_size:(step+1)*batch_size],
            z_placeholder: test_Y[step*batch_size:(step+1)*batch_size,0]
        }
        total.extend(sess.run(logits, feed_dict=feed_dict)[:,0])
    # Predicted vs. true (scaled) redshifts.
    plt.scatter(test_Y[:,0], total)
    plt.show()
    #print "Final loss is " + str(math.sqrt(total/10000.0))
2b2cb710ed0d4bac2d3a687058a2203c5eb12b42 | Python | sylgas/HistoricalSocialNetworkAnalysis | /src/analysis/graph/centrality.py | UTF-8 | 2,962 | 2.875 | 3 | [] | no_license | import operator
import networkx as nx
from src.analysis.printer import FunctionPrinter
class CentralityMeasurer:
    """Computes and ranks node-centrality measures for a networkx graph."""

    def __init__(self, graph):
        self.graph = graph

    def print_all(self):
        """Print the top-20 ranking for each individual centrality measure."""
        for statistic in (self.degree_centrality_ranking,
                          self.betweeness_centrality_ranking,
                          self.closeness_centrality_ranking,
                          self.eigenvector_centrality_ranking,
                          self.page_rank_ranking):
            FunctionPrinter.print_statistic(statistic)

    def degree_centrality_ranking(self):
        """Top-20 nodes by degree centrality."""
        return self.create_ranking(nx.degree_centrality(self.graph))

    def betweeness_centrality_ranking(self):
        """Top-20 nodes by (exact) betweenness centrality."""
        # Sampled variant kept for reference:
        # results = nx.betweenness_centrality(self.graph, k=200)
        return self.create_ranking(nx.betweenness_centrality(self.graph))

    def closeness_centrality_ranking(self):
        """Top-20 nodes by closeness centrality."""
        return self.create_ranking(nx.closeness_centrality(self.graph))

    def eigenvector_centrality_ranking(self):
        """Top-20 nodes by eigenvector centrality (empty on failure)."""
        try:
            results = nx.eigenvector_centrality(self.graph)
        except nx.NetworkXError:
            print('Eigenvector error')
            results = {}
        return self.create_ranking(results)

    def page_rank_ranking(self):
        """Top-20 nodes by PageRank."""
        return self.create_ranking(nx.pagerank(self.graph))

    def sum_centrality(self):
        """Return {node: sum of the five raw centrality scores}."""
        measures = (nx.degree_centrality(self.graph),
                    nx.betweenness_centrality(self.graph),
                    nx.closeness_centrality(self.graph),
                    nx.eigenvector_centrality(self.graph),
                    nx.pagerank(self.graph))
        return {node: sum(measure[node] for measure in measures)
                for node in measures[0]}

    def print_sum_centrality(self):
        """Print each individual ranking, then the averaged combined one."""
        measures = (nx.degree_centrality(self.graph),
                    nx.betweenness_centrality(self.graph),
                    nx.closeness_centrality(self.graph),
                    nx.eigenvector_centrality(self.graph),
                    nx.pagerank(self.graph))
        for measure in measures:
            self.print_ranking(measure)
        # Average of the five measures per node (keys taken from degree).
        centrality = {node: sum(measure[node] for measure in measures) / 5
                      for node in measures[0]}
        self.print_ranking(centrality)
        return centrality

    def print_ranking(self, results):
        """Print the top-20 ranking derived from a node->score dict."""
        print(self.create_ranking(results))

    @staticmethod
    def create_ranking(results):
        """Return the 20 highest-scoring (node, score) pairs."""
        ordered = sorted(results.items(), key=operator.itemgetter(1),
                         reverse=True)
        return ordered[:20]
| true |
e7fb6ba6a037eda946364550bb77e027525fb562 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_116/1153.py | UTF-8 | 1,160 | 3.28125 | 3 | [] | no_license | # Python version 2.7
import sys
size = 4
def oneCase():
    """Read one size x size tic-tac-toe board from stdin and judge it.

    'T' is a wildcard that counts for both players.  Returns one of
    "X won", "O won", "Draw" or "Game has not completed".
    """
    board = [sys.stdin.readline() for _ in range(size)]
    # 0/1 masks per player; 'T' counts for both X and O.
    mX = [[1 if c in 'XT' else 0 for c in row] for row in board]
    mO = [[1 if c in 'OT' else 0 for c in row] for row in board]
    haveEmpty = any('.' in row for row in board)

    def line_sums(mask):
        """All row, column and diagonal sums of a 0/1 mask."""
        sums = []
        for i in range(size):
            sums.append(sum(mask[i]))                 # row i
            sums.append(sum(row[i] for row in mask))  # column i
        sums.append(sum(mask[i][i] for i in range(size)))           # main diagonal
        sums.append(sum(mask[i][size - i - 1] for i in range(size)))  # anti-diagonal
        return sums

    # A full line of size marks means that player has won.
    if max(line_sums(mX)) == size:
        return "X won"
    if max(line_sums(mO)) == size:
        return "O won"
    return "Game has not completed" if haveEmpty else "Draw"
cases = int(sys.stdin.readline())
for i in range(cases):
print "Case #" + str(i+1) + ": " + oneCase()
sys.stdin.readline()
| true |
79c267d328202511b55729dfe866e2b4417e14f6 | Python | Raj-kar/Python | /Nptel/week- 03 solutions.py | UTF-8 | 714 | 3.5625 | 4 | [] | no_license | # <------ solution programming assignment 1 -------> #
s1 = int(input())
s2 = int(input())
s3 = int(input())
s4 = int(input())
s5 = int(input())
print((s1 + s2 + s3 + s4 + s5) / 5, end="")
# <------ solution programming assignment 2 -------> #
list_1 = []
for i in range(1, 51):
list_1.append(i)
a, b = input().split()
a = int(a)
b = int(b)
new_list = list_1[a:b]
for i in new_list:
print(i)
# <------ solution programming assignment 3 -------> #
list_1 = []
for i in range(1, 51):
list_1.append(i)
num = int(input())
count = 0
list_1 = list_1[num:]
for i in list_1:
if i % num == 0:
count += 1
print(count, end="")
# <-------- Practiced purposed only < Raj™ /> --------------> # | true |
6eaf3cca5a631e40872c7ae44318c0058192527b | Python | renfanzi/python3_Variance_Chisquare | /common/util/myAnalysis.py | UTF-8 | 3,744 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from scipy.stats import chisqprob
from common.base import my_log
import pandas as pd
from statsmodels.formula.api import ols
import statsmodels.api as sm
from pandas import DataFrame
def MyVariance(df_dropna, variableOne, variableTwo):
try:
# df_dropna = MyVarianceModel(variableOne, variableTwo, table, where)
flag = 1
expr = '{}~C({})'.format(variableOne, variableTwo)
v2sum = 0
for i in range(len(df_dropna[variableTwo])):
if (df_dropna[variableTwo]).iloc[0] == (df_dropna[variableTwo]).iloc[i]:
v2sum += 1
if v2sum == len(df_dropna[variableTwo]):
flag = 0
if flag == 1:
mod = ols(expr, data=df_dropna).fit()
anova_table = sm.stats.anova_lm(mod)
ret = {'df': list(anova_table.df),
'sum_sq': list(anova_table.sum_sq),
'mean_sq': list(anova_table.mean_sq),
'F': list(anova_table.F)[0],
'P': list(anova_table.values.T[-1])[0]
}
else:
ret = {"df": "NAN", "sum_sq": "NAN", "mean_sq": "NAN", "F": "NAN", "P": "NAN"} # P大写
except Exception as e:
my_log.error(e)
ret = {"df": "NAN", "sum_sq": "NAN", "mean_sq": "NAN", "F": "NAN", "P": "NAN"}
return ret
class MyChiSquare2way():
def __init__(self):
pass
@classmethod
def run(cls, df_dropna, variableOne, variableTwo):
try:
setVariableOne = list(set(df_dropna[variableOne]))
setvariableTwo = list(set(df_dropna[variableTwo]))
myNewPandasDict = {}
for i in setVariableOne:
myNewPandasSubKeyDataDict = {}
for j in range(len(df_dropna.index)):
if i == df_dropna.iloc[j][variableOne]:
myNewPandasSubKeyDataDict[df_dropna.iloc[j][variableTwo]] = df_dropna.iloc[j]["count"]
myNewPandasDict[i] = myNewPandasSubKeyDataDict
dataCountValueDataFrame = DataFrame(myNewPandasDict).fillna(0)
floatNi = [] # 行的和
floatNj = [] # 列的和
chisq = 0
for i in dataCountValueDataFrame.index:
floatNi.append(dataCountValueDataFrame.ix[i].sum())
for i in dataCountValueDataFrame.columns:
floatNj.append(dataCountValueDataFrame[i].sum())
sumTotal = sum(floatNi)
Eij = [] # 理论频率, 二维数组
for i in range(len(dataCountValueDataFrame.index)):
for j in range(len(dataCountValueDataFrame.iloc[i])):
subValue = dataCountValueDataFrame.iloc[i].iloc[j] # 原始值
subEij = floatNi[i] * floatNj[j] / sumTotal # 理论频率
subChisqValue = ((subValue - subEij) * (subValue - subEij)) / float(subEij)
# print("=============")
# print(subValue)
# print(floatNi[i]) # 行的和
# print(floatNj[j])
# print(subEij)
# print(subChisqValue)
# print("############")
chisq += subChisqValue
df = (len(floatNi) - 1) * (len(floatNj) - 1)
P = chisqprob(chisq, df)
# df 自由度
# p 显著性
# 值 pvalue
# chisq 卡方
ChiSquare2wayValue = {"chisq": chisq, "df": df, "N": sumTotal, "P": P}
except Exception as e:
ChiSquare2wayValue = {"chisq": "NAN", "df": "NAN", "P": "NAN", "N": "NAN"}
return ChiSquare2wayValue
| true |
28342e000112f51658bc1608fd9f0bf3fc21a38b | Python | andrea841/pythonbackup | /1.4.2/Jia_1.4.2.py | UTF-8 | 9,885 | 3.390625 | 3 | [] | no_license |
''' Part 1: Working with a File System '''
#4 C:/Users/Student login/Desktop/nice.jpg
#5 ../Student login/Desktop/nice.jpg
#6 C:\\Windows\\Cursors\\cursor1.png is an absolute filename, and can make
# sense no matter which directory it is currently in. The difference between
# these commands is that they perform different functions in order to
# manipulate files.
''' Part 2: Rendering an Image on the Screen '''
#7
# '''
# JDoe_JSmith_1_4_2: Read and show an image. (*This is old code)
# '''
# import matplotlib.pyplot as plt
# import os.path
# import numpy as np # 'as' lets us use standard abbreviations
# '''Read the image data'''
# # Get the directory of this python script
# directory = os.path.dirname(os.path.abspath(__file__))
# # Build an absolute filename from directory + filename
# filename = os.path.join(directory, 'woman.jpg')
# # Read the image data into an array
# img = plt.imread(filename)
# '''Show the image data'''
# # Create figure with 1 subplot
# fig, ax = plt.subplots(1, 1)
# # Show the image data in a subplot
# ax.imshow(img, interpolation='none')
# # Show the figure on the screen
# fig.show()
# '''
# JDoe_JSmith_1_4_2: Read and show an image.
# '''
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os.path
import numpy as np # 'as' lets us use standard abbreviations
# '''Read the image data'''
# # Get the directory of this python script
# directory = os.path.dirname(os.path.abspath(__file__))
# # Build an absolute filename from directory + filename
# filename = os.path.join(directory, 'woman.jpg')
# # Read the image data into an array
# img = plt.imread(filename)
# '''Show the image data'''
# # Create figure with 1 subplot
# fig, ax = plt.subplots(1, 1)
# # Show the image data in a subplot
# ax.imshow(img, interpolation='none')
# # Show the figure on the screen
# # fig.show()
# fig.savefig('women_plot')
''' 7: The differences include "matplotlib.use("Agg")" and
"fig.savefig('women_plot')" - these fixed the code because of the way the
environment was set up by the code. The Cloud 9 Python environment is a
non-GUI (graphic user interface) workspace, while matplotlib is set up to
work in a GUI environment. '''
#7a (280, 400)
#7b (60, 40)
# '''
# JDoe_JSmith_1_4_2: Read and show an image.
# '''
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# import os.path
# import numpy as np # 'as' lets us use standard abbreviations
# '''Read the image data'''
# # Get the directory of this python script
# directory = os.path.dirname(os.path.abspath(__file__))
# # Build an absolute filename from directory + filename
# filename = os.path.join(directory, 'cat1-a.gif')
# # Read the image data into an array
# img = plt.imread(filename)
# '''Show the image data'''
# # Create figure with 1 subplot
# fig, ax = plt.subplots(1, 1)
# # Show the image data in a subplot
# ax.imshow(img, interpolation='none')
# # Show the figure on the screen
# # fig.show()
# fig.savefig('cat_plot')
''' Part Three: Objects and Methods '''
#8a fig is an instance of the class Figure.
#8a ax is an instance of the class AxesSubplot.
#8b Similarly, in line 25, the method savefig() is being called on the object fig.
#That method is being given 1 arguments. That method is a method of the
#class Figure.
#8c The lines of comments right above a certain line of code explain what it
# does. The triple quote comments explain the general function of a section
# of code.
''' Part IV: Arrays of Objects'''
#9a The method imshow() is being called on the object ax.
# '''
# JDoe_JSmith_1_4_2: Read and show an image.
# '''
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# import os.path
# import numpy as np # 'as' lets us use standard abbreviations
# '''Read the image data'''
# # Get the directory of this python script
# directory = os.path.dirname(os.path.abspath(__file__))
# # Build an absolute filename from directory + filename
# filename = os.path.join(directory, 'cat1-a.gif')
# # Read the image data into an array
# img = plt.imread(filename)
# '''Show the image data'''
# # Create a 1x2 grid of subplots
# # fig is the Figure, and ax is an ndarray of AxesSubplots
# # ax[0] and ax[1] are the two Axes Subplots
# fig, ax = plt.subplots(1, 2)
# # Show the image data in the first subplot
# ax[0].imshow(img, interpolation='none')
# ax[1].imshow(img, interpolation='none')
# # Show the figure on the screen
# # fig.show()
# fig.savefig('cat_plot')
#9b: Check 1.4.2 directory
# '''
# two_women.png
# '''
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# import os.path
# import numpy as np # 'as' lets us use standard abbreviations
# '''Read the image data'''
# # Get the directory of this python script
# directory = os.path.dirname(os.path.abspath(__file__))
# # Build an absolute filename from directory + filename
# filename = os.path.join(directory, 'woman.jpg')
# # Read the image data into an array
# img = plt.imread(filename)
# '''Show the image data'''
# # Create a 1x2 grid of subplots
# # fig is the Figure, and ax is an ndarray of AxesSubplots
# # ax[0] and ax[1] are the two Axes Subplots
# fig, ax = plt.subplots(1, 2)
# # Show the image data in the first subplot
# ax[0].imshow(img, interpolation='none')
# ax[1].imshow(img, interpolation='none')
# # Show the figure on the screen
# # fig.show()
# fig.savefig('two_women')
# '''
# five_cats.png
# '''
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# import os.path
# import numpy as np # 'as' lets us use standard abbreviations
# '''Read the image data'''
# # Get the directory of this python script
# directory = os.path.dirname(os.path.abspath(__file__))
# # Build an absolute filename from directory + filename
# filename = os.path.join(directory, 'cat1-a.gif')
# # Read the image data into an array
# img = plt.imread(filename)
# '''Show the image data'''
# # Create a 1x2 grid of subplots
# # fig is the Figure, and ax is an ndarray of AxesSubplots
# # ax[0] and ax[1] are the two Axes Subplots
# fig, ax = plt.subplots(1, 5)
# # Show the image data in the first subplot
# ax[0].imshow(img, interpolation='none')
# ax[1].imshow(img, interpolation='none')
# ax[2].imshow(img, interpolation='none')
# ax[3].imshow(img, interpolation='none')
# ax[4].imshow(img, interpolation='none')
# # Show the figure on the screen
# # fig.show()
# fig.savefig('five_cats')
''' Part V: Keyword = Value Pairs '''
#10 Interpolation between data points causes the image to become blurred, as the
# code finds the average of two distinct points.
#11a See experiment.png
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# import os.path
# import numpy as np # 'as' lets us use standard abbreviations
# directory = os.path.dirname(os.path.abspath(__file__))
# # Build an absolute filename from directory + filename
# filename = os.path.join(directory, 'woman.jpg')
# # Read the image data into an array
# img = plt.imread(filename)
# # Create figure with 2 subplots
# fig, ax = plt.subplots(1, 2)
# # Show the image data in the first subplot
# ax[0].imshow(img, interpolation='none') # Override the default
# ax[1].imshow(img)
# ax[0].set_xlim(135, 165)
# ax[0].set_ylim(470, 420)
# ax[1].set_xlim(135, 165)
# ax[1].set_ylim(470, 420)
# ax[0].axis('off')
# # Show the figure on the screen
# # fig.show()
# fig.savefig('experiment')
#11b See three_closeup.png
'''Read the image data'''
# Get the directory of this python script
# directory = os.path.dirname(os.path.abspath(__file__))
# # Build an absolute filename from directory + filename
# filename = os.path.join(directory, 'cat1-a.gif')
# # Read the image data into an array
# img = plt.imread(filename)
# # Create figure with 2 subplots
# fig, ax = plt.subplots(1, 3)
# # Show the image data in the first subplot
# ax[0].imshow(img, interpolation='none') # Override the default
# ax[1].imshow(img, interpolation='none')
# ax[2].imshow(img, interpolation='none')
# ax[0].set_xlim(40, 50)
# ax[0].set_ylim(20, 30)
# ax[1].set_xlim(50, 60)
# ax[1].set_ylim(10, 20)
# ax[2].set_xlim(20, 30)
# ax[2].set_ylim(30, 40)
# # Show the figure on the screen
# # fig.show()
# fig.savefig('three_closeup')
#12 Axes.pie() is another method of AxesSubplot that makes a pie chart. One
# optional argument is radius, and its default value is None, which creates
# a pie chart with radius 1.
#13 See crazy_mice.png
# Get the directory of this python script
directory = os.path.dirname(os.path.abspath(__file__))
# Build an absolute filename from directory + filename
filename = os.path.join(directory, 'PCWmice1.jpg')
# Read the image data into an array
img = plt.imread(filename)
# Create figure with 2 subplots
fig, ax = plt.subplots(1, 1)
# Show the image data in the first subplot
ax.imshow(img, interpolation='none')
ax.plot(138, 42, 'ro') #white mouse right eye
ax.plot(116, 41, 'ro') #white mouse left eye
ax.plot(37, 49, 'ro') #black mouse eye
# Show the figure on the screen
fig.savefig('crazy_mice')
''' Conclusion '''
#1 Absolute file names contain all the parent directories of the file specified,
# while relative file paths contain only directories necessary from that
# position in the file tree.
#2 An object is an instance of a certain class that it is instantiated in.
#3 Methods are functions that manipulate the object and are specific to a
# certain class; properties are characteristics of the object, which may be
# specified when they are instantiated or set to the default value of the
# class.
#4 When a method is called on an object, a certain action is done to the object,
# and properties of that object may be changed.
| true |
8c83732d355d1f5712eae04426ce22ab9ed74377 | Python | sinoroc/pmpc | /tests/test_fsm.py | UTF-8 | 3,686 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | """ Tests for finite state machine
"""
import unittest
import pmpc.fsm
class Machine:  # pylint: disable=too-few-public-methods
    """FSM test subject: two states with transition, handler and hook wiring."""

    def __init__(self):
        # State 'one' handles 'toggle'/'set' events, transitions to 'two'
        # on 'switch', and records the event value when it is left.
        state_one = {
            'transitions': {
                'switch': {
                    'next_state': 'two',
                },
            },
            'handlers': {
                'toggle': self._handle_toggle,
                'set': self._handle_set,
            },
            'leave': self._leaving_one,
        }
        # State 'two' only records the event value on entry.
        state_two = {
            'enter': self._entering_two,
        }
        self.fsm = pmpc.fsm.Fsm({'one': state_one, 'two': state_two}, 'one')
        # Observable side effects the tests assert on.
        self.toggle_flag = False
        self.register = None
        self.left_one_register = None
        self.entered_two_register = None

    def _handle_toggle(self, dummy_event):
        """'toggle' handler: flip the flag regardless of the event payload."""
        self.toggle_flag = True

    def _handle_set(self, event):
        """'set' handler: store the event's value."""
        self.register = event['value']

    def _leaving_one(self, event):
        """Leave hook for state 'one': record the triggering event's value."""
        self.left_one_register = event['value']

    def _entering_two(self, event):
        """Enter hook for state 'two': record the triggering event's value."""
        self.entered_two_register = event['value']
class TestFsm(unittest.TestCase):
    """Test cases for the finite state machine."""

    VALUE = 1

    def setUp(self):
        self.machine = Machine()
        self.fsm = self.machine.fsm

    def _send(self, event_type, value):
        """Dispatch a single {'type', 'value'} event to the FSM."""
        self.fsm.handle_event({'type': event_type, 'value': value})

    def _state(self):
        """Peek at the FSM's current state name (white-box access)."""
        return self.fsm._current_state_name  # pylint: disable=protected-access

    def test_00_inital_state(self):
        """The machine starts out in state 'one'."""
        self.assertEqual(self._state(), 'one')

    def test_01_transition(self):
        """A 'switch' event moves the machine from 'one' to 'two'."""
        self.assertEqual(self._state(), 'one')
        self._send('switch', None)
        self.assertEqual(self._state(), 'two')

    def test_02_handler(self):
        """A 'toggle' event invokes the state's registered handler."""
        self.assertEqual(self.machine.toggle_flag, False)
        self._send('toggle', None)
        self.assertEqual(self.machine.toggle_flag, True)

    def test_03_event(self):
        """The event dict is passed through to the handler intact."""
        self.assertNotEqual(self.machine.register, self.VALUE)
        self._send('set', self.VALUE)
        self.assertEqual(self.machine.register, self.VALUE)

    def test_04_leave(self):
        """The 'leave' hook fires when exiting a state."""
        self.assertNotEqual(self.machine.left_one_register, self.VALUE)
        self._send('switch', self.VALUE)
        self.assertEqual(self.machine.left_one_register, self.VALUE)

    def test_05_enter(self):
        """The 'enter' hook fires when entering a state."""
        self.assertNotEqual(self.machine.entered_two_register, self.VALUE)
        self._send('switch', self.VALUE)
        self.assertEqual(self.machine.entered_two_register, self.VALUE)
# EOF
| true |
80280279f8355601eb1c7a934290d2f1b4da19fd | Python | turnkeylinux/octohub | /contrib/offline-issues/parse.py | UTF-8 | 3,627 | 2.828125 | 3 | [] | no_license | #!/usr/bin/python3
# Copyright (c) 2013 Alon Swartz <alon@turnkeylinux.org>
#
# This file is part of octohub/contrib
#
# OctoHub is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
"""
Parse local Github issues and generate directory listing
Arguments:
issues.json Path to json encoded issues
outdir Path to create directory listing
Options:
--noinit Don't initialize directory listing (use with care)
Generated directory listing:
all/:issue.number
state/:issue.state/:issue.title|slug -> ../../all/:issue.number
labels/:issue.label/:issue.title|slug -> ../../all/:issue.number
assignee/:assignee.login/:issue.title|slug -> ../../all/:issue.number
"""
import re
import os
import sys
import getopt
import shutil
import unicodedata
import json
from octohub.response import parse_element
def fatal(e):
    """Report *e* on stderr and terminate the process with exit status 1."""
    print(f'Error: {e}', file=sys.stderr)
    sys.exit(1)
def usage(e=None):
    """Print an optional error plus the usage text to stderr, then exit(1)."""
    syntax = 'Syntax: %s [-options] issues.json outdir' % sys.argv[0]
    if e:
        print('Error:', e, file=sys.stderr)
    print(syntax, file=sys.stderr)
    # __doc__ is this module's docstring (the usage text at the top of the file).
    print(__doc__.strip(), file=sys.stderr)
    sys.exit(1)
def mkdir(path):
    """Create *path* (and any missing parents) if it does not already exist.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    ``exists()``-then-``makedirs()`` pair, which had a check-then-create
    race when two processes built the listing concurrently.
    """
    os.makedirs(path, exist_ok=True)
def symlink(target, link_name):
    """Create a symlink link_name -> target, making parent dirs as needed.

    Does nothing if *link_name* already exists.
    """
    if os.path.exists(link_name):
        return
    mkdir(os.path.dirname(link_name))
    os.symlink(target, link_name)
def slugify(value):
    """Normalize a string into a slug.

    Decomposes to ASCII (dropping accents), removes characters that are not
    word characters, whitespace or hyphens, lowercases, and collapses runs
    of whitespace/hyphens into single hyphens.
    """
    ascii_bytes = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    cleaned = re.sub(rb'[^\w\s-]', b'', ascii_bytes).strip().lower()
    return re.sub(rb'[-\s]+', b'-', cleaned).decode()
def output_issues(issues, outdir):
    """Write each issue under all/<number> and symlink it from the
    state/, labels/ and assignee/ trees by slugified title."""
    for issue in issues:
        slug = slugify(issue.title)
        issue_path = os.path.join(outdir, 'all', str(issue.number))
        # Relative target so the listing can be moved as a whole.
        relative_target = '../../all/%s' % str(issue.number)

        mkdir(os.path.dirname(issue_path))
        with open(issue_path, 'w') as fob:
            json.dump(issue, fob, indent=1)

        symlink(relative_target, os.path.join(outdir, 'state', issue.state, slug))
        for label in issue.labels:
            symlink(relative_target, os.path.join(outdir, 'labels', label.name, slug))
        if issue.assignee:
            symlink(relative_target, os.path.join(outdir, 'assignee', issue.assignee.login, slug))
def main():
    """Parse the command line, load the issues JSON and emit the listing.

    Bug fix: the original called json.loads(fob) with an open file object;
    json.loads expects a string, so loading always raised TypeError.
    json.load() reads directly from the file handle.
    """
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], 'h', ['help', 'noinit'])
    except getopt.GetoptError as e:
        usage(e)

    init = True
    for opt, val in opts:
        if opt in ('-h', '--help'):
            usage()
        if opt == '--noinit':
            init = False

    if len(args) == 0:
        usage()
    if len(args) != 2:
        usage('incorrect number of arguments')

    infile = args[0]
    outdir = args[1]
    if not os.path.exists(infile):
        fatal('path does not exist: %s' % infile)

    if init:
        # Wipe the derived symlink trees; all/ is kept so issue bodies persist.
        for dir in ('state', 'labels', 'assignee'):
            path = os.path.join(outdir, dir)
            if os.path.exists(path):
                shutil.rmtree(path)

    with open(infile, 'r') as fob:
        issues_dict = json.load(fob)

    issues_parsed = parse_element(issues_dict)
    output_issues(issues_parsed, outdir)
# Script entry point.
if __name__ == "__main__":
    main()
| true |
1bd40c0567e1be830c061d73e6028a2e84e9f218 | Python | tiagodavi70/sentiment_models | /image_training/training_models.py | UTF-8 | 3,932 | 2.609375 | 3 | [] | no_license |
from keras import *
import keras
import keras.preprocessing.image as im
import cv2 as cv
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import model_utils as utils
import argparse
from keras.applications.imagenet_utils import preprocess_input
### Plot charts or images, wrapper for matplotlib axes and figs
def get_ax(rows=1, cols=1, figsize=(4, 4), imgmode=False, returnfig=False):
    """Create a matplotlib figure/axes grid.

    Parameters
    ----------
    rows, cols : grid shape handed to plt.subplots.
    figsize : figure size in inches (rendered at dpi=100).
    imgmode : when True, clear every axis and hide its ticks so images can
        be drawn without decorations.
    returnfig : when True return (fig, axes), otherwise only the axes.
    """
    fig, axes = plt.subplots(figsize=figsize, dpi=100, nrows=rows, ncols=cols)
    if imgmode:
        if rows == 1 and cols == 1:
            _prepare_image_axis(axes)
        else:
            # plt.subplots returns a 1-D or 2-D ndarray depending on rows/cols.
            for ax in axes:
                if isinstance(ax, np.ndarray):
                    for a in ax:
                        _prepare_image_axis(a)
                else:
                    _prepare_image_axis(ax)
    return (fig, axes) if returnfig else axes


def _prepare_image_axis(ax):
    """Clear *ax* and hide both tick axes (image-mode helper for get_ax)."""
    ax.clear()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
def datafromDir(imshape, batch_size=32, train_dir='dataset/Training',
                val_dir='dataset/PublicTest'):
    """Create Keras image generators for training and validation.

    Parameters
    ----------
    imshape : (height, width, channels) -- only the first two entries are
        used as the generator target size.
    batch_size : batch size for the training generator (the validation
        generator keeps the Keras default).
    train_dir, val_dir : dataset locations; the defaults preserve the
        previously hard-coded paths, so existing callers are unaffected.
    """
    # Training data gets light augmentation; validation data only the
    # ImageNet-style preprocessing.
    train_datagen = im.ImageDataGenerator(
        preprocessing_function=preprocess_input,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
    val_datagen = im.ImageDataGenerator(preprocessing_function=preprocess_input)

    train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(imshape[0], imshape[1]),
        batch_size=batch_size,
        class_mode="categorical")

    validation_generator = val_datagen.flow_from_directory(
        val_dir,
        target_size=(imshape[0], imshape[1]),
        class_mode="categorical")

    return {"training": train_generator, "validation": validation_generator}
if __name__ == '__main__':
    # CLI: only the model architecture name is configurable.
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--modelname', required=True, help="Model name")
    # parser.add_argument('-o', '--output', required=True, help="path of the output directory")
    args = parser.parse_args()
    # Hyper-parameter grid: learning rates (alphas) x lr decays (lambdas).
    alphas = [1e-4, 1e-5, 1e-6, 1e-7]
    lambdas = [1e-4, 1e-5, 1e-6]
    batch_size = 64
    num_classes = 6
    epochs = 50
    dir_path = "training_results/" + args.modelname
    # Images are fed at twice the 48x48 base resolution, 3 channels.
    imshape = (48*2, 48*2, 3)
    for alpha in alphas:
        for lamb in lambdas:
            # One results directory per (lr, decay) pair; existing directories
            # are skipped so an interrupted sweep can be resumed.
            prefix = 'FaceSentiment_'+'{:.0e}'.format(alpha)+'_decay_'+'{:.0e}'.format(lamb)
            exec_path = os.path.join(dir_path, prefix)
            if not os.path.isdir(exec_path):
                opt = keras.optimizers.rmsprop(lr=alpha, decay=lamb)
                print("####################################")
                print("Starting: " + prefix)
                data = datafromDir(imshape, batch_size)
                # class indices
                # {'angry': 0, 'fear': 1, 'happy': 2, 'neutral': 3, 'sad': 4, 'surprise': 5}
                model = utils.createModel(args.modelname, imshape, num_classes)
                model.compile(loss='categorical_crossentropy',
                              optimizer=opt,
                              metrics=['acc'])
                hist = model.fit_generator(data["training"],
                                steps_per_epoch=len(data["training"]), # generator was created with batch of 32 images
                                epochs=epochs,
                                validation_data=data["validation"],
                                shuffle=True, verbose=1)
                scores = model.evaluate_generator(data["validation"], verbose=1)
                print('Test loss:', scores[0])
                print('Test accuracy:', scores[1])
                utils.saveModel(exec_path, model, scores, hist,saveH5=False)
            else:
                print(prefix + ' exists')
| true |
4e8adc4f033b90345c01d279fdabf83c8d3bdce5 | Python | augustsemrau/Pandas_Scikit-Learn_Classification_Model-Optimization | /models.py | UTF-8 | 3,033 | 2.828125 | 3 | [] | no_license | """
Building predictive model on classification principles.
@AugustSemrau
"""
from data_loader import dataLoader
# SciKit-Learn
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
import xgboost as xgb
from xgboost import XGBClassifier
class Models:
    """Trains a suite of classification models on the data returned by
    dataLoader.

    Every build_* method fits its estimator on the same stored training set
    and returns the fitted model; the shared fit/return boilerplate that was
    duplicated in each method now lives in _fit().
    """

    def __init__(self, agenan="median"):
        # agenan selects the imputation strategy for missing age values
        # (forwarded to dataLoader as ageNAN).
        self.X, self.y = dataLoader(test=False, optimize_set=False, ageNAN=agenan)
        self.y = self.y.values.ravel()

    def _fit(self, model):
        """Fit *model* on the stored training data and return it."""
        model.fit(self.X, self.y)
        return model

    # Logistic Regression Model
    def build_model_LR(self):
        return self._fit(LogisticRegression(max_iter=1000, random_state=0))

    # Naive Bayes Model
    def build_model_NB(self):
        return self._fit(GaussianNB())

    # Stochastic Gradient Descent Model
    def build_model_SGD(self):
        return self._fit(SGDClassifier(loss='squared_loss', shuffle=True, random_state=0))

    # K-Nearest Neighbors Model
    def build_model_KNN(self):
        return self._fit(KNeighborsClassifier(n_neighbors=10))

    # Decision Tree Model
    def build_model_DT(self):
        return self._fit(DecisionTreeClassifier(random_state=0))

    # Random Forest Model, no tuning
    def build_model_RF(self):
        return self._fit(RandomForestClassifier(oob_score=True, random_state=0))

    # Random Forest Model, Bayesian Optimization tuned
    def build_optimized_RF(self):
        model = RandomForestClassifier(n_estimators=146, max_depth=20, max_features='sqrt',
                                       criterion='entropy', oob_score=True, random_state=0)
        # Alternative tuning kept for reference:
        # RandomForestClassifier(n_estimators=141, max_depth=10, max_features='sqrt',
        #                        criterion='entropy', oob_score=True, random_state=0)
        return self._fit(model)

    # Support Vector Machine Model
    def build_model_SVC(self):
        return self._fit(SVC(kernel='linear', C=0.025, random_state=0))

    # Extreme Gradient Boosting Model
    def build_model_XGB(self):
        return self._fit(XGBClassifier(n_estimators=1000, learning_rate=0.5))
# # Setting model parameters
# # param = {
# # 'eta': 0.3, # Learning rate
# # 'max_depth': 3,
# # 'objective': 'multi:softprob',
# # 'num_class': 3}
# # Train model
# model = xgb.sklearn.XGBClassifier(eta=0.3, max_depth=3, objective='multi:softprob', num_class=3, steps=20)
| true |
8eab802feb245be2a89c161a24ecda4ca966ef70 | Python | j-pettit/pfinance | /tests/test_functions.py | UTF-8 | 14,822 | 3.015625 | 3 | [
"MIT"
] | permissive | from pfinance import conversion, depreciation, general, securities, time_value
# Helper functions
def _compare_list_float(list1, list2, rounding_precision):
    """Element-wise compare two float lists after rounding to
    *rounding_precision* places, printing a diagnostic on mismatch."""
    if len(list1) != len(list2):
        print("Lists are different lengths.")
        print("list1 length:", len(list1))
        print("list2 length:", len(list2))
        return False
    for i, (left, right) in enumerate(zip(list1, list2)):
        rounded_left = round(left, rounding_precision)
        rounded_right = round(right, rounding_precision)
        if rounded_left != rounded_right:
            print("List index", str(i), "have different values.")
            print("list1[" + str(i) + "] =", rounded_left)
            print("list2[" + str(i) + "] =", rounded_right)
            return False
    return True
def test_compare_list_float():
    """Self-test for _compare_list_float: length mismatch, value mismatch, match."""
    list1 = [1.12, 1.22, 1.32, 1.42]
    list2 = [1.12, 1.22, 1.32]
    list3 = [1.123, 2.123, 3.123, 4.123]
    list4 = [1.123, 2.123, 3.126, 4.123]
    list5 = [2.457, 9.528, 9.65, 4.182]
    list6 = [2.46, 9.53, 9.65, 4.18]
    assert not(_compare_list_float(list1, list2, 1))  # Fail different lengths
    assert not(_compare_list_float(list3, list4, 2))  # Fail different values
    assert _compare_list_float(list5, list6, 2)
# General
def test_simple_interest():
    """general.simple_interest: zero rate, zero principal, normal growth."""
    assert general.simple_interest(100, 0, 10) == 100.00
    assert general.simple_interest(0, 0.20, 5) == 0
    assert general.simple_interest(100, 0.10, 10) == 200.00
def test_compound_interest():
    """general.compound_interest: zero rate, and monthly compounding."""
    assert general.compound_interest(100, 0, 10) == 100.00
    assert round(general.compound_interest(100, 0.10, 10, 12), 2) == 270.70
def test_effective_interest():
    """general.effective_interest: effective annual rate for various nominal rates/periods."""
    assert general.effective_interest(0, 12) == 0
    assert round(general.effective_interest(0.05, 12), 6) == 0.051162
    assert round(general.effective_interest(0.0525, 4), 7) == 0.0535427
    assert round(general.effective_interest(1.25, 7), 6) == 2.158576
def test_loan_payment():
    """general.loan_payment: zero-rate, standard amortization, and down payment."""
    assert general.loan_payment(1000, 0, 1, 10) == 100.00
    assert round(general.loan_payment(100000, 0.10, 12, 60), 2) == 2124.70
    assert round(general.loan_payment(150000, 0.10, 12, 60, 50000), 2) == 2124.70
def test_equivalent_interest_rate():
    """general.equivalent_interest_rate: growth, steep growth, and decline cases."""
    assert round(general.equivalent_interest_rate(10000, 11000, 96), 7) == 0.0009933
    assert round(general.equivalent_interest_rate(1000, 10000, 5), 3) == 0.585
    assert round(general.equivalent_interest_rate(700, 300, 12), 4) == -0.0682
def test_loan_payment_schedule():
    """general.loan_payment_schedule: principal/interest/balance columns for three loans."""
    principal_payment1 = [162.55, 164.17, 165.82, 167.47, 169.15, 170.84]
    interest_payment1 = [10.0, 8.37, 6.73, 5.07, 3.40, 1.71]
    remaining_balance1 = [837.45, 673.28, 507.46, 339.99, 170.84, 0.00]
    principal_payment2 = [74.02, 74.39, 74.77, 75.14, 75.51, 75.89, 76.27]
    interest_payment2 = [2.63, 2.26, 1.89, 1.51, 1.14, 0.76, 0.38]
    remaining_balance2 = [451.98, 377.58, 302.82, 227.68, 152.16, 76.27, 0.0]
    principal_payment3 = [100.0, 100.0, 100.0, 100.0, 100.0]
    interest_payment3 = [0.0, 0.0, 0.0, 0.0, 0.0]
    remaining_balance3 = [400.0, 300.0, 200.0, 100.0, 0.0]
    assert _compare_list_float(general.loan_payment_schedule(1000, 0.12, 12, 6)['principal_payment'], principal_payment1, 2)
    assert _compare_list_float(general.loan_payment_schedule(1000, 0.12, 12, 6)['interest_payment'], interest_payment1, 2)
    assert _compare_list_float(general.loan_payment_schedule(1000, 0.12, 12, 6)['remaining_balance'], remaining_balance1, 2)
    assert _compare_list_float(general.loan_payment_schedule(526, 0.06, 12, 7)['principal_payment'], principal_payment2, 2)
    assert _compare_list_float(general.loan_payment_schedule(526, 0.06, 12, 7)['interest_payment'], interest_payment2, 2)
    assert _compare_list_float(general.loan_payment_schedule(526, 0.06, 12, 7)['remaining_balance'], remaining_balance2, 2)
    assert _compare_list_float(general.loan_payment_schedule(500, 0, 12, 5)['principal_payment'], principal_payment3, 2)
    assert _compare_list_float(general.loan_payment_schedule(500, 0, 12, 5)['interest_payment'], interest_payment3, 2)
    assert _compare_list_float(general.loan_payment_schedule(500, 0, 12, 5)['remaining_balance'], remaining_balance3, 2)
def test_number_periods_loan():
    """general.number_periods_loan: -1 sentinel when payment cannot amortize, else period count."""
    assert general.number_periods_loan(1000, 0.1, 100) == -1
    assert round(general.number_periods_loan(1000, 0.1, 200), 2) == 7.27
    assert round(general.number_periods_loan(7541, 0.06, 864), 2) == 12.73
def test_sum_product():
    """general.sum_product: None for no args / mismatched lengths, else elementwise product sum."""
    assert general.sum_product() is None
    assert general.sum_product([1, 2, 3, 4], [1, 2, 3, 4, 5]) is None
    assert general.sum_product([1, 2, 3, 4, 5], [1, 2, 3, 4, 5]) == 55
    assert round(general.sum_product([1.2, 2, 2.6, 4], [5.2, 1.7, 8.6, 9.4], [5.5, 2.6, 4.8, 7.5]), 2) == 432.49
def test_sum_squares():
    """general.sum_squares: zero, positive, sign-insensitive, and float inputs."""
    assert general.sum_squares([0]) == 0
    assert general.sum_squares([5, 2, 1, 3]) == 39
    assert general.sum_squares([-5, 2, -1, 3]) == 39
    assert round(general.sum_squares([1.1, 2.2, 3.3]), 2) == 16.94
def test_factorial():
    """general.factorial: base cases 0! and 1!, plus small and large values."""
    assert general.factorial(0) == 1
    assert general.factorial(1) == 1
    assert general.factorial(10) == 3628800
    assert general.factorial(69) == \
        171122452428141311372468338881272839092270544893520369393648040923257279754140647424000000000000000
def test_sum_diff_squares():
    """general.sum_diff_squares: empty lists, mixed signs, and a negative result."""
    assert general.sum_diff_squares([], []) == 0
    assert general.sum_diff_squares([5, 2, 3], [3, -1, 4]) == 12
    assert general.sum_diff_squares([5, 2, 3], [2, 1, 0]) == 33
    assert round(general.sum_diff_squares([1.1, 2.2, 3.3], [4.4, 5.5, 6.6]), 2) == -76.23
# Time Value
def test_future_value_series():
    """time_value.future_value_series: zero rate, monthly compounding, and annuity-due flag."""
    assert time_value.future_value_series(100, 0, 10) == 1000.00
    assert round(time_value.future_value_series(100, 0.05, 10, 12), 2) == 15528.23
    assert round(time_value.future_value_series(100, 0.05, 10, 12, True), 2) == 15592.93
def test_present_value():
    """time_value.present_value: sign convention is negative (an outflow)."""
    assert time_value.present_value(100, 0, 12, 0, False) == -1200.00
    assert round(time_value.present_value(500, 0.06, 48, 9000, False), 2) == -8374.00
    assert round(time_value.present_value(200, 0.07, 36, 1000, True), 2) == -2877.07
def test_discounted_cash_flow():
    """time_value.discounted_cash_flow: empty flows and a 5-period NPV."""
    assert time_value.discounted_cash_flow([], 0) == 0.00
    assert round(time_value.discounted_cash_flow([1000, 1000, 4000, 4000, 6000], 0.05), 2) == 13306.73
def test_modified_internal_rate_of_return():
    """time_value.modified_internal_rate_of_return: None unless flows mix signs, else MIRR."""
    assert time_value.modified_internal_rate_of_return([], 0.1, 0.1) is None
    assert time_value.modified_internal_rate_of_return([10, 10], 0.1, 0.1) is None
    assert time_value.modified_internal_rate_of_return([-10, -10], 0.1, 0.1) is None
    assert round(time_value.modified_internal_rate_of_return([-120000, 39000, 30000, 21000, 37000], 0.1, 0.12), 3) == 0.063
    assert round(time_value.modified_internal_rate_of_return([24, -96, -52, 27, -17, 15, -2, 0, 0], 0.05, 0.07), 3) == -0.056
def test_future_value_schedule():
    """time_value.future_value_schedule: varying, zero, and negative per-period rates."""
    assert round(time_value.future_value_schedule(1000, [0.02, 0.03, 0.04, 0.05]), 2) == 1147.26
    assert round(time_value.future_value_schedule(123.45, [0, 0, 0, 0, 0, 0, 0, 0, 0]), 2) == 123.45
    assert round(time_value.future_value_schedule(15973, [0.02, 0.09, -0.08, 0.2]), 2) == 19605.69
# Conversion
def test_dollar_decimal():
    """conversion.dollar_decimal: fractional dollar notation to decimal for several bases."""
    assert round(conversion.dollar_decimal(1.2, 16), 2) == 2.25
    assert round(conversion.dollar_decimal(9000.4123, 200), 4) == 9002.0615
    assert round(conversion.dollar_decimal(703.238, 23), 5) == 704.03478
def test_dollar_fractional():
    """conversion.dollar_fractional: decimal dollars to fractional notation for several bases."""
    assert (round(conversion.dollar_fractional(1.125, 16), 2)) == 1.02
    assert (round(conversion.dollar_fractional(1.125, 32), 2)) == 1.04
    assert (round(conversion.dollar_fractional(738.526, 29), 5)) == 738.15254
def test_percent_to_basis():
    """conversion.percent_to_basis: percent fraction to basis points, including negatives."""
    assert conversion.percent_to_basis(0) == 0
    assert conversion.percent_to_basis(0.01) == 100
    assert conversion.percent_to_basis(-0.005) == -50
def test_increase_to_basis():
    """conversion.increase_to_basis: basis-point change between two values, including a drop."""
    assert conversion.increase_to_basis(50, 50) == 0
    assert round(conversion.increase_to_basis(1000, 1050), 2) == 500
    assert round(conversion.increase_to_basis(200, 150), 2) == -2500
def test_basis_to_percent():
    """conversion.basis_to_percent: basis points back to a percent fraction."""
    assert conversion.basis_to_percent(0) == 0
    assert conversion.basis_to_percent(100) == 0.01
    assert conversion.basis_to_percent(-50) == -0.005
# Depreciation
def test_straight_line_depreciation():
    """depreciation.straight_line_depreciation: (cost - salvage) / life."""
    assert round(depreciation.straight_line_depreciation(2000, 500, 5)) == 300
    assert depreciation.straight_line_depreciation(30000, 7500, 10) == 2250
def test_sum_of_years_depreciation():
    """depreciation.sum_of_years_depreciation: asset-value and per-period columns for three assets."""
    asset_value1 = [1000.0, 673.33, 412.00, 216.00, 85.33, 20.00]
    depreciation1 = [0.0, 326.67, 261.33, 196.00, 130.67, 65.33]
    asset_value2 = [12345, 9339.00, 6762.43, 4615.29, 2897.57, 1609.29, 750.43, 321.00]
    depreciation2 = [0.0, 3006.00, 2576.57, 2147.14, 1717.71, 1288.29, 858.86, 429.43]
    asset_value3 = [100.0, 0.0]
    depreciation3 = [0.0, 100.0]
    assert _compare_list_float(depreciation.sum_of_years_depreciation(1000, 20, 5)['asset_value'], asset_value1, 2)
    assert _compare_list_float(depreciation.sum_of_years_depreciation(1000, 20, 5)['periodic_depreciation'], depreciation1, 2)
    assert _compare_list_float(depreciation.sum_of_years_depreciation(12345, 321, 7)['asset_value'], asset_value2, 2)
    assert _compare_list_float(
        depreciation.sum_of_years_depreciation(12345, 321, 7)['periodic_depreciation'],
        depreciation2,
        2
    )
    assert _compare_list_float(depreciation.sum_of_years_depreciation(100, 0, 1)['asset_value'], asset_value3, 2)
    assert _compare_list_float(depreciation.sum_of_years_depreciation(100, 0, 1)['periodic_depreciation'], depreciation3, 2)
def test_double_declining_balance_depreciation():
    """depreciation.double_declining_balance_depreciation: salvage floor, custom factor,
    salvage above cost (no depreciation), and single-period edge cases."""
    asset_value1 = [10000.0, 6000.0, 3600.0, 2160.0, 2000.0, 2000.0]
    depreciation1 = [0.0, 4000.0, 2400.0, 1440.0, 160.0, 0.0]
    asset_value2 = [20000.0, 10000.0, 5000.0, 2500.0, 1250.00, 1000.0, 1000.0]
    depreciation2 = [0.0, 10000.0, 5000.0, 2500.0, 1250.0, 250.0, 0.0]
    asset_value3 = [100.0, 100.0, 100.0]
    depreciation3 = [0.0, 0.0, 0.0]
    asset_value4 = [100.0, 0.0]
    depreciation4 = [0.0, 100.0]
    assert _compare_list_float(
        depreciation.double_declining_balance_depreciation(10000, 2000, 5)['asset_value'],
        asset_value1,
        2
    )
    assert _compare_list_float(
        depreciation.double_declining_balance_depreciation(10000, 2000, 5)['periodic_depreciation'],
        depreciation1,
        2
    )
    assert _compare_list_float(
        depreciation.double_declining_balance_depreciation(20000, 1000, 6, 3)['asset_value'],
        asset_value2,
        2
    )
    assert _compare_list_float(
        depreciation.double_declining_balance_depreciation(20000, 1000, 6, 3)['periodic_depreciation'],
        depreciation2,
        2
    )
    assert _compare_list_float(
        depreciation.double_declining_balance_depreciation(100, 200, 2)['asset_value'],
        asset_value3,
        2
    )
    assert _compare_list_float(
        depreciation.double_declining_balance_depreciation(100, 200, 2)['periodic_depreciation'],
        depreciation3,
        2
    )
    assert _compare_list_float(
        depreciation.double_declining_balance_depreciation(100, 0, 1)['asset_value'],
        asset_value4,
        2
    )
    assert _compare_list_float(
        depreciation.double_declining_balance_depreciation(100, 0, 1)['periodic_depreciation'],
        depreciation4,
        2
    )
def test_units_of_production_depreciation():
    """depreciation.units_of_production_depreciation: per-unit charge times units produced."""
    assert depreciation.units_of_production_depreciation(25000, 0, 100, 4) == 1000.0
    assert depreciation.units_of_production_depreciation(500000, 20000, 240000, 10000) == 20000.0
def test_declining_balance():
    """depreciation.declining_balance: value/charge schedules, with and without a month offset."""
    asset_value1 = [1000.0, 681.0, 463.76, 315.82, 215.07, 146.47, 99.74]
    depreciation1 = [0.0, 319.0, 217.24, 147.94, 100.75, 68.61, 46.72]
    asset_value2 = [500.0, 448.12, 262.15, 153.36, 89.72, 52.48, 30.70, 21.15]
    depreciation2 = [0.0, 51.88, 185.97, 108.79, 63.64, 37.23, 21.78, 9.56]
    asset_value3 = [900.0, 639.08, 321.45, 161.69, 81.33, 40.91, 32.44]
    depreciation3 = [0.0, 260.93, 317.62, 159.76, 80.36, 40.42, 8.47]
    assert _compare_list_float(depreciation.declining_balance(1000, 100, 6)['asset_value'], asset_value1, 2)
    assert _compare_list_float(depreciation.declining_balance(1000, 100, 6)['periodic_depreciation'], depreciation1, 2)
    assert _compare_list_float(depreciation.declining_balance(500, 20, 6, 3)['asset_value'], asset_value2, 2)
    assert _compare_list_float(depreciation.declining_balance(500, 20, 6, 3)['periodic_depreciation'], depreciation2, 2)
    assert _compare_list_float(depreciation.declining_balance(900, 29, 5, 7)['asset_value'], asset_value3, 2)
    assert _compare_list_float(depreciation.declining_balance(900, 29, 5, 7)['periodic_depreciation'], depreciation3, 2)
# Securities
def test_bond_coupon_rate():
    """securities.bond_coupon_rate: zero coupon, annual, and multi-payment cases."""
    assert securities.bond_coupon_rate(1000, 0) == 0.00
    assert securities.bond_coupon_rate(1000, 10) == 0.01
    assert securities.bond_coupon_rate(1000, 25, 5) == 0.125
def norberts_gambit():
    """Assertions for securities.norberts_gambit conversion values and gains."""
    assert securities.norberts_gambit(0, 0, 0)['base_value'] == 0
    assert securities.norberts_gambit(10, 50, 45)['base_value'] == 450
    assert securities.norberts_gambit(10, 50, 45)['base_gain'] == -50
    assert securities.norberts_gambit(10, 10, 9, 1.1)['base_value'] == 99
    assert securities.norberts_gambit(10, 10, 9, 1.1)['base_gain'] == -1
    assert securities.norberts_gambit(10, 10, 9, 1.1)['converted_value'] == 90
    assert round(securities.norberts_gambit(10, 10, 9, 1.1)['converted_gain'], 2) == 90.91
    assert securities.norberts_gambit(20, 15, 10, 1.5, 7.5, 5)['base_value'] == 292.5
    assert securities.norberts_gambit(20, 15, 10, 1.5, 7.5, 5)['base_gain'] == -15
    assert securities.norberts_gambit(20, 15, 10, 1.5, 7.5, 5)['converted_value'] == 195
    assert securities.norberts_gambit(20, 15, 10, 1.5, 7.5, 5)['converted_gain'] == -10


def test_norberts_gambit():
    """pytest wrapper: the helper above lacks the test_ prefix, so pytest
    discovery silently skipped its assertions; this makes them run."""
    norberts_gambit()
def alpha():
    """Assertions for securities.alpha (actual minus expected return)."""
    assert securities.alpha(0, 0) == 0
    assert securities.alpha(40, 7.5) == 32.5
    assert securities.alpha(4, 6.5) == -2.5


def test_alpha():
    """pytest wrapper: the helper above lacks the test_ prefix, so pytest
    discovery silently skipped its assertions; this makes them run."""
    alpha()
def expected_rate_of_return():
    """Assertions for securities.expected_rate_of_return (CAPM-style)."""
    assert securities.expected_rate_of_return(0, 0, 0) == 0
    assert securities.expected_rate_of_return(0.02, 1.5, 0.02) == 0.05
    assert securities.expected_rate_of_return(0.10, 1.1, 0.20) == 0.32


def test_expected_rate_of_return():
    """pytest wrapper: the helper above lacks the test_ prefix, so pytest
    discovery silently skipped its assertions; this makes them run."""
    expected_rate_of_return()
def test_adjusted_cost_base():
    """securities.adjusted_cost_base: ACB tracking through a buy/sell sequence with fees."""
    test_acb = securities.adjusted_cost_base()
    test_acb.buy(10, 10.00, 5.00)
    assert round(test_acb.get_acb(), 2) == 10.50
    assert round(test_acb.sell(5, 15.00, 5.00), 2) == 17.5
    assert round(test_acb.get_acb(), 2) == 10.50
    test_acb.buy(5, 20.00, 5.00)
    assert round(test_acb.get_acb(), 2) == 15.75
    assert round(test_acb.sell(10, 10.00, 5.00), 2) == -62.50
    assert round(test_acb.get_acb(), 2) == 0.00
| true |
bcf1a981f51dde8e4cab65bf6906b8c1fe3a4d6e | Python | DexiongYung/NLPNoiseModel | /Utilities/Json.py | UTF-8 | 311 | 2.828125 | 3 | [] | no_license | from collections import OrderedDict
import json
def load_json(jsonpath: str) -> dict:
    """Read *jsonpath* and return its contents with key order preserved."""
    with open(jsonpath) as handle:
        raw = handle.read()
    return json.loads(raw, object_pairs_hook=OrderedDict)
def save_json(jsonpath: str, content):
    """Serialize *content* as JSON into *jsonpath*, overwriting any existing file."""
    serialized = json.dumps(content)
    with open(jsonpath, 'w') as handle:
        handle.write(serialized)
| true |
822fa085209ee6e52c9e32dcd29fc70f80302d70 | Python | ryukinix/programming-techniques-ufc | /src/Pratica_9/ex1.py | UTF-8 | 666 | 3.375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © 2017 Manoel Vilela
#
# @project: Prática 9 - Python
# @author: Manoel Vilela
# @email: manoel_vilela@engineer.com
#
from empregados import Empregados
def main():
    """Create three employees, describe them, apply a 12% raise, describe again."""
    empregados = [
        Empregados('Wendley', 'Silva', 8000),
        Empregados('Silvio', 'Santos', 1000000),
        Empregados('Bolsista', 'IC', -400),
    ]
    print(":: Empregados")
    for empregado in empregados:
        empregado.descreva()
    aumento = 0.12
    print(":: Aumento de salários! WOW! +{}%".format(aumento*100))
    for empregado in empregados:
        empregado.aumentar_salario(aumento)
        empregado.descreva()
# Script entry point.
if __name__ == '__main__':
    main()
| true |
7a46791326bac025c86fefed530bbc5fd92f166e | Python | boconlonton/python-deep-dive | /part-3/4-specialized_dictionary/exercise-3.py | UTF-8 | 1,027 | 3.265625 | 3 | [
"MIT"
] | permissive | """
Write a function that has a single argument (env name) and returns the "combined" dictionary
that merge 2 dictionaries together with the environment specific settings overriding any common settings already defined
"""
import json
from contextlib import ExitStack
from collections import ChainMap
# Declare setting files' name; 'common.json' holds the shared base layer.
SETTINGS_FILES = ['common.json', 'dev.json', 'prod.json']
# Read every settings file into SETTINGS keyed by file stem ('common', 'dev',
# 'prod'). ExitStack keeps all files open until parsing finishes, then closes
# them together.
with ExitStack() as stack:
    SETTINGS = {
        f_name.split('.')[0]: json.load(stack.enter_context(open(f_name)))
        for f_name in SETTINGS_FILES
    }
# Choose which setting is the base layer that environments override.
BASE_SETTINGS = 'common'
def chain_recursive_fred(overrided, base):
    """Recursively chain *overrided* over *base*, descending into dict values
    present in both.

    Returns a ChainMap where keys in *overrided* win; nested dicts shared by
    both mappings are merged the same way.

    Bug fix: the original wrote merged sub-chains via ``chain[k] = ...`` on a
    ChainMap whose first map was *overrided* itself, mutating the caller's
    dict (and the mapping being iterated). A fresh empty dict is now placed
    at the front of the chain to absorb those writes, so both inputs are left
    untouched while lookups behave identically.
    """
    merged = ChainMap({}, overrided, base)
    for key, value in overrided.items():
        if isinstance(value, dict) and key in base:
            merged[key] = chain_recursive_fred(value, base[key])
    return merged
def settings_for_env(env):
    """Return the merged settings for *env*, layered over the common base."""
    return chain_recursive_fred(SETTINGS[env], SETTINGS[BASE_SETTINGS])
# Demo: resolve the 'dev' environment and show the merged mapping.
dev = settings_for_env('dev')
print(dev)
| true |
68c89f991398db2a153c52f21ce0687f14cea59e | Python | krishnakadiyala/PythonExercisesFromKirk | /Week2.py | UTF-8 | 1,365 | 3.578125 | 4 | [] | no_license | """f = open("new.txt")
output = f.read()
print(type(output))
print(output)
f.close()
Another way to read a file - using a context manager form
with open("new.txt") as f:
output = f.readlines()
print(type(output))
Python automatically closes the file, we don't have to explicitly close the file.
Exercise2:
my_list = ['192.124.1.2', '145.62.71.98']
print(len(my_list))
my_list.append('12.143.121.1')
my_list.extend(['121.19.90.1', '192.123.45.6'])
print(my_list[0])
my_list.pop(0)
my_list.pop(len(my_list)-1)
print(my_list)
my_list[0] = '2.2.2.2'
print(my_list)
Exercise 3:
with open("show_arp.txt") as f:
output = f.readlines()
output = output[1:]
#from pprint import pprint
#pprint(output)
output.sort()
my_entries = output[:3]
print(my_entries)
my_entries = "\n".join(my_entries)
print(my_entries)
with open("new.txt", "wt") as f:
f.write(my_entries)
Exercise 4:s
with open("show_ip_int_brief.txt") as f:
output = f.readlines()
entry = output[5].split()
ip = entry[0]
int = entry[1]
my_tuple = (int, ip)
print(my_tuple)
Exercise 5:
"""
# Extract the local AS number and the first BGP peer address from a saved
# "show ip bgp summary" capture.
with open("show_ip_bgp_summ.txt") as capture:
    lines = capture.read().splitlines()
header, footer = lines[0], lines[-1]
asn = header.split()[-1]          # last field of the first line
peer_ip = footer.split()[0]       # first field of the last line
print("Local AS number: {}".format(asn))
print("BGP peer IP address: {}".format(peer_ip))
| true |
35e8790fc68171b4f1c5098617328f62b381dc0f | Python | brianchu5/abcsmc | /particle.py | UTF-8 | 4,738 | 2.546875 | 3 | [] | no_license | import numpy as np
from operator import attrgetter
from numpy import random as rnd
from copy import deepcopy
class perturbationKernel:
    """Symmetric uniform perturbation kernel.

    Given a parameter range [lower, upper] of width w, the kernel spans
    [-w/2, +w/2]. The bound computation previously duplicated in __init__
    and reinitialize now lives only in reinitialize.
    """
    def __init__(self, lower, upper):
        # Delegate so the bound computation exists in exactly one place.
        self.reinitialize(lower, upper)

    def reinitialize(self, lower, upper):
        """Recompute the kernel bounds from a new parameter range."""
        scale = upper - lower
        self.lower = -scale/2.0
        self.upper = scale/2.0
class populations:
    """Holds the current and previous ABC-SMC particle populations, the
    per-parameter perturbation kernels, and normalized sampling weights."""
    def __init__(self):
        # particles: population under construction; previous_particles:
        # the last accepted population that new particles are proposed from.
        self.particles =[]
        self.previous_particles = []
        # One kernel per model parameter (the model has 4 parameters).
        self.kernels = [perturbationKernel(0,0),perturbationKernel(0,0),perturbationKernel(0,0),perturbationKernel(0,0)]
        self.weights = []
    def getParameterList(self,index):
        """Values of parameter *index* across the previous population."""
        return [particle.parameters[index] for particle in self.previous_particles]
    def addToPopulation(self,particle):
        """Append an accepted particle to the population under construction."""
        self.particles.append(particle)
    def postprocessfromprior(self):
        """Promote the first (prior-sampled) population without reweighting."""
        self.previous_particles = deepcopy(self.particles)
    def postprocess(self):
        """Promote the current population: snapshot it, refresh the kernels
        from its parameter ranges, normalize weights, reset the working list."""
        self.previous_particles = deepcopy(self.particles)
        self.computeKernel(self.kernels)
        self.weightParticle()
        self.weights = [particle.weight for particle in self.previous_particles]
        self.particles = []
    def computeKernel(self,kernels):
        # NOTE(review): the *kernels* argument is ignored -- the loop always
        # rescales self.kernels from the previous population's min/max.
        ind = 0
        for kernel in self.kernels:
            kernel.reinitialize(self.getMinParameter(ind),self.getMaxParameter(ind))
            ind+=1
    def pickParticle(self):
        """Sample one previous particle according to the normalized weights."""
        return np.random.choice(self.previous_particles,p=self.weights)
    def weightParticle(self):
        """Normalize previous-population weights in place so they sum to one."""
        totalweight = 0
        totalweight = sum((i.weight for i in self.previous_particles))
        for particle in self.previous_particles:
            particle.weight=particle.weight/totalweight
    def assignWeight(self,cparticle):
        """Set *cparticle*'s importance weight to the inverse of its mixture
        proposal probability under the previous population and kernels."""
        num = 1
        pweights=[]
        for particle in self.previous_particles:
            pweight = particle.weight
            for i in range(4):
                # Support of the uniform kernel centred on this particle.
                scale2 = particle.parameters[i] + self.kernels[i].upper
                scale1 = particle.parameters[i] + self.kernels[i].lower
                prob = pdensity(scale1,scale2,cparticle.parameters[i])
                pweight = pweight * prob
            # NOTE(review): pweight already starts at particle.weight, so the
            # weight enters this product twice -- confirm that is intended.
            pweights.append(particle.weight*pweight)
        denom = sum(pweights)
        cparticle.weight = num/denom
    def getMaxParameter(self,index):
        """Largest value of parameter *index* in the previous population."""
        return max(p.parameters[index] for p in self.previous_particles)
    def getMinParameter(self,index):
        """Smallest value of parameter *index* in the previous population."""
        return min(p.parameters[index] for p in self.previous_particles)
class particle:
    """One ABC-SMC particle: four parameters drawn from fixed uniform priors,
    plus an importance weight."""
    def __init__(self):
        # Uniform prior [lower, upper] for each of the four parameters.
        self.priors = [[0.0,10.0],[0.0,2.0],[50.0,150.0],[10.0,30.0]]
        # Initial parameters are drawn directly from the priors.
        self.parameters = [sampleuniform(1,self.priors[0][0],self.priors[0][1]),sampleuniform(1,self.priors[1][0],self.priors[1][1]),sampleuniform(1,self.priors[2][0],self.priors[2][1]),sampleuniform(1,self.priors[3][0],self.priors[3][1])]
        # Initial weight; 1/2 matches a two-particle population -- TODO confirm.
        self.weight = 1.0/2.0
    def perturbAll(self,kernel):
        """Perturb every parameter using its matching kernel (*kernel* is a
        list of 4 perturbationKernel objects)."""
        self.parameters = [self.perturbation(self.parameters[ind],self.priors[ind],kernel[ind]) for ind in range(4)]
    def perturbation(self,parameter,prior,kernel):
        """Move *parameter* by a uniform step drawn from *kernel*, truncating
        the step range at the prior bounds so the result stays inside the
        prior. When truncated, the step's sign is chosen with probability
        proportional to each side's remaining width."""
        lflag = parameter + kernel.lower < prior[0]
        uflag = parameter + kernel.upper > prior[1]
        lower = kernel.lower
        upper = kernel.upper
        if lflag == True:
            # Clip the downward step so it cannot undershoot the prior.
            lower = -(parameter - prior[0])
        if uflag == True:
            # Clip the upward step so it cannot overshoot the prior.
            upper = prior[1] - parameter
        if uflag == False and lflag == False:
            delta = sampleuniform(1,lower,upper)
        else:
            # Pick a side first (weighted by width), then sample within it.
            positive = rnd.uniform(0,1) > abs(lower)/(abs(lower)+upper)
            if positive == True:
                delta = sampleuniform(1,0,upper)
            else:
                delta = sampleuniform(1,lower,0)
        return parameter + delta
def pdensity(scale1,scale2,parameter):
    """Uniform density on [scale1, scale2]: 1/width inside, 0 outside."""
    inside = scale1 <= parameter <= scale2
    if not inside:
        return 0.0
    return 1.0/float(scale2-scale1)
def sampleuniform(scale,lb,ub):
    """Return *scale* times one uniform draw from [lb, ub)."""
    draw = np.random.uniform(lb,ub)
    return scale*draw
def main():
    """Smoke test: build a two-particle population, promote it, perturb one.

    Fix: the original used Python-2 ``print`` statements, which are a
    SyntaxError on Python 3; the single-argument call form used here is
    valid on both Python 2 and 3.
    """
    pop = populations()
    p1 = particle()
    p2 = particle()
    pop.addToPopulation(p1)
    pop.addToPopulation(p2)
    print(pop.particles[0].parameters[0])
    pop.postprocess()
    print(len(pop.previous_particles))
    p1.perturbAll(pop.kernels)
| true |
e66380d8b529ff5ccea2e3bf3845be66b0b16a5a | Python | mrcszk/BOIKWD | /Lab 02/zad1.py | UTF-8 | 428 | 2.875 | 3 | [] | no_license | from saport.simplex.model import Model
model = Model("zad1")
x1 = model.create_variable("x1")
x2 = model.create_variable("x2")
x3 = model.create_variable("x3")
model.add_constraint(x1 + x2 + x3 <= 30)
model.add_constraint(x1 + 2*x2 + x3 >= 10)
model.add_constraint(0 * x1 + 2*x2 + x3 <= 20)
model.maximize(2*x1 + x2 + 3*x3)
print("Before solving:")
print(model)
solution = model.solve()
print("Solution: ")
print(solution)
| true |
7d38ebaf65635b01508b077c8cb302f858d17667 | Python | nikhilgk/halo-ml | /pyspark_feature_importance.py | UTF-8 | 3,119 | 2.5625 | 3 | [] | no_license | from pyspark import SparkConf, SparkContext
from sklearn.tree import DecisionTreeRegressor
from sklearn.base import copy
import sys
import pandas as pd
import numpy as np
import json
# CLI: the first argument is the input hlist CSV.
input_file = sys.argv[1]
# Output name derived by slicing off a fixed 6-char prefix and 18-char
# suffix -- assumes a specific hlist naming scheme; TODO confirm.
output_file = input_file[6:-18]+'_importances.json'
def getSparkContext():
    """Build and return the SparkContext used for this job."""
    config = SparkConf()
    config.setMaster('spark://master:7077')
    config.setAppName("Feature Importance")
    config.set('spark.akka.frameSize', "50")
    config.set("spark.executor.memory", "2g")
    return SparkContext(conf=config)
### Preprocessing Function
def preprocess (hlist_filename):
    """Load a halo-catalog CSV and split it into training features and labels.

    The label is the halo shape column 'c_to_a(44)'; identifier and
    axis-orientation columns are dropped from the features.
    Returns dict(train_data=2-D float array, train_labels=1-D float array).
    """
    ### Import data
    dataframe=pd.read_csv(hlist_filename,sep=' ')
    # Drop identifier / bookkeeping / shape-orientation columns that must not
    # leak into the feature set.
    dataframe=dataframe.drop(['id(1)','desc_scale(2)','num_prog(4)','phantom(8)',\
                             'Unnamed: 62','Orig_halo_ID(30)','A[x](45)',\
                             'b_to_a(43)', 'c_to_a(500c)(49)' ,'A[z](500c)(52)' ,\
                             'b_to_a(500c)(48)','A[z](47)' ,'A[y](46)',\
                             'A[x](500c)(50)','#scale(0)','A[y](500c)(51)'\
                              ],axis=1)
    ### Switch columns so shape is last as dependent variable
    cols = dataframe.columns.tolist()
    switch_column= cols[-1]
    cols[cols.index('c_to_a(44)')]=switch_column
    cols[-1]='c_to_a(44)'
    dataframe=dataframe[cols]
    # Split the matrix: everything but the last column is a feature.
    train_data,train_labels=np.hsplit(np.array(dataframe),[-1])
    train_labels=np.ravel(train_labels).astype(float,casting='unsafe')
    dataframe=dataframe.drop(['c_to_a(44)'],axis=1)
    return dict(train_data=train_data, train_labels=train_labels)
###Extracting good_columns
def extract(sample):
    """Return the feature column names of *sample* as a numpy string array.

    Applies the same drop list as preprocess(), plus the label column
    'c_to_a(44)' itself, so the names line up with the trained features.
    """
    dataframe=pd.read_csv(sample,sep=' ')
    dataframe=dataframe.drop(['id(1)','desc_scale(2)','num_prog(4)','phantom(8)',\
                             'Unnamed: 62','Orig_halo_ID(30)','A[x](45)',\
                             'b_to_a(43)','c_to_a(44)', 'c_to_a(500c)(49)' ,'A[z](500c)(52)' ,\
                             'b_to_a(500c)(48)','A[z](47)' ,'A[y](46)',\
                             'A[x](500c)(50)','#scale(0)','A[y](500c)(51)'\
                              ],axis=1)
    good_columns=np.array(dataframe.columns.values.tolist())
    return good_columns
###Training Function
def train(data):
    """Fit a DecisionTreeRegressor on the prepared features/labels dict."""
    features = data['train_data']
    labels = data['train_labels']
    return DecisionTreeRegressor().fit(features, labels)
###Finishing Function
def finish(model):
    """Return the fitted model's feature-importance vector."""
    return model.feature_importances_
def main():
    """Drive the job: preprocess locally, train on Spark, rank features and
    dump the (name, weight) pairs to *output_file* as JSON.

    Fixes: Python-2 ``print`` statements replaced with the call form (valid
    on both Python 2 and 3), and the zip of features/weights is materialized
    as a list so json.dumps receives a sequence on Python 3 as well.
    """
    ### Loading data from sources
    print('before preprocess')
    data = [preprocess(input_file)]
    print('after preprocess')
    # get spark context
    sc = getSparkContext()
    print('before parallelize')
    ### Parallelize compute
    forest = sc.parallelize(data)
    print('after parallelize')
    map_reduce = forest.map(train).map(finish).collect()
    print('ml training')
    good_columns = extract(input_file)
    print(good_columns)
    ### Calculate feature weights, sorted by descending importance.
    feature_weights = np.ravel(map_reduce[0])
    sorted_indexes = np.argsort(feature_weights)[::-1]
    sorted_features = good_columns[sorted_indexes]
    sorted_feature_weights = feature_weights[sorted_indexes]
    # NOTE(review): the tuple elements are numpy scalars; json.dumps may
    # reject np.float64 values -- confirm serialization on real data.
    zipped_vals = list(zip(sorted_features, sorted_feature_weights))
    print(zipped_vals)
    with open(output_file, 'w') as f:
        f.write(json.dumps(zipped_vals))
if __name__=='__main__':
main()
| true |
cc0a7562c80f992488a5ba869e79bb282543d9fb | Python | j32u4ukh/GrandResolution | /loss/__init__.py | UTF-8 | 11,669 | 3.03125 | 3 | [
"MIT"
] | permissive | import cv2
import numpy as np
import tensorflow as tf
import torch
from tensorflow.math import (
greater,
add,
subtract,
multiply,
divide,
square,
pow as tf_pow,
reduce_mean as tf_mean,
reduce_std as tf_std
)
from utils import (
showImage
)
from utils.math import (
log,
multiOperation
)
class PyTorchLoss:
    """SSIM losses for torch tensors shaped [C, H, W] (ssim/ssim3) or
    [N, C, H, W] (ssim4)."""

    @staticmethod
    def ssim(x, y, is_normalized=False):
        """Global (single-window) SSIM of two tensors, clamped to [-1, 1].

        `is_normalized` selects the dynamic range: 1.0 for [0, 1] inputs,
        255.0 for raw 8-bit images.
        """
        peak = 1.0 if is_normalized else 255.0
        c1 = np.power(0.01 * peak, 2.0)
        c2 = np.power(0.03 * peak, 2.0)
        c3 = c2 / 2.0
        mean_x, mean_y = x.mean(), y.mean()
        sd_x, sd_y = x.std(), y.std()
        cov_xy = ((x - mean_x) * (y - mean_y)).mean()
        luminance = (2.0 * mean_x * mean_y + c1) / (mean_x ** 2.0 + mean_y ** 2.0 + c1)
        contrast = (2.0 * sd_x * sd_y + c2) / (sd_x ** 2.0 + sd_y ** 2.0 + c2)
        structure = (cov_xy + c3) / (sd_x * sd_y + c3)
        return torch.clamp(luminance * contrast * structure, -1.0, 1.0)

    @staticmethod
    def ssim3(x, y, is_normalized=True):
        """Average per-channel SSIM of a [3, H, W] tensor pair."""
        score = 0.0
        for channel_x, channel_y in zip(torch.split(x, 1, dim=0), torch.split(y, 1, dim=0)):
            score = score + PyTorchLoss.ssim(channel_x, channel_y, is_normalized)
        return score / 3.0

    @staticmethod
    def ssim4(x, y, is_normalized=True):
        """Average ssim3 over a batch of [N, 3, H, W] image pairs."""
        per_image = [PyTorchLoss.ssim3(a, b, is_normalized) for a, b in zip(x, y)]
        return sum(per_image) / len(per_image)
# psnr: 其值不能很好地反映人眼主觀感受
def psnr(y_label, y_pred):
    """
    PSNR is Peek Signal to Noise Ratio, which is similar to mean squared error.
    It can be calculated as
    PSNR = 20 * log10(MAXp) - 10 * log10(MSE)
    When providing an unscaled input, MAXp = 255. Therefore 20 * log10(255)== 48.1308036087.
    However, since we are scaling our input, MAXp = 1. Therefore 20 * log10(1) = 0.
    Thus we remove that component completely and only compute the remaining MSE component.
    """
    # MSE of the prediction, then -10 * log10(MSE).
    mse = tf_mean(square(subtract(y_label, y_pred)))
    return multiply(-10., log(mse, 10.))
def ssim(x, y, is_normalized=False):
    """Global SSIM of two single-channel numpy arrays, clipped to [-1, 1].

    `is_normalized` selects the dynamic range: 1.0 for [0, 1] inputs,
    255.0 for raw 8-bit images.
    """
    peak = 1.0 if is_normalized else 255.0
    c1 = np.power(0.01 * peak, 2)
    c2 = np.power(0.03 * peak, 2)
    c3 = c2 / 2
    mu_x, mu_y = x.mean(), y.mean()
    sd_x, sd_y = x.std(), y.std()
    cov_xy = ((x - mu_x) * (y - mu_y)).mean()
    luminance = (2 * mu_x * mu_y + c1) / (np.power(mu_x, 2) + np.power(mu_y, 2) + c1)
    contrast = (2 * sd_x * sd_y + c2) / (np.power(sd_x, 2) + np.power(sd_y, 2) + c2)
    structure = (cov_xy + c3) / (sd_x * sd_y + c3)
    return np.clip(luminance * contrast * structure, -1.0, 1.0)
def tf_ssim(x, y, is_normalized=False):
    """
    k1 = 0.01
    k2 = 0.03
    L = 1.0 if is_normalized else 255.0
    c1 = np.power(k1 * L, 2)
    c2 = np.power(k2 * L, 2)
    c3 = c2 / 2
    """
    # TensorFlow (graph-mode) port of ssim(); the bare triple-quoted strings
    # throughout this function quote the equivalent numpy code for reference.
    k1 = 0.01
    k2 = 0.03
    L = 1.0 if is_normalized else 255.0
    c1 = tf_pow(multiply(k1, L), 2.0)
    c2 = tf_pow(multiply(k2, L), 2.0)
    c3 = divide(c2, 2.0)
    # if type(x) is np.ndarray:
    #     x = tf.convert_to_tensor(x, dtype=tf.float32)
    # if type(y) is np.ndarray:
    #     y = tf.convert_to_tensor(y, dtype=tf.float32)
    """
    ux = x.mean()
    uy = y.mean()
    """
    ux = tf_mean(x)
    uy = tf_mean(y)
    """
    std_x = x.std()
    std_y = y.std()
    """
    std_x = tf_std(x)
    std_y = tf_std(y)
    """
    xy = (x - ux) * (y - uy)
    std_xy = xy.mean()
    """
    xy = multiply(subtract(x, ux), subtract(y, uy))
    std_xy = tf_mean(xy)
    """
    l_xy = (2 * ux * uy + c1) / (np.power(ux, 2) + np.power(uy, 2) + c1)
    """
    l_son = add(multiOperation(multiply, 2.0, ux, uy), c1)
    l_mom = multiOperation(add, tf_pow(ux, 2.0), tf_pow(uy, 2.0), c1)
    l_xy = divide(l_son, l_mom)
    """
    c_xy = (2 * std_x * std_y + c2) / (np.power(std_x, 2) + np.power(std_y, 2) + c2)
    """
    c_son = add(multiOperation(multiply, 2.0, std_x, std_y), c2)
    c_mom = multiOperation(add, tf_pow(std_x, 2.0), tf_pow(std_y, 2.0), c2)
    c_xy = divide(c_son, c_mom)
    """
    s_xy = (std_xy + c3) / (std_x * std_y + c3)
    """
    s_son = add(std_xy, c3)
    s_mom = add(multiply(std_x, std_y), c3)
    s_xy = divide(s_son, s_mom)
    one = tf.constant(1.0)
    _ssim = multiOperation(multiply, l_xy, c_xy, s_xy)
    # NOTE(review): only the upper bound is clamped here (values > 1 become 1);
    # the numpy ssim() clips to [-1, 1] on both sides -- confirm the asymmetry
    # is intentional.
    _result = tf.cond(greater(_ssim, one), lambda: one, lambda: _ssim)
    return _result
def ssim3(x, y, is_normalized=True):
    """Average per-channel SSIM of an HxWx3 numpy image pair."""
    channels_x = np.split(x, 3, axis=2)
    channels_y = np.split(y, 3, axis=2)
    scores = [ssim(cx, cy, is_normalized) for cx, cy in zip(channels_x, channels_y)]
    return (scores[0] + scores[1] + scores[2]) / 3.0
def tf_ssim3(x, y, is_normalized=True):
    """Average per-channel tf_ssim of an HxWx3 tensor pair."""
    xs = tf.split(x, 3, axis=2)
    ys = tf.split(y, 3, axis=2)
    scores = [tf_ssim(cx, cy, is_normalized) for cx, cy in zip(xs, ys)]
    return divide(multiOperation(add, scores[0], scores[1], scores[2]), tf.constant(3.0))
def tf_ssim3_(xy):
    """tf.map_fn helper: unpack one stacked (image, image) pair (stacked on a
    trailing axis) and score it with tf_ssim3 in the raw 0-255 range."""
    left, right = tf.split(xy, 2, axis=3)
    left = tf.squeeze(left)
    print("[tf_ssim3_] x.shape:", left.shape)
    right = tf.squeeze(right)
    return tf_ssim3(left, right, is_normalized=False)
def tf_ssim3_norm(xy):
    """tf.map_fn helper: unpack one stacked (image, image) pair (stacked on a
    trailing axis) and score it with tf_ssim3 in the normalized 0-1 range."""
    left, right = tf.split(xy, 2, axis=3)
    left = tf.squeeze(left)
    print("[tf_ssim3_norm] x.shape:", left.shape)
    right = tf.squeeze(right)
    return tf_ssim3(left, right, is_normalized=True)
def ssim4(x, y, is_normalized=False):
    """Batched 3-channel SSIM; returns (mean loss, per-image loss array)."""
    each_loss = np.array([ssim3(a, b, is_normalized) for a, b in zip(x, y)])
    return each_loss.mean(), each_loss
def tf_ssim4(x, y, is_normalized=False):
    """Batched tf_ssim3 via tf.map_fn; returns (mean loss, per-image losses).

    tf.map_fn passes a single tensor to its callee, so the two batches are
    stacked along a new trailing axis and unpacked inside the helper.
    """
    print("x.shape: ", x.shape)
    print("y.shape: ", y.shape)
    paired = tf.stack([x, y], axis=4)
    print("stack.shape: ", paired.shape)
    mapper = tf_ssim3_norm if is_normalized else tf_ssim3_
    each_loss = tf.map_fn(mapper, paired)
    return tf_mean(each_loss), each_loss
if __name__ == "__main__":
    # Demo / sanity check: compare the TF, numpy and PyTorch SSIM variants on
    # four blurred test images. Requires the data/splice*.png files, OpenCV
    # and a TF1 (graph/Session) runtime.
    def ssimTest(img_x, img_y):
        # Build a TF1 graph scoring two image batches with tf_ssim4.
        x = tf.placeholder(dtype=tf.float32,
                           shape=[None, None, None, 3])
        y = tf.placeholder(dtype=tf.float32,
                           shape=[None, None, None, 3])
        compute_ssim = tf_ssim4(x, y, True)
        # ssim(self.labels, self.pred, is_normalized=True)
        # labels[i].shape = (None, label_size, label_size, c_dim)
        # pred: (?, 32, 32, self.c_dim)
        tf_config = tf.ConfigProto(log_device_placement=True)
        tf_config.gpu_options.per_process_gpu_memory_fraction = 0.1
        with tf.Session(config=tf_config) as sess:
            # ssim_value = sess.run(compute_ssim,
            #                       feed_dict={x: img_x,
            #                                  y: img_y})
            # print("result:", ssim_value)
            total_loss, each_loss = sess.run(compute_ssim,
                                             feed_dict={x: img_x,
                                                        y: img_y})
            print("total_loss:", total_loss) # total_loss: 0.9782068
            print("each_loss:", each_loss) # each_loss: [0.9740695 0.97524875 0.98075956 0.98274946]
    # ================================================================================
    img1 = cv2.imread("data/splice1.png")
    img2 = cv2.imread("data/splice2.png")
    img3 = cv2.imread("data/splice3.png")
    img4 = cv2.imread("data/splice4.png")
    images = [img1, img2, img3, img4]
    showImage(img1, img2, img3, img4)
    dsts = []
    # Normalize to [0, 1], resize to 256x256, and build blurred counterparts.
    for i in range(len(images)):
        img = images[i] / 255.0
        # INTER_CUBIC = cv2.resize(_img, (_resize_cols, _resize_rows), interpolation=cv2.INTER_CUBIC)
        images[i] = cv2.resize(img, (256, 256), interpolation=cv2.INTER_CUBIC)
        dst = cv2.GaussianBlur(images[i].copy(), (5, 5), 0)
        dsts.append(dst)
    images = np.array(images)
    dsts = np.array(dsts)
    ssimTest(images, dsts)
    # ssim loss: 0.9724881069549962
    loss1 = ssim(images[0], dsts[0], is_normalized=True)
    loss2 = ssim(images[1], dsts[1], is_normalized=True)
    loss3 = ssim(images[2], dsts[2], is_normalized=True)
    loss4 = ssim(images[3], dsts[3], is_normalized=True)
    print("ssim loss1:", loss1) # ssim loss1: 0.9748280551931702
    print("ssim loss2:", loss2) # ssim loss2: 0.9754028104752155
    print("ssim loss3:", loss3) # ssim loss3: 0.9837840687997413
    print("ssim loss4:", loss4) # ssim loss4: 0.9857441547142608
    # ssim total loss: 0.979939772295597
    print("ssim total loss:", np.mean([loss1, loss2, loss3, loss4]))
    # Same comparison with the PyTorch implementation (channels-first layout).
    pt_images = torch.from_numpy(images)
    pt_images = pt_images.permute(0, 3, 1, 2) # pt_images.shape = torch.Size([4, 3, 256, 256])
    pt_dsts = torch.from_numpy(dsts)
    pt_dsts = pt_dsts.permute(0, 3, 1, 2) # pt_dsts.shape = torch.Size([4, 3, 256, 256])
    pt_image1 = pt_images[0]
    print("pt_image1.shape:", pt_image1.shape) # torch.Size([3, 256, 256])
    result = torch.split(pt_image1, 1, dim=0)
    pt_loss1 = PyTorchLoss.ssim(pt_images[0], pt_dsts[0], is_normalized=True)
    print("pt_loss1:", pt_loss1) # pt_loss1: tensor(0.9748, dtype=torch.float64)
    pt_loss2 = PyTorchLoss.ssim(pt_images[1], pt_dsts[1], is_normalized=True)
    print("pt_loss2:", pt_loss2) # pt_loss2: tensor(0.9754, dtype=torch.float64)
    pt_loss3 = PyTorchLoss.ssim(pt_images[2], pt_dsts[2], is_normalized=True)
    print("pt_loss3:", pt_loss3) # pt_loss3: tensor(0.9838, dtype=torch.float64)
    pt_loss4 = PyTorchLoss.ssim(pt_images[3], pt_dsts[3], is_normalized=True)
    print("pt_loss4:", pt_loss4) # pt_loss4: tensor(0.9857, dtype=torch.float64)
    # pt_total_loss: tensor(0.9851, dtype=torch.float64)
    pt_total_loss = torch.mean(torch.tensor([pt_loss1, pt_loss2, pt_loss3, pt_loss4]))
    pt_loss = PyTorchLoss.ssim(torch.from_numpy(images), torch.from_numpy(dsts), is_normalized=True)
    print("pt_loss:", pt_loss) # pt_loss: tensor(0.9851, dtype=torch.float64)
    """
    上面會將圖片的三個通道"共同"計算 ssim 值
    下面則將圖片的三個通道"分別"計算 ssim 值
    """
    # (Translation: above, the three channels are scored jointly; below they
    # are scored per channel and averaged.)
    pt_ssim3_loss = 0
    for img, dst in zip(pt_images, pt_dsts):
        pt_ssim3_loss += PyTorchLoss.ssim3(img, dst, is_normalized=True)
    pt_ssim3_loss /= pt_images.shape[0]
    # pt_ssim3_loss: 0.9782
    print("pt_ssim3_loss:", pt_ssim3_loss)
    ssim3_loss = 0
    for img, dst in zip(images, dsts):
        ssim3_loss += ssim3(img, dst, is_normalized=True)
    ssim3_loss /= len(images)
    # ssim3_loss: 0.9782067609078682
    print("ssim3_loss:", ssim3_loss)
    # ssim4: (0.9782067609078682, array([0.97406934, 0.97524874, 0.9807595 , 0.98274946]))
    print("ssim4:", ssim4(images, dsts, is_normalized=True))
    # PyTorch.ssim4: tensor(0.9782, dtype=torch.float64)
    pt_dsts = pt_dsts.requires_grad_(True)
    ssim4_loss = PyTorchLoss.ssim4(pt_images, pt_dsts, is_normalized=True)
    print("PyTorch.ssim4:", ssim4_loss)
| true |
19e93a469db2c48e6bd137aa75c7a7259b7885f5 | Python | LLisowskaya/Echo_server | /server.py | UTF-8 | 1,234 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import socket
# Set logging parameters.
log_format = '%(asctime)s %(name)s: %(levelname)s: %(message)s'
date_format = '%Y-%m-%d %H:%M:%S'
log_name = "TCP-Server"
logging.basicConfig(
    format=log_format, level=logging.INFO, datefmt=date_format)
# Create logging instance.
logger = logging.getLogger(log_name)
logger.info("Server is started.")
# Create IP/TCP socket.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind server to 9090 port on all addresses.
sock.bind(('', 9090))
logger.info("Listening on 9090 port...")
# Run listening with 1 connection in the thread.
sock.listen(1)
# Get created socket and an address of a client.
# Serve one client at a time: echo every received chunk back upper-cased
# until the peer closes the connection.
while True:
    conn, addr = sock.accept()
    # NOTE(review): accept() blocks and returns a socket object, so this
    # falsy-check looks redundant -- confirm before removing.
    if not conn:
        continue
    logger.info(f"Client {addr[0]}:{addr[1]} is connected.")
    while True:
        data = conn.recv(1024)
        # NOTE(review): recv(1024) may return fewer than 1024 bytes; the log
        # message below overstates what was read.
        logger.info(f"Receive 1024 bytes from client {addr[0]}:{addr[1]}.")
        if not data:
            logger.info(f"Client {addr[0]}:{addr[1]} is disconnected.")
            break
        conn.send(data.upper())
        logger.info(f"Send data to client {addr[0]}:{addr[1]}.")
    conn.close()
# Unreachable: the accept loop above never terminates.
logger.info("Server is stopped.")
ac4fdf645616b90ef3784787c828842c68fa3bff | Python | Johnson-xie/jeetcode | /contest/199/02.py | UTF-8 | 314 | 3.171875 | 3 | [] | no_license | class Solution:
def minFlips(self, target: str) -> int:
target.lstrip('0')
if not target:
return 0
status = [i for i in target.split('0') if i]
return 2 * len(status) - 1
if __name__ == '__main__':
    # Quick manual check: an all-zero target needs no flips.
    s = '00000'
    ret = Solution().minFlips(s)
    print(ret)
| true |
a217d19630e7601e7d1147207ad97ea0765351a1 | Python | at3103/Leetcode | /Add_and_Search_word.py | UTF-8 | 1,395 | 3.96875 | 4 | [] | no_license | import re
class WordDictionary(object):
    """Dictionary supporting addWord() and search() with the '.' wildcard.

    Words live as keys of a flat dict. A search containing dots falls back to
    a linear regex scan over the stored keys; a successful dotted pattern is
    cached back into the store so repeating it is O(1).
    """

    def __init__(self):
        # Flat store of words (plus cached dot-patterns); values are dummy 1s.
        self.data = dict()
        # Lengths of stored words, for the all-wildcards fast path.
        self.wlength = set()

    def addWord(self, word):
        """Record *word* in the structure."""
        self.data[word] = 1
        self.wlength.add(len(word))

    def search(self, word):
        """Return True if some stored word matches *word*, where '.' matches
        exactly one arbitrary character."""
        dots = word.count('.')
        if dots == 0:
            # No wildcards: plain dictionary lookup.
            return bool(self.data.get(word, 0))
        if dots == len(word):
            # All wildcards: any word of the same length matches.
            return len(word) in self.wlength
        # Mixed pattern: maybe cached from an earlier successful search.
        if bool(self.data.get(word, 0)):
            return True
        length = len(word)
        # re.match anchors at the start; combined with the length check this
        # is a full match ('.' consumes exactly one character).
        for candidate in list(self.data.keys()):
            if length == len(candidate) and bool(re.match(word, candidate)):
                # Cache the pattern so the next identical search is O(1).
                self.addWord(word)
                return True
        return False
# Your WordDictionary object will be instantiated and called as such:
# wordDictionary = WordDictionary()
# wordDictionary.addWord("word")
# wordDictionary.search("pattern") | true |
72da56cf2f6d20de419062b8b5dfa144f462aece | Python | Daehyun-Bigbread/Bigbread-Python | /python_for_everyone/17A-trun.py | UTF-8 | 2,167 | 4.21875 | 4 | [] | no_license | # 터틀런 만들기1
import turtle as t
import random
te = t.Turtle() # enemy turtle (red)
te.shape("turtle")
te.color("red")
te.speed(0)
te.up()
te.goto(0, 200)
ts = t.Turtle() # food (green circle)
ts.shape("circle")
ts.color("green")
ts.speed(0)
ts.up()
ts.goto(0, -200)
def turn_right(): # Point the player turtle east (right).
    t.setheading(0)
def turn_up(): # Point the player turtle north (up).
    t.setheading(90)
def turn_left(): # Point the player turtle west (left).
    t.setheading(180)
def turn_down(): # Point the player turtle south (down).
    t.setheading(270)
def play(): # Runs one tick of the game and re-schedules itself.
    t.forward(10) # Move the player turtle 10 steps forward.
    ang = te.towards(t.pos())
    te.setheading(ang) # Turn the enemy turtle to face the player.
    te.forward(9) # Move the enemy 9 steps (slightly slower than the player).
    if t.distance(ts) < 12: # Player is close to the food (distance < 12):
        star_x = random.randint(-230, 230)
        star_y = random.randint(-230, 230)
        ts.goto(star_x, star_y) # move the food somewhere else at random.
    if t.distance(te) >= 12: # Enemy has not caught the player yet:
        t.ontimer(play, 100) # run play again after 0.1 s (keep the game going).
t.setup(500, 500)
t.bgcolor("orange")
t.shape("turtle") # Use a turtle-shaped cursor for the player.
t.speed(0) # Fastest drawing speed.
t.up()
t.color("white")
t.onkeypress(turn_right, "Right") # Arrow keys steer the player.
t.onkeypress(turn_up, "Up")
t.onkeypress(turn_left, "Left")
t.onkeypress(turn_down, "Down")
t.listen() # Let the graphics window receive keyboard input.
play() # Start the game loop.
| true |
2e140c1749176e13deb85868d4f9dff1e61eaf9e | Python | bihutchins/word2vec_pipeline | /pipeline_src/__main__.py | UTF-8 | 2,032 | 2.546875 | 3 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | #! /usr/bin/env python
"""
Usage:
word2vec_pipeline import_data
word2vec_pipeline phrase
word2vec_pipeline parse
word2vec_pipeline embed
word2vec_pipeline score
word2vec_pipeline predict
word2vec_pipeline metacluster
word2vec_pipeline analyze
The code that is run by each command is found in the filename in current
directory that corresponds to each command. The function
BLANK_from_config(config) is the entry point for each file,
and can be found at the bottom of each file.
"""
from docopt import docopt
import simple_config
import logging
import sys
# Refuse to run under Python 2; the pipeline is Python-3 only.
_python_version = sys.version_info
if _python_version < (3,):
    raise ValueError(
        "Pipeline now requires python 3, you have", _python_version
    )
# Default logging configuration shared by every pipeline stage.
logging.basicConfig(level=logging.INFO)
def main():
    """Dispatch the pipeline stage named on the command line.

    docopt matches argv against exactly one of the usage patterns in the
    module docstring, so exactly one command flag is True and exactly one
    branch below runs. Stage modules are imported lazily so a missing
    dependency of one stage does not break the others.

    (Previously the dispatch mixed an if/elif pair with independent `if`
    statements; it is now a single consistent chain -- behaviorally identical
    because docopt guarantees a single command.)
    """
    args = docopt(__doc__)
    config = simple_config.load()

    if args["import_data"]:
        from import_data import import_data_from_config
        import_data_from_config(config)

    elif args["phrase"]:
        from phrase import phrases_from_config
        phrases_from_config(config)

    elif args["parse"]:
        from parse import parse_from_config
        parse_from_config(config)

    elif args["embed"]:
        from embed import embed_from_config
        embed_from_config(config)

    elif args["score"]:
        from score import score_from_config
        score_from_config(config)

    elif args["predict"]:
        from predict import predict_from_config
        predict_from_config(config)

    elif args["metacluster"]:
        from metacluster import metacluster_from_config
        metacluster_from_config(config)

    elif args["analyze"]:
        import postprocessing.analyze_metaclusters as pam
        pam.analyze_metacluster_from_config(config)

    # elif func == "LIME":
    #    import postprocessing.lime_explainer as le
    #    le.explain_metaclusters(config)
    # else:
    #    raise KeyError("Analyze Function {} not known".format(func))


if __name__ == "__main__":
    main()
| true |
77e14c64d71ec550082b1b264b2005c3007fa2cb | Python | josue9912/Repositorios_Empresa | /Archivo.py | UTF-8 | 1,000 | 2.828125 | 3 | [] | no_license | import os
# coding: utf-8
import shutil
# Root folder whose subtree is scanned below.
thisdir = "/Users/josuesantanagalvan/Desktop/Carpeta/Carpeta1/Carpeta2"

# Create the folder skeleton: inside every directory whose path contains
# 'CarpetaFinal', create 'main' and 'main/V01'.
for r, d, f in os.walk(thisdir):
    if 'CarpetaFinal' in r:
        try:
            r1 = os.path.join(r, 'main')
            os.mkdir(r1)
            r2 = os.path.join(r, 'main', 'V01')
            os.mkdir(r2)
        except FileExistsError:
            # Fix: this message used to sit AFTER `break` and was unreachable.
            print("Ya existen las carpetas main y V01")
            break

# Rename every .py file ('jercicio' -> '1') and move it into main/V01.
for r, d, f in os.walk(thisdir):
    for file in f:
        if file.endswith(".py"):
            ruta_antigua = os.path.join(r, file)
            nuevo_fichero = file.replace('jercicio', '1')
            ruta_nueva = os.path.join(r, nuevo_fichero)
            os.rename(ruta_antigua, ruta_nueva)
            r_destino = os.path.join(r, "main", "V01")
            try:
                shutil.move(ruta_nueva, r_destino)
            except Exception:
                # Best-effort move: report and keep going (was a bare except,
                # which would also swallow KeyboardInterrupt/SystemExit).
                print("Error ")
| true |
21e56e635432d33bb9c55a4449e4c0730937b6b0 | Python | microsoft/SDNet | /Utils/GeneralUtils.py | UTF-8 | 3,127 | 2.65625 | 3 | [
"MIT",
"Apache-2.0",
"LGPL-2.1-or-later"
] | permissive | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math
import re
from Utils.Constants import *
import spacy
import torch
import torch.nn.functional as F
import unicodedata
import sys
from torch.autograd import Variable
# Module-level spaCy English pipeline; the parser is disabled for speed.
nlp = spacy.load('en', parser = False)
# normalize sentence
def normalize_text(text):
    """Return *text* in Unicode NFD (canonically decomposed) form."""
    return unicodedata.normalize('NFD', text)
def space_extend(matchobj):
    """re.sub callback: pad the matched text with one space on each side."""
    return ' {} '.format(matchobj.group(0))
# get rid of punctuation stuff and stripping
def pre_proc(text):
    """Surround punctuation/dash characters with spaces and collapse runs of
    whitespace to single spaces, trimming the ends."""
    # Pad each listed separator (ASCII/Unicode dashes, %, brackets, colon,
    # parentheses, slash, tab) with spaces.
    text = re.sub(u'-|\u2010|\u2011|\u2012|\u2013|\u2014|\u2015|%|\[|\]|:|\(|\)|/|\t', space_extend, text)
    # Trim surrounding spaces/newlines, then squeeze internal whitespace.
    return re.sub('\s+', ' ', text.strip(' \n'))
# get a set of vocabulary
def load_glove_vocab(file, wv_dim, to_lower=True):
    """Read a GloVe embedding file and return its vocabulary as a set.

    Args:
        file: path to the GloVe text file (one token plus wv_dim floats per line).
        wv_dim: word-vector dimensionality; everything before the last wv_dim
            whitespace-separated fields of a line is treated as the (possibly
            multi-part) token.
        to_lower: lowercase tokens before adding them.

    Returns:
        Set of NFD-normalized token strings.
    """
    glove_vocab = set()
    print('Loading glove vocabulary from ' + file)
    line_count = 0
    with open(file, encoding='utf-8') as f:
        for line in f:
            line_count = line_count + 1
            if line_count % 100000 == 0:
                print('.', end='', flush=True)  # progress marker
            elems = line.split()
            # Rejoin everything that is not one of the trailing wv_dim vector
            # components, since the token itself may contain spaces.
            token = normalize_text(''.join(elems[0:-wv_dim]))
            if to_lower:
                token = token.lower()
            glove_vocab.add(token)
    print('\n')
    print('%d words loaded from Glove\n' % len(glove_vocab))
    return glove_vocab
def token2id(docs, vocab, unk_id=None):
    """Map every token in every document to its index in *vocab*; tokens not
    in the vocabulary map to *unk_id*."""
    index_of = {word: idx for idx, word in enumerate(vocab)}
    return [[index_of.get(tok, unk_id) for tok in doc] for doc in docs]
def char2id(docs, char_vocab, unk_id=None):
    """Encode each word of each document as <STA> + per-character vocabulary
    ids + <END>; unknown characters map to *unk_id*."""
    index_of = {ch: idx for idx, ch in enumerate(char_vocab)}
    start, end = index_of["<STA>"], index_of["<END>"]
    return [[[start] + [index_of.get(ch, unk_id) for ch in word] + [end]
             for word in doc] for doc in docs]
def removeInvalidChar(sentence):
    """Drop characters that cannot be UTF-8 encoded, then rebuild the string
    byte-by-byte (so multi-byte characters come back as mojibake bytes)."""
    encoded = sentence.encode('utf-8', errors='ignore')
    return ''.join(chr(b) for b in encoded if 0 <= b < 256)
def makeVariable(x, use_cuda):
    """Wrap tensor *x* in a no-grad Variable, moving it to the GPU (from
    pinned memory, with an asynchronous copy) when *use_cuda* is true.

    Fix: the `async=True` keyword was a SyntaxError on Python 3.7+ (`async`
    became a reserved word) and was renamed to `non_blocking` in PyTorch 0.4.
    """
    if use_cuda:
        x = x.pin_memory()
        return Variable(x.cuda(non_blocking=True), requires_grad=False)
    else:
        return Variable(x, requires_grad=False)
'''
Input:
nlp is an instance of spacy
sentence is a string
Output:
A list of tokens, entity and POS tags
'''
def spacyTokenize(sentence, vocab_ent=None, vocab_tag=None):
    """Lowercase, clean and tokenize *sentence* with the module-level spaCy
    pipeline, optionally mapping entity/POS tags to vocabulary ids.

    Returns:
        (tokens, ent, tag) -- ent/tag are None unless the matching vocabulary
        was supplied.
    """
    sentence = sentence.lower()
    sentence = pre_proc(sentence)
    raw_tokens = nlp(sentence)
    # Keep only non-punctuation, non-whitespace tokens (NFD-normalized).
    tokens = [normalize_text(token.text) for token in raw_tokens if not token.is_punct | token.is_space]
    ent = None
    if vocab_ent is not None:
        # NOTE(review): token2id() expects a list of token lists, but here it
        # is handed a single string, and its list result is added to the int 1
        # -- this looks like it would raise TypeError. A scalar vocabulary
        # lookup (index + 1) was probably intended; confirm before relying on
        # this branch.
        ent = [token2id(token.ent_type_, vocab_ent) + 1 for token in raw_tokens if not token.is_punct | token.is_space]
    tag = None
    if vocab_tag is not None:
        tag = [token2id(token.tag_, vocab_tag) + 1 for token in raw_tokens if not token.is_punct | token.is_space]
    return tokens, ent, tag
| true |
48c16b77fe36d451e5514a23e7c4b50d7dcabb37 | Python | GlorianY/My-Coding-Puzzles-Solutions | /Hackerrank/count_triplets.py | UTF-8 | 1,020 | 3.265625 | 3 | [] | no_license | from collections import Counter
def countTriplets(arr, r):
    """Count index triples i < j < k with arr[j] == arr[i]*r and arr[k] == arr[j]*r.

    One left-to-right pass with two Counters:
      * pending_second[v]: how many earlier values equal v / r (so v could be
        the middle element of a pair);
      * pending_third[v]:  how many earlier (first, second) pairs are waiting
        for v to complete a triplet.
    Counter returns 0 for missing keys without inserting them, so no
    membership guards are needed.
    """
    pending_second = Counter()
    pending_third = Counter()
    total = 0
    for value in arr:
        # Every pair waiting for `value` yields one finished triplet.
        total += pending_third[value]
        # Pairs ending at `value` now wait for value * r as their third term.
        pending_third[value * r] += pending_second[value]
        # `value` alone starts a new potential pair, waiting for value * r.
        pending_second[value * r] += 1
    return total
| true |
7a007d3edf34cc8ca8ee76c167d6902fdeba5696 | Python | Dinesh-Sivanandam/Data-Structures | /Queue/printing the binary numbers.py | UTF-8 | 1,867 | 4.5 | 4 | [] | no_license | #importing the deque module from collections
from collections import deque
#creating the class for queue
class Queue:
    """FIFO queue backed by collections.deque: elements enter on the left
    and leave from the right (oldest end)."""

    def __init__(self):
        # Underlying storage for the queued elements.
        self.buffer = deque()

    def enqueue(self, val):
        """Add *val* to the queue."""
        self.buffer.appendleft(val)

    def dequeue(self):
        """Remove and return the oldest element."""
        return self.buffer.pop()

    def is_empty(self):
        """Return True when the queue holds no elements."""
        return not self.buffer

    def size(self):
        """Return the number of queued elements."""
        return len(self.buffer)

    def front(self):
        """Peek at the oldest element without removing it."""
        return self.buffer[-1]
#function for producing the binary values
def produce_binary_numbers(n):
    """Print the binary representations of 1..n, generated breadth-first
    with a FIFO queue of strings."""
    if n == 0:
        print("0")
    pending = Queue()
    pending.enqueue("1")
    for _ in range(n):
        # The oldest queued string is the next binary number to emit.
        current = pending.front()
        print(" ", current)
        # Its two children in the implicit binary tree follow later.
        pending.enqueue(current + "0")
        pending.enqueue(current + "1")
        pending.dequeue()
#starting the main
if __name__ == '__main__':
    # Demo: print the binary forms of 1..5.
    produce_binary_numbers(5)
755fcbe0d3d60aed527b3ec955fbea27d1d77783 | Python | izxle/FuelCellCatalystAnalysis | /fccalib/electrode.py | UTF-8 | 6,265 | 2.8125 | 3 | [] | no_license | from numbers import Real
from numpy import pi
class Area(object):
    """Electrode area (cm^2) as measured by several methods.

    Attributes mirror the method: `geom` (geometric), `CO` (CO stripping),
    `H` (H adsorption from the CO experiment) and `CV` (H adsorption from a
    plain CV). Unmeasured methods stay None.
    """
    # TODO: maybe inherit dict
    _format = '6.3f'

    def __init__(self, geom=None, CO=None, H=None, CV=None):
        self.geom = geom
        self.CO = CO
        self.H = H
        self.CV = CV

    def big(self):
        """Return the largest non-geometric area on record, or 1 if none.

        The placeholder entry 'default' = 1 guarantees max() always has at
        least one candidate.
        """
        # TODO: mejorar
        stuff = self.vars()
        stuff['default'] = 1
        del stuff['geom']
        for k, v in self.vars().items():
            if not v:
                # pop() instead of del: 'geom' was already removed above, so
                # deleting it again (when geom itself is falsy) raised KeyError.
                stuff.pop(k, None)
        return max(stuff.values())

    def update(self, **stuff):
        """Set several area attributes at once via keyword arguments."""
        vars(self).update(**stuff)

    def get(self, key):
        """dict-like get() over the area attributes."""
        return vars(self).get(key)

    def values(self):
        """Yield every stored area value."""
        for v in list(vars(self).values()):
            yield v

    def keys(self):
        """Yield every attribute name.

        Fixed: this previously iterated vars(self).values(), so keys()
        yielded the VALUES instead of the keys.
        """
        for k in list(vars(self).keys()):
            yield k

    def __iter__(self):
        for k in list(vars(self).keys()):
            yield k

    def __getitem__(self, key):
        return vars(self)[key]

    def vars(self):
        """Snapshot of the instance attributes as a plain dict."""
        return dict(vars(self))

    def __str__(self):
        txt_CO = f'{self.CO:{self._format}} cm^2' if self.CO is not None else ' None'
        txt_H = f'{self.H:{self._format}} cm^2' if self.H is not None else ' None'
        txt_CV = f'{self.CV:{self._format}} cm^2' if self.CV is not None else ' None'
        text = f'''
Area:
  geom: {self.geom:{self._format}} cm^2
  CO:   {txt_CO}
  CO-H: {txt_H}
  CV-H: {txt_CV}
'''
        return text

    def __format__(self, format_spec):
        return f'{str(self):{format_spec}}'
# ..
class Catalyst(object):
    """A supported catalyst sample: total mass plus its active-center and
    support components. Masses in ug."""

    def __init__(self, name: str, mass: Real,
                 active_center_name: str, active_center_percentage: Real,
                 support_name: str = '', support_mass: Real = None):
        self.ecsa = None  # filled in later by set_ecsa()
        self.name = name
        self.mass = mass
        # The active-center mass follows from its weight percentage.
        self.active_center = ActiveCenter(mass=mass * active_center_percentage / 100,
                                          name=active_center_name,
                                          percentage=active_center_percentage)
        self.support = Support(support_name, support_mass)

    def copy(self, scale: Real = 1):
        """Return a new Catalyst whose masses are scaled by *scale*."""
        if self.support.mass is None:
            scaled_support_mass = None
        else:
            scaled_support_mass = self.support.mass * scale
        return Catalyst(name=self.name, mass=self.mass * scale,
                        active_center_name=self.active_center.name,
                        active_center_percentage=self.active_center.percentage,
                        support_name=self.support.name,
                        support_mass=scaled_support_mass)

    def set_ecsa(self, area_real):
        """Store the electrochemical surface area, converted to m^2 / g of
        active center (area in cm^2, mass in ug)."""
        self.ecsa = area_real * 1e2 / self.active_center.mass

    @property
    def ecsa_str(self):
        """Human-readable ECSA, or 'None' before set_ecsa() was called."""
        if self.ecsa is None:
            return 'None'
        return f'{self.ecsa:7.3f} m^2 / g_{self.active_center.name}'

    def __str__(self):
        parts = [f'{self.name:14} {self.mass:5.1f} ug',
                 f'{self.active_center}',
                 f'ECSA = {self.ecsa_str}']
        return '\n'.join(parts)

    def __format__(self, format_spec):
        return f'{str(self):{format_spec}}'
# ..
class ActiveCenter(object):
    """Mass (ug) and weight percentage of the catalytically active metal."""

    def __init__(self, mass: Real, name: str = '', percentage: Real = 100):
        self.name = name
        self.mass = mass
        if percentage == 100:
            # TODO: implement log
            print('Warning: using an active center percentage of 100%')
        self.percentage = percentage

    def __str__(self):
        label = self.name or 'Active center'
        return f'{label:13} {self.mass:5.1f} ug'

    def __format__(self, format_spec):
        return f'{str(self):{format_spec}}'
# ..
class Support(object):
    """Catalyst support (e.g. carbon) description: name, mass (ug) and
    optional weight percentage."""

    def __init__(self, name: str = 'Support', mass: Real = 0, percentage: Real = None):
        self.name, self.mass, self.percentage = name, mass, percentage

    def __format__(self, format_spec):
        return f'{str(self):{format_spec}}'
# ..
class Solvent(object):
    """Dispersion solvent; *volume* (uL) must be a numbers.Real and is
    validated at construction time."""

    def __init__(self, volume: Real, name: str = 'Support'):
        self.name = name
        # Reject non-numeric volumes with a descriptive error.
        if not isinstance(volume, Real):
            raise ValueError(f'volume: got {type(volume)} expected <Real>')
        self.volume = volume

    def __format__(self, format_spec):
        return f'{str(self):{format_spec}}'
# ..
class Ink(object):
    """A catalyst dispersed in a solvent, with flattened field aliases."""

    def __init__(self, catalyst: Catalyst, solvent: Solvent):
        self.solvent = solvent
        self.catalyst = catalyst
        # TODO: check if aliases are useful
        # Flatten the frequently-used catalyst fields onto the ink itself.
        center = catalyst.active_center
        support = catalyst.support
        self.catalyst_mass = catalyst.mass
        self.active_center_name = center.name
        self.active_center_mass = center.mass
        self.active_center_percentage = center.percentage
        self.support_name = support.name
        self.support_mass = support.mass

    def sample(self, volume: Real = 0) -> Catalyst:
        """
        Returns a Catalyst object with mass corresponding to the provided sample volume
        :param volume: in uL
        :return Catalyst: with mass in ug
        """
        fraction = volume / self.solvent.volume
        return self.catalyst.copy(scale=fraction)
# ..
class Electrode(object):
    """Working electrode: disc geometry plus the catalyst deposited on it."""

    def __init__(self, catalyst: Catalyst, area: Real = None, diameter: Real = None):
        if area:
            # Geometric area given directly; derive the disc diameter.
            self.area = Area(float(area))
            self.diameter = (area * 4. / pi) ** 0.5
        elif diameter:
            # Diameter given; derive the disc area.
            self.diameter = float(diameter)
            self.area = Area((pi * diameter ** 2.) / 4.)
        else:
            raise ValueError("Missing area or diameter of the electrode.")
        # catalyst with mass in ug
        self.catalyst = catalyst
        self.catalyst_load = catalyst.active_center.mass / self.area.geom  # ug/cm2
        # default activities
        # TODO: create class for catalytic activities

    def __str__(self):
        return f'{self.area}\n{self.catalyst}'
| true |
b5dba3f09768297ca0c34ea0ad1af441d392e49f | Python | indirap/state-of-states | /src/join_with_wb_data.py | UTF-8 | 5,369 | 3.25 | 3 | [] | no_license | '''
join_with_wb_data.py
Combines
* datascraped from wikipedia
* data obtained from the world bank
* country codes used for the d3 map.
Takes as input
1: World Bank CSV
2: Feature TSV file from Wikipedia
3: ISO file from D3 Map
'''
import os
import csv
import sys
import os
from types import *
def clean_filename(name):
    '''Given a filename ending with .csv, returns the basename minus .csv,
    with underscores turned into spaces and the substring "data" removed.'''
    base = os.path.splitext(name)[0]
    return base.replace("_", " ").replace("data", "")
def transform_name(name):
    '''
    Called on all 'name' values in all data sets,
    transforms the value into a standardized country name
    not found verbatim in any one of the datasets.

    Returns the name stripped and lower-cased, which is what every lookup
    dictionary in this script is keyed on.
    '''
    if name.endswith(".csv"):
        name = clean_filename(name)
    #Sub in countries
    # NOTE: the order of this chain matters -- several branches use substring
    # tests ('in'), so an earlier exact match must not be shadowed by them.
    if name == "Kyrgyz Republic":
        name = "Kyrgyzstan"
    elif name == "Congo, Dem. Rep.":
        name = "Democratic Republic of the Congo"
    elif name == "Congo, Rep.":
        name = "Republic of the Congo"
    elif name == "Russian Federation":
        name = "Russia"
    elif name == "Myanmar":
        name = "Burma"
    elif name == "Kingdom of the Netherlands":
        name = "Netherlands"
    elif name == "Iran, Islamic Rep.":
        name = "Iran"
    elif name == "Syrian Arab Republic":
        name = "Syria"
    elif name == "Yemen, Rep.":
        name = "Yemen"
    elif name == "Egypt, Arab Rep.":
        name = "Egypt"
    elif name =="Lao PDR":
        name = "Laos"
    elif name =="Republic of Ireland":
        name = "Ireland"
    elif name =="Venezuela, RB":
        name = "Venezuela"
    elif 'Bahamas' in name:
        name = 'Bahamas'
    elif name == 'Cabo Verde':
        name = 'Cape Verde'
    elif 'Brunei' in name:
        name = 'Brunei'
    elif name == 'Timor-Leste':
        name = 'East Timor'
    elif 'Micronesia' in name:
        name = 'Micronesia'
    elif 'Georgia' in name:
        name = 'Georgia'
    elif name == "Cote d'Ivoire":
        name = 'Ivory Coast'
    elif 'Kosovo' in name:
        name = 'Kosovo'
    elif name == 'Korea, Dem. Rep.':
        name = 'North Korea'
    elif 'Ireland' in name:
        name = 'Ireland'
    elif name == 'Macedonia, FYR':
        name = 'Macedonia'
    # URL-encoded form as scraped from Wikipedia.
    elif name == 'S%C3%A3o Tom%C3%A9 and Pr%C3%ADncipe':
        name = 'Sao Tome and Principe'
    elif 'Kitts and Nevis' in name:
        name = 'Saint Kitts and Nevis'
    elif name == 'St. Lucia':
        name = 'Saint Lucia'
    elif name == 'St. Vincent and the Grenadines':
        name = 'Saint Vincent and the Grenadines'
    elif name == 'Slovak Republic':
        name = 'Slovakia'
    elif name == 'Korea, Rep.':
        name = 'South Korea'
    elif 'Gambia' in name:
        name = 'Gambia'
    return name.strip().lower() #an important part
#Open files
# NOTE(review): this script is Python-2 only (csv reader .next(), print
# statements). argv: [1] World Bank CSV, [2] feature TSV, [3] ISO TSV.
#ISO file
iso_file = open(sys.argv[3],'r')
iso_reader = csv.reader(iso_file, delimiter='\t')
iso_reader.next()
iso_dict = {}
#Read iso file into dict: {transformed_name: (id, code)
for line in iso_reader:
    iso_dict[transform_name(line[2])] = (line[0], line[1])
#Feature file
feature_file = open(sys.argv[2],'r')
feature_reader = csv.reader(feature_file, delimiter='\t')
feature_dict = {}
#Read data into dictionary
for country in feature_reader:
    feature_dict[transform_name(country[0])] = country[1:]
#World Bank File
wb_file = open(sys.argv[1],'r')
reader = csv.reader(wb_file)
reader.next()
indicators = set() # Set of all indicators found so far
data = {}
# Get all data in form:
# {country_code: { country_indicator: rest_of_line }}
for line in reader: #for line in world bank data set
    c_code = transform_name(line[0]) #Get country code
    c_indicator = line[3]
    indicators |= set([c_indicator]) #Add to indicator set
    if not c_code in data:
        data[c_code] = {}
    data[c_code][c_indicator] = line
field = 58 # Per WB data file, but the column for 2013
prefix = "" #Per WB data file, just for ease of identifying output
for ind in indicators: # for each indicator
    # Keeps track of num of countries with missing WB data
    # If this number goes over 50 then ignore this indicator
    num_empty = 0
    for country_key in data.keys(): # For each country
        country = data[country_key]
        assert type(country) is DictType
        #If no indicator for this country
        if not ind in country:
            num_empty +=1 #No data, increase counter, continue
            continue
        if country[ind][field] == "":
            num_empty +=1 #Or if value is empty
            continue
    if num_empty < 50:
        #Create output file
        fd = open("res_"+prefix+"_"+ind+".tsv", 'w+')
        writer = csv.writer(fd, delimiter='\t')
        # Write header
        writer.writerow(['id', 'code', 'country', 'indicator', 'citations', 'fsize', 'links'])
        # NOTE(review): country[ind] below is accessed without a membership
        # check, yet up to 49 countries may lack this indicator -- this looks
        # like a latent KeyError. Confirm against real input data.
        for country_key in data.keys(): #Write out each country
            country = data[country_key]
            tname = transform_name(country[ind][0])
            if (not tname in feature_dict) or (not tname in iso_dict):
                continue
            #print iso_dict[tname][0]
            #print iso_dict[tname][1]
            #print country[ind][field]
            writer.writerow([iso_dict[tname][0], iso_dict[tname][1], tname, country[ind][field]] + feature_dict[tname])
        fd.close()
    else:
        print "Too few countries have this indicator: %s" % ind
| true |
8dd8c967a056c9fe9c25e6d8ef870b5846d4fac4 | Python | dcurto95/Energy-consumption-prediction | /src/plot.py | UTF-8 | 7,385 | 2.9375 | 3 | [] | no_license | import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.model_selection import train_test_split
def multiple_line_plot(x_list, y_list, labels, file_name, folder='.', title='', figsize=(20, 20)):
    """Draw several labeled line series on one axes and save it as a PNG
    under ../logs/<folder>/<file_name>.png. At most ~10 x-ticks are shown."""
    # plot the data
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(1, 1, 1)
    for x, y, color, label in zip(x_list, y_list, mcolors.TABLEAU_COLORS, labels):
        ax.plot(x, y, color=color, label=label)
    # Thin the x-ticks: show every len/10-th tick when there are many points.
    if len(x_list[0]) > 10:
        indices = np.arange(0, len(x_list[0]), len(x_list[0]) // 10)
    else:
        indices = np.arange(0, len(x_list[0]))
    ax.set_xticks(np.asarray(x_list[0])[indices].tolist())
    ax.set_xticklabels(np.asarray(x_list[0])[indices].tolist(), rotation=20)
    ax.set_title(title)
    ax.legend()
    plt.savefig(
        ("../logs/" + folder + "/" + file_name + ".png"))
    plt.close()
def draw_history(history, folder, test_name):
    """Plot a Keras-style training history's train/validation loss curves and
    save the figure to ../logs/<folder>/Loss_<test_name>.jpg."""
    # list all data in history
    print(history.history.keys())
    plt.figure()
    # summarize history for loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='best')
    plt.grid(True)
    plt.savefig('../logs/' + folder + '/Loss_' + test_name + '.jpg')
    plt.close('all')
def plot_consumption(df):
    """Plot the raw PJM East load series over time.

    Side effect: re-indexes df by its "Datetime" column in place.
    """
    df.index = df["Datetime"]
    fig, ax = plt.subplots(figsize=(15, 5))
    plt.plot(df.index, df["PJME_MW"])
    plt.title("PJM East Coast Energy consumption")
    plt.ylabel("Consumed energy (MW)")
def plot_prediction(gt, pred):
    """Overlay the first 1000 ground-truth and predicted values and show the plot."""
    plt.figure(figsize=(15, 6))
    plt.plot(gt[0:1000], 'b', linewidth=2.5, linestyle="-", label='real')
    plt.plot(pred[0:1000], 'r', linewidth=2.5, linestyle="-", label='prediction')
    plt.legend(loc='best')
    plt.show()
def line_subplot(x_list, y_list, labels, subplots, file_name, folder='.', title='', figsize=(20, 20)):
    """Draw each (x, y) series in its own subplot on a subplots[0] x subplots[1]
    grid and save the figure to ../logs/<folder>/<file_name>.png."""
    # plot the data
    fig = plt.figure(figsize=figsize)
    # Thin the x-ticks (taken from the first series) to at most ~10.
    if len(x_list[0]) > 10:
        indices = np.arange(0, len(x_list[0]), len(x_list[0]) // 10)
    else:
        indices = np.arange(0, len(x_list[0]))
    for i, (x, y, color, label) in enumerate(zip(x_list, y_list, mcolors.TABLEAU_COLORS, labels)):
        ax = fig.add_subplot(subplots[0], subplots[1], i + 1)
        ax.plot(x, y, color=color)
        ax.set_xticks(np.asarray(x_list[0])[indices].tolist())
        ax.set_xticklabels(np.asarray(x_list[0])[indices].tolist())
        ax.grid(True)
        ax.set_title(label)
    plt.suptitle(title)
    plt.savefig(
        ("../logs/" + folder + "/" + file_name + ".png"))
    plt.close()
def plot_average_consumption(dataframe, column):
    # NOTE(review): this function is a stub — both parameters are unused and it
    # only opens and shows an empty figure. Confirm whether it should be
    # implemented or removed.
    plt.figure(figsize=(15, 6))
    plt.show()
def consumption_distribution_quarter(df):
    """Plot the distribution of PJME_MW in a 2x2 grid, one panel per quarter."""
    Q1 = df[df["Quarter"] == 1]
    Q2 = df[df["Quarter"] == 2]
    Q3 = df[df["Quarter"] == 3]
    Q4 = df[df["Quarter"] == 4]
    fig, axes = plt.subplots(2, 2, figsize=(17, 7), sharex=True, sharey=True)
    sns.distplot(Q1["PJME_MW"], color="blue", ax=axes[0, 0]).set_title("Quarter 1")
    sns.distplot(Q2["PJME_MW"], color="green", ax=axes[0, 1]).set_title("Quarter 2")
    sns.distplot(Q3["PJME_MW"], color="orange", ax=axes[1, 0]).set_title("Quarter 3")
    sns.distplot(Q4["PJME_MW"], color="brown", ax=axes[1, 1]).set_title("Quarter 4")
    fig.suptitle("Energy consumption distribution by Quarter", fontsize=20)
def consumption_distribution_hour(df):
    """Plot a distribution of per-hour summed consumption."""
    # NOTE(review): the pivot uses "Hour" as both index and columns, so the
    # resulting frame's columns are hour values, not "PJME_MW" — the
    # indexing below likely raises KeyError. Verify against real data.
    modified = df.pivot_table(index=df['Hour'],
                              columns='Hour',
                              values='PJME_MW',
                              aggfunc='sum')
    plt.figure(figsize=(17, 7))
    sns.distplot(modified["PJME_MW"])
def plot_real_and_prediction(dataframe):
    """Plot the "RealMW" and "PredictionMW" columns of dataframe as two lines.

    Each cell is assumed to hold a 1-element sequence; the first element is
    plotted (hence the v[0] unwrapping).
    """
    gt = dataframe["RealMW"]
    pred = dataframe["PredictionMW"]
    gt = [v[0] for v in list(gt)]
    pred = [v[0] for v in list(pred)]
    plt.figure(figsize=(15, 6))
    plt.plot(gt, 'b', linewidth=2.5, linestyle="-", label='real')
    plt.plot(pred, 'r', linewidth=2.5, linestyle="-", label='prediction')
    plt.legend(loc='best')
def plot_best_worst_day(best, worst, df):
    """Plot real-vs-prediction curves for the best and worst day.

    `best` / `worst` are pandas rows whose .name is a (year, month, day) index.
    """
    year_best, month_best, day_best = extract_year_month_day(list(best.name))
    year_worst, month_worst, day_worst = extract_year_month_day(list(worst.name))
    worst_df = df.loc[(df['Year'] == year_worst) & (df['Month'] == month_worst) & (df['Day'] == day_worst)]
    best_df = df[(df['Year'] == year_best) & (df['Month'] == month_best) & (df['Day'] == day_best)]
    plot_real_and_prediction(worst_df)
    plot_real_and_prediction(best_df)
    plt.show()
def extract_year_month_day(value):
    """Unpack the first three items of *value* as a (year, month, day) tuple."""
    year, month, day = value[0], value[1], value[2]
    return year, month, day
def plot_train_val_test(df):
    """Plot the chronological 80/10/10 train/validation/test split of the series.

    Side effect: re-indexes df by "Datetime" in place before splitting.
    """
    df.index = df["Datetime"]
    df = df.drop(columns=["Datetime"])
    # Chronological split: first 80% train, next 10% validation, last 10% test.
    train_x, test_x = train_test_split(df, test_size=0.2, shuffle=False)
    validation_x, test_x = train_test_split(test_x, test_size=0.5, shuffle=False)
    joined = test_x \
        .rename(columns={'PJME_MW': 'TEST SET'}) \
        .join(validation_x.rename(columns={'PJME_MW': 'VALIDATION SET'}), how='outer')
    joined = joined \
        .join(train_x.rename(columns={'PJME_MW': 'TRAIN SET'}), how='outer')\
        .plot(figsize=(15, 5), title='PJM East', style='.')
def plot_hourly_consumption(df):
    """Plot the mean PJME_MW per hour of day as a filled line chart."""
    mean_per_hour = df.groupby("Hour")["PJME_MW"].agg(["mean"])
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.plot(mean_per_hour.index, mean_per_hour["mean"], "k--", color="blue", lw=3)
    ax.fill_between(mean_per_hour.index, 0, mean_per_hour["mean"], color="blue", alpha=.3)
    # Pad the top of the y-range by 5% of the max for readability.
    upper_limit = mean_per_hour["mean"].max() + mean_per_hour["mean"].max() / 20
    lower_limit = mean_per_hour["mean"].min()
    ax.set_xticks(np.arange(len(mean_per_hour.index)))
    plt.ylim(top=upper_limit, bottom=lower_limit)
    plt.grid(True)
    plt.xlabel("Hour")
    plt.ylabel("Mean consumption (MW)")
    plt.title("Average consumption by hour")
def plot_daily_consumption(df):
    """Plot the mean PJME_MW per weekday (Monday..Sunday) as a filled line chart."""
    mean_per_day = df.groupby("Weekday")["PJME_MW"].agg(["mean"])
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.plot(mean_per_day.index, mean_per_day["mean"], "k--", color="blue", lw=3)
    ax.fill_between(mean_per_day.index, 0, mean_per_day["mean"], color="blue", alpha=.3)
    # Pad the top of the y-range by 5% of the max for readability.
    upper_limit = mean_per_day["mean"].max() + mean_per_day["mean"].max() / 20
    lower_limit = mean_per_day["mean"].min()
    ax.set_xticks(np.arange(len(mean_per_day.index)))
    # Assumes "Weekday" is encoded 0..6 with 0 = Monday — TODO confirm upstream.
    ax.set_xticklabels(["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"])
    plt.ylim(top=upper_limit, bottom=lower_limit)
    plt.grid(True)
    plt.xlabel("Day of week")
    plt.ylabel("Mean consumption (MW)")
    plt.title("Average consumption by day of week")
def plot_consumption_distribution(df):
    """Show the PJME_MW distribution as a histogram plus a shaded KDE overlay."""
    sns.distplot(df["PJME_MW"], bins=20, hist=True)
    sns.distplot(df["PJME_MW"], hist=False, color="g", kde_kws={"shade": True})
    plt.show()
def plot_dataset(df):
    """Plot the full PJM East load series (same view as plot_consumption).

    Side effect: re-indexes df by "Datetime" in place.
    """
    df.index = df["Datetime"]
    fig, ax = plt.subplots(figsize=(15, 5))
    plt.plot(df.index, df["PJME_MW"])
    plt.title("PJM East Coast Energy consumption")
    plt.ylabel("Consumed energy (MW)")
def data_discovery(dataframe):
    """Run the exploratory plots (quarterly, overall, hourly, daily) and show them."""
    consumption_distribution_quarter(dataframe)
    plot_consumption_distribution(dataframe)
    plot_hourly_consumption(dataframe)
    plot_daily_consumption(dataframe)
    plt.show()
| true |
def every_other(s, i=0):
    """Print every second character of s starting at index i (no newline).

    Always returns '' (the recursion's base-case value).
    """
    if i < len(s):
        print(s[i], end='')
        return every_other(s, i + 2)
    return ''
def every_other2(s, i=0):
    """Return every second character of s starting at index i."""
    return s[i] + every_other2(s, i + 2) if i < len(s) else ''
# Manual smoke tests: run when this module is executed directly.
print('should print every other char')
every_other("hello")
print('')
print('should print every other char')
every_other("kevin")
print('')
print('should return every other char', every_other2("hello"), every_other2("hello") == 'hlo')
print('should return every other char', every_other2("kevin"), every_other2("kevin") == 'kvn')
8efe1cecf9fda2d4dd356fd80163f5aaccff5a36 | Python | Nyrt/time_skip_RL | /basic_q_agent.py | UTF-8 | 4,042 | 3.171875 | 3 | [] | no_license | import gym
import numpy as np
import pandas
import random
import matplotlib.pyplot as plt
# Adapted from https://github.com/vmayoral/basic_reinforcement_learning/blob/master/tutorial4/README.md
class Q_agent:
    """Tabular Q-learning agent (Python 2: select_action uses `xrange`).

    epsilon: exploration probability; alpha: learning rate; gamma: discount.
    """

    def __init__(self, actions, epsilon, alpha, gamma):
        self.q = {}  # Q-learning table, indexed with (state, action) tuples
        self.epsilon = epsilon
        self.alpha = alpha
        self.gamma = gamma
        self.actions = actions

    # Gets the expected reward from a given action in a given state
    def get_Q(self, state, action):
        # Unseen (state, action) pairs default to 0.0.
        return self.q.get((state, action), 0.0)

    def learn(self, prev_state, prev_action, reward, state):
        # Get the best possible expected reward for this time step.
        max_q = max([self.get_Q(state, action) for action in self.actions])
        q_prev = self.q.get((prev_state, prev_action), None)
        if q_prev is None:
            # First visit: seed the table entry with the observed reward.
            self.q[(prev_state, prev_action)] = reward
        else:
            # Standard Q-learning update: Q += alpha * (target - Q).
            self.q[(prev_state, prev_action)] = q_prev + self.alpha * ((reward + self.gamma * max_q) - q_prev)

    def select_action(self, state):
        """Epsilon-greedy action selection with noise-based exploration."""
        q = np.array([self.get_Q(state, action) for action in self.actions], dtype=np.float64)
        max_q = np.max(q)
        if random.random() < self.epsilon:  # Explore!
            # Perturb all Q-values by uniform noise scaled to their magnitude.
            min_q = np.min(q)
            noise_magnitude = max(abs(min_q), abs(max_q))
            q += np.random.uniform(noise_magnitude / 2.0, -noise_magnitude / 2.0, q.shape)
            max_q = np.max(q)
        action = 0
        if np.sum(q == max_q) > 1:
            # Break ties among equally-good actions at random (Py2 `xrange`).
            action = random.choice([action for action in xrange(len(self.actions)) if q[action] == max_q])
        else:
            action = q.argmax()
        return self.actions[action]
def build_state(features):
    """Concatenate integer bin indices into one integer state identifier."""
    digits = [str(int(feature)) for feature in features]
    return int("".join(digits))
def to_bin(value, bins):
    """Return the np.digitize bin index that *value* falls into."""
    bin_indices = np.digitize([value], bins)
    return bin_indices[0]
if __name__ == '__main__':
    # Train the tabular Q-agent on CartPole-v0 (Python 2 script: uses
    # `xrange` and print statements).
    env = gym.make('CartPole-v0')
    ### Stolen ###
    max_number_of_steps = 201
    n_bins = 8
    n_bins_angle = 10
    number_of_features = env.observation_space.shape[0]
    last_time_steps = np.ndarray(0)
    # Number of states is huge so in order to simplify the situation
    # we discretize the space to: 10 ** number_of_features
    cart_position_bins = pandas.cut([-2.4, 2.4], bins=n_bins, retbins=True)[1][1:-1]
    pole_angle_bins = pandas.cut([-2, 2], bins=n_bins_angle, retbins=True)[1][1:-1]
    cart_velocity_bins = pandas.cut([-1, 1], bins=n_bins, retbins=True)[1][1:-1]
    angle_rate_bins = pandas.cut([-3.5, 3.5], bins=n_bins_angle, retbins=True)[1][1:-1]

    def make_state(observation):
        # Discretize the 4-dimensional observation into one integer state id.
        cart_position, pole_angle, cart_velocity, angle_rate_of_change = observation
        state = build_state([to_bin(cart_position, cart_position_bins),
                             to_bin(pole_angle, pole_angle_bins),
                             to_bin(cart_velocity, cart_velocity_bins),
                             to_bin(angle_rate_of_change, angle_rate_bins)])
        return state

    qlearn = Q_agent(actions=xrange(env.action_space.n),
                     alpha=0.5, gamma=0.90, epsilon=0.1)
    ### End stolen ###
    scores = []
    for i_episode in range(3000):
        observation = env.reset()
        state = make_state(observation)
        for t in range(max_number_of_steps):
            action = qlearn.select_action(state)
            observation, reward, done, info = env.step(action)
            next_state = make_state(observation)
            if not(done):
                qlearn.learn(state, action, reward, next_state)
                state = next_state
            else:  # learn that dying is bad!
                reward = -200
                qlearn.learn(state, action, reward, next_state)
                print "iteration %i score: %i"%(i_episode, t)
                scores.append(t)
                break
            # NOTE(review): this branch is unreachable — the `else` above
            # already breaks whenever `done` is true.
            if done:
                print("Episode finished after {} timesteps".format(t+1))
                break
    plt.plot(scores)
    plt.show()
f364c2f38dd7fcff3802db1117cd521dd02419c4 | Python | Vojtech-Sassmann/programmingProblemsAnalysis | /ASTAnalysis.py | UTF-8 | 7,702 | 2.640625 | 3 | [] | no_license | import codecs
import csv
import ast
import os
import sys
import math
from os import listdir
from os.path import isfile, join
from collections import namedtuple
from data import tasks
# searched_nodes = [
#     "+", "-", "*", "/", "for", "while", "print", "%", "if", "==", "is"
# ]
# AST node class names counted as features in each solution's vector.
searched_nodes = [
    "Add", "Sub", "Mult", "Div", "For", "While", "Print", "Mod", "If", "Eq", "Is",
]
# Destination CSV for the per-task averaged feature vectors.
output_path = "resources/tmp/test.csv"
# When True, feature counts are clamped to 0/1 (presence only).
binary = False
# solution_number = 1
# Number of top (most frequent) vectors averaged per task (name kept as-is,
# including its historical misspelling, since other code may reference it).
avarage_number = 5
minimal_vector_size = 2      # ignore vectors with fewer total feature hits
minimal_submitted = 300      # skip tasks with fewer submitted solutions
parsed_solutions_mode = True # input rows are pre-parsed single-column code
minimal_parsable = 10        # skip tasks with fewer parsable solutions
skip_print = False           # suppress console summaries when True
class Solution:
    """One distinct feature vector plus the raw source snippets that produced it.

    Equality and hashing are delegated to the feature vector, so Solution
    instances with equal vectors collide in sets/dicts.
    """

    def __init__(self, data_vector):
        self.data_vector = data_vector
        self.examples = []  # raw source snippets mapping to this vector
        self.count = 1

    def __hash__(self):
        return hash(self.data_vector)

    def __eq__(self, other):
        return self.data_vector == other.data_vector

    def add_solution(self, solution):
        """Record one more raw source snippet for this feature vector."""
        self.examples.append(solution)
class HashableDict(dict):
    """A dict usable as a set member / dict key: hashed via its sorted items."""

    def __hash__(self):
        items_in_order = tuple(sorted(self.items()))
        return hash(items_in_order)
class AnalyseResults:
    """Aggregated outcome of analyzing one task file."""

    def __init__(self):
        self.submitted = 0   # total raw solutions seen
        self.parsable = 0    # solutions that parsed into a Python AST
        self.correct = 0     # NOTE(review): never updated anywhere in this file
        self.solutions = {}  # Solution -> number of occurrences of its vector
class MyVisitor(ast.NodeVisitor):
    """AST walker that counts occurrences of the module-level `searched_nodes`
    node types into the caller-supplied data_vector dict."""

    data_vector = None

    def __init__(self, data_vector):
        self.data_vector = data_vector
        # Pre-seed every searched feature with a zero count.
        for searched_node in searched_nodes:
            data_vector[searched_node] = 0

    def generic_visit(self, node):
        node_type = type(node).__name__
        if node_type in searched_nodes:
            self.increase_data(node_type)
        # Continue the default traversal into child nodes.
        ast.NodeVisitor.generic_visit(self, node)

    def increase_data(self, key):
        # First occurrence always records 1; further occurrences accumulate
        # only when the module-level `binary` flag is off.
        if self.data_vector[key] == 0:
            self.data_vector[key] = 1
        else:
            if not binary:
                self.data_vector[key] += 1
def to_data_string(data):
    """Serialize the positive counts of *data* as a ';'-joined string.

    Only values > 0 are emitted, in the dict's iteration order. Returns ''
    when no value is positive (including for an empty dict).
    """
    # Idiom fix: build with str.join instead of manual `+=` plus a trailing
    # separator strip — same output, no quadratic concatenation.
    parts = [str(value) for value in data.values() if value > 0]
    return ";".join(parts)
def check_features(solution, data_vector):
    """Fill *data_vector* with substring-occurrence counts of each searched
    node name in *solution*; counts are clamped to 0/1 when the module-level
    `binary` flag is set."""
    for node in searched_nodes:
        occurrences = solution.count(node)
        if binary and occurrences > 0:
            occurrences = 1
        data_vector[node] = occurrences
def analyze_solution(raw_solution, results):
    """Parse one raw solution, extract its feature vector and record it in
    *results* (deduplicated by vector)."""
    data_vector = {}
    results.submitted += 1
    # The stored code has literal "\n" sequences; restore real newlines.
    solution_string = raw_solution.replace("\\n", "\n")
    try:
        tree = ast.parse(solution_string)
        MyVisitor(data_vector).visit(tree)
        results.parsable += 1
    except SyntaxError:
        # Unparsable code contributes only to the `submitted` counter.
        return
    result = HashableDict(data_vector)
    solution = Solution(result)
    # Ignore near-empty vectors below the configured size threshold.
    if calculate_vector_size(result) >= minimal_vector_size:
        if solution not in results.solutions:
            solution.add_solution(raw_solution)
            results.solutions[solution] = 1
        else:
            results.solutions[solution] += 1
            # NOTE(review): linear scan to find the stored equal Solution so the
            # example can be attached to it — O(n) per duplicate; a vector->
            # Solution index would make this O(1).
            for s in results.solutions:
                if s.__eq__(solution):
                    s.add_solution(raw_solution)
def parse_code(line):
    """Extract the submitted source from a CSV row, or None for non-SUBMIT rows.

    The code payload is column 2 with a prefix the length of column 0 removed;
    a valid payload starts with the literal tag "SUBMIT".
    """
    payload = line[2][len(line[0]):]
    if not payload.startswith("SUBMIT"):
        return None
    return payload[len("SUBMIT"):]
def print_results(results):
print("submitted: %s" % str(results.submitted))
print("parsable: %s" % str(results.parsable))
'''"" TODO""'''
# print("correct: %s" % str(results.correct))
# print("\n-----\n")
#
number_of_printed_solutions = 5
for solution, count in sorted(results.solutions.items(), key=lambda x: x[1], reverse=True):
if len(solution.data_vector) > 0:
print(solution.data_vector, "-> ", count)
number_of_printed_solutions -= 1
if number_of_printed_solutions is 0:
break
def print_header(file_name):
    """Print a banner marking the start of one file's analysis output."""
    print("--------------------------------------------------------------------------------")
    print("----- ", file_name, " -----")
def save_results(results, file_name):
    """Append one CSV row to `output_path`: the task name (file name minus its
    4-char extension) followed by the element-wise average of the top
    `avarage_number` most frequent feature vectors, formatted to 2 decimals.

    Tasks below the `minimal_submitted` / `minimal_parsable` thresholds are
    skipped entirely.
    """
    if results.submitted < minimal_submitted:
        return
    if results.parsable < minimal_parsable:
        return
    with codecs.open(output_path, 'a') as f:
        counter = 0
        average_data_vector = None
        f.write(file_name[:-4] + ";")
        sorted_solutions = sorted(results.solutions.items(), key=lambda x: x[1], reverse=True)
        for solution, value in sorted_solutions:
            counter += 1
            data_vector = solution.data_vector
            if counter == 1:
                # NOTE(review): this aliases (not copies) the first Solution's
                # vector, so the += / /= below mutate that Solution in place —
                # confirm no later code re-reads it.
                average_data_vector = data_vector
            else:
                for key in data_vector:
                    average_data_vector[key] += data_vector[key]
            if counter == avarage_number:
                break
        # Divide accumulated sums by the number of vectors actually folded in.
        for key in average_data_vector:
            average_data_vector[key] /= float(counter)
        first = True
        for node_type in searched_nodes:
            if first:
                f.write(str('%.2f' % average_data_vector[node_type]))
                first = False
            else:
                f.write(";" + str('%.2f' % average_data_vector[node_type]))
        f.write("\n")
def save_solutions(results, file_name):
    """Write up to 500 feature vectors (most frequent first) to
    resources/tmp/solutions/<file_name> (Python 2: uses `print >> f`)."""
    number_of_printed_feature_vectors = 500
    number_of_printed_feature_solutions = 20
    with codecs.open("resources/tmp/solutions/" + file_name, 'w', encoding="UTF-8") as f:
        for solution, count in sorted(results.solutions.items(), key=lambda x: x[1], reverse=True):
            if len(solution.data_vector) > 0:
                print >> f, solution.data_vector, count
                # Example snippets are currently disabled (write commented out);
                # the counter still limits the loop to 20 examples per vector.
                for example in solution.examples:
                    number_of_printed_feature_solutions -= 1
                    # print >> f, example
                    if number_of_printed_feature_solutions <= 0:
                        number_of_printed_feature_solutions = 20
                        break
                number_of_printed_feature_vectors -= 1
                if number_of_printed_feature_vectors == 0:
                    break
def analyze_file(file_name):
    """Analyze one parsed task file: deduplicate consecutive identical
    submissions, feed each distinct one to analyze_solution, then report and
    persist the results."""
    print_header(file_name)
    results = AnalyseResults()
    with codecs.open("resources/tasks/parsed/" + file_name, 'rb', encoding='UTF-8') as f:
        reader = csv.reader(f, delimiter=';', quoting=csv.QUOTE_NONE)
        previous_code = None
        for line in reader:
            # Fixed: row-length checks used identity (`is 1` / `is 3`) on ints,
            # which only works by accident of CPython small-int caching — use ==.
            # The duplicated analyze/track logic of the two modes is also merged.
            if parsed_solutions_mode:
                code = line[0] if len(line) == 1 else None
            else:
                code = parse_code(line) if len(line) == 3 else None
            # Skip rows with no code and consecutive duplicates of the same code.
            if code is not None and previous_code != code:
                analyze_solution(code, results)
                previous_code = code
    # NOTE(review): only console output is gated by skip_print; the two save
    # calls are assumed unconditional — confirm against the original layout.
    if not skip_print:
        print_results(results)
    save_solutions(results, file_name)
    save_results(results, file_name)
def calculate_vector_size(v):
    """Return the total of all feature counts in vector *v* (a dict)."""
    # Idiom fix: sum the dict's values directly instead of a manual
    # accumulate-by-key loop.
    return sum(v.values())
def save_header():
    """Create/truncate `output_path` and write its header row:
    "name;<node>;<node>;..." (Python 2: uses `print >> f`)."""
    header = "name"
    with codecs.open(output_path, 'w') as f:
        for node in searched_nodes:
            header += ";"
            header += node
        # print(header, file=f)
        print >> f, header
def analyze_files():
    """Write the CSV header, then analyze every file in the parsed-tasks dir."""
    path = 'resources/tasks/parsed'
    save_header()
    files = [f for f in listdir(path) if isfile(join(path, f))]
    for f in files:
        analyze_file(f)


# Module entry point: runs the full analysis on import/execution.
analyze_files()
| true |
72e8a134272d57f0692fe99ea8bb30160b8b48ce | Python | wudlike/spherical-coordinates-transform- | /sph_coor_transf.py | UTF-8 | 2,556 | 2.96875 | 3 | [] | no_license | import numpy as np
def point_change(init_lon, w_e, init_lat, s_n, r, psi):
    """Move a point on the unit sphere by arc length *r* along bearing *psi*.

    Parameters
    ----------
    init_lon : float  starting longitude, degrees
    w_e : str         'W'/'E' hemisphere flag of the starting longitude
    init_lat : float  starting latitude, degrees
    s_n : str         'S'/'N' flag of the start (kept for interface
                      compatibility; currently unused)
    r : float         angular distance moved, in radians (should be < pi/2)
    psi : float       initial angular separation (bearing), in degrees

    Returns
    -------
    (angle_change, new_longi, W_E, new_lati, S_N): new angular separation in
    degrees, new longitude/latitude in degrees, and their hemisphere flags.
    """
    psi = np.deg2rad(psi)
    # Colatitude of the starting point.
    ang_c = np.pi/2 - np.deg2rad(init_lat)
    # Spherical law of cosines: cosine of the new colatitude.
    cos_a = np.cos(r)*np.cos(ang_c) + np.sin(r)*np.sin(ang_c)*np.cos(psi)
    new_lati = np.abs(90 - np.rad2deg(np.arccos(cos_a)))
    # Longitude offset from the spherical law of sines.
    ang_B = np.rad2deg(np.arcsin(np.sin(psi)*np.sin(r)/np.sqrt(1 - cos_a**2)))
    a_val = abs(np.arccos(cos_a))
    # Provisional value; normally overwritten by one of the branches below.
    new_longi = np.abs(init_lon - ang_B)
    # East or west longitude.
    # Fixed: hemisphere flags were compared with identity (`is 'W'`/`is 'E'`),
    # which only works by accident of CPython string interning — use ==.
    if w_e == 'W':
        if (init_lon + ang_B) <= 180 and (init_lon + ang_B) > 0:
            new_longi = init_lon + ang_B
            W_E = 'W'
        elif init_lon + ang_B > 180:
            # Crossed the antimeridian: wrap into the eastern hemisphere.
            new_longi = abs(init_lon + ang_B - 360)
            W_E = 'E'
        elif init_lon + ang_B < 0:
            new_longi = abs(init_lon + ang_B)
            W_E = 'E'
    elif w_e == 'E':
        if (init_lon - ang_B) <= 180 and (init_lon - ang_B) > 0:
            new_longi = init_lon - ang_B
            W_E = 'E'
        elif init_lon - ang_B > 180:
            new_longi = abs(init_lon - ang_B - 360)
            W_E = 'E'
        elif init_lon - ang_B < 0:
            # Crossed the prime meridian into the western hemisphere.
            new_longi = abs(init_lon - ang_B)
            W_E = 'W'
    # South or north latitude.
    if (np.pi/2 - a_val) > 0:
        S_N = 'N'
    else:
        S_N = 'S'
    # Bearing at the destination (law of cosines solved for the angle),
    # reported as its supplement.
    angle_change = np.rad2deg(
        np.arccos((np.cos(ang_c) - cos_a*np.cos(r))/(np.sqrt(1 - cos_a**2)*np.sin(r))))
    angle_change = 180 - angle_change
    return angle_change, new_longi, W_E, new_lati, S_N
if __name__ == "__main__":
# The unit of r is rad
# The moving step_size should be less than 90*pi/180
'''
# input from terminal
init_longitude, w_e_input, init_latitude, s_n_input, move_step_size, init_included_angle = \
map(str,input('Enter initial_longitude, latitude, moving step_size and angular separation,\n, e.g., 60 E 30 N 1 120 >>> ').split())
init_longitude = float(init_longitude)
init_latitude = float(init_latitude)
move_step_size = float(move_step_size)
init_included_angle = float(init_included_angle)
included_angle, new_longi, w_e, new_lati, s_n = \
point_change(init_longitude, w_e_input, init_latitude, s_n_input, move_step_size, init_included_angle)
'''
#input from code
included_angle, new_longi, w_e, new_lati, s_n = point_change(
0, 'W', 0, 'N', 60*np.pi/180, 90)
print(('new angular separation = %.2f\nnew position = (%.2f' +
w_e+', %.2f'+s_n+')') % (included_angle, new_longi, new_lati))
| true |
f5fdc435cec97770a7dc562f82b26c316cce02c4 | Python | s152b/py-crawler | /PyCrawler/web_crawler/house_price/house_crawler.py | UTF-8 | 3,348 | 2.59375 | 3 | [] | no_license | # -*- coding: utf-8
## sinyi crawler, with thread
from bs4 import BeautifulSoup
from threading import Thread,Lock
import Queue
import sqlite3
import pandas as pd
import datetime,requests,ipdb
import time
def getZipCode(address):
    """Return the postal zip code for a Chinese address via the mosky.tw API."""
    api_url = 'http://zipcode.mosky.tw/api/find?address=' + address
    response = requests.get(api_url)
    return int(response.json()['result'])
def doCrawl(*args):
    """Worker task for a thread: drain the shared queue, crawling each job."""
    job_queue = args[0]
    while job_queue.qsize() > 0:
        job_queue.get().toCrawl()
# NOTE(review): this runs a network request at import time to obtain a session
# cookie for all subsequent POSTs — consider moving it behind a function/main.
url='http://buy.sinyi.com.tw/cgi/search/listSearch.json'
resTest = requests.get(url)
cookie=resTest.headers['set-cookie']
headers={'Cookie':cookie}
class CrawlerJob:
def __init__(self,page,headers):
self.page = page
# self.params = {"returnParams":"NO,name,address,areaLand,areaBuilding,areaBuildingMain,areaBalcony,price,type,use,room,hall,bathroom,age,floor,lift,parking",\
self.params = {"returnParams":"NO,name,address,areaLand,areaBuilding,areaBuildingMain,areaBalcony,price,priceFirst,discount,type,use,room,hall,bathroom,openroom,roomplus,hallplus,bathroomplus,openroomplus,age,floor,staffpick,decoar,pingratesup,lift,parking,customize,keyword",\
"page":page,"limit":"30"}
self.headers = headers
self.lock = Lock()
# req = requests.post(self.url,self.params,headers=self.headers)
# self.totalpages = req.json()['OPT']['totalPage'] # totalPages
def toCrawl(self):
# scrape the page and store it into db
# print "crawling...page:%d"%self.page
global TOTALPAGE
req = requests.post(url,self.params,headers=self.headers)
try:
TOTALPAGE = req.json()['OPT']['totalPage']
print "page:%d/%d,resopnse:%s"%(self.page,TOTALPAGE,req)
except keyError:
print "key Error..."
# ipdb.set_trace()
with self.lock:
dataPage = pd.DataFrame(req.json()['OPT']['List'])
today = datetime.date.today()
dataPage["crawlDate"]=today
dataPage["source"] = u'信義房屋'
print "getting zipcode on page:%d"%self.page
zipCodeList = [getZipCode(e) for e in dataPage['address']]
zipCode = pd.Series(zipCodeList)
dataPage['zipCode']=zipCode
print "storing page %d to database..."%self.page
con = sqlite3.dbapi2.connect('house_price.db')
dataPage.to_sql("house",con,if_exists='append',index=False)
con.close()
# --- Top-level driver (Python 2): enqueue all pages, crawl with 10 threads ---
start = datetime.datetime.now()
# queue: put the CrawleJob into que (qsize=no. of pages)
que = Queue.Queue()
# job = CrawlerJob(page=1)
totalpages = 2731  # known page count of the listing search at crawl time
for page, _ in enumerate(range(totalpages), 1):
    # print page
    que.put(CrawlerJob(page, headers))
print("[Info] Queue size={0}...\n".format(que.qsize()))
# open multi-thread
tds = []
noOfthreads = 10
for i in range(noOfthreads):
    td = Thread(target=doCrawl, args=(que,))
    # thread.daemon=True
    td.start()
    tds.append(td)
for td in tds:
    # NOTE(review): the while/sleep/break pattern sleeps at most once per
    # still-alive thread before a 1-second join — it does not actually wait
    # for completion. Confirm whether a plain td.join() was intended.
    while td.is_alive():
        time.sleep(30)
        break
    td.join(timeout=1)
end = datetime.datetime.now()
print "[Info] Spending time={0}!".format(end-start)
| true |
ce5ad0ce2c6fa081b36a7a777091590dde1ba4ef | Python | utrade/muTradeApi2 | /SampleCode-I20_RHEL8/src/benchmarking/benchmarkStats.py | UTF-8 | 2,327 | 2.765625 | 3 | [] | no_license | import numpy as np
from os import listdir
from os.path import isfile, join
def processFile(inputFile):
#inputFile = raw_input("Enter input file:")
a = np.array([])
iFile = open(inputFile, "r")
x=iFile.readline().split(",")
maxCount = 50000
count=0
for i in x:
count +=1
if count >maxCount:
break
if i.isdigit():
a= np.append(a,int(i))
outputFile = inputFile +"_Conclusion";
oFile = open(outputFile, "w")
oFile.write("Count:"+ str(np.size(a)) + "\n")
oFile.write("Median:" + str(np.median(a)) + "\n")
oFile.write("Percentiles:\n")
oFile.write("50 Percentile:" + str(np.percentile(a, 50)) + "\n")
oFile.write("90 Percentile:" + str(np.percentile(a, 90)) + "\n")
oFile.write("95 Percentile:" + str(np.percentile(a, 95)) + "\n")
oFile.write("99 Percentile:" + str(np.percentile(a, 99)) + "\n")
oFile.write("99.9 Percentile:" + str(np.percentile(a, 99.9)) + "\n")
oFile.write("99.99 Percentile:" + str(np.percentile(a, 99.99)) + "\n")
for i in range(100):
oFile.write(str(i)+ " Percentile:" + str(np.percentile(a, i)) + "\n")
oFile.close()
def displayConclusions(fileName):
    """Print and return a one-line summary built from the first two lines of a
    *_Conclusion file: "<name>== Count: <n> Median: <ms>" (median scaled by
    1/1000, presumably ns -> us or us -> ms — TODO confirm units).

    NOTE(review): the opened file is never closed. Python 2 print statement.
    """
    iFile = open(fileName,"r")
    x = fileName.replace("LatencyNumbers_","").replace("_Conclusion","") + "== Count: " + str(int(iFile.readline().strip().split(":")[1])) + " Median: " + str(float(iFile.readline().strip().split(":")[1])/1000)
    print x
    return x
mypath = "."
files = [ f for f in listdir(mypath) if isfile(join(mypath,f)) ]
fileList = []
for i in files:
if "LatencyNumbers_" in i and not "_Conclusion" in i :
fileList.append(i)
for i in fileList:
processFile(i)
files = [ f for f in listdir(mypath) if isfile(join(mypath,f)) ]
fileList = []
for i in files:
if "_Conclusion" in i :
fileList.append(i)
latencyNumbers = {}
outFile = open("LatencyNumbers", "w");
internals = []
for i in fileList:
if "Internal" in i:
internals.append(i)
others = []
for i in fileList:
if not "Internal" in i:
others.append(i)
print "Internals\n=============================================\n"
for i in internals:
x = displayConclusions(i)
outFile.write(x+ "\n")
print "Others\n=============================================\n"
for i in others:
x = displayConclusions(i)
outFile.write(x+ "\n")
| true |
74e17324d438086a8de5fddaabbf2b58eb22fa31 | Python | Aasthaengg/IBMdataset | /Python_codes/p03231/s345072979.py | UTF-8 | 264 | 2.546875 | 3 | [] | no_license | import fractions
N,M=map(int,input().split())
S=input()
T=input()
n=N//fractions.gcd(N,M)
m=M//fractions.gcd(N,M)
g=fractions.gcd(N,M)
judge=True
for i in range(g):
if S[i*n]!=T[i*m]:
judge=False
if judge==True:
print(N*M//g)
else:
print(-1) | true |
eeba37421d5593dbbbd123aaa8ad627f4f3d1a65 | Python | RonenNess/Fileter | /tests/test_sources.py | UTF-8 | 6,646 | 3.390625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the file filters.
"""
import fileter
import unittest
class TestSources(unittest.TestCase):
    """
    Unittests to test file sources.
    """

    def _list_by_iter(self, source):
        """
        Helper: iterate over a source and return a list with all files in it,
        i.e. convert from source to list of files using `for x in y` syntax.
        """
        iter_ret = []
        for i in source:
            iter_ret.append(i)
        return iter_ret

    def __fix_sep(self, values):
        """
        Unify path separators to match win/linux shenanigans.
        We just replace all separators to be /
        """
        return [i.replace("\\", "/") for i in values]

    def __test_source(self, source, expected):
        """
        Test a given files source with all methods - by get_all() and by iteration.
        """
        expected = self.__fix_sep(expected)
        self.assertListEqual(self.__fix_sep(source.get_all()), expected)
        self.assertListEqual(self.__fix_sep(self._list_by_iter(source)), expected)

    def test_creating_source(self):
        """
        Testing creation of a basic filter.
        """
        # create a custom source
        # NOTE(review): `raise StopIteration` inside a generator becomes a
        # RuntimeError on Python 3.7+ (PEP 479) — confirm the target runtime.
        class TestSource(fileter.sources.SourceAPI):
            def next(self):
                yield "1"
                yield "2"
                raise StopIteration

        _test = TestSource()
        # test getting all files in source as list
        self.__test_source(_test, ["1", "2"])

    def test_file_source(self):
        """
        Test the basic file(s) source.
        """
        # basic file source with a single file
        _test = fileter.sources.FileSource("test.exe")
        self.__test_source(_test, ["test.exe"])
        # file source with a list of files
        _test = fileter.sources.FileSource(["test.exe", "foo.bar"])
        self.__test_source(_test, ["test.exe", "foo.bar"])

    def test_folder_source(self):
        """
        Test the folder source.
        Take a look at test_dir for expected values.
        """
        # iterate files with a single depth level
        _test = fileter.sources.FolderSource("test_dir", 0)
        self.__test_source(_test, ['test_dir/0_a', 'test_dir/0_b', 'test_dir/0_c.txt'])

        # iterate files with a 1-depth level
        _test = fileter.sources.FolderSource("test_dir", 1)
        self.__test_source(_test, ['test_dir/0_a', 'test_dir/0_b', 'test_dir/0_c.txt',
                                   'test_dir/depth1/1_a', 'test_dir/depth1/1_b.exe',
                                   'test_dir/foo/bar.txt'])

        # iterate files without depth limit
        _test = fileter.sources.FolderSource("test_dir")
        self.__test_source(_test, ['test_dir/0_a', 'test_dir/0_b', 'test_dir/0_c.txt',
                                   'test_dir/depth1/1_a', 'test_dir/depth1/1_b.exe',
                                   'test_dir/depth1/depth2/2_a', 'test_dir/depth1/depth2/bar.txt',
                                   'test_dir/depth1/depth2/depth3/3',
                                   'test_dir/foo/bar.txt'])

        # iterate *folders* with unlimited depth level
        _test = fileter.sources.FolderSource("test_dir", ret_files=False, ret_folders=True)
        self.__test_source(_test, ['test_dir', 'test_dir/depth1', 'test_dir/depth1/depth2',
                                   'test_dir/depth1/depth2/depth3', 'test_dir/foo'])

        # iterate *folders and files* with a single depth level
        _test = fileter.sources.FolderSource("test_dir", depth_limit=0, ret_files=True, ret_folders=True)
        self.__test_source(_test, ['test_dir', 'test_dir/0_a', 'test_dir/0_b', 'test_dir/0_c.txt'])

    def test_filtered_folder_source(self):
        """
        Test the filtered folders source.
        Take a look at test_dir for expected values.
        """
        # filter only things inside "depth1" folder.
        _test = fileter.sources.FilteredFolderSource("test_dir", ".*depth1.*")
        self.__test_source(_test, ['test_dir/depth1/1_a', 'test_dir/depth1/1_b.exe',
                                   'test_dir/depth1/depth2/2_a', 'test_dir/depth1/depth2/bar.txt',
                                   'test_dir/depth1/depth2/depth3/3'])

        # filter out directories that have '3' in their name, eg "depth3"
        _test = fileter.sources.FilteredFolderSource("test_dir", "(?!^.*depth3.*$).*")
        self.__test_source(_test, ['test_dir/0_a', 'test_dir/0_b', 'test_dir/0_c.txt',
                                   'test_dir/depth1/1_a', 'test_dir/depth1/1_b.exe',
                                   'test_dir/depth1/depth2/2_a', 'test_dir/depth1/depth2/bar.txt',
                                   'test_dir/foo/bar.txt'])

        # filter only things inside "depth1" folder, but only with 1 depth level.
        _test = fileter.sources.FilteredFolderSource("test_dir", ".*depth1.*", 1)
        self.__test_source(_test, ['test_dir/depth1/1_a', 'test_dir/depth1/1_b.exe'])

        # iterate *folders* with unlimited depth level
        _test = fileter.sources.FilteredFolderSource("test_dir", ".*depth1.*", ret_files=False, ret_folders=True)
        self.__test_source(_test, ['test_dir/depth1', 'test_dir/depth1/depth2', 'test_dir/depth1/depth2/depth3'])

        # iterate *folders and files* with a single depth level
        _test = fileter.sources.FilteredFolderSource("test_dir", ".*test_dir.*", depth_limit=0,
                                                     ret_files=True, ret_folders=True)
        self.__test_source(_test, ['test_dir', 'test_dir/0_a', 'test_dir/0_b', 'test_dir/0_c.txt'])

    def test_pattern_source(self):
        """
        Test the pattern files source.
        Take a look at test_dir for expected values.
        """
        # only files inside "depth1" folder.
        _test = fileter.sources.PatternSource("*depth1*", "test_dir")
        self.__test_source(_test, ['test_dir/depth1/1_a', 'test_dir/depth1/1_b.exe',
                                   'test_dir/depth1/depth2/2_a', 'test_dir/depth1/depth2/bar.txt',
                                   'test_dir/depth1/depth2/depth3/3'])

        # only files in depth 2
        _test = fileter.sources.PatternSource("*/depth2/*", "test_dir")
        self.__test_source(_test, ['test_dir/depth1/depth2/2_a', 'test_dir/depth1/depth2/bar.txt',
                                   'test_dir/depth1/depth2/depth3/3'])

        # filter exe file in 1 level deep
        _test = fileter.sources.PatternSource("*.exe", "test_dir", 1)
        self.__test_source(_test, ['test_dir/depth1/1_b.exe'])
| true |
31929518da193b45ba838f29a3663d754a981951 | Python | pactg97/Codigos_TFM | /Codigos/norm_min_cut_dendogram.py | UTF-8 | 7,338 | 3.03125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 8 13:07:58 2021
@author: 34625
"""
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
from itertools import chain, combinations
def normalized_cut(G, c):
    """Normalized-cut value of the bipartition c = [part0, part1] of G.

    NOTE(review): indexes the adjacency matrix with the node labels
    themselves, so it assumes nodes are labelled 0..n-1 -- confirm before
    using on relabelled graphs (this helper is unused by the script below).
    """
    A = nx.adjacency_matrix(G)
    n = G.order()
    # Total weight of edges crossing from part0 to part1.
    cut = sum(sum(A[i, j] for j in range(n) if j in c[1]) for i in range(n) if i in c[0])
    # Total connection weight of each part to the whole graph.
    assoc_first = sum(sum(A[i, j] for j in range(n)) for i in range(n) if i in c[0])
    assoc_second = sum(sum(A[i, j] for j in range(n)) for i in range(n) if i in c[1])
    return cut / assoc_first + cut / assoc_second
## A partir de nuestro grafo calculamos la matriz L y el autovector asociado al segundo autovalor más pequeño
def normalized_min_cut(graph):
    """Bisect `graph` with the normalized-cut spectral method.

    For a connected graph, builds the symmetric normalized Laplacian
    D^{-1/2} (D - A) D^{-1/2}, takes the eigenvector of the second-smallest
    eigenvalue and splits the nodes by the sign of their component.

    Returns
    -------
    (parts, colors):
        parts  -- for a connected graph, [negative-side set, positive-side set];
                  for a disconnected graph, its list of connected components.
        colors -- one color string per node, in graph.nodes() order, usable
                  directly as a node_color list when drawing.

    References
    ----------
    J. Shi and J. Malik, *Normalized Cuts and Image Segmentation*,
    IEEE Transactions on Pattern Analysis and Machine Learning, vol. 22, pp. 888-905
    """
    nodos=list(graph.nodes())
    if(nx.is_connected(graph)):
        m_adjacency = np.array(nx.to_numpy_matrix(graph))
        D = np.diag(np.sum(m_adjacency, 0))
        D_half_inv = np.diag(1.0 / np.sqrt(np.sum(m_adjacency, 0)))
        M = np.dot(D_half_inv, np.dot((D - m_adjacency), D_half_inv))
        # NOTE(review): np.linalg.eig on a symmetric matrix can return tiny
        # complex parts; np.linalg.eigh would be the numerically safer choice.
        (w, v) = np.linalg.eig(M)
        #find index of second smallest eigenvalue
        index = np.argsort(w)[1]
        v_partition = v[:, index]
        v_partition = np.sign(v_partition)
        A=set()
        B=set()
        colors=[]
        # Split nodes by the sign of their eigenvector entry.
        for i in range(len(v_partition)):
            if(v_partition[i]>0):
                A.add(nodos[i])
                colors.append("red")
            else:
                B.add(nodos[i])
                colors.append("green")
        return [B,A],colors
    else:
        # Disconnected graph: the natural partition is its components.
        comunidades=list(nx.connected_components(graph))
        colores=["red" for i in range(graph.order())]
        return comunidades,colores
def comunidades_por_etapa(graph):
    """Build a hierarchy of partitions by repeatedly bisecting communities
    with normalized_min_cut until every community is a single node.

    Returns a list of partitions (tuples of node sets), one per stage.
    """
    total = graph.order()
    initial, _ = normalized_min_cut(graph)
    stages = [tuple(initial)]
    # Keep refining while the latest partition still has a multi-node community.
    while len(stages[-1]) <= total - 1:
        refined = []
        for community in stages[-1]:
            if len(community) == 1:
                # Singletons cannot be split further; carry them over as-is.
                refined.append(community)
            else:
                subgraph = graph.subgraph(list(community))
                halves, _ = normalized_min_cut(subgraph)
                refined.append(halves[0])
                refined.append(halves[1])
        stages.append(tuple(refined))
    return stages
## Ejemplo: obtenido del artículo " M.E.J. Newman, Modularity and community structure
## in networks, Proc. Natl. Acad. Sci. 103 (23) (2006) 8577–8582"
## Represento gráficamente el grafo original
G6=nx.karate_club_graph()
mapping={}
N=G6.order()
# Relabel nodes 0..N-1 to 1..N so the drawing and dendrogram use 1-based labels.
for i in range(N):
    mapping[i]=i+1
G7=nx.relabel_nodes(G6,mapping)
plt.figure(figsize=(8, 6))
# First spectral bisection; `colores` colors each node by its side of the cut.
biparticion,colores=normalized_min_cut(G7)
nx.draw_shell(G7,with_labels=True,node_color=colores,node_size=700,font_size=18)
#plt.savefig("zachary_norm_min_cut.jpg")
# Full hierarchy of partitions, one tuple of communities per bisection stage.
communities=comunidades_por_etapa(G7)
# building initial dict of node_id to each possible subset:
node_id = 0
# Dendrogram node 0 is the root: the union of the first bipartition.
init_node2community_dict = {node_id: communities[0][0].union(communities[0][1])}
for comm in communities:
    for subset in list(comm):
        if subset not in init_node2community_dict.values():
            node_id += 1
            init_node2community_dict[node_id] = subset
# turning this dictionary to the desired format in @mdml's answer
node_id_to_children = {e: [] for e in init_node2community_dict.keys()}
for node_id1, node_id2 in combinations(init_node2community_dict.keys(), 2):
    for node_id_parent, group in init_node2community_dict.items():
        # Two disjoint subsets whose union equals `group` are its children.
        if len(init_node2community_dict[node_id1].intersection(init_node2community_dict[node_id2])) == 0 and group == init_node2community_dict[node_id1].union(init_node2community_dict[node_id2]):
            node_id_to_children[node_id_parent].append(node_id1)
            node_id_to_children[node_id_parent].append(node_id2)
# also recording node_labels dict for the correct label for dendrogram leaves
# (note: this loop reuses the name `node_id` from above as its loop variable)
node_labels = dict()
for node_id, group in init_node2community_dict.items():
    if len(group) == 1:
        node_labels[node_id] = list(group)[0]
    else:
        node_labels[node_id] = ''
# also needing a subset to rank dict to later know within all k-length merges which came first
subset_rank_dict = dict()
rank = 0
for e in communities[::-1]:
    for p in list(e):
        # NOTE(review): membership is tested with tuple(p) but keys are stored
        # as tuple(sorted(p)) -- confirm these agree for the node types used,
        # otherwise some subsets can be ranked more than once.
        if tuple(p) not in subset_rank_dict:
            subset_rank_dict[tuple(sorted(p))] = rank
            rank += 1
subset_rank_dict[tuple(sorted(chain.from_iterable(communities[-1])))] = rank
# my function to get a merge height so that it is unique (probably not that efficient)
def get_merge_height(sub):
    """Return a unique dendrogram height for merging the community `sub`.

    The integer part is the community size; the fractional part (scaled by
    0.8) ranks this merge among all merges of the same size via the
    module-level subset_rank_dict, keeping heights distinct. Also reads the
    module-level node_labels mapping.
    """
    sub_tuple = tuple(sorted([node_labels[i] for i in sub]))
    n = len(sub_tuple)
    other_same_len_merges = {k: v for k, v in subset_rank_dict.items() if len(k) == n}
    min_rank, max_rank = min(other_same_len_merges.values()), max(other_same_len_merges.values())
    # Renamed from `range` so the builtin is no longer shadowed; falls back
    # to 1 when all same-length merges share a single rank (avoids /0).
    rank_span = (max_rank - min_rank) if max_rank > min_rank else 1
    return float(len(sub)) + 0.8 * (subset_rank_dict[sub_tuple] - min_rank) / rank_span
# finally using @mdml's magic, slightly modified:
# Model the dendrogram as a directed graph: parent community -> child communities.
G = nx.DiGraph(node_id_to_children)
nodes = G.nodes()
leaves = set( n for n in nodes if G.out_degree(n) == 0 )
inner_nodes = [ n for n in nodes if G.out_degree(n) > 0 ]
# Compute the size of each subtree
subtree = dict( (n, [n]) for n in leaves )
for u in inner_nodes:
    children = set()
    node_list = list(node_id_to_children[u])
    # Breadth-first walk gathering every descendant; keep only the leaves.
    while len(node_list) > 0:
        v = node_list.pop(0)
        children.add( v )
        node_list += node_id_to_children[v]
    subtree[u] = sorted(children & leaves)
inner_nodes.sort(key=lambda n: len(subtree[n])) # <-- order inner nodes ascending by subtree size, root is last
# Construct the linkage matrix
leaves = sorted(leaves)
index = dict( (tuple([n]), i) for i, n in enumerate(leaves) )
Z = []
k = len(leaves)
# NOTE(review): the outer loop variable `i` is clobbered by the tuple
# unpacking in the inner loop; harmless here, but easy to trip over.
for i, n in enumerate(inner_nodes):
    children = node_id_to_children[n]
    x = children[0]
    for y in children[1:]:
        z = tuple(sorted(subtree[x] + subtree[y]))
        i, j = index[tuple(sorted(subtree[x]))], index[tuple(sorted(subtree[y]))]
        Z.append([i, j, get_merge_height(subtree[n]), len(z)]) # <-- float is required by the dendrogram function
        index[z] = k
        subtree[z] = list(z)
        x = z
        k += 1
# dendrogram
plt.figure(figsize=(14, 7))
dendrogram(Z,leaf_font_size=13, labels=[node_labels[node_id] for node_id in leaves])
#plt.savefig('dendrogram_norm_min_cut.jpg')
f9ad3bb4a3df51167da6aa71484912750cbbfb05 | Python | LOVEDEEPKAUR5/ML1 | /app.py | UTF-8 | 1,821 | 2.796875 | 3 | [] | no_license | import streamlit as st
from PIL import Image
import pickle
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
st.set_option('deprecation.showfileUploaderEncoding', False)
# Load the pickled model
# NOTE(review): hard-coded Colab/Google-Drive paths; the file handle opened
# below is never closed, and unpickling is only safe for a trusted model file.
pickle_in = open("/content/drive/My Drive/decision_model.pkl","rb")
model=pickle.load(pickle_in)
dataset= pd.read_csv('/content/drive/My Drive/Classification Dataset2.csv')
# Columns 2 and 3 of the CSV are the two features used by the model.
X = dataset.iloc[:, [2, 3]].values
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# Fit the scaler on the training features so user input can be transformed
# identically inside predict_note_authentication.
X = sc.fit_transform(X)
def predict_note_authentication( Gender,Glucose,BP, BMI,Age,Outcome):
  # Returns a human-readable prediction string for display in the UI.
  # NOTE(review): only Age and Outcome reach the model; Gender/Glucose/BP/BMI
  # are accepted but ignored -- confirm against the features the model was
  # trained on (the module-level scaler was fitted on dataset columns [2, 3]).
  output= model.predict(sc.transform([[Age,Outcome]]))
  print("Purchased", output)
  # model.predict returns an array-like; `output==[1]` relies on its
  # elementwise equality being truthy for a single matching element.
  if output==[1]:
    prediction="person will have that diseases"
  else:
    prediction="person will not have that diseases"
  print(prediction)
  return prediction
def main():
    """Render the Streamlit page: title, HTML banner, six text inputs,
    and the Predict / About buttons."""
    st.title("Person Diseases")
    # Raw HTML banner rendered under the default title.
    html_temp = """
    <div class="" style="background-color:blue;" >
    <div class="clearfix">
    <div class="col-md-12">
    <center><p style="font-size:40px;color:white;margin-top:10px;">Person Diseases</p></center>
    </div>
    </div>
    </div>
    """
    st.markdown(html_temp,unsafe_allow_html=True)
    # Free-text inputs; predict_note_authentication only uses Age and Outcome.
    Gender = st.text_input("Gender","Type Here")
    Glucose = st.text_input("Glucose","Type Here")
    BP = st.text_input("BP","Type Here")
    BMI = st.text_input("BMI","Type Here")
    Age = st.text_input("Age","Type Here")
    Outcome = st.text_input("Outcome","Type Here")
    # (Removed the unused `resul = ""` assignment that was here.)
    if st.button("Predict"):
        result=predict_note_authentication( Gender,Glucose,BP, BMI,Age,Outcome)
        st.success('Model has predicted {}'.format(result))
    if st.button("About"):
        st.text("Developed by Lovedeep kaur")
        st.text("Student , Department of Computer Engineering")

if __name__=='__main__':
    main()
| true |
e99075d040b2f5e8f30c5231f3315b4b5e0951cf | Python | aptend/leetcode-rua | /Python/1041 - Robot Bounded In Circle/1041_robot-bounded-in-circle.py | UTF-8 | 814 | 2.734375 | 3 | [] | no_license | from leezy import solution, Solution
class Q1041(Solution):
    @solution
    def isRobotBounded(self, instructions):
        """Bounded iff one pass leaves the robot turned, or back at the origin."""
        # 28ms 89.40%
        dx, dy = 0, 1   # heading; starts facing north
        x, y = 0, 0     # position
        for step in instructions:
            if step == 'L':
                dx, dy = -dy, dx
            elif step == 'R':
                dx, dy = dy, -dx
            else:
                x += dx
                y += dy
        # Unbounded only when the robot ends displaced AND still faces north.
        return (dx, dy) != (0, 1) or (x, y) == (0, 0)
def main():
    """Register the sample cases with the leezy harness and run them."""
    q = Q1041()
    q.add_case(q.case('GGLLGG').assert_equal(True))
    q.add_case(q.case('GG').assert_equal(False))
    q.add_case(q.case('GL').assert_equal(True))
    q.add_case(q.case('GLRLLGLL').assert_equal(True))
    q.run()
if __name__ == '__main__':
    main()
| true |
fd182abde225313a8d149a4b9e4fd2a6cdf5eda6 | Python | intermezzio/differential-transistor-analysis | /analyze.py | UTF-8 | 3,341 | 3.140625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def plot_preconfig(ax, ftype):
    """
    Configure log/semilog scales before plotting.

    ax    -- a matplotlib Axes, or a falsy value to target pyplot's current axes
    ftype -- "exp" for semilog-y, "loglog" for log-log, anything else linear
    """
    if ax:
        set_xscale, set_yscale = ax.set_xscale, ax.set_yscale
    else:
        # pyplot's module-level API is xscale()/yscale(), not set_*scale();
        # the original `ax = plt` fallback raised AttributeError here.
        set_xscale, set_yscale = plt.xscale, plt.yscale
    # log / semilog scales
    if ftype == "exp":
        set_yscale("log")
    elif ftype == "loglog":
        set_xscale("log")
        set_yscale("log")
def plot_data(xs, ys=None, f=None, ax=None, scatter=True, label="", ftype="linear", **kwargs):
    """
    Plot xs against ys (or against f(x) for each x) on the given axes.

    Does nothing when ax is None. If ys is None it is computed by applying
    f to every x. scatter=True draws points, otherwise a line. `ftype` is
    accepted for interface symmetry with the sibling helpers but unused here.
    """
    if ax is None:  # identity check (was `ax == None`)
        return
    if ys is None:
        ys = [f(x) for x in xs]
    if scatter:
        ax.scatter(xs, ys, label=label, **kwargs)
    else:
        ax.plot(xs, ys, label=label, **kwargs)
def plot_postconfig(ax, xlabel="", ylabel="", title=""):
    """Apply the axis labels and title to the given axes."""
    for apply_text, text in ((ax.set_xlabel, xlabel),
                             (ax.set_ylabel, ylabel),
                             (ax.set_title, title)):
        apply_text(text)
def get_curve_fit(xs, ys, f):
    """Fit f(x, *params) to (xs, ys) via scipy's curve_fit; return the
    optimal parameter vector (covariance is discarded)."""
    fitted_params = curve_fit(f, xs, ys)[0]
    return fitted_params
def plot_theoretical(xs, ys, ftype="linear", func=None, f_str=None, ax=None, label="", **kwargs):
    """
    Fit xs/ys according to ftype, print the fitted equation, draw the fitted
    curve on ax, and return the fitted parameters.

    ftype "linear": fit y = m*x + b
    ftype "exp":    fit ln(y) = a*x + b   (straight line on a semilog plot)
    ftype "loglog": fit ln(y) = a*ln(x) + b  (power law)
    anything else:  use the caller-supplied `func` (with optional `f_str`).
    """
    # decide on the function for the curve fit
    ys_test = ys
    if ftype == "linear":
        f = lambda x, m, b: m*x + b
        f_str = "y = {} * x + {}"
    elif ftype == "exp":
        # Fit in log space so the exponential becomes a straight line.
        f = lambda x, a, b: a * x + b
        ys_test = np.log(ys_test)
        f_str = "ln(y) = {} * x + {}"
    elif ftype == "loglog":
        f = lambda x, a, b: a * np.log(x) + b
        ys_test = np.log(ys_test)
        f_str = "ln(y) = {} * ln(x) + {}"
    else:
        # NOTE(review): a custom ftype with func=None leaves f as None and
        # curve_fit will fail below -- confirm callers always supply `func`.
        f = func
    # do the curve fit
    params = get_curve_fit(xs, ys_test, f)
    if f_str:
        print(f_str.format(*params))
    # Map the log-space fit back to data space before plotting.
    if ftype == "linear":
        pass
    elif ftype == "exp":
        f = lambda x, a, b: np.exp(a * x + b)
    elif ftype == "loglog":
        f = lambda x, a, b: np.exp(a * np.log(x) + b)
    else:
        pass
    # plot theoretical
    plot_data(xs, f=lambda x: f(x, *params), label=label, ax=ax, scatter=False, **kwargs)
    return params
def plot_experimental(xs, ys, ax=None, label="", **kwargs):
    """Scatter the experimental points, defaulting the marker to 'o'."""
    kwargs.setdefault("marker", "o")
    # Delegate to the shared plotting helper.
    plot_data(xs, ys=ys, label=label, ax=ax, **kwargs)
def model_axes(datasets, ax=None):
    """
    Plot one dataset group on a single axes: a scatter for each sub-dataset
    plus one theoretical curve fitted through all of their points.

    datasets is a dictionary:
        "data":    list of {"xs", "ys", "label", optional "kwargs"} dicts
        "ftype":   "linear" / "exp" / "loglog" (or custom, with "func")
        "name":    group name, used as the fit label
        "pltargs": extra kwargs forwarded to the theoretical plot
        optional "func" and "f_str" for custom fits
    """
    print(f"{datasets['name']}:\n\n")
    # set log axes
    plot_preconfig(ftype=datasets["ftype"], ax=ax)
    # everything goes on one axis
    for dataset in datasets["data"]:
        dataset["kwargs"] = dataset["kwargs"] if "kwargs" in dataset.keys() else dict()
        plot_experimental(xs=dataset["xs"], ys=dataset["ys"], label=dataset["label"], ax=ax, **dataset["kwargs"])
    print(datasets)
    # Fit one curve through the union of every sub-dataset's points.
    all_xs = np.concatenate([dataset["xs"] for dataset in datasets["data"]])
    all_ys = np.concatenate([dataset["ys"] for dataset in datasets["data"]])
    plot_theoretical(xs=all_xs, ys=all_ys, ax=ax, func=datasets["func"] if "func" in datasets.keys() else None,
                     ftype=datasets["ftype"], label=datasets["name"],
                     f_str=datasets["f_str"] if "f_str" in datasets.keys() else None,
                     **datasets["pltargs"]
                     )
def model_all(datasets_list):
    """Plot each dataset group on its own figure and save it as SVG and PNG
    (files are named after the group's "name")."""
    # iterate over dataset list and plot a new figure for each
    for i, datasets in enumerate(datasets_list):
        # Reuse a caller-supplied figure/axes pair when both are present.
        if "fig" in datasets.keys() and "ax" in datasets.keys():
            fig = datasets["fig"]
            ax = datasets["ax"]
        else:
            fig = plt.figure(i)
            ax = fig.add_subplot(111)
        model_axes(datasets, ax=ax)
        fig.legend()
        fig.savefig(f"{datasets['name']}.svg")
        fig.savefig(f"{datasets['name']}.png")
| true |
40f398deb87b80dca2ed16d70bcaa8dccefbaa53 | Python | samanthaalcantara/codingbat2 | /list-1/has23.py | UTF-8 | 177 | 2.875 | 3 | [] | no_license | """
Date: 06 08 2020
Author: Samantha Alcantara
Question: Given an int array length 2, return True if it contains a 2 or a 3.
"""
#Answer
def has23(nums):
    """Return True if the length-2 int list nums contains a 2 or a 3."""
    return any(value in (2, 3) for value in nums)
| true |
3e668207cd14987c9597ea2b462b7e84d7a60096 | Python | Hassan-Farid/PyTech-Review | /Python Basics/Taking User Input/using InputMethod.py | UTF-8 | 1,529 | 5.15625 | 5 | [] | no_license | '''
For user input, we use the input() method which allows the user to type a particular input
'''
#Using the input method to take a message like "Hello World"
message = input() #Allows user to type in some sort of text message
print(message) #Display the message on the screen
#Using the input method to take an integer number
num = int(input())
print("The inputted number is {}".format(num))
#Using the input method to give user a certain prompt which tells the user what type of input the program wants
message = input("Please enter a text message: ")
print("The inputted text message is: {}".format(message))
num = int(input("Please enter an integer: "))
print("The inputted number is: {}".format(num))
'''
Task: Once done with this stuff, you can perform the following task to check if you got it or not
Take the following inputs by providing prompts to user:
Enter the first point's x coordinate: __________ (Input comes in the blank)
Enter the first point's y coordinate: __________ (Input comes in the blank)
Enter the second point's x coordinate: ___________ (Input comes in the blank)
Enter the second point's y coordinate: ___________ (Input comes in the blank)
Once input has been taken, you can use print formatting to display ouput in the format:
First point coordinates: (2, 5)
Second point coordinates: (7, 8)
The slope of the line through these points is given as follows:
m = (y2 - y1)/(x2 - x1)
m = (8 - 5)/(7 - 2)
m = 3/5
which is the required slope of the provided coordinates
''' | true |
afd0654c6f8957518f613790af41dc98d07994f1 | Python | JaysesS/4hsl33p_borda | /flask/data/fill.py | UTF-8 | 721 | 2.59375 | 3 | [] | no_license | import json, random, string
def get_random_string(length):
    """Return a random string of `length` lowercase ASCII letters."""
    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(length))
data = { "data" : []}
for x in range(50):
data['data'].append(
{
"name": get_random_string(470),
"discription": get_random_string(770),
"category": "Stego",
"score": "200",
"answer": get_random_string(470),
"files": "mega.nz/file/MrohFIDY#EqnqVVVTiq0HCOu9c-QU36v-xRXt2XBVgVXPZ2TmU7E",
"author": "yosum",
"view": True
}
)
with open("tasks.json", "w") as write_file:
json.dump(data, write_file, indent=4) | true |
b604044c604b08025c8fd41686589f3cb4e73a19 | Python | dkaramit/pseudo-Goldstone_DM | /Pseudo_Goldstone/util/Tuples.py | UTF-8 | 119 | 2.71875 | 3 | [
"MIT"
] | permissive | from itertools import combinations_with_replacement as itTuples
def Tuples(List, k):
    """Return every k-length combination with replacement of List,
    as a list of tuples (thin wrapper over itertools)."""
    return [combo for combo in itTuples(List, k)]
| true |
13e4314bc542363f45c503d948c0d962716f5821 | Python | Hemalatha30/mycodewash | /01-jsonmaker.py | UTF-8 | 696 | 3.359375 | 3 | [] | no_license |
#!/usr/bin/python3
'''Author:Hema | Email: Hemasnet@yahoo.com || json learning with Python'''
# with python, the json batteries are in box, but you need to plug them in
import json
def main():
    """Build a small list of game dicts, print it, and dump it to JSON."""
    # create a list of dictionaries
    videogames = [{"game1":"red", "game2":"whisker","game3":"hema","game4":"Sakthivel"}, {"game1":"paperboy", "game2":"donkey Knong"}]
    # show the values of videogames
    print(videogames)
    # create a file in the local system
    # NOTE(review): the filename 'vidogames.json' looks misspelled but is
    # kept as-is in case other code reads that exact name.
    with open('vidogames.json',"w") as vidfile: # "w" is write ; "r" - read, "a" - append
        json.dump(videogames, vidfile)
# The function needs to be called for anything to happen:
if __name__ == "__main__":
    main()
| true |
da892946662875575f056595603adce28c616f3e | Python | xuefengji/Python | /demos/single.py | UTF-8 | 820 | 3.25 | 3 | [] | no_license | # @Time: 2022/4/16 21:46
# @Author: xuef
# @File: single.py
# @Desc:
# 单例模式1
# class A:
# pass
#
# a = A()
#
# a1 = a
# a2 = a
# a3 = a
# print(id(a1))
# print(id(a2))
# print(id(a3))
# 使用 __new__ 方法
# class A:
# def __new__(cls, *args, **kwargs):
# print('__new__ is call')
# if not hasattr(cls,"_instance"):
# cls._instance = super().__new__(cls)
# return cls._instance
#
# def __init__(self):
# print('__init__ is call')
# def __call__(self, *args, **kwargs):
# print('call')
#
# a1 = A()
# a2 = A()
# a1()
# a3 = A()
# print(id(a1))
# print(id(a2))
# print(id(a3))
# Fa = type("FOO",(object,),{"v1":123, "func": lambda self:666})
#
# obj = Fa()
#
# print(obj.v1)
# print(obj.func())
class A:
    # Empty demo class; the script goes on to print type(A), i.e. its
    # metaclass (<class 'type'>).
    pass
print(type(A))
| true |
2757c0e77201d7c097e5207f6491d9becf8f90b8 | Python | KholdStare/projecteuler | /experiments/dynamic.py | UTF-8 | 3,860 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env python
# allow importing from utils
import sys
import os
import random
# Make ../utils (relative to this file's real location) importable.
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../utils")
###########################################################################
# dynamic programming experiments #
###########################################################################
class Interval(object):
    """A weighted interval [start, end], used for interval-scheduling problems."""
    def __init__(self, start, end, value=1):
        self.start = start
        self.end = end
        self.value = value
    def isOverlapping(self, other):
        """True when this interval and `other` share an interior point."""
        return self.start < other.end and other.start < self.end
    def length(self):
        """Length of the interval (end - start)."""
        return self.end - self.start
    def __str__(self):
        return "[ {}, {} ] val:{}".format(self.start, self.end, self.value)
def isValidList(intervalList):
    """ Returns True if the list of intervals is disjoint.
        Assumes the list is sorted by ending time. """
    if len(intervalList) == 0:
        return True
    it = iter(intervalList)
    # builtin next() works on Python 2.6+ and 3; the original `i.next()`
    # was Python-2-only.
    prev = next(it)
    for cur in it:
        if prev.isOverlapping(cur):
            return False
        prev = cur
    return True
def listValue(intervalList):
    """ Returns the combined value of all intervals. """
    total = 0
    for interval in intervalList:
        total += interval.value
    return total
def firstCompatibleIndex(intervalList, i):
    """ Given a list of intervals sorted by end time, and an index i,
        return the largest index j < i whose interval does not overlap
        interval i. If no such interval exists, return -1. """
    if i >= len(intervalList):
        return -1
    interval = intervalList[i]
    j = i - 1
    while j >= 0 and intervalList[j].isOverlapping(interval):
        j -= 1
    return j


class WeightedSchedulingProblem(object):
    """Weighted interval scheduling solved by dynamic programming
    (Kleinberg & Tardos, ch. 6)."""

    def __init__(self, intervalList):
        self.intervalList = sorted(intervalList, key=lambda x: x.end)
        self.n = len(self.intervalList)
        # BUG FIX: compatibility indices must be computed on the *sorted*
        # list; the original used the caller's ordering, which is only
        # correct when the input happens to be pre-sorted by end time.
        # (Also: range instead of the Python-2-only xrange.)
        self.compatibleIndeces = [firstCompatibleIndex(self.intervalList, i)
                                  for i in range(self.n)]
        # careful! off-by-one indexing: optArray[i+1] holds OPT(i), so the
        # empty case OPT(-1) has a slot at index 0.
        self.optArray = [0 for _ in range(self.n + 1)]
        # construct solution and memoize it
        for i in range(self.n):
            self.optArray[i + 1] = self.calcOptimum(i)

    def calcOptimum(self, i):
        """Bellman recurrence: take interval i plus the best compatible
        prefix, or skip interval i."""
        p_i = self.compatibleIndeces[i]
        return max(self.getOptimum(p_i) + self.intervalList[i].value,
                   self.getOptimum(i - 1))

    def getOptimum(self, i):
        """Return OPT(i); i == -1 denotes the empty prefix."""
        return self.optArray[i + 1]

    def getOptimumList(self, i=None, acc=None):
        """Reconstruct one optimal set of intervals (in end-time order)."""
        # BUG FIX: the original used a mutable default `acc=[]`, so repeated
        # calls kept appending into one shared list.
        if acc is None:
            acc = []
        if i is None:
            i = self.n - 1
        if i < 0:
            return acc
        # If OPT is unchanged without interval i, it is not part of the
        # optimal solution.
        if self.getOptimum(i) == self.getOptimum(i - 1):
            return self.getOptimumList(i - 1, acc)
        acc.insert(0, self.intervalList[i])
        p_i = self.compatibleIndeces[i]
        return self.getOptimumList(p_i, acc)
def main (intervalList):
    """Solve the weighted scheduling problem for intervalList and return
    the chosen intervals as their string representations."""
    problem = WeightedSchedulingProblem(intervalList)
    return [ i.__str__() for i in problem.getOptimumList(len(intervalList) - 1) ]
if __name__ == "__main__":
# from Kleinberg and Tardos p253
intervalList = [ Interval(0, 3, 2),
Interval(1, 5, 4),
Interval(4, 6, 4),
Interval(2, 9, 7),
Interval(7, 10, 2),
Interval(8, 11, 1) ]
print main(intervalList)
| true |
153846fff7bb04e8a167a0077919f3508900d883 | Python | SalazakuIII/Gold-Rush | /main_app/views.py | UTF-8 | 2,405 | 2.703125 | 3 | [] | no_license | from django.shortcuts import render, redirect
import random, datetime
# Create your views here.
def index(request):
    """Seed the session with a fresh game (0 gold, empty log) when it has
    not been set up yet, then send the player to the game page."""
    # BUG FIX: the guard previously tested "gold_src", which is only ever a
    # POST field and is never stored in the session, so the game state was
    # wiped on every visit to "/".
    if "gold_amt" not in request.session:
        request.session["gold_amt"] = 0
        request.session["activity_log"] = []
        request.session.save()
    return redirect("/display")
def display(request):
    """Render the game page; current session state is shown by the template."""
    return render(request, "index.html")
def process(request):
    """Apply one gold-gathering action submitted by the form.

    Reads the chosen location from request.POST["gold_src"], rolls a random
    gold change, appends an HTML entry to the session's activity log, updates
    the gold total, and returns to the game page.
    """
    timestamp = '{:%Y-%b-%d %H:%M:%S}'.format(datetime.datetime.now())
    location = request.POST["gold_src"]
    # Payout ranges for the locations that always earn gold.
    earn_ranges = {'Farm': (10, 20), 'Cave': (5, 10), 'House': (2, 5)}
    if location in earn_ranges:
        low, high = earn_ranges[location]
        change_amt = random.randint(low, high)
        # BUG FIX: the Cave branch emitted a malformed class attribute
        # ("class='activity_log', 'gain'"); all gains now share one format.
        action_msg = f"<p class='activity_log gain'> Earned {change_amt} gold from the {location}! ({timestamp})</p>"
        print("CHANGE AMT: ", change_amt, " on ", timestamp)
    elif location == 'Casino':
        change_amt = random.randint(-50, 50)
        if change_amt > 0:
            # BUG FIX: was "class='activity_log' class='gain'" (duplicate attribute).
            action_msg = f"<p class='activity_log gain'> Entered a casino and won {change_amt} while gambling!!! ({timestamp})</p>"
        elif change_amt == 0:
            action_msg = f"<p class='activity_log neutral'> Entered a casino and broke-even gambling!!! ({timestamp})</p>"
        else:
            # BUG FIX: report the loss as a positive amount (was "lost -17").
            action_msg = f"<p class='activity_log loss'> Entered a casino and lost {abs(change_amt)} while gambling!!! ({timestamp})</p>"
        print("CHANGE AMT: ", change_amt, " on ", timestamp)
    request.session["gold_amt"] += change_amt
    request.session["activity_log"].append(action_msg)
    request.session.save()
    return redirect("/display")
def delete_session_data(request):
    """Wipe the stored game state and send the player back to "/",
    which re-seeds a fresh session."""
    for key in ("gold_amt", "activity_log"):
        del request.session[key]
    request.session.save()
    print("Reset Initiated | Gold Count Reset")
    return redirect("/")
778ecb05aeb256118e188d4b6b99363756fd90b5 | Python | Grace-Joydhar/Python-Learning | /Study Mart/10. List.py | UTF-8 | 568 | 4.21875 | 4 | [] | no_license | a = "List is a collection which is changeable and ordered. It allows duplicate values."
print(a)
b= "\nList with nested list:"
print(b)
list = [1,["Grace", 5, 54, 60], 10,4.5,6, 6,20,22, "Grace"]
print(list)
print(list[1][3])
list.extend([3,4,5,6,7]) #To add new values in the array
list.remove(10) #Direct a value. But if there are same value more than 1 time, then it will only delete the first value. So we need to delete with index property.
list.remove(list[1]) #From the index.
print(list.count(6)) #To find a number's existance.
print(list)
| true |
cad831b3fac92f4108e1000a1105e64db682c6fc | Python | amisha1garg/Strings_in_python | /LargestNoWithGivenSum.py | UTF-8 | 1,310 | 3.671875 | 4 | [] | no_license | # Geek lost the password of his super locker. He remembers the number of digits N as well as the sum S of all the digits of his password. He know that his password is the largest number of N digits that can be made with given sum S. As he is busy doing his homework, help him retrieving his password.
#
# Example 1:
#
# Input:
# N = 5, S = 12
# Output:
# 93000
# Explanation:
# Sum of elements is 12. Largest possible
# 5 digit number is 93000 with sum 12.
# User function Template for python3
class Solution:
    # Function to return the largest possible number of n digits
    # with sum equal to given sum.
    def largestNum(self, n, s):
        """Return the largest n-digit string whose digits sum to s,
        or -1 when no such number exists (s exceeds 9 per digit)."""
        if s > 9 * n:
            return -1
        digits = []
        remaining = s
        for _ in range(n):
            # Greedily place the biggest digit the remaining sum allows.
            digit = min(9, remaining)
            digits.append(str(digit))
            remaining -= digit
        return "".join(digits)
# {
# Driver Code Starts
# Initial Template for Python 3
import atexit
import io
import sys
# Contributed by : Nagendra Jha
# Reads T test cases from stdin; each subsequent line holds "n s".
if __name__ == '__main__':
    test_cases = int(input())
    for cases in range(test_cases):
        n, s = map(int, input().strip().split())
        ob = Solution()
        print(ob.largestNum(n, s))
# } Driver Code Ends
ce08004137bd3143f010bf36f0ae5ae6e8316348 | Python | onceuponpython/Class-1.1 | /1_1_printing_complete.py | UTF-8 | 133 | 3.0625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 18 14:43:11 2019
@author: Owner
"""
x=input("What is your name? ")
print("Hello", x) | true |
1faf8b848752c2bcec279ddaf908befe9a773476 | Python | WokoLiu/LeetCode_python | /p0011_M_ContainerWithMostWater.py | UTF-8 | 1,502 | 3.828125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2019/3/24 17:28
# @Author : Yulong Liu
# @File : p0011_M_ContainerWithMostWater.py
"""
题号:11
难度:medium
链接:https://leetcode.com/problems/container-with-most-water
描述:等距柱形图哪两根柱子之间装水更多
"""
from typing import List
class Solution(object):
    def maxArea01(self, height: List[int]) -> int:
        """Brute force: try every pair of lines, O(n^2)."""
        best = 0
        total = len(height)
        for left in range(total):
            for right in range(left + 1, total):
                area = min(height[left], height[right]) * (right - left)
                if area > best:
                    best = area
        return best

    def maxArea02(self, height: List[int]) -> int:
        """Two pointers squeezing inward, O(n).

        The shorter line caps the water height, so moving the shorter
        pointer inward is the only move that can ever increase the area;
        moving the taller one can only shrink it.
        """
        lo, hi = 0, len(height) - 1
        best = 0
        while lo < hi:
            if height[lo] < height[hi]:
                best = max(best, height[lo] * (hi - lo))
                lo += 1
            else:
                best = max(best, height[hi] * (hi - lo))
                hi -= 1
        return best
# LeetCode sample input; expected maximum area is 49.
if __name__ == '__main__':
    data = [1, 8, 6, 2, 5, 4, 8, 3, 7]
    print(Solution().maxArea02(data))
| true |
145b8b34745cd39308b5d9de841278ee3aee3bd6 | Python | liuweilin17/algorithm | /interview/lianjia3.py | UTF-8 | 1,026 | 3.078125 | 3 | [] | no_license | ###########################################
# Let's Have Some Fun
# File Name: lianjia3.py
# Author: Weilin Liu
# Mail: liuweilin17@qq.com
# Created Time: Tue Sep 18 12:15:12 2018
###########################################
#coding=utf-8
#!/usr/bin/python
def findPair1(a, sumV):
    """Print every pair of values in `a` whose sum equals sumV.

    Two-pointer scan over a sorted copy: O(n log n). (Python 2 syntax.)
    """
    b = sorted(a)
    l = len(a)
    i = 0
    j = l - 1
    while i<j and i<l and j >= 0:
        s = b[i] + b[j]
        if s == sumV:
            # Report a match, then advance the left pointer to find more.
            print 'i:%d,j:%d' % (b[i], b[j])
            i += 1
        elif s < sumV:
            i += 1
        else:
            j -= 1
def findPair2(a, sumV):
    """Print value pairs summing to sumV in a single O(n) pass.

    dt maps each seen value to the list of indices where it occurred, so
    each element is matched against every earlier complement. (Python 2.)
    """
    dt = {}
    l = len(a)
    for i in range(l):
        t = sumV - a[i]
        b = dt.get(t, [])
        if len(b) > 0:
            for j in b:
                print 'i:%d,j:%d' % (a[i], a[j])
        # Record this value's index for later complements.
        ls = dt.get(a[i], [])
        ls.append(i)
        dt[a[i]] = ls
if __name__ == '__main__':
    # Demo: both implementations should report the pairs summing to 5.
    a = [2,1,4,10,-5]
    print a
    print 'findPair1:'
    findPair1(a, 5)
    print 'findPair2:'
    findPair2(a, 5)
    # NOTE(review): leftover debug loop; prints 'hh' five times.
    for i in range(5):
        print 'hh'
| true |
cc00be51f796349a2dd901a91cf6b577da6fe8b9 | Python | chrissiedesemberg/code_college-python | /chap8/tiy200_8-3.py | UTF-8 | 220 | 3.578125 | 4 | [] | no_license | def make_shirt(size, message):
print(f"\nThe shirt you would like is a size {size.upper()} and should have the following message printed on {message.title()}")
make_shirt("small", "winner, winner, chicken dinner!")
| true |
a89924ff86a399f02ab26d4e2d9a1f89bf0af127 | Python | blackbogdan/interviewcake | /coding bat/warmup1.py | UTF-8 | 6,448 | 4.25 | 4 | [] | no_license | # coding=utf-8
'''We have a loud talking parrot. The "hour" parameter is the current hour time in the range 0..23. We are in trouble if the parrot is talking and the hour is before 7 or after 20. Return True if we are in trouble.
parrot_trouble(True, 6) → True
parrot_trouble(True, 7) → False
parrot_trouble(False, 6) → False'''
def parrot_trouble(talking, hour):
    """True when the parrot is talking outside the allowed 7..20 hour window."""
    return talking and not (7 <= hour <= 20)

parrot_trouble(True, 6)
'''
Given 2 ints, a and b, return True if one if them is 10 or if their sum is 10.
makes10(9, 10) → True
makes10(9, 9) → False
makes10(1, 9) → True
'''
def makes10(a, b):
    """True if a is 10, b is 10, or a + b is 10."""
    return 10 in (a, b, a + b)
'''
Given an int n, return True if it is within 10 of 100 or 200. Note: abs(num) computes the absolute value of a number.
near_hundred(93) → True
near_hundred(90) → True
near_hundred(89) → False'''
def near_hundred(n):
    """True when n is within 10 of 100 or within 10 of 200."""
    return min(abs(100 - n), abs(200 - n)) <= 10
'''
Given 2 int values, return True if one is negative and one is positive. Except if the parameter "negative" is True, then return True only if both are negative.
pos_neg(1, -1, False) → True
pos_neg(-1, 1, False) → True
pos_neg(-4, -5, True) → True'''
def pos_neg(a, b, negative):
    """With negative=True: both must be negative. Otherwise: one must be
    strictly negative and the other strictly positive."""
    if negative:
        return a < 0 and b < 0
    return (a < 0 and b > 0) or (b < 0 and a > 0)
'''
Given a string, return a new string where "not " has been added to the front. However, if the string already begins with "not", return the string unchanged.
not_string('candy') → 'not candy'
not_string('x') → 'not x'
not_string('not bad') → 'not bad'"'''
def not_string(str):
    """Prefix str with "not " unless it already begins with "not"."""
    if str.startswith('not'):
        return str
    return "not " + str
'''
Given a non-empty string and an int n, return a new string where the char at index n has been removed. The value of n will be a valid index of a char in the original string (i.e. n will be in the range 0..len(str)-1 inclusive).
missing_char('kitten', 1) → 'ktten'
missing_char('kitten', 0) → 'itten'
missing_char('kitten', 4) → 'kittn'"'''
def missing_char(str, n):
    """Return str with the character at index n removed."""
    front, back = str[:n], str[n + 1:]
    return front + back
'''
Given a string, return a new string where the first and last chars have been exchanged.
front_back('code') → 'eodc'
front_back('a') → 'a'
front_back('ab') → 'ba'''''
def front_back(str):
    """Swap the first and last characters of str."""
    if len(str) <= 1:
        return str
    return str[-1] + str[1:-1] + str[0]
'''Given a string, we'll say that the front is the first 3 chars of the string. If the string length is less than 3, the front is whatever is there. Return a new string which is 3 copies of the front.
front3('Java') → 'JavJavJav'
front3('Chocolate') → 'ChoChoCho'
front3('abc') → 'abcabcabc'''''
def front3(str):
    """Return three copies of str's first 3 chars (the whole string if shorter)."""
    front = str[:3]
    return front * 3
'''
Given a string, return a new string made of every other char starting with the first, so "Hello" yields "Hlo".
string_bits('Hello') → 'Hlo'
string_bits('Hi') → 'H'
string_bits('Heeololeo') → 'Hello'''''
def string_bits(str):
    """Return every other char of str, starting with the first ('Hello' -> 'Hlo')."""
    return str[::2]
def string_bits_2(str):
    """Same contract as string_bits, written with enumerate."""
    return "".join(ch for pos, ch in enumerate(str) if pos % 2 == 0)
"""
Given a non-empty string like "Code" return a string like "CCoCodCode".
string_splosion('Code') → 'CCoCodCode'
string_splosion('abc') → 'aababc'
string_splosion('ab') → 'aab'"""
def string_splosion(str):
    """Concatenate every prefix of str, shortest first ('Code' -> 'CCoCodCode')."""
    # The original also built the same value in a loop and then discarded
    # it, returning this separately-computed join; the dead loop is removed.
    return "".join([str[:i+1] for i in range(len(str))])
"""
Given a string, return the count of the number of times that a substring length 2 appears in the string and also as the last 2 chars of the string, so "hixxxhi" yields 1 (we won't count the end substring).
last2('hixxhi') → 1
last2('xaxxaxaxx') → 1
last2('axxxaaxx') → 2"""
def last2(str):
    """Count how many earlier length-2 substrings of str equal its final
    2-char substring (the terminal occurrence itself is not counted)."""
    # Screen out too-short string case.
    if len(str) < 2:
        return 0
    # last 2 chars, can be written as str[-2:]
    last2 = str[-2:]
    count = 0
    # Check each substring length 2 starting at i
    for i in range(len(str) - 2):
        sub = str[i:i + 2]
        if sub == last2:
            count = count + 1
    # BUG FIX: the original left its `return` statements commented out, so
    # the function returned None for every string of length >= 2.
    return count
"""Given an array of ints, return the number of 9's in the array.
array_count9([1, 2, 9]) → 1
array_count9([1, 9, 9]) → 2
array_count9([1, 9, 9, 3, 9]) → 3"""
def array_count9(nums):
    """Return how many 9's the list contains."""
    return nums.count(9)
"""
Given an array of ints, return True if one of the first 4 elements in the array is a 9. The array length may be less than 4.
array_front9([1, 2, 9, 3, 4]) → True
array_front9([1, 2, 3, 4, 9]) → False
array_front9([1, 2, 3, 4, 5]) → False"""
def array_front9(nums):
    """Return True if a 9 appears among the first four elements of *nums*.

    Slicing gracefully handles arrays shorter than four elements.
    """
    return 9 in nums[:4]
"""
Given an array of ints, return True if the sequence of numbers 1, 2, 3 appears in the array somewhere.
array123([1, 1, 2, 3, 1]) → True
array123([1, 1, 2, 4, 1]) → False
array123([1, 1, 2, 1, 2, 3]) → True"""
def array123(nums):
    """Return True if the consecutive run 1, 2, 3 appears anywhere in *nums*."""
    # Compare each length-3 window against the target sequence.
    return any(nums[i:i + 3] == [1, 2, 3] for i in range(len(nums) - 2))
def array123_b(nums):
    """Return True if the consecutive run 1, 2, 3 appears in *nums*.

    BUG FIX: the previous set-intersection version only tested that the
    values 1, 2 and 3 were all present somewhere, so e.g. [3, 2, 1]
    wrongly returned True; the run must be consecutive and in order.
    (The commented '"123" in "".join(...)' alternative was also wrong for
    multi-digit values such as [12, 3].)
    """
    return any(nums[i:i + 3] == [1, 2, 3] for i in range(len(nums) - 2))
"""
Given 2 strings, a and b, return the number of the positions where they contain the same length 2 substring. So "xxcaazz" and "xxbaaz" yields 3, since the "xx", "aa", and "az" substrings appear in the same place in both strings.
string_match('xxcaazz', 'xxbaaz') → 3
string_match('abc', 'abc') → 2
string_match('abc', 'axc') → 0"""
def string_match(a, b):
    """Count positions where *a* and *b* share the same length-2 substring.

    "xxcaazz" vs "xxbaaz" -> 3, because "xx", "aa" and "az" line up at the
    same positions in both strings.
    """
    # Windows may only start where both strings still have two characters.
    limit = min(len(a), len(b)) - 1
    return sum(1 for i in range(limit) if a[i:i + 2] == b[i:i + 2])
054b47421662446f63216f1743469e6ef98530b1 | Python | aqurilla/data-structures-and-algorithms | /python/edit_distance.py | UTF-8 | 811 | 3.5625 | 4 | [] | no_license | # https://leetcode.com/problems/edit-distance/
class Solution:
    def minDistance(self, word1: str, word2: str) -> int:
        """Return the Levenshtein (edit) distance between word1 and word2.

        Classic dynamic program: table[r][c] is the minimum number of
        single-character insertions, deletions or replacements needed to
        turn the first c characters of word1 into the first r characters
        of word2.
        """
        height = len(word2) + 1
        width = len(word1) + 1
        table = [[0] * width for _ in range(height)]
        # Transforming to/from the empty string costs one edit per character.
        for r in range(height):
            table[r][0] = r
        for c in range(width):
            table[0][c] = c
        for r in range(1, height):
            for c in range(1, width):
                if word2[r - 1] == word1[c - 1]:
                    # Matching characters cost nothing extra.
                    table[r][c] = table[r - 1][c - 1]
                else:
                    # Cheapest of replace, delete and insert, plus this edit.
                    table[r][c] = 1 + min(table[r - 1][c - 1],
                                          table[r - 1][c],
                                          table[r][c - 1])
        return table[height - 1][width - 1]
| true |
8990b99d6a7be42e67a4a11ea65f7c366084fe69 | Python | inrixx/work | /do_cv2.py | UTF-8 | 1,533 | 2.921875 | 3 | [] | no_license | # coding=utf-8
import numpy as np
import cv2 as cv
import random
# Rotate an image (translated from: 图片旋转).
def rotate(image, angle, center=None, scale=1.0):
    """Rotate *image* by *angle* degrees about *center*, scaled by *scale*.

    Defaults to rotating about the image centre at the original scale.
    The output canvas keeps the input width/height, so corners may be
    clipped for non-trivial angles.
    """
    # numpy shape order is (rows, cols[, channels]) -> (height, width).
    (h, w) = image.shape[:2]
    if center is None:
        # Integer centre of the image; // keeps pixel coordinates integral.
        center = (w//2, h//2)
    # 2x3 affine rotation matrix about `center`.
    M = cv.getRotationMatrix2D(center, angle,scale)
    # warpAffine's dsize argument is (width, height).
    rotated = cv.warpAffine(image, M, (w, h))
    return rotated
# Random four-point perspective warp (translated from: 图片仿射 4点映射).
def affine(image):
    """Warp *image* with a random perspective transform and return the result.

    Maps the four image corners to four randomly jittered points (each
    coordinate perturbed by 1-90 px), so every call produces a different
    distortion.  Despite the function name, getPerspectiveTransform makes
    this a projective warp rather than a pure affine one.
    """
    # NOTE(review): numpy's shape order is (rows, cols), so these two names
    # are swapped for non-square images.  The dsize tuple (rows, cols)
    # passed to warpPerspective below then works out to (width, height),
    # which is what OpenCV expects -- the two mix-ups cancel out.
    (cols,rows) = image.shape[:2]
    # Source points: the four corners of the image.
    pts1 = np.float32([[0,0], [cols-1,0], [0, rows-1], [cols-1,rows-1]])
    # pts2 = np.float32([[99,102], [cols-1,91], [50, rows-1], [cols-50,rows-50]])
    # Destination points: each corner nudged inward by a random 1-90 px.
    pts2 = np.float32([[random.randint(1,90), random.randint(1,90)],
                [cols-random.randint(1,90), random.randint(1,90)],
                [random.randint(1,90), rows-random.randint(1,90)],
                [cols-random.randint(1,90),rows-random.randint(1,90)]])
    M = cv.getPerspectiveTransform(pts1, pts2)
    affined = cv.warpPerspective(image, M, (rows, cols))
    return affined
if __name__ == '__main__':
image = "./pic/ex.png"
img = cv.imread(image, 0)
# rotated = rotate(img, 70)
# cv.imshow("70", rotated)
# affined = affine(img)
# cv.imshow("2",affined)
# cv.imwrite("/home/end/pic_e/ex_1000.png",affined)
#旋转变换360张
# for i in range(260):
# j = i+400
# print(j)
# angle = random.randint(1,359)
# rotated = rotate(img,angle)
# cv.imwrite("/home/end/pic_e/ex_"+str(j)+".png",rotated)
#仿射变换360张
for i in range(260):
j = i+400
print(j)
affined = affine(img)
cv.imwrite("/home/end/pic_e/ex_"+str(j)+".png",affined)
print("finish") | true |
2faffda9ac23e6138e4bf0faba74d091bcc1e48d | Python | tageorgiou/ptolemy_scraper | /buildingdata.py | UTF-8 | 534 | 2.578125 | 3 | [] | no_license | from urllib2 import Request, urlopen
import json
buildingcoords = {}
buildings = open("buildinglist").read().split('\n')
for building in buildings:
try:
r = Request(url="http://whereis.mit.edu/search?type=query&q=%s&output=json"
% building)
response = urlopen(r)
j = json.load(response)
d = {}
d['lon'] = j[0]['long_wgs84']
d['lat'] = j[0]['lat_wgs84']
buildingcoords[building] = d
except Exception as e:
pass
print json.dumps(buildingcoords)
| true |
27877aae85f14ed37da25e03488b2a78a7385a38 | Python | zcharif/MeiTag | /MeiTagAlpha/MeiServer/SymmetricCryptoTest.py | UTF-8 | 954 | 2.75 | 3 | [] | no_license | #!/usr/bin/python\
import base64
import os
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
def encryptF(password, salt, message): #use this to encrypt. All values are bytes
    """Encrypt *message* with a Fernet key derived from *password* and *salt*.

    The key is produced by PBKDF2-HMAC-SHA256 (100,000 iterations, 32-byte
    output) and urlsafe-base64 encoded, as Fernet requires.  All arguments
    are bytes; returns the Fernet token (bytes).  The identical
    password/salt pair must be supplied to decryptF to recover the message.
    """
    # A KDF instance is single-use: build a fresh one per derivation.
    kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=100000, backend=default_backend())
    key = base64.urlsafe_b64encode(kdf.derive(password))
    f = Fernet(key)
    encrypted_text = f.encrypt(message)
    return encrypted_text
def decryptF(password, salt, encrypted_text): #use this to decrypt. All values are bytes
    """Decrypt a token produced by encryptF with the same *password*/*salt*.

    Re-derives the Fernet key via PBKDF2-HMAC-SHA256 (100,000 iterations,
    32-byte output, urlsafe-base64 encoded) and returns the plaintext
    bytes.  Raises cryptography's InvalidToken if the key is wrong or the
    token was tampered with.
    """
    # Must mirror encryptF's derivation exactly to obtain the same key.
    kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=100000, backend=default_backend())
    key = base64.urlsafe_b64encode(kdf.derive(password))
    f = Fernet(key)
    decrypted_text = f.decrypt(encrypted_text)
    return decrypted_text
| true |
87a8186ea30411374eb5dd0fb4d4b69c70406f09 | Python | RYANCOX00/programming2021 | /Week04-Flow/4.2.5. Average.py | UTF-8 | 944 | 4.90625 | 5 | [] | no_license | # A program to read in numbers, add them to a list and find the average of the list.
# Author: Ryan Cox
# Reading in a number and saving as the int 'number'
number = int(input("Enter a number (0 to stop): "))
# Creating a list 'numbers'
numbers = []
# Setting a loop until the user types 0.
while number != 0:
# Adding the 'number' to 'numbers'. And then re-inputting number.
numbers.append(number)
number = int(input("Enter a number (0 to stop): "))
# Goes through the list and executes statements for each item temporarily defined as 'item'
for item in numbers:
print(item)
# Finding the average of the items in numbers
# Finding the sum of the items in the list and dividing by the number of items in the list.
average = float(sum(numbers)/len(numbers))
# alternative way to find average.
#import numpy
#average = (float(numpy.mean(numbers)))
# Outputting average
print("The average number is {}".format(average)) | true |
1ba118c4473b08623eb28259099fa2f4c7209676 | Python | nicoluv/TareaP1 | /tarea1/tarea1python/__init__.py | UTF-8 | 226 | 3.6875 | 4 | [] | no_license | from Main import calcular
print("Ingrese los numeros binario que desea calcular: \nRecuerde que deben ser binarios y separase por espacios!\n Ejem. 111 + 1000 - 1010: ")
s = input()
print("El rersultado es: ", calcular(s)); | true |
9fd8b7e5962ac763f4229d0061d1dc1ae6086c78 | Python | Ela-Na/evaluation-metrics | /msle - rmsle.py | UTF-8 | 456 | 2.78125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 6 10:33:50 2020
@author: Ela
"""
import numpy as np
import math
def mean_squared_log_error(y_true, y_pred):
    """Return (MSLE, RMSLE) for paired sequences of true/predicted values.

    MSLE  = mean of (log(1 + y_t) - log(1 + y_p)) ** 2 over all pairs
    RMSLE = sqrt(MSLE)

    All values must be > -1 so the logarithms are defined.

    BUG FIX: the original ``return 'MSLE =' msle, 'RMSLE =' rmsle`` was a
    SyntaxError; the function now returns the two numbers as a plain
    (msle, rmsle) tuple.
    """
    error = 0.0
    for yt, yp in zip(y_true, y_pred):
        # Squared difference of the shifted logs for this pair.
        error += (np.log(1 + yt) - np.log(1 + yp)) ** 2
    msle = error / len(y_true)
    rmsle = math.sqrt(msle)
    return msle, rmsle
| true |
58bb645c90a17343c3689bbd83cfab886acf6f35 | Python | papalagichen/leet-code | /0077 - Combinations.py | UTF-8 | 958 | 3.0625 | 3 | [] | no_license | from typing import List
class Solution:
    """Enumerate all k-element combinations of the integers 1..n."""

    def combine(self, n: int, k: int) -> List[List[int]]:
        """Return every size-k combination of [1, n], in ascending order."""
        return self.helper(1, n + 1, k)

    def helper(self, start: int, end: int, k: int):
        """Return all size-k combinations drawn from range(start, end)."""
        if k <= 0:
            # Base case: exactly one way to choose nothing.
            return [[]]
        combos = []
        for first in range(start, end):
            # Fix `first`, then extend with combinations from what remains.
            for tail in self.helper(first + 1, end, k - 1):
                combos.append([first] + tail)
        return combos
if __name__ == '__main__':
import Test
Test.test(Solution().combine, [
((0, 0), [
[]
]),
((4, 1), [
[1],
[2],
[3],
[4],
]),
((4, 2), [
[1, 2],
[1, 3],
[1, 4],
[2, 3],
[2, 4],
[3, 4],
]),
((4, 3), [
[1, 2, 3],
[1, 2, 4],
[1, 3, 4],
[2, 3, 4]
]),
])
| true |
8be019728b3b7289a70e9d3b394f9d62a8b6bc04 | Python | tsubasaokabe/test_apps | /test_apps/train.py | UTF-8 | 405 | 2.609375 | 3 | [] | no_license | from sklearn import svm
from sklearn import datasets
from sklearn.externals import joblib
def main():
    """Train an SVM classifier on the iris dataset and persist it to disk."""
    # Use a support-vector machine as the classifier
    # (translated from: SVMを分類機にする).
    clf = svm.SVC()
    # Load the bundled iris dataset (translated from: データセットの読み込み).
    iris = datasets.load_iris()
    # Feature matrix X and target labels y
    # (translated from: 従属変数と説明変数).
    X,y = iris.data, iris.target
    # Fit the classifier (translated from: 学習).
    clf.fit(X,y)
    # Serialize the fitted model; assumes ./model/ already exists -- TODO confirm.
    joblib.dump(clf,'./model/sample-model.pkl')
if __name__ =='__main__':
    main()
| true |
b7f8db343455ad65da8eac994216cd728331c631 | Python | Sharisi123/PythonTasks7 | /task_3.py | UTF-8 | 460 | 3.640625 | 4 | [] | no_license | import datetime
def printTimeStamp(name):
print('Автор програми: ' + name)
print('Час компіляції: ' + str(datetime.datetime.now()))
printTimeStamp('Наживотов Олександр')
off = False
uniqueValues = set()
while off != True:
word = input('Введіть значення: ')
if(word == ''):
off = True
break
uniqueValues.add(word)
for value in uniqueValues:
print(value)
| true |
9141cdf4a75047a3356dc439e0cf52d83c395d47 | Python | harshonyou/SOFT1 | /week6/test_p05_ex1.py | UTF-8 | 1,007 | 3.71875 | 4 | [
"Apache-2.0"
] | permissive | import unittest
from practical_5 import split_text
class TestExo1(unittest.TestCase):
def testEmptyText(self):
self.assertEqual([], split_text("",''))
def testTextOnlyWhiteSpace(self):
self.assertEqual(["As", "Python's", "creator,", "I'd", "like", "to", "say"], \
split_text("As Python's creator, I'd like to say ",' '))
def testTextWithMultipleDelimiters(self):
self.assertEqual(["As", "Python", "s", "creator", "I", "d", "like", "to", "say"], \
split_text(" As Python's creator, I'd like to say ",", '"))
def testTextWithSingleDelimiter(self):
self.assertEqual([" As Python's creator", " I'd like to say "], \
split_text(" As Python's creator, I'd like to say ", ','))
def testTextWithoutRightDelimiter(self):
self.assertEqual([" As Python's creator, I'd like to say "], \
split_text(" As Python's creator, I'd like to say ", "!>"))
if __name__ == "__main__":
unittest.main()
| true |
009db55e7c5401386c5fb1041f1cd9bc0227398b | Python | nathanBarloy/polynoms | /poly.py | UTF-8 | 9,804 | 3.140625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 13:58:31 2020
@author: nathan barloy
"""
from numbers import Number
class Polynom() :
def __init__(self, coeffs=[]) :
self.coeffs = coeffs
self.simplify()
def simplify(self) :
max_deg = len(self.coeffs)-1
while max_deg>=0 and self.coeffs[max_deg]==0 :
max_deg-=1
self.coeffs = self.coeffs[:max_deg+1]
def set_coeff(self, deg, value) :
for _ in range(len(self.coeffs), deg+1) :
self.coeffs.append(0)
self.coeffs[deg] = value
self.simplify()
def get_coeff(self, deg) :
try :
return self.coeffs[deg]
except IndexError :
return 0
def degree(self) :
return len(self.coeffs)-1
def evaluate(self, value) :
res = 0
for x in self.coeffs[::-1] :
res *= value
res += x
return res
def __iter__(self) :
self.iter_index=0
return self
def __next__(self) :
if self.iter_index>=len(self.coeffs) :
raise StopIteration
else :
while self.coeffs[self.iter_index]==0 :
self.iter_index += 1
res = self.coeffs[self.iter_index]
self.iter_index += 1
return self.iter_index-1,res
def __str__(self) :
d = self.degree()
if d<0 :
return '0'
s = ""
for i,val in enumerate(self.coeffs[:]) :
if val!=0 :
s += str(val)
if i==1 :
s += '*x'
elif i>1 :
s += '*x^' + str(i)
s += " + "
return s[:-3]
def __add__(self, other) :
res = []
for d in range(max(self.degree(), other.degree())+1) :
res.append(self.get_coeff(d) + other.get_coeff(d))
return Polynom(res)
def __iadd__(self, other) :
res = []
for d in range(max(self.degree(), other.degree())+1) :
res.append(self.get_coeff(d) + other.get_coeff(d))
self.coeffs = res
self.simplify()
return self
def __mul__(self, other) :
if isinstance(other, Number) :
res = [x*other for x in self.coeffs]
return Polynom(res)
if isinstance(other, Polynom) :
res = []
deg1 = self.degree()
deg2 = other.degree()
for d in range(deg1+deg2+1) :
aux = 0
for i in range(max(0,d-deg2), min(deg1, d)+1) :
j = d-i
aux += self.get_coeff(i)*other.get_coeff(j)
res.append(aux)
return Polynom(res)
def __rmul__(self, other) :
return self * other
def __imul__(self, other) :
if isinstance(other, Number) :
res = [x*other for x in self.coeffs]
self.coeffs = res
self.simplify()
return self
if isinstance(other, Polynom) :
res = []
deg1 = self.degree()
deg2 = other.degree()
for d in range(deg1+deg2+1) :
aux = 0
for i in range(max(0,d-deg2), min(deg1, d)+1) :
j = d-i
aux += self.get_coeff(i)*other.get_coeff(j)
res.append(aux)
self.coeffs = res
self.simplify()
return self
def __truediv__(self, other) :
return self*(1/other)
def __itruediv__(self, other) :
self *= 1/other
return self
def __sub__(self, other) :
return self + other*-1
def __isub__(self, other) :
self += other*-1
return self
def __len__(self) :
return len(self.coeffs)
def derivate(self) :
res = []
for i in range(1, len(self.coeffs)) :
res.append(self.coeffs[i]*i)
return Polynom(res)
def integrate(self, c=0) :
res = [c]
for i in range(len(self.coeffs)) :
res.append(self.coeffs[i]/(i+1))
return Polynom(res)
def __eq__(self, other) :
return self.coeffs==other.coeffs
def __ne__(self, other) :
return not self==other
class Polynom2() :
"""
A polynom of 2 variables
"""
def __init__(self, coeffs=[]) :
if len(coeffs)>0 and isinstance(coeffs[0], Polynom) :
self.coeffs = coeffs
else :
self.coeffs = []
for l in coeffs :
self.coeffs.append(Polynom(l))
self.simplify()
def simplify(self) :
max_deg = len(self.coeffs)-1
while max_deg>=0 and len(self.coeffs[max_deg])==0 :
max_deg-=1
self.coeffs = self.coeffs[:max_deg+1]
def x_degree(self) :
return len(self.coeffs)-1
def y_degree(self) :
return max([e.degree() for e in self.coeffs])
def __len__(self) :
return len(self.coeffs)
def set_coeff(self, x_deg, y_deg, value) :
for _ in range(len(self.coeffs), x_deg+1):
self.coeffs.append(Polynom())
self.coeffs[x_deg].set_coeff(y_deg, value)
def get_coeff(self, x_deg, y_deg=None) :
if y_deg is None :
try :
return self.coeffs[x_deg]
except IndexError :
return Polynom()
try :
return self.coeffs[x_deg].get_coeff(y_deg)
except IndexError :
return 0
def get_matrix(self) :
res = []
for p in self.coeffs :
res.append(p.coeffs)
return res
def y_evaluate(self, value) :
res = []
for p in self.coeffs :
res.append(p.evaluate(value))
return Polynom(res)
def x_evaluate(self, value) :
m = self.y_degree()
res = []
for i in range(m+1) :
tot = 0
for j in range(len(self.coeffs)-1, -1, -1) :
tot *= value
tot += self.get_coeff(j,i)
res.append(tot)
return Polynom(res)
def evaluate(self, x_value, y_value) :
return self.y_evaluate(y_value).evaluate(x_value)
def __iter__(self) :
self.iter_index = 0
self.current_iter = self.coeffs[0].__iter__()
return self
def __next__(self) :
try :
(y,v) = self.current_iter.__next__()
return self.iter_index, y, v
except StopIteration :
self.iter_index +=1
try :
while len(self.coeffs[self.iter_index])==0 :
self.iter_index += 1
self.current_iter = self.coeffs[self.iter_index].__iter__()
(y,v) = self.current_iter.__next__()
return self.iter_index, y, v
except IndexError :
raise StopIteration
def __str__(self) :
s = ""
for x,y,v in self :
s += str(v)
if x>1 :
s += '*x^' + str(x)
elif x==1 :
s += '*x'
if y>1 :
s += '*y^' + str(y)
elif y==1 :
s += '*y'
s += ' + '
return s[:-3]
def __add__(self, other) :
res = []
for d in range(max(self.x_degree(), other.x_degree())+1) :
res.append(self.get_coeff(d) + other.get_coeff(d))
return Polynom2(res)
def __iadd__(self, other) :
res = []
for d in range(max(self.x_degree(), other.x_degree())+1) :
res.append(self.get_coeff(d) + other.get_coeff(d))
self.coeffs = res
self.simplify()
return self
def __sub__(self, other) :
return self + other*-1
def __isub__(self, other) :
self += other*-1
return self
def __mul__(self, other) :
if isinstance(other, Number) :
res = [x*other for x in self.coeffs]
return Polynom2(res)
if isinstance(other, Polynom2) :
res = []
deg1 = self.x_degree()
deg2 = other.x_degree()
for d in range(deg1+deg2+1) :
aux = Polynom()
for i in range(max(0,d-deg2), min(deg1, d)+1) :
j = d-i
aux += self.get_coeff(i)*other.get_coeff(j)
res.append(aux)
return Polynom2(res)
def __rmul__(self, other) :
return self * other
def __imul__(self, other) :
if isinstance(other, Number) :
res = [x*other for x in self.coeffs]
self.coeffs = res
self.simplify()
return self
if isinstance(other, Polynom2) :
res = []
deg1 = self.x_degree()
deg2 = other.x_degree()
for d in range(deg1+deg2+1) :
aux = Polynom()
for i in range(max(0,d-deg2), min(deg1, d)+1) :
j = d-i
aux += self.get_coeff(i)*other.get_coeff(j)
res.append(aux)
self.coeffs = res
self.simplify()
return self
def __truediv__(self, other) :
return self*(1/other)
def __itruediv__(self, other) :
self *= 1/other
return self
def __eq__(self, other) :
return self.coeffs==other.coeffs
def __ne__(self, other) :
return not self==other | true |
08a7dde4b7c482779fedc3e829e4531f45f968fe | Python | yue008/python-code | /chapter07/rollercoaster.py | UTF-8 | 541 | 3.296875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''
@author: acer4560g
@file: rollercoaster.py
@time: 2020/2/2 8:22
@contact:python初学者(微信公众号)
@vision:3.7.3
---------------------
'''
import sys
print('本程序在python3.7.3编译,运行时请注意python版本')
print('python当前版本:\n' + sys.version)
print('--------------------------\n')
height=input('How tall are you,in inches?')
height=int(height)
if height>= 36:
print('\nYou\'re tall enoough to ride!')
else:
print('\nYou\'ll be able to ride when you\'re a little older') | true |
920ea80dfffca4eb22260c78b0774ae45fdcf5a4 | Python | joohyun333/programmers | /백준/이진탐색/가장 긴 증가하는 수열2.py | UTF-8 | 294 | 2.90625 | 3 | [] | no_license | # https://www.acmicpc.net/problem/12015
# Longest strictly increasing subsequence, O(n log n) "patience" method.
# `result[L]` holds the smallest possible tail value of an increasing
# subsequence of length L found so far; its final length (minus the
# sentinel) is the answer.
import sys, bisect
input = sys.stdin.readline
N = int(input())
arr = list(map(int, input().split()))
# Sentinel 0 keeps result non-empty; assumes all inputs are positive
# (TODO confirm against the problem's constraints).
result = [0]
for i in arr:
    if result[-1]<i:
        # i extends the longest subsequence found so far.
        result.append(i)
    else:
        # Otherwise i tightens the tail of some shorter subsequence.
        result[bisect.bisect_left(result,i)] = i
# Exclude the sentinel when reporting the length.
print(len(result)-1)
| true |
fbfc3a87d0384f4fffb6a68c678d4aa485e2b5b9 | Python | Scripthen-KS/Genny | /gennylib.py | UTF-8 | 1,083 | 3.359375 | 3 | [] | no_license | #!/usr/bin/python
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Genny Lib, will only contain string variables and
# calculations and directory options.
# List of to do.
menu_index="""
Welcome to Genny!
To get started, please read this short story of Genny's life
then you may proceed.
================ Genny ================
Genny is a wee Python script which will generate keys that are from
pre-defined character sets."""
# List of chars-list
menu_opt0="""
Please select your char list, there's 5 to choose from...
0 Alphabetical keys: a-z (lowercase)
1 Standard Hexadecimal keys: A-F, 0-9
2 Number-only keys: 0-9
3 Alphanumeric keys: A-Z, a-z, 0-9
4 Alphanumeric + Symbols Keys: a-z, A-Z, 0-9 + All UK keyboard symbols.
"""
# Loop times.
menu_opt1="""
Checkpoint Amount:
Recommended is 100,000 (type: 100000), after this point, generation starts
to slow down.
If you have more than 6GB of FREE system memeory
feel free to set it over 40000000.
"""
menu_opt2="""
Enter key length size, default is 64-bit (8 chars, aaaabbbb)
WPA is 8 - 63 in length
"""
| true |
01cd2e440a1e96b306b2866938492fa4fbda76e8 | Python | LizEve/PopGen_Fall2014 | /62_Pop_Gen_Hw_GGM.py | UTF-8 | 3,525 | 3.734375 | 4 | [] | no_license | #!/usr/bin/env python
import math
import random
import numpy
import matplotlib.pyplot as plt
#1/p=(4N)/(k(k-1)) is the average coalescence time mean of an exponetial distribution
#p= 1/(4N)/(k(k-1))
#go back a number of generations drawn from an expoenetial distripution with the expectation was seen in the thingy above
def coal(n,k):
#N=Effective population size
#k=Number of gene copies sampled
#c=coalescent even you are interested in
genstocoal=[] #create list to put coalescent time between each node
while k > 1: #while k is larger than zero
expmean=((4*n)/float(k*(k-1)))
draw=random.expovariate(1/expmean) #draw a random number
genstocoal.append(draw)
k -= 1
#print len(gentocoal)
#this is ALWAYS equal to k-1
#total time to coalescence of all gene copies
total=sum(genstocoal)
average=numpy.mean(genstocoal)
lastvalue=k-2 #last value in list of coalescent times will be k-2
#assign the coalescent time of the last two gene copies to a variable
lastcoal=genstocoal[lastvalue]
#print lastcoaltime
#time to coalesce all but the last two
allbut2=total-lastcoal
#print allbut2
#print "Returns a list= [total time to MRCA of all k, avg time to each coal event, avg caol time to 2 remaining gene copies, and then time it takes for the tow copies to coal, [list of all coal values]]"
coalnums=[total, average, allbut2, lastcoal, genstocoal]
return coalnums
#print coalnums
#return gentocoal
def fivekmiles(n,k,y):
#n=pop size
#k=gene copies
#x= which part you want to estimate
#0=total coalescent time
#1=average coal time per branch within tree
#2=time to coal of all but 2 copies
#3=time to coal of last 2 gene copies
simuavgs=[]
for x in range (5000):
coaloutput=coal(n,k) #passing output of coal func to variable
totalcoaltime=coaloutput[y] #picking out sum of total times, which is total time to MRCA
simuavgs.append(totalcoaltime) #adding each of these times to a list
mean=numpy.mean(simuavgs)
avglist=[mean,simuavgs]
return avglist
def calcs(meanlist):
    """Given [mean, samples], count samples strictly above and below the mean.

    *meanlist* is a two-element sequence: the mean followed by the list of
    per-simulation averages.  Returns [mean, number_above, number_below];
    samples exactly equal to the mean fall into neither bucket.
    """
    mean, samples = meanlist[0], meanlist[1]
    above = sum(1 for value in samples if value > mean)
    below = sum(1 for value in samples if value < mean)
    return [mean, above, below]
print
print "welcome..."
print
print "...to the coalescent model"
print
print "An average value will be calculated over 5000 simulations"
print
print "Your choices are:"
print
print "0 = total coalescence time of all gene copies"
print "1 = average coalescence time per branch within population"
print "2 = time to coalescence of all but 2 gene copies"
print "3 = time to coalescence of last 2 gene copies"
print
y=(input("So which value would you like to calculate?"))
n=(input("Effective population size:"))
k=(input("Number of gene copies sampled:"))
meanlist=fivekmiles(n,k,y)
stats=calcs(meanlist)
if y == 0:
print "Mean *total* coalescence time:"+str(stats[0])
elif y == 1:
print "Mean *average* coalescence time:"+str(stats[0])
elif y == 2:
print "Mean time to coalescence of all but two gene copies:"+str(stats[0])
elif y == 3:
print "Mean time to coalescence of the last two gene copies:"+str(stats[0])
else:
print "you did somethign wrong"
print "Number of samples above mean:"+str(stats[1])
print "Number of samples below mean:"+str(stats[2])
| true |
223147019f9d8b90d3eb8c6dc4b671aae6d63494 | Python | iridium-browser/iridium-browser | /ash/webui/camera_app_ui/resources/utils/gen_preload_images_js.py | UTF-8 | 1,242 | 2.703125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python3
# Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates an array of images to be preloaded as a ES6 Module."""
import argparse
import json
import os
import shlex
import sys
def main():
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument(
'--output_file',
help='The output js file exporting preload images array')
argument_parser.add_argument(
'--images_list_file',
help='File contains a list of images to be appended')
args = argument_parser.parse_args()
with open(args.images_list_file) as f:
files = shlex.split(f.read())
images = {}
for image in files:
with open(image, 'r', encoding='utf-8') as f:
images[os.path.basename(image)] = f.read()
with open(args.output_file, 'w', encoding='utf-8') as f:
filenames = [os.path.basename(f) for f in files]
f.write('export const preloadImagesList = %s;' %
json.dumps(filenames, indent=2))
f.write('export const preloadedImages = %s;' % json.dumps(images))
return 0
if __name__ == '__main__':
sys.exit(main())
| true |
f9824f4d44a1278bb8b93d179976a26f4d678610 | Python | Sk8erboi99/Py_Expense_template | /expense.py | UTF-8 | 1,332 | 2.734375 | 3 | [] | no_license | from PyInquirer import prompt
import csv
from prompt_toolkit.validation import Validator, ValidationError
from user import get_user, get_user_option
class NumberValidator(Validator):
def validate(self, document):
try:
int(document.text)
except ValueError:
raise ValidationError(
message='Please enter a number',
cursor_position=len(document.text))
expense_questions = [
{
"type":"input",
"name":"amount",
"message":"New Expense - Amount: ",
"validate": NumberValidator,
},
{
"type":"input",
"name":"label",
"message":"New Expense - Label: ",
},
{
"type":"list",
"name":"user",
"message":"New Expense - Spender: ",
"choices": get_user
},
{
'type': 'checkbox',
'qmark': '➡',
'message': 'Select all the spenders',
'name': 'allspenders',
"choices": get_user_option,
},
]
def new_expense(*args):
infos = prompt(expense_questions)
with open('expense_report.csv', 'a') as f:
# create the csv writer
writer = csv.writer(f)
# write a row to the csv file
writer.writerow(infos.values())
print("Expense Added !")
return True
def show_status():
return True | true |
d2919a08ebab52219485e41bb6f1e18efb74d057 | Python | ropeake/budget-automator | /Date_Process.py | UTF-8 | 3,073 | 3.03125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 20 14:18:07 2018
#Changelog 16 Feb 2019 bug fixes and performance improvements
#Changelog 20 Apr 2019 Committing comments to keep track of branches - columns not yet added!
@author: Ro
"""
import pandas as pd
import win32ui
import win32con
import os.path
import budgetML
#%% Creating a function to import the csv and re-order the columns
def open_and_column_fix(csv_path):
df=pd.read_csv(csv_path, encoding='latin', quotechar='"', skiprows=4,header=0,
names=['transaction_date','posted_date','type','description','b1','credit','debit','balance','b2','b3'],
)
df.transaction_date=df.description.str.split(' ON ',expand=True)[1] #split and then return column 1 (firts column is 0)
df.loc[pd.isnull(df.transaction_date),'transaction_date']=df.loc[pd.isnull(df.transaction_date),'posted_date']
df['sort_code']="'09-01-28"
df['ac_number']=95349265
df=df[['transaction_date','posted_date','sort_code','ac_number','type','description','credit','debit','balance']]
df=df[~pd.isnull(df.balance)]
return df
#%% launches dialouge box to select file
o=win32ui.CreateFileDialog(1)
if o.DoModal()==1: # if you click a file, and then ok
selected_path=o.GetPathName() # def variable - the full path and filename of the selected file
#renamed from filename for clarity
#%% Defines variables for current and previous files with and without full path
folder_name=os.path.dirname(selected_path)
list_of_files=os.listdir(folder_name)
selected_file=os.path.basename(selected_path)
sf_index=list_of_files.index(selected_file)
previous_file=list_of_files[sf_index-1]
previous_path=os.path.join(folder_name,previous_file)
#%% Concatination approach
df_current=open_and_column_fix(selected_path)
df_previous=open_and_column_fix(previous_path)
df=pd.concat([df_current, df_previous,df_previous])
print(len(df))
df=df.drop_duplicates(keep=False)
print(len(df))
#%% Importing eirinn's machine learning code
data=pd.read_excel(r"C:\Users\Ro\OneDrive\Budgets\2018 Budget.xlsx",sheet_name='London Data')
budgetML.add_training_data(data)
df_predicted=budgetML.build_and_predict(df)
df_predicted=df_predicted[['transaction_date','posted_date','sort_code','ac_number','type','description','credit','debit','balance','Confidence','Assignment','Category']]
#%% copy to clipboard
df_predicted.to_clipboard(excel=True,index=False,header=False,sep='\t')
status=f"{len(df_current)-len(df)} duplicates removed"
# here's my new code to find duplicates but it's shit and broken
#%%
win32ui.MessageBox(f"Success! {status}, remaining transactions pasted to clipboard",'Transaction Formatter 2000')
else:
win32ui.MessageBox('Bye bye','Transaction Formatter 2000',win32con.MB_ICONSTOP)
#%% Remove dupliactes using isin
# df_current=open_and_column_fix(selected_path)
# df_previous=open_and_column_fix(previous_path)
# df_current.set_diff(df_previous) | true |
0f2c220b8eabdce2d0345721bb1dad6b6cb71310 | Python | MingKeungZhang/Mastermind-Python-3.5.1- | /Mastermind.py | UTF-8 | 2,393 | 4.34375 | 4 | [] | no_license | #William Zhang
import random
#Globcal color variable
COLORS=['red','orange','yellow','green','blue','purple']
#Generate random color
def hidden_color():
#hidden color list
hiddenColor=[]
#Generate random color
for i in range(4):
randColor=COLORS[random.randint(0,5)]
hiddenColor.append(randColor)
return hiddenColor
#Opponent's guess
def opponent_guess():
#opponent guess list
opponentGuess=[]
isInt = True
print("Make a guess of four colors:")
print("0 - red")
print("1 - orange")
print("2 - yellow")
print("3 - green")
print("4 - blue")
print("5 - purple")
#Get opponent guess
for i in range(4):
try:
guess = int(input("Choose a color(0-5): "))
except ValueError:
isInt = False
guess = -1
while(guess < 0 or guess > 5 or isInt == False):
isInt = True
try:
guess = int(input("Invalid input. Choose a color(0-5): "))
except ValueError:
isInt = False
opponentGuess.append(COLORS[guess])
print(opponentGuess)
print("Your guess is: \n"+str(opponentGuess))
return opponentGuess
#The clue
def the_clue(hiddenColor, guess):
#tempoary color and guess list
tempHiddenColor=hiddenColor[:]
tempGuess=guess[:]
#Get the clue
for i in range(4):
if(tempGuess[i]==tempHiddenColor[i]):
tempGuess[i]=''
tempHiddenColor[i]=''
print("correct color and position")
for i in range(4):
for j in range(4):
if(tempGuess[i]!='' or tempHiddenColor[j]!=''):
if(tempGuess[i]==tempHiddenColor[j]):
tempHiddenColor[j]=''
print("correct color, wrong position")
#The main
def main():
#initalized hidden color
hiddenColor=hidden_color()
print(hiddenColor)
#counter and boolean
i=0
correctGuess = False
#tries
while(i < 10 and correctGuess == False):
opponentGuess=opponent_guess()
if(opponentGuess == hiddenColor):
correctGuess = True
print("You win")
else:
the_clue(hiddenColor,opponentGuess)
if(i==9):
print("You lose")
i+=1
| true |
14f89d30c0e8bae892a4ed2fff0900ddc08b1a86 | Python | Hunt2behunter/synackapi | /scope_download_threaded.py | UTF-8 | 1,344 | 2.59375 | 3 | [
"MIT"
] | permissive | import requests
import warnings
import json
from threading import Thread
from Queue import Queue
warnings.filterwarnings("ignore")
token = raw_input("Please enter your Synack Auth Header (Command from web console: sessionStorage.getItem('shared-session-com.synack.accessToken')): ")
target_code = raw_input("Please enter your target codename: ")
max_page_count = 100
blocks = []
def return_ips(q, result):
while not q.empty():
x = q.get()
response = response = requests.get('https://platform.synack.com/api/targets/'+target_code+'/cidrs',params={'page': x[1]},headers={'Authorization': 'Bearer '+token},verify=False)
temp = json.dumps(response.json()['cidrs']).replace("[","").replace("]","").replace("\"","").replace(", ","\n").split("\n")
blocks.extend(temp)
print("Page "+str(x[1])+" done.")
q.task_done()
return True
pages = []
for x in range(1, max_page_count, 3):
pages.append(x)
q = Queue(maxsize=0)
for i in range(len(pages)):
q.put((i, pages[i]))
for i in range(20):
worker = Thread(target=return_ips, args=(q,pages))
worker.setDaemon(True)
worker.start()
q.join()
blocks = list(set(blocks))
f = open("blocks.txt","w+")
for i in range (len(blocks)):
f.write(blocks[i]+"\n")
f.close()
print ("All done! Blocks have been added to blocks.txt file.") | true |
2952c687f88f485703fb1db5e9e92c736dbcfcfb | Python | errnox/some-matplotlib-things | /matplotlib-demo/data_plotter.py | UTF-8 | 367 | 3.3125 | 3 | [] | no_license | import numpy
import pylab
"""
Simple Line Plot
----------------
Shows how to make and save a simple line plot with labels, title and grid.
"""
# Load whitespace-separated numeric data from the local file.
data = numpy.loadtxt('./datafile')
# Plot the series against its index (axis labels say time vs temperature).
pylab.plot(data)
pylab.xlabel('time (s)')
pylab.ylabel('temperature (degrees C)')
pylab.title('Simple data visualization')
pylab.grid(True)
# Save the figure to disk, then open an interactive window.
pylab.savefig('simple_plot')
pylab.show()
| true |
19f6e02c2d1f4740b0c66fedc80f0b92b075d863 | Python | gauthamkrishna-g/HackerRank | /Algorithms/Sorting/Palindrome_Index.py | UTF-8 | 401 | 3.4375 | 3 | [
"MIT"
] | permissive | T = int(input())
def _is_palindrome(s, lo, hi):
    """Return True when s[lo:hi+1] reads the same forwards and backwards."""
    while lo < hi:
        if s[lo] != s[hi]:
            return False
        lo += 1
        hi -= 1
    return True

def palindrome_index(s):
    """Index whose removal makes ``s`` a palindrome, or -1 if it already is.

    At the first mismatched pair (l, r) the removed character must be s[l]
    or s[r] -- removing anything strictly between them leaves the mismatch
    in place -- so it suffices to test whether skipping either end yields a
    palindrome.  The original two-character lookahead both crashed on short
    strings (IndexError on s[l+2]) and could report the wrong side.
    """
    l = 0
    r = len(s) - 1
    while l < r:
        if s[l] != s[r]:
            if _is_palindrome(s, l + 1, r):
                return l
            return r
        l += 1
        r -= 1
    return -1

# T is read on the line above this block; one test string per line follows.
for _ in range(T):
    print (palindrome_index(input()))
85425853edac6c6fa36ddc764aabb2443a199aeb | Python | jwodder/doapi | /doapi/ssh_key.py | UTF-8 | 3,006 | 2.75 | 3 | [
"MIT"
] | permissive | from six import string_types
from .base import ResourceWithID
class SSHKey(ResourceWithID):
    """
    An SSH key resource: a public SSH key that DigitalOcean can install
    into the :file:`/root/.ssh/authorized_keys` file of newly created
    droplets.

    New keys are created with :meth:`doapi.create_ssh_key`; existing ones
    are retrieved with :meth:`doapi.fetch_ssh_key` and
    :meth:`doapi.fetch_all_ssh_keys`.

    Fields specified by the DigitalOcean API for SSH key objects:

    :var id: a unique identifier for the SSH key
    :vartype id: int

    :var fingerprint: the unique fingerprint of the SSH key
    :vartype fingerprint: string

    :var name: a human-readable name for the SSH key
    :vartype name: string

    :var public_key: the entire SSH public key as it was uploaded to
        DigitalOcean
    :vartype public_key: string
    """

    def __init__(self, state=None, **extra):
        # A bare string is accepted as shorthand for a fingerprint.
        if isinstance(state, string_types):
            state = {"fingerprint": state}
        super(SSHKey, self).__init__(state, **extra)

    def __str__(self):
        """ Convert the SSH key to its fingerprint """
        return self.fingerprint

    @property
    def _id(self):
        r"""
        The `SSHKey`'s ``id`` field when defined, otherwise its
        ``fingerprint`` field.  Raises `TypeError` when neither is set.
        """
        if self.get("id") is not None:
            return self.id
        if self.get("fingerprint") is not None:
            return self.fingerprint
        raise TypeError('SSHKey has neither .id nor .fingerprint')

    @property
    def url(self):
        """ The endpoint for general operations on the individual SSH key """
        return self._url('/v2/account/keys/' + str(self._id))

    def fetch(self):
        """
        Fetch & return a new `SSHKey` object representing the SSH key's
        current state

        :rtype: SSHKey
        :raises DOAPIError: if the API endpoint replies with an error (e.g.,
            if the SSH key no longer exists)
        """
        api = self.doapi_manager
        payload = api.request(self.url)
        return api._ssh_key(payload["ssh_key"])

    def update_ssh_key(self, name):
        # Named ``update_ssh_key`` rather than ``update`` to avoid clashing
        # with MutableMapping.update.
        """
        Update (i.e., rename) the SSH key

        :param str name: the new name for the SSH key
        :return: an updated `SSHKey` object
        :rtype: SSHKey
        :raises DOAPIError: if the API endpoint replies with an error
        """
        api = self.doapi_manager
        payload = api.request(self.url, method='PUT', data={"name": name})
        return api._ssh_key(payload["ssh_key"])

    def delete(self):
        """
        Delete the SSH key

        :return: `None`
        :raises DOAPIError: if the API endpoint replies with an error
        """
        self.doapi_manager.request(self.url, method='DELETE')
| true |
6cf756f91a5427e9dcd7d0f25ab82230c6898e33 | Python | Rtgher/ProcGen-RPG | /Proc Gen RPG/GameWindow.py | UTF-8 | 1,752 | 3.375 | 3 | [
"Unlicense"
] | permissive | """GameWindow.py
This is the main game window.
It provides the main game functionality.
"""
#import section
import pygame
import eztext
import sys
from pygame.locals import *
#global variables initialization (executed at import: this module opens a
#pygame window as a side effect of being imported)
#screen size in pixels
win_width= 800
win_height =600
#colors as RGB tuples
black= (0, 0, 0)
grey=(100,100,100)
white= (255, 255, 255)
red= (255, 0, 0)
green= (0, 255, 0)
blue= (0, 0, 255)
gray=(64, 64, 64)
#pygame init
pygame.init()
#pygame variables: main window, eztext input box, and the frame clock
window=pygame.display.set_mode((win_width, win_height))
textbox= eztext.Input(maxlength=40, color= black, prompt="What do you do?: " )
clock = pygame.time.Clock()
#background surface, filled once with a flat color
background= pygame.Surface((win_width, win_height))
background.fill(grey)
#surfaces: main play area, text-input strip, and the left status bar
graphicwin =pygame.Surface((win_width*0.8,win_height *0.88))
graphicwin.fill(black)
textsurf=pygame.Surface((win_width*0.98, win_height *0.10))
textsurf.fill(white)
statusbar =pygame.Surface((win_width*0.19, win_height*0.88))
textbox.draw(textsurf)
#font (loaded but not yet used in this module)
gamefont= pygame.font.SysFont('Gothic', 12)
def RunWindow():
    """Main graphic loop at ~30 FPS.

    Pumps the pygame event queue, feeds the events to the text box,
    redraws every surface and flips the display; returns as soon as a
    QUIT event arrives.
    """
    while True:
        clock.tick(30)
        pending = pygame.event.get()
        # Bail out before drawing when the window was asked to close.
        if any(evt.type == QUIT for evt in pending):
            return
        # Redraw back-to-front: background, status bar, play area, input box.
        window.blit(background, (0, 0))
        window.blit(statusbar, (0, 0))
        window.blit(graphicwin, (0.2 * win_width, 0))
        textbox.update(pending)
        typed = textbox.get()
        textbox.draw(textsurf)
        window.blit(textsurf, (0, 0.9 * win_height))
        pygame.display.update()
#####END Function
if __name__ == '__main__': RunWindow()
| true |
560828f98016f7004644dc364122a20d370bba88 | Python | victora0007/DashBoardDS4A | /Pages/StaticModelPageData.py | UTF-8 | 2,119 | 2.78125 | 3 | [
"MIT"
] | permissive | # Numeric Fields
# Numeric model inputs rendered as <input type="number"> fields.
fields = [
    {"Label": "Price", "Description": "Price of device att moment of purchase", "type": "number"},
    {"Label": "Past purchases", "Description": "Number of purchases done by user", "type": "number"},
    {"Label": "Hours elapsed", "Description": "Total hours in local session before pass to checkout", "type": "number"},
    {"Label": "Activity count", "Description": "Total number of events before pass to checkout", "type": "number"},
    {"Label": "Past # sessions", "Description": "Total number of past sesions of current user", "type": "number"},
]
# Categorical Fields with their Options (rendered as <select> dropdowns);
# the trailing comments record how many options each list holds.
fields_categorical = [
    {
        "Label": "Condition",
        "Options": ["Bom", "Bom - Sem Touch ID", "Excelente", "Muito Bom", "Novo"], # 5
        "Description": "Quality Condition of the Device",
        "type": "select"
    },
    {
        "Label": "Storage",
        "Options": ["128GB", "16GB", "1TB", "256GB", "32GB", "32GB RAM 2GB", "32GB RAM 3GB", "4GB", "512GB", "512MB", "64GB", "64GB RAM:4GB", "64GB RAM:6GB", "8GB"], #14
        "Description": "Storage Capacity of the device",
        "type": "select"
    },
    {
        "Label": "Brand",
        "Options": ["Asus", "Huawei", "LG", "Lenovo", "Motorola", "Multilaser", "Positivo", "Quantum", "Samsung", "Sony", "Xiaomi", "iPad", "iPhone"], # 13
        "Description": "Brand of the device",
        "type": "select"
    }
]
# Example Data: one row per sample, five numeric values followed by the
# three categorical selections encoded as option-index strings.
example_data = [
    [1819.0, 0.0, 80.118611, 113.0, 8.0, '2', '0', '8'],
    [1039.0, 2.0, 0.003889, 3.0, 1.0, '3', '1', '12'],
    [1000, 10, 6, 8, 12, '2', '2', '0'],
    [2045, 16, 6, 15, 4, '2', '1', '8']
]
| true |
8905361a1016644fd49284b736344cb9ef13d43b | Python | sivaprakashSP/Python-Stuffs | /Skillrack DC/Sum_of_2_nos_==_k.py | UTF-8 | 627 | 3.125 | 3 | [] | no_license | #l=[int(x) for x in input().split()]
def has_pair_with_sum(nums, target):
    """Return True when two elements (at distinct positions) sum to target.

    Single O(n) pass with a set of previously seen values, replacing the
    original O(n^2) enumeration of all pairs.
    """
    seen = set()
    for value in nums:
        if target - value in seen:
            return True
        seen.add(value)
    return False

def main():
    # First input line: element count and the target sum k (the count is
    # not needed; the whole second line is read regardless).
    n, k = input().split()
    people = [int(x) for x in input().split()]
    # BUG FIX: the original reused the variable holding k as the unpacking
    # target while looping over the pairs, so it ended up comparing pair
    # sums against the last pair's second element instead of against k.
    print("yes" if has_pair_with_sum(people, int(k)) else "no")

if __name__ == "__main__":
    main()
fd4511fff452ec5ade05b2c3eb8f0192694eec46 | Python | rpryzant/code-doodles | /interview_problems/2018/CRACKING/3.5_v2.py | UTF-8 | 1,221 | 3.96875 | 4 | [] | no_license | """
insertion sort
two variables: min, index
1) move [:index] elements to 2nd stack
2) find min in remaining stack (moving elements to 2nd stack)
3) transfer back to original stack, but pluck out the min and move it to the index-th spot
4) increment index
O(n^2), but who's counting??
struggled with this more than I should have....
"""
import sys
class Stack:
    """Minimal LIFO stack backed by a Python list (top = end of list)."""

    def __init__(self, l=None):
        # BUG FIX: the original used a mutable default (l=[]), so every
        # default-constructed Stack shared ONE list; a None sentinel gives
        # each instance its own list.  A caller-supplied list is still
        # aliased, matching the original behavior.
        self.d = [] if l is None else l

    def push(self, x):
        """Push x on top of the stack."""
        self.d.append(x)

    def pop(self):
        """Remove and return the top element; raise on an empty stack."""
        if not self.d:
            raise Exception('pop from empty stack')
        return self.d.pop()

    def peek(self):
        """Return the top element without removing it."""
        return self.d[-1]

    def isEmpty(self):
        """Return True when the stack holds no elements."""
        return len(self.d) == 0
def percolate(s, aux):
    """Run one bubble-style pass over stack ``s``, draining it into ``aux``.

    Walks down ``s`` comparing each element with the one beneath it:
    in-order pairs are moved across unchanged, while an out-of-order pair
    is swapped as it is transferred.  Returns True when no swap was needed,
    i.e. ``s`` was already sorted with its smallest element on top; the
    caller keeps re-running passes until that holds.
    """
    top = s.pop()
    done = True  # stays True only if every adjacent pair was in order
    while not s.isEmpty():
        if top <= s.peek():
            # In order: hand the current element to aux, step downwards.
            aux.push(top)
            top = s.pop()
        else:
            # Out of order: pass the lower element across first and keep
            # `top` for the next comparison -- this performs the swap.
            done = False
            aux.push(s.pop())
    aux.push(top)
    return done
def transfer(a, b):
    """Pop every element off stack ``a`` and push it onto ``b``.

    Empties ``a`` and reverses the element order in the process.
    """
    while True:
        if a.isEmpty():
            break
        b.push(a.pop())
def sort_stack(s):
    """Sort stack ``s`` in place so its smallest element ends up on top.

    Runs :func:`percolate` passes (each pass drains ``s`` into a scratch
    stack) until a pass reports the contents were already in order, then
    moves everything back onto ``s`` and returns it.
    """
    scratch = Stack()
    ordered = percolate(s, scratch)
    while not ordered:
        transfer(scratch, s)
        ordered = percolate(s, scratch)
    transfer(scratch, s)
    return s
# Smoke test: after sorting, the smallest element sits on top of the stack,
# so the backing list prints in descending order (Python 2 print statement).
s = Stack([5,2,6,2,1,1])
s = sort_stack(s)
print s.d
| true |
d7a73efdf1d77c9a63307e9006804c448ea17f56 | Python | cassief2/Labs | /Vol1B/MonteCarlo2-Sampling/testDriver.py | UTF-8 | 8,547 | 3.453125 | 3 | [
"CC-BY-3.0"
] | permissive | # solutions.py
"""Volume 1, Lab 16: Importance Sampling and Monte Carlo Simulations.
Solutions file. Written by Tanner Christensen, Winter 2016.
"""
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
def prob1(n):
    """Monte Carlo estimate, from n draws, of the probability that a
    standard normal sample is greater than 3."""
    draws = np.random.randn(n)
    exceeds_three = draws > 3
    return 1/n * np.sum(exceeds_three)
def prob2():
    """Answer the following question using importance sampling:
    a tech support hotline receives an average of 2 calls per minute;
    what is the probability that they will have to wait at least 10
    minutes to receive 9 calls?

    The wait time follows Gamma(a=9, scale=0.5); samples are drawn from
    the importance distribution N(12, 2) and reweighted by f/g.

    Returns:
        IS (ndarray) - estimates using [5000, 10000, ..., 500000] sample
        points.
    """
    # Freeze both distributions ONCE; the original lambdas rebuilt the
    # frozen scipy objects on every loop iteration.
    target = stats.gamma(a=9, scale=0.5)     # f: true wait-time density
    proposal = stats.norm(loc=12, scale=2)   # g: importance density

    num_samples = np.arange(5000, 505000, 5000)
    IS = []
    for n in num_samples:
        Y = np.random.normal(12, 2, n)
        # Indicator (Y > 10) times the likelihood ratio f/g; its mean
        # estimates P(wait > 10).
        weights = target.pdf(Y) / proposal.pdf(Y)
        IS.append(1./n * np.sum((Y > 10) * weights))
    return np.array(IS)
def prob3():
    """Plot the errors of Monte Carlo Simulation vs Importance Sampling
    for the prob2()."""
    # Indicator of the event "waited more than 10 minutes".
    h = lambda x : x > 10
    # Plain Monte Carlo: sample the gamma wait time directly.
    MC_estimates = []
    for N in xrange(5000,505000,5000):
        X = np.random.gamma(9,scale=0.5,size=N)
        MC = 1./N*np.sum(h(X))
        MC_estimates.append(MC)
    MC_estimates = np.array(MC_estimates)
    # Importance-sampling estimates for the same sample sizes.
    IS_estimates = prob2()
    # Exact tail probability from the gamma CDF, used as ground truth.
    actual = 1 - stats.gamma(a=9,scale=0.5).cdf(10)
    MC_errors = np.abs(MC_estimates - actual)
    IS_errors = np.abs(IS_estimates - actual)
    x = np.arange(5000,505000,5000)
    plt.plot(x, MC_errors, color='r', label="Monte Carlo")
    plt.plot(x, IS_errors, color='b', label="Importance Sampling")
    plt.legend()
    plt.show()  # blocks until the plot window is closed
    # NOTE(review): xrange is Python 2 only -- this module mixes 2/3 idioms.
def prob4():
    """Importance-sampling estimate of the probability that a draw (X, Y)
    from the standard bivariate normal has X < -1 and Y > 1.

    Samples come from a bivariate normal centered at (-1, 1), where the
    target event actually has mass, and are reweighted by f/g.  The true
    value is about 0.0252.

    Returns:
        (float) the estimate from 10**4 samples.
    """
    n = 10**4
    # Freeze both distributions once; pdf() accepts an (n, 2) array, so the
    # original apply_along_axis loops (which rebuilt both frozen
    # distributions once per sample row) are replaced by vectorized calls.
    target = stats.multivariate_normal(np.zeros(2), np.eye(2))           # f
    proposal = stats.multivariate_normal(np.array([-1, 1]), np.eye(2))   # g

    Y = np.random.multivariate_normal(np.array([-1, 1]), np.eye(2), size=n)
    hh = (Y[:, 0] < -1) & (Y[:, 1] > 1)   # event indicator per sample
    weights = target.pdf(Y) / proposal.pdf(Y)
    return 1./n * np.sum(hh * weights)
#Test Script and Class =======================================================
def test(student_module):
    """Test script. Import the student's solutions file as a module.

    X points for problem 1
    X points for problem 2
    ...

    Inputs:
        student_module: the imported module for the student's file.

    Returns:
        score (int): the student's score, out of TOTAL.
        feedback (str): a printout of test results for the student.
    """
    driver = _testDriver()
    driver.test_all(student_module)
    return driver.score, driver.feedback
class _testDriver(object):
    """Class for testing a student's work. See test.__doc__ for more info.

    NOTE(review): the problem* methods use Python 2 print statements while
    _grade uses input()/raw_input -- the module mixes Python 2/3 idioms.
    """

    # Constructor -------------------------------------------------------------
    def __init__(self):
        """Initialize the feedback attribute."""
        self.feedback = ""

    @staticmethod
    def _errType(error):
        """Get just the name of the exception 'error' in string format."""
        if isinstance(error, BaseException):
            return str(type(error)).lstrip("<type 'exceptions.").rstrip("'>")
        else:
            return str(error)

    def _grade(self, points, message=None):
        """Manually grade a problem worth 'points'. Return the score."""
        credit = -1
        # Keep prompting until the grader enters an integer in [0, points].
        while credit > points or credit < 0:
            try:
                credit = int(input("\nScore out of %d: "%points))
            except:
                credit = -1
        if credit != points:
            # Add comments (optionally),
            comments = raw_input("Comments: ")
            if len(comments) > 0:
                self.feedback += "\n%s"%comments
            # Or add a predetermined error message.
            elif message is not None:
                self.feedback += "\n%s"%message
        return credit

    # Main routine -----------------------------------------------------------
    def problem1(self, s):
        """Test Problem 1. X points."""

        estimate1 = s.prob1(50000)
        estimate2 = s.prob1(5000000)
        print "Problem 1: (should approach 0.0013499 for large n)"
        print "Estimate for n = 50000: {}".format(estimate1)
        print "Estimate for n = 5000000: {}".format(estimate2)
        points = self._grade(10)
        # Test problem 1 here.
        return points

    def problem2(self, s):
        """Test Problem 2 by plotting the importance-sampling estimates
        and asking the grader to score the result. 10 points."""
        vals = s.prob2()
        x = np.linspace(5000,50000,np.size(vals))
        plt.plot(x,vals)
        plt.title("Estimate for probability of waiting 10 minutes")
        plt.xlabel('# Sample Points')
        plt.ylabel('Probability')
        plt.show()
        print "Final estimate: {} (should approach .00208)".format(vals[-1])
        points = self._grade(10)
        return points

    def problem3(self, s):
        """Test Problem 3 by letting the student's code draw its error
        comparison plot. 10 points."""
        plt.ylim([0,0.0012])
        s.prob3()
        points = self._grade(10)
        return points

    def problem4(self, s):
        """Test Problem 4 by printing the student's estimate. 10 points."""
        estimate = s.prob4()
        print "Estimate for problem 4: {}".format(estimate)
        points = self._grade(10)
        return points

    def test_all(self, student_module, total=40):
        """Grade the provided module on each problem and compile feedback."""
        # Reset feedback and score.
        self.feedback = ""
        self.score = 0

        def test_one(problem, number, value):
            """Test a single problem, checking for errors."""
            try:
                self.feedback += "\n\nProblem %d (%d points):"%(number, value)
                points = problem(student_module)
                self.score += points
                self.feedback += "\nScore += %d"%points
            except BaseException as e:
                self.feedback += "\n%s: %s"%(self._errType(e),e)

        # Grade each problem.
        test_one(self.problem1, 1, 10)   # Problem 1: 10 points.
        test_one(self.problem2, 2, 10)   # Problem 2: 10 points.
        test_one(self.problem3, 3, 10)   # Problem 3: 10 points
        test_one(self.problem4, 4, 10)   # Problem 4: 10 points

        # Report final score.
        percentage = (100. * self.score) / total
        self.feedback += "\n\nTotal score: %d/%d = %s%%"%(self.score, total, percentage)
        if percentage >= 98: self.feedback += "\n\nExcellent!"
        elif percentage >= 90: self.feedback += "\n\nGreat job!"

        # Add comments (optionally).
        print(self.feedback)
        comments = str(raw_input("Comments: "))
        if len(comments) > 0:
            self.feedback += '\n\n\nComments:\n\t%s'%comments

    # Possible Helper Functions -----------------------------------------------
    def _eqTest(self, correct, student, message):
        """Test to see if 'correct' and 'student' are equal.
        Report the given 'message' if they are not.
        """
        if correct == student:
            return 1
        else:
            self.feedback += "\n%s"%message
            self.feedback += "\n\tCorrect response: %s"%correct
            self.feedback += "\n\tStudent response: %s"%student
            return 0

    def _strTest(self, correct, student, message):
        """Test to see if 'correct' and 'student' have the same string
        representation. Report the given 'message' if they are not.
        """
        if str(correct) == str(student):
            return 1
        else:
            self.feedback += "\n%s"%message
            self.feedback += "\n\tCorrect response: %s"%correct
            self.feedback += "\n\tStudent response: %s"%student
            return 0

    def _evalTest(self, expression, correct, message):
        """Test a boolean 'expression' to see if it is 'correct'.
        Report the given 'message' if it is not.
        """
        if expression is correct:
            return 1
        else:
            self.feedback += "\n%s"%message
            return 0
| true |
8246abe974fea49cc3d2fad21c95abb546f017cd | Python | josephchenhk/learn | /Python/python3-cookbook/chp13脚本编程与系统管理/13.6 执行外部命令并获取它的输出/13.6.py | UTF-8 | 376 | 2.9375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 11/1/2019 9:02 AM
# @Author : Joseph Chen
# @Email : josephchenhk@gmail.com
# @FileName: 13.6.py
# @Software: PyCharm
"""
13.6 执行外部命令并获取它的输出
"""
import subprocess
out_bytes = subprocess.check_output(['netstat','-a']) # This will take a huge amount of time!
out_text = out_bytes.decode('utf-8')
print(out_text)
| true |
af368f826b8ccb1e6c6f65cdc896262c28a3b3f4 | Python | IsaacMagno/python3_curso_em_video | /PythonExercicios/ex099.py | UTF-8 | 478 | 4 | 4 | [] | no_license | from time import sleep
def maior(*num):
    """Print the values received, how many there were, and the largest one.

    Accepts any number of positional arguments; with no arguments it
    reports that nothing was informed.  The output text (Portuguese, with
    the exercise's original spelling) is reproduced exactly.
    """
    print('Analisando os valores passados...')
    for valor in num:
        print(f'{valor} ', end='')
        sleep(0.2)
    total = len(num)
    print(f'Foram infomados {total} valores ao todo.')
    if not num:
        print(f'Nenhum valor foi informado.')
    else:
        print(f'E o maior número é {max(num)}.')
    print()
# Demonstration calls: varying argument counts, including none at all.
maior(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
maior(2, 3, 4, 5, 6, 12, 999, 1)
maior(3, 2)
maior(6)
maior()
| true |
570b4458afbd768a2c9236166e424e6348176d33 | Python | gebbz03/PythonProject | /calculator/main.py | UTF-8 | 1,749 | 3.921875 | 4 | [] | no_license |
import hello
import Calculator
#hello.printHello("Gebb Ebero")
var1 = "n"
while(var1 == "n"):
print("1. Addition")
print("2. Subtraction")
print("3. Division")
print("4. Multiplication")
userSelected = input("Input: ")
num1=input("Please enter first number: ")
num2=input("Please enter second number: ")
math=Calculator.Math(float(num1),float(num2))
if(userSelected == "1"):
print("---------------ADDITION---------------")
res = math.addition()
print("---------------------------------------")
print("RESULT: "+str(res))
print("---------------------------------------")
elif(userSelected == "2"):
print("---------------SUBTRACTION---------------")
res = math.subtraction()
print("---------------------------------------")
print("RESULT: "+str(res))
print("---------------------------------------")
elif(userSelected == "3"):
print("---------------DIVISION---------------")
res = math.division()
print("---------------------------------------")
print("RESULT: "+str(res))
print("---------------------------------------")
elif(userSelected == "4"):
print("---------------MULTIPLICATION---------------")
res = math.multiplication()
print("---------------------------------------")
print("RESULT: "+str(res))
print("---------------------------------------")
else:
print("---------------------------------------")
print("Invalid selection")
print("---------------------------------------")
var1=input("Do you want to exit? (y/n) ")
| true |
b2cb5d46c6d30fd077e45b91157368e93b863e38 | Python | Pathairush/data_manipulation | /model_explainability/model_explanation.py | UTF-8 | 3,430 | 2.71875 | 3 | [] | no_license | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import eli5
from eli5.sklearn import PermutationImportance
def print_permutation_importance(fitted_model, data_model : tuple, random_state = 1):
    """Display eli5 permutation-importance weights for a fitted model.

    Parameters:
        fitted_model: an already-fitted sklearn-style estimator.
        data_model (tuple): either (X_train, X_test, y_train, y_test) or
            (X_test, y_test); only the test split is used here.
        random_state (int): seed passed to PermutationImportance.

    Raises:
        ValueError: when data_model has neither 4 nor 2 elements.
    """
    if len(data_model) == 4:
        X_train, X_test, y_train, y_test = data_model
    elif len(data_model) == 2:
        # BUG FIX: this must be `elif` -- with the original separate `if`,
        # the `else` below rejected perfectly valid 4-tuples.
        X_test, y_test = data_model
    else:
        # BUG FIX: `raise "<str>"` is itself a TypeError in Python 3.
        raise ValueError("the len of data model is neither 4 nor 2")
    feature_names = X_test.columns.tolist()
    perm = PermutationImportance(fitted_model, random_state = random_state).fit(X_test, y_test)
    display(eli5.show_weights(perm, feature_names = feature_names))
from matplotlib import pyplot as plt
from pdpbox import pdp, get_dataset, info_plots
def plot_1D_partial_dependency(fitted_model, X_test : pd.DataFrame, model_features : list, feature : str ):
    """Draw a one-dimensional partial-dependence plot for a single feature."""
    isolated = pdp.pdp_isolate(model=fitted_model, dataset=X_test,
                               model_features=model_features, feature=feature)
    pdp.pdp_plot(isolated, feature)
    plt.show()
def plot_2D_partial_dependency(fitted_model, X_test : pd.DataFrame, model_features : list, features : list , plot_type = 'contour'):
    """Draw a 2-D partial-dependence interaction plot for a feature pair.

    NOTE: pdpbox's interact plot has a known error with matplotlib 3.0.0.

    Parameters:
        fitted_model: an already-fitted sklearn-style estimator.
        X_test (pd.DataFrame): data the dependence is computed on.
        model_features (list): every feature the model was trained with.
        features (list): the two features to interact.
        plot_type (str): 'contour' (default) or 'grid'.
    """
    pdp_obj = pdp.pdp_interact(model = fitted_model,
                            dataset = X_test,
                            model_features = model_features,
                            features = features)
    # BUG FIX: the original passed the undefined name `feature` here
    # (NameError at runtime); the parameter is called `features`.
    pdp.pdp_interact_plot(pdp_interact_out = pdp_obj,
                            feature_names = features, plot_type = plot_type)
    plt.show()
import shap
def create_shap_object(my_model, model_type, **kwargs):
    """Build the SHAP explainer that matches the model family.

    Parameters:
        my_model: fitted model to explain.
        model_type (str): 'tree', 'deep', or 'other'.
        **kwargs: for model_type 'other', pass ``train_X=<background data>``
            for the KernelExplainer.

    Raises:
        KeyError: model_type 'other' without a train_X keyword.
        ValueError: unrecognized model_type (the original fell through and
            crashed with an unbound-variable NameError instead).
    """
    if model_type == 'tree':
        explainer = shap.TreeExplainer(my_model)
    elif model_type == 'deep':
        explainer = shap.DeepExplainer(my_model)
    elif model_type == 'other':
        # BUG FIX: `train_X` was an undefined global; take the background
        # dataset from the (previously unused) kwargs instead.
        explainer = shap.KernelExplainer(my_model.predict_proba, kwargs["train_X"])
    else:
        raise ValueError("unknown model_type: %r" % (model_type,))
    return explainer
def calculate_shap_value(explainer, data_for_prediction : pd.DataFrame):
    """Return the SHAP values computed by ``explainer`` for the given rows."""
    return explainer.shap_values(data_for_prediction)
def plot_shap_forceplot(explainer, shap_values, data_for_prediction : pd.DataFrame):
    """Render a SHAP force plot for a single prediction.

    Index [1] selects the positive-class output of a binary classifier.
    """
    shap.initjs()
    plot = shap.force_plot(explainer.expected_value[1], shap_values[1], data_for_prediction)
    display(plot)
def plot_shap_summaryplot(shap_values, X_test : pd.DataFrame):
    """Render a SHAP summary plot over the test set.

    For classification there is one array of SHAP values per possible
    outcome; indexing with [1] selects the values for the "True" class.
    Computing SHAP values can be slow, so be careful with dataset size
    (xgboost models have a much faster optimized path in SHAP).
    """
    shap.initjs()
    plot = shap.summary_plot(shap_values[1], X_test)
    display(plot)
def plot_shap_dependenceplot(shap_values, X, feature_names : str, interaction_feature : str):
    """Render a SHAP dependence plot for one feature, colored by another.

    ``X`` is the full feature frame, i.e. data[feature_names] before the
    train/test split.
    """
    shap.initjs()
    plot = shap.dependence_plot(feature_names, shap_values[1], X,
                                interaction_index=interaction_feature)
    display(plot)
2ddb4d0dbba21079341006fe3521d54b61ceb607 | Python | SamThomas/PyExpLabSys | /LivePlots/LivePlotsRunning.py | UTF-8 | 4,655 | 2.96875 | 3 | [] | no_license |
""" Running Plots """
#import matplotlib
#matplotlib.use('GTKAgg')
#from matplotlib.figure import Figure
#from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg
from LivePlotsCommon import Plot
from LivePlotsExceptions import NLinesError, NDataError
import gtk
import time
class NPointRunning(Plot):
    """ A N point running plot: keeps the latest N samples of each line. """

    def __init__(self, number_of_points=100, number_of_lines=1, dpi=100,
                 x_pixel_size=500, y_pixel_size=400, **kw):
        """ Init plot:
        Parameters:
            number_of_points (int)
            number_of_lines (int)
            dpi (int)
            x_pixel_size (int)
            y_pixel_size (int)
        **kw can contain:
            logscale (boolean)
            title (string)
            x_label (string)
            y_label (string)
            line_styles (list of strings)
            colors (list of strings)
            y_bounds (list of floats) -- [y_min, y_max] if not given->autoscale
            legends (list of strings)
            legend_cols (integer)
            legend_placement (string) -- right (default) or top
            legend_width_right (integer)
            number_in_legend (boolean) -- True (default)
            legend_number_format (string) -- .2f (default)
        """
        Plot.__init__(self, dpi, x_pixel_size, y_pixel_size)
        # Autoscale y only when no explicit bounds were requested
        # (Python-2-only has_key() replaced with the portable `in` test).
        if 'y_bounds' not in kw:
            self.auto_y_scale = True
        self.auto_x_scale = False
        # These work as defaults (the original listed 'y_bounds' twice).
        self.settings = {'logscale': False,
                         'title': None,
                         'x_label': None,
                         'y_label': None,
                         'line_styles': None,
                         'line_colors': None,
                         'y_bounds': None,
                         'x_bounds': (-1, number_of_points),
                         'legends': None,
                         'legend_cols': 1,
                         'legend_placement': 'right',
                         'legend_width_right': 100,
                         'number_in_legend': True,
                         'legend_number_format': '.2f'}
        # Update common settings (inherited from LivePlotsCommon)
        self._change_settings_common(number_of_lines, **kw)
        # Update settings specific to this type of plot and init the data
        self.n_points = number_of_points
        self.x = [range(number_of_points) for _ in range(number_of_lines)]
        # BUG FIX: the original built self.y as [inner]*number_of_lines,
        # making every line alias ONE list, so push_new_points appended each
        # sample number_of_lines times to the same line.  Build an
        # independent list per line instead.
        self.y = [[1] + [None] * (number_of_points - 1)
                  for _ in range(number_of_lines)]
        self._full_update()

    def push_new_points(self, points):
        """ Push new points to the lines
        Attributes:
            points -- list of new points, one item in the list per line,
                must be a list even with just one line, the data point for
                a line can be None
        """
        if not isinstance(points, list):
            raise TypeError('This function must be passed a list')
        if len(points) != self.n_lines:
            raise NLinesError(len(points), self.n_lines)
        # Append one sample per line, then trim each line to n_points.
        for y_data, new_y in zip(self.y, points):
            y_data.append(new_y)
        self.y = [y_data[self.n_points * -1:] for y_data in self.y]
        if self._update_bounds():
            self._full_update()
        self._quick_update()
        self.update_legends()

    def set_data(self, data):
        """ Replace the entire data set
        Attributes:
            data -- list of lists of points, must contain one list per line
                that each contains as many points as the plot does
        """
        if not isinstance(data, list):
            raise TypeError('This function must be passed a list')
        if len(data) != self.n_lines:
            # BUG FIX: the original raised NLinesError(len(y), ...) where
            # `y` was undefined, and reported n_points instead of n_lines.
            raise NLinesError(len(data), self.n_lines)
        for points in data:
            # BUG FIX: the original tested len(data) here, so a line with
            # the wrong number of points was never detected.
            if len(points) != self.n_points:
                raise NDataError(len(points), self.n_points)
        self.y = data
        self._quick_update()
        self.update_legends()

    def set_number_of_points(self, n_points):
        """Resize every line to hold n_points samples, padding new room
        with None and dropping the oldest samples when shrinking."""
        # Rebuild the x data of every line; the original updated only
        # self.x[0], leaving the other lines with stale x lists.  The
        # leftover debug prints were removed as well.
        self.x = [range(n_points) for _ in range(len(self.x))]
        if n_points > self.n_points:
            self.y = [[None] * (n_points - self.n_points) + y for y in self.y]
        else:
            self.y = [y[n_points * -1:] for y in self.y]
        self.n_points = n_points
        self.settings['x_bounds'] = (-1, n_points)
        self.first_update = True
        self._quick_update()
| true |
4ef45fd769a4e42e52787a11b2ac07f986c22c68 | Python | TheRoboticsClub/colab-gsoc2020-Diego_Charrez | /models/dqn_catpole/args.py | UTF-8 | 1,911 | 2.6875 | 3 | [] | no_license | import argparse
def dqn_args_train():
    """Parse DQN training arguments from the command line.

    Returns:
        args: the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--seed',
        dest='seed',
        type=int,
        help='Seed for numpy and tensorflow.',
        default=1,  # never used in practice: the argument is required
        required=True)
    parser.add_argument(
        '--num_iterations',
        dest='num_iterations',
        type=int,
        help='Training will end after reaching the number of iterations.',
        default=20000)
    parser.add_argument(
        '--initial_collect_steps',
        dest='initial_collect_steps',
        type=int,
        help='Exploratory steps.',
        default=1000)
    parser.add_argument(
        '--collect_steps_per_iteration',
        dest='collect_steps_per_iteration',
        type=int,
        help='Collected steps per iteration.',
        default=1)
    parser.add_argument(
        '--replay_buffer_max_length',
        dest='replay_buffer_max_length',
        type=int,
        help='Size of the replay buffer.',
        default=100000)
    parser.add_argument(
        '--batch_size',
        dest='batch_size',
        type=int,
        # BUG FIX: the help text was a copy-paste of 'The assets directory.'
        help='Batch size sampled from the replay buffer per train step.',
        default=64)
    parser.add_argument(
        '--lr',
        dest='learning_rate',
        type=float,
        help='The learning rate.',
        default=1e-3)
    parser.add_argument(
        '--log_interval',
        dest='log_interval',
        type=int,
        help='Output logs after n steps.',
        default=200)
    parser.add_argument(
        '--num_eval_episodes',
        dest='num_eval_episodes',
        type=int,
        help='Number of episodes averaged per evaluation.',  # was placeholder '.'
        default=10)
    parser.add_argument(
        '--eval_interval',
        dest='eval_interval',
        type=int,
        help='Run an evaluation every n training steps.',  # was placeholder '.'
        default=1000)
    args = parser.parse_args()
    return args
f5c25e0412ddbe42c3b26f4d7b1c986bdd378b1d | Python | Zeroeh/Python-RSA | /rsa.py | UTF-8 | 643 | 2.625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
#with python3, you may need to do 'sudo pip3 install cryptodome'
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
import base64
__author__ = 'Zeroeh'
#need to be byte strings (PKCS1_OAEP.encrypt() operates on bytes)
# SECURITY NOTE(review): credentials are hard-coded in source -- acceptable
# for a demo, never for production code.
player_email = b'email@mail.com'
player_password = b'password123'
# PEM-encoded RSA public key used to encrypt the password below.
pub_key = ("-----BEGIN PUBLIC KEY-----\n"
"MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDCKFctVrhfF3m2Kes0FBL/JFeO\n"
"cmNg9eJz8k/hQy1kadD+XFUpluRqa//Uxp2s9W2qE0EoUCu59ugcf/p7lGuL99Uo\n"
"SGmQEynkBvZct+/M40L0E0rZ4BVgzLOJmIbXMp0J4PnPcb6VLZvxazGcmSfjauC7\n"
"F3yWYqUbZd/HCBtawwIDAQAB\n"
"-----END PUBLIC KEY-----")
def main():
    """Encrypt the hard-coded password with the RSA public key using
    PKCS#1 OAEP, then print the ciphertext as hex and as base64 bytes."""
    rsa_key = RSA.importKey(pub_key)
    oaep_cipher = PKCS1_OAEP.new(rsa_key)
    ciphertext = oaep_cipher.encrypt(player_password)
    encoded = base64.b64encode(ciphertext)
    print(ciphertext.hex())
    print(encoded)

if __name__ == '__main__':
    main()
| true |
7d4cdf58f35457797b66fda6d02167847318ec38 | Python | hoseinakbo/AI-P1-classic-pathfinding | /Pathfinding.py | UTF-8 | 2,180 | 3.390625 | 3 | [] | no_license | import PSA
import queue
class PathFindingState(PSA.State):
    """State of the 3x3 sliding puzzle: a flat row-major list of 9 tiles,
    where the value 9 marks the blank square."""

    def __init__(self, board_array):
        # Copy so later mutation of the caller's sequence cannot leak in.
        self.board_array = list(board_array)

    def __eq__(self, other):
        """Boards are equal when they hold the same tiles in the same order.

        List equality replaces the original explicit length check plus the
        element-by-element loop with one idiomatic expression.
        """
        return self.board_array == other.board_array

    def heuristic_to_goal(self):
        """No heuristic information: always estimates 0 remaining cost."""
        return 0
class PathFindingAction(PSA.Action):
    """Swap the tiles at two board indices; one of them is the blank (9)."""
    def __init__(self, first_num, second_num):
        # Indices into the flat 9-element board list.
        self.first_num = first_num
        self.second_num = second_num
class PathFindingProblem(PSA.Problem):
    """8-puzzle search problem on a 3x3 board; tile value 9 is the blank."""

    def get_initial_state(self):
        """Scrambled starting board."""
        return PathFindingState([1, 6, 2, 5, 3, 9, 4, 7, 8])

    def get_final_state(self):
        """Solved board."""
        return PathFindingState([1, 2, 3, 4, 5, 6, 7, 8, 9])

    def get_actions(self, state):
        """All legal moves of the blank, ordered up, left, right, down."""
        board = state.board_array
        moves = []
        for i, tile in enumerate(board):
            if tile != 9:
                continue
            if is_in_table(i - 3):                    # up
                moves.append(PathFindingAction(i, i - 3))
            if i % 3 != 0 and is_in_table(i - 1):     # left (not on col 0)
                moves.append(PathFindingAction(i, i - 1))
            if i % 3 != 2 and is_in_table(i + 1):     # right (not on col 2)
                moves.append(PathFindingAction(i, i + 1))
            if is_in_table(i + 3):                    # down
                moves.append(PathFindingAction(i, i + 3))
        return moves

    def get_result_of_action(self, action, state):
        """Return a new state with the two indexed tiles swapped."""
        result = PathFindingState(state.board_array)
        cells = result.board_array
        cells[action.first_num], cells[action.second_num] = \
            cells[action.second_num], cells[action.first_num]
        return result

    def is_goal(self, state):
        """True when the board is in the solved arrangement."""
        return state.board_array == [1, 2, 3, 4, 5, 6, 7, 8, 9]

    def get_cost(self, state1, state2, action):
        """Every move costs 1 (uniform step cost)."""
        return 1
def is_in_table(i):
    """Return True when ``i`` is a valid index on the 3x3 board (0..8)."""
    return 0 <= i <= 8
| true |
806befb71706fb2ff4d46ef6e7e83e32dbb874df | Python | peaceattack/pyFile | /win_zip.py | UTF-8 | 204 | 2.796875 | 3 | [] | no_license |
+# -*- coding:utf-8 -*-
+
+import zipfile
+MyZip = "C:\Users\Administrator\Desktop\MAC.zip"
+MyZipOBJ = zipfile.ZipFile(MyZip)
+MyZipOBJ.namelist()
+
+for i in MyZipOBJ.namelist():
+ print i
| true |
ae3881e7d033b2c4692bc6b5477b9d8ec8ad64ff | Python | Killavus/PyGame-SpaceInvaders | /GameModule/Env/Enemy.py | UTF-8 | 966 | 3.15625 | 3 | [] | no_license | #!/usr/bin/python2.7
from BattleObject import BattleObject
import pygame
import math
class Enemy( BattleObject ):
def __init__(self, maxHitpoints):
super( Enemy, self ).__init__(maxHitpoints)
self.direction = 1
def setDirection(self):
self.direction *= -1
def move(self, x):
self.rect.move_ip( math.copysign( x, self.direction ), 0 )
def drawHP(self):
if self.spawnFrames > 0:
return
(iw,ih) = self.image.get_size()
(bw,bh) = math.floor(iw*0.5),5
pygame.draw.rect( self.image, (0,0,0), pygame.Rect( (iw/2-bw/2,0), (bw,bh) ) )
if self.destroyed == True:
return
(maxHP,actualHP) = self.hp
hpBarPercent = float(actualHP)/maxHP
barColor = (0,255,0)
if hpBarPercent < .50:
barColor = (245,184,0)
if hpBarPercent < .25:
barColor = (255,0,0)
pygame.draw.rect( self.image, barColor, pygame.Rect( (iw/2-bw/2+1,1), (math.floor((bw-2)*(hpBarPercent)), bh-2) ) )
| true |
456e621336c37267a9f1a51a5e14c364821aeb07 | Python | mdeependu/Machine-Learning | /Programs/Linear Regression.py | UTF-8 | 1,053 | 2.921875 | 3 | [] | no_license | x=[1,2,3,4,5]
y=[2,4,5,4,5]
sum_X=sum(x)
print(sum_X)
sum_Y=sum(y)
print(sum_Y)
mean_X=(sum_X)/len(x)
mean_Y=(sum_Y)/len(y)
X_2=[]
for i in x:
temp=i*i
X_2.append(temp)
''''print(X_2)'''
sum_X_2=sum(X_2)
print(sum_X_2)
Y_2=[]
for i in y:
temp=i*i
Y_2.append(temp)
''''print(Y_2)'''
sum_Y_2=sum(Y_2)
print(sum_Y_2)
X_Y=[]
for i in range(len(x)) :
X_Y.append(x[i]*y[i])
'''print(X_Y)'''
sum_X_Y=sum(X_Y)
print(sum_X_Y)
m=((5*(sum_X_Y))-((sum_X)*(sum_Y)))/((5*(sum_X_2))-((sum_X)**2))
print(m)
c=(((sum_Y)*(sum_X_2))-((sum_X)*(sum_X_Y)))/((5*(sum_X_2)-(sum_X)**2))
print(c)
lin_reg=(m*(mean_X))+c
print(lin_reg)
print("\n")
Y_pred_1=(m*1)+c
Y_pred_2=(m*2)+c
Y_pred_3=(m*3)+c
Y_pred_4=(m*4)+c
Y_pred_5=(m*5)+c
Y_Ypred1=2-Y_pred_1
Y_Ypred2=4-Y_pred_2
Y_Ypred3=5-Y_pred_3
Y_Ypred4=4-Y_pred_4
Y_Ypred5=5-Y_pred_5
Y_Ypred1_2=(Y_Ypred1)**2
Y_Ypred2_2=(Y_Ypred2)**2
Y_Ypred3_2=(Y_Ypred3)**2
Y_Ypred4_2=(Y_Ypred4)**2
Y_Ypred5_2=(Y_Ypred5)**2
Y_pred_sum=(Y_Ypred1_2+Y_Ypred2_2+Y_Ypred3_2+Y_Ypred4_2+Y_Ypred5_2)
print(Y_pred_sum)
| true |