blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
01aaebf9eff735a8beb710f7ed8da9fd5af5b10f | Python | tomergill/DL_ass3 | /part_3_model.py | UTF-8 | 15,175 | 3.28125 | 3 | [] | no_license | import numpy as np
import dynet as dy
import itertools
import pickle
class AbstractNet:
    """
    Abstract base neural network for part 3.

    Holds 2 stacked biLSTMs (2 builders per layer, one per direction), an
    embedding lookup matrix and an MLP with 1 hidden layer on top.
    Subclasses must implement :meth:`repr`, the input-representation hook.

    NOTE: this code targets Python 2 (it uses ``itertools.izip``) and the
    DyNet toolkit.
    """
    FORWARD = 0  # index of the forward-direction builder in a biLSTM pair
    BACKWARD = 1  # index of the backward-direction builder in a biLSTM pair

    def __init__(self, num_layers, embed_dim, lstm1_dim, half_in_dim, classes_number, vocab_size):
        """
        Initialize the dynet.ParameterCollection (called pc) and the parameters.

        :param num_layers: Number of layers each LSTM will have
        :param embed_dim: Size of each embedding vector
        :param lstm1_dim: Dimension of the first biLSTM's output vectors
        :param half_in_dim: Dimension of the second biLSTM's output vectors,
            which in turn are the input vectors for the MLP1.
        :param classes_number: Number of different classes the input can be part
            of. Also the dimension of the MLP1's output vector.
        :param vocab_size: Size of vocabulary. AKA how many rows the embedding
            matrix will have.
        """
        self.pc = dy.ParameterCollection()
        # One builder per direction; their outputs are concatenated later,
        # hence the second layer consumes vectors of size 2 * lstm1_dim.
        biLSTM1 = [dy.LSTMBuilder(num_layers, embed_dim, lstm1_dim, self.pc),
                   dy.LSTMBuilder(num_layers, embed_dim, lstm1_dim, self.pc)]
        biLSTM2 = [dy.LSTMBuilder(num_layers, 2 * lstm1_dim, half_in_dim, self.pc),
                   dy.LSTMBuilder(num_layers, 2 * lstm1_dim, half_in_dim, self.pc)]
        self._biLSTMs = (biLSTM1, biLSTM2)
        self._E = self.pc.add_lookup_parameters((vocab_size, embed_dim))
        # MLP output layer: maps a concatenated (2 * half_in_dim) vector to
        # one score per class.
        self._W = self.pc.add_parameters((classes_number, 2 * half_in_dim))
        self._b = self.pc.add_parameters(classes_number)

    def repr(self, sentence):
        """
        Abstract method.
        Each network's vector representation for a sentence.

        :param sentence: Sentence to be represented
        :return: A list of vector representations, one per unit in sentence
        """
        raise NotImplementedError

    def __call__(self, sentence, renew_graph=True):
        """
        Inputs sentence to the network: get the representation of the
        sentence, feed it to the first biLSTM (producing b_1..b_n), feed those
        to the second biLSTM (producing b'_1..b'_n), then apply the MLP1 and a
        softmax to each b'_i.

        :param sentence: Input sentence.
        :param renew_graph: When True (default), start a fresh dynet
            computation graph. Pass False when batching several sentences
            into a single graph (see predcit_batch / loss_on_batch).
        :return: List of softmax output vectors, one per word.
        """
        if renew_graph:
            dy.renew_cg()
        rep = self.repr(sentence)
        layer1, layer2 = self._biLSTMs
        s_f, s_b = layer1[AbstractNet.FORWARD].initial_state(), layer1[
            AbstractNet.BACKWARD].initial_state()
        # The backward LSTM consumes the reversed sequence.
        # NOTE(review): outs_b is in reversed sentence order and is zipped
        # with outs_f WITHOUT being re-reversed, so position i pairs with the
        # backward output for position n-1-i -- confirm this is intended.
        outs_f, outs_b = s_f.transduce(rep), s_b.transduce(rep[::-1])
        bs = [dy.concatenate([bf, bb]) for bf, bb in itertools.izip(outs_f, outs_b)]
        s_f, s_b = layer2[AbstractNet.FORWARD].initial_state(), layer2[
            AbstractNet.BACKWARD].initial_state()
        outs_f, outs_b = s_f.transduce(bs), s_b.transduce(bs[::-1])
        btags = [dy.concatenate([bf, bb])
                 for i, (bf, bb) in enumerate(itertools.izip(outs_f, outs_b))]
        W, b = dy.parameter(self._W), dy.parameter(self._b)
        outs = [dy.softmax(W * x + b) for x in btags]
        return outs

    def get_loss(self, sentence, expected_outputs):
        """
        Inputs to network and returns the cross-entropy losses.

        :param sentence: Input sentence.
        :param expected_outputs: List of each word's class index (output of
            word i should be expected_outputs[i]).
        :return: List of per-word cross-entropy losses (a dynet.Expression
            each) -- note this is NOT a single summed loss.
        """
        probs = self(sentence)
        return [-dy.log(dy.pick(prob, expected)) for prob, expected in itertools.izip(probs, expected_outputs)]

    def predict(self, sentence):
        """
        Inputs to network and returns the index of the class with the maximal
        probability (argmax of output) for each word.

        :param sentence: Input sentence
        :return: List of predicted class indexes per word. The i-th element is
            the index of the predicted class of the i-th word in sentence.
        """
        probs = self(sentence)
        return [np.argmax(prob.npvalue()) for prob in probs]

    def predcit_batch(self, sentences):
        """
        Predict classes for a whole batch of sentences using one shared
        computation graph and a single forward pass.

        NOTE(review): the method name contains a typo ("predcit"); kept as-is
        because external callers may depend on it.

        :param sentences: Iterable of input sentences.
        :return: List of lists -- predicted class index per word, per sentence.
        """
        probs = []
        all_probs = []
        dy.renew_cg()
        for sentence in sentences:
            p = self(sentence, renew_graph=False)
            probs.append(p)
            all_probs.extend(p)
        # One forward() over every expression amortizes the graph evaluation.
        dy.forward(all_probs)
        return [[np.argmax(word.npvalue()) for word in sentence] for sentence in probs]

    def loss_on_batch(self, sentences_and_tags):
        """
        Average cross-entropy loss over a batch, built on one computation
        graph.

        :param sentences_and_tags: Iterable of (sentence, tags) pairs.
        :return: dynet.Expression -- the sum of per-word losses divided by the
            total number of tags in the batch.
        """
        losses = []
        total = 0
        dy.renew_cg()
        for sentence, tags in sentences_and_tags:
            probs = self(sentence, renew_graph=False)
            total += len(tags)
            losses.extend([-dy.log(dy.pick(prob, tag))
                           for prob, tag in itertools.izip(probs, tags)])
        return dy.esum(losses) / total

    def save_to(self, file_name):
        """Save the whole parameter collection to ``file_name``."""
        self.pc.save(file_name)

    def load_from(self, file_name):
        """Populate the parameter collection from ``file_name`` (shapes must match)."""
        self.pc.populate(file_name)
# Option (a)
class WordEmbeddedNet(AbstractNet):
    """
    Option (a):
    Part 3 network where the input sentence is represented by plain word
    embeddings -- one lookup into the embedding matrix per word.
    """

    def repr(self, sentence):
        """
        Map each word index in ``sentence`` to its embedding vector.

        :param sentence: List of words' indexes
        :return: A list with one embedded vector per word
        """
        embeddings = self._E
        vectors = []
        for word_index in sentence:
            vectors.append(dy.lookup(embeddings, word_index))
        return vectors
# Option (b)
class CharEmbeddedLSTMNet(AbstractNet):
    """
    Option (b):
    Part 3 network where every word of the input sentence is broken into its
    characters; the character embeddings are run through a dedicated LSTM and
    the LSTM's final output is used as the word's vector representation.
    """

    def __init__(self, num_layers, embed_dim, lstm1_dim, half_in_dim, classes_number,
                 char_vocab_size):
        """
        Build the base network (here the lookup matrix embeds characters) and
        the character-level LSTM.

        :param num_layers: Number of layers each LSTM will have
        :param embed_dim: Size of each embedding vector
        :param lstm1_dim: Dimension of the first biLSTM's output vectors
        :param half_in_dim: Dimension of the second biLSTM's output vectors,
            which in turn are the input vectors for the MLP1.
        :param classes_number: Number of different classes the input can be
            part of. Also the dimension of the MLP1's output vector.
        :param char_vocab_size: Number of characters in the vocabulary, i.e.
            the row count of the embedding matrix.
        """
        AbstractNet.__init__(self, num_layers, embed_dim, lstm1_dim, half_in_dim, classes_number,
                             vocab_size=char_vocab_size)
        self.char_LSTM = dy.LSTMBuilder(num_layers, embed_dim, embed_dim, self.pc)

    def repr(self, sentence):
        """
        Represent every word by the last output of the char LSTM run over the
        word's embedded characters, in order.

        :param sentence: List of lists of chars' indexes (one list per word)
        :return: List of char-LSTM final-output vectors, one per word.
        """
        word_vectors = []
        for word in sentence:
            char_vectors = [dy.lookup(self._E, char_index) for char_index in word]
            lstm_outputs = self.char_LSTM.initial_state().transduce(char_vectors)
            word_vectors.append(lstm_outputs[-1])
        return word_vectors
# Option (c)
class WordAndSubwordEmbeddedNet(AbstractNet):
    """
    Option (c):
    Part 3 network where each word is represented by the sum of three
    embeddings: the word itself, its prefix and its suffix.
    """

    def __init__(self, num_layers, embed_dim, lstm1_dim, half_in_dim, classes_number, vocab_size,
                 word_to_pre_index, word_to_suf_index):
        """
        Build the base net plus embedding matrices for prefixes and suffixes.

        :param num_layers: Number of layers each LSTM will have
        :param embed_dim: Size of each embedding vector
        :param lstm1_dim: Dimension of the first biLSTM's output vectors
        :param half_in_dim: Dimension of the second biLSTM's output vectors,
            which in turn are the input vectors for the MLP1.
        :param classes_number: Number of different classes the input can be
            part of. Also the dimension of the MLP1's output vector.
        :param vocab_size: Size of vocabulary (rows of the embedding matrix).
        :param word_to_pre_index: List sized vocab_size mapping a word's index
            to the index of its prefix.
        :param word_to_suf_index: List sized vocab_size mapping a word's index
            to the index of its suffix.
        """
        AbstractNet.__init__(self, num_layers, embed_dim, lstm1_dim, half_in_dim, classes_number,
                             vocab_size)
        self._PE = self.pc.add_lookup_parameters((len(word_to_pre_index), embed_dim))  # prefixes
        self._SE = self.pc.add_lookup_parameters((len(word_to_suf_index), embed_dim))  # suffixes
        self._W2PI = word_to_pre_index
        self._W2SI = word_to_suf_index

    def repr(self, sentence):
        """
        Represent each word as word-embedding + prefix-embedding +
        suffix-embedding.

        :param sentence: List of words' indexes
        :return: List of summed embedding vectors, one per word.
        """
        summed = []
        for word in sentence:
            word_vec = dy.lookup(self._E, word)
            prefix_vec = dy.lookup(self._PE, self._W2PI[word])
            suffix_vec = dy.lookup(self._SE, self._W2SI[word])
            summed.append(word_vec + prefix_vec + suffix_vec)
        return summed
# Option (d)
class WordEmbeddedAndCharEmbeddedLSTMNet(CharEmbeddedLSTMNet):
    """
    Option (d):
    Part 3 network where each word is represented by two concatenated vectors:

    1. its word embedding (from a dedicated word-embedding matrix), and
    2. the final output of a char LSTM run over the word's embedded
       characters (the base-class representation).

    The concatenation is then passed through a linear layer whose output is
    the final word representation.
    """

    def __init__(self, num_layers, embed_dim, lstm1_dim, half_in_dim, classes_number,
                 char_vocab_size, vocab_size):
        """
        Build the base class (whose built-in embedding matrix embeds
        characters), the word-embedding matrix and the linear layer.

        :param num_layers: Number of layers each LSTM will have
        :param embed_dim: Size of each embedding vector
        :param lstm1_dim: Dimension of the first biLSTM's output vectors
        :param half_in_dim: Dimension of the second biLSTM's output vectors,
            which in turn are the input vectors for the MLP1.
        :param classes_number: Number of different classes the input can be
            part of. Also the dimension of the MLP1's output vector.
        :param char_vocab_size: Number of characters in the vocabulary.
        :param vocab_size: Number of words in the vocabulary, i.e. the row
            count of the word-embedding matrix.
        """
        CharEmbeddedLSTMNet.__init__(self, num_layers, embed_dim, lstm1_dim, half_in_dim,
                                     classes_number, char_vocab_size)
        self._WE = self.pc.add_lookup_parameters((vocab_size, embed_dim))  # word embedding
        self._W1 = self.pc.add_parameters((embed_dim, 2 * embed_dim))  # linear layer weights
        self._b1 = self.pc.add_parameters(embed_dim)  # linear layer bias

    def repr(self, sentence):
        """
        Represent each word as (a) concatenated with (b), fed to a linear
        layer.

        :param sentence: List of pairs: a word's index and a list of the
            word's chars' indexes.
        :return: List with each word's representation.
        """
        words, chars = zip(*sentence)
        char_vectors = CharEmbeddedLSTMNet.repr(self, list(chars))
        combined = []
        for word, char_vec in itertools.izip(words, char_vectors):
            combined.append(dy.concatenate([dy.lookup(self._WE, word), char_vec]))
        W1 = dy.parameter(self._W1)
        b1 = dy.parameter(self._b1)
        return [W1 * vector + b1 for vector in combined]
def save_net_and_params_to(net, save_file, num_layers, embed_dim, lstm1_dim, half_in_dim,
                           classes_number, I2T, vocab_size=0, char_vocab_size=0,
                           word_to_pre_index=None, word_to_suf_index=None, I2W=None, I2C=None,
                           unknown_word_index=-1):
    """
    Save ``net``'s parameters to ``save_file`` and pickle its construction
    arguments to ``"data_of_" + save_file`` so that
    :func:`load_net_and_params_from` can rebuild it later.

    :param net: The network to save (any AbstractNet subclass).
    :param save_file: Path for the dynet parameter file; the companion pickle
        is written next to it with a ``data_of_`` prefix (note: the prefix is
        prepended to the whole string, so pass a bare file name, not a path
        with directories).
    :param I2T: Index-to-tag list.
    :param I2W: Index-to-word list (options a, c, d); I2C: index-to-char list
        (options b, d). The remaining arguments mirror the constructors.
    :param unknown_word_index: Index used for out-of-vocabulary words.
    """
    save_dict = {
        "class": net.__class__,
        "params": (num_layers, embed_dim, lstm1_dim, half_in_dim, classes_number, vocab_size,
                   char_vocab_size),
        "words_lists": (I2W, I2C, word_to_pre_index, word_to_suf_index),
        "tags_list": I2T,
        "unk_i": unknown_word_index
    }
    net.save_to(save_file)
    # Use a context manager so the pickle file is flushed and closed even on
    # error (the original left the file object dangling).
    with open("data_of_" + save_file, "wb") as pickle_file:
        pickle.dump(save_dict, pickle_file)
def load_net_and_params_from(load_file):
    """
    Rebuild a network previously saved with :func:`save_net_and_params_to`.

    Reads the pickled construction arguments from ``"data_of_" + load_file``,
    instantiates the matching network class, then populates its parameters
    from ``load_file``.

    :param load_file: The path that was passed to save_net_and_params_to.
    :return: ``(net, I2T, vocab_list, unk_index)`` for options (a)-(c) --
        where ``vocab_list`` is I2W for (a)/(c) and I2C for (b) -- or
        ``(net, I2T, I2W, I2C, unk_index)`` for option (d).
    """
    # Context manager: the original leaked the file handle opened for pickle.
    with open("data_of_" + load_file, "rb") as pickle_file:
        loader = pickle.load(pickle_file)
    num_layers, embed_dim, lstm1_dim, half_in_dim, classes_number, vocab_size, char_vocab_size = \
        loader["params"]
    I2W, I2C, word_to_pre_index, word_to_suf_index = loader["words_lists"]
    I2T = loader["tags_list"]
    if loader["class"] == WordEmbeddedNet:  # Option (a)
        net = WordEmbeddedNet(num_layers, embed_dim, lstm1_dim, half_in_dim, len(I2T), len(I2W))
    elif loader["class"] == CharEmbeddedLSTMNet:  # Option (b)
        net = CharEmbeddedLSTMNet(num_layers, embed_dim, lstm1_dim, half_in_dim, len(I2T), len(I2C))
    elif loader["class"] == WordAndSubwordEmbeddedNet:  # Option (c)
        net = WordAndSubwordEmbeddedNet(num_layers, embed_dim, lstm1_dim, half_in_dim, len(I2T),
                                        len(I2W), word_to_pre_index, word_to_suf_index)
    else:  # Option (d)
        net = WordEmbeddedAndCharEmbeddedLSTMNet(num_layers, embed_dim, lstm1_dim, half_in_dim,
                                                 len(I2T), len(I2C), len(I2W))
    net.load_from(load_file)  # loads the parameter collection
    if loader["class"] == WordEmbeddedNet:  # Option (a): word vocabulary
        return net, I2T, I2W, loader["unk_i"]
    elif loader["class"] == CharEmbeddedLSTMNet:  # Option (b): char vocabulary
        return net, I2T, I2C, loader["unk_i"]
    elif loader["class"] == WordAndSubwordEmbeddedNet:  # Option (c): word vocabulary
        return net, I2T, I2W, loader["unk_i"]
    else:  # Option (d): both vocabularies
        return net, I2T, I2W, I2C, loader["unk_i"]
| true |
6c24cd414a113f9aa14a4c753fe15e9a47c5a548 | Python | tahmad08/AutomateTheBoringStuffPractice | /Textbook Code/shoe_size.py | UTF-8 | 1,747 | 3.953125 | 4 | [] | no_license | #shoe size trick
from datetime import datetime
def shoe_trick():
    """
    Interactive "your shoe size tells your age" number trick.

    Walks the user through the arithmetic (shoe size * 5, + 50, * 20,
    + current year - 1000, - birth year): the leading digits of the final
    number are the shoe size and the last two digits are the age.

    NOTE(review): the age is read from the last two digits of the result
    (``sz[-2:]``), so the trick breaks for ages >= 100, and the birthdate
    input is assumed to be well-formed mm-dd-yyyy -- no validation is done.
    """
    print("we'll use your shoe size to tell your age\n"
          + "what is your shoe size?")
    sz = input()
    # Re-prompt until a non-empty value is entered.
    while not(sz):
        print("enter a value for your shoe size: ")
        sz = input()
    rsz = sz
    # Half sizes (e.g. "9.5") are rounded up to the next whole size.
    if (str(rsz))[-2:] == ".5":
        rsz = float(rsz) + 0.5
        print("we round up your shoe size to " + str(rsz))
    print("first we multiply by five: " + str(rsz) + " * 5 = " + str((float(rsz)*5)) + ". ok?")
    sz = float(rsz) * 5
    ok = input()  # pause: wait for the user to acknowledge
    print(" add 50: 50 + "+ str(sz) + " = " + str(((sz)+50)))
    sz = sz + 50
    ok = input()  # pause
    print(" multiply by 20: 20 * " + str(sz) + " = " + str(sz*20))
    sz = sz * 20
    ok = input()  # pause
    year = datetime.now().year
    print(" then take the current year and subtract 1000, add")
    print(" "+str(year) + " - 1000 = " + str(year-1000) + " + " + str(sz) + " = " + str((sz + (year-1000))))
    birth = ''
    # Re-prompt until a birthdate is entered.
    while birth == '':
        print(" almost done. next we subtract your birth year.")
        print(" Please enter your birthdate in mm-dd-yyyy format: ")
        birth = input()
    curr_month = datetime.now().month  # NOTE(review): assigned but never used
    # Slice the mm-dd-yyyy string into its components.
    bmonth = birth[:2]
    bday = birth[3:5]
    byear = birth[-4:]
    # Birthday in the *current* year -- used below to check whether the
    # birthday has already passed this year.
    bdate = datetime(datetime.now().year,int(bmonth),int(bday))
    rsz = (sz + (year-1000)) - int(byear)
    print(" "+str(sz) + " - " + str(byear) + " = " + str(rsz))
    sz = str(int(rsz))
    # Leading digits are the shoe size; the last two digits are the age.
    shoe = sz[:-2]
    if(bdate > datetime.now()):
        # Birthday still ahead this year: subtract one from the age.
        age = int(sz[-2:]) - 1
    else:
        age = int(sz[-2:])
    print(" SHOE SIZE = " + str(shoe)+ " AND AGE: " + str(age))
#shoe_trick()
def hello(name):
    """Echo ``name`` back unchanged (the greeting prefix is disabled)."""
    return name
| true |
534258ed5ab26c7782cc3b6b0cd8d53db2e6453b | Python | ivanferreirajr/sad-pasa | /src/database.py | UTF-8 | 2,511 | 2.859375 | 3 | [
"MIT"
] | permissive | import psycopg2 as db
import psycopg2.extras as extras
import os
from os.path import join, dirname
from dotenv import load_dotenv
dotenv_path = join(dirname(__file__), '..', '.env')
load_dotenv(dotenv_path)
DB_HOST = os.environ.get("DB_HOST")
DB_DATABASE = os.environ.get("DB_DATABASE")
DB_USER = os.environ.get("DB_USER")
DB_PASSWORD = os.environ.get("DB_PASSWORD")
class Config:
    """Holds the database connection settings read from the environment."""

    def __init__(self):
        # Settings for the "postgres" backend, sourced from the module-level
        # environment variables loaded via dotenv.
        postgres_settings = {
            "user": DB_USER,
            "password": DB_PASSWORD,
            "host": DB_HOST,
            "database": DB_DATABASE,
        }
        self.config = {"postgres": postgres_settings}
class Connection(Config):
    """
    Thin wrapper around a psycopg2 connection/cursor pair.

    Inherits the settings dict from :class:`Config` and opens the connection
    eagerly in ``__init__``.  Also usable as a context manager: leaving the
    ``with`` block commits and closes the connection.
    """
    def __init__(self):
        Config.__init__(self)
        try:
            self.conn = db.connect(**self.config["postgres"])
            self.cur = self.conn.cursor()
        except Exception as e:
            # NOTE(review): terminates the whole process on connection
            # failure -- fine for a script, surprising for library use.
            print("Erro na conexão", e)
            exit(1)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # NOTE(review): commits unconditionally, even when the with-block
        # raised -- there is no rollback path here; confirm that is intended.
        self.commit()
        self.connection.close()
    @property
    def connection(self):
        # The live psycopg2 connection object.
        return self.conn
    @property
    def cursor(self):
        # The single cursor created at construction time (shared by all queries).
        return self.cur
    def commit(self):
        """Commit the current transaction."""
        self.connection.commit()
    def fetchall(self):
        """Return all rows from the last executed statement."""
        return self.cursor.fetchall()
    def execute(self, sql, params=None):
        """Execute ``sql``; ``params`` defaults to an empty tuple.

        NOTE(review): ``params or ()`` also replaces falsy sequences such as
        an empty list -- harmless here, but worth knowing.
        """
        self.cursor.execute(sql, params or ())
    def query(self, sql, params=None):
        """Execute ``sql`` and return every resulting row."""
        self.cursor.execute(sql, params or ())
        return self.fetchall()
def insert_values(conn, df, table):
    """
    Bulk-insert a pandas DataFrame into a table using
    psycopg2.extras.execute_values().

    Params:
        conn: open psycopg2 connection
        df: DataFrame whose columns match the target table's columns
        table: target table name (str). NOTE(review): the table and column
            names are interpolated directly into the SQL string -- only pass
            trusted values, never user input.

    Returns:
        1 when the insert failed (after rollback); None on success.

    Raises:
        DatabaseError: caught internally; signalled via the return value.
    """
    # Build a list of tuples from the dataframe's rows.
    tuples = [tuple(x) for x in df.to_numpy()]
    # Dataframe column names joined by commas.
    cols = ','.join(list(df.columns))
    # Assemble the INSERT statement; %%s stays as the VALUES placeholder
    # that execute_values fills in.
    query = "INSERT INTO %s(%s) VALUES %%s" % (table, cols)
    cursor = conn.cursor()
    try:
        extras.execute_values(cursor, query, tuples)
        conn.commit()
    except (Exception, db.DatabaseError) as error:
        print("Error: %s" % error)
        conn.rollback()
        cursor.close()
        return 1
    print("Inserção dos dados finalizada ✔")
    cursor.close()
fecd995109c70b709fb358f83e327d1efc1c9e79 | Python | Green0v0/TIL | /Prgrms/Prgrms-ct-py4/Hint.py | UTF-8 | 3,618 | 3.671875 | 4 | [] | no_license | # 힌트를 참고하여 코드 작성
def solution(seat):
    """Skeleton solution: prints a greeting and returns the default answer (-1)."""
    answer = -1
    # Press [Run] to see the output.
    print('Hello Python')
    return answer
# Longest palindrome
def solution(s):
    """
    Length of the longest palindromic substring of ``s``.

    Keeps a monotonically growing window length ``best``: at each position
    the window can extend by 1 (odd-length palindrome grows) or by 2
    (even-length palindrome grows), so a single left-to-right pass suffices.
    """
    best = 0
    for i in range(len(s)):
        window = s[i - best:i + 1]
        if window == window[::-1]:
            best += 1
        else:
            if i - best > 0:
                wider = s[i - best - 1:i + 1]
                if wider == wider[::-1]:
                    best += 2
    return best
# As noted in the problem-type discussion, the key is to check whether the
# current value can be derived from previous states (dynamic programming).
#
# Example with ["ba","na","n","a"] and "banana": "ba" and "na" are matched
# easily, so up to index 3 ("a") the count is 2.  Reaching "banan" (index 4),
# the piece "n" exists, so the count becomes 3.
# At index 5 ("banana") there are two previous states: "bana" (count 2) and
# "banan" (count 3).  Completing "banana" from "banan" gives count 4, while
# completing it from "bana" gives count 3, so the smaller one is chosen.
# Approaching it this way is exactly the DP formulation. :)
#
# The code below follows this explanation -- check it against the notes above!
# Word puzzle
def solution(strs, t):
    """
    Minimum number of fragments from ``strs`` (reusable) needed to assemble
    the word ``t``; -1 when impossible.  One-dimensional DP over prefixes.
    """
    INF = float('inf')
    pieces = set(strs)  # set for O(1) membership tests
    # min_count[i] == fewest fragments that assemble t[:i]; 0 for the empty prefix.
    min_count = [0] * (len(t) + 1)
    for end in range(1, len(t) + 1):
        best = INF  # INF means "this prefix cannot be assembled"
        # Fragments are at most 5 characters long, so look back at most 5.
        for size in range(1, min(end + 1, 6)):
            if t[end - size:end] in pieces:
                candidate = min_count[end - size] + 1
                if candidate < best:
                    best = candidate
        min_count[end] = best
    return -1 if min_count[-1] == INF else min_count[-1]
# Other people's solutions
def solution1(m, n, puddles):
    """
    Count the right/down paths from (1, 1) to (m, n) on an m x n grid that
    avoid the flooded cells in ``puddles``, modulo 1000000007.
    """
    MOD = 1000000007
    # Extra guard row/column of zeros avoids IndexError at the borders.
    paths = [[0] * (m + 1) for _ in range(n + 1)]
    if puddles != [[]]:  # the flooded-cell list may be empty
        for col, row in puddles:
            paths[row][col] = -1  # mark flooded cells for the sweep below
    paths[1][1] = 1
    for row in range(1, n + 1):
        for col in range(1, m + 1):
            if row == 1 and col == 1:
                continue  # keep the start cell at 1
            if paths[row][col] == -1:
                paths[row][col] = 0  # flooded: contributes no paths onward
                continue
            # paths[r][c] = paths from above + paths from the left
            paths[row][col] = (paths[row][col - 1] + paths[row - 1][col]) % MOD
    return paths[n][m]
def solution2(m, n, puddles):
    """
    Same path-counting problem as solution1, solved top-down with memoized
    recursion instead of a bottom-up table.
    """
    # Base cases: one path into each neighbor of the start; puddles count 0.
    cache = {(2, 1): 1, (1, 2): 1}
    for puddle in puddles:
        cache[tuple(puddle)] = 0
    def count(x, y):
        # Off-grid positions contribute nothing.
        if x < 1 or y < 1:
            return 0
        if (x, y) not in cache:
            cache[(x, y)] = count(x - 1, y) + count(x, y - 1)
        return cache[(x, y)]
    return count(m, n) % 1000000007
41dd594b2805c18a03d74ee13bc5f98fa352bf18 | Python | CodeValue/Pylab | /03. Lists/01. Lists.py | UTF-8 | 4,399 | 4.5 | 4 | [] | no_license |
# Define a list
# -----------------
# Empty list
lecturers = []
# Populated list
lecturers = ["Ori", "Haim", "Omer", "Maor", "Hagai"]
# A list can hold items of different types
items = ["Hello", 3.14, True, ("23", -13), None]

# Indexing in lists
# -------------------
lecturers = ["Ori", "Haim", "Omer", "Maor", "Hagai"]
# Python is a zero-based programming language
print(lecturers[0])  # will print "Ori"
print(lecturers[4])  # will print "Hagai"
# Python supports negative indexes too (counted from the end)!!!
print(lecturers[-2])  # will print "Maor"

# List editing
# --------------
lecturers = ["Ori", "Haim", "Omer", "Maor", "Hagai"]
lecturers[1] = "Ido"  # will replace Haim with Ido
print(lecturers)
# Add item
lecturers.append("Ido")  # New item will be added to the end of the list
print(lecturers)
# Remove item
lecturers.remove("Maor")  # Remove the first matching value from the list
print(lecturers)
# Delete an item
del (lecturers[3])  # Delete the item at a specific index
print(lecturers)
# Check if an item exists
if "Ofri" in lecturers:
    print("Ofri is a lecturer")  # will not be executed
# List size
print(len(lecturers))  # will print 4 -- after the edits above the list is
                       # ["Ori", "Ido", "Omer", "Ido"]

# List slicing
# ---------------
# Similar to taking a sub-sequence (compare C#'s Linq Skip()/Take())
lecturers = ["Ori", "Haim", "Omer", "Maor", "Hagai"]
print(lecturers[1:])
print(lecturers[1:-2])

# ---------------------------------------------------------------------------
#
# Before we continue to "List comprehension", let's first talk about loops
#
# ---------------------------------------------------------------------------

# Exercise #1:
#
# 1. Create a list of all squares from 1 to 100.
# 2. Print the list
#
# Solution (exc. 1):
numbers = []
x = 1
while x <= 100:
    numbers.append(x)
    x += 1
for i in numbers:
    print(i**2)

# another solution...
# range(1, 101) yields the numbers 1..100 lazily (in Python 3 range() is a
# lazy sequence object, not a list).  The upper bound is exclusive, so 101 is
# needed to include 100 -- matching the while-loop solution above.
for num in range(1, 101):
    print(num**2)

# Now let's see an even better solution...

# List comprehension
# --------------------
# 1. List comprehensions are a tool for transforming one list (any iterable
#    actually) into another list.
# 2. During this transformation, elements can be conditionally selected.
# 3. Every list comprehension can be rewritten as a for loop, but not vice versa.
# 4. We will use list comprehensions to perform only small tasks.

# Example:
old_things = []
new_things = []
for ITEM in old_things:
    if condition_based_on(ITEM):
        new_things.append("something with " + ITEM)
# You can rewrite the above for loop as a list comprehension like this:
new_things = ["something with " + ITEM for ITEM in old_things if condition_based_on(ITEM)]

# How to transform a for loop into a list comprehension in 4 simple steps:
numbers = [1, 2, 3, 4, 5]
doubled_odds = []
for n in numbers:
    if n % 2 == 1:
        doubled_odds.append(n * 2)
# 1. Copy the variable assignment for our new empty list
doubled_odds = []
# 2. Copy the expression that we've been append-ing into this new list
doubled_odds = [n * 2]
# 3. Copy the for loop line, excluding the final :
doubled_odds = [n * 2 for n in numbers]
# 4. Copy the if statement line, also without the :
doubled_odds = [n * 2 for n in numbers if n % 2 == 1]
doubled_odds

# List comprehension and nested loops
matrix = []
flattened = []
for row in matrix:
    for n in row:
        flattened.append(n)
# Can we transform a nested loop (matrix) into a list comprehension? YES, WE CAN!
flattened = [n for row in matrix for n in row]
# When working with nested loops in list comprehensions remember that the for
# clauses remain in the same order as in our original for loops.

# Exercise #2:
#
# Use list comprehensions to perform the following:
#
# 1. Create a list of all squares from 1 to 100.
# 2. Print the list
#
# Solution (exc. 2):
squares = [i ** 2 for i in range(1, 101)]
print("List comprehensions is cool!", squares)
b35190a50b7a815a384d90d9d34d8480febdd421 | Python | kimyeee/Video | /lagou/cloud.py | UTF-8 | 1,871 | 2.84375 | 3 | [] | no_license | import random
from PIL import Image
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import numpy as np
from os import path
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator, random_color_func
from lagou.models import Comment, Lagou
# Connect to the local MySQL database holding the scraped job postings.
engine = create_engine("mysql+mysqlconnector://root:root@localhost:3306/test", max_overflow=5)
Session = sessionmaker(bind=engine)
session = Session()
# Concatenate every job description into one text blob for the word cloud.
text = ''
comments = session.query(Lagou)
for comment in comments:
    text += comment.description
# Load the mask image twice: once as a matplotlib array (for recoloring),
# once as a numpy array (for the cloud shape).
backgroud_Image = plt.imread('test.png')
alice_mask = np.array(Image.open('test.png'))
print('加载图片成功!')
'''设置词云样式'''
# Configure the word-cloud renderer.
wc = WordCloud(
    background_color='white',  # background color
    mask=alice_mask,  # mask image that shapes the cloud
    prefer_horizontal=0.6,  # fraction of words laid out horizontally
    # color_func=lambda *args, **kwargs: (random.randrange(0, 255), random.randrange(0, 255), random.randrange(0, 255)),
    font_path='C:\Windows\Fonts\simsun.ttc',  # required for Chinese glyphs; without it characters render as boxes
    max_words=200,  # maximum number of words to display
    # NOTE(review): set() over a string produces single *characters*, not the
    # phrases written here -- confirm this filters what was intended.
    stopwords=set('职位描述岗位职责任职要求岗位要求'),  # boilerplate words to drop
    max_font_size=60,  # largest font size used
    random_state=3  # fixes the random layout/color choices for reproducibility
)
wc.generate_from_text(text)
print('开始加载文本')
# Change the word colors: a generator from the background image is built but
# a random palette is actually applied below.
img_colors = ImageColorGenerator(backgroud_Image)
wc.recolor(color_func=random_color_func)
# Display the word cloud.
plt.imshow(wc)
# Hide the x/y axes.
plt.axis('off')
plt.show()
# Directory containing this module.
d = path.dirname(__file__)
# os.path.join(): combine path segments into one path
wc.to_file(path.join(d, "h11.png"))
print('生成词云成功!')
214f75253c19099edfdb972ee361c8914af5a6ec | Python | looper123/python-quickstart | /com/quark/quick/start/JsonAnaylyis.py | UTF-8 | 2,272 | 3.4375 | 3 | [] | no_license | # json类型与各个类型间的转换
import json
# convert a dict to JSON and back
import time
# %y  two-digit year (00-99)
# %Y  four-digit year (0000-9999)
# %m  month (01-12)
# %d  day of the month (01-31)
# %H  hour, 24-hour clock (00-23)
# %I  hour, 12-hour clock (01-12)
# %M  minute (00-59)
# %S  second (00-59)
# %a  locale's abbreviated weekday name
# %A  locale's full weekday name
# %b  locale's abbreviated month name
# %B  locale's full month name
# %c  locale's date and time representation
# %j  day of the year (001-366)
# %p  locale's equivalent of AM/PM
# %U  week number of the year (00-53), Sunday as the first day of the week
# %w  weekday (0-6), Sunday is 0
# %W  week number of the year (00-53), Monday as the first day of the week
# %x  locale's date representation
# %X  locale's time representation
# %Z  name of the current time zone
# %%  a literal '%' character
class JsonConverter():
    """Demonstrates round-tripping a dict through a JSON string."""

    def dict_to_json(self):
        """Serialize a sample dict to JSON, parse it back, and print both forms."""
        sample = {
            'no': 1,
            'name': 'Runoob',
            'url': 'http://www.runoob.com'
        }
        print("after convert to json 数据{}/类型{}".format(json.dumps(sample), type(json.dumps(sample))))
        print(
            "after convert to dict 数据{}/类型{}".format(json.loads(json.dumps(sample)), type(json.loads(json.dumps(sample)))))
class TimeConverter():
    """Demonstrates converting between time structs and formatted strings."""

    def time_to_str(self):
        """Print the current local time as 'YYYY-mm-dd HH:MM:SS' plus its type."""
        print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
        print(type(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())))

    def str_to_time(self):
        """Parse a fixed example date string into an epoch timestamp and print it."""
        date_string = "Sat Mar 28 22:24:24 2016"
        # strptime builds a struct_time; mktime turns it into seconds since
        # the epoch (interpreted in the local time zone).
        time_data = time.mktime(time.strptime(date_string, "%a %b %d %H:%M:%S %Y"))
        print(time_data)
        print(type(time_data))
if __name__ == '__main__':
    # JsonConverter().dict_to_json()
    # Wall-clock reference point for the elapsed-time prints below.
    ctime = time.time()
    # time.clock() (used originally) was removed in Python 3.8;
    # time.perf_counter() is its documented replacement for measuring
    # elapsed-time deltas with high resolution.
    print("第一次调用", time.perf_counter(), ctime)
    TimeConverter().str_to_time()
    TimeConverter().time_to_str()
    # Subsequent calls show the time elapsed since the reference point;
    # perf_counter() is more precise than time.time() for intervals.
    print("第二次调用", time.perf_counter(), time.time() - ctime)
    print("第三次调用", time.perf_counter(), time.time() - ctime)
52d36e7f42c80e28420abc427f1fb55052516848 | Python | mvattuone/pee-wee-hermann-hesse | /get_quotes.py | UTF-8 | 1,600 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf8 -*-
from bs4 import BeautifulSoup
import requests
import re
# Scrape the Hermann Hesse quotes page from Goodreads.
hh_page = requests.get("https://www.goodreads.com/author/quotes/1113469.Hermann_Hesse")
hh_soup = BeautifulSoup(hh_page.text, 'html.parser')
# Every quote on the page lives inside a <div class="quoteText">.
hh_quotes = hh_soup.find_all("div", class_="quoteText")
new_hh_quotes = []
for i, quote in enumerate(hh_quotes):
    # NOTE(review): find_all with a compiled regex matches *tag names*, not
    # the quote text, so this probably never matches anything; the intent was
    # likely find_all(string=re.compile(...)).  Confirm before relying on it.
    new_hh_quotes.append(hh_quotes[i].find_all(re.compile('(.*) Hermann')))
# Pee-wee Herman quotes collected by hand -- scraping IMDb etc. was not worth
# the effort for a small fixed list.
pw_quotes = [
    "The stars at night are big and bright",
    "I don't have to see it, Dottie. I lived it.",
    "There are a lot of things about me you don't know anything about. Things you wouldn't understand, you couldn't understand. Things you shouldn't understand.",
    "You don't wanna get mixed up with a guy like me. I'm a loner, Dottie. A rebel.",
    "It's like you're trying to unravel a giant cable-knit sweater and someone keeps knitting and knitting and knitting and knitting and knitting and knitting and knitting and knitting and knitting.",
    "Shhhh... I'm listening to reason.",
    "That's my name, don't wear it out.",
    "I brought you guys....FRENCH FRIES!",
    "I meant to do that.",
    "I'm rolling a big doughnut and a snake wearing a vest",
    "Boy, I always thought that was the dumbest law.",
    "Good for you and your father",
    "Because I don't make monkeys, I just train them.",
    "I know you are but what am I?",
    "I wouldn't sell my bike for all the money in the world. Not for a hundred million, billion, trillion dollars!",
]
# Python 2 print statement: this file is Python 2 code.
print new_hh_quotes
d67265eb684df470937a013db2d0eb6dc8a2c369 | Python | ycs1m1yk/LoLHighlighter | /flask/audio_test.py | UTF-8 | 3,227 | 2.78125 | 3 | [
"MIT"
] | permissive | import librosa
import numpy as np
import warnings
import sys
def audio_test(directory, game_start_time, game_end_time):
    """
    Scan the audio track of the file at ``directory`` and return candidate
    highlight intervals.

    The audio is loaded in 360-second chunks (resampled to 5500 Hz); samples
    whose amplitude lies near the chunk's global min/max are treated as
    "hits", hit timestamps closer than 20 s apart are merged into segments,
    and each kept segment is padded by 5 s on both sides.  Extra intervals
    are forced in around the game start and end times.

    :param directory: path to an audio/video file loadable by librosa
    :param game_start_time: offset (seconds) at which scanning starts
    :param game_end_time: game-end timestamp (seconds), used to guarantee an
        end-of-game highlight
    :return: list of [start, end] second pairs, or None when an unexpected
        error occurred (the except branch only prints diagnostics)
    """
    warnings.filterwarnings("ignore")
    try:
        print("Now Extracting H/L Time-Line")
        reSampleRate = 5500  # target sample rate for analysis (Hz)
        offset = game_start_time
        duration = 360  # chunk length in seconds per librosa.load call
        total_time = 0
        hits = []  # rounded timestamps (s) of loud "hit" samples
        while True:
            # add offset parameter for librosa.load() to specify starting time.
            # duration parameter is the total time that librosa analyzes.
            try:
                y, sr = librosa.load(
                    directory, sr=reSampleRate, offset=offset, duration=duration)
                # print('sr: ', reSampleRate)
            except Exception as e:
                # End of file (or read error): stop chunked scanning.
                print(e)
                break
            # Timestamp (within the chunk) of every sample.
            time = np.linspace(0, len(y)/sr, len(y))
            # Zero out "ordinary" samples so only extreme values remain.
            adjust = np.where((y > np.min(y)+0.1) & (y < np.max(y)-0.1), 0, y)
            for i in range(adjust.size):
                if adjust[i] != 0:
                    hit_round = round(time[i] + offset)
                    hits.append(int(hit_round))
            offset += duration
            total_time += int(librosa.get_duration(y=y))
            del y  # free the chunk before loading the next one
        # Deduplicate and sort the hit timestamps.
        hits_remove_dup = list(set(hits))
        hits_remove_dup.sort()
        # print('[hits_remove_dup] --', hits_remove_dup)
        # NOTE(review): raises IndexError when fewer than two hits were found.
        hl_start = hits_remove_dup[0]
        hl_temp = hits_remove_dup[0]
        hl_end = hits_remove_dup[1]
        hl_list = []  # merged [start, end] highlight segments
        iteration = 1
        continue_val = 20  # hits closer than this (s) belong to one segment
        while iteration < len(hits_remove_dup):
            count = 0  # number of hits merged into the current segment
            # Extend the segment while consecutive hits stay close together.
            while hl_end - hl_temp < continue_val:
                if iteration == len(hits_remove_dup):
                    break
                hl_end = hits_remove_dup[iteration]
                hl_temp = hits_remove_dup[iteration-1]
                iteration += 1
                count += 1
            hit_count_val = 1  # minimum hit count for a segment to be kept
            if hl_end != hits_remove_dup[len(hits_remove_dup)-1]:
                if count > hit_count_val:
                    element = [hl_start, hl_temp]
                    hl_list.append(element)
                # Start the next segment, when more hits remain.
                if iteration < len(hits_remove_dup)-1:
                    hl_start = hits_remove_dup[iteration]
                    hl_temp = hits_remove_dup[iteration]
                    hl_end = hits_remove_dup[iteration+1]
                    iteration += 1
        # print(len(hl_list))
        interval_resize_val = 5  # seconds of padding on each side
        for i in range(len(hl_list)):
            hl_list[i][0] += -interval_resize_val
            hl_list[i][1] += interval_resize_val
            # Insert a forced end-of-game highlight between segments that
            # straddle game_end_time.
            # NOTE(review): at i == 0 the hl_list[i-1] below wraps around to
            # the *last* segment -- confirm that comparison is intended.
            if hl_list[i-1][1] < game_end_time < hl_list[i][0]:
                hl_list.insert(i, [game_end_time-30, game_end_time])
        # Guarantee a highlight covering the end of the game...
        if hl_list[-1][1] < game_end_time:
            hl_list.append([game_end_time-40 , game_end_time])
        # ...and one covering the start of the game.
        hl_list.insert(0, [game_start_time-40, game_start_time+20])
        print("Audio analysis finished")
        return hl_list
    except Exception as e:
        _, _, tb = sys.exc_info()  # tb -> traceback object
        print('file name = ', __file__)
        print('error line No = {}'.format(tb.tb_lineno))
        print(e)
223f63f17e271a0c05f49d9504408c2c99eb98b7 | Python | pedroalpacheco/100DaysOfCode | /028/uri1002.py | UTF-8 | 754 | 4.25 | 4 | [] | no_license | """
https://www.urionlinejudge.com.br/judge/en/problems/view/1002
The formula to calculate the area of a circumference
is defined as A = π . R2. Considering to this problem
that π = 3.14159:
Calculate the area using the formula given in the
problem description.
Input
The input contains a value of floating point
(double precision), that is the variable R.
Output
Present the message "A=" followed by the value
of the variable, as in the example bellow,
with four places after the decimal point.
Use all double precision variables. Like
all the problems, don't forget to print
the end of line after the result, otherwise
you will receive "Presentation Error".
"""
n = 3.14159
R = float(input())
A = n * R**2
print('A={:.4f}'.format(A)) | true |
f61534c5085ade1d9cf0d0b360c63ad331ba5d72 | Python | ndparker/rjsmin | /tests/test_issue17.py | UTF-8 | 7,990 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: ascii -*-
u"""
:Copyright:
Copyright 2019 - 2022
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==========
Issue 17
==========
https://github.com/ndparker/rjsmin/issues/17
"""
__author__ = u"Andr\xe9 Malo"
import rjsmin as _rjsmin
# pylint: disable = protected-access
py_jsmin = _rjsmin._make_jsmin(python_only=True)
py_jsmin2 = _rjsmin.jsmin_for_posers
import _rjsmin
c_jsmin = _rjsmin.jsmin
def test_non_issue():
""" Test issue """
inp = b'console.write((f++)/ 4 + 3 / 2)'
exp = b'console.write((f++)/4+3/2)'
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
inp = inp.decode('latin-1')
exp = exp.decode('latin-1')
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
def test_non_issue_bang():
""" Test issue with bang comments """
inp = b'console.write((f++)/ 4 + 3 / 2)'
exp = b'console.write((f++)/4+3/2)'
assert py_jsmin(inp, keep_bang_comments=True) == exp
assert py_jsmin2(inp, keep_bang_comments=True) == exp
assert c_jsmin(inp, keep_bang_comments=True) == exp
inp = inp.decode('latin-1')
exp = exp.decode('latin-1')
assert py_jsmin(inp, keep_bang_comments=True) == exp
assert py_jsmin2(inp, keep_bang_comments=True) == exp
assert c_jsmin(inp, keep_bang_comments=True) == exp
def test_non_issue2():
""" Test issue """
inp = (b'if (Y) { ba=Math.max(ba, Math.round(W.minHeight/Y))}; '
b'bo.setKnobFactor((bp.width===0)? 0: br.width /bp.width);')
exp = (b'if(Y){ba=Math.max(ba,Math.round(W.minHeight/Y))};'
b'bo.setKnobFactor((bp.width===0)?0:br.width/bp.width);')
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
inp = inp.decode('latin-1')
exp = exp.decode('latin-1')
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
def test_non_issue2_bang():
""" Test issue """
inp = (b'if (Y) { ba=Math.max(ba, Math.round(W.minHeight/Y))}; '
b'bo.setKnobFactor((bp.width===0)? 0: br.width /bp.width);')
exp = (b'if(Y){ba=Math.max(ba,Math.round(W.minHeight/Y))};'
b'bo.setKnobFactor((bp.width===0)?0:br.width/bp.width);')
assert py_jsmin(inp, keep_bang_comments=True) == exp
assert py_jsmin2(inp, keep_bang_comments=True) == exp
assert c_jsmin(inp, keep_bang_comments=True) == exp
inp = inp.decode('latin-1')
exp = exp.decode('latin-1')
assert py_jsmin(inp, keep_bang_comments=True) == exp
assert py_jsmin2(inp, keep_bang_comments=True) == exp
assert c_jsmin(inp, keep_bang_comments=True) == exp
def test_non_issue_complex():
""" Test issue """
inp = (b'console.write((f++)/*!dude*// 4 + 3 /a /* lalala */ '
b'/*!lololo*// 2)')
exp = b'console.write((f++)/4+3/a/2)'
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
inp = inp.decode('latin-1')
exp = exp.decode('latin-1')
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
def test_non_issue_complex_bang():
""" Test issue with bang comments """
inp = (b'console.write((f++)/*!dude*// 4 + 3 /a /* lalala */ '
b'/*!lololo*// 2)')
exp = b'console.write((f++)/*!dude*//4+3/a/*!lololo*//2)'
assert py_jsmin(inp, keep_bang_comments=True) == exp
assert py_jsmin2(inp, keep_bang_comments=True) == exp
assert c_jsmin(inp, keep_bang_comments=True) == exp
inp = inp.decode('latin-1')
exp = exp.decode('latin-1')
assert py_jsmin(inp, keep_bang_comments=True) == exp
assert py_jsmin2(inp, keep_bang_comments=True) == exp
assert c_jsmin(inp, keep_bang_comments=True) == exp
def test_issue():
""" Test issue """
inp = b'for(f=0;f<z;f++)/^ *-+: *$/.test(x)'
exp = b'for(f=0;f<z;f++)/^ *-+: *$/.test(x)'
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
inp = inp.decode('latin-1')
exp = exp.decode('latin-1')
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
def test_issue_bang():
""" Test issue with bang comments """
inp = b'for(f=0;f<z;f++)/^ *-+: *$/.test(x)'
exp = b'for(f=0;f<z;f++)/^ *-+: *$/.test(x)'
assert py_jsmin(inp, keep_bang_comments=True) == exp
assert py_jsmin2(inp, keep_bang_comments=True) == exp
assert c_jsmin(inp, keep_bang_comments=True) == exp
inp = inp.decode('latin-1')
exp = exp.decode('latin-1')
assert py_jsmin(inp, keep_bang_comments=True) == exp
assert py_jsmin2(inp, keep_bang_comments=True) == exp
assert c_jsmin(inp, keep_bang_comments=True) == exp
def test_issue_complex():
""" Test issue """
inp = (b'for(f=0;f<z;f++)/*!dude*//^ *-+: *$/i/*haha*//*!hoho*/\n'
b'./*!hihi*/\n/*huhu*/test(x)')
exp = b'for(f=0;f<z;f++)/^ *-+: *$/i.test(x)'
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
inp = inp.decode('latin-1')
exp = exp.decode('latin-1')
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
def test_issue_complex_bang():
""" Test issue with bang comments """
inp = (b'for(f=0;f<z;f++)/*!dude*//^ *-+: *$/i/*haha*//*!hoho*/\n'
b'./*!hihi*/\n/*huhu*/test(x)')
exp = b'for(f=0;f<z;f++)/*!dude*//^ *-+: *$/i/*!hoho*/./*!hihi*/test(x)'
assert py_jsmin(inp, keep_bang_comments=True) == exp
assert py_jsmin2(inp, keep_bang_comments=True) == exp
assert c_jsmin(inp, keep_bang_comments=True) == exp
inp = inp.decode('latin-1')
exp = exp.decode('latin-1')
assert py_jsmin(inp, keep_bang_comments=True) == exp
assert py_jsmin2(inp, keep_bang_comments=True) == exp
assert c_jsmin(inp, keep_bang_comments=True) == exp
def test_issue_error1():
""" Test issue """
inp = b'for(f=0;f<z;f++)/^ *-+: *$//*'
exp = b'for(f=0;f<z;f++)/^*-+:*$'
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
inp = inp.decode('latin-1')
exp = exp.decode('latin-1')
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
def test_issue_error2():
""" Test issue """
inp = b'for(f=0;f<z;f++)/'
exp = b'for(f=0;f<z;f++)/'
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
inp = inp.decode('latin-1')
exp = exp.decode('latin-1')
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
def test_issue_error3():
""" Test issue """
inp = b'for(f=0;f<z;f++)/^ *-+: *$/./'
exp = b'for(f=0;f<z;f++)/^*-+:*$/./'
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
inp = inp.decode('latin-1')
exp = exp.decode('latin-1')
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
def test_issue_error4():
""" Test issue """
inp = b'for(f=0;f<z;f++)/^ *-+: *$/."lalala"'
exp = b'for(f=0;f<z;f++)/^*-+:*$/."lalala"'
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
inp = inp.decode('latin-1')
exp = exp.decode('latin-1')
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
| true |
43af3c6dcd3f882cce20e690f57fdb634d13e766 | Python | mdifatta/Hand-Tracking | /source/library/neural_network/keras/custom_layers/abs.py | UTF-8 | 956 | 2.796875 | 3 | [] | no_license | from keras.engine.topology import Layer
from keras import backend as K
class Abs(Layer):
def __init__(self, **kwargs):
super(Abs, self).__init__(**kwargs)
def build(self, input_shape):
super(Abs, self).build(input_shape)
def call(self, x, mask=None):
return K.abs(x)
def compute_output_shape(self, input_shape):
return input_shape
def get_output_shape_for(self, input_shape):
return self.compute_output_shape(input_shape)
class AbsoluteReLu(Layer):
"""
AbsoluteReLu(x) = min(1, abs(x))
"""
def __init__(self, **kwargs):
super(AbsoluteReLu, self).__init__(**kwargs)
def build(self, input_shape):
pass
def call(self, x, mask=None):
return K.less(K.abs(x), 1)
def compute_output_shape(self, input_shape):
return input_shape
def get_output_shape_for(self, input_shape):
return self.compute_output_shape(input_shape)
| true |
2f3780854eeb976902011f332e22a56689f32668 | Python | eartharmstrong/Challenge | /Ch14_Challenge-2.py | UTF-8 | 185 | 3.515625 | 4 | [] | no_license | class Square:
def __init__(self, m):
self.m = m
def __repr__(self):
return "{} by {} by {}".format(self.m, self.m, self.m)
no1 = Square(10)
print(no1)
| true |
899f7e56e1b81578fabff5ce153b19695b089ce4 | Python | maikia/StrokeUNET | /brats/preprocess.py | UTF-8 | 27,609 | 2.546875 | 3 | [
"MIT"
] | permissive | """
Tools for converting, normalizing, and fixing the T1 brain scans and
corresponding lesion data.
"""
import os
import shutil
import subprocess
import warnings
from joblib import Memory, Parallel, delayed
import matplotlib.pylab as plt
from nibabel.filebasedimages import ImageFileError
from nilearn import plotting
from nilearn.image import load_img, math_img, new_img_like
# from nipype.interfaces.fsl import BET
import numpy as np
import pandas as pd
from unet3d.utils.utils import find_dirs
if os.environ.get('DISPLAY'):
N_JOBS = 1
else:
# running on the server
N_JOBS = -1
mem = Memory('./')
def get_nifti_data(t1_file_in):
data_image = load_img(t1_file_in).get_fdata()
return data_image
def get_mean(data_t1, normalize_to_mean=None, save_path=None):
"""
t1_file_in: an existing file name
path nifti file with the T1 MRI image with the skull
normalize_to_mean: if not None, the image will be normalized before
saving. It won't have any effect if save_path is None or not valid
save_path: str or None, if str to path with .nii.gz file the f
"""
mean_data = np.mean(data_t1)
return mean_data
def normalize_intensity(nifti_filename, new_mean_normalize):
img_nifti = load_img(nifti_filename)
data_nifti = img_nifti.get_fdata()
img_data = data_nifti / get_mean(data_nifti) * new_mean_normalize
orig_type = img_nifti.get_data_dtype()
img_data = img_data.astype(orig_type)
normalized_data = new_img_like(img_nifti, img_data, affine=None,
copy_header=False)
return normalized_data
def apply_mask_to_image(mask, img):
"""given a mask (in a form of nilearn image) it applies it to the img and
returns the masked img. That is: all the values equals to 0 in the mask
will now be equal to 0 in the masked. The shape of the data remains the
same.
----------
mask : nilearn image, binary
img : nilearn image
Returns
-------
masked
img masked with a mask
"""
if mask.shape == img.shape:
img_data = img.get_fdata()
mask_data = mask.get_fdata()
img_data[mask_data == 0] = 0
masked = new_img_like(img, img_data, affine=None, copy_header=False)
return 1, masked
else:
# there is a shape mismatch between the T1 and mask
err_msg = (f'Shape mismatch between T1: {img.shape} and'
f' the mask: {mask.shape}')
return 0, err_msg
def strip_skull_mask(t1_file_in, t1_file_out, mask_file_out, frac='auto'):
"""strips the skull from the T1 MRI image
----------
t1_file_in: an existing file name
path nifti file with the T1 MRI image with the skull
t1_file_out: path to the file name
path where the new image with skull stripped is to be saved
mask_file_out: path to the file name
path where the calculated mask used to strip the t1_file_in image is
to be saved
frac: 'auto' or float
fractional intensity threshold, default is 'auto' (note: different than
in BET, where it is 0.5). If frac is 'auto', then the mean of the
t1_file_in is calculated. For mean < 20, frac is set to 0.5. If mean is
> 20 and < 25 then frac is set to 0.4. If mean is > 25 then frac is set
to 0.3.
TODO: the correctness of those settings should be tested on the
larger dataset
Returns
-------
t_img: nilearn image
t1 with the skull stripped of
mask: nilearn image
the calculated mask
"""
if frac == 'auto':
data = load_img(t1_file_in).get_fdata()
md = np.mean(data)
if md < 20:
frac = 0.4
elif md < 25:
frac = 0.3
else:
frac = 0.2
skullstrip = BET(in_file=t1_file_in, out_file=t1_file_out, mask=False,
frac=frac)
skullstrip.run()
# it sets all the values > 0 to 1 creating a mask
t_img = load_img(t1_file_out)
mask = math_img('img > 0', img=t_img)
mask.to_filename(mask_file_out)
return t_img, mask
def combine_lesions(path, lesion_str='Lesion'):
"""it loads all the images from the files found in the given path which
include lesion_str in their name (assumed to be the lesion files), adds
them together, and sets all the values different from 0 to 1.
----------
path: path to the directory
path where the lesion files are stored
lesion_str: string
string which must be included in the name of the lesion file
Returns
-------
t_img: image (binary) or 0
returns 0 if there were no matching files found or the nilearn image as
the combined lesion file
"""
n_lesions = 0
for file_name in os.listdir(path):
if lesion_str in file_name:
# is lesion
path_img = os.path.join(path, file_name)
try:
lesion_img = load_img(path_img)
except ImageFileError as e:
err_msg = (f"Problem understanding {path_img} file."
f" Error: {str(e)}")
return 0, err_msg
else:
lesion_data = lesion_img.get_fdata()
if n_lesions == 0:
lesion = lesion_data
elif np.shape(lesion) != np.shape(lesion_data):
# the shapes of the T1 and mask are not the same
err_msg = (f'shape mismatch: {np.shape(lesion)} vs'
f'{np.shape(lesion_data)}')
return 0, err_msg
else:
lesion += lesion_data
n_lesions += 1
if n_lesions > 0:
lesion[lesion > 0] = 1
lesion = lesion.astype('int8') # we don't have to keep it as int
masked = new_img_like(lesion_img, lesion,
affine=None, copy_header=False)
return n_lesions, masked
else:
# there are no lesions found
warnings.warn('there are no lesion files with name including '
f'{lesion_str} found in the {path}.')
return 0, 'no lesions found'
def find_file(path, include_str='t1', exclude_str='lesion'):
"""finds all the files in the given path which include include_str in their
name and do not include exclude_str
----------
path: path to the directory
path where the files are stored
include_str: string
string which must be included in the name of the file
exclude_str: strin
string which may not be included in the name of the file
Returns
-------
files: list
list of filenames matching the given criteria
"""
files = os.listdir(path)
if include_str is not None:
files = [n_file for n_file in files if (include_str in n_file)]
if exclude_str is not None:
files = [n_file for n_file in files if (exclude_str not in n_file)]
return files
def clean_all(dir_to_clean):
"""removes all the files and directories from the given path
----------
path: dir_to_clean
path to directory to be cleaned out
"""
if os.path.exists(dir_to_clean):
shutil.rmtree(dir_to_clean)
os.mkdir(dir_to_clean)
def init_base(path, column_names, file_name='subject_info.csv'):
"""initites the .csv file with the correct column names if it does not
already exist. Checks for the latest subject id and returns the next
subject id which should be used
----------
path: path to the directory
path to the directory where the .csv file should be stored
column_names: list of strings
names of the columns which will be set in the top of the file if it
does not already exist
file_name: string
name of the .csv file, ending on .csv. Eg. subjects.csv
Returns
-------
subj_id: int
next subject id which should be used
dfObj: pandas dataframe
the content of the csv file
"""
file_path = os.path.join(path, file_name)
if not os.path.exists(file_path):
dfObj = pd.DataFrame(columns=column_names)
dfObj.to_csv(file_path)
return 1, dfObj
else:
dfObj = pd.read_csv(file_path)
if len(dfObj) == 0:
return 1, dfObj
else:
subj_id = np.max(dfObj['NewID']) + 1
return subj_id, dfObj
def init_dict(key_names, **kwargs):
"""initiates new dictionary with the keys set to key_names, values either
None, or specified in kwargs
----------
key_names: path to the directory
path to the directory where the .csv file should be stored
**kwargs: any
values will be set in the dictionary. Keys should match keys from the
key_names
Returns
-------
dict: dictionary
dictionary with values set to either None or as specified by kwargs
"""
next_subj = dict.fromkeys(column_names, None)
for key, value in kwargs.items():
next_subj[key] = value
return next_subj
def normalize_to_mni(t1_in, t1_out, template, matrix_out):
"""transforms the t1 to the same space as template.
----------
t1_in: path to the nifti file
path to the file to be normalized
t1_out: path
where the transformed t1 image should be saved
template: path
to the template brain image
matrix_out: path
path where the matrix representing the transformation should be saved
"""
if not os.environ.get('DISPLAY'):
subprocess.run([
"nice", "-n", "10",
"flirt",
"-in", t1_in,
"-out", t1_out,
"-ref", template,
"-omat", matrix_out])
else:
subprocess.run([
"flirt",
"-in", t1_in,
"-out", t1_out,
"-ref", template,
"-omat", matrix_out])
def normalize_to_transform(t1_in, t1_out, template, matrix_in):
"""normalizes the T1 image to the given tranformation.
----------
t1_in: path to the nifti file
path to the file to be normalized
t1_out: path
where the transformed t1 image should be saved
matrix_in: path
path to the matrix used for transformation
Returns
-------
dict: dictionary
dictionary with values set to either None or as specified by kwargs
"""
# takes the saved matrix_out and uses it to transform lesion_in and saves
# the tranformed lesion_in under lesion_out
if not os.environ.get('DISPLAY'):
subprocess.run([
"nice", "-n", "10",
"flirt",
"-in", t1_in,
"-out", t1_out,
"-applyxfm", "-init", matrix_in,
"-ref", template])
# converts mask to binary. The higher threshold the smaller the mask
subprocess.run([
"nice", "-n", "10",
"fslmaths", t1_out,
"-thr", "0.5",
"-bin", t1_out])
else:
subprocess.run([
"flirt",
"-in", t1_in,
"-out", t1_out,
"-applyxfm", "-init", matrix_in,
"-ref", template])
# converts mask to binary. The higher threshold the smaller the mask
subprocess.run([
"fslmaths", t1_out,
"-thr", "0.5",
"-bin", t1_out])
def read_dataset(name):
"""reads the info for the dataset with the name
Note1: sometimes there is more than one lesion stored for one patients.
Those lesions will be collapsed into one
Note2: some patients have scans at multiple times. Those scans will be
considered as separate patient the output image masks will always
consist only of [0, 1]s
----------
name: string
name of the dataset to use
Returns
-------
dict: dictionary
dictionary info for that dataset
"""
if not os.environ.get('DISPLAY'):
# running on the server
data_storage = ('/../../../storage/store2/work/mtelencz/data/'
'stroke/data/')
else:
# running locally
data_storage = ('../../../data/')
# first data set
dataset1 = {
# full public dataset
"name": 'dataset_1',
"raw_dir": os.path.join(data_storage, 'ATLAS_R1.1-public/'),
"lesion_str": 'Lesion',
"t1_inc_str": 't1',
"t1_exc_str": None
}
if name == dataset1['name']:
return dataset1
# second data set
dataset2 = {
"name": 'dataset_2',
"raw_dir": os.path.join(data_storage, 'BIDS-private/'),
# ../../data/BIDS_lesions_zip/',
"lesion_str": 'lesion',
"t1_inc_str": 'T1',
"t1_exc_str": 'label'
}
if name == dataset2['name']:
return dataset2
dataset3 = {
"name": 'dataset_3',
"raw_dir": '../../data/20200901/',
"lesion_str": 'lesion',
"t1_inc_str": 'T1',
"t1_exc_str": 'lesion'
}
if name == dataset3['name']:
return dataset3
dataset4 = {
# full private dataset (to be partly turned public)
"name": 'dataset_4',
"raw_dir": os.path.join(data_storage, 'data_2021_04/'),
"lesion_str": 'lesion',
"t1_inc_str": 'T1w',
"t1_exc_str": 'lesion'
}
if name == dataset4['name']:
return dataset4
dataset5 = {
# full private dataset (to be partly turned public)
"name": 'dataset_5',
"raw_dir": os.path.join(data_storage, 'data_2021_05/'),
"lesion_str": 'lesion',
"t1_inc_str": 'T1w',
"t1_exc_str": 'lesion'
}
if name == dataset5['name']:
return dataset5
# third dataset (healthy patients)
# here all the scans are in the single directory
dataset_healthy = {
"name": 'dataset_healthy',
"raw_dir": '../../data/healthy/',
"lesion_str": 'lesion',
"t1_inc_str": 'T1',
"t1_exc_str": 'label'
}
if name == dataset_healthy['name']:
return dataset_healthy
return None
def bias_field_correction(t1_in):
"""corrects field bias using fast method from fsl.
It will save multiple nifti files in the directory (as described by FAST
https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FAST#Fast) where t1_in is
stored, however only the path to the biased corrected image will be
returned
----------
t1_in: path to the nifti file
path to the file to be biased corrected
Returns
-------
out_file: path to the nifti file
path to the file biased corrected
"""
basename = 'bias'
out_dir = os.path.dirname(t1_in)
basename = os.path.join(out_dir, basename)
if not os.environ.get('DISPLAY'):
subprocess.run([
"nice", "-n", "10",
"fast",
"-t", "1", # is T1
"-o", basename, # basename for outputs
"-B", # output restored image (bias-corrected image)
t1_in]) # nifti image to bias correct
else:
subprocess.run([
"fast",
"-t", "1", # is T1
"-o", basename, # basename for outputs
"-B", # output restored image (bias-corrected image)
t1_in]) # nifti image to bias correct
out = basename + '_restore.nii.gz'
return out
def plot_t1(path_t1, title, fig_dir, fig_file):
use_cmap = plt.cm.get_cmap('Blues').reversed()
try:
plotting.plot_stat_map(path_t1, title=title,
display_mode='ortho', dim=-1,
draw_cross=False, annotate=False, bg_img=None,
cmap=use_cmap,
cut_coords=(0, 0, 0))
except ImageFileError as e:
err_msg = f"Problem understanding {path_t1} file. Error: {str(e)}"
return 0, err_msg
else:
plt.savefig(os.path.join(fig_dir, fig_file))
return 1, None
def plot_mask(path_mask, title, fig_dir, fig_file):
use_cmap = plt.cm.get_cmap('autumn').reversed()
plotting.plot_stat_map(path_mask, title=title,
display_mode='ortho', dim=-1,
draw_cross=False, annotate=False, bg_img=None,
cmap=use_cmap,
cut_coords=[0, 0, 0])
plt.savefig(os.path.join(fig_dir, fig_file))
def plot_overlay(path_mask, path_bg, title, fig_dir, fig_file):
try:
plotting.plot_roi(path_mask, bg_img=path_bg, title=title,
draw_cross=False, cmap='autumn',
cut_coords=[0, 0, 0])
except ImageFileError as e:
err_msg = (f"Problem understanding {path_mask} or {path_bg} file."
f" Error: {str(e)}")
return 0, err_msg
else:
plt.savefig(os.path.join(fig_dir, fig_file))
return 1, None
def move_patient_data(dir_from, dir_to,
t1_name_old='no_skull_norm_t1.nii.gz',
lesion_name_old='no_skull_norm_lesion.nii.gz',
t1_name='T1.nii.gz', lesion_name='truth.nii.gz'):
path_list = find_dirs(raw_dir=dir_from, ext=t1_name_old)
if not os.path.exists(dir_to):
os.mkdir(dir_to)
for path in path_list:
# make the new directory
new_path = os.path.join(dir_to, os.path.basename(path))
if not os.path.exists(new_path):
os.mkdir(new_path)
old_t1_path = os.path.join(path,
t1_name_old)
old_lesion_path = os.path.join(path,
lesion_name_old)
new_t1_path = os.path.join(new_path,
t1_name)
new_lesion_path = os.path.join(new_path,
lesion_name)
shutil.copy(old_t1_path, new_t1_path)
shutil.copy(old_lesion_path, new_lesion_path)
def preprocess_image(next_id, path_raw, path_template, subj_info_file):
print(f'subject {next_id}, working on {path_raw}')
path_results = os.path.join(results_dir, f'subject_{next_id}')
path_figs = os.path.join(path_results, 'figs')
# create output path. if it already exists. remove it and create clean
if os.path.exists(path_results):
shutil.rmtree(path_results)
os.mkdir(path_results)
os.mkdir(path_figs)
# initiates info dict for the new subject
next_subj = init_dict(column_names, RawPath=path_raw,
ProcessedPath=path_results, NewID=next_id)
# 1. combine lesions
# check if multiple lesion files are saved
# combines them and sets to 0 or 1
print(f's{next_id}: combining lesions and setting them to 0s and 1s')
ok, lesion_img = combine_lesions(path_raw, lesion_str=data['lesion_str'])
if not ok:
# something went wrong
next_subj['Error'] = lesion_img
save_to_csv(subj_info_file, next_subj, next_id)
return next_subj
next_subj['RawLesionSize'] = int(np.sum(lesion_img.get_fdata()))
next_subj['RawSize_x'], next_subj['RawSize_y'], \
next_subj['RawSize_z'] = lesion_img.shape
# 2. remove the skull (from t1 and mask)
print(f's{next_id}: stripping skull')
file_in = find_file(path=path_raw,
include_str=data['t1_inc_str'],
exclude_str=data['t1_exc_str'])
assert len(file_in) == 1 # only a single T1 file should be found
t1_file = os.path.join(path_raw, file_in[0])
t1_no_skull_file = os.path.join(path_results, 't1_no_skull.nii.gz')
mask_no_skull_file = os.path.join(path_results, 'mask_no_skull.nii.gz')
no_skull_t1_img, mask_img = strip_skull_mask(
t1_file, t1_no_skull_file, mask_no_skull_file)
ok, no_skull_lesion_img = apply_mask_to_image(mask_img, lesion_img)
if not ok:
# something went wrong
next_subj['Error'] = no_skull_lesion_img
save_to_csv(subj_info_file, next_subj, next_id)
return next_subj
no_skull_lesion_file = os.path.join(path_results,
'no_skull_lesion.nii.gz')
no_skull_lesion_img.to_filename(no_skull_lesion_file)
assert no_skull_lesion_img.shape == no_skull_t1_img.shape
# 3. correct bias
print(f's{next_id}: correcting bias. this might take a while')
t1_no_skull_file_bias = bias_field_correction(t1_no_skull_file)
# 4. align the image, normalize to mni space
print(f's{next_id}: normalizing to mni space')
no_skull_norm_t1_file = os.path.join(
path_results, 'no_skull_norm_t1.nii.gz'
)
no_skull_norm_lesion_file = os.path.join(
path_results, 'no_skull_norm_lesion.nii.gz')
transform_matrix_file = os.path.join(path_results, 'matrix.mat')
normalize_to_mni(t1_no_skull_file_bias, no_skull_norm_t1_file,
template_brain_no_skull, transform_matrix_file)
normalize_to_transform(no_skull_lesion_file, no_skull_norm_lesion_file,
path_template, transform_matrix_file)
# TODO: any other steps? resampling?
# 5. Plot the results
print(f's{next_id}: plotting and saving figs')
plot_errs = ''
ok, err = plot_t1(template_brain, title='template',
fig_dir=path_figs, fig_file='0_template' + ext_fig)
if not ok:
plot_errs += err
ok, err = plot_t1(template_brain_no_skull, title='template, no skull',
fig_dir=path_figs,
fig_file='0_1_template_no_skull' + ext_fig)
if not ok:
plot_errs += err
ok, err = plot_t1(t1_file, title='original',
fig_dir=path_figs, fig_file='1_original_t1' + ext_fig)
if not ok:
plot_errs += err
plot_mask(mask_no_skull_file, title='mask',
fig_dir=path_figs, fig_file='2_mask_no_skull' + ext_fig)
ok, err = plot_t1(t1_no_skull_file, title='original, no skull',
fig_dir=path_figs,
fig_file='3_original_no_skull' + ext_fig)
if not ok:
plot_errs += err
ok, err = plot_t1(t1_no_skull_file_bias, title='original, no skull',
fig_dir=path_figs,
fig_file='3_5_original_no_skull_bias' + ext_fig)
if not ok:
plot_errs += err
plot_mask(lesion_img, title='lesion',
fig_dir=path_figs, fig_file='4_lesion' + ext_fig)
plot_mask(no_skull_lesion_img, title='lesion, mask',
fig_dir=path_figs,
fig_file='5_mask_lesion_no_skull' + ext_fig)
ok, err = plot_t1(no_skull_norm_t1_file, title='t1, no skull, norm',
fig_dir=path_figs,
fig_file='6_t1_no_skull_norm' + ext_fig)
if not ok:
plot_errs += err
plot_mask(no_skull_norm_lesion_file, title='lesion, no skull, norm',
fig_dir=path_figs,
fig_file='7_lesion_no_skull_norm' + ext_fig)
plot_overlay(lesion_img, path_bg=t1_file, title='before',
fig_dir=path_figs,
fig_file='8_before_t1_lesion' + ext_fig)
plot_overlay(no_skull_norm_lesion_file, path_bg=no_skull_norm_t1_file,
title='after', fig_dir=path_figs,
fig_file='9_after_t1_lesion' + ext_fig)
# plt.show()
plt.close('all')
next_subj['Error'] = plot_errs
# save the info in the .csv file
print(f'saving the info to the {csv_file}')
no_skull_norm_lesion_img = load_img(no_skull_norm_lesion_file)
no_skull_norm_lesion_data = no_skull_norm_lesion_img.get_fdata()
next_subj['NewLesionSize'] = int(np.sum(no_skull_norm_lesion_data))
no_skull_norm_t1_img = load_img(no_skull_norm_t1_file)
assert no_skull_norm_t1_img.shape == no_skull_norm_lesion_data.shape
next_subj['NewSize_x'], next_subj['NewSize_y'], \
next_subj['NewSize_z'] = no_skull_norm_lesion_data.shape
save_to_csv(subj_info_file, next_subj, next_id)
return next_subj
def save_to_csv(subj_info_file, next_subj, next_id):
df = pd.DataFrame(next_subj, index=[int(next_id)])
df.to_csv(subj_info_file, mode='a', header=False)
if __name__ == "__main__":
dataset_name = 'dataset_1' # also dataset_2, TODO: dataset_healthy
# rerun_all: if set to True, all the preprocessed data saved
# so far will be removed
rerun_all = True # careful !!
ext_fig = '.png'
csv_file = 'subject_info.csv'
# data to be saved in the .csv file
column_names = ['RawPath', 'ProcessedPath', 'NewID',
'RawSize_x', 'RawSize_y', 'RawSize_z',
'NewSize_x', 'NewSize_y', 'NewSize_z',
'RawLesionSize', 'NewLesionSize', 'Error']
results_dir = 'data/preprocessing_steps/' # all the preprocessing steps
# can be found here, including the .nii.gz files and the corresponding
#figures
data_dir = 'data/preprocessed/' # only preprocessed T1.nii.gz and
# corresponding truth.nii.gz binary lesion masks are saved in this
# directory
# find mni templates at:
# http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009
# use mni_icbm152_t1_tal_nlin_asym_09c.nii.gz for lower resolution
# and smaller data size
# use mni_icbm152_t1_tal_nlin_asym_09b_hires for higher resolution
# but larger data size
template_file = 'mni_icbm152_t1_tal_nlin_asym_09c.nii.gz'
template_brain = os.path.join('../../data/',
'mne_template',
template_file)
# find all the directories with the 'nii.gz' files
ext = '.nii.gz'
data_info = read_dataset(dataset_name)
assert data_info is not None
raw_dir = data_info['raw_dir']
print(f'Wait. I am searching for "{ext}" files in {raw_dir}')
path_list = find_dirs(raw_dir=raw_dir, ext=ext)
n_dirs = len(path_list)
if rerun_all:
print(f'cleaning up {results_dir}')
clean_all(results_dir)
# strip the skull from template brain
template_brain_no_skull = os.path.join(results_dir, 'template.nii.gz')
template_mask = os.path.join(results_dir, 'template_mask.nii.gz')
strip_skull_mask(template_brain, template_brain_no_skull, template_mask,
frac=0.5)
next_id, df_info = init_base(results_dir, column_names=column_names,
file_name=csv_file)
# remove all the paths from the path_list which are already stored in the
raw_paths_stored = np.array(df_info['RawPath'])
path_list = [path for path in path_list if path not in raw_paths_stored]
print(f'begining to analyze {n_dirs} patient directories')
subj_info_file = os.path.join(results_dir, csv_file)
dict_result = Parallel(n_jobs=N_JOBS)(
delayed(preprocess_image)(
next_id+idx, path_raw, template_brain_no_skull, subj_info_file)
for idx, path_raw in enumerate(path_list)
)
# move all the patients final preprocessed results to the data_dir:
move_patient_data(dir_from=results_dir, dir_to=data_dir,
t1_name_old='no_skull_norm_t1.nii.gz',
lesion_name_old='no_skull_norm_lesion.nii.gz',
t1_name='T1.nii.gz', lesion_name='truth.nii.gz')
print(f'saved results from {len(dict_result)} patient directories')
| true |
4d3e2056bd7fcadcc761dbcb7dfd90a11679f0ca | Python | adriaant/calmpy | /simulator/tools.py | UTF-8 | 7,318 | 2.765625 | 3 | [] | no_license | # -*-*- encoding: utf-8 -*-*-
# pylint: disable=E1101
from __future__ import absolute_import, unicode_literals, division
import logging
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import colors
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D # noqa
from django.utils.six.moves import xrange
logger = logging.getLogger(__name__)
class BaseTool(object):
    """Common base for network analysis tools.

    Keeps a reference to the CALM network under inspection so that
    subclasses can query its modules, connections and inputs.
    """

    def __init__(self, network):
        # Shared network instance used by every tool subclass.
        self.network = network
class ConvergenceMap(BaseTool):
    """Create a convergence map by varying values for indices
    x and y of given input module and plotting winner of
    target module."""

    # Qualitative palette used to color the winning node indices.
    color_map = ['#000000', '#FF0000', '#00FF00', '#0000FF',
                 '#FFFF00', '#FF00FF', '#800080', '#FF7F00',
                 '#00FFFF', '#996633', '#409970', '#FF0080']

    def display(self, input_label, x, y, module_name, dim=100, show_plot=True):
        """Sweep input nodes ``x`` and ``y`` over (0, 1] and record the
        winner of ``module_name`` for each value combination.

        Parameters
        ----------
        input_label : str
            Key into ``self.network.inputs`` selecting the input module.
        x, y : int
            Indices of the two input nodes being varied.
        module_name : str
            Name of the module whose winner is recorded.
        dim : int
            Number of samples per axis; the map is ``dim`` x ``dim``.
        show_plot : bool
            If True, render the map with matplotlib; otherwise return
            the raw winner matrix.

        Returns
        -------
        numpy.ndarray or None
            ``dim`` x ``dim`` array of winner indices when ``show_plot``
            is False; None if ``module_name`` is unknown.
        """
        target_mdl = self.network.module_with_name(module_name)
        if not target_mdl:
            return None
        winners = np.empty([dim, dim], dtype='d')
        step = 1.0 / dim
        input_nodes = self.network.inputs[input_label].r
        # linspace guarantees exactly `dim` samples. A float-stepped
        # np.arange(step, 1.0 + step, step) can round up to dim + 1
        # values and overflow `winners` with an IndexError.
        values = np.linspace(step, 1.0, dim)
        for i, x_val in enumerate(values):
            for j, y_val in enumerate(values):
                input_nodes[x] = x_val
                input_nodes[y] = y_val
                self.network.test()
                winners[i, j] = target_mdl.winner
        if show_plot:
            cmap = colors.ListedColormap(self.color_map)
            # NOTE(review): these boundaries only bin winners 0-5 even
            # though the palette has 12 entries -- confirm whether
            # larger winner indices can occur here.
            norm = colors.BoundaryNorm([0, 1, 2, 3, 4, 5, 6], cmap.N)
            plt.imshow(winners, cmap=cmap, norm=norm)
            plt.axis('off')
            plt.show()
        else:
            return winners
class WeightPlot(BaseTool):
    """Create a 3D plot of the weights between two given modules."""

    def display(self, from_name, to_name, show_plot=True, wire=False):
        """Render the weight matrix of one connection as a 3D surface.

        Parameters
        ----------
        from_name, to_name : str
            Names of the source and destination modules; the connection
            between them is looked up on the network.
        show_plot : bool
            If True, open an interactive matplotlib window; otherwise
            return the ``matplotlib.pyplot`` module for further use.
        wire : bool
            If True, draw a wireframe instead of a shaded surface.

        Returns
        -------
        module or None
            ``matplotlib.pyplot`` when ``show_plot`` is False, None when
            the connection does not exist (or after showing the plot).
        """
        connection = self.network.get_connection_for(from_name, to_name)
        if connection:
            weights = connection.weights
            dim = len(weights)
            X, Y = np.mgrid[:dim, :dim]
            fig = plt.figure()
            # fig.gca(projection='3d') was deprecated in matplotlib 3.4
            # and removed in 3.6; add_subplot works on old and new APIs.
            ax = fig.add_subplot(projection='3d')
            if wire:
                ax.plot_wireframe(X, Y, weights, rstride=1, cstride=1)
            else:
                ax.plot_surface(X, Y, weights,
                                rstride=1, cstride=1,
                                cmap=cm.coolwarm, linewidth=0,
                                antialiased=False)
            if show_plot:
                plt.show()
            else:
                return plt
        return None
class BifurcationDiagram(BaseTool):
    """Create a bifurcation diagram by varying the value of one input node
    and plotting the value of a target R-node per iteration.
    Assumes input modules have desired pattern set."""
    def display(self, input_mdl, input_idx, mdl_label, r_index, start=0.0, end=1.0, dim=100, iterations=100, transients=10):
        """Sweep input node `input_idx` of `input_mdl` from `start` to `end`
        in `dim` steps and scatter-plot `iterations` samples of R-node
        `r_index` of module `mdl_label` at each parameter value.
        """
        mdl = self.network.module_with_name(mdl_label)
        if not mdl:
            logger.error("Unknown module name!")
            return None
        step = (end - start) / dim
        values = np.arange(start, end + step, step)
        # Pre-sized flat arrays: one column of `iterations` samples per
        # swept value.
        x_vals = np.empty((dim + 1) * iterations, dtype='d')
        y_vals = np.empty((dim + 1) * iterations, dtype='d')
        idx = 0
        for x in values:
            input_mdl.r[input_idx] = x
            self.network.reset()
            # we ignore first couple of iterations since those are transients
            for _ in xrange(0, transients):
                self.network.test_one()
            for _ in xrange(0, iterations):
                self.network.test_one()
                try:
                    x_vals[idx] = x
                    y_vals[idx] = mdl.r[r_index]
                except IndexError:
                    break  # one off end
                idx += 1
            # for/else: only reached when the inner loop completed without
            # break; a break (arrays full) aborts the whole sweep.
            else:
                continue
            break
        plt.scatter(x_vals, y_vals, s=1)
        plt.xlim(start, end)
        plt.ylim(min(y_vals), max(y_vals))
        plt.show()
class SinglePhasePortrait(BaseTool):
    """Create a Poincaré section of a phase portrait by plotting the value of
    a target R-node against that of its paired V-node per iteration.
    Assumes input modules have desired pattern set."""
    def display(self, iterations=500):
        """Scatter-plot successive total-activation samples against each
        other (step t vs step t+1), coloured by angle.

        NOTE(review): the class docstring describes R-node vs V-node
        plotting, but this implementation samples
        network.total_activation() - confirm which is intended.
        """
        x_vals = np.empty(iterations, dtype='d')
        y_vals = np.empty(iterations, dtype='d')
        self.network.reset()
        # we ignore first 100 iterations since those are transients
        for _ in xrange(0, 100):
            self.network.test_one()
        for idx in xrange(0, iterations):
            self.network.test_one()
            x_vals[idx] = self.network.total_activation()
            self.network.test_one()
            y_vals[idx] = self.network.total_activation()
        # Colour each point by its polar angle for visual structure.
        color_map = np.arctan2(y_vals, x_vals)
        plt.scatter(x_vals, y_vals, s=42, c=color_map, lw=0)
        # Pad the axis limits by 1% of the data range.
        edge_r = (max(x_vals) - min(x_vals)) / 100
        edge_v = (max(y_vals) - min(y_vals)) / 100
        plt.xlim(min(x_vals) - edge_r, max(x_vals) + edge_r)
        plt.ylim(min(y_vals) - edge_v, max(y_vals) + edge_v)
        plt.show()
class StackedPhasePortrait(BaseTool):
    """Create multiple Poincaré sections of a phase portrait by plotting the value of
    a target R-node against that of its paired V-node per iteration.
    Assumes input modules have desired pattern set."""
    def display(self, input_mdl, input_idx, width=10, step=0.001, iterations=500):
        """Stack phase portraits for 2*width+1 input values centred on the
        current value of input node `input_idx` and `step` apart.

        Aborts with an error log if the swept range would leave [0, 1].
        """
        step = np.float64(step)
        cur_val = input_mdl.r[input_idx]
        start = cur_val - (width * step)
        if start < 0.0:
            logger.error("Range will be out of bounds!")
            return
        end = cur_val + (width * step)
        if end > 1.0:
            logger.error("Range will be out of bounds!")
            return
        x_vals = np.empty(iterations * (2 * width + 1), dtype='d')
        y_vals = np.empty(iterations * (2 * width + 1), dtype='d')
        values = np.arange(start, end + step, step)
        idx = 0
        for x in values:
            input_mdl.r[input_idx] = x
            self.network.reset()
            # we ignore first 100 iterations since those are transients
            for _ in xrange(0, 100):
                self.network.test_one()
            for _ in xrange(0, iterations):
                self.network.test_one()
                try:
                    x_vals[idx] = self.network.total_activation()
                except IndexError:
                    break
                self.network.test_one()
                try:
                    y_vals[idx] = self.network.total_activation()
                except IndexError:
                    break
                print("{0}: {1}".format(x_vals[idx], y_vals[idx]))
                idx += 1
            # for/else: continue to the next swept value unless the inner
            # loop broke because the sample arrays are full.
            else:
                continue
            break
        color_map = np.arctan2(y_vals, x_vals)
        plt.scatter(x_vals, y_vals, s=42, c=color_map, lw=0)
        # Pad the axis limits by 10% of the data range.
        edge_r = (max(x_vals) - min(x_vals)) / 10
        edge_v = (max(y_vals) - min(y_vals)) / 10
        plt.xlim(min(x_vals) - edge_r, max(x_vals) + edge_r)
        plt.ylim(min(y_vals) - edge_v, max(y_vals) + edge_v)
        plt.show()
| true |
2cd0d6c8b1a9a22b9897e8370642931ab1ff659e | Python | gulup/BotW-SBFRES-to-FBX | /bfresextraction/MultiPartModelCombiner.py | UTF-8 | 935 | 3.0625 | 3 | [] | no_license | import os, shutil
def findAndCombineMultipartModels():
    """Scan the working directory tree for multi-part model folders
    (names ending in a two-digit part suffix such as -00, -01, ...) and
    create a "(Pending)" staging directory for each model root.
    """
    # Checks for an ending like -00, -01, -02, etc.
    def isMultipartFolder(folderPath: str):
        # The old version returned inside the loop's first iteration, so it
        # only ever tested "-00"; test every suffix -00 .. -98.
        return any(folderPath.endswith("-%02d" % i) for i in range(0, 99))

    # Find all models which end with -00, -01, etc, and move them into their own pending directory.
    for root, dirs, files in os.walk("."):
        for dir in dirs:
            if isMultipartFolder(dir):
                # Strip the 3-character "-NN" suffix to get the model root.
                folderRootPath = os.path.join(root, dir[0:len(dir) - 3])
                completedFolderPath = folderRootPath + " (Complete)"
                pendingFolderPath = folderRootPath + " (Pending)"
                print("Determined that " + dir + " is a multipart folder.")
                # NOTE(review): the original referenced an undefined name
                # `combinedFolder` (NameError); per the comment above, the
                # pending directory appears to be the intended target.
                if not os.path.exists(pendingFolderPath):
                    os.makedirs(pendingFolderPath)
| true |
8f9a78023a22ce4d1a80bb70c3963e1c9ebb509f | Python | Valkyrja3607/AtCoder | /atcoder.jp/abc059/abc059_a/Main.py | UTF-8 | 108 | 2.9375 | 3 | [] | no_license | a,b,c=input().split()
ans=""
# a, b and c are the three words read from stdin on the previous line.
# Build the acronym by shifting each word's first code point down by 32
# (ASCII 'a' - 'A' == 32); assumes lowercase input - TODO confirm.
ans+=chr(ord(a[0])-32)
ans+=chr(ord(b[0])-32)
ans+=chr(ord(c[0])-32)
print(ans)
c123e193821adb6c0d9d9b5498d7bd0ebc2b0e2b | Python | Godgaojiafu88/AID2006 | /game2048/game2048.py | UTF-8 | 2,122 | 3.578125 | 4 | [] | no_license | map = [
[2, 0, 0, 2],
[4, 2, 0, 2],
[2, 4, 2, 4],
[0, 4, 0, 4],
]
list_merge=[]
class Game2048Viem:
    """Console front-end for the 2048 board: renders the grid and
    forwards one user move per loop iteration to the controller."""
    def __init__(self):
        # The view owns its controller and delegates every move to it.
        self.controller=Game2048Controller()
    def __display_menu(self):
        # Render the board, one row per line.
        for row in map:
            print(row)
    def print_direction(self):
        # Read a move (left/right/up/down, in Chinese) and dispatch it;
        # anything unrecognised is silently ignored.
        direction=input("请输入方向(上下左右)")
        dispatch = {
            "左": self.controller.move_left,
            "右": self.controller.move_right,
            "上": self.controller.move_up,
            "下": self.controller.move_down,
        }
        action = dispatch.get(direction)
        if action is not None:
            action()
    def main(self):
        # Game loop: draw the board, then apply one move, forever.
        while True:
            self.__display_menu()
            self.print_direction()
class Game2048Controller:
    """Board logic for 2048.  Every method works in place on the
    module-level globals `map` (the square board) and `list_merge`
    (the row currently being merged)."""
    def zero_to_end(self):  # shift zero elements to the end
        """Move every zero in list_merge to the tail, keeping tile order."""
        for i in range(len(list_merge) - 1, -1, -1):
            if list_merge[i] == 0:
                del list_merge[i]
                list_merge.append(0)
    def merge(self):  # merge
        """Compact zeros, then combine equal adjacent tiles left-to-right.

        Deleting the merged neighbour and appending a 0 keeps the row
        length constant, and the loop index never revisits a freshly
        doubled tile, so each tile merges at most once per move.
        """
        self.zero_to_end()
        for i in range(len(list_merge) - 1):
            if list_merge[i] == list_merge[i + 1]:
                list_merge[i] += list_merge[i + 1]
                del list_merge[i + 1]
                list_merge.append(0)
    def move_left(self):  # move left
        """Merge every row in place; list_merge aliases the actual row."""
        global list_merge
        for line in map:
            list_merge = line
            self.merge()
    def move_right(self):  # move right
        """Merge each row right-to-left by merging a reversed copy, then
        writing it back through a reversed slice assignment."""
        global list_merge
        for line in map:
            list_merge = line[::-1]
            self.merge()
            line[::-1] = list_merge
    def square_matrix_transposition(self):  # transpose
        """Transpose `map` in place by swapping each off-diagonal pair once."""
        for c in range(1, len(map)):  # 1 2 3
            for r in range(c, len(map)):
                map[r][c - 1], map[c - 1][r] = map[c - 1][r], map[r][c - 1]
    def move_up(self):  # move up
        """Up = transpose, merge left, transpose back."""
        self.square_matrix_transposition()
        self.move_left()
        self.square_matrix_transposition()
    def move_down(self):  # move down
        """Down = transpose, merge right, transpose back."""
        self.square_matrix_transposition()
        self.move_right()
        self.square_matrix_transposition()
# Entry point: create the console view and start the interactive game loop.
view=Game2048Viem()
view.main()
4fc1bbc371f6d9ae96de7fff474d757ab92e72cb | Python | ConJov/NZJ-Repository | /Task 1 - NZJ.py | UTF-8 | 248 | 3.140625 | 3 | [] | no_license | """Task 1 - N Z J"""
Sessions_Attended = {'sessions' : '1011,2344,3222,44322,555,6332,721,8789,99,1011,1124,1245,137,1499'}
n = Sessions_Attended['sessions']
n = n.split(",")
print("I have attended : ", len(n), "sessions")
| true |
301f10f8dba742fddcf5140744745a3a094405a0 | Python | premanshum/pythonWorks | /aFolder/Theory/Datastructure.py | UTF-8 | 5,125 | 4.125 | 4 | [] | no_license | '''
collections:
- str
- list
- dict
- tuple
- range
- set
1. str
- Immutable sequence of homogeneous unicode codepoints (characters)
- Single quote and double quote
- "first" "second" => "firstsecond"
- help(str) => to get a list of functions on str
- name = " ".join(["Prem", "Anshu", "Mukherji"]) >>> 'Prem Anshu Mukherji'
- join is faster and efficient than other concatenation mechanism
- partition method divides a string into three around a separator: prefix, separator, suffix
- Use underscore as a dummy name for the separator; Underscore is understood by many tools;
- "Unforgetable".partition("forget") >> ('Un', 'forget', 'able')
- departure, _, arrival = "London:Edinburgh".partition(":") >> departure = London
- Use format to insert values into string; replacement fields delimited by { and }
- "The age of {0} is {1}.format("Jim", 32) >> The age of Jim is 32
- "The age of {name} is {age}".format(name="Jim", age=32) => named parameter
- pos = ("Jim", 32) => a Tuple declaration
- "The age of {pos[0]} is {pos[1]}".format(pos=pos) => Use of tuple in place of parameter
-
2. byte
- immutable sequence of bytes
- d = b'some bytes'
- d.split() => [b'some', b'bytes']
3. list
- mutable sequence of heterogeneous objects
- s = ["apple", "oranges", 7, 9.0, ] => last character can be comma, list can be heterogeneous
- b = [] => empty list
- Can be indexed from the end, using negative indexes
- "This is a string".split()[-1] >> 'string'
- "This is a string".split()[-2] >> 'a'
- Negative indexes are better than forward indexing
    - Avoid seq[len(seq) - 1];
- Slicing extracts part of a list; slice = seq[start:stop]; stop is not included;
- "This is a string".split()[1:-1] >> ['is', 'a']
- "This is a string".split()[1:] >> ['is', 'a', 'string']
- del aSeq[3] => removes the item at index 3
- aSeq.remove('This') => removes 'This' from the list
- aSeq.reverse => reverse sort
- aSeq.sort(key) => sorts on the basis of the key
4. dictionary
- mutable mappings of key to values
- unordered mapping from unique, immutable keys to mutable values
- {k1: v1, k2: v2}
- d = {'alice': '42', 'bob': '37', 'eve': '39'} => dictionary with name as key, age as value
- d['alice'] >>> 42 => Retrieval is by key
- d['charles'] = '41' => a new entry is made with key as 'charles' and value as 41
- d = dict (g='green', r='red', y='yellow') >> {'g':'green', 'r':'red', 'y':'yellow'}
- d.update(dict(b='blue')) >> {'g':'green', 'r':'red', 'y':'yellow', 'b':'blue'}
-
5. tuple
- heterogeneous immutable sequence of arbitrary object
- once created, objects can NOT be replaced, added or removed from tuple.
- t = ("Norway", 4.953, 3) => new tuple with a string, float and int
- t[1] >>> 4.953 => tuples can be accessed using 'zero' based index
- t + (33.43, 256e9) >> ("Norway", 4.953, 3, 33.43, 256e9) => Tuples can be concatenated
- nestedTuple = ((220, 17), (1187, 110), (329, 9))
- wrongTuple = (391) => can NOT make a single element tuple; evaluates as integer;
    - rightTuple = (391,) => an extra comma at the end makes it a single element tuple;
- emptyTuple = ()
- p = 1, 1, 3, 6, 8 => no parantheses still makes it a tuple
- Tuple can be used to return multiple values from a function
- min, max = min_max([3, 7, 1, 8]) => min = 1, max = 8
- Tuple unpacking allows us to destructure directly into name reference
- fname, mname, lname = "prem anshu mukherji".split() => fname = "prem", mname = "anshu" etc.
- a, b = b, a => swapping of two variables
- tupleFromList = tuple([32, 12, 76])
- tupleFromString = tuple ("premanshu") => ('p', 'r', 'e', 'm', 'a', 'n', 's', 'h', 'u')
- m in tuple('premanshu') >> true => membership testing ('in', 'not in')
6. range
- sequence of arithmatic progression of integers
- r = range(5) => range (0, 5)
- r = range (5, 10) => range from 5 to 9
- r = range (5, 10, 2) >> 5, 7, 9 => start, stop, step
- Instead of using range, prefer enumerate, to do the enumeration job
- enumerate () yields (index, value) tuples
- for p in enumerate([33, 44, 55])
print (p)
>> (0, 33), (1, 44), (2, 55)
- for i, v in enumerate([33, 44, 55])
print ("index = {}, value = {}".format(i, v))
>> index = 0, value = 33
>> index = 1, value = 44
>> index = 2, value = 55
-
7. Set
- mutable unordered collection of unique, immutable objects
- p = {6, 8, 65, 234, 1233}
- e = set() => empty set
- add and update methods modify the set
- set allows for set algebra
    - subsets, difference, symmetric-difference, intersection, union etc. are supported
    - issubset and isdisjoint are also supported
''' | true |
fd4968b2297fc6ba3fbf70c69f4939a72acb22ec | Python | asiya00/Exercism | /python/protein-translation/protein_translation.py | UTF-8 | 556 | 3 | 3 | [] | no_license | def proteins(strand):
    # Map each protein name to the codon(s) that encode it; "STOP" groups
    # the three translation-terminating codons.
    di = {"Methionine": "AUG","Phenylalanine": ("UUU","UUC"), "Leucine": ("UUA","UUG"), "Serine": ("UCU", "UCC", "UCA", "UCG"), "Tyrosine": ("UAU", "UAC"), "Cysteine": ("UGU", "UGC"), "Tryptophan": "UGG", "STOP": ("UAA", "UAG", "UGA")}
    li = []
    # Walk the RNA strand three bases (one codon) at a time.
    for i in range(0,len(strand),3):
        codons = str(strand[i:i+3])
        for key,value in di.items():
            # NOTE(review): `in` is a substring test when di[key] is a str
            # ("AUG"/"UGG"), so a trailing partial codon like "AU" would
            # wrongly match "AUG" - confirm inputs are multiples of 3.
            if codons in di[key]:
                if key=="STOP":
                    # Terminate translation at the first STOP codon.
                    return(li)
                li.append(key)
    return(li)
| true |
a8ac5659092b88c631019dc20fbd091fed40724c | Python | bs-lab/pegboard_slide_python | /plotting.py | UTF-8 | 2,868 | 2.890625 | 3 | [] | no_license | import sys
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from math import cos, sin, pi
# constants
DEBUG = False
# --------------------------------------------------------------------------------------------------
def make_circle_points(center: list, radius: float) -> tuple:
"""only used for plotting, not analysis"""
theta = 0
x = []
y = []
while True:
if theta > 2 * pi:
break
theta += 2 * pi * 0.01
x.append(radius * cos(theta) + center[0])
y.append(radius * sin(theta) + center[1])
return x, y
# --------------------------------------------------------------------------------------------------
def make_plot(frame_list, pucks, pegs, flat_surfaces, plot_title: str = "", avi_filename: str = "",
              fps: int = 30):
    """Creates either a matplotlib graph or an mp4 file showing the results of the analysis"""
    # ----------------------------------------------------------------------------------------------
    def init_plot():
        # Draw the static scene: pegs as blue circles, flat surfaces as
        # red segments.  Returns the artists tuple FuncAnimation expects.
        plt.title(plot_title)
        plt.xlim(-9, 9)
        plt.axis('equal')
        for peg in pegs:
            xx, yy = make_circle_points(peg.center, peg.radius)
            plt.plot(xx, yy, 'b')
        for flats in flat_surfaces:
            plt.plot([flats.pt1[0], flats.pt2[0]], [flats.pt1[1], flats.pt2[1]], 'r')
        return puck_dot,
    # ----------------------------------------------------------------------------------------------
    def update_plot(xy):
        # Animation callback: xy holds one (x, y) pair per puck for this frame.
        xs = []
        ys = []
        for x in xy:
            xs.append(x[0])
            ys.append(x[1])
            # if DEBUG:
            #     print(f'x {x}, xy {xy}')
        puck_dot.set_data(xs, ys)
        return puck_dot,
    # ----------------------------------------------------------------------------------------------
    fig1 = plt.figure()
    # Marker size scales with the first puck's radius; presumably all pucks
    # share one radius - TODO confirm.
    puck_dot, = plt.plot([], [], 'ro', ms=45*pucks[0].radius)

    if avi_filename.strip():
        # Video mode: render every frame through ffmpeg into the given file.
        ani = animation.FuncAnimation(fig1, update_plot, frame_list,
                                      init_func=init_plot, blit=True, save_count=len(frame_list[0]))
        sys.stderr.write(f'creating "{avi_filename}"\n')
        Writer = animation.writers['ffmpeg']
        writer = Writer(fps=fps, metadata=dict(artist='bs-lab'), bitrate=1800)
        ani.save(avi_filename, writer=writer)
    else:
        # Interactive mode: replay the trajectory with a fixed pause per frame.
        init_plot()
        for f in range(len(frame_list[0])):
            # plt.pause(1/fps)
            plt.pause(0.04)
            for p, puck in enumerate(pucks):
                x = frame_list[p][f][0]
                y = frame_list[p][f][1]
                plt.plot(x, y, 'r.')
                xx, yy = make_circle_points([x, y], puck.radius)
                plt.plot(xx, yy, 'r')
            # plt.draw()
    # prevent figure from disappearing
    plt.show()
77fa9bcf5bdcc867cc3cb735a3af909fdc73fad0 | Python | MatthewNeu/SFARI | /remove_footer.py | UTF-8 | 1,058 | 2.71875 | 3 | [] | no_license | #!/usr/bin/python
import sys, re
from sys import argv
import os
################################
# Matthew Neu
# Aug 9 2016
# This script removes all commented lines from the bottom of a VCF file,
# then creates a new file with "footerless" appended to name.
#
# to run: remove_footer.py <filename.vcf>
################################
input_filtered_var_file = sys.argv[1]
dirname = os.path.dirname(input_filtered_var_file)
original_file = os.path.split(input_filtered_var_file)[1].split(".")[0]
try:
input_file = open(input_filtered_var_file, "r")
except IOError:
print "unable to open" , input_filtered_var_file
sys.exit()
with input_file:
line_list = input_file.readlines()
pop_line = line_list.pop()
while pop_line.startswith("#"):
pop_line = line_list.pop()
continue
outfile_name = os.path.join(dirname, original_file + "_footerless.vcf")
try:
out_file = open(outfile_name, "w")
except IOError:
print "unable to open" , outfile_name
sys.exit()
with out_file:
out_file.writelines(line_list)
| true |
d6cb420620b1f2735311f8d4057fb0620c8a8c8b | Python | AAAEEEE/conceptual_img_ref | /src/models/listener_population.py | UTF-8 | 6,748 | 2.640625 | 3 | [] | no_license | import torch
import torch.nn as nn
from torch.distributions.bernoulli import Bernoulli
from torch.distributions.normal import Normal
import numpy as np
import pickle
class ListenerPopulation(nn.Module):
    """A population of listener agents grouped into clusters.

    Each cluster corrupts a subset of the attribute dimensions; each agent
    sampled from a cluster gets a per-attribute epsilon threshold and a
    probability of guessing randomly instead of rationally.
    """
    def __init__(self, n_clusters, n_attributes, n_corrupt, n_agents,
                 def_epsilon, corr_epsilon, epsilon_stdd,
                 def_prob, corr_prob,
                 def_rand_p, corr_rand_p):
        """Store population parameters and build the sampling distributions.

        def_* values apply to non-corrupted attributes, corr_* values to
        corrupted ones; *_rand_p are random-guess probabilities and
        *_epsilon/epsilon_stdd parameterise the epsilon normal distributions.
        """
        super(ListenerPopulation, self).__init__()
        self._n_clusters = n_clusters
        self._n_agents = n_agents
        self._n_corrupt = n_corrupt
        self._n_attributes = n_attributes
        self._def_epsilon = def_epsilon
        self._corr_epsilon = corr_epsilon
        self._epsilon_stdd = epsilon_stdd
        self._def_prob = def_prob
        self._corr_prob = corr_prob
        self._def_rand_p = def_rand_p
        self._corr_rand_p = corr_rand_p
        # Binary vectors specifying corrupt vectors for clusters.
        self._cluster_attrs = []
        # Real-valued vectors containing corruption probabilities, for sampling agents in cluster.
        self._corrupt_attrs = []
        # Filled in by populate_clusters().
        self._agent_epsilon_mat = None
        self._agent_def_mat = None
        self._agent_id_mat = None
        self._def_bernoulli = Bernoulli(torch.tensor([def_rand_p]))
        self._corr_bernoulli = Bernoulli(torch.tensor([corr_rand_p]))
        self._random_guess = Bernoulli(torch.tensor([0.5]))
        # Define epsilon ranges for corrupt and non-corrupt attributes.
        self._def_dist = Normal(torch.tensor(
            [def_epsilon]), torch.tensor([epsilon_stdd]))
        self._corr_dist = Normal(torch.tensor(
            [corr_epsilon]), torch.tensor([epsilon_stdd]))

    def build_clusters(self, id_dict=None):
        """Define the clusters' corrupted attributes.

        Without `id_dict`, each of the configured clusters corrupts
        `n_corrupt` randomly chosen attributes; with `id_dict`
        (label -> corrupt attribute indices), one cluster is built per
        entry and `_n_clusters` is overwritten to match.
        """
        # Build clusters using parameters if no id_dict specified.
        if id_dict is None:
            attrs = np.arange(self._n_attributes)
            for i in range(self._n_clusters):
                corrupt_probs = np.full((self._n_attributes,), self._def_prob)
                corrupt_idx = np.random.choice(
                    attrs, size=self._n_corrupt, replace=False)
                corrupt_probs[corrupt_idx] = self._corr_prob
                cluster_attrs = np.zeros(corrupt_probs.shape)
                cluster_attrs[corrupt_idx] = 1.0
                self._cluster_attrs.append(cluster_attrs)
                self._corrupt_attrs.append(corrupt_probs)
        # Otherwise build clusters according to dict.
        else:
            # Set number of clusters to match id_dict.
            self._n_clusters = len(id_dict.keys())
            for attr_type, corrupt_idx in sorted(id_dict.items()):
                print(attr_type)
                corrupt_probs = np.full((self._n_attributes,), self._def_prob)
                corrupt_probs[corrupt_idx] = self._corr_prob
                cluster_attrs = np.zeros(corrupt_probs.shape)
                cluster_attrs[corrupt_idx] = 1.0
                self._cluster_attrs.append(cluster_attrs)
                self._corrupt_attrs.append(corrupt_probs)

    def load_clusters(self, clusters_path):
        """Restore cluster definitions previously written by save_clusters().

        NOTE(review): pickle.load must only be used on trusted files - it
        can execute arbitrary code on malicious input.
        """
        # Load both the binary vector and probability vector for all clusters.
        save_dict = pickle.load(open(clusters_path, 'rb'))
        self._cluster_attrs = save_dict['cluster_attrs']
        self._corrupt_attrs = save_dict['corrupt_attrs']

    def save_clusters(self, clusters_path):
        """Pickle the cluster definitions to `clusters_path`."""
        # Store the binary cluster definitions as well as real valued probability vectors.
        save_dict = {'cluster_attrs': self._cluster_attrs,
                     'corrupt_attrs': self._corrupt_attrs}
        pickle.dump(save_dict, open(clusters_path, 'wb'))

    def populate_clusters(self):
        """Sample `n_agents` agents per cluster and build the agent matrices:
        per-agent epsilon vectors, binary corruption masks, and cluster ids.
        """
        # Generate random vector representing corrupted attributes for each agent.
        agent_epsilons = []
        agent_defs = []
        agent_ids = []
        for c in range(self._n_clusters):
            # Get corruption probabilities for cluster.
            corrupt_probs = self._corrupt_attrs[c]
            for i in range(self._n_agents):
                # An attribute is corrupted when its uniform draw falls
                # under the cluster's corruption probability.
                samples = np.random.uniform(size=self._n_attributes)
                corrupt_indeces = np.squeeze(
                    np.argwhere(corrupt_probs - samples >= 0.0))
                def_indeces = np.squeeze(
                    np.argwhere(corrupt_probs - samples < 0.0))
                # Sample epsilon ranges.
                epsilons = torch.zeros(self._n_attributes)
                epsilons[corrupt_indeces] = self._corr_dist.sample(
                    corrupt_indeces.shape).squeeze()
                epsilons[def_indeces] = self._def_dist.sample(
                    def_indeces.shape).squeeze()
                agent_epsilons.append(epsilons)
                # Also keep track of which attributes are corrupted for the agent.
                agent_def = torch.zeros(self._n_attributes)
                agent_def[corrupt_indeces] = 1.0
                agent_defs.append(agent_def)
                # Store agent cluster labels.
                agent_ids.append(c)
        # Consolidate into single matrix.
        self._agent_epsilon_mat = torch.cat(
            agent_epsilons).view(-1, self._n_attributes)
        self._agent_def_mat = torch.cat(
            agent_defs).view(-1, self._n_attributes).long()
        self._agent_id_mat = torch.LongTensor(agent_ids)

    def listen(self, features):
        """Sample one listener per data point and return its cluster labels
        plus per-attribute flip probabilities for `features`.

        Assumes `features` is (batch, n_candidates, n_attributes) given the
        unsqueeze(1)/expand_as pattern - TODO confirm against the caller.
        """
        # Sample one listener per data point.
        listeners = np.random.choice(
            self._agent_epsilon_mat.size(0), size=features.size(0))
        cluster_labels = self._agent_id_mat[listeners]
        defs = self._agent_def_mat[listeners]
        defs = defs.unsqueeze(1).expand_as(features)
        epsilons = self._agent_epsilon_mat[listeners]
        epsilons = epsilons.unsqueeze(1).expand_as(features)
        # Attributes differences smaller than epsilon
        # will definitely be random guesses.
        random_guess_prob1 = (epsilons > torch.abs(features)).float()
        # Random guess probability for default and corrupt attribute cases.
        random_guess_prob2 = random_guess_prob1.new_full(defs.size(),
                                                         self._def_rand_p)
        diff_rand_p = self._corr_rand_p - self._def_rand_p
        random_guess_prob2 = random_guess_prob2 + defs.float() * diff_rand_p
        # Compute complete set of attributes for which to randomly guess.
        # (Union of two independent events: p1 + p2 - p1*p2.)
        random_guess_prob_total = (random_guess_prob1 + random_guess_prob2
                                   - random_guess_prob1 * random_guess_prob2)
        # All random guesses flip the rational guess 50% of the time
        flip_prob = 0.5 * random_guess_prob_total
        return (cluster_labels, flip_prob)
| true |
450006b6a662e91a12deab4247006c204ee612d1 | Python | FlMondon/forecast_SN_GW | /forecast_SN_GW/math_toolbox.py | UTF-8 | 1,093 | 3.03125 | 3 | [] | no_license | """ Math toolbox, that include all the usefull tools."""
import numpy as np
def comp_rms(residuals, dof, err=True, variance=None):
"""
Compute the RMS or WRMS of a given distribution.
:param 1D-array residuals: the residuals of the fit.
:param int dof: the number of degree of freedom of the fit.
:param bool err: return the error on the RMS (WRMS) if set to True.
:param 1D-aray variance: variance of each point. If given,
return the weighted RMS (WRMS).
:return: rms or rms, rms_err
"""
if variance is None: # RMS
rms = float(np.sqrt(np.sum(residuals**2)/dof))
rms_err = float(rms / np.sqrt(2*dof))
else: # Weighted RMS
assert len(residuals) == len(variance)
rms = float(np.sqrt(np.sum((residuals**2)/variance) / np.sum(1./variance)))
#rms_err = float(N.sqrt(1./N.sum(1./variance)))
rms_err = np.sqrt(2.*len(residuals)) / (2*np.sum(1./variance)*rms)
if err:
return rms, rms_err
else:
return rms
| true |
7950b3ce4156848892827c6f6d5fdd35b74334ae | Python | ccena/Learning-Basic-Python | /Chapter 3 Data Manipulation with Pandas/3.11.2 Pandas Time Series Data Structures.py | UTF-8 | 1,577 | 4.09375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
For time stamps, Pandas provides the Timestamp type.
For time periods, Pandas provides the Period type.
For time deltas or durations, Pandas provides the Timedelta type.
Timestamp and DatetimeIndex objects are invoked by using the pd.to_datetime()
function, which can parse a wide variety of formats.
"""
import pandas as pd
import datetime
# pd.to_datetime accepts a mix of datetime objects and date strings in
# several formats and parses them all.
dates = pd.to_datetime([datetime.datetime(2015, 7, 3), '4th of July, 2015',
                       '2015-Jul-6', '07-07-2015', '20150708'])
print('Passing a series of dates by default yields a DatetimeIndex')
print(dates)
print('A TimedeltaIndex is created when one date is subtracted from another:')
print(dates - dates[0]); print('')
# Pandas offers a few functions to make the creation of regular date sequences
# more convenient
print('Regular sequences: pd.date_range()')
print('Specifying a start-and endpoint:')
date_range = pd.date_range('2015-07-03', '2015-07-10')
print(date_range) #'D' denotes daily frequency
print('Specifying a startpoint and a number of periods: ')
print(pd.date_range('2015-07-03', periods=10))
print('Hourly Stamps Example: ')
x = pd.date_range('2015-07-03', periods=8, freq='H')
print(x); print('')
# pd.period_range() and pd.timedelta_range() functions create regular sequences
# of period or time delta values
print('Monthly Periods Example:')
y= pd.period_range('2015-07', periods=8, freq='M')
print(y); print('')
print('Sequence of durations increasing by an hour:')
print(pd.timedelta_range(0, periods=10, freq='H'))
0ee321eb98ca24af47d9b844c283e8cc6422cdce | Python | Fernweh-8/dla_Mai | /wishes.py | UTF-8 | 743 | 3.78125 | 4 | [] | no_license | def add_best_wishes(filename, name, wishes_num):
    # Append `wishes_num` wishes to the file, prompting the user for each.
    with open(filename, 'a+') as wish_list:
        for num in range(wishes_num):
            wish = input(f'{name}, czego życzysz Mai?\n')
            # NOTE(review): writelines() on a str writes it character by
            # character; behaviour equals write(), which would be clearer.
            wish_list.writelines(wish)
            wish_list.write("\n")
def get_best_wishes(filename):
    """Return the entire contents of the wishes file as one string."""
    with open(filename) as wish_file:
        contents = wish_file.read()
    return contents
# Interactive script: collect the user's wishes into a text file, then echo
# the accumulated list back.  The path is machine-specific - TODO confirm.
filename = r'C:/Users/annad/wishes.txt'
name = input('Podaj swoje imię: \n')
wishes_num = int(input('Ile masz życzeń dla Mai? Wpisz cyfrę: \n'))
add_best_wishes(filename, name, wishes_num)
print(f'To lista Twoich życzeń: \n {get_best_wishes(filename)}')
print(f'{name}, dziękuję za te wszystkie życzenia!\n')
print('Pora nominiwać 3 osoby do akcji.')
| true |
dff15da9368c9d4c5c6f32f335ad7160cbd2fe2b | Python | false-git/maildir2thunderbird | /maildir2thunderbird.py | UTF-8 | 1,027 | 2.578125 | 3 | [] | no_license | import sys
import pathlib
import shutil
import imap_tools
# 実行する前にやることを見たいときはTrueにする
DRY_RUN = False
if __name__ == "__main__":
args = sys.argv
if len(args) < 2:
print("Usage: {} maildir".format(args[0]))
exit(1)
maildir = pathlib.Path(args[1]).absolute()
name = maildir.name
subdir = maildir.parent.joinpath("{}.sbd".format(name))
for subfolder in maildir.glob(".??*"):
csd = subdir
for parts in subfolder.name[1:].split("."):
parts = imap_tools.imap_utf7.decode(parts.encode())
sd = csd
csd = sd.joinpath("{}.sbd".format(parts))
if not sd.exists():
if DRY_RUN:
print("mkdir {}".format(sd))
else:
sd.mkdir()
newname = parts
if DRY_RUN:
print("mv {} {}".format(subfolder, sd.joinpath(newname)))
else:
shutil.move(str(subfolder), str(sd.joinpath(newname)))
| true |
a6fbfe544d019b51d64667f8d258d49ed8d7466e | Python | emresevilgen/IE400_Project | /partB.py | UTF-8 | 1,895 | 2.984375 | 3 | [] | no_license | import pulp as pl
import pandas as pd
import numpy as np
def read_data(filepath):
    """Load the distance ('d') and probability ('p') sheets of the Excel
    workbook at `filepath` as header-less DataFrames and return them."""
    distances = pd.read_excel(filepath, sheet_name='d', header=None)
    probabilities = pd.read_excel(filepath, sheet_name='p', header=None)
    return distances, probabilities
# Model parameters: number of centers to open and the probability threshold.
number_of_centers = 4
prob_treshold = 0.6
# Read data
# NOTE(review): the workbook is opened twice; one call returning both
# matrices would suffice.
d_i_j = read_data("data.xlsx")[0]
p_i_j = read_data("data.xlsx")[1]
no_of_villages = d_i_j.shape[0]
# x_i[i] = 1 when village i hosts a center; y_i_j[i][j] = 1 when village i
# is assigned to the center at village j.
x_i = np.empty(no_of_villages, dtype=pl.LpVariable)
y_i_j = np.empty((no_of_villages, no_of_villages), dtype=pl.LpVariable)
# Decision variables
for i in range(no_of_villages):
    x_i[i] = pl.LpVariable("x_"+str(i+1), cat=pl.LpBinary)
    for j in range(no_of_villages):
        y_i_j[i][j] = pl.LpVariable(
            "y_"+str(i+1)+"_"+str(j+1), cat=pl.LpBinary)
d_max = pl.LpVariable("d_max")
p_max = pl.LpVariable("p_max")
# Problem
prob = pl.LpProblem("partA", pl.LpMinimize)
# Objective function: minimize the longest assignment distance.
prob += d_max
# Constraints
prob += p_max == prob_treshold
for i in range(no_of_villages):
    # Every village is assigned to exactly one center.
    prob += pl.lpSum([y_i_j[i][j] for j in range(no_of_villages)]) == 1
    for j in range(no_of_villages):
        # Assignments only to open centers; d_max bounds every used distance.
        # NOTE(review): d_i_j[i][j] indexes the DataFrame column-then-row;
        # confirm the matrix is symmetric or that this ordering is intended.
        prob += y_i_j[i][j] <= x_i[j]
        prob += d_max >= d_i_j[i][j] * y_i_j[i][j]
        # NOTE(review): `!=` is not a valid PuLP constraint operator - this
        # expression evaluates to a plain bool, not a constraint; verify
        # what strict inequality was intended here.
        prob += p_i_j[i][j] * y_i_j[i][j] - p_max != 0
        prob += p_i_j[i][j] * y_i_j[i][j] <= p_max
prob += pl.lpSum([x_i[i] for i in range(no_of_villages)]) == number_of_centers
# Solve
status = prob.solve(pl.CPLEX_PY(msg=0))
if (status == 1):
    # If there is an optimal solution print the result
    center_list = []
    for v in prob.variables():
        if (v.varValue == 1 and v.name[0] == "x"):
            center_list.append(int(v.name[2:]))
    center_list.sort()
    print("Centers are at the villages with numbers", end='')
    for i in center_list:
        print(" " + str(i), end='')
    print(".\nMinimum longest distance is " +
          str(prob.objective.value()) + ".")
else:
    print("No optimal solution.")
| true |
b19cfed81fe439dd4c54954672fff0e1df788548 | Python | FritzHeiden/sesu-beleg | /analyse/min_hasher.py | UTF-8 | 1,270 | 2.515625 | 3 | [] | no_license | from data.signature import Signature
import numpy as np
class MinHasher:
    """Builds MinHash signatures from shingle sets.

    NOTE(review): the author marked this untested; the inline review
    comments point at the concrete problems that would stop it running.
    """
    # Not tested - probably does not run (translated author's note).
    @staticmethod
    def generate_min_hash(shingles, hash_functions):
        """Intended to produce a Signature by applying every hash function
        over the shingle ids and keeping per-strep extreme values."""
        #np.asarray(shingles)
        # min hash dict
        #min_hash = []
        strep_id = 1
        strep_size = 10
        biggest_value = 0
        # NOTE(review): these bind the *classes* list/Signature, not
        # instances; indexing signature_params below will fail.
        signature_params = list
        current_signature = Signature
        article_id = 0 # NOTE(review): placeholder - never set from real data
        # TODO: compute the min hash - run each hash function over the ids
        # of the given shingles from the shingle map (translated).
        for hash in hash_functions:
            # NOTE(review): `hash` is a hash-function object here, so the
            # modulo on the object itself looks wrong - likely an index or
            # hash.getid() was intended.
            if hash%strep_size == 0:
                strep_id += 1
            tmp_value = 0
            for shingle in shingles:
                # NOTE(review): shingles(shingle) calls the collection as a
                # function; and biggestValue (camelCase) is undefined - the
                # local above is biggest_value.
                tmp_value = hash.calculate(shingles(shingle))
                if tmp_value > biggestValue:
                    biggestValue = tmp_value
            signature_params[strep_id][hash.getid()] = biggestValue
            current_signature = (article_id, signature_params)
        # store the smallest value under the same id in the min hash
        # (for hash { for shingle { hash(shingle ids) -> smallest value in min_hash[hash id] } }
        # hash_function.calculate(value)
        return current_signature
| true |
fdb438f24b36af68ec0cd76443e82a17fa1fff7d | Python | irfhanaz/ComputerNetworksLab | /tcpdate-timeclient.py | UTF-8 | 211 | 2.640625 | 3 | [] | no_license | #date time client
import socket
# Connect to the companion date-time server on this machine and print the
# single timestamp message it sends back.
client = socket.socket()
host = socket.gethostname()  # server is expected on the local host
port = 6666                  # must match the server's listening port
client.connect((host, port))
message = client.recv(1024)  # server sends the current time as bytes
print ("Current Time: ", message.decode())
client.close()
bc7a675f8a0d2b03fd79567f878ebb9c00213a3c | Python | halisyilboga1/NeuralStyle-Music | /filter_music.py | UTF-8 | 4,184 | 2.765625 | 3 | [] | no_license | # script to run a frequency analysis on given audio file to determine strength in different passbands
import numpy as np
import matplotlib.pyplot as pyplt
import scipy.io.wavfile as wav
import scipy.signal as signal
def analyze(finname, pps, cutlow, cuthigh, foutname):
	"""Write per-period dominant-frequency data for a WAV file to a CSV.

	Python 2 code.  The file is split into `pps` periods per second; for
	each period a flat-top periodogram estimates the strongest spectral
	component overall, in the low band (< cutlow Hz) and in the high band
	(> cuthigh Hz), and one freq,amplitude pair per band is written to
	`foutname` (the first output line is `pps` itself).  The commented and
	string-quoted blocks keep earlier FFT/Butterworth approaches around
	for reference.
	"""
	Fs, arr = wav.read(finname)
	dt = 1.0 / Fs
	# Samples per period and period count (integer division under Python 2).
	spp = Fs / pps
	numper = arr.shape[0] / spp
	#if multiple channels (i.e. stereo), sum them into one
	if (len(arr.shape) == 2):
		arr = np.sum(arr, axis=1, dtype=np.int64)
	#Blow,Alow = signal.butter(2, cutlow / (Fs / 2.0), btype='lowpass', analog=False, output='ba')
	#Bhigh,Ahigh = signal.butter(2, cuthigh / (Fs / 2.0), btype='highpass', analog=False, output='ba')
	fout = open(foutname, 'w')
	fout.write(str(pps)+'\n')
	for i in range(numper):
		#arrlow = signal.filtfilt(Blow,Alow,arr[i*spp : (i+1)*spp])
		#arrhigh = signal.filtfilt(Bhigh,Ahigh,arr[i*spp : (i+1)*spp])
		"""
		fourier = np.fft.fft(arr[i*spp : (i+1)*spp])
		freq = np.fft.fftfreq(spp, dt)
		#get strongest frequency components in both bands
		fmaxind = 0
		fmaxindlow = 0
		fmaxindhigh = 0
		fmag = np.abs(fourier)
		for j in range(fmag.size):
			if (fmag[j] > fmag[fmaxind] and freq[j] > 0):
				fmaxind = j
			if (fmag[j] > fmag[fmaxindlow] and freq[j] > 0 and freq[j] < cutlow):
				fmaxindlow = j
			if (fmag[j] > fmag[fmaxindhigh] and freq[j] > 0 and freq[j] > cuthigh):
				fmaxindhigh = j
		fout.write(str(freq[fmaxind]) + ',' + str(fmag[fmaxind]) + ',' +
			str(freq[fmaxindlow]) + ',' + str(fmag[fmaxindlow]) + ',' +
			str(freq[fmaxindhigh]) + ',' + str(fmag[fmaxindhigh]) + '\n')
		"""
		#"""
		#get estimate of signal strength from periodogram
		freq1, pxx_spec = signal.periodogram(arr[i*spp : (i+1)*spp], Fs, 'flattop', scaling='spectrum')
		# Track index of the strongest bin overall and per band.
		amaxind = 0
		amaxindlow = 0
		amaxindhigh = 0
		for j in range(pxx_spec.size):
			if (pxx_spec[j] > pxx_spec[amaxind]):
				amaxind = j
			if (pxx_spec[j] > pxx_spec[amaxindlow] and freq1[j] < cutlow):
				amaxindlow = j
			# First clause seeds the high-band index the first time a bin
			# above cuthigh is seen; after that it is max-tracked.
			if ((freq1[j] > cuthigh and freq1[amaxindhigh] < cuthigh) or (pxx_spec[j] > pxx_spec[amaxindhigh] and freq1[j] > cuthigh)):
				amaxindhigh = j
		# sqrt converts the power-spectrum peak to an amplitude estimate.
		fout.write(str(freq1[amaxind]) + ',' + str(np.sqrt(pxx_spec[amaxind])) + ',' +
			str(freq1[amaxindlow]) + ',' + str(np.sqrt(pxx_spec[amaxindlow])) + ',' +
			str(freq1[amaxindhigh]) + ',' + str(np.sqrt(pxx_spec[amaxindhigh])) + '\n')
		#"""
		"""
		print freq1[amaxind]
		print np.sqrt(pxx_spec[amaxind])
		print freq1[amaxindlow]
		print np.sqrt(pxx_spec[amaxindlow])
		print freq1[amaxindhigh]
		print np.sqrt(pxx_spec[amaxindhigh])
		"""
	fout.close()
## used for testing different methods of analyzing the frequency/power spectrum
def debugfunc():
    """Scratchpad comparing three ways of gauging signal strength on a
    known 2 kHz test tone.

    Reads music/2khzsine.wav and prints statistics for (1) the raw FFT,
    (2) a low-pass filtered copy, and (3) a flattop periodogram.  The
    candidate detection thresholds noted during experiments were roughly
    3e7 (FFT magnitude), 15000 (filtered amplitude) and 11300 (periodogram).
    """
    sample_rate, samples = wav.read('music/2khzsine.wav')
    sample_period = 1.0 / sample_rate
    print(sample_rate)
    print(sample_period)
    print(samples.shape)
    # (1) raw spectrum of the whole file
    spectrum = np.fft.fft(samples)
    sample_count = samples.size
    print(sample_count)
    bin_freqs = np.fft.fftfreq(sample_count, sample_period)
    order = np.argsort(bin_freqs)
    # (2) second-order Butterworth low-pass at 2 kHz
    num, den = signal.butter(2, 2000 / (sample_rate / 2.0), btype='lowpass', analog=False, output='ba')
    filtered = signal.filtfilt(num, den, samples)
    fig = pyplt.figure()
    axes = fig.add_subplot(211)
    timeline = np.arange(0, 1.0 * samples.size / sample_rate, sample_period)
    print(timeline.shape)
    print(samples.shape)
    print(filtered.shape)
    # (3) amplitude-spectrum periodogram
    freqs, power = signal.periodogram(samples, sample_rate, 'flattop', scaling='spectrum')
    print(samples.max())
    print(np.sqrt(power.max()))
if __name__ == '__main__':
    # Analyze 'music/<name>.wav' and write the per-period estimates to
    # 'data/<name>.csv'.  The numeric arguments look like (periods-per-second,
    # low cutoff Hz, high cutoff Hz) — confirm against analyze()'s signature,
    # which is defined above.
    finname = 'ReadyerrNot'
    analyze('music/'+finname+'.wav', 40, 400, 800, 'data/'+finname+'.csv')
| true |
99fc3bd5c4ba3025eadc8797d09e070f9e032598 | Python | Ahtaxam/Python_Programms | /cutting_str.py | UTF-8 | 623 | 4.3125 | 4 | [] | no_license | # Write a program that asks the user to enter a word that contains the letter a. The program
# should then print the following two lines: On the first line should be the part of the string up
# to and including the first a, and on the second line should be the rest of the string. Sample
# output is shown below:
# Enter a word: buffalo
# buffa
# lo
# Read a word and print it split at the first 'a': the part up to and
# including the 'a' on the first line, the remainder on the second.


def split_at_first_a(word):
    """Return (head, tail): *head* is everything up to and including the
    first 'a' in *word*, *tail* is the rest.

    If *word* contains no 'a', *head* is the whole word and *tail* is ''
    (the original loop left its index variable unbound in that case and
    crashed with a NameError).
    """
    before, sep, after = word.partition('a')
    return before + sep, after


print('Enter a word: ', end=' ')
word = input()
first, second = split_at_first_a(word)
print(first)
print(second)
| true |
21aefe5d4ce6c00a5f005021d68686ab594f998a | Python | loganzartman/termiformer | /demo.py | UTF-8 | 1,276 | 3.5625 | 4 | [] | no_license | import termiformer
def input_name_simple():
    """Prompt for a first and last name via the context-manager form API
    and return them joined as a full name."""
    answers = {}
    with termiformer.form(answers) as form:
        form.label("Name Input Form")
        form.text("first_name", label="First name")
        form.text("last_name", label="Last name")
    return "%s %s" % (answers["first_name"], answers["last_name"])
# functionally the same as above
def input_name():
    """Prompt for a first and last name by building a FormLayout explicitly
    (functionally identical to input_name_simple)."""
    form_layout = termiformer.FormLayout()
    form_layout.label("Name Input Form")
    form_layout.text("first_name", label="First name")
    form_layout.text("last_name", label="Last name")
    answers = {}
    termiformer.present(form_layout, answers)
    return "%s %s" % (answers["first_name"], answers["last_name"])
# functionally the same as above
def input_name_json():
    """Prompt for a first and last name from a JSON-described layout
    (functionally identical to input_name_simple)."""
    layout_spec = """
    [
      {
        "type": "label",
        "label": "Name Input Form"
      },
      {
        "type": "text",
        "name": "first_name",
        "label": "First name"
      },
      {
        "type": "text",
        "name": "last_name",
        "label": "Last name"
      }
    ]
    """
    answers = {}
    termiformer.present(termiformer.FormLayout(json_data=layout_spec), answers)
    return "%s %s" % (answers["first_name"], answers["last_name"])
if __name__ == "__main__":
    # NOTE: print() inserts a space between arguments, so this renders as
    # "Hi,  <name> !" (double space after the comma).
    print("Hi, ", input_name_json(), "!")
| true |
b3411c5ff47d3bb085d31eab406b160f03913d09 | Python | marcosllessa/teste_git | /projeto v1/simuladorNFe_v1.py | UTF-8 | 3,481 | 2.6875 | 3 | [] | no_license | import xml.etree.ElementTree as ET
from random import randint
import random
import connection
'''GERA CNPJ ALEATÓRIO PARA EMISSOR E DESTINATARIO'''
def cnpj(punctuation=False):
    """Generate a random Brazilian CNPJ number with valid check digits.

    The original signature was ``cnpj(self, punctuation=False)`` — a stray
    ``self`` on a module-level function — so callers writing ``cnpj(True)``
    bound ``True`` to ``self`` and silently got the unpunctuated form.
    With ``self`` removed, ``cnpj(True)`` returns ``'NN.NNN.NNN/NNNN-NN'``
    and the default returns the bare 14-digit string.
    """
    # 8 random digits plus the standard head-office branch suffix "0001".
    n = [random.randrange(10) for i in range(8)] + [0, 0, 0, 1]
    v = [2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6]
    # First check digit: weighted mod-11 over the reversed digits.
    s = sum(x * y for x, y in zip(reversed(n), v))
    d1 = 11 - s % 11
    if d1 >= 10:
        d1 = 0
    n.append(d1)
    # Second check digit: same scheme, now including d1.
    s = sum(x * y for x, y in zip(reversed(n), v))
    d2 = 11 - s % 11
    if d2 >= 10:
        d2 = 0
    n.append(d2)
    if punctuation:
        return "%d%d.%d%d%d.%d%d%d/%d%d%d%d-%d%d" % tuple(n)
    else:
        return "%d%d%d%d%d%d%d%d%d%d%d%d%d%d" % tuple(n)
# Random CNPJ for the issuer (emit) and the recipient (dest).  The join over
# range(1) is kept from the original: it yields exactly one number.
cnpj_e = ("\n".join([cnpj(True) for i in range(1)]))
cnpj_d = ("\n".join([cnpj(True) for i in range(1)]))
# Fetch a random product (EAN, description, price) from the database.
# NOTE: this rebinding shadows the imported `connection` module.
connection = connection.conexao()
r = str(randint(1,9999))
cursor = connection.cursor()
# `r` is generated locally (not user input), but a parameterized query
# ("... where id_gtin = %s") would still be safer than string concatenation.
cursor.execute("SELECT gtin, description,price from gtin_data where id_gtin =" + r + " and price is not null")
try:
    for gtin, description,price in cursor:
        EAN = gtin
        Prod = description
        price = price
        print('EAN: ', EAN, ' Prod: ', Prod, ' price: ', price)
    path = 'C:/Troca/gtin/python/nfe/'
    tree = ET.parse(path + 'schema.xml')
    root = tree.getroot()
    # Fill the NF-e (electronic invoice) XML template with the generated data.
    for ide in root.findall(".//ide"):
        for nNF in ide.findall("./nNF"):
            nNF.text = str(randint(1,9999))
            print('NF :', nNF.text)
    for emit in root.findall(".//emit"):
        for CNPJ in emit.findall("./CNPJ"):
            CNPJ.text = str(cnpj_e)
            print('CNPJ_EMIT :', CNPJ.text)
    for dest in root.findall(".//dest"):
        for CNPJ in dest.findall("./CNPJ"):
            CNPJ.text = str(cnpj_d)
            print('CNPJ_DEST :', CNPJ.text)
    for det in root.iter('det'):
        for prod in root.iter('prod'):
            for cprod in root.iter('cProd'):
                cprod.text = str(randint(1,9999))
                print('Codigo :', cprod.text)
            for ean in root.iter('cEAN'):
                ean.text = str(EAN)
                print('EAN :', ean.text)
            for xProd in root.iter('xProd'):
                xProd.text = str(Prod)
                print('Descrição :', xProd.text)
            for NCM in root.iter('NCM'):
                print('NCM :', NCM.text)
            for vUnCom in root.iter('vUnCom'):
                # Normalize the price string: decimal comma -> dot and strip
                # the 'R$' currency marker / surrounding whitespace.
                vUn = price
                vUn = vUn.replace(',','.')
                vUn = vUn.replace('R$', '')
                vUn = vUn.strip()
                vUnCom.text = str(vUn)
                print('valorUn :', vUnCom.text)
            for qCom in root.iter('qCom'):
                qCom.text = str(randint(1,29))
                qtd = int(qCom.text)
                print('Qtd :', qCom.text)
            for vProd in root.iter('vProd'):
                total = (float(vUnCom.text)*qtd)
                vProd.text = str(round(total,2))
                print('Total :',vProd.text)
            for eant in root.iter('cEANTrib'):
                eant.text = str(EAN)
                print('EANTrib :', eant.text)
            for qTrib in root.iter('qTrib'):
                qTrib.text = str(qtd)
                print('qTrib :',qTrib.text)
    tree.write(path + cnpj_e+'.xml')
except Exception:
    # When the SELECT matches no row, EAN/Prod/price are never bound and the
    # template filling above raises NameError.  The bare `except:` that was
    # here also swallowed KeyboardInterrupt/SystemExit; catching Exception
    # keeps the best-effort behaviour without hiding those.
    print('Produto sem preço')
| true |
dbf81b9aef35e58f262b02006d7308e316d4cc28 | Python | abhilash1998/BFS_exploration_and_path_planning | /exploration_r.py | UTF-8 | 25,119 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 12:56:40 2021
@author: abhi
"""
import numpy as np
import cv2
class exploration_r:
    """
    Breadth-first exploration over a 300-row RGB image grid.

    The image doubles as the state store: free cells are non-black, obstacle
    cells are painted black, and visited/frontier cells are painted
    (200, 200, 0).  Search row 0 maps to image row 299 (the y axis is
    flipped for display).

    Each state is also identified by a 6-character "score" string
    ('xxxyyy', zero padded) used as dictionary keys and, converted to int,
    as entries of the expanded/obstacle lists.
    """

    def __init__(self, start, goal):
        """
        Store the goal state and start state and initialise bookkeeping.

        Parameters
        ----------
        start : list
            Starting point coordinates.
        goal : list
            Goal point coordinates.
        """
        self.ground_truth = {}
        self.obstacle = []               # int scores of obstacle cells
        self.expanded = []               # int scores of expanded cells
        self.parent = []
        self.parent_orignal_data = {}    # child score -> parent score (name kept for compatibility)
        self.start = start
        self.frontier = [self.start]     # FIFO queue of [x, y] nodes awaiting expansion
        self.frontier_string = []
        self.cost = 0
        self.goal = goal
        self.data_with_string = {}       # score -> [x, y]
        self.current_score = "00"        # score of the node being expanded

    def obstacle_prone_area(self, image):
        """
        Return False when either the start or the goal lies on an obstacle
        (black) pixel of *image*, True otherwise.
        """
        start_x, start_y = self.start[0], self.start[1]
        goal_x, goal_y = self.goal[0], self.goal[1]
        black = np.array([0, 0, 0])
        if (np.array_equiv(image[299 - goal_x, goal_y, :], black)
                or np.array_equiv(image[299 - start_x, start_y, :], black)):
            return False
        return True

    def obstacles_form(self, image):
        """
        Paint every obstacle (ellipse, circle, slanted rectangle, polygon
        and C-shape) onto *image*, recording their cells in self.obstacle.
        """
        major_axis = 60
        minor_axis = 30
        c_y = 246
        c_x = 145
        c_y1 = 90
        c_x1 = 70
        radius = 35
        for i in range(len(image)):
            for j in range(len(image[0])):
                self.ellipse(image, major_axis, minor_axis, i, j, c_x, c_y)
                self.circle(image, radius, i, j, c_x1, c_y1)
                self.slanted_rect(image, i, j)
                self.polygon(image, i, j)
                self.c_shape(image, i, j)

    def goal_reached(self):
        """
        Return True (and print a notice) when the node currently being
        expanded is the goal, else False.

        As in the original, this also caches self.start_score /
        self.goal_score and registers the start node as a side effect.
        """
        self.start_score = self.string(self.start[0], self.start[1])
        self.data_with_string[self.start_score] = self.start
        self.goal_score = self.string(self.goal[0], self.goal[1])
        if int(self.current_score) == int(self.goal_score):
            print("goal_reached")
            return True
        return False

    def string(self, pos_0, pos_1):
        """
        Encode a state as a 6-character zero-padded string,
        e.g. (5, 42) -> '005042'.
        """
        return str(pos_0).zfill(3) + str(pos_1).zfill(3)

    def _step(self, image, pos_0, pos_1, d0, d1):
        """
        Shared body of the eight *_move methods: step from (pos_0, pos_1)
        by (d0, d1).

        If the target pixel is an obstacle (black) or already visited
        (200, 200, 0), the image is returned untouched; otherwise the
        target's parent link and coordinates are recorded, it is appended
        to the frontier and painted as visited.

        Returns (image, new_pos_0, new_pos_1) in both cases.
        """
        parent = self.string(pos_0, pos_1)
        pos_0 += d0
        pos_1 += d1
        score = self.string(pos_0, pos_1)
        pixel = image[299 - pos_0, pos_1, :]
        if np.array_equiv(pixel, np.array([0, 0, 0])) or \
                np.array_equiv(pixel, np.array([200, 200, 0])):
            return image, pos_0, pos_1
        self.parent_orignal_data[score] = parent
        self.data_with_string[score] = [pos_0, pos_1]
        image[299 - pos_0, pos_1, :] = 200, 200, 0
        self.frontier.append([pos_0, pos_1])
        # kept from the original: callers receive a uint8 copy
        image = image.astype(np.uint8)
        return image, pos_0, pos_1

    def left_move(self, image, pos_0, pos_1, cost):
        """Expand one cell to the left.  *cost* is kept for future Dijkstra
        support.  Returns None when the move would leave the grid
        (original contract)."""
        if pos_1 > 0:
            return self._step(image, pos_0, pos_1, 0, -1)

    def right_move(self, image, pos_0, pos_1, cost):
        """Expand one cell to the right (see left_move)."""
        if pos_1 < len(image[1]) - 1:
            return self._step(image, pos_0, pos_1, 0, 1)

    def down_move(self, image, pos_0, pos_1, cost):
        """Expand one cell downward in grid coordinates (see left_move)."""
        if pos_0 < len(image) - 1:
            return self._step(image, pos_0, pos_1, 1, 0)

    def up_move(self, image, pos_0, pos_1, cost):
        """Expand one cell upward in grid coordinates (see left_move)."""
        if pos_0 > 0:
            return self._step(image, pos_0, pos_1, -1, 0)

    def down_right_move(self, image, pos_0, pos_1, cost):
        """Expand one cell diagonally down-right (see left_move)."""
        if pos_0 < len(image) - 1 and pos_1 < len(image[0]) - 1:
            return self._step(image, pos_0, pos_1, 1, 1)

    def down_left_move(self, image, pos_0, pos_1, cost):
        """Expand one cell diagonally down-left (see left_move)."""
        if pos_0 < len(image) - 1 and pos_1 > 0:
            return self._step(image, pos_0, pos_1, 1, -1)

    def up_left_move(self, image, pos_0, pos_1, cost):
        """Expand one cell diagonally up-left (see left_move)."""
        if pos_0 > 0 and pos_1 > 0:
            return self._step(image, pos_0, pos_1, -1, -1)

    def up_right_move(self, image, pos_0, pos_1, cost):
        """Expand one cell diagonally up-right (see left_move)."""
        if pos_0 > 0 and pos_1 < len(image[1]) - 1:
            return self._step(image, pos_0, pos_1, -1, 1)

    def expanding(self, pos_0, pos_1):
        """Mark (pos_0, pos_1) as expanded (visited), at most once."""
        score = int(self.string(pos_0, pos_1))
        if score not in self.expanded:
            self.expanded.append(score)

    def frontier_list(self):
        """
        Pop nodes off the FIFO frontier until one is found that is neither
        already expanded nor an obstacle, and return its coordinates.

        Bug fix: the original recursed here but discarded the recursive
        result, so it could return an already-expanded node while
        self.current_score pointed at a different one.  The loop always
        returns the node matching self.current_score.

        Raises IndexError if the frontier runs dry (as the original did).
        """
        while True:
            pos_0, pos_1 = self.frontier.pop(0)
            self.current_score = self.string(pos_0, pos_1)
            score = int(self.current_score)
            if score not in self.expanded and score not in self.obstacle:
                return pos_0, pos_1

    def circle(self, image, radius, i, j, c_x, c_y):
        """
        Obstacle test for a circle: delegates to ellipse with equal axes.
        """
        self.ellipse(image, radius, radius, i, j, c_x, c_y)

    def ellipse(self, image, major_axis, minor_axis, i, j, c_x, c_y):
        """
        If (i, j) lies inside the ellipse centred at (c_x, c_y) — minor
        axis along i, major along j — paint it black and record it as an
        obstacle.
        """
        if ((i - c_x) / minor_axis) ** 2 + ((j - c_y) / major_axis) ** 2 <= 1:
            image[299 - i, j, :] = 0, 0, 0
            self.obstacle.append(int(self.string(i, j)))

    def slanted_rect(self, image, i, j):
        """Obstacle test for the rotated rectangle (four half-planes)."""
        if (-0.7 * j + 1 * i) >= 73.4 and (i + 1.42814 * j) >= 172.55 and \
                (-0.7 * j + 1 * i) <= 99.81 and (i + 1.42814 * j) <= 429.07:
            image[299 - i, j, :] = 0, 0, 0
            self.obstacle.append(int(self.string(i, j)))

    def c_shape(self, image, i, j):
        """Obstacle test for the C-shape (union of three axis-aligned bars)."""
        if (200 <= j <= 210 and 230 <= i <= 280) or \
                (200 <= j <= 230 and 270 <= i <= 280) or \
                (200 <= j <= 230 and 230 <= i <= 240):
            image[299 - i, j, :] = 0, 0, 0
            self.obstacle.append(int(self.string(i, j)))

    def polygon(self, image, i, j):
        """Obstacle test for the concave polygon (two convex pieces)."""
        if (i + j >= 391 and j - i <= 265 and i + 0.81 * j <= 425.66 and
                i + 0.17 * j <= 200 and 0.89 * j - i >= 148.75) or \
                (13.5 * j + i <= 5256.72 and 1.43 * j - i >= 368.82 and
                 i + 0.81 * j >= 425.66):
            image[299 - i, j, :] = 0, 0, 0
            self.obstacle.append(int(self.string(i, j)))

    def backtracking(self, image, out, image_list):
        """
        Walk parent links from the goal back to the start, painting the
        path white.

        Parameters
        ----------
        image : np.array
            State image; path cells are painted (255, 255, 255) in place.
        out : np.array
            Video writer handle (unused here; kept for interface parity).
        image_list : list
            Frame list; the image is appended once per path cell.
            NOTE: the same array object is appended each time, so all
            appended frames alias the final image.

        Returns
        -------
        path : list
            Cells from the goal back towards the start.
        image_list : list
            The frame list.
        """
        loop = self.parent_orignal_data[self.goal_score]
        path = [self.goal]
        while int(self.start_score) != int(loop):
            loop = self.parent_orignal_data[loop]
            index = self.data_with_string[loop]
            path.append(index)
            image[299 - index[0], index[1], :] = 255, 255, 255
            image_list.append(image)
        return path, image_list
| true |
80df912f509275d698356e0cfe30afc9dc1bba0e | Python | jswinther/dtu-beng-computer-engineering-projects | /intro-to-machine-learning-projects/Machine_Learning_EksamenScripts-master/ImpurityGain.py | UTF-8 | 2,043 | 3 | 3 | [] | no_license | from massImport import *
# Number of observations belonging to each class.
n = [263, 359, 358]
######################################
# Change this matrix.
# Row k = class k; column 0 / 1 = how many class-k observations fall into the
# first branch of split 1 / split 2 respectively (see Impurity/Accuracy below).
InputMatrix = np.array([[143.0, 223.0],
[137.0, 251.0],
[54.0, 197.0]])
######################################
def Impurity(Matrice, obs):
    """Classification-error impurity gain of two candidate binary splits.

    Parameters
    ----------
    Matrice : (k, 2) array
        Column c holds, per class, the count of observations that satisfy
        split c's condition (the first branch).
    obs : length-k sequence
        Total observations per class.

    Returns
    -------
    np.ndarray
        [gain of split 1, gain of split 2], using the classification-error
        impurity I = 1 - max_class / N.

    Note: the original duplicated the computation for each split and left a
    debug ``print(Gain1)`` in place; both are gone.
    """
    obs = np.asarray(obs)

    def split_gain(col):
        # Branch counts: column 0 = condition holds, column 1 = remainder.
        branch = np.zeros(shape=Matrice.shape)
        branch[:, 0] = Matrice[:, col]
        branch[:, 1] = obs - Matrice[:, col]
        n_total = np.sum(branch)
        n_leaf = np.sum(branch, axis=0)
        i_root = 1 - np.max(obs) / n_total
        i_leaf = 1 - np.max(branch, axis=0) / n_leaf
        return i_root - np.sum((n_leaf / n_total) * i_leaf)

    return np.asarray([split_gain(0), split_gain(1)])
def Accuracy(Matrice, obs):
    """Classification accuracy of two candidate binary splits.

    Same inputs as Impurity().  For each split, accuracy is the sum of the
    majority-class counts of the two branches divided by the total count.

    Note: the original computed (and never used) the impurity quantities
    Ir/Ivk for both splits, and divided split 2 by split 1's total Nr1 —
    harmless only because both branch matrices always sum to sum(obs).
    Both issues are fixed here.
    """
    obs = np.asarray(obs)

    def split_accuracy(col):
        branch = np.zeros(shape=Matrice.shape)
        branch[:, 0] = Matrice[:, col]
        branch[:, 1] = obs - Matrice[:, col]
        n_total = np.sum(branch)  # always equals sum(obs)
        return np.sum(np.max(branch, axis=0)) / n_total

    return np.asarray([split_accuracy(0), split_accuracy(1)])
# Report gain/accuracy for the matrix defined at the top of the file.
print("Impurity Gain for the first and second split", Impurity(InputMatrix, n))
print("Accuracy for the first and second split", Accuracy(InputMatrix, n))
# Spring 2018 Question 11
# 2-class example: rows = classes, columns = split-branch counts.
X = np.asarray([[23, 9],
[8, 16]])
nx = [32, 24]
print(X)
print("Impurity Gain for the first and second split", Impurity(X, nx))
| true |
b6586d40394d8372df4699c6808107f44ee64bc5 | Python | lincolnoliveira/wttd | /eventex/core/tests/test_view_home.py | UTF-8 | 546 | 2.578125 | 3 | [] | no_license | from django.test import TestCase
class HomeTest(TestCase):
    """Tests for the landing page (GET /)."""

    def setUp(self):
        self.response = self.client.get("/")

    def test_get_code(self):
        """GET / should answer with status code 200."""
        self.assertEqual(200, self.response.status_code)

    def test_template(self):
        """The page should be rendered from index.html."""
        self.assertTemplateUsed(self.response, "index.html")

    def test_link_inscricao(self):
        """The page should contain a link to the subscription page."""
        self.assertContains(self.response, 'href="/inscricao/"')
| true |
57a36e4f7d82eb084488259d5d0ab25d444b67fb | Python | edmontdants/ImageProcessing | /Tensorflow/.ipynb_checkpoints/image_augmentation-checkpoint.py | UTF-8 | 788 | 2.828125 | 3 | [] | no_license | def image_augmentation(rescale_value,rotation_range,width_shift_range, height_shift_range, shear_range, zoom_range, horizontal_flip, fill_mode):
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# rotation_range= 0-180 degree
# height_shift_range = moving the subject inframe of the picture 0-100%
# shear_range = offset vertically or horizontally 0-100%
# fillmode = fills any pixels that had lost 'nearest', other
train_datagen = ImageDataGenerator(rescale=1/rescale_value, rotation_range= rotation_range,
width_shift_range= width_shift_range, height_shift_range= height_shift_range, shear_range= shear_range,
zoom_range= zoom_range, horizontal_flip= horizontal_flip, fill_mode= fill_mode)
return train_datagen | true |
2b95a82cecb1bd58fe1bba2ec3f0bd503934e5b5 | Python | daniel-reich/ubiquitous-fiesta | /Mv5qSgZKTLrLt9zzW_5.py | UTF-8 | 188 | 2.671875 | 3 | [] | no_license |
def get_drink_ID(flavor, ml):
a = ''.join((ch if ch in '0123456789 -e' else '') for ch in ml)
words = ''.join([x[0:3] for x in flavor.split()])
return str(words).upper() + a
| true |
ee23eb7cbe982e0bd6796f0cf53762f926ed6134 | Python | SonOfLilit/gameboyoflife | /src/rle.py | UTF-8 | 4,099 | 2.75 | 3 | [] | no_license | #Written by Sean McKiernan (Mekire)
#
# Minor changes 2013 Aur Saraf and friends.
import numpy
import re
LEVEL_DATA_RE = re.compile(r"# Player: (\d+,\d+) Door: (\d+,\d+)$")
#reads pattern files in the "Life 1.05", "Life 1.06", and "Run-Length-Encoded" format
def load_rle(path):
def get_info(line,info):
if len(line) < 80:
info.append(line[3:-1])
else:
splitat = line.rfind(" ",3,80)
if splitat != -1:
info.append(line[3:splitat])
info.append(line[1+splitat:-1])
else:
info.append(line[3:80])
info.append(line[80:-1])
#parses 'Run-Length-Encoded' pattern files
block_start = (0,0)
row = 0
col = 0
colchar = ""
colint = 0
done = False
info = []
player_position = None
door_position = None
with open(path, "r") as lif:
structure = set()
for line in lif:
match = LEVEL_DATA_RE.match(line)
if match:
player_position, door_position = match.groups()
player_position = map(int, player_position.split(","))
door_position = map(int, door_position.split(","))
if line[:2] in ("#P","#R"):
nums = line[3:].split(" ")
block_start = (int(nums[0]),int(nums[1]))
row = 0
elif line[:2] in ("#D","#N","#C"):
get_info(line,info)
elif line[0] == "x":
splitat = line.rfind("rule")
if splitat != -1:
info.append("Bounding box: " + line[:splitat-2])
info.append("Rule: " + line[splitat+6:-1])
else:
info.append("Bounding box: " + line[:-1])
elif line[0] != '#' and ("$" in line or "!" in line):
for char in line:
if "0" <= char <= "9":
colchar += char
elif char == "b":
if colchar:
col += int(colchar)
else:
col += 1
colchar = ""
elif char == "o":
if colchar:
for i in range(int(colchar)):
structure |= set(((block_start[0]+col,block_start[1]+row),))
col += 1
else:
structure |= set(((block_start[0]+col,block_start[1]+row),))
col += 1
colchar = ""
elif char == "$":
if colchar:
row += int(colchar)
else:
row += 1
colchar = ""
col = 0
elif char == "!":
done = True
if done:
break
if player_position is None or door_position is None:
assert False, "Level should contain level informaion: player and door positions"
# return(structure,info)
return structure, player_position, door_position
BOARD_LENGTH = 1000
def load(path):
"""
returns (board, player_position, door_position)
"""
pattern, player_position, door_position = load_rle(path)
x_list, y_list = zip(*pattern)
size = numpy.array([max(x_list) + 1, max(y_list) + 1])
# we always load a BOARD_LENGTH*BOARD_LENGTH (excluding the empty edges) board
assert max(size) <= BOARD_LENGTH
board = numpy.zeros((BOARD_LENGTH + 2, BOARD_LENGTH + 2), dtype=numpy.int)
padding = (BOARD_LENGTH - size) / 2 + 1
for x, y in pattern:
board[x + padding[0], y + padding[1]] = 1
player_position = list(padding + player_position)
door_position = list(padding + door_position)
return board, player_position, door_position
| true |
faf593b98e8f4d75bcccf3c0a238dcd05a14eb4a | Python | PythPeri2017/PyProgFall2017 | /pitonozavr/l07/add_ops.py | UTF-8 | 1,414 | 3.75 | 4 | [] | no_license | # group = input("Введите состав подразделения, солдат: ")
# print(group)
# group_list = group.split(", ") #split() - разбивал строку
# print(group_list)
# final_group = " <3 ".join(group_list) #join() соединяет в строку
# print(final_group)
# numbers = [3, 11, 5, 30]
# print(len(numbers))
# print(max(numbers))
# print(min(numbers))
# numbers.sort()
# print(numbers)
# guess = int(input("Отгадай число, мальчонка: "))
# print(guess in numbers)
# if guess in numbers:
# print("А ю лаки мэн?!")
# else:
# print("Пур бастард!")
# print(range(0, 19))
# print(guess in range(0, 19))
# actress = {"Скарлет Йоханнсеннн", "Галя Гадот", "Маргарита"}
# for person in actress:
# if person == "Галя Гадот":
# print("не так себе " + person)
# else:
# print("так себе " + person)
# for number in range(0, 100, 3):
# print(number)
# phrases = {
# "Ленин": "Учиться и т.д.",
# "Стетхем": "Ин вино веритас",
# "Дядя Бен": "Чем больше, тем больше",
# "Фортран бой": "Нормально делай - нормально будет"
# }
# for phr in phrases.items():
# print(phr)
dishes = ["Цезарь", "Тирамису", "Овсянка"]
prices = [100, 50, 1000]
# Print the menu, pairing each dish with its price.
for dish_name, dish_price in zip(dishes, prices):
    print(dish_name, " по цене ", dish_price)
7f53ae9842499f77f45b12bd98aef410fa661d7f | Python | JustinSelig/pen_to_paper | /simulator.py | UTF-8 | 4,550 | 3.0625 | 3 | [
"MIT"
] | permissive | import math
import msgpackrpc
import pygame
import random
import sys
import socket
import time
import threading
from math import cos, sin, pi, sqrt, atan2
from pygame.locals import *
screen_size_x = 1920  # canvas width in pixels; also the x of the right anchor
screen_size_y = 1080  # canvas height in pixels
DIST_PER_TICK = 1  # string-length change (pixels) per motor tick
d2r = pi/180  # degrees -> radians conversion factor
# https://gist.github.com/xaedes/974535e71009fa8f090e
class Geometry(object):
    """Planar-geometry helpers."""

    @staticmethod
    def circle_intersection(circle1, circle2):
        """Intersection points of two circles given as (x, y, radius).

        Returns a pair of (x, y) points, or None when the circles do not
        meet at two computable points (separate, one nested inside the
        other, or coincident).
        """
        cx1, cy1, rad1 = circle1
        cx2, cy2, rad2 = circle2
        # Vector between the two centres and its length.
        run, rise = cx2 - cx1, cy2 - cy1
        dist = sqrt(run * run + rise * rise)
        separate = dist > rad1 + rad2           # too far apart
        nested = dist < abs(rad1 - rad2)        # one contained in the other
        coincident = dist == 0 and rad1 == rad2  # infinitely many solutions
        if separate or nested or coincident:
            return None
        # Distance from centre 1 to the chord, and half the chord length.
        along = (rad1 * rad1 - rad2 * rad2 + dist * dist) / (2 * dist)
        half_chord = sqrt(rad1 * rad1 - along * along)
        mid_x = cx1 + along * run / dist
        mid_y = cy1 + along * rise / dist
        return (
            (mid_x + half_chord * rise / dist, mid_y - half_chord * run / dist),
            (mid_x - half_chord * rise / dist, mid_y + half_chord * run / dist),
        )
class Simulator:
    """Simulated two-string plotter.

    A pen hangs from two strings anchored at the top-left (0, 0) and
    top-right (screen_size_x, 0) screen corners; ``left`` and ``right``
    are the current string lengths (circle radii), and the pen sits at
    the lower intersection of the two circles.
    """
    CIRCLE_COLOR = (220, 220, 220)
    LINE_COLOR = (0, 0, 0)
    def __init__(self, screen):
        # Start with both strings long enough to put the pen mid-screen.
        self.starting_radius = screen_size_x/2 + 20
        self.left = self.starting_radius
        self.right = self.starting_radius
        # Fix: parenthesized print works on both Python 2 and 3; the
        # original `print self.starting_radius` is Python-2-only syntax.
        print(self.starting_radius)
        self.screen = screen
        self.dots = set()  # pixels inked while the pen was down
        self.is_pen_down = False
    def calc_intersect(self):
        """Return the pen position (lower intersection of the two string
        circles), or None when the circles do not intersect."""
        intersect = Geometry.circle_intersection((0, 0, self.left), (screen_size_x, 0, self.right))
        if intersect:
            if len(intersect) > 1:
                # Pick the point with the larger y -- the one below the anchors.
                return sorted(intersect, key=lambda tup: tup[1])[1]
            else:
                return intersect
        else:
            return None
    def pen_down(self):
        """Start inking dots at the pen position on every update."""
        self.is_pen_down = True
    def pen_up(self):
        """Stop inking dots."""
        self.is_pen_down = False
    def tick_right(self, direction):
        """Lengthen (+1) or shorten (-1) the right string by one tick."""
        self.right += DIST_PER_TICK * direction
    def tick_left(self, direction):
        """Lengthen (+1) or shorten (-1) the left string by one tick."""
        self.left += DIST_PER_TICK * direction
    def tick(self, left, right):
        """Step both strings at once; each argument is a signed tick count."""
        self.left += DIST_PER_TICK * left
        self.right += DIST_PER_TICK * right
    def reset(self):
        """Restore the starting string lengths and erase all inked dots."""
        self.left = self.starting_radius
        self.right = self.starting_radius
        self.dots = set()
    def update(self):
        """Redraw guide circles, the two strings and every inked dot;
        record the current pen position when the pen is down."""
        pygame.draw.circle(self.screen, Simulator.CIRCLE_COLOR, (0, 0), self.left, 1)
        pygame.draw.circle(self.screen, Simulator.CIRCLE_COLOR, (screen_size_x, 0), self.right, 1)
        intersect = self.calc_intersect()
        if intersect:
            pygame.draw.aaline(self.screen, Simulator.LINE_COLOR, (0,0), intersect, 10)
            pygame.draw.aaline(self.screen, Simulator.LINE_COLOR, (screen_size_x,0), intersect, 10)
            if self.is_pen_down:
                (x, y) = intersect
                self.dots.add((int(x), int(y)))
        for dot in self.dots:
            pygame.draw.circle(self.screen, (255,0,0), dot, 1, 1)
class SimulatorInterface(object):
    """RPC facade: exposes the Simulator controls as the flat methods
    served over msgpack-rpc (see receiver below)."""
    def __init__(self, simulator):
        self.simulator = simulator
    def pen_down(self):
        self.simulator.pen_down()
    def pen_up(self):
        self.simulator.pen_up()
    def tick_right(self, direction):
        self.simulator.tick_right(direction)
    def tick_left(self, direction):
        self.simulator.tick_left(direction)
    def tick(self, left, right):
        self.simulator.tick(left, right)
    def reset(self):
        self.simulator.reset()
def receiver(simulator):
    """Serve `simulator` over msgpack-rpc on localhost:18800.
    Blocks forever, so main() runs it on a background thread."""
    server = msgpackrpc.Server(SimulatorInterface(simulator))
    server.listen(msgpackrpc.Address("localhost", 18800))
    server.start()
def main():
    """Open the pygame window, start the RPC receiver thread, and run
    the draw/event loop until the window is closed."""
    pygame.init()
    screen = pygame.display.set_mode((screen_size_x, screen_size_y), 0, 32)
    simulator = Simulator(screen)
    # RPC commands arrive on this background thread and mutate `simulator`.
    t = threading.Thread(target=receiver, args=(simulator,))
    t.start()
    while True:
        screen.fill((255, 255, 255))
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
        simulator.update()
        pygame.display.update()
        time.sleep(0.001)  # throttle the redraw loop
if __name__ == '__main__':
    main()
| true |
6f34b469dea71d8ef89f75fac2d8fd9f95c99bfe | Python | Aasthaengg/IBMdataset | /Python_codes/p03208/s701304856.py | UTF-8 | 225 | 2.703125 | 3 | [] | no_license | N, K = list(map(int, input().split()))
# Sort the N heights (keeping the 0 sentinel from the original list) and
# print the smallest max-min spread over any window of K consecutive
# sorted values; windows never include the sentinel at index 0.
heights = sorted([0] + [int(input()) for _ in range(N)])
best = heights[-1]
for hi in range(K, N + 1):
    best = min(best, heights[hi] - heights[hi - K + 1])
print(best)
924d898c9a96af1965d7f2d5c2e9062852cef9c9 | Python | mgeshelley/group_10 | /project_code/old_files/input_func.py | UTF-8 | 11,170 | 3.59375 | 4 | [] | no_license | # This file contains a function to read in the input from the command line
import numpy as np
def give_file_names(g, extension=''):
    """Build the (.sp, .sd, .int) table-file paths for pairing strength g.

    extension -- optional filename prefix, e.g. 'myrun_'.
    Returns (sp_basis_filename, SD_filename, tbme_filename).
    """
    folder = 'table_files/'
    sp_basis_filename = folder + extension + '3s.sp'
    sd_filename = folder + extension + '3s_slater_det.sd'
    tbme_filename = '{0}{1}pairing_g{2}.int'.format(folder, extension, g)
    return sp_basis_filename, sd_filename, tbme_filename
def manual_input(model='pairing'):
"""
"Standard" input parameters (when to lazy to give the input on the command line)
Input (None)
Returns
nmin: int,
minumum n
nmax: int,
maximum n
lmin: int,
minumum l
lmax: int,
maximum l
jmin: int,
minumum j
jmax: int,
maximum j
isos: string,
the species of the nucleons for the simulation
g: float,
the pairing constant
N_particles: int,
the number of particles in the simulation
"""
if model=='pairing':
nmin = 0
nmax = 3
lmin = 0
lmax = 0
jmin = 1
jmax = 1
isos = 'n'
g = 1
N_particles = 4 # read the number of particles in the system
if model=='harmonic':
#do stuff
n = 0
#...
return nmin, nmax, lmin, lmax, jmin, jmax, isos, g, N_particles
'''
# These classes are needed for the function below.
class Error(Exception):
    """Base class for this module's input-validation exceptions."""
    pass
class ValueBelowZeroError(Error):
    """Raised when the input value is below zero"""
    pass
class minValueLargerThanMaxError(Error):
    """Raised when the min value is larger than the max value"""
    pass
class CaseError(Error):
    """Raised when the input value is not 'pairing' or 'sd'"""
    pass
class FileNameError(Error):
    """Raised when a yes/no answer is not 'y' or 'n'."""
    pass
class ExtensionError(Error):
    """Raised when a file-name extension contains non-letter characters."""
    pass
'''
def command_line_input():
"""
This function asks the user to provide the input parameter on the command line.
It makes sure that the provided input is of correct type and within the allowed limits.
If the input is not correct the program gives an error message and asks the user to provide
the input again.
Input (None)
Returns
nmin: int,
minumum n
nmax: int,
maximum n
lmin: int,
minumum l
lmax: int,
maximum l
jmin: int,
minumum j
jmax: int,
maximum j
isos: string,
the species of the nucleons for the simulation
g: float,
the pairing constant
N_particles: int,
the number of particles in the simulation
"""
standard_input = True
while True:
try:
yn = raw_input("Whould you like standard input? \ny/n: ")
if yn not in ['y', 'n']:
raise FileNameError
if yn == 'y':
nmin, nmax, lmin, lmax, jmin, jmax, isos, g, N_particles = manual_input()
if yn == 'n':
standard_input = False
except FileNameError:
print("\nERROR: Please write either 'y' or 'n'.\n")
else:
break
if not standard_input:
while True:
try:
nmin, nmax = map(int,raw_input("Write: nmin nmax ").split(' '))
if np.logical_or(nmin < 0, nmax < 0):
raise ValueBelowZeroError
if nmin > nmax:
raise minValueLargerThanMaxError
except ValueBelowZeroError:
print("\nERROR: Please provide numbers above zero.\n")
#better try again... Return to the start of the loop
continue
except ValueError:
print("\nERROR: Please provide integers (without whitespace behind the last number).\n")
#better try again... Return to the start of the loop
continue
except minValueLargerThanMaxError:
print("\nERROR: The maximum value must be larger than the minimum value.\n")
#better try again... Return to the start of the loop
continue
else:
#age was successfully parsed!
#we're ready to exit the loop.
break
while True:
try:
lmin, lmax = map(int,raw_input("Write: lmin lmax ").split(' '))
if np.logical_or(lmin < 0, lmax < 0):
raise ValueBelowZeroError
if lmin > lmax:
raise minValueLargerThanMaxError
except ValueBelowZeroError:
print("\nERROR: Please provide numbers above zero.\n")
continue
except ValueError:
print("\nERROR: Please provide integers (without whitespace behind the last number).\n")
continue
except minValueLargerThanMaxError:
print("\nERROR: The maximum value must be larger than the minimum value.\n")
continue
else:
break
while True:
try:
jmin, jmax = map(int,raw_input("Write: 2jmin 2jmax ").split(' '))
if np.logical_or(jmin < 0, jmax < 0):
raise ValueBelowZeroError
if jmin > jmax:
raise minValueLargerThanMaxError
except ValueBelowZeroError:
print("\nERROR: Please provide numbers above zero.\n")
continue
except ValueError:
print("\nERROR: Please provide integers (without whitespace behind the last number).\n")
continue
except minValueLargerThanMaxError:
print("\nERROR: The maximum value must be larger than the minimum value.\n")
continue
else:
break
while True:
try:
isos = raw_input("Write: isospin species ('n' or 'np' without whitespace behind) ")
if isos not in ['n', 'np']:
raise IsosError
except IsosError:
print("\nERROR: Please write either 'n' or 'np'.\n")
else:
break
while True:
try:
g = float(raw_input("Write: g "))
except ValueError:
print("\nERROR: Please provide g as a float / real number (without whitespace behind the last number).\n")
else:
break
while True:
try:
N_particles = int(raw_input("Write: N_particles "))
except ValueBelowZeroError:
print("\nERROR: Please provide numbers above zero.\n")
continue
except ValueError:
print("\nERROR: Please provide the number of particles as an integer (without whitespace behind the last number).\n")
continue
else:
break
while True:
try:
file_type = raw_input("\nWould you like standard file names? \nThe standard file names will be: \n'3s_slater_det.sd', '3s.sp' and 'pairing_g1.int' for g=1 \n y/n: ")
if file_type not in ['y', 'n']:
raise FileNameError
if file_type == 'y':
sp_basis_filename, SD_filename, tbme_filename = give_file_names(g)
if file_type == 'n':
extension = raw_input("Provide your extension to the file names: ")
if not extension.isalpha():
raise ExtensionError
sp_basis_filename, SD_filename, tbme_filename = give_file_names(g, extension+'_')
#print sp_basis_filename, SD_filename, tbme_filename
except FileNameError:
print("\nERROR: Please write either 'y' or 'n'.\n")
except ExtensionError:
print("\nERROR: Please provide the extension for the file name as a string without special characters (only letters).\n")
else:
break
return nmin, nmax, lmin, lmax, jmin, jmax, isos, g, N_particles, sp_basis_filename, SD_filename, tbme_filename
'''
def command_line_input():
    """
    This function asks the user to provide the input parameters on the
    command line (Python 2: uses raw_input).  Invalid input prints an
    error message and re-prompts.
    Input (None)
    Returns
        N_particles: int,
            the number of particles in the simulation (must be > 0)
        case: string,
            model space, 'pairing' or 'sd'
        g: float,
            the pairing constant (0 when using the 'sd' default)
    """
    standard_input = True
    while True:
        try:
            yn = raw_input("Whould you like default input? \ny/n: ")
            if yn not in ['y', 'n']:
                raise FileNameError
            if yn == 'y':
                N_particles = 4
                case = 'sd'
                g = 0
                print("\nDefault inputs: ")
                print("N_particles = %d ") %N_particles
                print("case = %s \n") %case
            if yn == 'n':
                standard_input = False
        except FileNameError:
            print("\nERROR: Please write either 'y' or 'n'.\n")
        else:
            break
    if not standard_input:
        while True:
            try:
                N_particles = int(raw_input("Write: N_particles "))
                # Bug fix: the ValueBelowZeroError handler below was dead
                # code because nothing ever raised it -- reject
                # non-positive particle counts explicitly.
                if N_particles <= 0:
                    raise ValueBelowZeroError
            except ValueBelowZeroError:
                print("\nERROR: Please provide numbers above zero.\n")
                continue
            except ValueError:
                print("\nERROR: Please provide the number of particles as an integer (without whitespace behind the last number).\n")
                continue
            else:
                break
        while True:
            try:
                case = raw_input("Write: model space ('pairing' or 'sd' without whitespace behind) ")
                if case not in ['pairing', 'sd']:
                    raise CaseError
            except CaseError:
                print("\nERROR: Please write either 'pairing' or 'sd'.\n")
            else:
                break
        while True:
            try:
                g = float(raw_input("Write: g (if 'sd' write '0.0')"))
            except ValueError:
                print("\nERROR: Please provide g as a float / real number (without whitespace behind the last number).\n")
            else:
                break
    return N_particles, case, g
| true |
276df692c6e396e7078d0403b0aa702e6d6c9dc9 | Python | ajburjek/sister | /AdditiveUnscentedKalman.py | UTF-8 | 848 | 3.109375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 17 12:57:08 2019
@author: William Patton
Attempt at Kalman Filter
"""
import pylab as pl
import numpy as np
from pykalman import AdditiveUnscentedKalmanFilter
'Initialize Parameters'
def transition_function(X):
    # NOTE(review): stub -- the state-transition model is not implemented
    # ('...' is a no-op Ellipsis expression).
    ...
def observation_function(X):
    # NOTE(review): stub -- the observation model is not implemented.
    ...
transition_covariance = np.eye(2)
# NOTE(review): `something` is never defined -- this line raises NameError.
observation_covariance = np.eye(2) + something
initial_state_mean = [0, 0]
initial_state_covariance = [[1, 0.1], [ 0.1, 1]]
akf = AdditiveUnscentedKalmanFilter(transition_function,observation_function, \
    transition_covariance,observation_covariance,initial_state_mean, \
    initial_state_covariance)
# NOTE(review): `timesteps` and `states` are never defined in this script.
akf_state_estimates = akf.filter(timesteps,states)
pl.figure()
lines_true = pl.plot(states, color='b')
lines_akf = pl.plot(akf_state_estimates, color='g', ls='-.')
pl.show()
414d5889d55aaf27ace0231b2fd5239a5f972900 | Python | amrithajayadev/misc | /binary_trees/binary-searchtree_from-sorted_array.py | UTF-8 | 2,584 | 3.65625 | 4 | [] | no_license | from collections import deque
class TreeNode:
    """Binary-tree node; the stored attribute is `value`, not `val`."""
    def __init__(self, val=0, left=None, right=None):
        self.value = val  # note: parameter `val` is stored under `value`
        self.left = left
        self.right = right
def preorder_traversal(node):
    """Print node values root-left-right, space-separated (trailing space)."""
    if node is None:
        return
    print(node.value, end=" ")
    preorder_traversal(node.left)
    preorder_traversal(node.right)
def sortedArrayToBST(nums):
    """Build a height-balanced BST from the sorted `nums` and return its
    level-order listing (with None placeholders for absent children)."""
    root = create_tree_node(nums, 0, len(nums) - 1)
    return level_order_traversal(root)
def create_tree_node(nums, l, r):
    """Recursively build a balanced BST from nums[l..r]; return its root
    (None for an empty range)."""
    if l > r:
        return None
    mid = (l + r) // 2
    node = TreeNode(nums[mid])
    node.left = create_tree_node(nums, l, mid - 1)
    node.right = create_tree_node(nums, mid + 1, r)
    return node
def find_height(root):
    """Return the number of levels in the tree rooted at `root` (0 for empty)."""
    if root is None:
        return 0
    return 1 + max(find_height(root.left), find_height(root.right))
def get_level_nodes(node, h, node_list):
    """Append the values at depth `h` (1-based from `node`) to node_list.

    A missing subtree contributes a single None, regardless of the
    remaining depth (so levels are not fully padded)."""
    if node is None:
        node_list.append(None)
        return
    if h == 1:
        node_list.append(node.value)
        return
    get_level_nodes(node.left, h - 1, node_list)
    get_level_nodes(node.right, h - 1, node_list)
def level_order_traversal(root):
    """Collect every level's values (with None gaps) into one flat list."""
    collected = []
    for depth in range(1, find_height(root) + 1):
        get_level_nodes(root, depth, collected)
    return collected
def get_level_nodes1(node, i, l, nodelist):
    # NOTE(review): debug helper with several quirks -- it mutates
    # `nodelist` but the recursive return values are discarded, it only
    # returns `nodelist` on the branch where i == l, and there is no
    # None guard, so recursing past a leaf raises AttributeError.
    if i == l:
        nodelist.append(node)
        print(f"node value {node.value} at level:{i}")
        return nodelist
    get_level_nodes1(node.left, i + 1, l, nodelist)
    get_level_nodes1(node.right, i + 1, l, nodelist)
    return
def test_my_level_nodes(nums):
    # Ad-hoc driver: builds a balanced BST from `nums` and prints its
    # queue-based level-order traversal.
    l = 0
    r = len(nums) - 1
    root = create_tree_node(nums, l, r)
    node_list = []
    # print(get_level_nodes1(root, 0, 2, node_list))
    print(node_list)
    print(level_order_traversal_queue(root))
def level_order_traversal_queue(node):
    """Iterative BFS: return a list of per-level value lists.

    Returns None (the original early return) for an empty tree.

    Bug fixes vs. the original:
    - the left child was enqueued twice and the right child never visited;
    - the final all-None layer produced a trailing empty level list.
    """
    if not node:
        return
    levels = []
    queue = deque([node])
    while queue:
        level_vals = []
        for _ in range(len(queue)):
            cur = queue.popleft()
            if cur:
                level_vals.append(cur.value)
                queue.append(cur.left)
                queue.append(cur.right)  # was `cur.left` twice
        if level_vals:  # skip the layer of None children below the leaves
            levels.append(level_vals)
    return levels
# Ad-hoc smoke run: builds a 15-node balanced BST and prints traversals.
test_my_level_nodes(nums=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
# print(sortedArrayToBST(nums=[-10, -3, 0, 5, 9]))
46723b91be02ac78a790deae779edc7007355520 | Python | NguyenThao1912/CyberSecurity | /Ma Co Dien/Playfair_cipher.py | UTF-8 | 6,454 | 3.765625 | 4 | [] | no_license | class Playfair():
'''
Sets up a playfair cipher based on key supplied, used to encrypt and
decrypt.
'''
def __init__(self, text):
'''Builds cipher so that it can be used to encrypt and decrypt text.'''
self.text = text
'''Bảng chữ cái'''
self.alpha = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', \
'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', \
'x', 'y', 'z']
combo = list(self.text)
''' key + bảng chữ cái '''
combo += self.alpha
'''cipher chứa bảng khóa '''
cipher = []
for char in combo:
'''nếu kí tự CHƯA được thêm vào cipher và kí tự ở trong bảng chữa cái '''
if char not in cipher and char in self.alpha:
cipher.append(char)
else:
pass
'''Sinh ma trận 5-5 '''
cipher = [cipher[:5], cipher[5:10], cipher[10:15], cipher[15:20], \
cipher[20:25]]
self.cipher = cipher
def __str__(self):
'''Returns the cipher in grid form.'''
cipher = '\n'
for line in self.cipher:
for char in line:
cipher += (char + ' ')
cipher += '\n'
return cipher[:-1] + '\n'
def row(self, char):
'''Finds the row for a character in the cipher grid.'''
position = 0
counter = 0
for line in self.cipher:
if char in line:
position = counter
counter += 1
return position
def col(self, char):
'''Finds the column for a character in the cipher grid.'''
position = 0
for line in self.cipher:
counter = 0
for letter in line:
if char == letter:
position = counter
counter += 1
return position
def encrypt(key, message):
    '''Encrypt `message` with a Playfair cipher keyed on `key`.

    The message is normalized first: 'j' becomes 'i', characters outside
    the cipher alphabet are dropped, doubled letters are split with 'q',
    and an odd-length result is padded with 'q'.  Returns the encrypted
    digraphs joined by single spaces.

    Bug fixes vs. the original:
    - `mess.replace('j', 'i')` discarded the result (str.replace returns
      a new string), so every 'j' was silently lost instead of mapped;
    - the first character was kept even when it was not a letter.
    '''
    cipher = Playfair(key)
    clean = ''
    for char in message.replace('j', 'i'):
        if char not in cipher.alpha:
            continue
        if clean and clean[-1] == char:
            clean += 'q'  # break up doubled letters
        clean += char
    if len(clean) % 2 != 0:
        clean += 'q'  # pad to a whole number of digraphs
    pairs = [clean[i:i + 2] for i in range(0, len(clean), 2)]
    ciphertext = ''
    for a, b in pairs:
        if cipher.row(a) == cipher.row(b):
            # Same row: each letter becomes its right neighbour (wrapping).
            r = cipher.row(a)
            final_pair = (cipher.cipher[r][(cipher.col(a) + 1) % 5]
                          + cipher.cipher[r][(cipher.col(b) + 1) % 5])
        elif cipher.col(a) == cipher.col(b):
            # Same column: each letter becomes the one below (wrapping).
            c = cipher.col(a)
            final_pair = (cipher.cipher[(cipher.row(a) + 1) % 5][c]
                          + cipher.cipher[(cipher.row(b) + 1) % 5][c])
        else:
            # Rectangle rule: swap the two columns.
            final_pair = (cipher.cipher[cipher.row(a)][cipher.col(b)]
                          + cipher.cipher[cipher.row(b)][cipher.col(a)])
        ciphertext += final_pair + ' '
    return ciphertext[:-1]
def decrypt(key, text):
    ''' Decrypt Playfair ciphertext produced by encrypt() with the same key.

    `text` may be the space-separated digraph string returned by encrypt()
    or an already-split list of digraphs.  After decoding, a trailing 'q'
    pad and any 'q' inserted between doubled letters are stripped --
    genuine 'q' letters in the plaintext can therefore be lost.
    '''
    cipher = Playfair(key)
    plaintext = ''
    if type(text) == str:
        text = text.split(' ')
    # Decryption applies the encryption rules in reverse.
    for pair in text:
        final_pair = ''
        # same row: shift each letter one column left (wrapping)
        if cipher.row(pair[0]) == cipher.row(pair[1]):
            for i in pair:
                if cipher.col(i) == 0:
                    row = cipher.row(i)
                    final_pair += cipher.cipher[row][4]
                else:
                    row = cipher.row(i)
                    final_pair += cipher.cipher[row][cipher.col(i) - 1]
            plaintext += final_pair
        # same column: shift each letter one row up (wrapping)
        elif cipher.col(pair[0]) == cipher.col(pair[1]):
            for i in pair:
                if cipher.row(i) == 0:
                    final_pair += cipher.cipher[4][cipher.col(i)]
                else:
                    row = cipher.row(i)
                    final_pair += cipher.cipher[row - 1][cipher.col(i)]
            plaintext += final_pair
        # different row and column: rectangle rule, swap the columns
        else:
            first = [cipher.row(pair[0]), cipher.col(pair[0])]
            second = [cipher.row(pair[1]), cipher.col(pair[1])]
            final_pair = cipher.cipher[first[0]][second[1]] \
                         + cipher.cipher[second[0]][first[1]]
            plaintext += final_pair
    if plaintext[-1] == 'q':
        plaintext = plaintext[:-1]
    counter = 0
    final = ''
    # Strip the 'q' separators inserted between doubled letters.
    # NOTE(review): `plaintext[counter + 1]` can raise IndexError when the
    # last remaining character is 'q'; also raises on empty plaintext above.
    for i in plaintext:
        if i == 'q' and plaintext[counter - 1] == plaintext[counter + 1]:
            pass
        else:
            final += i
        counter += 1
    return final
if __name__ == '__main__':
    # Interactive demo: show the key square, then a round trip.
    print('Type in your key and message ')
    message = input('Your message : ')
    key = input('Your key : ')
    new_cipher = Playfair(key.lower())
    print(new_cipher)
    cipher = encrypt(key.lower(), message.lower())
    # NOTE(review): this rebinds the function name `decrypt` to its result;
    # harmless in this one-shot script, but the function is unusable afterwards.
    decrypt = decrypt(key.lower(), cipher)
    print('encrypt : ', cipher)
    print('decrypt : ', decrypt)
| true |
ab89502e83e05d5ef47d17d72c0bd9d53435ca31 | Python | rpytel1/multimedia-project | /preprocess_data.py | UTF-8 | 6,362 | 2.96875 | 3 | [] | no_license | import pandas as pd
import pickle
import gensim.downloader as api
import nltk
import numpy as np
# Downloads (on first use) and loads the GloVe vectors at import time -- slow.
model = api.load("glove-wiki-gigaword-100")
print(model['man'])  # NOTE(review): leftover debug print; dumps a 100-d vector at import
tokenizer = nltk.RegexpTokenizer(r'\w+')  # splits text into word-character runs
def apply_one_hot(df, categorical_features):
    """
    One-hot encode the given columns: each categorical column is removed
    and replaced by `col_value` indicator columns appended at the end.
    :param df: the dataset DataFrame
    :param categorical_features: column names to encode
    :return: the updated DataFrame (input is not mutated)
    """
    for feature in categorical_features:
        indicators = pd.get_dummies(df[feature], prefix=feature)
        df = pd.concat([df.drop(feature, axis=1), indicators], axis=1)
    return df
def make_category(data):
    """
    Convert the Category/Concept/Subcategory strings of each post into
    integer codes (pandas.factorize), keyed by post id.
    :param data: {pid: {'Category': ..., 'Concept': ..., 'Subcategory': ...}}
    :return: DataFrame with factorized columns plus the original Pid column
    """
    rows = [[pid, data[pid]['Category'], data[pid]['Concept'], data[pid]['Subcategory']]
            for pid in data.keys()]
    df = pd.DataFrame(rows, columns=['Pid', 'Category', 'Concept', 'Subcategory'])
    pid_col = df.pop('Pid')  # keep Pid aside so it is not factorized
    df = df.apply(lambda col: pd.factorize(col)[0])
    df['Pid'] = pid_col
    return df
def embedding(title):
    """Average the GloVe vectors of the words in `title`.

    Returns a (1, 100) numpy array; out-of-vocabulary words are skipped.

    Bug fix: the original used a bare ``except: ()`` that silently
    swallowed *every* exception (including KeyboardInterrupt); gensim
    raises KeyError for unknown words, so catch exactly that.

    NOTE(review): the divisor starts at 1, so the zero initial vector is
    counted as if it were a word (an apparent off-by-one) -- kept as-is
    to preserve downstream feature values; confirm before changing.
    """
    words = tokenizer.tokenize(title)
    final_emb = np.zeros((1, 100))
    count = 1
    for word in words:
        try:
            final_emb += np.array(model[word])
        except KeyError:
            pass  # out-of-vocabulary word
        else:
            count += 1
    return final_emb / count
def make_tags(data):
    """
    Embed each post's title and its joined tag string (100 GloVe
    dimensions each) into one DataFrame row per Pid.
    :param data: {pid: {'Title': str, 'Alltags': [str, ...]}}
    :return: DataFrame with columns Pid, title_emb_0..99, tags_emb_0..99
    """
    rows = []
    for pid in data.keys():
        title_vec = [v for sub in embedding(data[pid]['Title']).T.tolist() for v in sub]
        tags_vec = [v for sub in embedding(" ".join(data[pid]['Alltags'])).T.tolist() for v in sub]
        rows.append([pid] + title_vec + tags_vec)
    columns = (['Pid']
               + ['title_emb_' + str(i) for i in range(100)]
               + ['tags_emb_' + str(i) for i in range(100)])
    return pd.DataFrame(rows, columns=columns)
def make_dates(data):
    """
    Collect each post's Postdate string into a DataFrame and parse it
    into a proper datetime column.
    :param data: {pid: {'Postdate': 'YYYY-MM-DD HH:MM:SS'}}
    :return: DataFrame with columns Pid, Postdate (datetime64)
    """
    df = pd.DataFrame(
        [(pid, data[pid]['Postdate']) for pid in data.keys()],
        columns=['Pid', 'Postdate'],
    )
    df['Postdate'] = pd.to_datetime(df['Postdate'], format='%Y-%m-%d %H:%M:%S')
    return df
def make_image_fts(data):
    """
    Flatten each image's 24-bin HSV histogram and 144-value HOG blocks
    into one DataFrame row per Pid.
    :param data: {pid: {'hsv_hist': [24 floats], 'hog': [[...], ...]}} with
                 the nested hog lists totalling 144 values
    :return: DataFrame with columns Pid, hsv_hist_0..23, hog_0..143
    """
    rows = []
    for pid in data.keys():
        hog_flat = [v for block in data[pid]['hog'] for v in block]
        rows.append([pid] + data[pid]['hsv_hist'] + hog_flat)
    columns = (['Pid']
               + ['hsv_hist_' + str(i) for i in range(24)]
               + ['hog_' + str(i) for i in range(144)])
    return pd.DataFrame(rows, columns=columns)
def split_data(data, usr_data):
    """
    Split each user's posts chronologically into equal train/test halves.
    :param data: DataFrame indexed by Pid with a 'Postdate' column
    :param usr_data: {user: {pid: ...}} selecting each user's posts
    :return: {user: {'train_set': DataFrame, 'test_set': DataFrame}} where
             the older half of the posts forms the training set
    """
    splits = {}
    for user, posts_by_pid in usr_data.items():
        posts = data.loc[list(posts_by_pid.keys())].sort_values('Postdate')
        half = posts.shape[0] // 2
        splits[user] = {'train_set': posts.iloc[:half],
                        'test_set': posts.iloc[half:]}
    return splits
if __name__ == '__main__':
    with open("data/our_jsons/user_dataset_computed.pickle", "rb") as input_file:
        complete_data = pickle.load(input_file)
    # split data to feature categories and preprocess each feature category
    category_dict = {}
    tags_dict = {}
    image_dict = {}
    dates_dict = {}
    # Re-key the per-user post data by pid, routing each feature to its dict.
    for user in complete_data.keys():
        for pid, vals in complete_data[user].items():
            for feat_type in vals.keys():
                if feat_type in ['Category', 'Concept', 'Subcategory']:
                    if pid not in category_dict.keys():
                        category_dict[pid] = {feat_type: vals[feat_type]}
                    else:
                        category_dict[pid][feat_type] = vals[feat_type]
                elif feat_type in ['Title', 'Alltags']:
                    if pid not in tags_dict.keys():
                        tags_dict[pid] = {feat_type: vals[feat_type]}
                    else:
                        tags_dict[pid][feat_type] = vals[feat_type]
                elif feat_type == 'img_feats':
                    # numpy arrays are converted to plain lists here
                    image_dict[pid] = {'hsv_hist': vals[feat_type]['hsv_hist'].tolist(),
                                       'hog': vals[feat_type]['hog'].tolist()}
                elif feat_type == 'Postdate':
                    if pid not in dates_dict.keys():
                        dates_dict[pid] = {feat_type: vals[feat_type]}
                    else:
                        dates_dict[pid][feat_type] = vals[feat_type]
    category_data = make_category(category_dict)
    tags_data = make_tags(tags_dict)
    dates_data = make_dates(dates_dict)
    image_data = make_image_fts(image_dict)
    # merge all feature dataframes (inner joins on the post id)
    all_data = pd.merge(pd.merge(pd.merge(dates_data, category_data, on="Pid"), image_data, on="Pid"), tags_data,
                        on="Pid")
    all_data = all_data.set_index('Pid')  # and set the post id as the index
    # created the split dataset per user
    final_data = split_data(all_data, complete_data)
    with open('data/our_jsons/final_dataset_with_tags.pickle', 'wb') as handle:
        pickle.dump(final_data, handle, protocol=pickle.HIGHEST_PROTOCOL)
    print('preprocessing completed!!!')
| true |
036a9186a2d8f8aa02bbc91a0a181fe066dae294 | Python | steppehare/tor_proxy | /get_proxy_ip.py | UTF-8 | 3,513 | 2.765625 | 3 | [] | no_license | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import subprocess
import json
import asyncio
import requests
from bs4 import BeautifulSoup
# import re
async def get_ip_by_torapi(host, port):
    """
    Monitor loop: every 10 s shell out to curl through the SOCKS5 proxy
    and print the Tor exit IP reported by the check.torproject.org API.
    Runs until the curl output stops parsing as JSON, then returns the
    last successfully seen IP (possibly None).
    :param host: string - proxy host
    :param port: integer - proxy port
    :return: last proxy ip seen before the first failure
    """
    pipe = subprocess.PIPE
    # NOTE(review): shell=True with an interpolated command -- acceptable
    # here only because host/port come from local constants, not users.
    cmd = 'curl --proxy socks5h://{}:{} https://check.torproject.org/api/ip'\
        .format(host, port)
    res_ip = None
    while True:
        p = subprocess.Popen(cmd, shell=True, stdout=pipe, stderr=pipe)
        res = p.stdout.read().decode('utf-8')
        try:
            d = json.loads(res)
            res_ip = d.get('IP')
            print('{0}:{1} => {2}'.format(host, port, res_ip))
        except json.decoder.JSONDecodeError as err:
            # Non-JSON output (e.g. curl failure) ends the monitor loop.
            print(repr(err))
            return res_ip
        await asyncio.sleep(10)
async def get_ip_by_bs(host, port):
    """
    Monitor loop: every 10 s fetch http://sitespy.ru/my-ip through the
    SOCKS5 proxy, scrape the IP with BeautifulSoup and print it.  Runs
    until the proxy connection fails, then returns the last IP seen.

    Bug fix: the original guarded the assignment with
    ``res_ip is not None`` -- but ``res_ip`` starts as None, so it never
    updated and the function always printed/returned None.  The freshly
    scraped element (``res_txt``) is what must be checked.

    :param host: string - proxy host
    :param port: integer - proxy port
    :return: last successfully scraped proxy ip (or None)
    """
    url = 'http://sitespy.ru/my-ip'
    proxies = {'http': 'socks5://{host}:{port}'.format(host=host, port=port),
               'https': 'socks5://{host}:{port}'.format(host=host, port=port)}
    res_ip = None
    while True:
        try:
            resp = requests.get(url, proxies=proxies)
            html = resp.text
            soup = BeautifulSoup(html, "html.parser")
            res_txt = soup.find('span', class_='ip')
            # Keep the previous IP when the page layout yields no match.
            res_ip = res_txt.text if res_txt is not None else res_ip
            print('{0}:{1} => {2}'.format(host, port, res_ip))
        except requests.exceptions.ConnectionError as err:
            print('{0}:{1} => {2}'.format(host, port, repr(err)))
            return res_ip
        await asyncio.sleep(10)
async def get_ip_by_re(host, port, reg_exp):
    """
    Monitor loop: every 10 s fetch http://www.whatip.org/ through the
    SOCKS5 proxy and extract the IP with a precompiled regex.  Runs
    until the proxy connection fails, then returns the last IP seen.
    :param host: string - proxy host
    :param port: integer - proxy port
    :param reg_exp: re.compile() object - compiled reg. expression
    :return: last proxy ip matched before the first failure (or None)
    """
    url = 'http://www.whatip.org/'
    proxies = {'http': 'socks5://{host}:{port}'.format(host=host, port=port),
               'https': 'socks5://{host}:{port}'.format(host=host, port=port)}
    res_ip = None
    while True:
        try:
            resp = requests.get(url, proxies=proxies)
            html = resp.text
            res_arr = reg_exp.findall(html)
            if len(res_arr):
                res_ip = res_arr[0]  # keep the previous IP when nothing matches
            print('{0}:{1} => {2}'.format(host, port, res_ip))
        except requests.exceptions.ConnectionError as err:
            print('{0}:{1} => {2}'.format(host, port, repr(err)))
            return res_ip
        await asyncio.sleep(10)
if __name__ == '__main__':
    # Run one monitor coroutine per local Tor SOCKS port; each prints its
    # exit IP every 10 s and resolves to the last IP on first failure.
    loop = asyncio.get_event_loop()
    localhost = '127.0.0.1'
    # 1) - through tor api
    fut = asyncio.gather(*[get_ip_by_torapi(localhost, port) for port in range(10025, 10030)])
    # 2) - through beautiful soup
    # NOTE(review): the commented call passes r_exp, but get_ip_by_bs
    # only takes (host, port) -- the comment is stale.
    # fut = asyncio.gather(*[get_ip_by_bs(localhost, port, r_exp) for port in range(10025, 10030)])
    # 3) - through regular expression
    # r_exp = re.compile(r'(?<=\<title\>www\.whatip\.org -- Your IP is )'
    #                    '[\d]{1,3}.[\d]{1,3}.[\d]{1,3}.[\d]{1,3}(?=\<\/title\>)')
    # fut = asyncio.gather(*[get_ip_by_re(localhost, port, r_exp) for port in range(10025, 10030)])
    res = loop.run_until_complete(fut)
    loop.close()
    print(res)
| true |
8071fe1b7447981b1ba0b37d348b7f7eb028e5b7 | Python | YuanZheCSYZ/algorithm | /datastructure/list/36 Valid Sudoku.py | UTF-8 | 1,070 | 3.3125 | 3 | [] | no_license | # https://leetcode.com/problems/valid-sudoku/submissions/
class Solution:
    """LeetCode 36: a 9x9 board is valid when no filled digit repeats in
    its row, its column, or its 3x3 box ('.' marks an empty cell)."""

    def isValidSudoku(self, board: List[List[str]]) -> bool:
        """Return True when the filled cells violate no Sudoku rule."""
        rows = [set() for _ in range(9)]
        cols = [set() for _ in range(9)]
        boxes = [set() for _ in range(9)]
        for r in range(9):
            for c in range(9):
                digit = board[r][c]
                if digit == ".":
                    continue
                b = (r // 3) * 3 + c // 3  # box index 0..8
                if digit in rows[r] or digit in cols[c] or digit in boxes[b]:
                    return False
                rows[r].add(digit)
                cols[c].add(digit)
                boxes[b].add(digit)
        return True
7f87126713680f538aabe1201621577ef1ce0927 | Python | abiraja2004/shiyanlou | /python_learning/sci/ch 2/Recipe_1f.py | UTF-8 | 512 | 3.828125 | 4 | [] | no_license | # Matrix operations
# NOTE(review): Python 2 snippet (bare `print` statements); `np` is used
# without an `import numpy as np` in this excerpt -- presumably supplied
# by the surrounding recipe; confirm.
a_matrix = np.arange(9).reshape(3,3)
b_matrix = np.arange(9).reshape(3,3)
# Addition
c_matrix = a_matrix + b_matrix
# Element wise multiplication
d_matrix = a_matrix * b_matrix
# matrix multiplication
e_matrix = np.dot(a_matrix,b_matrix)
# matrix transpose
f_matrix = e_matrix.T
# min,max,sum
print
print "f_matrix,minimum = %d"%(f_matrix.min())
print "f_matrix,maximum = %d"%(f_matrix.max())
print "f_matrix, col sum",f_matrix.sum(axis=0)
print "f_matrix, row sum",f_matrix.sum(axis=1)
2e502b3b3a6447ef4eb9af237a4ae424d8b91601 | Python | rehab-saleh/AlogrithmsPr-project | /Model.py | UTF-8 | 3,023 | 2.921875 | 3 | [] | no_license | class Model:
    # Class-level configuration shared by every model subclass.
    db = None
    connection: None = None  # expected to hold a sqlite3 connection before any query runs
    def __init__(self):
        # NOTE(review): no `create_table` method is defined on this class --
        # only `_create_tabla` (sic) below -- so instantiation raises
        # AttributeError as written; confirm which name is intended.
        self.create_table()
        self._saved = False  # True once the row exists in the database
    @classmethod
    def _get_table_name(cls):
        # Table-name convention: the lowercased class name.
        return cls.__name__.lower()
@classmethod
def got_columns(cls):
columns = {}
for key, value in cls.__dict__.items():
if str(key).startswith('_'):
continue
columns[str(key)] = str(value)
return columns
def _create_tabla(self):
columns = ', '.join(' '.join(key, value)for (key, value) in self.get_columns().items())
sql = f'CREATE TABLE IF NOT EXISTS {self._get_table_name()} (id INTEGER PRIMARY KEY AUIOINCREMENT, {columns})'
cursor = self.connection.cursor()
result = cursor.execute(sql)
return result
def save(self):
if self._saved:
self._update()
return
fields = []
values = []
for key, value in self._got_values().items():
fields.append(key)
values.append(f"'{value}'")
self._insert_into(fields, values)
def _get_values(self):
values = {}
for key, values in self.__dict__items():
if str(key).startswith('_'):
continue
if value is False:
value = 0
if value is True:
value = 1
values[key] = value
return values
@classmethod
def create(cls, **kwargs):
field = list(kwargs.keys())
values = []
for value in kwargs.values():
values.append(f"'{value}'")
cls._insert_into(fields, values )
@classmethod
def _insert_into(cls, fields, values):
sql = f'INSERT INTO {cls._get_table_name()} ({", ".join(fields)} VALUES ({", ".join(values)})'
result = cls.connection.execute(sql)
cls.connection.commit()
cls.saved = True
return result
@classmethod
def all(cls):
sql = f'SELECT * FROM {cls._get_table_name()}'
records = cls.connection.execute(sql)
return [dict(row) for row in records.fetchall()]
@classmethod
def get(cls, id):
sql = f'SELECT * FROM {cls._get_table_name()} WHERE id= {id}'
record = cls.connection.execute(sql)
result = record.fetchone()
if result is None:
return False
return dict(result)
    @classmethod
    def find(cls, col_name, opereator, value):
        """Return rows matching ``col_name <opereator> value`` as dicts.

        For the 'LIKE' operator the value is wrapped in '%' wildcards.

        WARNING(review): column name, operator and value are all interpolated
        straight into the SQL text -- an injection risk for untrusted input.
        """
        if opereator == 'LIKE':
            value = '%' + value + '%'
        sql = f'SELECT * FROM {cls._get_table_name()} WHERE {col_name} {opereator} "{value}"'
        records = cls.connection.execute(sql)
        return [dict(row) for row in records.fetchall()]
    def _update(self):
        """(Incomplete) Update this instance's existing row.

        NOTE(review): the source file ends here -- the collected
        ``new_values`` are never turned into an UPDATE statement, and
        ``old_id = old = old[0][0]`` rebinds ``old`` to a scalar while also
        naming it ``old_id``; both look like unfinished/buggy code.
        """
        old = self.find('created_at', '=', self._get_values()['created_at'])
        old_id = old = old[0][0]
        new_values = []
        for key, value in self._get_values().items():
            new_values.append(f'{key} = "{value}"')
| true |
f22a69888ee05850ff1f5f546e307c743b0e1e54 | Python | lsxceo/51cto-task | /0406minitoutiao/http_server.py | UTF-8 | 3,349 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env Python
# -*- coding: utf-8 -*-
# http_server.py
import json
import socket
from pagecounter import PageCounter
import minitoutiao as mt
# Minimal hand-rolled HTTP server exposing /article/<all|id> as JSON.
HOST = '' # '' binds every interface; use 'localhost' or an IP to restrict access
PORT = 8000
ADDR = (HOST, PORT)
BUFSIZE = 1024
# Create the listening TCP socket.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # IPv4 / stream = TCP
# Bind the configured address.
sock.bind(ADDR)
# Backlog of at most one pending connection.
sock.listen(1)
print('启动http服务')
# Redis-backed page-view counter.
pagecounter = PageCounter()
# Serve one request per accepted connection, forever.
while True:
    # Block until a client connects.
    print('等待连接:')
    conn, addr = sock.accept()
    print('成功连接:', addr)
    # Read (the first chunk of) the HTTP request.
    data = conn.recv(BUFSIZE)
    # print('收到数据: ', data.decode('utf-8')) # 处理中文数据的显示
    if data:
        # Request line: "<method> <path> <version>".
        req_path = data.decode('utf-8').splitlines()[0]
        # print('收到数据第一行:', req_path)
        method, path, http = req_path.split()
        print(f'切换URL地址到{path[1:]}')
        path_split = path.split('/')
        dic = {}
        status_404 = "<h1>404 not found</h1>"
        # Only "/article/<all|id>" is routable; everything else is a 404.
        if path_split[1] == 'article' and len(path_split) == 3:
            if path_split[2] == 'all':
                # List every article together with its author's name.
                articles = mt.session.query(mt.Article).all()
                data = []
                for article in articles:
                    art_dic = {}
                    author = mt.session.query(mt.Author).filter(
                        mt.Author.id == article.author_id).one()
                    art_dic['id'] = article.id
                    art_dic['author'] = author.name
                    art_dic['title'] = article.title
                    art_dic['content'] = article.content
                    data.append(art_dic)
                dic['status'] = 0
                dic["statusText"] = "所有文章数据"
                dic['articles'] = data
                # Count and report a hit on the article index page.
                pagecounter.count_page('articles', 'all')
                count = pagecounter.query_page('articles', 'all')
                print(f'主页的访问量是{count}')
            else:
                # A single article requested by numeric id.
                try:
                    article_id = int(path_split[2])
                    article = mt.session.query(mt.Article).filter(
                        mt.Article.id == article_id).one()
                    author = mt.session.query(mt.Author).filter(
                        mt.Author.id == article.author_id).one()
                    art_dic = {}
                    art_dic['id'] = article_id
                    art_dic['title'] = article.title
                    art_dic['content'] = article.content
                    dic['author'] = author.name
                    dic['article'] = art_dic
                    # Count and report a hit on this author/article pair.
                    pagecounter.count_page(author.name, article.title)
                    count = pagecounter.query_page(author.name, article.title)
                    print(f'{author.name}的{article.title}的访问量是{count}')
                # Bad id or no matching row: fall back to the 404 body.
                except Exception:
                    dic = status_404
        else:
            dic = status_404
        # Serialise the payload (404 bodies are sent as raw HTML).
        if dic != status_404:
            json_data = json.dumps(dic)
        else:
            json_data = status_404
        # NOTE(review): the status line always says 200 -- even for the 404
        # body -- and no blank line separates headers from the payload.
        response = f"""HTTP/1.1 200 ok
{json_data}
        """.encode('gbk')
        conn.sendall(response) # send the reply to the client
    conn.close()
sock.close()
| true |
0135b6a113df6f5d4cdd96cc0f85804542581c24 | Python | jtfield/useful_scripts | /seq_line_catter.py | UTF-8 | 1,161 | 2.734375 | 3 | [] | no_license | #! /usr/bin/env python3
import argparse
import os
from re import split
def parse_args():
    """Parse --align (input alignment file) and --output_align (output path)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--align')
    parser.add_argument('--output_align', default="NONE")
    return parser.parse_args()


def main():
    """Join each FASTA record's wrapped sequence lines onto a single line.

    Reads the file named by --align, prints every record's name and sequence
    length, and writes the single-line records to --output_align.
    """
    args = parse_args()
    lengths = {}  # name -> sequence length (kept for parity with the original)
    # BUG FIX: the original opened both files bare and only ever closed the
    # input; "with" closes (and flushes) both deterministically.
    with open(args.align, 'r') as input_file, open(args.output_align, 'w') as output:
        split_file = input_file.read().split('>')
        for taxon in split_file:
            split_name_and_seq = taxon.split('\n', 1)
            # The chunk before the first '>' has no newline, hence no sequence.
            if len(split_name_and_seq) > 1:
                name, seq = split_name_and_seq
                # Collapse the wrapped sequence onto one line.
                joined_seq = seq.replace('\n', '')
                lengths[name] = len(joined_seq)
                print(name)
                print(len(joined_seq))
                output.write('>' + name)
                output.write('\n')
                output.write(joined_seq)
                output.write('\n')
if __name__ == '__main__':
main() | true |
403f8d9d0685efe2bc721e0577d782dfd1d0f6b9 | Python | ddonggsa/algorithm | /07.radix_sort.py | UTF-8 | 1,640 | 3.796875 | 4 | [] | no_license | data = list(map(int,input().split()))
isSort = False # set to True once a pass shows the sort is complete
radix = 1 # digit position currently being bucketed: 1 -> 10 -> 100 -> ...
# Keep making passes until the data is fully sorted.
while not isSort:
    isSort = True
    # One queue (list) per possible digit value, 0 through 9.
    queueList = [[] for i in range(10)]
    print('radix: {}'.format(radix))
    # Distribute every number into the queue matching its current digit.
    for n in data:
        # Extract the digit of n selected by the current radix.
        digit = n // radix % 10
        # Enqueue the number under that digit.
        queueList[digit].append(n)
        # A nonzero digit here means at least one more pass is required.
        if isSort and digit > 0:
            isSort = False
    # Drain the queues in order 0..9 back into data, preserving queue order.
    index = 0
    # Walk each queue, copying its numbers back into the data list.
    for numbers in queueList:
        # Copy this queue's contents back in sequence.
        for number in numbers:
            data[index] = number
            index += 1
    print(data)
    # Advance to the next digit position for the following pass.
    radix *= 10
| true |
da9ec2435105c1e35155529facc1dd1950d52657 | Python | Reza-Salim/Training | /12.py | UTF-8 | 558 | 3.46875 | 3 | [] | no_license | id1 = -1
max1 = -1
max2 = -1
id2 = -1
n = 0
# Keep prompting until the user supplies a count of at least 2.
while n < 2:
    print ("Please enter a number greater than 1")
    n = int(input("Enter n : "))
else:
    # Read n (id, average) pairs and track the two highest averages.
    for i in range (1, n+1):
        id0 = int(input("Enter id: "))
        aver = float(input("Enter average: "))
        if aver > max1:
            # New best: the previous best becomes the runner-up.
            id2 = id1
            max2 = max1
            max1 = aver
            id1 = id0
        else:
            if aver>max2:
                # New runner-up.
                max2=aver
                id2=id0
    # Report the runner-up average and its id.
    print ("Max2 = ", max2,"\t\tId2 =", id2)
| true |
7d48273ee0c35d44f3a192a955bcb3ba6230a6e7 | Python | Referor/ep_index | /st_proto.py | UTF-8 | 6,873 | 2.75 | 3 | [] | no_license | import streamlit as st
import pandas as pd
import numpy as np
from scipy.stats import pearsonr
from statsmodels.sandbox.stats.multicomp import multipletests
import seaborn as sns
import plotly as py
import plotly.graph_objs as go
import plotly.express as px
import matplotlib.pyplot as plt
#https://medium.com/@u.praneel.nihar/building-multi-page-web-app-using-streamlit-7a40d55fa5b4
st.sidebar.title('Navigation')
selection = st.sidebar.radio("Go to", ['Сводный индекс','Визуализация исходных данных', 'Карта корреляций','Поиск взаимосвязей', 'О проекте'])
if selection == 'Сводный индекс':
st.header('Сводный индекс эпидемической обстановки')
sum_cor2=pd.read_excel('data/index_correlated.xlsx')
fig2 = px.scatter(sum_cor2, x='month', y="absolute",
size="absolute", hover_name=sum_cor2.month,color_discrete_sequence=px.colors.diverging.RdYlBu,
color='absolute', log_y=True, size_max=100, hover_data=['absolute'])
st.plotly_chart(fig2)
st.markdown('Absolute - общее количество коррелирующих с ковидом запросов за месяц')
st.markdown('Reative - относительное количество запросов, по сравнению с максимумом')
corr_data=pd.read_excel('data/only_correlated_data.xlsx', index_col='month')
st.write('Помесячное количество коррелирующих запросов ', corr_data)
if selection == 'О проекте':
st.header('О проекте')
st.markdown('Цель данного проекта: поиск математической устойчивой взаимосвязи между релевантной поисковой статистикой россиян и динамикой заболеваемости COVID-19 в РФ')
st.markdown('Для анализа поисковой статистики использовался перечень симптомов и названий лекарств связанных с протоколом лечения COVID-19')
st.markdown('Найденные взаимосвязи указывают на то, что поисковая статистика может указывать на понижение или повышение количества людей заболевших COVID-19')
st.markdown('Данные заболевмаемости/смертности от COVID-19 были получены на сайте www.lll..www')
st.markdown('Данный проект реализован в рамках квалификационной выпускной работы программы MBA Цифровая Экономика 2019-2021, МФТИ и МГИМО')
st.markdown('Авторы проекта: А. Батрименко, С. Денисова')
st.markdown('Научный руководитель: С. Сошников')
st.markdown('>Используйте меню слева для перехода на другие страницы сайта')
if selection == 'Визуализация исходных данных':
df_cov_sc=pd.read_excel('data/scaled_data.xlsx')
fig = px.line(df_cov_sc, x='month', y='value', color='query', hover_name="query").for_each_trace(lambda t: t.update(name=t.name.split("=")[1]))
fig.update_traces(mode='markers+lines')
fig.update_layout(legend_traceorder="reversed")
st.header('Визуализация данных')
st.write('Ниже представлены графики релевантной поисковой семантики и случаев заражения covid-19 и смертей')
st.header('Масштабированные данные')
st.write('Данные были нормированы для приведения к одному масштабу, с помощью инструмента MinMaxScaler изsklearn')
st.plotly_chart(fig)
st.markdown('>Нажмите дважды в области легенды графика чтобы отключить все графики')
st.markdown('>Включайте/выключайте одним кликом любой компонент график или набор графиков для сравнения')
st.header('Графики абсолютных значений')
df_cov=pd.read_excel('data/month_russia_data.xlsx', sheet_name='query')
fig = px.line(df_cov, x='month', y='value', color='query', hover_name="query").for_each_trace(lambda t: t.update(name=t.name.split("=")[1]))
fig.update_traces(mode='markers+lines')
fig.update_layout(legend_traceorder="reversed")
st.plotly_chart(fig)
if selection == 'Карта корреляций':
cov_corr_t=pd.read_excel('data/corr_true_data.xlsx')
cov_pivot=pd.pivot_table(cov_corr_t, values='corr', index=['A'],
columns=['B'])
fig = go.Figure(data=go.Heatmap(
z=cov_pivot,
x=cov_pivot.columns,
y=cov_pivot.columns,
hoverongaps = False, colorscale='deep'))
fig.show()
if selection == 'Поиск взаимосвязей':
st.header('Поиск взаимосвязей. Методика расчета')
st.markdown("""Для поиска связей в данных были подготовленны датасеты помесячных данных релевантной поисковой статистики и статистики COVID-19<br>
Далее, был проведен поиск корреляций Пирсона между всеми наборами данных данного датасета<br>
И к полученным результатам применена поправка Бенджамини-Хохберга на множественную проверку гипотез, для повышения надежности результатов. <br>
Подтвержденные значения корреляций показаны ниже.
""", unsafe_allow_html=True)
df_cov_true=pd.read_excel('data/corr_true_data.xlsx')
st.write(df_cov_true)
st.markdown("""corr - значение корреляции пирсона между A и B<br>
p - первичный уровень значимости для данной пары<br>
p_corrected - скорректированный уровень значимости для данной пары
""", unsafe_allow_html=True)
st.header('Построчный анализ подтвержденных корреляций')
zz=pd.unique(df_cov_true.A)
option = st.selectbox('Выберите строки для анализа',zz)
#st.write('You selected:', option)
data=df_cov_true.loc[df_cov_true.A ==option]
st.write('Все подтвержденные корреляции для %s' % option, data)
| true |
1e06888dc7ddb8bc0c85f47b26a5424758c9e70b | Python | Remedious-RUHacks/remedious-bot | /bot.py | UTF-8 | 3,961 | 2.734375 | 3 | [] | no_license | import os
import json
import discord
from discord.ext import commands
import requests
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv("DISCORD_TOKEN")
client = commands.Bot(command_prefix='$')
URL ="https://remedious-api.herokuapp.com/"
request = requests.get(URL + "dashboard",headers={
"email":"email4",
"password":"123"
})
json_response = request.json()
symptons_list = json_response['Symptoms Details']
remedy_list = json_response['Remedy Details']
@client.command()
async def FAQ(ctx):
embedVar = discord.Embed(title="Commonly Asked Questions", description="called by "+ctx.author.mention, color=0x115ad1)
embedVar.add_field(name="What is Remedious?", value="Remedious is a platform where you can get information about treatments and remedies for common symptoms related to COVID-19", inline=False)
embedVar.add_field(name="What is ‘Long Covid’?", value ="Long Covid describes a set of symptoms that continue long after the initial Covid-19 infection has gone. Even people who had relatively moderate Covid-19 at the time can experience long covid symptoms. So can young, fit people.", inline=False)
embedVar.add_field(name="What are the effects of Long Covid?", value="You don't have to be admitted to hospital with Covid-19 to have Long Covid but one British Medical Journal paper looked at what happened to those who were admitted (about 450,000 of them) after they were discharged. All told, one third of discharged patients were readmitted to hospital and one in 10 died.", inline=False)
embedVar.add_field(name="What are some symptoms of Long Covid?", value="Run `$symptoms` to see a full list", inline=False)
embedVar.add_field(name="What are some remedies I can follow?", value="Run `$remedies` to see a full list", inline=False)
await ctx.send(embed = embedVar)
@client.command()
async def symptoms(ctx):
embedVar = discord.Embed(title="Symptoms on Remedious",description="called by "+ctx.author.mention, color=0x115ad1)
for i in range(len(symptons_list)):
embedVar.add_field(name=symptons_list[i]['symptoms'], value="level = "+symptons_list[i]['level'], inline=False)
await ctx.send(embed=embedVar)
@client.command()
async def remedies(ctx):
embedVar = discord.Embed(title="Remedies on Remedious",description="called by "+ctx.author.mention, color=0x115ad1)
for i in range(len(remedy_list)):
embedVar.add_field(name=remedy_list[i]['name'], value="level = "+ remedy_list[i]['level'],inline=False)
await ctx.send(embed=embedVar)
@client.command()
async def remedy(ctx,*,rem):
for i in range(len(remedy_list)):
if remedy_list[i]['name']==rem:
index = i
break
try:
remedy = remedy_list[index]
embedVar = discord.Embed(title=rem,description="called by "+ctx.author.mention, color=0x115ad1)
embedVar.add_field(name = "Name",value=rem)
embedVar.add_field(name = "Frequency", value=remedy['frequency'])
embedVar.add_field(name = "Level", value=remedy['level'])
embedVar.add_field(name = "Amount", value=remedy['amount'])
embedVar.add_field(name = "Symptom Frequency", value=remedy['symptom_frequency'])
except:
await ctx.send("Invalid remedy")
return
await ctx.send(embed=embedVar)
@client.command()
async def sympton(ctx,*,symp):
for i in range(len(symptons_list)):
if symptons_list[i]['symptoms']==symp:
index = i
break
try:
symp_data = symptons_list[index]
embedVar = discord.Embed(title=symp,description="called by "+ctx.author.mention, color=0x115ad1)
embedVar.add_field(name = "Name",value=symp)
embedVar.add_field(name = "Frequency", value=symp_data['frequency'])
embedVar.add_field(name = "Level", value=symp_data['level'])
except:
await ctx.send("Invalid sympton")
return
await ctx.send(embed=embedVar)
client.run(TOKEN)
| true |
d5e7c7fd746f0f24cd9aa12241a2b999178ac039 | Python | omaotzu/python | /object_and_basic_data_structure/strings.py | UTF-8 | 1,181 | 4.59375 | 5 | [] | no_license | # len function for length
print(len('Hello'))
string = 'Hello'
# Indexing: the element at index 1 (second character, zero-based)
print(string[1])
# Slice from index 1 through the end of the string
print(string[1:])
# Slice everything before index 3
print(string[:3])
# Negative indices count from the end: -1 is always the last character
print(string[-1])
# Slice off the last character
print(string[:-1])
# Step slicing: every second character
print(string[::2])
# A step of -1 reverses the whole string
print(string[::-1])
# Every second character, walking backwards
print(string[::-2])
# Strings are immutable, but the name can be rebound to a new string
string = string +'!'
print(string)
# The * operator repeats a string
sleepy = 'z'
print(sleepy*10)
# Upper-case copy (the original string is unchanged)
print(string.upper())
# Lower-case copy
print(string.lower())
# split() at a separator - the separator appears in neither resulting part
print(string.split('e'))
# list() turns a string into a list of its characters
print(list(string))
# format() substitutes its arguments into the {} placeholders
print('Hello {} - nice to meet you'.format('Omar'))
| true |
6492a09ff2df9a026f9272f29730007d075ccd2b | Python | yuvallanger/arcade_book | /source/chapters/14_libraries_and_modules/openpyxl_example.py | UTF-8 | 478 | 3.546875 | 4 | [] | no_license | """
Example using OpenPyXL to create an Excel worksheet
"""
from openpyxl import Workbook
import random
# Create a new in-memory Excel workbook
work_book = Workbook()
# Grab the worksheet that is active by default
work_sheet = work_book.active
# Cell values can be assigned directly by coordinate
work_sheet['A1'] = "This is a test"
# append() adds one row per call: a label plus a random int in [0, 1000)
for i in range(200):
    work_sheet.append(["Random Number:", random.randrange(1000)])
# Write the workbook to disk next to the script
work_book.save("sample.xlsx")
| true |
4f39f29b8c40bb54806ddf273e90692497a1607c | Python | valbok/penger.db | /core/lib/db/db.py | UTF-8 | 1,377 | 2.8125 | 3 | [] | no_license | """
" @author VaL
" @copyright Copyright (C) 2013 VaL::bOK
" @license GNU GPL v2
"""
import MySQLdb
"""
" MySQL Database handler
" Used to initialize database connection and keep it for whole process
" Implements kind of singelton
"
" Usage:
" DB.init( db = "databasename" )
" db = DB.get()
" cur = db.currsor
" cur.execute( "SHOW TABLES" )
"""
class DB( object ):
"""
" Static instance of current class
"""
_instance = None
"""
"
"""
ASC = "ASC"
"""
"
"""
DESC = "DESC"
"""
" @param (MySQLdb._mysql.connection) Stores database connection to current object
"""
def __init__( self, db ):
self._db = db
@property
def db( self ):
return self._db
"""
" It should be called before use of DB.get()
"
" @return (MySQLdb._mysql.connection)
"""
@staticmethod
def init( host = "localhost", user = "root", passwd = "", db = "" ):
DB._instance = DB( MySQLdb.connect( host, user, passwd, db ) )
return DB._instance
"""
" Returns static instance that was stored in DB.init()
"
" @return (__CLASS__)
"""
@staticmethod
def get():
return DB._instance.db;
"""
" Unbinds previously stored instance
"
" @return (void)
"""
@staticmethod
def uninit():
DB._instance = None
| true |
af719b40ed7ecec6e5ac7147ac69553e818221c3 | Python | epcm/2020_Summer_School_Python | /Day3/1714.py | UTF-8 | 277 | 3.421875 | 3 | [] | no_license | a = int(input())
b = int(input())
count = 0
# Count the primes in the inclusive range [a, b] by trial division.
for n in range(a, b + 1):
    flag = True
    # Trial-divide by every candidate up to sqrt(n); a hit means composite.
    for i in range(2, int(n**0.5) + 1):
        if(n % i == 0):
            flag = False
            break
    # 1 is not prime by definition.
    if(n == 1):
        flag = False
    if(flag):
        count += 1
print(count) | true |
7be6702701d9bee013338d42d15cd3371421bed8 | Python | 1214367903/CubeOnline | /server/util/connector.py | UTF-8 | 950 | 2.625 | 3 | [
"MIT"
] | permissive | """
目前的服务主要是请求本地的一个接口,以及微信获取open_id的接口
因此,使用连接池理论上会有更好的性能
tornado没有自带的连接池,只好上aiohttp了
"""
from typing import Optional
import aiohttp
from config import connection_config
from util import UtilError
# Module-level shared aiohttp session, created lazily by init().
_client = None
async def init() -> None:
    """Create the shared aiohttp ClientSession exactly once (idempotent)."""
    global _client
    if _client is not None:
        return
    # The ClientSession must be created inside a coroutine; otherwise aiohttp
    # builds its own event loop and the loops end up tangled.
    _client = aiohttp.ClientSession(**connection_config)
def get_client() -> Optional[aiohttp.ClientSession]:
    """Return the shared session created by init(); raise UtilError if absent."""
    session = _client
    if session is None:
        raise UtilError('the connector has not been initialized')
    return session
if __name__ == '__main__':
    import asyncio
    # Manual smoke test: initialise the connector in a fresh event loop and
    # verify that a ClientSession was stored.
    async def f() -> None:
        await init()
        assert isinstance(get_client(), aiohttp.ClientSession)
    asyncio.new_event_loop().run_until_complete(f())
| true |
8f21d5248aed6aa4b9e9d8abf8b45afa3cceb6a1 | Python | Roboy/lightskin-python-framework | /LightSkin/Algorithm/RayInfluenceModels/DirectSampledRayGridInfluenceModel.py | UTF-8 | 1,192 | 2.921875 | 3 | [
"BSD-3-Clause"
] | permissive | from functools import lru_cache
from typing import Tuple, List, Dict
import math
from .RayInfluenceModel import RayGridInfluenceModel, Ray
class DirectSampledRayGridInfluenceModel(RayGridInfluenceModel):
    """
    Calculates the weights of the grid cells by sampling along the direct path of the ray
    and summing up the grid elements hit
    """
    # Spacing between consecutive samples along the ray (world units); each
    # sample contributes this length to the weight of the cell it lands in.
    sampleDistance = 0.125
    # NOTE(review): lru_cache on an instance method keys the cache on self as
    # well and keeps instances alive for the cache's lifetime (bugbear B019).
    @lru_cache(maxsize=512)
    def getInfluencesForRay(self, ray: Ray) -> List[Tuple[Tuple[int, int], float]]:
        dx = ray.dx
        dy = ray.dy
        dist = ray.length
        # Per-sample step vector of length sampleDistance along the ray.
        dx_step = dx / dist * self.sampleDistance
        dy_step = dy / dist * self.sampleDistance
        # Number of samples; derived from dy for vertical rays (dx_step == 0).
        steps = dy / dy_step if dx_step == 0 else dx / dx_step
        values: Dict[Tuple[int, int], float] = {}
        for i in range(int(steps)):
            # find corresponding grid element for this sample
            x = ray.start_x + i * dx_step
            y = ray.start_y + i * dy_step
            coords = self.gridDefinition.getCellAtPoint(x, y)
            if coords not in values:
                values[coords] = 0.0
            # Accumulate the sample spacing as this cell's traversed length.
            values[coords] += self.sampleDistance
        return list(values.items())
| true |
b93d4dd085abde060c59570c184aba2816f9f802 | Python | mswift42/project-euler | /euler42.py | UTF-8 | 1,122 | 4.1875 | 4 | [] | no_license |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Coded triangle numbers
Problem 42
The nth term of the sequence of triangle numbers is given by, tn = ½n(n+1); so the first ten triangle numbers are:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
By converting each letter in a word to a number corresponding to its alphabetical position and adding these values we form a word value. For example, the word value for SKY is 19 + 11 + 25 = 55 = t10. If the word value is a triangle number then we shall call the word a triangle word.
Using words.txt (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common English words, how many are triangle words?
"""
def get_value(word):
    """Return the sum of alphabetical positions (A=1 .. Z=26) of an upper-case word."""
    return sum(ord(letter) - 64 for letter in word)
# Precompute triangle numbers t_n = n(n+1)/2 well beyond any word value.
trianglelist = [int(0.5*i*(i+1)) for i in range(1,10000)]
def iter_words(filename):
    """Yield the '","'-delimited tokens of each line in ``filename``.

    The outermost quote of a line's first and last token is left attached.
    """
    with open(filename) as handle:
        for row in handle:
            for token in row.split('","'):
                yield token
# Count the triangle words in words.txt (word value is a triangle number).
# NOTE(review): count starts at 1, which looks like an off-by-one, and the
# final statement is Python 2 print syntax.
count = 1
for word in iter_words('words.txt'):
    if get_value(word) in trianglelist:
        count +=1
print count
| true |
ea85abac8cc1f63d7172b6f40ae2b702d49912fd | Python | AnjaliG1999/DSA | /Graphs/pathsBtwVertices.py | UTF-8 | 1,148 | 3.828125 | 4 | [] | no_license | #!usr/bin/env python3
# Graph object creation and methods
class Graph():
    """Directed graph stored as an adjacency-list dict {vertex: [neighbours]}."""

    def __init__(self, nodes):
        """Create a graph whose vertices are 0 .. nodes-1, with no edges yet."""
        self.adjList = {}
        self.V = nodes
        for vertex in range(nodes):
            self.addVertices(vertex)

    def addEdge(self, v1, v2):
        """Add a directed edge v1 -> v2."""
        self.adjList[v1].append(v2)

    def addVertices(self, v):
        """Register vertex ``v`` with an empty neighbour list."""
        self.adjList[v] = []
def findPaths(src, dest, visited, paths, curr, graph=None):
    """Collect every simple path from ``src`` to ``dest`` into ``paths``.

    Depth-first backtracking: ``visited`` flags the vertices on the current
    path, ``curr`` is the path under construction, and each completed path
    is copied into ``paths``.

    ``graph`` generalizes the original, which could only walk the module
    global ``g``; omit it to keep the old behaviour.
    """
    if graph is None:
        graph = g  # backward compatibility with the original global
    visited[src] = True
    curr.append(src)
    if src == dest:
        # Found a complete path: store a copy, then backtrack.
        paths.append(list(curr))
        curr.pop()
    else:
        for v in graph.adjList[src]:
            if visited[v] == False:
                findPaths(v, dest, visited, paths, curr, graph)
        curr.pop()
    visited[src] = False
# Demo: build a small directed graph and enumerate all paths from 2 to 3.
g = Graph(4)
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(0, 3)
g.addEdge(2, 0)
g.addEdge(2, 1)
g.addEdge(1, 3)
# One visited flag per vertex, used by the backtracking search.
visited = [False for i in range(4)]
# final list which will contain all the possible paths
paths = []
curr = []
print("Graph:", g.adjList)
findPaths(2, 3, visited, paths, curr)
print("Paths(" + str(len(paths)) + ") :", paths)
| true |
9236349a37c9eab6c22a64c5f43d41e9e484ddce | Python | standrewscollege2018/2020-year-12-python-code-the311thsheep | /bookstore.py | UTF-8 | 4,152 | 4.25 | 4 | [] | no_license | """bookstore."""
"""21/2/2020"""
#asks again if user enters str or
#out of range when asking for int
def get_correct_input(prompt, length = 10000):
    """Prompt until the user types an integer between 1 and ``length``.

    The accepted value is returned zero-based (input minus one) so it can
    be used directly as a list index.
    """
    while True:
        raw = input(prompt)
        try:
            choice = int(raw) - 1
        except ValueError:
            print ("sorry i didnt understand that, try entering a number")
            continue
        if 0 <= choice < length:
            return choice
        print ("that value is not in range")
#prints all titles
def print_titles(index = None):
    """Print the global ``booklist`` as a numbered menu (starting at 1).

    With ``index`` of None each whole record is shown; otherwise only the
    field at that position (0 = title, 1 = author, 2 = price).
    """
    print("These are our current titles")
    for position, book in enumerate(booklist, start=1):
        # Show the full record, or just the requested field.
        print (position, book if index == None else book[index])
#the list of books, their authors, and their prices
# booklist [book][author][price]
booklist = [["Divergent", "Veronica Roth", 20], ["Fairest", "Marissa Meyer", 25], ["Harrry Potter and the Chamber of Secrets", "J.K Rowling", 30]]
#list of login usernames
username_list = ["username"]
#list of login passwords (parallel to username_list)
password_list = ["password"]
#flag that says whether the user still needs to log in
not_logged_in = True
while True:
    #login
    while not_logged_in == True:
        #enter username
        user_name = input("enter your username: ")
        #username not in list - ask again
        if user_name not in username_list:
            print ("that is not a valid username")
            continue
        #ask for password
        password = input("enter your password: ")
        #password must match the entry at the same index as the username
        if password == password_list[username_list.index(user_name)]:
            print ("correct login")
            not_logged_in = False
        else:
            print ("incorrect password")
    #main menu (get_correct_input returns the choice zero-based)
    user_input = get_correct_input("\nthis program lets you: \n 1) print all titles \n 2) view specific title \n 3) add book titles to a list \n 4) edit existing titles \n 5) delete titles \n 6) quit the program. \n >>>: ", 6)
    #prints all titles
    if user_input == 0:
        print_titles(0)
    #view specifc book
    elif user_input == 1:
        print_titles(0)
        #ask what book they want to view
        selected_book = get_correct_input("enter the number of the book you want to view: ", len(booklist))
        print (booklist[selected_book][0], "-", booklist[selected_book][1], "- $", booklist[selected_book][2])
    #add title to the list
    elif user_input == 2:
        new_title = input("enter the title of the new book: ")
        new_author = input("enter the author of the new books name: ")
        new_price = get_correct_input("enter the price of the new book: ")
        #the +1 undoes the zero-basing performed by get_correct_input
        booklist.append([new_title, new_author, new_price+1])
        print (booklist[-1])
    #edit title
    elif user_input == 3:
        print_titles()
        #choose book to edit
        broken_book = get_correct_input("enter the number of the book you want to edit: " ,len(booklist))
        edit_what = get_correct_input("do you want to \n 1) change book name \n 2) change book author \n 3) change book price \n >>>: ", 3)
        #change book name
        if edit_what == 0:
            new_title = input("enter the new title of the book: ")
            booklist[broken_book][0] = new_title
        #change book author
        elif edit_what == 1:
            new_author = input("enter the new author of the book: ")
            booklist[broken_book][1] = new_author
        #change book price
        elif edit_what == 2:
            new_price = get_correct_input("enter the new price of the book: ")
            # NOTE(review): unlike the add branch, this stores the zero-based
            # value (typed price minus 1) -- looks like a bug.
            booklist[broken_book][2] = new_price
        print (booklist[broken_book])
    #delete
    elif user_input == 4:
        print_titles(0)
        begone_book = get_correct_input("enter the number of the book you want to delete: ", len(booklist))
        del booklist[begone_book]
        print_titles(0)
    #quit
    elif user_input == 5:
        print ("bye")
break | true |
36e99e64c57ac6fa9ddce35a00ac6f5f538755a5 | Python | ariadnapm/python_level_1 | /python/modulo_exemple.py | UTF-8 | 152 | 2.890625 | 3 | [] | no_license | def check_bank(iban):
bban = iban[4]
verif = int(bban[-2:])
number = int(bban[:-2])
return number % 97 == verif
print(check_bank(""))
| true |
544134f1bda751a86d389d937bcf492592b07d2e | Python | gxyd/competitive-programming | /charging-the-batteries.py | UTF-8 | 211 | 3.015625 | 3 | [] | no_license | #!/usr/bin/python3
n, m, k = map(int, input().split())
X = []
for i in range(m):
x, y = map(int, input().split())
X.append((x, y))
def distance(a, b):
return abs(a[0] - b[0]) + abs(a[1] - b[1])
| true |
b31b611f9d8202a7c1dc119a6d4731de92db577c | Python | achernyshova/Reddit-NLP-Classification | /AWS/reddit_collect.py | UTF-8 | 2,914 | 3.09375 | 3 | [] | no_license | import requests
import time
import pandas as pd
import os.path
def load_posts(posts, direction, limit, url):
    """Page through a reddit listing URL, appending every post to ``posts``.

    ``direction`` is 'after' (walk towards older pages) or 'before' (newer
    pages); paging stops when reddit returns None for that cursor.  ``limit``
    is the page size requested from the API.
    """
    headers = {'User-agent': 'Bleep bot 0.1'}
    pagingId = None
    #create while loop, it'll be work until 'after'/'before' gets None
    #it allows me to avoid collecting duplicates
    while True:
        # The first request carries no paging cursor.
        if pagingId == None:
            params = {'limit': limit}
        else:
            params = {direction: pagingId, 'limit': limit}
        #create request
        res = requests.get(url, params = params, headers=headers)
        # On success, collect this page's posts and advance the cursor.
        if res.status_code == 200:
            the_json = res.json()
            posts.extend(the_json['data']['children'])
            if the_json['data'][direction] == None:
                break;
            pagingId = the_json['data'][direction]
        # Otherwise report the HTTP status code and stop paging.
        else:
            print(res.status_code)
            break
        # Be polite to the API: pause between page requests.
        time.sleep(3)
def load_subreddit(name):
    """Fetch new posts for subreddit ``name`` and persist them to CSV.

    If no CSV exists yet, all available posts are downloaded; otherwise only
    newer posts are fetched and merged, dropping duplicates by the post's
    unique 'name'.  Returns a (subreddit, post_count) tuple.

    NOTE(review): the output path is hard-coded to /home/ubuntu/project/data.
    """
    posts = [] #create empty list for collecting data
    url = 'https://www.reddit.com/r/' + name + '/new/.json' #create url using an argument name
    #check if there is a file with posts of the subreddit
    #if 'no file' parse all available posts and create new dataframe
    if os.path.exists('/home/ubuntu/project/data/'+ name + '.csv') == False:
        load_posts(posts, 'after', 100, url)
        df = pd.DataFrame([p['data'] for p in posts]).drop_duplicates(subset='name')
    #if there is a file
    #load file, parse new posts, add new posts to existed posts and delete duplicates
    else:
        old_posts_df = pd.read_csv('/home/ubuntu/project/data/'+ name + '.csv')
        old_posts_df.drop(['Unnamed: 0'], axis=1,inplace=True)
        load_posts(posts, 'before', 50, url)
        new_posts_df = pd.DataFrame([p['data'] for p in posts]).drop_duplicates(subset='name')
        df = pd.concat([old_posts_df,new_posts_df]).drop_duplicates(subset='name')
    #save data to csv
    df.to_csv('/home/ubuntu/project/data/'+ name + '.csv')
    #check how many posts we have
    print(name, df.shape)
    return (name, df.shape[0])
def save_stat(stats):
    """Append a timestamped block of (name, count) pairs to the stats log.

    ``stats`` is an iterable of (subreddit_name, post_count) tuples.
    """
    # BUG FIX: "with" guarantees the file is flushed and closed even if a
    # write raises (the original leaked the handle on error).
    with open('/home/ubuntu/project/data/stat.txt', 'a+') as f:
        f.write('***********' + str(time.ctime()) + os.linesep)
        for name, count in stats:
            f.write(name + ', ' + str(count) + os.linesep)
# Subreddits to mirror: sports plus a mixed control group.
sport_topics = ['nba', 'baseball', 'soccer','mls', 'hockey', 'mma', 'boxing', 'FIFA']
other_topics = ['Futurology','AskEngineers','AskReddit','AskScience','History','gameofthrones','gottheories','apple','android','mac','MacSucks','iphone',
'Dogfree','aww','dogs']
# Download every subreddit in turn and record the resulting post counts.
stats = []
for x in sport_topics + other_topics:
    stats.append(load_subreddit(x))
save_stat(stats)
| true |
850809fa0ddcc6caee55f640b6cd2d414c74ef1d | Python | elmanhamdi/Whitted-Ray-Tracing | /objects/ray.py | UTF-8 | 2,982 | 2.921875 | 3 | [] | no_license | # CENG 488 Assignment7 by
# Elman Hamdi
# 240201036
# June 2021
from utils import *
class Ray:
    """A ray with an origin and a direction, plus reflection/refraction helpers."""

    def __init__(self, start_pos=None, direction=None):
        """Create a ray from ``start_pos`` travelling along ``direction``.

        BUG FIX: the defaults used to be ``Pos3d(...)``/``Vec3d(...)``
        instances evaluated once at class-definition time, so every
        default-constructed Ray shared the same two objects (the classic
        shared-mutable-default pitfall).  They are now created per call.
        """
        self.start_pos = Pos3d(0, 0, 0, 1) if start_pos is None else start_pos
        self.direction = Vec3d(1, 1, 1, 0) if direction is None else direction

    def __str__(self):
        return '\nRay Properties+\n' +'start_pos: ' + str(self.start_pos) +'\ndirection: ' +str(self.direction)

    @property
    def start_pos(self):
        """Origin of the ray (a Pos3d)."""
        return self.__start_pos

    @start_pos.setter
    def start_pos(self, start_pos):
        self.__start_pos = start_pos

    @property
    def direction(self):
        """Direction of the ray (a Vec3d); not normalised here."""
        return self.__direction

    @direction.setter
    def direction(self, direction):
        self.__direction = direction

    @staticmethod
    def calculated_bounced_ray(coming_ray_dir, surface_normal, intersect_pos):
        """Return the mirror-reflected ray: r = d - 2(d.n)n."""
        tmp = surface_normal.mul(2*coming_ray_dir.dot_product(surface_normal))
        r = coming_ray_dir.sub(tmp)
        return Ray(start_pos=intersect_pos, direction=r)

    @staticmethod
    def calculate_refract_ray(coming_ray_dir, surface_normal, object_ior, intersect_pos):
        """Return the ray refracted through a surface of index ``object_ior``.

        On total internal reflection the direction degenerates to 0, matching
        the reference implementation.
        Formula reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/introduction-to-shading/reflection-refraction-fresnel
        """
        i = coming_ray_dir
        n = surface_normal
        cosi = max(-1, min(1, i.dot_product(n)))
        ior_rate = 1/ object_ior
        if cosi < 0:
            # The ray enters the object.
            cosi = -cosi
        else:
            # The ray leaves the object: swap the indices, flip the normal.
            ior_rate = 1/ior_rate
            n = n*(-1)
        k =1 - (ior_rate**2) * (1- (cosi**2))
        t = 0 if k < 0 else (i*ior_rate).add(n*((ior_rate*cosi) - k**(1/2)))
        return Ray(start_pos = intersect_pos, direction=t)

    @staticmethod
    def calculate_frasnel(coming_ray_dir, surface_normal, object_ior):
        """Return the Fresnel reflectance (fraction of light reflected).

        BUG FIX: the unpolarised average used to be (Rs*Rp + Rp*Rp)/2; the
        correct formula squares both amplitude coefficients, (Rs^2 + Rp^2)/2.
        Normal incidence on glass (ior 1.5) must give 0.04.
        Formula reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/introduction-to-shading/reflection-refraction-fresnel
        """
        i = coming_ray_dir
        n = surface_normal
        etai =1
        etat = object_ior
        cosi = max(-1, min(1, i.dot_product(n)))
        if cosi > 0:
            # Leaving the medium: swap the refraction indices.
            etai = object_ior
            etat = 1
        sint = (etai/etat) * (max(0, 1 - (cosi**2)))**(1/2)
        if sint >= 1:
            # Total internal reflection: everything is reflected.
            k = 1
        else:
            cost = (max(0, 1 - (sint**2)))**(1/2)
            cosi = abs(cosi)
            Rs = ((etat*cosi) -(etai *cost)) / ((etat*cosi) +(etai *cost))
            Rp = ((etai*cosi) -(etat *cost)) / ((etai*cosi) +(etat *cost))
            k = ((Rs*Rs) + (Rp*Rp))/2
        return k
'''
Vec3f refract(const Vec3f &I, const Vec3f &N, const float &ior)
{
float cosi = clamp(-1, 1, dotProduct(I, N));
float etai = 1, etat = ior;
Vec3f n = N;
if (cosi < 0) { cosi = -cosi; } else { std::swap(etai, etat); n= -N; }
float eta = etai / etat;
float k = 1 - eta * eta * (1 - cosi * cosi);
return k < 0 ? 0 : eta * I + (eta * cosi - sqrtf(k)) * n;
}
''' | true |
817dd1703e6ee27474bb462ed0ee2ed01b472d28 | Python | kishoreio/code-kata | /basics/basics1.py | UTF-8 | 96 | 3.140625 | 3 | [] | no_license | N = int(input())
sum1 = 0
if(N<=100000):
for i in range(1,N+1):
sum1+=i
print(sum1)
| true |
bf3cdd455356d21f00d3b5beb64ff351f65d9435 | Python | ssst0n3/college | /app/models.bak.py | UTF-8 | 6,108 | 2.734375 | 3 | [] | no_license | #-*- coding:utf-8 -*-
import MySQLdb
def mysql_con():
global db, cursor
# 打开数据库连接
db = MySQLdb.connect("localhost","root","","college")
# 使用cursor()方法获取操作游标
cursor = db.cursor()
def mysql_clo():
# 关闭数据库连接
cursor.close()
db.close()
# 为信息公告服务,提供按照类型分类的文章
def init_articles_xxgg():
# global type_init, typeName_init, articles_classfied_by_type_init
# return type_init, typeName_init, articles_classfied_by_type_init
return load_articles_classfied_by_type()
# 为后台管理服务,提供按照日期排序的文章
def init_articles_admin():
# global articles_admin_init, articles_admin_columns
articles_admin_columns = ['id','title','content','author','type']
# return articles_admin_init, articles_admin_columns
return load_articles_order_by_date(), articles_admin_columns
# 为后台管理服务, 提供按照日期排序的用户
def init_users_admin():
# global users_admin_init, users_admin_columns
users_admin_columns = ['id','username','password','email','role']
# return users_admin_init, users_admin_columns
return load_users_order_by_id(), users_admin_columns
# 按照文章类型分类
def load_articles_classfied_by_type():
type = ["bgxx", "bkjxjw", "xsgz", "kyyyjs", "dwhzjl"]
typeName = {"bgxx":"办公信息", "bkjxjw":"本科教学教务", "xsgz":"学生工作", "kyyyjs":"科研与研究生", "dwhzjl":"对外合作交流"}
articles_all_type = {}
for t in type:
articles_all_type[t] = load_articles_by_type(t)
return articles_all_type,type,typeName
# 按照类型查询文章
def load_articles_by_type(type):
mysql_con()
# 使用execute方法执行SQL语句
sql = "SELECT * FROM articles WHERE type='" + type + "' order by id;"
cursor.execute(sql)
# 获取数据库查询信息
results = cursor.fetchall()
mysql_clo()
articles_one_type = []
for row in results:
id = row[0]
title = row[1]
content = row[2]
author = row[3]
type = row[4]
article = {'id':id,'title':title,'content':content,'author':author,'type':type}
articles_one_type.append(article)
return articles_one_type
# 按照时间为序查询文章
def load_articles_order_by_date():
mysql_con()
sql = "SELECT * FROM articles"
cursor.execute(sql)
results = cursor.fetchall()
mysql_clo()
articles = []
for row in results:
id = row[0]
title = row[1]
content = row[2]
author = row[3]
type = row[4]
article = {'id':id,'title':title,'content':content,'author':author,'type':type}
articles.append(article)
return articles
# 按照id查询文章
def load_article_by_id(id):
mysql_con()
sql = "SELECT * FROM articles WHERE id = " + str(id)
cursor.execute(sql)
result = cursor.fetchone()
mysql_clo()
article = []
id = result[0]
title = result[1]
content = result[2]
author = result[3]
type = result[4]
article = {'id':id,'title':title,'content':content,'author':author,'type':type}
return article
# 按照id为序查找所有user
def load_users_order_by_id():
mysql_con()
sql = "SELECT * FROM users order by id;"
cursor.execute(sql)
results = cursor.fetchall()
mysql_clo()
users = []
for row in results:
id = row[0]
username = row[1]
password = row[2]
email = row[3]
role = row[4]
user = {"id":id, "username":username, "password":password, "email":email, "role":role}
users.append(user)
return users
# update更新数据库
def update_table_by_id(tableName, column, edit_data, id):
if column != 'id':
mysql_con()
sql = "UPDATE " + tableName + " SET " + column + "='" + edit_data + "' WHERE id=" + id + ";"
print sql
cursor.execute(sql)
db.commit()
mysql_clo()
return 'success'
else:
return 'id cannot be changed'
# # 根据字段、表名查询
# def load_table_by_column_order_by_column(tableName, column1, data, column2):
# mysql_con()
#
# sql = "SELECT * FROM " + tableName + " WHERE " + column1 + "='" + data + "' order by " + column2 + ";"
# print sql
# cursor.execute(sql)
#
# results = cursor.fetchall()
# mysql_clo()
#
# columns
# data = []
# for row in results:
# id = row[0]
# title = row[1]
# content = row[2]
# author = row[3]
# type = row[4]
# article = {'id':id,'title':title,'content':content,'author':author,'type':type}
# articles.append(article)
#
# mysql_clo()
# return articles
# 查询数据表名
# 数据表变化少,可以固定下来
def get_tableName():
mysql_con()
sql_tableName = "SELECT TABLE_NAME FROM information_schema.tables WHERE TABLE_SCHEMA = 'college';"
cursor.execute(sql1)
results = cursor.fetchall()
mysql_clo()
tableName = []
for row in results:
tableName.append(row)
return tableName
# 查询数据字段名
# 数据字段名变化少,可以固定下来
def get_columns_of_table():
columns_all_tables = {}
mysql_con()
for tableName in tableName_init:
sql = "SELECT COLUMN_NAME FROM information_schema.COLUMNS WHERE table_name = 'usres' AND table_schema = 'college';"
cursor.execute(sql)
results = cursor.fetchall()
columns = []
for row in results:
columns.append(row)
columns_all_tables[tableName] = columns
mysql_clo()
return columns_all_tables
articles_admin_columns = ['id','title','content','author','type']
# 初始化
# type_init, typeName_init, articles_classfied_by_type_init = load_articles_classfied_by_type()
# articles_admin_init = load_articles_order_by_date()
# users_admin_init = load_users_order_by_id()
tableName_init = get_tableName()
if __name__ == '__main__':
print load_articles_all_type()
| true |
cc36525c2a0d0a1af520cbec59a48bc01669219f | Python | mahdeesharef/coding-in-turkey-2021 | /problem-1/problem1-solution-mahdeesharef.py | UTF-8 | 338 | 2.71875 | 3 | [] | no_license | def validateRecipe(fridge, ingredients):
for i in ingredients:
if i not in fridge:
return False
return True
validateRecipe(ingredients = ['tomato', 'onion', 'lettuce'],fridge = ['tomato', 'banana', 'apple', 'onion', 'cucumber'])
| true |
3d04db4f7eef5f2f6a12aee5760ba76d8aa4105d | Python | wh-debug/python | /.history/6_1_20210201203413.py | UTF-8 | 604 | 3.640625 | 4 | [] | no_license | '''
Author: Daylight
Date: 2021-02-01 20:18:57
LastEditTime: 2021-02-01 20:33:54
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \python\6_1.py
'''
#todo 字典(简单的字典)
alien_0 = {'color': 'green', 'point': 5} #! 保存两个键值
alien_1 = {'colors': 'red'} #! 最简单的字典
print(alien_0['color']) #? 输出字典的某个键值的方法
print(alien_0['point'])
#todo 假设你射杀了一个外星人,将返回你取得的分数(访问字典中的值)
new_points = alien_0['point']
print(f"You just earned {new_points} points!")
| true |
9e295b2134c3368301a10af67ef6fd2db69933ed | Python | thierryreding/scripts | /pwm/coverage | UTF-8 | 4,020 | 2.671875 | 3 | [] | no_license | #!/usr/bin/python3
#
# example: ./coverage build/pwm linux/pwm.h
#
import argparse, os, subprocess, sys
class Log:
COLOR_NONE = '\033[0m'
COLOR_RED = '\033[31;1m'
COLOR_GREEN = '\033[32;1m'
COLOR_YELLOW = '\033[33;1m'
COLOR_BLUE = '\033[34;1m'
COLOR_MAGENTA = '\033[35;1m'
def __init__(self, colorize = True):
self.color = Log.COLOR_NONE
self.colorize = colorize
self.stack = []
def push(self, obj, color = None):
if not self.colorize:
color = None
if not color:
return str(obj)
self.stack.append(self.color)
self.color = color
return self.color + str(obj)
def pop(self, obj = None):
if self.colorize:
if self.stack:
self.color = self.stack.pop()
else:
raise Exception('unbalanced Log.pop()')
if obj is None:
return ''
if self.colorize:
return self.color + str(obj)
return str(obj)
def wrap(self, obj, color = None):
if not self.colorize:
color = None
if not color:
return str(obj)
return color + str(obj) + Log.COLOR_NONE
def red(self, obj, push = False):
func = self.push if push else self.wrap
return func(obj, Log.COLOR_RED)
def green(self, obj, push = False):
func = self.push if push else self.wrap
return func(obj, Log.COLOR_GREEN)
def yellow(self, obj, push = False):
func = self.push if push else self.wrap
return func(obj, Log.COLOR_YELLOW)
def blue(self, obj, push = False):
func = self.push if push else self.wrap
return func(obj, Log.COLOR_BLUE)
def magenta(self, obj, push = False):
func = self.push if push else self.wrap
return func(obj, Log.COLOR_MAGENTA)
def find_includers(filenames):
for filename in filenames:
proc = subprocess.run(['cscope', '-L8', filename], capture_output = True)
for line in proc.stdout.decode().splitlines():
source, scope, line, statement = line.split(maxsplit = 3)
base, ext = os.path.splitext(source)
if ext == '.h':
source = os.path.sep.join(source.split(os.path.sep)[1:])
yield from find_includers([source])
else:
yield source
def find_file(base, filename):
path = os.path.join(base, filename)
matches = []
if os.path.exists(path):
return [ path ]
with os.scandir(base) as it:
for entry in it:
# skip "source" symlinks because they lead to circles
if entry.is_symlink() and entry.name == 'source':
continue
if entry.is_dir():
match = find_file(os.path.join(base, entry.name), filename)
if match:
matches.extend(match)
return matches
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--colorize', action = 'store_true')
parser.add_argument('KBUILD_OUTPUT')
parser.add_argument('FILENAME', nargs = '+')
args = parser.parse_args()
log = Log(args.colorize)
# update cscope
print('%s generating cscope database...' % log.yellow('*'), end = '')
sys.stdout.flush()
proc = subprocess.run(['make', 'cscope'], capture_output = True)
if proc.returncode != 0:
print('%s' % log.red('failed'))
else:
print('%s' % log.green('done'))
print('%s finding object files in %s:' % (log.yellow('*'), log.magenta(args.KBUILD_OUTPUT)))
for src in find_includers(args.FILENAME):
print(' %s %s...' % (log.yellow('-'), log.blue(src)), end = '')
sys.stdout.flush()
obj = '%s.o' % os.path.splitext(src)[0]
matches = find_file(args.KBUILD_OUTPUT, obj)
if not matches:
print('%s' % log.red('not found'))
else:
print('%s' % log.green('found'))
| true |
a37dda257988b3bfb55a797c652c13ca8ec93627 | Python | brelsford/topology | /my_graph_helpers.py | UTF-8 | 41,125 | 3.015625 | 3 | [] | no_license | import numpy as np
import shapefile
import math
from collections import defaultdict
import networkx as nx
import random
import itertools
import operator
from scipy.cluster.hierarchy import linkage, dendrogram
import json
# import plotly.plotly as ply
# from plotly.graph_objs import *
import my_graph as mg
""" This file includes a bunch of helper functions for my_graph.py.
There are a bunch of basic spatial geometery functions,
some greedy search probablilty functions,
ways to set up and determine the shortest paths from parcel to a road
the code that exists on optimization problem 2: thinking about how to build in
additional connectivity beyond just minimum access, as well as plotting the
associated matrices
code for creating a mygraph object from a shapefile or a list of myfaces
(used for weak dual calculations)
a couple of test graphs- testGraph, (more or less lollipopo shaped) and
testGraphLattice which is a lattice.
"""
#############################
# BASIC MATH AND GEOMETRY FUNCTIONS
#############################
# myG geometry functions
def distance(mynode0, mynode1):
    """Return the Euclidean distance between two nodes with .x/.y coords."""
    return np.sqrt(distance_squared(mynode0, mynode1))


def distance_squared(mynode0, mynode1):
    """Return the squared Euclidean distance between two nodes."""
    dx = mynode0.x - mynode1.x
    dy = mynode0.y - mynode1.y
    return dx**2 + dy**2
def sq_distance_point_to_segment(target, myedge):
    """Return the squared minimum distance from mynode *target* to *myedge*."""
    end_a = myedge.nodes[0]
    end_b = myedge.nodes[1]

    # Degenerate (zero-length) segment: distance to its single point.
    if myedge.length == 0:
        return distance_squared(target, end_a)

    # Target coincides with an endpoint.
    if target == end_a or target == end_b:
        return 0

    # Project the target onto the segment, clamping the parameter to [0, 1]
    # so the nearest point stays on the segment itself.
    seg_x = float(end_b.x - end_a.x)
    seg_y = float(end_b.y - end_a.y)
    u = float((target.x - end_a.x)*seg_x +
              (target.y - end_a.y)*seg_y) / (seg_x*seg_x + seg_y*seg_y)
    u = min(1, max(0, u))

    dx = end_a.x + u*seg_x - target.x
    dy = end_a.y + u*seg_y - target.y
    return dx * dx + dy * dy
def intersect(e1, e2):
    """Return True if myedges e1 and e2 cross each other.

    NOTE: gives incorrect answers for segments that perfectly overlap
    (collinear case) -- a known limitation of this orientation test.
    """
    def counterclockwise(p, q, r):
        return (r.y - p.y)*(q.x - p.x) > (q.y - p.y)*(r.x - p.x)

    p1, p2 = e1.nodes[0], e1.nodes[1]
    q1, q2 = e2.nodes[0], e2.nodes[1]

    # The segments cross iff each pair of endpoints straddles the other line.
    return (counterclockwise(p1, q1, q2) != counterclockwise(p2, q1, q2) and
            counterclockwise(p1, p2, q1) != counterclockwise(p1, p2, q2))
def are_parallel(e1, e2):
    """Return True if myedges e1 and e2 are parallel (including both vertical)."""
    a, b = e1.nodes[0], e1.nodes[1]
    c, d = e2.nodes[0], e2.nodes[1]

    # "Flat" here means vertical in x (zero run), which would make the
    # slope computation divide by zero, so handle those cases first.
    first_vertical = (a.x == b.x)
    second_vertical = (c.x == d.x)

    if first_vertical and second_vertical:
        # both segments vertical
        return True
    if first_vertical != second_vertical:
        # exactly one segment vertical: cannot be parallel
        return False
    # neither vertical: parallel iff slopes match
    return (a.y-b.y)/(a.x-b.x) == (c.y-d.y)/(c.x-d.x)
def segment_distance_sq(e1, e2):
    """Return the square of the minimum distance between myedges e1 and e2.

    Two segments that cross (and are not parallel) are at distance zero;
    otherwise the minimum distance is attained at one of the four endpoints,
    so it is the smallest point-to-segment distance among them.

    Cleanup: the original ``if e1 == e2: sq_distance = 0`` assignment was
    dead code -- the following if/else always overwrote it, and identical
    segments still yield 0 through the endpoint computation.
    """
    # lines are not parallel/colinear and intersect
    if not are_parallel(e1, e2) and intersect(e1, e2):
        return 0
    # lines don't intersect or are parallel: check all four endpoints
    d1 = sq_distance_point_to_segment(e1.nodes[0], e2)
    d2 = sq_distance_point_to_segment(e1.nodes[1], e2)
    d3 = sq_distance_point_to_segment(e2.nodes[0], e1)
    d4 = sq_distance_point_to_segment(e2.nodes[1], e1)
    return min(d1, d2, d3, d4)
# vector math
def bisect_angle(a, b, c, epsilon=0.2, radius=1):
    """Find a point d such that the ray b->d bisects the rays b->a and b->c.

    If the two unit vectors are nearly parallel (|cross product| < epsilon),
    d is instead taken perpendicular to b->a.  The returned MyNode lies at
    distance *radius* from b.
    """
    vec_a = (a.x - b.x, a.y - b.y)
    vec_c = (c.x - b.x, c.y - b.y)

    unit_a = mg.MyNode(vec_a/np.linalg.norm(vec_a))
    unit_c = mg.MyNode(vec_c/np.linalg.norm(vec_c))

    # Nearly parallel vectors would give a degenerate bisector, so fall
    # back to a perpendicular of b->a; otherwise average the unit vectors.
    if abs(np.cross(unit_a.loc, unit_c.loc)) < 0 + epsilon:
        dx = -vec_a[1]
        dy = vec_a[0]
    else:
        dx = (unit_a.x + unit_c.x)/2
        dy = (unit_a.y + unit_c.y)/2

    # rescale the direction (dx, dy) to length radius
    scaled = ((dx, dy)/np.linalg.norm((dx, dy)))*radius
    offset = mg.MyNode(scaled)

    # translate from a vector around b into a node in space
    return mg.MyNode((offset.x + b.x, offset.y + b.y))
def find_negative(d, b):
    """Return the node -d, i.e. d mirrored through b taken as the origin."""
    mirrored_x = -1*(d.x - b.x) + b.x
    mirrored_y = -1*(d.y - b.y) + b.y
    return mg.MyNode((mirrored_x, mirrored_y))
# clean up and probability functions
def WeightedPick(d):
    """Pick a key of dict *d* with probability proportional to its value.

    e.g. for {a: 1, b: 0.6, c: 0.4} this returns "a" 5/10 of the time,
    "b" 3/10 and "c" 2/10 of the time.
    """
    threshold = random.uniform(0, sum(d.values()))
    running_total = 0.0
    for key, weight in d.items():
        running_total += weight
        if threshold < running_total:
            return key
    # Floating-point edge case: fall back to the last key visited.
    return key
def mat_reorder(matrix, order):
    """Reorder a square matrix so both its rows and columns follow *order*."""
    reordered_rows = [matrix[idx] for idx in order]
    return [[row[idx] for idx in order] for row in reordered_rows]
def myRoll(mylist):
    """Roll *mylist* in place, moving its last element to the front."""
    last = mylist.pop()
    mylist.insert(0, last)
    return mylist
######################
# DUALS HElPER
#######################
def form_equivalence_classes(myG, duals=None, verbose=False):
    """Classify myG's inner faces by depth via the stacked weak duals.

    Parameters
    ----------
    myG : the MyGraph to classify.
    duals : optional precomputed list of stacked duals; computed via
        myG.stacked_duals() when not given.
    verbose : if True, run the extra diagnostic check (most verbose prints
        in the original are commented out).

    Returns
    -------
    (result, depth) : result maps each depth to the list of inner faces
    flagged odd at that depth (per f.odd_node); depth is the final search
    depth reached.
    """
    # Reset cached parity bookkeeping on the faces, if the graph has any.
    # Bug fix: this was a bare "except:", which hid every possible error;
    # only a missing inner_facelist attribute is expected here.
    try:
        for f in myG.inner_facelist:
            f.even_nodes = {}
            f.odd_node = {}
    except AttributeError:
        pass

    if not duals:
        duals = myG.stacked_duals()

    depth = 1
    result = {}

    myG.S1_nodes()
    result[depth] = [f for f in myG.inner_facelist if f.odd_node[depth]]

    depth += 1
    if verbose:
        # NOTE(review): test_interior_is_inner is presumably defined
        # elsewhere in this module -- confirm before enabling verbose.
        test_interior_is_inner(myG)

    # Climb the dual stack, classifying deeper faces one level at a time.
    while depth < len(duals):
        duals, depth, result = myG.formClass(duals, depth, result)

    return result, depth
######################
# DEALING WITH PATHS
#######################
def ptup_to_mypath(myG, ptup):
    """Convert a node-tuple path into the list of MyEdges joining
    consecutive nodes, looked up in myG's underlying graph."""
    return [myG.G[n0][n1]['myedge'] for n0, n1 in zip(ptup[:-1], ptup[1:])]
def path_length(path):
    """Return the geometric length of *path*, a list of MyNodes, as the
    sum of distances between consecutive nodes."""
    return sum(distance(prev, nxt) for prev, nxt in zip(path[:-1], path[1:]))
# def path_length_npy(path):
# xy = np.array([n.x,n.y for n in path])
# return np.linalg.norm(xy[1:] - xy[:-1],2,1).sum()
def shorten_path(ptup):
    """Trim leading road nodes from the node list *ptup*.

    Paths found by the pathfinding algorithm start at the fake road side
    and run toward the interior of the parcel.  Drop nodes from the front
    until only the first node lies on a road, eliminating paths that
    travel along a curb before entering the parcel.
    """
    while ptup[1].road is True and len(ptup) > 2:
        del ptup[0]
    return ptup
def segment_near_path(myG, segment, pathlist, threshold):
    """Return True if *segment* lies within distance *threshold* of ANY
    edge in *pathlist* (a list of MyEdges making up a path).

    (The myG argument is accepted but not used by the computation.)
    """
    limit = threshold**2
    return any(segment_distance_sq(path_edge, segment) < limit
               for path_edge in pathlist)
def _fake_edge(myA, centroid, mynode):
    """Add a zero-length helper edge joining *centroid* and *mynode* to myA."""
    shortcut = mg.MyEdge((centroid, mynode))
    shortcut.length = 0
    myA.add_edge(shortcut)
def __add_fake_edges(myA, p, roads_only=False):
    """Connect parcel p's centroid to its boundary nodes with fake edges.

    If roads_only is True, only nodes already lying on a road are
    connected.  Cleanup: the original used list comprehensions purely for
    their side effects; a plain loop states the intent.
    """
    for n in p.nodes:
        if n.road or not roads_only:
            _fake_edge(myA, p.centroid, n)
def shortest_path_setup(myA, p, roads_only=False):
    """Prepare graph myA for a parcel-to-road shortest path search.

    Adds zero-length fake edges from parcel p's centroid to its boundary
    nodes, and from a single fake road origin to every road node with more
    than two neighbors.  Returns (fake_interior, fake_road_origin).

    If roads_only is True, the centroid connects only to nodes already on
    a road.  Bug fix: the flag was previously accepted but never forwarded
    to __add_fake_edges, contradicting this function's own docstring.
    """
    fake_interior = p.centroid
    __add_fake_edges(myA, p, roads_only=roads_only)

    # NOTE(review): hard-coded coordinates place the fake origin far
    # off-grid; presumably specific to the original data set -- confirm
    # before reusing with other data.
    fake_road_origin = mg.MyNode((305620, 8022470))
    for i in myA.road_nodes:
        if len(myA.G.neighbors(i)) > 2:
            _fake_edge(myA, fake_road_origin, i)
    return fake_interior, fake_road_origin
def shortest_path_p2p(myA, p1, p2):
    """Find the shortest fenceline path between interior parcels p1 and p2.

    Temporarily attaches both centroids to road-connected boundary nodes,
    runs a weighted shortest path, then removes the fake nodes again.
    Returns (path_without_centroids, length).
    """
    __add_fake_edges(myA, p1, roads_only=True)
    __add_fake_edges(myA, p2, roads_only=True)

    node_path = nx.shortest_path(myA.G, p1.centroid, p2.centroid, "weight")
    travel = nx.shortest_path_length(myA.G, p1.centroid, p2.centroid,
                                     "weight")

    # clean up: drop the temporary centroid nodes from the graph
    myA.G.remove_node(p1.centroid)
    myA.G.remove_node(p2.centroid)

    return node_path[1:-1], travel
def find_short_paths(myA, parcel, barriers=True, shortest_only=False):
    """Find short fenceline paths from interior *parcel* to the road network.

    Returns a dict mapping each path (a tuple of nodes) to its geometric
    length, and stores it on parcel.paths.  With shortest_only only the
    single shortest path is returned; otherwise all simple paths at most
    two segments longer than the shortest and less than twice its length
    are included.  If barriers is True, barrier edges are removed for the
    search and restored afterwards.

    Raises AssertionError if the parcel already touches a road.
    """
    rb = [n for n in parcel.nodes+parcel.edges if n.road]
    if len(rb) > 0:
        # Bug fix: the "%" operator used to be applied to the exception
        # object itself (raise AssertionError("...") % ...), which raised
        # a TypeError instead of the intended AssertionError.
        raise AssertionError("parcel %s is on a road" % str(parcel))

    if barriers:
        barrier_edges = [e for e in myA.myedges() if e.barrier]
        if len(barrier_edges) > 0:
            myA.remove_myedges_from(barrier_edges)
        else:
            print("no barriers found. Did you expect them?")

    interior, road = shortest_path_setup(myA, parcel)

    shortest_path = nx.shortest_path(myA.G, road, interior, "weight")

    if shortest_only is False:
        shortest_path_segments = len(shortest_path)
        shortest_path_distance = path_length(shortest_path[1:-1])
        # candidate paths: at most 2 extra segments, excluding fake nodes
        all_simple = [shorten_path(p[1:-1]) for p in nx.all_simple_paths(
            myA.G, road, interior, cutoff=shortest_path_segments + 2)]
        # keep only paths shorter than twice the shortest distance
        paths = dict((tuple(p), path_length(p)) for p in all_simple
                     if path_length(p) < shortest_path_distance*2)

    if shortest_only is True:
        p = shorten_path(shortest_path[1:-1])
        paths = {tuple(p): path_length(p)}

    # clean up the fake search endpoints
    myA.G.remove_node(road)
    myA.G.remove_node(interior)

    if barriers:
        for e in barrier_edges:
            myA.add_edge(e)

    parcel.paths = paths
    return paths
def find_short_paths_all_parcels(myA, flist=None, full_path=None,
                                 barriers=True, quiet=False,
                                 shortest_only=False):
    """Find short road-access paths for parcels, stored on parcel.paths.

    If flist is None, all of myA.interior_parcels are processed.  For a
    parcel that already has cached paths, the cache is reused unless the
    most recently built road (full_path, a list of MyEdges) passes near
    one of the cached paths, in which case the paths are recomputed.
    Returns a single dict merging every parcel's {node-tuple: length}
    paths.  Raises AssertionError if a cached parcel has no comparison
    path or turns out to be on a road already.
    """
    all_paths = {}
    counter = 0

    if flist is None:
        flist = myA.interior_parcels

    for parcel in flist:
        # if paths have already been defined for this parcel
        # (full path should exist too)
        if parcel.paths:
            if full_path is None:
                raise AssertionError("comparison path is None "
                                     "but parcel has paths")
            rb = [n for n in parcel.nodes+parcel.edges if n.road]
            if len(rb) > 0:
                raise AssertionError("parcel %s is on a road" % (parcel))
            needs_update = False
            for pathitem in parcel.paths.items():
                path = pathitem[0]
                mypath = ptup_to_mypath(myA, path)
                # NOTE: this local name shadows the module-level
                # path_length() function within this loop body.
                path_length = pathitem[1]
                # invalidate the cache if the new road came near this path
                for e in full_path:
                    if segment_near_path(myA, e, mypath, path_length):
                        needs_update = True
                        # this code would be faster if I could break to
                        # next parcel if update turned true.
                        break

            if needs_update is True:
                paths = find_short_paths(myA, parcel, barriers=barriers,
                                         shortest_only=shortest_only)
                counter += 1
                all_paths.update(paths)
            elif needs_update is False:
                paths = parcel.paths
                all_paths.update(paths)
        # if paths have not been defined for this parcel
        else:
            paths = find_short_paths(myA, parcel, barriers=barriers,
                                     shortest_only=shortest_only)
            counter += 1
            all_paths.update(paths)
    if quiet is False:
        pass
        # print("Shortest paths found for {} parcels".format(counter))

    return all_paths
def build_path(myG, start, finish):
    """Build a road along the shortest weighted path from start to finish.

    The node path is trimmed at both ends (via shorten_path on the path
    and its reverse), every edge along it is added as a road segment, and
    (node_list, edge_list) is returned.
    """
    node_seq = nx.shortest_path(myG.G, start, finish, weight="weight")
    node_seq = shorten_path(node_seq)
    node_seq.reverse()
    node_seq = shorten_path(node_seq)

    edge_seq = ptup_to_mypath(myG, node_seq)
    for segment in edge_seq:
        myG.add_road_segment(segment)

    return node_seq, edge_seq
#############################################
# PATH SELECTION AND CONSTRUCTION
#############################################
def choose_path(myG, paths, alpha, strict_greedy=False):
    """Select a path from *paths*, a {node-tuple: length} dict.

    With strict_greedy the shortest path is always chosen; otherwise the
    pick is random, weighted by 1/length**alpha, so shorter paths are
    chosen more often.  Returns (node_tuple, list_of_myedges).
    """
    if strict_greedy is False:
        # weight each path inversely to its length, sharpened by alpha
        inverse_weights = dict((ptup, 1.0/(plen**alpha))
                               for ptup, plen in paths.items())
        target_ptup = WeightedPick(inverse_weights)
    if strict_greedy is True:
        target_ptup = min(paths, key=paths.get)

    return target_ptup, ptup_to_mypath(myG, target_ptup)
# if outsidein:
# result, depth = form_equivalence_classes(myG)
# while len(flist) < 1:
# md = max(result.keys())
# flist = flist + result.pop(md)
# elif outsidein == False:
# flist = myG.interior_parcels
# ## alternate option:
# # result, depth = form_equivalence_classes(myG)
# # flist = result[3]
def build_all_roads(myG, master=None, alpha=8, plot_intermediate=False,
                    wholepath=True, original_roads=None, plot_original=False,
                    bisect=False, plot_result=False, barriers=False,
                    quiet=True, vquiet=True, strict_greedy=False,
                    outsidein=False):

    """Build roads with the probabilistic greedy algorithm until every
    interior parcel is connected; return the total road length built.

    alpha sharpens the preference for shorter candidate paths (see
    choose_path); strict_greedy always picks the shortest.  wholepath
    builds the entire chosen path at once rather than one segment.
    outsidein selects candidate parcels from the shallowest equivalence
    class instead of the deepest.  NOTE(review): the bisect and
    plot_result parameters are accepted but unused in this body.
    """

    # vquiet (very quiet) implies quiet
    if vquiet is True:
        quiet = True

    if plot_original:
        myG.plot_roads(original_roads, update=False,
                       parcel_labels=False, new_road_color="blue")

    shortest_only = False
    if strict_greedy is True:
        shortest_only = True

    added_road_length = 0
    # plotnum = 0

    if plot_intermediate is True and master is None:
        master = myG.copy()

    myG.define_interior_parcels()

    target_mypath = None
    if vquiet is False:
        print("Begin w {} Int Parcels".format(len(myG.interior_parcels)))

    # before the max depth (md) is calculated, just assume it's very large in
    # in order ensure we find the equivalence classes at least once.
    md = 100

    while myG.interior_parcels:
        # NOTE(review): form_equivalence_classes is also recomputed inside
        # the md > 3 branches below, so this call is redundant work there.
        result, depth = form_equivalence_classes(myG)
        # flist from result!
        flist = []

        if md == 3:
            # every remaining interior parcel is a candidate
            flist = myG.interior_parcels
        elif md > 3:
            if outsidein is False:
                # work from the deepest (most interior) class outward
                result, depth = form_equivalence_classes(myG)
                while len(flist) < 1:
                    md = max(result.keys())
                    flist = flist + result.pop(md)
            elif outsidein is True:
                # work from the shallowest class inward
                result, depth = form_equivalence_classes(myG)
                md = max(result.keys())
                if len(result[md]) == 0:
                    md = md - 2
                flist = list(set(result[3]) - set(result.get(5, [])))

        if quiet is False:
            pass

        # potential segments from parcels in flist
        all_paths = find_short_paths_all_parcels(myG, flist, target_mypath,
                                                 barriers, quiet=quiet,
                                                 shortest_only=shortest_only)

        # choose and build one
        target_ptup, target_mypath = choose_path(myG, all_paths, alpha,
                                                 strict_greedy=strict_greedy)
        if wholepath is False:
            added_road_length += target_mypath[0].length
            myG.add_road_segment(target_mypath[0])

        if wholepath is True:
            for e in target_mypath:
                added_road_length += e.length
                myG.add_road_segment(e)

        myG.define_interior_parcels()
        if plot_intermediate:
            myG.plot_roads(master, update=False)
            # plt.savefig("Int_Step"+str(plotnum)+".pdf", format='pdf')

        remain = len(myG.interior_parcels)
        if quiet is False:
            print("\n{} interior parcels left".format(remain))
        if vquiet is False:
            if remain > 300 or remain in [50, 100, 150, 200, 225, 250, 275]:
                print ("{} interior parcels left".format(remain))

    # update the properties of nodes & edges to reflect new geometry.

    myG.added_roads = added_road_length
    return added_road_length
def bisecting_road(myG):
    """Build one bisecting road across myG after interior access is done.

    Chooses the endpoint pair from bisecting_path_endpoints, builds the
    road between them, adds its length to myG.added_roads and returns it.
    """
    endpoint_a, endpoint_b = bisecting_path_endpoints(myG)
    node_seq, _edges = build_path(myG, endpoint_a, endpoint_b)
    new_length = path_length(node_seq)
    myG.added_roads += new_length
    return new_length
############################
# connectivity optimization
############################
def __road_connections_through_culdesac(myG, threshold=5):
    """Connect every pair of road nodes closer than *threshold* (meters).

    This lets paths cross the head of a cul-de-sac instead of having to
    travel around it.  Returns the list of node pairs connected.
    """
    connected_pairs = []
    road_nodes = [n for n in myG.G.nodes() if n.road is True]
    for n0, n1 in itertools.combinations(road_nodes, 2):
        # guard: both nodes must still be present in the graph
        if n1 in myG.G and n0 in myG.G:
            if distance(n0, n1) < threshold:
                link = mg.MyEdge((n0, n1))
                link.road = True
                myG.add_edge(link)
                connected_pairs.append((n0, n1))
    return connected_pairs
def shortest_path_p2p_matrix(myG, full=False, travelcost=False):
    """Compute the parcel-to-parcel shortest-path-length matrix.

    If full is False, all interior edges are removed so travel occurs only
    along roads; if full is True, interior edges are kept.  Cul-de-sac
    shortcut connections are added in both cases.  If travelcost is True,
    the weight of every non-road edge is increased tenfold.  Defaults
    (False, False) give the roads-only, uniform-cost case.

    Returns (path_len_mat, meantravel); unreachable pairs get NaN length
    (which makes meantravel NaN as well for disconnected graphs).
    """
    copy = myG.copy()

    etup_drop = copy.find_interior_edges()
    if full is False:
        copy.G.remove_edges_from(etup_drop)
        # print("dropping {} edges".format(len(etup_drop)))

    __road_connections_through_culdesac(copy)

    path_mat = []
    path_len_mat = []

    edges = copy.myedges()
    if travelcost is True:
        for e in edges:
            if e.road is False:
                copy.G[e.nodes[0]][e.nodes[1]]['weight'] = e.length*10

    for p0 in copy.inner_facelist:
        path_vec = []
        path_len_vec = []
        __add_fake_edges(copy, p0)
        for p1 in copy.inner_facelist:
            if p0.centroid == p1.centroid:
                length = 0
                path = p0.centroid
            else:
                __add_fake_edges(copy, p1)
                try:
                    path = nx.shortest_path(copy.G, p0.centroid, p1.centroid,
                                            "weight")
                    length = path_length(path[1:-1])
                # Bug fix: this was a bare "except:"; only treat graph /
                # no-path errors as "unreachable" instead of hiding every
                # possible error (including KeyboardInterrupt).
                except nx.NetworkXException:
                    path = []
                    length = np.nan
                copy.G.remove_node(p1.centroid)
            path_vec.append(path)
            path_len_vec.append(length)

        copy.G.remove_node(p0.centroid)
        path_mat.append(path_vec)
        path_len_mat.append(path_len_vec)

    n = len(path_len_mat)
    # mean over ordered pairs, excluding the zero-length diagonal
    meantravel = (sum([sum(i) for i in path_len_mat])/(n*(n-1)))

    return path_len_mat, meantravel
def difference_roads_to_fences(myG, travelcost=False):
    """Compare roads-only travel distances against all-fenceline distances.

    Returns (diff, fullpath_len, path_len, meantravel): diff[j][i] is the
    extra distance incurred by travelling on roads only; pairs unreachable
    by road are assigned the maximum difference plus 150.  NOTE(review):
    the travelcost argument is accepted but unused in this computation.
    """
    fullpath_len, tc = shortest_path_p2p_matrix(myG, full=True)
    path_len, tc = shortest_path_p2p_matrix(myG, full=False)

    # element-wise difference: road-only minus full-fenceline distance
    diff = [[road_d - fence_d
             for road_d, fence_d in zip(road_row, fence_row)]
            for road_row, fence_row in zip(path_len, fullpath_len)]

    dmax = max(max(row) for row in diff)
    # print dmax

    # unreachable (NaN) road-only pairs get a sentinel beyond the maximum
    for j, row in enumerate(path_len):
        for i, val in enumerate(row):
            if np.isnan(val):
                diff[j][i] = dmax + 150

    n = len(path_len)
    meantravel = sum(sum(row) for row in path_len)/(n*(n-1))

    return diff, fullpath_len, path_len, meantravel
def bisecting_path_endpoints(myG):
    """Find the pair of road nodes that a bisecting road would best connect.

    Builds a roads-only copy of the graph (with cul-de-sac shortcuts) and
    returns the two road nodes maximizing the ratio of squared on-road
    distance to squared straight-line distance -- i.e. the pair for which
    a direct new road would save the most travel.
    """
    roads_only = myG.copy()
    etup_drop = roads_only.find_interior_edges()
    roads_only.G.remove_edges_from(etup_drop)
    __road_connections_through_culdesac(roads_only)

    distdict = {}

    for i, j in itertools.combinations(myG.road_nodes, 2):
        geodist_sq = distance_squared(i, j)
        onroad_dist = nx.shortest_path_length(roads_only.G, i, j,
                                              weight='weight')
        dist_diff = onroad_dist**2/geodist_sq
        distdict[(i, j)] = dist_diff

    # Bug fix: dict.iteritems() is Python 2 only and raises AttributeError
    # on Python 3; items() behaves identically here.
    (i, j) = max(distdict.items(), key=operator.itemgetter(1))[0]
    return i, j
################
# GRAPH INSTANTIATION
###################
def graphFromMyEdges(elist, name=None):
    """Build a new MyGraph containing every edge in *elist*."""
    graph = mg.MyGraph(name=name)
    for edge in elist:
        graph.add_edge(edge)
    return graph
def graphFromMyFaces(flist, name=None):
    """Build a new MyGraph from the edges of every face in *flist*."""
    graph = mg.MyGraph(name=name)
    for face in flist:
        for edge in face.edges:
            graph.add_edge(edge)
    return graph
def graphFromShapes(shapes, name, rezero=None):
    """Build a MyGraph named *name* from shapefile *shapes*.

    Coordinates are deduplicated into shared MyNode instances, consecutive
    points of each shape become edges, and each shape is wrapped in a
    MyFace whose edges are all added to the graph.

    rezero : optional offset subtracted from every coordinate; defaults to
        no shift.  (Fix: previously a mutable np.array default argument.)
    """
    if rezero is None:
        rezero = np.array([0, 0])

    nodedict = dict()
    plist = []

    for s in shapes:
        nodes = []
        for k in s.points:
            k = k - rezero
            myN = mg.MyNode(k)
            # reuse the canonical node instance for duplicate coordinates
            if myN not in nodedict:
                nodes.append(myN)
                nodedict[myN] = myN
            else:
                nodes.append(nodedict[myN])
        edges = [(nodes[i], nodes[i+1]) for i in range(0, len(nodes)-1)]
        plist.append(mg.MyFace(edges))

    myG = mg.MyGraph(name=name)

    for p in plist:
        for e in p.edges:
            myG.add_edge(mg.MyEdge(e.nodes))

    return myG
def is_roadnode(node, graph):
"""defines a node as a road node if any connected edges are road edges.
returns true or false and updates the properties of the node. """
graph.G[node].keys()
node.road = False
for k in graph.G[node].keys():
edge = graph.G[node][k]['myedge']
if edge.road is True:
node.road = True
return node.road
return node.road
def is_interiornode(node, graph):
"""defines a node as an interior node if any connected edges are interior
edges. returns true or false and updates the properties of the node. """
graph.G[node].keys()
node.interior = False
for k in graph.G[node].keys():
edge = graph.G[node][k]['myedge']
if edge.interior is True:
node.interior = True
return node.interior
return node.interior
def is_barriernode(node, graph):
"""defines a node as a road node if any connected edges are barrier edges.
returns true or false and updates the properties of the node. """
graph.G[node].keys()
node.barrier = False
for k in graph.G[node].keys():
edge = graph.G[node][k]['myedge']
if edge.barrier is True:
node.barrier = True
return node.barrier
return node.barrier
def graphFromJSON(jsonobj):
"""returns a new mygraph from a json object. calculates interior node
and graph properties from the properties of the edges.
"""
edgelist = []
# read all the edges from json
for feature in jsonobj['features']:
# check that there are exactly 2 nodes
numnodes = len(feature['geometry']['coordinates'])
if numnodes != 2:
raise AssertionError("JSON line feature has {} "
"coordinates instead of 2".format(numnodes))
c0 = feature['geometry']['coordinates'][0]
c1 = feature['geometry']['coordinates'][1]
isinterior = feature['properties']['interior']
isroad = feature['properties']['road']
isbarrier = feature['properties']['barrier']
n0 = mg.MyNode(c0)
n1 = mg.MyNode(c1)
edge = mg.MyEdge((n0, n1))
edge.road = json.loads(isroad)
edge.interior = json.loads(isinterior)
edge.barrier = json.loads(isbarrier)
edgelist.append(edge)
# create a new graph from the edge list, and calculate
# necessary graph properties from the road
new = graphFromMyEdges(edgelist)
new.road_edges = [e for e in new.myedges() if e.road]
new.road_nodes = [n for n in new.G.nodes() if is_roadnode(n, new)]
new.interior_nodes = [n for n in new.G.nodes() if is_interiornode(n, new)]
new.barrier_nodes = [n for n in new.G.nodes() if is_barriernode(n, new)]
# defines all the faces in the graph
new.inner_facelist
# defines all the faces with no road nodes in the graph as interior parcels
new.define_interior_parcels()
return new, edgelist
####################
# PLOTTING FUNCTIONS
####################
# ==============================================================================
# def plot_cluster_mat(clustering_data, plotting_data, title, dmax,
# plot_dendro=True):
# """from http://nbviewer.ipython.org/github/OxanaSachenkova/
# hclust-python/blob/master/hclust.ipynb First input matrix is used to
# define clustering order, second is the data that is plotted."""
#
# fig = plt.figure(figsize=(8, 8))
# # x ywidth height
#
# ax1 = fig.add_axes([0.05, 0.1, 0.2, 0.6])
# Y = linkage(clustering_data, method='single')
# Z1 = dendrogram(Y, orientation='right') # adding/removing the axes
# ax1.set_xticks([])
# # ax1.set_yticks([])
#
# # Compute and plot second dendrogram.
# ax2 = fig.add_axes([0.3, 0.75, 0.6, 0.1])
# Z2 = dendrogram(Y)
# # ax2.set_xticks([])
# ax2.set_yticks([])
#
# # set up custom color map
# c = mcolors.ColorConverter().to_rgb
# seq = [c('navy'), c('mediumblue'), .1, c('mediumblue'),
# c('darkcyan'), .2, c('darkcyan'), c('darkgreen'), .3,
# c('darkgreen'), c('lawngreen'), .4,c('lawngreen'),c('yellow'),.5,
# c('yellow'), c('orange'), .7, c('orange'), c('red')]
# custommap = make_colormap(seq)
#
# # Compute and plot the heatmap
# axmatrix = fig.add_axes([0.3, 0.1, 0.6, 0.6])
#
# if not plot_dendro:
# fig = plt.figure(figsize=(8, 8))
# axmatrix = fig.add_axes([0.05, 0.1, 0.85, 0.8])
#
# idx1 = Z1['leaves']
# D = mat_reorder(plotting_data, idx1)
# im = axmatrix.matshow(D, aspect='auto', origin='lower', cmap=custommap,
# vmin=0, vmax=dmax)
# axmatrix.set_xticks([])
# axmatrix.set_yticks([])
#
# # Plot colorbar.
# h = 0.6
# if not plot_dendro:
# h = 0.8
# axcolor = fig.add_axes([0.91, 0.1, 0.02, h])
# plt.colorbar(im, cax=axcolor)
# ax2.set_title(title)
# if not plot_dendro:
# axmatrix.set_title(title)
#
#
# def make_colormap(seq):
# """Return a LinearSegmentedColormap
# seq: a sequence of floats and RGB-tuples. The floats should be increasing
# and in the interval (0,1).
# """
# seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
# cdict = {'red': [], 'green': [], 'blue': []}
# for i, item in enumerate(seq):
# if isinstance(item, float):
# r1, g1, b1 = seq[i - 1]
# r2, g2, b2 = seq[i + 1]
# cdict['red'].append([item, r1, r2])
# cdict['green'].append([item, g1, g2])
# cdict['blue'].append([item, b1, b2])
# return mcolors.LinearSegmentedColormap('CustomMap', cdict)
# ==============================================================================
# ==============================================================================
# def plotly_traces(myG):
# """myGraph to plotly trace """
#
# # add the edges as disconnected lines in a trace
# edge_trace = Scatter(x=[], y=[], mode='lines',
# name='Parcel Boundaries',
# line=Line(color='grey', width=0.5))
# road_trace = Scatter(x=[], y=[], mode='lines',
# name='Road Boundaries',
# line=Line(color='black', width=2))
# interior_trace = Scatter(x=[], y=[], mode='lines',
# name='Interior Parcels',
# line=Line(color='red', width=2.5))
# barrier_trace = Scatter(x=[], y=[], mode='lines',
# name='Barriers',
# line=Line(color='green', width=0.75))
#
# for i in myG.connected_components():
# for edge in i.myedges():
# x0, y0 = edge.nodes[0].loc
# x1, y1 = edge.nodes[1].loc
# edge_trace['x'] += [x0, x1, None]
# edge_trace['y'] += [y0, y1, None]
# if edge.road:
# road_trace['x'] += [x0, x1, None]
# road_trace['y'] += [y0, y1, None]
# if edge.interior:
# interior_trace['x'] += [x0, x1, None]
# interior_trace['y'] += [y0, y1, None]
# if edge.barrier:
# barrier_trace['x'] += [x0, x1, None]
# barrier_trace['y'] += [y0, y1, None]
#
# return [edge_trace, road_trace, interior_trace, barrier_trace]
#
#
# def plotly_graph(traces, filename=None, title=None):
#
# """ use ply.iplot(fig,filename) after this function in ipython notebok to
# show the resulting plotly figure inline, or url=ply.plot(fig,filename) to
# just get url of resulting fig and not plot inline. """
#
# if filename is None:
# filename = "plotly_graph"
# fig = Figure(data=Data(traces),
# layout=Layout(title=title, plot_bgcolor="rgb(217,217,217)",
# showlegend=True,
# xaxis=XAxis(showgrid=False, zeroline=False,
# showticklabels=False),
# yaxis=YAxis(showgrid=False, zeroline=False,
# showticklabels=False)))
#
# # ply.iplot(fig, filename=filename)
# # py.iplot(fig, filename=filename)
#
# return fig, filename
#
# ==============================================================================
######################
# IMPORT & Running FUNCTIONS #
#####################
def import_and_setup(filename, threshold=1, component=None,
rezero=np.array([0, 0]), byblock=True, name=""):
""" threshold defines the minimum distance (in map units) between two nodes
before they are combined into a single node during the clean up phase. This
helps to handle poorly written polygon geometery.
Component is an option that lets you return a single block (they're ordered
by number of nodes, where 0 is the largest) instead of all of the blocks in
the map.
byblock = True runs the clean up geometery procedure on each original
block individually, rather than all the blocks together. This makes the
clean up process a lot faster for large numbers of blocks, but if there are
pieces of a block that are supposed to be connected, but are not in the
original map.
"""
# plt.close('all')
# check that rezero is an array of len(2)
# check that threshold is a float
sf = shapefile.Reader(filename)
myG1 = graphFromShapes(sf.shapes(), name, rezero)
print("shape file loaded")
myG1 = myG1.clean_up_geometry(threshold, byblock)
print("geometery cleaned up")
xmin = min([n.x for n in myG1.G.nodes()])
ymin = min([n.y for n in myG1.G.nodes()])
rezero_vector = np.array([xmin, ymin])
myG2 = rescale_mygraph(myG1, rezero=rezero_vector)
myG2.rezero_vector = rezero_vector
if component is None:
return myG2
else:
return myG2.connected_components()[component]
def rescale_mygraph(myG, rezero=np.array([0, 0]), rescale=np.array([1, 1])):
"""returns a new graph (with no interior properties defined), rescaled under
a linear function newloc = (oldloc-rezero)*rescale where all of those are
(x,y) numpy arrays. Default of rezero = (0,0) and rescale = (1,1) means
the locations of nodes in the new and old graph are the same.
"""
scaleG = mg.MyGraph()
for e in myG.myedges():
n0 = e.nodes[0]
n1 = e.nodes[1]
nn0 = mg.MyNode((n0.loc-rezero)*rescale)
nn1 = mg.MyNode((n1.loc-rezero)*rescale)
scaleG.add_edge(mg.MyEdge((nn0, nn1)))
return scaleG
def build_barriers(barriers):
for b in barriers:
b.barrier = True
b.road = False
for n in b.nodes:
n.barrier = True
n.road = False
####################
# Testing functions
###################
def test_edges_equality():
"""checks that myGraph points to myEdges correctly """
testG = testGraph()
testG.trace_faces()
outerE = list(testG.outerface.edges)[0]
return outerE is testG.G[outerE.nodes[0]][outerE.nodes[1]]['myedge']
def test_dual(myG):
""" plots the weak duals based on testGraph"""
S0 = myG.weak_dual()
myG.plot_roads(update=False)
S0.plot(node_color='g', edge_color='g', width=3)
def test_nodes(n1, n2):
""" returns true if two nodes are evaluated as the same"""
eq_num = len(set(n1).intersection(set(n2)))
is_num = len(set([id(n) for n in n1])
.intersection(set([id(n) for n in n2])))
print("is eq? ", eq_num, "is is? ", is_num)
def test_interior_is_inner(myG):
myG.inner_facelist
myG.interior_parcels
in0 = myG.interior_parcels[0]
ans = in0 in myG.inner_facelist
# print("interior in inner_facelist is {}".format(ans))
return ans
def testGraph():
n = {}
n[1] = mg.MyNode((0, 0))
n[2] = mg.MyNode((0, 1))
n[3] = mg.MyNode((0, 2))
n[4] = mg.MyNode((0, 3))
n[5] = mg.MyNode((1, 2))
n[6] = mg.MyNode((1, 3))
n[7] = mg.MyNode((0, 4))
n[8] = mg.MyNode((-1, 4))
n[9] = mg.MyNode((-1, 3))
n[10] = mg.MyNode((-1, 2))
n[11] = mg.MyNode((1, 4))
n[12] = mg.MyNode((-2, 3))
lat = mg.MyGraph(name="S0")
lat.add_edge(mg.MyEdge((n[1], n[2])))
lat.add_edge(mg.MyEdge((n[2], n[3])))
lat.add_edge(mg.MyEdge((n[2], n[5])))
lat.add_edge(mg.MyEdge((n[3], n[4])))
lat.add_edge(mg.MyEdge((n[3], n[5])))
lat.add_edge(mg.MyEdge((n[3], n[9])))
lat.add_edge(mg.MyEdge((n[4], n[5])))
lat.add_edge(mg.MyEdge((n[4], n[6])))
lat.add_edge(mg.MyEdge((n[4], n[7])))
lat.add_edge(mg.MyEdge((n[4], n[8])))
lat.add_edge(mg.MyEdge((n[4], n[9])))
lat.add_edge(mg.MyEdge((n[5], n[6])))
lat.add_edge(mg.MyEdge((n[6], n[7])))
lat.add_edge(mg.MyEdge((n[7], n[8])))
lat.add_edge(mg.MyEdge((n[8], n[9])))
lat.add_edge(mg.MyEdge((n[9], n[10])))
lat.add_edge(mg.MyEdge((n[3], n[10])))
lat.add_edge(mg.MyEdge((n[2], n[10])))
lat.add_edge(mg.MyEdge((n[7], n[11])))
lat.add_edge(mg.MyEdge((n[6], n[11])))
lat.add_edge(mg.MyEdge((n[10], n[12])))
lat.add_edge(mg.MyEdge((n[8], n[12])))
return lat
def testGraphLattice(n, xshift=0, yshift=0, scale=1):
"""returns a square lattice of dimension nxn """
nodelist = {}
for j in range(0, n**2):
x = (math.fmod(j, n))*scale + xshift
y = (math.floor(j/n))*scale + yshift
nodelist[j] = mg.MyNode((x, y))
edgelist = defaultdict(list)
for i in nodelist.keys():
ni = nodelist[i]
for j in nodelist.keys():
nj = nodelist[j]
if ni != nj:
if distance(ni, nj) == scale:
edgelist[ni].append(nj)
myedgelist = []
for n1 in edgelist.keys():
n2s = edgelist[n1]
for n2 in n2s:
myedgelist.append(mg.MyEdge((n1, n2)))
lattice = graphFromMyEdges(myedgelist)
lattice.name = "lattice"
return lattice
def testGraphEquality():
n = {}
n[1] = mg.MyNode((0, 0))
n[2] = mg.MyNode((0, 1))
n[3] = mg.MyNode((1, 1))
n[4] = mg.MyNode((1, 0))
n[5] = mg.MyNode((0, 0)) # actually equal
n[6] = mg.MyNode((0.0001, 0.0001)) # within rounding
n[7] = mg.MyNode((0.1, 0.1)) # within threshold
n[8] = mg.MyNode((0.3, 0.3)) # actually different
G = mg.MyGraph(name="S0")
G.add_edge(mg.MyEdge((n[1], n[2])))
G.add_edge(mg.MyEdge((n[2], n[3])))
G.add_edge(mg.MyEdge((n[3], n[4])))
G.add_edge(mg.MyEdge((n[4], n[5])))
G.add_edge(mg.MyEdge((n[5], n[6])))
G.add_edge(mg.MyEdge((n[6], n[7])))
G.add_edge(mg.MyEdge((n[7], n[8])))
return G, n
def json_test(test_geojson):
""" If the good geoJSON request does not show an OK status message, the
validation server is down. """
validate_endpoint = 'http://geojsonlint.com/validate'
good_geojson = '{"type": "Point", "coordinates": [-100, 80]}'
good_request = requests.post(validate_endpoint, data=good_geojson)
test_request = requests.post(validate_endpoint, data=test_geojson)
print("hard coded good geoJSON:")
print(good_request.json())
print("status for test geojson:")
print(test_request.json())
def __centroid_test():
n = {}
n[1] = mg.MyNode((0, 0))
n[2] = mg.MyNode((0, 1))
n[3] = mg.MyNode((1, 1))
n[4] = mg.MyNode((1, 0))
n[5] = mg.MyNode((0.55, 0))
n[6] = mg.MyNode((0.5, 0.9))
n[7] = mg.MyNode((0.45, 0))
n[8] = mg.MyNode((0.4, 0))
n[9] = mg.MyNode((0.35, 0))
n[10] = mg.MyNode((0.3, 0))
n[11] = mg.MyNode((0.25, 0))
nodeorder = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 1]
nodetups = [(n[nodeorder[i]], n[nodeorder[i+1]])
for i in range(0, len(nodeorder)-1)]
edgelist = [mg.MyEdge(i) for i in nodetups]
f1 = mg.MyFace(nodetups)
S0 = graphFromMyFaces([f1])
S0.define_roads()
S0.define_interior_parcels()
S0.plot_roads(parcel_labels=True)
return S0, f1, n, edgelist
def testmat():
testmat = []
dim = 4
for i in range(0, dim):
k = []
for j in range(0, dim):
k.append((i-j)*(i-j))
testmat.append(k)
return testmat
def build_lattice_barrier(myG):
edgesub = [e for e in myG.myedges()
if e.nodes[0].y == 0 and e.nodes[1].y == 0]
# barrieredges = [e for e in edgesub if e.nodes[1].y == 0]
for e in edgesub:
myG.remove_road_segment(e)
e.barrier = True
myG.define_interior_parcels()
return myG, edgesub
if __name__ == "__main__":
master = testGraphLattice(7)
master.name = "Lat_0"
master.define_roads()
master.define_interior_parcels()
# S0, barrier_edges = build_lattice_barrier(S0)
# barGraph = graphFromMyEdges(barrier_edges)
S0 = master.copy()
# S0.plot_roads(master, update=False, new_plot=True)
test_dual(S0)
S0 = master.copy()
new_roads_i = build_all_roads(S0, master, alpha=2, wholepath=True,
barriers=False, plot_intermediate=False,
strict_greedy=True, vquiet=True,
outsidein=True)
S0.plot_roads()
print("outside to in" + str(new_roads_i))
S0 = master.copy()
new_roads_i = build_all_roads(S0, master, alpha=2, wholepath=True,
barriers=False, plot_intermediate=True,
strict_greedy=True, vquiet=True,
outsidein=False)
S0.plot_roads()
print("inside out" + str(new_roads_i))
| true |
d79efd6e55fb9c4d6f50132cf671ddf0456eaa60 | Python | toanloi2569/short_code | /regex.py | UTF-8 | 382 | 3.140625 | 3 | [] | no_license | import re
partern = r'^[0-9a-z]{8}$'
string = 'ms201669'
# re.compile(pattern, flags=0)
# Biên dịch 1 regex expression thành 1 regex expression object. Từ đó có thể dùng các hàm match(), search()
prog = re.compile(pattern)
result = prog.match(string)
#re.finditer(pattern, string, flags=0)
# Trả về iterator match objects
matchs = re.finditer(pattern, string)
| true |
acb5e6d432ea256c1836198856f77564dee26278 | Python | mavb86/ejercicios-python | /seccion4/bucles/ejercicio12.py | UTF-8 | 430 | 4.09375 | 4 | [] | no_license | # Ejercicio 12
#
# Realizar un algoritmo para determinar cuánto ahorrará una persona en un año, si al final de cada mes
# deposita cantidades variables de dinero; además, se quiere saber cuánto lleva ahorrado cada mes.
ahorro_acum = 0
for mes in range(1,13):
ahorro_mes = float(input("¿Cuánto has ahorrado en el mes %d ?:" % mes))
ahorro_acum += ahorro_mes
print("Actualmente, llevas ahorrado: ", ahorro_acum)
| true |
f03c880a5c1db9ed8ed949e4411924c6f400e4b0 | Python | tanxiumei/interfaceWushui | /temp.py | UTF-8 | 1,527 | 2.71875 | 3 | [] | no_license | import os
import requests
# 定义一个common的类,它的父类是object
class Common(object):
# common的构造函数
def __init__(self):
# 被测系统的根路由
self.url_root = 'http://127.0.0.1:12356'
# 封装你自己的get请求,uri是访问路由,params是get请求的参数,如果没有默认为空
def get(self, uri, params=''):
# 拼凑访问地址
url = self.url_root + uri + params
# 通过get请求访问对应地址
res = requests.get(url)
# 返回request的Response结果,类型为requests的Response类型
return res
# 封装你自己的post方法,uri是访问路由,params是post请求需要传递的参数,如果没有参数这里为空
def post(self, uri, params=''):
# 拼凑访问地址
url = self.url_root + uri
if len(params) > 0:
# 如果有参数,那么通过post方式访问对应的url,并将参数赋值给requests.post默认参数data
# 返回request的Response结果,类型为requests的Response类型
res = requests.post(url, data=params)
else:
# 如果无参数,访问方式如下
# 返回request的Response结果,类型为requests的Response类型
res = requests.post(url)
return res
import logging
filename1 = os.path.join(os.getcwd()) + '\log.txt'
logging.basicConfig(filename=filename1, level=logging.DEBUG)
logging.debug('this is a message1111')
| true |
1c923fa07b48b9f998dee64a76d61ba6728cab83 | Python | fdavis/learning-python | /algo-data-structures/maze/lib/car.py | UTF-8 | 551 | 3.296875 | 3 | [] | no_license | #!/usr/bin/python
# coding=utf-8
import util
import constants as C
class car:
def __init__(self, pos = None, dir = None):
if pos:
self.position = util.coordinate(pos)
else:
self.position = C.DEFAULT_POSITION
if dir:
self.direction = dir
else:
self.direction = C.DEFAULT_DIRECTION
self.touching = None
def move(squares = 1):
self.position += C.DIRECTION_POSITION_MAP(self.direction) * squares
def __repr__(self):
return "Pos: " + str(self.position) + " Dir:" + str(self.direction)
| true |
7d3f39b643d555f7586f406cad82b91bd87469f3 | Python | Ivanovich64/PyPlayground | /PySweeper/main.py | UTF-8 | 608 | 2.59375 | 3 | [] | no_license | # # # # # # #
# # # # # # #
# # PySweeper # #
# # # # # # #
# # # # # # #
# by: PieChai
# Python 3.8.2
# Enjoy
from matrixFunc import *
from validations import *
# Welcome Screen
print(" Welcome to PySweeper ".center(35,"~")+"\n Select your difficulty:\n (1) Crybaby\n (2) n00b\n (3) Yeah, I lift bro.\n (4) I've installed Gentoo one-handed and only half screen working.\n (5) Whatever.\n (10000) Surprise Me. \n (0) wtf is this, I wanna get out!")
#map=makeMtrx(validateDiff(input("\n\n> ")))
showMap(makeMtrx(takeDiff(validateDiff(input("\n\n > ")))))
#print(map)
| true |
85ef82e5a1b47cf007abecaf3e642f395b913feb | Python | goo-goo-goo-joob/Catalan-Number | /Calculate/management/commands/secret.py | UTF-8 | 618 | 2.671875 | 3 | [] | no_license | from os import urandom
from django.core.management.base import BaseCommand
alphabet = r"0123456789qwertyuiopasdfghjklzxcvbnm[];',./{}:>?<!@#$%^&*()_+=-"
def get_int():
n = 0
for _ in range(4):
n = (n << 2) + ord(urandom(1))
return n
class Command(BaseCommand):
help = 'Init secret file'
requires_migrations_checks = False
requires_system_checks = False
def handle(self, *args, **options):
with open("secret.txt", "w") as f:
for i in range(100):
f.write(alphabet[get_int() % len(alphabet)])
self.stdout.write('Created successfully')
| true |
3c4df60cf32d304d25bbc60bb0f13819d3511cd4 | Python | jahona/pdf_encrypt | /encrypt.py | UTF-8 | 634 | 2.921875 | 3 | [] | no_license | import PyPDF2
import os
class Encrypt():
def __init__(self):
super().__init__()
@staticmethod
def do(path, password):
print(path, password)
pdfFile = open(path, 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFile)
pdfWriter = PyPDF2.PdfFileWriter()
for pageNum in range(pdfReader.numPages):
pdfWriter.addPage(pdfReader.getPage(pageNum))
pdfWriter.encrypt(password)
newFileName = os.path.basename(path)
pdfFile_new = open(newFileName, 'wb')
pdfWriter.write(pdfFile_new)
pdfFile_new.close()
pdfFile.close()
| true |
7d7b3f0f5ec4dcfeb5b29cbfe641516fd9c46943 | Python | ccdle12/RSA-Example | /python/rsa.py | UTF-8 | 5,934 | 3.953125 | 4 | [] | no_license | import unittest
import math
class RSA:
def __init__(self, p, q, e):
# Calcualte N, half of the public key.
# p and q are prime numbers. N is the product.
self.P = p
self.Q = q
self.N = self.P * self.Q
# Set the variable e, the other half of the public key, relatively
# prime to the totient of pq.
# TODO: (ccdle12) calculate this correctly.
self.E = e
# Calculate the totient of N.
self.totient_N = self.calculate_totient_N(self.P, self.Q)
# Calculate the private key = D.
self.D = self.calculate_private_key(self.E, self.totient_N)
# Calculates totient(N) = the amount of numbers shared that have the
# gcd(p, q) = 1.
def calculate_totient_N(self, p, q):
return (p-1)*(q-1)
# Calculate the private key = D.
def calculate_private_key(self, e, totient):
# Brute force example.
# return self.priv_key_brute_force(e, totient)
return self.modinv(e, totient)
# Function implementing extended
# euclidean algorithm
def ecd_recursive(self, e, phi):
# Base case, once e is 0, we have found the gcd.
if e == 0:
return (phi, 0, 1)
else:
g, y, x = self.ecd_recurisve(phi % e, e)
# We are returning = (gcd, bezout_X, bezout_Y).
return (g, x - (phi // e) * y, y)
# Iterative implementation of the extended euclidean algorithm.
def ecd_iterative(self, e, phi):
# Initialise the bezouts coefficients and (u,v) to help calculate
# (bezout_X, bezout_Y).
bezout_X, bezout_Y = 0,1
u,v = 1,0
# Assign the args to a and b.
a = e
b = phi
# Work our way down, until a is 0, meaning the previous iteration is
# the gcd(a,b).
while a != 0:
# Calculate the quotient and remainder.
quotient, remainder = b // a, b % a
# Calculate m and n. They will be used to assign the values to u,v,
# which will be used in the next round for calculating (x,y) the
# bezouts coefficients.
m, n = bezout_X - u * quotient, bezout_Y - v * quotient
# Shift the values.
b, a = a, remainder
bezout_X, bezout_Y = u, v
u, v = m, n
# Let's make it more obvious we are returning the gcd.
gcd = b
return b, bezout_X, bezout_Y
# Function to compute the modular inverse
def modinv(self, e, phi):
print("ENTRY POINT:E: {} | PHI: {}".format(e, phi))
g, x, y = self.ecd_iterative(e, phi)
print("x returned: {}".format(x % phi))
return x % phi
# Calculate private key using brute force.
def priv_key_brute_force(self, e, totient):
# Brute force solution.
# Time is O(N-1)
for i in range(self.N - 1):
d = (self.E * i) % totient
if d == 1:
return i
return None
# Encrypt a message space and return it as a cipher text.
def encrypt(self, m):
return m**self.E % self.N
# Decrypt a cipher text and return the original message space or in other
# words, reverse the transformation according to the bijection from
# f: M->C.
def decrypt(self, c):
return c**self.D % self.N
class RSATest(unittest.TestCase):
# Test that we can initilize an instance of RSA.
def test_init(self):
rsa = RSA(2, 3, 1)
self.assertIsNotNone(rsa)
# Test the member variables are correct.
def test_member_variables(self):
rsa = RSA(13, 19, 3)
self.assertEqual(13, rsa.P)
self.assertEqual(13*19, rsa.N)
# Test that we can pass the 'e', the other half of the public key. 'e' is
# relatively prime to totient(pq). In this case we will just pass 3.
def test_passing_e(self):
rsa = RSA(11, 17, 3)
self.assertEqual(3, rsa.E)
# Test that we can calculate the totient_N, which is used in finding 'd',
# the private key and multiplicative inverse of e.
# We are passing 17 as e, since it is between `1 to n` and is coprime to `n`.
def test_totient_N(self):
rsa = RSA(3, 11, 17)
self.assertEqual(20, rsa.totient_N)
# Test that we can generate the private key `d`.
def test_private_key_gen_0(self):
# N = p * q = 14
# totient_N = 6
# Public Key: e = 5, n = 14
# Private Key: d = 5, n = 14
# e MUST BE...
# * 1 < e < totient(N)
# * e = coprime with N
# e = 5
rsa = RSA(p=2, q=7, e=5)
self.assertEqual(5, rsa.D)
# Test that we can generate the private key `d`.
def test_private_key_gen_1(self):
# N = p * q = 33
# totient_N = 20
# Public Key: e = 17, n = 20
# Private Key: d = 13, n = 20
# e MUST BE...
# * 1 < e < totient(N)
# * e = coprime with N
# e = 13
rsa = RSA(p=3, q=11, e=17)
self.assertEqual(1, rsa.E * rsa.D % rsa.totient_N)
self.assertEqual(13, rsa.D)
# Test that we can encrypt a message.
def test_encryption(self):
rsa = RSA(p=3, q=11, e=17)
# The message space for encryption.
msg = 9
# Ee(m) = encrytion transformation.
# c = cipher text.
c = rsa.encrypt(msg)
self.assertEqual(c, 15)
# Test that we can decrypt a message.
def test_decrypt(self):
# The cipher text can ONLY be decrypted using the private-key `d`.
rsa = RSA(p=3, q=11, e=17)
# The message space for encryption.
msg = 9
# Ee(m) = encrytion transformation.
# c = cipher text.
c = rsa.encrypt(msg)
# m = message space.
m = rsa.decrypt(c)
self.assertEqual(m, msg)
if __name__ == "__main__":
unittest.main()
| true |
03cade6dd43daccc735ea36a362c275f706471fc | Python | chenhh/Uva | /uva_10100_string.py | UTF-8 | 1,800 | 3.796875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Authors: Hung-Hsin Chen <chenhh@par.cse.nsysu.edu.tw>
License: GPL v2
status: AC
difficulty: 2
https://uva.onlinejudge.org/external/101/10100.pdf
longest common sub-sequence of words.
Note: the word is case-sensitive.
"""
import sys
def lcs(line1, line2):
"""
line1: list of words, the first word is empty.
line2: list of words, the first word is empty.
"""
len1 = len(line1)
len2 = len(line2)
# n_row: len2, n_col: len1
length = [[0] * len2 for _ in range(len1)]
for idx in range(1, len1):
for jdx in range(1, len2):
if line1[idx] == line2[jdx]:
length[idx][jdx] = length[idx - 1][jdx - 1] + 1
else:
length[idx][jdx] = max(length[idx - 1][jdx],
length[idx][jdx - 1])
return length[len1 - 1][len2 - 1]
def parse(data):
""" a word contains only alphabet """
if not data:
return None
words = ["", ]
stack = []
for c in data:
if c.isalnum():
stack.append(c)
else:
if stack:
words.append("".join(stack))
stack.clear()
if stack:
words.append("".join(stack))
stack.clear()
return words
def main():
recs = iter(sys.stdin.readlines())
case = 0
while True:
try:
case += 1
line1 = parse(next(recs).strip())
line2 = parse(next(recs).strip())
if line1 is None or line2 is None:
print("{:>2d}. Blank!".format(case))
continue
length = lcs(line1, line2)
print("{:>2d}. Length of longest match: {}".format(case, length))
except (StopIteration):
break
if __name__ == '__main__':
main()
| true |
a53f9d4513f9fe05c83ef132b8ebe111005c636f | Python | AZAZAZAZ1/first_re-pository | /vedio-69.py | UTF-8 | 6,307 | 3.15625 | 3 | [] | no_license |
import random
class Coin:
def __init__(self,rare=False,clean=True,heads=True,**kwargs):#Kwargs is to back in dictionary the upacked ** data
for key,value in kwargs.items():
setattr(self,key,value)
self.is_rare=rare
self.is_clean=clean
self.heads=heads
if self.is_rare:
self.value=self.original_value*1.25
else:
self.value=self.original_value
if self.is_clean:
self.colour=self.clean_colour
else:
self.colour=self.rusty_colour
def rust(self):
self.colour=self.rusty_colour
def clean(self):
self.colour=self.clean_colour
def __del__(self):#destructor
print('coin spent')
def flip(self):
heads_options=[True,False]
choice=random.choice(heads_options)
def __str__(self):
if self.orginal_value>=1:
return"{} coin".format(int(self,original_value))
else:
return"{}p coin".format(int(self.original_value*100))
#==================================================
class One_pence(Coin):
def __init__(self):
data={
"original_value":0.01,
"clean_colour":'bronze' ,
"rusty_colour":'brownesh',
"num_edges":1,
"diameter":20.3,
"thickness":1.52,
"mass":3.56
}
super().__init__(**data)
#=================================================
class Two_pence(Coin):
def __init__(self):
data={
"original_value":0.02,
"clean_colour":'bronze' ,
"rusty_colour":'brownesh',
"num_edges":1,
"diameter":25.9,
"thickness":1.85,
"mass":7.12
}
super().__init__(**data)
#===================================================
class Five_pence(Coin):
def __init__(self):
data={
"original_value":0.05,
"clean_colour":'silver' ,
"rusty_colour":None,
"num_edges":1,
"diameter":18.0,
"thickness":1.77,
"mass":3.25
}
super().__init__(**data)
# because the silver is not rusting:
def rust (self):
self.colour=self.clean_colour
def clean(self):
self.colour=self.clean_colour
#======================================================================
class Ten_pence(Coin):
def __init__(self):
data={
"original_value":0.10,
"clean_colour":'silver' ,
"rusty_colour":None,
"num_edges":1,
"diameter":24.5,
"thickness":1.85,
"mass":6.5
}
super().__init__(**data)
# because the silver is not rusting:
def rust (self):
self.colour=self.clean_colour
def clean(self):
self.colour=self.clean_colour
#====================================================================
class Twenty_pence(Coin):
def __init__(self):
data={
"original_value":0.20,
"clean_colour":'silver' ,
"rusty_colour":None,
"num_edges":7,
"diameter":21.4,
"thickness":1.7,
"mass":5
}
super().__init__(**data)
# because the silver is not rusting:
def rust (self):
self.colour=self.clean_colour
def clean(self):
self.colour=self.clean_colour
#====================================================================
class Fifty_pence(Coin):
def __init__(self):
data={
"original_value":0.50,
"clean_colour":'silver' ,
"rusty_colour":None,
"num_edges":7,
"diameter":27.3,
"thickness":1.78,
"mass":8
}
super().__init__(**data)
# because the silver is not rusting:
def rust (self):
self.colour=self.clean_colour
def clean(self):
self.colour=self.clean_colour
#====================================================================
class Two_pound(Coin):
def __init__(self):
data={
"original_value":2,
"clean_colour":'gold&silver' ,
"rusty_colour":'greenesh',
"num_edges":1,
"diameter":28.4,
"thickness":2.5,
"mass":12.00
}
super().__init__(**data)
#================================================================
class One_pound(Coin):
def __init__(self):
data={
"original_value":1,
"clean_colour":'gold' ,
"rusty_colour":'greenesh',
"num_edges":1,
"diameter":22.5,
"thickness":3.15,
"mass":9.5
}
super().__init__(**data) # super is used to get all parent class data inorder to avoide override by the child class. note: [parent class can be in another library] .**data:umpack keyword argument
#=================================================================
coins=[One_pence,Two_pence, Five_pence,Ten_pence, Twenty_pence, Fifty_pence, One_pound, Two_pound]
for coin in coins:
arguments=[coin, coin.colour,coin.value, coin.diameter, coin.thickness, coin.num_edges,coin.mass]
string= "{}.colour:{}, value{},diameter(mm):{}, thickness(mm):{},number of edges:{}, mass(grams):{}".format(*arguments)
#=====================================================================
#def __init__(self,rare=False):
#self.value=1
#self.colour="gold"
#self.num_edges=1
#self.diameter= 22.5
#self.thickness=3.15
#self.heads=True
#self.rare=rare
#if self.rare:
#self.value=1.25
#else:
#self.value=1
#def rust(self):
#self.colour="greenish"
#def clean (self):
#self.colour="gold"
#def flip(self):
#heads_options=[True,False]
#choice=random.choice(heads_options)# refer to random lesson
#self.heads=choice
#def __del__(self):#destructor
#print('coin spent')
## del coin1 will give you : coin spent
#coin1=Pound()
#x= print(coin1.value)
| true |
77a6a851c95038057d86e693b029841b4a3b4f3c | Python | RogerMCL/PythonExercises | /Ex015.py | UTF-8 | 186 | 3.953125 | 4 | [] | no_license | #EXERCÍCIO 015:
km = float(input('Quantos km rodados? '))
d = int(input('Quantos dias alugados? '))
p = (60 * d) + (0.15 * km)
print(f'O preço a ser pago será de R${p:.2f}')
| true |
7b2c13414a26c5add8a4b3783e0767202014742d | Python | pastrouveedespeudo/ste-fois-c-la-bonne | /imageimage/doc_question.py | UTF-8 | 11,564 | 2.859375 | 3 | [] | no_license | class question:
    def questionnage(self):
        """Populate self.question with the ordered list of French phrases.

        Each entry is a one-element list holding a single phrase.  The heavy
        repetition of some phrases appears deliberate -- presumably it
        weights frequent phrases for whatever consumes this data; TODO
        confirm against the callers (not visible in this file).
        """
        self.question = [["la voiture est rouge"],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon était'],
                         ['était'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le chat était roux'],
                         ['le crayon est vert et sur la table rouge'],
                         ['le chat était roux'],
                         ['le est bleu'],
                         ['le jean est bleu'],
                         ['le jean est bleu'],
                         ['le jean était bleu'],
                         ['le jean est bleu'],
                         ['le jean est bleu'],
                         ['le jean est bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean est bleu'],
                         ['le jean bleu'],
                         ['le jean est bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu et le crayon vert'],
                         ['le crayon vert'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu'],
                         ['le jean bleu est sur la chaise'],
                         ['le jean bleu est sur la chaise'],
                         ['le jean bleu est sur la chaise'],
                         ['le jean bleu est sur la chaise'],
                         ['le jean'],
                         ['le jean'],
                         ['le jean'],
                         ['le jean'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull'],
                         ['le jean à droite du pull bleu'],
                         ['le jean bleu à droite du pull rouge'],
                         ['le jean bleu à droite du pull rouge'],
                         ['le jean bleu à droite du pull rouge'],
                         ['le jean bleu à droite du pull rouge'],
                         ['le jean bleu à droite du pull rouge'],
                         ['le jean bleu à droite du pull rouge'],
                         ['le jean bleu'],
                         ['le jean bleu à droite du pull rouge'],
                         ['le jean bleu à droite du pull rouge'],
                         ['le jean bleu à droite du pull rouge'],
                         ['le jean bleu à droite du pull rouge'],
                         ['le jean bleu à droite du pull rouge'],
                         ['le jean bleu à droite du pull rouge'],
                         ['le jean bleu à gauche du pull rouge'],
                         ['le jean bleu à gauche du pull rouge'],
                         ['le jean bleu à gauche du pull rouge'],
                         ['le jean bleu à gauche du pull rouge'],
                         ['le jean bleu à gauche du pull rouge'],
                         ['le jean bleu à gauche du pull rouge'],
                         ['le jean bleu à gauche du pull rouge'],
                         ['le jean bleu à gauche du pull rouge'],
                         ['le jean bleu à gauche du pull rouge'],
                         ['le jean bleu à gauche du pull rouge'],
                         ['le jean bleu à gauche du pull rouge'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la'],
                         ['la voiture'],
                         ['la voiture'],
                         ['la'],
                         ['gauche'],
                         ['gauche'],
                         ['gauche'],
                         ['gauche'],
                         ['gauche'],
                         ['gauche'],
                         ['gauche'],
                         ['gauche droite gauche'],
                         ['gauche droite gauche'],
                         ['gauche droite'],
                         ['gauche droite bas'],
                         ['gauche bas droite'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['le pull est rouge et la mer est bleu'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['le jean bleu à gauche du pull rouge'],
                         [' jean bleu à gauche du pull rouge'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas droite gauche bas droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['self.liste_direction'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['droite gauche bas'],
                         ['le jean bleu à gauche du pull rouge'],
                         ['le jean bleu à gauche du pull rouge'],
                         ['le jean bleu à gauche du pull rouge'],
                         ['le jean bleu en haut du pull rouge'],
                         ['le jean bleu en haut du pull rouge'],
                         ['le jean bleu en haut du pull rouge'],
                         ['le jean bleu en bas du pull rouge'],
                         ['le jean bleu à droite du pull rouge'],
                         ['le jean bleu à droite du pull rouge'],
                         ['le jean bleu à gauche du pull rouge'],
                         ['le chien noir est à droite du chat roux'],
                         ['la voiture jaune'],
                         ['le chien noir est à droite du chat roux'],
                         ['la voiture roule'],
                         ['la voiture roule'],
                         ['la voiture roule'],
                         ['la voiture roule'],
                         ['la voiture roule'],
                         ['la voiture roule'],
                         ['le chien'],
                         ['le chien et le chat'],
                         ['le chien noir'],
                         ['le chien noir'],
                         ['le chien noir'],
                         ['le chien noir'],
                         ['le chien noir'],
                         ['le chien noir'],
                         ['le chien noir'],
                         ['le chien noir'],
                         ['le chat'],
                         ['le chat'],
                         ['le chat'],
                         ['le chat'],
                         ['le chat'],
                         ['le chat noir'],
                         ['chien'],
                         ['chien'],
                         ['chien'],
                         ['chien'],
                         ['chien noir'],
                         ['le chien noir'],
                         ['chien noir'],
                         ['le chien noir'],
                         ['le chien noir'],
                         ['le chien est noir'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir le chien noir'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir est à droite du chat roux'],
                         ['le chien noir est à droite du chat roux'],
                         ['e chien noir est à droite du chat roux'] ]
a886283ba47d519387bb5cd4cfa01d584dcfbaca | Python | AbolareRoheemah/Python-Tasks | /pangram.py | UTF-8 | 735 | 4.125 | 4 | [] | no_license | """
#This function helps check if a word is a pangram
print("Hello, I would love to tell you if a word or a sentence " +
" is a pangram")
print("If you are going to be entering a sentence, please do" +
" not space the words")
word = input("Please enter the word or sentence:\n")
alphabets = ["a", "b", "c", "d", "e", "f", "g", "h","i", "j", "k", "l", "m",
"n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
separate_words = list(word)
uniques = set(separate_words)
new_list = []
for unique in uniques:
new_list.append(unique)
new_list.sort()
if new_list == alphabets:
print("This word is a pangram!!!")
else:
print("This word is not a pangram!!!")
"""
| true |
82bf25f6f59f0be1fb46ffa2947d4e8416d2429e | Python | ntvo7/CIS2348-Homework2 | /Lab6.22.py | UTF-8 | 384 | 3.359375 | 3 | [] | no_license | a = int(input())
b = int(input())
num1 = int(input())
c = int(input())
d = int(input())
num2 = int(input())

# Brute-force the 2x2 linear system  a*x + b*y = num1,  c*x + d*y = num2
# over integer points in [-10, 10] x [-10, 10].
# Bug fix: the original used X = Y = 0 as a "not found" sentinel and then
# tested `X != 0 and Y != 0`, so any genuine solution with a zero component
# (e.g. x = 0) was reported as 'No solution'.  Track the result explicitly.
solution = None
for x in range(-10, 11):
    for y in range(-10, 11):
        # A point solves the system when it satisfies both equations.
        if a * x + b * y == num1 and c * x + d * y == num2:
            solution = (x, y)  # keep the last match, like the original loop
if solution is not None:
    print(solution[0], solution[1])
else:
    print('No solution')
6e6e666bced8e027a8dcf8c991b4c21c4e571c54 | Python | CharlesMicou/ezomnotho-prototype | /visual/structure/visualizer.py | UTF-8 | 868 | 2.59375 | 3 | [] | no_license | import os
import string
from parsing.agent_data import AgentData
from plotting.agent_plotter import AgentPlotter

logs_location = os.path.abspath("../../logs")

# Get the latest data unless we specify otherwise: pick the run directory
# with the largest numeric name.
run_dir = "0"
for entry in os.listdir(logs_location):
    # int() replaces string.atoi, which was deprecated in Python 2 and
    # removed in Python 3; 'entry' avoids shadowing the builtin dir().
    if int(entry) > int(run_dir):
        run_dir = entry
print("Loading run: " + run_dir)

run_dir_abspath = os.path.join(logs_location, run_dir)
print("Found log files: " + str(os.listdir(run_dir_abspath)))

# Parse every per-agent log; market.log is excluded (presumably the market's
# own log rather than an agent's -- confirm with the log writer).
agent_data = []
for logfile in os.listdir(run_dir_abspath):
    if logfile != "market.log":
        agent_data.append(AgentData(os.path.join(run_dir_abspath, logfile)))

"""for data in agent_data:
    plotter = AgentPlotter()
    plotter.make_all_plots(data, ["CABBAGE", "WOOD", "FISH"])"""

plotter = AgentPlotter()
plotter.make_all_plots(agent_data[0], ["CABBAGE", "WOOD", "FISH"])
cba3d2bc3d588a576ef6563403a335c4bafc5781 | Python | humanoiA/Python-ML-Sessions | /exp18.py | UTF-8 | 788 | 2.984375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.model_selection import train_test_split

# Load the Boston housing dataset; features in .data, house prices in .target.
boston = datasets.load_boston(return_X_y=False)
features = boston.data
targets = boston.target

# Hold out a third of the samples for evaluation (fixed seed for repeatability).
x_train, x_test, y_train, y_test = train_test_split(
    features, targets, test_size=0.33, random_state=1)

# Fit an ordinary least-squares linear regression on the training split.
model = linear_model.LinearRegression()
model.fit(x_train, y_train)
print(model.coef_)
print(model.predict(x_test))
print(model.score(x_test, y_test))

# Residual plot: prediction error against predicted value for both splits.
plt.scatter(model.predict(x_train), model.predict(x_train) - y_train,
            color='r', s=10, label='Train Data')
plt.scatter(model.predict(x_test), model.predict(x_test) - y_test,
            color='b', s=10, label='Test Data')
plt.hlines(y=0, xmin=0, xmax=50, linewidth=2)
plt.title('First Demo')
plt.legend(loc='upper right')
plt.show()
e2531c08fa759f9a4e4db46e01839e70fc93ab52 | Python | oskmy270/udpstreamer | /udpSender.py | UTF-8 | 3,319 | 3.15625 | 3 | [] | no_license | import socket
import time
import string
import random
import testData
import os
class Starter:
def __init__(self):
print 'Default values set'
def askSize(self):
atr.setSize(int(raw_input('Enter size for each UDP packet in Bytes: ')))
def askIntensity(self):
atr.setIntensity(int(raw_input('Enter intensity (packets per second): ')))
def askTarget(self):
atr.setTarget(raw_input('Enter target IP: '), int(raw_input('Enter target port: ')))
def askTimePeriod(self):
atr.setTimePeriod(int(raw_input('How long should the test occur? (seconds): ')))
def printValues(self):
print 'Target IP:\t\t\t', atr.targetIP
print 'Target port:\t\t\t', atr.targetPort
print 'Intensity (Msg/s)\t\t', atr.intensity
print 'Datagram size (Bytes)\t\t', atr.size
print 'Time for test:\t\t\t', atr.time
print 'Stream throughput (Bytes/s)\t', int(atr.size)*int(atr.intensity)
print 'Time between packets:\t\t', str(1./int(atr.intensity))
def menu(self):
#os.system('clear')
print '------------------------'
print '1. Set stream intensity'
print '2. Set stream size'
print '3. Set target'
print '4. Set time period'
print '5. Print values'
print ''
print '6. Start test'
print '7. Send synch info to server'
print '------------------------'
return raw_input('Choice (q to quit): ')
def sendUDP(self, msg):
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.sendto(atr.getMessage(), (atr.targetIP, atr.targetPort))
def createPayload(self, random, size, seq):
if random:
atr.message = '<sequence='+str(seq)+'>'
atr.message += self.id_generator(atr.size, string.ascii_uppercase + string.digits)
else:
print 'Creating non-random payload'
def id_generator(self, size, chars):
return ''.join(random.choice(chars) for x in range(size))
def sendSynchInfo(self):
synchInfo = '<sync>'
synchInfo += '<intensity='+str(atr.intensity)+'>'
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.sendto(synchInfo, (atr.targetIP, atr.targetPort))
def startTest(self):
print 'Starting test...'
sequence = 0
self.printValues()
startTime = time.time()
while (time.time() < startTime+atr.time):
self.createPayload(True, int(atr.size), sequence)
self.sendUDP(atr.message)
print sequence
sequence += 1
time.sleep(1./int(atr.intensity))
atr = testData.dataParameters()
run = Starter()
inputText = ''
while (inputText != 'q'):
inputText = run.menu()
if inputText == '1':
run.askIntensity()
if inputText == '2':
run.askSize()
if inputText == '3':
run.askTarget()
if inputText == '4':
run.askTimePeriod()
if inputText == '5':
run.printValues()
if inputText == '6':
run.startTest()
if inputText == '7':
run.sendSynchInfo()
| true |
8688367d335d72ccbddab5b9df34463a5a9b4092 | Python | jingmca/leetcode | /p907.py | UTF-8 | 1,904 | 3.34375 | 3 | [] | no_license | class Solution(object):
def sumSubarrayMins(self, A):
"""
:type A: List[int]
:rtype: int
"""
left = [None] * len(A)
right = [None] * len(A)
stack = []
for i in xrange(0, len(A)):
while stack and A[i] <= A[stack[-1]]:
stack.pop(0)
if stack:
left[i] = stack[-1]
else:
left[i] = -1
stack.append(i)
stack = []
for i in xrange(len(A)-1, -1 , -1):
while stack and A[i] < A[stack[-1]]:
stack.pop()
if stack:
right[i] = stack[-1]
else:
right[i] = len(A)
stack.append(i)
total = sum([A[i] * (i - left[i]) * (right[i] - i) for i in xrange(len(A))]) % (10 ** 9 + 7)
print left,right
return total
    def sumSubarrayMins2(self, A):
        # Alternative monotonic-stack variant (Python 2 syntax: xrange and a
        # print statement).
        # NOTE(review): MOD is computed but never applied, and the method
        # returns the list of per-index contributions rather than their sum
        # modulo MOD -- this looks like a debug/inspection helper; confirm
        # whether callers expect `sum(...) % MOD` instead.
        MOD = 10**9 + 7
        N = len(A)
        # prev has i* - 1 in increasing order of A[i* - 1]
        # where i* is the answer to query j
        # (prev[i] = nearest index to the left with a strictly smaller value.)
        stack = []
        prev = [None] * N
        for i in xrange(N):
            while stack and A[i] <= A[stack[-1]]:
                stack.pop()
            prev[i] = stack[-1] if stack else -1
            stack.append(i)
        # next has k* + 1 in increasing order of A[k* + 1]
        # where k* is the answer to query j
        # (next_[k] = nearest index to the right with a smaller-or-equal value.)
        stack = []
        next_ = [None] * N
        for k in xrange(N-1, -1, -1):
            while stack and A[k] < A[stack[-1]]:
                stack.pop()
            next_[k] = stack[-1] if stack else N
            stack.append(k)
        # NOTE(review): leftover debug output.
        print prev,next_
        # Use prev/next array to count answer
        return [(i - prev[i]) * (next_[i] - i) * A[i]
                for i in xrange(N)]
# Quick manual check of the monotonic-stack solution.
solver = Solution()
print(solver.sumSubarrayMins([85, 93, 93, 90]))
263a2991dff1d2783b38c69ceb4a0c54f4017c2d | Python | zhaokang555/Developer | /Python3/201508/09-kw-Creative.py | UTF-8 | 436 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# class Student(object):
# pass
# def func(o, **kw):
# for k, v in kw.items():
# setattr(o, k, v)
# s = Student()
# func(s, name = 'zk', age = 23)
# print(s.name, s.age)
class Student(object):
def __init__(self, **kw):
for k, v in kw.items():
setattr(self, k, v)
def main():
s = Student(name = 'zk', age = 23)
print(s.name, s.age)
if __name__ == '__main__':
main() | true |
2c60119949ab38f37f7a697248e060840414cb66 | Python | Einsteinder/Leetcode | /73. Set Matrix Zeroes.py | UTF-8 | 741 | 3.171875 | 3 | [] | no_license | class Solution:
def setZeroes(self, matrix):
zeroXIndex = {}
zeroYIndex = {}
for i in range(len(matrix)):
for j in range(len(matrix[0])):
if matrix[i][j] == 0:
zeroXIndex[i] = 1
zeroYIndex[j] = 1
for i in range(len(matrix)):
for y in zeroYIndex:
matrix[i][y] = 0
for x in zeroXIndex:
for j in range(len(matrix[0])):
matrix[x][j] = 0
return matrix
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
so = Solution()
print(so.setZeroes([
[0,1,2,0],
[3,4,5,2],
[1,3,1,5]
])) | true |
b010ad1d0eea17a6bdf634906e2518c44b92ed7b | Python | github-mohsinalam/Python | /Nested Data and Nested Iteration/m_list.py | UTF-8 | 1,029 | 4.21875 | 4 | [] | no_license | #Iterate through the list so that if the character ‘m’ is in the string, then it should be added to a new list called m_list.
#Hint: Because this isn’t just a list of lists, think about what type of object you want your data to be stored in. Conditionals may help you.
d = ['good morning', 'hello', 'chair', 'python', ['music', 'flowers', 'facebook', 'instagram', 'snapchat', ['On my Own', 'monster', 'Words dont come so easily', 'lead me right']], 'Stressed Out', 'Pauver Coeur', 'Reach for Tomorrow', 'mariners song', 'Wonder sleeps here']
m_list = []
for item in d:
if type(item) == type(' '):
if 'm' in item :
m_list.append(item)
elif type(item) == type([]):
for item2 in item:
if type(item2) == type(" "):
if 'm' in item2:
m_list.append(item2)
elif type(item2) == type([]):
for item3 in item2:
if 'm' in item3:
m_list.append(item3)
| true |
12b8b9303a7b7abc931af3c66220964bbd94dd1b | Python | buttplugio/buttplug-py | /buttplug/utils/eventhandler.py | UTF-8 | 1,716 | 3.390625 | 3 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | # Taken from https://bitbucket.org/marcusva/python-utils/
#
# Original license is public domain, don't want to bring the whole package in,
# and it's not really updated anyways.
class EventHandler(object):
    """Manages a list of callbacks and invokes them on demand.

    Every callback is invoked with ``sender`` as its first argument,
    followed by whatever arguments the event is fired with.  The ``+=``
    and ``-=`` operators are shorthand for :meth:`add` / :meth:`remove`,
    and the handler supports ``len()`` and index access to its callbacks.
    """

    def __init__(self, sender):
        self.callbacks = []
        self.sender = sender

    def __call__(self, *args):
        """Invoke every callback in registration order.

        Each callback receives the sender plus ``*args``; the return values
        are collected into a list, preserving registration order.
        """
        results = []
        for cb in self.callbacks:
            results.append(cb(self.sender, *args))
        return results

    def __iadd__(self, callback):
        """Support ``handler += callback``."""
        self.add(callback)
        return self

    def __isub__(self, callback):
        """Support ``handler -= callback``."""
        self.remove(callback)
        return self

    def __len__(self):
        """Number of callbacks currently registered."""
        return len(self.callbacks)

    def __getitem__(self, index):
        return self.callbacks[index]

    def __setitem__(self, index, value):
        self.callbacks[index] = value

    def __delitem__(self, index):
        del self.callbacks[index]

    def add(self, callback):
        """Register *callback*; raise TypeError if it is not callable."""
        if not callable(callback):
            raise TypeError("callback must be callable")
        self.callbacks.append(callback)

    def remove(self, callback):
        """Unregister *callback* (ValueError if it was never registered)."""
        self.callbacks.remove(callback)
| true |