blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
14d78c9bceab38af97aaceff8679c704b5bc1538 | Python | mdallow97/Machine-Learning-Practice | /linalg.py | UTF-8 | 6,837 | 3.78125 | 4 | [] | no_license | # linalg.py
"""
This file contains functions commonly seen in linear algebra, and are building blocks to functions used in Machine Learning.
This file was solely for practicing Linear Algebra and making sure I understand the main concepts. It may not scale correctly.
Finally, it does not contain some of the most important functions that are required for machine learning:
- Computing Eigenvalues and Eigenvectors
- Computing gradients
- Computing hessians
Michael Dallow
"""
import numpy as np
import copy
def zeroes(m, n):
    """Return an m x n matrix (nested lists) filled with zeroes.

    :type m: int
    :type n: int
    :rtype: List[List[float]]
    """
    grid = [[0] * n for _ in range(m)]
    # Sanity-check the hand-rolled result against NumPy.
    assert (np.array(grid) == np.zeros((m, n), dtype=int)).all()
    return grid
def ones(m, n):
    """Return an m x n matrix (nested lists) filled with ones.

    :type m: int
    :type n: int
    :rtype: List[List[float]]
    """
    grid = [[1] * n for _ in range(m)]
    # Sanity-check the hand-rolled result against NumPy.
    assert (np.array(grid) == np.ones((m, n), dtype=int)).all()
    return grid
def transpose(A):
    """Return the transpose of matrix A.

    :type A: List[List[float]]
    :rtype: List[List[float]]
    """
    n_rows, n_cols = len(A), len(A[0])
    # Element (r, c) of A lands at (c, r) of the transpose.
    flipped = [[A[r][c] for r in range(n_rows)] for c in range(n_cols)]
    assert (np.array(flipped) == np.array(A).T).all()
    return flipped
def create_identity(n):
    """Return the n x n identity matrix.

    :type n: int
    :rtype: List[List[float]]
    """
    eye = []
    for row in range(n):
        # 1 on the main diagonal, 0 everywhere else.
        eye.append([1 if row == col else 0 for col in range(n)])
    assert (np.array(eye) == np.identity(n)).all()
    return eye
def dot_product(A, B):
    """Matrix-multiply A (m x n) by B (n x p) and return the m x p product.

    :type A: List[List[float]]
    :type B: List[List[float]]
    :rtype: List[List[float]]
    """
    # Work on copies so the caller's matrices are never touched.
    A = copy.deepcopy(A)
    B = copy.deepcopy(B)
    m, n = len(A), len(A[0])
    p = len(B[0])
    # The inner dimensions must agree: cols of A == rows of B.
    assert n == len(B)
    # C[i][j] is the dot product of row i of A with column j of B.
    C = [[sum(A[i][k] * B[k][j] for k in range(n)) for j in range(p)]
         for i in range(m)]
    assert (np.array(C) == np.dot(A, B)).all()
    return C
def is_symmetric_matrix(A):
    """Return True when A equals its own transpose.

    :type A: List[List[float]]
    :rtype: bool
    """
    # The equality check already yields a bool, so return it directly
    # instead of branching into `return True` / `return False`.
    return transpose(A) == A
def trace(A):
    """Return the trace (sum of the diagonal entries) of square matrix A.

    :type A: List[List[float]]
    :rtype: int
    """
    # A must be (n x n).
    size = len(A)
    assert size == len(A[0])
    diagonal_sum = sum(A[d][d] for d in range(size))
    assert diagonal_sum == np.trace(A)
    return diagonal_sum
def L2_norm(x):
    """Return the Euclidean (L2) norm of the vector x.

    :type x: List[float]
    :rtype: float
    """
    # x must be a plain Python list of numbers.
    assert type(x) is list
    if len(x) == 0:
        return 0
    assert type(x[0]) is int or type(x[0]) is float
    # l2 = sqrt(sum(x[i] ** 2)), i = [0, 1, ..., n]
    l2 = sum(v ** 2 for v in x) ** 0.5
    # Compare with a tolerance: the old exact `==` against NumPy's norm is
    # fragile because the two implementations can differ in the last bits.
    assert np.isclose(l2, np.linalg.norm(x))
    return l2
def Lp_norm(x, p):
    """Return the Lp norm of the vector x: (sum |x_i|^p) ** (1/p).

    :type x: List[float]
    :type p: float
    :rtype: float
    """
    # x must be a plain Python list of numbers.
    assert type(x) is list
    if len(x) == 0:
        return 0
    assert type(x[0]) is int or type(x[0]) is float
    # Lp norms are only defined for p >= 1.
    assert p >= 1
    lp = sum(abs(v) ** p for v in x) ** (1.0 / p)
    # Compare with a tolerance: exact float `==` against NumPy is fragile.
    assert np.isclose(lp, np.linalg.norm(x, ord=p))
    return lp
def frobenius_norm(A):
    """Return the Frobenius norm of A (the matrix analogue of the L2 norm).

    :type A: List[List[float]]
    :rtype: float
    """
    m, n = len(A), len(A[0])
    Af = 0
    for i in range(m):
        for j in range(n):
            Af += A[i][j] ** 2
    Af **= 0.5
    # The Frobenius norm equals sqrt(trace(A.T * A)).  Compare with a
    # tolerance: the old exact `==` checks fail whenever the two float
    # computations round differently.
    assert np.isclose(Af, trace(dot_product(transpose(A), A)) ** 0.5)
    assert np.isclose(Af, np.linalg.norm(A))
    return Af
def get_row_echelon_matrix(ref, it=0):
    """
    :type ref: List[List[float]]
    :rtype: List[List[float]]
    returns the row echelon form (REF) of a matrix

    Works recursively: `it` is both the current pivot row index and the
    current pivot column index, so this assumes the matrix has at least as
    many columns as rows.  # NOTE(review): confirm behaviour for wide/tall
    # matrices where those two indices diverge.
    """
    # Never mutate the caller's matrix.
    ref = copy.deepcopy(ref)
    # pivot: first non-zero entry in column `it`, at or below row `it`.
    pivot = 0
    for i in range(it, len(ref)):
        if ref[i][it] != 0:
            pivot = ref[i][it]
            break
    # move pivot row to top (position `it`)
    temp = ref[i]
    ref.pop(i)
    ref = ref[0:it] + [temp] + ref[it:len(ref)]
    # if pivot is 0, either reached end of matrix or column is all 0's (below prev pivot row)
    if pivot == 0:
        if it == len(ref) - 1:
            return ref
        else:
            it += 1
            return get_row_echelon_matrix(ref, it)
    # make pivot equal one (scale the whole pivot row)
    for j in range(len(ref[it])):
        ref[it][j] /= pivot
    # reduce proceding rows given pivot row (zero out column `it` below it)
    for i in range(it+1, len(ref)):
        if ref[i][it] != 0:
            multiplier = ref[i][it]
            for j in range(it, len(ref[i])):
                ref[i][j] -= multiplier * ref[it][j]
    # Recurse on the next pivot position, or stop at the last row.
    if it == len(ref) - 1:
        return ref
    else:
        it += 1
        return get_row_echelon_matrix(ref, it)
def rank(A):
    """
    :type A: List[List[float]]
    :rtype: int
    returns number of linearly independent column vectors
    """
    # Row-reduce first: the rank is the number of non-zero rows in the REF.
    ref = get_row_echelon_matrix(A)
    m, n = len(A), len(A[0])
    # A single 1 x n row of zeroes to compare against (0 == 0.0 in Python).
    z = zeroes(1, n)[0]
    r = 0
    for row in ref:
        if row != z:
            r += 1
    # NOTE(review): float round-off in the elimination can leave tiny
    # non-zero residues in a "zero" row and inflate r; the NumPy
    # cross-check below would catch such a disagreement.
    assert r == np.linalg.matrix_rank(A)
    return r
def determinant(A, debug=False):
    """
    :type A: List[List[float]]
    :type debug: bool
    :rtype: float
    returns |A|

    Computed by recursive Laplace (cofactor) expansion along row 0, so the
    cost grows factorially with the matrix size.
    """
    def determinant_helper(A):
        # A must be square at every level of the recursion.
        m,n = len(A), len(A[0])
        assert m == n
        if n == 1:
            return A[0][0]
        elif n == 2:
            # definition of the determinant for a 2 x 2 matrix
            return A[0][0] * A[1][1] - A[0][1] * A[1][0]
        """
        |A| = sum( ((-1) ^ i) * A[0][i] * |A[!0][!i]| )
        A[!0][!i] is the matrix without row 0 and column i
        """
        det = 0
        for i in range(n):
            # Minor of A with row 0 and column i removed.
            M = get_inner_matrix(A, 0, i)
            det += ((-1) ** i) * A[0][i] * determinant_helper(M)
        return det
    det = determinant_helper(A)
    # NumPy uses LU decomposition, so the two results usually differ in the
    # last float digits; only warn (and only when debug is requested).
    if det != np.linalg.det(A) and debug:
        # Likely not exactly the same
        print(f"WARNING: Determinant's ({str(det)}) not equal to NumPy ({str(np.linalg.det(A))})")
    return det
def adjoint(A):
    """
    :type A: List[List[float]]
    :rtype: List[List[float]]
    returns adj(A)
    """
    # A must be square and at least 2 x 2 (minors of a 1 x 1 are empty).
    m,n = len(A), len(A[0])
    assert m == n and n > 1
    # find cofactor matrix: entry (i, j) is (-1)^(i+j) times the minor
    # obtained by deleting row i and column j.
    cofactor = ones(m, n)
    for i in range(n):
        for j in range(n):
            M = get_inner_matrix(A, i, j)
            cofactor[i][j] *= ((-1) ** (i+j)) * determinant(M)
    # adjoint (adjugate) matrix is transpose of cofactor matrix
    return transpose(cofactor)
def inverse(A):
    """
    :type A: List[List[float]]
    :rtype: List[List[float]]
    returns A^(-1), or an all-zero matrix when A is singular
    """
    m, n = len(A), len(A[0])
    assert m == n
    det = determinant(A)
    if det == 0:
        # Singular matrix: no inverse exists; keep the zero-matrix fallback.
        print("WARNING: Matrix is non-invertible")
        return zeroes(m, n)
    adj = adjoint(A)
    # A^(-1) = adj(A) / |A|.  Keep adj's own row/column order here: the
    # previous version swapped the comprehension indices, which transposed
    # the adjugate back into the cofactor matrix and produced a wrong
    # inverse for every non-symmetric input.
    inv = [[(1 / det) * adj[i][j] for j in range(n)] for i in range(n)]
    # Warn on real disagreement with NumPy.  The old check used exact `!=`
    # combined with .all(), so it only fired when *every* entry differed.
    if not np.allclose(np.array(inv), np.linalg.inv(A)):
        print(f"WARNING: Inverse is not equal to NumPy\nInverse:\n{np.array(inv)}\nNumPy Inverse:\n{np.linalg.inv(A)}")
    return inv
def is_orthogonal(A):
    """
    :type A: List[List[float]]
    :rtype: bool

    A is orthogonal iff its transpose equals its inverse.
    """
    # NOTE(review): this is an exact element-wise comparison, so float
    # round-off inside inverse() will report False for most non-trivial
    # orthogonal matrices — consider a tolerance-based comparison.
    return transpose(A) == inverse(A)
def get_inner_matrix(A, i, j):
    """
    :type A: List[List[float]]
    :rtype: List[List[float]]
    returns a copy of the matrix A with row i and column j removed
    """
    # Deep-copy so the caller's matrix is never mutated.
    M = copy.deepcopy(A)
    M.pop(i)
    # Plain loop instead of a throwaway list comprehension: the pops are
    # executed for their side effect, not to build a list.
    for row in M:
        row.pop(j)
    return M
def main():
    # Quick manual check: row-reduce a sample 3 x 3 matrix and print it.
    A = [[-2,-4,2], [-2,1,2], [4,2,5]]
    print(np.array(get_row_echelon_matrix(A)))


if __name__ == "__main__":
    main()
| true |
a15af4a9de3c2c5205891a1e25dc8ec7a258bc43 | Python | PoolBRad/GIT-Python | /stockloss.py | UTF-8 | 295 | 3.921875 | 4 | [] | no_license | print('This is a quick script that will calculate a % loss or gain.')
# Read the entry price and the exit (or current) price from the user.
buy_price = float(input('What is your buy price? '))
sell_price = float(input('What is the sell/current price? '))
# Percentage change relative to the buy price (positive = gain).
gain_loss = ((sell_price - buy_price) / buy_price) * 100
print('Your gain/loss is {}%.'.format(gain_loss))
| true |
b1fcd52bb5682eb716a0c0a710cd9da853e438bb | Python | danielegrattarola/spektral | /spektral/layers/pooling/dmon_pool.py | UTF-8 | 7,195 | 2.640625 | 3 | [
"MIT"
] | permissive | import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense
from spektral.layers import ops
from spektral.layers.pooling.src import SRCPool
class DMoNPool(SRCPool):
    r"""
    The DMoN pooling layer from the paper

    > [Graph Clustering with Graph Neural Networks](https://arxiv.org/abs/2006.16904)<br>
    > Anton Tsitsulin et al.

    **Mode**: single, batch.

    This layer learns a soft clustering of the input graph as follows:
    $$
    \begin{align}
    \C &= \textrm{MLP}(\X); \\
    \X' &= \C^\top \X \\
    \A' &= \C^\top \A \C; \\
    \end{align}
    $$
    where \(\textrm{MLP}\) is a multi-layer perceptron with softmax output.

    Two auxiliary loss terms are also added to the model: the modularity loss
    $$
    L_m = - \frac{1}{2m} \mathrm{Tr}(\C^\top \A \C - \C^\top \d^\top \d \C)
    $$
    and the collapse regularization loss
    $$
    L_c = \frac{\sqrt{k}}{n} \left\|
        \sum_i \C_i^\top
    \right\|_F -1.
    $$

    This layer is based on the original implementation found
    [here](https://github.com/google-research/google-research/blob/master/graph_embedding/dmon/dmon.py).

    **Input**

    - Node features of shape `(batch, n_nodes_in, n_node_features)`;
    - Symmetrically normalized adjacency matrix of shape
    `(batch, n_nodes_in, n_nodes_in)`;

    **Output**

    - Reduced node features of shape `(batch, n_nodes_out, n_node_features)`;
    - Reduced adjacency matrix of shape `(batch, n_nodes_out, n_nodes_out)`;
    - If `return_selection=True`, the selection matrix of shape
    `(batch, n_nodes_in, n_nodes_out)`.

    **Arguments**

    - `k`: number of output nodes;
    - `mlp_hidden`: list of integers, number of hidden units for each hidden layer in
    the MLP used to compute cluster assignments (if `None`, the MLP has only one output
    layer);
    - `mlp_activation`: activation for the MLP layers;
    - `collapse_regularization`: strength of the collapse regularization;
    - `return_selection`: boolean, whether to return the selection matrix;
    - `use_bias`: use bias in the MLP;
    - `kernel_initializer`: initializer for the weights of the MLP;
    - `bias_initializer`: initializer for the bias of the MLP;
    - `kernel_regularizer`: regularization applied to the weights of the MLP;
    - `bias_regularizer`: regularization applied to the bias of the MLP;
    - `kernel_constraint`: constraint applied to the weights of the MLP;
    - `bias_constraint`: constraint applied to the bias of the MLP;
    """

    def __init__(
        self,
        k,
        mlp_hidden=None,
        mlp_activation="relu",
        return_selection=False,
        collapse_regularization=0.1,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            k=k,
            mlp_hidden=mlp_hidden,
            mlp_activation=mlp_activation,
            return_selection=return_selection,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.k = k
        self.mlp_hidden = mlp_hidden if mlp_hidden is not None else []
        self.mlp_activation = mlp_activation
        self.collapse_regularization = collapse_regularization

    def build(self, input_shape):
        # Build the cluster-assignment MLP: hidden Dense layers followed by
        # a k-way softmax output (one probability per output cluster).
        layer_kwargs = dict(
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint,
        )
        self.mlp = Sequential(
            [
                Dense(channels, self.mlp_activation, **layer_kwargs)
                for channels in self.mlp_hidden
            ]
            + [Dense(self.k, "softmax", **layer_kwargs)]
        )

        super().build(input_shape)

    def call(self, inputs, mask=None):
        # Unpack (x, a[, i]) and run the select/reduce/connect pipeline
        # provided by SRCPool.
        x, a, i = self.get_inputs(inputs)
        return self.pool(x, a, i, mask=mask)

    def select(self, x, a, i, mask=None):
        # Soft cluster assignments S from the MLP; masked nodes get zeroed.
        s = self.mlp(x)
        if mask is not None:
            s *= mask[0]

        # Collapse loss (averaged over the batch in batch mode).
        col_loss = self.collapse_loss(a, s)
        if K.ndim(a) == 3:
            col_loss = K.mean(col_loss)
        self.add_loss(self.collapse_regularization * col_loss)

        return s

    def reduce(self, x, s, **kwargs):
        # X' = S^T X
        return ops.modal_dot(s, x, transpose_a=True)

    def connect(self, a, s, **kwargs):
        # A' = S^T A S
        a_pool = ops.matmul_at_b_a(s, a)

        # Modularity loss (averaged over the batch in batch mode).
        mod_loss = self.modularity_loss(a, s, a_pool)
        if K.ndim(a) == 3:
            mod_loss = K.mean(mod_loss)
        self.add_loss(mod_loss)

        return a_pool

    def reduce_index(self, i, s, **kwargs):
        # Each input graph contributes exactly k pooled nodes, so repeat
        # each graph index k times.
        i_mean = tf.math.segment_mean(i, i)
        i_pool = ops.repeat(i_mean, tf.ones_like(i_mean) * self.k)

        return i_pool

    def modularity_loss(self, a, s, a_pool):
        # Degree vector and edge count; sparse and dense adjacencies are
        # handled separately.
        if K.is_sparse(a):
            n_edges = tf.cast(len(a.values), dtype=s.dtype)

            degrees = tf.sparse.reduce_sum(a, axis=-1)
            degrees = tf.reshape(degrees, (-1, 1))
        else:
            n_edges = tf.cast(tf.math.count_nonzero(a, axis=(-2, -1)), dtype=s.dtype)
            degrees = tf.reduce_sum(a, axis=-1, keepdims=True)

        # Null-model term S^T d d^T S / (2m) of the modularity.
        normalizer_left = tf.matmul(s, degrees, transpose_a=True)
        normalizer_right = tf.matmul(degrees, s, transpose_a=True)

        if K.ndim(s) == 3:
            normalizer = (
                ops.modal_dot(normalizer_left, normalizer_right)
                / 2
                / tf.reshape(n_edges, [tf.shape(n_edges)[0]] + [1] * 2)
            )
        else:
            normalizer = ops.modal_dot(normalizer_left, normalizer_right) / 2 / n_edges

        loss = -tf.linalg.trace(a_pool - normalizer) / 2 / n_edges

        return loss

    def collapse_loss(self, a, s):
        # sqrt(k)/n * ||sum_i S_i||_F - 1: penalizes degenerate solutions
        # where all nodes collapse into a single cluster.
        cluster_sizes = tf.math.reduce_sum(s, axis=-2)
        n_nodes = tf.cast(tf.shape(a)[-1], s.dtype)
        loss = (
            tf.norm(cluster_sizes, axis=-1)
            / n_nodes
            * tf.sqrt(tf.cast(self.k, s.dtype))
            - 1
        )

        return loss

    def get_config(self):
        config = {
            "collapse_regularization": self.collapse_regularization,
            "k": self.k,
            "mlp_hidden": self.mlp_hidden,
            "mlp_activation": self.mlp_activation,
        }
        base_config = super().get_config()
        return {**base_config, **config}
| true |
26a1de0083f159e63deb2da450f7e314108e82c6 | Python | safpla/autoLoss | /sep_train_gan.py | UTF-8 | 3,377 | 2.625 | 3 | [] | no_license | """ Traditional GAN """
# __Author__ == "Haowen Xu"
# __Data__ == "04-29-2018"
import tensorflow as tf
import numpy as np
import logging
import os
import sys
from models import cls
import utils
logger = utils.get_logger()
def train(config):
    """Train one classifier with the loss mode taken from the command line
    and return its test accuracy at the best validation checkpoint.
    """
    g = tf.Graph()
    gpu_options = tf.GPUOptions(allow_growth=True)
    configProto = tf.ConfigProto(gpu_options=gpu_options)
    sess = tf.InteractiveSession(config=configProto, graph=g)
    # loss_mode comes from argv so the same script can sweep several modes.
    model = cls.Cls(config, g, loss_mode=sys.argv[1])
    sess.run(model.init)

    max_training_step = config.max_training_step
    best_acc = 0
    endurance = 0
    i = 0
    # Early stopping: give up after max_endurance_stud validation rounds
    # without improvement.
    while i < max_training_step and endurance < config.max_endurance_stud:
        train_loss, train_acc = model.train(sess)
        if i % config.valid_frequence_stud == 0:
            endurance += 1
            valid_loss, valid_acc, _, _ = model.valid(sess)
            #logger.info('====Step: {}===='.format(i))
            #logger.info('train_loss: {}, train_acc: {}'\
            #    .format(train_loss, train_acc))
            #logger.info('valid_loss: {}, valid_acc: {}'\
            #    .format(valid_loss, valid_acc))
            if valid_acc > best_acc:
                # New best validation accuracy: evaluate on the test set
                # and reset the early-stopping counter.
                best_acc = valid_acc
                _, test_acc, _, _ = model.valid(sess, model.test_dataset)
                endurance = 0
        i += 1
    logger.info('lambda1: {}, lambda2: {}'.format(config.lambda1_stud,
                                                  config.lambda2_stud))
    logger.info('valid_acc: {}'.format(best_acc))
    logger.info('test_acc: {}'.format(test_acc))
    # NOTE(review): if validation accuracy never improves, test_acc is
    # never bound and this raises NameError — confirm intended.
    return test_acc
if __name__ == '__main__':
    # Load the shared classification config; sys.argv[1] selects the loss
    # mode and which hyper-parameter sweep to run.
    root_path = os.path.dirname(os.path.realpath(__file__))
    config_path = os.path.join(root_path, 'config/classification.cfg')
    config = utils.Parser(config_path)

    if sys.argv[1] == '1':
        # Sweep over lambda1 only; 5 runs each, report mean accuracies.
        #lambda_set1 = [0.001, 0.005, 0.01, 0.02, 0.03, 0.04, 0.05]
        lambda_set1 = [0.2]
        num1 = len(lambda_set1)
        aver_mat = np.zeros([num1])
        mat = []
        for i in range(num1):
            config.lambda1_stud = lambda_set1[i]
            acc = []
            for k in range(5):
                acc.append(train(config))
            aver_mat[i] = np.mean(np.array(acc))
            mat.append(acc)
        print(aver_mat)
        print(mat)
    elif sys.argv[1] == '3':
        # Grid sweep over (lambda1, lambda2); report mean and variance.
        #lambda_set1 = [0.0001, 0.0003, 0.001, 0.003, 0.01]
        lambda_set1 = [0.02, 0.03, 0.04]
        lambda_set2 = [0.0001, 0.0003, 0.001, 0.003, 0.01]
        #lambda_set1 = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008]
        #lambda_set1 = [0.005, 0.006, 0.007, 0.008]
        num1 = len(lambda_set1)
        num2 = len(lambda_set2)
        aver_mat = np.zeros([num1, num2])
        var_mat = np.zeros([num1, num2])
        for i in range(num1):
            for j in range(num2):
                config.lambda1_stud = lambda_set1[i]
                config.lambda2_stud = lambda_set2[j]
                acc = []
                for k in range(5):
                    acc.append(train(config))
                aver_mat[i, j] = np.mean(np.array(acc))
                var_mat[i,j] = np.var(np.array(acc))
        print(aver_mat)
        print(var_mat)
    else:
        # No sweep: just train 5 times with the config defaults.
        acc = []
        for k in range(5):
            acc.append(train(config))
        print(acc)
        print('\n')
        print(np.mean(np.array(acc)))
| true |
9c2b61ea2505ddadab31309c3aa51de8929aa086 | Python | amazingguni/codevisualizer | /CodeVisualizerView/CodeVisualizer/doc/python 코드.py | UTF-8 | 318 | 2.6875 | 3 | [] | no_license | import sys
import bdb
def spam():
    # Demo routine (Python 2 print syntax): emit a marker and bind a few
    # locals, including an instance of A, for the debugger to inspect.
    print 'in spam'
    a=3
    b=4
    c=6
    d=A()
    e=4
class A:
    # Simple container holding a handful of instance attributes; used by
    # spam() as sample state for the visualizer/debugger.
    def __init__(self):
        self.a = 10
        self.b = 3
        self.c = 3
        self.d = None
if __name__ == '__main__':
    # Entry point: bind some module-level names, call spam(), print an
    # end marker (Python 2 print syntax).
    a=5
    b=6
    c=3
    spam()
    print "->end"
| true |
8c4aa1d79ade6c637e68abd3b8515e665d838fe1 | Python | Jitendrap1702/Coding_Ninjas_Intro_to_Python | /Conditions And Loops Python/armstrong.py | UTF-8 | 250 | 3.515625 | 4 | [] | no_license | m=int(input("enter number1"))
n=int(input("enter number2"))


def _is_armstrong_cube(num):
    """Return True when num equals the sum of the cubes of its digits."""
    total = 0
    temp = num
    while temp > 0:
        total += (temp % 10) ** 3
        # Integer division is essential: the original `temp = temp / 10`
        # produced a float under Python 3, so the remainders became
        # fractional and the check never succeeded.
        temp //= 10
    return num == total


# Print every number in [m, n] equal to the sum of its digits' cubes.
# NOTE(review): cubing matches the classic 3-digit Armstrong numbers
# (153, 370, 371, 407); the general definition raises each digit to the
# digit count instead.
for num in range(m, n + 1):
    if _is_armstrong_cube(num):
        print(num)
| true |
f0ff49ed770f7c8b1a356b6ef1705ef60d6c2db1 | Python | sobolewskidamian/python_project_game | /src/main.py | UTF-8 | 4,657 | 2.640625 | 3 | [] | no_license | import sys
import pygame
from pygame.locals import QUIT, KEYDOWN, K_RETURN, K_KP_ENTER
from objects.inputBox import InputBox
from objects.submitBox import SubmitBox
from game import Game
# Frame rate and window dimensions for the game.
FPS = 70
SCREENWIDTH = 288
SCREENHEIGHT = 512


def main():
    """Entry point: initialise pygame, ask for a nick, then loop game rounds."""
    global SCREEN, FPSCLOCK
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    SCREEN = pygame.display.set_mode([SCREENWIDTH, SCREENHEIGHT], 0, 32)
    pygame.display.set_caption('Vertical game')
    pygame.mixer.music.stop()
    nick = get_nick()
    game = Game(nick, SCREEN, FPSCLOCK, FPS)
    while True:
        # When a round ends, ask for the mode again before the next one.
        if game.game_ended:
            game.game_ended = False
            mode = choose_mode()
            pygame.mixer.music.stop()
            if mode:
                # NOTE(review): hard-coded server address/port shadow the
                # commented-out get_multiplayer_data() prompt — confirm this
                # debug shortcut is intentional.
                game.server_address, game.port = '192.168.43.92', 4321#get_multiplayer_data() #'192.168.43.92', 4321
                game.multiplayer = True
            else:
                game.multiplayer = False
        game.play()
def choose_mode():
    """Show the Singleplayer/Multiplayer buttons; return True for multiplayer."""
    submit_box = SubmitBox(SCREENWIDTH / 2 - 75, SCREENHEIGHT / 2 - 66, 150, 32, "Singleplayer")
    submit_box2 = SubmitBox(SCREENWIDTH / 2 - 75, SCREENHEIGHT / 2 - 6, 150, 32, "Multiplayer")
    pygame.mixer.music.stop()
    boxes = [submit_box, submit_box2]
    # Event loop until one of the two buttons is activated.
    while not submit_box.get_active() and not submit_box2.get_active():
        clean_screen()
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            for box in boxes:
                box.handle_event(event)
        for box in boxes:
            box.draw(SCREEN)
        pygame.display.flip()
        FPSCLOCK.tick(FPS)
    if submit_box2.get_active():
        return True
    else:
        return False
def get_multiplayer_data():
    """Prompt for a server address and port; return (address, port).

    Returns port 0 when the typed port is not a valid integer.
    """
    input_box = InputBox(10, SCREENHEIGHT / 2 - 88, SCREENWIDTH - 20, 32)
    input_box2 = InputBox(10, SCREENHEIGHT / 2 - 8, SCREENWIDTH - 20, 32)
    submit_box = SubmitBox(SCREENWIDTH / 2 - 28, SCREENHEIGHT / 2 + 50, 56, 32, "Play")
    boxes = [input_box, input_box2, submit_box]
    # Loop until Play is pressed and both fields are non-empty.
    while not submit_box.get_active() or input_box.get_text() == '' or input_box2.get_text() == '':
        clean_screen()
        SCREEN.blit(pygame.font.Font(None, 32).render('Server address:', True, pygame.Color('lightskyblue3')),
                    (10, SCREENHEIGHT / 2 - 120))
        SCREEN.blit(pygame.font.Font(None, 32).render('Server port:', True, pygame.Color('lightskyblue3')),
                    (10, SCREENHEIGHT / 2 - 40))
        submit_box.set_not_active()
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN:
                # Tab cycles focus between the two input boxes.
                if event.key == pygame.K_TAB and input_box.get_active():
                    input_box.set_not_active()
                    input_box2.set_active()
                elif event.key == pygame.K_TAB and input_box2.get_active():
                    input_box2.set_not_active()
                    input_box.set_active()
                # Enter submits the form.
                if event.key == K_RETURN or event.key == K_KP_ENTER:
                    submit_box.set_active()
            for box in boxes:
                box.handle_event(event)
        for box in boxes:
            box.draw(SCREEN)
        pygame.display.flip()
        FPSCLOCK.tick(FPS)
    try:
        port = int(input_box2.get_text())
    except Exception:
        # Non-numeric port input falls back to 0.
        port = 0
    return input_box.get_text(), port
def clean_screen():
    # Fill the whole window with the background colour (ghost white).
    SCREEN.fill((248, 248, 255))
def get_nick():
    """Prompt for the player's nick (max 16 chars) and return it.

    Re-prompts recursively when the entered nick is too long.
    """
    input_box = InputBox(10, SCREENHEIGHT / 2 - 16, SCREENWIDTH - 20, 32)
    submit_box = SubmitBox(SCREENWIDTH / 2 - 28, SCREENHEIGHT / 2 + 16 + 20, 56, 32, "Play")
    boxes = [input_box, submit_box]
    # Loop until Play is pressed and the nick field is non-empty.
    while not submit_box.get_active() or input_box.get_text() == '':
        SCREEN.fill((248, 248, 255))
        SCREEN.blit(pygame.font.Font(None, 32).render('Your nick:', True, pygame.Color('lightskyblue3')),
                    (SCREENWIDTH / 2 - 54, SCREENHEIGHT / 2 - 32 - 20))
        submit_box.set_not_active()
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            # Enter submits the form.
            if event.type == KEYDOWN and (event.key == K_RETURN or event.key == K_KP_ENTER):
                submit_box.set_active()
            for box in boxes:
                box.handle_event(event)
        for box in boxes:
            box.draw(SCREEN)
        pygame.display.flip()
        FPSCLOCK.tick(FPS)
    if len(input_box.get_text()) > 16:
        return get_nick()
    else:
        return input_box.get_text()
# Run the game when executed as a script.
if __name__ == '__main__':
    main()
| true |
32d18b192727be2849d1c8fdd9f074616d268610 | Python | benyhh/FYS2160 | /Oblig/oblig1/1.py | UTF-8 | 1,322 | 2.953125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
from scipy.special import comb
from scipy.misc import derivative
# Read the measured cup temperatures: each line is "time temp1 temp2".
# Use a context manager so the file handle is closed even on error
# (the previous version opened the file and never closed it).
with open('termokopper.txt', 'r') as f:
    lines = f.readlines()

time = np.zeros(len(lines))
temp1 = np.zeros(len(lines))
temp2 = np.zeros(len(lines))
for i in range(len(lines)):
    split = lines[i].split()
    time[i] = split[0]
    temp1[i] = split[1]
    temp2[i] = split[2]

# Plot both cups on the same axes.
plt.plot(time, temp1, time, temp2)
plt.legend(["Temperfect", "Bodum"])
plt.title("Temperature of liquid in cups")
plt.xlabel("Time [s]")
plt.ylabel("Temperature [°C]")
#plt.savefig("cuptemp.png", dpi = 400)
plt.show()
def multiplicity(q):
    """Multiplicity of an Einstein solid with N oscillators and q quanta.

    W(N, q) = C(N - 1 + q, q).  The previous call also passed
    repetition=True, which applies the multiset transformation a second
    time to an argument that is already N - 1 + q, over-counting states.
    """
    W = comb(N - 1 + q, q)
    return W
# Einstein solid: N oscillators sharing up to 19 energy quanta, in units
# where the Boltzmann constant and quantum of energy are 1.
N = 100
q = np.arange(20)
kb = 1
epsilon = 1

W = multiplicity(q)
U = q*epsilon
# Boltzmann entropy S = kb * ln(W).
S = kb*np.log(W)
def diff(A, B):
    """Numerical derivative dA/dB: central differences inside, one-sided
    differences at the two endpoints."""
    n = len(A)
    D = np.zeros(n)
    # Forward difference at the first point.
    D[0] = (A[1] - A[0]) / (B[1] - B[0])
    # Central (midpoint) differences for the interior points.
    for idx in range(1, n - 1):
        D[idx] = (A[idx + 1] - A[idx - 1]) / (B[idx + 1] - B[idx - 1])
    # Backward difference at the last point.
    D[-1] = (A[-1] - A[-2]) / (B[-1] - B[-2])
    return D
# Temperature from the thermodynamic identity 1/T = dS/dU, then heat
# capacity Cv = dU/dT.
T = 1/diff(U,S)
Cv = diff(U,T)
def mult(N, q):
    """Stirling-style closed-form approximation of the multiplicity W(N, q)."""
    total = N + q
    return (total / q) ** q * (total / N) ** N
# Analytic (Planck-distribution) results for the Einstein solid.  The
# previous version used an undefined name `k` (the constant is bound as
# `kb` above), so these lines raised NameError at runtime.
T_a = epsilon / kb / np.log(1 + N * epsilon / U)
U_a = N*epsilon/(np.exp(epsilon/kb/T)-1)
Cv_a = epsilon**2 * N * np.exp(epsilon/kb/T) / ( (np.exp(epsilon/kb/T)-1)**2 * kb * T**2 )

plt.plot(T,Cv)
plt.show()
| true |
d8d1788f92b34df42210fc76e9035369d1a51d5d | Python | saltstack/salt | /salt/beacons/inotify.py | UTF-8 | 12,276 | 2.59375 | 3 | [
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] | permissive | """
Watch files and translate the changes into salt events
:depends: - pyinotify Python module >= 0.9.5
:Caution: Using generic mask options like open, access, ignored, and
closed_nowrite with reactors can easily cause the reactor
to loop on itself. To mitigate this behavior, consider
setting the `disable_during_state_run` flag to `True` in
the beacon configuration.
:note: The `inotify` beacon only works on OSes that have `inotify`
kernel support.
"""
import collections
import fnmatch
import logging
import os
import re
import salt.utils.beacons
try:
import pyinotify
HAS_PYINOTIFY = True
DEFAULT_MASK = pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_MODIFY
MASKS = {}
for var in dir(pyinotify):
if var.startswith("IN_"):
key = var[3:].lower()
MASKS[key] = getattr(pyinotify, var)
except ImportError:
HAS_PYINOTIFY = False
DEFAULT_MASK = None
__virtualname__ = "inotify"
log = logging.getLogger(__name__)
def __virtual__():
    """Only load this beacon when the optional pyinotify dependency is present."""
    if HAS_PYINOTIFY:
        return __virtualname__
    err_msg = "pyinotify library is missing."
    log.error("Unable to load inotify beacon: %s", err_msg)
    return False, err_msg
def _get_mask(mask):
    """
    Return the int that represents the mask

    Unknown mask names map to 0 (no bits set).
    """
    return MASKS.get(mask, 0)
def _enqueue(revent):
    """
    Enqueue the event

    Used as the pyinotify callback; events are drained later by beacon().
    """
    __context__["inotify.queue"].append(revent)
def _get_notifier(config):
    """
    Check the context for the notifier and construct it if not present

    The notifier (and its event queue) is cached in __context__ under a
    key derived from the beacon name so multiple beacon instances do not
    share watches.
    """
    beacon_name = config.get("_beacon_name", "inotify")
    notifier = "{}.notifier".format(beacon_name)
    if notifier not in __context__:
        __context__["inotify.queue"] = collections.deque()
        wm = pyinotify.WatchManager()
        __context__[notifier] = pyinotify.Notifier(wm, _enqueue)
        # Optional top-level coalescing: drop duplicate events per batch.
        if (
            "coalesce" in config
            and isinstance(config["coalesce"], bool)
            and config["coalesce"]
        ):
            __context__[notifier].coalesce_events()
    return __context__[notifier]
def validate(config):
    """
    Validate the beacon configuration

    Returns a (bool, message) tuple as required by the beacon loader.
    """
    VALID_MASK = [
        "access",
        "attrib",
        "close_nowrite",
        "close_write",
        "create",
        "delete",
        "delete_self",
        "excl_unlink",
        "ignored",
        "modify",
        "moved_from",
        "moved_to",
        "move_self",
        "oneshot",
        "onlydir",
        "open",
        "unmount",
    ]

    # Configuration for inotify beacon should be a dict of dicts
    if not isinstance(config, list):
        return False, "Configuration for inotify beacon must be a list."
    else:
        config = salt.utils.beacons.list_to_dict(config)

        if "files" not in config:
            return False, "Configuration for inotify beacon must include files."
        else:
            if not isinstance(config["files"], dict):
                return (
                    False,
                    "Configuration for inotify beacon invalid, files must be a dict.",
                )

            # Check each watched path's options individually.
            for path in config.get("files"):

                if not isinstance(config["files"][path], dict):
                    return (
                        False,
                        "Configuration for inotify beacon must be a list of"
                        " dictionaries.",
                    )
                else:
                    # At least one of mask/recurse/auto_add must be present.
                    if not any(
                        j in ["mask", "recurse", "auto_add"]
                        for j in config["files"][path]
                    ):
                        return (
                            False,
                            "Configuration for inotify beacon must contain mask,"
                            " recurse or auto_add items.",
                        )

                    if "auto_add" in config["files"][path]:
                        if not isinstance(config["files"][path]["auto_add"], bool):
                            return (
                                False,
                                "Configuration for inotify beacon auto_add must be"
                                " boolean.",
                            )

                    if "recurse" in config["files"][path]:
                        if not isinstance(config["files"][path]["recurse"], bool):
                            return (
                                False,
                                "Configuration for inotify beacon recurse must be"
                                " boolean.",
                            )

                    if "mask" in config["files"][path]:
                        if not isinstance(config["files"][path]["mask"], list):
                            return (
                                False,
                                "Configuration for inotify beacon mask must be list.",
                            )
                        # Every mask entry must be one of the known names.
                        for mask in config["files"][path]["mask"]:
                            if mask not in VALID_MASK:
                                return (
                                    False,
                                    "Configuration for inotify beacon invalid mask"
                                    " option {}.".format(mask),
                                )
    return True, "Valid beacon configuration"
def beacon(config):
    """
    Watch the configured files

    Example Config

    .. code-block:: yaml

        beacons:
          inotify:
            - files:
                /path/to/file/or/dir:
                  mask:
                    - open
                    - create
                    - close_write
                  recurse: True
                  auto_add: True
                  exclude:
                    - /path/to/file/or/dir/exclude1
                    - /path/to/file/or/dir/exclude2
                    - /path/to/file/or/dir/regex[a-m]*$:
                        regex: True
            - coalesce: True

    The mask list can contain the following events (the default mask is create,
    delete, and modify):

    * access            - File accessed
    * attrib            - File metadata changed
    * close_nowrite     - Unwritable file closed
    * close_write       - Writable file closed
    * create            - File created in watched directory
    * delete            - File deleted from watched directory
    * delete_self       - Watched file or directory deleted
    * modify            - File modified
    * moved_from        - File moved out of watched directory
    * moved_to          - File moved into watched directory
    * move_self         - Watched file moved
    * open              - File opened

    The mask can also contain the following options:

    * dont_follow       - Don't dereference symbolic links
    * excl_unlink       - Omit events for children after they have been unlinked
    * oneshot           - Remove watch after one event
    * onlydir           - Operate only if name is directory

    recurse:
      Recursively watch files in the directory
    auto_add:
      Automatically start watching files that are created in the watched directory
    exclude:
      Exclude directories or files from triggering events in the watched directory.
      Can use regex if regex is set to True

    coalesce:
      If this coalescing option is enabled, events are filtered based on
      their unicity, only unique events are enqueued, doublons are discarded.
      An event is unique when the combination of its fields (wd, mask,
      cookie, name) is unique among events of a same batch. After a batch of
      events is processed any events are accepted again.
      This option is top-level (at the same level as the path) and therefore
      affects all paths that are being watched. This is due to this option
      being at the Notifier level in pyinotify.
    """
    whitelist = ["_beacon_name"]
    config = salt.utils.beacons.remove_hidden_options(config, whitelist)

    config = salt.utils.beacons.list_to_dict(config)

    ret = []
    notifier = _get_notifier(config)
    wm = notifier._watch_manager

    # Read in existing events
    if notifier.check_events(1):
        notifier.read_events()
        notifier.process_events()
        queue = __context__["inotify.queue"]
        while queue:
            event = queue.popleft()

            _append = True
            # Find the matching path in config: walk up the directory tree
            # until a configured path (or the filesystem root) is reached.
            path = event.path
            while path != "/":
                if path in config.get("files", {}):
                    break
                path = os.path.dirname(path)

            excludes = config["files"].get(path, {}).get("exclude", "")

            # Apply the exclude rules: regex entries, glob patterns, or
            # plain path prefixes.
            if excludes and isinstance(excludes, list):
                for exclude in excludes:
                    if isinstance(exclude, dict):
                        _exclude = next(iter(exclude))
                        if exclude[_exclude].get("regex", False):
                            try:
                                if re.search(_exclude, event.pathname):
                                    _append = False
                            except Exception:  # pylint: disable=broad-except
                                log.warning("Failed to compile regex: %s", _exclude)
                        else:
                            exclude = _exclude
                    elif "*" in exclude:
                        if fnmatch.fnmatch(event.pathname, exclude):
                            _append = False
                    else:
                        if event.pathname.startswith(exclude):
                            _append = False

            if _append:
                sub = {
                    "tag": event.path,
                    "path": event.pathname,
                    "change": event.maskname,
                }
                ret.append(sub)
            else:
                log.info("Excluding %s from event for %s", event.pathname, path)

    # Get paths currently being watched
    current = set()
    for wd in wm.watches:
        current.add(wm.watches[wd].path)

    # Update existing watches and add new ones
    # TODO: make the config handle more options
    for path in config.get("files", ()):

        if isinstance(config["files"][path], dict):
            mask = config["files"][path].get("mask", DEFAULT_MASK)
            # A list of mask names is OR-ed into a single bitmask.
            if isinstance(mask, list):
                r_mask = 0
                for sub in mask:
                    r_mask |= _get_mask(sub)
            elif isinstance(mask, bytes):
                r_mask = _get_mask(mask)
            else:
                r_mask = mask
            mask = r_mask
            rec = config["files"][path].get("recurse", False)
            auto_add = config["files"][path].get("auto_add", False)
        else:
            mask = DEFAULT_MASK
            rec = False
            auto_add = False

        if path in current:
            # Path already watched: update the watch only when its mask or
            # auto_add setting changed.
            for wd in wm.watches:
                if path == wm.watches[wd].path:
                    update = False
                    if wm.watches[wd].mask != mask:
                        update = True
                    if wm.watches[wd].auto_add != auto_add:
                        update = True
                    if update:
                        wm.update_watch(wd, mask=mask, rec=rec, auto_add=auto_add)
        elif os.path.exists(path):
            excludes = config["files"][path].get("exclude", "")

            excl = None
            if isinstance(excludes, list):
                excl = []
                for exclude in excludes:
                    if isinstance(exclude, dict):
                        excl.append(list(exclude)[0])
                    else:
                        excl.append(exclude)
                excl = pyinotify.ExcludeFilter(excl)

            wm.add_watch(path, mask, rec=rec, auto_add=auto_add, exclude_filter=excl)

    # Return event data
    return ret
def close(config):
    """Stop the cached notifier (if any) and drop it from __context__."""
    config = salt.utils.beacons.list_to_dict(config)

    beacon_name = config.get("_beacon_name", "inotify")
    notifier = "{}.notifier".format(beacon_name)
    if notifier in __context__:
        __context__[notifier].stop()
        del __context__[notifier]
| true |
e02d0038e4dfe718d74a61ab9209f25fc71bfa51 | Python | zhexxian/SUTD-The-Digital-World | /Homework/coding_week3/Ex 1.py | UTF-8 | 89 | 2.890625 | 3 | [] | no_license | def mayIgnore(x):
    # Add one only for exact ints — bools are deliberately excluded by
    # the `type(...) == int` check; anything else maps to None.
    if type(x) == int:
        return x+1
    else:
        return None
fff4755484c7a0e9ae8f92fdfa507b0d09534ad6 | Python | Malvi-M/Python-Projects | /Automatic Wifi Connector Bot.py | UTF-8 | 1,185 | 2.984375 | 3 | [] | no_license | ### Automatic Wifi Connector Bot
import os
import sys
# List the Wi-Fi profiles already saved on this Windows machine.
saved_profiles = os.popen('netsh wlan show profiles').read()
print(saved_profiles)

# List the networks currently in range.
available_profiles = os.popen('netsh wlan show networks').read()
print(available_profiles)

preferred_ssid=input('Enter the preferred Wifi for your connection : ')

# Drop the current connection before switching to the preferred network.
response = os.popen("netsh wlan disconnect").read()
print(response)

# NOTE(review): this is a substring test against the raw netsh output, so
# any profile whose name merely contains the SSID would also match —
# confirm that is acceptable.
if preferred_ssid not in saved_profiles:
    print("Profile for "+preferred_ssid+" is not saved in system")
    print("Sorry but can't establish the connection")
    sys.exit()
else:
    print("Profile for "+preferred_ssid+" is saved in system")

# Busy-wait until the preferred network shows up in a fresh scan.
# NOTE(review): no delay between scans (the sleep is commented out), so
# this polls netsh as fast as possible.
while True:
    avail = os.popen('netsh wlan show networks').read()
    #sleep(3)
    if preferred_ssid in avail:
        print('Found')
        break

print('--------Connecting----------')
resp = os.popen('netsh wlan connect name='+'"'+preferred_ssid+'"').read()
print(resp)
| true |
a2baaaf7fd8a543c5622ead44da7592be6165759 | Python | m32/endesive | /endesive/pdf/PyPDF2_annotate/annotations/rect.py | UTF-8 | 5,699 | 3.03125 | 3 | [
"MIT",
"LGPL-3.0-only",
"BSD-3-Clause"
] | permissive | # -*- coding: utf-8 -*-
"""
Rectangular Annotations
~~~~~~~~~~~~~~~~~~~~~~~
Annotations defined by a width and a height: Square, Circle
:copyright: Copyright 2019 Autodesk, Inc.
:license: MIT, see LICENSE for details.
"""
from .base import Annotation
from .base import make_border_dict
from ..pdfrw import PdfArray, PdfName
from ..config.appearance import set_appearance_state
from ..config.appearance import stroke_or_fill
from ..graphics import Bezier
from ..graphics import Close
from ..graphics import ContentStream
from ..graphics import Line
from ..graphics import Move
from ..graphics import quadratic_to_cubic_bezier
from ..graphics import Rect
from ..graphics import Restore
from ..graphics import Save
class RectAnnotation(Annotation):
    """Base class for annotations positioned by a bounding box
    (i.e. defined by a width and a height).
    """

    def make_rect(self):
        """Return the bounding rect, expanded by the stroke width on all sides."""
        appearance = self._appearance
        location = self._location
        w = appearance.stroke_width
        return [
            location.x1 - w,
            location.y1 - w,
            location.x2 + w,
            location.y2 + w,
        ]

    def add_additional_pdf_object_data(self, obj):
        """Attach border style, colors and rect padding to the PDF object dict."""
        appearance = self._appearance
        obj[PdfName("BS")] = make_border_dict(appearance)
        obj[PdfName("C")] = appearance.stroke_color
        if appearance.fill:
            obj[PdfName("IC")] = appearance.fill
        half_stroke = appearance.stroke_width / 2.0
        obj[PdfName("RD")] = PdfArray([half_stroke] * 4)
class Square(RectAnnotation):
    """Rectangular annotation rendered as a stroked and/or filled rect."""

    subtype = "Square"

    def make_appearance_stream(self):
        """Build the appearance content stream: save, style, rect, restore."""
        # TODO dash array
        location = self._location
        appearance = self._appearance
        stream = ContentStream([Save()])
        set_appearance_state(stream, appearance)
        width = location.x2 - location.x1
        height = location.y2 - location.y1
        stream.add(Rect(location.x1, location.y1, width, height))
        stroke_or_fill(stream, appearance)
        stream.add(Restore())
        return stream
def add_rounded_rectangle(stream, x, y, width, height, rx, ry):
    """Creates a rounded rectangle and adds it to the content stream.
    :param ContentStream stream:
    :param float x1:
    :param float y1:
    :param float width:
    :param float height:
    :param float rx: x radius of the rounded corners
    :param float ry: y radius of the rounded corners
    """
    # Each corner arc is a quadratic bezier (converted to cubic, since PDF
    # content streams only support cubic curves). Path runs counter-clockwise
    # starting just right of the bottom-left corner.
    # Bottom edge, then bottom-right corner.
    stream.add(Move(x + rx, y))
    stream.add(Line(x + width - rx, y))
    stream.add(
        quadratic_to_cubic_bezier(
            start_x=(x + width - rx),
            start_y=y,
            control_x=(x + width),
            control_y=y,
            end_x=(x + width),
            end_y=(y + ry),
        )
    )
    # Right edge, then top-right corner.
    stream.add(Line(x + width, y + height - ry))
    stream.add(
        quadratic_to_cubic_bezier(
            start_x=(x + width),
            start_y=(y + height - ry),
            control_x=(x + width),
            control_y=(y + height),
            end_x=(x + width - rx),
            end_y=(y + height),
        )
    )
    # Top edge, then top-left corner.
    stream.add(Line(x + rx, y + height))
    stream.add(
        quadratic_to_cubic_bezier(
            start_x=(x + rx),
            start_y=(y + height),
            control_x=x,
            control_y=(y + height),
            end_x=x,
            end_y=(y + height - ry),
        )
    )
    # Left edge, then bottom-left corner back to the start.
    stream.add(Line(x, y + ry))
    stream.add(
        quadratic_to_cubic_bezier(
            start_x=x,
            start_y=(y + ry),
            control_x=x,
            control_y=y,
            end_x=(x + rx),
            end_y=y,
        )
    )
    stream.add(Close())
def add_bezier_circle(stream, x1, y1, x2, y2):
    """Create a circle from four bezier curves and add it to the content stream,
    since PDF graphics is missing an ellipse primitive.
    :param ContentStream stream:
    :param float x1:
    :param float y1:
    :param float x2:
    :param float y2:
    """
    # Anchor points at the four compass points of the bounding box.
    left_x = x1
    right_x = x2
    bottom_x = left_x + (right_x - left_x) / 2.0
    top_x = bottom_x
    bottom_y = y1
    top_y = y2
    left_y = bottom_y + (top_y - bottom_y) / 2.0
    right_y = left_y
    # Standard control-point offset for approximating a quarter circle with a
    # single cubic bezier (see the link below for the derivation).
    cp_offset = 0.552284749831
    # Move to the bottom of the circle, then four curves around.
    # https://stackoverflow.com/questions/1734745/how-to-create-circle-with-b%C3%A9zier-curves
    stream.add(Move(bottom_x, bottom_y))
    # Quadrant 1: bottom -> right.
    stream.add(
        Bezier(
            bottom_x + (right_x - bottom_x) * cp_offset,
            bottom_y,
            right_x,
            right_y - (right_y - bottom_y) * cp_offset,
            right_x,
            right_y,
        )
    )
    # Quadrant 2: right -> top.
    stream.add(
        Bezier(
            right_x,
            right_y + (top_y - right_y) * cp_offset,
            top_x + (right_x - top_x) * cp_offset,
            top_y,
            top_x,
            top_y,
        )
    )
    # Quadrant 3: top -> left.
    stream.add(
        Bezier(
            top_x - (top_x - left_x) * cp_offset,
            top_y,
            left_x,
            left_y + (top_y - left_y) * cp_offset,
            left_x,
            left_y,
        )
    )
    # Quadrant 4: left -> back to bottom.
    stream.add(
        Bezier(
            left_x,
            left_y - (left_y - bottom_y) * cp_offset,
            bottom_x - (bottom_x - left_x) * cp_offset,
            bottom_y,
            bottom_x,
            bottom_y,
        )
    )
    stream.add(Close())
class Circle(RectAnnotation):
    """Elliptical annotation. Shares the Square PDF structure but draws an
    ellipse (four bezier arcs) instead of a rect in its content stream.
    """

    subtype = "Circle"

    def make_appearance_stream(self):
        """Build the appearance content stream: save, style, ellipse, restore."""
        location = self._location
        appearance = self._appearance
        stream = ContentStream([Save()])
        set_appearance_state(stream, appearance)
        add_bezier_circle(stream, location.x1, location.y1, location.x2, location.y2)
        stroke_or_fill(stream, appearance)
        stream.add(Restore())
        return stream
| true |
c6c5ae8c5a59295e44d036f02f95e1a4ff367b1e | Python | strattonbrazil/parts | /python/minigame.py | UTF-8 | 1,488 | 2.859375 | 3 | [] | no_license | import sys
def pointContainsRect(mousePos, rect):
    """Return True when mousePos lies strictly inside rect's bounds."""
    px, py = mousePos
    rx, ry = rect["position"]
    width, height = rect["size"]
    # Strict inequalities: points exactly on an edge count as outside.
    return rx < px < rx + width and ry < py < ry + height
def scaleColor(color, scale):
    """Scale every channel of `color` by `scale`, clamping channels at 1."""
    return tuple(min(channel * scale, 1) for channel in color)
def update_game(ctx):
    """Advance one frame of the 2x2 sound-pad minigame.

    Reads ctx["mousePos"] and ctx["mouseDown"]; writes ctx["assets"]
    (the four pads, hovered pad at full color, others dimmed), sets
    ctx["sound"] on a fresh mouse press over a pad, and tracks the
    press edge in ctx["wasUp"].
    """
    if "state" not in ctx:
        ctx["state"] = {
            "starting" : True
        }
    if "wasUp" not in ctx:
        ctx["wasUp"] = True

    unit = 1 / 7.0  # grid cell: each pad is 2 units wide with 1-unit gutters
    assets = []
    colors = [(1,1,0), (1,0,1), (1,0,0), (0,1,1)]
    possibleSounds = ["piano-bb", "piano-c", "piano-eb", "piano-g"]
    possibleSound = None
    for i in range(4):
        # BUG FIX: `i / 2` is float division in Python 3 and produced
        # fractional rows (0.5, 1.5); integer division restores the 2x2 grid.
        row = i // 2
        column = i % 2
        rect = {
            "type" : "rectangle",
            "position" : [unit+unit*3*column, unit+unit*3*row],
            "size" : [unit*2, unit*2]
        }
        if pointContainsRect(ctx["mousePos"], rect):
            rect["color"] = colors[i]
            possibleSound = possibleSounds[i]
        else:
            rect["color"] = scaleColor(colors[i], 0.4)
        assets.append(rect)
    ctx["assets"] = assets

    if ctx["mouseDown"] and ctx["wasUp"]: # press edge (button just went down)
        if possibleSound:
            ctx["sound"] = possibleSound
    # wasUp mirrors the button state for the next frame's edge detection.
    ctx["wasUp"] = not ctx["mouseDown"]
| true |
0359a70928c8a530c82173829c54ca70a3674fc5 | Python | oudream/hello-fastai | /courses-py/deeplearning2/seq2seq-translation.py | UTF-8 | 21,695 | 3.0625 | 3 | [] | no_license |
# coding: utf-8
# # Requirements
# In[6]:
import unicodedata, string, re, random, time, math, torch, torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import keras, numpy as np
# In[7]:
from keras.preprocessing import sequence
# ## Loading data files
#
# The data for this project is a set of many thousands of English to French translation pairs.
#
# [This question on Open Data Stack Exchange](http://opendata.stackexchange.com/questions/3888/dataset-of-sentences-translated-into-many-languages) pointed me to the open translation site http://tatoeba.org/ which has downloads available at http://tatoeba.org/eng/downloads - and better yet, someone did the extra work of splitting language pairs into individual text files here: http://www.manythings.org/anki/
#
# The English to French pairs are too big to include in the repo, so download to `data/fra.txt` before continuing. The file is a tab separated list of translation pairs:
#
# ```
# I am cold. Je suis froid.
# ```
# We'll need a unique index per word to use as the inputs and targets of the networks later. To keep track of all this we will use a helper class called `Lang` which has word → index (`word2index`) and index → word (`index2word`) dictionaries, as well as a count of each word `word2count` to use to later replace rare words.
# In[8]:
SOS_token = 0
EOS_token = 1
class Lang:
    """Vocabulary for one language: word<->index maps plus usage counts.

    Indices 0 and 1 are reserved for the SOS/EOS sentinel tokens.
    """

    def __init__(self, name):
        self.name = name
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = 2  # Count SOS and EOS

    def addSentence(self, sentence):
        """Register every space-separated token of `sentence`."""
        for token in sentence.split(' '):
            self.addWord(token)

    def addWord(self, word):
        """Add `word` to the vocabulary, or bump its count if already known."""
        if word in self.word2index:
            self.word2count[word] += 1
        else:
            idx = self.n_words
            self.word2index[word] = idx
            self.word2count[word] = 1
            self.index2word[idx] = word
            self.n_words = idx + 1
# The files are all in Unicode, to simplify we will turn Unicode characters to ASCII, make everything lowercase, and trim most punctuation.
# In[9]:
# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    """Strip combining marks (accents) after NFD-decomposing `s`."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')

def normalizeString(s):
    """Lowercase/trim `s`, pad sentence punctuation, drop other symbols."""
    ascii_text = unicodeToAscii(s.lower().strip())
    # Put a space before . ! ? so they tokenize as separate words.
    spaced = re.sub(r"([.!?])", r" \1", ascii_text)
    # Collapse any other non-letter run into a single space.
    return re.sub(r"[^a-zA-Z.!?]+", r" ", spaced)
# To read the data file we will split the file into lines, and then split lines into pairs. The files are all English → Other Language, so if we want to translate from Other Language → English I added the `reverse` flag to reverse the pairs.
# In[10]:
def readLangs(lang1, lang2, pairs_file, reverse=False):
    """Load tab-separated sentence pairs from data/<pairs_file>.

    Returns (input_lang, output_lang, pairs) where the Lang objects are
    still empty vocabularies; with reverse=True the pair order and the
    language roles are swapped.
    """
    print("Reading lines...")
    # Read the file and split into lines
    # NOTE(review): the file handle is never closed explicitly.
    lines = open('data/%s' % (pairs_file)).read().strip().split('\n')
    # Split every line into pairs and normalize
    pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
    # Reverse pairs, make Lang instances
    if reverse:
        pairs = [list(reversed(p)) for p in pairs]
        input_lang = Lang(lang2)
        output_lang = Lang(lang1)
    else:
        input_lang = Lang(lang1)
        output_lang = Lang(lang2)
    return input_lang, output_lang, pairs
# Since there are a *lot* of example sentences and we want to train something quickly, we'll trim the data set to only relatively short and simple sentences. Here the maximum length is 10 words (that includes ending punctuation) and we're filtering to sentences that translate to the form "I am" or "He is" etc. (accounting for apostrophes replaced earlier).
# In[11]:
# Keep the training set small: short sentences whose English side starts
# with one of these simple subject prefixes.
MAX_LENGTH = 10

eng_prefixes = (
    "i am ", "i m ",
    "he is", "he s ",
    "she is", "she s",
    "you are", "you re ",
    "we are", "we re ",
    "they are", "they re "
)

def filterPair(p):
    """True when both sides are shorter than MAX_LENGTH tokens and the
    English side (p[1]) starts with one of eng_prefixes."""
    short_enough = (len(p[0].split(' ')) < MAX_LENGTH
                    and len(p[1].split(' ')) < MAX_LENGTH)
    return short_enough and p[1].startswith(eng_prefixes)

def filterPairs(pairs):
    """Keep only the pairs accepted by filterPair."""
    return list(filter(filterPair, pairs))
# The full process for preparing the data is:
#
# * Read text file and split into lines, split lines into pairs
# * Normalize text, filter by length and content
# * Make word lists from sentences in pairs
# In[13]:
def prepareData(lang1, lang2, pairs_file, reverse=False):
    """Full corpus preparation: read pairs, filter them, build vocabularies.

    Returns (input_lang, output_lang, pairs) with both Lang vocabularies
    populated from the surviving pairs. Progress is reported via print.
    """
    input_lang, output_lang, pairs = readLangs(lang1, lang2, pairs_file, reverse)
    print("Read %s sentence pairs" % len(pairs))
    pairs = filterPairs(pairs)
    print("Trimmed to %s sentence pairs" % len(pairs))
    print("Counting words...")
    # Index every word on both sides so sentences can be turned into tensors.
    for pair in pairs:
        input_lang.addSentence(pair[0])
        output_lang.addSentence(pair[1])
    print("Counted words:")
    print(input_lang.name, input_lang.n_words)
    print(output_lang.name, output_lang.n_words)
    return input_lang, output_lang, pairs
input_lang, output_lang, pairs = prepareData('eng', 'fra', 'fra.txt', True)
print(random.choice(pairs))
# In[14]:
def indexesFromSentence(lang, sentence):
    """Map each token of `sentence` to its vocabulary index, appending EOS."""
    indexes = [lang.word2index[token] for token in sentence.split(' ')]
    indexes.append(EOS_token)
    return indexes

def variableFromSentence(lang, sentence):
    """Wrap the index sequence of `sentence` in a (1, T) LongTensor Variable."""
    return Variable(torch.LongTensor(indexesFromSentence(lang, sentence)).unsqueeze(0))

def variablesFromPair(pair):
    """Build (input, target) variables from a (source, target) sentence pair."""
    source_var = variableFromSentence(input_lang, pair[0])
    target_var = variableFromSentence(output_lang, pair[1])
    return (source_var, target_var)
# In[15]:
def index_and_pad(lang, dat):
    # Index every sentence in `dat` (EOS appended by indexesFromSentence) and
    # right-pad with zeros to a common length; int64 so torch.LongTensor
    # accepts the result directly.
    return sequence.pad_sequences([indexesFromSentence(lang, s) 
                        for s in dat], padding='post').astype(np.int64)
# In[16]:
fra, eng = list(zip(*pairs))
# In[17]:
fra = index_and_pad(input_lang, fra)
eng = index_and_pad(output_lang, eng)
# In[18]:
def get_batch(x, y, batch_size=16):
    """Sample a random mini-batch of matching rows from x and y."""
    chosen = np.random.permutation(len(x))[:batch_size]
    return x[chosen], y[chosen]
# ## The Encoder
#
# The encoder of a seq2seq network is a RNN that outputs some value for every word from the input sentence. For every input word the encoder outputs a vector and a hidden state, and uses the hidden state for the next input word.
# In[19]:
class EncoderRNN(nn.Module):
    """Single-direction GRU encoder over embedded token ids (batch_first)."""

    def __init__(self, input_size, hidden_size, n_layers=1):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, num_layers=n_layers)

    def forward(self, input, hidden):
        """Embed the (batch, seq) ids and run the GRU; returns (output, hidden)."""
        embedded = self.embedding(input)
        output, hidden = self.gru(embedded, hidden)
        return output, hidden

    # TODO: other inits
    def initHidden(self, batch_size):
        # Zero state of shape (1, batch, hidden).
        # NOTE(review): with n_layers > 1 the GRU expects (n_layers, batch,
        # hidden) — confirm before using multiple layers.
        return Variable(torch.zeros(1, batch_size, self.hidden_size))
# ## Simple Decoder
#
# In the simplest seq2seq decoder we use only last output of the encoder. This last output is sometimes called the *context vector* as it encodes context from the entire sequence. This context vector is used as the initial hidden state of the decoder.
#
# At every step of decoding, the decoder is given an input token and hidden state. The initial input token is the start-of-string `<SOS>` token, and the first hidden state is the context vector (the encoder's last hidden state).
# In[20]:
class DecoderRNN(nn.Module):
    """GRU decoder that emits a log-probability distribution per step.

    forward() consumes one token per batch element (shape (batch,)) and the
    previous hidden state, returning ((batch, vocab) log-probs, hidden).
    """

    def __init__(self, hidden_size, output_size, n_layers=1):
        super(DecoderRNN, self).__init__()
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, num_layers=n_layers)
        # TODO use transpose of embedding
        self.out = nn.Linear(hidden_size, output_size)
        # dim=1 normalizes over the vocabulary axis of the (batch, vocab)
        # logits; the bare nn.LogSoftmax() form is deprecated and relied on
        # an implicit dimension.
        self.sm = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        emb = self.embedding(input).unsqueeze(1)  # (batch, 1, hidden)
        # NB: Removed relu
        res, hidden = self.gru(emb, hidden)
        output = self.sm(self.out(res[:,0]))      # (batch, vocab) log-probs
        return output, hidden
# ## Attention Decoder
# If only the context vector is passed betweeen the encoder and decoder, that single vector carries the burden of encoding the entire sentence.
#
# Attention allows the decoder network to "focus" on a different part of the encoder's outputs for every step of the decoder's own outputs. First we calculate a set of *attention weights*. These will be multiplied by the encoder output vectors to create a weighted combination. The result (called `attn_applied` in the code) should contain information about that specific part of the input sequence, and thus help the decoder choose the right output words.
#
# 
#
# Calculating the attention weights is done with another feed-forward layer `attn`, using the decoder's input and hidden state as inputs. Because there are sentences of all sizes in the training data, to actually create and train this layer we have to choose a maximum sentence length (input length, for encoder outputs) that it can apply to. Sentences of the maximum length will use all the attention weights, while shorter sentences will only use the first few.
#
# 
# In[9]:
class AttnDecoderRNN(nn.Module):
    """GRU decoder with attention over up to `max_length` encoder outputs.

    Attention weights are computed from the embedded input token and the
    current hidden state, then applied to `encoder_outputs` before the GRU.
    forward() returns (log-probs (1, vocab), hidden, attn_weights).
    """

    def __init__(self, hidden_size, output_size, n_layers=1, dropout_p=0.1, max_length=MAX_LENGTH):
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout_p = dropout_p
        self.max_length = max_length
        self.embedding = nn.Embedding(self.output_size, self.hidden_size)
        self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
        self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, hidden, encoder_output, encoder_outputs):
        # NOTE(review): encoder_output is accepted but unused here.
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)
        # dim=1 normalizes the attention weights over the max_length
        # positions; the implicit-dim softmax form is deprecated.
        attn_weights = F.softmax(self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.unsqueeze(0))
        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)
        for i in range(self.n_layers):
            output = F.relu(output)
            output, hidden = self.gru(output, hidden)
        # dim=1: normalize over the vocabulary axis of the (1, vocab) logits.
        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights

    def initHidden(self):
        return Variable(torch.zeros(1, 1, self.hidden_size))
# *Note:* There are other forms of attention that work around the length limitation by using a relative position approach. Read about "local attention" in [Effective Approaches to Attention-based Neural Machine Translation](https://arxiv.org/abs/1508.04025).
# ## Training
#
# To train we run the input sentence through the encoder, and keep track of every output and the latest hidden state. Then the decoder is given the `<SOS>` token as its first input, and the last hidden state of the decoder as its first hidden state.
#
# "Teacher forcing" is the concept of using the real target outputs as each next input, instead of using the decoder's guess as the next input. Using teacher forcing causes it to converge faster but [when the trained network is exploited, it may exhibit instability](http://minds.jacobs-university.de/sites/default/files/uploads/papers/ESNTutorialRev.pdf).
# In[21]:
def train(input_variable, target_variable, encoder, decoder,
          encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
    """One optimization step over one padded batch (requires CUDA).

    Teacher forcing is always used here: the true target token is fed
    back as the next decoder input. Returns the per-step average loss.
    """
    batch_size, input_length = input_variable.size()
    target_length = target_variable.size()[1]
    encoder_hidden = encoder.initHidden(batch_size).cuda()
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    loss = 0
    encoder_output, encoder_hidden = encoder(input_variable, encoder_hidden)
    # Decoder starts from SOS for every batch element, seeded with the
    # encoder's final hidden state.
    decoder_input = Variable(torch.LongTensor([SOS_token]*batch_size)).cuda()
    decoder_hidden = encoder_hidden
    for di in range(target_length):
        decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden)
        #, encoder_output, encoder_outputs)
        targ = target_variable[:, di]
        # print(decoder_output.size(), targ.size(), target_variable.size())
        loss += criterion(decoder_output, targ)
        decoder_input = targ
    loss.backward()
    encoder_optimizer.step()
    decoder_optimizer.step()
    # NOTE(review): `loss.data[0]` is legacy (pre-0.4) PyTorch; modern
    # versions use `loss.item()`.
    return loss.data[0] / target_length
# In[22]:
def asMinutes(s):
    """Format a duration of `s` seconds as '<minutes>m <seconds>s'."""
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (math.floor(minutes), seconds)

def timeSince(since, percent):
    """Elapsed time since `since` plus the estimated remaining time,
    assuming `percent` (0..1) of the work is done."""
    elapsed = time.time() - since
    estimated_total = elapsed / (percent)
    remaining = estimated_total - elapsed
    return '%s (- %s)' % (asMinutes(elapsed), asMinutes(remaining))
# In[23]:
def trainEpochs(encoder, decoder, n_epochs, print_every=1000, plot_every=100,
               learning_rate=0.01):
    """Run `n_epochs` single-batch training steps (requires CUDA).

    Each "epoch" draws one random batch from the module-level `fra`/`eng`
    arrays; loss is averaged and reported every `print_every` steps and
    plotted every `plot_every` steps.
    """
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every
    encoder_optimizer = optim.RMSprop(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.RMSprop(decoder.parameters(), lr=learning_rate)
    criterion = nn.NLLLoss().cuda()
    for epoch in range(1, n_epochs + 1):
        training_batch = get_batch(fra, eng)
        input_variable = Variable(torch.LongTensor(training_batch[0])).cuda()
        target_variable = Variable(torch.LongTensor(training_batch[1])).cuda()
        loss = train(input_variable, target_variable, encoder, decoder, encoder_optimizer,
                     decoder_optimizer, criterion)
        print_loss_total += loss
        plot_loss_total += loss
        if epoch % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' % (timeSince(start, epoch / n_epochs), epoch,
                                         epoch / n_epochs * 100, print_loss_avg))
        if epoch % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0
    showPlot(plot_losses)
# ### Attention
# In[141]:
# TODO: Make this change during training
# Probability of feeding the true target token back into the decoder
# instead of the decoder's own prediction.
teacher_forcing_ratio = 0.5

def attn_train(input_variable, target_variable, encoder, decoder, encoder_optimizer,
               decoder_optimizer, criterion, max_length=MAX_LENGTH):
    """One optimization step for the attention decoder on a single
    (unbatched) sentence pair; teacher forcing is applied at random.
    Returns the per-step average loss."""
    encoder_hidden = encoder.initHidden()
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    input_length = input_variable.size()[0]
    target_length = target_variable.size()[0]
    # One encoder output slot per input position (padded to max_length),
    # consumed later by the decoder's attention.
    encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
    loss = 0
    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(input_variable[ei], encoder_hidden)
        encoder_outputs[ei] = encoder_output[0][0]
    decoder_input = Variable(torch.LongTensor([[SOS_token]]))
    decoder_hidden = encoder_hidden
    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
    if use_teacher_forcing:
        # Teacher forcing: Feed the target as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_output, encoder_outputs)
            loss += criterion(decoder_output[0], target_variable[di])
            decoder_input = target_variable[di]  # Teacher forcing
    else:
        # Without teacher forcing: use its own predictions as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_output, encoder_outputs)
            topv, topi = decoder_output.data.topk(1)
            ni = topi[0][0]
            decoder_input = Variable(torch.LongTensor([[ni]]))
            loss += criterion(decoder_output[0], target_variable[di])
            if ni == EOS_token:
                break
    loss.backward()
    encoder_optimizer.step()
    decoder_optimizer.step()
    # NOTE(review): legacy `.data[0]`; modern PyTorch uses `loss.item()`.
    return loss.data[0] / target_length
# # Plotting results
#
# Plotting is done with matplotlib, using the array of loss values `plot_losses` saved while training.
# In[24]:
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
get_ipython().magic(u'matplotlib inline')
def showPlot(points):
    """Plot the running loss values with a fixed 0.2 tick spacing."""
    plt.figure()
    fig, ax = plt.subplots()
    loc = ticker.MultipleLocator(base=0.2) # this locator puts ticks at regular intervals
    ax.yaxis.set_major_locator(loc)
    plt.plot(points)
# # Evaluation
#
# Evaluation is mostly the same as training, but there are no targets so we simply feed the decoder's predictions back to itself for each step. Every time it predicts a word we add it to the output string, and if it predicts the EOS token we stop there. We also store the decoder's attention outputs for display later.
# In[25]:
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
    """Greedy-decode a translation of `sentence` (requires CUDA).

    Feeds the decoder's own argmax prediction back as the next input and
    stops at EOS or after max_length tokens. Returns (decoded_words, 0);
    the second slot is a placeholder where the attention matrix used to
    be returned (see the commented-out lines).
    """
    input_variable = variableFromSentence(input_lang, sentence).cuda()
    input_length = input_variable.size()[0]
    encoder_hidden = encoder.initHidden(1).cuda()
    encoder_output, encoder_hidden = encoder(input_variable, encoder_hidden)
    decoder_input = Variable(torch.LongTensor([SOS_token])).cuda()
    decoder_hidden = encoder_hidden
    decoded_words = []
    # decoder_attentions = torch.zeros(max_length, max_length)
    for di in range(max_length):
        # decoder_output, decoder_hidden, decoder_attention = decoder(
        decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden)
        #, encoder_output, encoder_outputs)
        # decoder_attentions[di] = decoder_attention.data
        # Greedy choice: most likely next token.
        topv, topi = decoder_output.data.topk(1)
        ni = topi[0][0]
        if ni == EOS_token:
            decoded_words.append('<EOS>')
            break
        else:
            decoded_words.append(output_lang.index2word[ni])
        decoder_input = Variable(torch.LongTensor([ni])).cuda()
    return decoded_words,0#, decoder_attentions[:di+1]
# In[27]:
def evaluateRandomly(encoder, decoder, n=10):
    """Translate `n` random training pairs and print source (>), reference
    (=) and model output (<) for manual inspection."""
    for i in range(n):
        pair = random.choice(pairs)
        print('>', pair[0])
        print('=', pair[1])
        output_words, attentions = evaluate(encoder, decoder, pair[0])
        output_sentence = ' '.join(output_words)
        print('<', output_sentence)
        print('')
# # Training and Evaluating
#
# *Note:* If you run this notebook you can train, interrupt the kernel, evaluate, and continue training later. Comment out the lines where the encoder and decoder are initialized and run `trainEpochs` again.
# In[28]:
#TODO:
# - Test set
# - random teacher forcing
# - attention
# - multi layers
# - bidirectional encoding
# In[29]:
hidden_size = 256
encoder1 = EncoderRNN(input_lang.n_words, hidden_size).cuda()
attn_decoder1 = DecoderRNN(hidden_size, output_lang.n_words).cuda()
# In[30]:
trainEpochs(encoder1, attn_decoder1, 15000, print_every=500, learning_rate=0.005)
# In[107]:
evaluateRandomly(encoder1, attn_decoder1)
# ## Visualizing Attention
#
# A useful property of the attention mechanism is its highly interpretable outputs. Because it is used to weight specific encoder outputs of the input sequence, we can imagine looking where the network is focused most at each time step.
#
# You could simply run `plt.matshow(attentions)` to see attention output displayed as a matrix, with the columns being input steps and rows being output steps:
# NOTE: This only works when using the attentional decoder, if you've been following the notebook to this point you are using the standard decoder.
# In[20]:
output_words, attentions = evaluate(encoder1, attn_decoder1, "je suis trop froid .")
plt.matshow(attentions.numpy())
# For a better viewing experience we will do the extra work of adding axes and labels:
# In[21]:
def showAttention(input_sentence, output_words, attentions):
    """Render the attention matrix with input tokens on the x axis and
    output tokens on the y axis."""
    # Set up figure with colorbar
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(attentions.numpy(), cmap='bone')
    fig.colorbar(cax)
    # Set up axes
    ax.set_xticklabels([''] + input_sentence.split(' ') + ['<EOS>'], rotation=90)
    ax.set_yticklabels([''] + output_words)
    # Show label at every tick
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
    plt.show()
def evaluateAndShowAttention(input_sentence):
    """Translate `input_sentence` with the module-level encoder1/attn_decoder1
    and visualize the returned attention matrix."""
    output_words, attentions = evaluate(encoder1, attn_decoder1, input_sentence)
    print('input =', input_sentence)
    print('output =', ' '.join(output_words))
    showAttention(input_sentence, output_words, attentions)
# In[22]:
evaluateAndShowAttention("elle a cinq ans de moins que moi .")
# In[23]:
evaluateAndShowAttention("elle est trop petit .")
# In[24]:
evaluateAndShowAttention("je ne crains pas de mourir .")
# In[25]:
evaluateAndShowAttention("c est un jeune directeur plein de talent .")
# * Replace the embedding pre-trained word embeddings such as word2vec or GloVe
| true |
def deep_reverse(L):
    """ assumes L is a list of lists whose elements are ints
        Mutates L such that it reverses its elements and also
        reverses the order of the int elements in every element of L.
        It does not return anything.
    """
    # Slice assignment mutates L in place (required: callers keep their
    # reference). Replaces the original append-then-pop shuffle, which did
    # the same thing with twice the list churn, and drops the large block
    # of commented-out experiments.
    L[:] = [inner[::-1] for inner in reversed(L)]
if __name__ == '__main__':
    # Regression cases: each 'given' list must equal 'expected' after
    # deep_reverse mutates it in place.
    cases = [
        {'given': [[0, 1, 2], [1, 2, 3]], 'expected': [[3, 2, 1], [2, 1, 0]]},
        {'given': [[0, -1, 2, -3, 4, -5]], 'expected': [[-5, 4, -3, 2, -1, 0]]},
        {'given': [[1], [1, 2, 3]], 'expected': [[3, 2, 1], [1]]},
        {'given': [[], [1, 2, 3]], 'expected': [[3, 2, 1], []]},
        {'given': [[2, -1, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 101, 10, 1, 1, 5, 4, 3]],
         'expected': [[3, 4, 5, 1, 1, 10, 101, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1], [10, -1, 2]]},
    ]
    for case in cases:
        data = case['given']
        deep_reverse(data)
        assert data == case['expected']
| true |
739d40c5aa508388cffe9fe7dbc529011feada94 | Python | nware49/PythonExploration | /MultiThreadDataSimulator.py | UTF-8 | 3,129 | 3.171875 | 3 | [] | no_license | import serial
import math
import time
import threading
import random
from datetime import datetime
Port1 = "COM3" #This is which port the data will be sent from
Port2 = "COM4"
#Attempts to open and assign a serial port
#If it cannot open the port, it will print an error message
# NOTE(review): the bare `except:` also swallows KeyboardInterrupt/SystemExit;
# catching serial.SerialException would be more precise.
try:
    ser1 = serial.Serial(Port1)
    ser2 = serial.Serial(Port2)
except:
    print("Could not open ports. Port is either in use or does not exist.")
    exit()
print(ser1.name)
print(ser2.name)
# Shared shutdown flag: set to 1 by the main thread, polled by both writers.
breakIndicator = 0
class Serial1Write(threading.Thread):
    # Writes one simulated CSV record per second to ser1 until the global
    # breakIndicator flag is set or a write fails.
    # Record layout: index,HH:MM:SS,random(0-10),sin(i*pi/12),cos(i*pi/12)
    def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, verbose=None):
        super(Serial1Write,self).__init__()
    def run(self):
        a = -1.1  # NOTE(review): unused here; only Serial2Write uses `a`.
        i = -1
        pi = math.pi
        while True:
            if breakIndicator == 1:  # main thread requests shutdown on Ctrl-C
                break
            i += 1
            pi = math.pi  # NOTE(review): redundant reassignment every loop
            curDT = datetime.now()
            Time = curDT.strftime("%H:%M:%S")
            x = random.uniform(0,10)
            RandFloat = format(x, '.5f')
            y = math.sin((i*pi)/12)
            SineWave = format(y, '.4f')
            z = math.cos((i*pi)/12)
            CosineWave = format(z, '.4f')
            LongString = (str(i) + "," + str(Time) + "," + str(RandFloat) + "," + str(SineWave) + "," + str(CosineWave) + "\n")
            try:
                ser1.write(LongString.encode())
                print (LongString)
                time.sleep(1)
            except:
                # Any write failure (e.g. port closed) stops this thread.
                print("Cannot write to port.")
                break
        return
class Serial2Write(threading.Thread):
    # Same pattern as Serial1Write but targets ser2, increments the first
    # CSV field by 1.1 per record and writes every 1.1 seconds.
    # NOTE(review): the two writer classes are near-duplicates; a single
    # parameterized class would remove the copy/paste.
    def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, verbose=None):
        super(Serial2Write,self).__init__()
    def run(self):
        a = -1.1  # first record's field becomes 0.0 after the first += 1.1
        i = -1
        pi = math.pi
        while True:
            if breakIndicator == 1:  # main thread requests shutdown on Ctrl-C
                break
            i += 1
            a += 1.1
            aFormat = format(a, '.1f')
            curDT = datetime.now()
            Time = curDT.strftime("%H:%M:%S")
            x = random.uniform(0,10)
            RandFloat = format(x, '.5f')
            y = math.sin((i*pi)/12)
            SineWave = format(y, '.4f')
            z = math.cos((i*pi)/12)
            CosineWave = format(z, '.4f')
            LongString = (str(aFormat) + "," + str(Time) + "," + str(RandFloat) + "," + str(SineWave) + "," + str(CosineWave) + "\n")
            try:
                ser2.write(LongString.encode())
                print (LongString)
                time.sleep(1.1)
            except:
                # Any write failure (e.g. port closed) stops this thread.
                print("Cannot write to port.")
                break
        return
if __name__ == '__main__':
    # Start both writer threads, then idle until Ctrl-C; the writers poll
    # the module-level breakIndicator flag to know when to stop.
    writer1 = Serial1Write()
    writer2 = Serial2Write()
    writer1.start()
    writer2.start()
    try:
        # Sleep instead of `while True: continue`, which pinned a CPU core
        # at 100% while doing nothing.
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        print("Keyboard Interrupt")
        breakIndicator = 1
        exit()
def div(*arg):
    """Prompt for a numerator and a denominator and return their quotient.

    Returns 'Value error' when the input is not an integer, or a
    zero-division message string when the denominator is 0.
    *arg is accepted but unused (kept for call compatibility).
    """
    try:
        arg1 = int(input("Введите числитель"))
        arg2 = int(input("Введите знаменатель"))
        res = arg1 / arg2
    except ValueError:
        return 'Value error'
    except ZeroDivisionError:
        return "Вы не можете использовать ноль как делитель!"
    # The original repeated the zero check after this return; that code was
    # unreachable and has been removed.
    return res
print(f'result {div()}')
| true |
15576e0d7a798897492000a0710960810291dc40 | Python | leonhx/leetcode-practice | /60.permutation-sequence.py | UTF-8 | 469 | 3.015625 | 3 | [
"MIT"
] | permissive | #
# @lc app=leetcode id=60 lang=python3
#
# [60] Permutation Sequence
#
class Solution:
def getPermutation(self, n: int, k: int) -> str:
n_combs = 1
for i in range(2, n + 1):
n_combs *= i
k -= 1
digits = list(range(1, n + 1))
result = []
for i in range(n):
n_combs //= n - i
result.append(digits.pop(k // n_combs))
k %= n_combs
return ''.join(map(str, result))
| true |
d94a108c055ca4abd740753742372535ef4258b2 | Python | oway13/Schoolwork | /15Fall/1133 Intro to Programming Concepts/Python Labs/Lab 11/l11 st.py | UTF-8 | 912 | 3.796875 | 4 | [
"MIT"
] | permissive | #Lab 11 Stretch
class measure:
def __init__(self, ft=0,inch=0):
self.feet = 0
if ft == 0:
self.feet += inch//12
self.inches = inch % 12
else:
self.feet = ft
self.inches = inch
def __str__(self):
retstr = ''
if self.feet != 0:
retstr += str(self.feet)+"'"
if self.inches != 0:
retstr += str(self.inches)+'"'
if self.inches == 0 and self.feet == 0:
retstr = '0'
return retstr
def __add__(self,x):
return measure(self.feet+x.feet+((self.inches+x.inches)//12), (self.inches+x.inches)%12)
def __sub__(self,x):
return measure(self.feet-x.feet+(self.inches-x.inches)//12, (self.inches-x.inches)%12)
def main():
    """Demo: print an empty measure, a sum and a difference."""
    zero = measure()
    shorter = measure(4, 11)
    taller = measure(6, 10)
    print(zero)
    print(shorter + taller)
    print(taller - shorter)
| true |
84b15928fa24ed359908cc44b961c06523906a08 | Python | Wendelstein7/DiscordUnitCorrector | /unitbot.py | UTF-8 | 7,863 | 2.9375 | 3 | [
"MIT"
] | permissive | # Discord Unit Corrector Bot
#
# This bot is licenced under the MIT License [Copyright (c) 2018 Wendelstein7]
#
# This is a Discord bot running python3 using the Discord.py library
# This bot will listen for any messages in Discord that contain non-SI units and when detected, reply with the message converted to SI-Units.
# Are you tired of a car that weighs 100 Stones, is 10 feet high, and can drive 50 miles at 5 degrees freedom?
# Worry no more! Your car weighs 0.64t, is 3.05m high, and can drive 80.47km at -15°C from now on!
# Simply add this bot to your server! You can choose to run it yourself or add the version that is updated and hosted by me [Wendelstein 7]
# The unit conversion library was riginally created by ficolas2, https://github.com/ficolas2, 2018/01/21
# The unit conversion library has been modified and updated by ficolas2 and Wendelstein7, https://github.com/Wendelstein7
# Licenced under: MIT License, Copyright (c) 2018 Wendelstein7 and ficolas2
import datetime
import os
import sys
from datetime import datetime, date
import discord
from discord.ext import commands
import filter
import unitconversion
import unitpedialib
# BUG FIX: the user-facing help description contained the typo
# "community-beveloped" -> "community-developed".
description = """UnitCorrector: A community-developed open source Discord bot that corrects non-SI units to SI ones! Also features a !unitpedia command, allowing users to learn about (all) units."""
bot = commands.Bot(command_prefix='!', description=description)
# Bot start time, recorded in UTC; uptime calculations must also use UTC.
starttime = datetime.utcnow()
longprefix = ':symbols: UnitCorrector | '
shortprefix = ':symbols: '
# NOTE: 'credits' shadows the 'credits' builtin; kept because other commands
# reference this module-level name.
credits = '**HydroNitrogen** (GH: `Wendelstein7`, <@378840449152188419>) - _Creator and main current developer_ \n**Shaq** (GH: `Shaquu`, <@197109511239106561>) - _Main current developer_ \n**ficolas** (GH: `ficolas2`, <@192368029366091777>) - _Past developer_ \n ...And other wonderful contributors, see GitHub.'
@bot.event
async def on_ready():
    # Fired once the Discord gateway connection is established; logs identity.
    print('Discord Unit Corrector Bot: Logged in as {} (id: {})\n'.format(
        bot.user.name, bot.user.id))
@bot.event
# Watches every incoming message and replies with an SI-corrected version when
# non-SI units are detected. The conversion logic lives in 'unitconversion.py'.
async def on_message(message):
    # BUG FIX: the author check used `is not`, i.e. object identity. Discord
    # ids are large ints, so two *equal* ids are usually distinct int objects
    # and `is not` could be True even for the bot's own messages. Compare by
    # value instead. Also skip other bots and members holding the opt-out
    # role 'imperial certified' (guild messages only; DMs always qualify).
    if bot.user.id != message.author.id and not message.author.bot and (message.guild is None or discord.utils.get(message.guild.roles, name='imperial certified') not in message.author.roles):
        processedMessage = unitconversion.process(message.content)
        if processedMessage is not None:
            correctionText = ("I think " + filter.apply_strict(message.author.display_name if message.guild is not None else "you") +
                              " meant to say: ```" + filter.apply_strict(processedMessage) + "```")
            await message.channel.send(correctionText)
    # Always hand the message on so normal !commands keep working.
    await bot.process_commands(message)
@bot.event
async def on_command(ctx):
    # Lightweight audit log: one console line per command invocation.
    print('[{}] Fired {} by {}'.format(
        datetime.now(), ctx.command, ctx.author))
@bot.command(name='unitcorrector', aliases=['units', 'listunits', 'unitlist'])
# May be converted to a nice embed if needed in the future.
async def unitcorrector(ctx):
    """Lists supported units by the unit corrector bot."""
    # Comma-separate the display name of every convertible unit.
    supportedUnits = ", ".join(unit.getName() for unit in unitconversion.units)
    await ctx.send(shortprefix + "UnitCorrector automatically detects and corrects users who send non-SI units in their messages.\nThe bot currently supports the following units:\n```" + supportedUnits + "```")
@bot.command(name='uptime', hidden=True)
# May be deprecated, changed or removed as !about already shows the uptime.
async def uptime(ctx):
    """Shows how long this instance of the bot has been online."""
    # BUG FIX: starttime is captured with datetime.utcnow(), so the elapsed
    # time must be taken against utcnow() as well. datetime.now() is local
    # time and skewed the reported uptime by the host's UTC offset (!about
    # already computed the delta correctly with utcnow()).
    await ctx.send(shortprefix + 'Uptime\n```Bot started: {}\nBot uptime: {}```'.format(starttime, (datetime.utcnow() - starttime)))
@bot.command(name='contributors', aliases=['credits', 'developers'])
# Will be made a nice embed in the future if there are lots of contributors.
async def contributors(ctx):
    """Lists the people who have contributed to this bot."""
    # 'credits' is the module-level contributor string defined at the top of
    # the file.
    embed = discord.Embed(title="UnitCorrector contributors and developers", colour=discord.Colour(
        0xffffff), url="https://github.com/Wendelstein7/DiscordUnitCorrector", description=credits)
    embed.set_thumbnail(url=bot.user.avatar_url)
    await ctx.send(embed=embed)
@bot.command(name='unitpedia')
# Unitpedia lookups live in 'unitpedialib.py'; this command is only the
# Discord-facing wrapper and still needs a lot of content expansion.
async def unitpedia(ctx, *, search: str):
    """Gives information about an unit. Try !unitpedia mi, !unitpedia litre, !unitpedia °C, etc..."""
    result = unitpedialib.lookup(search)
    # Guard clause: lookup() signals a miss with the sentinel string "notfound".
    if result == "notfound":
        await ctx.send(shortprefix + 'Sorry, your search query has not returned any results. Try to search using different words or abbreviations.\n\n*Unitpedia is not complete and needs community submissions. If you want to help expand unitpedia, please visit <https://github.com/Wendelstein7/DiscordUnitCorrector>.*')
        return
    await ctx.send(embed=result)
@unitpedia.error
async def unitpedia_error(ctx, error):
    # Only the missing-argument case gets a friendly hint; every other error
    # falls through to discord.py's default handling.
    if not isinstance(error, commands.MissingRequiredArgument):
        return
    await ctx.send(shortprefix + 'You will need to enter a query to search for. Try `!unitpedia metre`, `!unitpedia °F`, `!unitpedia mile²`, etc...')
@bot.command(name='about', aliases=['info'])
# May be changed in the future to be send in DM to prevent malicious use for spam purposes.
async def about(ctx):
    """Shows information about the bot aswell as the relevant version numbers, uptime and useful links."""
    embed = discord.Embed(title="UnitCorrector", colour=discord.Colour(0xffffff), url="https://github.com/Wendelstein7/DiscordUnitCorrector",
                          description="A fully functional public Discord bot that automatically corrects non-SI units (imperial, etc) to SI-ones (metric, etc) This bot will listen for any messages in Discord that contain non-SI units and when detected, reply with the message converted to SI-Units.\n\n*Are you tired of a car that weighs 100 Stones, is 10 feet high, and can drive 50 miles at 5 degrees freedom? Worry no more! Your car weighs 0.64t, is 3.05m high, and can drive 80.47km at -15°C from now on!*")
    embed.set_thumbnail(url=bot.user.avatar_url)
    embed.add_field(name=":information_source: **Commands**",
                    value="Please use the `!help` to list all possible commands!")
    embed.add_field(name=":hash: **Developers**", value=credits)
    embed.add_field(name=":symbols: **Contributing**",
                    value="Want to help with the bot? You're welcome to do so!\n[Visit our GitHub for more information!](https://github.com/Wendelstein7/DiscordUnitCorrector)")
    # "Bot version" is simply the mtime of this source file.
    embed.add_field(name=":new: **Version information**", value="Bot version: `{}`\nDiscord.py version: `{}`\nPython version: `{}`".format(
        date.fromtimestamp(os.path.getmtime('unitbot.py')), discord.__version__, sys.version.split(' ')[0]), inline=True)
    # Uptime is computed against UTC because starttime was taken with utcnow().
    embed.add_field(name=":up: **Uptime information**", value="Bot started: `{}`\nBot uptime: `{}`".format(starttime.strftime(
        "%Y-%m-%d %H:%M:%S"), (datetime.utcnow().replace(microsecond=0) - starttime.replace(microsecond=0))), inline=True)
    embed.add_field(name=":free: **Adding the bot**",
                    value="Want to add this bot to **your** server? [Click here to add it!](https://discordapp.com/oauth2/authorize?client_id=405724335525855232&scope=bot&permissions=67619905)")
    await ctx.send(embed=embed)
# INFO: To run the bot yourself you must enter your bots private token in a (new) file called 'token'
with open('token', 'r') as content_file:
    content = content_file.read()
# Blocks until the bot is shut down.
bot.run(content)
| true |
2abdbd77bbac72408af24e5d0e476d679c04289f | Python | jinaur/codeup | /1420.py | UTF-8 | 220 | 3.046875 | 3 | [] | no_license | n = int(input())
# Read n (name, score) pairs, then print the name(s) whose score equals the
# third-highest score (n is read on the preceding line).
ln = []
l = []
for i in range(0, n) :
    a, b = input().split()
    ln.append(a)
    l.append(int(b))
# Sort a copy of the scores descending; ll[2] is then the 3rd-largest value.
ll = sorted(l)
ll.reverse()
for i in range(0, n) :
    if ll[2] == l[i] :
        print(ln[i])
| true |
c9d61e0d40829db81afb606956bf8a0ca8744cc4 | Python | 33Peng33/named | /layer.py | UTF-8 | 2,226 | 2.875 | 3 | [] | no_license | import tensorflow as tf
class FeedForwardNetwork(tf.keras.models.Model):
    """Two-layer position-wise feed-forward block.

    Widens the last dimension to 4 * hidden_dim with a ReLU ("filter"
    layer), applies dropout, then projects back down to hidden_dim.
    """
    def __init__(self, hidden_dim: int, dropout_rate: float, *args,**kwargs) ->None:
        super().__init__(*args,**kwargs)
        self.hidden_dim = hidden_dim
        self.dropout_rate = dropout_rate
        # Inner projection uses a fixed 4x expansion factor.
        self.filter_dense_layer = tf.keras.layers.Dense(hidden_dim *4, use_bias = True, activation=tf.nn.relu, name='filter_layer')
        self.output_dense_layer = tf.keras.layers.Dense(hidden_dim, use_bias=True, name='output_layer')
        self.dropout_layer = tf.keras.layers.Dropout(dropout_rate)
    def call(self, input:tf.Tensor, training:bool) -> tf.Tensor:
        """Apply filter -> dropout -> output projection; `training` gates dropout."""
        tensor = self.filter_dense_layer(input)
        tensor = self.dropout_layer(tensor, training=training)
        return self.output_dense_layer(tensor)
class ResidualNormalizationWrapper(tf.keras.models.Model):
    """Pre-norm residual wrapper: out = input + dropout(layer(layernorm(input)))."""
    def __init__(self,layer:tf.keras.layers.Layer, dropout_rate:float, *args,**kwargs) -> None:
        super().__init__(*args,**kwargs)
        self.layer= layer
        self.layer_normalization = LayerNormalization()
        self.dropout_layer = tf.keras.layers.Dropout(dropout_rate)
    def call(self, input:tf.Tensor, training:bool, *args,**kwargs) -> tf.Tensor:
        """Run the wrapped layer (normalized input), drop out, add the residual.

        Extra *args/**kwargs are forwarded to the wrapped layer unchanged.
        """
        tensor = self.layer_normalization(input)
        tensor = self.layer(tensor, training=training, *args,**kwargs)
        tensor = self.dropout_layer(tensor, training=training)
        return input+tensor
class LayerNormalization(tf.keras.layers.Layer):
    """Layer normalization over the last axis with a learned scale and bias."""
    def build(self, input_shape:tf.TensorShape) -> None:
        # One scale/bias weight per feature of the last dimension.
        hidden_dim = input_shape[-1]
        self.scale = self.add_weight('layer_norm_scale', shape=[hidden_dim],
                                     initializer=tf.ones_initializer())
        self.bias = self.add_weight('layer_norm_bias', [hidden_dim],
                                    initializer=tf.zeros_initializer())
        super().build(input_shape)
    def call(self, x:tf.Tensor, epsilon:float = 1e-6) -> tf.Tensor:
        """Normalize x to zero mean / unit variance along the last axis, then rescale."""
        mean = tf.reduce_mean(x,axis=[-1], keepdims= True)
        variance = tf.reduce_mean(tf.square(x-mean), axis=[-1],keepdims=True)
        # epsilon guards the rsqrt against zero variance.
        norm_x = (x-mean)* tf.rsqrt(variance + epsilon)
        return norm_x * self.scale + self.bias
| true |
7f1f3ee66f54c73175c2bd120d0fd85d8f28ff32 | Python | liuxushengxian/Python001-class01 | /week04/pd_to_sql.py | UTF-8 | 1,295 | 3.296875 | 3 | [] | no_license | import pandas as pd
import numpy as np
# Demo script: each numbered comment shows a SQL statement and the pandas
# expression that approximates it. Data is random, so output varies per run.
df = pd.DataFrame({
    "id":np.random.randint(1001, 1020, 20),
    "age":np.random.randint(25, 55, 20),
    "salary":np.random.randint(3000, 20000, 20)
})
df1 = pd.DataFrame({
    "id":np.random.randint(1001, 1006, 10),
    "sales":np.random.randint(5000, 20000, 10),
    "date":'Feb'
})
df2 = pd.DataFrame({
    "id":np.random.randint(1001, 1006, 10),
    "sales":np.random.randint(10000, 100000, 10),
    "date":'Mar'
})
# 1. SELECT * FROM data;
print(df)
# 2. SELECT * FROM data LIMIT 10;
print(df.head(10))
# 3. SELECT id FROM data; //id 是 data 表的特定一列
print(df['id'])
# 4. SELECT COUNT(id) FROM data;
print(df['id'].count())
# 5. SELECT * FROM data WHERE id<1000 AND age>30;
# NOTE(review): ids are drawn from 1001-1019, so id<1000 never matches and
# this always prints an empty frame.
print(df[(df['id'] < 1000) & (df['age'] > 30)])
# 6. SELECT id,COUNT(DISTINCT order_id) FROM table1 GROUP BY id;
# NOTE(review): df1 has no 'order_id' column; this counts rows per id instead.
print(df1.groupby('id').aggregate({'id': 'count', }))
# 7. SELECT * FROM table1 t1 INNER JOIN table2 t2 ON t1.id = t2.id;
print(pd.merge(df1, df2, on='id'))
# 8. SELECT * FROM table1 UNION SELECT * FROM table2;
print(pd.concat([df1, df2]))
# 9. DELETE FROM table1 WHERE id=10;
# NOTE(review): the filter removes id 1002, not id 10 as the comment says.
print(df1[df1['id'] != 1002])
# 10. ALTER TABLE table1 DROP COLUMN column_name;
# NOTE(review): df1 has no 'Feb' column, so this rename is a no-op, and with
# inplace=True rename returns None -- the print shows 'None', not a frame.
print(df1.rename(columns={'Feb': 'SAN'}, inplace=True))
d98fa2bf85c33cc1932e513cc7e8b7353cd96dc1 | Python | soldierloko/PIM_IV | /Main.py | UTF-8 | 1,350 | 2.84375 | 3 | [] | no_license | #Importa as Bibliotecas necessárias
import Funcoes as fc
from time import sleep
from Classes import Aluno_Professor
import os
# Loop until the user chooses to exit the system.
while True:
    # Clear the screen (Windows 'cls'; the 'or None' swallows the return code).
    os.system('cls') or None
    # Show the main menu.
    fc.exibir_menu()
    # Wait for the user to enter an option.
    # NOTE(review): int() raises ValueError on non-numeric input -- unhandled.
    opcao = int(input('Digite uma opção: '))
    # Dispatch to the matching feature for the chosen option.
    if opcao == 1:
        os.system('cls') or None
        fc.cadastro()
        # Clear the screen.
        os.system('cls') or None
        # Show the main menu.
        fc.exibir_menu()
    elif opcao == 2:
        os.system('cls') or None
        fc.equipamento()
        # Clear the screen.
        os.system('cls') or None
        # Show the main menu.
        fc.exibir_menu()
    elif opcao == 3:
        os.system('cls') or None
        fc.reserva()
        # Clear the screen.
        os.system('cls') or None
        # Show the main menu.
        fc.exibir_menu()
    elif opcao == 4:
        os.system('cls') or None
        fc.devolucao()
        # Clear the screen.
        os.system('cls') or None
        # Show the main menu.
        fc.exibir_menu()
    elif opcao == 5:
        print("Fechando programa!")
        sleep(2)
        os.system('cls') or None
        break
    else:
        print('Opção inválida')
| true |
8e44e0bc00e216b34cc8326637779ccf3f702ba7 | Python | 1284753334/learning2 | /datastract/Myproject/插入排序.py | UTF-8 | 2,299 | 3.34375 | 3 | [] | no_license | # 插入排序
# 复杂度 0(n)2次方
# def insert_sort(li):
# for i in range(1,len(li)):
# tmp = li[i]
# j = i-1
# # while j>=0 and li[j] > tmp:
# # li[j+1] = li[j]
# # j -= 1
# # li[j+1] = tmp
# # li=[1,3,4,5,2,7,9,8]
# # insert_sort(li)
# # print(li)
#
#
#
# def insert_sort(li):
# for i in range(1,len(li)):# i 表示摸到牌的下标
# tmp = li[i]
# j = i-1 #手里的牌的下标
# while j >= 0 and li[j] > tmp:
# li[j+1] = li[j] #向右移动一位
# j -= 1 # 箭头左移
# li[j+1] = tmp
# print(li)
#
# li = [3,2,4,1,5,7,9,6,8]
# print(li)
# insert_sort(li)
# #
#
#
#
# def insert_sort(li):
# for i in range(1,len(li)):
# tmp = li[i]
# j = i -1
# while j >=0 and li[j]<tmp:
# li[j+1] = li[j]
# j -=1
# li[j+1] = tmp
# def insert_sort(li):
# for i in range(1,len(li)):
# tmp = li[i]
# j = i -1
# while j >=0 and li[j] > tmp:
# li[j+1] = li[j]
# j -=1
# li[j+1] = tmp
#
# li = [2,3,4,1,5,8,7,9,6,0]
# insert_sort(li)
# print(li)
# def insert_sort(li):
# for i in range(1,len(li)):
# tmp = li[i]
# j = i -1
# while j >= 0 and li[j]>tmp:
# li[j+1] = li[j]
# j -=1
# li[j+1] =tmp
#
# li = [2,3,1,4,8,7,6,9,5]
# insert_sort(li)
# print(li)
# def select_sort(li):
# for i in range(len(li)-1):
# min_loc = li[i]
# for j in range(i+1,len(li)):
# if li[j] < li[min_loc]:
# min_loc = j
# li[i],li[min_loc] = li[min_loc],li[i]
#
#
# li = [4,3,1,2,6,7,8,5,9]
# select_sort(li)
# print(li)
# def insert_sort(li):
# for i in range(1,len(li)):
# tmp = li[i]
# j = i -1
# while j>=0 and li[j] > tmp:
# li[j+1]= li[j]
# j -= 1
# li[j+1] = tmp
#
#
# li = [4, 3, 1, 2, 6, 7, 8, 5, 9]
# insert_sort(li)
# print(li)
def insert_sort(li):
for i in range(1,len(li)):
tmp = li[i]
j = i -1
while j>=0 and li[j] >tmp:
li[j+1] = li[j]
j -=1
li[j+1] = tmp
li = [4, 3, 1, 2, 6, 7, 8, 5, 9]
insert_sort(li)
print(li) | true |
39f52d3270af03eb6207f7154d1fc994557f0f7f | Python | otsuka-pocari/nlp100 | /ch02/16.py | UTF-8 | 419 | 2.890625 | 3 | [] | no_license | f = open("popular-names.txt", "r")
# Split the already-opened file 'f' into N output files of (roughly) equal
# line counts; any remainder goes into the last file.
lines = f.readlines()
N = int(input("N => "))
# NOTE(review): "%2d" pads with a SPACE, producing names like
# "16-python- 0.txt"; "%02d" (zero padding) was probably intended -- confirm.
g = [open("16-python-%2d.txt" % i, "w") for i in range(N)]
number_of_lines_per_a_file = len(lines) // N
index = 0
for i in range(N):
    for j in range(number_of_lines_per_a_file):
        g[i].write(lines[index])
        index += 1
# Leftover lines (when len(lines) is not divisible by N) go to the last file.
while index < len(lines):
    g[-1].write(lines[index])
    index += 1
f.close()
for i in range(N):
    g[i].close()
| true |
8c8a26abc92254f83d013d990b82c6a693db07d1 | Python | open-mmlab/mmdeploy | /mmdeploy/backend/tvm/quantize.py | UTF-8 | 2,182 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Dict, Sequence, Union
import numpy as np
import tvm
from tvm.runtime.ndarray import array
class HDF5Dataset:
"""HDF5 dataset.
Args:
calib_file (str | h5py.File): Input calibration file.
input_shapes (Dict[str, Sequence[int]]): The shape of
each input.
model_type (str): Input model type, defaults to 'end2end'.
device (str): Device type, default to llvm.
"""
def __init__(
self,
calib_file: Union[str, Any],
input_shapes: Dict[str, Sequence[int]],
model_type: str = 'end2end',
device: str = 'llvm',
) -> None:
import h5py
if isinstance(calib_file, str):
calib_file = h5py.File(calib_file, mode='r')
assert 'calib_data' in calib_file
calib_data = calib_file['calib_data']
assert model_type in calib_data
calib_data = calib_data[model_type]
self.calib_file = calib_file
self.calib_data = calib_data
self.device = device
self.input_shapes = input_shapes
first_input_group = calib_data[list(calib_data.keys())[0]]
self.dataset_length = len(first_input_group)
def __call__(self):
"""Create dataset generator.
Yields:
Iterator[Any]: data in the dataset
"""
for idx in range(self.dataset_length):
ret = dict()
for name, opt_shape in self.input_shapes.items():
input_group = self.calib_data[name]
data_np = input_group[str(idx)][...].astype(np.float32)
data_shape = data_np.shape
# tile the input data
reps = [
int(np.ceil(opt_s / data_s))
for opt_s, data_s in zip(opt_shape, data_shape)
]
data_np = np.tile(data_np, reps)
slice_list = tuple(slice(0, end) for end in opt_shape)
data_np = data_np[slice_list]
data_nd = array(data_np, tvm.device(self.device))
ret[name] = data_nd
yield ret
| true |
5463b71a389d1edeef1085c6599f3ca481e9d500 | Python | xydinesh/jamming | /cf/16/C.py | UTF-8 | 180 | 2.90625 | 3 | [] | no_license | #!/usr/bin/python
import sys
import fractions
(a,b,x,y) = map(int, sys.stdin.next().strip().split (" "))
p = fractions.gcd(x, y);
x /= p
y /= p
q = min([a/x, b/y])
print q*x, q*y
| true |
93a8646d50d937b1e9623c8bab32f02fe92b3972 | Python | append-knowledge/pythondjango | /1/collection/set/operations.py | UTF-8 | 327 | 3.71875 | 4 | [] | no_license | s1={1,2,3,4,88,5,9,7}
# Demo of the basic set operations on s1 (defined above) and s2.
s2={1,2,3,85,95,65,}
print("s1 is ",s1)
print("s2 is ",s2)
# union: every element that appears in either set
print("union is ",s1.union(s2))
# intersection: elements common to both sets
print("intersection of set is ",s1.intersection(s2))
# difference is not symmetric: s1-s2 differs from s2-s1
print("difference of s1 in s2 is ",s1.difference(s2))
print("difference of s2 in s1 is ",s2.difference(s1))
c76455c3940e3ab840f9b283e9fb6eea3da1e8fd | Python | 3deep0019/python | /List Data structure/important fuction of list/2_Manipulating_Element_of_list/2_insert().py | UTF-8 | 787 | 4.4375 | 4 | [] | no_license | # 2) insert() Function:
# ----> To insert item at specified index position
n=[1,2,3,4,5]
n.insert(1,888)
print(n)
#D:\Python_classes>py test.py
# NOTE(review): the next line re-types the expected output as real code (a
# redundant assignment); it was probably meant to be a comment.
n=[1, 888, 2, 3, 4, 5]
n=[1,2,3,4,5]
n.insert(10,777)
# NOTE(review): insert() requires two arguments -- n.insert(-10,) raises
# TypeError at runtime; something like n.insert(-10, 999) was likely intended.
n.insert(-10,)
print(n)
''' Note: If the specified index is greater than max index then element will be inserted at last
position. If the specified index is smaller than min index then element will be inserted at
first position
Differences between append() and insert()
append()
---------> In List when we add any element it will
come in last i.e. it will be last element.
insert()
---------> In List we can insert any element in
particular index number
'''
5ea88b24d135f322cfd153ca40ec36030fdea55a | Python | BolajiOlajide/python_learning | /beginner/iterator.py | UTF-8 | 1,541 | 4.21875 | 4 | [] | no_license | iterable = ['Spring', 'Summer', 'Autumn', 'Winter']
# Demo: manual iteration over 'iterable' (defined above); the 5th next()
# exhausts the 4-item iterator and raises StopIteration.
iterator = iter(iterable)
try:
    print(next(iterator))
    print(next(iterator))
    print(next(iterator))
    print(next(iterator))
    print(next(iterator))
except StopIteration:
    print('Items finished in the iterable!')
def gen123():
    """Yield 1, 2, 3 in order."""
    yield 1
    yield 2
    yield 3
    return
g = gen123()
print(next(g))
print(next(g))
# an example of a generator comprehension
# NOTE(review): despite the name, range(1, 10002) yields 10,001 squares.
million_squares = (x * x for x in range(1, 10002))
print(million_squares)
print(next(million_squares))
print(next(million_squares))
print(next(million_squares))
print(next(million_squares))
print()
# zip is also a useful tool
a = [2, 3, 4, 32, 2, 3, 6, 764, 2, 223, 4, 54, 31, 234, 5]
b = [1, 2, 5, 6, 3, 2, 3, 5, 6, 8, 9, 7, 5, 3, 2]
for i in zip(a, b):
    # BUG FIX: the output string said "max ids"; "max is" was intended.
    print("The min is {:4.1f}, max is {:4.1f} while the average is {:4.1f}"
          .format(min(i), max(i), (sum(i) / len(i))))
# the built-in any() and all() functions perform memory-efficient
# short-circuiting checks: any() returns True if at least one condition
# is True, while all() returns True only if every condition is True.
print()
print(any(t > 0 for t in (1, 2, 3, 4, 5)), 'should be equal to True')
print(any(t > 0 for t in (1, 2, 3, -4, 5)), 'should be equal to True')
print(any(t == 10 for t in (1, 2, 3, -4, 5)), 'should be equal to False')
print()
print(all(t > 0 for t in (1, 2, 3, 4, 5)), 'should be equal to True')
print(all(t > 0 for t in (1, 2, 3, -4, 5)), 'should be equal to False')
print()
| true |
a1d35d4c3110c34487eb6c050d947119a3d61247 | Python | buchanae/split-and-convert | /run.py | UTF-8 | 1,979 | 2.6875 | 3 | [] | no_license | from __future__ import print_function
import argparse
import itertools
import gzip
import logging
import multiprocessing
import os
import subprocess
import time
# Multiprocessing-safe logger: workers log to stderr through one handler.
log = multiprocessing.log_to_stderr()
log.setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('fastq', nargs='+')
parser.add_argument('--dry-run', action='store_true')
def grouper(n, iterable, fillvalue=None):
    """Collect data into fixed-length chunks of size n.

    The same iterator object is repeated n times, so each zip step pulls n
    consecutive items; a short final chunk is padded with fillvalue.
    NOTE: Python 2 API -- itertools.izip_longest is zip_longest on Python 3.
    """
    # grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
    args = [iter(iterable)] * n
    return itertools.izip_longest(fillvalue=fillvalue, *args)
def run_strip_and_convert(input_path, output_path):
    """Convert one gzipped FASTQ file into a gzipped FASTA file.

    Records are consumed four lines at a time (header, sequence, strand,
    quality); strand and quality are discarded, the FASTQ '@' header becomes
    a FASTA '>' header, and the first 10 bases of each sequence are removed.
    """
    input = gzip.open(input_path)
    output = gzip.open(output_path, 'wb')
    for header, seq, strand, quality in grouper(4, input):
        header = header.strip()
        seq = seq.strip()
        # Replace the FASTQ '@' marker with the FASTA '>' marker.
        header = '>' + header[1:]
        # Drop the first 10 bases -- presumably a fixed barcode/primer
        # prefix; confirm against the sequencing protocol.
        seq = seq[10:]
        print(header, seq, sep='\n', file=output)
    input.close()
    output.close()
    log.info('Completed: {0} > {1}'.format(input_path, output_path))
if __name__ == '__main__':
    args = parser.parse_args()
    # Cap the worker pool at 7 processes (fewer if there are fewer files).
    num_processes = min(7, len(args.fastq))
    pool = multiprocessing.Pool(processes=num_processes)
    for input_path in args.fastq:
        dir_path, file_name = os.path.split(input_path)
        # NOTE(review): str.replace rewrites EVERY 'fastq' substring in the
        # file name, not just the extension.
        file_name = file_name.replace('fastq', 'fasta')
        output_path = os.path.join(dir_path, 'stripped-and-converted', file_name)
        output_dir = os.path.dirname(output_path)
        if args.dry_run:
            log.info('Dry-run completed: {0} > {1}'.format(input_path, output_path))
        else:
            # ensure that the full path to the output file exists
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            # DEBUG
            #run_strip_and_convert(input_path, output_path)
            pool.apply_async(run_strip_and_convert, (input_path, output_path))
    pool.close()
    pool.join()
| true |
fc47c2777b28cf13add17157530c641d4068fd53 | Python | wildan12-alwi/tugas-5 | /tugas5.py | UTF-8 | 4,082 | 3.59375 | 4 | [] | no_license | print("Program Input Data mahasiswa")
print("____________________________")
print("=== Data Nilai Mahasiswa ===")
print("============================")
# All student records, keyed by student name (see input_data()).
data = {}
def input_data():
    """Prompt for one student record on stdin and return it as a dict.

    The final grade ('akhir') is a weighted average:
    30% tugas + 35% uas + 35% uts.
    """
    nama = input("Nama: ")
    nim = input("NIM: ")
    tugas = int(input("Nilai Tugas : "))
    uas = int(input("Nilai UAS : "))
    uts = int(input("Nilai UTS : "))
    akhir = tugas * 30 / 100 + uas * 35 / 100 + uts * 35 / 100
    return {
        'nim': nim,
        'nama': nama,
        'tugas': tugas,
        'uas': uas,
        'uts': uts,
        'akhir': akhir,
    }
def cetak (data={}):
    """Print every student record in *data* as a fixed-width table.

    *data* maps student name -> record dict (as built by input_data());
    an empty dict prints a "BELUM ADA DATA" (no data yet) placeholder.
    NOTE(review): the mutable default {} is never mutated here, so it is
    harmless, but data=None with a fallback would be the safer idiom.
    """
    print("___________________________________________________________________")
    print("| NO | NIM       | NAMA           | TUGAS | UAS   | UTS   | AKHIR |")
    print("===================================================================")
    if len(data) <= 0:
        print ("===========================BELUM ADA DATA==========================")
    else:
        no = 1  # running row number
        for x in data.values():
            print("| {0:2} | {1:9} | {2:14} | {3:5} | {4:5} | {5:5} | {6:5.2f} |".format
                  (no,x["nim"],x["nama"],x["tugas"],
                   x["uas"],x["uts"],float(x["akhir"])))
            no += 1
        print("===================================================================")
# Main menu loop: (L)ist, (T) add, (U) edit, (H) delete, (C) search, (K) quit.
while True:
    print("\n=================================")
    c = input("(L) Lihat, (T) Tambah, (U) Ubah, \n"
              "(H) Hapus, (C) Cari, (K) Keluar: ")
    print("===================================")
    # View all records
    if c.lower() =='l':
        print("Daftar data")
        cetak(data)
    # Add a record (keyed by the student's name)
    elif c.lower() =='t':
        print("Input Data")
        d = input_data()
        data[d['nama']] = d
    # Edit a record
    elif c.lower() =='u':
        nama = input("Masukan nama untuk mengubah data: ")
        if nama in data.keys():
            print("Masukan Data yang diubah :")
            ubah = input("(Semua), (Nama), (NIM), "
                         "(Tugas), (UTS), (UAS) : ")
            # NOTE(review): every branch below mutates 'd', which still
            # refers to the LAST record touched by add/edit -- not
            # data[nama] -- and is unbound (NameError) before any add.
            # 'akhir' is also never recomputed after editing grades.
            if ubah.lower() =="semua":
                print("_______________________")
                print("Ubah Data {}".format(nama))
                print("-----------------------")
                d['nim'] = input("Ubah NIM : ")
                d['tugas'] = int(input("Ubah Nilai Tugas: "))
                d['uas'] = int(input("Ubah Nilai UAS : "))
                d['uts'] = int(input("Ubah Nilai UTS : "))
            elif ubah.lower() =="nama":
                d['nama'] = input("Ubah Nama : ")
            elif ubah.lower() =="nim":
                d['nim'] = input("Ubah Nim : ")
            elif ubah.lower() =="tugas":
                d['tugas'] = int(input("Ubah Nilai Tugas : "))
            elif ubah.lower() =="uts":
                d['uts'] = int(input("Ubah Nilai UTS : "))
            elif ubah.lower() =="uas":
                d['uas'] = int(input("Ubah Nilai UAS : "))
            data[nama]= d
        else:
            print("'{}' Tidak Ditemukan".format(nama))
    # Delete a record by name
    elif c.lower() =='h':
        nama = input("Masukan nama untuk menghapus data : ")
        if nama in data.keys():
            del data[nama]
            print("Data '{}' dihapus".format(nama))
        else :
            print("'{}' Tidak Ditemukan".format(nama))
    # Search for a record by name
    elif c.lower() =='c':
        print("Mencari Daftar Nilai : ")
        print("=======================")
        nama = input("Masukan nama untuk mencari daftar nilai : ")
        if nama in data.keys():
            # NOTE(review): prints fields of 'd' (the last touched record),
            # not data[nama], so the wrong student's grades can be shown.
            print("Nama {0}, dengan NIM : {1}\n"
                  "Nilai Tugas: {2}, UTS: {3}, dan UAS: {4}\n"
                  "dan nilai akhir {5}".format(nama, d['nim'],
                                              d['tugas'], d['uts'],
                                              d['uas'], d['akhir']))
            data[nama]= d
        else :
            print("'{}' Tidak ditemukan".format(nama))
    # Quit the program
    elif c.lower() =='k':
        break
    else:
        print("Pilih Menu yang tersedia")
0e394e8e7092fdd400edfb76338617d1c9385c04 | Python | Autumn-Chrysanthemum/Coursera | /Chapter_9/Chapter_9_p5.py | UTF-8 | 481 | 3.046875 | 3 | [] | no_license | fname = raw_input("Please enter file name: \n")
# Python 2 script: map each word of the file to an integer position.
if len(fname) < 1: fname = "romeo.txt"
try:
    fhandle = open(fname)
except:
    # NOTE(review): bare except catches everything (even KeyboardInterrupt);
    # 'except IOError' is the targeted Python 2 form.
    print "File:", fname,"does not exist"
    quit()
text = fhandle.read()
text = text.rstrip()
text = text.split()
text_dict = dict()
test_value = 0
for word in text:
    # Later assignments overwrite earlier ones, so each word ends up mapped
    # to the position of its LAST occurrence.
    text_dict[word] = test_value
    test_value = test_value + 1
print text_dict
lst = text_dict.keys()
print lst
lst.sort()
# print lst
for key in lst:
    print key, text_dict[key]
aedf30a306419bd9bf40b7c16696eb423fb1052c | Python | sammyjmoseley/CS6820Project | /graphs.py | UTF-8 | 2,273 | 2.71875 | 3 | [] | no_license | import numpy as np
import networkx as nx
from networkx.algorithms.bipartite import generators
from treeApproximation import TreeApproximator, ComTreeNode, create_tree_from_laminar_family
import matplotlib.pyplot as plt
import sys
def random_graph(n):
    """Graph on n nodes from a random boolean adjacency matrix.

    NOTE(review): the matrix is not symmetrised and the diagonal is not
    zeroed, so edge probability is not exactly 0.5 and self-loops can occur.
    """
    m = np.random.rand(n,n) > 0.5
    return nx.from_numpy_matrix(m)
def cycle(n):
    """Cycle graph on n nodes."""
    return nx.cycle_graph(n)
def vertices(n):
    """Edgeless graph of n isolated nodes (built from an n x n zero matrix)."""
    return nx.from_numpy_matrix(np.zeros([n,n]))
def bipartite(n, m=None):
    """Random bipartite graph with parts of size n and m (default n), edge prob 0.2."""
    if not m:
        m = n
    return generators.random_graph(n, m, 0.2)
def grids(n,m=None):
    """n-by-m grid graph with tuple nodes relabelled to ints via row*m + col.

    NOTE(review): verify the relabelling x[0]*m + x[1] stays collision-free
    for n != m given nx.grid_graph's dimension ordering.
    """
    if not m:
        m = n
    def mapping(x):
        return x[0]*m + x[1]
    return nx.relabel_nodes(nx.grid_graph([n,m]), mapping)
def binarytree(h, r = 2):
    """Balanced r-ary tree of height h (binary by default)."""
    return nx.balanced_tree(r,h)
def load_graph(name):
    """Load the edge list at data/<name> into a DiGraph.

    Lines starting with '#' are skipped; the first two whitespace-separated
    tokens of every other line become an edge with weight 1.
    """
    dg = nx.DiGraph()
    with open("data/"+name, "r") as f:
        for line in f.readlines():
            if line[0] == "#":
                continue
            tokens = line.split()
            dg.add_edge(int(tokens[0]), int(tokens[1]), weight=1)
    return dg
def email_graph():
    """EU research institution e-mail network (SNAP dataset)."""
    # 1,005 nodes, 25,571 edges: http://snap.stanford.edu/data/email-Eu-core.html
    return load_graph("email-Eu-core.txt")
def msg_graph():
    """UC Irvine college messaging network (SNAP dataset)."""
    # 1,899 nodes, 20,296 edges: http://snap.stanford.edu/data/CollegeMsg.html
    return load_graph("CollegeMsg.txt")
def collab_graph():
    """arXiv GR-QC collaboration network (SNAP dataset)."""
    # 5,242 nodes, 14,496 edges: http://snap.stanford.edu/data/ca-GrQc.html
    return load_graph("ca-GrQc.txt")
def p2p_graph():
    """Gnutella peer-to-peer network snapshot (SNAP dataset)."""
    # 6,301 nodes, 20,777 edges: http://snap.stanford.edu/data/p2p-Gnutella08.html
    return load_graph("p2p-Gnutella08.txt")
def road_graph():
    """California road network (SNAP dataset; large -- loads slowly)."""
    # 1,965,206 nodes, 2,766,607 edges: http://snap.stanford.edu/data/roadNet-CA.html
    return load_graph("roadNet-CA.txt")
def visualize(g, labels = None):
    """Draw *g* with a spring layout in a new figure.

    labels: optional dict mapping edge tuples to label text; when given,
    the labels are drawn on the edges.
    """
    plt.figure()
    pos = nx.spring_layout(G = g, dim = 2, k = 10, scale=20)
    nx.draw_networkx(g, pos)
    # PEP 8 fix: compare against None with `is not`, not `!=` (the latter can
    # misbehave for objects overriding __eq__).
    if labels is not None:
        nx.draw_networkx_edge_labels(g, pos, labels)
if __name__ == "__main__":
    # Demo: draw the e-mail graph, then its spanning-tree approximation with
    # each edge's 'dist' attribute shown as an edge label.
    g = email_graph()
    visualize(g)
    g_ = TreeApproximator(g).spanning_tree_approx
    dic = {}
    for a, b, data in g_.edges(data = True):
        dic[(a,b)]= data['dist']
    visualize(g_, labels = dic)
    print(len(g_.nodes()))
    plt.show()
| true |
1f869d44da588bd462da61ad05da1b8e28152a09 | Python | rsamit26/InterviewBit | /Python/DynamicProgramming/GreedyOrDP/Tushar's Birthday Bomb.py | UTF-8 | 2,152 | 4.375 | 4 | [
"MIT"
] | permissive | """
It’s Tushar’s birthday today and he has N friends. Friends are numbered
[0, 1, 2, …., N-1] and i-th friend have a positive strength S(i). Today
being his birthday, his friends have planned to give him birthday bombs
(kicks :P). Tushar’s friends know Tushar’s pain bearing limit and would
hit accordingly.
If Tushar’s resistance is denoted by R (>=0) then find the lexicographically
smallest order of friends to kick Tushar so that the cumulative kick strength
(sum of the strengths of friends who kicks) doesn’t exceed his resistance
capacity and total no. of kicks hit are maximum. Also note that each friend
can kick unlimited number of times (If a friend hits x times, his strength
will be counted x times)
Note:
Answer format = Vector of indexes of friends in the order in which they will hit.
Answer should have the maximum no. of kicks that can be possibly hit. If two
answer have the same no. of kicks, return one with the lexicographically smaller.
[a1, a2, …., am] is lexicographically smaller than [b1, b2, .., bm]
if a1 < b1 or (a1 = b1 and a2 < b2) … .
Input cases are such that the length of the answer does not exceed 100000.
Example:
R = 11, S = [6,8,5,4,7]
ans = [0,2]
Here, [2,3], [2,2] or [3,3] also give the maximum no. kicks.
"""
class Solution:
def tushar_bomb(self, resistance, strength):
n = len(strength)
minStrength = min(strength) # minimum strength of hit
no_of_hits = resistance//minStrength # total number of minimum strength hit
min_idx = strength.index(minStrength) # index of minimum strength
result = [min_idx]*no_of_hits # add the index of number of hists times minimum strength hit in result array ,
left_resistance = resistance - no_of_hits*minStrength # resistance left
i, j = 0, 0
while i < n and j < no_of_hits:
if strength[i] - minStrength <= left_resistance:
result[j] = i
left_resistance -= strength[i] - minStrength
j+= 1
else:
i+=1
return result
s = Solution()
stre = [6,8,5,4,7]
print(s.tushar_bomb(11, stre))
| true |
9a883d31c2deb75470158c01119613d3ace5d7c7 | Python | hyun-minLee/20200209 | /st01.Python기초/py08반복문/py08_32_무한구구단.py | UTF-8 | 630 | 4.03125 | 4 | [] | no_license | while True:
try :
x = int(input("숫자를 입력하시오"))
y = int(input("숫자를 입력하시오"))
except ValueError:
print("정수를 입력하시오")
break
if x <0 or y <0:
print("양수값을 입력하시오.")
break
if x > y:
temp = x
x = y
y = temp
for x in range(x, y+1, 1):
for y in range(1, 10, 1):
if y == 9:
print("%2d x %d= %3d" % (x, y, x*y), end=", ")
else:
print("%2d x %d= %3d" % (x, y, x*y), end=". ")
print()
| true |
6713051862d9894b03d7486233ac834df2000ee6 | Python | alaypatel07/cd | /left_factoring.py | UTF-8 | 2,903 | 3.296875 | 3 | [] | no_license | # A->aiB/ae
# B->c
# exit
# Answer
# A->aA'/aA'
# A'->iB/e
# B->c
from itertools import groupby
from functools import reduce
def get_key(element):
    """Return the leading symbol of *element*, or "" when it is empty."""
    return element[0] if len(element) >= 1 else ""
def left_factor(non_terminal, production):
    """Left-factor the alternatives of one non-terminal.

    Alternatives are grouped by their first symbol. Every group with two or
    more members is collapsed into ``<symbol><fresh non-terminal>`` and the
    remainders (first symbol stripped) become the fresh non-terminal's
    alternatives (A', A'', ... -- one fresh name per factored group).

    Returns a dict {non_terminal: alternatives} including any fresh
    non-terminals; when nothing can be factored the input is returned as-is.
    """
    # BUG FIX: the previous implementation grouped with itertools.groupby,
    # which only merges *adjacent* equal keys. For input such as
    # ["aB", "c", "aD"] the two "a" runs stayed separate and the second one
    # silently overwrote the first in the dict, dropping productions.
    # A plain dict groups globally and preserves first-occurrence order.
    grouped_data_dict = {}
    for alternative in production:
        key = alternative[0] if len(alternative) >= 1 else ""
        grouped_data_dict.setdefault(key, []).append(alternative)
    if len(grouped_data_dict.keys()) == len(production):
        # Every alternative already starts with a distinct symbol.
        return {non_terminal: production}
    new_productions = {non_terminal: []}
    count = 0
    for element in grouped_data_dict:
        if len(grouped_data_dict[element]) > 1:
            # Strip the shared leading symbol from every member of the group.
            productions = [i[1:] for i in grouped_data_dict[element] if len(i) >= 1]
        else:
            productions = grouped_data_dict[element]
            new_productions[non_terminal].extend(productions)
            continue
        count += 1
        new_non_terminal = non_terminal + "'" * count
        new_productions[non_terminal].extend([element + new_non_terminal])
        new_productions.setdefault(new_non_terminal, productions)
    return new_productions
def get_left_factored(productions):
"""Group the rules for each terminal according to the common characters
remove the uncommon parts and add it to a new non-terminal. Repeat for each non terminal.
"""
non_terminals = list(productions.keys())
flags = [False for _ in range(len(non_terminals))]
while not reduce(lambda x, y: x and y, flags, True):
for non_terminal in productions:
new_productions = left_factor(non_terminal, productions[non_terminal])
for new_non_terminal in new_productions:
if new_non_terminal not in productions:
flags.append(False)
non_terminals.append(new_non_terminal)
productions = {**productions, **new_productions}
if new_productions[non_terminal] == productions[non_terminal]:
flags[non_terminals.index(non_terminal)] = True
return productions
def get_productions(lines):
production = {}
for line in lines:
a, b = line.split("->")
b = b.split("/")
if a in production.keys():
production[a].extend(b)
else:
production[a] = b
return production
def get_input():
input_lines = []
i = input()
while i != "exit":
input_lines.append(i)
i = input()
return input_lines
def get_lines(grammar):
return "\n".join([key + "->" + "/".join(grammar[key]) for key in grammar.keys()])
if __name__ == '__main__':
input_lines = get_input()
productions = get_productions(lines=input_lines)
try:
left_factored_grammar = get_left_factored(productions)
print(get_lines(left_factored_grammar))
except RuntimeError as e:
print(e)
| true |
4c00900f4ca98a1def2eaecb5183f95f45045c78 | Python | boulund/proteotyping-in-silico | /mutate_fasta.py | UTF-8 | 4,999 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env python2.7
# Fredrik Boulund 2015
# Sample sequences from a FASTA file
from read_fasta import read_fasta
from sys import argv, exit, maxint
import argparse
from random import sample, choice as pychoice
from numpy.random import binomial, choice
def parse_args(argv):
"""Parse commandline arguments.
"""
desc = """Sample sequences from FASTA files with replacement. Fredrik Boulund 2015"""
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("FASTA",
help="FASTA file to sample from.")
parser.add_argument("-n", metavar="N", required=True, type=int,
help="Number of sequences to sample from FASTA file [%(default)s].")
parser.add_argument("--maxlength", metavar="M", type=int,
default=0,
help="Maximum length of sequences to sample from, 0 means no limit [%(default)s], cant be bigger than {}.".format(maxint))
parser.add_argument("--minlength", metavar="m", type=int,
default=0,
help="Minimum length of sequences to sample from, 0 means no limit [%(default)s].")
parser.add_argument("-p", metavar="P", type=float,
default=0.0,
help="Probability of mutation (per amino acid) [%(default)s].")
parser.add_argument("--matrix", metavar="M",
default="/local/blast-2.2.26/data/PAM30",
help="Location of BLAST reference PAM or BLOSUM matrix to use for point mutations [%(default)s].")
parser.add_argument("-o", "--outfile", metavar="FILE", dest="outfile",
default="",
help="Write output to FILE instead of STDOUT.")
if len(argv)<2:
parser.print_help()
exit()
options = parser.parse_args()
return options
def sample_fasta(fastafile, outfile, matrix, options):
"""Sample sequences from FASTA.
"""
seqs = []
for header, seq in read_fasta(fastafile, keep_formatting=False):
seqlen = len(seq)
if not options.maxlength:
options.maxlength = maxint
if seqlen >= options.minlength and seqlen <= options.maxlength:
seqs.append((header,seq))
if options.outfile:
with open(outfile, 'w') as f:
for n in xrange(0,options.n):
header, seq = pychoice(seqs)
if options.p > 0.0:
seq, mutations = mutate_seq(seq, options.p, matrix)
f.write(">"+header+" mutations="+ str(mutations)+ "\n")
else:
f.write(">"+header+"\n")
f.write(seq+"\n")
else:
for n in xrange(0,options.n):
header, seq = pychoice(seqs)
if options.p > 0.0:
seq, mutations = mutate_seq(seq, options.p, matrix)
print ">"+header+" mutations="+str(mutations)
else:
print ">"+header
print seq
def mutate(aa, m):
"""Mutate a single amino acid.
"""
likelihoods = matrix[aa].items()
minlikelihood = min(likelihoods, key=lambda v: v[1])[1]
adjusted_likelihoods = [l+abs(minlikelihood) for a, l in likelihoods]
normalizer = sum(adjusted_likelihoods)
probs = [float(l)/normalizer for l in adjusted_likelihoods]
aas = [amino_acid for amino_acid, likelihood in likelihoods]
new_aa = choice(aas, p=probs)
if new_aa == aa:
return mutate(aa, m)
else:
return new_aa
def mutate_seq(seq, p, m):
"""Mutate sequence as positions chosen by sampling binomial with probability p,
using the substitution matrix m.
"""
mutations = binomial(len(seq), p)
positions = sample(xrange(len(seq)), mutations)
seq = list(seq.upper())
for pos in positions:
seq[pos] = mutate(seq[pos], m)
seq = ''.join(seq)
return seq, mutations
def read_substitution_matrix(filename, normalize=False, remove=[]):
"""Read substitution matrix from filename into a nested dictionary.
The likelihoods can be normalized to "probabilities".
"""
with open(filename) as f:
line = f.readline()
if line.startswith("# Entries"):
matrix = {}
aas = f.readline().split()
for aa1 in aas:
for aa2 in aas:
matrix[aa1] = {aa2: 0}
for line in f:
likelihoods = line.split()
cur_aa = likelihoods[0]
likelihoods = likelihoods[1:]
for aa in aas:
matrix[cur_aa][aa] = int(likelihoods.pop(0))
else:
raise IOError("{} doesn't appear to be a BLAST substitution matrix.".format(filename))
for code in remove:
matrix.pop(code, None)
for subdict in matrix.itervalues():
subdict.pop(code, None)
return matrix
if __name__ == "__main__":
options = parse_args(argv)
matrix = read_substitution_matrix(options.matrix, remove=["X", "*"])
sample_fasta(options.FASTA, options.outfile, matrix, options)
| true |
529bc35fe78ca32f5567841c5917bdd7f7331a30 | Python | iCodeIN/Problem-Solving | /PYTHON/Newstart/Basic/Exception_Handling/exception.3.py | UTF-8 | 277 | 3.171875 | 3 | [] | no_license | #!/usr/bin/python
import os
class Networkerror(RuntimeError):
def __init__(self, arg):
self.args = arg
###So once you defined above class, you can raise the exception as follows###
try:
raise Networkerror("Bad hostname")
except Networkerror,e:
print e.args
| true |
503f9d20f1dd6e2289f6a5c317cf187591db1911 | Python | varesa/mustikkaBot | /src/eventmanager.py | UTF-8 | 3,414 | 2.9375 | 3 | [] | no_license | import re
import logging
class EventManager:
log = logging.getLogger("mustikkabot.eventmanager")
message_registered = []
special_registered = []
def __init__(self):
self.message_registered = list()
self.special_registered = list()
def register_message(self, module):
"""
:param module: instance of the module that will handle the event
Registers a module to receive events on incoming messages
"""
self.log.info("Module " + str(module) + " registering for messages")
if module not in self.message_registered:
self.message_registered.append(module)
def unregister_message(self, module):
"""
:param module: instance of the module
Unregister a module to stop it from receiving events on incoming messages
"""
self.log.info("Module " + str(module) + " unregistering messages")
remove = None
for registered in self.message_registered:
if type(registered) == type(module):
remove = registered
if remove is not None:
self.message_registered.pop(self.message_registered.index(remove))
def register_special(self, module):
"""
:param module: instance of the module that will handle the event
Registers a module to receive events on incoming "special" (non message) data
"""
self.log.info("Module " + str(module) + " registering for special messages")
if module not in self.special_registered:
self.special_registered.append(module)
def unregister_special(self, module):
"""
:param module: instance of the module
Unregister a module to stop it from receiving events on incoming "special" (non message) data
"""
self.log.info("Module " + str(module) + " unregistering special messages")
remove = None
for registered in self.special_registered:
if type(registered) == type(module):
remove = registered
if remove is not None:
self.special_registered.pop(self.special_registered.index(remove))
def handle_message(self, text):
"""
:param text: full IRC message to deliver as a text-message
:type text: str
Parse the IRC message and deliver it to registered modules
"""
result = re.search(":(.*?)!(.*) PRIVMSG (.*?) :(.*)", text)
user = None
msg = None
if result is not None:
user = result.group(1)
msg = result.group(4)
else:
self.log.warning("Received invalid message")
return # Invalid message
for module in self.message_registered:
try:
module.handle_message(text, user, msg)
except:
self.log.exception("Error happened while module '" + str(module) + "' was handling a message")
def handle_special(self, text):
"""
:param text: full IRC message to deliver as special data
:type text: str
Parse the IRC data and deliver it to registered modules
"""
for module in self.special_registered:
try:
module.handle_special(text)
except:
self.log.exception("Error happened while module '" + module + "' was handling a special message")
| true |
dffcc6610d3c252485748cd92ac1617e77768973 | Python | Arkhean/pyfit-ultime | /tests/test_kmeans.py | UTF-8 | 881 | 2.78125 | 3 | [] | no_license | from pyfit.kmeans import KMeans
from sklearn.cluster import KMeans as sk_KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import accuracy_score
import numpy as np
def test_kmeans():
X, y_true = make_blobs(n_samples=300, centers=4, cluster_std=0.60, random_state=0)
my_kmeans = KMeans(n_clusters = 4).fit(X)
sk_kmeans = sk_KMeans(n_clusters = 4).fit(X)
count = dict()
for y1, y2 in zip(my_kmeans.labels_, sk_kmeans.labels_):
count[(y1,y2)] = count.get((y1,y2), 0) + 1
count = list(count.values())
count = sorted(count, reverse=True)
assert sum(count[:4])/300 > 0.85
def test_predict():
# plantera si l'algorithme n'a pas convergé
X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]])
kmeans = KMeans(n_clusters=2).fit(X)
y = kmeans.predict([[8,8]])
assert np.array_equal(y, [[10, 2]])
| true |
37fb5c521201160a6f69449a8a74609490ecf949 | Python | jetaehyun/CS-4342-Final-Project | /SVM.py | UTF-8 | 540 | 2.734375 | 3 | [] | no_license | import pandas
import sklearn.svm
if __name__ == "__main__":
d_train = pandas.read_csv("train.csv")
y_train = d_train.label.to_numpy()
X_train = d_train.values[:,1:]
d_test = pandas.read_csv("test.csv")
ID = d_test.id.to_numpy()
X_test = d_test.values[:,1:]
svm = sklearn.svm.SVC(kernel='rbf', gamma=0.001)
svm.fit(X_train, y_train)
yHat = svm.predict(X_test)
df = pandas.DataFrame({'id': ID,
'label': yHat})
df.to_csv('predictions.csv',index=False)
print("Done") | true |
379fceb396e24ee82d214918516aaa3abff86f03 | Python | PancakeAssassin/Portfolio | /Python/CountFiles.py | UTF-8 | 479 | 4 | 4 | [] | no_license | #finds and counts all files in a specified directory
import os
def getNumFiles(path):
size= 0
if not os.path.isfile(path):
lst= os.listdir(path)
for sub in lst:
size+= getNumFiles(path + "\\" + sub)
else:
size+= 1
return size
if __name__ == '__main__':
path= input("Enter a directory: ").strip()
try:
print("The number of files is ", getNumFiles(path))
except:
print("Directory does not exist")
| true |
7bfb47584ff9383cc80a06ea2b81c7df0ddb7e0b | Python | msainTesting/TwitterAnalysis | /StreamingDataAnalysis/data/cleanData.py | UTF-8 | 283 | 2.625 | 3 | [] | no_license | import re
import emoji
#Making use of functiosn to clean Data
def removeURLS(data):
text = re.sub(r'https?:\/\/\S*', '', str(data), flags=re.MULTILINE)
return text
def removeEmojis(data):
text = emoji.get_emoji_regexp().sub("", data)
return text
| true |
de0fcd18eaa226c3dc4a3d48c80e30b5fa2d1a31 | Python | volpatto/PVGeo | /PVGeo/model_build/grids.py | UTF-8 | 12,163 | 2.78125 | 3 | [
"BSD-3-Clause"
] | permissive | __all__ = [
'CreateEvenRectilinearGrid',
'CreateUniformGrid',
'CreateTensorMesh',
]
__displayname__ = 'Grids'
import vtk
import numpy as np
from vtk.numpy_interface import dataset_adapter as dsa
# Import Helpers:
from ..base import AlgorithmBase
from .. import _helpers
from .. import interface
def _makeSpatialCellData(nx, ny, nz):
"""Used for testing
"""
arr = np.fromfunction(lambda k, j, i: k*j*i, (nz, ny, nz))
return arr.flatten()
class CreateUniformGrid(AlgorithmBase):
"""Create uniform grid (``vtkImageData``)
"""
__displayname__ = 'Create Uniform Grid'
__category__ = 'source'
def __init__(self,
extent=[10, 10, 10],
spacing=[1.0, 1.0, 1.0],
origin=[0.0, 0.0, 0.0]):
AlgorithmBase.__init__(self,
nInputPorts=0,
nOutputPorts=1, outputType='vtkImageData')
self.__extent = extent
self.__spacing = spacing
self.__origin = origin
def RequestData(self, request, inInfo, outInfo):
pdo = self.GetOutputData(outInfo, 0)
nx,ny,nz = self.__extent[0],self.__extent[1],self.__extent[2]
sx,sy,sz = self.__spacing[0],self.__spacing[1],self.__spacing[2]
ox,oy,oz = self.__origin[0],self.__origin[1],self.__origin[2]
# Setup the ImageData
pdo.SetDimensions(nx, ny, nz)
pdo.SetOrigin(ox, oy, oz)
pdo.SetSpacing(sx, sy, sz)
#pdo.SetExtent(0,nx-1, 0,ny-1, 0,nz-1)
# Add CELL data
data = _makeSpatialCellData(nx-1, ny-1, nz-1) # minus 1 b/c cell data not point data
data = interface.convertArray(data, name='Spatial Cell Data', deep=True)
# THIS IS CELL DATA! Add the model data to CELL data:
pdo.GetCellData().AddArray(data)
# Add Point data
data = _makeSpatialCellData(nx, ny, nz)
data = interface.convertArray(data, name='Spatial Point Data', deep=True)
# THIS IS CELL DATA! Add the model data to CELL data:
pdo.GetPointData().AddArray(data)
return 1
def RequestInformation(self, request, inInfo, outInfo):
# Now set whole output extent
ext = [0, self.__extent[0]-1, 0,self.__extent[1]-1, 0,self.__extent[2]-1]
info = outInfo.GetInformationObject(0)
# Set WHOLE_EXTENT: This is absolutely necessary
info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6)
return 1
#### Setters / Getters ####
def SetExtent(self, nx, ny, nz):
"""Set the extent of the output grid.
"""
if self.__extent != [nx, ny, nz]:
self.__extent = [nx, ny, nz]
self.Modified()
def SetSpacing(self, dx, dy, dz):
"""Set the spacing for the points along each axial direction.
"""
if self.__spacing != [dx, dy, dz]:
self.__spacing = [dx, dy, dz]
self.Modified()
def SetOrigin(self, x0, y0, z0):
"""Set the origin of the output grid.
"""
if self.__origin != [x0, y0, z0]:
self.__origin = [x0, y0, z0]
self.Modified()
class CreateEvenRectilinearGrid(AlgorithmBase):
"""This creates a vtkRectilinearGrid where the discretization along a
given axis is uniformly distributed.
"""
__displayname__ = 'Create Even Rectilinear Grid'
__category__ = 'source'
def __init__(self,
extent=[10, 10, 10],
xrng=[-1.0, 1.0],
yrng=[-1.0, 1.0],
zrng=[-1.0, 1.0]):
AlgorithmBase.__init__(self,
nInputPorts=0,
nOutputPorts=1, outputType='vtkRectilinearGrid')
self.__extent = extent
self.__xrange = xrng
self.__yrange = yrng
self.__zrange = zrng
def RequestData(self, request, inInfo, outInfo):
# Get output of Proxy
pdo = self.GetOutputData(outInfo, 0)
# Perfrom task
nx,ny,nz = self.__extent[0]+1, self.__extent[1]+1, self.__extent[2]+1
xcoords = np.linspace(self.__xrange[0], self.__xrange[1], num=nx)
ycoords = np.linspace(self.__yrange[0], self.__yrange[1], num=ny)
zcoords = np.linspace(self.__zrange[0], self.__zrange[1], num=nz)
# CONVERT TO VTK #
xcoords = interface.convertArray(xcoords,deep=True)
ycoords = interface.convertArray(ycoords,deep=True)
zcoords = interface.convertArray(zcoords,deep=True)
pdo.SetDimensions(nx,ny,nz)
pdo.SetXCoordinates(xcoords)
pdo.SetYCoordinates(ycoords)
pdo.SetZCoordinates(zcoords)
data = _makeSpatialCellData(nx-1, ny-1, nz-1)
data = interface.convertArray(data, name='Spatial Data', deep=True)
# THIS IS CELL DATA! Add the model data to CELL data:
pdo.GetCellData().AddArray(data)
return 1
def RequestInformation(self, request, inInfo, outInfo):
# Now set whole output extent
ext = [0, self.__extent[0], 0,self.__extent[1], 0,self.__extent[2]]
info = outInfo.GetInformationObject(0)
# Set WHOLE_EXTENT: This is absolutely necessary
info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6)
return 1
#### Setters / Getters ####
def SetExtent(self, nx, ny, nz):
"""Set the extent of the output grid.
"""
if self.__extent != [nx, ny, nz]:
self.__extent = [nx, ny, nz]
self.Modified()
def SetXRange(self, start, stop):
"""Set range (min, max) for the grid in the X-direction.
"""
if self.__xrange != [start, stop]:
self.__xrange = [start, stop]
self.Modified()
def SetYRange(self, start, stop):
"""Set range (min, max) for the grid in the Y-direction
"""
if self.__yrange != [start, stop]:
self.__yrange = [start, stop]
self.Modified()
def SetZRange(self, start, stop):
"""Set range (min, max) for the grid in the Z-direction
"""
if self.__zrange != [start, stop]:
self.__zrange = [start, stop]
self.Modified()
class CreateTensorMesh(AlgorithmBase):
"""This creates a vtkRectilinearGrid where the discretization along a
given axis is uniformly distributed.
"""
__displayname__ = 'Create Tensor Mesh'
__category__ = 'source'
def __init__(self, origin=[-350.0, -400.0, 0.0], dataname='Data',
xcellstr='200 100 50 20*50.0 50 100 200',
ycellstr='200 100 50 21*50.0 50 100 200',
zcellstr='20*25.0 50 100 200',):
AlgorithmBase.__init__(self, nInputPorts=0,
nOutputPorts=1, outputType='vtkRectilinearGrid')
self.__origin = origin
self.__xcells = CreateTensorMesh._ReadCellLine(xcellstr)
self.__ycells = CreateTensorMesh._ReadCellLine(ycellstr)
self.__zcells = CreateTensorMesh._ReadCellLine(zcellstr)
self.__dataName = dataname
@staticmethod
def _ReadCellLine(line):
"""Read cell sizes for each line in the UBC mesh line strings
"""
# OPTIMIZE: work in progress
# TODO: when optimized, make sure to combine with UBC reader
line_list = []
for seg in line.split():
if '*' in seg:
sp = seg.split('*')
seg_arr = np.ones((int(sp[0]),), dtype=float) * float(sp[1])
else:
seg_arr = np.array([float(seg)], dtype=float)
line_list.append(seg_arr)
return np.concatenate(line_list)
def GetExtent(self):
ne,nn,nz = len(self.__xcells), len(self.__ycells), len(self.__zcells)
return (0,ne, 0,nn, 0,nz)
def _MakeModel(self, pdo):
ox,oy,oz = self.__origin[0], self.__origin[1], self.__origin[2]
# Read the cell sizes
cx = self.__xcells
cy = self.__ycells
cz = self.__zcells
# Invert the indexing of the vector to start from the bottom.
cz = cz[::-1]
# Adjust the reference point to the bottom south west corner
oz = oz - np.sum(cz)
# Now generate the coordinates for from cell width and origin
cox = ox + np.cumsum(cx)
cox = np.insert(cox,0,ox)
coy = oy + np.cumsum(cy)
coy = np.insert(coy,0,oy)
coz = oz + np.cumsum(cz)
coz = np.insert(coz,0,oz)
# Set the dims and coordinates for the output
ext = self.GetExtent()
nx,ny,nz = ext[1]+1,ext[3]+1,ext[5]+1
pdo.SetDimensions(nx,ny,nz)
# Convert to VTK array for setting coordinates
pdo.SetXCoordinates(interface.convertArray(cox, deep=True))
pdo.SetYCoordinates(interface.convertArray(coy, deep=True))
pdo.SetZCoordinates(interface.convertArray(coz, deep=True))
return pdo
def _AddModelData(self, pdo, data):
nx, ny, nz = pdo.GetDimensions()
nx, ny, nz = nx-1, ny-1, nz-1
# ADD DATA to cells
if data is None:
data = np.random.rand(nx*ny*nz)
data = interface.convertArray(data, name='Random Data', deep=True)
else:
data = interface.convertArray(data, name=dataNm, deep=True)
pdo.GetCellData().AddArray(data)
return pdo
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output data object
"""
# Get input/output of Proxy
pdo = self.GetOutputData(outInfo, 0)
# Perform the task
self._MakeModel(pdo)
self._AddModelData(pdo, None) # TODO: add ability to set input data
return 1
def RequestInformation(self, request, inInfo, outInfo):
"""Used by pipeline to set output whole extent
"""
# Now set whole output extent
ext = self.GetExtent()
info = outInfo.GetInformationObject(0)
# Set WHOLE_EXTENT: This is absolutely necessary
info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6)
return 1
#### Getters / Setters ####
def SetOrigin(self, x0, y0, z0):
"""Set the origin of the output
"""
if self.__origin != [x0, y0, z0]:
self.__origin = [x0, y0, z0]
self.Modified()
def SetXCells(self, xcells):
"""Set the spacings for the cells in the X direction
Args:
xcells (list or np.array(floats)) : the spacings along the X-axis"""
if len(xcells) != len(self.__xcells) or not np.allclose(self.__xcells, xcells):
self.__xcells = xcells
self.Modified()
def SetYCells(self, ycells):
"""Set the spacings for the cells in the Y direction
Args:
ycells (list or np.array(floats)) : the spacings along the Y-axis"""
if len(ycells) != len(self.__ycells) or not np.allclose(self.__ycells, ycells):
self.__ycells = ycells
self.Modified()
def SetZCells(self, zcells):
"""Set the spacings for the cells in the Z direction
Args:
zcells (list or np.array(floats)): the spacings along the Z-axis"""
if len(zcells) != len(self.__zcells) or not np.allclose(self.__zcells, zcells):
self.__zcells = zcells
self.Modified()
def SetXCellsStr(self, xcellstr):
"""Set the spacings for the cells in the X direction
Args:
xcellstr (str) : the spacings along the X-axis in the UBC style"""
xcells = CreateTensorMesh._ReadCellLine(xcellstr)
self.SetXCells(xcells)
def SetYCellsStr(self, ycellstr):
"""Set the spacings for the cells in the Y direction
Args:
ycellstr (str) : the spacings along the Y-axis in the UBC style"""
ycells = CreateTensorMesh._ReadCellLine(ycellstr)
self.SetYCells(ycells)
def SetZCellsStr(self, zcellstr):
"""Set the spacings for the cells in the Z direction
Args:
zcellstr (str) : the spacings along the Z-axis in the UBC style"""
zcells = CreateTensorMesh._ReadCellLine(zcellstr)
self.SetZCells(zcells)
| true |
dcf8b39509ac660ae4af5d9cecdc084796db19b5 | Python | jlgerber/swinstall_stack_python | /swinstall_stack/schemas/base/file_metadata.py | UTF-8 | 1,409 | 2.8125 | 3 | [] | no_license | """
file_metadata.py
FileMetadata base class
"""
__all__ = ("FileMetadataBase",)
class FileMetadataBase(object):
"""Base class for FileMetadata, defining required
methods and properties which need to be implemented.
"""
def element(self):
"""construct an element from self
:returns: xml element
:rtype: ElementTree.Element
"""
raise NotImplementedError()
@property
def is_current(self):
"""returns whether or not the FileMetadata refers to a current file or not.
:returns: true or false, depending
:rtype: bool
"""
raise NotImplementedError()
@property
def version(self):
"""Return the version of the element
:returns: version
:rtype: version type (depends)
"""
raise NotImplementedError()
@property
def versionless_path(self):
"""Return the path to the versionless file that the swinstall_stack
manages.
:returns: path to versionless file
:rtype: str
"""
raise NotImplementedError()
@property
def path(self):
"""Return the full path to the versioned file associated with
the metadata.
"""
raise NotImplementedError()
def __eq__(self, other):
raise NotImplementedError()
def __ne__(self, other):
raise NotImplementedError()
| true |
377dabd3c3056acd9e2a893c44fd0f2154991b21 | Python | a-doom/address-converter | /address_converter/address_objects.py | UTF-8 | 1,453 | 3.140625 | 3 | [
"MIT"
] | permissive | LETTER = "литера"
class AddrObject(object):
def __init__(self, aoguid, name, type_obj, postalcode):
self.aoguid = aoguid
self.name = name
self.type_obj = type_obj
self.postalcode = postalcode
def __str__(self):
return "{0} - {1}".format(self.aoguid, self.name)
def __repr__(self):
return "{0} - {1}".format(self.aoguid, self.name)
class Address(object):
def __init__(self):
self._addr_path = ()
self.house_num = 0
self.house_num_liter = ""
@property
def postalcode(self):
"""Get the postal code from the last address in the list
"""
if any(self.addr_path):
return self.addr_path[-1].postalcode
else:
return ''
def calc_address_string(self):
result = ", ".join(
["{0} {1}".format(addrobj.type_obj, addrobj.name)
for addrobj in self.addr_path])
if self.house_num:
result += ", {0}".format(self.house_num)
if self.house_num_liter:
result += " {0} {1}".format(LETTER, self.house_num_liter)
if self.postalcode:
result += ", {0}".format(self.postalcode)
return result
@property
def addr_path(self):
return self._addr_path
@addr_path.setter
def addr_path(self, value):
self._addr_path = tuple(
[v for v in value if isinstance(v, AddrObject)])
| true |
29e1c012b8e5d794a452711bd6d1204c3ccd8b18 | Python | HalShaw/Leetcode | /Single Number.py | UTF-8 | 418 | 2.6875 | 3 | [
"MIT"
] | permissive | class Solution(object):
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
a=nums[0]
for i in range(1,len(nums)):
a^=nums[i]#所有元素异或,相同的异或后为0,0与任何数异或都为它本身
return a
'''不使用异或,使用set
s1 = set(nums)
a2 = sum(s1)*2-sum(nums)
return a2 '''
| true |
64f6b45c4dbec2a22728685061bfbbd26fd592a7 | Python | mohitleo9/interviewPractice | /Linked_Lists/LinkedLists.py | UTF-8 | 1,849 | 3.734375 | 4 | [] | no_license | class Node:
def __init__(self, data=0, next=None):
self.data = data
self.next = next
def __str__(self):
return str(self.data)
class LinkedList:
def __init__(self):
self.head = None
def insert_last(self, node):
if not node:
return
if not self.head:
self.head = node
return
tmp = self.head
while tmp.next:
tmp = tmp.next
# insert node
tmp.next = node
def insert_first(self, node):
if not node:
return
if not self.head:
self.head = node
return
node.next = self.head
self.head = node
return
def delete_node(self, data):
if not self.head:
return False
tmp = self.head
if tmp.data == data:
self.head = self.head.next
return True
found = False
while tmp.next:
if tmp.next.data == data:
found = True
break
tmp = tmp.next
if not found:
return False
tmp.next = tmp.next.next
return True
def __str__(self):
if not self.head:
return "None"
tmp = self.head
strin = ''
while tmp.next:
strin += str(tmp.data) + '->'
tmp = tmp.next
strin += str(tmp.data) + '->'
strin += 'None'
return strin
def main():
# n = Node(2)
l = LinkedList()
# l.insert_last(n)
# n = Node(4)
# l.insert_last(n)
# n = Node(5)
# l.insert_last(n)
# n = Node(6)
# l.insert_last(n)
# print l.delete_node(2)
# print l.delete_node(6)
# print l.delete_node(4)
l.insert_first(Node(6))
print l
if __name__ == '__main__':
main()
| true |
c3180bbcdc0da1d7b6284883252e4d76ea90099a | Python | firesnow1234/histogram | /histogram0517.py | UTF-8 | 27,146 | 2.53125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri May 17 09:52:39 2019
@author: Yuki
"""
import numpy as np
import cv2
import time
import copy
from math import isnan, isinf
from PIL import Image
import matplotlib.pyplot as plt
from myRansac import *
import math
import scipy.io as io
from scipy.io import loadmat
#from ransacPlane import *
import matlab.engine
from ransac import *
from planeFit import planefit
def augment(xyzs):
    """Append a homogeneous coordinate (1.0) to every 3-D point.

    Parameters
    ----------
    xyzs : array-like, shape (n, 3)
        Points as rows of (x, y, z).

    Returns
    -------
    np.ndarray, shape (n, 4)
        The same points with a trailing column of ones.
    """
    pts = np.asarray(xyzs)
    homogeneous = np.ones((pts.shape[0], 4))
    homogeneous[:, :3] = pts
    return homogeneous
def estimate(xyzs):
    """Estimate plane coefficients (a, b, c, d) from the first three points.

    The plane a*x + b*y + c*z + d = 0 is the null-space direction of the
    homogeneous point matrix, taken as the last right-singular vector of
    its SVD (the `augment` helper is inlined here).
    """
    head = np.asarray(xyzs[:3])
    sample = np.ones((head.shape[0], 4))
    sample[:, :3] = head
    _, _, vh = np.linalg.svd(sample)
    return vh[-1, :]
def is_inlier(coeffs, xyz, threshold):
    """Return whether point `xyz` lies within `threshold` of the plane.

    `coeffs` are the (a, b, c, d) plane coefficients; the point is
    homogenized (the `augment` helper is inlined) before the dot product.
    The result keeps the original shape-(1,) boolean array form.
    """
    homogeneous = np.ones((1, 4))
    homogeneous[0, :3] = xyz
    return np.abs(np.dot(homogeneous, coeffs)) < threshold
def project(p_in, T):
    """Project homogeneous lidar points through matrix `T` (e.g. into the
    camera image plane).

    Parameters
    ----------
    p_in : np.ndarray, shape (n, k)
        Points in homogeneous coordinates, one per row.
    T : np.ndarray, shape (m, k)
        Projection matrix.

    Returns
    -------
    np.ndarray, shape (n, m - 1)
        Projected points, normalized by the last homogeneous coordinate.
    """
    dim_norm = T.shape[0]
    # (T @ p_in.T).T is the same as p_in @ T.T
    transformed = np.dot(p_in, T.T)
    # Divide out the homogeneous coordinate (broadcasts over columns).
    w = transformed[:, dim_norm - 1].reshape(-1, 1)
    return transformed[:, :dim_norm - 1] / w
def lidar2img(lidar_index_cor2, velo_img, camera2, finalProp, pro_lidar, prop_threshold):
    """
    Overlay road-classified lidar points on the camera image and show it.

    Parameters
    ----------
    lidar_index_cor2 : np.ndarray (2-D)
        Per-pixel index into the flat point array (from preProcessing).
    velo_img : np.ndarray, shape (N, 2)
        Lidar points projected into image coordinates (from project()).
    camera2 : np.ndarray
        Camera frame to draw on.
    finalProp : 2-D array
        Road probability per front-view pixel (from histogram()).
    pro_lidar : np.ndarray, shape (N, 4)
        Original lidar points; column 0 is the forward distance x.
    prop_threshold : float
        Minimum road probability for a point to be drawn.

    Returns
    -------
    list of np.ndarray
        The rows of ``pro_lidar`` that were drawn (road points that fall
        inside the image).
    """
    # Only keep points at least `dis` units ahead of the sensor (x > dis).
    # (Bug fix: the filter below previously used a literal 4 instead of dis.)
    dis = 4
    x = []
    y = []
    color = []
    lidarTmp = []
    rows, cols = camera2.shape[0], camera2.shape[1]
    for idx in range(len(lidar_index_cor2)):
        for jdx in range(len(lidar_index_cor2[0])):
            lidarIdx2 = int(lidar_index_cor2[idx, jdx])
            newPoint = velo_img[lidarIdx2, :]
            # Keep the point only if it projects strictly inside the image,
            # lies far enough ahead, and passes the road-probability cut.
            if (0 < newPoint[1] < rows - 1 and 0 < newPoint[0] < cols - 1
                    and pro_lidar[lidarIdx2, 0] > dis
                    and finalProp[idx][jdx] > prop_threshold):
                x.append(int(newPoint[0]))
                y.append(int(newPoint[1]))
                # Pseudo-colour by inverse distance (closer -> larger value).
                color.append(64 * dis // pro_lidar[lidarIdx2, 0])
                lidarTmp.append(pro_lidar[lidarIdx2, :])
    # Render a borderless 16:9 overlay of the kept points.
    fig = plt.gcf()
    fig.set_size_inches(16.0, 9.0)
    plt.imshow(camera2)
    plt.scatter(x, y, c=color, cmap=plt.cm.jet, marker=',', s=2)
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
    plt.margins(0, 0)
    plt.show()
    return lidarTmp
# ==================================================================================================
# Preprocessing
# ==================================================================================================
def preProcessing(s):
    """
    Pre-process the raw lidar point cloud into a front-view disparity image.

    The cloud is assumed to contain 64 scan lines of 1737 points each
    (hard-coded below); column 0 of each point is the forward distance x.

    Parameters
    ----------
    s : np.ndarray, shape (64 * 1737, 4)
        Lidar points as rows (x, y, z, _); column 3 is overwritten with 1.
        Mutated in place.

    Returns
    -------
    frontLidar : np.ndarray, shape (64, width, 3)
        xyz coordinates for every pixel of the front view.
        NOTE(review): looked up in the module-level global ``lidar2``
        (set in ``__main__``), not in ``s`` -- confirm both refer to the
        same cloud.
    lidar_image_front3 : np.ndarray, shape (64, width)
        Left-right mirrored 1/x ("disparity") image of the front view.
    lidar_index_cor3 : np.ndarray, shape (64, width)
        For each pixel, the row index back into the flat point array.
    """
    s[:,3]=1  # homogeneous coordinate
    # new computation
    index = np.arange(0, len(s),int(len(s)/64)) # start offset of each of the 64 scan lines
    lidar_image_dis_temp = s.reshape(64,1737,4)
    lidar_image_x = lidar_image_dis_temp[:,:,0]  # forward-distance (x) channel only
    lidar_index_correspondance = np.zeros((lidar_image_x.shape[0], lidar_image_x.shape[1])) # flat point index per pixel
    leftView, rightView = 0, 0
    # Find the horizontal extent of the forward view by scanning beam 31
    # (middle of the 64 lines) for the first/last column with x > 5.
    for i in range(lidar_image_x.shape[1]):
        if lidar_image_x[31][i] > 5 and not isnan(lidar_image_x[31][i]):
            leftView = i
            break
    for i in range(lidar_image_x.shape[1]-1, 0, -1):
        if lidar_image_x[31][i] > 5 and not isnan(lidar_image_x[31][i]):
            rightView = i
            break
    # Record, for every (scan line, column) pixel, its row in the flat cloud.
    for i in range(len(index) -1): # 64 scan lines
        for j in range(index[i], index[i+1]): # 1737 points per line
            lidar_index_correspondance[i,j%1737] = j
    lidar_image_x[np.where(np.isnan(lidar_image_x))]= 0
    plt.imshow(lidar_image_x, cmap=plt.cm.jet)
    plt.show()
    # Convert distance to disparity (1/x); distances below 0.001 become 0.
    for i in range(lidar_image_x.shape[0]):
        for j in range(lidar_image_x.shape[1]):
            if lidar_image_x[i,j] < 0.001:
                lidar_image_x[i,j] = 0
            else:
                lidar_image_x[i,j]= 1 / lidar_image_x[i,j]
    lidar_image_x[np.where(lidar_image_x ==np.Inf)] = 0
    # Crop to the forward field of view found above.
    lidar_image_front = lidar_image_x[:,leftView: rightView]
    lidar_index_cor2 = lidar_index_correspondance[:, leftView: rightView]
    lidar_image_front3 = np.zeros((lidar_image_front.shape[0], lidar_image_front.shape[1]))
    lidar_index_cor3 = np.zeros((lidar_index_cor2.shape[0], lidar_index_cor2.shape[1]))
    # Mirror the cropped view left-to-right (and its index map with it).
    for i in range(int(lidar_image_front3.shape[1])):
        lidar_image_front3[:,i] = lidar_image_front[:, lidar_image_front.shape[1]-i-1]
        lidar_index_cor3[:, i] = lidar_index_cor2[:, lidar_index_cor2.shape[1] -i-1]
    frontLidar = np.zeros((lidar_index_cor3.shape[0], lidar_index_cor3.shape[1], 3 )) # xyz of the front-view points
    # NOTE(review): reads the global ``lidar2`` defined in __main__, not ``s``.
    for i in range(lidar_index_cor3.shape[0]):
        for j in range(lidar_index_cor3.shape[1]):
            frontLidar[i][j] = lidar2[int(lidar_index_cor3[i][j]),:3]
    lidar_image_front[np.where(np.isinf(lidar_image_front))] = 0
    plt.figure()
    plt.imshow(lidar_image_front, cmap=plt.cm.jet)
    plt.title('front')
    plt.figure()
    plt.imshow(lidar_image_front3, cmap=plt.cm.jet)
    plt.title('lidar_image_front3')
    return frontLidar, lidar_image_front3, lidar_index_cor3
def histogram(lidar_image_front3):
    """
    Histogram-based road segmentation.

    Builds, per scan line, a 1000-bin histogram of the 1/x "disparity"
    values, thresholds it, RANSAC-fits a line y = m*x + b through the
    populated bins, and converts each pixel's distance to that line into
    a road probability.

    Parameters
    ----------
    lidar_image_front3 : np.ndarray, shape (64, width)
        Mirrored front-view 1/x image produced by preProcessing().

    Returns
    -------
    roadProp : np.ndarray, same shape as the input
        Road probability in [0, 1]; 0 for pixels more than 16 units
        from the fitted line or with no lidar return.
    """
    histogram = np.zeros((64,1000))
    # Accumulate the disparity histogram (bin width 0.001) per scan line.
    for i in range(lidar_image_front3.shape[0]):
        for j in range(lidar_image_front3.shape[1]):
            indhis = int(lidar_image_front3[i,j]*1000)
            if indhis < 1000:
                histogram[i,indhis] += 1
    plt.figure()
    plt.imshow(histogram)
    plt.show()
    # Binarise: keep bins with more than 20 hits (scan lines 30..63 only).
    histArray =[]
    for x in range(30, 64):
        for y in range(1, 1000):
            if histogram[x][y] > 20:
                histArray.append([y,x])
    histPoints = np.array(histArray)
    hist = []
    for i in range(len(histPoints)):
        hist.append([histPoints[i,0],histPoints[i,1]])
    # RANSAC line fit y = m*x + b (plot_best_fit comes from myRansac).
    m,b = plot_best_fit(hist)
    # Hyper-parameters for the segmentation bands around the fitted line.
    alpha = 0.5
    beta = 1.2
    # NOTE(review): roadSegment is computed below but never returned or
    # displayed -- it appears to be debug/visualisation-only.
    roadSegment = np.zeros([len(lidar_image_front3), len(lidar_image_front3[0])])
    # i: scan-line index, j: column in the front-view image
    for i in range(len(lidar_image_front3)):
        for j in range(len(lidar_image_front3[0])):
            light = int(lidar_image_front3[i,j] / 0.001)  # disparity bin of this pixel
            if light > 1000:
                light = 0
            # case 1: water (no lidar return)
            if(lidar_image_front3[i,j] == 0 and i <= m*light + beta*b):
                roadSegment[i][j] = 0
            # case 2: positive obstacle
            elif(i < m*light + alpha*b):
                roadSegment[i][j] = 1
            # case 3: negative obstacle
            elif(i > m*light + beta*b):
                roadSegment[i][j] = 2
            # case 4: road surface (inside the band)
            elif(i >= m*light + alpha*b and i <= m*light + beta*b):
                roadSegment[i][j] = 3
    #print('totally cost',time_end-time_start)
    # Normalised box filter (disabled):
    #finalMap=cv2.blur(roadSegment,(5,5))
    '''
    plt.figure()
    plt.imshow(roadSegment,cmap=plt.cm.jet)
    plt.title('Road Segmentation')
    '''
    roadProp = np.zeros([len(lidar_image_front3), len(lidar_image_front3[0])])
    # i: scan-line index, j: column in the front-view image.
    # Distance from point (a, b) to line Ax + By + C = 0 is
    # d = |A*a + B*b + C| / sqrt(A^2 + B^2); here the line is m*x - y + b = 0.
    maxDist = 0
    minDist = 0
    for i in range(len(lidar_image_front3)):
        for j in range(len(lidar_image_front3[0])):
            light = int(lidar_image_front3[i,j] / 0.001)
            if light == 0:
                continue
            dist = abs(m * light - i + b)/((-1)*(-1) + m * m)**0.5
            maxDist = max(maxDist, dist)
            minDist = min(minDist, dist)
    # NOTE(review): the scanned maxDist above is discarded -- the range is
    # clamped to a fixed 16 units (and minDist stays 0 since dist >= 0).
    maxDist = 16
    for i in range(len(lidar_image_front3)):
        for j in range(len(lidar_image_front3[0])):
            light = int(lidar_image_front3[i,j] / 0.001)
            if light == 0:
                continue
            dist = abs(m * light - i + b)/((-1)*(-1) + m * m)**0.5
            if dist > 16:
                roadProp[i,j] = 0
                continue
            # Linear falloff: 1 at the fitted line, 0 at 16 units away.
            roadProp[i,j] = 1 - (dist / (maxDist - minDist))
    return roadProp
'''
###
### Main 函数
###
'''
if __name__ == '__main__':
lidarName = "20190519\\1545204862.65.txt"
camera2 = cv2.imread("20190519\\1545204862.68.png")
velo2imgFile = 'velo2img.txt'
lidar1 = np.loadtxt(lidarName)
velo2imgTxT = np.loadtxt(velo2imgFile)
tempLidar =np.zeros((lidar1.shape[0],lidar1.shape[1]))
# 每条line有1737个点
for i in range(64):
tempLidar[1737*i : 1737*i+1737, :] = lidar1[lidar1.shape[0]-i: 0: -64, :] #lidar[i:lidar.shape[0]:64,:]
lidar = tempLidar
lidar2 =lidar[:,:4]
# imshow project
pro_lidar = lidar[: ,:4]
pro_lidar[:,3] = 1
velo_img = project(pro_lidar, velo2imgTxT)
s = copy.deepcopy(lidar2)
frontLidar, lidar_image_front3, lidar_index_cor3 = preProcessing(s)
roadProp = histogram(lidar_image_front3) # histogram 计算道路概率
prop_threshold = 0.5 #阈值0~1 判断点是道路点还是非道路点
lidarTmp = lidar2img(lidar_index_cor3, velo_img, camera2, roadProp, pro_lidar, prop_threshold)
roadArray = np.array(lidarTmp)
io.savemat('road_lidar', {'road_lidar': roadArray})
io.savemat('source_lidar', {'source_lidar': pro_lidar})
'''
### RANSAC
### 拟合平面,筛选点
###
'''
#road_ransac = roadArray[:,:3]
#road_ransac = road_ransac.T
#ransac_args, mask = run_ransacPlane(road_ransac)
'''
io.savemat('ransac_plane', {'planeF': road_ransac})
eng = matlab.engine.start_matlab()
result = eng.ransac1212(1000)
a,b,c,d = result[0][0], result[0][1], result[0][2], result[0][3] # ax + by + cz + d = 0
'''
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
#fig = plt.figure()
#ax = mplot3d.Axes3D(fig)
def plot_plane(a, b, c, d):
xx, yy = np.mgrid[:30, :30]
return xx, yy, (-d - a * xx - b * yy) / c
xyzs = roadArray[:,:3]
n = len(xyzs)
max_iterations = 10000
goal_inliers = n * 0.3
#ax.scatter3D(xyzs.T[0], xyzs.T[1], xyzs.T[2])
# RANSAC
m, b = run_ransac(xyzs, estimate, lambda x, y: is_inlier(x, y, 0.01), 3, goal_inliers, max_iterations)
a, b, c, d = m
#xx, yy, zz = plot_plane(a, b, c, d)
#ax.plot_surface(xx, yy, zz, color=(0, 1, 0, 0.5))
#plt.show()
# 筛选与平面高度差小于 0.1m 的 点
m1 = m.reshape(1,4)
mask=abs(np.dot(m1,roadArray.T))
idx = np.where(mask < 0.15)
dist_array = roadArray[idx[1][:]]
#io.savemat('dist_array', {'road_lidar': dist_array}) # 最后拟合曲面的点
'''
### 最小二乘拟合曲面
'''
plane_args = planefit(dist_array[:,:3])
'''
###
### 栅格化, 计算高程差
###
'''
Grid = []
length = 60 # 0 ~ 60m
width = 40 # -20 ~ 20 m
cell_size = 0.2 # 0.2m
for i in range(int(length*width / (cell_size**2))):
Grid.append([])
for i in range(len(pro_lidar)):
lidar_i = pro_lidar[i]
if 0<lidar_i[0]<60 and -20<lidar_i[1]<20:
idx = int(lidar_i[0]/0.2) * 200 + int((lidar_i[1]+20) /0.2)
Grid[idx].append(lidar_i)
dis = 4
x = []
y = []
color = []
lidar_grid = []
for i in range(len(velo_img)):
newPoint = velo_img[i,:]
lidar_i = pro_lidar[i,:3]
if 0<lidar_i[0]<60 and -20<lidar_i[1]<20:
idx = int(lidar_i[0]/0.2) * 200 + int((lidar_i[1]+20) /0.2)
lidar_cell = np.array(Grid[idx])
dist_cell = abs(np.dot(m1,lidar_cell.T))
if np.max(dist_cell) < 0.2: #and np.max(lidar_cell[:,2]) - np.min(lidar_cell[:,2]) < 0.2: # 计算到ransac平面距离 以及 高程差
if newPoint[1]>0 and newPoint[1] < camera2.shape[0]-1 and newPoint[0]>0 and newPoint[0]<camera2.shape[1]-1 and pro_lidar[i,0] > dis and len(Grid[idx]) > 0 :
x.append(int(newPoint[0]))
y.append(int(newPoint[1]))
color.append(64*dis // pro_lidar[i,0])
lidar_grid.append(lidar_i)
plt.figure()
plt.title('Altitude difference',color='blue')
plt.axis('off')
fig = plt.gcf()
fig.set_size_inches(16.0,9.0) #dpi = 300, output = 700*700 pixels
plt.imshow(camera2)
plt.scatter(x, y, c=color, cmap=plt.cm.jet, marker=',', s=2)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
plt.margins(0,0)
fig.savefig("filename2.png", format='png', transparent=True, dpi=30, pad_inches = 0)
#plt.scatter(x2, y2, c=color2, cmap=plt.cm.jet, marker='.', s=0.5)
plt.show()
x10, y10 = [], []
for it in pro_lidar:
if 0<it[0] < 60 and -20<it[1]<20:
x10.append(it[0])
y10.append(it[1])
plt.scatter(y10, x10, c='black', marker=',', s=1)
plt.grid()
plt.xlabel('Y')
plt.ylabel('X')
plt.axis([-20,20,0,60])
plt.gca().invert_xaxis()
plt.show()
'''
###
### PCA求法线方向
###
'''
'''
dis = 4
x = []
y = []
color = []
lidar_grid = []
grid_view = np.zeros((len(Grid), 1))
grid_angle = np.zeros((len(Grid), 1))
upVector = np.array([a,b,c])
for i in range(len(velo_img)):
newPoint = velo_img[i,:]
lidar_i = pro_lidar[i,:3]
if 0<lidar_i[0]<60 and -20<lidar_i[1]<20:
idx = int(lidar_i[0]*2) * 200 + int((lidar_i[1]+20) /0.2)
lidar_cell = np.array(Grid[idx])
if len(lidar_cell) > 0:
if grid_view[idx] == 0 and len(Grid[idx])>3:
grid_view[idx] = 1
lidar_cell = np.array(Grid[idx])
cell_x, cell_y, cell_z = lidar_cell[:,0], lidar_cell[:,1], lidar_cell[:,2]
newBox = [cell_x, cell_y, cell_z]
box_cov = np.cov(newBox)
eigenValue, eigenVector = np.linalg.eig(box_cov)
sorted_indices = np.argsort(-eigenValue)
least_evecs = eigenVector[:,sorted_indices[:-2:-1]]
least_evecs = least_evecs.ravel()
Lx = np.sqrt(least_evecs.dot(least_evecs))
Ly = np.sqrt(upVector.dot(upVector))
cos_angle = least_evecs.dot(upVector)/(Lx*Ly)
angle = np.arccos(cos_angle)
angle2 = angle * 360/2/np.pi
grid_angle[idx] = angle2
dist_cell = abs(np.dot(m1,lidar_cell.T))
if np.max(dist_cell) < 0.15 and 0 < grid_angle[idx] < 40: # 计算栅格内的法线方向
if newPoint[1]>0 and newPoint[1] < camera2.shape[0]-1 and newPoint[0]>0 and newPoint[0]<camera2.shape[1]-1 and pro_lidar[i,0] > dis and len(Grid[idx]) > 0:
x.append(int(newPoint[0]))
y.append(int(newPoint[1]))
color.append(64*dis // pro_lidar[i,0])
lidar_grid.append(lidar_i)
plt.figure()
#plt.title('PCA normal line',color='blue')
plt.axis('off')
fig = plt.gcf()
fig.set_size_inches(16.0,9.0) #dpi = 300, output = 700*700 pixels
plt.imshow(camera2)
plt.scatter(x, y, c='r', cmap=plt.cm.jet, marker=',', s=2)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
plt.margins(0,0)
fig.savefig("filename0.png", format='png', transparent=True, dpi=30, pad_inches = 0)
#plt.scatter(x2, y2, c=color2, cmap=plt.cm.jet, marker='.', s=0.5)
plt.show()
'''
'''
### 显示
'''
mat_data = loadmat('mydata.mat')
lidar_mat = mat_data["result"]
clo_b = np.ones(len(lidar_mat))
p0,p1,p2,p3,p4,p5 = plane_args[5], plane_args[3], plane_args[4], plane_args[0], plane_args[1], plane_args[2]
# 计算点到二次曲面的距离
def compute_dist2plane(xyz):
# comp_z = A + B*x + C*y + D*x.*x + E*x.*y + F*y.*y;
x, y, z = xyz[0], xyz[1], xyz[2]
comp_z = p0 + p1*x + p2*y + p3*x*x + p4*x*y + p5*y*y
dist = z - comp_z
return dist
dist2plane_all = list(map(compute_dist2plane, list(pro_lidar)))
pointStack = np.c_[pro_lidar, dist2plane_all] #对原始点云数据进行 列叠加
np.savetxt('finalStk.txt', pointStack, fmt='%0.6f')
lidar_mat2 = np.c_[lidar_mat, clo_b]
x2 = []
y2 = []
color2 = []
velo_img2 = project(lidar_mat, velo2imgTxT)
lidar_road2 = []
final_ground = np.zeros((len(pro_lidar), 3))
for i in range(len(velo_img2)):
newPoint = velo_img2[i,:]
lidar_i = lidar_mat2[i,:3]
if 0<lidar_i[0]<60 and -20<lidar_i[1]<20:
idx = int(lidar_i[0]/0.2) * 200 + int((lidar_i[1]+20) /0.2) # 栅格坐标
lidar_cell = np.array(Grid[idx])
if len(lidar_cell) > 0: # 栅格内的点数 > 0
dist_cell = abs(np.dot(m1, lidar_cell.T))
if np.max(dist_cell) < 0.15 and np.max(lidar_cell[:,2]) - np.min(lidar_cell[:,2]) < 0.2:
if newPoint[1]>0 and newPoint[1] < camera2.shape[0]-1 and newPoint[0]>0 and newPoint[0]<camera2.shape[1]-1 and lidar_mat2[i,0] > dis:
x2.append(int(newPoint[0]))
y2.append(int(newPoint[1]))
color2.append(64*dis // lidar_mat2[i,0])
lidar_road2.append(lidar_mat2[i,:3])
plt.figure()
#plt.title('Final Result',color='blue')
plt.axis('off')
fig = plt.gcf()
fig.set_size_inches(16.0,9.0) #dpi = 300, output = 700*700 pixels
plt.imshow(camera2)
plt.scatter(x2, y2, c=color2, cmap=plt.cm.jet, marker=',', s=2)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
plt.margins(0,0)
fig.savefig("filename2.png", format='png', transparent=True, dpi=30, pad_inches = 0)
#plt.scatter(x2, y2, c=color2, cmap=plt.cm.jet, marker='.', s=0.5)
plt.show()
# np.savetxt('all.txt',lidar, fmt='%0.6f')
# np.savetxt('ground.txt',lidar_road2, fmt='%0.6f')
x1, y1 = [], []
for it in lidar_road2:
if 0<it[0] < 60 and -20<it[1]<20:
x1.append(it[0])
y1.append(it[1])
plt.scatter(y1, x1, c='black', marker=',', s=2)
plt.xlabel('Y')
plt.ylabel('X')
plt.axis([-20,20,0,60])
plt.gca().invert_xaxis()
plt.show()
'''
###
### 利用高程差 5cm ~ 20cm, 确定 路沿
### RANSAC
'''
# x2 = []
# y2 = []
# color2 = []
# velo_img2 = project(lidar_mat2, velo2imgTxT)
# lidar_road_curb = []
#
# for i in range(len(velo_img2)):
# newPoint = velo_img2[i,:]
# lidar_i = lidar_mat2[i,:3]
# if 0<lidar_i[0]<60 and -20<lidar_i[1]<20:
# idx = int(lidar_i[0]/0.2) * 200 + int((lidar_i[1]+20) /0.2)
# lidar_cell = np.array(Grid[idx])
# if len(lidar_cell) > 0:
# dist_cell = abs(np.dot(m1,lidar_cell.T))
# if 0.05 < np.max(lidar_cell[:,2]) - np.min(lidar_cell[:,2]) < 0.2:
# if newPoint[1]>0 and newPoint[1] < camera2.shape[0]-1 and newPoint[0]>0 and newPoint[0]<camera2.shape[1]-1 and lidar_mat2[i,0] > dis:
# x2.append(int(newPoint[0]))
# y2.append(int(newPoint[1]))
# color2.append(64*dis // lidar_mat2[i,0])
# lidar_road_curb.append(lidar_mat2[i])
#
# lidar_road_curb = np.array(lidar_road_curb)
# data_curb = lidar_road_curb[:,:2]
#
# # RANSAC拟合满足高程差的 直线
# curb_m, curb_b = plot_best_fit(list(data_curb))
#
# curb_set1 = []
# other_line = []
# other_lidar= []
# for i in range(len(data_curb)):
# dist = abs(data_curb[i][0]*curb_m + curb_b - data_curb[i][1])
# if dist < 0.05:
# curb_set1.append(lidar_road_curb[i,:])
# else:
# other_lidar.append(lidar_road_curb[i,:])
#
# curb_set2 = []
# other_lidar = np.array(other_lidar)
# other_line = other_lidar[:,:2]
# curb_m2, curb_b2 = plot_best_fit(list(other_line))
# for i in range(len(other_line)):
# dist = abs(other_line[i][0]*curb_m2 + curb_b2 - other_line[i][1])
# if dist < 0.05:
# curb_set2.append(other_lidar[i,:])
#
#
#
# velo_img_curb1 = project(curb_set1 , velo2imgTxT)
#
# x3 = []
# y3 = []
# color3 = []
# dis = 4
# for i in range(len(velo_img_curb1)):
# newPoint = velo_img_curb1[i,:]
# if newPoint[1]>0 and newPoint[1] < camera2.shape[0]-1 and newPoint[0]>0 and newPoint[0]<camera2.shape[1]-1 and curb_sets2[i,0] > dis:
# x3.append(int(newPoint[0]))
# y3.append(int(newPoint[1]))
# color3.append(64*dis // curb_set1[i][0])
#
#
# plt.figure()
# #plt.title('Road Curb',color='blue')
# plt.axis('off')
# fig = plt.gcf()
# fig.set_size_inches(16.0, 9.0) #dpi = 300, output = 700*700 pixels
# plt.imshow(camera2)
# plt.scatter(x3, y3, c='r', cmap=plt.cm.jet, marker=',', s=2)
# plt.plot([x3[0], x3[-1]], [y3[0], y3[-1]],color='y', linewidth=4)
# plt.gca().xaxis.set_major_locator(plt.NullLocator())
# plt.gca().yaxis.set_major_locator(plt.NullLocator())
# plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
# plt.margins(0,0)
# #fig.savefig("filename2.png", format='png', transparent=True, dpi=30, pad_inches = 0)
# #plt.scatter(x2, y2, c=color2, cmap=plt.cm.jet, marker='.', s=0.5)
# plt.show()
#
# velo_img_curb2 = project(curb_set2, velo2imgTxT)
#
# x3 = []
# y3 = []
# color3 = []
# dis = 4
# for i in range(len(velo_img_curb2)):
# newPoint = velo_img_curb2[i,:]
# if newPoint[1]>0 and newPoint[1] < camera2.shape[0]-1 and newPoint[0]>0 and newPoint[0]<camera2.shape[1]-1 and curb_sets2[i,0] > dis:
# x3.append(int(newPoint[0]))
# y3.append(int(newPoint[1]))
#
#
# plt.figure()
# #plt.title('Road Curb',color='blue')
# plt.axis('off')
# fig = plt.gcf()
# fig.set_size_inches(16.0, 9.0) #dpi = 300, output = 700*700 pixels
# plt.imshow(camera2)
# plt.scatter(x3, y3, c='r', cmap=plt.cm.jet, marker=',', s=2)
# plt.plot([x3[0], x3[-1]], [y3[0], y3[-1]],color='g', linewidth=2)
# plt.gca().xaxis.set_major_locator(plt.NullLocator())
# plt.gca().yaxis.set_major_locator(plt.NullLocator())
# plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
# plt.margins(0,0)
# #fig.savefig("filename2.png", format='png', transparent=True, dpi=30, pad_inches = 0)
# #plt.scatter(x2, y2, c=color2, cmap=plt.cm.jet, marker='.', s=0.5)
# plt.show()
'''
mat_data = loadmat('mydata.mat')
lidar_mat = mat_data["result"]
clo_b = np.ones(len(lidar_mat))
lidar_mat2 = np.c_[lidar_mat, clo_b]
x2 = []
y2 = []
color2 = []
velo_img2 = project(lidar_mat2, velo2imgTxT)
lidar_road2 = []
for i in range(len(velo_img2)):
newPoint = velo_img2[i,:]
if newPoint[1]>0 and newPoint[1] < camera2.shape[0]-1 and newPoint[0]>0 and newPoint[0]<camera2.shape[1]-1 and lidar_mat2[i,0] > dis:
x2.append(int(newPoint[0]))
y2.append(int(newPoint[1]))
color2.append(64*dis // lidar_mat2[i,0])
lidar_road2.append(lidar_mat2[i,:3])
plt.figure()
#plt.title('plane_fit',color='blue')
plt.axis('off')
fig = plt.gcf()
fig.set_size_inches(16.0,9.0) #dpi = 300, output = 700*700 pixels
plt.imshow(camera2)
plt.scatter(x2, y2, c='r', cmap=plt.cm.jet, marker=',', s=2)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
plt.margins(0,0)
fig.savefig("filename2.png", format='png', transparent=True, dpi=30, pad_inches = 0)
#plt.scatter(x2, y2, c=color2, cmap=plt.cm.jet, marker='.', s=0.5)
plt.show()
'''
# DFSGrid = np.zeros((60, 40))
# dfs_size = 1
# for i in range(len(lidar_road2)):
# lidar_i = lidar_road2[i]
# idx = int(lidar_i[0] / dfs_size)
# idy = int((lidar_i[1]+20)/dfs_size)
# DFSGrid[idx][idy] = 1
| true |
eed8e39f146e59f05fbcc8f15b242c3636ce6a1a | Python | AathmanT/netty-performace-tuning | /netty_opy_custom.py | UTF-8 | 5,060 | 2.53125 | 3 | [] | no_license | import sklearn.gaussian_process as gp
import numpy as np
import random
from scipy.stats import norm
from skopt.acquisition import gaussian_ei
import time
import requests
import sys
import csv
from hyperopt import hp
from hyperopt import tpe
from hyperopt import Trials
from hyperopt import fmin
def dummy_model(x):
return 5*x[0]**2 - 4*x[1]*x[0] + 33 * x[1] + 334
def acquisition_function(x, model, minimum):
x = np.array(x).reshape(1, -1)
mu, sigma = model.predict(x, return_std=True)
print(mu, sigma)
with np.errstate(divide='ignore'):
Z = (minimum - mu) / sigma
print(norm.cdf(Z))
expected_improvement = (minimum - mu) * norm.cdf(Z) + sigma * norm.pdf(Z)
# expected_improvement[sigma == 0.0] = 0.0
return -1 * expected_improvement
def _normalize(x, minimum, maximum):
return (x - minimum) / (maximum - minimum)
def get_performance_only_tomcat(x, i):
global data
requests.put("http://192.168.32.2:8080/setThreadPoolNetty?size=" + str(x[0]))
time.sleep((i+1) * tuning_interval + start_time - time.time())
res = requests.get("http://192.168.32.2:8080/performance-netty").json()
data.append(res)
print("Mean response time : " + str(res[2]))
return float(res[2])
def objective(x):
global data
global param_history
global ii
x = int(x)
requests.put("http://192.168.32.2:8080/setThreadPoolNetty?size=" + str(x))
param_history.append([x])
time.sleep((ii+1) * tuning_interval + start_time - time.time())
ii += 1
res = requests.get("http://192.168.32.2:8080/performance-netty").json()
data.append(res)
print("Mean response time : " + str(res[2]))
return float(res[2])
folder_name = sys.argv[1] if sys.argv[1][-1] == "/" else sys.argv[1] + "/"
case_name = sys.argv[2]
ru = int(sys.argv[3])
mi = int(sys.argv[4])
rd = int(sys.argv[5])
tuning_interval = int(sys.argv[6])
data = []
param_history = []
test_duration = ru + mi + rd
iterations = test_duration // tuning_interval
noise_level = 1e-6
initial_points = 4
model = gp.GaussianProcessRegressor(kernel=gp.kernels.Matern(), alpha=noise_level,
n_restarts_optimizer=10, normalize_y=True)
x_data = []
y_data = []
start_time = time.time()
use_tpe = True
if use_tpe:
ii = 0
space = hp.uniform('x', 4, 200)
tpe_trials = Trials()
tpe_best = fmin(fn=objective, space=space, algo=tpe.suggest, trials=tpe_trials,
max_evals=test_duration // tuning_interval)
else:
thread_pool_max = 200
thread_pool_min = 4
# sample more random (or predetermined data points) and collect numbers (up to initial points)
for i in range(0, initial_points):
x = thread_pool_min + i * (thread_pool_max-thread_pool_min) / initial_points
x = int(x)
x_data.append([_normalize(x, thread_pool_min, thread_pool_max)])
y_data.append(get_performance_only_tomcat([x], i))
param_history.append([x])
model.fit(x_data, y_data)
# use bayesian optimization
for i in range(initial_points, iterations):
minimum = min(y_data)
# minimum = 99999
max_expected_improvement = 0
max_points = []
max_points_unnormalized = []
for pool_size in range(thread_pool_min, thread_pool_max + 1):
x = [pool_size]
x_normalized = [_normalize(x[0], thread_pool_min, thread_pool_max)]
ei = gaussian_ei(np.array(x_normalized).reshape(1, -1), model, minimum)
if ei > max_expected_improvement:
max_expected_improvement = ei
max_points = [x_normalized]
max_points_unnormalized = [x]
elif ei == max_expected_improvement:
max_points.append(x_normalized)
max_points_unnormalized.append(x)
if max_expected_improvement == 0:
print("WARN: Maximum expected improvement was 0. Most likely to pick a random point next")
# select the point with maximum expected improvement
# if there're multiple points with same ei, chose randomly
idx = random.randint(0, len(max_points) - 1)
next_x = max_points[idx]
param_history.append(max_points_unnormalized[idx])
next_y = get_performance_only_tomcat(max_points_unnormalized[idx], i)
x_data.append(next_x)
y_data.append(next_y)
model = gp.GaussianProcessRegressor(kernel=gp.kernels.Matern(), alpha=noise_level,
n_restarts_optimizer=10, normalize_y=True)
model.fit(x_data, y_data)
print("minimum found : ", min(y_data))
with open(folder_name + case_name + "/results.csv", "w") as f:
writer = csv.writer(f)
writer.writerow(["IRR", "Request Count", "Mean Latency (for window)", "99th Latency"])
for line in data:
writer.writerow(line)
with open(folder_name + case_name + "/param_history.csv", "w") as f:
writer = csv.writer(f)
for line in param_history:
writer.writerow(line)
| true |
1394fb98c45531da8962eb1dd578a66150db31ec | Python | Alexamith23/conversor | /app/Http/Controllers/verifyParser.py | UTF-8 | 441 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import json
import sys
# sanitize the argument
def main(argv = sys.argv[1:]):
var = ""
it = 1
for i in argv:
var += i
if(it != len(argv)):
var += " "
it += 1
pass
return var
arguments = main()
#args = json.dumps(arguments) # doubtful operation
# default sys.exit output inverted!
if(len(sys.argv[1:]) >= 8):
sys.exit(arguments)
else:
sys.exit(0) | true |
c32b5d5208d36581c8ca1f5685dd7e29f996ae99 | Python | ziamajr/CS5590PythonLabAssignment | /InClass 6/ICE6.py | UTF-8 | 452 | 3.4375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
x = np.array([0,1,2,3,4,5,6,7,8,9])
y = np.array([1,3,2,5,7,8,8,9,10,12])
np.mean(x)
np.mean(y)
x1=np.mean(x)
y1=np.mean(y)
print (x1)
print (y1)
np.sum((x-x1)*(y-y1))
s1=np.sum((x-x1)*(y-y1))
np.sum((x-y1)*(x-y1))
s2=np.sum((x-y1)*(x-y1))
print(s1)
print(s2)
f = s1/s2
print(f)
b0 = y1-(f*x1)
print(b0)
y2= (b0+f*x)
plt.scatter(x,y)
plt.plot(x, y2)
plt.show() | true |
c669e840ccf120cb2cea4d31a5efdd4500b955ee | Python | Wjun0/python- | /day07/12-文件的拷贝-扩展大文件的拷贝.py | UTF-8 | 1,385 | 3.734375 | 4 | [] | no_license | # 原文件的名字
src_file_name = "test.txt"
# 根据原文件名字生成拷贝后的文件名: test[复件].txt
# 1. 切片
# 2. split
# 3. partition 使用这种方式
file_name, point_str, end_str = src_file_name.partition(".")
dst_file_name = file_name + "[复件]" + point_str + end_str
print(dst_file_name)
# 1. 打开目标文件(拷贝后的文件),目的就是创建一个空的文件
# 指定wb模式可以兼容文本文件和其他类型的文件(图片,视频,音频等等)
dst_file = open(dst_file_name, "wb")
# 2. 打开原文件,读取原文件中的数据
src_file = open(src_file_name, "rb")
# 拷贝大文件,不能一次性读取文件中的所有数据并加载到内存,可能导致内存暴涨及内存溢出
# data = src_file.read()
# 解决办法: 循环读取文件中的数据,每次加载一部分数据
while True:
data = src_file.read(1024) # 每次读取的最大字节长度
# 查看每次读取到的数据和数据的长度
print(data, type(data), len(data))
# if len(data) > 0:
# 判断字节类型是否有字节数据
# 字节类型数据属于容器类型,好比字符串一样
if data:
# 3. 把原文件中的数据写入到目标文件里面
dst_file.write(data)
else:
print("数据读取完成")
break
# 4. 关闭文件
src_file.close()
dst_file.close()
| true |
5e14670fc1e565eb901566daa1c1cfae29ad45f7 | Python | Sidd-UCD/UCDPA_Siddhesh | /Data Set 1.py | UTF-8 | 3,189 | 3.453125 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df_1 = pd.read_csv('/Users/siddheshkotian/Documents/Certification Data Analytics/Project Rubric/Data_Set A.csv')
print(df_1)
# Exploring Dataframe(df_1)
print(df_1.head())
print(df_1.info())
print(df_1.shape)
print(df_1.values)
print(df_1.columns)
print(df_1.index)
# Sorting Dataset(df_1)
df_1_sales = df_1.sort_values("Sales")
print(df_1_sales)
print(df_1_sales.head())
df_1_sales_des = df_1.sort_values(["Sales"], ascending=[False])
print(df_1_sales_des)
print(df_1_sales_des.head())
df_1_sales_units_des = df_1.sort_values(["Sales", "Units"], ascending=[False, False])
print(df_1_sales_units_des)
print(df_1_sales_units_des.head())
df_1_location = df_1.sort_values("Location")
print(df_1_location)
# Dropping duplicates
df_1_unique_location = df_1.drop_duplicates(subset="Location")
print(df_1_unique_location)
print(df_1_unique_location.value_counts())
df_1_unique_location_sales = df_1.drop_duplicates(subset=["Location", "Sales"])
print(df_1_unique_location_sales)
print(df_1_unique_location_sales.sort_values("Sales", ascending=False))
print(df_1_unique_location_sales.sort_values("Sales", ascending=False).head())
# Creating List
list1 = ["Restaurant", "Location", "Sales"]
print(df_1[list1].head())
list2 = ["Restaurant", "Location", "Sales"]
print(df_1[list1].head())
list3_sales = ["Sales"]
print(df_1[list3_sales].head())
list4_units = ["Units"]
print(df_1[list4_units].head())
list5 = ["Restaurant", "Location", "Sales", "Units"]
print(df_1[list5])
# converting list into arrays through Numpy
array3 = np.array(df_1[list3_sales])
print(array3)
array4 = np.array(df_1[list4_units])
print(array4)
# maximum and minimum sales value
print(array3.min())
print(array3.max())
print(array3.sum())
# mean and standard deviation
print(array3.mean())
print(array3.std())
# Indexing arrays
array1 = np.array(df_1[list5])
print(array1)
indexing = np.array([0,3])
array1_index = array1[indexing]
print(array1_index)
# Looping
print("starting the loop example")
filter = ["Restaurant", "Sales"]
Sales_restaurant = df_1[filter]
rest_above_avg = []
rest_below_avg = []
for index, row in Sales_restaurant.iterrows():
if (row["Sales"]) > 34:
rest_above_avg.append(row["Restaurant"])
else:
rest_below_avg.append(row["Restaurant"])
print(rest_above_avg)
print(rest_below_avg)
# Merging dataframes
df_new_1 = df_1[["Restaurant", "Sales"]].copy()
print(df_new_1)
df_new_2 = df_1[["Restaurant", "Units"]].copy()
print(df_new_2)
df_new_3 = df_new_1.merge(df_new_2, on="Restaurant")
print(df_new_3.head(10))
# Visualization
df_1["Sales"].hist()
df_1["Sales"].hist(bins=10)
plt.show()
# Figure_2
avg_sales_per_units = df_1.groupby("Units")["Sales"].mean()
avg_sales_per_units.plot(kind="bar", title="Mean Sales per Units")
plt.show()
# Figure_3
df_1[df_1["Franchising"]=="Yes"]["Units"].plot(kind= "line", color="red", title="Units with and without Franchise")
df_1[df_1["Franchising"]=="No"]["Units"].plot(kind="line", color="blue", title="Units with and without Franchise")
plt.legend(["Yes", "No"])
plt.show()
| true |
19ea3a1913b3cf6753abe614f20ff3db5d13ccba | Python | Kawser-nerd/CLCDSA | /Source Codes/CodeJamData/16/12/9.py | UTF-8 | 679 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# $File: solve.py
# $Date: Sat Apr 16 09:23:24 2016 +0800
# $Author: jiakai <jia.kai66@gmail.com>
import collections
def solve(rows):
cnt = collections.Counter()
for i in rows:
cnt.update(i)
rst = []
for k, v in cnt.items():
if v % 2:
rst.append(k)
return ' '.join(map(str, sorted(rst)))
def main():
nr_case = int(input())
for case in range(nr_case):
N = int(input())
rows = []
for i in range(N*2-1):
rows.append(list(map(int, input().split())))
print('Case #{}: {}'.format(case + 1, solve(rows)))
if __name__ == '__main__':
main()
| true |
b332dbf7a6ab5c820ee23b0a63829f07cb61a6bc | Python | kennycaiguo/Heima-Python-2018 | /15期/21 数据分析/10-数据的合并和分组聚合/test2.py | UTF-8 | 374 | 2.734375 | 3 | [] | no_license | # coding:utf-8
# File Name: test2
# Description :
# Author : huxiaoyi
# Date: 2019-05-14
import pandas as pd
from matplotlib import pyplot as plt
file_path = "./directory.csv"
df = pd.read_csv(file_path)
# 使用matplotlib 呈现出店铺总数排名前10的国家
data = df.groupby(by="Country").count()['Brand'].sort_values(ascending=False)[:10]
_ | true |
993a9d1f59a701a3918873c009b6f26d0f9bbb8b | Python | JohnnySunkel/BlueSky | /Keras/keras_Inception.py | UTF-8 | 1,150 | 3.171875 | 3 | [] | no_license | from keras import layers
# This example assumes the existence of a 4D input tensor 'x'
# Every branch has the same stride value (2),
# which is necessary to keep all branch outputs
# the same size so you can concatenate them.
branch_a = layers.Conv2D(128, 1,
activation = 'relu',
strides = 2)(x)
# In this branch, the striding occurs in
# the spatial convolution layer.
branch_b = layers.Conv2D(128, 1, activation = 'relu')(x)
branch_b = layers.Conv2D(128, 3, activation = 'relu',
strides = 2)(branch_b)
# In this branch, the striding occurs in
# the average pooling layer.
branch_c = layers.AveragePooling2D(3, strides = 2)(x)
branch_c = layers.Conv2D(128, 3, activation = 'relu')(branch_c)
branch_d = layers.Conv2D(128, 1, activation = 'relu')(x)
branch_d = layers.Conv2D(128, 3, activation = 'relu')(branch_d)
branch_d = layers.Conv2D(128, 3, activation = 'relu',
strides = 2)(branch_d)
# Concatenate the branch outputs to obtain
# the module output.
output = layers.concatenate(
[branch_a, branch_b, branch_c, branch_d], axis = -1)
| true |
356740d9c6b3511e6d3598a5f49059d3d174f9dc | Python | dicomgrid/sdk-python | /tests/api/test_rate_limits.py | UTF-8 | 1,090 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | """Test rate limits."""
from ambra_sdk.api.base_api import RateLimit, RateLimits
class TestRateLimits:
"""Test rate limits."""
def test_default_call_period(self):
"""Test default call period."""
rls = RateLimits(
default=RateLimit(3, 2),
get_limit=None,
special=None,
)
assert rls.call_period('abc') == 2 / 3
def test_get_call_period(self):
"""Test get call period."""
rls = RateLimits(
default=RateLimit(3, 2),
get_limit=RateLimit(4, 2),
special=None,
)
assert rls.call_period('abc') == 2 / 3
assert rls.call_period('abc/get') == 2 / 4
def test_special_call_period(self):
"""Test special call period."""
rls = RateLimits(
default=RateLimit(3, 2),
get_limit=RateLimit(4, 2),
special={'special': RateLimit(5, 2)},
)
assert rls.call_period('abc') == 2 / 3
assert rls.call_period('abc/get') == 2 / 4
assert rls.call_period('special') == 2 / 5
| true |
5a33c59e56f7fd0fc76ac4674c842dd053a056fb | Python | chipx86/djblets | /djblets/webapi/auth/backends/__init__.py | UTF-8 | 3,208 | 2.828125 | 3 | [] | no_license | """Base support for managing API authentication backends.
These functions allow for fetching the list of available API authentication
backend classes, and using them to perform an authentication based on an HTTP
request.
"""
from __future__ import annotations
from importlib import import_module
from typing import List, Optional, Type
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest
from djblets.webapi.auth.backends.base import (WebAPIAuthBackend,
WebAPIAuthenticateResult)
_auth_backends: List[Type[WebAPIAuthBackend]] = []
def get_auth_backends() -> List[Type[WebAPIAuthBackend]]:
"""Return the list of web API authentication backends.
This defaults to :py:class:`~djblets.webapi.auth.backends.basic.
WebAPIBasicAuthBackend`, for HTTP Basic Auth, but can be overridden by
setting ``settings.WEB_API_AUTH_BACKENDS`` to a list of class paths.
Returns:
list:
A list of all usable authentication backend classes.
"""
global _auth_backends
if not _auth_backends:
class_paths = getattr(
settings, 'WEB_API_AUTH_BACKENDS', [
'djblets.webapi.auth.backends.basic.WebAPIBasicAuthBackend',
])
_auth_backends = []
for class_path in class_paths:
class_path = str(class_path)
i = class_path.rfind(str('.'))
module, attr = class_path[:i], class_path[i + 1:]
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured(
'Error importing web API auth backend %s: %s'
% (module, e))
try:
_auth_backends.append(getattr(mod, attr))
except AttributeError:
raise ImproperlyConfigured(
'Module "%s" does not define a "%s" class for the web API '
'auth backend'
% (module, attr))
return _auth_backends
def reset_auth_backends():
"""Reset the list of authentication backends.
The list will be recomputed the next time an authentication backend needs
to be used.
"""
global _auth_backends
_auth_backends = []
def check_login(
request: HttpRequest,
) -> Optional[WebAPIAuthenticateResult]:
"""Check if a login request was made.
If the client specifies a ``HTTP_AUTHORIZATION`` header, this will attempt
to authenticate using a supported authentication method.
Args:
request (HttpRequest): The HTTP request from the client.
Returns:
tuple:
The result of the authentication, if successful, or ``None`` otherwise.
See :py:class:`~djblets.webapi.auth.backends.base.
WebAPIAuthenticateResult` for details on the format for the returned
type value.
"""
if 'HTTP_AUTHORIZATION' in request.META:
for auth_backend_cls in get_auth_backends():
result = auth_backend_cls().authenticate(request)
if result is not None:
return result
return None
| true |
51da5610104c1838d925ab013ca74fdfc5a901fa | Python | glentner/CmdKit | /cmdkit/service/service.py | UTF-8 | 1,628 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | # SPDX-FileCopyrightText: 2021 CmdKit Developers
# SPDX-License-Identifier: Apache-2.0
"""Service class implementation."""
# internal libs
from .daemon import Daemon
class Service(Daemon):
"""
A Service can be run directly and _optionally_ daemonized.
Like `cmdkit.service.daemon.Daemon`, a `run` method must be defined
that implements the main business logic (i.e., the entry-point).
"""
_is_daemon: bool = False
def __init__(self, pidfile: str, daemon: bool = False) -> None:
"""
Initialization. You must call `.start()` before `.run()` is called.
Arguments:
pidfile (str):
Path to a process ID file. This file is created with
the process ID so it can be stopped later.
daemon (bool):
Run service as a daemon process (default: False).
"""
super().__init__(pidfile)
self.is_daemon = daemon
def daemonize(self) -> None:
"""Overrides the Daemon implementation if not `is_daemon`."""
if self.is_daemon:
super().daemonize()
@property
def is_daemon(self) -> bool:
"""Is this service able to become a daemon."""
return self.__is_daemon
@is_daemon.setter
def is_daemon(self, other: bool) -> None:
"""Assign whether this service can become a daemon."""
if other in (True, False, 0, 1):
self.__is_daemon = bool(other)
else:
raise ValueError(f'{self.__class__.__name__}.is_daemon expects True/False.')
def run(self) -> None:
raise NotImplementedError()
| true |
236129c8b2969b16ffccd5cee1f5eb482de0ff07 | Python | lattaro/manipulacao-dados-estudo-pandas | /Manipulação_dados_Pandas.py | UTF-8 | 1,239 | 3.875 | 4 | [] | no_license | import pandas as pd
notas = pd.Series ([2,7,5,10,6], index=["Alex", "João", "Pedro", "Zé", "Abel"])
print (notas)
print ("A nota do Alex é:",notas["Alex"]) #é possível trazer uma nota referenciada pelo seu index, no caso "Alex"
print("Média:", notas.mean()) #notas.mean calcula a média aritmética para o vetor nota
print("Desvio padrão:", notas.std()) #notas.std calcula o desvio padrão do vetor notas
print(notas.describe()) #descibr da o resumo estatístico do vetor notas.
print (np.log(notas)) #calcula o logatirmo neperiano do vetor notas.
df = pd.DataFrame({'Aluno' : ["Alex", "João", "Pedro", "Zé", "Abel"], #Criação de um dataframe (tabela)
'Faltas' : [3,4,2,1,4],
'P1' : [2,7,5,10,6],
'Seminário': [8.5,7.5,9.0,7.5,8.0],
'P2' : [10,8,10,7,9]})
print (df)
print (df.sort_values(by="P2")) #ordena o df pela coluna escolhida
print (df.loc[1]) #traz os valores do índice explicitado
print (df[df["P2"]>8.0]) #traz somente os valors condicionais do df
print (df[(df["P2"] > 8.0) & (df["Seminário"] > 6)]) #traz somente os valors condicionais do df.
#os valores boleanos devem ser bitwise & = and, | = or, ~ = not | true |
9043a1522e5f37f1fb1e1c98b80acfa45fc4fe87 | Python | DiyaWadhwani/SL-Lab | /partA/pythonProgs/(4)Age.py | UTF-8 | 252 | 3.484375 | 3 | [] | no_license | from datetime import date,datetime
def ageConvert(d,m,y):
dob=date(y,m,d)
today=date.today()
return today-dob
d=int(input("Enter day: "))
m=int(input("Enter month: "))
y=int(input("Enter year: "))
print("Age: ",ageConvert(d,m,y).days//365)
| true |
b46fbb32ecd5ddf41fdfc086d383b6463d3412e1 | Python | anelshaer/Python100DaysOfCode | /day-33-API-quotes-and-space-station/iss-location/main.py | UTF-8 | 2,231 | 2.734375 | 3 | [
"MIT"
] | permissive | from types import DynamicClassAttribute
import requests
from datetime import datetime
import time
import smtplib
import sys
LATITUDE = 52.520008
LONGITUDE = 13.404954
MARGIN = 5
SENDER_MAIL = "test@gmail.com"
PASSWORD = "TESTP@SSWORD"
TO_MAIL = "test2@gmail.com"
def is_iss_above():
response = requests.get(url="http://api.open-notify.org/iss-now.json")
response.raise_for_status()
data = response.json()
iss_lat = float(data["iss_position"]["latitude"])
iss_long = float(data["iss_position"]["longitude"])
print(f"Berlin latitude: {LATITUDE} longitute: {LONGITUDE}")
print(f"ISS current latitude: {iss_lat} longitude: {iss_long}")
if (iss_lat <= LATITUDE + MARGIN) and iss_lat >= LATITUDE - MARGIN:
if (iss_long <= LONGITUDE + MARGIN) and iss_long >= LONGITUDE - MARGIN:
return True
return False
def is_night():
sun_parameters = {
"lat": LATITUDE,
"lng": LONGITUDE,
"formatted": 0
}
response = requests.get(url="https://api.sunrise-sunset.org/json", params=sun_parameters)
response.raise_for_status()
data = response.json()
sunrise = int(data["results"]["sunrise"].split("T")[1].split(":")[0])
sunset = int(data["results"]["sunset"].split("T")[1].split(":")[0])
time_now_hour = datetime.now().utcnow().hour
if time_now_hour >= sunset or time_now_hour <= sunrise:
return True
return False
# Setup SMTP server: python -m smtpd -c DebuggingServer -n localhost:1025
def send_mail():
with smtplib.SMTP('localhost', 1025) as connection:
# connection.starttls()
# connection.login(user=SENDER_MAIL, password=PASSWORD)
msg = f"ISS passing over your head in the sky!\nUTC Time: {datetime.utcnow()}"
print(msg)
connection.sendmail(
from_addr=SENDER_MAIL,
to_addrs=TO_MAIL,
msg=f"Subject:Look up to the sky!\n\n{msg}"
)
def main():
try:
while True:
if is_iss_above() and is_night():
send_mail()
time.sleep(60)
except (KeyboardInterrupt, SystemExit):
print('\n! Received keyboard interrupt, quitting!.\n')
sys.exit()
if __name__ == "__main__":
main()
| true |
7d4c1a54836b31bde26e9164f7204a6240d32f45 | Python | dakotajunkman/Janggi | /main.py | UTF-8 | 705 | 3.71875 | 4 | [] | no_license | from JanggiGame import JanggiGame
def play_game():
"""
Creates a game loop to play the game.
"""
game = JanggiGame()
game.get_board().update_visual_board()
game.get_board().display_board()
while game.get_game_state() == 'UNFINISHED':
print(game.get_player_turn(), 'turn')
move_from = input('Move from: ')
move_to = input('Move to: ')
game.make_move(move_from, move_to)
game.get_board().update_visual_board()
game.get_board().display_board()
if game.is_in_check(game.get_player_turn()):
print(game.get_player_turn(), 'is in check')
print(game.get_game_state())
if __name__ == '__main__':
play_game() | true |
92060d47d178515db3c40c69143ff51ae7eb3537 | Python | dreadatour/pdigest | /pdigest.py | UTF-8 | 10,118 | 2.515625 | 3 | [] | no_license | # coding: utf-8
import datetime
import re
import time
import requests
from flask import Flask, render_template, request
app = Flask(__name__)
app.config.from_object('config')
url_re = re.compile(
r'(\bhttps?:\/\/[a-z0-9-+&@#\/%?=~_|!:,.;]*[a-z0-9-+&@#\/%=~_|])',
re.UNICODE | re.I
)
youtube_re = re.compile(
r'^https?:\/\/www\.youtube\.com\/watch\?v=(.*?)(&.*)?$',
re.UNICODE | re.I
)
youtube_link = (
'<iframe width="560" height="315" src="//www.youtube.com/embed/{0}"'
' frameborder="0" allowfullscreen class="video"></iframe>'
)
vimeo_re = re.compile(
r'^https?:\/\/vimeo\.com\/(\d+)(?:\?.*)?$', re.UNICODE | re.I
)
vimeo_link = (
'<iframe src="//player.vimeo.com/video/{0}" width="500" height="281"'
' frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen'
' class="video"></iframe>'
)
slideshare_re = re.compile(
r'^https?:\/\/(?:www\.)?slideshare\.net\/', re.UNICODE | re.I
)
slideshare_link = 'http://www.slideshare.net/api/oembed/2?url={0}&format=json'
def linkify(text):
"""
Search for url in text and return link tag with this url.
"""
match = url_re.findall(text)
if len(match) != 1:
return
link = match[0]
link_re = re.compile(
ur'\s*[\-\:\-\—\—\―]?\s*' + re.escape(link), re.UNICODE
)
text = link_re.sub('', text)
text = re.sub(r'</?p>', '', text)
text = re.sub(r'\n+', '<br><br>', text)
return u'<a href="{0}">{1}</a>'.format(link, text)
def get_embed(text):
"""
Get embed links from text.
"""
result = []
matches = url_re.findall(text)
if matches:
for url in matches:
match = youtube_re.match(url)
if match:
result.append(youtube_link.format(match.group(1)))
continue
match = vimeo_re.match(url)
if match:
result.append(vimeo_link.format(match.group(1)))
match = slideshare_re.match(url)
if match:
response = requests.get(slideshare_link.format(url))
if not response or response.status_code != 200:
continue
slideshare_data = response.json()
if not slideshare_data or 'html' not in slideshare_data:
continue
result.append(
re.sub(r'>\s*</iframe>.*', ' class="slides"></iframe>',
slideshare_data['html'])
)
return result
def str2date(text):
"""
Convert facebook date from string to date object.
"""
text_date = re.sub(r'T.*', '', text)
return datetime.datetime.strptime(text_date, '%Y-%m-%d').date()
class FacebookError(Exception):
pass
class Facebook(object):
"""
Work with Facebook API.
"""
app_id = None
app_secret = None
group_id = None
access_token = None
data = None
def __init__(self, app_id, app_secret, group_id):
self.app_id = app_id
self.app_secret = app_secret
self.group_id = group_id
self.access_token = None
def get_access_token(self):
"""
Get Facebook access token.
"""
access_token_url = 'https://graph.facebook.com/oauth/access_token'
params = {
'client_id': self.app_id,
'client_secret': self.app_secret,
'grant_type': 'client_credentials',
}
response = requests.get(access_token_url, params=params)
if response.status_code != 200:
raise FacebookError('Wrong auth response status code: {0}'.format(
response.status_code
))
if not response.text.startswith('access_token='):
raise FacebookError('Wrong auth respoce: {0}'.format(
response.text
))
self.access_token = response.text
def get_posts(self, since, until):
"""
Get group posts.
"""
# import ipdb; ipdb.set_trace() # Achtung!
since_timestamp = int(time.mktime(since.timetuple()))
until_timestamp = int(time.mktime(until.timetuple()))
if self.access_token is None:
self.get_access_token()
feed_url = (
'https://graph.facebook.com/v2.2/{0}?fields=feed'
'.since({1}).until({2}).limit(9999).fields('
'id,attachments,full_picture,from,message,picture,link,name,'
'caption,description,created_time,updated_time,'
'comments.limit(9999).fields(created_time,message))&{3}'
).format(
self.group_id,
since_timestamp,
until_timestamp,
self.access_token
)
result = []
counter = 0
while True:
counter += 1
if counter > 100:
raise FacebookError('Too many requests')
response = requests.get(feed_url)
if response.status_code != 200:
raise FacebookError(
'Wrong feed response status code: {0}'.format(
response.status_code
)
)
try:
data_json = response.json()
except Exception:
raise FacebookError('Wrong feed response: {0}'.format(
response.text
))
else:
if 'feed' in data_json:
data = data_json['feed']
else:
data = data_json
if 'data' not in data or not data['data']:
if result:
break
raise FacebookError('Empty feed response: {0}'.format(
response.text
))
is_enough = False
if 'paging' in data and 'next' in data['paging']:
feed_url = data['paging']['next']
else:
is_enough = True
for post in data['data']:
if 'updated_time' not in post or not post['updated_time']:
continue
if str2date(post['updated_time']) > until:
continue
if str2date(post['updated_time']) < since:
is_enough = True
break
result.append(post)
if is_enough:
break
return result
def get_digest(since, until):
"""
Get digest page.
"""
facebook_api = Facebook(
app_id=app.config['FACEBOOK_APP_ID'],
app_secret=app.config['FACEBOOK_APP_SECRET'],
group_id=app.config['FEED_ID']
)
try:
posts = facebook_api.get_posts(since=since, until=until)
except FacebookError:
return
result = []
for post in posts:
name = post.get('name')
link = post.get('link')
message = post.get('message')
if name:
name = re.sub(r'\n+', ' ', name)
if not link and message:
match = url_re.findall(message)
if len(match) > 0:
link = match[0]
if message:
message = re.sub(r'</?p>', '', message)
message = re.sub(r'\n+', '<br><br>', message)
message = re.sub(r'<', '<', message)
message = re.sub(r'>', '>', message)
if link:
link_re = re.compile(
ur'\s*[\-\:\-\—\—\―]?\s*' + re.escape(link), re.UNICODE
)
message = link_re.sub('', message)
embeds = []
if link:
embeds.extend(get_embed(link))
if message:
embeds.extend(get_embed(message))
post_comments = post.get('comments')
if post_comments and 'data' in post_comments:
post_comments = post_comments['data']
else:
post_comments = []
comments = []
for comment in post_comments:
comment_time = comment.get('created_time')
if not comment_time:
continue
comment_time = re.sub(r'T.*', '', comment_time)
comment_time = datetime.datetime.strptime(comment_time, '%Y-%m-%d')
if comment_time.date() < since:
continue
# if comment_time.date() > until:
# continue
comment_text = comment.get('message', '')
comment_embed = get_embed(comment_text)
if comment_embed:
embeds.extend(comment_embed)
continue
comment_link = linkify(comment_text)
if comment_link:
comments.append(comment_link)
data = {
'name': name,
'link': link,
'message': message,
'comments': comments,
'embeds': list(set(embeds)),
'is_old': bool(str2date(post['created_time']) < since),
}
result.append(data)
return result
@app.errorhandler(404)
def error_not_found(e):
"""
View '404 Page not found' error.
"""
return render_template('error.html', code=404), 404
@app.errorhandler(500)
def error_server(e):
"""
View '500 Server error' error.
"""
return render_template('error.html', code=500), 500
@app.route('/', methods=['GET'])
def index():
"""
Get index page.
"""
since = request.args.get('since', None)
until = request.args.get('until', None)
pdigest = None
if since:
if until:
try:
until = datetime.datetime.strptime(until, '%Y-%m-%d').date()
except (ValueError, TypeError):
until = None
try:
since = datetime.datetime.strptime(since, '%Y-%m-%d').date()
except (ValueError, TypeError):
since = None
else:
pdigest = get_digest(since, until)
return render_template(
'index.html',
since=since,
until=until,
pdigest=pdigest
)
if __name__ == '__main__':
app.run()
| true |
aa9185098d6d57154124951e3d6493f3482006c7 | Python | JoshuaShin/A01056181_1510_assignments | /A4/test_delete_student.py | UTF-8 | 688 | 2.65625 | 3 | [] | no_license | import io
from unittest.mock import patch
from unittest import TestCase
import crud
class TestDeleteStudent(TestCase):
@patch('builtins.input', side_effect=["test", "test", "t12345678", "True", "", "t12345678"])
def test_delete_student(self, mock_input):
crud.file_write([])
crud.add_student()
crud.delete_student()
self.assertEqual(0, len(crud.file_read()))
@patch('builtins.input', side_effect=["invalid_student_number"])
@patch('sys.stdout', new_callable=io.StringIO)
def test_delete_student_invalid(self, mock_stdout, mock_input):
crud.delete_student()
self.assertTrue("does not exist" in mock_stdout.getvalue())
| true |
0fbbbc4dba693c1c73b0c8d10571fc157c816a80 | Python | jy02sung/PythonPractice | /공튕기기.py | UTF-8 | 1,436 | 3.625 | 4 | [] | no_license | from tkinter import *
import time
import random
class Ball:
def __init__(self,canvas,color,size,x,y,xspeed,yspeed):
self.canvas=canvas
self.color=color
self.size=size
self.x=x
self.y=y
self.xspeed=xspeed
self.yspeed=yspeed
self.id=canvas.create_oval(x,y,x+size,y+size,fill=color)
def move(self):
self.canvas.move(self.id,self.xspeed,self.yspeed)
(x1,y1,x2,y2)=self.canvas.coords(self.id)
(self.x,self.y)=(x1,y1)
if x1<=0 or x2>=800:
self.xspeed=-self.xspeed
if y1<=0 or y2>=800:
self.yspeed=-self.yspeed
window=Tk()
canvas=Canvas(window,width=800,height=800)
canvas.pack()
color_list=['yellow','green','purple','blue','white']
balls_list=[]
for x in range(5):
color=random.choice(color_list)
size=random.randint(10,100)
X=random.randint(1,20)
Y=random.randint(1,20)
XS=random.randint(1,10)
YS=random.randint(1,10)
balls_list.append(Ball(canvas,color,size,X,Y,XS,YS))
ballE=Ball(canvas,'red',50,30,30,XS,YS)
#print('Ball color?',ballE.color)
#print('Ball Size?',ballE.size)
#print('Ball X?',ballE.x)
#print('')
#ballA=Ball('red',30,50,50,10,10)
#print('Ball color?',ballA.color)
#print('Ball Size?',ballA.size)
#print('Ball X?',ballA.x)
#print('')
while True:
for ball in balls_list:
ball.move()
ballE.move()
window.update()
time.sleep(0.03) | true |
cd6fa581d0f10713c07c085c9dbc37f086054354 | Python | Kawser-nerd/CLCDSA | /Source Codes/AtCoder/abc005/B/4901671.py | UTF-8 | 71 | 3.03125 | 3 | [] | no_license | n=int(input())
t=[int(input()) for i in range(n)]
print(sorted(t)[0]) | true |
9e00bcda142983cda565b9de438576cdccb34f3f | Python | zahra-alizadeh/Naive-Bayes | /project.py | UTF-8 | 3,561 | 3 | 3 | [] | no_license | import csv
import requests
from bs4 import BeautifulSoup
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import string
URL = 'https://mstajbakhsh.ir'
pagination = f'https://mstajbakhsh.ir/page/'
# get data from mstajbakhsh.ir
def scraping():
fileWriter = csv.writer(open('datasetfCat.csv', mode='w', encoding='utf-8'), delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
fileWriter.writerow(['category', 'text'])
nextPageURL = f'https://mstajbakhsh.ir/page/'
page = requests.get(URL)
content = BeautifulSoup(page.content, 'html.parser')
pageCount = content.select('div.pagination-centered ul li')
for i in range(2, len(pageCount) + 1):
content = BeautifulSoup(page.content, 'html.parser')
articlesLink = content.select('article div.post-actions a')
for j in range(0, len(articlesLink)):
url = articlesLink[j]['href']
print(url)
page = requests.get(url)
content = BeautifulSoup(page.content, 'html.parser')
postsCategories = content.select('article header.post-head ul.post-category a')
text = content.find('article')
text = text.text.strip().lower()
text = cleanData(text)
category = []
for postCategories in postsCategories:
category.append(postCategories.text.strip())
fileWriter.writerow([category[0], text])
nextPageURL = pagination + str(i)
print(nextPageURL)
page = requests.get(nextPageURL)
# clean and preprocess of data
def cleanData(data):
tokens = word_tokenize(data)
stopWords = set(stopwords.words('english'))
stopWords.add("'\\n")
stopWords.add("==")
stopWords.add("mir")
stopWords.add("saman")
stopWords.add("m")
stopWords.add("'m")
stopWords.add("phd")
stopWords.add("''")
stopWords.add("’")
tokens_without_sw = [word.lower() for word in tokens if not word in stopWords and not word in string.punctuation]
text = " ".join(tokens_without_sw)
return text
# train and test data
def training():
data = pd.read_csv('datasetfCat.csv', encoding='utf-8')
categories = data.category.value_counts()
# print(categories)
x_train, x_test, y_train, y_test = train_test_split(data.text, data.category, test_size=0.2)
print(f'train data size : {len(x_train)}')
print(f'test data size : {len(x_test)}')
vectorizer = CountVectorizer(binary=True)
x_train_vect = vectorizer.fit_transform(x_train)
nb = MultinomialNB()
model = nb.fit(x_train_vect, y_train)
score = nb.score(x_train_vect, y_train)
print(score)
x_test_vect = vectorizer.transform(x_test)
prediction = nb.predict(x_test_vect)
# print(f'predicted categories : {prediction}')
# print(f'test categories real value : {y_test}')
print("Accuracy: {:.2f}%".format(accuracy_score(y_test, prediction) * 100))
URL = input("Enter a url : ")
page = requests.get(URL)
content = BeautifulSoup(page.content, 'html.parser')
data = cleanData(content.text)
data_series = pd.Series(data)
test_vect = vectorizer.transform(data_series)
predictedValue = model.predict(test_vect)
print(predictedValue)
# scraping()
training()
| true |
fbe46246ba0440b6c063b0970d6bbe01c9cc45f6 | Python | psranga/jpp | /jpp | UTF-8 | 929 | 3.015625 | 3 | [] | no_license | #!/usr/bin/python
# supports #include
#
# reads from stdin and writes to stdout
import sys, re
def copy_file(fn, ofh):
try:
fh = open(fn, 'r')
except IOError:
ofh.write('// Error opening: ' + fn)
sys.stderr.write('Error opening: ' + fn + '\n')
for line in fh:
ofh.write(line)
if not line.endswith('\n'):
sys.stderr.write('adding newline in file: ' + fn + '.\n')
ofh.write('\n')
def main(ifh, ofh):
prog = re.compile('^//\s+#include\s+.([a-zA-z0-9_/\.]+)')
for line in ifh:
if line.startswith('//'):
m = prog.search(line)
if m:
fn = m.group(1)
ofh.write(line)
# if last line of input has #include and does not end with a newline
if not line.endswith('\n'):
ofh.write('\n')
copy_file(fn, ofh)
ofh.write('// End of #include\n')
else:
ofh.write(line)
if __name__ == '__main__':
main(sys.stdin, sys.stdout)
| true |
3cc2ba21f3c0d83b00cba18ec8886ccfe2b98da1 | Python | jpn--/popgen | /popgen/config.py | UTF-8 | 2,139 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | from __future__ import print_function
import yaml
class ConfigError(Exception):
pass
def wrap_config_value(value):
"""The method is used to wrap YAML elements as Config objects. So the
YAML properties can be accessed using attribute access.
E.g. If config object - x for is specificed as the following YAML:
attribbute1:
attribute2 : 'Value'
then attribute access x.attribute1.attribute2 is used to access "Value".
Also, x.attribute can be used to access the dictionary {attribute: 'value'}
"""
if isinstance(value, str):
return value
try:
return value + 0
except TypeError:
pass
return Config(value)
class Config(object):
"""The class returns a Config object that can be used to access the
different YAML elements used to specify the PopGen project.
"""
def __init__(self, data):
self._data = data
def __getattr__(self, key):
value = self.return_value(key)
return wrap_config_value(value)
def __getitem__(self, key):
value = self.return_value(key)
return wrap_config_value(value)
def return_value(self, key):
try:
value = self._data[key]
except KeyError as e:
raise ConfigError(
"Key - %s doesn't exist in the YAML configuration" % key)
return value
def __len__(self):
return len(self._data)
def __repr__(self):
return self._data.__repr__()
def return_list(self):
data_list = []
for i in self._data:
data_list.append(i)
return data_list
def return_dict(self):
return self._data
def write_to_file(self, filepath):
with open(filepath, 'w') as outfile:
outfile.write(yaml.dump(self._data,
default_flow_style=False))
if __name__ == "__main__":
import yaml
yaml_f = file("../demo/bmc/configuration.yaml", "r")
config_dict = yaml.load(yaml_f)
config_obj = Config(config_dict)
print (config_obj.project.name)
print (config_obj["project"]["name"])
| true |
e1ef3b2f23651d7ed731c5092d5d100c555e25d8 | Python | kolibril13/tricks_for_python | /style_dynamic_typing_with_typehints.py | UTF-8 | 514 | 3.515625 | 4 | [] | no_license | from typing import Callable,List,Dict, Any
def factorial(i:int) -> int:
if i < 0:
return None
if i==0:
return 1
if i >0:
return i*factorial(i-1)
def map_my_list(func:Callable,l:List[int])-> List[int]:
l2= [func(i) for i in l]
return l2
def map_my_dict(func:Callable,dic:Dict[Any,int]) -> Dict:
d2= {key:func(value) for key,value in dic.items()}
return d2
print(factorial(12))
# print(map_my_list(factorial,[1,2,3]))
print(map_my_dict(factorial,{"a":2,"b":3})) | true |
65807d196e00b57f937569027a2a0f6a300ef5a4 | Python | changjinhan/Algorithm-study | /ch7/array_partition_1.py | UTF-8 | 515 | 3.15625 | 3 | [] | no_license | import collections
import heapq
import functools
import itertools
import re
import sys
import math
import bisect
from typing import *
def arrayPairSum(nums: List[int]) -> int:
return sum(sorted(nums)[::2]) # 한 줄로 pythonic 하게 풀이
if __name__ == "__main__":
with open("../input/array_partition_1.txt", "r") as f:
data = f.read().splitlines()
for x in data:
nums = x
nums = list(map(int, nums.strip("[]").split(",")))
print(arrayPairSum(nums)) | true |
cce8a8014fba44ed330dbe542cf593f155936ebf | Python | OCHA-DAP/hdx-python-country | /src/hdx/location/currency.py | UTF-8 | 17,159 | 2.578125 | 3 | [
"MIT"
] | permissive | """Currency conversion"""
import logging
from datetime import datetime, timezone
from typing import Dict, Optional, Union
from hdx.utilities.dateparse import (
get_timestamp_from_datetime,
now_utc,
parse_date,
)
from hdx.utilities.dictandlist import dict_of_dicts_add
from hdx.utilities.downloader import Download, DownloadError
from hdx.utilities.path import get_temp_dir
from hdx.utilities.retriever import Retrieve
logger = logging.getLogger(__name__)
class CurrencyError(Exception):
    """Raised when an fx rate cannot be obtained for a currency from any source."""

    pass
class Currency:
    """Currency class for performing currency conversion. Uses Yahoo, falling back on
    exchangerate.host for current rates and Yahoo falling back on IMF for historic
    rates. Note that rate calls are cached.
    """

    # Yahoo Finance chart API endpoint; {currency} and {date} are substituted
    # per request. Fixed mojibake: "&reg" had been corrupted to "®", mangling
    # the "region=US" query parameter.
    _primary_rates_url = "https://query2.finance.yahoo.com/v8/finance/chart/{currency}=X?period1={date}&period2={date}&interval=1d&events=div%2Csplit&formatted=false&lang=en-US&region=US&corsDomain=finance.yahoo.com"
    # exchangerate.host current rates, quoted against USD
    _secondary_rates_url = "https://api.exchangerate.host/latest?base=usd"
    # IMF historic rates, republished as CSV by Code for IATI
    _secondary_historic_url = (
        "https://codeforiati.org/imf-exchangerates/imf_exchangerates.csv"
    )
    # State below is populated lazily by setup()
    _cached_current_rates = None  # currency -> current fx rate (USD base)
    _cached_historic_rates = None  # currency -> {timestamp: fx rate}
    _rates_api = None  # primary rates URL template in use
    _secondary_rates = None  # parsed current rates dict, or "FAIL" on error
    _secondary_historic = None  # parsed historic rates dict, or "FAIL" on error
    _fallback_to_current = False  # use current rate when historic unavailable
    _no_historic = False  # skip loading historic rates entirely
    _user_agent = "hdx-python-country-rates"
    _retriever = None  # Retrieve object used for downloads
    _log_level = logging.DEBUG
    _fixed_now = None  # fixed "now" datetime for deterministic behaviour
@classmethod
def _get_int_timestamp(cls, date: datetime) -> int:
"""
Get integer timestamp from datetime object
Args:
date (datetime): datetime object
Returns:
int: Integer timestamp
"""
return int(round(get_timestamp_from_datetime(date)))
    @classmethod
    def setup(
        cls,
        retriever: Optional[Retrieve] = None,
        primary_rates_url: str = _primary_rates_url,
        secondary_rates_url: str = _secondary_rates_url,
        secondary_historic_url: str = _secondary_historic_url,
        fallback_historic_to_current: bool = False,
        fallback_current_to_static: bool = False,
        no_historic: bool = False,
        fixed_now: Optional[datetime] = None,
        log_level: int = logging.DEBUG,
    ) -> None:
        """
        Setup the sources. If you wish to use a static fallback file by setting
        fallback_current_to_static to True, it needs to be named "secondary_rates.json"
        and put in the fallback_dir of the passed in Retriever.

        Args:
            retriever (Optional[Retrieve]): Retrieve object to use for downloading. Defaults to None (generate a new one).
            primary_rates_url (str): Primary rates url to use. Defaults to Yahoo API.
            secondary_rates_url (str): Current rates url to use. Defaults to exchangerate.host.
            secondary_historic_url (str): Historic rates url to use. Defaults to IMF (via IATI).
            fallback_historic_to_current (bool): If historic unavailable, fallback to current. Defaults to False.
            fallback_current_to_static (bool): Use static file as final fallback. Defaults to False.
            no_historic (bool): Do not set up historic rates. Defaults to False.
            fixed_now (Optional[datetime]): Use a fixed datetime for now. Defaults to None (use datetime.now()).
            log_level (int): Level at which to log messages. Defaults to logging.DEBUG.

        Returns:
            None
        """
        # Reset the caches: USD->USD is always 1
        cls._cached_current_rates = {"USD": 1}
        cls._cached_historic_rates = dict()
        cls._rates_api = primary_rates_url
        cls._secondary_rates = None
        cls._secondary_historic = None
        if retriever is None:
            # No retriever supplied: build one that works out of a temp folder
            # and neither saves nor reuses downloaded files
            downloader = Download(user_agent=cls._user_agent)
            temp_dir = get_temp_dir(cls._user_agent)
            retriever = Retrieve(
                downloader,
                None,
                temp_dir,
                temp_dir,
                save=False,
                use_saved=False,
            )
        cls._retriever = retriever
        try:
            secondary_rates = retriever.download_json(
                secondary_rates_url,
                "secondary_rates.json",
                "secondary current exchange rates",
                fallback_current_to_static,
            )
            cls._secondary_rates = secondary_rates["rates"]
        except (DownloadError, OSError):
            # "FAIL" sentinel makes later lookups return None instead of retrying
            logger.exception("Error getting secondary current rates!")
            cls._secondary_rates = "FAIL"
        cls._fixed_now = fixed_now
        cls._log_level = log_level
        if no_historic:
            cls._no_historic = True
        if cls._no_historic:
            # Historic rates disabled (now or by an earlier setup call): stop here
            return
        try:
            _, iterator = retriever.get_tabular_rows(
                secondary_historic_url,
                dict_form=True,
                filename="historic_rates.csv",
                logstr="secondary historic exchange rates",
            )
            cls._secondary_historic = dict()
            for row in iterator:
                # Build currency -> {timestamp: rate} mapping from the CSV rows
                currency = row["Currency"]
                date = cls._get_int_timestamp(parse_date(row["Date"]))
                rate = float(row["Rate"])
                dict_of_dicts_add(
                    cls._secondary_historic, currency, date, rate
                )
        except (DownloadError, OSError):
            # Same "FAIL" sentinel pattern as for current rates above
            logger.exception("Error getting secondary historic rates!")
            cls._secondary_historic = "FAIL"
        cls._fallback_to_current = fallback_historic_to_current
@classmethod
def _get_primary_rates_data(
cls, currency: str, timestamp: int, downloader=None
) -> Optional[Dict]:
"""
Get the primary fx rate data for currency
Args:
currency (str): Currency
timestamp (int): Timestamp to use for fx conversion
Returns:
Optional[float]: fx rate or None
"""
if cls._rates_api is None:
Currency.setup()
url = cls._rates_api.format(currency=currency, date=str(timestamp))
if downloader is None:
downloader = cls._retriever
try:
chart = downloader.download_json(url, log_level=cls._log_level)[
"chart"
]
if chart["error"] is not None:
return None
return chart["result"][0]
except (DownloadError, KeyError):
return None
@classmethod
def _get_primary_rate(
cls, currency: str, timestamp: Optional[int] = None
) -> Optional[float]:
"""
Get the primary current fx rate for currency ofr a given timestamp. If no timestamp is supplied,
datetime.now() will be used unless fixed_now was passed in the constructor.
Args:
currency (str): Currency
timestamp (Optional[int]): Timestamp to use for fx conversion. Defaults to None (datetime.now())
Returns:
Optional[float]: fx rate or None
"""
if timestamp is None:
if cls._fixed_now:
now = cls._fixed_now
get_close = True
else:
now = now_utc()
get_close = False
timestamp = cls._get_int_timestamp(now)
else:
get_close = True
data = cls._get_primary_rates_data(currency, timestamp)
if not data:
return None
if get_close:
adjclose = data["indicators"]["adjclose"][0].get("adjclose")
if adjclose is None:
return None
return adjclose[0]
return data["meta"]["regularMarketPrice"]
@classmethod
def _get_secondary_current_rate(cls, currency: str) -> Optional[float]:
"""
Get the secondary current fx rate for currency
Args:
currency (str): Currency
Returns:
Optional[float]: fx rate or None
"""
if cls._secondary_rates is None:
Currency.setup()
if cls._secondary_rates == "FAIL":
return None
return cls._secondary_rates.get(currency)
@classmethod
def get_current_rate(cls, currency: str) -> float:
"""
Get the current fx rate for currency
Args:
currency (str): Currency
Returns:
float: fx rate
"""
currency = currency.upper()
if cls._cached_current_rates is None:
Currency.setup()
fx_rate = cls._cached_current_rates.get(currency)
if fx_rate is not None:
return fx_rate
fx_rate = cls._get_primary_rate(currency)
if fx_rate is not None:
cls._cached_current_rates[currency] = fx_rate
return fx_rate
fx_rate = cls._get_secondary_current_rate(currency)
if fx_rate is not None:
logger.warning(f"Using secondary current rate for {currency}!")
cls._cached_current_rates[currency] = fx_rate
return fx_rate
raise CurrencyError(f"Failed to get rate for currency {currency}!")
@classmethod
def get_current_value_in_usd(
cls, value: Union[int, float], currency: str
) -> float:
"""
Get the current USD value of the value in local currency
Args:
value (Union[int, float]): Value in local currency
currency (str): Currency
Returns:
float: Value in USD
"""
currency = currency.upper()
if currency == "USD":
return value
fx_rate = cls.get_current_rate(currency)
return value / fx_rate
@classmethod
def get_current_value_in_currency(
cls, usdvalue: Union[int, float], currency: str
) -> float:
"""
Get the current value in local currency of the value in USD
Args:
usdvalue (Union[int, float]): Value in USD
currency (str): Currency
Returns:
float: Value in local currency
"""
currency = currency.upper()
if currency == "USD":
return usdvalue
fx_rate = cls.get_current_rate(currency)
return usdvalue * fx_rate
@classmethod
def _get_interpolated_rate(
cls,
timestamp1: int,
rate1: float,
timestamp2: int,
rate2: float,
desired_timestamp: int,
) -> float:
"""
Return a rate for a desired timestamp based on linearly interpolating between
two timestamp/rate pairs.
Args:
timestamp1 (int): First timestamp to use for fx conversion
rate1 (float): Rate at first timestamp
timestamp2 (int): Second timestamp to use for fx conversion
rate2 (float): Rate at second timestamp
desired_timestamp (int): Timestamp at which rate is desired
Returns:
float: Rate at desired timestamp
"""
return rate1 + (desired_timestamp - timestamp1) * (
(rate2 - rate1) / (timestamp2 - timestamp1)
)
    @classmethod
    def _get_secondary_historic_rate(
        cls, currency: str, timestamp: int
    ) -> Optional[float]:
        """
        Get the secondary fx rate for currency on a particular date.

        Interpolates linearly between the nearest surrounding dates when there
        is no rate for the exact timestamp, and clamps to the earliest/latest
        known rate when the timestamp falls outside the available range.

        Args:
            currency (str): Currency
            timestamp (int): Timestamp to use for fx conversion

        Returns:
            Optional[float]: fx rate or None
        """
        if cls._secondary_historic is None:
            Currency.setup()
        if cls._secondary_historic == "FAIL":
            # setup() could not download the historic rates
            return None
        currency_data = cls._secondary_historic.get(currency)
        if currency_data is None:
            return None
        fx_rate = currency_data.get(timestamp)
        # NOTE(review): truthiness test means a rate of exactly 0.0 would fall
        # through to interpolation - presumably rates are never zero
        if fx_rate:
            return fx_rate
        # Scan for the timestamps bracketing the requested one. Assumes the
        # keys are in ascending date order (insertion order from the source
        # CSV built in setup()) - TODO confirm against the CSV's row ordering
        timestamp1 = None
        timestamp2 = None
        for ts in currency_data.keys():
            if timestamp > ts:
                timestamp1 = ts
            else:
                timestamp2 = ts
                break
        if timestamp1 is None:
            if timestamp2 is None:
                return None
            # Requested time precedes all data: use earliest known rate
            return currency_data[timestamp2]
        if timestamp2 is None:
            # Requested time follows all data: use latest known rate
            return currency_data[timestamp1]
        return cls._get_interpolated_rate(
            timestamp1,
            currency_data[timestamp1],
            timestamp2,
            currency_data[timestamp2],
            timestamp,
        )
    @classmethod
    def get_historic_rate(
        cls, currency: str, date: datetime, ignore_timeinfo: bool = True
    ) -> float:
        """
        Get the fx rate for currency on a particular date. Any time and time zone
        information will be ignored by default (meaning that the time is set to 00:00:00
        and the time zone set to UTC). To have the time and time zone accounted for,
        set ignore_timeinfo to False. This may affect which day's closing value is used.

        Lookup order: in-memory cache, primary source, secondary source, and
        (if enabled) the current rate as a last resort.

        Args:
            currency (str): Currency
            date (datetime): Date to use for fx conversion
            ignore_timeinfo (bool): Ignore time and time zone of date. Defaults to True.
        Returns:
            float: fx rate
        Raises:
            CurrencyError: If no rate could be obtained from any source.
        """
        currency = currency.upper()
        if currency == "USD":
            # USD is the base currency, so the rate is 1 by definition.
            return 1
        # Lazily initialise the cache on first use.
        if cls._cached_historic_rates is None:
            Currency.setup()
        currency_data = cls._cached_historic_rates.get(currency)
        if ignore_timeinfo:
            # Normalise to midnight UTC so all lookups for a day share a key.
            date = date.replace(
                hour=0, minute=0, second=0, microsecond=0, tzinfo=timezone.utc
            )
        else:
            date = date.astimezone(timezone.utc)
        timestamp = cls._get_int_timestamp(date)
        # 1) Cached rate for this currency/timestamp.
        if currency_data is not None:
            fx_rate = currency_data.get(timestamp)
            if fx_rate is not None:
                return fx_rate
        # 2) Primary source; cache on success.
        fx_rate = cls._get_primary_rate(currency, timestamp)
        if fx_rate is not None:
            dict_of_dicts_add(
                cls._cached_historic_rates, currency, timestamp, fx_rate
            )
            return fx_rate
        # 3) Secondary source; cache on success.
        fx_rate = cls._get_secondary_historic_rate(currency, timestamp)
        if fx_rate is not None:
            dict_of_dicts_add(
                cls._cached_historic_rates, currency, timestamp, fx_rate
            )
            return fx_rate
        # 4) Optional fallback to today's rate (logged, not cached).
        if cls._fallback_to_current:
            fx_rate = cls.get_current_rate(currency)
            if fx_rate:
                logger.warning(
                    f"Falling back to current rate for currency {currency} on date {date.isoformat()}!"
                )
                return fx_rate
        raise CurrencyError(
            f"Failed to get rate for currency {currency} on date {date.isoformat()}!"
        )
@classmethod
def get_historic_value_in_usd(
cls,
value: Union[int, float],
currency: str,
date: datetime,
ignore_timeinfo: bool = True,
) -> float:
"""
Get the USD value of the value in local currency on a particular date. Any time
and time zone information will be ignored by default (meaning that the time is
set to 00:00:00 and the time zone set to UTC). To have the time and time zone
accounted for, set ignore_timeinfo to False. This may affect which day's closing
value is used.
Args:
value (Union[int, float]): Value in local currency
currency (str): Currency
date (datetime): Date to use for fx conversion
ignore_timeinfo (bool): Ignore time and time zone of date. Defaults to True.
Returns:
float: Value in USD
"""
currency = currency.upper()
if currency == "USD":
return value
fx_rate = cls.get_historic_rate(
currency, date, ignore_timeinfo=ignore_timeinfo
)
return value / fx_rate
@classmethod
def get_historic_value_in_currency(
cls,
usdvalue: Union[int, float],
currency: str,
date: datetime,
ignore_timeinfo: bool = True,
) -> float:
"""
Get the current value in local currency of the value in USD on a particular
date. Any time and time zone information will be ignored by default (meaning
that the time is set to 00:00:00 and the time zone set to UTC). To have the time
and time zone accounted for, set ignore_timeinfo to False. This may affect which
day's closing value is used.
Args:
value (Union[int, float]): Value in USD
currency (str): Currency
date (datetime): Date to use for fx conversion
ignore_timeinfo (bool): Ignore time and time zone of date. Defaults to True.
Returns:
float: Value in local currency
"""
currency = currency.upper()
if currency == "USD":
return usdvalue
fx_rate = cls.get_historic_rate(
currency, date, ignore_timeinfo=ignore_timeinfo
)
return usdvalue * fx_rate
| true |
781f394770d1945c00cde95c301ec1b58922b8b8 | Python | K4CZP3R/minecraft-server-status | /common_modules/status_repo.py | UTF-8 | 1,804 | 2.796875 | 3 | [] | no_license | import pymongo
class StatusRepo:
    """Read-only access to player status events stored in MongoDB."""

    def __init__(self, url):
        # The connection is opened lazily by connect()/check_connection().
        self.url = url
        self.client = None
        self.collection = None
        self.database = None

    def connect(self):
        """Open the MongoDB connection and bind the PlayerStatus collection."""
        self.client = pymongo.MongoClient(self.url)
        self.database = self.client["xyz_k4czp3r_oeloeserver"]
        self.collection = self.database['PlayerStatus']

    def check_connection(self) -> bool:
        """Connect on first use; report whether a connection already existed."""
        if self.client is not None and self.collection is not None:
            return True
        self.connect()
        return False

    def get_data(self):
        """Return, per player, the name and the (up to two) most recent events."""
        self.check_connection()
        # Group all events by player id, remembering the player's name and
        # collecting [time, type, name] log records.
        by_player = {}
        for event in self.collection.find({}):
            pid = event['playerId']
            entry = by_player.setdefault(pid, {})
            entry.setdefault("name", event["playerName"])
            entry.setdefault("logs", []).append(
                [event['timeOfEvent'], event['type'], event['playerName']])
        summaries = []
        for pid, entry in by_player.items():
            # Newest events first.
            ordered = sorted(entry["logs"], key=lambda rec: rec[0], reverse=True)
            take = 1 if len(ordered) < 2 else 2
            summaries.append({
                "player_id": pid,
                "player_name": entry["name"],
                "logs": [{"action": rec[1], "time": rec[0]} for rec in ordered[:take]],
            })
        return summaries
| true |
505e1444f7bff8eb4f7d4b69319e1b0ffc7edb90 | Python | milim328/python-study | /모두의 파이썬 프로젝트2.py | UTF-8 | 1,471 | 3.765625 | 4 | [] | no_license | #타자게임
# Typing game.
# When the game starts, an English animal word (picked at random from a
# list) is shown on screen.
# The player must type it as quickly and accurately as possible: a correct
# entry moves on to the next word, a typo shows the same word again.
# Time keeps running while mistakes are retyped, so accuracy matters as
# much as speed.
# Preparation: import the modules the game needs.
# Main loop: show a word, read the player's input, repeat -- a while loop
# is used because wrong answers must be retried.
# Result: print the elapsed typing time to two decimal places.
import time
import random
# Word list for the quiz.
w = ["cat", "dog", "fox","monkey","mouse","panda","frog","snake","wolf"]
n=1 # current question number (1..5)
print("타자게임- 준비되면 엔터")
input()
start = time.time() # record the start time
q= random.choice(w)
while n <= 5:
    print("문제",n)
    print(q)
    x= input()
    if q == x:
        print("통과!")
        n = n+1
        q = random.choice(w)
    else:
        print("오타 : 재입력")
end = time.time()
et = end - start
et = format(et, ".2f") # format to two decimal places for display
print("타자시간:", et, "초")
| true |
0f92f873c0c913afb6dccf36fce2be2780a4bfd9 | Python | qoire/INCDS | /dev/GUI/GUI/main.py | UTF-8 | 2,883 | 2.5625 | 3 | [] | no_license | import sys
import mainwindow #our mainwindow containing definitions for GUI
import multiprocessing
import retrieverthread
import numpy as np
from PyQt4 import QtCore, QtGui, uic
form_class = uic.loadUiType("mainwindow.ui")[0]
class MainWindowClass(QtGui.QMainWindow, form_class):
    """Main window showing the live signal plus derived phase/amplitude plots.

    The widgets referenced here (liveView, phaseView, amplitudeView and the
    menu actions) are created by setupUi() from the Designer file loaded
    into ``form_class``.
    """
    def __init__(self, parent=None):
        QtGui.QMainWindow.__init__(self, parent)
        self.isWindowFullScreen = False
        self.setupUi(self)
        # Buffers for incoming phase/amplitude samples.
        self.np_phase = []
        self.np_amplitude = []
        # Shared default axis ranges.
        self.x_lower = 0
        self.x_upper = 10
        self.y_lower = 0
        self.y_upper = 10
        # Menu actions for switching the display mode.
        self.actionFullscreen.triggered.connect(self.actFullScreen_triggered)
        self.actionWindowed.triggered.connect(self.actWindowed_triggered)
        # Configure the plot views, then start the data retriever thread.
        self.setPlotAmplitude()
        self.setCurves()
        self.startRecieverThread()
        # Old-style signal connections from the retriever thread.
        QtCore.QObject.connect(self.retthread, QtCore.SIGNAL("newData(PyQt_PyObject)"), self.recieverThreadHandler)
        QtCore.QObject.connect(self.retthread, QtCore.SIGNAL("newPhaseData(PyQt_PyObject)"), self.recieverThreadPhaseHandler)
        # BUG FIX: the signature string was missing its closing parenthesis
        # ("newAmplitudeData(PyQt_PyObject"), so this connection silently
        # never matched and amplitude updates were dropped.
        QtCore.QObject.connect(self.retthread, QtCore.SIGNAL("newAmplitudeData(PyQt_PyObject)"), self.recieverThreadAmplitudeHandler)
    def setPlotAmplitude(self):
        """Configure grid, axis ranges and axis labels of the live plot."""
        plot = self.liveView.getPlotItem()
        plot.showGrid(x=True, y=True)
        plot.setXRange(0, 0.01)
        plot.setYRange(-0.5, 0.5)
        plot.setLabel('bottom', 'Time')
        plot.setLabel('left', 'Amplitude')
    def setCurves(self):
        """Create one (initially flat) curve per plot widget."""
        self.live_curve = self.liveView.plot(np.linspace(0, 0, 100))
        self.phase_curve = self.phaseView.plot(np.linspace(0, 0, 100))
        self.amplitude_curve = self.amplitudeView.plot(np.linspace(0, 0, 100))
    def actFullScreen_triggered(self):
        self.showFullScreen()
    def actWindowed_triggered(self):
        self.showNormal()
    # Thread handler section
    def startRecieverThread(self):
        """Spawn the retriever thread that feeds data to the handlers below."""
        self.retthread = retrieverthread.RetrieverThread(self.liveView)
        self.retthread.start()
    def recieverThreadHandler(self, npdata):
        """Plot a fresh chunk of raw samples against a 0..10 ms time axis."""
        time_calc = np.linspace(0, 0.01, npdata.size)
        self.live_curve.setData(x=time_calc, y=npdata)
    def recieverThreadPhaseHandler(self, floatdata):
        # NOTE(review): the accumulating buffer below was disabled; the
        # curve is redrawn from the incoming payload directly.
        #self.np_phase.append(floatdata)
        self.phase_curve.setData(floatdata)
    def recieverThreadAmplitudeHandler(self, floatdata):
        #self.np_phase.append(floatdata)
        self.amplitude_curve.setData(floatdata)
if __name__ == "__main__":
    # Create the Qt application, show the main window (which starts the
    # receiving thread in its constructor) and enter the event loop.
    app = QtGui.QApplication(sys.argv)
    mainWindow = MainWindowClass()
    mainWindow.show()
    app.exec_()
| true |
c6c245a15c54f6bc04060af013c4d77464cda4d8 | Python | suchetsapre/CodeBlocks | /main.py | UTF-8 | 738 | 3.59375 | 4 | [] | no_license | #example of how one conditional structure would work
import conditionstruc as cs
import whilestruc as ws
import conditionblock as cb
# Counter referenced by name inside action3's source string.
arg = 0
# Actions are stored as Python source strings that the blocks run later.
action1 = 'print(\'action1\')'
action2 = 'print(\'action2\')'
action3 = 'print(\'action3\')\narg+=1'
block1 = cb.ConditionBlock("IF", action1)
block2 = cb.ConditionBlock("ELSE", action2)
block3 = cb.ConditionBlock("WHILE", action3)
# Conditions are also stored as source strings and evaluated with eval().
cond1 = '5+5==11'
cond2 = 'arg<10'
structure1 = cs.ConditionStructure(block1, block2, cond1)
structure2 = ws.WhileStructure(block3, cond2, arg)
# NOTE(review): eval() of stored source strings runs arbitrary code --
# safe only while the strings are hard-coded as above.
if eval(structure1.cond) == True:
    structure1.if_block.runAction()
elif eval(structure1.cond) == False:
    structure1.else_block.runAction()
# NOTE(review): this loop only terminates if runAction() actually
# increments the module-level 'arg' that cond2 reads -- confirm in
# conditionblock's implementation.
while eval(structure2.cond) == True:
    structure2.while_block.runAction()
| true |
b548a26e6d7d490c146a96f10b9683c189d5d4ff | Python | chzp471025707/001 | /001/03_02.py | UTF-8 | 2,691 | 3.390625 | 3 | [] | no_license | import tensorflow as tf
#通过numpy工具包生成模拟数据集
from numpy.random import RandomState
# 1. 定义神经网络的参数,输入和输出节点
#训练数据batch的大小(一次训练模型,投入的样例数,本该一次性投入所有样例,为了防止内存泄漏设定batch)
batch_size = 16
#产生随机变量,2行3列,方差为1,种子为1
w1= tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2= tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
# 数据是float32型,数据形状,行不定,列为2
#使用none作用是使用不大的batch大小。数据小时方便计算,大的话可能会导致内存溢出
x = tf.placeholder(tf.float32, shape=(None, 2), name="x-input")
# 使用placeholder的作用是把指定类型数据进行存储,在图计算时再把数据加入,
#因为使用常量的话,需要在图里添加节点,迭代百万上亿次任务效率会很低
#使用这种方法时,对输入要进行类型的约束
y_= tf.placeholder(tf.float32, shape=(None, 1), name='y-input')
# 2. 定义前向传播过程,损失函数及反向传播算法
a = tf.matmul(x, w1) #把x与w1进行乘法运算
y = tf.matmul(a, w2)
#定义损失函数(交叉熵)来刻画预测值与真实值的差距
cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
#定义反向传播的优化方法
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
# 3. 通过随机数生成一个模拟数据集
rdm = RandomState(1)
X = rdm.rand(128,2)
Y = [[int(x1+x2 < 1)] for (x1, x2) in X]
# 4. 创建一个会话来运行TensorFlow程序
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
# 输出目前(未经训练)的参数取值。
print("w1:", sess.run(w1))
print("w2:", sess.run(w2))
print("\n")
# 设定训练的轮数
STEPS = 5000
for i in range(STEPS):
#每次选择batch_size个样本进行训练
start = (i * batch_size) % 16
end = (i * batch_size) % 16 + batch_size
#使用选择的样本训练神经网络并更新参数
sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
#每隔1000次迭代计算所有数据上的交叉熵并输出
if i % 1000 == 0:
total_cross_entropy = sess.run(cross_entropy, feed_dict={x: X, y_: Y})
print("After %d training step(s), cross entropy on all data is %g" % (i, total_cross_entropy))
# 输出训练后的结果。
#结果越小,说明预测的结果与实际值之间的差距越小
print("\n")
print("w1:", sess.run(w1))
print("w2:", sess.run(w2)) | true |
1ddec391482d0a3f668c90c86b8f9958baa620e0 | Python | frankbryce/First | /Cryptology/Cyphers/Cyphers/Shared/StrUtil.py | UTF-8 | 583 | 2.828125 | 3 | [] | no_license | import re
import LtrUtil as lu
from collections import OrderedDict as od
# Characters removed by StripStr: commas, periods and spaces.
puncRegEx = re.compile("[,. ]+")
def StripStr(str):
    """Uppercase *str* and strip out all commas, periods and spaces."""
    cleaned = puncRegEx.sub("", str.upper())
    return cleaned
def GenerateKeyedAlphabet(key,alph):
    """Return the keyed alphabet: key letters first, then the rest, no repeats."""
    # OrderedDict.fromkeys keeps only the first occurrence of each character
    # while preserving order.
    combined = key + alph
    return list(od.fromkeys(combined))
def ReformatStr(str,format):
    """Re-flow the letters of *str* into the punctuation and case layout of
    *format*: each letter position in *format* consumes the next character
    of *str* (upper- or lower-cased to match), and every non-letter in
    *format* is copied through unchanged.
    """
    pieces = []
    src_idx = 0
    for ch in format:
        if lu.isLtr(ch):
            letter = str[src_idx]
            pieces.append(letter.upper() if ch.isupper() else letter.lower())
            src_idx = src_idx + 1
        else:
            pieces.append(ch)
    return ''.join(pieces)
| true |
4505920b5a2fde5965707523914877c857b8bc52 | Python | kameltigh/deep-cluster-tf | /deep_cluster/clustering/kmeans.py | UTF-8 | 3,024 | 3.046875 | 3 | [
"MIT"
] | permissive | import logging
import tensorflow as tf
class Kmeans:
    """K-means clustering implemented on TensorFlow tensors.

    NOTE(review): __reset_empty_centroids calls .numpy() on tensors, so this
    appears to require TensorFlow eager execution -- confirm.
    """
    # Convergence threshold on the summed centroid movement per iteration.
    EPSILON = 1e-07
    def __init__(self, k):
        # Centroids are created lazily on the first fit_transform() call.
        self.centroids = None
        self.k = k
    @staticmethod
    def __get_clusters(data, centroids):
        """Assign every sample to the index of its nearest centroid."""
        clusters = []
        for sample in data:
            # Euclidean distance from this sample to each centroid.
            distances = tf.norm(tf.expand_dims(sample, axis=0) - centroids, ord="euclidean", axis=1)
            clusters.append(tf.argmin(distances, axis=0))
        return tf.stack(clusters)
    @staticmethod
    def __initialize_centroids(data, k):
        """
        returns list of centroid coordinates. They are randomly selected from the data.
        :param data: the actual dataset to cluster
        :param k: number of clusters
        :return: centroids (their coordinates), one row per cluster
        """
        # Note: indices are drawn independently, so duplicates are possible.
        centroid_indices = tf.random.uniform([k], 0, data.shape[0], dtype=tf.int32)
        return tf.gather(data, centroid_indices)
    @staticmethod
    def __update_centroids(data, clusters, centroids):
        """
        Updates the cluster centroids
        :param data: full dataset, one sample per row
        :param clusters: per-sample cluster index from __get_clusters
        :param centroids: current centroid coordinates
        :return: (new centroids, total distance moved by all centroids)
        """
        new_centroids = []
        centroid_distance = []
        for i in range(centroids.shape[0]):
            # Mean of the samples currently assigned to cluster i.
            cluster_data = data[tf.equal(i, clusters)]
            cluster_centroid = tf.reduce_mean(cluster_data, axis=0)
            new_centroids.append(cluster_centroid)
            # Track how far this centroid moved for the convergence test.
            centroid_dist = tf.norm(cluster_centroid - centroids[i], ord="euclidean")
            centroid_distance.append(centroid_dist)
        return tf.stack(new_centroids), tf.reduce_sum(tf.squeeze(centroid_distance))
    def __reset_empty_centroids(self, non_empty_clusters, data):
        """Re-seed the centroids of empty clusters with random data points."""
        empty_clusters = set(range(self.k)) - set(non_empty_clusters.numpy())
        new_centroids_indices = tf.random.uniform([len(empty_clusters)], 0, data.shape[0], dtype=tf.int32)
        new_centroids = self.centroids.numpy()
        idx = 0
        for i in empty_clusters:
            new_centroids[i] = tf.gather(data, tf.gather(new_centroids_indices, idx))
            idx += 1
        self.centroids = tf.stack(new_centroids)
    def fit_transform(self, data, max_iter=200):
        """Cluster *data* and return the per-sample cluster assignments.

        NOTE(review): the inner while-loop re-seeds empty clusters until all
        k are populated; if the data has fewer than k distinct points this
        could loop indefinitely -- confirm inputs guarantee k <= #points.
        """
        if self.centroids is None:
            self.centroids = Kmeans.__initialize_centroids(data, self.k)
        clusters = None
        for i in range(max_iter):
            clusters = Kmeans.__get_clusters(data, self.centroids)
            y, _ = tf.unique(clusters)
            while y.shape[0] < self.k:
                logging.debug("resetting centroids at iter {}".format(i))
                self.__reset_empty_centroids(y, data)
                clusters = Kmeans.__get_clusters(data, self.centroids)
                y, _ = tf.unique(clusters)
            self.centroids, centroid_evolution = Kmeans.__update_centroids(data, clusters, self.centroids)
            logging.debug("Centroid evo: {}".format(centroid_evolution))
            # Stop early once the centroids have essentially stopped moving.
            if centroid_evolution <= Kmeans.EPSILON:
                break
        return clusters
| true |
d437132e1e39cd56f96626996a23b371748efc40 | Python | GaganDureja/Algorithm-practice | /Stuttering Function.py | UTF-8 | 170 | 3.09375 | 3 | [] | no_license | #Link: https://edabit.com/challenge/gt9LLufDCMHKMioh2
def stutter(word):
    """Return *word* stuttered: its first two letters twice, then the word
    itself with a question mark (e.g. "in... in... incredible?")."""
    prefix = word[:2]
    return "{0}... {0}... {1}?".format(prefix, word)
print(stutter('incredible')) | true |
7c707be75006dda0ec66b69f00d0ae98c6dcc318 | Python | rizkyyz/pertemuan7 | /labpy03/latihan2.py | UTF-8 | 307 | 3.375 | 3 | [] | no_license | #Muhammd Rizky Abdillah
# No source code copied.
# Read numbers until 0 is entered, then report the largest one seen.
# NOTE(review): max starts at 0, so negative inputs can never become the
# reported maximum -- confirm inputs are expected to be non-negative.
# ('max' also shadows the built-in max() for the rest of the script.)
print("---Latihan 2----")
print("menampilkan bilangan berhenti ketika bilangan 0 dan menampilkan bilangan terbesar")
max=0
while True:
    a=int(input("masukan bilangan : "))
    # Track the running maximum.
    if max < a :
        max = a
    # Entering 0 ends the loop (after being considered as a value).
    if a==0:
        break
print("bilangan terbesar adalah = ",max)
| true |
3d13974c91607a3d4e2954a89255919dd96b6760 | Python | yuryanliang/Python-Leetcoode | /2019/0607/169_majority_element.py | UTF-8 | 132 | 2.921875 | 3 | [] | no_license | def majority_element(nums):
nums_set=set(nums)
for i in nums_set:
if nums.count(i)>len(nums)/2:
return i | true |
d35f692449403d2fe2e13b0dfc37be1eb1e5088e | Python | koralmxxx/python | /toplam2.py | UTF-8 | 163 | 3.421875 | 3 | [] | no_license | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
# Python 2 script: read two numbers and print their sum.
# NOTE(review): Python 2 input() evaluates the typed text as an expression,
# which is why numeric addition works here -- it also executes arbitrary
# code, so raw_input() with int() would be safer.
sayi1 = input("Birinci sayiyi girin: ")
sayi2 = input("İkinci sayiyi girin: ")
toplam = sayi1 + sayi2
print toplam
| true |
5021143582d7926e731a2877b16b3f635f4eff15 | Python | AV272/Programming | /Machine learning/Other/rashid_4.py | UTF-8 | 5,309 | 3.15625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 4 12:37:29 2021
@author: lkst
"""
import numpy as np
import scipy.special as sp
import matplotlib.pyplot as plt
import imageio
import glob # helps work with filepath
# definition of neural network class
class neuralNetwork:
    """Three-layer feed-forward network trained by backpropagation."""
    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        """Store the layer sizes and learning rate, and initialise weights."""
        # Layer sizes.
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # Learning rate used by the gradient-descent updates in train().
        self.lr = learningrate
        # Link weights drawn from a normal distribution with standard
        # deviation 1/sqrt(number of target-layer nodes).
        self.w_ih = np.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))
        self.w_ho = np.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))
        # Logistic sigmoid squashes node activations into (0, 1).
        self.activation_function = lambda x: sp.expit(x)
    def train(self, inputs_list, targets_list):
        """Run one backpropagation step for a single (input, target) pair."""
        # Work with column vectors.
        x_col = np.array(inputs_list, ndmin=2).T
        t_col = np.array(targets_list, ndmin=2).T
        # Forward pass through both layers.
        hidden_out = self.activation_function(np.dot(self.w_ih, x_col))
        final_out = self.activation_function(np.dot(self.w_ho, hidden_out))
        # Output error, then error apportioned back to the hidden layer.
        err_out = t_col - final_out
        err_hidden = np.dot(self.w_ho.T, err_out)
        # Gradient-descent weight updates (sigmoid derivative is y*(1-y)).
        self.w_ho += self.lr*np.dot((err_out*final_out*(1- final_out)),\
                                    np.transpose(hidden_out))
        self.w_ih += self.lr*np.dot((err_hidden*hidden_out*(1- hidden_out)),\
                                    np.transpose(x_col))
    def query(self, inputs_list):
        """Forward-propagate *inputs_list* and return the output activations."""
        x_col = np.array(inputs_list, ndmin=2).T
        hidden_out = self.activation_function(np.dot(self.w_ih, x_col))
        return self.activation_function(np.dot(self.w_ho, hidden_out))
# number of input, hidden, output nodes
input_nodes = 784
hidden_nodes = 150
output_nodes = 10
learning_rate = 0.2
lr_arr = []
eff_arr = []
for num in range(1):
#hidden_nodes = num*50 + 50
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
# getting data for training
training_data_file = open('/home/lkst/github/Programming/Machine learning/Other/Data/mnist_train.csv', 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()
## TRAINING OF NEURAL NETWORK
# using few epochs
epochs = 6
for e in range(epochs):
for record in training_data_list:
all_values = record.split(',')
inputs = (np.asfarray(all_values[1:])/255.0*0.99) + 0.01 # rewrite values to interval [0.01, 1]
# take the answers
targets = np.zeros(output_nodes) + 0.01
targets[int(all_values[0])] = 0.99
n.train(inputs, targets)
pass
pass
## TESTING OF NEURAL NETWORK ON MY OWN NUMBERS
our_own_dataset = []
# ? -- one symbol
for image_file_name in glob.glob('/home/lkst/github/Programming/Machine learning/Other/Data/?.png'):
print('loading ...', image_file_name)
# take true answer from file name
label = int(image_file_name[-5:-4])
# load image from png into array
img_array = imageio.imread(image_file_name, as_gray=True)
# reshape 28x28 into 784 array
img_data = 255.0 - img_array.reshape(784)
img_data = (img_data/255.0*0.99) + 0.01
print(np.min(img_data))
print(np.max(img_data))
# create list from label and image data
record = np.append(label, img_data)
our_own_dataset.append(record)
pass
for item in range(5):
plt.imshow(our_own_dataset[item][1:].reshape(28,28), cmap='Greys',interpolation='None')
correct_label = our_own_dataset[item][0]
inputs = our_own_dataset[item][1:]
outputs = n.query(inputs)
print(outputs)
label = np.argmax(outputs)
print('Network answer ', label)
print('Correct answer ', correct_label)
if (label == correct_label):
print("Match")
else:
print('No match')
pass
pass
pass
| true |
fa2bed788ec74722d634e5130b470b9c707d2093 | Python | jrmsdev/pysadm | /tlib/_sadmtest/mock/utils/path.py | UTF-8 | 3,098 | 2.546875 | 3 | [
"BSD-3-Clause"
] | permissive | # Copyright (c) Jeremías Casteglione <jrmsdev@gmail.com>
# See LICENSE file.
import os.path
from collections import deque
from unittest.mock import Mock
class MockPath(object):
    """Stand-in for path utilities: records every call and replays canned
    per-utility results loaded from a test configuration."""
    _mock = None
    _expect = None      # command lines the test expects, in call order
    _return = None      # util name -> deque of queued return values
    _default = None     # util name -> value returned when the queue is empty
    sep = '/'

    def __init__(self, cfg):
        """Create the mock; *cfg* (optional) supplies expectations/returns."""
        self._expect = []
        self._return = {}
        self._default = {}
        self._mock = Mock()
        # Route every utility through a single Mock so the call order is
        # captured in mock_calls for check().
        self.isfile = self._mock.isfile
        self.isdir = self._mock.isdir
        self.abspath = self._mock.abspath
        self.unlink = self._mock.unlink
        self.basename = self._mock.basename
        self.dirname = self._mock.dirname
        self.isabs = self._mock.isabs
        self._configure(cfg)

    def _configure(self, cfg):
        """Install defaults and side effects, then apply *cfg* if given."""
        self._setDefaults()
        for util in ('isfile', 'isdir', 'abspath', 'unlink',
                     'basename', 'dirname', 'isabs'):
            getattr(self._mock, util).side_effect = self._sideEffect(util)
        if cfg is not None:
            self._parseConfig(cfg)

    def _setDefaults(self):
        # Path predicates succeed unless the configuration says otherwise.
        self._default['isfile'] = True
        self._default['isdir'] = True
        self._default['isabs'] = True

    def _parseConfig(self, cfg):
        """Read "<return>;<cmdline>" lines from the [utils.path]/[path] data."""
        data = cfg.get('utils.path', fallback = None)
        if data is None:
            data = cfg.get('path', fallback = '')
        if data == '':
            return
        for raw in data.splitlines():
            raw = raw.strip()
            if raw == '':
                continue
            pieces = raw.split(';')
            rtrn = pieces[0].strip()
            cmdline = ';'.join(pieces[1:]).strip()
            # First token of the command line names the utility.
            util = cmdline.split(' ')[0].strip()
            self._expect.append(cmdline)
            self._setReturn(util, rtrn)

    def _setReturn(self, name, data):
        """Queue *data* as the next return value for utility *name*."""
        if name == '':
            raise RuntimeError('mock path: util name is empty')
        queue = self._return.get(name, None)
        if queue is None:
            queue = deque()
            self._return[name] = queue
        queue.appendleft(data)

    def _sideEffect(self, util):
        """Build the side-effect callable replaying queued values for *util*."""
        def wrapper(*args, **kwargs):
            queue = self._return.get(util, None)
            default = self._default.get(util, None)
            if queue is None:
                return default
            try:
                data = queue.pop()
            except IndexError:
                return default
            # '' means "use the default"; 'True'/'False' map to booleans.
            if data == '':
                return default
            if data == 'False':
                return False
            if data == 'True':
                return True
            return data
        return wrapper

    def check(self):
        """Assert the recorded calls match the configured expectations."""
        got = []
        for name, args, kwargs in self._mock.mock_calls:
            cmdline = name
            if len(args) > 0:
                cmdline = "%s %s" % (name, ' '.join([str(i) for i in args]))
            for key, val in kwargs.items():
                cmdline = "%s, %s=%s" % (cmdline, key, str(val))
            got.append(cmdline)
        assert got == self._expect, \
            "mock path\n*** GOT:\n%s\n*** EXPECT:\n%s" % ('\n'.join(got), '\n'.join(self._expect))

    def join(self, *parts):
        joined = '/'.join(parts)
        # Collapse the doubled slash produced by joining a leading "/".
        return joined[1:] if joined.startswith('//') else joined

    def normpath(self, name):
        return os.path.normpath(name)

    def glob(self, patt):
        # Deterministic fake expansion: wildcards become literal markers and
        # the '*' marker is varied to yield three pseudo-matches.
        base = patt.replace('*', 'S0').replace('?', 'Q')
        return [base, base.replace('S0', 'S1'), base.replace('S0', 'S2')]
| true |
7be3ca47e7c11df97a406aaaf18ce86f48577647 | Python | astrotutor9/Neopixels-Microbit-and-Python | /chase_functions.py | UTF-8 | 571 | 3.359375 | 3 | [] | no_license | from microbit import *
import neopixel
# 16-LED NeoPixel ring driven from micro:bit pin 0.
ring = neopixel.NeoPixel(pin0, 16)
# set some variables for colours (dim RGB tuples, 0-255 per channel)
red = (75, 0, 0)
green = (0, 75, 0)
blue = (0, 0, 75)
off = (0, 0, 0)
# define (def) a function and give it a clear, simple name.
# Here the colours are sent from the call at the bottom
# and renamed as use_this_colour.
def chase(use_this_colour):
    # Light each LED in turn while switching off the one two behind it,
    # producing a two-LED "comet" chase. The negative index at the start
    # (led_number - 2 for 0 and 1) wraps around to the end of the ring.
    for led_number in range(0, 16):
        ring[led_number] = use_this_colour
        ring[led_number - 2] = off
        ring.show()
        sleep(50)
# Cycle the chase through red, green and blue forever.
while True:
    chase(red)
    chase(green)
    chase(blue)
3a7c407594a7576a28330b834881ea2d96885b3f | Python | publiccoding/prog_ln | /my_practice/logical_iq/project/multiprocessingexample.py | UTF-8 | 1,544 | 2.96875 | 3 | [] | no_license | from multiprocessing import Pool, Process,Pipe, queues
from random import random
from math import pi, sqrt
import time
import os
def compute_pi(n, rand=None):
    """Monte Carlo estimate of pi from *n* random points in the unit square.

    Args:
        n: number of sample points to draw (must be > 0).
        rand: optional zero-argument callable returning floats in [0, 1);
            defaults to random.random. Lets callers inject a seeded
            generator for reproducible results.

    Returns:
        The estimate 4 * (points inside the quarter circle) / n. The value
        is also printed, preserving the original output. BUG FIX: the
        original only printed the result and returned None, even though the
        commented-out caller in this file expects ``mypi = compute_pi(...)``.
    """
    if rand is None:
        rand = random
    inside = 0
    for _ in range(n):
        x = rand()
        y = rand()
        # Count points falling inside the quarter circle of radius 1.
        if sqrt(x * x + y * y) <= 1:
            inside += 1
    ratio = 4.0 * inside / n
    print(ratio)
    return ratio
# if __name__ == '__main__' :
# mproc1 = Process(target=compute_pi, args=(100000,))
# mproc1.start()
# mproc1.join()
# print("first process completed ")
# mproc2 = Process(target=compute_pi, args=(200000,))
# mproc2.start()
# mproc2.join()
# print("second process compelted")
# if __name__ == '__main__':
# mypi = compute_pi(100000)
# print("My Pi : {0}, Error: {1} ".format(mypi, mypi - pi))
# def start_function_for_process(n):
# time.sleep(0.2)
# result = n*n
# return result
# if __name__ == '__main__':
# p = Pool(5)
# result = p.map(start_function_for_process,range(200),chunksize=10)
# print(result)
# p.close()
def ponger(p,s):
    """Exchange ten messages over pipe end *p*, replying with *s* each time.

    Blocks on recv(), prints what arrived, pauses a second, then answers.
    """
    for _ in range(10):
        msg = p.recv()
        print("process {0} got message {1}".format(os.getpid(), msg))
        time.sleep(1)
        p.send(s)
if __name__ == '__main__':
    # Duplex pipe: the child process runs ponger on one end while this
    # process answers on the other, ping-ponging ten messages each way.
    parent, child = Pipe()
    proc = Process(target=ponger,args=(child,'ping'))
    proc.start()
    # Kick off the exchange, then join in from this side.
    parent.send('wonging')
    ponger(parent,'pong')
    proc.join()
| true |
06f07e087afd2ca486892e44b392054de657bc14 | Python | PlatformOfTrust/standards | /tools/ontology-validator/validators/file_content/objects_defined.py | UTF-8 | 2,140 | 3.25 | 3 | [] | no_license | """This module has a class that validates that every class and property from
the file is defined in the ontology file.
"""
from utils.constants import _ID
from utils.ontology import Ontology
from utils.validation import is_class, is_property
from validators.file_content.file_content import FileContentValidator
def validate_id(property_id: str) -> bool:
    """This method validates if an id is defined in Ontology file.

    The id (e.g. "pot:Some/Path/Name") is collapsed to "<prefix>:<name>"
    before the ontology lookup.
    """
    prefix = property_id.split(":")[0]
    tail = property_id.split(":")[-1].split("/")[-1]
    ontology_path = Ontology.get_path_recursively(f"{prefix}:{tail}")
    # Defined ids resolve to a path equal to their own name plus "/".
    name = property_id.split(":")[-1]
    return ontology_path == f"{name}/"
class ObjectsDefined(FileContentValidator):
    """This class has a method validate that validates that every class and
    property from the file is defined in the ontology file.
    """
    def __init__(self, file_content: dict, validation_message: str):
        super().__init__(file_content, validation_message)
    def validate(self) -> bool:
        """This method validates that every class and property from the file is
        defined in the ontology file.

        Returns True when every id resolves; otherwise logs a debug message
        per failure, writes the validation message once, and returns False.
        """
        json_content = self.file_content
        valid_file = True
        for item in json_content:
            # Only entries that carry an id can be checked at all.
            if _ID in json_content[item]:
                if is_property(json_content[item]):
                    if not validate_id(json_content[item][_ID]):
                        valid_file = False
                        self.write_debug_message(
                            f"Property: '{json_content[item][_ID]}' is"
                            " not defined in ontology")
                elif is_class(json_content[item]):
                    if not Ontology.has_class(json_content[item][_ID]):
                        valid_file = False
                        self.write_debug_message(
                            f"Class: '{json_content[item][_ID]}' is not"
                            " defined in ontology")
        # Emit the summary validation message only when something failed.
        if not valid_file:
            self.write_validation_message()
        return valid_file
| true |
528eb1a0e011ac79d2625adb782bff6b90189244 | Python | pradhanmanva/PracticalList | /pr12.py | UTF-8 | 233 | 3.796875 | 4 | [] | no_license | # wap to find the largest number of the three numbers
# Find the largest of the three numbers.
a = 103
b = 121
c = 93
# BUG FIX: the original used strict '>' comparisons, so a tie for the
# maximum (e.g. a == b being the largest) failed both branches and the
# else branch reported c. '>=' labels any maximum correctly.
if a >= b and a >= c:
    greatest = a
elif b >= a and b >= c:
    greatest = b
else:
    greatest = c
print("%s is greatest" % (greatest))
| true |
25994ac190ee30ca08ff4444809f983a58d54e90 | Python | chopley/opticalPointing | /extractPositions/starPosition.py | UTF-8 | 6,132 | 2.796875 | 3 | [] | no_license | #Script that will do the following:
#1) Read in positions of stars from images in pixels
#2) Use a catalog to calculate the expected positions of the stars
#3) Calculate the az,el position of the centre of the image
#Written by Charles Copley,AVN Science
#Rev 1.0 06/08/2015
import pandas,numpy,ephem,sys,cv2,datetime
#define the column layout
def extractImage(time):
    #function that reads an image at a given time, and outputs the offset of the brightest point in pixels
    # The image file is named after the observation timestamp.
    filename=time+'.png'
    # Load as grayscale (flag 0).
    img = cv2.imread(filename, 0)
    rows,cols = img.shape
    #remove some of the white stuff in the image (mask out the camera's
    # on-screen overlay regions before searching for the brightest pixel)
    img[20:rows-1,0:80]=0
    img[0:23]=0
    # NOTE(review): rows/2 is float division under Python 3; the prints
    # later in this file use Python 2 syntax, so integer division is
    # presumably intended here -- confirm the target interpreter.
    centerRow, centerCol = rows/2, cols/2
    #Find the brightest pixel position
    (sourceRow,sourceCol) = numpy.unravel_index(img.argmax(), img.shape)
    # Offsets of the brightest pixel from the image centre.
    dCol = sourceCol-centerCol
    dRow = sourceRow-centerRow
    # Crude signal-to-noise: peak brightness over mean brightness.
    snr = numpy.max(img)/numpy.mean(img)
    return (dCol,dRow,snr,sourceCol,sourceRow)
def estimateStarPosition(timeObserved,starName,siteDetail,starList):
    """Compute the expected az/el (degrees) of *starName* at *timeObserved*
    for the observing site described by *siteDetail*, using the catalog
    coordinates in *starList*.
    """
    # Build the pyephem observer from the site's lat/lon (degrees in the
    # table, converted to radians) and altitude.
    Kuntunse=ephem.Observer()
    Kuntunse.lat=(siteDetail['Lat'])*ephem.degree
    Kuntunse.lon=(siteDetail['Lon'])*ephem.degree
    elevation=siteDetail['Altitude']
    #print Kuntunse.lat,Kuntunse.lon,elevation
    # Look up the catalog row for this star by name.
    indexLoc = starList[starList['Name']==starName].index.tolist()
    source = ephem.FixedBody()
    source._ra = starList['RA'][indexLoc[0]]
    source._dec = starList['DEC'][indexLoc[0]]
    source._epoch=ephem.J2000
    Kuntunse.date=timeObserved
    source.compute(Kuntunse)
# a='%s %s %2.2f %2.2f' %(starName,timeObserved,numpy.rad2deg(source.az),numpy.rad2deg(source.alt))
    # Normalised azimuth and altitude converted from radians to degrees.
    return (timeObserved,source.az.norm/ephem.degree,source.alt/ephem.degree)
def cartRotate(x,y,angle):
    """Rotate the point (x, y) clockwise by *angle* radians and return the
    rotated (x2, y2) pair."""
    c = numpy.cos(angle)
    s = numpy.sin(angle)
    return (x*c + y*s, y*c - x*s)
def analyseImage(image1,image2,rotAngle,interpValues):
    """Compare two star images and derive the pixels-per-degree plate scale.

    Args:
        image1, image2: tuples (starName, dPixCol, dPixRow, az, el, ...) as
            built by the main script below.
        rotAngle: camera rotation angle in degrees (azimuth axis relative
            to the image horizontal axis).
        interpValues: kept for interface compatibility; its contents were
            only assigned to unused locals in the original and are no
            longer read.

    Returns:
        (i1, i2, distanceScale): both pixel offsets rotated into the az/el
        frame, and the pixels-per-degree scale between the two images.
    """
    az1 = image1[3]
    az2 = image2[3]
    dAz = az2 - az1
    # Wrap the azimuth difference into (-180, 180].
    if dAz >= 180:
        dAz = dAz - 360
    if dAz <= -180:
        dAz = dAz + 360
    el1 = image1[4]
    el2 = image2[4]
    dEl = el2 - el1
    xpix1 = image1[1]
    xpix2 = image2[1]
    ypix1 = image1[2]
    ypix2 = image2[2]
    dXpix = float(xpix2 - xpix1)
    dYpix = float(ypix2 - ypix1)
    # On-sky separation in degrees; azimuth is foreshortened by cos(el).
    distanceAngle = numpy.sqrt((dAz * numpy.cos(numpy.radians(el2))) ** 2 + dEl ** 2)
    distancePixel = numpy.sqrt(dXpix ** 2 + dYpix ** 2)
    rotationAngle = numpy.radians(rotAngle)
    # Rotate both pixel offsets so the x axis lines up with azimuth.
    i1 = cartRotate(xpix1, ypix1, rotationAngle)
    i2 = cartRotate(xpix2, ypix2, rotationAngle)
    distanceScale = distancePixel / distanceAngle
    return (i1, i2, distanceScale)
#values that require setting
#Angle of the azimuth axis to the Image horizontal axis
#rotationAngle=numpy.radians(-14.4)
rotationAngle=numpy.radians(104.4)
#Number of pixels to one degree
Scale=160.
#starName = sys.argv[1]
starName = 'KapCen'
#date = sys.argv[3]
date = '2015/08/02'
#siteName = sys.argv[2]
siteName = 'Klerefontein'
#read in the list of possible observing sites
sites = pandas.read_csv('sites.txt')
siteDetail=sites[sites['Site']==siteName]
print siteDetail
#file that contains the star name, and time of observations
starDetail = pandas.read_csv('starLog.txt')
colspecs=[(0,4),(6,15),(16,26),(28,40),(41,53),(54,65),(66,71)]
#read in the data frame from the starlist catalog
starList=pandas.read_fwf('starList.cat',colspecs=colspecs)
# Pre-create the output columns with a placeholder value.
starDetail['Time2']='na'
starDetail['Az']='na'
starDetail['El']='na'
starDetail['dPixCol']='na'
starDetail['dPixRow']='na'
starDetail['SNR']='na'
starDetail['PixCol']='na'
starDetail['PixRow']='na'
starDetail['Image']='na'
starDetail['PixAz']='na'
starDetail['PixEl']='na'
starDetail['offAz']='na'
starDetail['offEl']='na'
starDetail['centreAz']='na'
starDetail['centreEl']='na'
# For every logged observation: predict the star's az/el, measure its
# pixel offset in the image, and convert that into an az/el offset.
# NOTE(review): the starDetail['X'][i]=... pattern is pandas chained
# assignment and may not write back reliably on newer pandas -- confirm
# the pandas version this was run with.
for i in range(0,len(starDetail)):
    obsTime1=starDetail['Time'][i]
    obsTime2= datetime.datetime.strptime(obsTime1,'%Y-%m-%d-%H%M%S')
    starName=starDetail['StarName'][i]
    (timeout1,az1,el1)= estimateStarPosition(obsTime2,starName,siteDetail,starList)
    (pixcol1,pixrow1,snr1,sCol1,sRow1) = extractImage(obsTime1)
    starDetail['Time2'][i]=obsTime2
    starDetail['Az'][i]=az1
    starDetail['El'][i]=el1
    starDetail['dPixCol'][i]=pixcol1
    starDetail['dPixRow'][i]=pixrow1
    starDetail['SNR'][i]=snr1
    starDetail['PixCol'][i]=sCol1
    starDetail['PixRow'][i]=sRow1
    # NOTE(review): 'PixCol' appears twice in this tuple; the last element
    # was presumably meant to be 'PixRow' -- confirm before relying on
    # image1[6] downstream.
    image1=(starDetail['StarName'][i],starDetail['dPixCol'][i],starDetail['dPixRow'][i],starDetail['Az'][i],starDetail['El'][i],starDetail['PixCol'][i],starDetail['PixCol'][i])
    starDetail['Image'][i]=image1
    # Rotate the pixel offset into the az/el frame, then scale to degrees.
    (xp1,yp1) = cartRotate(starDetail['dPixCol'][i],starDetail['dPixRow'][i],rotationAngle)
    starDetail['PixAz'][i]=xp1
    starDetail['PixEl'][i]=yp1
    starDetail['offAz'][i]=xp1/Scale
    starDetail['offEl'][i]=yp1/Scale
    starDetail['centreEl'][i]=float(starDetail['El'][i])-float(starDetail['offEl'][i])
    # Azimuth offset is de-foreshortened with cos(80 deg) -- the nominal
    # elevation of these observations.
    starDetail['centreAz'][i]=float(starDetail['Az'][i])+float(starDetail['offAz'][i])/numpy.cos(numpy.radians(80))
    if(float(starDetail['centreAz'][i])>180.):
        starDetail['centreAz'][i]=starDetail['centreAz'][i]-360
rotAngle=14.4
print starDetail['StarName'][5], starDetail['StarName'][4]
#first just check the rotation angle is correct-
# First make sure the two images are just azimuth movement
# we want i1a[0]-i2a[0] to be close to zero. This implies zero elevation motion
(i1a,i2a,distanceScalea)= analyseImage(starDetail['Image'][5],starDetail['Image'][4],rotAngle,(180,80))
print i1a[0]-i2a[0],i1a[0]-i1a[1]
print distanceScalea
(i1,i2,distanceScale)= analyseImage(starDetail['Image'][0],starDetail['Image'][5],rotAngle,(180,80))
print (i1[0]-i2[0])/distanceScale,(i1[0]-i1[1])/distanceScale
print distanceScale
(i1,i2,distanceScale)= analyseImage(starDetail['Image'][8],starDetail['Image'][6],rotAngle,(0,80))
print (i1[0]-i2[0])/distanceScale,(i1[0]-i1[1])/distanceScale
print distanceScale
(i1,i2,distanceScale)= analyseImage(starDetail['Image'][9],starDetail['Image'][6],rotAngle,(0,80))
print (i1[0]-i2[0])/distanceScale,(i1[0]-i1[1])/distanceScale
print distanceScale
(i1,i2,distanceScale)= analyseImage(starDetail['Image'][7],starDetail['Image'][6],rotAngle,(0,80))
print (i1[0]-i2[0])/distanceScale,(i1[0]-i1[1])/distanceScale
print distanceScale
print starDetail
| true |
45d853c2decbc71918d56725260fad871ed4420e | Python | giselemanuel/programming-challenges | /100DaysOfDays/Dia02/ex08.py | UTF-8 | 368 | 4.65625 | 5 | [
"MIT"
] | permissive | """
Exercício Python 8:
Escreva um programa que leia um valor em metros e o exiba convertido em centímetros e milímetros.
"""
# Exercise 8: read a length in meters and show it converted to centimeters
# and millimeters (the original only printed centimeters, contradicting the
# exercise statement in the module docstring above).
print("-" * 40)
print(f'{"Converte metros em centimetros":^40}')
print("-" * 40)
metro = float(input("Digite o valor em metros: "))
centimetros = metro * 100   # 1 m = 100 cm
milimetros = metro * 1000   # 1 m = 1000 mm
print(f"{metro:.0f} metr(s) é equivalente a {centimetros:.0f} centimetro(s) "
      f"e {milimetros:.0f} milimetro(s).")
ea999e54c2fd148db4840e990b1ea723ec25f895 | Python | wangyifeibeijing/newtype_sbm | /data_system/mnist/read_mnist.py | UTF-8 | 1,540 | 2.796875 | 3 | [] | no_license | import os
import struct
import numpy as np
import scipy.io as scio
def load_mnist(path, kind='t10k'):
    """Load MNIST data in IDX format from *path*.

    Returns a tuple ``(images, labels)`` where ``images`` is an
    ``(n, 784)`` uint8 array of flattened 28x28 digits and ``labels``
    is the matching ``(n,)`` uint8 array.
    """
    labels_file = os.path.join(path, '%s-labels.idx1-ubyte' % kind)
    images_file = os.path.join(path, '%s-images.idx3-ubyte' % kind)
    # Labels file: 8-byte big-endian header (magic number, item count),
    # then one raw byte per label.
    with open(labels_file, 'rb') as handle:
        struct.unpack('>II', handle.read(8))
        labels = np.fromfile(handle, dtype=np.uint8)
    # Images file: 16-byte big-endian header (magic, count, rows, cols),
    # then 784 pixel bytes per image.
    with open(images_file, 'rb') as handle:
        struct.unpack('>IIII', handle.read(16))
        images = np.fromfile(handle, dtype=np.uint8).reshape(len(labels), 784)
    return images, labels
def rand_sample(x, y, samnum=1000):
    """Draw *samnum* random (row, label) pairs from x and y.

    x is an (n, m) feature matrix and y an (n, 1) label column; rows are
    shuffled jointly so each sampled row keeps its own label.  Returns
    ``[x_sampled, y_sampled]`` with shapes (samnum, m) and (samnum,).
    """
    n_features = x.shape[1]
    # Glue labels onto the feature rows so one shuffle permutes both.
    stacked = np.hstack((x, y))
    order = np.arange(stacked.shape[0])
    np.random.shuffle(order)
    chosen = stacked[order[0:samnum]]
    # Split features and labels back apart.
    return [chosen[:, 0:n_features], chosen[:, n_features]]
# Disabled one-off (string literal, not executed): convert the raw IDX test
# files into mnist_test.mat.
'''
[d,l]=load_mnist("")
print(d.shape)
print(l.shape)
scio.savemat('mnist_test.mat', {'mnist_test': d,'mnist_label': l})
'''
# Load the previously converted full dataset from MATLAB .mat format.
data=scio.loadmat("mnist.mat")
x=data['mnist']  # images, one flattened digit per row
y=data['mnist_label']  # labels; transposed below so rows align with x
# Draw 1000 random (image, label) pairs and save them as mnist_1000.mat.
# NOTE(review): assumes y is stored as a single row vector, hence y.T -- confirm.
[x1,y1]=rand_sample(x,y.T,samnum=1000)
print(y1)
scio.savemat('mnist_1000.mat', {'mnist_1000': x1,'label':y1})
# Disabled debug output (string literal, not executed).
'''
print(x.shape)
print(y.shape)
'''
9f715a16adc9a47e5361c682b1827f963a8a9e07 | Python | NC-Elbow/PackagesForGeneralConsumption | /blockmatrix.py | UTF-8 | 3,983 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 10:26:50 2020
@author: clark
"""
import numpy as np
import pandas as pd
from math import nan
from numpy import matmul as mm
class blockmm:
    """Block (partitioned) matrix multiplication.

    Splits A (n x m) and B (m x p) into sub-matrices of at most
    ``partition_shape`` rows/columns, multiplies the blocks pairwise and
    accumulates them into the blocks of C = A @ B, stored in ``self.C_block``.
    """

    def __init__(self, A, B, partition_shape=10):
        # A and B are large matrices to be multiplied whose dimensions line up
        # ie A is nxm, B is mxp so C = AB is nxp.
        self.A = A
        self.B = B
        self.partition_shape = partition_shape

    def _cuts(self, size):
        """Return the cut points 0, s, 2s, ..., size for one dimension."""
        s = self.partition_shape
        if size > s:
            cuts = [k * s for k in range(1 + size // s)]
            if size % s > 0:
                # A ragged final chunk holds the remainder.
                cuts.append(size)
        else:
            # Dimension fits in a single chunk.
            cuts = [0, size]
        return cuts

    def partition(self):
        """Compute row/column cut points for A, B (and hence C).

        On a dimension mismatch, keeps the original "soft failure"
        contract (prints a message and sets ``self.C = nan``) rather than
        raising, so existing callers that inspect self.C still work.
        """
        a1, a2 = self.A.shape
        b1, b2 = self.B.shape
        if a2 != b1:
            print("We cannot multiply these matrices as presented.")
            self.C = nan
            return
        self.rows_a = self._cuts(a1)
        self.cols_a = self._cuts(a2)
        self.rows_b = self.cols_a  # inner dimensions agree by construction
        self.cols_b = self._cuts(b2)

    def make_A_blocks(self):
        """Slice A into sub-matrices keyed '<row><col>' (1-based indices).

        NOTE(review): the key concatenates the two indices with no
        separator, so keys become ambiguous once either index exceeds 9.
        """
        A_block = {}
        for i in range(len(self.rows_a) - 1):
            for j in range(len(self.cols_a) - 1):
                name = "{0}{1}".format(i + 1, j + 1)
                A_block[name] = self.A[self.rows_a[i]:self.rows_a[i + 1],
                                       self.cols_a[j]:self.cols_a[j + 1]]
        self.A_block = A_block

    def make_B_blocks(self):
        """Slice B into sub-matrices keyed '<row><col>' (1-based indices)."""
        B_block = {}
        for i in range(len(self.rows_b) - 1):
            for j in range(len(self.cols_b) - 1):
                name = "{0}{1}".format(i + 1, j + 1)
                B_block[name] = self.B[self.rows_b[i]:self.rows_b[i + 1],
                                       self.cols_b[j]:self.cols_b[j + 1]]
        self.B_block = B_block

    def make_C_blocks(self):
        """Accumulate C[i][j] = sum over k of A[i][k] @ B[k][j] blockwise."""
        C_block = {}
        for i in range(len(self.rows_a) - 1):
            for j in range(len(self.cols_b) - 1):
                val = 0
                for k in range(len(self.cols_a) - 1):
                    # Direct dict lookups replace the original eval() of a
                    # built-up expression string -- same math, no eval.
                    val = val + mm(self.A_block["{0}{1}".format(i + 1, k + 1)],
                                   self.B_block["{0}{1}".format(k + 1, j + 1)])
                C_block["{0}{1}".format(i + 1, j + 1)] = val
        self.C_block = C_block

    def store_to_csv(self, path_to_save):
        """Write each block of C to its own CSV file under *path_to_save*.

        Bug fixes vs. the original: it called ``dict.item()`` (no such
        method), wrote every block to the same literal file ``"x.csv"``,
        and called ``to_csv`` on numpy arrays, which have no such method.
        """
        if path_to_save == '':
            path_to_save = '/home/clark/Computing/python_projects/csvs/'
        for name, block in self.C_block.items():
            # Blocks are numpy arrays, so wrap in a DataFrame for to_csv.
            pd.DataFrame(block).to_csv(path_to_save + name + ".csv")

    def main(self):
        """Partition both matrices and compute every block of C = A @ B."""
        self.partition()
        if not hasattr(self, 'rows_a'):
            # partition() detected incompatible shapes; nothing to multiply
            # (the original crashed with AttributeError here).
            return
        self.make_A_blocks()
        self.make_B_blocks()
        self.make_C_blocks()
3b8ef7220abbf7013590d95123ca541d7330ca81 | Python | AlexandrSech/Z49-TMS | /students/Titov/6/task_6_4.py | UTF-8 | 275 | 3.484375 | 3 | [] | no_license | """Найти сумму всех элементов матрицы."""
import random

# Build a random 5x5 matrix of ints in [1, 30], print each row as it is
# built, and print the sum of all elements.  The accumulator is named
# `total` (the original shadowed the builtin `sum`).
total = 0
matr = []
for i in range(5):
    matr.append([])
    for j in range(5):
        a = random.randint(1, 30)
        matr[i].append(a)
        total += a
    print(matr[i])
print(total)
5058b9265da455110f041cc25e2b1a1842fb34d8 | Python | ozgurfiratcelebi/UdacityWeatherTrends | /WeatherTrends.py | UTF-8 | 958 | 3.15625 | 3 | [] | no_license | """
istanbul verilerini al
Dünyanın sıcaklık değerlerini al
csv leri python ile açgrafiği dök
Şehriniz, küresel ortalamaya kıyasla ortalama olarak daha sıcak mı yoksa daha soğuk mu? Fark zaman içinde tutarlı oldu mu?
Şehrinizin sıcaklıklarındaki zaman içindeki değişimler, küresel ortalamadaki değişikliklerle karşılaştırıldığında nasıl?
Genel eğilim neye benziyor? Dünya daha da ısınıyor mu, soğuyor mu? Trend, son birkaç yüz yılda tutarlı oldu mu?
"""
import pandas as pd
import matplotlib.pyplot as plt

# Yearly average temperatures for Istanbul and for the whole globe.
istanbul = pd.read_csv("Istanbul.csv")
world = pd.read_csv("Global.csv")

# Draw both series on one chart; Istanbul first so the legend order matches.
for frame, series_label in ((istanbul, "Istanbul Data"), (world, "Global Data")):
    plt.plot(frame['year'], frame['avg_temp'], label=series_label)

plt.legend()
plt.title('Temperature in Istanbul and Global', fontsize=20)
plt.xlabel('Year', fontsize=16)
plt.ylabel('Temperature [°C]', fontsize=16)
plt.show()
| true |
430ae27d8636101e066d402846bc92c9089a385f | Python | Cythes/My-Firsts | /m8ball.py | UTF-8 | 1,066 | 3.921875 | 4 | [] | no_license | #!/usr/bin/env python3.2
"""
m8ball.py
name:Cythes
Problem:Get the system to print a fortune based on 1-6 number generation
Target Users: Myself and those poor souls who stumble upon
Target System: GNU/LINUX
Functional Requirements:
-User enters text to be decided
-Program uses a dice roll to determine a number between 1-6
-Program then tells a message based on the resulting number
-User can then exit out of the program
Testing: A simple test run expecting an answer to a question.
Maintainer:Cythes
"""
#import random
import random

# One fortune per die face (1-6); a dict lookup replaces the original
# six separate `if die == n` comparisons.  Message text (including its
# original spelling) is preserved byte-for-byte.
FORTUNES = {
    1: "Answer not presently availible: Ask later",
    2: "My reply is no",
    3: "Signs point to yes",
    4: "Outlook good",
    5: "Dont count on it.",
    6: "As I see it: yes",
}

#User enters a statement.
tell = input("Your question? ")
#Beginning of the dice process
die = random.randint(1, 6)
print(FORTUNES[die])
# Renamed from `exit` so the builtin is not shadowed.
farewell = input("Press enter to exit the program. Dont forget your Cthulhu on the way out :)")
#this case is closed!
| true |