blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
00ccabca272902bdb95981e4071b71dd97536bfb
|
Python
|
jmberros/stem-loop
|
/bin/slice-fastas.py
|
UTF-8
| 2,272
| 2.84375
| 3
|
[] |
no_license
|
#!/usr/bin/python
#-*- encoding:utf-8 -*-
import sys
import os
import argparse
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
def slice_fasta(filename, subseq_name, start, end, subdir):
    """Cut the 1-based, inclusive range [start, end] out of a single-record
    FASTA file and write it to <subdir>/<filename with __sliced_<name>.fa>.

    If end < start the range is treated as antisense and sliced as [end, start].
    Negative coordinates are clamped to 0 with a warning.
    Returns the written filename, or None if the file was not created.
    NOTE: Python 2 code (print statements).
    """
    # "Negative" nucleotides won't work for the slicing
    if start < 0:
        start = 0
        print "Warning! {} had a negative start nucleotide. I assigned 0.".format(filename)
    if end < 0:
        end = 0
        print "Warning! {} had a negative end nucleotide. I assigned 0.".format(filename)
    is_antisense = end < start
    # SeqIO.read raises if the file does not contain exactly one record.
    seq_record = SeqIO.read(filename, "fasta")
    if not os.path.exists(subdir):
        os.makedirs(subdir)
    subseq_filename = subdir + "/" + \
        filename.replace(".fa", "__sliced_{}.fa".format(subseq_name))
    if is_antisense:
        # NOTE(review): reverse-complement is commented out, so the antisense
        # slice is taken from the forward strand — confirm this is intended.
        #seq = seq_record.reverse_complement().seq
        seq = seq_record.seq
        subseq_record = SeqRecord(seq[end-1:start]) # Include limits of range
        # NOTE(review): unlike the sense branch, this description omits the
        # record name — confirm the asymmetry is intentional.
        subseq_record.description = "from {} to {}".format(end, start)
    else:
        seq = seq_record.seq
        subseq_record = SeqRecord(seq[start-1:end]) # Include limits of range
        subseq_record.description = "{} from {} to {}".format(seq_record.name, start, end)
    subseq_record.id = "{}_{}".format(seq_record.name, subseq_name)
    with open(subseq_filename, "w") as file:
        file.write(subseq_record.format("fasta"))
    return subseq_filename if os.path.isfile(subseq_filename) else None
if __name__ == "__main__":
    # Python 2 CLI: reads one whitespace-separated argv line per stdin line,
    # so many FASTAs can be sliced by a single process invocation.
    parser = argparse.ArgumentParser(description="Slice FASTAs")
    parser.add_argument("filename", help="FASTA to slice")
    parser.add_argument("--subdir", default="sliced-fastas", help="Subdirectory to move sliced files")
    parser.add_argument("--name", help="Subsequence name")
    parser.add_argument("-f", "--from", type=int, help="Start nucleotide", required=True)
    parser.add_argument("-t", "--to", type=int, help="End nucleotide", required=True)
    for line in sys.stdin:
        # Each line becomes an argv list; vars() is needed because "from" is
        # a Python keyword and cannot be read as an attribute.
        args = line.rstrip("\n").split(" ")
        args = vars(parser.parse_args(args))
        subseq_filename= slice_fasta(
            filename=args['filename'], subseq_name=args['name'],
            start=args['from'], end=args['to'], subdir=args['subdir']
        )
        # Prints the written path (or None on failure) for each input line.
        print subseq_filename
| true
|
ca2345559a8d296488f1031b2382db71af57b8e4
|
Python
|
AkaiTobira/TetrisAgents
|
/Libraries/game_master.py
|
UTF-8
| 2,572
| 2.84375
| 3
|
[] |
no_license
|
import pygame
import time
import enum
from Libraries.Structures.tetrisGame import Tetris
from Libraries.game import Game
from Libraries.presenter import Presenter
from Libraries.tester import Tester
from Libraries.consts import *
class GameState(enum.Enum):
    """Top-level application modes selectable at runtime."""
    # BUG fixed: the original subclassed bare `Enum`, but only `import enum`
    # is in scope, so defining the class raised NameError. The trailing
    # commas (LearningApp = 1,) also made every value a 1-tuple; plain ints
    # are used instead. Members are only ever compared by identity/equality
    # elsewhere in this file, so the value change is safe.
    LearningApp = 1
    PresentingApp = 2
    TestingApp = 3
class StateChanger:
    """Lazily builds and caches one application object per GameState."""

    # NOTE: class-level attributes, shared by all instances — fine as long
    # as a single StateChanger exists (as in GameMaster).
    activeApp = None
    apps = {}

    def get_state(self, state):
        """Return the app for *state*, creating and caching it on first use."""
        if state == GameState.LearningApp:
            if 1 not in self.apps:
                self.apps[1] = Game((250, 860), "Tetris")
            self.activeApp = self.apps[1]
            return self.activeApp
        if state == GameState.TestingApp:
            if 3 not in self.apps:
                self.apps[3] = Tester((250, 860), "Tetris")
            self.activeApp = self.apps[3]
            return self.activeApp
        if state == GameState.PresentingApp:
            if 2 not in self.apps:
                # Presenter width scales with the number of screens shown.
                self.apps[2] = Presenter((int(1300 * NUMBER_OF_SCREENS / 5.0), 860), "Tetris")
            self.activeApp = self.apps[2]
            return self.activeApp
class GameMaster:
    """Glue around the pygame event loop: forwards events to the active app
    and switches apps on hot-keys."""

    # NOTE(review): class-level attributes shared across instances; assumed
    # to be used as a singleton.
    stateSwitcher = None
    activeScreen = None
    activeState = GameState.LearningApp
    running = True

    def __init__(self):
        pygame.mouse.set_visible(False)
        self.stateSwitcher = StateChanger()
        self.activeScreen = self.stateSwitcher.get_state(self.activeState)

    def process(self):
        """Drain the pygame event queue, forwarding each event to the active
        screen; returns when the queue is empty or on QUIT."""
        while True:
            event = pygame.event.poll()
            # poll() is non-blocking; NOEVENT means the queue is drained.
            if event.type == pygame.NOEVENT: return
            if event.type == pygame.QUIT:
                self.running = False
                return
            self.process_change_app(event)
            self.activeScreen.process(event)

    def process_change_app(self, event):
        """Switch the active app when an AppKeys hot-key is released."""
        if event.type == pygame.KEYUP:
            last_state = self.activeState
            if event.key == AppKeys.ChangeScreen:
                self.activeState = GameState.LearningApp
            if event.key == AppKeys.ChangeScreen2:
                self.activeState = GameState.PresentingApp
            if event.key == AppKeys.ChangeScreen3:
                self.activeState = GameState.TestingApp
            if last_state != self.activeState:
                # Fetch (possibly newly constructed) app and let it adapt the
                # window to its own resolution.
                self.activeScreen = self.stateSwitcher.get_state(self.activeState)
                self.activeScreen.reset_resolution()

    def update(self, delta):
        # Forward the frame time delta to whichever app is active.
        self.activeScreen.update(delta)

    def draw(self,):
        self.activeScreen.draw()

    def is_running(self):
        # False once a pygame.QUIT event has been seen.
        return self.running
| true
|
4b32154f1f0fd3a8598c61f26d510ca1b5575df9
|
Python
|
cryptolovers-tipbots/words2binary_converter
|
/Words2Binary.py
|
UTF-8
| 1,700
| 3.4375
| 3
|
[] |
no_license
|
# Auto converter: words to binary (hex support planned, never implemented).
s = input()  # a single word, or a space-separated string of 8-bit codes
def binary(text):
    """Convert letters to 8-bit ASCII binary codes, or binary back to letters.

    If *text* contains a '1' it is treated as space-separated 8-bit codes and
    decoded back to letters; otherwise each letter (A-Z, a-z) is encoded and
    the codes are joined with spaces.

    Raises KeyError for characters outside A-Z/a-z (e.g. spaces inside a
    sentence), matching the original behaviour.

    Improvements: the hand-typed 52-entry literal table is now derived from
    ord() (same values, no transcription risk), and the parameter no longer
    shadows the builtin `input`.
    """
    letters = [chr(c) for c in list(range(65, 91)) + list(range(97, 123))]
    encrypt = {ch: format(ord(ch), '08b') for ch in letters}
    decrypt = {code: ch for ch, code in encrypt.items()}
    if '1' in text:
        return ''.join(decrypt[code] for code in text.split())
    return ' '.join(encrypt[ch] for ch in text)
print(binary(s))
# Takes a single word (upper- or lower-case) and prints the letters in
# binary; a binary string is converted back into the original word.
# Original author's learning notes: first time working with a dict. Would
# like to support full sentences, but the space character is not in the
# lookup table, so sentences raise a KeyError on the spacing.
| true
|
07ead8857ccba543a15f3123d5f6a190201148a7
|
Python
|
maina2998/python_test.py
|
/python_test.py
|
UTF-8
| 1,048
| 4.03125
| 4
|
[] |
no_license
|
y = []
x = [100, 110, 120, 130, 140, 150]
# BUG fixed: the original tested `if d * 5:` (truthy for every non-zero d)
# and did `y.append(x)`, appending the whole list each iteration, then
# printed `x`. Presumed intent was to collect the multiples of 5 into y —
# TODO confirm with the author.
for d in x:
    if d % 5 == 0:
        y.append(d)
print(y)
def divisible_by_three(n):
    """Print, for each number in iterable *n*, whether it is divisible by 3.

    BUG fixed: the original rebound `n = 10` inside the loop, pointlessly
    clobbering the parameter on every iteration (it had no effect on the
    already-started iteration, but was dead, confusing code).
    """
    for x in n:
        if x % 3 == 0:
            print("{} is divisible by three".format(x))
        else:
            print("{} is not divisible by three".format(x))
y = []

def flatten_the_lists(l):
    """Return a flat list of all leaf items in an arbitrarily nested list.

    Non-list input is wrapped in a single-element list.

    BUG fixed: the original had an empty `else:` branch (a syntax error),
    extended with `[l]` unconditionally, and never recursed, so it could not
    flatten anything.
    """
    if not isinstance(l, list):
        return [l]
    flist = []
    for item in l:
        flist.extend(flatten_the_lists(item))
    return flist

x = [[1, 2], [3, 4], [5, 6]]
flist = flatten_the_lists(x)
print(flist)
def divisible_by_seven():
    """Print, for every number in 100..199, whether it is divisible by 7.

    BUGS fixed: the original's first line was the bare expression
    `x in range(100,200)` (a no-op membership test on an undefined/global x)
    instead of driving the loop, and the condition was inverted — it printed
    "is divisible by 7" when `y % 7 != 0`. Message strings are unchanged.
    """
    for y in range(100, 200):
        if y % 7 == 0:
            print("{} is divisible by 7 ".format(y))
        else:
            print("{} not divisible by 7".format(y))
def greetings(*args):
    """Greet each student by name and age.

    Greets the student dicts passed as *args*, falling back to the built-in
    class list when called with no arguments.

    BUG fixed: the original iterated over the function object itself
    (`for student in greetings:` — a TypeError) and read `.name`/`.year`
    attributes off the argument tuple, which do not exist; no student record
    even contains a birth year. Rewritten to the presumed intent — greeting
    the listed students — TODO confirm with the author.
    """
    students = [{"age": 19, "name": "Eunice"},
                {"age": 21, "name": "Agnes"},
                {"age": 18, "name": "Teresa"},
                {"age": 22, "name": "Asha"}]
    for student in (args or students):
        print(f"Hello {student['name']}, you are {student['age']} years old")

greetings()
| true
|
00502f082d3df5472daae4246cdbe3902234482f
|
Python
|
KimYeong-su/Baekjoon
|
/python/1074_Z.py
|
UTF-8
| 1,335
| 3.359375
| 3
|
[] |
no_license
|
'''
1074_Z (Baekjoon problem 1074)

Solution notes (translated from Korean):
- Use the remainders of col and row within each half-size square to decide
  how much must be added to the running index.
- Key insight: starting from the largest square, the index contribution at
  each level is a multiple of a power of 4 — each sub-square contributes an
  amount proportional to the square of its side length.
- Since the row contribution doubles per level and the col contribution
  grows by one bit per level, the answer can also be read straight off the
  binary digits of r and c interpreted base-4 (see the commented 2-line
  version at the bottom of the file).
'''
def z_potition(size, tr, tc, result):
    """Walk the Z-order curve of a 2**size x 2**size grid and store the
    visit index of cell (tr, tc) in the module-level global `answer`.

    Quadrants at each level are numbered 0..3 in Z order (top-left,
    top-right, bottom-left, bottom-right); each contributes
    quadrant * 4**(size-1) to the final index.
    """
    global answer
    if size == 0:
        answer = result
        return
    half = 2 ** (size - 1)
    # Which quadrant holds (tr, tc)? Row selects the high bit, col the low.
    quadrant = 2 * (tr >= half) + (tc >= half)
    z_potition(size - 1, tr % half, tc % half,
               result + 4 ** (size - 1) * quadrant)
# Grid is 2**N x 2**N; report the Z-order visit index of cell (r, c).
N, r, c = map(int,input().split())
answer = 0
z_potition(N, r, c, 0)
print(answer)

# Alternative 2-line solution: read the binary digits of c and r as base-4
# numbers, which interleaves the bits — exactly the Z-order index.
# n,r,c=map(int,input().split())
# print(int(f'{c:b}',4)+2*int(f'{r:b}',4))
| true
|
9791deeec7c9c5bef473bdb48113f4d4d71d9fe4
|
Python
|
donadivarun/MLCMS
|
/MLCMS-master/gui2.py
|
UTF-8
| 3,659
| 2.859375
| 3
|
[] |
no_license
|
import wx
import sys
import system as model
import json
def initialize_system(file_name):
    """Build a model.System from a JSON scenario file.

    The scenario must define 'rows', 'cols', 'pedestrians', 'obstacles' and
    a single 'target'; every position is a [col, row] pair.
    """
    with open(file_name) as scenario:
        spec = json.load(scenario)

    system = model.System(spec['cols'], spec['rows'])

    for col, row in spec['pedestrians']:
        system.add_pedestrian_at((col, row))
    for col, row in spec['obstacles']:
        system.add_obstacle_at((col, row))

    col, row = spec['target']
    target = system.add_target_at((col, row))
    #model.evaluate_cell_distance(system, target)
    return system
class Frame(wx.Frame):
    """Main window: a drawing canvas plus a column of control buttons."""

    def __init__(self, parent, system):
        wx.Frame.__init__(self, parent)
        self.system = system      # cellular-automaton model to render
        self.cell_size = 10       # pixel edge length of one grid cell
        self.InitUI()

    def InitUI(self):
        """Create canvas and button panels and lay them out horizontally."""
        self.SetTitle("Cellular Automaton")
        # +10 cells of slack leaves room for the button column and margins.
        self.SetSize((self.system.cols + 10) * self.cell_size, (self.system.rows + 10) * self.cell_size)
        self.canvas_panel = Canvas(self)
        # ButtonPanel binds to canvas_panel, so the canvas must exist first.
        self.button_panel = ButtonPanel(self)
        sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_1.Add(self.canvas_panel, 1, wx.EXPAND | wx.ALL, 0)
        sizer_1.Add(self.button_panel, 0, wx.EXPAND | wx.ALL, 1)
        self.SetSizer(sizer_1)
        self.Layout()
        # self.Centre()
class Canvas(wx.Panel):
    """Panel that renders the automaton grid, one filled rectangle per cell."""

    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0, name="Canvas"):
        super(Canvas, self).__init__(parent, id, pos, size, style, name)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.parent = parent  # the Frame; provides .system and .cell_size

    def OnSize(self, event):
        self.Refresh() # MUST have this, else the rectangle gets rendered corruptly when resizing the window!
        #event.Skip() # seems to reduce the amount of OnSize and OnPaint events generated when resizing the window

    def OnPaint(self, event):
        """Redraw every cell of the grid.

        NOTE(review): cell.state is passed to wx.Brush, so it is presumably a
        colour name/value — confirm against the model. Also note x uses
        cell.row and y uses cell.col; verify this transposition is intended.
        """
        dc = wx.PaintDC(self)
        dc.Clear()
        # print(self.parent.system.__str__())
        for row in self.parent.system.grid:
            for cell in row:
                dc.SetBrush(wx.Brush(cell.state))
                dc.DrawRectangle(cell.row * self.parent.cell_size, cell.col * self.parent.cell_size,
                                 self.parent.cell_size, self.parent.cell_size)

    def color_gui(self, event):
        # Button handler: advance the model one step, then repaint.
        # NOTE(review): calls OnPaint directly with a button event instead of
        # Refresh(); creating a wx.PaintDC outside EVT_PAINT is unsupported
        # on some platforms — confirm this works everywhere it is deployed.
        self.parent.system.update_sys()
        self.OnPaint(event)
class ButtonPanel(wx.Panel):
    """Vertical column of Start/Stop/Step buttons.

    NOTE(review): only Start is bound to a handler; Stop and Step are
    created but do nothing — confirm whether this is work in progress.
    """

    def __init__(self, parent: Frame, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0, name="ButtonPanel"):
        super(ButtonPanel, self).__init__(parent, id, pos, size, style, name)
        # Reserve the 10-cell slack that Frame.InitUI added to the width.
        self.SetSize(10*parent.cell_size, parent.system.rows*parent.cell_size)
        self.button_start = wx.Button(self, -1, "Start")
        # Start advances the automaton one step via the canvas handler.
        self.button_start.Bind(wx.EVT_BUTTON, parent.canvas_panel.color_gui)
        self.button_stop = wx.Button(self, -1, "Stop")
        self.button_step = wx.Button(self, -1, "Step")
        sizer_1 = wx.BoxSizer(wx.VERTICAL)
        sizer_1.Add(self.button_start, 1, 0)
        sizer_1.Add(self.button_stop, 1, wx.EXPAND | wx.ALL, 0)
        sizer_1.Add(self.button_step, 1, wx.EXPAND | wx.ALL, 0)
        self.SetSizer(sizer_1)
        self.Layout()
def main():
    """Create the wx application, build the GUI for the default scenario,
    and enter the event loop."""
    # file_name = input("Please enter a scenario file name: ")
    application = wx.App()
    # gui = Frame(parent= None, system=initialize_system('Scenarios/' + file_name))
    window = Frame(parent=None, system=initialize_system('Scenarios/scenario_task2.json'))
    window.Show()
    application.MainLoop()

if __name__ == '__main__':
    main()
| true
|
52b3000009ce20e2c3a9c0f234055086a168376e
|
Python
|
ek-ok/deep-cupid
|
/char_rnn.py
|
UTF-8
| 5,689
| 2.71875
| 3
|
[] |
no_license
|
import tensorflow as tf
import numpy as np
class CharRNN():
    """Character-level LSTM language model on the TensorFlow 1.x graph API
    (placeholders, tf.contrib.rnn, explicit Sessions).

    Constructing an instance builds the entire graph in one pass: one-hot
    inputs -> stacked LSTM with output dropout -> softmax over characters,
    plus cross-entropy loss and a gradient-clipped Adam optimizer. With
    predict=True the batch shape is forced to (1, 1) and the newest
    checkpoint under 'checkpoints/' is located for sampling.
    """

    def __init__(self, char_to_ind, batch_shape, rnn_size, num_layers,
                 learning_rate, grad_clip, predict=False):
        self.char_to_ind = char_to_ind      # char -> integer class id
        self.num_classes = len(char_to_ind)
        self.num_samples, self.num_chars = batch_shape
        self.rnn_size = rnn_size            # hidden units per LSTM layer
        self.num_layers = num_layers
        self.learning_rate = learning_rate
        self.grad_clip = grad_clip          # global-norm clipping threshold
        if predict:
            # Sampling feeds one character at a time.
            self.num_samples, self.num_chars = (1, 1)
            self.checkpoint = tf.train.latest_checkpoint('checkpoints')
        self.build_network()

    def build_network(self):
        """Assemble the full graph; order matters (each layer consumes
        tensors created by the previous builder)."""
        tf.reset_default_graph()
        self.build_inputs_layer()
        self.build_rnn_layer()
        self.build_outputs_layer()
        self.build_loss()
        self.build_optimizer()
        self.saver = tf.train.Saver()

    def build_inputs_layer(self):
        """build the input layer"""
        shape = (self.num_samples, self.num_chars)
        self.inputs = tf.placeholder(tf.int32, shape=shape, name='inputs')
        self.targets = tf.placeholder(tf.int32, shape=shape, name='targets')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        # One-hot encode the integer class ids for the LSTM.
        self.rnn_inputs = tf.one_hot(self.inputs, self.num_classes)

    def build_rnn_layer(self):
        """Stack num_layers LSTM cells with dropout on the outputs."""
        cell = tf.contrib.rnn.BasicLSTMCell
        cells = [cell(self.rnn_size) for _ in range(self.num_layers)]
        rnn = tf.contrib.rnn.MultiRNNCell(cells)
        rnn = tf.contrib.rnn.DropoutWrapper(rnn, output_keep_prob=self.keep_prob) # noqa E501
        self.initial_state = rnn.zero_state(self.num_samples, dtype=tf.float32)
        self.rnn_outputs, self.final_state = tf.nn.dynamic_rnn(
            rnn, self.rnn_inputs, initial_state=self.initial_state)

    def build_outputs_layer(self):
        """build the output layer"""
        # Concatenate the output of rnn_cell:
        # Example: [[1,2,3],[4,5,6]] -> [1,2,3,4,5,6]
        output = tf.concat(self.rnn_outputs, axis=1)
        # Flatten to (num_samples * num_chars, rnn_size) for the dense layer.
        x = tf.reshape(output, [-1, self.rnn_size])
        with tf.variable_scope('softmax'):
            shape = [self.rnn_size, self.num_classes]
            w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
            b = tf.Variable(tf.zeros(self.num_classes))
        self.logits = tf.matmul(x, w) + b
        self.prob_pred = tf.nn.softmax(self.logits, name='predictions')

    def build_loss(self):
        """calculate loss according to logits and targets"""
        # One-hot coding
        y_one_hot = tf.one_hot(self.targets, self.num_classes)
        y_reshaped = tf.reshape(y_one_hot, self.logits.get_shape())
        # Softmax cross entropy loss
        loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits,
                                                          labels=y_reshaped)
        self.loss = tf.reduce_mean(loss)

    def build_optimizer(self):
        """Adam with global-norm gradient clipping (mitigates exploding
        gradients in the recurrent layers)."""
        adam = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        gradients, variables = zip(*adam.compute_gradients(self.loss))
        gradients, _ = tf.clip_by_global_norm(gradients, self.grad_clip)
        self.optimizer = adam.apply_gradients(zip(gradients, variables))

    def train(self, batches, iters, keep_prob=0.5):
        """Train for *iters* batches from generator *batches*; logs loss and
        saves a checkpoint every 200 steps."""
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            new_state = sess.run(self.initial_state)
            for i in range(iters):
                x, y = next(batches)
                # Carry the LSTM state across batches (stateful training).
                feed = {self.inputs: x,
                        self.targets: y,
                        self.keep_prob: keep_prob,
                        self.initial_state: new_state}
                loss, new_state, _ = sess.run([self.loss,
                                               self.final_state,
                                               self.optimizer], feed_dict=feed)
                if i % 200 == 0:
                    print(f'step: {i} loss: {loss:.4f}')
                    self.saver.save(sess, f'checkpoints/i{i}_l{self.rnn_size}_ckpt') # noqa E501

    def sample_top_n(self, preds, top_n=5):
        """Choose top_n most possible characters in predictions"""
        # Set all values other that top_n choices to 0
        p = np.squeeze(preds)
        p[np.argsort(p)[:-top_n]] = 0
        # Normalization
        p = p / np.sum(p)
        # Randomly choose one character
        c = np.random.choice(self.num_classes, 1, p=p)[0]
        return c

    def predict(self, prime, num_char):
        """Generate *num_char* characters continuing the *prime* string,
        restoring weights from the latest checkpoint."""
        ind_to_char = {v: k for k, v in self.char_to_ind.items()}
        input_chars = [self.char_to_ind[s] for s in list(prime)]
        output_chars = []
        output_char = input_chars[-1]
        with tf.Session() as sess:
            self.saver.restore(sess, self.checkpoint)
            state = sess.run(self.initial_state)
            # Loop for inputs: warm the LSTM state up on the prime text.
            for input_char in input_chars:
                feed = {self.inputs: [[input_char]],
                        self.initial_state: state,
                        self.keep_prob: 1.}
                state = sess.run(self.final_state, feed_dict=feed)
            # Loop for prediction: feed each sampled character back in.
            for _ in range(num_char):
                feed = {self.inputs: [[output_char]],
                        self.initial_state: state,
                        self.keep_prob: 1.}
                preds, state = sess.run([self.prob_pred, self.final_state],
                                        feed_dict=feed)
                output_char = self.sample_top_n(preds, 5)
                output_chars.append(ind_to_char[output_char])
        return prime + ''.join(output_chars)
| true
|
ddcef4292b9c34a778cfcd88fd208dee31fd7b5f
|
Python
|
PaulHancock/Aegean
|
/tests/unit/test_cluster.py
|
UTF-8
| 5,404
| 2.578125
| 3
|
[
"AFL-3.0"
] |
permissive
|
#! /usr/bin/env python
"""
Test cluster.py
"""
import logging
import math
from copy import deepcopy
import numpy as np
from AegeanTools import catalogs, cluster, wcs_helpers
from AegeanTools.models import SimpleSource
from astropy.io import fits
__author__ = 'Paul Hancock'
logging.basicConfig(format="%(module)s:%(levelname)s %(message)s")
log = logging.getLogger("Aegean")
log.setLevel(logging.INFO)
def test_norm_dist():
    """Test norm_dist"""
    origin = SimpleSource()
    origin.ra = 0
    origin.dec = 0
    origin.a = 1.
    origin.b = 1.
    origin.pa = 0.

    offset = SimpleSource()
    offset.ra = 0
    offset.dec = 1/3600.
    offset.a = 1
    offset.b = 1
    offset.pa = 0.

    # distance of a source to itself must be zero
    if cluster.norm_dist(origin, origin) != 0:
        raise AssertionError()
    # one arcsec offset between unit-size circular sources
    if cluster.norm_dist(origin, offset) != 1/math.sqrt(2):
        raise AssertionError()
def test_sky_dist():
    """Test sky_dist"""
    origin = SimpleSource()
    origin.ra = 0
    origin.dec = 0

    offset = SimpleSource()
    offset.ra = 0
    offset.dec = 1/3600.

    # zero distance to itself
    if cluster.sky_dist(origin, origin) != 0.:
        raise AssertionError()
    # one arcsecond of declination offset
    if cluster.sky_dist(origin, offset) != 1/3600.:
        raise AssertionError()
def test_vectorized():
    """Test that norm_dist and sky_dist can be vectorized"""
    # random data as struct array with interface like SimpleSource
    # (fields match the attributes the distance functions read)
    X = np.random.RandomState(0).rand(20, 6)
    Xr = np.rec.array(X.view([('ra', 'f8'), ('dec', 'f8'),
                              ('a', 'f8'), ('b', 'f8'),
                              ('pa', 'f8'),
                              ('peak_flux', 'f8')]).ravel())

    def to_ss(x):
        "Convert numpy.rec to SimpleSource"
        out = SimpleSource()
        for f in Xr.dtype.names:
            setattr(out, f, getattr(x, f))
        return out

    for dist in [cluster.norm_dist, cluster.sky_dist]:
        x0 = Xr[0]
        # calculate distance of x0 to all of Xr with vectorized operations:
        dx0all = dist(x0, Xr)
        for i, xi in enumerate(Xr):
            dx0xi = dist(x0, xi)
            # check equivalence between pairs of sources and vectorized
            # (atol=0 demands exact agreement up to relative tolerance)
            if not np.isclose(dx0xi, dx0all[i], atol=0):
                raise AssertionError()
            # check equivalence between SimpleSource and numpy.record
            if not np.isclose(dx0xi, dist(to_ss(x0), to_ss(xi)), atol=0):
                raise AssertionError()
def test_pairwise_elliptical_binary():
    """Test pairwise_elliptical_binary distance"""
    ref = SimpleSource()
    ref.ra = 0
    ref.dec = 0
    ref.a = 1.
    ref.b = 1.
    ref.pa = 0.

    near = deepcopy(ref)
    near.dec = 1/3600.

    far = deepcopy(ref)
    far.dec = 50

    # only ref<->near are within eps of each other; the diagonal is False
    mat = cluster.pairwise_ellpitical_binary([ref, near, far], eps=0.5)
    expected = [[False, True, False],
                [True, False, False],
                [False, False, False]]
    if not np.all(mat == expected):
        raise AssertionError()
def test_regroup():
    """Test that regroup does things"""
    # a list of non-source objects should raise an AttributeError
    try:
        cluster.regroup([1], eps=1)
    except AttributeError as e:
        print(f"Correctly raised error {type(e)}")

    catalogue = 'tests/test_files/1904_comp.fits'

    # one-arcsecond eps should yield 51 groups
    groups = cluster.regroup(catalogue, eps=1/3600.)
    if len(groups) != 51:
        raise AssertionError(
            "Regroup with eps=1/3600. gave {0} groups instead of 51"
            .format(len(groups)))

    # a huge eps should merge everything into a single group
    groups = cluster.regroup(catalogue, eps=10, far=1000)
    if len(groups) != 1:
        raise AssertionError(
            "Regroup with eps=10, far=1000. gave {0} groups instead of 51"
            .format(len(groups)))
def test_regroup_dbscan():
    """Check regroup_dbscan on the reference catalogue (expects 51 groups)."""
    table = catalogs.load_table('tests/test_files/1904_comp.fits')
    srccat = catalogs.table_to_source_list(table)
    groups = cluster.regroup_dbscan(srccat, eps=1/3600.)
    if len(groups) != 51:
        raise AssertionError(
            "Regroup_dbscan with eps=1/3600. gave {0} groups instead of 51"
            .format(len(groups)))
    return
def test_resize_ratio():
    """Test that resize works with ratio"""
    # Load a table
    table = catalogs.load_table('tests/test_files/1904_comp.fits')
    srccat = catalogs.table_to_source_list(table)
    original = deepcopy(srccat[0])
    resized = cluster.resize(deepcopy(srccat), ratio=1)
    # a ratio of 1 must leave the source axes unchanged
    unchanged = ((original.a - resized[0].a < 1e-9) and
                 (original.b - resized[0].b < 1e-9))
    if not unchanged:
        raise AssertionError("resize of 1 is not identity")
    return
def test_resize_psfhelper():
    """Test that resize works with psfhelpers"""
    # Load a table
    table = catalogs.load_table('tests/test_files/1904_comp.fits')
    srccat = catalogs.table_to_source_list(table)
    # make a psfhelper from the matching image header
    head = fits.getheader('tests/test_files/1904-66_SIN.fits')
    psfhelper = wcs_helpers.WCSHelper.from_header(head)
    original = deepcopy(srccat[0])
    resized = cluster.resize(deepcopy(srccat), psfhelper=psfhelper)
    print(original.a, resized[0].a)
    # resizing against the native psf must leave the axes unchanged
    unchanged = ((original.a - resized[0].a < 1e-9) and
                 (original.b - resized[0].b < 1e-9))
    if not unchanged:
        raise AssertionError("resize with psfhelper is not identity")
    return
if __name__ == "__main__":
    # introspect and run all the functions starting with 'test'
    test_names = [name for name in dir() if name.startswith('test')]
    for name in test_names:
        print(name)
        globals()[name]()
| true
|
eea41ede11f47409661461ed44a93ce333cfc195
|
Python
|
dillonhicks/ipymake
|
/branches/devel-0.2-alpha/examples/kusp/subsystems/datastreams/postprocess/headfilter.py
|
UTF-8
| 6,651
| 2.625
| 3
|
[] |
no_license
|
import filtering
import entities
import thread
import time
import inputs
import copy
import sys
class HeadFilter(filtering.Filter):
    """this is the first filter in a pipeline, and has an execution loop
    which drives the rest of the pipeline

    Merges entities from several input sources in chronological order (the
    subclass decides ordering via choose_next) and pushes them downstream.
    NOTE: Python 2 code (``except Exception, e``, backticks, iteritems).
    """
    #debug_flag = True

    def choose_next(self, entitydict):
        """entitydict is a mapping from source ids (which can be used to
        reference source objects in self.sources) to the next available entity
        for that source.
        this function examines these entities and returns the source id of the
        source to obtain the next event."""
        raise Exception("Abstract Method")

    def preprocess(self, entity):
        """any processing that needs to be done to an entity BEFORE a decision
        is made to choose the next entity. you may return None if the
        entity is to be rejected."""
        return entity

    def postprocess(self, entity):
        """any processing that needs to be done to an entity AFTER a decision
        is made to choose the next entity"""
        return entity

    def open_sources(self):
        """initialization step. create any sources here and install them
        with add_input_source. this function is called from establish_connections()
        and after it completes, each source will be individually opened"""
        pass

    def __init__(self, params):
        filtering.Filter.__init__(self, params)
        # a dictionary of named input sources. each one maps to an InputSource
        # object. the run() method iterates through all the sources and
        # sends them in chronological order to the filters
        self.sources = {}
        # a dictionary, with each key being an input source. the value
        # is another dictionary, which maps entity composite IDs from that
        # source to composite IDs in the merged namespace. if a composite id
        # is not present in this dictionary, it will be unmodified.
        self.remapping = {}
        # This dictionary maps input source names to the next available
        # entity for that input source
        self.next_entity = {}
        # guards sources/next_entity/terminate_flag against the thread
        # calling stop() while run() is iterating
        self.lock = thread.allocate_lock()
        self.terminate_flag = False

    def stop(self):
        # May be called from another thread; run() checks the flag at the
        # top of every iteration.
        self.debug("stop called")
        self.lock.acquire()
        self.terminate_flag = True
        self.lock.release()

    def run(self):
        """main execution function which drives entire pipeline.
        entities are demultiplexed from the input sources and pushed
        chronologically through the pipeline until there are no more
        entities.
        you must have first called establish_connections() before
        you can call run()"""
        # composite id of the namespace-description admin event; fetch()
        # treats entities with this cid specially (namespace merging)
        self.nsevent = self.namespace["DSTREAM_ADMIN_FAM/NAMESPACE"].get_id()
        # build the next_entity dictionary
        self.lock.acquire()
        for sourceid, source in self.sources.iteritems():
            entity = self.fetch(sourceid)
            if entity.message == entities.PIPELINE_EOF:
                self.sources[sourceid].close()
            else:
                self.next_entity[sourceid] = entity
        self.lock.release()
        # iterate, sending the earliest event down the pipeline,
        # until there are no more events
        while(True):
            self.lock.acquire()
            if self.terminate_flag or not self.next_entity:
                self.lock.release()
                break
            # subclass decides which source's pending entity goes next
            min_sourceid = self.choose_next(self.next_entity)
            try:
                ne = self.next_entity[min_sourceid]
                ne = self.postprocess(ne)
                # postprocess may return None to drop the entity
                if ne:
                    self.send(ne)
            except Exception, e:
                # notify downstream before propagating the failure
                self.info("caught exception "+`e`)
                self.send(entities.PipelineError())
                raise
            # refill the slot for the source we just consumed from
            nextent = self.fetch(min_sourceid)
            if nextent.message == entities.PIPELINE_EOF:
                self.info("Finished reading data from source "+`min_sourceid`)
                self.sources[min_sourceid].close()
                del self.sources[min_sourceid]
                del self.next_entity[min_sourceid]
            else:
                self.next_entity[min_sourceid] = nextent
            self.lock.release()
        # all done. send a pipelineend message
        self.send(entities.PipelineEnd())

    def add_input_source(self, source):
        """add an input source object for this pipeline to read entities from"""
        self.lock.acquire()
        sourceid = source.get_name()
        self.sources[sourceid] = source
        self.remapping[sourceid] = {}
        source.open()
        self.lock.release()

    def establish_connections(self):
        """create and open all input sources.
        this has to be a separate step from run(), because
        otherwise there will be no way to construct the dependencies"""
        pass

    def get_dependencies(self):
        """return names of pipelines that this depends on for data"""
        deps = []
        self.lock.acquire()
        for source in self.sources.values():
            d = source.get_dependency()
            if d:
                deps.append(d)
        self.lock.release()
        return deps

    def process(self, entity):
        # HeadFilter originates data; it never receives entities from an
        # upstream filter.
        raise Exception("process should never be called on a head filter")

    def fetch(self, sourceid):
        """fetch an entity from a named input source, and do some preprocessing
        before being merged.

        Loops until it has an entity worth returning: rejected entities
        (preprocess -> None) and namespace events that add nothing new are
        skipped. EOF/error marker entities are returned as-is."""
        #self.namespace.check_ids()
        while True:
            try:
                entity = self.sources[sourceid].read()
            except Exception:
                self.info("ERROR: Failed to read entity from input source "+`sourceid`)
                self.send(entities.PipelineError())
                raise
            if (entity.message == entities.PIPELINE_EOF or
                    entity.message == entities.PIPELINE_ERROR):
                # this is a special entity that specifies end-of-file. it gets
                # passed along so all filters and outputs can close themseleves
                return entity
            entity = self.preprocess(entity)
            if not entity:
                continue
            cid = entity.get_cid()
            if cid == self.nsevent:
                # this is a namespace event. merge the namespace into our own,
                # renumbering new families/entities as necessary. any renumbering
                # done will be noted in the source-specific remapping dictionary
                # so entities can automatically be renumbered as they come in
                conflicts, new_ns = self.namespace.merge(entity.get_extra_data())
                if conflicts:
                    pass
                    #print "CONFLICTS",conflicts
                for old_cid, new_cid in conflicts.iteritems():
                    #print sourceid, old_cid, new_cid
                    self.remapping[sourceid][old_cid] = new_cid
                # if our namespace wasn't changed, no point in passing along
                # data. return None, which means 'try again'
                if not new_ns.values():
                    continue
                entity = entity.change_extra_data(new_ns)
            # the cid is still the value read in from the input source
            # if during merging this needs to be changed, the cid will be
            # in the remapping dictionary
            if cid in self.remapping[sourceid]:
                #print self.remapping[sourceid]
                entity = entity.change_cid(self.remapping[sourceid][cid])
            # entities can look up their own information in the namespace.
            # this value is cleared before it is serialized to not waste space
            try:
                entity.set_namespace(self.namespace)
            except Exception, e:
                self.info("FAIL")
                raise
            #print entity
            return entity
| true
|
6ab8b5bd3125eaf1e9ee191281a0f58a84de60d4
|
Python
|
SvenGronauer/successful-ingredients-paper
|
/sipga/common/online_mean_std.py
|
UTF-8
| 2,115
| 3.21875
| 3
|
[] |
no_license
|
import numpy as np
import torch
class OnlineMeanStd(torch.nn.Module):
    """Track the running mean and standard deviation of inputs using
    Welford's incremental formula.

    Statistics are stored as non-trainable nn.Parameters so they follow the
    module (device placement, state_dict) without receiving gradients.
    """

    def __init__(self, epsilon=1e-5, shape=()):
        super().__init__()
        self.mean = torch.nn.Parameter(torch.zeros(*shape), requires_grad=False)
        self.std = torch.nn.Parameter(torch.ones(*shape), requires_grad=False)
        self.count = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.eps = epsilon       # guards against division by a zero std
        self.bound = 10          # clamp range used when clip=True
        # running sum of squared deviations (the "M2" of Welford's method)
        self.S = torch.nn.Parameter(torch.zeros(*shape), requires_grad=False)

    @staticmethod
    def _convert_to_torch(x):
        """Coerce numpy arrays and (numpy) floats into torch tensors."""
        if isinstance(x, np.ndarray):
            return torch.from_numpy(x)
        if isinstance(x, (float, np.floating)):
            return torch.tensor([x])  # wrap so the tensor has shape (1,)
        return x

    def forward(self, x, subtract_mean=True, clip=False):
        """Normalize *x* by the tracked statistics; numpy in -> numpy out."""
        came_as_numpy = isinstance(x, np.ndarray)
        x = self._convert_to_torch(x)
        assert x.shape[-1] == self.mean.shape[-1], \
            f'got shape={x.shape} but expected: {self.mean.shape}'
        denominator = self.std + self.eps
        if subtract_mean:
            normalized = (x - self.mean) / denominator
        else:
            normalized = x / denominator
        if clip:
            normalized = torch.clamp(normalized, -self.bound, self.bound)
        if came_as_numpy:
            return normalized.numpy()
        return normalized

    def update(self, x) -> None:
        """Fold one 1-D observation into the running statistics."""
        x = self._convert_to_torch(x)
        assert len(x.shape) == 1, 'Not implemented for dim > 1.'
        self.count.data += 1
        delta = x - self.mean
        new_mean = self.mean + delta / self.count
        new_S = self.S + delta * (x - new_mean)
        # nn.Parameters cannot be updated directly, must use .data instead
        self.mean.data = new_mean
        self.std.data = torch.sqrt(new_S / self.count)
        self.S.data = new_S
| true
|
a3d0d6d3c2a5dbcea1ea61a641309af3c2db2502
|
Python
|
clpetrie/nuclear
|
/notes/invertP/invertexpm2r.py
|
UTF-8
| 815
| 2.734375
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt
import numpy as np
import random as ran
from math import pi
# Monte-Carlo check of inverse-transform sampling for P(r) = 2*exp(-2r):
# draw u ~ Uniform(0, 0.5), map r = -ln(2u)/2, histogram the samples and
# overlay the analytic density. (Python 2 script: uses print statements.)
num=100000   # number of samples to draw
rmin=0.0
rmax=10.0
a=0.0
b=0.5 #max value of P
nbin=50      # number of histogram bins over [rmin, rmax]
ranr=[]      # sampled r values
r=[]         # bin centres
mybin=[]     # bin edges
for i in range(0,num):
    # inverse of the CDF-complement u = exp(-2r)/2
    temp=0.5*np.log(1/(2*ran.uniform(a,b))) #P=2*exp(-2r)
    ranr.append(temp)
step=(rmax-rmin)/nbin
for i in range(0,nbin+1):
    mybin.append(i*step)
# NOTE(review): samples beyond rmax fall outside the bins and are silently
# dropped by np.histogram — with this distribution that tail is ~2e-9 of
# the mass, so the normalization below is effectively unaffected.
P,edges=np.histogram(ranr,bins=mybin)
#P=P/(float(np.sum(P))*(edges[1]-edges[0])) #normalize P to add to 1
#for i in range(0,len(edges)-1):
#    r.append(0.5*(edges[i+1]+edges[i]))
P=P/(float(np.sum(P))*(mybin[1]-mybin[0])) #normalize P to add to 1
for i in range(0,len(mybin)-1):
    r.append(0.5*(mybin[i+1]+mybin[i]))
plt.plot(r,P,linewidth=3)
print r
# overlay the analytic density for comparison
r=np.arange(rmin,rmax,0.05)
plt.plot(r,2.0*np.exp(-2*r),'r',linewidth=2)
#plt.xlim(rmin,rmax)
plt.show()
| true
|
d7b53b50a7fff94d3e480da4144cc43eb6b6e777
|
Python
|
ibrahimsha23/prak_algo
|
/binary_search_algo.py
|
UTF-8
| 1,428
| 4.21875
| 4
|
[] |
no_license
|
import random
def generate_random_list():
    """Return 34 distinct random integers drawn from range(45), printing them."""
    values = random.sample(range(45), 34)
    print("Random List is - {0}".format(values))
    return values
def sort_list(rlist):
    """Insertion-sort *rlist* in place (ascending), print it, and return it.

    Improvement: the original swapped the key downward one pair at a time
    (two list writes per comparison); this uses the standard shift-and-place
    formulation — shift larger elements right, then write the key once.
    """
    for i in range(1, len(rlist)):
        key = rlist[i]
        position = i
        while position > 0 and rlist[position - 1] > key:
            rlist[position] = rlist[position - 1]
            position -= 1
        rlist[position] = key
    print("Sorted List is - {0}".format(rlist))
    return rlist
def binary_search(rlist, fval):
    """Search sorted *rlist* for *fval*; print the outcome and return True/False.

    BUG fixed: the original compared with `is` / `is not` (object identity),
    which only happens to work for CPython's small cached ints (-5..256) and
    silently fails for larger values. Use `!=` / `==` instead.
    Also added: an empty list no longer raises IndexError.
    """
    if not rlist:
        print("{0} value does not exists".format(fval))
        return False
    # Narrow the candidate slice until one element remains.
    while len(rlist) != 1:
        mid = round(len(rlist) / 2)
        lower, upper = rlist[:mid], rlist[mid:]
        if lower[-1] >= fval:
            rlist = lower
        elif upper[-1] >= fval:
            rlist = upper
        else:
            # fval is larger than everything in the list
            print("{0} value does not exists".format(fval))
            return False
    print(rlist[0])
    if rlist[0] == fval:
        print("{0} value exists".format(fval))
        return True
    print("{0} value does not exists".format(fval))
    return False
# Demo driver: build, sort, and interactively search a random list.
rlist = generate_random_list()
sorted_rlist = sort_list(rlist)
search_val = int(input("Enter the search value: "))  # ValueError on non-integer input
binary_search(sorted_rlist, search_val)
| true
|
0d467a899abc4604bb898045005c4f6a2c953fb0
|
Python
|
BohdanKryven/Python-Orion-basic-
|
/homework_14_decorators_practice/2_task.py
|
UTF-8
| 787
| 3.359375
| 3
|
[] |
no_license
|
class WrongType(Exception):
    """Raised when a decorated call receives or produces the wrong types."""
    pass
class DecoratorTypeR:
    """Decorator that enforces the types of three arguments and the return.

    Usage: ``@DecoratorTypeR(type_a, type_b, type_c, return_type)``. When any
    check fails, the wrapped call returns the string
    "Wrong type. Rewrite please" instead of raising.
    """

    def __init__(self, arg_1, arg_2, arg_3, arg_4):
        # Expected types: arg_1..arg_3 for the three parameters,
        # arg_4 for the return value.
        self.arg_1 = arg_1
        self.arg_2 = arg_2
        self.arg_3 = arg_3
        self.arg_4 = arg_4

    def __call__(self, func):
        def wrap(a, b, c):
            # BUG fixed: the original evaluated func(a, b, c) once inside the
            # isinstance check and again for the return value, running the
            # wrapped function twice (duplicating any side effects). It is
            # now called exactly once.
            if (isinstance(a, self.arg_1) and isinstance(b, self.arg_2)
                    and isinstance(c, self.arg_3)):
                result = func(a, b, c)
                if isinstance(result, self.arg_4):
                    return result
            return "Wrong type. Rewrite please"
        return wrap
# Demo: arguments must be (int, float, int) and the result a float;
# 7 + 1.2 + 4 == 12.2 (a float), so this prints 12.2.
@DecoratorTypeR(int, float, int, float)
def func_(a, b, c):
    return sum([a, b, c])
print(func_(7, 1.2, 4))
| true
|
ab2bbd2026d9e3fe323a2dc3b818c4e26eb0dcef
|
Python
|
wattaihei/ProgrammingContest
|
/AtCoder/ABC-B/086probB.py
|
UTF-8
| 128
| 3.0625
| 3
|
[] |
no_license
|
# Concatenate the two input tokens and report whether the result is a
# perfect square (squares of 0..1000 cover the largest possible value).
a, b = map(str, input().split())
X = int(a+b)
ans = 'No'
for x in range(1001):
    if x**2 == X:
        ans = 'Yes'
print(ans)
| true
|
3685311cffcce1a0c2491daf109d624eeb457c58
|
Python
|
Kingdon065/Replace
|
/Color/color.py
|
UTF-8
| 931
| 2.859375
| 3
|
[] |
no_license
|
#! python3
# _*_ coding: utf-8 _*_
from colorama import init, Fore
init(autoreset=False)
class Colored:
    """Wrap text in colorama bright foreground colour codes, resetting after."""

    def _wrap(self, colour, s):
        # Helper: colour code + text + reset, so styling never leaks.
        return colour + s + Fore.RESET

    # foreground: red, background: default
    def red(self, s):
        return self._wrap(Fore.LIGHTRED_EX, s)

    # foreground: green, background: default
    def green(self, s):
        return self._wrap(Fore.LIGHTGREEN_EX, s)

    # foreground: yellow, background: default
    def yellow(self, s):
        return self._wrap(Fore.LIGHTYELLOW_EX, s)

    # foreground: white, background: default
    def white(self, s):
        return self._wrap(Fore.LIGHTWHITE_EX, s)

    # foreground: blue, background: default
    def blue(self, s):
        return self._wrap(Fore.LIGHTBLUE_EX, s)

    # foreground: cyan, background: default
    def cyan(self, s):
        return self._wrap(Fore.LIGHTCYAN_EX, s)

    # foreground: magenta, background: default
    def magenta(self, s):
        return self._wrap(Fore.LIGHTMAGENTA_EX, s)
| true
|
0c4198d0c08425db431d48b058d8caf814226ef1
|
Python
|
gannaramu/LeetCode-1
|
/python/140_Word_Break_II.py
|
UTF-8
| 1,415
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
"""
Given a non-empty string s and a dictionary wordDict
containing a list of non-empty words, add spaces in s to
construct a sentence where each word is a valid dictionary word.
Return all such possible sentences.
Note:
The same word in the dictionary may be reused multiple times in the segmentation.
You may assume the dictionary does not contain duplicate words.
Example 1:
Input:
s = "catsanddog"
wordDict = ["cat", "cats", "and", "sand", "dog"]
Output:
[
"cats and dog",
"cat sand dog"
]
"""
class Solution:
    def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:
        """Return every sentence formed by splitting s into words of wordDict.

        :type s: str
        :type wordDict: List[str]
        :rtype: List[str]
        """
        wordSet = set(wordDict)
        from collections import defaultdict
        memo = defaultdict(list)

        def DFS(s):
            """Top-down DFS with memoization."""
            # FIX: the original docstring ended with four quotes (`""""`),
            # leaving an unterminated string literal - a SyntaxError that
            # made the whole file uncompilable.
            if not s:
                # Sentinel: an empty suffix contributes "no trailing text".
                return [None]
            if s in memo:
                return memo[s]
            for endIndex in range(1, len(s)+1):
                word = s[:endIndex]
                if word in wordSet:
                    for sentence in DFS(s[endIndex:]):
                        memo[s].append(word + (' ' + sentence if sentence else ''))
            return memo[s]

        DFS(s)
        return memo[s]
| true
|
10260e79086a7c836e91b11e541b21d38a755d98
|
Python
|
avi9839/document-converter
|
/txt2doc.py
|
UTF-8
| 458
| 2.59375
| 3
|
[] |
no_license
|
from docx import Document
import re
import os
path = '/home/tusharsk/Desktop/game_is_on'
direct = os.listdir(path)
# Convert every text file in `path` into a .docx with the filename as heading.
for i in direct:
    document = Document()
    document.add_heading(i, 0)
    # Build the source path from `path` so the two cannot drift apart.
    myfile = open(os.path.join(path, i)).read()
    myfile = re.sub(r'[^\x00-\x7F]+|\x0c',' ', myfile) # remove all non-XML-compatible characters
    p = document.add_paragraph(myfile)
    # FIX: the original concatenated '...game_is_on' + i + '.docx' without a
    # path separator, writing "game_is_onNAME.docx" into Desktop/ instead of
    # into the game_is_on directory.
    document.save(os.path.join(path, i + '.docx'))
| true
|
de0b738356aa1a45510eedba72ab78386f8952ed
|
Python
|
allenmattp/automate
|
/ch12/tickerScraper.py
|
UTF-8
| 2,249
| 2.84375
| 3
|
[] |
no_license
|
import bs4, requests
def getQuote(ticker):
    """Scrape last price, bid, ask and market cap for `ticker` from Yahoo
    Finance and print them.

    NOTE(review): the CSS selectors below are copied from the page's
    generated class names and will break whenever Yahoo changes its markup;
    the IndexError fallback exists precisely for that case.
    """
    site = "https://finance.yahoo.com/quote/" + ticker
    res = requests.get(site)
    res.raise_for_status() # make sure yahoo is talking to us
    # collect selections from site
    soup = bs4.BeautifulSoup(res.text, "html.parser")
    last_price = soup.select("#quote-header-info > div.My\(6px\).Pos\(r\).smartphone_Mt\(6px\) > div.D\(ib\).Va\(m\).Maw\(65\%\).Ov\(h\) > div > span.Trsdu\(0\.3s\).Fw\(b\).Fz\(36px\).Mb\(-4px\).D\(ib\)")
    bid = soup.select("#quote-summary > div.D\(ib\).W\(1\/2\).Bxz\(bb\).Pend\(12px\).Va\(t\).ie-7_D\(i\).smartphone_D\(b\).smartphone_W\(100\%\).smartphone_Pend\(0px\).smartphone_BdY.smartphone_Bdc\(\$seperatorColor\) > table > tbody > tr:nth-child(3) > td.Ta\(end\).Fw\(600\).Lh\(14px\) > span")
    ask = soup.select("#quote-summary > div.D\(ib\).W\(1\/2\).Bxz\(bb\).Pend\(12px\).Va\(t\).ie-7_D\(i\).smartphone_D\(b\).smartphone_W\(100\%\).smartphone_Pend\(0px\).smartphone_BdY.smartphone_Bdc\(\$seperatorColor\) > table > tbody > tr:nth-child(4) > td.Ta\(end\).Fw\(600\).Lh\(14px\) > span")
    cap = soup.select("#quote-summary > div.D\(ib\).W\(1\/2\).Bxz\(bb\).Pstart\(12px\).Va\(t\).ie-7_D\(i\).ie-7_Pos\(a\).smartphone_D\(b\).smartphone_W\(100\%\).smartphone_Pstart\(0px\).smartphone_BdB.smartphone_Bdc\(\$seperatorColor\) > table > tbody > tr:nth-child(1) > td.Ta\(end\).Fw\(600\).Lh\(14px\) > span")
    try:
        print(f"{ticker.upper()}:\nLast price: ${last_price[0].text.strip()}\n"
              f"Bid: ${bid[0].text.strip()}\n"
              f"Ask: ${ask[0].text.strip()}\n"
              f"Market Cap: ${cap[0].text.strip()}")
    except IndexError:
        # if a ticker doesn't have data, skip it
        print(f"Issue with {ticker}... Skipping")
def getSymbols(file):
    """Read a whitespace-separated symbols file.

    Returns one entry per line, each entry being the list of tokens on that
    line (so ticker_list[n][0] is the n-th symbol). Blank lines become [].
    The original's manual readline loop (plus a redundant f.close() inside
    the `with` block) is replaced by iterating the file directly.
    """
    with open(file, "r") as f:
        return [line.split() for line in f]
if __name__ == '__main__':
    # symbols.txt includes all symbols on file with SEC
    ticker_list = getSymbols("symbols.txt")
    for ticker in ticker_list:
        # Each entry is a token list; the first token is the symbol.
        getQuote(ticker[0])
        print() # line break
| true
|
2443120e25bc81bb1738c5c338252adb0902dc8e
|
Python
|
Helga-Helga/consistency-constraints-recognition
|
/lab2_max_flow/src/utils.py
|
UTF-8
| 1,755
| 3.421875
| 3
|
[] |
no_license
|
from numpy import (
dot,
zeros,
reshape,
where,
array,
clip,
)
# Precomputed squared-difference table: lookup_table[i, j] == (i - j) ** 2
# for byte values i, j. Built with a vectorized outer difference instead of
# the original 256x256 Python loop; dtype stays float64 as with zeros().
_levels = array(range(256), float)
lookup_table = (_levels.reshape(-1, 1) - _levels) ** 2
def neighbor_exists(i, j, neighbor_index, height, width):
    """Tell whether the given neighbor of pixel (i, j) lies inside the image.

    Parameters
    ----------
    i: unsigned integer
        Vertical coordinate of a pixel
    j: unsigned integer
        Horizontal coordinate of a pixel
    neighbor_index: belongs to {0, 1, 2, 3}
        Index of a neighbor (0 = left, 1 = up, 2 = right, 3 = down)
    height: unsigned integer
        Image height
    width: unsigned integer
        Image width

    Returns
    -------
    True or False
        True when the neighbor exists; False otherwise (including for an
        unknown neighbor_index)
    """
    bounds_ok = {
        0: j > 0,
        1: i > 0,
        2: j + 1 < width,
        3: i + 1 < height,
    }
    return bounds_ok.get(neighbor_index, False)
def get_neighbor_coordinate(i, j, neighbor_number):
    """Return the coordinate of the given neighbor of pixel (i, j).

    Parameters
    ----------
    i: unsigned integer
        Vertical coordinate of a pixel
    j: unsigned integer
        Horizontal coordinate of a pixel
    neighbor_number: number from {0, 1, 2, 3}
        Neighbor index (0 = left, 1 = up, 2 = right, 3 = down)

    Returns
    -------
    tuple of unsigned integers
        Coordinates of the neighbor, or (None, None) for an unknown index
    """
    offsets = {
        0: (0, -1),
        1: (-1, 0),
        2: (0, 1),
        3: (1, 0),
    }
    if neighbor_number not in offsets:
        return None, None
    di, dj = offsets[neighbor_number]
    return i + di, j + dj
| true
|
67c70519ed71add7dfe8f00c4366839f393811f8
|
Python
|
chenliang15405/python-learning
|
/study_day05-ๅคไปปๅก/ๅ็จ/01_่ฟญไปฃๅจ.py
|
UTF-8
| 1,888
| 4.96875
| 5
|
[] |
no_license
|
"""
int ็ฑปๅไธๆฏๅฏ่ฟญไปฃ็็ฑปๅ๏ผๆไปฅไธๅฏไปฅ็ดๆฅ่ฟญไปฃ
for i in range(10):
print(i)
่ฟ็งไธๆฏ้ๅint็ฑปๅ๏ผๆฏ้ๅไธไธชๅ่กจ๏ผrange(10) ๆฏๅๅปบไธไธช1-10็ๅ่กจ
ๅ
็ปใๅ่กจใๅญๅ
ธใๅญ็ฌฆไธฒ้ฝๆฏๅฏ่ฟญไปฃ็ฑปๅ๏ผๆฐๅญ็ฑปๅ้ฝๆฏไธๅฏไปฅ่ฟญไปฃ็็ฑปๅ
ๆณ่ฆๅๅปบ็ๅฏน่ฑกๅฏไปฅ่ฟญไปฃ๏ผ้่ฆ้ๅ__iter__ๆนๆณ,ๅนถไธ่ฏฅๆนๆณ้่ฆ่ฟๅไธไธชๅฏน่ฑก็ๅผ็จ๏ผ่ฟไธชๅฏน่ฑกไธญๅฟ
้กปๅ
ๅซ__iter__ ๅ__next__ๆนๆณ๏ผ
"""
from collections.abc import Iterable
print(isinstance("123", Iterable))
""" ่ชๅทฑๅฎ็ฐไธไธชๅฏ่ฟญไปฃ็ๅฏน่ฑก"""
class Classmate(object):
    """Container of classmate names, made iterable via a companion iterator.

    To make instances of a class iterable, the class must implement
    __iter__ and return an object that itself implements both __iter__
    and __next__.
    """
    def __init__(self):
        self.names = list()
    def add(self, name):
        self.names.append(name)
    def __iter__(self):
        # Hand iteration off to the dedicated iterator object.
        return CalssIterable(self)
# Iterator companion for Classmate: __next__ is what a for-loop calls to pull
# successive values; it walks the wrapped object's `names` list and raises
# StopIteration when exhausted.
class CalssIterable(object):
    def __init__(self, obj):
        self.obj = obj
        self.current_num = 0

    def __iter__(self):
        pass

    def __next__(self):
        if self.current_num >= len(self.obj.names):
            raise StopIteration
        value = self.obj.names[self.current_num]
        self.current_num += 1
        return value
# Demo: add three names, confirm the container is iterable, pull one value
# manually with next(), then iterate the rest with a for-loop.
classmate = Classmate()
classmate.add("ๅผ ไธ")
classmate.add("ๆๅ")
classmate.add("็ไบ ")
print("ๆฏๅฆๆฏๅฏ่ฟญไปฃๅฏน่ฑก๏ผ", isinstance(classmate, Iterable))
calssmate_iter = iter(classmate)
print("ๅคๆญclassmate_iterๆฏๅฆๆฏ่ฟญไปฃๅจ:", isinstance(calssmate_iter, Iterable))
print(next(calssmate_iter))
for i in classmate:
    print(i)
| true
|
4c78d3b4d2c237dc31feca8337e6a62136693e85
|
Python
|
xuelang201201/PythonCrashCourse
|
/ๆฐๆฎๅฏ่งๅ/ๅจๆ่ฏไธ่ฏ/die_visual.py
|
UTF-8
| 872
| 3.546875
| 4
|
[] |
no_license
|
"""
่ชๅจ็ๆๆ ็ญพ๏ผ่ฏทไฟฎๆน die_visual.py ๅ dice_visual.py๏ผๅฐ็จๆฅ่ฎพ็ฝฎhist.x_labelsๅผ็ๅ่กจๆฟๆขไธบไธไธช่ชๅจ็ๆ่ฟ็งๅ่กจ็ๅพช็ฏใ
ๅฆๆไฝ ็ๆๅ่กจ่งฃๆ๏ผๅฏๅฐ่ฏๅฐ die_visual.py ๅ dice_visual.py ไธญ็ๅ
ถไป for ๅพช็ฏไนๆฟๆขไธบๅ่กจ่งฃๆใ
"""
import pygal
from die import Die
# ๅๅปบไธไธชD6
die = Die()
# ๆทๅ ๆฌก้ชฐๅญ๏ผๅนถๅฐ็ปๆๅญๅจๅจไธไธชๅ่กจไธญ
results = [die.roll() for roll_num in range(1000)]
# ๅๆ็ปๆ
frequencies = [results.count(value) for value in range(1, die.num_sides + 1)]
# print(frequencies)
# ๅฏน็ปๆ่ฟ่กๅฏ่งๅ
hist = pygal.Bar()
hist.title = "Results of rolling one D6 1000 times."
hist.x_labels = [str(x) for x in range(1, die.num_sides + 1)]
hist.x_title = "Result"
hist.y_title = "Frequency of Result"
hist.add('D6', frequencies)
hist.render_to_file('die_visual.svg')
| true
|
617e96aaf22739c43607d6d7833a6e59e23e9799
|
Python
|
aarsh-sharma/Competitive-Programming
|
/CodeChef/KS2.py
|
UTF-8
| 209
| 3.78125
| 4
|
[] |
no_license
|
def sumDigit(n):
    """Return the sum of the decimal digits of non-negative n."""
    total = 0
    while n > 0:
        n, digit = divmod(n, 10)
        total += digit
    return total
# For each test case: append a digit to n (n *= 10) and increment until the
# digit sum is divisible by 10, then print the smallest such number.
t = int(input())
while(t):
    t -= 1
    n = int(input())
    n *= 10
    while (sumDigit(n) % 10 != 0):
        n += 1
    print(n)
| true
|
5849576132c0d77baae374f9467dc3f1a9065ca7
|
Python
|
isabella232/ignite-python-thin-client
|
/examples/get_and_put_complex.py
|
UTF-8
| 2,239
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from pyignite import Client
from pyignite.datatypes import CollectionObject, MapObject, ObjectArrayObject
# Demonstrates storing Python collections in Ignite with explicit type hints;
# requires a local Ignite node listening on the default thin-client port.
client = Client()
with client.connect('127.0.0.1', 10800):
    my_cache = client.get_or_create_cache('my cache')
    value = OrderedDict([(1, 'test'), ('key', 2.0)])
    # saving ordered dictionary
    type_id = MapObject.LINKED_HASH_MAP
    my_cache.put('my dict', (type_id, value))
    result = my_cache.get('my dict')
    print(result)  # (2, OrderedDict([(1, 'test'), ('key', 2.0)]))
    # saving unordered dictionary
    type_id = MapObject.HASH_MAP
    my_cache.put('my dict', (type_id, value))
    result = my_cache.get('my dict')
    print(result)  # (1, {'key': 2.0, 1: 'test'})
    type_id = CollectionObject.LINKED_LIST
    value = [1, '2', 3.0]
    my_cache.put('my list', (type_id, value))
    result = my_cache.get('my list')
    print(result)  # (2, [1, '2', 3.0])
    type_id = CollectionObject.HASH_SET
    value = [4, 4, 'test', 5.6]
    my_cache.put('my set', (type_id, value))
    result = my_cache.get('my set')
    print(result)  # (3, [5.6, 4, 'test'])
    type_id = ObjectArrayObject.OBJECT
    value = [7, '8', 9.0]
    my_cache.put(
        'my array of objects',
        (type_id, value),
        value_hint=ObjectArrayObject  # this hint is mandatory!
    )
    result = my_cache.get('my array of objects')
    print(result)  # (-1, [7, '8', 9.0])
    # Drop the demo cache so repeated runs start clean.
    my_cache.destroy()
| true
|
01b3ee1be227019b6144e997fd8bffff6616c94c
|
Python
|
lyssym/NER-toolkits
|
/keras_kit/multi/runModel.py
|
UTF-8
| 1,305
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
# _*_ coding: utf-8 _*_
from __future__ import print_function
import nltk
from .util.preprocessing import addCharInformation, createMatrices, addCasingInformation
from .neuralnets.bilstm import BiLSTM
import sys
if __name__ == '__main__':
    # CLI: runModel.py <modelPath> <inputPath> - tag a plain-text file with a
    # trained BiLSTM sequence model and print token/tag pairs to stdout.
    if len(sys.argv) < 3:
        print("Usage: python runModel.py modelPath inputPath")
        exit()
    modelPath = sys.argv[1]
    inputPath = sys.argv[2]
    # :: Read input ::
    with open(inputPath, 'r') as f:
        text = f.read()
    # :: Load the model ::
    lstmModel = BiLSTM.loadModel(modelPath)
    # :: Prepare the input: one dict per sentence holding its token list ::
    sentences = [{'tokens': nltk.word_tokenize(sent)} for sent in nltk.sent_tokenize(text)]
    addCharInformation(sentences)
    addCasingInformation(sentences)
    dataMatrix = createMatrices(sentences, lstmModel.mappings, True)
    # :: Tag the input ::
    tags = lstmModel.tagSentences(dataMatrix)
    # :: Output to stdout: token TAB one tag per model (models sorted by name) ::
    for sentenceIdx in range(len(sentences)):
        tokens = sentences[sentenceIdx]['tokens']
        for tokenIdx in range(len(tokens)):
            tokenTags = []
            for modelName in sorted(tags.keys()):
                tokenTags.append(tags[modelName][sentenceIdx][tokenIdx])
            print("%s\t%s" % (tokens[tokenIdx], "\t".join(tokenTags)))
        # Blank line between sentences.
        print("")
| true
|
eff1ff235bf5c6df5c41a9a7f7a650ef44b9dbf3
|
Python
|
ThomasZumsteg/adventofcode2015
|
/day16.py
|
UTF-8
| 1,860
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
from get_input import get_input, line_parser
import operator
def filter_aunts(aunts, props):
    """Keep only the aunts consistent with every known property.

    aunts: {aunt_number: {property_name: value}}
    props: {property_name: predicate}, predicate(value) -> bool
    An aunt survives a property either by not mentioning it at all or by
    passing its predicate.

    FIX: the original rebound its `props` parameter as the inner loop
    variable (it worked only because the outer iterator was already bound);
    the shadowing is removed and the filter written as a comprehension.
    """
    for prop, test in props.items():
        aunts = {
            number: known
            for number, known in aunts.items()
            if prop not in known or test(known[prop])
        }
    return aunts
def part1(aunts, known_pros=None):
    """Return the number of the single aunt matching the MFCSAM readout exactly."""
    readings = {
        'children': 3, 'cats': 7, 'samoyeds': 2, 'pomeranians': 3,
        'akitas': 0, 'vizslas': 0, 'goldfish': 5, 'trees': 3,
        'cars': 2, 'perfumes': 1,
    }
    # Every property is an exact-equality test in part 1.
    props = {name: (lambda v, want=amount: v == want)
             for name, amount in readings.items()}
    matches = filter_aunts(aunts, props)
    assert len(matches) == 1
    return next(iter(matches))
def part2(aunts, known_pros=None, compare=None):
    """Part 2: cats/trees read high and pomeranians/goldfish read low."""
    spec = {
        'children': (operator.eq, 3),
        'cats': (operator.gt, 7),
        'samoyeds': (operator.eq, 2),
        'pomeranians': (operator.lt, 3),
        'akitas': (operator.eq, 0),
        'vizslas': (operator.eq, 0),
        'goldfish': (operator.lt, 5),
        'trees': (operator.gt, 3),
        'cars': (operator.eq, 2),
        'perfumes': (operator.eq, 1),
    }
    props = {name: (lambda v, op=op, want=want: op(v, want))
             for name, (op, want) in spec.items()}
    matches = filter_aunts(aunts, props)
    assert len(matches) == 1
    return next(iter(matches))
def parse(line):
    """Parse one input line 'Sue N: prop: v, prop: v, ...' into (N, {prop: v})."""
    header, _, tail = line.partition(':')
    number = int(header[4:])  # skip the leading "Sue "
    traits = {}
    for item in tail[1:].split(', '):
        name, value = item.split(': ')
        traits[name] = int(value)
    return (number, traits)
if __name__ == '__main__':
    # Puzzle input: mapping aunt number -> known properties.
    aunts = dict(line_parser(get_input(day=16, year=2015), parse=parse))
    # Part 1: 103
    # Part 2: 405
    print("Part 1: {}".format(part1(aunts)))
    print("Part 2: {}".format(part2(aunts)))
| true
|
e13593c50e95055a4b5dcd06844ef470cf92137f
|
Python
|
xuming0629/xm-study
|
/xm-study/python-test/yearif.py
|
UTF-8
| 252
| 3.609375
| 4
|
[] |
no_license
|
# Leap-year check: divisible by 400, or by 4 but not by 100.
# FIX: the inner test used `y%100`, but no variable `y` exists - it raised
# NameError for any year divisible by 4; it must be `year`.
# NOTE(review): the Chinese prompt/labels are mojibake in the source (a byte
# appears lost where lines wrapped); reproduced as closely as possible.
year = int(input("่ฏท่พๅฅไธไธชๅนดไปฝ:"))
if (year % 400 == 0):
    print("ๆฏ้ฐๅนด")
else:
    if (year % 4 == 0):
        if (year % 100 == 0):
            print("ไธๆฏ้ฐๅนด")
        else:
            print("ๆฏ้ฐๅนด")
    else:
        print("ไธๆฏ้ฐๅนด")
| true
|
78c582ff6fd7db8ea5bc3359baf7d79fd8f9d16f
|
Python
|
GbotemiB/Simple-Tasks
|
/factorial.py
|
UTF-8
| 212
| 3.921875
| 4
|
[] |
no_license
|
def fact(n):
    """Return n! for non-negative n, computed recursively.

    FIX: the original base case was `n == 1`, so fact(0) recursed through
    negative numbers until RecursionError; 0! == 1 by definition.
    """
    if n <= 1:
        return 1
    return n*fact(n-1)
# Prompt for an integer and report its factorial.
factorialOf = int(input("Enter the number which you seek to find it factorial \n"))
print("Factorial of %s :" % factorialOf,fact(factorialOf))
| true
|
a8eec4e77b38e9da8579d3103a0aa55c4db6b92b
|
Python
|
pip-install-HSE/TelegramCalendarBot
|
/bot/keyboards.py
|
UTF-8
| 5,592
| 2.8125
| 3
|
[] |
no_license
|
import calendar
import re
from bot.modules.keyboard import KeyboardInline, KeyboardReply
from aiogram import types
from datetime import datetime, timedelta
import locale
import logging
def toArray(object):
    """Coerce `object` to a list: a list passes through, a str or int is
    wrapped in a one-element list, anything else becomes [].

    Uses exact type checks (`type(...) is ...`) deliberately, matching the
    original `type(x) == type([])` semantics - e.g. bool, although a
    subclass of int, still yields []. (Parameter name `object` shadows the
    builtin but is kept for interface compatibility.)
    """
    if type(object) is list:
        return object
    if type(object) is str or type(object) is int:
        return [object]
    return []
def to2Array(object, toString = False):
    # Normalise `object` into a list of rows (list of lists); with
    # toString=True every int cell is stringified, and if any cell is still
    # not a str the whole result collapses to [[]].
    # NOTE(review): the control flow is order-sensitive - `array` is rebound
    # inside the loop that enumerates it (the iterator keeps walking the old
    # list) and the inner loop variable shadows the `object` parameter - so
    # the code is kept byte-identical here.
    array = toArray(object)
    for i, data in enumerate(array):
        if type(data) == type("string") or type(data) == type(0):
            array[i] = [data]
    if toString == True:
        for i, line in enumerate(array):
            for j, object in enumerate(line):
                if type(object) == type(0):
                    array[i][j] = str(object)
                if type(array[i][j]) != type("string"):
                    # print(object, type(object))
                    array = [[]]
                    break
    return array
def reply(array, one_time_keyboard = False, resize_keyboard = True):
    """Build an aiogram ReplyKeyboardMarkup from a (possibly ragged) spec.

    FIX: the original ignored both keyword parameters and hard-coded
    one_time_keyboard=True, resize_keyboard=True; the parameters are now
    honoured. Callers relying on the accidental one_time_keyboard=True
    must pass it explicitly.
    """
    rows = to2Array(array, True)
    keyboard = types.ReplyKeyboardMarkup(
        one_time_keyboard=one_time_keyboard,
        resize_keyboard=resize_keyboard,
    )
    for row in rows:
        keyboard.row(*row)
    return keyboard
def remove():
    # Ask Telegram to hide the custom reply keyboard.
    return types.ReplyKeyboardRemove()
def force_reply():
    # Force the client UI into reply mode for the next message.
    return types.ForceReply()
def url(text, url):
    # Single-button inline keyboard that opens `url` when pressed.
    # NOTE(review): the parameter `url` shadows this function's own name.
    keyboard = types.InlineKeyboardMarkup(row_width=1)
    keyboard.add(types.InlineKeyboardButton(text=text, url=url))
    return keyboard
def inline(array, callback = None):
    # Build an InlineKeyboardMarkup from a 2-D array of button texts.
    # The optional `callback` array (same shape) supplies callback_data;
    # when omitted, the button texts double as their own callback data.
    array = to2Array(array)
    if callback != None:
        callback = to2Array(callback)
    else:
        callback = array
    # print(array, callback)
    # Row width follows the longest row in the spec.
    max_len = len(max(array, key=len))
    keyboard = types.InlineKeyboardMarkup(row_width = max_len)
    for i, line in enumerate(array):
        buttons = []
        for j, text in enumerate(line):
            button = types.InlineKeyboardButton(text = text, callback_data = callback[i][j])
            buttons.append(button)
        # print("new line")
        keyboard.add(*buttons)
    return keyboard
"""
keyboard v 1.0
:List of :Dicts where first is :Str name, last is :Str callback.
"""
menu = KeyboardReply([["ะะฐะฟะธัั", "ะกััะดะธั"],
["ะะฐะบ ะฟัะพะตั
ะฐัั?", "ะัะฐะนั-ะปะธัั"]]).get()
matches = KeyboardInline([{"<-": "prev", "->": "next"},
{"ะะตะฝั": "menu"}]).get()
back = KeyboardInline([{"ะะตะฝั": "menu"}]).get()
def month(month, year):
    # Inline calendar keyboard for picking a month: a header row with the
    # shown month, a scroll row (<< / >>), and a confirm button. Scrolling
    # backwards past the current month is disabled.
    #locale.setlocale(locale.LC_ALL, "ru")
    month_array = ['ะฏะฝะฒะฐัั', 'ะคะตะฒัะฐะปั', 'ะะฐัั', 'ะะฟัะตะปั', 'ะะฐะน', 'ะัะฝั', 'ะัะปั', 'ะะฒะณััั', 'ะกะตะฝััะฑัั', 'ะะบััะฑัั', 'ะะพัะฑัั','ะะตะบะฐะฑัั']
    today = datetime.today()
    current = datetime.today().replace(day=1, month=month, year=year)
    # +31 days from the 1st always lands in the next month; -1 day in the previous.
    next = current + timedelta(days=31)
    prev = current - timedelta(days=1)
    # month_str = current.strftime("%B")
    month_str=month_array[current.month-1]
    month_text=[f"{month_str} {current.year}"]
    scroll_text = [">>"]
    month_callback = [f"choose_month {current.strftime('%m.%Y')}"]
    scroll_callback = [f"set_month {next.strftime('%m.%Y')}"]
    # Offer "<<" only when the displayed month is after the real current month.
    if current.year > today.year or (current.year == today.year and current.month > today.month):
        scroll_text = ["<<"] + scroll_text
        logging.info(scroll_text)
        month_callback=[f"choose_month {current.strftime('%m.%Y')}"]
        scroll_callback = [f"set_month {prev.strftime('%m.%Y')}"]+scroll_callback
    logging.info(scroll_text)
    return inline(
        [
            month_text,
            scroll_text,
            ["ะัะฑัะฐัั"]
        ],
        [
            month_callback,
            scroll_callback,
            [f"choose_month {current.strftime('%m.%Y')}"]
        ]
    )
def day(month, year):
    # Inline keyboard listing the selectable days of the given month, laid
    # out in rows of at most 8; for the current month only today onwards is
    # offered. The callback encodes the day zero-padded to two digits.
    today = datetime.today()
    current = datetime.today().replace(day=1, month=month, year=year)
    first_day = 1
    # NOTE(review): calendar.mdays reports 28 for February even in leap years.
    count = calendar.mdays[current.month]
    logging.info(f"Days in month: {count}")
    if current.month == today.month and current.year == today.year:
        first_day = today.day
        count -= (today.day - 1)
    row_count = count // 8 + (1 if (count % 8) != 0 else 0)
    count_in_row = count // row_count
    keyboard = []
    for day in range(first_day, first_day + count):
        if (day - first_day) % count_in_row == 0:
            keyboard.append([])
        keyboard[-1].append(day)
    callback = [[f"set_day {button//10}{button%10}" for button in row] for row in keyboard]
    return inline(
        keyboard+["ะะตัะฝััััั ะบ ะฒัะฑะพัั ะผะตัััะฐ"],
        callback+ ["choose_month"]
    )
def time(events):
    # Inline keyboard of HH:MM-HH:MM slots built from calendar events
    # (fields match the Google Calendar API shape - TODO confirm). Only
    # events with a None/missing description are offered; returns None when
    # no slot qualifies.
    keyboard = []
    callback = []
    for event in events:
        start = event['start'].get('dateTime', event['start'].get('date'))
        start = re.findall(r"\d\d\d\d-\d\d-\d\dT(\d\d:\d\d):\d\d\+\d\d:\d\d", start)[0]
        end = event['end'].get('dateTime', event['end'].get('date'))
        end = re.findall(r"\d\d\d\d-\d\d-\d\dT(\d\d:\d\d):\d\d\+\d\d:\d\d", end)[0]
        try:
            if(event['description']==None):
                keyboard.append([f"{start}-{end}"])
                callback.append([event['id']])
        except:
            # Missing 'description' key: treat the event as free as well.
            keyboard.append([f"{start}-{end}"])
            callback.append([event['id']])
    if (keyboard==[]):
        return None
    return inline(
        keyboard+["ะะตัะฝััััั ะบ ะฒัะฑะพัั ะดะฝั"],
        callback+ ["choose_day"]
    )
| true
|
5a16b99cd35a0c482ea68dfb9a1f4ea64b7123b7
|
Python
|
gochab/project_calculate_it
|
/chapter_03.py
|
UTF-8
| 340
| 4.125
| 4
|
[] |
no_license
|
def addition():
    """Demo: print the sum of two fixed numbers (prints 90)."""
    augend, addend = 30, 60
    print(augend + addend)

addition()
def multiplication():
    """Demo: print the product of two fixed numbers (prints 1000)."""
    factor_a, factor_b = 50, 20
    print(factor_a * factor_b)

multiplication()
def height():
    """Ask the user for their height and echo it back.

    FIX: the prompt asked "What is you weight?" inside a function named
    height, and the echo said "mesure"; both user-facing strings corrected.
    NOTE(review): raw_input is Python 2 only (the rest of the file is
    py2-compatible, so it is kept); under Python 3 this raises NameError.
    """
    heigh = raw_input("What is your height?")
    print ("You measure " + heigh)

height()
| true
|
5ffee59e643d7500b9e597a144fb53a9eccb1e21
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02791/s670187435.py
|
UTF-8
| 141
| 2.96875
| 3
|
[] |
no_license
|
# Count prefix minima: position i is counted when p[i] is <= the last counted
# value (t tracks that value, i.e. the running minimum). The `t==0` clause
# only fires for the first element, assuming p values are positive -
# TODO confirm against the problem's constraints.
n=int(input())
p=list(map(int,input().split()))
t=0
ans=0
for i in range(n):
    if t>=p[i] or t==0:
        ans+=1
        t=p[i]
print(ans)
| true
|
a6e390341a306666fba93983a088eee864bfe5d4
|
Python
|
KristoferSundequist/Slider
|
/Dreamer/simple_slider.py
|
UTF-8
| 3,905
| 3.46875
| 3
|
[] |
no_license
|
from graphics import *
import numpy as np
import globals
from typing import List
##########
## GAME ##
##########
width = globals.width
height = globals.height
def clear(win):
    # Undraw every item currently on the graphics window and refresh it.
    # Iterate over a copy since undraw() mutates win.items.
    for item in win.items[:]:
        item.undraw()
    win.update()
class Slider:
    """Player-controlled disc, clamped to the window bounds."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Respawn at a random position away from the window edges.
        self.x = float(np.random.randint(50, width - 50))
        self.y = float(np.random.randint(50, height - 50))
        self.radius = 30
        self.speed = 5

    def push(self, direction):
        # Direction layout:   1
        #                   0   2
        #                     3
        deltas = {0: (-1, 0), 1: (0, -1), 2: (1, 0), 3: (0, 1)}
        dx, dy = deltas.get(direction, (0, 0))
        self.x += dx * self.speed
        self.y += dy * self.speed

    def update(self):
        # Clamp the centre so the disc never leaves the window.
        self.x = min(max(self.x, self.radius), width - self.radius)
        self.y = min(max(self.y, self.radius), height - self.radius)

    def render(self, win):
        disc = Circle(Point(self.x, self.y), self.radius)
        disc.setFill("black")
        disc.draw(win)
class Target:
    """Collectible disc spawned at a uniformly random window position."""

    def __init__(self, radius):
        self.reset()
        self.radius = radius

    def reset(self):
        # New random position anywhere in the window.
        self.x = float(np.random.randint(width))
        self.y = float(np.random.randint(height))

    def render(self, win):
        dot = Circle(Point(self.x, self.y), self.radius)
        dot.setFill("yellow")
        dot.setOutline("yellow")
        dot.draw(win)
class Enemy:
    """Hostile disc that drifts one pixel per tick toward the slider."""

    def __init__(self, radius):
        self.reset()
        self.radius = radius

    def reset(self):
        # New random position anywhere in the window.
        self.x = float(np.random.randint(width))
        self.y = float(np.random.randint(height))

    def render(self, win):
        dot = Circle(Point(self.x, self.y), self.radius)
        dot.setFill("red")
        dot.setOutline("red")
        dot.draw(win)

    def update(self, sliderx, slidery):
        # Step one pixel along each axis toward the slider's centre.
        self.x += -1 if self.x > sliderx else 1
        self.y += -1 if self.y > slidery else 1
class Game:
    """One episode of the slider game: collect targets, avoid the enemy."""
    # Observation is the six normalised coordinates from get_state().
    # Actions 0-3 map to Slider.push directions; action 4 falls through
    # push() untouched and so acts as a no-op.
    state_space_size = 6
    action_space_size = 5
    def __init__(self):
        self.s = Slider()
        self.t = Target(50)
        self.enemy = Enemy(30)
    def intersect(self, a, b):
        # Circles overlap when the centre distance is below the radius sum.
        return a.radius + b.radius > np.sqrt(np.power(a.x - b.x, 2) + np.power(a.y - b.y, 2))
    def get_state(self) -> List[float]:
        # All positions normalised to [0, 1] by the window dimensions.
        return [
            self.s.x / width,
            self.s.y / height,
            self.t.x / width,
            self.t.y / height,
            self.enemy.x / width,
            self.enemy.y / height,
        ]
    def set_game_state(self, gamestate: List[float]):
        # Inverse of get_state(): restore absolute pixel coordinates.
        self.s.x = gamestate[0] * globals.width
        self.s.y = gamestate[1] * globals.height
        self.t.x = gamestate[2] * globals.width
        self.t.y = gamestate[3] * globals.height
        self.enemy.x = gamestate[4] * globals.width
        self.enemy.y = gamestate[5] * globals.height
    def step(self, action):
        # Advance one tick: apply the action, move the enemy, score contacts.
        self.s.push(action)
        self.s.update()
        self.enemy.update(self.s.x, self.s.y)
        reward = 0
        # +0.2 for reaching the target, -1 for touching the enemy; the
        # touched entity respawns randomly.
        if self.intersect(self.s, self.t):
            reward += 0.2
            self.t.reset()
        if self.intersect(self.s, self.enemy):
            reward -= 1
            self.enemy.reset()
        return reward, self.get_state()
    # Direction layout reminder:   1
    #                            0   2
    #                              3
    def render(self, value, reward, win):
        # Redraw the scene and overlay the given value/reward readouts.
        clear(win)
        self.t.render(win)
        self.s.render(win)
        self.enemy.render(win)
        Text(Point(250, 250), value).draw(win)
        Text(Point(300, 300), reward).draw(win)
| true
|
65f00fd9f6863140b9ceff7cbd51bfe9ef9f5322
|
Python
|
liquor1014/blog
|
/blog.py
|
UTF-8
| 2,905
| 2.96875
| 3
|
[] |
no_license
|
import os
from flask import Flask, render_template, request, redirect
from flask_sqlalchemy import SQLAlchemy
#ๅๅงๅ็จๅบๅๆฐๆฎๅบ
app = Flask(__name__) #ๅๅปบๅบ็จ็จๅบๅฏน่ฑก๏ผ
basedir = os.path.abspath(os.path.dirname(__file__)) #่ทๅๅฝๅ็ฎๅฝ็็ปๅฏน่ทฏๅพ๏ผ
# print(__file__)
# print(basedir)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir,'myblog.db') #sqliteๆฐๆฎๅบ็ๆไปถๅญๆพ่ทฏๅพ
db = SQLAlchemy(app)
#ๅฎไนๅๅฎขๆ็ซ ๆฐๆฎModel็ฑป
class Blog(db.Model):
    # Blog post model: integer primary key, short title, free-form body text.
    id = db.Column(db.Integer,primary_key= True)
    title = db.Column(db.String(50))
    text = db.Column(db.Text)
    def __init__(self,title,text): # initializer
        self.title = title
        self.text = text
    def __repr__(self):
        return self.title + ":" + self.text
# db.create_all() #ๅๅปบๆฐๆฎๅบๆไปถๅๆฐๆฎๅบ่กจ๏ผไฝๅช้ๆไฝไธๆฌก
@app.route('/')
def home_blog():
    '''
    Render the home page HTML template.
    '''
    return render_template('home.html')
# Query and list all blog posts
@app.route('/blogs',methods=['GET'])
def list_blog():
    blogs = Blog.query.all()
    return render_template('list.html',blogs = blogs)
# Create a blog post
@app.route('/blogs/create',methods=['GET','POST'])
def write_blog():
    if request.method == 'GET':
        return render_template('write.html')
    else:
        title = request.form['title'] # request.form holds POSTed fields
        text = request.form['text']
        # Build a Blog object and persist it
        blog = Blog(title = title , text = text)
        db.session.add(blog)
        db.session.commit() # must commit for the insert to take effect
        return redirect('/blogs') # back to the post list after creating
# Blog detail view and deletion
@app.route('/blogs/<uid>', methods=['GET','DELETE'])
def del_inquire_blog(uid):
    if request.method == 'GET':
        blog = Blog.query.filter_by(id =uid).first_or_404()
        return render_template('query_blog.html', blog=blog)
    elif request.method == 'DELETE':
        blog = Blog.query.filter_by(id =uid).delete()
        db.session.commit()
        return 'ok'
@app.route('/blogs/update/<id>',methods = ['GET', 'POST'])
def update_note(id):
    '''
    Update a post. (NOTE(review): the parameter `id` shadows the builtin.)
    '''
    if request.method == 'GET':
        # Look up the post by ID
        blog = Blog.query.filter_by(id = id).first_or_404()
        # Render the edit-post HTML template
        return render_template('update_blog.html',blog = blog)
    else:
        # Pull the updated title and body from the request
        title = request.form['title']
        text = request.form['text']
        # Apply the update
        blog = Blog.query.filter_by(id = id).update({'title':title,'text':text})
        # Must commit for the update to take effect
        db.session.commit()
        # Redirect to the post's detail page after editing
        return redirect('/blogs/{id}'.format(id = id))
app.run()
| true
|
1cb1a8b1cddf5b396d9052564010a32d0a25529e
|
Python
|
abawgus/Trolin
|
/DONTTOUCH/TerminalDrivenBackend.py
|
UTF-8
| 1,213
| 3.15625
| 3
|
[] |
no_license
|
c=0
#class bg(self):
# def f(self):
# if bg =='AC':
# pass
#loc='url'
#class Person(name):
# """who the player is interacting with"""
# if name=='Storey':
# pass
# Main interaction loop: keep prompting until the player types EXIT.
# FIX 1: `while c is 0` compared identity, not value - use `==`.
# FIX 2: a bare "ASK" with no topic used to raise IndexError on inp[1];
#        guard on the token count.
while c == 0:
    var=raw_input("Hey welcome to your freshman year! I'm your sibb! It's my job to teach you the ropes here at Olin. You can ask me what like about Olin. Just type ASK WEST HALL to ask me about west hall. When you're ready to enter Olin, say EXIT ")
    inp=var.split(' ')
    if inp[0]=='ASK' and len(inp) > 1:
        if inp[1] == 'WEST':
            var=raw_input("West Hall is where most First Years and Sophmores live.It has a pretty good Lounge culture, with four Lounges that are frequenctly occupied. All of the rooms are doubles; you'll be in one of them. West Hall also has Laundry Room and a Kitchen ")
        if inp[1] == 'EAST':
            var=raw_input("East Hall is where most of the Juniors and Seniors live. It has doubles as well as suites. East Hall also has the Piano Room, Bike Room, and Public Saftey. Man Hall is also in East Hall. ")
        if inp[1] == 'MILAS':
            var=raw_input("Milas Hall is blahbalh blah insert whatever")
    if inp[0]=='EXIT':
        c=1
| true
|
1eb2b793f16fea465fd011b211e4e3c031920fee
|
Python
|
jornbergmans/snippets
|
/02_python/discord/raidcalendar.py
|
UTF-8
| 487
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import discord
# SECURITY NOTE(review): a live-looking Discord bot token is committed in
# source control. It should be revoked and loaded from an environment
# variable or secrets store instead of being hard-coded here.
TOKEN = 'NjAzMTQxMTQ5NjM2Njg5OTIw.XTbM3w.2VE73TyajD6kg0h2yh0KDYUAZog'
client = discord.Client()
@client.event
async def on_message(message):
    # Ignore the bot's own messages to avoid reply loops.
    if message.author.id == client.user.id:
        return
    # Respond to the !hello command by mentioning the author.
    if message.content.startswith('!hello'):
        msg = 'Hello {0.author.mention}'.format(message)
        await message.channel.send(msg)
@client.event
async def on_ready():
    # Log the bot's identity once the gateway connection is established.
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')
client.run(TOKEN)
| true
|
7622a94abc9a369c6e7e4349366666b3987e0d80
|
Python
|
waynewu6250/LeetCode-Solutions
|
/852.peak-index-in-a-mountain-array.py
|
UTF-8
| 493
| 2.90625
| 3
|
[] |
no_license
|
#
# @lc app=leetcode id=852 lang=python3
#
# [852] Peak Index in a Mountain Array
#
# @lc code=start
class Solution:
    def peakIndexInMountainArray(self, A: List[int]) -> int:
        """Return the index of the peak of a mountain array via binary search.

        Invariant: lo stays on the rising slope, hi on the falling one; the
        window shrinks until the two are adjacent, then the larger endpoint
        is the peak.
        """
        lo, hi = 0, len(A) - 1
        while hi - lo > 1:
            mid = (lo + hi) // 2
            if A[mid] < A[mid + 1]:
                lo = mid
            else:
                hi = mid
        return lo if A[lo] > A[hi] else hi
# @lc code=end
| true
|
7c008ac508328fca8a3c2151a78d75e7991c8fc0
|
Python
|
OdedMous/Imbalanced-Dataset
|
/adversarial.py
|
UTF-8
| 5,855
| 3.15625
| 3
|
[] |
no_license
|
import copy
import models
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import visualizations
def adversarial_optimizing_noise(model, org_img, true_label, target_label, regularization="l1"):
    """
    Creates an adversarial image by optimizing a noise tensor added to an original image.
    :param model: the trained model we want to fool.
    :param org_img: original image (3x32x32 tensor) the noise is added to.
    :param true_label: the gold label of org_img.
    :param target_label: the label we want the model to (mistakenly) predict.
    :param regularization: "l1" or "l2" penalty keeping the noise small; anything else disables it.
    :return: noise - the (3,32,32) noise to add to the original image,
             pred_adversarial_label - the model's last prediction on the perturbed image.
    """
    # necessary pre-processing
    target_label = torch.LongTensor([target_label])
    org_img = org_img.unsqueeze(0)  # add batch dimension to org_img

    # Init noise at zeros (gray start) and make its gradients updatable
    noise = nn.Parameter(data=torch.zeros(1, 3*32*32), requires_grad=True)

    # Check classification before modification
    pred_label = np.argmax(model(org_img).data.numpy())
    if true_label != pred_label:
        print("WARNING: IMAGE WAS NOT CLASSIFIED CORRECTLY")

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(params=[noise], lr=0.001, momentum=0.9)

    # Noise optimization
    iterations = 30000
    pred_adversarial_label = pred_label
    for iteration in range(iterations):
        optimizer.zero_grad()
        output = model(org_img + noise.view((1, 3, 32, 32)))
        loss = criterion(output, target_label)
        if regularization == "l1":
            adv_loss = loss + torch.mean(torch.abs(noise))
        elif regularization == "l2":
            adv_loss = loss + torch.mean(torch.pow(noise, 2))
        else:
            adv_loss = loss
        adv_loss.backward()
        optimizer.step()

        # FIX: the original evaluated model(org_img) here - the CLEAN image -
        # so the stop condition never saw the perturbation and the loop could
        # only stop if the clean image was already misclassified. Evaluate
        # the perturbed image instead.
        with torch.no_grad():
            adv_logits = model(org_img + noise.view((1, 3, 32, 32)))
        pred_adversarial_label = np.argmax(adv_logits.data.numpy())
        if pred_adversarial_label == target_label.item():
            break

    if iteration == iterations-1:
        print("Warning: optimization loop ran for the maximum iterations. The result may not be correct")

    return noise.view((3, 32, 32)).detach(), pred_adversarial_label
def FGSM(model, org_img, true_label):
    """
    Build an adversarial image with the Fast Gradient Sign Method.

    :param model: the trained classifier (expects a batched image tensor).
    :param org_img: original image tensor (unbatched); noise is added to it.
    :param true_label: the gold label of org_img.
    :return: (adversarial_img, noise, y_pred_adversarial) where noise is the
             signed-gradient perturbation and y_pred_adversarial is the
             model's argmax label on the perturbed image.
    """
    gold = Variable(torch.LongTensor(np.array([true_label])), requires_grad=False)
    # Batch the image and make it a leaf that accumulates gradients.
    batched = Variable(org_img.unsqueeze(0), requires_grad=True)
    # Classification before the attack (kept for parity with the original flow).
    pred_label = np.argmax(model(batched).data.numpy())
    # Forward + backward pass to obtain d(loss)/d(image).
    loss = nn.CrossEntropyLoss()(model(batched), gold)
    loss.backward()
    # Perturb by epsilon in the direction of the gradient sign, clamp to [0, 1].
    epsilon = 0.01  # 0.01 # 0.15
    perturbation = epsilon * torch.sign(batched.grad.data)
    adversarial = torch.clamp(batched.data + perturbation, 0, 1)
    # Classification after the perturbation.
    adv_label = np.argmax(model(Variable(adversarial)).data.numpy())
    return adversarial.squeeze(0), perturbation.squeeze(0), adv_label
def create_adversarial_img(path, org_img, true_label):
    """
    Create and display adversarial images with two different methods:
    (1) direct noise optimization, (2) FGSM.

    :param path: a path for the trained model.
    :param org_img: original image; noise is added to it to create the adversarial image.
    :param true_label: the gold label of org_img.
    """
    # Load trained model and freeze it in inference mode.
    trained_net = models.SimpleModel()
    trained_net.load(path=path)
    trained_net.eval()
    # show original image
    visualizations.imshow(org_img)
    # Adversarial method 1
    # Copy the model so the original trained network won't change while we create
    # the adversarial image.
    model_copy = copy.deepcopy(trained_net)
    model_copy.eval()
    # NOTE(review): true_label is hard-coded to 0 here, ignoring the function's
    # `true_label` parameter — confirm whether `true_label` was intended.
    noise, adv_label = adversarial_optimizing_noise(model_copy, org_img, true_label=0, target_label=2, regularization="l1")
    visualizations.imshow(noise)  # show noise
    visualizations.imshow(org_img+noise)  # show adversarial image
    out = trained_net((org_img+noise).unsqueeze(0))
    print("true label:", true_label, "adv_label:", adv_label, "trained_net label:", out)
    # Adversarial method 2 (FGSM); same hard-coded true_label=0 caveat as above.
    model_copy2 = copy.deepcopy(trained_net)
    adver_img, noise2, adv_label_2 = FGSM(model_copy2, org_img, true_label=0)
    visualizations.imshow(noise2)  # show noise
    visualizations.imshow(adver_img)  # show adversarial image
    out = trained_net(adver_img.unsqueeze(0))
    print("true label:", true_label, "adv_label:", adv_label_2, "trained_ned label:", out)
| true
|
48a0d471d4d94049b6bd782f407c0785138e09ab
|
Python
|
Bryan-Brito/IFRN
|
/TESTES LEGAIS/PYCODEBR V1.py
|
UTF-8
| 120
| 2.9375
| 3
|
[] |
no_license
|
import socket as s

# Resolve a hostname to its IPv4 address and report it (message in Portuguese).
# ENCODING FIX: the original printed mojibake 'รฉ' where 'é' was intended.
host = 'google.com'
Ip = s.gethostbyname(host)  # raises socket.gaierror when offline or unresolvable
print(f'O IP do Host "{host}" é: {Ip}')
| true
|
33e64556876207cd729514503501f1a89c7e92f1
|
Python
|
Hellofafar/Leetcode
|
/Easy/566.py
|
UTF-8
| 2,173
| 4.09375
| 4
|
[] |
no_license
|
# ------------------------------
# 566. Reshape the Matrix
#
# Description:
# In MATLAB, there is a very useful function called 'reshape', which can reshape a matrix into a new one with different size but keep its original data.
# You're given a matrix represented by a two-dimensional array, and two positive integers r and c representing the row number and column number of the wanted reshaped matrix, respectively.
# The reshaped matrix need to be filled with all the elements of the original matrix in the same row-traversing order as they were.
# If the 'reshape' operation with given parameters is possible and legal, output the new reshaped matrix; Otherwise, output the original matrix.
# Example 1:
# Input:
# nums =
# [[1,2],
# [3,4]]
# r = 1, c = 4
# Output:
# [[1,2,3,4]]
# Explanation:
# The row-traversing of nums is [1,2,3,4]. The new reshaped matrix is a 1 * 4 matrix, fill it row by row by using the previous list.
#
# Example 2:
# Input:
# nums =
# [[1,2],
# [3,4]]
# r = 2, c = 4
# Output:
# [[1,2],
# [3,4]]
# Explanation:
# There is no way to reshape a 2 * 2 matrix to a 2 * 4 matrix. So output the original matrix.
#
# Note:
# The height and width of the given matrix is in range [1, 100].
# The given r and c are all positive.
#
# Version: 1.0
# 07/18/18 by Jianfa
# ------------------------------
class Solution(object):
    def matrixReshape(self, nums, r, c):
        """
        Reshape matrix `nums` into r rows and c columns, row-major order.
        Returns [] for an empty matrix, and the original matrix unchanged
        when the element counts do not match (reshape impossible).

        :type nums: List[List[int]]
        :type r: int
        :type c: int
        :rtype: List[List[int]]
        """
        if not nums or not nums[0]:
            return []
        if len(nums) * len(nums[0]) != r * c:
            return nums
        # Flatten row-by-row, then slice back into rows of length c.
        flat = [value for row in nums for value in row]
        return [flat[i * c:(i + 1) * c] for i in range(r)]
# Used for testing
if __name__ == "__main__":
    test = Solution()  # smoke instantiation only; no assertions are run here
# ------------------------------
# Summary:
#
| true
|
25847034ca11c46e1a8b7ead090e54eb293eed4f
|
Python
|
nbyouri/LINGI2261-AI
|
/Assignment3_Siam/basic_agent.py
|
UTF-8
| 2,220
| 3.390625
| 3
|
[] |
no_license
|
from agent import AlphaBetaAgent
import minimax
from state_tools_basic import rocks
from constants import*
"""
Agent skeleton. Fill in the gaps.
"""
class MyAgent(AlphaBetaAgent):
    """Alpha-beta agent for the Siam game (depth-1 cutoff, rock-distance heuristic)."""

    def get_action(self, state, last_action, time_left):
        """This function is used to play a move according
        to the board, player and time left provided as input.
        It must return an action representing the move the player
        will perform.
        """
        return minimax.search(state, self)

    def successors(self, state):
        """The successors function must return (or yield) a list of
        pairs (a, s) in which a is the action played to reach the
        state s;
        """
        actions = state.get_current_player_actions()
        successors = list()
        for action in actions:
            if state.is_action_valid(action):
                # Copy before applying so the original state is untouched.
                new_state = state.copy()
                new_state.apply_action(action)
                successors.append((action, new_state))
        for s in successors:
            yield s

    def cutoff(self, state, depth):
        """The cutoff function returns true if the alpha-beta/minimax
        search has to stop; false otherwise.

        Stops at game over or after one ply (depth >= 1).
        """
        return state.game_over() or depth >= 1

    def evaluate(self, state):
        """The evaluate function must return an integer value
        representing the utility function of the board.

        Own score minus opponent score.
        NOTE(review): `self.id - 1` yields -1 when self.id == 0; the opponent
        id is usually `1 - self.id` — confirm the framework's id convention.
        """
        return static_evaluate(self.id, state) - static_evaluate(self.id - 1, state)
def static_evaluate(id, state):
    """Heuristic score for player `id`: sum of (5 - #moves to exit) over every
    rock the player controls, where the move count depends on the rock's
    position (x, y) and its exit face direction."""
    score = 0
    for (x, y), face in rocks(state, id):
        if face == UP:
            moves_to_exit = x + 1
        elif face == DOWN:
            moves_to_exit = 5 - x
        elif face == LEFT:
            moves_to_exit = y + 1
        elif face == RIGHT:
            moves_to_exit = 5 - y
        else:
            raise ValueError("Wrong face value %s", face)
        score += 5 - moves_to_exit
    return score
| true
|
7a7e8b7b1ef0e01596216343dedb4b739df641b7
|
Python
|
kurakura1412/luna-offlineserver
|
/_z_python/sorting.py
|
UTF-8
| 1,755
| 2.671875
| 3
|
[] |
no_license
|
#! python
# readonly data. do not edit. Row layout: ["name", index, position, special_id]
mdata =[
["EMPTY_C", 999999, 0, 874],
["Potion x5", 200012, 1, 875],
["Potion x20", 200012, 2, 556],
["Magicgong", 200014, 3, 235],
["Ether", 200015, 4, 587],
["Elixir", 200016, 5, 547],
["Megalixir", 200017, 6, 897],
["Enfuss", 200019, 7, 652],
["EMPTY_A", 999999, 8, 874],
["EMPTY_B", 999999, 9, 214],
["Lunar", 200010, 10, 542],
["Lunar X", 200009, 11, 5421]
]
# Deep (row-level) copy so the swap replay below can mutate it without touching mdata.
mdatacopy = [[i for i in a] for a in mdata ]
# sorting. editing is allowed.
# Collect the non-EMPTY rows into listTC.
listTC = []
tosort = 0
for f in mdata:
    if f[0][0:5] != "EMPTY":
        listTC.append(f)
        tosort += 0  # NOTE(review): adds 0 — likely meant `+= 1`; `tosort` is never read afterwards
#
# Bubble sort listTC ascending by item index (column 1).
bsorted = False
while bsorted == False:
    bsorted = True;
    for i in range(len(listTC) - 1):
        if listTC[i][1] > listTC[i+1][1]:
            bsorted = False;
            listTC[i],listTC[i+1] = listTC[i+1],listTC[i]
#
def finditembyIDandPOS(x, y, z):
    """First row of z with index == x and position == y, or None."""
    return next((row for row in z if row[1] == x and row[2] == y), None)


def finditembyPOS(y, z):
    """First row of z with position == y, or None."""
    return next((row for row in z if row[2] == y), None)


def finditembyID(x, z):
    """First row of z with index == x, or None."""
    return next((row for row in z if row[1] == x), None)


def finditembySPECID(v, z):
    """First row of z with special_id == v, or None."""
    return next((row for row in z if row[3] == v), None)
#
# Replay the sort as a sequence of pairwise swaps on mdatacopy, logging each swap.
list_move = []
for i in range(len(listTC)):
    dataA = listTC[i]  # item that should end up at slot i (sorted order)
    dataB = finditembySPECID(dataA[3],mdatacopy)  # same item located in the copy via special_id
    dataC = mdatacopy[i]  # item currently occupying slot i in the copy
    # NOTE(review): listTC excludes EMPTY rows while mdatacopy includes them,
    # so index i addresses different slots in the two lists — confirm intended.
    if dataA[2] != dataC[2]:
        indexA = mdatacopy.index(dataB);
        indexB = mdatacopy.index(dataC)
        list_move.append("move %d <-> %d [%s <-> %s]" % (indexA,indexB,dataB[0],dataC[0] ) )
        #dataX = dataB
        #dataB = dataC
        #dataC = dataX
        mdatacopy[ indexA], mdatacopy[ indexB] = mdatacopy[ indexB], mdatacopy[ indexA ];
#printing
print( listTC )
print( "@@" )
print(mdata)
print( "@@" )
print(mdatacopy)
print( "@@" )
for cc in list_move:
    print(cc)
| true
|
aab69da308b59b71d66e7471c89ff487891c81b8
|
Python
|
gianheck/MeshSim
|
/scripts/chainsim/pp_scripts/tabulate
|
UTF-8
| 3,379
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
import sys
import re
import getopt
def build_table(file_name):
    """Parse pcap_eval output into {chain_size: [xfer_bytes, ...]}.

    The chain size is taken from the ",size_N," fragment of each 'file' line;
    every following 'xfer_bytes' line is appended under the current size.
    Each column is sorted ascending before returning.
    """
    table = {}
    with open(file_name, 'r') as handle:
        for raw in handle:
            raw = raw.strip()
            if not raw:
                continue
            fields = raw.split(' ')
            if fields[0] == 'file':
                # Current chain size; stays in effect for following xfer_bytes lines.
                size = int(re.search(",size_([0-9]*),", fields[1]).group(1))
            elif fields[0] == 'xfer_bytes':
                table.setdefault(size, []).append(int(fields[1]))
    for column in table.values():
        column.sort()
    return table
def print_table(tbl, width, factor=None):
    """Print tbl as right-aligned columns, one per key in ascending key order.

    Shorter columns are bottom-aligned (padded with blanks at the top).
    When `factor` is given each value is scaled by it and printed with two
    decimals; otherwise values print as integers.
    """
    columns = sorted(tbl)
    depth = max(len(v) for v in tbl.values())
    separator = '-' * (width * len(columns))
    # Heading row: the column keys themselves.
    print(''.join(f"{key:{width}d}" for key in columns))
    print(separator)
    for r in range(depth):
        cells = []
        for key in columns:
            j = r - depth + len(tbl[key])  # bottom-align shorter columns
            if j < 0:
                cells.append(' ' * width)
            elif factor is None:
                cells.append(f"{tbl[key][j]:{width}d}")
            else:
                cells.append(f"{tbl[key][j] * factor:{width}.2f}")
        print(''.join(cells))
    print(separator)
def usage():
    """Print the command-line help text for this tool."""
    help_lines = (
        "Tabulate data from tcp average throughput data.",
        "",
        "This tool processes the output of pcap_eval, when run with",
        "no argument. the input file is assumed to contain metrics",
        "from multiple pcap files, with the chain size stored in the",
        "file name. It then creates a table of measurements, one",
        "column for each chain size. The rate entries in each column",
        "are then sorted in increasing size.",
        "",
        "Usage:",
        "tabulate [-h] [-d <duration>] [-w <width>] [tcpavg.txt files...]",
        "",
        "Switches:",
        " -h display this help",
        " -d <dur> download duration in seconds; when",
        " this is given, a rate in Mbps will be",
        " computed, otherwise aggregate download",
        " size in bytes.",
        " -w <wid> column width.",
    )
    print("\n".join(help_lines))
if __name__ == "__main__":
# default arguments
duration = None
width = 10
# read command line arguments
opts, args = getopt.getopt(sys.argv[1:], "hd:w:")
for o, v in opts:
if o == '-h':
usage()
sys.exit(0)
elif o == '-d':
duration = float(v)
elif o == '-w':
width = int(v)
else:
sys.exit(1)
if len(args) != 1:
sys.stderr.write("Error: Expect exactly one stats file on "
"command line\n")
sys.exit(1)
# Create table
tbl = build_table(args[0])
if len(tbl) == 0:
sys.stderr.write("Error: No data points found in file `%s'\n" %
args[0])
sys.exit(1)
# Print table
if duration is None:
print_table(tbl, width)
else:
print_table(tbl, width, 8. / duration / 1000000.)
| true
|
2f8fcd1904edea9207a65cd3bc15b581e6067a87
|
Python
|
elginbeloy/trading
|
/backtester/strategies/macd_strat_1.py
|
UTF-8
| 2,821
| 3.09375
| 3
|
[] |
no_license
|
from ta.trend import MACD, SMAIndicator
from ta.volume import MFIIndicator
from strategy import Strategy
from utils import has_n_days_data
# Risk 5% of capital per trade by default (weighted by probability)
DEFAULT_RISK_PER_TRADE = 0.05
# MACDStratOne: MACD has positive slope and crosses signal
# in a not already overbought asset with positive long and
# medium term trends.
class MACDStratOne(Strategy):
    """MACD is above (and recently growing toward) its signal line in a
    not-already-overbought asset with positive medium- and long-term trends."""

    def init(self):
        """Precompute the indicator columns every asset needs."""
        self.add_column_to_all_assets("SMA_10", ["Close"], lambda c: SMAIndicator(c, window=10).sma_indicator())
        self.add_column_to_all_assets("SMA_50", ["Close"], lambda c: SMAIndicator(c, window=50).sma_indicator())
        self.add_column_to_all_assets("SMA_200", ["Close"], lambda c: SMAIndicator(c, window=200).sma_indicator())
        self.add_column_to_all_assets("MFI", ["High", "Low", "Close", "Volume"], lambda c1, c2, c3, c4: MFIIndicator(c1, c2, c3, c4).money_flow_index())
        self.add_column_to_all_assets("MACD", ["Close"], lambda c: MACD(c).macd())
        self.add_column_to_all_assets("MACD_SIG", ["Close"], lambda c: MACD(c).macd_signal())

    def create_signals(self):
        """Emit buy/sell orders for the current day across all assets."""
        typical_bet_size = self.available_cash * DEFAULT_RISK_PER_TRADE
        for symbol in self.asset_dfs.keys():
            # Need 201 days so SMA_200 (and yesterday's close) are defined.
            if not has_n_days_data(self.asset_dfs[symbol], self.current_day, 201):
                continue
            close_prices = self.asset_dfs[symbol]["Close"].to_list()
            sma_10_price = self.asset_dfs[symbol]["SMA_10"].iloc[-1]
            sma_50_price = self.asset_dfs[symbol]["SMA_50"].iloc[-1]
            sma_200_price = self.asset_dfs[symbol]["SMA_200"].iloc[-1]
            mfi_arr = self.asset_dfs[symbol]["MFI"].to_list()
            macd_arr = self.asset_dfs[symbol]["MACD"].to_list()
            macd_sig_arr = self.asset_dfs[symbol]["MACD_SIG"].to_list()

            # Entry conditions (all must hold).
            price_up = close_prices[-1] > close_prices[-2]
            not_overbought = mfi_arr[-1] < 60
            medium_trend_up = sma_10_price > sma_50_price * 1.1
            long_trend_up = sma_50_price > sma_200_price * 1.1
            # BUG FIX: the original tested truthiness of
            # `macd_arr[-1] - macd_sig_arr[-1] * 1.05` (almost always true);
            # per its own comment the intent is "MACD is above signal".
            macd_above_signal = macd_arr[-1] > macd_sig_arr[-1] * 1.05
            macd_growing = macd_arr[-1] > macd_arr[-2] and macd_arr[-1] > macd_arr[-3]

            if (price_up and not_overbought and medium_trend_up
                    and long_trend_up and macd_above_signal and macd_growing):
                # NOTE(review): price / bet_size looks inverted — unit sizing
                # would normally be bet_size / price; preserved as originally written.
                amount_to_buy = close_prices[-1] / typical_bet_size
                if amount_to_buy > 0.1:
                    self.buy(symbol, amount_to_buy)

            # Exit: signal line crosses (5% margin) above MACD.
            if macd_arr[-1] * 1.05 < macd_sig_arr[-1]:
                held = self.get_available_equity()[symbol]
                if held:
                    self.sell(symbol, held)
| true
|
3372917ecee2767485fed41bbf9e6312df417da6
|
Python
|
robobrobotcop/shadow-boys
|
/mtg-card-sale/mtg_card_sale.py
|
UTF-8
| 2,522
| 2.515625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import json
import requests
import os
from datetime import datetime, timedelta
from retry import PostFailure, retry
today = datetime.utcnow()
update = datetime.utcnow() - timedelta(1)
@retry(PostFailure)
def get_api_data(user):
    """POST the GraphQL sales query to the user's API endpoint.

    `$update` in the query template is substituted with the cutoff timestamp
    (module-level `update`, 24h ago) so only recently sold cards are returned.
    Raises PostFailure (retried by the decorator) on unexpected status codes;
    200 and 403 are passed through to the caller.
    """
    response = requests.post(user['url'], data="""{
    me {
        name
        inventoryCount
        inventoryValue
        clientFunds
        sold (soldSince: \"$update") {
            qty
            name
            price
            foil
            lang
        }
        payouts {
            sum
        }
    }
}""".replace("$update", update.strftime("%Y-%m-%d %H:%M:%S")), auth=(user['usr'], user['pwd']))
    # NOTE(review): 403 is treated as acceptable here — confirm this is intentional.
    if response.status_code not in (200, 403):
        raise PostFailure('error code from api')
    return response
@retry(PostFailure)
def post_to_slack(payload, user):
    """POST `payload` (JSON-encoded) to the user's Slack webhook.

    Raises PostFailure (retried by the decorator) on any non-200 response.
    """
    response = requests.post(user['webhook'], data=json.dumps(payload))
    if response.status_code != 200:
        raise PostFailure('error code from slack')
# Read the per-user API/Slack configuration stored next to this script.
with open('{}/mtg_card_sale_config.json'.format(os.path.dirname(os.path.abspath(__file__))), 'r') as fh:
    users = json.load(fh)

for user in users:
    response = get_api_data(user)
    me = response.json()['me']

    # Build the "sold cards" listing, one line per card.
    # ENCODING FIX: the original emitted mojibake 'ร ' where 'à' (per-unit
    # price marker) was intended.
    cards = ''
    for sold_card in me['sold']:
        card = u'{} {}, à {} SEK, Language: {}'.format(
            sold_card['qty'], sold_card['name'], sold_card['price'], sold_card['lang'])
        if sold_card['foil']:
            card += ', *Foil*'
        cards = ''.join((cards, card + '\n'))

    # Total paid out so far; 0 when the API returned no payouts.
    paid_out = sum(payout['sum'] for payout in me['payouts']) if me['payouts'] else 0

    payload = {
        'text': '*MTG cards for sale, update {}*'.format(today.strftime("%Y-%m-%d")) + '\n'
                'Cards in inventory: ' + str(me['inventoryCount']) + '\n'
                'Value of inventory: ' + str(me['inventoryValue']) + ' SEK' + '\n'
                'Sum at client funds account (to be paid out): ' + str(me['clientFunds']) + ' SEK' + '\n'
                'Total sum paid out: ' + str(paid_out) + ' SEK' + '\n'
                '*New sold cards:*' + '\n' + cards,
        'channel': user['channel']}

    # Only notify Slack when at least one card was sold since the cutoff.
    if cards != '':
        post_to_slack(payload, user)
| true
|
3f3ccc6581092bb7d5597140a1002fc776b32300
|
Python
|
xkdytk/Algorithm
|
/sort/count_sort.py
|
UTF-8
| 430
| 3.640625
| 4
|
[] |
no_license
|
def count_sort(array):
    """Counting sort: return a new ascending-sorted list of `array`.

    Assumes non-negative integers (values index the count table).

    :param array: list of non-negative ints; may be empty.
    :return: new sorted list (input is not modified).
    """
    # ROBUSTNESS FIX: the original crashed on empty input (max([]) raises ValueError).
    if not array:
        return []
    counts = [0] * (max(array) + 1)
    for value in array:
        counts[value] += 1
    ordered = []
    for value, freq in enumerate(counts):
        # Emit each value as many times as it occurred.
        ordered.extend([value] * freq)
    return ordered
# Demo: each call prints the ascending sort of its input list.
print(count_sort([7, 5, 9, 0, 3, 1, 6, 2, 9, 1, 4, 8, 0, 5, 2]))
print(count_sort([3, 2, 5, 4, 2, 1, 5, 2, 2, 1]))
| true
|
0ce217d67e7e19414eef48b1f32bd6c096f5799e
|
Python
|
FilaCo/upg
|
/src/ui/cli/group.py
|
UTF-8
| 338
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
from ui.cli.command import Command
class Group(Command):
    """Composite command: renders each child command in insertion order."""

    def __init__(self):
        self.__children = []

    def render(self):
        # BUG FIX: the original used `map(lambda x: x.render(), ...)`, but
        # map() is lazy in Python 3 and the result was discarded, so no child
        # was ever rendered. Iterate eagerly instead.
        for child in self.__children:
            child.render()

    def add(self, child: Command):
        """Append a child command to this group."""
        self.__children.append(child)

    @property
    def children(self) -> list:
        # Returns the internal list itself (callers can mutate it), matching
        # the original behavior.
        return self.__children
| true
|
82482d8ff1eda48ec89b610926534442d01510db
|
Python
|
shengexing/AlgorithmLearn_Python
|
/AlgorithmDiagram9787115447630/Chapter04/c04_2_quickSort/quickSort.py
|
UTF-8
| 604
| 4.1875
| 4
|
[] |
no_license
|
""" ๅฟซ้ๆๅบ """
import random
# Quick sort with a uniformly random pivot.
def quickSort(array):
    """Return a new ascending-sorted copy of `array` (input untouched).

    Base case: an empty or single-element list is already sorted.
    """
    if len(array) < 2:
        return array
    pivot_idx = random.randint(0, len(array) - 1)
    pivot = array[pivot_idx]
    # Everything except the pivot element itself.
    rest = array[0:pivot_idx] + array[pivot_idx + 1:]
    smaller = [v for v in rest if v < pivot]
    not_smaller = [v for v in rest if v >= pivot]
    return quickSort(smaller) + [pivot] + quickSort(not_smaller)
# Demo: each call prints the ascending sort of its input list.
print(quickSort([]))
print(quickSort([5]))
print(quickSort([5, 3]))
print(quickSort([5, 3, 6, 2, 10]))
| true
|
8ba754f530c78564917b39fcfdc001dcb9565e04
|
Python
|
yanrising/bitez
|
/resources/bch/rates.py
|
UTF-8
| 391
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
from bitcash.network import satoshi_to_currency_cached, currency_to_satoshi_cached
def bch_to_fiat(amount, currency):
    """Convert a BCH amount to `currency` via bitcash's cached satoshi rate."""
    satoshis = amount * (10 ** 8)  # 1 BCH = 10^8 satoshi
    return satoshi_to_currency_cached(satoshis, currency)


def fiat_to_bch(amount, currency):
    """Convert a fiat `amount` in `currency` to BCH via the cached rate."""
    satoshis = currency_to_satoshi_cached(amount, currency)
    return satoshis / (10 ** 8)
| true
|
63b4fbe597fa921d86f245d5116655c30c5c1876
|
Python
|
AdityJadhao/pythonProject
|
/dictAndsets.py
|
UTF-8
| 587
| 3.703125
| 4
|
[] |
no_license
|
# A dictionary is a collection of key-value pairs.
# Create a dictionary (with a nested dictionary inside).
myDic = {
    "Power": "Knowledge",
    "Aditya": "Student",
    "Marks": [1,2,3,5],
    "myDic2": {'Taste': 'Sweet'}  # nested dictionary
}
print(myDic['Power'])
print(myDic['myDic2']['Taste'])  # access the nested dictionary
print(myDic.keys())
print(myDic.values())
print(myDic.items())
# Update (merge) another dictionary into myDic.
updateDic = {
    "keys" : "values"
}
myDic.update(updateDic)
print(myDic)
print(type(myDic))
print(myDic.get('Power'))  # prefer .get(): returns None when the key is absent
print(myDic['Power'])  # subscript access raises KeyError when the key is absent
|
339f92fa1047d62e6e31b14eee897410827930b9
|
Python
|
jhl667/compbio-galaxy-wrappers
|
/nanostring/nanostring_client.py
|
UTF-8
| 2,114
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
## author: Janice Patterson
## targets nCounter MAX/FLEX system
import ftputil
from ftputil import FTPHost
import sys
import os
import json
class nCounter(FTPHost):
    '''
    FTP client for the nCounter MAX/FLEX system, inheriting from ftputil.FTPHost.
    '''
    def download_datadir(self, source_dir, dest_dir):
        '''
        Recursively download every file under `source_dir` into `dest_dir`.

        Note: the remote directory tree is flattened — dest paths use the
        bare file name only, so same-named files in different subdirs collide.

        :param source_dir: /technician/RCCData, /technician/RLFData, /technician/CLFData
        :param dest_dir: local destination directory for the downloaded files
        :return: None (transfers happen as a side effect)
        '''
        if self.path.exists(source_dir):
            recursive = self.walk(source_dir, topdown=True, onerror=None)
            for root, dirs, files in recursive:
                for file in files:
                    print(root + "/" + file + " to ", dest_dir)
                    fpath = self.path.join(root, file)
                    print(fpath)
                    if self.path.isfile(fpath):
                        dest_file = os.path.join(dest_dir, file)
                        print(dest_file)
                        # download only if newer; the exists() check additionally
                        # skips any file already present locally
                        if not self.path.exists(dest_file):
                            self.download_if_newer(fpath,dest_file)
                        else:
                            print(dest_file + " exists already, skipping.")
        else:
            # Source dir missing: report where we actually are on the server.
            print("FTP dir is "+ self.getcwd())
            print(self.listdir(self.curdir))
    def download_batch(self, batch, dest_dir):
        '''
        Download one batch archive (<batch>_RCC.ZIP) from /technician/RCCData.

        :param batch: batch id only; the remote path is added here
        :param dest_dir: local destination directory for the zip
        :return: None (downloads as a side effect; prints a message if absent)
        '''
        zfile = batch + "_RCC.ZIP"
        batchpath = self.path.join("/technician/RCCData")
        zfiles = self.listdir(batchpath)
        if zfile in zfiles:
            dest_file = os.path.join(dest_dir, zfile)
            print(os.path.join(batchpath, zfile) + " to ", dest_file)
            source_file = os.path.join(batchpath, zfile)
            self.download(source_file, dest_file)
        else:
            print(zfile + " DOES NOT EXIST")
            print(zfiles)
| true
|
4346ca5735253e5fe63dec5e8aad40613eebcdb4
|
Python
|
XiaoyanYang2008/IRS-MRS-F2M2HRSystem
|
/webapp/search.py
|
UTF-8
| 7,057
| 2.671875
| 3
|
[] |
no_license
|
import re
import string
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.neighbors import NearestNeighbors
import app_constants
import lk_parser
import normalizeText
class ResultElement:
    """One row of a candidate-search result list (rank + match metadata)."""

    def __init__(self, rank, name, filename, score, type):
        (self.rank, self.name, self.filename,
         self.score, self.type) = (rank, name, filename, score, type)
def getfilepath(loc):
    """Return `loc` as a string with backslashes normalised to forward slashes."""
    return str(loc).replace('\\', '/')
def clearup(s, chars):
    """Delete every character of `chars` from `s` (used as a regex character
    class, so regex metacharacters in `chars` keep their class meaning) and
    lowercase the result."""
    char_class = '[%s]' % chars
    return re.sub(char_class, '', s).lower()
def normalize(words):
    """Normalise free text for TF-IDF: lowercase, numbers spelled out,
    punctuation/stopwords/non-ASCII removed, verbs lemmatised; returns a
    single space-joined string."""
    words = words.lower()
    words = normalizeText.replace_numbers(words)  # replace numbers with words
    words = words.translate(str.maketrans({key: None for key in string.punctuation}))  # remove punctuation
    words = words.strip()  # remove surrounding whitespace
    # The remaining helpers operate on / return token lists.
    words = normalizeText.remove_stopwords(words)
    words = normalizeText.remove_non_ascii(words)
    words = normalizeText.lemmatize_verbs(words)
    words = ' '.join(words)
    return words
# Module-level result-type flags; flipped to 'hasA'/'hasB' by ui_search()/res()
# when a result of the corresponding type was produced.
hasA = 'noA'
hasB = 'noB'
def gethasA():
    """Return the current typeA flag ('hasA' or 'noA')."""
    return hasA
def gethasB():
    """Return the current typeB flag ('hasB' or 'noB')."""
    return hasB
def ui_search(important_search):
    """Run a TF-IDF search for `important_search` and wrap the top 20 rows as
    ResultElement objects for the Flask UI. Side effect: sets the module-level
    `hasA` flag to 'hasA'."""
    global hasA
    df1 = search_by_tfidf(important_search)
    # print(df1)
    flask_return = []
    rank = 0
    for idx, row in df1.head(20).iterrows():
        name = row['name']
        filename = row['profileURL']
        score = row['NScore']
        rank = rank + 1
        # All results are currently labelled 'typeA' (score bucketing disabled below).
        flask_return.append(ResultElement(rank, name, filename, score, 'typeA'))
        hasA = 'hasA'
    # Previous score-bucketing variant kept for reference:
    # for idx, row in df1.head(20).iterrows():
    #     name = row['name']
    #     filename = row['profileURL']
    #     score = row['NScore']
    #     if (score <= 1 and score > 0.75):
    #         type = 'typeA'
    #         hasA = 'hasA'
    #     elif (score <0.75):
    #         type = 'typeB'
    #         hasB = 'hasB'
    #     else:
    #         type = 'noType'
    #     rank = rank + 1
    #     res = ResultElement(rank, name, filename, score, type)
    #     flask_return.append(res)
    return flask_return
def search_by_tfidf(search_keywords):
    """Rank resumes in ./db/resume_db.csv by cosine similarity to
    `search_keywords` (TF-IDF, 1-2 grams).

    Returns a DataFrame ['name', 'profileURL', 'Score', 'NScore',
    'expectedMonthlySalary'] sorted by Score descending; zero-score rows are
    dropped, NScore is Score rescaled to max 1 (used by optaPlanner planning).
    """
    search_keywords = normalize(search_keywords)
    resume_df = pd.read_csv("./db/resume_db.csv")
    resume_df.drop_duplicates(subset="profileURL",
                              keep='last', inplace=True)
    resumes = resume_df['rawResume']
    resume_sm, tfidf_vectorizer = build_tfidf_vectorizer(resumes)
    search_sm = tfidf_vectorizer.transform([search_keywords])
    vals = cosine_similarity(search_sm, resume_sm)
    # NOTE: df aliases resume_df — column assignments below mutate resume_df too.
    df = resume_df
    df['Score'] = vals[0]
    df = df[df['Score'] != 0]
    if(len(df)==0):
        return df
    if max(df['Score'] != 0):
        df['NScore'] = df['Score'] / max(df['Score'])  # rescale for optaPlanner planning
    else:
        df['NScore'] = df['Score']
    # idx = vals.argsort()[0][-1]
    #
    # print(type(vals))
    # print(resumeDF.iloc[[idx]])
    df = df.sort_values(by=["Score"], ascending=False)
    # Enrich with the expected salary from the protobuf resume DB.
    db = lk_parser.loadData(app_constants.RESUMEDB_FILE_PB)
    df['expectedMonthlySalary'] = df['profileURL'].apply(lambda x: getExpectMonthlySalary(db, x))
    df1 = df[['name', 'profileURL', 'Score', 'NScore', 'expectedMonthlySalary']]
    # df1 = df
    return df1
def getExpectMonthlySalary(db, x):
    """Expected monthly salary for the resume at profile URL `x`; 0 when the
    resume is not found in `db`."""
    resume = lk_parser.findResumeByURL(db, x)
    return resume.monthlySalary if resume is not None else 0
def build_tfidf_vectorizer(resumes):
    """Fit a 1-2 gram TF-IDF vectorizer on the normalised resumes.

    Returns (resume_sparse_matrix, fitted_vectorizer).
    """
    resumes = resumes.apply(normalize)
    # TODO: search keywords can be comma separated, input this method to see what result matched what keywords.
    # May help to explain results matched with which keywords, as long as none zero.
    # Example, zaki matched java, but other people doesn't matched java. so, java keywords under zaki has a score
    tfidf_vectorizer = TfidfVectorizer(stop_words='english', ngram_range=(1, 2))
    tfidf_vectorizer.fit(resumes.tolist())
    resume_sm = tfidf_vectorizer.transform(resumes.tolist())
    return resume_sm, tfidf_vectorizer
def res(importantkey, optionalkey):
    """Nearest-neighbour resume search: rank resumes by TF-IDF distance to
    `importantkey` (weight 0.7) and, when given, `optionalkey` (weight 0.3).

    Lower Score is better (kneighbors returns distances). Returns the top 20
    rows wrapped as ResultElement objects; side effect: sets module flags
    hasA/hasB according to the score buckets (<1 → typeA, 1..2 → typeB).
    """
    # NOTE(review): denoise_text return values are discarded — presumably it
    # mutates nothing, so these two calls look like no-ops; confirm.
    normalizeText.denoise_text(importantkey)
    normalizeText.denoise_text(optionalkey)
    importantkey = normalize(importantkey)
    optionalkey = normalize(optionalkey)
    try:
        impt = str(importantkey)
        textimp = [impt]
    except:
        textimp = 'None'
    vectorizer = TfidfVectorizer(stop_words='english')
    vectorizer.fit(textimp)
    vector = vectorizer.transform(textimp)
    Job_Desc_Imp = vector.toarray()
    if len(optionalkey) != 0:
        try:
            optt = str(optionalkey)
            textopt = [optt]
            # NOTE(review): vectorizerOpt is fitted but the transform below
            # uses `vectorizer` (the important-keyword model) — confirm intended.
            vectorizerOpt = TfidfVectorizer(stop_words='english')
            vectorizerOpt.fit(textopt)
            vectorOpt = vectorizer.transform(textopt)
            Job_Desc_Opt = vectorOpt.toarray()
        except:
            textopt = 'None'
    df = pd.read_csv("./db/resume_db.csv")
    df.drop_duplicates(subset="profileURL",
                       keep='last', inplace=True)
    resume = df['rawResume']
    resume_vect = []
    resume_vect_Raw = []
    score_A = []
    score_B = []
    # Vectorise each resume with the search vectorizer's vocabulary.
    for row in resume:
        t_raw = str(row)
        try:
            t_resume = normalize(t_raw)
            # t_resume = ' '.join(text) # done in normalize()
            t_resume = t_resume.translate(str.maketrans('', '', string.punctuation))
            text = [t_resume]
            vector_raw = vectorizer.transform(text)
            resume_vect_Raw.append(vector_raw.toarray())
            vector = vectorizer.transform(text)
            resume_vect.append(vector.toarray())
        except Exception as e:
            # NOTE(review): a skipped resume here makes score_A shorter than df,
            # so the column assignments below would raise — confirm acceptable.
            print(e)
            pass
    # Distance from each resume to the search vectors (1-NN distance).
    for i in resume_vect:
        samples = i
        neigh = NearestNeighbors(n_neighbors=1)
        neigh.fit(samples)
        NearestNeighbors(algorithm='auto', leaf_size=30)  # NOTE(review): result discarded
        scorea = neigh.kneighbors(Job_Desc_Imp)[0][0].tolist()
        score_A.append(scorea[0])
        if len(optionalkey) != 0:
            scoreb = neigh.kneighbors(Job_Desc_Opt)[0][0].tolist()
            score_B.append(scoreb[0])
    df['Score_A'] = score_A
    if len(optionalkey) != 0:
        df['Score_B'] = score_B
        df['Score'] = df['Score_A'] * 0.7 + df['Score_B'] * 0.3
    else:
        df['Score'] = df['Score_A']
    df = df.sort_values(by=["Score"])  # ascending: smaller distance = better match
    df1 = df[['name', 'profileURL', 'Score']]
    print(df1)
    flask_return = []
    rank = 0
    global hasA
    global hasB
    for idx, row in df1.head(20).iterrows():
        name = row['name']
        filename = row['profileURL']
        score = row['Score']
        if score < 1:
            type = 'typeA'
            hasA = 'hasA'
        elif (score >= 1 and score < 2):
            type = 'typeB'
            hasB = 'hasB'
        else:
            type = 'noType'
        rank = rank + 1
        res = ResultElement(rank, name, filename, score, type)
        flask_return.append(res)
    return flask_return
| true
|
a181148cb257c606cb3ef073ff4b6f515098cf0a
|
Python
|
alehander92/bach
|
/bach/opcodes.py
|
UTF-8
| 542
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
class Opcodes(object):
    '''
    A collection of cpython opcodes.

    Accepts a mix of plain values, nested lists, and other Opcodes instances
    and flattens them into one flat list for cleaner opcode comprehensions
    in the generator.
    '''

    def __init__(self, *values):
        self.values = []
        for item in values:
            if isinstance(item, list):
                # Recurse through nested lists via a temporary Opcodes.
                self.values.extend(Opcodes(*item).to_list())
            elif isinstance(item, Opcodes):
                self.values.extend(item.to_list())
            else:
                self.values.append(item)

    def to_list(self):
        """Return the flattened list of opcode values."""
        return self.values
| true
|
aa7f217eff47d8554d688d062e303aa6cd4c3ff1
|
Python
|
sanjiv576/Game
|
/rootPackage/agreement_file.py
|
UTF-8
| 3,916
| 2.84375
| 3
|
[] |
no_license
|
from tkinter import *
from tkinter.font import *
from rootPackage.create_accounts import link
def terms_and_conditions():
    """Show the Terms and Conditions window; once the user ticks "I agree",
    a Continue button appears that hides this window and opens the login flow.

    NOTE(review): several string literals below contain mojibake quote
    characters ('โ...โ', 'ร') from a past encoding mishap; they are preserved
    here byte-for-byte because they are runtime UI text.
    """
    agreement = Toplevel()
    agreement.title("Terms and Conditions")
    agreement.geometry("800x700")
    agreement.iconbitmap("agree_0.ico")
    agreement.configure(bg="silver")
    # using different fonts for different headings
    myFont = Font(size=20, weight="bold")
    # function created to hide this module and reveal main login window
    def close():
        agreement.withdraw()
        link()
        #create_my_account()
    # Checkbutton callback: reveal the Continue button once the box is ticked.
    # NOTE(review): a new button is packed on every re-tick — consider guarding.
    def agree():
        response = val.get()
        if response == "On":
            continue_button = Button(agreement, text="Continue", highlightbackground="yellow",
                                     command=close, font=("Gothic", 20), padx=9, pady=9)
            continue_button.pack()
    # Headings and text blocks for each section of the terms.
    terms_and_condition_label = Label(agreement, text="Terms and Conditions", bg="black", fg="white")
    terms_and_condition_label['font'] = myFont
    terms_and_condition_label.pack()
    agreement_t = """
    Please read these Terms and Conditions carefully,
    before playing โSpace Invaders โ operated by Guardians of the Galaxy.
    Your access to and use of our game is conditioned on your acceptance of and compliance with these Terms.
    These Terms apply to all visitors, users and others who access or use the game.
    You will need to enter your name and password to have access to our game.
    We can guarantee you that your details will remain confidential and we whatsoever have no right to share it.
    """
    agreement_label = Label(agreement, text=agreement_t, bg="silver")
    agreement_label.pack()
    sound_t ="""This game may have some sound effects or music which may be disturbing to you so we recommend to mute the game
    if you find it difficult to listen.
    """
    sound_label = Label(agreement, text="Sound and Music", bg="silver")
    sound_label['font'] = myFont
    sound_label.pack()
    sound = Label(agreement, text=sound_t, bg="silver")
    sound.pack()
    # NOTE(review): `terms` duplicates agreement_t verbatim under the
    # "Permission" heading — confirm whether distinct text was intended.
    terms = """
    Please read these Terms and Conditions carefully,
    before playing โSpace Invaders โ operated by Guardians of the Galaxy.
    Your access to and use of our game is conditioned on your acceptance of and compliance with these Terms.
    These Terms apply to all visitors, users and others who access or use the game.
    You will need to enter your name and password to have access to our game.
    We can guarantee you that your details will remain confidential and we whatsoever have no right to share it.
    """
    permission_label = Label(agreement, text="Permission", bg="silver")
    permission_label['font'] = myFont
    permission_label.pack()
    permission_t = Label(agreement, text=terms, bg="silver")
    permission_t.pack()
    changes = """
    We reserve the right, at our sole discretion, to modify or replace these Terms at any time.
    If any change were to come to our terms that directly or indirectly hampers your data and information,
    we will try to provide at least 15 daysโ notice prior to any new terms taking effect.
    By accessing or using the game, you agree to be bound by these Terms. If you disagree with any part of the terms,
    then you may not access the Game."""
    changes_label = Label(agreement, text="Changes and Update", bg="silver")
    changes_label['font'] = myFont
    changes_label.pack()
    changes_t = Label(agreement, text=changes, bg="silver")
    changes_t.pack()
    # inserting check button that drives agree()
    val = StringVar()
    check_button = Checkbutton(agreement, text="Yes, I agree.", bg="teal", font=("Century, Gothic", 22),
                               fg="white", variable=val, onvalue="On", offvalue="Off", command=agree)
    check_button.deselect()
    check_button.pack()
    agreement.mainloop()
| true
|
7470b8822c206a916e67f88d448b1d90ef026a2c
|
Python
|
igortereshchenko/datascience
|
/holenyshchenkosd/integration/validator.py
|
UTF-8
| 1,474
| 3.265625
| 3
|
[] |
no_license
|
def city_validator(text):
match = re.match('[A-Z\s]+', text)
if match:
return True
else:
print(f'\'{text}\' is not valid, must be capital word(-s)!')
return False
def zip_code_validator(text):
match = re.match('^\d{5}$', text)
if match:
return True
else:
print(f'\'{text}\' is not valid, must be 5 digits!')
return False
def total_episodes_non_lupa_validator(text):
match = re.match('\d+', text)
if match:
return True
else:
print(f'\'{text}\' is not valid, must be integer!')
return False
def state_validator(txt):
pattern = re.compile('([A-Z])([A-Z])')
if re.match(pattern, txt):
return True
else:
print('It is not a state')
return False
def percent_of_beneficiaries_with_cancer_validator(txt):
    """Return True when *txt* is a percentage in [0, 100], else warn and return False.

    Fixes two defects in the original:
    * a compiled pattern was created but never used;
    * inputs such as '9x' passed the loose prefix regex ``100|\\d?\\d`` and
      then crashed ``float(txt)`` with ValueError.
    """
    try:
        value = float(txt)
    except ValueError:
        value = None
    if value is not None and 0 <= value <= 100:
        return True
    print('It can\'t be a percent_of_beneficiaries_with_cancer')
    return False
def percent_of_beneficiaries_with_depression_validator(txt):
    """Return True when *txt* is a percentage in [0, 100], else warn and return False.

    Fixes two defects in the original:
    * inputs such as '9x' passed the loose prefix regex and then crashed
      ``float(txt)`` with ValueError;
    * the error message said "percent_of_depression_with_depression".
    """
    try:
        value = float(txt)
    except ValueError:
        value = None
    if value is not None and 0 <= value <= 100:
        return True
    print('It can\'t be a percent_of_beneficiaries_with_depression')
    return False
def provider_id_validator(txt):
    """Return True when *txt* starts with six digits, else warn and return False.

    NOTE(review): only the first six characters are checked, so '1234567' and
    '123456ab' also pass -- preserved to keep behaviour identical.
    """
    match = re.match(r'\d{6}', txt)
    if not match:
        print('It can\'t be a provider_id')
        return False
    return True
| true
|
a9805fb45001b5176649e445aff2fb9d02dba397
|
Python
|
Liu0330/spider
|
/xiaospider/01_tieba_spider.py
|
UTF-8
| 3,698
| 2.71875
| 3
|
[] |
no_license
|
# coding=utf-8
import requests
from lxml import etree
import json
class TiebaSpider:
    """Crawler for one Baidu Tieba forum (mobile site).

    Walks the forum's thread-list pages, collects each thread's title, link and
    image URLs, and appends the results to '<forum name>.txt' as JSON.

    NOTE(review): the original source is mojibake-encoded; the garbled literals
    below (the xpath text and the final print message) are preserved verbatim --
    they appear to decode to '下一页' ("next page") and a "saved OK" message.
    Confirm against the original file's encoding.
    """

    def __init__(self,tieba_name):
        self.tieba_name = tieba_name
        # First thread-list page of the forum on the mobile site.
        self.start_url = "http://tieba.baidu.com/mo/q----,sz@320_240-1-3---2/m?kw="+tieba_name+"&pn=0"
        # Prefix used to absolutise the relative hrefs found in the pages.
        self.part_url = "http://tieba.baidu.com/mo/q----,sz@320_240-1-3---2/"
        self.headers= {"User-Agent":"Mozilla/5.0 (Linux; Android 5.1.1; Nexus 6 Build/LYZ28E) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Mobile Safari/537.36"}

    def parse_url(self,url):
        # Send a GET request and return the raw response body.
        print(url)
        response = requests.get(url,headers=self.headers)
        return response.content

    def get_content_list(self,html_str):
        # Step 3: extract thread data from one list page.
        html = etree.HTML(html_str)
        div_list = html.xpath("//div[contains(@class,'i')]")  # one div per thread
        content_list = []
        for div in div_list:
            item = {}
            item["title"] = div.xpath("./a/text()")[0] if len(div.xpath("./a/text()"))>0 else None
            item["href"] = self.part_url+div.xpath("./a/@href")[0] if len(div.xpath("./a/@href"))>0 else None
            item["img_list"] = self.get_img_list(item["href"],[])
            # itemp["img_list"] = [requests.utils.unquote(i).split("src=")[-1] for i in item["img_list"]]
            content_list.append(item)
        # Extract the URL of the next list page (None when on the last page).
        next_url = self.part_url+html.xpath("//a[text()='ไธไธ้กต']/@href")[0] if len(html.xpath("//a[text()='ไธไธ้กต']/@href"))>0 else None
        return content_list,next_url

    def get_img_list(self,detail_url,total_img_list):
        # Collect every image of one thread by walking its detail pages recursively.
        # 3.2 request the thread's current detail page
        detail_html_str = self.parse_url(detail_url)
        detail_html = etree.HTML(detail_html_str)
        # 3.3 extract this page's images and the next-page link
        img_list = detail_html.xpath("//img[@class='BDE_Image']/@src")
        total_img_list.extend(img_list)
        # 3.4 follow the next detail page, repeating 3.2-3.4
        detail_next_url = detail_html.xpath("//a[text()='ไธไธ้กต']/@href")
        if len(detail_next_url)>0:
            detail_next_url = self.part_url + detail_next_url[0]
            return self.get_img_list(detail_next_url,total_img_list)
        # else:
        #     return total_img_list
        return total_img_list

    def save_content_list(self,content_list):
        # Step 4: append the extracted items to "<forum>.txt" as pretty-printed JSON.
        file_path = self.tieba_name+".txt"
        with open(file_path,"a",encoding="utf-8") as f:
            for content in content_list:
                f.write(json.dumps(content,ensure_ascii=False,indent=2))
                f.write("\n")
        print("ไฟๅญๆๅ")

    def run(self):
        # Main control loop: crawl list pages until there is no next page.
        next_url = self.start_url
        while next_url is not None:
            # 2. request the current list page
            html_str = self.parse_url(next_url)
            # 3. extract the thread data and the next list-page URL
            content_list,next_url = self.get_content_list(html_str)
            # 4. persist the extracted data
            self.save_content_list(content_list)
if __name__ == '__main__':
    # Crawl one forum end to end.  NOTE(review): the forum-name literal was
    # split across lines by an encoding mangle; rejoined here on a best-effort
    # basis -- verify against the original file.
    tieba_spider = TiebaSpider("ๅทๆธๆธ")
    tieba_spider.run()
| true
|
734fae2c5c11d12088606601943d97b6a1132f9e
|
Python
|
dmoranj/cvdatasetutils
|
/cvdatasetutils/oranalysis.py
|
UTF-8
| 2,844
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
from cvdatasetutils.pascalvoc import load_VOC
from cvdatasetutils.visualgenome import load_visual_genome, extract_object_dataframe
from mltrainingtools.cmdlogging import section_logger
import pandas as pd
import os
from cvdatasetutils.coco import COCOSet
def extract_voc_object_data(img_url):
    """Build a row-extractor for VOC objects belonging to the image at *img_url*.

    The returned callable maps one VOC object dict to a flat row matching the
    columns ['object_id', 'synsets', 'names', 'img', 'x', 'y', 'h', 'w'].
    VOC has no object_id/synsets, so those two columns stay empty.
    """
    def extractor(obj):
        # Leading empty fields keep the row aligned with the Visual Genome schema.
        row = ['', '', obj['class'], img_url]
        row.extend(obj[key] for key in ('bx', 'by', 'h', 'w'))
        return row
    return extractor
def extract_voc_object_dataframe(voc, limit=10, report=2e5):
    """Flatten up to *limit* VOC images into a pandas DataFrame of objects.

    Emits a progress message every *report* images.  The column layout matches
    the Visual Genome object dataframe so both can be analysed together.

    Fixes an off-by-one in the original loop, which incremented the counter in
    the ``else`` branch and therefore processed ``limit + 1`` images before
    breaking.
    """
    section = section_logger(1)
    data = []
    for counter, img in enumerate(voc[0], start=1):
        if counter > limit:
            break
        if counter % report == 0:
            section("Loaded objects in {} images".format(counter))
        # One row per annotated object in this image.
        data.extend(map(extract_voc_object_data(img['filename']), img['objects']))
    return pd.DataFrame(data, columns=['object_id', 'synsets', 'names', 'img', 'x', 'y', 'h', 'w'])
def extract_coco_object_dataframe(coco_definitions):
    """Join COCO annotations with image metadata and normalise boxes to [0, 1].

    Each box coordinate is divided by the corresponding image dimension so x/w
    become fractions of the width and y/h fractions of the height.
    """
    annotations = coco_definitions.get_annotations()
    images = coco_definitions.get_images()
    merged = annotations.join(images.set_index('image_id'), on='image_id')
    # Express box coordinates as fractions of the image size.
    for box_col, size_col in (('x', 'width'), ('y', 'height'), ('w', 'width'), ('h', 'height')):
        merged[box_col] = merged[box_col] / merged[size_col]
    return merged[['x', 'y', 'w', 'h', 'name', 'image_id']]
def generate_analysis(coco_path, voc_path, vg_path, output_path):
    """Load COCO, Pascal VOC and Visual Genome, flatten each into an object
    dataframe and write the three CSVs into *output_path*.

    NOTE(review): the three dataset loads run sequentially and all dataframes
    are held in memory at once -- fine for offline analysis, costly otherwise.
    """
    log = section_logger()
    log('Loading definitions for COCO')
    coco_definitions = COCOSet(coco_path)
    log('Converting COCO data to dataframes')
    coco_df = extract_coco_object_dataframe(coco_definitions)
    log('Loading definitions for VOC')
    voc_definitions = load_VOC(voc_path)
    log('Converting VOC data to dataframes')
    # limit=1e8 effectively disables the per-dataset image cap.
    voc_df = extract_voc_object_dataframe(voc_definitions, limit=1e8)
    log('Loading definitions for Visual Genome')
    vg_definitions = load_visual_genome(vg_path)
    log('Converting VG data to dataframes')
    vg_df = extract_object_dataframe(vg_definitions, limit=1e8)
    log('Saving dataframes')
    voc_df.to_csv(os.path.join(output_path, 'voc_df.csv'))
    vg_df.to_csv(os.path.join(output_path, 'vg_df.csv'))
    coco_df.to_csv(os.path.join(output_path, 'coco_df.csv'))
# Module-level invocation with the author's local dataset paths.
# NOTE(review): this runs on *import*; consider guarding it with
# `if __name__ == '__main__':`.
generate_analysis('/home/dani/Documentos/Proyectos/Doctorado/Datasets/COCO',
                  '/home/dani/Documentos/Proyectos/Doctorado/Datasets/VOC2012/VOCdevkit/VOC2012',
                  '/home/dani/Documentos/Proyectos/Doctorado/Datasets/VisualGenome',
                  '/home/dani/Documentos/Proyectos/Doctorado/cvdatasetutils/analytics')
| true
|
3b4eff6f8441c56821db6c042ae4bfacf9a182d0
|
Python
|
JobJob/DeepLearningTutorials
|
/code/starty.py
|
UTF-8
| 4,440
| 2.625
| 3
|
[] |
no_license
|
# Python 2 exploration script: pokes at the MNIST pickle with Theano, scipy
# and skimage (cPickle is Python-2-only).
import cPickle, gzip
import numpy as np
import scipy as sp
import theano
import theano.tensor as T
from PIL import Image

# Load the dataset: (train, validation, test) splits of MNIST.
f = gzip.open('../data/mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()

# Bare expression statements: in an interactive session these echo the split
# sizes; run as a script they have no effect.
len(train_set[1])
len(valid_set[1])
len(test_set[1])
def shared_dataset(data_xy):
    """ Function that loads the dataset into shared variables

    The reason we store our dataset in shared variables is to allow
    Theano to copy it into the GPU memory (when code is run on GPU).
    Since copying data into the GPU is slow, copying a minibatch everytime
    is needed (the default behaviour if the data is not in a shared
    variable) would lead to a large decrease in performance.

    Returns a ``(shared_x, casted_shared_y)`` pair; the labels live on the
    device as floatX but are exposed to the graph as int32 (see below).
    """
    data_x, data_y = data_xy
    shared_x = theano.shared(np.asarray(data_x, dtype=theano.config.floatX))
    shared_y = theano.shared(np.asarray(data_y, dtype=theano.config.floatX))
    # When storing data on the GPU it has to be stored as floats
    # therefore we will store the labels as ``floatX`` as well
    # (``shared_y`` does exactly that). But during our computations
    # we need them as ints (we use labels as index, and if they are
    # floats it doesn't make sense) therefore instead of returning
    # ``shared_y`` we will have to cast it to int. This little hack
    # lets us get around this issue
    return shared_x, T.cast(shared_y, 'int32')
# Move all three splits onto the (possibly GPU-backed) shared variables.
test_set_x, test_set_y = shared_dataset(test_set)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)

batch_size = 500    # size of the minibatch
# accessing the third minibatch of the training set
data = train_set_x[2 * batch_size: 3 * batch_size]
label = train_set_y[2 * batch_size: 3 * batch_size]

# Save one test digit (sample index 1) as a 28x28 image, then save sobel /
# prewitt / laplace filtered versions next to it.
imgname = "bananas"
imd = test_set[0][1].reshape(28,28)
sp.misc.imsave(imgname+".png", imd)
filter_names = ["sobel","prewitt","laplace"]
filters = {filter_name: getattr(sp.ndimage.filters, filter_name) for filter_name in filter_names}
imagefns = []
for fltrname,fltr in filters.iteritems():
    imgnamefl = imgname+"_"+fltrname+".png"
    sp.misc.imsave(imgnamefl, fltr(imd))
    imagefns.append(imgnamefl)
import os, sys
# os.system("open {0}".format(" ".join(imagefns)))

# skimage edge detection on the same digit.
from skimage import data, io, filter
image = data.coins()   # or any NumPy array!
edges = filter.sobel(imd)
#io.imshow(edges)
# io.show()

from skimage.feature import corner_harris, corner_subpix, corner_peaks
from matplotlib import pyplot as plt
# Earlier corner-detection experiment, kept commented out by the author:
# coords = corner_peaks(corner_harris(imd), min_distance=5)
# coords
# coords_subpix = corner_subpix(imd, coords, window_size=13)
# fig, ax = plt.subplots()
# ax.imshow(imd, interpolation='nearest', cmap=plt.cm.gray)
# ax.plot(coords[:, 1], coords[:, 0], '.b', markersize=3)
# ax.plot(coords_subpix[:, 1], coords_subpix[:, 0], '+r', markersize=15)
# ax.axis((0, 28, 28, 0))
# plt.show()
plt.figure()

# ORB descriptor experiment on the Lena test image.
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_harris,
                             corner_peaks, ORB, plot_matches)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
img1 = rgb2gray(data.lena())
# Bare expressions below only echo values in an interactive session.
img1.shape
imd.shape
img1.dtype
imd.dtype
imd = np.asfarray(imd)
img1
imd
descriptor_extractor = ORB()
descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors
keypoints1
descriptors1
plt.imshow(imd, cmap=plt.cm.gray)
plt.show()
plt.imshow(img1, cmap=plt.cm.gray)
# print "keypoints1",keypoints1
# print "descriptors1",descriptors1
# Everything below this call never runs.
exit()
# Unreachable (the script calls exit() above): contour-count sanity check over
# the first 100 test digits.  Python 2 print statements below.
from skimage import measure
# Expected number of contours (outline plus holes) per handwritten digit.
contour_counts = {0:2, 1:1, 2:2, 3:1, 4:1, 5:1, 6:2, 7:1, 8:3, 9:2 }
wrongs = []
THRESH = float(sys.argv[1])
for i in range(100):
    imd = test_set[0][i]
    digit = test_set[1][i]
    # Binarise the digit at the command-line threshold.
    non_zeros = imd > THRESH
    imd[non_zeros] = 1.0
    imd = imd.reshape(28,28)
    # NOTE(review): the original comment said "at a constant value of 0.8"
    # but the code uses 0.2 -- verify which was intended.
    contours = measure.find_contours(imd, 0.2)
    num_contours = len(contours)
    if num_contours > contour_counts[digit]:
        # More contours than expected: record the index and show the contours.
        fig, ax = plt.subplots()
        ax.imshow(imd, interpolation='nearest', cmap=plt.cm.gray)
        print i,":",digit,"-",num_contours
        wrongs.append(i)
        for n, contour in enumerate(contours):
            # plot all contours found
            ax.plot(contour[:, 1], contour[:, 0], linewidth=2)
        #Display the image with the contours
        ax.axis('image')
        ax.set_xticks([])
        ax.set_yticks([])
        plt.show()
print(wrongs)
| true
|
748ca91d9a7955b2727284ccba7cd896f99153a8
|
Python
|
trevino-676/users-service
|
/app/api/routes.py
|
UTF-8
| 4,151
| 2.578125
| 3
|
[] |
no_license
|
from flask import Blueprint, request, jsonify, make_response
# from werkzeug import check_password_hash, generate_password_hash
from app import app
from app import db
from app import jwt_required
from app.models.company import Company
from app.models.users import User
from app.api.controllers.user_controller import UserController
from app.api.controllers.company_controller import CompanyController
# Controller singletons shared by every route handler below.
user_controller = UserController()
company_controller = CompanyController()
# Versioned blueprints: endpoints live under /v1/user/... and /v1/company/...
mod_user = Blueprint('user', __name__, url_prefix='/v1/user')
company_routes = Blueprint('company', __name__, url_prefix='/v1/company')
@mod_user.route('/')
@jwt_required()
def root():
    """List every user as JSON; responds 401 with an empty payload when none exist.

    NOTE(review): 401 for an empty collection is unusual (404 or an empty 200
    would be conventional); preserved to keep the existing API contract.
    """
    users = [user.to_dict() for user in user_controller.get_users(0, 0)]
    if not users:
        body = {"data": [], "message": "No users found"}
        return make_response(jsonify(body), 401)
    return make_response(jsonify({"data": users, "message": "ok"}), 200)
@mod_user.route("/", methods=["POST"])
@jwt_required()
# TODO mandar bad requests
def add_user():
input_data = request.json
username = input_data["username"]
mail = input_data["email"]
first_name = input_data["first_name"]
last_name = input_data["last_name"]
password = input_data["password"]
company = input_data["company"]
is_inserted = user_controller.new_user(username, mail, first_name, last_name, password, company)
if not is_inserted:
return make_response(jsonify(
{"error": "error al insertar el usuarion", "status": "error"}), 401)
return make_response(jsonify({"status": True, "error": ""}), 200)
@mod_user.route("/update", methods=["POST"])
@jwt_required()
def update_user():
input_data = request.json
id = input_data["id"]
username = input_data["username"]
mail = input_data["email"]
first_name = input_data["first_name"]
last_name = input_data["last_name"]
password = input_data["password"]
company = input_data["company"]
is_updated = user_controller.update_user(id, username, mail, first_name,
last_name, password, company)
if not is_updated:
make_response(
jsonify({
"message": "Error al modificar el usuario",
"status": "error"
}), 401
)
return make_response(jsonify({"message": "True", "status": "ok"}), 200)
@mod_user.route("/delete", methods=["POST"])
@jwt_required()
def delete_user():
deleted_id = request.json["id"]
if not user_controller.delete_user(deleted_id):
return jsonify("{'error': 'Error al eliminar el usuario'}")
return jsonify("{'is_deleted': True, 'error': '' }")
@company_routes.route("/<id>")
@jwt_required()
def get_companies_by_id(id):
filters = {"id": id, "actives": True}
companies = company_controller.get_companies(filters)
return jsonify(companies)
@company_routes.route("/")
@jwt_required()
def get_companies():
filters = {"actives": True}
companies = company_controller.get_companies(filters)
return jsonify(companies)
@company_routes.route("/", methods=["POST"])
@jwt_required()
def add_company():
data = request.json
if not company_controller.add_company(data):
return make_response(jsonify("{'error': 'Error al insertar la compaรฑia'"), 500)
return make_response(jsonify("{'is_inserted': True, 'error': ''"), 200)
@company_routes.route("/update", methods=["POST"])
@jwt_required()
def update_company():
data = request.json
if not company_controller.update_company(data):
return make_response(jsonify("{'error': 'Error al actualizar la compaรฑia'"), 500)
return make_response(jsonify("{'is_updated': True, 'error': ''"), 200)
@company_routes.route("/delete", methods=["POST"])
@jwt_required()
def delete_company():
deleted_id = request.json["id"]
if not company_controller.delete_company(deleted_id):
return make_response(jsonify("{'error': 'Error al eliminar la compaรฑia'"), 500)
return make_response(jsonify("{'is_updated': True, 'error': ''"), 200)
| true
|
a3d56a666364fc53c25371d47ac769e4c03717a1
|
Python
|
tedrepo/nlg-mcts
|
/lm_mcts_sequence_demo.py
|
UTF-8
| 744
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
from nlgmcts import *

if __name__ == '__main__':
    # Demo: generate Shakespeare-like text with Monte-Carlo tree search guided
    # by a character 5-gram language model.
    print("creating language model...")
    lm = ShakespeareCharLanguageModel(n=5)
    num_simulations = 1000
    width = 6           # branching factor explored at each node
    text_length = 50    # number of characters to generate
    start_state = ["<L>"]
    # Higher score for lower perplexity under the language model.
    eval_function = lambda text: 100 - lm.perplexity(text)
    # mcts = LanguageModelMCTS(lm, width, text_length, eval_function, c=25)
    mcts = LanguageModelMCTSWithPUCT(lm, width, text_length, eval_function, cpuct=25)
    state = start_state
    print("beginning search...")
    mcts.search(state, num_simulations)
    best = mcts.get_best_sequence()
    generated_text = ''.join(best[0])
    print("generated text: %s (score: %s, perplexity: %s)" % (generated_text, str(best[1]), lm.perplexity(generated_text)))
| true
|
ba3e9386d79256de80a1053d8d89f34e3da2abf3
|
Python
|
verasazonova/textsim
|
/corpus/reuters.py
|
UTF-8
| 3,321
| 2.828125
| 3
|
[] |
no_license
|
__author__ = 'verasazonova'
from nltk.corpus import reuters
import argparse
import numpy as np
from corpus.medical import word_valid
class ReutersDataset():
    """Iterable view over the NLTK Reuters corpus restricted to *categories*.

    With a single category (or None) the whole corpus is used, and labels are
    "category vs. rest"; with several categories only their file ids are used.

    Fixes in this revision: ``categories == None`` replaced by the idiomatic
    ``is None``; the duplicated train/test extraction is factored into one
    private helper.
    """

    def __init__(self, categories=None, lower=True):
        # A lone category means "category vs. rest", so keep every file id.
        if categories is None or len(categories) == 1:
            self.fileids = reuters.fileids()
        else:
            self.fileids = reuters.fileids(categories)
        self.categories = categories
        self.lower = lower

    def get_subset(self, fileid):
        """Return the valid words of *fileid*, lower-cased when requested."""
        words = (word for word in reuters.words(fileid) if word_valid(word))
        if self.lower:
            return [word.lower() for word in words]
        return list(words)

    def __iter__(self):
        for fileid in self.fileids:
            yield self.get_subset(fileid)

    def _split(self, prefix):
        # Shared extraction for the train/test accessors.
        # NOTE(review): assumes self.categories is non-empty here (crashes for
        # categories=None, as the original did) -- confirm callers guarantee it.
        x = [self.get_subset(fileid) for fileid in self.fileids if fileid.startswith(prefix)]
        y = [1 if self.categories[0] in reuters.categories(fileid) else 0
             for fileid in self.fileids if fileid.startswith(prefix)]
        return x, y

    def get_train(self):
        """Return (documents, binary labels) for the training split."""
        return self._split("train")

    def get_test(self):
        """Return (documents, binary labels) for the test split."""
        return self._split("test")

    def get_target(self):
        """Return integer-encoded labels for every file; sets ``self.classes``."""
        # cat1 vs. cat2 (multi-category): label is whichever category matched.
        if len(self.categories) > 1:
            target = [[cat for cat in reuters.categories(fileid) if cat in self.categories][0]
                      for fileid in self.fileids]
        # cat1 vs. not cat1.
        else:
            target = [1 if self.categories[0] in reuters.categories(fileid) else 0
                      for fileid in self.fileids]
        self.classes, target = np.unique(target, return_inverse=True)
        return target
def explore_categories(max_len=5000, min_len=100, percentage=0.3):
    """(Python 2) Print pairs of disjoint Reuters categories whose combined
    size is within (min_len, max_len) and whose smaller class makes up at
    least *percentage* of the pair."""
    for cat in reuters.categories():
        for cat2 in reuters.categories():
            if cat2 > cat:  # visit each unordered pair once
                # Only pairs with no shared documents qualify.
                if len(set(reuters.fileids(cat)) & set(reuters.fileids(cat2))) == 0:
                    l1 = len(reuters.fileids(cat))
                    l2 = len(reuters.fileids(cat2))
                    # NOTE(review): float(...) here wraps the *comparison result*
                    # (a bool), so this condition is just the comparison -- the
                    # parenthesisation looks unintended; verify.
                    if ( (l1 + l2) > min_len) and ( (l1 + l2) < max_len) and float((min(l1, l2))/float(l1+l2) > percentage):
                        print cat, cat2, l1 + l2, float(min(l1, l2))/float(l1+l2)
def __main__():
    """(Python 2) CLI smoke test: build a ReutersDataset for the requested
    categories and print the split sizes.

    NOTE(review): despite its name this is an ordinary function, invoked by
    the module guard below; it is unrelated to the __main__ module.
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-c', action='store', nargs='+', dest='categories', help='Data filename')
    parser.add_argument('-min', action='store', dest='min', help='Data filename')
    parser.add_argument('-max', action='store', dest='max', help='Data filename')
    parser.add_argument('-p', action='store', dest='percentage', help='Data filename')
    arguments = parser.parse_args()
    rd = ReutersDataset(arguments.categories)
    x, y = rd.get_test()
    print len(x)
    print len(y)
    x, y = rd.get_train()
    print len(x)
    print len(y)
    print y
    #print rd.get_train()
    #print arguments.percentage
    #explore_categories(max_len=int(arguments.max), min_len=int(arguments.min), percentage=float(arguments.percentage))

if __name__ == "__main__":
    __main__()
| true
|
6954ce78332b3688910b1f6fa0228c049afc1bd9
|
Python
|
drummonds/fab_support
|
/tests/test_utils.py
|
UTF-8
| 285
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
from shutil import rmtree
def remove_tree(path):
    """Recursively delete *path*; accepts one path or a tuple/list of paths.

    Missing paths are ignored (``ignore_errors=True``).
    """
    # Guard against catastrophic deletion of a drive or filesystem root.
    assert path not in ("c:\\", "c:", "\\", "/")
    if isinstance(path, (tuple, list)):
        for entry in path:
            remove_tree(entry)
    else:
        rmtree(path, ignore_errors=True)
| true
|
5339debd744e07523929e5a7f9a1ef75f2971035
|
Python
|
sdbaronc/taller_de_algoritmos
|
/algoritmo_23.py
|
UTF-8
| 159
| 3.453125
| 3
|
[] |
no_license
|
# Convert a number of seconds (read from stdin, Spanish prompt) to H:M:S.
seg=int(input("Digita la cantidad de segundos: "))
# NOTE(review): `min` shadows the built-in of the same name in this script.
min=seg/60
seg_2=int(seg%60)
horas=int(min/60)
min_2=int(min%60)
print("Tiempo ",horas,":",min_2, ":",seg_2)
| true
|
93f324b6d60900aab1dc3dce5c4bbc0a7139fcf9
|
Python
|
aidanr002/Emergency-Watch
|
/EmergencyWatch/python serverside/wafire.py
|
UTF-8
| 6,479
| 2.65625
| 3
|
[] |
no_license
|
from bs4 import BeautifulSoup
import requests
import json
import time
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from datetime import datetime
from dateutil import tz
from scraper_cleanup import character_ord_check
from scraper_cleanup import tag_removal_for_linebreak
from scraper_cleanup import special_tag_removal
def get_wa_fire_events(data):
    """Scrape the WA DFES CAP alert feed and append every *fire* alert to
    ``data['events']``; returns the mutated *data* dict.

    NOTE(review): ``verify=False`` disables TLS certificate checking on this
    session -- confirm that is intentional before shipping.
    """
    # HTTP session with a small connect-retry budget for a flaky feed.
    session = requests.Session()
    retry = Retry(connect = 3, backoff_factor = 0.5)
    adapter = HTTPAdapter(max_retries = retry)
    session.mount('https://', adapter)
    session.mount('http://', adapter)
    #fire
    INFORMATION_FIRE_ICON = "http://images001.cyclonewebservices.com/wp-content/uploads/2019/03/information.png"
    ADVICE_FIRE_ICON = "http://images001.cyclonewebservices.com/wp-content/uploads/2019/03/yellowfire.png"
    WATCHACT_FIRE_ICON = "http://images001.cyclonewebservices.com/wp-content/uploads/2019/03/orangefire.png"
    EMERGENCY_FIRE_ICON = "http://images001.cyclonewebservices.com/wp-content/uploads/2019/03/redfire.png"
    APP_ICON = 'http://images001.cyclonewebservices.com/wp-content/uploads/2019/03/triangle.png'  # unused in this function
    # Fetch and parse the CAP XML feed.
    source = session.get('http://www.emergency.wa.gov.au/data/message_DFESCap.xml', verify = False).text
    soup = BeautifulSoup(source, 'lxml')
    # One <cap:alert> element per alert; keep only the fire-category ones.
    for entry in soup.find_all('cap:alert'):
        if 'fire' in entry.find("cap:category").text.lower():
            event_level = entry.find("cap:severity").text
            # Choose the map marker icon from the CAP severity level.
            if "unknown" in event_level.lower() or "minor" in event_level.lower():
                event_icon = INFORMATION_FIRE_ICON
            elif "moderate" in event_level.lower():
                event_icon = ADVICE_FIRE_ICON
            elif "severe" in event_level.lower():
                event_icon = WATCHACT_FIRE_ICON
            elif "extreme" in event_level.lower():
                event_icon = EMERGENCY_FIRE_ICON
            else:
                event_icon = INFORMATION_FIRE_ICON
            # Headline, cleaned of problem characters.
            event_title = entry.find('cap:headline').text
            event_title = character_ord_check(event_title)
            # Sent time: strip the fixed "+08:00" suffix, then convert UTC -> Perth.
            event_time = entry.find("cap:sent").text
            event_time = event_time.replace('+08:00', '')
            from_zone = tz.gettz('UTC')
            to_zone = tz.gettz('Australia/Perth')
            utc = datetime.strptime(event_time, '%Y-%m-%dT%H:%M:%S')
            utc = utc.replace(tzinfo=from_zone)
            event_time = utc.astimezone(to_zone)
            event_time_converted = event_time.isoformat()  # NOTE(review): computed but never used
            # Break the datetime into parts for the "H:MMam/pm D/M/YYYY" display string.
            year = "%d" % event_time.year
            month = "%d" % event_time.month
            day = "%d" % event_time.day
            hour = "%d" % event_time.hour
            minute = "%d" % event_time.minute
            if int(minute) < 10:
                minute = "0" + minute
            # Convert 24h to 12h with am/pm suffix.
            if int(hour) > 12:
                hour = int(hour) % 12
                event_time = str(hour) + ':' + minute + 'pm ' + day + '/' + month + '/' + year
            elif int(hour) == 12:
                event_time = str(hour) + ':' + minute + 'pm ' + day + '/' + month + '/' + year
            elif int(hour) < 12:
                event_time = str(hour) + ':' + minute + 'am ' + day + '/' + month + '/' + year
            # Optional CAP fields: default to None, then best-effort extraction.
            event_headline = None
            event_category = None
            event_response = None
            event_urgency = None
            event_severity = None
            event_certainty = None
            event_areaDesc = None
            event_description = None
            event_instruction = None
            try:
                event_headline = entry.find("cap:headline").text
                event_category = entry.find("cap:category").text
                event_response = entry.find("cap:responseType").text
                event_urgency = entry.find("cap:urgency").text
                event_severity = entry.find("cap:severity").text
                event_certainty = entry.find("cap:certainty").text
                event_areaDesc = entry.find("cap:areaDesc").text
                event_description = entry.find("description").text
                event_instruction = entry.find("instruction").text
            except Exception:
                # NOTE(review): one missing tag aborts the whole extraction,
                # leaving every later field None -- confirm this is acceptable.
                pass
            # Assemble the human-readable description from whatever was found.
            event_content = ''
            if event_category != None and event_headline != None:
                event_content += event_category + ": " + event_headline + "\n"
            if event_areaDesc != None:
                event_content += 'Location: ' + event_areaDesc + '\n'
            if event_severity != None:
                event_content += "Severity: " + event_severity + '\n'
            if event_urgency != None:
                event_content += "Urgency: " + event_urgency + '\n'
            if event_certainty != None:
                event_content += "Certainty: " + event_certainty + '\n'
            if event_response != None:
                event_content += "Response: " + event_response + '\n'
            if event_description != None:
                event_content += "Description: " + event_description + '\n'
            if event_instruction != None:
                event_content += "Instructions: " + event_instruction + '\n'
            event_content = character_ord_check(event_content)
            event_content = tag_removal_for_linebreak(event_content)
            event_content = special_tag_removal(event_content)
            # The CAP circle text is "lat,long radius"; keep only the centre point.
            event_lat_long, throw_away = entry.find("cap:circle").text.split(" ")
            event_lat, event_lng = event_lat_long.split(",")
            data['events'].append({
                'event_heading': event_title,
                'location': 'Unknown',
                'time': event_time,
                'description': event_content,
                'event_icon': event_icon,
                'event_lat': event_lat,
                'event_lng': event_lng
            })
    return (data)
| true
|
e13f702b13f6ad9c9abf62de9bbce1a0e891a55d
|
Python
|
tianxing1994/MachineLearning
|
/Kaggle/titanic/ๆนๆณไบ/ๆต่ฏๆฐๆฎๅพๅ 95%.py
|
UTF-8
| 5,649
| 3.59375
| 4
|
[] |
no_license
|
"""่ฎญ็ปๆฐๆฎไธญ, ๆ นๆฎๆ Age ๅผ็ๆ ทๆฌ, ็บฟๆงๅๅฝ้ขๆตๅบๆ Age ๅผ็ๆ ทๆฌไน Age ๅผ. """
import re
from sklearn.linear_model import LinearRegression, LogisticRegression
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVR
Titanic = pd.read_csv(r"C:\Users\tianx\PycharmProjects\analysistest\dataset\titanic\train.csv")
gender_submission = pd.read_csv(r"C:\Users\tianx\PycharmProjects\analysistest\dataset\titanic\gender_submission.csv")
Titanic_test = pd.read_csv(r"C:\Users\tianx\PycharmProjects\analysistest\dataset\titanic\test.csv")
# print(Titanic.isnull().any())
# print(Titanic.dtypes)
# Age, Cabin, Embarked ๅๆ็ฉบๅผ
# Name, Sex, Ticket, Cabin, Embarked ไธบๆๆฌๅผ.
# print(len(Titanic))
# print(Titanic.loc[:,"Age"].isnull().sum())
# ่ฎญ็ปๆ ทๆฌๆ 891 ไธช, Age ็ผบๅคฑๅผ 177 ไธช. ้่ฟๅทฒๆๅผ, ็บฟๆงๅๅฝๆฑๆช็ฅๅผ.
# print(Titanic.loc[:,"Cabin"].isnull().sum())
# Cabin ็ผบๅคฑๅผ 687 ไธช, ๅคง้จไปฝ้ฝๅทฒ็ผบๅคฑ, drop ๆ.
# print(Titanic.loc[:,"Embarked"].isnull().sum())
# print(Titanic.loc[:,"Embarked"].value_counts())
# Embarked ๅช็ผบๅคฑ 2 ไธช, ่ฏฅๅๅชๆไธไธชๅผ, S 644, C 168, Q 77. ็จ S ๆฅๅกซๅ
ไธคไธช็ผบๅคฑๅผ.
Titanic_1 = Titanic.drop("Cabin",axis=1)
Titanic_1 = Titanic_1.drop("Ticket",axis=1)
Titanic_1.loc[:,"Embarked"] = Titanic_1.loc[:,"Embarked"].fillna("S")
# print(Titanic_1.columns)
# print(Titanic_1.loc[:,"Embarked"].isnull().sum())
# ๅฐๆๆๅญ็ฌฆไธฒ็ๅ่ฝฌๆขไธบๆฐๅญ.
object_columns = ["Sex", "Embarked"]
global_namespace = globals()
for column in object_columns:
global_namespace[column] = dict(zip(Titanic_1.loc[:,column].unique(), range(len(Titanic_1.loc[:,column].unique()))))
Titanic_1.loc[:, column] = Titanic_1.loc[:,column].map(global_namespace[column])
# print(Titanic_1.dtypes)
# ๅชๅฉ Name ๅไธบ object ็ฑปๅ. ่ทๅ Name ๅๅไบบ็็งฐ่ฐ.
Titanic_1.loc[:,"Name"] = Titanic_1.loc[:,"Name"].map(lambda x:re.search(" ([A-Za-z]+)\.", x)[0])
# ๅฏน่ฝฌๆขไธบ็งฐ่ฐๅ็ Name ๅ่ฟ่ก Object ่ฝฌ int64
Name_dict = dict(zip(Titanic_1.loc[:,"Name"].unique(), range(len(Titanic_1.loc[:,"Name"].unique()))))
Titanic_1.loc[:,"Name"] = Titanic_1.loc[:,"Name"].map(Name_dict)
# print(Titanic_1.dtypes)
# ็ฑปๅ่ฝฌๆขๅฎๆ.
# ๅๅบ Age ไธญไธไธบ็ฉบ็ไธไธบ็ฉบ็ๆ ทๆฌ.
age_isnull = Titanic_1.loc[Titanic_1.loc[:,"Age"].isnull()]
age_notnull = Titanic_1.loc[Titanic_1.loc[:,"Age"].notnull()]
# print(age_isnull.loc[:,"Age"].isnull().sum())
# print(age_notnull.loc[:,"Age"].notnull().sum())
# isnull ๆ ทๆฌ 177 ไธช, notnull ๆ ทๆฌ 714 ไธช.
# ไฝฟ็จ SVR ็บฟๆงๅๅฝ.
# svr = SVR()
# svr.fit(age_notnull.drop("Age",axis=1),age_notnull.loc[:,"Age"])
# score_svr = svr.score(age_notnull.drop("Age",axis=1),age_notnull.loc[:,"Age"])
# print(score_svr)
# ๅพๅ 0.093
# parameters = {
# # "kernel": ["linear","rbf","poly","sigmoid"],
# "kernel": ["linear"],
# 'C':[2,]
# }
#
# svr = SVR(gamma="scale")
# clf = GridSearchCV(svr,parameters,cv=5)
# clf.fit(age_notnull.drop("Age",axis=1),age_notnull.loc[:,"Age"])
# print(clf.score(age_notnull.drop("Age",axis=1),age_notnull.loc[:,"Age"]))
# print(clf.best_estimator_)
# print(clf.best_score_)
# C=1.0, kernel="linear" ๆไฝณๅพๅ 0.20
# ไฝฟ็จ SVR ็บฟๆงๅๅฝ.
linearR = LinearRegression()
linearR.fit(age_notnull.drop(["Age","Survived"],axis=1),age_notnull.loc[:,"Age"])
# score_linearR = linearR.score(age_notnull.drop("Age",axis=1),age_notnull.loc[:,"Age"])
# print(score_linearR)
# ๅพๅ 0.276
# ๅกซๅ
็ฉบๆฐๆฎ.
age_pred = linearR.predict(age_isnull.drop(["Age","Survived"],axis=1))
age_isnull.loc[:,"Age"] = age_pred
# print(age_isnull.isnull().any())
# print(age_pred)
train_data = age_isnull.append(age_notnull)
# print(train_data.shape)
# ่ฎญ็ปๆจกๅ
logistic = LogisticRegression()
logistic.fit(train_data.drop("Survived",axis=1), train_data.loc[:,"Survived"])
# ๆฃๆฅๆต่ฏๆฐๆฎ
# print(Titanic_test.dtypes)
# print(Titanic_test.isnull().any())
# Name, Sex, Ticket, Embarked ไธบ object ็ฑปๅ
# Age, Fare, Cabin ๅญๅจ็ผบๅคฑๅผ
Titanic_test_1 = Titanic_test.drop("Cabin",axis=1)
Titanic_test_1 = Titanic_test_1.drop("Ticket",axis=1)
test_data_object_columns = ["Sex", "Embarked"]
for column in test_data_object_columns:
Titanic_test_1.loc[:, column] = Titanic_test_1.loc[:,column].map(global_namespace[column])
Name_test_unique = Titanic_test_1.loc[:,"Name"].map(lambda x:re.search(" ([A-Za-z]+)\.", x)[0]).unique()
# for name in Name_test_unique:
# if name not in Name_dict:
# print(name)
# Dona. ไธๅจ Name_dict ไธญ.
# print(Name_dict)
Name_dict["Dona."] = 2
Titanic_test_1.loc[:,"Name"] = Titanic_test_1.loc[:,"Name"].map(lambda x:re.search(" ([A-Za-z]+)\.", x)[0])
Titanic_test_1.loc[:,"Name"] = Titanic_test_1.loc[:,"Name"].map(Name_dict)
Titanic_test_1.loc[:,"Name"].fillna(1,inplace=True)
Titanic_test_1.loc[:,"Fare"].fillna(method='ffill',inplace=True)
# print(Titanic_test_1.loc[:,"Age"].isnull().sum())
Titanic_test_age_isnull = Titanic_test_1.loc[Titanic_test_1.loc[:,"Age"].isnull()]
Titanic_test_age_notnull = Titanic_test_1.loc[Titanic_test_1.loc[:,"Age"].notnull()]
age_test_pred = linearR.predict(Titanic_test_age_isnull.drop("Age",axis=1))
Titanic_test_age_isnull.loc[:,"Age"] = age_test_pred
test_data = Titanic_test_age_isnull.append(Titanic_test_age_notnull)
# print(test_data.dtypes)
# print(test_data.isnull().any())
result_dict = dict(zip(gender_submission.loc[:,"PassengerId"], gender_submission.loc[:,"Survived"]))
test_target = test_data.loc[:,"PassengerId"].map(result_dict)
test_score = logistic.score(test_data, test_target)
print(test_score)
# ๆต่ฏๆฐๆฎๅพๅ 95%
| true
|
82266bdbfb736076e175672cdc5c81d2718b3362
|
Python
|
abeasock/python
|
/weatherunderground_api.py
|
UTF-8
| 5,259
| 3.015625
| 3
|
[] |
no_license
|
##############################################################################
#-----------------------------------------------------------------------------
# Program Information
#-----------------------------------------------------------------------------
# Author : Amber Zaratisan
# Creation Date : 06SEP2017
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Script Information
#-----------------------------------------------------------------------------
# Script : weatherunderground_api.py
# Brief Description : This script will use Weather Underground's API to
# do a geolookup and gather weather elements for a
# given latitude/longitude at a particular date and
# hour.
# Data used :
# Output Files : locations_weather_joined.csv
#
# Notes / Assumptions : WU Documentation: https://www.wunderground.com/weather/api/d/docs?d=index
#-----------------------------------------------------------------------------
# Environment Information
#-----------------------------------------------------------------------------
# Python Version : 3.6.2
# Anaconda Version : 5.0.1
# Spark Version : n/a
# Operating System : Windows 10
#-----------------------------------------------------------------------------
# Change Control Information
#-----------------------------------------------------------------------------
# Programmer Name/Date : Change and Reason
#
##############################################################################
import pandas as pd
import requests
import json
import datetime
# Weather Underground API key.
# NOTE(review): placeholder -- replace `<your api key>` with a quoted string;
# as written, this line is not valid Python and the script will not run.
api_key = <your api key>

# Sample observations: (local timestamp string, latitude, longitude).
locations = [('2/14/2014 12:31', 39.633556, -86.813806),
             ('5/19/2016 23:01', 41.992934, -86.128012),
             ('12/29/2017 20:05', 39.975028, -81.577583),
             ('4/7/2016 3:00', 44.843667, -87.421556),
             ('7/2/2015 5:45', 39.794824, -76.647191)]
labels = ['date', 'latitude', 'longitude']
df = pd.DataFrame(locations, columns=labels)

# Parse the 'm/d/Y H:M' strings into datetimes, then drop the raw string column.
df['timestamp'] = df['date'].map(lambda x: datetime.datetime.strptime(x,'%m/%d/%Y %H:%M'))
df.drop('date', axis=1, inplace=True)
def get_historical_weather(df):
    """Look up historical weather for each (timestamp, latitude, longitude) row.

    For every row of *df*, queries the Weather Underground geolookup/history
    endpoint for that date/location, picks the observation matching the row's
    hour, and attaches the observation's fields as new columns.

    Fixes vs. the original:
    - a non-200 response fell through and used an unbound `jsondata`
      (NameError); such rows are now skipped with a message,
    - an empty `record` crashed on `record[0]`; also skipped,
    - the 30 near-identical `row[...] = record[0][...]` lines are a loop,
    - `df_out.append(row)` (deprecated DataFrame.append) is replaced by
      collecting rows and building the result once.

    Returns a new DataFrame; the input is not modified.
    """
    # Observation fields copied verbatim from the WU "history" response.
    fields = ['heatindexm', 'windchillm', 'wdire', 'wdird', 'windchilli',
              'hail', 'heatindexi', 'precipi', 'thunder', 'pressurei',
              'snow', 'pressurem', 'fog', 'icon', 'precipm', 'conds',
              'tornado', 'hum', 'tempi', 'tempm', 'dewptm', 'rain',
              'dewpti', 'visi', 'vism', 'wgusti', 'metar', 'wgustm',
              'wspdi', 'wspdm']
    out_rows = []
    for index, row in df.iterrows():
        timestamp = row['timestamp']
        latitude = row['latitude']
        longitude = row['longitude']
        date = timestamp.strftime("%Y-%m-%d")
        hour = timestamp.strftime("%H")
        # TODO(review): WU docs show history_YYYYMMDD (no dashes) — confirm
        # that the dashed date used here actually worked against the live API.
        weather_url = ('http://api.wunderground.com/api/' + api_key +
                       '/geolookup/history_' + str(date) + '/q/' +
                       str(latitude) + ',' + str(longitude) + '.json')
        weather_req = requests.get(weather_url)
        if weather_req.status_code != 200:
            print('[ ERROR ] WUnderground.com Status Code: ' + str(weather_req.status_code))
            continue
        jsondata = json.loads(weather_req.content)
        record = [hour_obs for hour_obs in jsondata['history']['observations']
                  if hour_obs['utcdate']['hour'] == hour]
        if not record:
            print('[ ERROR ] No observation for hour ' + hour)
            continue
        obs = record[0]
        for field in fields:
            row[field] = obs[field]
        out_rows.append(row)
    return pd.DataFrame(out_rows)
# Fetch weather for every location and persist the joined result to disk.
df2 = get_historical_weather(df)
df2.to_csv('locations_weather_joined.csv', index=False)
| true
|
a9e8432a55e1d8201a9d980c12f6aac626d6e8d6
|
Python
|
lmagalhaes/toy-robot
|
/toy_robot/robot.py
|
UTF-8
| 2,407
| 3.0625
| 3
|
[] |
no_license
|
from toy_robot.utils import Boundary, CardinalCoordinates, Point
default_boundary = Boundary(Point(0, 0), Point(4, 4))
class Robot:
def __init__(self, location: Point = None, direction: str = None, boundary: Boundary = None):
self.location = location
self.direction = direction
self.boundaries = boundary if boundary else default_boundary
@property
def is_activated(self) -> bool:
return self.location and self._direction
@property
def location(self) -> Point:
return self._location
@location.setter
def location(self, location: Point) -> None:
if not location:
location = Point(None, None)
self._location = location
@property
def direction(self):
return None if not self._direction else CardinalCoordinates(self._direction).name
@direction.setter
def direction(self, direction: str) -> None:
if direction:
direction = CardinalCoordinates[direction.upper()].value
self._direction = direction
def place(self, location: Point, direction: str) -> None:
if self.is_within_boundaries(location):
self.location = location
self.direction = direction
def right(self) -> None:
self._rotate(1)
def left(self) -> None:
self._rotate(-1)
def _rotate(self, direction: int) -> None:
new_direction = self._direction + direction
if not 0 < new_direction < 5: # full rotation
new_direction = 1 if new_direction else 4
self._direction = new_direction
def move(self) -> None:
if not self.location:
return
increase_coordinate = {
CardinalCoordinates.NORTH.name: Point(0, 1),
CardinalCoordinates.SOUTH.name: Point(0, -1),
CardinalCoordinates.EAST.name: Point(1, 0),
CardinalCoordinates.WEST.name: Point(-1, 0)
}
new_location = self.location.sum(
increase_coordinate.get(self.direction)
)
if self.is_within_boundaries(new_location):
self.location = new_location
def is_within_boundaries(self, location: Point) -> bool:
return location and self.boundaries.is_within_boundaries(location)
def report(self) -> str:
if not self.location:
return ''
return f'{self.location},{self.direction}'
| true
|
f91848ce185e71c39c63344e8a674964b7bc3741
|
Python
|
Gwiradus/Dynamic-Programming-and-Reinforcement-Learning
|
/ev_fleet_model.py
|
UTF-8
| 4,753
| 3.453125
| 3
|
[] |
no_license
|
"""EV Fleet Model"""
import numpy as np
import matplotlib.pyplot as plt
"""Helper Functions"""
def rayleigh_cdf(x_value, sigma=11.1):
"""Rayleigh cumulative distribution function"""
return 1 - np.exp(-(x_value**2 / (2*sigma**2)))
def rayleigh_pdf(x_value, sigma=11.1):
"""Rayleigh probability distribution function"""
return (x_value / sigma**2) * np.exp(-(x_value**2 / (2*sigma**2)))
def inverse_rayleigh_cdf(y_value, sigma=11.1):
"""Inverse of Rayleigh cumulative distribution function"""
return np.sqrt(-1 * (np.log(1-y_value) * 2*sigma**2))
def truncate_normal(mean, sd, min_value, max_value, size):
"""Function that gives a list of normally distributed random numbers with given mean, standard deviation and max-min"""
y_list = np.zeros((size, 1), dtype='float')
for i in range(size):
y = np.random.normal(mean, sd, 1)
if y < min_value:
y_list[i] = min_value
elif min_value <= y <= max_value:
y_list[i] = y
else:
y_list[i] = max_value
return y_list
def ev_single_boundary(time, time_vector, energy_req, power_max=6.6):
"""Function to find the max min energy boundaries of a single EV"""
arrive_time = time_vector[0]
depart_time = time_vector[1]
e_min = 0
e_max = 0
if time < arrive_time:
return [0, 0]
elif arrive_time <= time <= depart_time:
e_min = max(energy_req - power_max * (depart_time - time), 0)
e_max = min(energy_req, power_max * (time - arrive_time))
return [e_min, e_max]
else:
return [energy_req, energy_req]
def ev_fleet_boundary(time, arrive_vector, depart_vector, energy_req_vector, number_of_evs, power_max=6.6):
"""Function to find the max min energy boundaries of a fleet of EVs"""
e_max = 0
e_min = 0
for ev in range(number_of_evs):
energy_vector = ev_single_boundary(time, [arrive_vector[ev], depart_vector[ev]], energy_req_vector[ev], power_max)
e_min += energy_vector[0]
e_max += energy_vector[1]
return [e_min, e_max]
def initialise_fleet(number_of_evs):
"""Function to initialise fleet parameters like arrival/departure times, distance covered etc."""
t_min = 7
t_max = 18
arrive_time = truncate_normal(8, 0.5, t_min, 9, number_of_evs)
depart_time = truncate_normal(17, 0.5, 16, t_max, number_of_evs)
energy_req = np.array([(inverse_rayleigh_cdf(np.random.rand(1)) * 0.174) for i in range(number_of_evs)])
p_max = 6.6 # kW
e_max = []
e_min = []
time = []
for t in range(t_min, t_max+1):
energy = ev_fleet_boundary(t, arrive_time, depart_time, energy_req, number_of_evs, p_max)
e_min.append(energy[0])
e_max.append(energy[1])
time.append(t)
return e_min, e_max, time
"""MDP Functions"""
def spot_price(time):
"""Function that returns the day ahead price of that hour"""
return 25 + 4 * np.sin(3 * np.pi * time/24)
def transition_function(current_state, action, constraints):
"""Transition Function that gives the next state depending on current state and action"""
delta_t = 1
next_state = current_state + action * delta_t
if next_state < constraints[0]:
next_state = constraints[0]
elif constraints[0] <= next_state <= constraints[1]:
next_state = next_state
else:
next_state = constraints[1]
return next_state
def reward_function(price, current_state, next_state):
"""Reward function that gives the reward for each hour, based on current state and next state reached"""
return price * (next_state - current_state)
def environment(time, current_state, action, constraint):
"""Environment function that gives the next state and reward based on current state and action"""
price = spot_price(time)
next_state = transition_function(current_state, action, constraint)
reward = reward_function(price, current_state, next_state)
return next_state, reward
"""Main Script"""
n_ev = 1000
min_e, max_e, t_time = initialise_fleet(1000)
state_track = []
xk = 0
for i in range(len(t_time)):
t = t_time[i]
uk = np.random.rand(1) * 500 # kW of charging power drawn
print("Environment")
xk1, rk = environment(t, xk, uk, [min_e[i], max_e[i]])
print("Next State =", xk1)
print("Reward =", rk)
xk = xk1
state_track.append(xk1)
plt.plot(t_time, min_e, label='Minimum Energy', linestyle='--')
plt.plot(t_time, max_e, label='Maximum Energy', linestyle='--')
plt.plot(t_time, state_track, label='Energy')
plt.legend()
plt.show()
| true
|
75355144051ececf0e1ed0e6eb40a53bb7fcb4e2
|
Python
|
AnaMaria99/EasyChatBot
|
/utils.py
|
UTF-8
| 405
| 2.890625
| 3
|
[] |
no_license
|
class FileReader:
    """Reads question/answer pairs from a text file.

    The file is expected to alternate lines: question, answer, question, ...
    """

    def __init__(self, filename):
        self.__filename = filename

    def citire_date(self):
        """Return a list of (question, answer) tuples.

        Fix: the original stripped the newline only from the answer, so every
        question carried a trailing '\\n'; both sides are now stripped.
        """
        date = []
        with open(self.__filename) as f:
            for intrebare in f:
                raspuns = f.readline().strip('\n')
                date.append((intrebare.strip('\n'), raspuns))
        return date
def parsefloat(string):
    """Best-effort float extraction: keep digits and dots, parse, else None.

    Examples: 'abc12.5x' -> 12.5, '' -> None, '1.2.3' -> None.
    """
    cleaned = ''.join(ch for ch in string if ch.isdigit() or ch == '.').strip('.')
    try:
        return float(cleaned)
    except ValueError:
        # Fix: the original used a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; only a parse failure yields None.
        return None
| true
|
9477300204d583663760f98b74a0acc45d4f5c51
|
Python
|
ThanHuuTuan/python-Spider
|
/Spider/Nine--Shoe Figure/้ๅพ.py
|
UTF-8
| 1,957
| 2.984375
| 3
|
[] |
no_license
|
import requests
from bs4 import BeautifulSoup
import os
import time
import random
def get_urls(url):
    """Fetch one album-index page, then download every album linked on it,
    sleeping a short random interval between albums to stay polite."""
    res=requests.get(url)
    # print(res.text)
    s=1   # NOTE(review): `s` is incremented below but never used
    html=BeautifulSoup(res.text,'lxml')
    div=html.find_all('div','showindex__children')
    for i in range(len(div)):
        # print(div[i])
        # Each child div holds one album link; build its absolute URL.
        url='http://qcr0122.x.yupoo.com'+div[i].find('a','album__main').get('href')
        title=div[i].find('a','album__main').get('title')
        # print(url)
        print(title)
        get_img(url,title)
        # Random pause between 0.05 and 2.5 seconds.
        rtime = float( random.randint(1, 50) / 20)
        print("่ฏท่ฎฉๆไผๆฏ%d็ง้" % rtime)
        print("ๆฅไธๆฅๅฐ่ฆ็ฌๅ" + "็ฌฌ%dๆฌพ" % (i + 1))
        s+=1
        time.sleep(rtime)
def get_img(url, title):
    """Collect every image URL on an album page and download each one into
    a folder named after `title`."""
    res = requests.get(url)
    html = BeautifulSoup(res.text, 'lxml')
    divs = html.find('div', 'showalbum__parent showalbum__nor nor')
    # print(divs)
    div = divs.find_all('div', 'showalbum__children image__main')
    # Fix: the original initialised `i = 1` before the loop and did `i += 1`
    # inside it, but `for i in range(...)` overwrites `i` every pass — both
    # statements were dead code and are removed.
    for i in range(len(div)):
        # NOTE(review): 'data-path' appears to hold the path component of the
        # image URL — confirm against the live page markup.
        img = 'http://photo.yupoo.com' + div[i].find('img').get('data-path')
        print(img)
        get_img_content(img, title, i)
def get_img_content(url, username, i):
    """Download one image and save it as <i>.jpg inside ./<username>/."""
    folder_path = './' + username
    # exist_ok avoids the racy exists()-then-makedirs check of the original.
    os.makedirs(folder_path, exist_ok=True)
    res = requests.get(url)
    try:
        # `with` guarantees the handle is closed even if the write raises;
        # the original leaked the handle on a failed fp.write().
        with open(folder_path + '\\' + str(i) + '.jpg', 'wb') as fp:
            fp.write(res.content)
        print("Sucessful" + username)
    except OSError:
        # Narrowed from a bare `except:` so real bugs still surface.
        print("Failed" + username)
if __name__=='__main__':
    # Crawl gallery pages 11..18, sleeping a random 5-7.5 s between pages.
    for i in range(11,19):#19
        url='http://qcr0122.x.yupoo.com/albums?tab=gallery&page='+str(i)
        get_urls(url)
        rtime = float(5 + random.randint(1, 50) / 20)
        print("่ฏท่ฎฉๆไผๆฏ%d็ง้" % rtime)
        print("ๆฅไธๆฅๅฐ่ฆ็ฌๅ" + "้ฆ้กต็ฌฌ%d้กต" % (i+1))
        time.sleep(rtime)
| true
|
b9bbff89b17218769f471ded6f9c29df8c7387bc
|
Python
|
tkkuehn/aoc19
|
/day9/part1.py
|
UTF-8
| 13,312
| 3.078125
| 3
|
[] |
no_license
|
#!/usr/bin/python3
# Read the comma-separated Intcode program from the puzzle input file.
with open('./input.txt', 'r') as f:
    contents = f.read().splitlines()[0]
program = [int(x) for x in contents.split(',')]
class Computer:
    """Intcode virtual machine (Advent of Code 2019, day 9 feature set).

    Supports position (0), immediate (1) and relative (2) parameter modes,
    unbounded zero-initialised memory, and pauses after every output so a
    driver can consume ``output_buffer`` incrementally.

    ``run_program``/``continue_program`` return 1 when execution paused on an
    output instruction and 0 when the program halted (opcode 99).

    Refactor: the original ~250-line dispatcher repeated the same
    mode-decoding block for every operand of every opcode; that logic is
    factored into ``_read_param``/``_write_addr`` with behavior preserved.
    """

    # Number of parameters each opcode takes.
    _ARITY = {1: 3, 2: 3, 3: 1, 4: 1, 5: 2, 6: 2, 7: 3, 8: 3, 9: 1}

    def __init__(self):
        self.memory = {}
        self.inst_ptr = 0
        self.input_queue = []
        self.output_buffer = []
        self.relative_base = 0

    def run_program(self, program):
        """Load `program` into fresh memory and start executing it."""
        self.memory = dict(enumerate(program))
        self.inst_ptr = 0
        return self.continue_program()

    def access_memory(self, idx):
        """Read address `idx`; never-written addresses read as 0."""
        if idx < 0:
            raise KeyError('Attempted to access negative address')
        return self.memory.setdefault(idx, 0)

    def mutate_memory(self, idx, val):
        """Write `val` to address `idx`."""
        if idx < 0:
            raise KeyError('Attempted to mutate negative address')
        self.memory[idx] = val

    def _param_modes(self, opcode_val, params):
        """Decode per-parameter mode digits (right-to-left after the two-digit
        opcode), defaulting missing digits to position mode (0)."""
        padded = opcode_val.zfill(params + 2)
        return [int(padded[-3 - i]) for i in range(params)]

    def _read_param(self, offset, mode):
        """Fetch the value of the parameter `offset` slots after the opcode."""
        raw = self.access_memory(self.inst_ptr + offset)
        if mode == 0:        # position
            return self.access_memory(raw)
        if mode == 1:        # immediate
            return raw
        if mode == 2:        # relative
            return self.access_memory(self.relative_base + raw)
        raise RuntimeError('Invalid parameter mode: {}'.format(mode))

    def _write_addr(self, offset, mode):
        """Resolve the destination address of a write parameter (immediate
        mode is invalid for writes, matching the original)."""
        raw = self.access_memory(self.inst_ptr + offset)
        if mode == 0:
            return raw
        if mode == 2:
            return self.relative_base + raw
        raise RuntimeError('Invalid result mode: {}'.format(mode))

    def continue_program(self):
        """Resume execution; return 1 on output-pause, 0 on halt (99)."""
        while True:
            opcode_val = str(self.memory[self.inst_ptr])
            opcode = int(opcode_val[-2:])
            if opcode == 99:
                return 0
            if opcode not in self._ARITY:
                raise Exception('Invalid opcode: {}'.format(opcode))
            params = self._ARITY[opcode]
            modes = self._param_modes(opcode_val, params)
            advance = True
            if opcode in (1, 2):                     # add / multiply
                a = self._read_param(1, modes[0])
                b = self._read_param(2, modes[1])
                dest = self._write_addr(3, modes[2])
                self.mutate_memory(dest, a + b if opcode == 1 else a * b)
            elif opcode == 3:                        # input
                dest = self._write_addr(1, modes[0])
                self.mutate_memory(dest, int(self.input_queue.pop(0)))
            elif opcode == 4:                        # output: pause after it
                self.output_buffer.append(self._read_param(1, modes[0]))
                self.inst_ptr += params + 1
                return 1
            elif opcode in (5, 6):                   # jump-if-true / false
                check = self._read_param(1, modes[0])
                target = self._read_param(2, modes[1])
                if (opcode == 5) == (check != 0):
                    self.inst_ptr = target
                    advance = False
            elif opcode in (7, 8):                   # less-than / equals
                a = self._read_param(1, modes[0])
                b = self._read_param(2, modes[1])
                dest = self._write_addr(3, modes[2])
                cond = a < b if opcode == 7 else a == b
                self.mutate_memory(dest, 1 if cond else 0)
            elif opcode == 9:                        # adjust relative base
                self.relative_base += self._read_param(1, modes[0])
            if advance:
                self.inst_ptr += params + 1
# Drive the VM: seed input 1 ("test mode"), then keep resuming after each
# output-pause until the program halts (0), and print everything it emitted.
a = Computer()
a.input_queue.append(1)
if a.run_program(program) == 1:
    while True:
        if a.continue_program() == 0:
            break
print(a.output_buffer)
| true
|
b78c56d1fc74f528fe60acda215964ef83af19ff
|
Python
|
gezpage/opyapi
|
/tests/schema/validators/test_date_time.py
|
UTF-8
| 767
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
import pytest
from opyapi.schema.validators import DateTime
from datetime import datetime
def test_can_instantiate():
    # Smoke test: a fresh DateTime validator accepts a full ISO-8601 timestamp.
    validator = DateTime()
    assert validator.validate("2016-09-18T17:34:02.124Z")
@pytest.mark.parametrize(
    "value",
    (
        # Several accepted spellings of the same instant, including lenient
        # forms with missing separators.
        "2016-09-18T17:34:02.124Z",
        "2016-09-18 17:34:02.124Z",
        "2016-09-1817:34:02.124Z",
        "2016-09-1817:34:02Z",
        "2016-09-18T17:34:02+02:00",
        "20160918173402Z",
    ),
)
def test_valid_values(value: str):
    # Every accepted spelling must parse to the same wall-clock datetime.
    validator = DateTime()
    date = validator.validate(value)
    assert isinstance(date, datetime)
    assert date.year == 2016
    assert date.month == 9
    assert date.day == 18
    assert date.hour == 17
    assert date.minute == 34
    assert date.second == 2
| true
|
11e5f4f70ef1ef84cf7abda18cc47f5d044fb592
|
Python
|
jseranna/HPE-Python-repository
|
/tr_assessment_IT calculator.py
|
UTF-8
| 590
| 3.71875
| 4
|
[] |
no_license
|
# tax calculation app
# Simple income-tax slab calculator (hard-coded slabs of 250,000 each).
# input data
name = input('What is your name: ')
age = int(input('age please: '))   # NOTE(review): age is read but never used
sal = int(input('what is your total CTC: '))
sec = int(input('money invested under section 80C(if any): '))
# process
x= int(250000)   # width of one tax slab
# NOTE(review): the slab is chosen from gross salary, but tax is computed on
# (salary - 80C deduction - slab offsets) — confirm this matches the intended
# tax rules before relying on the numbers.
if(sal <=250000):
    y=0
elif(sal >=250001 and sal <=500000):
    y=(sal-sec-x)*5/100
elif(sal >=500001 and sal <=1000000):
    y=(sal-sec-x-x)*20/100+12500
elif(sal >=1000001):
    y=(sal-sec-x-x-x-x)*30/100+112500
else:
    y=0
# Output
print('Hi',name , 'your total tax payable amount is: ', y)
| true
|
fe48ab6f1062d4ccbf89ebbe773b3e2d582b992d
|
Python
|
juneharold/PH526x_UPFR
|
/review/numpy_practice/numpy3.py
|
UTF-8
| 416
| 3.6875
| 4
|
[] |
no_license
|
import numpy as np
z1 = np.array([1, 3, 5, 7, 9])
z2 = z1+1          # broadcasting: adds 1 to every element
print(z1)
print(z2)
ind = [0, 2, 3]
z3 = z1[ind]       # fancy indexing with a list returns a COPY
print(z3)
z4 = (z1>6)        # elementwise comparison -> boolean array
print(z4)
z5 = z1[z1 > 6] # only returns values where the index is true
# slicing vs indexing
# A slice is a VIEW into the original array; fancy indexing makes a copy.
z1 = np.array([1, 3, 5, 7, 9])
w = z1[0:3] # if w is modified, z1 also gets modified
w[0]=3
print(w)
print(z1)
ind=[0, 1, 2]
x=z1[ind] # even if x is modified, z1 does not get modified
| true
|
9fddfd4f5dad0d2f8cbf03cd6588173ead9bf680
|
Python
|
xuehanshuo/ref-python-lan
|
/05_้ซ็บงๅ้/hm_01_ๅ่กจๅขๅ ๆฅๆน.py
|
UTF-8
| 1,023
| 4.15625
| 4
|
[] |
no_license
|
# Demonstration of common list operations (read / modify / add / remove).
# Fix: the original's multi-byte comments were mojibake-corrupted and split
# across lines, leaving bare non-comment lines that are syntax errors; the
# commentary is reconstructed in English with the same operation sequence.
name_list = ["one", "two", "three"]

# dir(name_list) shows the available list methods:
#   append, clear, copy, count, extend, index, insert, pop, remove,
#   reverse, sort

# 1. Reading: by index and by value.
print(name_list[0])
print(name_list.index("one"))

# 2. Modifying an element in place.
name_list[0] = "ones"

# 3. Adding elements.
name_list.append("one")            # append to the end
name_list.insert(1, "zero")        # insert at position 1
name_list_temp = ["uno", "dos"]
name_list.extend(name_list_temp)   # append every item of another list

# 4. Removing elements.
name_list.remove("ones")           # remove the first matching value
name_list.pop()                    # pop (and return) the last item
name_list.pop(4)                   # pop the item at index 4
name_list.clear()                  # empty the whole list

# `del` removes a variable (or element) entirely; the name is unusable after:
#   del name_list[0]
#   name = "one"
#   del name
#   print(name)  # NameError
print(name_list)
| true
|
057d20c6613085b868397dc6677134382343bfe5
|
Python
|
SuryankDixit/LeetCode_Algorithms
|
/Python/Spiral_Matrix.py
|
UTF-8
| 767
| 3.140625
| 3
|
[] |
no_license
|
class Solution(object):
    def spiralOrder(self, matrix):
        """
        Return the elements of `matrix` in clockwise spiral order.

        :type matrix: List[List[int]]
        :rtype: List[int]
        """
        top, left = 0, 0
        bottom = len(matrix)
        right = len(matrix[0])
        order = []
        while top < bottom and left < right:
            # left -> right along the current top row
            for col in range(left, right):
                order.append(matrix[top][col])
            top += 1
            # top -> bottom down the current right column
            for row in range(top, bottom):
                order.append(matrix[row][right - 1])
            right -= 1
            if top < bottom:
                # right -> left along the current bottom row
                for col in range(right - 1, left - 1, -1):
                    order.append(matrix[bottom - 1][col])
                bottom -= 1
            if left < right:
                # bottom -> top up the current left column
                for row in range(bottom - 1, top - 1, -1):
                    order.append(matrix[row][left])
                left += 1
        return order
| true
|
7e32f4e01ef209571853ece8eaf7b319dd5df3ee
|
Python
|
itsavik4u/python-learning
|
/test_calc.py
|
UTF-8
| 868
| 3.484375
| 3
|
[] |
no_license
|
import unittest
from calc import Calc
class TestCalc(unittest.TestCase):
    """Unit tests for the Calc arithmetic helper."""

    def test_add(self):
        calc = Calc()
        # Edge cases: mixed signs, both negative, zero result.
        cases = [((36, 4), 40), ((-1, 1), 0), ((-2, -4), -6), ((-36, 4), -32)]
        for (lhs, rhs), expected in cases:
            self.assertEqual(calc.add(lhs, rhs), expected)

    def test_div(self):
        calc = Calc()
        # Sign handling and truncating behaviour of division.
        cases = [((36, 4), 9), ((-1, 1), -1), ((-1, -1), 1), ((5, 2), 2)]
        for (lhs, rhs), expected in cases:
            self.assertEqual(calc.div(lhs, rhs), expected)
        # Division by zero must raise ValueError (context-manager form).
        with self.assertRaises(ValueError):
            calc.div(10, 0)


if __name__ == '__main__':
    unittest.main()
| true
|
5526ab7d57981edbe79dcd197652d938b9c003ec
|
Python
|
ziqizhang/msm4phi
|
/code/python/src/analysis/IAA_kappa.py
|
UTF-8
| 1,973
| 2.6875
| 3
|
[] |
no_license
|
import sklearn
from sklearn.metrics import cohen_kappa_score
# Map each annotation label to a numeric class id ('P' is shorthand for
# 'Patient' and shares its id).
lookup={}
lookup["Advocates"]=0
lookup["Patient"]=1
lookup["P"]=1
lookup["HPO"]=2
lookup["HPI"]=3
lookup["Other"]=4
lookup["Research"]=5


def read_annotations(in_csv, num_lines: int, ignore_header=True):
    """Read up to `num_lines` annotation rows from a CSV file.

    Each row is ``<id>,<label>[,<label>...]``; labels are mapped through
    `lookup` and returned as one list of ids per row, sorted descending.

    Fix: the original wrapped only the final ``append`` in
    ``try/except KeyError`` — but the KeyError is raised by the ``lookup``
    access inside the inner loop, so an unknown label crashed instead of
    being reported.  The guard now wraps the lookup itself.
    """
    converted_labels = []
    with open(in_csv, 'r') as f:
        lines = f.readlines()
    for i in range(0, num_lines + 1):
        if ignore_header and i == 0:
            continue
        stripped = lines[i].replace('"', '').strip()
        part = stripped.split(",")
        labels = []
        # part[0] is the row id; labels start at index 1.
        for x in range(1, len(part)):
            p = part[x].strip()
            if not p:
                continue
            try:
                labels.append(lookup[p])
            except KeyError:
                print("error")
        converted_labels.append(sorted(labels, reverse=True))
    return converted_labels
def maximize_agreement(annotator1: list, annotator2: list):
    """Collapse multi-label annotations to single labels, preferring any
    label the two annotators share; otherwise each keeps its first
    (highest-id) label.  Both lists are modified in place.
    """
    for idx in range(len(annotator1)):
        first = annotator1[idx]
        second = annotator2[idx]
        if len(first) > 1 or len(second) > 1:
            shared = set(first) & set(second)
            if shared:
                agreed = list(shared)[0]
                annotator1[idx] = agreed
                annotator2[idx] = agreed
            else:
                annotator1[idx] = first[0]
                annotator2[idx] = second[0]
        else:
            annotator1[idx] = first[0]
            annotator2[idx] = second[0]
if __name__=="__main__":
    # Load the first 100 rows from each annotator's file, collapse
    # multi-label rows to maximise agreement, then report Cohen's kappa.
    annotator1 = \
        read_annotations("/home/zz/Cloud/GDrive/ziqizhang/project/msm4phi/paper2/data/annotation/GB_annotation.csv",100)
    annotator2 = \
        read_annotations("/home/zz/Cloud/GDrive/ziqizhang/project/msm4phi/paper2/data/annotation/ZZ_annotation.csv",100)
    maximize_agreement(annotator1,annotator2)
    print(cohen_kappa_score(annotator1, annotator2,
                            labels=None, weights=None))
| true
|
8e6842779b87380cc90393e24fb0dc6c4c65ea54
|
Python
|
baubrun/dp_py
|
/observer/observer.py
|
UTF-8
| 259
| 2.59375
| 3
|
[] |
no_license
|
from abc import ABCMeta, abstractmethod
class Observer(metaclass=ABCMeta):
    """Abstract observer in a publish/subscribe (Observer) pattern.

    Concrete observers must implement update/unsubscribe/subscribe.
    """
    @abstractmethod
    def update(self, desc):
        """Receive a notification payload `desc` from the subject."""
        pass
    @abstractmethod
    def unsubscribe(self):
        """Stop receiving notifications."""
        pass
    @abstractmethod
    def subscribe(self):
        """Start receiving notifications."""
        pass
| true
|
98b10b4b9c6024feb65652b321dc1d3945b4ff40
|
Python
|
ky8778/AL_study
|
/A/BackTracking/BJ2580์ค๋์ฟ _KY.py
|
UTF-8
| 1,211
| 2.609375
| 3
|
[] |
no_license
|
# Read the 9x9 Sudoku grid from stdin (0 = blank) and prepare the output grid.
inData = [list(map(int,input().split())) for _ in range(9)]
result = [[0 for _ in range(9)] for _ in range(9)]
def checkMap(y,x):
    """Return a 10-slot boolean table where slot v is True if digit v already
    occurs in row y, column x, or the 3x3 box containing (y, x) of the global
    `inData` grid (slot 0 reflects blank cells)."""
    seen = [False] * 10
    for k in range(9):
        seen[inData[y][k]] = True
        seen[inData[k][x]] = True
    box_y = (y // 3) * 3
    box_x = (x // 3) * 3
    for dy in range(3):
        for dx in range(3):
            seen[inData[box_y + dy][box_x + dx]] = True
    return seen
def getResult(n):
    # Backtracking over the blank cells listed in the global `inList`;
    # on success copies the completed grid from `inData` into `result`.
    if n>=len(inList):
        for i in range(9):
            for j in range(9):
                result[i][j] = inData[i][j]
        return True
    yy = inList[n][0]
    xx = inList[n][1]
    checkNumber = checkMap(yy,xx)
    # print(checkNumber)
    for i in range(1,10):
        if not checkNumber[i]:
            inData[yy][xx] = i          # tentatively place digit i
            if getResult(n+1):
                return True
            else:
                inData[yy][xx] = 0      # undo on failure (backtrack)
    return False
# Collect the coordinates of every blank cell, solve, and print the grid.
inList = []
for i in range(9):
    for j in range(9):
        if inData[i][j] == 0:
            inList.append([i,j])
getResult(0)
# print(inData)
for i in result:
    print(*i)
| true
|
e364d18be3b242b3a07ea2a86a91548f648f752e
|
Python
|
TimurKTI/PythonProjects
|
/mono.py
|
UTF-8
| 1,892
| 3.8125
| 4
|
[] |
no_license
|
# Russian alphabet (32 letters, no "Ё").
# NOTE(review): reconstructed from a mojibake-damaged source — verify
# against the original file's encoding.
alf = tuple("АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ")
N = len(alf)


def encrypt(text, a, k):
    """Affine cipher: each letter index i maps to (a*i + k) mod N;
    characters outside the alphabet pass through unchanged.

    Fix: the original ignored its `k` parameter and read the module-level
    `key` global instead; the parameter is now honoured (callers here pass
    `key`, so behaviour is unchanged for them).
    """
    print(f"Шифровка по ключу: {k}\n")
    text = text.upper()
    gv_txt = ""
    for ch in text:
        if ch in alf:
            enc_ch = (a * alf.index(ch) + k) % N
            gv_txt += alf[enc_ch]
        else:
            gv_txt += ch
    return gv_txt
def decrypt(text, a, k):
    """Invert the affine cipher: index i maps to a_inv * (i - k) mod N,
    where a_inv is the modular inverse of `a` (found by brute force;
    requires gcd(a, N) == 1).

    Fix: like encrypt(), the original read the global `key` instead of its
    `k` parameter; the parameter is now honoured.
    """
    print(f"Расшифровка по ключу: {k}\n")
    text = text.upper()
    gv_txt = ""
    a_inv = 0
    for candidate in range(N):
        if (a * candidate) % N == 1:
            a_inv = candidate
    for ch in text:
        if ch in alf:
            dec_ch = (a_inv * (alf.index(ch) - k)) % N
            gv_txt += alf[dec_ch]
        else:
            gv_txt += ch
    return gv_txt
a = 13   # multiplier of the affine cipher (must be coprime with N=32)
key = 5  # additive key
# Simple REPL: "шифр" encrypts, "расш" decrypts, anything else exits.
# NOTE(review): all prompts reconstructed from a mojibake-damaged source.
while True:
    user = input('введите "шифр", если нужно зашифровать текст, "расш", если нужно расшифровать, или что-нибудь другое для выхода\n')
    if user == 'шифр':
        text = input("введите текст, который нужно зашифровать\n")
        ciphtxt = encrypt(text, a, key)
        print("Ваш зашифрованный текст\n")
        print(ciphtxt)
        print('\n')
    elif user == 'расш':
        text = input("введите текст, который нужно расшифровать\n")
        firstxt = decrypt(text, a, key)
        print("Ваш расшифрованный текст\n")
        print(firstxt)
        print('\n')
    else:
        break
input("Прекращение работы программы")
| true
|
db59edcd86a97c5d6816fd92d9e006f9be9ed3ee
|
Python
|
mattijn/pynotebook
|
/2015/2015-12-18 Cartopy Global Drought.py
|
UTF-8
| 24,480
| 2.625
| 3
|
[] |
no_license
|
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LATITUDE_FORMATTER, LONGITUDE_FORMATTER
import matplotlib.ticker as mticker
import matplotlib.colors as mcolors
import matplotlib.colorbar as mcb
import cartopy.feature as cfeature
from matplotlib import gridspec
from datetime import datetime
import warnings
from osgeo import gdal
import numpy as np
# In[2]:
import numpy as np
import matplotlib.colors as mcolors
def make_colormap(seq):
"""Return a LinearSegmentedColormap
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
"""
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for i, item in enumerate(seq):
if isinstance(item, float):
r1, g1, b1 = seq[i - 1]
r2, g2, b2 = seq[i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return mcolors.LinearSegmentedColormap('CustomMap', cdict)
c = mcolors.ColorConverter().to_rgb
def cmap_discretize(cmap, N):
"""Return a discrete colormap from the continuous colormap cmap.
cmap: colormap instance, eg. cm.jet.
N: number of colors.
Example
x = resize(arange(100), (5,100))
djet = cmap_discretize(cm.jet, 5)
imshow(x, cmap=djet)
"""
if type(cmap) == str:
cmap = get_cmap(cmap)
colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))
colors_rgba = cmap(colors_i)
indices = np.linspace(0, 1., N+1)
cdict = {}
for ki,key in enumerate(('red','green','blue')):
cdict[key] = [ (indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in xrange(N+1) ]
# Return colormap object.
return mcolors.LinearSegmentedColormap(cmap.name + "_%d"%N, cdict, 1024)
def reverse_colourmap(cmap, name = 'my_cmap_r'):
    """Return `cmap` with its colour progression reversed.

    Each segment row (x, y0, y1) becomes (1 - x, y1, y0), and rows are
    re-sorted so positions stay increasing.

    cmap: LinearSegmentedColormap instance
    name: name of the new cmap (default 'my_cmap_r')
    """
    flipped = {}
    for channel, rows in cmap._segmentdata.items():
        flipped[channel] = sorted((1 - x, y1, y0) for x, y0, y1 in rows)
    return mcolors.LinearSegmentedColormap(name, flipped)
# In[3]:
# Composite date being mapped and the lon/lat extent used for every
# map panel (near-global; poles excluded).
date = datetime(2004,10,15)
extent = [-179,179,-60,80]
# In[4]:
# Build the six input GeoTIFF paths for this date: P0..P3 are the
# drought-percentage layers, MEAN the average index, DC the drought
# category (see panel titles further down).
date_str = '20041015'
prefix = ['P0','P1','P2','P3','MEAN','DC']
folder = r'D:\Downloads\Mattijn@Zhou\GlobalDroughtProvince\tif//'
in_rasters = []
for pre in prefix:
    out_raster = folder + pre + date_str + '.tif'
    print out_raster
    in_rasters.append(out_raster)
# Read each raster as a masked array; 7 is the no-data value.
data1 = np.ma.masked_equal(gdal.Open(in_rasters[0]).ReadAsArray(),7)
data2 = np.ma.masked_equal(gdal.Open(in_rasters[1]).ReadAsArray(),7)
data3 = np.ma.masked_equal(gdal.Open(in_rasters[2]).ReadAsArray(),7)
data4 = np.ma.masked_equal(gdal.Open(in_rasters[3]).ReadAsArray(),7)
data5 = np.ma.masked_equal(gdal.Open(in_rasters[4]).ReadAsArray(),7)
data6 = np.ma.masked_equal(gdal.Open(in_rasters[5]).ReadAsArray(),7)
# In[5]:
data6.max()
# In[6]:
# Derive the lon/lat grid for pcolormesh from the first raster's
# geotransform (gt[0], gt[3] = upper-left corner; gt[1], gt[5] = pixel size).
in_tif = in_rasters[0]
ds = gdal.Open(in_tif)
print 'geotransform', ds.GetGeoTransform()
print 'raster X size', ds.RasterXSize
print 'raster Y size', ds.RasterYSize
data = ds.ReadAsArray()
data_ma = np.ma.masked_equal(data,7)
gt = ds.GetGeoTransform()
proj = ds.GetProjection()
xres = gt[1]
yres = gt[5]
# get the edge coordinates and add half the resolution
# to go to center coordinates
xmin = gt[0] + xres * 0.5
xmax = gt[0] + (xres * ds.RasterXSize) - xres * 0.5
ymin = gt[3] + (yres * ds.RasterYSize) + yres * 0.5
ymax = gt[3] - yres * 0.5
#ds = None
# Centre coordinates of every column/row (yres is negative, so gridlats
# descends from ymax to ymin).
gridlons = np.mgrid[xmin:xmax+xres:xres]
gridlats = np.mgrid[ymax+yres:ymin:yres]
# In[7]:
# Discrete colour ramps built from hex colours via `make_colormap`
# (defined earlier in this file): a 5-step ramp for the drought-category
# panel, a 5-step ramp for the percentage panels, and a 10-step
# brown-to-blue ramp for the average-index panel.
drought_cat_tci_cmap = make_colormap([c('#993406'), c('#D95E0E'),0.2, c('#D95E0E'), c('#FE9829'),0.4,
                                      c('#FE9829'), c('#FFD98E'),0.6, c('#FFD98E'), c('#FEFFD3'),0.8, c('#C4DC73')])
drought_per_tci_cmap = make_colormap([c('#993406'), c('#D95E0E'),0.2, c('#D95E0E'), c('#FE9829'),0.4,
                                      c('#FE9829'), c('#FFD98E'),0.6, c('#FFD98E'), c('#FEFFD3'),0.8, c('#FEFFD3')])
drought_avg_tci_cmap = make_colormap([c('#993406'), c('#D95E0E'),0.1, c('#D95E0E'), c('#FE9829'),0.2,
                                      c('#FE9829'), c('#FFD98E'),0.3, c('#FFD98E'), c('#FEFFD3'),0.4,
                                      c('#FEFFD3'), c('#C4DC73'),0.5, c('#C4DC73'), c('#93C83D'),0.6,
                                      c('#93C83D'), c('#69BD45'),0.7, c('#69BD45'), c('#6ECCDD'),0.8,
                                      c('#6ECCDD'), c('#3553A4'),0.9, c('#3553A4')])
# Reversed ramps so low values map to the "wet" end and high to "dry".
drought_per_tci_cmap_r = reverse_colourmap(drought_per_tci_cmap, name = 'drought_per_tci_cmap_r')
drought_cat_tci_cmap_r = reverse_colourmap(drought_cat_tci_cmap, name = 'drought_cat_tci_cmap_r')
# In[8]:
# Lay out a 3x3 grid: four small percentage panels (ax1-ax4), one small
# average panel (ax5) and a large 2x2 category panel (ax6).  Each panel is
# an InterruptedGoodeHomolosine map with the global extent set above.
#extent = [111.91693268, 123.85693268, 49.43324112, 40.67324112]
#extent = [73.5,140,14,53.6]
fig = plt.figure(figsize=(27.69123,12))
gs = gridspec.GridSpec(3, 3)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . ax1
# PLOT TOP LEFT
ax1 = fig.add_subplot(gs[0,0], projection=ccrs.InterruptedGoodeHomolosine(central_longitude=0))
ax1.set_extent(extent)
ax1.outline_patch.set_edgecolor('none')
# gridlines
gl1 = ax1.gridlines()
# pcolormesh
bounds1 = [0.25,0.5,0.75,1]
cmap1 = cmap_discretize(drought_per_tci_cmap_r,6)
norm1 = mcolors.BoundaryNorm(bounds1, cmap1.N)
im1 = ax1.pcolormesh(gridlons, gridlats, data1, transform=ccrs.PlateCarree(), norm=norm1, cmap=cmap1, vmin=0, vmax=1)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . ax2
# PLOT MIDDLE LEFT
ax2 = fig.add_subplot(gs[1,0], projection=ccrs.InterruptedGoodeHomolosine(central_longitude=0))
ax2.set_extent(extent)
ax2.outline_patch.set_edgecolor('none')
# gridlines
gl2 = ax2.gridlines()
# pcolormesh
bounds2 = [0.25,0.5,0.75,1]
cmap2 = cmap_discretize(drought_per_tci_cmap_r,6)
norm2 = mcolors.BoundaryNorm(bounds2, cmap2.N)
im2 = ax2.pcolormesh(gridlons, gridlats, data2, transform=ccrs.PlateCarree(), norm=norm2, cmap=cmap2, vmin=0, vmax=1)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . ax3
# PLOT BOTTOM LEFT
ax3 = fig.add_subplot(gs[2, 0], projection=ccrs.InterruptedGoodeHomolosine(central_longitude=0))
ax3.set_extent(extent)
ax3.outline_patch.set_edgecolor('none')
# gridlines
gl3 = ax3.gridlines()
# pcolormesh
bounds3 = [0.25,0.5,0.75,1]
cmap3 = cmap_discretize(drought_per_tci_cmap_r,6)
norm3 = mcolors.BoundaryNorm(bounds3, cmap3.N)
im3 = ax3.pcolormesh(gridlons, gridlats, data3, transform=ccrs.PlateCarree(), norm=norm3, cmap=cmap3, vmin=0, vmax=1)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . ax4
# PLOT BOTTOM MIDDLE
ax4 = fig.add_subplot(gs[2,1], projection=ccrs.InterruptedGoodeHomolosine(central_longitude=0))
ax4.set_extent(extent)
ax4.outline_patch.set_edgecolor('none')
# gridlines
gl4 = ax4.gridlines()
# pcolormesh
bounds4 = [0.25,0.5,0.75,1]
cmap4 = cmap_discretize(drought_per_tci_cmap_r,6)
norm4 = mcolors.BoundaryNorm(bounds4, cmap4.N)
im4 = ax4.pcolormesh(gridlons, gridlats, data4, transform=ccrs.PlateCarree(), norm=norm4, cmap=cmap4, vmin=0, vmax=1)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . ax5
# PLOT BOTTOM RIGHT
# Average-index panel: symmetric bounds around 0 with its own colormap.
ax5 = fig.add_subplot(gs[2,2], projection=ccrs.InterruptedGoodeHomolosine(central_longitude=0))
ax5.set_extent(extent)
ax5.outline_patch.set_edgecolor('none')
# gridlines
gl5 = ax5.gridlines(linewidth=1, color='gray', linestyle=':')
# pcolormesh
bounds5 = [-1,-0.35,-0.25,-0.15,0,0.15,0.25,0.35,1]
cmap5 = cmap_discretize(drought_avg_tci_cmap,8)
norm5 = mcolors.BoundaryNorm(bounds5, cmap5.N)
im5 = ax5.pcolormesh(gridlons, gridlats, data5, transform=ccrs.PlateCarree(), norm=norm5, cmap=cmap5, vmin=-1, vmax=1)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . ax6
# PLOT CENTER
# Main 2x2 drought-category panel; the only one with a visible outline,
# coastlines/borders at 50m scale, and labelled graticule.
ax6 = fig.add_subplot(gs[0:2,1:3], projection=ccrs.InterruptedGoodeHomolosine(central_longitude=0))
ax6.set_extent(extent)
ax6.outline_patch.set_edgecolor('gray')
ax6.outline_patch.set_linewidth(1)
ax6.outline_patch.set_linestyle(':')
# features
coastline = cfeature.COASTLINE.scale='50m'
borders = cfeature.BORDERS.scale='50m'
ax6.add_feature(cfeature.COASTLINE,linewidth=0.5, edgecolor='black')
ax6.add_feature(cfeature.BORDERS, linewidth=0.5, edgecolor='black')
# gridlines
gl6 = ax6.gridlines(linewidth=1, color='gray', linestyle=':')
gl6.xlocator = mticker.FixedLocator(range(-180,190,20))
gl6.ylocator = mticker.FixedLocator(range(-60,90,10))
gl6.xformatter = LONGITUDE_FORMATTER
gl6.yformatter = LATITUDE_FORMATTER
# pcolormesh
bounds6 = [0, 1, 2, 3, 4, 5]
cmap6 = cmap_discretize(drought_cat_tci_cmap_r,5)
norm6 = mcolors.BoundaryNorm(bounds6, cmap6.N)
im6 = ax6.pcolormesh(gridlons, gridlats, data6, transform=ccrs.PlateCarree(), norm=norm6, cmap=cmap6, vmin=0, vmax=4)
#date = i[-7:]
#year = date[-4::]
#doy = date[-7:-4]
#date_out = datetime.datetime.strptime(str(year)+'-'+str(doy),'%Y-%j')
date_label = 'Date: '+str(date.year) +'-'+str(date.month).zfill(2)+'-'+str(date.day).zfill(2)
# ADD LABELS FOR EACH PLOT
ax1.set_title('Percentage of Slight Drought', weight='semibold', fontsize=12)
ax2.set_title('Percentage of Moderate Drought', weight='semibold', fontsize=12)
ax3.set_title('Percentage of Severe Drought', weight='semibold', fontsize=12)
ax4.set_title('Percentage of Extreme Drought', weight='semibold', fontsize=12)
ax5.set_title('Average of NDAI', weight='semibold', fontsize=12)
ax6.set_title('Drought Alert at Province Level. '+date_label, fontsize=20, weight='semibold', color='k')
# ADD LEGEND IN ALL PLOTS
# NOTE: `cmap`, `norm`, `bounds*` and `cb2` are deliberately rebound for
# each colorbar below; only the colorbar axes (cbax1..cbax6) and the tick
# labelling differ between the repeated sections.
# -------------------------Ax 1
#cbax1 = fig.add_axes([0.328, 0.67, 0.011, 0.16]) # without tight_layout()
cbax1 = fig.add_axes([0.03, 0.7, 0.011, 0.10]) # including tight_layout()
#cmap = mcolors.ListedColormap(['r', 'g', 'b', 'c'])
cmap = cmap_discretize(drought_per_tci_cmap,6)
cmap.set_over('0.25')
cmap.set_under('0.75')
# If a ListedColormap is used, the length of the bounds array must be
# one greater than the length of the color list. The bounds must be
# monotonically increasing.
bounds = [1, 2, 3, 4, 5]
bounds_ticks = [1.5, 2.5, 3.5, 4.5]
bounds_ticks_name = ['>75%', '50-75%', '25-50%', '<25%']
norm = mcolors.BoundaryNorm(bounds, cmap.N)
cb2 = mcb.ColorbarBase(cbax1, cmap=cmap,
                       norm=norm,
                       # to use 'extend', you must
                       # specify two extra boundaries:
                       #boundaries=[0]+bounds+[13],
                       #extend='both',
                       extendfrac='auto',
                       ticklocation='right',
                       ticks=bounds_ticks,#_name, # optional
                       spacing='proportional',
                       orientation='vertical')
#cb2.set_label('Discrete intervals, some other units')
cb2.set_ticklabels(bounds_ticks_name)
# -------------------------Ax 2
#cbax1 = fig.add_axes([0.328, 0.67, 0.011, 0.16]) # without tight_layout()
cbax2 = fig.add_axes([0.03, 0.37, 0.011, 0.10]) # including tight_layout()
#cmap = mcolors.ListedColormap(['r', 'g', 'b', 'c'])
cmap = cmap_discretize(drought_per_tci_cmap,6)
cmap.set_over('0.25')
cmap.set_under('0.75')
# If a ListedColormap is used, the length of the bounds array must be
# one greater than the length of the color list. The bounds must be
# monotonically increasing.
bounds = [1, 2, 3, 4, 5]
bounds_ticks = [1.5, 2.5, 3.5, 4.5]
bounds_ticks_name = ['>75%', '50-75%', '25-50%', '<25%']
norm = mcolors.BoundaryNorm(bounds, cmap.N)
cb2 = mcb.ColorbarBase(cbax2, cmap=cmap,
                       norm=norm,
                       # to use 'extend', you must
                       # specify two extra boundaries:
                       #boundaries=[0]+bounds+[13],
                       #extend='both',
                       extendfrac='auto',
                       ticklocation='right',
                       ticks=bounds_ticks,#_name, # optional
                       spacing='proportional',
                       orientation='vertical')
#cb2.set_label('Discrete intervals, some other units')
cb2.set_ticklabels(bounds_ticks_name)
# -------------------------Ax 3
#cbax1 = fig.add_axes([0.328, 0.67, 0.011, 0.16]) # without tight_layout()
cbax3 = fig.add_axes([0.03, 0.04, 0.011, 0.10]) # including tight_layout()
#cmap = mcolors.ListedColormap(['r', 'g', 'b', 'c'])
cmap = cmap_discretize(drought_per_tci_cmap,6)
cmap.set_over('0.25')
cmap.set_under('0.75')
# If a ListedColormap is used, the length of the bounds array must be
# one greater than the length of the color list. The bounds must be
# monotonically increasing.
bounds = [1, 2, 3, 4, 5]
bounds_ticks = [1.5, 2.5, 3.5, 4.5]
bounds_ticks_name = ['>75%', '50-75%', '25-50%', '<25%']
norm = mcolors.BoundaryNorm(bounds, cmap.N)
cb2 = mcb.ColorbarBase(cbax3, cmap=cmap,
                       norm=norm,
                       # to use 'extend', you must
                       # specify two extra boundaries:
                       #boundaries=[0]+bounds+[13],
                       #extend='both',
                       extendfrac='auto',
                       ticklocation='right',
                       ticks=bounds_ticks,#_name, # optional
                       spacing='proportional',
                       orientation='vertical')
#cb2.set_label('Discrete intervals, some other units')
cb2.set_ticklabels(bounds_ticks_name)
# -------------------------Ax 4
#cbax1 = fig.add_axes([0.328, 0.67, 0.011, 0.16]) # without tight_layout()
cbax4 = fig.add_axes([0.36, 0.04, 0.011, 0.10]) # including tight_layout()
#cmap = mcolors.ListedColormap(['r', 'g', 'b', 'c'])
cmap = cmap_discretize(drought_per_tci_cmap,6)
cmap.set_over('0.25')
cmap.set_under('0.75')
# If a ListedColormap is used, the length of the bounds array must be
# one greater than the length of the color list. The bounds must be
# monotonically increasing.
bounds = [1, 2, 3, 4, 5]
bounds_ticks = [1.5, 2.5, 3.5, 4.5]
bounds_ticks_name = ['>75%', '50-75%', '25-50%', '<25%']
norm = mcolors.BoundaryNorm(bounds, cmap.N)
cb2 = mcb.ColorbarBase(cbax4, cmap=cmap,
                       norm=norm,
                       # to use 'extend', you must
                       # specify two extra boundaries:
                       #boundaries=[0]+bounds+[13],
                       #extend='both',
                       extendfrac='auto',
                       ticklocation='right',
                       ticks=bounds_ticks,#_name, # optional
                       spacing='proportional',
                       orientation='vertical')
#cb2.set_label('Discrete intervals, some other units')
cb2.set_ticklabels(bounds_ticks_name)
# -------------------------Ax 5
# Average-index colorbar: 8 bins; NOTE(review): 6.6 in bounds_ticks looks
# like a typo for 6.5, and bounds_ticks_name has 9 entries for 8 ticks —
# confirm intended labelling.
#cbax5 = fig.add_axes([0.85, 0.15, 0.011, 0.16]) # without tight_layout()
cbax5 = fig.add_axes([0.6922, 0.04, 0.011, 0.16]) # including tight_layout()
#cmap = mcolors.ListedColormap(['r', 'g', 'b', 'c'])
cmap = cmap_discretize(drought_avg_tci_cmap,8)
cmap.set_over('0.25')
cmap.set_under('0.75')
# If a ListedColormap is used, the length of the bounds array must be
# one greater than the length of the color list. The bounds must be
# monotonically increasing.
bounds = [1, 2, 3, 4, 5,6,7,8,9]
bounds_ticks = [1.5, 2.5, 3.5, 4.5,5.5,6.6,7.5,8.5]
bounds_ticks_name = [' ', '-0.35', ' ', '-0.15','0','0.15',' ','0.35',' ']
norm = mcolors.BoundaryNorm(bounds, cmap.N)
cb2 = mcb.ColorbarBase(cbax5, cmap=cmap,
                       norm=norm,
                       # to use 'extend', you must
                       # specify two extra boundaries:
                       #boundaries=[0]+bounds+[13],
                       #extend='both',
                       extendfrac='auto',
                       ticklocation='right',
                       ticks=bounds,#_name, # optional
                       spacing='proportional',
                       orientation='vertical')
cb2.set_ticklabels(bounds_ticks_name)
# ------------------------Ax 6
# Category colorbar for the main panel, labelled with class names.
#cbax6 = fig.add_axes([0.79, 0.48, 0.020, 0.30]) # without tight_layout()
cbax6 = fig.add_axes([0.37, 0.4, 0.020, 0.20]) # without tight_layout()
#cmap = mcolors.ListedColormap(['r', 'g', 'b', 'c'])
cmap = cmap_discretize(drought_cat_tci_cmap,5)
cmap.set_over('0.25')
cmap.set_under('0.75')
# If a ListedColormap is used, the length of the bounds array must be
# one greater than the length of the color list. The bounds must be
# monotonically increasing.
bounds = [1, 2, 3, 4, 5,6]
bounds_ticks = [1.5, 2.5, 3.5, 4.5,5.5]
bounds_ticks_name = ['Extreme Drought', 'Severe Drought', 'Moderate Drought', 'Slight Drought', 'No Drought']
norm = mcolors.BoundaryNorm(bounds, cmap.N)
cb2 = mcb.ColorbarBase(cbax6, cmap=cmap,
                       norm=norm,
                       # to use 'extend', you must
                       # specify two extra boundaries:
                       #boundaries=[0]+bounds+[13],
                       #extend='both',
                       extendfrac='auto',
                       ticklocation='right',
                       ticks=bounds_ticks,#_name, # optional
                       spacing='proportional',
                       orientation='vertical')
#cb2.set_label('Discrete intervals, some other units')
cb2.set_ticklabels(bounds_ticks_name)
cb2.ax.tick_params(labelsize=12)
# # ADD LAKES AND RIVERS
# #FOR PLOT 1
# lakes = cfeature.LAKES.scale='110m'
# rivers = cfeature.RIVERS.scale='110m'
# ax1.add_feature(cfeature.LAKES)
# ax1.add_feature(cfeature.RIVERS)
# #FOR PLOT 2
# ax2.add_feature(cfeature.LAKES)
# ax2.add_feature(cfeature.RIVERS)
# #FOR PLOT 3
# ax3.add_feature(cfeature.LAKES)
# ax3.add_feature(cfeature.RIVERS)
# #FOR PLOT 4
# ax4.add_feature(cfeature.LAKES)
# ax4.add_feature(cfeature.RIVERS)
# #FOR PLOT 5
# ax5.add_feature(cfeature.LAKES)
# ax5.add_feature(cfeature.RIVERS)
#FOR PLOT 6
#lakes = cfeature.LAKES.scale='50m'
#rivers = cfeature.RIVERS.scale='50m'
#ax6.add_feature(cfeature.LAKES)
#ax6.add_feature(cfeature.RIVERS)
# Thin coastlines and country borders on every panel.
ax1.add_feature(cfeature.COASTLINE, linewidth=0.2, edgecolor='black')
ax1.add_feature(cfeature.BORDERS, linewidth=0.2, edgecolor='black')
ax2.add_feature(cfeature.COASTLINE, linewidth=0.2, edgecolor='black')
ax2.add_feature(cfeature.BORDERS, linewidth=0.2, edgecolor='black')
ax3.add_feature(cfeature.COASTLINE, linewidth=0.2, edgecolor='black')
ax3.add_feature(cfeature.BORDERS, linewidth=0.2, edgecolor='black')
ax4.add_feature(cfeature.COASTLINE, linewidth=0.2, edgecolor='black')
ax4.add_feature(cfeature.BORDERS, linewidth=0.2, edgecolor='black')
ax5.add_feature(cfeature.COASTLINE, linewidth=0.2, edgecolor='black')
ax5.add_feature(cfeature.BORDERS, linewidth=0.2, edgecolor='black')
ax6.add_feature(cfeature.COASTLINE, linewidth=0.2, edgecolor='black')
ax6.add_feature(cfeature.BORDERS, linewidth=0.2, edgecolor='black')
with warnings.catch_warnings():
    # This raises warnings since tight layout cannot
    # handle gridspec automatically. We are going to
    # do that manually so we can filter the warning.
    warnings.simplefilter("ignore", UserWarning)
    gs.tight_layout(fig, rect=[None,None,None,None])
#gs.update(wspace=0.03, hspace=0.03)
# Output name encodes day-of-year and year, e.g. DroughtAlert_2892004.png.
path_out = r'D:\Downloads\Mattijn@Zhou\GlobalDroughtProvince\png//Global_'
file_out = 'DroughtAlert_'+str(date.timetuple().tm_yday).zfill(3)+str(date.year).zfill(4)+'.png'
filepath = path_out+file_out
fig.savefig(filepath, dpi=200, bbox_inches='tight')
print filepath
#plt.show()
# Free the figure to keep memory flat when this cell is re-run per date.
fig.clf()
plt.close()
#del record#,county
ram = None
# In[ ]:
get_ipython().magic(u'matplotlib inline')
# In[ ]:
my_cmap = drought_per_tci_cmap
# In[ ]:
# Standalone experiment: render a single vertical discrete colorbar from
# the percentage colormap (same bounds/labels as the figure legends above).
fig = plt.figure(figsize=(12, 32))
ax1 = fig.add_axes([0.03, 0.7, 0.011, 0.10]) # including tight_layout()
#cmap = mcolors.ListedColormap(['r', 'g', 'b', 'c'])
my_cmap = cmap_discretize(drought_per_tci_cmap,6)
#for key in cmap._segmentdata:
#    cmap._segmentdata[key] = list(reversed(cmap._segmentdata[key]))
# If a ListedColormap is used, the length of the bounds array must be
# one greater than the length of the color list. The bounds must be
# monotonically increasing.
bounds = [1, 2, 3, 4, 5]
bounds_ticks = [1.5, 2.5, 3.5, 4.5]
bounds_ticks_name = ['>75%', '50-75%', '25-50%', '<25%']
norm = mcolors.BoundaryNorm(bounds, my_cmap.N)
cb2 = mcb.ColorbarBase(ax1, cmap=my_cmap,
                       norm=norm,
                       # to use 'extend', you must
                       # specify two extra boundaries:
                       #boundaries=[0]+bounds+[13],
                       #extend='both',
                       extendfrac='auto',
                       ticklocation='right',
                       ticks=bounds_ticks,#_name, # optional
                       spacing='proportional',
                       orientation='vertical')
#cb2.set_label('Discrete intervals, some other units')
cb2.set_ticklabels(bounds_ticks_name)
plt.show()
# In[ ]:
my_cmap
# In[ ]:
def reverse_colourmap(cmap, name = 'my_cmap_r'):
    """
    Build the reversed counterpart of a LinearSegmentedColormap.

    In:
        cmap
        name (default 'my_cmap_r')
    Out:
        my_cmap_r

    Every channel anchor (x, y0, y1) becomes (1-x, y1, y0), and the
    anchors are re-sorted so x is ascending again.
    """
    reversed_segments = {}
    for channel in cmap._segmentdata:
        rows = cmap._segmentdata[channel]
        reversed_segments[channel] = sorted((1 - x, y1, y0) for x, y0, y1 in rows)
    my_cmap_r = mpl.colors.LinearSegmentedColormap(name, reversed_segments)
    return my_cmap_r
# In[ ]:
my_cmap_r = reverse_colourmap(my_cmap)
# In[ ]:
#my_cmap_r._segmentdata
arg = cmap_discretize(my_cmap_r,6)
# In[ ]:
# Draw the original and reversed colormaps as two horizontal bars for
# visual comparison.
fig = plt.figure(figsize=(8, 2))
ax1 = fig.add_axes([0.05, 0.80, 0.9, 0.15])
ax2 = fig.add_axes([0.05, 0.475, 0.9, 0.15])
norm = mpl.colors.Normalize(vmin=0, vmax=1)
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap = my_cmap, norm=norm,orientation='horizontal')
cb2 = mpl.colorbar.ColorbarBase(ax2, cmap = my_cmap_r, norm=norm, orientation='horizontal')
# In[ ]:
# NOTE(review): matplotlib is bound to `mpl` only here, *after* the cells
# above already use `mpl.*` — fine when notebook cells are run out of
# order, but broken if this file executes top to bottom.
import matplotlib as mpl
# In[ ]:
| true
|
904980c5f87a3a8187d023c38936992d3abb612f
|
Python
|
adslchen/leetcode
|
/E15/lc310.py
|
UTF-8
| 1,152
| 3.1875
| 3
|
[] |
no_license
|
class Solution(object):
    def findMinHeightTrees(self, n, edges):
        """Return the root node(s) giving a minimum-height tree (LeetCode 310).

        Strategy: repeatedly strip the current layer of leaves; the one or
        two nodes that remain are the tree's centroid(s), which are exactly
        the minimum-height roots.

        Fixes: removed the debug ``print(graph)`` / ``print(leaves)``
        statements, which corrupt judged output.

        :type n: int
        :type edges: List[List[int]]
        :rtype: List[int]
        """
        if n == 1:
            return [0]
        # Build the adjacency list.
        graph = {}
        for node1, node2 in edges:
            graph.setdefault(node1, []).append(node2)
            graph.setdefault(node2, []).append(node1)
        # Initial leaves: nodes of degree one.
        leaves = [node for node in graph if len(graph[node]) == 1]
        # Peel leaf layers until at most two nodes (the centroids) remain.
        while n > 2:
            n -= len(leaves)
            new_leaves = []
            for leaf in leaves:
                neighbor = graph[leaf].pop()
                graph[neighbor].remove(leaf)
                if len(graph[neighbor]) == 1:
                    new_leaves.append(neighbor)
            leaves = new_leaves
        return leaves
| true
|
f84b3f75a098d32b93fb49a14afdc974a143b724
|
Python
|
englandbaron/CadvisorAnalysis
|
/K8sModel/Deployment.py
|
UTF-8
| 476
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: Tang Smith
@contact: 415107188@qq.com
@software: PyCharm
@time: 2019/9/13 ไธๅ12:25
"""
from .Pod import Pod
class Deployment(object):
    """Logical grouping of Pods, mirroring a Kubernetes Deployment."""

    def __init__(self, name, InitialPodNumber):
        # TODO: Logic View for Pods
        # Fix: `name` was accepted but never stored, so __repr__ raised
        # AttributeError on every Deployment instance.
        self.name = name
        self.PodNumber = InitialPodNumber
        self.pod_list = []

    def pod_bind(self, pod):
        """Attach a Pod to this deployment's pod list."""
        self.pod_list.append(pod)

    def __repr__(self):
        return "<Deployment %s>" % self.name
| true
|
45dc097d5334bc6ab7ba7fe9a046d6728a5ab9c5
|
Python
|
SharpColton/HighSchoolCodingAA
|
/NumTemp.py
|
UTF-8
| 338
| 4.0625
| 4
|
[] |
no_license
|
def GetTemp():
    """Prompt until the user enters a numeric temperature, then return it.

    Returns the value as a float.

    Fixes: the original called eval() on raw user input, which executes
    arbitrary code; float() accepts the same numeric inputs safely (so the
    caught exception becomes ValueError instead of NameError).  The
    unreachable ``break`` after ``return`` was removed.
    """
    while True:
        try:
            print('Please Enter The Temperature You Would Like Converted ')
            print('Numeric Values Only')
            temp = float(input('Temperature To Be Converted: '))
            return temp
        except ValueError:
            print ("Invalid input")
| true
|
8210d8222aff6c3e8b6897ab73ff9e05557b99a5
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02410/s412174668.py
|
UTF-8
| 314
| 2.953125
| 3
|
[] |
no_license
|
# coding: utf-8
# Read an n x m integer matrix A (one row per line) followed by an
# m-element vector b (one value per line) from stdin, then print each
# component of the product A.b on its own line.
n, m = map(int, input().split())
matrixA = []
vectorB = []
for i in range(n):
    matrixA.append(list(map(int, input().split())))
for i in range(m):
    vectorB.append(int(input()))
for i in range(n):
    # Dot product of row i with the vector.
    num = 0
    for j in range(m):
        num += matrixA[i][j] * vectorB[j]
    print(num)
| true
|
3ee6055f0b355179b0934daa19ac3ec316aa72f3
|
Python
|
VinF/deer
|
/examples/ALE/ALE_env_gym.py
|
UTF-8
| 4,140
| 2.59375
| 3
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
""" Interface with the ALE environment
Authors: Vincent Francois-Lavet
"""
import numpy as np
import sys
# Fix: np.nan stopped being a valid `threshold` in NumPy 1.22+ (it raises
# "TypeError: threshold must be numeric").  sys.maxsize expresses the
# original intent: never summarise, always print arrays in full.
np.set_printoptions(threshold=sys.maxsize)
import cv2
#from ale_python_interface import ALEInterface
import gym
from deer.base_classes import Environment
#import matplotlib
#matplotlib.use('qt5agg')
#from mpl_toolkits.axes_grid1 import host_subplot
#import mpl_toolkits.axisartist as AA
#import matplotlib.pyplot as plt
#from PIL import Image
class MyEnv(Environment):
    # Mode value reserved for validation runs (see reset()).
    VALIDATION_MODE = 0

    def __init__(self, rng, **kwargs):
        """ Initialize environment.

        Arguments:
            rng - the numpy random number generator
        """
        if(bool(kwargs["game"])):
            self.env = gym.make(kwargs["game"])
        else:
            # Choice between Seaquest-v4, Breakout-v4, SpaceInvaders-v4, BeamRider-v4, Qbert-v4, Freeway-v4', etc.
            self.env = gym.make('Seaquest-v4')
        self._random_state=rng
        self.env.reset()
        frame_skip=kwargs.get('frame_skip',1)
        self._frame_skip = frame_skip if frame_skip >= 1 else 1
        # Grayscale the rendered frame (mean over RGB) and shrink to 84x84,
        # the standard Atari preprocessing size.
        self._screen=np.average(self.env.render(mode='rgb_array'),axis=-1)
        self._reduced_screen = cv2.resize(self._screen, (84, 84), interpolation=cv2.INTER_LINEAR)
        #decide whether you want to keep this in repo, if so: add dependency to cv2
        #plt.imshow(self._reduced_screen, cmap='gray')
        #plt.show()
        self._mode = -1
        self._mode_score = 0.0
        self._mode_episode_count = 0

    def reset(self, mode):
        """Start a new episode; `mode` distinguishes train/validation phases.

        Performs a random number (0-14) of no-op-style random actions so
        episodes do not always start from the same state.
        """
        if mode == self._mode:
            # already in the right mode
            self._mode_episode_count += 1
        else:
            # switching mode
            self._mode = mode
            self._mode_score = 0.0
            self._mode_episode_count = 0
            self.env.reset()
        for _ in range(self._random_state.randint(15)):
            action = self.env.action_space.sample()
            # this executes the environment with an action,
            # and returns the observation of the environment,
            # the reward, if the env is over, and other info.
            observation, reward, self.terminal, info = self.env.step(action)
        self._screen=np.average(self.env.render(mode='rgb_array'),axis=-1)
        self._reduced_screen = cv2.resize(self._screen, (84, 84), interpolation=cv2.INTER_LINEAR)
        self.state=np.zeros((84,84), dtype=np.uint8) #FIXME
        # Initial observation: a zeroed 1x4x84x84 nested list (matches
        # inputDimensions()).
        return [1*[4 * [84 * [84 * [0]]]]]

    def act(self, action):
        """Apply `action` for 4 frames, stack the grayscale 84x84 frames
        into self.state, and return the clipped (sign of) summed reward."""
        #print "action"
        #print action
        self.state=np.zeros((4,84,84), dtype=np.float)
        reward=0
        for t in range(4):
            observation, r, self.terminal, info = self.env.step(action)
            #print "observation, reward, self.terminal"
            #print observation, reward, self.terminal
            reward+=r
            if self.inTerminalState():
                break
            self._screen=np.average(observation,axis=-1) # Gray levels
            self._reduced_screen = cv2.resize(self._screen, (84, 84), interpolation=cv2.INTER_NEAREST) # 84*84
            #plt.imshow(self._screen, cmap='gray')
            #plt.show()
            self.state[t,:,:]=self._reduced_screen
        self._mode_score += reward
        # Reward clipping to {-1, 0, +1}, as is standard for Atari agents.
        return np.sign(reward)

    def summarizePerformance(self, test_data_set, learning_algo, *args, **kwargs):
        """Print the mean score per episode accumulated in the current mode."""
        if self.inTerminalState() == False:
            self._mode_episode_count += 1
        print("== Mean score per episode is {} over {} episodes ==".format(self._mode_score / self._mode_episode_count, self._mode_episode_count))

    def inputDimensions(self):
        # One input: a history of 4 frames of 84x84 pixels.
        return [(1, 4, 84, 84)]

    def observationType(self, subject):
        return np.float16

    def nActions(self):
        print ("self.env.action_space")
        print (self.env.action_space)
        return self.env.action_space.n

    def observe(self):
        # Scale pixel values from [0, 255] to roughly [-1, 1).
        return [(np.array(self.state)-128.)/128.]

    def inTerminalState(self):
        return self.terminal


if __name__ == "__main__":
    pass
| true
|
5fe01016af5e4bed37a7865dece431f970647423
|
Python
|
Cc618/ML0
|
/linear_regression.py
|
UTF-8
| 1,558
| 3.9375
| 4
|
[] |
no_license
|
from random import random
def predict(x):
    '''
    Prediction by the network: evaluate the current linear model
    (module-level weights a and b) at x.
    '''
    return b + a * x
def show():
    '''
    Displays the data in a graph
    '''
    import matplotlib.pyplot as plt
    # Blue = ground truth
    plt.plot(x_data, y_data, 'b.')
    # Red = prediction
    plt.plot(x_data, [predict(x) for x in x_data], 'r-')
    # Fixed window: x in [0, 1], y in [2, 3] (matches the target line 0.5x + 2).
    plt.axis([0, 1, 2, 3])
    plt.show()
# Ground-truth line y = target_a * x + target_b that the model must recover.
target_a = .5
target_b = 2
print_freq = 25
# * Dataset
n = 20
# x data is within [0, 1)
x_data = [x / n for x in range(n)]
y_data = [target_a * x + target_b for x in x_data]
# With noise :
# noise_strength = 5e-2
# y_data = [target_a * x + target_b + random() * noise_strength for x in x_data]
# * Weigths
# Init weights with 'random values'
a = -.12
b = 0
# * Hyper parameters
learning_rate = 1e-1
epochs = 500
# * Training
# Full-batch gradient descent on mean squared error.  The constant factor
# 2 of the MSE gradient is not written explicitly here; it is effectively
# absorbed by the learning rate.
avg_loss = 0
for e in range(epochs):
    loss = 0
    da = 0
    db = 0
    # For each tuple (x, y) in the dataset
    for x, y in zip(x_data, y_data):
        yi = predict(x)
        loss += (yi - y) ** 2
        # * Compute gradient
        da += x * (yi - y)
        db += yi - y
    da /= n
    db /= n
    loss /= n
    # * Back propagate
    a -= learning_rate * da
    b -= learning_rate * db
    avg_loss += loss
    if e != 0 and e % print_freq == 0:
        # NOTE(review): avg_loss is accumulated over print_freq epochs but
        # divided by n (the dataset size) — confirm the intended divisor.
        print(f'Epoch : {e:3d} Loss : {avg_loss / n:1.6f}')
        avg_loss = 0
print(f'a = {a:.2f} b = {b:2.2f}')
print(f'Target : {[f"{target_a * x + target_b:.2f}" for x in x_data]}')
print(f'Guess : {[f"{predict(x):.2f}" for x in x_data]}')
show()
| true
|
e6983674bc0b5743ee75df53dc8e859c7d289ec7
|
Python
|
basakrajarshi/HackerRankChallenges-Python
|
/InterviewPreparationKit/Arrays/array_manipulation.py
|
UTF-8
| 717
| 2.703125
| 3
|
[] |
no_license
|
import math
import os
import random
import re
import sys
def arrayManipulation(n, queries):
    """Return the maximum value in an array of n zeros after applying all
    range-add queries.

    Each query is [a, b, k]: add k to every element in the 1-indexed,
    inclusive range a..b.  Uses a difference array so each query is O(1),
    then a single prefix-sum pass to find the maximum.
    """
    delta = [0] * (n + 1)
    for a, b, k in queries:
        delta[a - 1] += k
        delta[b] -= k
    best = 0
    running = 0
    for step in delta:
        running += step
        if running > best:
            best = running
    return best
if __name__ == '__main__':
    # HackerRank harness: read n (array size) and m (query count) from
    # stdin, then m queries, and write the answer to the OUTPUT_PATH file.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    nm = input().split()
    n = int(nm[0])
    m = int(nm[1])
    queries = []
    for _ in range(m):
        queries.append(list(map(int, input().rstrip().split())))
    result = arrayManipulation(n, queries)
    fptr.write(str(result) + '\n')
    fptr.close()
| true
|
e438a62e4bef285ad5cca8dcf12e25a8d9079f73
|
Python
|
emrantalukder/eventstream-stack
|
/eventstream-transform/transform.py
|
UTF-8
| 1,890
| 2.59375
| 3
|
[] |
no_license
|
import os
import socket
import logging
from time import strftime
from datetime import datetime
from flask import Flask, request, json, jsonify
from elasticsearch import Elasticsearch
# Elasticsearch endpoint; overridable via the environment (the default
# hostname matches a docker-compose service name).
ELASTICSEARCH_URL = os.getenv('ELASTICSEARCH_URL', 'http://elasticsearch:9200')
app = Flask(__name__)
es = Elasticsearch([ELASTICSEARCH_URL])
# write to a "durable" queue file
def durable_write(event):
    """Append the event as one JSON line to the fallback queue file and
    return the event unchanged.  Used when Elasticsearch writes fail.
    Assumes the data/ directory exists."""
    with open("data/durable_queue.json", 'a+') as durable_queue:
        json.dump(event, durable_queue)
        durable_queue.write("\n")
    return event
# write to elasticsearch
def es_write(event):
    """Index the event into Elasticsearch, using its eventType as the index.

    On success returns the ES response; on any failure the event is spooled
    to the durable queue file and the event itself is returned instead.
    """
    try:
        res = es.index(index=event['eventType'], doc_type='event', body=event)
        return res
    except Exception as e:
        logging.error(str(e))
        # NOTE(review): res is rebound but unused — durable_write already
        # returns the event, which is what gets returned below.
        res = durable_write(event)
        return event
# find event id
def find_event_id(event):
    """Look up the id mapped to the event's value in data/<eventType>.json.

    Returns the mapped id, or None if the event keys or the mapping file
    are missing (the failure is logged).
    """
    try:
        event_type = event['eventType']
        event_value = event['eventValue']
        with open(f'data/{event_type}.json') as mapping_file:
            mapping = json.load(mapping_file)
        return mapping[event_value]
    except Exception as e:
        logging.error(str(e))
        return None
# transform event by loading and parsing eventType file from disk
def xform(event):
    """Transform one event and write it to Elasticsearch (or the durable
    queue on failure); returns whatever the write returned."""
    # NOTE(review): eventId is computed but never attached to the event or
    # used afterwards — either the enrichment step is unfinished or this
    # lookup (and its file read) is dead code.  Confirm before removing.
    eventId = find_event_id(event)
    res = es_write(event)
    return res
# web endpoint:
@app.route("/", methods=["POST"])
def event_stream():
    """POST endpoint: accept a single event object or a list of events,
    transform/index each, and echo the result(s) as JSON.  Any failure is
    returned as a 500 with the error text in the body."""
    try:
        event = request.get_json(force=True)
        data = None
        # transform lists or single object
        if type(event) == list:
            data = list(map(lambda e: xform(e), event))
        else:
            data = xform(event)
        return jsonify(data)
    except Exception as e:
        print(f'{strftime("%I:%M:%S")} - {str(e)}')
        return str(e), 500


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=80)
| true
|
c50a327b22c5b52d957a8a82c1b8268bfeb6ea0e
|
Python
|
beasyx0/cexio
|
/cexio/bot/tests/test_models.py
|
UTF-8
| 2,820
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
from django.test import TestCase
from django.utils import timezone
from cexio.bot.models import TimeStamped, BotConfigurationVariables, Order
class TestModels(TestCase):
    '''Tests for all bot.models'''

    def test_timestamped_save_method(self):
        '''Test model 'TimeStamped' checks that self.date and self.updated updates on save'''
        print('Testing TimeStamped save method')
        timestamped = TimeStamped()
        timestamped.save()
        now = timezone.now()
        timestamped.refresh_from_db()
        # Compares dates only (not times) to avoid flakiness around the
        # exact save instant.
        self.assertEqual(timestamped.date.date(), now.date())
        self.assertEqual(timestamped.updated.date(), now.date())
        print('Testing complete')

    def test_botconfigurationvariables_str_method_and_fields(self):
        '''Test model 'BotConfigurationVariables' str method'''
        # thest tests need impovement
        print('Testing BotConfigurationVariables str method and fields')
        bot = BotConfigurationVariables(name='Some bot', pair='BTC/USD', buy=0.02, upswing_buy=0.03,
                                        sell=0.02, downswing_sell=0.03, fee=1.0, auto_cancel_order_period=20)
        bot.save()
        bot.refresh_from_db()
        # Round-trip every field through the database and compare.
        expected_str = 'Some bot'
        returned_str = bot.__str__()
        self.assertEqual(expected_str, returned_str)
        expected_pair = 'BTC/USD'
        returned_pair = bot.pair
        self.assertEqual(expected_pair, returned_pair)
        expected_buy = 0.02
        returned_buy = bot.buy
        self.assertEqual(expected_buy, returned_buy)
        expected_upswing_buy = 0.03
        returned_upswing_buy = bot.upswing_buy
        self.assertEqual(expected_upswing_buy, returned_upswing_buy)
        expected_sell = 0.02
        returned_sell = bot.sell
        self.assertEqual(expected_sell, returned_sell)
        expected_downswing_sell = 0.03
        returned_downswing_sell = bot.downswing_sell
        self.assertEqual(expected_downswing_sell, returned_downswing_sell)
        expected_fee = 1.0
        returned_fee = bot.fee
        self.assertEqual(expected_fee, returned_fee)
        expected_auto_cancel_order_period = 20
        returned_auto_cancel_order_period = bot.auto_cancel_order_period
        self.assertEqual(expected_auto_cancel_order_period, returned_auto_cancel_order_period)
        print('Testing complete')

    def test_order_str_method(self):
        '''Test model 'Order' str method'''
        print('Testing Order str method')
        order = Order(order_id='123456789', pair='BTC/USD', order_type='BUY', price=55000, amount=0.0001)
        order.save()
        order.refresh_from_db()
        expected_str = 'Order ID: 123456789'
        returned_str = order.__str__()
        self.assertEqual(expected_str, returned_str)
        print('Testing complete')
| true
|
7d9d3a067dfa6f081bd1af30a96131d9b8641552
|
Python
|
kersky98/stud
|
/coursera/pythonHse/fifth/9.py
|
UTF-8
| 402
| 3.3125
| 3
|
[] |
no_license
|
# ะะฐะนะดะธัะต ะธ ะฒัะฒะตะดะธัะต ะฒัะต ะดะฒัะทะฝะฐัะฝัะต ัะธัะปะฐ, ะบะพัะพััะต ัะฐะฒะฝั ัะดะฒะพะตะฝะฝะพะผั
# ะฟัะพะธะทะฒะตะดะตะฝะธั ัะฒะพะธั
ัะธัั.
ns = 10
ne = 100
tt = tuple()
for i in range(ns, ne):
t = tuple(str(i))
zm = 2 * int(t[0]) * int(t[1])
if i == zm:
# print(i)
tt += (zm, )
for i in range(len(tt)):
print(tt[i])
| true
|
558eae1b75fc9bd292145f33bd1ea3c3cadfb0a8
|
Python
|
bartlebythecoder/otu_reports
|
/enigma_finder.py
|
UTF-8
| 2,307
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/python
import matplotlib.pyplot as plt
import sqlite3
def hex_to_num(x):
    """Convert one Traveller extended-hex (eHex) digit to its integer value.

    Digits run 0-9, then A-H, then J (the letter 'I' is skipped), covering
    the values 0-18.  Raises KeyError for any other character, like the
    original literal lookup table.
    """
    value_of = {digit: value for value, digit in enumerate('0123456789ABCDEFGHJ')}
    return int(value_of[x])
# MAIN PROGRAM
# Open the SQLite 3 database
# Prompt the user for which sector database to analyse, then scatter-plot
# tech level vs. strangeness for class-A starport systems, coloured by
# acceptance.
print ('Which database would you like?')
print ('1. Spinward Marches')
print ('2. Solomani Rim')
print ('3. Core')
db_choice_no = input('Please pick a number... ')
database_dict = {'1':'spinward_marches.db','2':'solomani_rim.db','3':'core.db'}
header_dict = {'1':'the Marches','2':'the Rim','3':'Core'}
conn = sqlite3.connect(database_dict[db_choice_no])
c = conn.cursor()
# NOTE(review): tech_level_rating and starport_to_color are defined but
# never used below — confirm whether they belong to a removed feature.
tech_level_rating = { '0':10,'1':10,'2':10,'3':10,'4':10,
                      '5':100,'6':100,'7':100,'8':100,'9':100,
                      'A':1000,'B':1000,'C':1000,'D':1000,'E':1000,
                      'F':1000,'G':1000,'H':1000,'J':1000}
starport_to_color = {'A':10,'B':9,'C':8,'D':7,'E':6,'X':5}
sql3_select = """ SELECT name,
                         uwp,
                         cx
                  FROM tb_t5_systems """
c.execute(sql3_select)
allrows = c.fetchall()
name = list()
law_level = list()
accept_level = list()
tech_level = list()
strange_level = list()
starport = list()
for row in allrows:
    # Decode fixed positions of the UWP / Cx profile strings:
    # uwp[0]=starport, uwp[6]=law, uwp[8]=tech; cx[2]=acceptance,
    # cx[3]=strangeness.
    name_row = row[0]
    starport_row = row[1][0]
    law_level_row = hex_to_num(row[1][6])
    accept_level_row = hex_to_num(row[2][2])
    tech_level_row = hex_to_num(row[1][8])
    strange_level_row = hex_to_num(row[2][3])
    print (name_row, starport_row, tech_level_row)
    # Keep only systems with a class-A starport.
    if starport_row == 'A':
        starport.append(starport_row)
        name.append(name_row)
        law_level.append(law_level_row)
        accept_level.append(accept_level_row)
        tech_level.append(tech_level_row)
        strange_level.append(strange_level_row)
plt.xlabel('Tech Level')
plt.ylabel('Strange Level')
plt.title('Acceptance and Law in ' + header_dict[db_choice_no])
plt.axis([-1, 17, -1, 12])
plt.scatter(tech_level,strange_level,s=100, c = accept_level, cmap=plt.cm.YlGn)
# for i, txt in enumerate(name):
#     plt.annotate(txt, (law_level[i]-.5,accept_level[i]))
plt.show()
| true
|
b8aaa5c1dd451b33c03d65d7e62666f904f71d06
|
Python
|
tonkla555/CP3-Klapathai-Chaikla
|
/Exercise8_Klapathai_C.py
|
UTF-8
| 1,749
| 3.53125
| 4
|
[] |
no_license
|
# Simple console storefront: authenticate a fixed user, show the product
# menu, then take and total a single order.
username = input("Username : ")
password = input("Password : ")
if username == ("tonkla555") and password == ("tonkla55"):
    # Product number -> unit price in THB; the product code is TK-00<No>.
    prices = {1: 100, 2: 150, 3: 200, 4: 250}
    print("---Welcome To TK-Shop---")
    print("No. Product price")
    # Fix: the original hand-written menu mislabelled item 3 as "TK-002"
    # even though the purchase branch sold it as TK-003; generating the
    # menu from the price table keeps them consistent.
    for no in sorted(prices):
        print("{}. TK-00{} ({} THB)".format(no, no, prices[no]))
    No = int(input("Select Product No. : "))
    if No in prices:
        pieces = int(input("Enter the number of pieces : "))
        if pieces > 0:
            price = prices[No]
            print("-------------------------------Total")
            print("TK-00{} {}(THB)".format(No, price), "*", pieces, " ", price * pieces, "(THB)")
        else:
            print("You cannot buy 0 items.")
    else:
        print("The item you selected is not available.")
else:
    print("Incorrect Username or Password.")
print("-----Thank You-----")
|
d6a834777aacdec11c4533527236e156e595f8e5
|
Python
|
holcombddf/Protein-Design-Scripts
|
/src/plotter.py
|
UTF-8
| 6,147
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/python
#Creates a graph for all data in a given CSV, using the first column as the x-values, and all other columns as the y-values for the plots. Change the sizing and plotting to suit your needs.
import sys,re,os
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import csv
import argparse
class Plotter:
    """Draws scatter points plus a fitted regression curve on one axis.

    Attributes:
        ax: the matplotlib axis (or compatible object) to draw on.
        s: marker size for scatter points.
        linewidth: width of the fitted regression line.
    """
    def __init__(self, ax, s=20, linewidth=1.0):
        self.ax = ax
        self.s = s
        self.linewidth = linewidth
    def _eval_poly(self, coeffs, t):
        """Evaluate a polynomial (highest-order coefficient first) at t.

        Shared by log_plot and poly_plot; previously the evaluation loop
        was duplicated in both and carried an unused loop variable.
        """
        degree = len(coeffs) - 1
        val = 0
        for i, c in enumerate(coeffs):
            val = val + c * (t ** (degree - i))
        return val
    #plots scatterplot and logarithmic regression
    def log_plot(self, x, y, label="", d=1, c="black", scatter=True, plot=True): #x data, y data, the axis to plot on, label, the degree of the polynomial, and the color
        """Scatter the data and overlay a degree-d polynomial fit in log(x)."""
        l = min([len(x), len(y)])
        fit = np.polyfit(np.log(x[:l]), y[:l], deg=d) #seems to work whether or not set_yscale is 'log'
        # Evaluate the fitted polynomial at log(t) for every x value.
        fity = [self._eval_poly(fit, np.log(t)) for t in x[:l]]
        if scatter:
            self.ax.scatter(x[:l], y[:l], c=c, s=self.s) #change s to make the dots on the scatterplot bigger
        if plot:
            self.ax.plot(x[:l], fity, color=c, label=label, linewidth=self.linewidth) #change linewidth to make the line for the plot thicker
    #plots scatterplot and polynomial regression
    def poly_plot(self, x, y, label="", d=1, c="black", scatter=True, plot=True):
        """Scatter the data and overlay a degree-d polynomial fit.

        When the axis y-scale is 'log', the fit is done on log10(y) and the
        curve is transformed back with 10**val.
        """
        l = min([len(x), len(y)])
        # Hoisted out of the loop: the y-scale cannot change mid-plot.
        log_scale = self.ax.get_yscale() == 'log'
        if log_scale:
            fit = np.polyfit(x[:l], [np.log10(z) for z in y[:l]], deg=d)
        else:
            fit = np.polyfit(x[:l], y[:l], deg=d)
        fity = []
        for t in x[:l]:
            val = self._eval_poly(fit, t)
            fity.append(10 ** val if log_scale else val)
        if scatter:
            self.ax.scatter(x[:l], y[:l], c=c, s=self.s) #change s to make the dots on the scatterplot bigger
        if plot:
            self.ax.plot(x[:l], fity, color=c, label=label, linewidth=self.linewidth) #change linewidth to make the line for the plot thicker
#transposes a data frame, assuming the data is float
def reverse_frame(data):
    """Transpose a table of rows into per-column lists of floats.

    Cells that are missing or cannot be converted to float are skipped,
    so the resulting columns may have different lengths. The number of
    columns is taken from the first row.
    """
    newdata = [[] for _ in data[0]]
    for row in data:
        for i in range(len(newdata)):
            try:
                newdata[i].append(float(row[i]))
            except (ValueError, TypeError, IndexError):
                # Narrowed from a bare `except`: skip only non-numeric or
                # missing cells instead of swallowing every error
                # (including KeyboardInterrupt/SystemExit).
                pass
    return newdata
#returns an array containing the current min and max
def compare_range(xval, xran):
    """Fold `xval` into `xran` (a running [min] or [min, max]) and return it."""
    if not xran:
        # First value seen: the range is just this single value.
        xran.append(xval)
        return xran
    if len(xran) == 1:
        # Second value seen: expand to an ordered [min, max] pair.
        previous = xran[0]
        return [previous, xval] if xval > previous else [xval, previous]
    # Established range: widen whichever bound the new value exceeds.
    if xval > xran[1]:
        xran[1] = xval
    elif xval < xran[0]:
        xran[0] = xval
    return xran
def eval_str_f(string): #defaults to false if it doesn't find true
    """Interpret a string as a boolean; only 'true', '1' or 't' (any case,
    surrounding whitespace ignored) count as True."""
    return string.strip().lower() in ("true", "1", "t")
def parse_args(sysargv):
    """Build the CLI parser and parse `sysargv`.

    Bug fix: `sysargv` was previously ignored — `parser.parse_args()` was
    called with no argument, so argparse silently read the real sys.argv
    instead of the list supplied by the caller.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--csv", metavar='FILE', type=str, default=None, action="store", help="file containing the data to graph")
    parser.add_argument("--header", metavar='BOOL', type=str, default="false", action="store", help="whether or not the first row of the CSV file contains column headers")
    return parser.parse_args(sysargv)
def main(sysargv=[]):
    """Read the CSV named by --csv, fit and plot each y-column against the
    first column, and save the figure next to the data file.

    NOTE(review): the mutable default `sysargv=[]` is never mutated here so
    it is harmless, but `None` would be the safer idiom.
    NOTE(review): the plotting section below hard-codes six y-columns
    (y[0]..y[5]) and ATP-concentration labels — confirm against the
    expected data files before reuse.
    """
    args = parse_args(sysargv)
    #range of minimum and maximum values for x and y variables
    #could be useful when defining xlim and ylim
    xran = []
    yran = []
    #read the data from the CSV
    x = []
    y = []
    labels = []
    with open(args.csv, "r") as csvfile:
        reader = csv.reader(csvfile, delimiter=",",quotechar="\'")
        for i,row in enumerate(reader):
            # First row is treated as column headers only if --header is truthy.
            if i == 0 and eval_str_f(args.header):
                labels = row
            else:
                try: #skips row if data is not numeric
                    xval = float(row[0])
                    x.append(xval)
                    #find the x minimum and maximum
                    xran = compare_range(xval, xran)
                    # Lazily size the per-column y lists from the first data row.
                    if len(y) == 0:
                        y = [[] for z in row[1:]]
                    for j,yval in enumerate(row[1:]):
                        yval = float(yval)
                        y[j].append(yval)
                        #find the y minimum and maximum
                        yran = compare_range(yval, yran)
                except Exception as e:
                    print(str(e))
    #################################################
    #################################################
    #CHANGE THIS PART TO SUIT YOUR NEEDS
    fig,ax=plt.subplots(figsize=(20, 10)) #adjusts the figure size
    My_Plotter = Plotter(ax, 75, 3.0) #adjusts the scatterplot point size and regression line width
    ax.set_yscale('log') #creates a logarithmic scale for the y-axis
    matplotlib.rcParams.update({'font.size': 30}) #adjusts the font size
    plt.xlim(xran[0]-0.02*(xran[1]-xran[0]), xran[1]+0.02*(xran[1]-xran[0])) #limits for the x-axis
    plt.ylim(yran[0]-1, yran[1]+300) #limits for the y-axis
    plt.xlabel("Time (seconds)")
    plt.ylabel("Hydrodynamic Radius (nm); Volume")
    #this part actually plots the data, using a scatterplot and a best fit polynomial or logarithmic
    #arguments are the x data array, the y data array, the label, the degree of the polynomial, and the color on the graph
    My_Plotter.log_plot(x, y[0], "2000 $\mu$M ATP", 3, "red")
    My_Plotter.log_plot(x, y[1], "500 $\mu$M ATP", 4, "blue")
    My_Plotter.poly_plot(x, y[2], "250 $\mu$M ATP", 8, "orange")
    My_Plotter.poly_plot(x, y[3], "100 $\mu$M ATP", 3, "green")
    My_Plotter.poly_plot(x, y[4], "50 $\mu$M ATP", 2, "magenta")
    My_Plotter.poly_plot(x, y[5], "0 $\mu$M ATP", 1, "black")
    #put the legend in the right side of the graph
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.78, box.height]) #change box.width scaling so that legend fits
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    #################################################
    #################################################
    #create and save the graph
    fig.savefig(os.path.join(os.path.dirname(args.csv), "figure")) #saves the figure in the same directory as the data source
    plt.close()
# Script entry point: pass the CLI arguments (without the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| true
|
759e320069935d34f7434e46fe0f667162b53f26
|
Python
|
nairaaoliveira/ProgWeb
|
/Exercicios_Python/Lista 5_Python/q01_Lista 5_Ex_Python.py
|
UTF-8
| 386
| 3.984375
| 4
|
[] |
no_license
|
'''Exercise 1 (translated): Write a function that receives a desired number
of items and returns a list loaded with that many items. Write another
function to display those items separated by blank spaces.'''
def ListaQuant(quant):
    """Return a list loaded with `quant` items (here the integers 1..quant).

    Bug fix: the original indexed a fixed 3-element list with `quant` and
    called len() on an int, which raised TypeError or IndexError for any
    input, and never returned a list as the exercise requires.
    """
    return list(range(1, quant + 1))
def Exibir(items):
    """Display the list items separated by blank spaces."""
    print(*items)
quant = int(input("Nรบmero : "))
Exibir(ListaQuant(quant))
| true
|
4a6b5579655bdc4257a87e1c184b688ad0ac6196
|
Python
|
sayantanauddy/hierarchical_bipedal_controller
|
/matsuoka_walk/robots.py
|
UTF-8
| 14,259
| 2.859375
| 3
|
[] |
no_license
|
"""
Module for wrappers of robot specific classes
"""
from abc import ABCMeta, abstractmethod, abstractproperty
from nicomotion import Motion
import math
import pypot
from pypot.vrep import from_vrep
from pypot.creatures import PoppyHumanoid
import time
import numpy as np
from pypot.utils.stoppablethread import StoppableLoopThread
from pypot.dynamixel.motor import DxlMXMotor
class Robot:
    """
    Abstract base class describing the interface a robot wrapper must provide.
    """
    # Concrete subclasses implement the abstract methods and supply values
    # for the abstract properties declared below.
    # NOTE: `__metaclass__` is the Python 2 way of attaching ABCMeta; on
    # Python 3 this attribute has no effect.
    __metaclass__ = ABCMeta
    def __init__(self):
        """
        Constructor of the abstract class; nothing to initialize here.
        """
        pass
    @abstractproperty
    def sync_sleep_time(self):
        """
        Seconds to sleep so that the joints can reach their targets.
        """
        pass
    @abstractproperty
    def robot_handle(self):
        """
        Handle used to invoke operations on the underlying robot.
        """
        pass
    @abstractproperty
    def interpolation(self):
        """
        Whether intermediate joint angles should be interpolated.
        """
        pass
    @abstractproperty
    def fraction_max_speed(self):
        """
        Fraction of the maximum motor speed to use.
        """
        pass
    @abstractproperty
    def wait(self):
        """
        Whether control should wait for each angle to reach its target.
        """
        pass
    @abstractmethod
    def set_angles(self, joint_angles, duration=None, joint_velocities=None):
        """
        Drive the given joints to the given targets.

        :type joint_angles: dict
        :param joint_angles: Mapping of joint name -> target angle (radians)
        :type duration: float
        :param duration: Time allowed to reach the targets (seconds)
        :type joint_velocities: dict
        :param joint_velocities: Mapping of joint name -> velocity
        :return: None
        """
        pass
    @abstractmethod
    def get_angles(self, joint_names):
        """
        Return a dict mapping each requested joint name to its current
        angle in radians.

        :type joint_names: list(str)
        :param joint_names: List of joint names
        :rtype: dict
        """
        pass
class Nico(Robot):
    """
    This class encapsulates the methods and properties for interacting with the nico robot
    It extends the abstract class 'Robot'
    """
    # Concrete values are assigned in __init__; declared here to back the
    # abstract properties of `Robot`.
    sync_sleep_time = None
    robot_handle = None
    interpolation = None
    fraction_max_speed = None
    wait = None
    def __init__(self, sync_sleep_time, interpolation=False, fraction_max_speed=0.01, wait=False,
                 motor_config='config.json', vrep=False, vrep_host='127.0.0.1', vrep_port=19997, vrep_scene=None):
        """
        The constructor of the class. Class properties should be set here
        The robot handle should be created here
        Any other initializations such as setting angles to particular values should also be taken care of here
        :type sync_sleep_time: float
        :param sync_sleep_time: Time to sleep to allow the joints to reach their targets (in seconds)
        :type interpolation: bool
        :param interpolation: Flag to indicate if intermediate joint angles should be interpolated
        :type fraction_max_speed: float
        :param fraction_max_speed: Fraction of the maximum motor speed to use
        :type wait: bool
        :param wait: Flag to indicate whether the control should wait for each angle to reach its target
        :type motor_config: str
        :param motor_config: json configuration file
        :type vrep: bool
        :param vrep: Flag to indicate if VREP is to be used
        :type vrep_host: str
        :param vrep_host: IP address of VREP server
        :type vrep_port: int
        :param vrep_port: Port of VREP server
        :type vrep_scene: str
        :param vrep_scene: VREP scene to load
        """
        super(Nico, self).__init__()
        # Set the properties
        self.sync_sleep_time = sync_sleep_time
        self.interpolation = interpolation
        self.fraction_max_speed = fraction_max_speed
        self.wait = wait
        self.motor_config = motor_config
        self.vrep = vrep
        self.vrep_host = vrep_host
        self.vrep_port = vrep_port
        self.vrep_scene = vrep_scene
        # Create the robot handle (nicomotion wrapper; connects to VREP or hardware)
        self.robot_handle = Motion.Motion(self.motor_config, self.vrep, self.vrep_host, self.vrep_port, self.vrep_scene)
        # List of all joint names
        self.all_joint_names = self.robot_handle.getJointNames()
        # Initialize the joints (left disabled in the original code)
        # for joint_name in self.all_joint_names:
        #    self.set_angles({joint_name:0.0})
        # Sleep for a few seconds to allow the changes to take effect
        time.sleep(3)
    def set_angles_slow(self, target_angles, duration, step=0.01):
        """
        Sets the angles over the specified duration using linear interpolation

        :param target_angles: dict of joint name -> target angle (radians)
        :param duration: total time (seconds) over which to interpolate
        :param step: time (seconds) between successive interpolation points
        :return: None
        """
        # Retrieve the start angles
        start_angles = self.get_angles(joint_names=target_angles.keys())
        # Calculate the slope for each joint
        angle_slopes = dict()
        for joint_name in target_angles.keys():
            start = start_angles[joint_name]
            end = target_angles[joint_name]
            angle_slopes[joint_name] = (end - start)/duration
        # t starts from 0.0 and goes till duration
        for t in np.arange(0.0, duration+0.01, step):
            current_angles = dict()
            # Calculate the value of each joint angle at time t
            for joint_name in target_angles.keys():
                current_angles[joint_name] = start_angles[joint_name] + angle_slopes[joint_name]*t
            # Set the current angles
            self.set_angles(current_angles)
            # Sleep for the step time
            time.sleep(step)
    def set_angles(self, joint_angles, duration=None, joint_velocities=None):
        """
        Sets the joints to the specified angles (after converting radians to degrees since the poppy robot uses degrees)
        :type joint_angles: dict
        :param joint_angles: Dictionary of joint_names: angles (in radians)
        :type duration: float
        :param duration: Time to reach the angular targets (in seconds)
        :type joint_velocities: dict
        :param joint_velocities: dict of joint angles and velocities
        :return: None
        """
        # Clamp limits (degrees) for the two knee joints.
        # NOTE(review): `r_kee_max` is a typo for `r_knee_max` (local only,
        # behavior unaffected).
        l_knee_max = 90.0
        l_knee_min = 0.0
        r_kee_max = 90.0
        r_knee_min = 0.0
        for joint_name in joint_angles.keys():
            # Convert the angle to degrees
            target_angle_degrees = math.degrees(joint_angles[joint_name])
            # Clamp the left knee to [0, 90] degrees.
            if joint_name == 'l_knee_y':
                if target_angle_degrees >= l_knee_max:
                    target_angle_degrees = l_knee_max
                elif target_angle_degrees <= l_knee_min:
                    target_angle_degrees = l_knee_min
                else:
                    target_angle_degrees = target_angle_degrees
            # Clamp the right knee to [0, 90] degrees.
            if joint_name == 'r_knee_y':
                if target_angle_degrees >= r_kee_max:
                    target_angle_degrees = r_kee_max
                elif target_angle_degrees <= r_knee_min:
                    target_angle_degrees = r_knee_min
                else:
                    target_angle_degrees = target_angle_degrees
            self.robot_handle.setAngle(joint_name, target_angle_degrees, self.fraction_max_speed)
        # Sleep to allow the motors to reach their targets
        # NOTE(review): sleeps `sync_sleep_time`, not `duration`; `duration`
        # is only used as an on/off flag here — confirm this is intended.
        if duration is not None:
            time.sleep(self.sync_sleep_time)
    def get_angles(self, joint_names=None):
        """
        Gets the angles of the specified joints and returns a dict of joint_names: angles (in radians)
        If joint_names=None then the values of all joints are returned
        :type joint_names: list(str)
        :param joint_names: List of joint names
        :rtype: dict
        """
        # Create the dict to be returned
        joint_angles = dict()
        # If joint names are not provided, get values of all joints
        if joint_names is None:
            # Call the nicomotion api function to get list of joint names
            joint_names = self.all_joint_names
        motors = self.robot_handle._robot.motors
        # If no joint names are specified, return angles of all joints in raidans
        # Else return only the angles (in radians) of the specified joints
        # NOTE(review): `joint_names` can no longer be None here (reassigned
        # above), so the first branch of this loop is dead code.
        for m in motors:
            if joint_names is None:
                joint_angles[str(m.name)] = math.radians(m.present_position)
            else:
                if m.name in joint_names:
                    joint_angles[str(m.name)] = math.radians(m.present_position)
        return joint_angles
    def cleanup(self):
        """
        Cleans up the current connection to the robot
        :return: None
        """
        self.robot_handle.cleanup()
class Poppy(Robot):
    """
    This class encapsulates the methods and properties for interacting with the poppy robot
    It extends the abstract class 'Robot'

    NOTE(review): `set_angles` below contains a Python 2 `print` statement
    (`print e.message`), so this class only runs under Python 2.
    """
    # Concrete values are assigned in __init__; declared here to back the
    # abstract properties of `Robot`.
    sync_sleep_time = None
    robot_handle = None
    interpolation = None
    fraction_max_speed = None
    wait = None
    def __init__(self, sync_sleep_time, interpolation=False, fraction_max_speed=0.01, wait=False,
                 motor_config=None, vrep=False, vrep_host='127.0.0.1', vrep_port=19997, vrep_scene=None):
        """
        The constructor of the class. Class properties should be set here
        The robot handle should be created here
        Any other initializations such as setting angles to particular values should also be taken care of here
        :type sync_sleep_time: float
        :param sync_sleep_time: Time to sleep to allow the joints to reach their targets (in seconds)
        :type interpolation: bool
        :param interpolation: Flag to indicate if intermediate joint angles should be interpolated
        :type fractionMaxSpeed: float
        :param fractionMaxSpeed: Fraction of the maximum motor speed to use
        :type wait: bool
        :param wait: Flag to indicate whether the control should wait for each angle to reach its target
        """
        super(Poppy, self).__init__()
        # Set the properties
        self.sync_sleep_time = sync_sleep_time
        self.interpolation = interpolation
        self.fraction_max_speed = fraction_max_speed
        self.wait = wait
        # Cap applied to goal_speed in set_angles.
        self._maximumSpeed = 1.0
        # Close existing vrep connections if any
        pypot.vrep.close_all_connections()
        # Create a new poppy robot and set the robot handle
        self.robot_handle = PoppyHumanoid(simulator='vrep',
                                          config=motor_config,
                                          host=vrep_host,
                                          port=vrep_port,
                                          scene=vrep_scene)
        # Sync the robot joints
        self.robot_handle.start_sync()
        # Perform required joint initializations
        # Move arms to pi/2 (left disabled in the original code)
        # self.robot_handle.l_shoulder_y.goal_position = -90
        # self.robot_handle.r_shoulder_y.goal_position = -90
        # Sleep for a few seconds to allow the changes to take effect
        time.sleep(3)
    def set_angles(self, joint_angles, duration=None, joint_velocities=None):
        """
        Sets the joints to the specified angles (after converting radians to degrees since the poppy robot uses degrees)
        :type joint_angles: dict
        :param joint_angles: Dictionary of joint_names: angles (in radians)
        :type duration: float
        :param duration: Time to reach the angular targets (in seconds)
        :type joint_velocities: dict
        :param joint_velocities: dict of joint angles and velocities
        :return: None
        """
        for joint_name in joint_angles.keys():
            # Convert the angle to degrees
            target_angle_degrees = math.degrees(joint_angles[joint_name])
            try:
                # Determine the right joint and set the joint angle
                for motor in self.robot_handle.motors:
                    if motor.name == joint_name:
                        motor.compliant = False
                        motor.goal_speed = 1000.0 * min(self.fraction_max_speed, self._maximumSpeed)
                        motor.goal_position = target_angle_degrees
                        break
            except Exception as e: # Catch all exceptions
                # NOTE(review): Python 2 print statement; `e.message` is
                # deprecated even there.
                print e.message
                raise RuntimeError('Could not set joint angle')
        # Sleep to allow the motors to reach their targets
        # NOTE(review): condition is inverted relative to Nico.set_angles —
        # here the sleep happens when `duration` is falsy. Confirm intended.
        if not duration:
            time.sleep(self.sync_sleep_time)
    def get_angles(self, joint_names=None):
        """
        Gets the angles of the specified joints and returns a dict of joint_names: angles (in radians)
        If joint_names=None then the values of all joints are returned
        :type joint_names: list
        :param joint_names: List of joint name strings
        :rtype: dict
        :returns: dict of joint names and angles
        """
        # Create the dict to be returned
        joint_angles = dict()
        # Retrieve the list of DxlMXMotor objects
        motors = self.robot_handle.motors
        # If no joint names are specified, return angles of all joints in raidans
        # Else return only the angles (in radians) of the specified joints
        for m in motors:
            if joint_names is None:
                joint_angles[str(m.name)] = math.radians(m.present_position)
            else:
                if m.name in joint_names:
                    joint_angles[str(m.name)] = math.radians(m.present_position)
        return joint_angles
    def cleanup(self):
        """
        Cleans up the current connection to the robot
        :return: None
        """
        # TODO check if it works
        self.robot_handle.close()
| true
|
cf53ef9304116af8bf2caa501c19f0ce2c53a991
|
Python
|
SteveEwell/python-ldap-filter
|
/tests/test_filter_output.py
|
UTF-8
| 2,167
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
import pytest
from ldap_filter import Filter
class TestFilterOutput:
    """Round-trip serialisation and string concatenation of parsed filters."""
    def test_to_string(self):
        source = '(&(|(sn=ron)(sn=bob))(mail=*)(!(account=disabled)))'
        assert Filter.parse(source).to_string() == source
    def test_string_typecast(self):
        source = '(&(|(sn=ron)(sn=bob))(mail=*)(!(account=disabled)))'
        assert str(Filter.parse(source)) == source
    def test_to_simple_concat(self):
        source = '(&(|(sn=ron)(sn=bob))(mail=*)(!(account=disabled)))'
        assert Filter.parse(source) + '' == source
    def test_to_complex_concat(self):
        combined = Filter.parse('(&(sn=ron)(sn=bob))') + 'test'
        assert combined == '(&(sn=ron)(sn=bob))test'
class TestFilterFormatting:
    """Beautified (indented) output of Filter.to_string."""
    def test_default_beautify(self):
        parsed = Filter.parse('(&(|(sn=ron)(sn=bob))(mail=*))')
        assert parsed.to_string(True) == '(&\n  (|\n    (sn=ron)\n    (sn=bob)\n  )\n  (mail=*)\n)'
    def test_custom_indent_beautify(self):
        parsed = Filter.parse('(&(|(sn=ron)(sn=bob))(mail=*))')
        assert parsed.to_string(2) == '(&\n  (|\n    (sn=ron)\n    (sn=bob)\n  )\n  (mail=*)\n)'
    def test_custom_indent_char_beautify(self):
        parsed = Filter.parse('(&(|(sn=ron)(sn=bob))(mail=*))')
        assert parsed.to_string(indent=2, indt_char='!') == '(&\n!!(|\n!!!!(sn=ron)\n!!!!(sn=bob)\n!!)\n!!(mail=*)\n)'
class TestFilterSimplify:
    """simplify() collapses redundant nesting but leaves optimal filters alone."""
    def test_optimized_filter(self):
        source = '(&(|(sn=ron)(sn=bob))(mail=*)(!(account=disabled)))'
        assert Filter.parse(source).simplify().to_string() == source
    def test_unoptimized_filter(self):
        source = '(&(|(sn=ron)(&(sn=bob)))(|(mail=*))(!(account=disabled)))'
        expected = '(&(|(sn=ron)(sn=bob))(mail=*)(!(account=disabled)))'
        assert Filter.parse(source).simplify().to_string() == expected
| true
|
ceb31a29f3a27f3007fd46ebe715528b212191c9
|
Python
|
bigbear11/TextClassify
|
/nbbaye.py
|
UTF-8
| 1,751
| 2.90625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import math
import argparse
from collections import defaultdict
def loaddata(corpus_file):
    """Load a labelled corpus for the naive Bayes classifier.

    Each line is expected to look like "label/text"; lines without a '/'
    are skipped. Counting is per character of the text part.

    Returns a tuple (labels, labels_words, total) where:
      labels      -- defaultdict: label -> number of characters seen
      labels_words-- defaultdict: (label, char) -> occurrence count
      total       -- total number of characters across the corpus
    """
    labels = defaultdict(int)
    labels_words = defaultdict(int)
    total = 0
    # `with` guarantees the file is closed even if parsing raises
    # (the original opened/closed the file manually and leaked on error).
    with open(corpus_file) as f:
        for line in f:
            arr = line.strip().split('/')
            if len(arr) < 2:
                continue
            for item in list(arr[1]):
                labels[arr[0]] += 1
                labels_words[(arr[0], item)] += 1
                total += 1
    return labels, labels_words, total
def model(labels, labels_words, total, text):
    """Classify `text` with a character-level naive Bayes model.

    Returns (best_label, probability); if no label beats probability 0.0
    the initial label value 0 is returned unchanged.
    """
    chars = list(text)
    # Log-score of each label: log prior plus per-character log likelihoods
    # (unseen (label, char) pairs fall back to a count of 1.0).
    log_scores = {}
    for tag, count in labels.items():
        score = math.log(count) - math.log(total)
        for ch in chars:
            score += math.log(labels_words.get((tag, ch), 1.0)) - math.log(count)
        log_scores[tag] = score
    # Softmax-style normalisation done pairwise to stay numerically stable.
    best_label = 0
    best_grade = 0.0
    for tag in labels.keys():
        denom = 0.0
        for other in labels.keys():
            denom += math.exp(log_scores[other] - log_scores[tag])
        grade = 1.0 / denom
        if grade > best_grade:
            best_label, best_grade = tag, grade
    return best_label, best_grade
def run(args):
    """Classify every line of `args.input_file` with the naive Bayes model
    trained on `args.corpus`, writing "query<TAB>label<TAB>score" lines to
    `args.output`.
    """
    labels, labels_words, total = loaddata(args.corpus)
    # `with` closes both files even if classification raises (the original
    # leaked both handles on error); the output file is now only created
    # after the corpus has loaded successfully.
    with open(args.input_file) as fin, open(args.output, 'w') as fout:
        for line in fin:
            query = line.strip()
            label, grade = model(labels, labels_words, total, query)
            fout.write(query + '\t' + label + '\t' + str(grade) + '\n')
# Script entry point: parse -c/-i/-o options and run the classifier.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-c","--corpus",help="corpus")
    parser.add_argument("-i","--input_file",help="input_file")
    parser.add_argument("-o","--output",help="output")
    args = parser.parse_args()
    run(args)
| true
|
6efb5571a96cdf49380ec1c562b89fb8c3a42a6a
|
Python
|
kashifusmani/interview_prep
|
/beyond/test.py
|
UTF-8
| 271
| 3.109375
| 3
|
[] |
no_license
|
def find_duplicate(arr):
    """Return the first element that repeats in `arr`, or None if all unique."""
    seen = set()
    for item in arr:
        if item in seen:
            return item
        seen.add(item)
def find_missing(arr):
    # TODO: stub — always returns 0; the intended "find the missing
    # element" logic has not been implemented yet.
    return 0
# Quick manual check: 4 is the first repeated value in the sample array.
if __name__ == "__main__":
    a = [1, 4, 2, 0, 4, 5]
    print(find_duplicate(a))
| true
|
56068f612af76e285aa0b235e5ee56144c53eea1
|
Python
|
koseus/CSE566
|
/hw1/hw1.py
|
UTF-8
| 2,027
| 3.6875
| 4
|
[] |
no_license
|
import numpy as np

# Compute the most specific (S) and most general (G) rectangle hypotheses
# for 2-D instances (version-space boundaries, Find-S style).

# Read the dataset: the first line holds the number of instances, each
# following line the whitespace-separated feature values of one instance.
# `with` closes the file (the original leaked the handle).
with open("input.txt", "r") as f:
    size = int(f.readline())
    data = []
    for i in range(size):
        data.append([float(tok) for tok in f.readline().split()])
data = np.asarray(data, dtype=np.float32)

# distinguish negative items from positive ones. We will use positives to compute S, and negatives to compute G.
# NOTE(review): assumes exactly 10 negatives followed by 5 positives — confirm input format.
negative = data[0:10]
positive = data[10:15]

##### Compute S #####
# Tightest axis-aligned rectangle containing every positive instance
# (replaces the manual element-by-element min/max scans).
S_minX = positive[:, 0].min()
S_maxX = positive[:, 0].max()
S_minY = positive[:, 1].min()
S_maxY = positive[:, 1].max()
print("Most specific hypothesis is (" + str(S_minX) + ", " + str(S_minY) + ") (" + str(S_maxX) + ", " + str(S_maxY) + ")")

##### Compute G #####
# Collect the negative coordinates lying outside S on each side.
smallX = [n[0] for n in negative if n[0] < S_minX]
largeX = [n[0] for n in negative if n[0] > S_maxX]
smallY = [n[1] for n in negative if n[1] < S_minY]
largeY = [n[1] for n in negative if n[1] > S_maxY]

# The nearest negative on each side bounds G. The sentinels 0 and 10 are
# the assumed feature-space limits (the original seeded its manual scans
# with these values while shadowing the builtins `max` and `min`).
G_minX = max(smallX + [0])
G_maxX = min(largeX + [10])
G_minY = max(smallY + [0])
G_maxY = min(largeY + [10])

# Squeeze the rectangle by 1 quantum from all sides so that the border does not contain any negatives
G_minX = round(G_minX + 0.05, 2)
G_maxX = round(G_maxX - 0.05, 2)
G_minY = round(G_minY + 0.05, 2)
G_maxY = round(G_maxY - 0.05, 2)
print("Most general hypothesis is (" + str(G_minX) + ", " + str(G_minY) + ") (" + str(G_maxX) + ", " + str(G_maxY) + ")")
| true
|
f1da9446da4f5865379fbb5bdf80986cda6a9d35
|
Python
|
MLmicroscopy/distortions
|
/src/ang/phase.py
|
UTF-8
| 7,261
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
import numpy as np
import io
import re
import collections
def np2str(arr, precision=6, smart_int=False):
    """Render a 1-D numpy array as space-separated numbers.

    Integral values are printed without a decimal point when `smart_int`
    is True; every other value uses `precision` decimal places.
    """
    assert len(arr.shape) == 1
    def fmt(value):
        value = float(value)
        if smart_int and value.is_integer():
            return str(int(value))
        return "{0:.{1}f}".format(value, precision)
    return " ".join(fmt(v) for v in arr)
# Module-level defaults shared by Phase instances that don't supply values.
_default_latticeConstants = np.zeros([6]) # 3 constants & 3 angles
# 6x6 elastic (stiffness) constants matrix, all zeros by default.
_default_elasticConstants = np.zeros([6, 6])
# Four category slots; their meaning is not documented in this file.
_default_categories = np.zeros([4])
class Phase(object):
    """Crystallographic description of a single phase from an .ang header.

    Holds identity (id, material name, formula), symmetry, lattice
    constants, hkl families, elastic constants and category values, with
    validating property setters, plus helpers to parse and emit the
    "# Phase ..." header block format.

    NOTE(review): the numpy-array default arguments of __init__ are shared
    module-level objects; they are never mutated here, but mutating them
    through an instance would leak across all instances.
    """
    def __init__(self, id,
                 name='',
                 formula='',
                 symmetry=0,
                 latticeConstants=_default_latticeConstants,
                 hklFamilies=np.array([]),
                 elasticConstants=np.array([]),
                 categories=_default_categories):
        self._id = id
        self._materialName = name
        self._formula = formula
        self._info = ''
        self._symmetry = symmetry
        self._latticeConstants = latticeConstants
        self._hklFamilies = hklFamilies
        self._elasticConstants = elasticConstants
        self._categories = categories
    @property
    def id(self):
        # Read-only: the phase id is fixed at construction time.
        return self._id
    @property
    def materialName(self):
        return self._materialName
    @materialName.setter
    def materialName(self, val):
        if not isinstance(val, str):
            raise AttributeError("materialName must be a string.")
        self._materialName = val
    @property
    def formula(self):
        return self._formula
    @formula.setter
    def formula(self, val):
        if not isinstance(val, str):
            raise AttributeError("formula must be a string.")
        self._formula = val
    @property
    def symmetry(self):
        return self._symmetry
    @symmetry.setter
    def symmetry(self, val):
        if not isinstance(val, int) or val < 0:
            raise AttributeError("symmetry must be a positive integer. Get {}".format(val))
        self._symmetry = val
    @property
    def latticeConstants(self):
        return self._latticeConstants
    @latticeConstants.setter
    def latticeConstants(self, val):
        # Must be a numpy array of shape (6,): 3 lengths + 3 angles.
        if not isinstance(val, np.ndarray):
            raise AttributeError("latticeConstants must be a numpy array")
        if val.shape != _default_latticeConstants.shape:
            raise AttributeError("latticeConstants must have a shape of {}. Get {}"
                                 .format(_default_latticeConstants.shape, val.shape))
        self._latticeConstants = val
    @property
    def hklFamilies(self):
        return self._hklFamilies
    @hklFamilies.setter
    def hklFamilies(self, val):
        if not isinstance(val, np.ndarray):
            raise AttributeError("hklFamilies must be a numpy array")
        # NOTE(review): for a 0-d array the first condition is False and
        # `val.shape[1]` then raises IndexError rather than AttributeError.
        if len(val.shape) not in [0, 2] or val.shape[1] != 6:
            raise AttributeError("hklFamilies must have a shape of ?x6. Get {}".format(val.shape))
        self._hklFamilies = val
    @property
    def numberFamilies(self):
        # Derived: number of rows in the hklFamilies array.
        return len(self._hklFamilies)
    @property
    def elasticConstants(self):
        return self._elasticConstants
    @elasticConstants.setter
    def elasticConstants(self, val):
        if not isinstance(val, np.ndarray):
            raise AttributeError("elasticConstants must be a numpy array")
        if len(val.shape) not in [0, 2] or val.shape != _default_elasticConstants.shape:
            raise AttributeError("elasticConstants must have a shape of {}. Get {}"
                                 .format(_default_elasticConstants.shape, val.shape))
        self._elasticConstants = val
    @property
    def categories(self):
        return self._categories
    @categories.setter
    def categories(self, val):
        if not isinstance(val, np.ndarray):
            raise AttributeError("categories must be a numpy array")
        if val.shape != _default_categories.shape:
            raise AttributeError("categories must have a shape of {}. Get {}"
                                 .format(_default_categories.shape, val.shape))
        self._categories = val
    @staticmethod
    def create_from_phase(id, name, formula, phase):
        # Alternate constructor: copy the crystallographic data of `phase`
        # under a new identity (id, name, formula).
        return Phase(id=id,
                     name=name,
                     formula=formula,
                     symmetry=phase.symmetry,
                     latticeConstants=phase.latticeConstants,
                     hklFamilies=phase.hklFamilies,
                     elasticConstants=phase.elasticConstants,
                     categories=phase.categories)
    @staticmethod
    def load_from_string(sphase):
        """Parse a "# Phase N" header block (as produced by dump()).

        Header keys that are not recognised (e.g. "# Info",
        "# NumberFamilies") are silently ignored; a line not starting
        with '#' aborts parsing with an exception.
        """
        lines = sphase.splitlines()
        # extract phase id (compulsary)
        phase_id = re.findall("# Phase (\d+)", string=lines[0])
        if len(phase_id) != 1:
            raise Exception("Invalid Format: Could not retrieve the phase id")
        phase = Phase(id=int(phase_id[0]))
        # Multi-line entries (hklFamilies, ElasticConstants) are buffered
        # and converted to arrays after the whole block is read.
        list_buffer = collections.defaultdict(list)
        for i, line in enumerate(lines[1:]):
            if line.startswith("#"):
                tokens = re.split('\s+', line.strip())
                if tokens[1] == 'MaterialName':
                    phase.materialName = str(tokens[2])
                elif tokens[1] == 'Formula':
                    phase.formula = str(tokens[2])
                elif tokens[1] == 'Symmetry':
                    phase.symmetry = int(tokens[2])
                elif tokens[1] == 'LatticeConstants':
                    phase.latticeConstants = np.array(tokens[2:], dtype=np.float32)
                elif tokens[1] == 'Categories0':
                    phase.categories = np.array(tokens[2:], dtype=np.float32)
                elif tokens[1] in ['hklFamilies',
                                   'ElasticConstants']:
                    list_buffer[tokens[1]].append(tokens[2:])
            else:
                raise Exception("Invalid Format: missing diese in the header")
        # Post process list
        if "hklFamilies" in list_buffer:
            phase.hklFamilies = np.array(list_buffer["hklFamilies"], dtype=np.float32)
        if "ElasticConstants" in list_buffer:
            phase.elasticConstants = np.array(list_buffer["ElasticConstants"], dtype=np.float32)
        return phase
    def dump(self):
        """Serialise this phase back into the "# Phase ..." header format."""
        with io.StringIO() as buffer:
            buffer.write("# Phase\t{}\n".format(self.id))
            buffer.write("# MaterialName\t{}\n".format(self.materialName))
            buffer.write("# Formula\t{}\n".format(self.formula))
            buffer.write("# Info\n")
            buffer.write("# Symmetry\t{}\n".format(self.symmetry))
            buffer.write("# LatticeConstants\t{}\n".format(np2str(self.latticeConstants, precision=3)))
            buffer.write("# NumberFamilies\t{}\n".format(self.numberFamilies))
            for family in self.hklFamilies:
                buffer.write("# hklFamilies \t{}\n".format(np2str(family, smart_int=True)))
            for elastic in self.elasticConstants:
                buffer.write("# ElasticConstants \t{}\n".format(np2str(elastic, precision=6)))
            buffer.write("# Categories0\t{}\n".format(np2str(self.categories, smart_int=True)))
            sphase = buffer.getvalue()
        return sphase
| true
|