blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4aacdbf1c096e5d7e78ca59716ea9f98ba59fb4b
|
Python
|
mvbTuratti/vm
|
/app/pesos.py
|
UTF-8
| 1,244
| 2.875
| 3
|
[] |
no_license
|
import csv
import os
import datetime
from multiprocessing import Lock
lock = Lock()
def log_peso(valor, file):
    """Append a (UTC timestamp, valor) row to the CSV at `file`.

    Guarded by the module-level multiprocessing lock so concurrent
    workers do not interleave writes.  Returns `valor` unchanged.
    """
    with lock:
        # Compact UTC stamp; with this fixed format, lexicographic string
        # order equals chronological order.
        # NOTE(review): datetime.utcnow() is deprecated since Python 3.12 —
        # consider datetime.datetime.now(datetime.UTC).
        data = [datetime.datetime.utcnow().strftime("%Y%m%dT%H%M%SZ"),valor]
        def older_than_last(time):
            # True when the file holds at most one data row, or the last
            # row's timestamp string sorts before the new one — i.e. the
            # new entry is strictly newer than the last logged one.
            rows = []
            with open(file, 'r') as csvfile:
                csvreader = csv.reader(csvfile)
                fields = next(csvreader)  # skip the header row
                for row in csvreader:
                    rows.append(row)
            return True if len(rows) <= 1 else rows[-1][0] < time
        if not os.path.isfile(file):
            # First use: create the file with a header row.
            with open(file,'w', newline='', encoding='utf-8') as c:
                cw = csv.writer(c)
                cw.writerow(['Horario','Peso'])
        with open(file,'a', newline='', encoding='utf-8') as c:
            cw = csv.writer(c)
            if older_than_last(data[0]):
                cw.writerow(data)
        return valor
def get_last_peso(file):
    """Return the Peso column of the last row in `file`.

    Returns 0 when the file holds at most one data row.  Guarded by the
    same module-level lock as log_peso.  The value is returned as the
    raw string read from the CSV, not converted to a number.
    """
    with lock:
        rows = []
        with open(file, 'r') as csvfile:
            csvreader = csv.reader(csvfile)
            fields = next(csvreader)  # skip the ['Horario','Peso'] header
            for row in csvreader:
                rows.append(row)
        return 0 if len(rows) <= 1 else rows[-1][1]
| true
|
539d5b0939bfb215de269915f269b517b26d9632
|
Python
|
rajeevdodda/Codeforces
|
/CF-B/201-300/CF266-D2-B.py
|
UTF-8
| 310
| 3.0625
| 3
|
[] |
no_license
|
# https://codeforces.com/problemset/problem/266/B
# n children in a queue of 'B'oys and 'G'irls; each second, every boy
# standing directly in front of a girl swaps places with her.
# Simulate t seconds and print the final queue.
n, t = map(int, input().split())
queue = list(input())
for i in range(t):
    j = 0
    while j < n - 1:
        if queue[j] == "B" and queue[j+1] == "G":
            queue[j], queue[j+1] = "G", "B"
            # Skip past the swapped pair so each girl moves at most one
            # position per simulated second.
            j += 1
        j +=1
print(''.join(queue))
| true
|
dcd0a45c0c6d8d14b69daf8483309ecadceb5e68
|
Python
|
callmexss/data_structure_and_algorithms
|
/tree/trie.py
|
UTF-8
| 1,494
| 3.84375
| 4
|
[] |
no_license
|
class TrieNode:
    """One node of a trie over the 26 lowercase ASCII letters."""

    def __init__(self):
        self.path = 0            # distinct child links created below this node
        self.end = 0             # number of words ending exactly here
        self.maps = [None] * 26  # child links, index = ord(c) - ord('a')


class Trie:
    """Prefix tree supporting insert, exact search and prefix queries."""

    def __init__(self):
        self.root = TrieNode()

    def _walk(self, s):
        # Follow `s` from the root; None when a link is missing or when
        # `s` is empty/falsy (empty strings are never stored).
        if not s:
            return None
        node = self.root
        for ch in s:
            node = node.maps[ord(ch) - ord('a')]
            if not node:
                return None
        return node

    def insert(self, word):
        """Add `word` to the trie; empty input is ignored."""
        if not word:
            return
        cur = self.root
        for ch in word:
            slot = ord(ch) - ord('a')
            nxt = cur.maps[slot]
            if not nxt:
                nxt = TrieNode()
                cur.maps[slot] = nxt
                cur.path += 1  # a new branch was created under cur
            cur = nxt
        cur.end += 1  # count one more insertion of this exact word

    def search(self, word):
        """Return True iff `word` was inserted as a complete word."""
        node = self._walk(word)
        return node is not None and node.end != 0

    def has_prefix(self, prefix):
        """Return True iff some inserted word starts with `prefix`."""
        return self._walk(prefix) is not None
if __name__ == "__main__":
    # Smoke test: a single inserted word is findable, and every proper
    # prefix is recognised as a prefix but not as a stored word.
    trie = Trie()
    trie.insert('hello')
    assert trie.search('hello') == True
    assert trie.search('hell') == False
    assert trie.search('world') == False
    assert trie.has_prefix('h') == True
    assert trie.has_prefix('he') == True
    assert trie.has_prefix('hel') == True
    assert trie.has_prefix('hell') == True
| true
|
7fbfc4c0d799c80ab451a86c520ac7fbe10b4ab9
|
Python
|
Vijendrapratap/Machine-Learning
|
/Week4/NumPY/27. WAP to remove specific elements in a numpy array.py
|
UTF-8
| 459
| 4.5
| 4
|
[] |
no_license
|
"""
Write a Python program to remove specific elements in a numpy array.
Expected Output:
Original array:
[ 10 20 30 40 50 60 70 80 90 100]
Delete first, fourth and fifth elements:
[ 20 30 60 70 80 90 100]
"""
import numpy as np
myaaray = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
index = [0, 1, 8]
print("Original array:")
print(myaaray)
print("Delete first, second and eighth elements:")
new_array = np.delete(myaaray, index)
print(new_array)
| true
|
d6e8e92a08c928dde1d0c6f2dba79d330a66fa6f
|
Python
|
tbrack/ustjay-ethay-actsfayig
|
/main.py
|
UTF-8
| 1,052
| 2.828125
| 3
|
[] |
no_license
|
import os
import requests
from flask import Flask, render_template_string
from bs4 import BeautifulSoup
app = Flask(__name__)
pig_latinizer = "https://hidden-journey-62459.herokuapp.com"
def get_fact():
    """Scrape one random fact from unkno.com.

    Returns the text of the first div with id="content".
    NOTE(review): raises IndexError if the page layout changes and no
    such div exists; no timeout is set on the request.
    """
    response = requests.get("http://unkno.com")
    soup = BeautifulSoup(response.content, "html.parser")
    facts = soup.find_all("div", id="content")
    return facts[0].getText()
def pig_latinize(phrase):
    """Submit `phrase` to the pig-latinizer service and return the URL
    of the created result page.

    The service answers with a redirect whose Location header points at
    the pig-latinized text; redirects are deliberately not followed so
    the header can be read.  Returns None when the header is missing.
    """
    url = "https://hidden-journey-62459.herokuapp.com/piglatinize/"
    payload = {"input_text": phrase}  # renamed: `input` shadowed the builtin
    resp = requests.post(url, data=payload, allow_redirects=False)
    return resp.headers.get('Location')
@app.route('/')
def home():
    """Render a page whose only content is a link to the pig-latinized
    version of a freshly scraped random fact."""
    fact = get_fact().strip()
    pl_link = pig_latinize(fact)
    # NOTE(review): the link is interpolated into href without quotes;
    # URLs containing spaces would break the attribute.
    template = "<a href={{ link }}>{{ link }}</a>"
    return render_template_string(template, link=pl_link)
if __name__ == "__main__":
port = int(os.environ.get("PORT", 6787))
app.run(host='0.0.0.0', port=port)
| true
|
8a1a7ddec086073c2c6423362cc8c2f585e04636
|
Python
|
cliffpham/algos_data_structures
|
/algos/daily_coding_problem/problem_211.py
|
UTF-8
| 766
| 3.859375
| 4
|
[] |
no_license
|
# This problem was asked by Microsoft.
# Given a string and a pattern, find the starting indices of all occurrences of the pattern in the string.
# For example, given the string "abracadabra" and the pattern "abr", you should return [0, 7].
import unittest
class Test(unittest.TestCase):
    """Pins the two examples from the problem statement."""
    def test1(self):
        self.assertEqual(
            find_pattern("abracadabra", "abr"),
            [0,7]
        ),  # NOTE(review): stray trailing comma — harmless one-element tuple
    def test2(self):
        self.assertEqual(
            find_pattern("bcaabcbacabc", "abc"),
            [3,9]
        )
def find_pattern(s, p):
    """Return the start indices of every occurrence of `p` in `s`.

    Occurrences may overlap.  The original kept a separately-incremented
    window end (`find`) in lockstep with `i`; an explicit fixed-width
    slice says the same thing without the fragile parallel counter.
    """
    width = len(p)
    return [i for i in range(len(s)) if s[i:i + width] == p]
if __name__ == "__main__":
unittest.main()
| true
|
943e6dfedc0cd5910b9b9aa16b65600a8c6faed2
|
Python
|
1-WebPages-1/Calcu1
|
/calculadora.py
|
UTF-8
| 3,553
| 3.640625
| 4
|
[] |
no_license
|
from tkinter import *
ventana = Tk()
ventana.title("Calculadora")
# Insertion-cursor position inside the entry; shared by the handlers below.
i = 0
# Input field
e_texto = Entry(ventana, font= ("Calibri 20"))
e_texto.grid(row = 0, column = 0, columnspan = 4, padx = 5, pady = 5)
# Functions (button handlers)
def click_boton(valor):
    """Insert `valor` at the shared cursor position and advance it."""
    global i
    e_texto.insert(i, valor)
    i += 1
def borrar():
    """Clear the entry and reset the shared cursor position.

    Bug fix: the original assigned to a *local* `i`, so the insertion
    index used by click_boton was never reset after pressing AC;
    `global i` makes the reset take effect.
    """
    global i
    e_texto.delete(0, END)
    i = 0
def hacer_operacion():
    """Evaluate the expression in the entry and display the result.

    Bug fix: the original reset a *local* `i`, so the shared insertion
    index kept growing after '='; `global i` makes the reset stick.
    """
    global i
    ecuacion = e_texto.get()
    # NOTE(review): eval() executes arbitrary Python typed into the
    # entry — acceptable for a local toy calculator, never for
    # untrusted input.
    resultado = eval(ecuacion)
    e_texto.delete(0, END)
    e_texto.insert(0, resultado)
    i = 0
# TODO: bind the Enter key so it also triggers hacer_operacion
# Buttons (digits, parentheses, operators and controls)
boton1 = Button(ventana, text = "1", width = 5, height = 2, command = lambda: click_boton(1))
boton2 = Button(ventana, text = "2", width = 5, height = 2, command = lambda: click_boton(2))
boton3 = Button(ventana, text = "3", width = 5, height = 2, command = lambda: click_boton(3))
boton4 = Button(ventana, text = "4", width = 5, height = 2, command = lambda: click_boton(4))
boton5 = Button(ventana, text = "5", width = 5, height = 2, command = lambda: click_boton(5))
boton6 = Button(ventana, text = "6", width = 5, height = 2, command = lambda: click_boton(6))
boton7 = Button(ventana, text = "7", width = 5, height = 2, command = lambda: click_boton(7))
boton8 = Button(ventana, text = "8", width = 5, height = 2, command = lambda: click_boton(8))
boton9 = Button(ventana, text = "9", width = 5, height = 2, command = lambda: click_boton(9))
boton0 = Button(ventana, text = "0", width = 15, height = 2, command = lambda: click_boton(0))
boton_borrar = Button(ventana, text = "AC", width = 5, height = 2, command = lambda: borrar()) # TODO: change
boton_parentesis1 = Button(ventana, text = "(", width = 5, height = 2, command = lambda: click_boton("("))
boton_parentesis2 = Button(ventana, text = ")", width = 5, height = 2, command = lambda: click_boton(")"))
boton_punto = Button(ventana, text = ".", width = 5, height = 2, command = lambda: click_boton("."))
boton_div = Button(ventana, text = "/", width = 5, height = 2, command = lambda: click_boton("/"))
boton_mult = Button(ventana, text = "x", width = 5, height = 2, command = lambda: click_boton("*"))
boton_sum = Button(ventana, text = "+", width = 5, height = 2, command = lambda: click_boton("+"))
boton_rest = Button(ventana, text = "-", width = 5, height = 2, command = lambda: click_boton("-"))
boton_igual = Button(ventana, text = "=", width = 5, height = 2, command = lambda: hacer_operacion())
# Place the buttons on a 4-column grid (row 0 is the entry)
# Template >> botonX.grid(row = 4, column=0,padx = 5, pady = 5) <<
boton_borrar.grid(row = 1, column=0,padx = 5, pady = 5)
boton_parentesis1.grid(row = 1, column=1,padx = 5, pady = 5)
boton_parentesis2.grid(row = 1, column=2,padx = 5, pady = 5)
boton_div.grid(row = 1, column=3,padx = 5, pady = 5)
boton7.grid(row = 2, column=0,padx = 5, pady = 5)
boton8.grid(row = 2, column=1,padx = 5, pady = 5)
boton9.grid(row = 2, column=2,padx = 5, pady = 5)
boton_mult.grid(row = 2, column=3,padx = 5, pady = 5)
boton4.grid(row = 3, column=0,padx = 5, pady = 5)
boton5.grid(row = 3, column=1,padx = 5, pady = 5)
boton6.grid(row = 3, column=2,padx = 5, pady = 5)
boton_sum.grid(row = 3, column=3,padx = 5, pady = 5)
boton1.grid(row = 4, column=0,padx = 5, pady = 5)
boton2.grid(row = 4, column=1,padx = 5, pady = 5)
boton3.grid(row = 4, column=2,padx = 5, pady = 5)
boton_rest.grid(row = 4, column=3,padx = 5, pady = 5)
boton0.grid(row = 5, column=0,columnspan= 2,padx = 5, pady = 5)
boton_punto.grid(row = 5, column= 2,padx = 5, pady = 5)
boton_igual.grid(row = 5, column=3,padx = 5, pady = 5)
ventana.mainloop()
| true
|
a08dbd7a23f31b3588e2a9cab0d6788c90ccd406
|
Python
|
dayaftereh/py-csv
|
/scsv.py
|
UTF-8
| 2,103
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
import csv
import os
from datetime import datetime
import locale
# Use German number formatting so str2float parses comma decimals.
# NOTE(review): raises locale.Error when the de_DE locale is not
# installed on the host.
locale.setlocale(locale.LC_ALL, 'de_DE')
class CSVWriter:
    """csv.writer wrapper that can roll over to numbered output files.

    The first file keeps the given name; each later call to next_file()
    closes the current file and opens "<stem>.<counter><ext>" in the
    same directory.
    """

    def __init__(self, fname, delimiter=' '):
        self._f = None
        self._counter = 0      # how many files have been opened so far
        self._writer = None
        self._fname = fname
        self._delimiter = delimiter
        self._open_writer()

    def _open_writer(self):
        """Open the next output file and bind a csv.writer to it."""
        fname = self._next_name()
        # newline='' is the documented way to open a file for csv.writer;
        # without it rows gain extra blank lines on Windows.
        self._f = open(fname, 'w', newline='')
        self._writer = csv.writer(self._f, delimiter=self._delimiter)

    def _next_name(self):
        """Return the name for the next output file and bump the counter.

        First call returns the configured name verbatim; call k (k >= 2)
        returns "<stem>.<k-1><ext>".
        """
        if self._counter < 1:
            self._counter = self._counter + 1
            return self._fname
        parent = os.path.dirname(self._fname)
        fname = os.path.basename(self._fname)
        stem, ext = os.path.splitext(fname)
        next_fname = "%s.%d%s" % (stem, self._counter, ext)
        self._counter = self._counter + 1
        return os.path.join(parent, next_fname)

    def next_file(self):
        """Close the current file (if open) and start a fresh one."""
        if self._f is not None:  # idiom fix: was `not self._f is None`
            self._f.close()
            self._f = None
        self._open_writer()

    def writerow(self, row):
        """Write one row through the underlying csv.writer."""
        self._writer.writerow(row)
def read_directory(dirname, delimiter=' '):
    """Yield every parsed row of every file under `dirname`, recursively."""
    for root, _, names in os.walk(dirname):
        for name in names:
            yield from read(os.path.join(root, name), delimiter)


def read(csvfile, delimiter=' '):
    """Yield the rows of `csvfile` parsed with the given delimiter.

    The file stays open only while the generator is being consumed.
    """
    with open(csvfile) as handle:
        yield from csv.reader(handle, delimiter=delimiter)
def writer(csvfile, delimiter=' '):
    """Open `csvfile` for writing and return a csv.writer bound to it.

    NOTE(review): the underlying file handle is neither closed nor
    returned, so callers cannot close or flush it deterministically;
    buffered rows are only written when the handle is garbage-collected
    or the interpreter exits.
    """
    f = open(csvfile, 'w')
    writer = csv.writer(f, delimiter=delimiter)
    return writer
def str2date(date_str, format):
    """Parse `date_str` with the given strptime `format` into a datetime."""
    parsed = datetime.strptime(date_str, format)
    return parsed
def str2datetime(date, time=None):
    """Parse a US-style date ('%m/%d/%Y'), optionally with an 'H:M' time."""
    if time is None:
        return datetime.strptime(date, '%m/%d/%Y')
    combined = '%s %s' % (date, time)
    return datetime.strptime(combined, '%m/%d/%Y %H:%M')
def str2float(string):
    """Parse `string` as a float using the process locale.

    The module sets de_DE at import time, so '1.234,5' parses as 1234.5.
    """
    return locale.atof(string)
| true
|
b55bf2516bceca8ed74c69245fb6f0c8fa16019f
|
Python
|
dhruv-rajput/data-structures-and-algo
|
/stack/max-area-matrix.py
|
UTF-8
| 1,905
| 3.15625
| 3
|
[] |
no_license
|
def nsl(arr, n):
    """Nearest smaller to the left.

    For each i, return the index of the closest element left of i with a
    strictly smaller value, or -1 when none exists.

    Perf fix: the original simulated a stack with list.insert(0, ...) and
    list.pop(0), which are O(n) each; pushing/popping at the END of the
    list gives the same answers in amortised O(1) per operation.
    """
    ans = []
    stack = []  # (value, index) pairs; top of stack is the END of the list
    for i in range(n):
        # Elements >= arr[i] can never answer this or any later position.
        while stack and stack[-1][0] >= arr[i]:
            stack.pop()
        ans.append(stack[-1][1] if stack else -1)
        stack.append((arr[i], i))
    return ans
def nsr(arr, n):
    """Nearest smaller to the right.

    For each i, return the index of the closest element right of i with a
    strictly smaller value, or n when none exists.

    Perf fix: same as nsl — end-of-list stack instead of O(n)
    insert(0)/pop(0); results are filled by index instead of being
    appended in reverse and flipped at the end.
    """
    ans = [0] * n
    stack = []  # (value, index) pairs; top of stack is the END of the list
    for i in range(n - 1, -1, -1):
        while stack and stack[-1][0] >= arr[i]:
            stack.pop()
        ans[i] = stack[-1][1] if stack else n
        stack.append((arr[i], i))
    return ans
def MAH(arr, n):
    """Maximum area rectangle in the histogram `arr` of n bars."""
    left = nsl(arr, n)
    right = nsr(arr, n)
    # With bar i as the limiting height, the rectangle spans the open
    # interval between its nearest smaller neighbours.
    areas = [arr[i] * (right[i] - left[i] - 1) for i in range(n)]
    return max(areas)
# Maximal rectangle of 1s in a binary matrix: build a histogram per row
# (column heights, reset to 0 at zeros), then take the best
# maximum-area-histogram answer over all rows.
arr = [[0, 1, 1, 0],
       [1, 1, 1, 1],
       [1, 1, 1, 1],
       [1, 1, 0, 0]]
ans=[]
for i in range(0,len(arr)):
    if(i==0):
        ans.append(arr[0])
    else:
        temp=[]
        for j in range(0,len(arr[0])):
            if(arr[i][j]==0):
                temp.append(arr[i][j])  # a zero resets this column's height
            else:
                temp.append(arr[i][j] + ans[i-1][j])  # extend the column upward
        ans.append(temp)
t=[]
for i in ans:
    t.append(MAH(i,len(i)))
print(max(t))
# NOTE(review): leftover debug lines below — they just print m[3][1] (4)
# and are unrelated to the computation above.
m=[[3,1], [2,2] ,[5,3], [3,4], [2,5] ,[4,6], [3,7]]
print(m[3][1])
| true
|
c9e2c3bf00c8f614ff828177bef771041f462057
|
Python
|
lvyufeng/DuConv_mindspore
|
/src/callbacks.py
|
UTF-8
| 2,425
| 2.84375
| 3
|
[] |
no_license
|
import math
import time
from mindspore.train.callback import Callback
from numpy.lib.function_base import average
class TimeMonitor(Callback):
    """
    Monitor per-step time during training.

    Args:
        per_print_times (int): Print the average step time every this
            many steps; 0 disables printing. Default: 1.
            (The original docstring described a `data_size` parameter
            that does not exist on this class.)
    """
    def __init__(self, per_print_times=1):
        super(TimeMonitor, self).__init__()
        self._per_print_times = per_print_times
        self.epoch_time = time.time()  # start time of the current step (name is historical)
        self.time_list = []            # step durations in ms since the last print
    def step_begin(self, run_context):
        # Stamp the step start; step_end computes the delta from this.
        self.epoch_time = time.time()
    def step_end(self, run_context):
        step_seconds = (time.time() - self.epoch_time) * 1000  # milliseconds, despite the name
        self.time_list.append(step_seconds)
        cb_params = run_context.original_args()
        if self._per_print_times != 0 and cb_params.cur_step_num % self._per_print_times == 0:
            # `average` is numpy's mean, imported at module level.
            print("per step time: {:5.3f} ms".format(average(self.time_list)), flush=True)
            self.time_list = []
class LossCallBack(Callback):
    """
    Print the loss (net outputs) after every training step.

    When `dataset_size` is positive, also derive the epoch number and
    the fraction of the epoch completed from the global step counter.

    Args:
        dataset_size (int): Steps per epoch; non-positive disables the
            epoch/percent computation. Default: -1.
    """
    def __init__(self, dataset_size=-1):
        super(LossCallBack, self).__init__()
        self._dataset_size = dataset_size
    def step_end(self, run_context):
        """
        Print loss after each step
        """
        cb_params = run_context.original_args()
        if self._dataset_size > 0:
            # modf splits step/size into (fraction, whole): the whole part
            # is the count of finished epochs, the fraction is progress.
            percent, epoch_num = math.modf(cb_params.cur_step_num / self._dataset_size)
            if percent == 0:
                # Exactly on an epoch boundary: report 100% of the
                # finished epoch instead of 0% of the next one.
                percent = 1
                epoch_num -= 1
            print("epoch: {}, current epoch percent: {}, step: {}, outputs are {}"
                  .format(int(epoch_num), "%.3f" % percent, cb_params.cur_step_num, str(cb_params.net_outputs)),
                  flush=True)
        else:
            print("epoch: {}, step: {}, outputs are {}".format(cb_params.cur_epoch_num, cb_params.cur_step_num,
                                                               str(cb_params.net_outputs)), flush=True)
| true
|
831a32e5cdecdb4b64074eccd9f2fb716459d781
|
Python
|
JustinGOSSES/pyrolite
|
/docs/source/examples/geochem/lambdas_orthogonal_polynomials.py
|
UTF-8
| 3,198
| 3.0625
| 3
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
"""
lambdas: Visualising Orthogonal Polynomials
============================================
"""
import numpy as np
import matplotlib.pyplot as plt
from pyrolite.plot.spider import REE_v_radii
from pyrolite.geochem.ind import REE, get_ionic_radii
from pyrolite.util.math import lambdas, lambda_poly_func, OP_constants
np.random.seed(82)
def plot_orthagonal_polynomial_components(ax, xs, lambdas, params, log=False, **kwargs):
    """Plot weighted orthogonal-polynomial components on an axis over x values.

    Parameters
    ----------
    ax : axes object to draw on (anything with a matplotlib-style plot()).
    xs : array of x values.
    lambdas : one weight per polynomial.
    params : per-polynomial tuples of roots; the degree is len(roots).
    log : when True, exponentiate each component before plotting.
    """
    for w, p in zip(lambdas, params):  # plot the polynomials
        # Component = weight * product of (x - root) over the roots.
        f = np.ones_like(xs) * w
        for c in p:
            # Fix: np.float was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin float is the documented replacement.
            f *= xs - float(c)
        if log:
            f = np.exp(f)
        # Raw strings: \l and \c are not Python escapes, but non-raw
        # form triggers invalid-escape warnings; bytes are unchanged.
        label = (
            r"$r^{}: \lambda_{}".format(len(p), len(p))
            + [r"\cdot f_{}".format(len(p)), ""][int(len(p) == 0)]
            + "$"
        )
        ax.plot(xs, f, label=label, **kwargs)
########################################################################################
# First we generate some example data:
#
data_ree = REE(dropPm=True)  # REE element names, promethium excluded
data_radii = np.array(get_ionic_radii(data_ree, charge=3, coordination=8))
# Synthetic log-abundances: noise + linear trend + a quadratic in radius,
# then Ce and Eu are offset to mimic their common anomalies.
lnY = (
    np.random.randn(*data_radii.shape) * 0.1
    + np.linspace(3.0, 0.0, data_radii.size)
    + (data_radii - 1.11) ** 2.0
    - 0.1
)
for ix, el in enumerate(data_ree):
    if el in ["Ce", "Eu"]:
        lnY[ix] += np.random.randn(1) * 0.6
Y = np.exp(lnY)
########################################################################################
# Now we can calculate the lambdas:
#
exclude = ["Ce", "Eu"]  # anomalous elements excluded from the fit
if exclude:
    subset_ree = [i for i in data_ree if not i in exclude]
    subset_Y = Y[[i in subset_ree for i in data_ree]]
    subset_radii = np.array(get_ionic_radii(subset_ree, charge=3, coordination=8))
else:
    subset_Y, subset_ree, subset_radii = Y, data_ree, data_radii
params = OP_constants(subset_radii, degree=4)
ls = lambdas(np.log(subset_Y), subset_radii, params=params, degree=4)
continuous_radii = np.linspace(subset_radii[0], subset_radii[-1], 20)
# Rebuild a smooth profile from the fitted lambdas for plotting.
l_func = lambda_poly_func(ls, pxs=subset_radii, params=params)
smooth_profile = np.exp(l_func(continuous_radii))
########################################################################################
# Plot the raw data, the fitted subset, each polynomial component, and
# the reconstructed smooth profile.
ax = REE_v_radii(Y, ree=data_ree, index="radii", color="0.8", label="Data")
REE_v_radii(
    subset_Y,
    ree=subset_ree,
    ax=ax,
    index="radii",
    color="k",
    linewidth=0,
    label="Subset",
)
plot_orthagonal_polynomial_components(ax, continuous_radii, ls, params, log=True)
ax.plot(continuous_radii, smooth_profile, label="Reconstructed\nProfile", c="k", lw=2)
ax.legend(frameon=False, facecolor=None, bbox_to_anchor=(1, 1))
plt.show()
########################################################################################
# For more on using orthogonal polynomials to describe geochemical pattern data, see:
# O’Neill, H.S.C., 2016. The Smoothness and Shapes of Chondrite-normalized Rare Earth
# Element Patterns in Basalts. J Petrology 57, 1463–1508.
# `doi: 10.1093/petrology/egw047 <https://doi.org/10.1093/petrology/egw047>`__.
#
# .. seealso::
#
# Examples:
# `Dimensional Reduction <lambdadimreduction.html>`__,
# `REE Radii Plot <../plotting/REE_v_radii.html>`__
| true
|
3e8ff3db6650ca67e02d0f82df214a83928c820b
|
Python
|
albertauyeung/iems5703
|
/lectures/files/scraping.py
|
UTF-8
| 726
| 3.296875
| 3
|
[] |
no_license
|
from bs4 import BeautifulSoup as BS
import requests
# Base URL of content pages; {:d} is the 1-based page number
base_url = "http://highscalability.com/blog/?currentPage={:d}"
# Collects the hrefs of individual articles across all pages
links = []
# Loop through the first 10 content pages
for i in range(1, 11):
    # Generate the actual URL to the content page
    url = base_url.format(i)
    print(url)
    # Send HTTP request; non-200 pages are silently skipped below
    res = requests.get(url, timeout=5)
    if res.status_code == 200:
        # Parse the page using BeautifulSoup (lxml backend)
        soup = BS(res.text, 'lxml')
        # Article links are the anchors inside each h2 heading
        anchors = soup.select("h2 a")
        for a in anchors:
            links.append(a["href"])
| true
|
c35de9d40f938b898bfbb62f051224121013b503
|
Python
|
abrozynski/python-fun
|
/projectEuler/problem54.py
|
UTF-8
| 640
| 3.234375
| 3
|
[] |
no_license
|
import poker
def problem54():
    """Count how many poker hands Player 1 wins over the games in poker.txt.

    Each line holds two space-separated five-card hands (first 14 chars =
    hand 1, the rest = hand 2).  Returns a list of
    [hand1, eval1, hand2, eval2] rows for inspection.

    NOTE(review): Python 2 source (print statement below).  The
    player_2_wins and ties counters are declared but never incremented,
    so only the first printed tally is meaningful.  The input file
    handle is never closed.
    """
    player_1_wins = 0
    player_2_wins = 0
    ties = 0
    infile = open('poker.txt')
    outinfo=[]
    for line in infile:
        agame = poker.PokerGame()
        hand1 = poker.PokerHand(line[0:14])
        hand2 = poker.PokerHand(line[15:len(line)-2])
        result = agame.compare_hands(hand1, hand2)
        if result == 'Player 1':
            player_1_wins += 1
        outinfo.append([hand1.show_hand(), hand1.evaluation, hand2.show_hand(), hand2.evaluation])
#    outinfo.append([hand1.show_hand(),hand1.evaluation, hand2.show_hand(),hand2.evaluation, result])
    print player_1_wins, player_2_wins, ties
    return outinfo
| true
|
7b3a3501158ba4a35aaffdf6f72a865f9ea9f78b
|
Python
|
kaosbeat/choirbox
|
/python/metronome.py
|
UTF-8
| 3,093
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
import gpiozero
from gpiozero import Button, LED
from time import sleep, time
import socket
import sys
from signal import pause
# import thread module
import threading
import sys
sys.path +=['.']
from metronomeStates import BPMStateMachine
'''
In this script we want to light the LED at pin <> according to the signal coming from PD.
In another thread/process we want to alter the BPM of the pd patch by getting the input tapped tempo.
The available pins, not used by the pisound are GPIO:
2, 3, 4, 27, 22, 5, 7, 23, 14, 15
references:
https://github.com/defaultxr/taptempo.py
'''
button = Button('GPIO2')   # tap-tempo input button
led = LED('GPIO3')         # beat / tap indicator LED
bpm = 60.0
BPM_duration = 60.0/bpm    # seconds per beat at the current tempo
times = []                 # tap log: [timestamp, delta-from-previous-tap] pairs
def addtime(times):
    """Return (now, delta) for a new tap.

    `times` is the tap log: a list of [timestamp, delta] pairs.  `delta`
    is the seconds elapsed since the last recorded tap, or 0 for the
    first tap (the initial seed).

    Raises:
        TypeError: when `times` is not a list.
    """
    # isinstance instead of `type(...) != list` so list subclasses work;
    # `raise TypeError` instead of calling the class through raise(...).
    if not isinstance(times, list):
        raise TypeError
    t = time()
    if not times:
        tdiff = 0  # initial seed: nothing to diff against
    else:
        tdiff = t - times[-1][0]
    return (t, tdiff)
def averagetimes(times):
    """Return (mean tap interval in seconds, implied beats per minute)."""
    intervals = [entry[1] for entry in times]
    averagetime = sum(intervals) / float(len(intervals))
    bpm = 60.0 / averagetime  # equivalent to 1 / (interval in minutes)
    return (averagetime, bpm)
def _button_callback():
    """Handle one tap of the tempo button.

    Blinks the LED, moves the state machine into tapping mode, re-arms
    the "tapping finished" timeout (two beat lengths of silence), and
    updates the rolling average of tap intervals / the derived BPM.
    """
    global t, BPM_duration, bpm
    # blink led on touches
    led.blink(on_time=0.01, off_time=0.01,n=2,background=True)
    # check which state we are in and go in to tapping state
    if(bpm_sm.is_pdSendsBPM):
        bpm_sm.tapped()
    # Bug fix: is_alive is a method; the original tested the bound-method
    # object (always truthy) instead of calling it.  Effective behavior
    # is unchanged because cancel() on a finished timer is a no-op.
    if(t.is_alive()):
        t.cancel()
    t = threading.Timer(BPM_duration*2, sendBPM)
    t.start()
    # calculate the time difference between pushes
    timestamp, tdiff = addtime(times)
    times.append([timestamp, tdiff])
    if(len(times) > 1):
        # remove first element if it's either the initial seed
        # or when the list reaches max length
        if(times[0][1] == 0 or len(times) > 16):
            del(times[0])
        (BPM_duration, bpm) = averagetimes(times)
        # print(f'detected BPM: {bpm}, avg_time = {BPM_duration}')
'''
fake function, to be replaced with response from pd #TODO
go function needs to be implemented based on response from pd!
'''
def receiveACKfromPD():
    # Placeholder for the real acknowledgement from Pure Data: report
    # the programmed tempo and return the state machine to normal.
    print(f"BPM PROGRAMMED {BPM_duration, bpm}")
    bpm_sm.go()
'''
sendBPM
transmits the amount of ms between each pulses to pd
'''
def sendBPM():
    # Fired after ~2 beat lengths without a tap: tapping has stopped,
    # so the tempo is considered programmed.
    print("send BPM")
    bpm_sm.tapStopped()
    # SEND BPM TO PD #TODO
    t.cancel()
    times.clear()  # start the next tap session from a clean log
    # wait for response from pd to switch back to normal state
    t_ack_pd = threading.Timer(0.2, receiveACKfromPD)
    t_ack_pd.start()
'''
if we've programmed the BPM, then we blink the LED on the beats of the pd patch
'''
def rcvBEATfromPD():
    if(bpm_sm.is_pdSendsBPM):
        led.blink(on_time=0.005, off_time=0.005,n=1,background=True)
    #TODO program a fake beat again
    # Re-arm itself so the simulated beat keeps ticking at the current
    # tempo until the real signal from pd replaces it.
    global t_fake_beat
    t_fake_beat = threading.Timer(BPM_duration, rcvBEATfromPD)
    t_fake_beat.start()
def Main():
    # Wire the hardware button to the tap handler, start the (fake)
    # beat timer, then block forever waiting for GPIO callbacks.
    button.when_pressed = _button_callback
    t_fake_beat.start() #TODO
    pause()
bpm_sm = BPMStateMachine()
t = threading.Timer(1.5, sendBPM)  # placeholder; re-armed on every tap
t_fake_beat = threading.Timer(BPM_duration, rcvBEATfromPD) #TODO
if __name__ == '__main__':
    Main()
| true
|
a5d3f087704f8da72aec00c6fbacae77075e5aa5
|
Python
|
drathke924/userscripts
|
/Advent_Of_Code_2022/day8.py
|
UTF-8
| 2,226
| 3.375
| 3
|
[] |
no_license
|
from math import prod
def generate_map(data_in):
    """Flatten a grid (list of rows) into a {(x, y): height} dict.

    Returns the map plus the grid width and height.  Width is taken
    from the last row, so rows are assumed to have equal length.
    """
    grid = {
        (x, y): value
        for y, row in enumerate(data_in)
        for x, value in enumerate(row)
    }
    return grid, len(data_in[-1]), len(data_in)
def is_seen(x, y, full_map, map_size_x, map_size_y):
    """Return 1 when the tree at (x, y) is visible from at least one
    grid edge, else 0.  A tree is visible from a direction when every
    tree between it and that edge is strictly shorter."""
    height = full_map[(x, y)]

    def clear(cells):
        # Visible from this direction iff no tree on the path is as tall.
        return all(full_map[c] < height for c in cells)

    west = clear((i, y) for i in range(0, x))
    east = clear((i, y) for i in range(map_size_x - 1, x, -1))
    north = clear((x, i) for i in range(0, y))
    south = clear((x, i) for i in range(map_size_y - 1, y, -1))
    return max(int(west), int(east), int(north), int(south))
def get_scenic_score(x, y, full_map, map_size_x, map_size_y):
    """Product of the viewing distances in the four directions from (x, y).

    The viewing distance counts trees up to and including the first one
    at least as tall as the tree at (x, y); an edge tree scores 0 in the
    direction of the edge.
    """
    height = full_map[(x, y)]

    def distance(cells):
        count = 0
        for c in cells:
            count += 1
            if full_map[c] >= height:
                break  # view blocked; the blocking tree is still counted
        return count

    return prod([
        distance((i, y) for i in range(x - 1, -1, -1)),
        distance((i, y) for i in range(x + 1, map_size_x)),
        distance((x, i) for i in range(y - 1, -1, -1)),
        distance((x, i) for i in range(y + 1, map_size_y)),
    ])
def part_one(full_map, map_size_x, map_size_y):
    """Count the trees visible from outside the grid."""
    return sum(
        is_seen(x, y, full_map, map_size_x, map_size_y)
        for y in range(map_size_y)
        for x in range(map_size_x)
    )
def part_two(full_map, map_size_x, map_size_y):
    """Return the highest scenic score over all trees in the grid."""
    return max(
        get_scenic_score(x, y, full_map, map_size_x, map_size_y)
        for y in range(map_size_y)
        for x in range(map_size_x)
    )
with open("day8.txt" , "r") as f:
data = [[int(digit) for digit in list(line.strip())] for line in f.readlines()]
tree_map, map_size_x, map_size_y = generate_map(data)
print("Part One:", part_one(tree_map, map_size_x, map_size_y))
print("Part Two:", part_two(tree_map, map_size_x, map_size_y))
| true
|
42fdfb15a01d65f1fb576eed4365b417b6305f6f
|
Python
|
David8Zorrilla/TP-2018-1
|
/4-errores_y_math/Ejemplos/Errores/main.py
|
UTF-8
| 249
| 4.09375
| 4
|
[] |
no_license
|
# Demonstrates try/except/else/finally around parsing user input.
value = input('Inserta un numero: ')
try:
    int_value = int(value)
    is_int = True
except Exception as e:
    # Non-numeric input lands here (int() raises ValueError).
    print('Exception:', e)
    is_int = False
else:
    # Runs only when the conversion succeeded.
    print(5 + int_value)
finally:
    # Always runs; reports whether the input was a number.
    print('Es' if is_int else 'No es', 'un Numero')
| true
|
36bff43399a0f056f1cf62af83a432d9eca2a17f
|
Python
|
SehajS/Pong-Game
|
/main.py
|
UTF-8
| 3,855
| 3.59375
| 4
|
[] |
no_license
|
import turtle
import winsound  # NOTE(review): Windows-only standard-library module
# creating a window
win = turtle.Screen()
win.title("Pong Game") # window title
win.bgcolor("#0c264f") # background color
win.setup(width=800, height=600) # window dimensions
win.tracer(delay=0) # no animation delay; frames drawn via win.update()
left_score = 0
right_score = 0
# Left Paddle
left_paddle = turtle.Turtle()
left_paddle.shape("square")
left_paddle.speed(0) # maximum speed for the movement of left paddle
left_paddle.color("white")
left_paddle.shapesize(stretch_wid=5, stretch_len=1) # width of pedal is 100 px
# length of pedal is 20 px
left_paddle.penup() # no tailing coordinate
left_paddle.goto(-350, 0)
# total width is 800, so 350 to left and 350 to right have some space unused.
# Right Paddle
right_paddle = turtle.Turtle()
right_paddle.shape("square")
right_paddle.shapesize(stretch_wid=5, stretch_len=1)
right_paddle.speed(0)
right_paddle.color("white")
right_paddle.penup()
right_paddle.goto(350, 0) # on the other side of the screen
# Ball
ball = turtle.Turtle()
ball.shape("circle")
ball.speed(0)
ball.color("white")
ball.penup()
ball.goto(0,0)
ball.dx = 2 # ball velocity: +2 px right per frame
ball.dy = 2 # and +2 px up per frame
# Pen (score-board); hidden turtle used only to write the score text
pen = turtle.Turtle()
pen.speed(0)
pen.color("white")
pen.penup()
pen.hideturtle()
pen.goto(0,260)
'''
Movement of left and right paddles up and down.
'''
# Each handler nudges its paddle 20 px; clamping to the window happens
# in the main loop, not here.
def left_paddle_up():
    y = left_paddle.ycor()
    y += 20 # move paddle up by 20 units
    left_paddle.sety(y)
def right_paddle_up():
    y = right_paddle.ycor()
    y += 20
    right_paddle.sety(y)
def left_paddle_down():
    y = left_paddle.ycor()
    y -= 20
    left_paddle.sety(y)
def right_paddle_down():
    y = right_paddle.ycor()
    y -= 20
    right_paddle.sety(y)
# Keyboard Input: w/s drive the left paddle, arrow keys the right one
win.listen()
win.onkeypress(left_paddle_up, "w")
win.onkeypress(left_paddle_down, "s")
win.onkeypress(right_paddle_up, "Up")
win.onkeypress(right_paddle_down, "Down")
# Main game loop
while True:
    win.update()
    # Move the ball
    ball.setx(ball.xcor() + ball.dx)
    ball.sety(ball.ycor() + ball.dy)
    # Border Checking: bounce off the top and bottom walls
    if(ball.ycor() > 290):
        ball.sety(290)
        ball.dy *= -1
        winsound.PlaySound("edge_sound.wav", winsound.SND_ASYNC)
    if(ball.ycor() < -290):
        ball.sety(-290)
        ball.dy *= -1
        winsound.PlaySound("edge_sound.wav", winsound.SND_ASYNC)
    # Ball left on the right side: point for the left player, re-serve
    if(ball.xcor() > 390):
        winsound.PlaySound("edge_sound.wav", winsound.SND_ASYNC)
        ball.goto(0,0)
        ball.dx *= -1
        left_score += 1
        pen.clear()
        pen.write("Player A: {} Player B: {}".format(left_score, right_score), align="center", font=("Comic Sans MS", 24, "bold italic"))
    # Ball left on the left side: point for the right player, re-serve
    if(ball.xcor() < -390):
        winsound.PlaySound("edge_sound.wav", winsound.SND_ASYNC)
        ball.goto(0,0)
        ball.dx *= -1
        right_score += 1
        pen.clear()
        pen.write("Player A: {} Player B: {}".format(left_score, right_score),
                  align="center", font=("Comic Sans MS", 24, "bold italic"))
    # pedal borders: clamp both paddles inside the window
    if(right_paddle.ycor() > 250):
        right_paddle.sety(250)
    if(right_paddle.ycor() < -250):
        right_paddle.sety(-250)
    if(left_paddle.ycor() > 250):
        left_paddle.sety(250)
    if(left_paddle.ycor() < -250):
        left_paddle.sety(-250)
    # ball and pedal collision: ball inside the paddle's x band and
    # within +/-40 px of the paddle centre -> reflect horizontally
    if(ball.xcor() > 340 and ball.xcor() < 350 and ball.ycor() < right_paddle.ycor() + 40 and ball.ycor() > right_paddle.ycor() - 40):
        winsound.PlaySound("collision_sound.wav", winsound.SND_ASYNC)
        ball.setx(340)
        ball.dx *= -1
    if(ball.xcor() < -340 and ball.xcor() > -350 and ball.ycor() < left_paddle.ycor() + 40 and ball.ycor() > left_paddle.ycor() -40):
        winsound.PlaySound("collision_sound.wav", winsound.SND_ASYNC)
        ball.setx(-340)
        ball.dx *= -1
| true
|
0164661444898db1f59a1b5f268bcca9aab6de07
|
Python
|
happyxuwork/tensorflow-learning
|
/src/demo/section6-cnn/evaluateImage/convertCIFRA10ToImage.py
|
UTF-8
| 1,594
| 2.8125
| 3
|
[] |
no_license
|
# -*- coding: UTF-8 -*-
'''
@author: xuqiang
'''
from scipy.misc import imsave
import numpy as np
import pickle as cPickle
import os
# Unpickle one batch file and return the decoded dict.
def unpickle(file):
    """Load a CIFAR-10 batch file and return its dict.

    Keys are bytes (b'data', b'labels') because the batches were pickled
    under Python 2, hence encoding='bytes'.
    """
    # Context manager guarantees the handle is closed even when load()
    # raises; the original leaked the handle on error.
    with open(file, 'rb') as fo:
        return cPickle.load(fo, encoding='bytes')
# 生成训练集图片,如果需要png格式,只需要改图片后缀名即可。
in_path = "F:/CIFAR10/"
out_path = "F:/CIFAR10/png/"
for j in range(1, 6):
dataName = "data_batch_" + str(j) # 读取当前目录下的data_batch12345文件,dataName其实也是data_batch文件的路径,本文和脚本文件在同一目录下。
Xtr = unpickle(os.path.join(in_path,dataName))
print(dataName + " is loading...")
for i in range(0, 10000):
img = np.reshape(Xtr[b'data'][i], (3, 32, 32)) # Xtr['data']为图片二进制数据
img = img.transpose(1, 2, 0) # 读取image
picName = 'train/' + str(Xtr[b'labels'][i]) + '_' + str(i + (j - 1)*10000) + '.png' # Xtr['labels']为图片的标签,值范围0-9,本文中,train文件夹需要存在,并与脚本文件在同一目录下。
imsave(os.path.join(out_path,picName), img)
print(dataName + " loaded.")
print("test_batch is loading...")
# 生成测试集图片
testXtr = unpickle(os.path.join(in_path,"test_batch"))
for i in range(0, 10000):
img = np.reshape(testXtr[b'data'][i], (3, 32, 32))
img = img.transpose(1, 2, 0)
picName = 'test/' + str(testXtr[b'labels'][i]) + '_' + str(i) + '.jpg'
imsave(os.path.join(out_path,picName), img)
print("test_batch loaded.")
| true
|
3ad884eb6cb4ef490ef64615eaa31ac02adcb849
|
Python
|
GlennGuan/learn_cookbook
|
/chapter10/10_7.py
|
UTF-8
| 379
| 2.625
| 3
|
[] |
no_license
|
# 让目录或zip文件成为可运行的脚本 p417
myapplication/
spam.py
bar.py
grok.py
__main__.py
bash % python3 myapplication
解释器会把__main__.py文件作为主程序执行。
# 把代码打包进一个zip文件中时同样有效
bash % ls
spam.py bar.py grok.py __main__.py
bash % zip -r myapp.zip *.py
bash % python3 myapp.zip
...output from __main__.py...
| true
|
01d355ab8858246df78af58b427abb4dc261096a
|
Python
|
GhostUser/IRIS-classification-flask-api
|
/app.py
|
UTF-8
| 986
| 2.796875
| 3
|
[] |
no_license
|
from flask import Flask, request, jsonify, render_template
import numpy as np
import pickle
app=Flask(__name__)
# NOTE(review): the pickle file handle is never closed; unpickling is
# only acceptable because model.pkl ships with the app (never untrusted).
model=pickle.load(open("model.pkl", "rb"))
@app.route('/')
def home():
    """Serve the input form page."""
    return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
    """Predict the iris species from the posted form fields and re-render
    the form with the result.

    All form values are parsed as floats in form order — assumes the
    template posts the four measurements in the order the model was
    trained on (TODO confirm against index.html).
    """
    initial_features = [float(x) for x in request.form.values()]
    final_features = [np.array(initial_features)]
    predictions = model.predict(final_features)
    # Assumes the model emits integer class labels 0..2 in this order.
    species = ['Setosa','Versicolor','Virginica']
    output = species[predictions[0]]
    return render_template('index.html', prediction_species="The Predicted Species is {}".format(output))
@app.route('/api_predict', methods=["POST"])
def api_predict():
    """JSON API variant of predict: returns the species name as JSON.

    The request body is a JSON object whose values are the four
    measurements; dict value order is assumed to match the training
    feature order (TODO confirm with callers).
    """
    data = request.get_json(force=True)
    prediction = model.predict([np.array(list(data.values()))])
    species = ['setosa','versicolor','virginica']
    output = species[prediction[0]]
    return jsonify(output)
if __name__ == "__main__":
app.run(debug=True)
| true
|
d0c818398f5b70591416a07b1e44308954e27ca7
|
Python
|
shaheen19/dsp
|
/editors/nutmeg.py
|
UTF-8
| 307
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
print("This file was created using the nano editor by Aaron W")
# Generate some random Gaussian noise (10 samples per axis)
x = np.random.randn(10)
y = np.random.randn(10)
# Scatter the two noise vectors against each other and show the figure.
fig, ax = plt.subplots(1)
ax.scatter(x, y)
plt.show()
| true
|
12bfe23eed77d5399318fe762e7ebb965e7120ba
|
Python
|
CesarCent17/Virtual_Library
|
/views/ciencia_ficcion.py
|
UTF-8
| 3,327
| 3.015625
| 3
|
[] |
no_license
|
from tkinter import *
from centrar import *
import webbrowser as wb
from tkinter import messagebox
class Ficcion:
    """Science-fiction shelf window: three book buttons plus a back button.

    Each button opens a local PDF in the default viewer via webbrowser.
    """
    def __init__(self):
        self.obj_centrar = Centro()
        self.getVentana()
        self.ventana_ficcion.iconbitmap("C:/Users/PC/Pictures/Virtual Library/graphic_resources/Virtual Library.ico")
        # Canvas holding the background image
        canvas = Canvas(self.ventana_ficcion, bg="#ffffff", height=600, width=1000, bd=0, highlightthickness=0, relief="ridge")
        canvas.place(x=0, y=0)
        # Background
        background_img = PhotoImage(file=f"C:/Users/PC/Pictures/Virtual Library/graphic_resources/Ficcion/background.png")
        background = canvas.create_image(653.0, 299.5, image=background_img)
        ###################################################################################
        # Buttons
        # NOTE(review): the PhotoImage objects below are locals; they stay alive only
        # because mainloop() runs inside __init__. If mainloop is ever moved out,
        # Tkinter will garbage-collect them and the buttons will render blank.
        # 1984
        img0 = PhotoImage(file=f"C:/Users/PC/Pictures/Virtual Library/graphic_resources/Ficcion/img0.png")
        self.libro1 = Button(self.ventana_ficcion,image=img0, borderwidth=0, highlightthickness=0,
                             relief="flat", bg="#ffffff", command=self.abrirlibro1)
        self.libro1.place(x=85, y=118, width=136, height=136)
        # The Last Question
        img1 = PhotoImage(file=f"C:/Users/PC/Pictures/Virtual Library/graphic_resources/Ficcion/img1.png")
        self.libro2 = Button(self.ventana_ficcion,image=img1, borderwidth=0, highlightthickness=0,
                             relief="flat", bg="#ffffff", command=self.abrirlibro2)
        self.libro2.place(x=85, y=275, width=136, height=136)
        # I, Robot
        img2 = PhotoImage(file=f"C:/Users/PC/Pictures/Virtual Library/graphic_resources/Ficcion/img2.png")
        self.libro3 = Button(self.ventana_ficcion,image=img2, borderwidth=0, highlightthickness=0,
                             relief="flat", bg="#ffffff", command=self.abrirlibro3)
        self.libro3.place(x=85, y=432, width=136, height=136)
        # Back button
        img3 = PhotoImage(file=f"C:/Users/PC/Pictures/Virtual Library/graphic_resources/Ficcion/img3.png")
        self.volver = Button(self.ventana_ficcion,image=img3, borderwidth=0, highlightthickness=0,
                             relief="flat", bg="#ffffff", command=self.accionVolver)
        self.volver.place(x=913, y=37, width=50, height=50)
        ###################################################################################
        self.ventana_ficcion.mainloop()
    def getVentana(self):
        """Create and center the fixed-size 1000x600 toplevel window."""
        self.ventana_ficcion = Toplevel()
        self.ventana_ficcion.title("Editar perfil")
        self.obj_centrar.centrar_ventana(self.ventana_ficcion, 600, 1000)
        self.ventana_ficcion.resizable(0, 0)
    def accionVolver(self):
        """Close this window (back to the caller's window)."""
        self.ventana_ficcion.destroy()
    # Book-opening callbacks: hand the PDF path to the OS default handler.
    def abrirlibro1(self):
        wb.open_new(r"C:/Users/PC/Pictures/Virtual Library/books/Ciencia ficcion/George Orwell 1984.pdf")
    def abrirlibro2(self):
        wb.open_new(r"C:/Users/PC/Pictures/Virtual Library/books/Ciencia ficcion/Isaac Asimov la ultima pregunta.pdf")
    def abrirlibro3(self):
        wb.open_new(r"C:/Users/PC/Pictures/Virtual Library/books/Ciencia ficcion/Isaac Asimov yo robot.pdf")
    # Test helper — NOTE(review): not wired to any button.
    def mensajeEleccion(self):
        messagebox.showinfo("Elección de libro", "El libro se abrirá en su aplicación predeterminada")
| true
|
d4c024bce65cbcec1ec4ebe1240a64047b977317
|
Python
|
XMY400013/Tik_Tak_Toe_Telegram_bot
|
/main.py
|
UTF-8
| 3,042
| 2.6875
| 3
|
[] |
no_license
|
import telebot
from telebot import types
from dotenv import load_dotenv
import os
from Class import Table
# Load the bot token from the local `env` file next to this script.
dotenv_path = os.path.join(os.path.dirname(__file__), 'env')
load_dotenv(dotenv_path)
tb = telebot.TeleBot(os.getenv('TOKEN'))
# Game-state table (maps message ids to boards/players; see Class.Table)
table = Table()
# Handler for the /x_O command: post an empty 3x3 inline keyboard and register the game.
@tb.message_handler(commands=['x_O'])
def start_game(message):
    m = types.InlineKeyboardMarkup()
    # Nine blank cells; callback_data '0'..'8' encodes the cell index.
    b_1 = types.InlineKeyboardButton(text=' ', callback_data='0')
    b_2 = types.InlineKeyboardButton(text=' ', callback_data='1')
    b_3 = types.InlineKeyboardButton(text=' ', callback_data='2')
    b_4 = types.InlineKeyboardButton(text=' ', callback_data='3')
    b_5 = types.InlineKeyboardButton(text=' ', callback_data='4')
    b_6 = types.InlineKeyboardButton(text=' ', callback_data='5')
    b_7 = types.InlineKeyboardButton(text=' ', callback_data='6')
    b_8 = types.InlineKeyboardButton(text=' ', callback_data='7')
    b_9 = types.InlineKeyboardButton(text=' ', callback_data='8')
    m.add(b_1, b_2, b_3, b_4, b_5, b_6, b_7, b_8, b_9)
    # The command sender plays X; O joins on their first button press.
    mes_ = tb.send_message(message.chat.id, f'X: {message.from_user.first_name} O:', reply_markup=m)
    table.save_game(mes_.message_id, message.from_user.id, message.from_user.first_name)
    print(table.table)  # debug output
    print(mes_.message_id)
@tb.callback_query_handler(func=lambda call: True)
def calling(call):
    """Handle a cell press: apply the move, redraw the board, announce a winner."""
    print(call.message.message_id)  # debug output
    print(table.table)
    # which_turn returns truthy only if the press was a legal move by the right player.
    if table.which_turn(call.message.message_id, call.from_user.id, call.from_user.first_name, call.data):
        list_table = table.return_table(call.message.message_id)
        m = types.InlineKeyboardMarkup()
        b_1 = types.InlineKeyboardButton(text=list_table[0], callback_data='0')
        b_2 = types.InlineKeyboardButton(text=list_table[1], callback_data='1')
        b_3 = types.InlineKeyboardButton(text=list_table[2], callback_data='2')
        b_4 = types.InlineKeyboardButton(text=list_table[3], callback_data='3')
        b_5 = types.InlineKeyboardButton(text=list_table[4], callback_data='4')
        b_6 = types.InlineKeyboardButton(text=list_table[5], callback_data='5')
        b_7 = types.InlineKeyboardButton(text=list_table[6], callback_data='6')
        b_8 = types.InlineKeyboardButton(text=list_table[7], callback_data='7')
        b_9 = types.InlineKeyboardButton(text=list_table[8], callback_data='8')
        m.add(b_1, b_2, b_3, b_4, b_5, b_6, b_7, b_8, b_9)
        users = table.return_users(call.message.message_id)
        tb.edit_message_text(
            chat_id=call.message.chat.id,
            message_id=call.message.message_id, reply_markup=m, text=f'X: {users[0]} O: {users[1]}')
        # return_winner yields a result message when the game is over, else falsy.
        end_game = table.return_winner(call.message.message_id)
        if end_game:
            tb.edit_message_text(
                chat_id=call.message.chat.id,
                message_id=call.message.message_id,
                text=end_game,
                reply_markup=m)
            print('ended game')
if __name__ == '__main__':
    # Long-poll Telegram for updates until killed.
    tb.polling(none_stop=True)
| true
|
692861c7254199d3b80aed58bdcf740650aab227
|
Python
|
mathematiguy/movie_reviews
|
/rotten_tomatoes/rotten_tomatoes/spiders/rt_reviews.py
|
UTF-8
| 1,815
| 2.671875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import scrapy
class RtReviewsSpider(scrapy.Spider):
    """Scrape Rotten Tomatoes user reviews for 'The Last Jedi'.

    Yields one dict per review (name, review text, date, star rating) and
    follows the pagination link until the last page.
    """
    name = 'rt_reviews'
    allowed_domains = ['rottentomatoes.com']
    start_urls = ['https://www.rottentomatoes.com/m/star_wars_the_last_jedi/reviews/?page=1&type=user']
    def parse(self, response):
        # get the names for each reviewer
        names = response.xpath('//*[@id="reviews"]/div[3]/div/div[1]/div[3]/a/span/text()').extract()
        # get the text from each review (xpath div indices are 1-based)
        reviews = ['\n\n'.join(response.xpath('//*[@id="reviews"]/div[3]/div[%d]/div[2]/div/text()' % (i + 1)).extract()).strip() for i in range(len(names))]
        # get the date for each review, dropping whitespace/half-star glyphs (u'\xbd' is the ½ character)
        dates = [x for x in response.xpath('//*[@id="reviews"]/div[3]/div/div[2]/span/text()').extract() if x not in [' ', u'\xbd', u' \xbd']]
        # get the star count for each review (number of full-star spans)
        star_counts = [len(response.xpath('//*[@id="reviews"]/div[3]/div[%d]/div[2]/span[1]/span' %(i + 1)).extract()) for i in range(len(names))]
        # get the half star count for each review
        # NOTE(review): splitting on the literal 'December 24, 2017' is page-specific
        # and fragile — this only works for pages where every review bears that date.
        half_counts = [x == u' \xbd' for x in ''.join(response.xpath('//*[@id="reviews"]/div[3]/div/div[2]/span/text()').extract()).split('December 24, 2017')]
        # calculate the star rating for each review (full stars + optional half)
        star_ratings = [star_count + 0.5 * half_count if half_count else star_count for star_count, half_count in zip(star_counts, half_counts)]
        for name, review, date, star_rating in zip(names, reviews, dates, star_ratings):
            yield dict(name = name, review = review, date = date, star_rating = star_rating)
        page_links = response.xpath('//*[@id="reviews"]/div[4]/a[2]/@href').extract()
        if len(page_links) > 0:
            # get the link to the next page
            next_page = response.xpath('//*[@id="reviews"]/div[4]/a[2]/@href').extract()[0]
            # go to the next page
            yield scrapy.Request(response.urljoin(next_page), callback = self.parse)
| true
|
18e942c7bcbd454a3a760242823fa9156ca8b5d2
|
Python
|
dexion/springbok
|
/AnomalyDetection/DistributedDetection.py
|
UTF-8
| 13,224
| 2.515625
| 3
|
[] |
no_license
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from ROBDD.robdd import Robdd
from ROBDD.synthesis import synthesize, compare, negate_bdd
from ROBDD.operators import Bdd
from AnomalyError import AnomalyError
from AnomalyError import ErrorType
from collections import deque
import time
from NetworkGraph import NetworkGraph
import networkx as nx
from SpringBase.Firewall import Firewall
import MyGtk.Gtk_Main
from SpringBase.Rule import Rule
from SpringBase.Action import Action
class DistributedDetection:
    """DistributedDetection class.
    Implementation of the distributed detection algorithm.

    Notes: Python 2 code (xrange, implicit string handling); runs under py2 only.

    Parameters
    ----------
    deep_search : bool. Enable deep search.
    error_path : List. List of error with corresponding path
    cancel : bool. Used to cancel detection. If True cancel detection and return
    """
    def __init__(self, deep_search):
        self.deep_search = deep_search
        self.error_path = []
        self.cancel = False
    def __getstate__(self):
        """Used by Pickle for saving"""
        state = self.__dict__.copy()
        return state
    def __setstate__(self, state):
        """Used by Pickle for loading"""
        self.__dict__.update(state)
    def _cancel_detection(self, widget, *args):
        """Callback function for canceling detection"""
        self.cancel = True
    def distributed_detection(self):
        """Distributed detection algorithm.
        Find all simple path between each node.
        For each path try to detect distributed anomaly

        Return
        ------
        Return the list of error found
        """
        t0 = time.time()
        error_list = deque()
        g = NetworkGraph.NetworkGraph().multidigraph
        # reverse graph to compute rooted tree path
        g_reverse = NetworkGraph.NetworkGraph().get_reversed_multidigraph()
        MyGtk.Gtk_Main.Gtk_Main().create_progress_bar("Anomaly detection", count_nb_rules(g, g_reverse), self._cancel_detection)
        # Detect path between each couple of node (firewall nodes are hops, not endpoints)
        for source in g.nodes():
            if self.cancel:
                break
            if isinstance(source, Firewall) or source is None:
                continue
            for target in g.nodes():
                if self.cancel:
                    break
                if isinstance(target, Firewall) or target is source or target is None:
                    continue
                path_res = get_rooted_tree(g_reverse, source, target, set())
                if path_res:
                    remain, tmp_error = self._tree_parse_detection(path_res)
                    error_list.append([source.to_string() + " → " + target.to_string(), tmp_error])
        self.error_path = error_list
        t1 = time.time()
        MyGtk.Gtk_Main.Gtk_Main().change_statusbar('Anomaly distributed detection process in %.3f secondes' % (t1 - t0))
        MyGtk.Gtk_Main.Gtk_Main().destroy_progress_bar()
        return error_list
    def _tree_parse_detection(self, tree_path):
        """Detect anomalies given a rooted tree

        Parameters
        ----------
        tree_path : nested list. Rooted tree representation
                    (tree_path[0] is the root node, tree_path[1:] its subtrees)

        Return
        ------
        Return a tuple containing the accepted packets and a list of detected errors
        """
        error_list = []
        parent = tree_path[0]
        remain = Robdd.false()
        for i in xrange(1, len(tree_path)):
            acl_list = NetworkGraph.NetworkGraph().get_acl_list(src=tree_path[i][0], dst=parent)
            # test is leaf
            if len(tree_path[i]) == 1:
                res_remain, res_error = self._distributed_detection(acl_list, Robdd.true(), tree_path[i])
                # NOTE(review): errors found on the leaf hop are discarded here —
                # confirm this is intentional (only remaining-traffic is kept).
                res_error = []
            else:
                res_remain, res_error = self._tree_parse_detection(tree_path[i])
                error_list += res_error
                res_remain, res_error = self._distributed_detection(acl_list, res_remain, tree_path[i])
            error_list += res_error
            remain = synthesize(remain, Bdd.OR, res_remain)
        return remain, error_list
    def _distributed_detection(self, acl_list, remain, tree_path):
        """Detection method for a given acl with a given remain ROBDD.
        This algorithm is derived from the algorithm of Fireman.
        For more informations read :
        - Firewall Policy Advisor for Anomaly Detection and rules analysis,
          http://www.arc.uncc.edu/pubs/im03-cr.pdf
        - FIREMAN : A Toolkit for FIREwall Modeling and ANalysis,
          http://www.cs.ucdavis.edu/~su/publications/fireman.pdf

        Parameters
        ----------
        acl : Rule list. The rule list to test
        remain : ROBDD. The remaining ROBDD

        Return
        ------
        Return a tuple of remaining rules and the list error found in this context
        """
        accept_robdd_list = []
        error_list = deque()
        # bound method hoisted out of the hot loop
        error_list_append = error_list.append
        for acl in acl_list:
            for rule_path in acl.get_rules_path():
                accept = Robdd.false()
                deny = Robdd.false()
                if self.cancel:
                    break
                for rule, action in rule_path:
                    if self.cancel:
                        break
                    MyGtk.Gtk_Main.Gtk_Main().update_progress_bar(1)
                    MyGtk.Gtk_Main.Gtk_Main().update_interface()
                    error_rules = []
                    # Effective action: the rule's own accept/deny if explicit,
                    # otherwise the action carried by the chain path.
                    rule_action = rule.action.chain if isinstance(rule.action.chain, bool) else action
                    if rule_action:
                        # P ⊆ I
                        if compare_bdd(rule.toBDD(), Bdd.IMPL, remain):
                            pass
                        # P ⊆ ¬I
                        elif compare_bdd(rule.toBDD(), Bdd.IMPL, negate_bdd(remain)):
                            if self.deep_search:
                                # ∀ ACLx < ACLj, ∀ x ∈ ACLx, ∃ <Px, deny> such that Px ∩ Pj != ∅
                                error_rules = self.search_rules(rule, Bdd.AND, False, tree_path)
                            error_list_append(
                                AnomalyError.error_message(ErrorType.DIST_SHADOW, ErrorType.ERROR, rule, error_rules))
                        # P ∩ I != ∅
                        else:
                            if self.deep_search:
                                # ∀ ACLx < ACLj, ∀ x ∈ ACLx, ∃ Px such that Px ∩ Pj != ∅
                                error_rules = self.search_rules(rule, Bdd.AND, None, tree_path)
                            error_list_append(
                                AnomalyError.error_message(ErrorType.DIST_CORRELATE, ErrorType.WARNING, rule, error_rules))
                    else:
                        # P ⊆ I
                        if compare_bdd(rule.toBDD(), Bdd.IMPL, remain):
                            if self.deep_search:
                                # ∀ ACLx < ACLj, ∀ x ∈ ACLx, ∃ <Px, accept> such that Px ∩ Pj != ∅
                                error_rules = self.search_rules(rule, Bdd.AND, True, tree_path)
                            error_list_append(
                                AnomalyError.error_message(ErrorType.DIST_RAISED, ErrorType.WARNING, rule, error_rules))
                        # P ⊆ ¬I
                        elif compare_bdd(rule.toBDD(), Bdd.IMPL, negate_bdd(remain)):
                            if self.deep_search:
                                # ∀ ACLx < ACLj, ∀ x ∈ ACLx, ∃ <Px, deny> such that Px ∩ Pj != ∅
                                error_rules = self.search_rules(rule, Bdd.AND, False, tree_path)
                            error_list_append(
                                AnomalyError.error_message(ErrorType.DIST_REDUNDANT, ErrorType.WARNING, rule, error_rules))
                        # P ∩ I != ∅
                        else:
                            if self.deep_search:
                                # ∀ ACLx < ACLj, ∀ x ∈ ACLx such that Px ∩ Pj != ∅
                                error_rules = self.search_rules(rule, Bdd.AND, None, tree_path)
                            error_list_append(
                                AnomalyError.error_message(ErrorType.DIST_CORRELATE, ErrorType.WARNING, rule, error_rules))
                    # update value
                    if rule.action.is_chained() or rule.action.is_return():
                        if action:
                            # D = D ∪ ¬(A ∪ P) = D ∪ (¬A ∩ ¬P)
                            deny = synthesize(deny, Bdd.OR, negate_bdd(synthesize(accept, Bdd.OR, rule.toBDD())))
                        else:
                            # D = D ∪ (¬A ∩ P)
                            deny = synthesize(deny, Bdd.OR, synthesize(negate_bdd(accept), Bdd.AND, rule.toBDD()))
                    else:
                        if rule.action.chain:
                            # A = A ∪ (¬D ∩ P)
                            accept = synthesize(accept, Bdd.OR, synthesize(negate_bdd(deny), Bdd.AND, rule.toBDD()))
                        else:
                            # D = D ∪ (¬A ∩ P)
                            deny = synthesize(deny, Bdd.OR, synthesize(negate_bdd(accept), Bdd.AND, rule.toBDD()))
                accept_robdd_list.append(accept)
        # Union of the accept sets over every rule path.
        res_accept = Robdd.false()
        for a in accept_robdd_list:
            res_accept = synthesize(res_accept, Bdd.OR, a)
        return res_accept, error_list
    def search_rules(self, rule, operator, action, tree_path):
        """Deep search option.
        Reparse all rules corresponding to the anomaly.

        Parameters
        ----------
        rule : Rule. The rule to compare
        operator : Bdd.Operator. The operation to perform
        action : Bool or None. Filter rules having this action
        path : the current tested path
        index_path : the current position of the rule in the current path"""
        error_rules = deque()
        parent = tree_path[0]
        for i in xrange(1, len(tree_path)):
            if self.cancel:
                break
            if len(tree_path[i]) > 1:
                error_rules += self.search_rules(rule, operator, action, tree_path[i])
            acl_list = NetworkGraph.NetworkGraph().get_acl_list(src=tree_path[i][0], dst=parent)
            for acl in acl_list:
                for rule_path in acl.get_rules_path():
                    for r, a in rule_path:
                        rule_action = r.action.chain if isinstance(r.action.chain, bool) else a
                        if action is not None and rule_action != action:
                            continue
                        if compare_bdd(rule.toBDD(), operator, r.toBDD()):
                            error_rules.append(r)
        if not error_rules:
            # No explicit culprit found: blame the implicit default-deny rule
            # if it intersects the anomalous rule.
            deny_rule = Rule(-1, 'probably implicit deny', [], [], [], [], [], Action(False))
            if not action and compare_bdd(rule.toBDD(), operator, deny_rule.toBDD()):
                error_rules.append(deny_rule)
        return error_rules
def get_rooted_tree(graph, source, target, visited):
    """Create a nested list of a rooted tree with source as leaf and target as root.

    The result has the shape [root, subtree1, subtree2, ...]; a bare [source]
    marks a leaf. Returns [] when source is unreachable from target.

    Parameters
    ----------
    graph : MultiDiGraph. networkX multidigraph reversed for path search
    source : Ip. souce node
    target : Ip. target node
    visited : set. Set of visited path"""
    if source == target:
        return [source]
    res = []
    visited.add(target)
    for n in nx.neighbors(graph, target):
        if n not in visited:
            # Copy `visited` per branch so sibling subtrees may revisit nodes
            # (only the current root-to-leaf path is excluded).
            tmp = get_rooted_tree(graph, source, n, set(visited))
            if tmp:
                res.append(tmp)
    if res:
        res.insert(0, target)
    return res
def compare_bdd(bdd1, operator, bdd2):
    """Compare two ROBDD.

    For Bdd.IMPL: True when bdd1 implies bdd2 (compare() result <= 2).
    For Bdd.AND: True when the intersection is non-empty.
    NOTE(review): implicitly returns None for any other operator — callers
    must only pass IMPL or AND.
    """
    res = compare(bdd1, operator, bdd2)
    if operator == Bdd.IMPL:
        return res <= 2
    elif operator == Bdd.AND:
        return not res <= 2
def bdd_to_string(bdd):
    """Convert a ROBDD to a string key (used for memoization).

    Combines the root node id with the ROBDD's node listing.
    """
    return str(bdd.root) + ' ' + bdd.list()
def count_nb_rules(g, g_reverse):
    """Count the number of rules in all path. Used for the progress bar

    Python 2 code (reduce is a builtin here).

    Parameters
    ----------
    g : MultiDiGraph. The topology graph
    g_reverse : MultiDiGraph. The reversed graph of g

    Return
    ------
    Return the number of rules counted"""
    def count_rules(tree_path):
        # Sum rule counts over every edge of the rooted tree (recursively).
        res = 0
        for i in xrange(1, len(tree_path)):
            # test is leaf
            if not len(tree_path[i]) == 1:
                res += count_rules(tree_path[i])
            acl_list = NetworkGraph.NetworkGraph().get_acl_list(src=tree_path[i][0], dst=tree_path[0])
            res += reduce(lambda x, y: x + y, [len(acl.rules) for acl in acl_list], 0)
        return res
    nb_rules = 0
    # Mirror the node iteration of distributed_detection so the progress-bar
    # total matches the number of update_progress_bar(1) calls.
    for source in g.nodes():
        if isinstance(source, Firewall):
            continue
        for target in g.nodes():
            if isinstance(target, Firewall) or target is source:
                continue
            path_res = get_rooted_tree(g_reverse, source, target, set())
            nb_rules += count_rules(path_res)
    return nb_rules
| true
|
fceef754a1efd52e3136335fe4372593c6aa521f
|
Python
|
nsmith0310/Programming-Challenges
|
/Python 3/Project_Euler/problem 90.py
|
UTF-8
| 727
| 3.171875
| 3
|
[] |
no_license
|
###1217  (presumably the expected answer for Project Euler problem 90)
# Count pairs of 6-digit cubes that can display every two-digit square
# 01, 04, 09, 16, 25, 36, 49, 64, 81 — with 6 and 9 interchangeable.
from itertools import combinations as c, product as p
# All 6-face digit selections (210 of them), then all unordered pairs of faces.
t= list(c(["0","1","2","3","4","5","6","7","8","9"],6))
u = list(c(t,2))
v=[]  # NOTE(review): unused
f=[]
for x in u:
    # NOTE(review): this rebinds `c`, shadowing the combinations alias —
    # harmless only because t and u are already built above.
    c = list(p(x[0],x[1]))
    g=[]
    for y in c:
        # Concatenate one digit from each cube into a two-char string.
        s=y[0]+y[1]
        g.append(s)
    f.append(g)
count=0
for x in f:
    # Each square must be displayable in either orientation; 6<->9 swaps give
    # the alternates (09/06, 16/19, 36/39, 49/46 — the 49 clause also covers 64).
    if ("01" in x or "10" in x) and ("04" in x or "40" in x) and (("09" in x or "90" in x) or ("06" in x or "60" in x)) and (("16" in x or "61" in x) or ("19" in x or "91" in x)) and ("25" in x or "52" in x) and (("36" in x or "63" in x) or ("39" in x or "93" in x)) and (("49" in x or "94" in x) or ("46" in x or "64" in x)) and ("81" in x or "18" in x):
        count+=1
print(count)
| true
|
b004d0ff1f95f5b05247dbba11d4a67a6d2f3f34
|
Python
|
BenjHyon/heigthmapGenerator
|
/MountainGenaratorPOC.py
|
UTF-8
| 7,802
| 3.03125
| 3
|
[] |
no_license
|
import random, time
from PIL import Image, ImageFilter
import math
import os
import matplotlib.pyplot as plt
import sys
import numpy as np
#Make a realistic HeightMap of a mountain from the shape of a mountain from it's png value
#Known Issue: Little shape with 1 pixel wide area is not well defined
#to do:
count = 0
########SHAPER
#1 8 7
#2 x 6
#3 4 5
def GetTheIndex8(datah, current):
    """Return a dict mapping relative positions 0-7 (see layout above) to the
    flat index of each of `current`'s eight neighbours (-1 when off-map)."""
    return {pos: GetIndexFromCursorAndRelativPos(datah, current, pos)
            for pos in range(8)}
def GetIndexFromCursorAndRelativPos(datah, cursor, RelativPos, radius=1):
    """Return the flat index of the neighbour of `cursor` at `RelativPos`
    (0-7, counter-clockwise starting top-left per the module layout), scaled
    by `radius`; -1 when the candidate falls outside the map.

    Raises ValueError for a non-square map or an unknown position.
    NOTE(review): the lower bound is checked with > 0, so index 0 (top-left
    corner) is never returned — matches the original, confirm if intended.
    """
    size = math.sqrt(len(datah))
    sizeM = size * size
    if size != int(size):
        raise ValueError('map is not square !')
    size = int(size)
    # (offset, which bound to check): negative-direction moves guard the low
    # edge, positive-direction moves guard the high edge.
    moves = {
        0: (-size * radius - radius, 'low'),
        1: (-radius, 'low'),
        2: (size * radius - radius, 'high'),
        3: (size * radius, 'high'),
        4: (size * radius + radius, 'high'),
        5: (radius, 'high'),
        6: (-size * radius + radius, 'low'),
        7: (-size * radius, 'low'),
    }
    if RelativPos not in moves:
        raise ValueError('trying to acces to relative position {} but only 0 to 7 is supported'.format(RelativPos))
    offset, bound = moves[RelativPos]
    candidate = cursor + offset
    if bound == 'low':
        return candidate if candidate > 0 else -1
    return candidate if candidate < sizeM else -1
def FindNext(datah,elevM,current,prec = -1):
    """Pick the next contour pixel at elevation `elevM` adjacent to `current`.

    Chooses the neighbour pair with the largest elevation difference and
    returns whichever of the two sits at `elevM`; `prec` (the previously
    visited pixel) is excluded so the walk does not immediately backtrack.
    Returns `current` itself when no neighbour qualifies (single-pixel
    "lonely mountain" case — callers use this as a stop signal).
    """
    if prec == -1:# first call: no previous pixel to exclude
        indexOfAjdacent = GetTheIndex8(datah,current)
        diff = [0]*8
        # diff[k] = |elevation(neighbour k) - elevation(neighbour k+1)|,
        # wrapping from 7 back to 0 below.
        for k in range(7):
            if indexOfAjdacent[k] != -1 and indexOfAjdacent[k+1] != -1:
                diff[k] = abs(datah[indexOfAjdacent[k]]-datah[indexOfAjdacent[k+1]])
        if indexOfAjdacent[7] != -1 and indexOfAjdacent[0] != -1:
            diff[7]=abs(datah[indexOfAjdacent[7]]-datah[indexOfAjdacent[0]])
        maxInd = diff.index(max(diff))#now i have to decide which index to take between 2
        if maxInd == 7:
            # Wrap-around pair (7,0): only this case is resolved here; other
            # maxInd values fall through to the general pass below.
            if datah[indexOfAjdacent[0]] == elevM:
                return indexOfAjdacent[0]
            elif datah[indexOfAjdacent[7]] == elevM:
                return indexOfAjdacent[7]
            else:
                return current #when returning current we know we are on a lonely mountain
    # General pass: same scoring, but neighbours equal to `prec` are skipped.
    indexOfAjdacent = GetTheIndex8(datah,current)
    diff = [0]*8
    for k in range(7):
        if indexOfAjdacent[k] != -1 and indexOfAjdacent[k+1] != -1 and indexOfAjdacent[k] != prec and indexOfAjdacent[k+1] != prec:
            diff[k] = abs(datah[indexOfAjdacent[k]]-datah[indexOfAjdacent[k+1]])
    if indexOfAjdacent[7] != -1 and indexOfAjdacent[0] != -1:
        diff[7]=abs(datah[indexOfAjdacent[7]]-datah[indexOfAjdacent[0]])
    maxInd = diff.index(max(diff))#now i have to decide which index to take between 2
    if maxInd == 7:
        if datah[indexOfAjdacent[0]] == elevM:
            return indexOfAjdacent[0]
        elif datah[indexOfAjdacent[7]] == elevM:
            return indexOfAjdacent[7]
        else:
            return current #when returning current we know we are on a lonely mountain
    if datah[indexOfAjdacent[maxInd]] == elevM:
        return indexOfAjdacent[maxInd]
    elif datah[indexOfAjdacent[maxInd +1]] == elevM:
        return indexOfAjdacent[maxInd +1]
    else: return current
def loadImage(filename):
    """Load a PNG and return its pixels as a flat list of red-channel values.

    Only channel 0 is kept (grayscale heightmaps are expected).
    NOTE(review): a non-square image only prints an error — the data is
    still returned, and square-map checks elsewhere will raise later.
    """
    img = Image.open(filename)
    size = img.size[0]
    if size != img.size[1]:
        print("Error : Image is not square !")
    return [k[0] for k in list(img.getdata())]
def saveImage(datah,folder,name = 'default'):
    """Write a flat list of (r, g, b) triples as `folder`/ARDAP_`name`.png.

    The list length must be a perfect square (width == height == sqrt(len)).
    """
    size = math.sqrt(len(datah))
    size = int(size)
    img = Image.new('RGB',(size,size))
    # Normalize entries to plain tuples for PIL's putdata.
    datak = [(a,b,c) for a,b,c in datah]
    img.putdata(datak)
    img.save(folder+"/ARDAP_{}.png".format(name))
def GetFirstPixelOfMountain(datah, elevM):
    """Return the flat index of the first pixel at elevation `elevM`.

    Raises ValueError when no pixel matches.
    """
    for index, elevation in enumerate(datah):
        if elevation == elevM:
            return index
    raise ValueError('specified elevation is not found in that sample')
def MountainShape(datah, elevM, startingP):
    """Walk the contour of the mountain at elevation `elevM` starting from
    `startingP` and return the list of visited flat pixel indices.

    Side effect: writes one debug PNG per step into the hard-coded Debug
    folder, coloring the traced contour with a random color.
    NOTE(review): the module-level `count` is read but never incremented,
    so every debug image is written as step0 — confirm intended.
    """
    maxValue = 255 #255 for png, could be set to 511 if 2bytes format is supported
    mntShape = []
    # One random color per traced mountain so overlapping traces are distinguishable.
    debugRcolor = random.randrange(0,255)
    debugGcolor = random.randrange(0,255)
    debugBcolor = random.randrange(0,255)
    if datah[startingP] != elevM:
        raise ValueError('startingPoint is not set on good elevation : given {} expected {}'.format(datah[startingP],elevM))
    if elevM< 0 or elevM > maxValue:
        raise ValueError('Mountain level is set to unsupported elevation, must be between 0 and {}, gien {}'.format(maxValue,elevM))
    # Grayscale copy expanded to RGB for the debug renders.
    dataDebug = [[k,k,k] for k in datah]
    prec = startingP
    current = FindNext(datah,elevM,prec)
    mntShape +=[current]
    step = 1
    dataDebug[startingP][1]=255
    dataDebug[current][0]=255
    global count
    # Follow the contour until the walk closes back on the starting pixel.
    while(current != startingP):
        new = FindNext(datah,elevM,current,prec)
        prec = current
        current = new
        mntShape +=[current]
        if current == prec:#step back (FindNext found no forward move)
            current = prec
            prec = new
        step += 1
        # save here for debug:
        dataDebug[current] = [debugRcolor,debugGcolor,debugBcolor]
        saveImage(dataDebug,"C:/Users/BEN/Pictures/ARDA_Project/Debug","step{}".format(count))
    return mntShape
########FRACTALISER
def Average(datah, elevM, startingP):
    """Compute the bounding box (min/max row and column) of the mountain
    contour at elevation `elevM`.

    NOTE(review): work in progress — the bounding box is computed but never
    used or returned (the visualization tail below is commented out), and
    `datak` is a copy that shadows `datah` for the shape trace only.
    """
    size = int(math.sqrt(len(datah)))
    datak = [k for k in datah]
    mntShape = MountainShape(datak,elevM,startingP)
    RightestColomn = 0  # NOTE(review): unused
    # Min/max column of any contour pixel (position within its row).
    posInLineMin = size
    posInLineMax = 0
    for k in range(len(mntShape)):
        posInLine = mntShape[k]%size
        if posInLineMin > posInLine:
            posInLineMin = posInLine
        if posInLineMax < posInLine:
            posInLineMax = posInLine
    # Min/max row of any contour pixel.
    posInColMin = size
    posInColMax = 0
    for k in range(len(mntShape)):
        posInCol = mntShape[k]//size
        if posInColMin > posInCol:
            posInColMin = posInCol
        if posInColMax < posInCol:
            posInColMax = posInCol
    # dataIm = [[a,a,a] for a in datah]
    # dataIm[posInLineMin + size * posInColMin] = [255,255,255]
    # dataIm[posInLineMax + size * posInColMin] = [255,255,255]
    # dataIm[posInLineMin + size * posInColMax] = [255,255,255]
    # dataIm[posInLineMax + size * posInColMax] = [255,255,255]
    # saveImage(dataIm,"C:/Users/BEN/Pictures/ARDA_Project/Debug")
#######Main
# NOTE(review): the image is loaded twice (once for the data, once to find
# the starting pixel) — could be loaded once and reused.
Average(loadImage("C:/Users/BEN/Pictures/ARDA_Project/MntPOC2.png"),195,GetFirstPixelOfMountain(loadImage("C:/Users/BEN/Pictures/ARDA_Project/MntPOC2.png"),195))
# Roadmap (v = done, x = todo):
#Get the shape of the mountain v
#draw fractale line x
#randomnize fractale height x
#get down to the valley x
#sample of code that don't work yet:
# for k in range(len(mntShape)):
# datak[mntShape[k]] = 75
# while(1 == 1):
# indexNeighboors = GetTheIndex8(datak,startingP)
# for k in range(7):
# if datak[indexNeighboors[k]] == elevM and indexNeighboors[k] != -1:
# nStartingP = indexNeighboors[k]
# nStartingPvalue = datak[nStartingP]
# if nStartingP == startingP:
# print("done")
# return 0
# startingP = nStartingP
# mntShape = MountainShape(datak,elevM,nStartingP)
# for k in range(len(mntShape)):
# datak[mntShape[k]] = 75
| true
|
783ac452e8fe55f9913c59d91e37e44f28c5c0c1
|
Python
|
jackysz/CodinGame-3
|
/1. Easy/Python 3/Horse_Racing_Duals.py
|
UTF-8
| 129
| 2.8125
| 3
|
[] |
no_license
|
# Horse Racing Duals: read n strengths and print the smallest difference
# between any two of them (adjacent values after sorting).
n = int(input())
strengths = sorted(int(input()) for _ in range(n))
best = 10000000  # sentinel larger than any possible difference (also printed when n == 1)
for weaker, stronger in zip(strengths, strengths[1:]):
    best = min(best, stronger - weaker)
print(best)
| true
|
dad1e78a8ae4bd32656f1722c3a59e3e4a1d3323
|
Python
|
javierlara/health-insurance
|
/api/routes/resources/health_center.py
|
UTF-8
| 2,582
| 2.59375
| 3
|
[] |
no_license
|
from flask_restful import Resource
from flask import abort, make_response, request
import flask as f
import api.models as models
from datetime import datetime
from api.db import db_session as session
class HealthCenter(Resource):
    """REST resource for a single health center (GET / PUT / DELETE by id).

    Deletion is a soft delete (sets deleted_at).
    """
    @staticmethod
    def get_health_center(health_center_id):
        # NOTE(review): primary-key get() also returns soft-deleted rows —
        # confirm whether deleted centers should 404 here.
        return session.query(models.HealthCenter).get(health_center_id)
    @staticmethod
    def delete_health_center(health_center):
        # Soft delete: mark with a timestamp instead of removing the row.
        health_center.deleted_at = datetime.utcnow()
        session.commit()
    @staticmethod
    def update_health_center(health_center, data):
        health_center.update(data)
        session.commit()
    def get(self, health_center_id):
        """Return the serialized center, or 404 when the id is unknown."""
        health_center = self.get_health_center(health_center_id)
        if health_center is None:
            abort(404)
        return health_center.serialize()
    def put(self, health_center_id):
        """Update the center from the JSON body and flash a confirmation."""
        health_center = self.get_health_center(health_center_id)
        if health_center is None:
            abort(404)
        data = request.get_json()
        self.update_health_center(health_center, data)
        f.flash('El centro "' + health_center.name + '" fue editado con éxito')
        return health_center.serialize()
    def delete(self, health_center_id):
        """Soft-delete the center; returns an empty 200 response."""
        health_center = self.get_health_center(health_center_id)
        if health_center is None:
            abort(404)
        self.delete_health_center(health_center)
        f.flash('El centro "' + health_center.name + '" fue borrado con éxito')
        return make_response()
class HealthCenterCollection(Resource):
    """REST resource for the health-center collection (list + create)."""
    def get(self):
        """Return all non-deleted centers, serialized."""
        health_centers = self.get_all_health_centers()
        return [r.serialize() for r in health_centers]
    def post(self):
        """Create a center from the JSON body and flash a confirmation."""
        data = request.get_json()
        new_health_center = self.add_new_health_center(data)
        f.flash('El centro "' + new_health_center.name + '" fue creado con éxito')
        return new_health_center.serialize()
    @staticmethod
    def get_all_health_centers():
        # `== None` is intentional here: SQLAlchemy overloads == on columns
        # to render IS NULL (don't "fix" to `is None`).
        query = session.query(models.HealthCenter).filter(models.HealthCenter.deleted_at == None)
        return query.all()
    @staticmethod
    def add_new_health_center(data):
        # Missing JSON keys become None via dict.get — the model/DB defaults apply.
        health_center = models.HealthCenter(
            name=data.get('name'),
            address=data.get('address'),
            telephone=data.get('telephone'),
            location=data.get('location'),
            extradata=data.get('extradata'),
            plan_ids=data.get('plan_ids')
        )
        session.add(health_center)
        session.commit()
        return health_center
| true
|
1685524c521f680692d978930fc92d54b8f60285
|
Python
|
sfeng77/myleetcode
|
/evaluateReversePolishNotation.py
|
UTF-8
| 1,028
| 4.28125
| 4
|
[] |
no_license
|
# Evaluate the value of an arithmetic expression in Reverse Polish Notation.
#
# Valid operators are +, -, *, /. Each operand may be an integer or another expression.
#
# Some examples:
# ["2", "1", "+", "3", "*"] -> ((2 + 1) * 3) -> 9
# ["4", "13", "5", "/", "+"] -> (4 + (13 / 5)) -> 6
class Solution(object):
    def evalRPN(self, tokens):
        """Evaluate an arithmetic expression in Reverse Polish Notation.

        :type tokens: List[str] -- integer operands and the operators + - * /
        :rtype: int

        Division truncates toward zero (C-style), as the problem requires.
        Fix: the original relied on Python 2's integer `/` (floor division
        plus a +1 correction); under Python 3 `/` is float division, which
        broke both the result type and the truncation semantics.
        """
        stack = []

        def apply_op(a, b, op):
            # a, b are ints; op is one of '+', '-', '*', '/'.
            if op == '+':
                return a + b
            if op == '-':
                return a - b
            if op == '*':
                return a * b
            # Truncate toward zero: Python's // floors, so bump inexact
            # negative quotients up by one.
            q = a // b
            if q < 0 and q * b != a:
                q += 1
            return q

        for token in tokens:
            if token in ('+', '-', '*', '/'):
                b = stack.pop()
                a = stack.pop()  # a was pushed first: compute a <op> b
                stack.append(apply_op(a, b, token))
            else:
                stack.append(int(token))
        return stack.pop()
| true
|
9a35d756406f675a44249b0b013cf249861ae41b
|
Python
|
falyse/advent-of-code
|
/2015/20/main-2.py
|
UTF-8
| 1,857
| 3.453125
| 3
|
[] |
no_license
|
import math

# Advent of Code 2015, day 20 (part 2): elf e delivers 11 presents to houses
# e, 2e, ..., 50e. Find the first house receiving >= 34000000 presents;
# dividing through by 11 lets us work with plain divisor sums.
houses = {}
goal = 34000000 / 11


def meets_goal(i):
    """Print the answer and exit if house *i* collects at least `goal`.

    Only elves numbered >= ceil(i/50) still serve house i (an elf stops after
    its 50th multiple), hence the lower bound of the divisor scan. Returns
    False (after printing the score) when the goal is not met.
    """
    score = 0
    for e in range(math.ceil(i / 50), i + 1):
        if not i % e:  # e is a qualifying elf (divisor) for house i
            score += e
        if score >= goal:
            print('House', i, 'met goal with', score)
            exit(0)
    print(i, score)
    return False


def get_candidates(limit=873600):
    """Return sorted "smooth" candidate houses: prime-power products <= limit.

    Highly-divisible numbers are the only plausible winners, so enumerate
    products of primes below 50. Two fixes versus the original:
    - `13*i13` (multiplication) is corrected to exponentiation, which had
      zeroed out every candidate with i13 == 0 and produced non-power
      factors otherwise;
    - the 15 nested exponent loops (~5e11 iterations, intractable) are
      replaced by a depth-first enumeration that prunes any product > limit.
    """
    primes = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47)
    nums = set()

    def extend(idx, value):
        # Recurse over exponents of primes[idx:]; `value` is the product so far.
        if idx == len(primes):
            nums.add(value)
            return
        v = value
        while v <= limit:
            extend(idx + 1, v)
            v *= primes[idx]

    extend(0, 1)
    return sorted(nums)


# too high: 873600, 875160
for i in range(686880, 873600, 60):
    meets_goal(i)
print('No match')
| true
|
03a8edb3075f56aec1676e41f7494cb2c8f86dd7
|
Python
|
sshchicago/SSH-C-User-Management-Tools
|
/sshcldap.py
|
UTF-8
| 8,819
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/env python2.6
"""
sshcldap.py: A class of useful tools for working with LDAP servers.
Tested with 389 Directory Server against the SSH:Chicago user database;
probably works with other directories, but you'll have to try to find
out. :-)
"""
__author__ = "Christopher Swingler"
__copyright__ = "Copyright 2013, South Side Hackerspace Chicago"
__license__ = "tbd"
__email__ = "chris@chrisswingler.com"
__status__ = "Development"
import ldap
import string
import random
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
class sshcldap:
    """
    A class with LDAP tools for interacting with our repository.

    Wraps a bound python-ldap connection and provides user lookup,
    creation, (de)activation, group management and password resets
    against a 389 Directory Server.
    """

    BASEDN = "dc=sshchicago,dc=org"
    URL = "ldap://dir.sshchicago.org:389"

    # Group ladders: each tier also includes every group below it.
    STANDARD_USER_GROUPS = ['SSHC Members']
    PROTECTED_USER_GROUPS = ['Deactivation Protected Users'] + STANDARD_USER_GROUPS
    ADMIN_USER_GROUPS = ['Administrative Members'] + PROTECTED_USER_GROUPS
    OFFICER_USER_GROUPS = ['Officers'] + ADMIN_USER_GROUPS

    __lconn = None

    def __init__(self, uid, password, url=None, basedn=None):
        """
        Initializes the connection using uid and password.
        A fully qualified dn is constructed by combining uid and basedn.
        """
        if basedn is not None:
            self.BASEDN = basedn
        if url is not None:
            self.URL = url
        self.__lconn = ldap.initialize(self.URL)
        self.__lconn.simple_bind_s(self.__fquid(uid), password)

    def __del__(self):
        """
        Destructor; just unbinds the connection.

        Fixes two bugs in the original: the method was named ``__delete__``
        (part of the descriptor protocol, never invoked at teardown) and it
        referenced the bare name ``__lconn`` (a NameError hidden by a bare
        ``except``).
        """
        try:
            self.__lconn.unbind()
        except Exception:
            # Best effort -- the connection may never have been established,
            # or the server may already have dropped us.
            pass

    def __fquid(self, uid):
        """
        Returns the fully-qualified dn of a uid.
        """
        return "uid=%s,ou=people,%s" % (uid, self.BASEDN)

    def __genPass(self):
        """
        Generates a string of random characters to be used as
        a (temporary) password.

        NOTE(review): ``random`` is not cryptographically secure; consider
        the ``secrets`` module if these passwords are security-sensitive.
        """
        characters = string.ascii_letters + string.punctuation + string.digits
        password = "".join(random.choice(characters) for x in range(random.randint(8, 16)))
        return password

    def is_connection_valid(self):
        """
        Checks if the LDAP bind is working. Returns true/false.
        """
        # Ideally, we'd use python-ldap's whoami_s() to see who we are and if
        # we've bound, but 389 doesn't implement RFC 4532. Instead we do a
        # search, and if we get more than 1 result, consider it good.
        r = self.__lconn.search_s(self.BASEDN, ldap.SCOPE_SUBTREE, '(cn=*)', ['mail'])
        return len(r) > 1

    def find_user(self, uid):
        """
        Finds a user with the matching uid (substring match). Returns some
        information (mail, cn) about that user.
        """
        r = self.__lconn.search_s(self.BASEDN, ldap.SCOPE_SUBTREE, '(uid=*%s*)' % uid, ['mail', 'cn'])
        return r

    def find_user_by_email(self, email):
        """
        Finds a user with the matching email address (case knocked down).
        Returns true if they exist, false otherwise.
        """
        result = self.__lconn.search_s(self.BASEDN, ldap.SCOPE_SUBTREE, '(mail=%s)' % email, ['mail', 'cn'])
        return len(result) > 0

    def is_user_active(self, uid):
        """
        Returns if a user is active, or inactive.
        """
        # The active/inactive stuff is very much a 389 extension.
        # nsAccountLock = True when an account is inactive.
        # If it's false, or the attrib doesn't exist at all, the account is
        # active.
        r = self.__lconn.search_s(self.BASEDN, ldap.SCOPE_SUBTREE, '(uid=*%s*)' % uid, ['nsAccountLock'])
        logger.debug("%s's nsAccountLock is %s" % (uid, r))
        try:
            return not (bool(r[0][1]['nsAccountLock']))
        except KeyError:
            # Expected if the account is active (attribute absent).
            return True

    def create_user(self, givenName, sn, mail, uid=None, password=None):
        """
        Creates a user under basedn. If not specified, uid will be
        the first letter of givenName + sn (lower case), and
        password will be randomly generated.
        Returns a tuple containing (uid, password); password in cleartext.
        """
        if uid is None:
            # Create the UID from the initial + surname.
            uid = "%s%s" % (givenName.lower()[0], sn.lower())
        if password is None:
            password = self.__genPass()
        addDn = "uid=%s,ou=People,%s" % (uid, self.BASEDN)
        addModList = [('userPassword', password),
                      ('mail', mail), ("sn", sn), ("givenname", givenName),
                      ("cn", "%s %s" % (givenName, sn)),
                      ("objectclass", "top"), ("objectclass", "person"),
                      ("objectclass", "inetorgperson")]
        self.__lconn.add_s(addDn, addModList)
        return (uid, password)

    def delete_user(self, uid):
        """
        Deletes a user. Should not be used much, in favor of deactivate_user.
        """
        self.__lconn.delete_s("uid=%s,ou=People,%s" % (uid, self.BASEDN))

    def deactivate_user(self, uid):
        """
        Deactivates a user; preventing them from logging in. Leaves the entry
        in the database.
        """
        self.__lconn.modify_ext_s("uid=%s,ou=People,%s" % (uid, self.BASEDN), [(ldap.MOD_ADD, 'nsAccountLock', "True")])

    def activate_user(self, uid):
        """
        Activates a user (by deleting the related attribute).
        """
        self.__lconn.modify_ext_s("uid=%s,ou=People,%s" % (uid, self.BASEDN), [(ldap.MOD_DELETE, 'nsAccountLock', None)])

    def add_to_group(self, uid, group):
        """
        Adds the uid to a single group.
        """
        fquid = "uid=%s,ou=People,%s" % (uid, self.BASEDN)
        fqgrpid = "cn=%s,ou=Groups,%s" % (group, self.BASEDN)
        self.__lconn.modify_ext_s(fqgrpid, [(ldap.MOD_ADD, 'uniqueMember', fquid)])

    def add_to_groups(self, uid, groups):
        """
        Adds a UID to the list of groups in `groups`.
        """
        for group in groups:
            self.add_to_group(uid, group)

    def set_standard_user_groups(self, uid):
        """
        Adds the uid to the list of standard groups defined by
        STANDARD_USER_GROUPS
        """
        self.add_to_groups(uid, self.STANDARD_USER_GROUPS)

    def set_admin_user_groups(self, uid):
        """
        Adds the uid to the list of groups defined by
        ADMIN_USER_GROUPS
        """
        self.add_to_groups(uid, self.ADMIN_USER_GROUPS)

    def set_officer_user_groups(self, uid):
        """
        Adds the uid to the list of groups defined by
        OFFICER_USER_GROUPS
        """
        self.add_to_groups(uid, self.OFFICER_USER_GROUPS)

    def reset_password(self, uid):
        """
        Sets the password of uid to a random value. Returns new password.
        """
        newpass = self.__genPass()
        # passwd_s() was avoided here; a plain attribute replace works on 389.
        self.__lconn.modify_ext_s("uid=%s,ou=People,%s" % (uid, self.BASEDN), [(ldap.MOD_REPLACE, 'userPassword', newpass)])
        return newpass

    def list_people(self):
        """
        Returns a list of all members of the ou=People ou.
        """
        logger.debug("Going to search (\"ou=People,%s\", scope=ldap.SCOPE_SUBTREE, filterstr='(objectClass=person)'" % self.BASEDN)
        people = self.__lconn.search_s("ou=People,%s" % (self.BASEDN), scope=ldap.SCOPE_SUBTREE, filterstr='(objectClass=person)')
        logger.debug("People is %s | %s | %s" % (type(people), len(people), people))
        logger.debug("People contains %s members" % len(people))
        return people

    def is_member_of_group(self, uid, groupCn):
        """
        Returns true if uid is a member of groupCn,
        otherwise return false.
        """
        if type(uid) == list:
            uid = uid[0]
        uid = "uid=%s" % uid
        groupItems = self.__lconn.search_s("ou=Groups,%s" % (self.BASEDN), scope=ldap.SCOPE_SUBTREE, filterstr='(&(objectClass=groupOfUniqueNames)(cn~=%s))' % groupCn)
        logger.debug("Members of %s are: %s" % (groupCn, groupItems))
        # (debug pprint dump removed -- leftover development cruft)
        for uniqueMember in groupItems[0][1]['uniqueMember']:
            logger.debug("Testing if %s == %s" % (uid, uniqueMember))
            if uniqueMember.find(uid) > -1:
                logger.debug("Found %s in %s" % (uid, uniqueMember))
                return True
        return False
| true
|
252d1d245db30f917ca605af89d743df8b6653a8
|
Python
|
Bharath2/Informed-RRT-star
|
/PathPlanning/rrt.py
|
UTF-8
| 4,139
| 3.03125
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as Axes3D
from .rrtutils import *
class RRT:
    """Rapidly-exploring Random Tree path planner.

    Works in any dimension fixed by the start point. `Map` must provide
    `bounds`, `collision(p, q)` and `plotobs(ax)`; `Node`/`Rtree` come from
    rrtutils -- assumed: `start`/`goal` are 1-D numpy arrays (TODO confirm).
    """

    def __init__(self, start, goal, Map,
                 max_extend_length=5.0,
                 path_resolution=0.5,
                 goal_sample_rate=0.05,
                 max_iter=100):
        # NOTE(review): path_resolution is accepted but never stored or used.
        self.start = Node(start)
        self.goal = Node(goal)
        self.max_extend_length = max_extend_length
        self.goal_sample_rate = goal_sample_rate
        self.max_iter = max_iter
        # start.shape[0] fixes the planning dimension for sampling/indexing.
        self.dim = start.shape[0]
        self.tree = Rtree(self.dim)
        self.map = Map

    def plan(self):
        """Plans the path from start to goal while avoiding obstacles"""
        self.tree.add(self.start)
        for i in range(self.max_iter):
            # Generate a random node (rnd_node)
            rnd_node = self.get_random_node()
            # Get nearest node (nearest_node)
            nearest_node = self.tree.nearest(rnd_node)
            # Get new node (new_node) by connecting
            new_node = self.steer(nearest_node, rnd_node)
            # If the path between new_node and the nearest node is not in collision
            if not self.map.collision(nearest_node.p, new_node.p):
                self.tree.add(new_node)
                # If the new_node is very close to the goal, connect it
                # directly to the goal and return the final path
                if self.dist(new_node, self.goal) <= self.max_extend_length:
                    if not self.map.collision(new_node.p, self.goal.p):
                        self.goal.parent = new_node
                        return self.final_path()
        # cannot find path within max_iter samples
        return None

    @staticmethod
    def dist(from_node, to_node):
        # Euclidean distance between the two node positions.
        return np.linalg.norm(from_node.p - to_node.p)

    def steer(self, from_node, to_node):
        """Connects from_node to a new_node in the direction of to_node
        with maximum distance max_extend_length
        """
        dist = self.dist(from_node, to_node)
        # Rescale the path to the maximum extend_length.
        # NOTE: to_node is mutated in place and becomes the returned new node.
        if dist > self.max_extend_length:
            diff = from_node.p - to_node.p
            to_node.p = from_node.p - diff / dist * self.max_extend_length
        to_node.parent = from_node
        return to_node

    def sample(self):
        # Sample a uniformly random point inside the map boundaries.
        lower, upper = self.map.bounds
        return lower + np.random.rand(self.dim) * (upper - lower)

    def get_random_node(self):
        """Sample random node inside bounds or sample goal point"""
        # With probability goal_sample_rate, bias the search toward the goal.
        if np.random.rand() > self.goal_sample_rate:
            rnd = self.sample()
        else:
            rnd = self.goal.p
        return Node(rnd)

    def final_path(self):
        """Compute the final path from the goal node to the start node"""
        path = []
        node = self.goal
        # Skip the goal if it coincides with its parent (avoids a duplicate
        # waypoint when steer() landed exactly on the goal position).
        if (node.p == node.parent.p).all():
            node = node.parent
        while node.parent:
            path.append(node.p)
            node = node.parent
        path.append(self.start.p)
        # Reverse so the path runs start -> goal.
        return np.array(path[::-1])

    def draw_graph(self, ax):
        '''plot the whole graph'''
        for node in self.tree.all():
            if node.parent:
                xy = np.c_[node.p, node.parent.p]
                ax.plot(*xy, "-", color=(0.2, 0.9, 0.2, 0.9), zorder=5)

    def draw_path(self, ax, path):
        '''draw the path if available'''
        if path is None:
            print("path not available")
        else:
            ax.plot(*np.array(path).T, '-', color=(0.9, 0.2, 0.5, 0.8), zorder=5)

    def draw_scene(self, path=None, ax=None, graph=False):
        '''draw the whole scene'''
        if ax is None:
            fig = plt.figure()
            if self.dim == 3:
                ax = Axes3D.Axes3D(fig)
            elif self.dim == 2:
                ax = plt.axes()
            else:
                print('cannot plot for current dimensions')
                return
        # NOTE(review): this scatter call assumes `path` is a non-None array;
        # calling draw_scene() without a path raises -- confirm intended use.
        ax.scatter(*path.T)
        if graph:
            self.draw_graph(ax)
        self.draw_path(ax, path)
        self.map.plotobs(ax)
        plt.show()
| true
|
1687e5dbb72875a871c080acf17e12c48b71e02a
|
Python
|
aaronbae/competitive
|
/kickstart/mural.py
|
UTF-8
| 617
| 3.5
| 4
|
[] |
no_license
|
import math
def solve(mural):
    """Return the best total beauty of any ceil(n/2)-length contiguous window.

    Classic sliding window: seed with the leftmost window, then slide one
    position at a time, removing the leaving digit and adding the entering one.
    """
    window = math.ceil(len(mural) / 2)
    best = current = sum(mural[:window])
    # zip pairs each digit that leaves the window with the one that enters it.
    for leaving, entering in zip(mural, mural[window:]):
        current += entering - leaving
        best = max(best, current)
    return best
# Standard input reading scheme (Kickstart format):
# first line = number of test cases, then per case a length line followed by
# the mural digits as one undelimited string.
t = int(input())  # read a line with a single integer
for i in range(1, t + 1):
    N = int(input())  # declared length of the mural (unused beyond parsing)
    mural = list(map(int, list(input())))  # split digit string into ints
    val = solve(mural)
    print("Case #{}: {}".format(i, val))
| true
|
e45ee52942e991575b649de8e9010016685fdfdd
|
Python
|
westcoj/PythonZork
|
/Weapon.py
|
UTF-8
| 2,894
| 3.796875
| 4
|
[] |
no_license
|
"""
* The following module contains the weapon classes for the game that the
* player uses to attack monsters
*
* @author Cody West
* @version Zork Clone
* @date 11/08/2017
"""
import random
class Weapon(object):
    '''
    Base class holding the shared state and accessors for every weapon:
    a display name, an attack value and a remaining-use counter.
    '''

    def __init__(self, name, atk, uses):
        '''
        Store the weapon's attributes.

        Args:
            name: Type of weapon
            atk: Attack value of weapon
            uses: Number of times a weapon can be used
        '''
        self.__name = name
        self.__attack = atk
        self.__uses = uses

    def getName(self):
        """Return the weapon type."""
        return self.__name

    def damage(self):
        """Return the weapon's attack value."""
        return self.__attack

    def getUses(self):
        """Return how many uses remain."""
        return self.__uses

    def setUse(self):
        """Consume one use of the weapon."""
        self.__uses -= 1

    def __str__(self, *args, **kwargs):
        """String form is simply the weapon type."""
        return self.__name

    def print(self):
        """Return the weapon type (print-statement experiment helper)."""
        return self.__name
class Kisses(Weapon):
    '''
    The player's default weapon; -1 uses means it never runs out.
    '''

    def __init__(self):
        '''Build a Hershey's Kisses weapon with a fixed 1.0 attack.'''
        super().__init__("Hershey's Kisses", 1.0, -1)


class Straws(Weapon):
    '''
    Light weapon usable twice.
    '''

    def __init__(self):
        '''Build Sour Straws with a random attack drawn from [1.0, 1.75].'''
        super().__init__("Sour Straws", random.uniform(1.0, 1.75), 2)


class Bars(Weapon):
    '''
    Mid-tier weapon usable four times.
    '''

    def __init__(self):
        '''Build Chocolate Bars with a random attack drawn from [2.0, 2.4].'''
        super().__init__("Chocolate Bars", random.uniform(2.0, 2.4), 4)


class Bombs(Weapon):
    '''
    Heavy single-use weapon.
    '''

    def __init__(self):
        '''Build Nerd Bombs with a random attack drawn from [3.5, 5.0].'''
        super().__init__("Nerd Bombs", random.uniform(3.5, 5.0), 1)
| true
|
4c69c1b9be3ae7d23d4601a5e7501c6905fca722
|
Python
|
NataliaDiaz/BrainGym
|
/detect-feature-kernel.py
|
UTF-8
| 2,387
| 3.921875
| 4
|
[] |
no_license
|
"""
# Feature detection in a image with:
- a convolution kernel,
- a threshold and
- image
as input. The convolution kernel is a square K x K real matrix denoted by k(i,j),
the threshold is a real number, and the image is given as an RxC real matrix with
elements between 0 and 1, with the pixel in row r and column c given by p(r,c)
where r in [0,R) and c in [0,C). We say theat there is a feature at position (r,c)
if SUM (k(i,j) p(r+i, c+j)) > T
However, the kernel is only valid if it is not overflowing the image, therefore,
if the kernel is overflowing the image, we do not want to detect a feature.
Input:
2 0.65 3 4 # K, T, R and C where all are integers except T which is a real number
-1.0 1.0 # kernel values (KxK)
1.0 0.0
0.0 0.1 0.2 0.3 # image values (RxC)
0.4 0.5 0.6 0.7
0.8 0.9 0.0 0.1
Output:
0 2 # the positions of the detected features (r, c), one per line
1 0
1 1
"""
def detect_features_with_convolution_kernel_and_threshold():#kernel, T, R, C):
    """Read K, T, R, C, a KxK kernel and an RxC image from stdin, then print
    every (row, col) whose kernel response exceeds T.

    NOTE: this module is Python 2 code (raw_input, print statements).
    """
    # Header line: kernel size, threshold, image rows, image columns.
    K,T,R,C = str(raw_input()).strip().split()
    K = int(K)
    T = float(T)
    R = int(R)
    C = int(C)
    #print "K,T,R,C : ",K,T,R,C
    kernel = []
    # read kernel
    # NOTE(review): `kernel_row` is reused both as the loop counter from
    # range(K) and as the row list being built -- works, but confusing.
    for kernel_row in range(K):
        kernel_row = []
        for value in (raw_input()).split():#strip().split()
            kernel_row.append(float(value))
        kernel.append(kernel_row)
    #print "Kernel: \n",kernel
    # read image
    img = []
    for image_row in range(R):
        image_row = []
        for value in (raw_input()).split():#strip().split()
            image_row.append(float(value))
        img.append(image_row)
    #print "Image: \n",img
    # Test every anchor position; overflow handling is delegated to
    # feature_is_present_in_pos.
    features = []
    for r in range(R):
        for c in range(C): #while kernel_start_r < (R-K) and kernel_start_c < (C-K):
            if feature_is_present_in_pos(kernel, img, r, c, K, T, R, C):
                features.append((r, c))
    print_features(features)
def feature_is_present_in_pos(kernel, img, r, c, K, T, R, C):
    """Return True when the K x K kernel response anchored at (r, c) exceeds T.

    A position where the kernel would overflow the image never contains a
    feature. The original fell through to the `suma > T` test with suma == 0
    in the overflow case, wrongly reporting a feature whenever T < 0; we now
    reject overflowing anchors outright (the bounds test is inlined so the
    fix is self-contained).
    """
    if r + K > R or c + K > C:
        return False
    suma = 0
    for i in range(K):
        for j in range(K):
            suma += kernel[i][j] * img[r + i][c + j]
    return suma > T
def kernel_overflows_img(r, c, K, R, C):
    """Return True when a K x K kernel anchored at (r, c) exits the R x C image."""
    return (r + K) > R or (c + K) > C
def print_features(features):
    """Print one 'row  col' line per detected feature (Python 2 print)."""
    for (r,c) in features:
        print r, " ", c


# Script entry point: read kernel/image from stdin and report all features.
detect_features_with_convolution_kernel_and_threshold()
| true
|
64768a811ec5d3062f8d35e632e63f5c18b2da74
|
Python
|
Swati1-ud/Data_Practice
|
/5_Tree/Post-order_Traversal_Of_Binary_Tree.py
|
UTF-8
| 540
| 3.203125
| 3
|
[] |
no_license
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """Recursive post-order (left, right, root) traversal of a binary tree."""

    def postOrder(self, root):
        """
        input: TreeNode root
        return: Integer[]
        """
        collected = []
        self.help(root, collected)
        return collected

    def help(self, root, res):
        """Append the subtree's values to `res` in post-order."""
        if root is None:
            return
        self.help(root.left, res)
        self.help(root.right, res)
        res.append(root.val)
| true
|
e94885e3146a956f9e5b62b186aee295cba17957
|
Python
|
DivyaJyotiDas/Trillio_Files_Operations
|
/main/tests/snapshot/test_smart_backup.py
|
UTF-8
| 1,399
| 2.703125
| 3
|
[] |
no_license
|
import os, sys, pytest, mock
from mock import mock_open, MagicMock
from main.main_lib.snapshot.smart_backup import md5, chunks_md5, size_of_file, num_process_creation
@pytest.fixture(scope='module')
def ret_file_path():
    """Absolute path of a dummy fixture file (the file need not exist)."""
    file = os.path.abspath('dummy_test_file')
    return file


@mock.patch('builtins.open', new_callable=mock_open, read_data=b'This\nis\nmocked\nfile\n')
def test_md5_when_fname_is_provided_should_return_hexdigit(m, ret_file_path):
    # md5() should hash the mocked file contents and open the path in 'rb'.
    assert md5(ret_file_path) == '4d16ffe4859ea62d591cb936e448e47d'
    assert m.assert_called_with(ret_file_path, 'rb') is None


def test_chunks_md5_when_str_is_provided_shoud_return_hexdigit(ret_file_path):
    # chunks_md5() hashes an in-memory bytes chunk directly.
    assert chunks_md5(b'This string need to be tested.') == '1dc81443e1b15f9afeea97b54d698e85'


def test_size_of_file_when_provided_should_return_file_size(ret_file_path):
    # size_of_file() apparently reports whatever seek() returns -- here 20.
    m = MagicMock()
    m.seek.return_value = 20
    assert size_of_file(m) == 20


@pytest.mark.parametrize('input, expected', [(2 ** 10, pow(2, 0)),
                                             (2 ** 20, pow(2, 1)),
                                             (2 ** 30, pow(2, 2)),
                                             ])
def test_num_process_creation_when_size_is_kb_returns_one(input, expected):
    # Worker count doubles per size order of magnitude (KB/MB/GB) --
    # TODO confirm intended scaling against smart_backup implementation.
    assert num_process_creation(input) == expected
| true
|
02f21a048596644cdfc3b43538e202e33cb00f5e
|
Python
|
10elements/leetcode
|
/reverseWords.py
|
UTF-8
| 227
| 3.53125
| 4
|
[] |
no_license
|
def reverseWords(s):
    """Return `s` with its whitespace-separated words in reverse order.

    str.split() with no argument already drops leading/trailing whitespace
    and collapses runs of spaces, so the original `strip()` call and the
    `word != ' '` filter were redundant (split() can never yield ' ').
    """
    return ' '.join(reversed(s.split()))
def main():
    """Demo driver: reverse the words of a sample string, then call b()."""
    s = " a b "
    print(reverseWords(s))
    b()


def b():
    """Print the literal marker 'b' (demo helper)."""
    print('b')


if __name__ == '__main__':
    main()
| true
|
4becf20eac58eb816ac1a3913166d015c218745c
|
Python
|
jin-james/code_lbj
|
/parse_word/readDocx3th.py
|
UTF-8
| 6,400
| 2.546875
| 3
|
[] |
no_license
|
import docx
from win32com import client
import re
from bs4 import BeautifulSoup
import os
'''
试卷按顺序的题型放入style中
'''
# (Translation: the paper's question types, in order, are placed in `style`.)
# Maps the 0-based section index (as a string key) to its question type.
style = {
    '0': 'single',
    '1': 'multiple',
    '2': 'judgment',
    '3': 'fillin',
    '4': 'subjective',
    '5': 'english'
}
def read4word(file):
    '''
    Parse a .docx exam paper: find the section-header and terminator
    paragraph numbers, then convert the document to HTML and extract
    the questions.

    :param file: the incoming .docx file, opened in binary mode
    :return: None
    '''
    doc = docx.Document(file)
    end_para_no, QueStyle_para_no = read_doc4para_no(doc)
    # print(QueStyle_para_no, end_para_no)
    word2html(file, end_para_no, QueStyle_para_no)
def read_doc4para_no(doc):
    """Scan the document's paragraphs and collect 1-based paragraph numbers.

    Returns (end_para_no, QueStyle_para_no): the first list holds positions
    of every terminator paragraph, the second the positions of each
    section-header paragraph (single/multiple/judgment/fill-in/subjective/
    English "example" headers).
    """
    end_para_no = []
    QueStyle_para_no = []
    headers = ("单选题示例", "多选题示例", "判断题示例",
               "填空题示例", "主观题示例", "英语题示例")
    for number, paragraph in enumerate(doc.paragraphs, start=1):
        text = paragraph.text
        # A header regex anchors at the start of the paragraph text; the six
        # header strings are mutually exclusive, so at most one can match.
        if any(re.compile(h).match(text) for h in headers):
            QueStyle_para_no.append(number)
        if text == "【结束】":
            end_para_no.append(number)
    return end_para_no, QueStyle_para_no
def word2html(file, end_para_no, QueStyle_para_no):
    """Convert the Word document to HTML via COM automation, then walk the
    HTML paragraphs section by section extracting question / options /
    answer / analysis fields delimited by the 【题文】/【选项】/【答案】/
    【解析】/【结束】 markers.

    Windows-only (requires Word via win32com). Parsed questions are printed
    and accumulated in the local `Questions` list (never returned --
    presumably leftover from development; confirm with callers).
    """
    Questions = []
    word = client.Dispatch('Word.Application')
    # Run Word in the background: no window, no alert dialogs.
    word.Visible = 0
    word.DisplayAlerts = 0
    doc = word.Documents.Open(file)
    # Save-as format 10; with wdFormatFilteredHTML the formula images would
    # be stored as gif instead.
    doc.SaveAs('\\tmp\\html.html', 10)
    doc.Close()
    word.Quit()
    file_path = '\\tmp\\html.html'
    # NOTE(review): the file handle is never closed; consider a `with` block.
    htmlfile = open(file_path, 'r', encoding='gb2312' or 'utf-8')
    htmlhandle = htmlfile.read()
    soup = BeautifulSoup(htmlhandle, 'lxml')
    style_no = 0
    # Either an <img> tag, or the pieces of a <span>...</span> run.
    patt = re.compile(r'<img.*?src=".*?\.(?:jpg|jpeg|gif|bmp|png)">|(<span).*?(>)(.*?)(</span>)', re.S)
    # Option extractors: text between the A./B./C./D. labels.
    re_a = re.compile(r'<span>A\.(.*?)B', re.S | re.M)
    re_b = re.compile(r'B\.(.*?)C', re.S | re.M)
    re_b_judgement = re.compile(r'B\.(.*?)</p>', re.S | re.M)
    re_c = re.compile(r'C\.(.*?)D', re.S | re.M)
    re_d = re.compile(r'D\.(.*?)</p>', re.S | re.M)
    # First pass: every section except the last (bounded by the next header).
    for p in range(len(QueStyle_para_no) - 1):
        i = 1
        question = ""
        option = {}
        answer = ""
        analysis = ""
        for item in soup.find_all('p'):
            para_str = ""
            if QueStyle_para_no[p] <= i < QueStyle_para_no[p + 1]:
                # Rebuild the paragraph from its span/img fragments.
                group = patt.findall(str(item))
                for g in group:
                    span_str = g[0] + g[1] + g[2] + g[3]
                    para_str += span_str
                para_str = "<p>" + para_str + "</p>"
                para_str = para_str.replace("\n", "")
                if "【题文】" in para_str:
                    question = para_str.replace("<span>【题文】</span>", "")
                if "【选项】" in para_str:
                    if style['%s' % str(p)] == 'judgment':
                        # Judgment questions only carry options A and B.
                        opt = para_str.replace("<span>【选项】</span>", "")
                        A = re_a.findall(opt)
                        B = re_b_judgement.findall(opt)
                        option['A'] = str(A[0]).replace('<span>', '').replace('</span>', '')
                        option['B'] = str(B[0]).replace('<span>', '').replace('</span>', '')
                    else:
                        opt = para_str.replace("<span>【选项】</span>", "")
                        A = re_a.findall(opt)
                        B = re_b.findall(opt)
                        C = re_c.findall(opt)
                        D = re_d.findall(opt)
                        option['A'] = str(A[0]).replace('<span>', '').replace('</span>', '')
                        option['B'] = str(B[0]).replace('<span>', '').replace('</span>', '')
                        option['C'] = str(C[0]).replace('<span>', '').replace('</span>', '')
                        option['D'] = str(D[0]).replace('<span>', '').replace('</span>', '')
                if "【答案】" in para_str:
                    answer = para_str.replace("<span>【答案】</span>", "")
                if "【解析】" in para_str:
                    analysis = para_str.replace("<span>【解析】</span>", "")
                if "【结束】" in para_str:
                    # End marker: emit the accumulated question and reset.
                    mm = {}
                    if style['%s' % str(p)] == 'subjective':
                        mm['type'] = style['%s' % str(p)]
                        mm['question'] = question
                        mm['answer'] = answer
                        mm['analysis'] = analysis
                        print(mm)
                        Questions.append(mm)
                        question = ""
                        option = ""
                        answer = ""
                        analysis = ""
                    else:
                        mm['type'] = style['%s' % str(p)]
                        mm['question'] = question
                        mm['option'] = option
                        mm['answer'] = answer
                        mm['analysis'] = analysis
                        print(mm)
                        Questions.append(mm)
                        question = ""
                        option = ""
                        answer = ""
                        analysis = ""
            i += 1
        style_no = p
    # Second pass: the final section, bounded by the last end marker.
    question = ""
    option = {}
    answer = ""
    analysis = ""
    i = 1
    for item in soup.find_all('p'):
        para_str = ""
        if QueStyle_para_no[-1] <= i <= end_para_no[-1]:
            group = patt.findall(str(item))
            for g in group:
                span_str = g[0] + g[1] + g[2] + g[3]
                para_str += span_str
            para_str = "<p>" + para_str + "</p>"
            para_str = para_str.replace("\n", "")
            if "【题文】" in para_str:
                question = para_str.replace("<span>【题文】</span>", "")
            if "【选项】" in para_str:
                opt = para_str.replace("<span>【选项】</span>", "")
                A = re_a.findall(opt)
                B = re_b.findall(opt)
                C = re_c.findall(opt)
                D = re_d.findall(opt)
                option['A'] = str(A[0]).replace('<span>', '').replace('</span>', '')
                option['B'] = str(B[0]).replace('<span>', '').replace('</span>', '')
                option['C'] = str(C[0]).replace('<span>', '').replace('</span>', '')
                option['D'] = str(D[0]).replace('<span>', '').replace('</span>', '')
            if "【答案】" in para_str:
                answer = para_str.replace("<span>【答案】</span>", "")
            if "【解析】" in para_str:
                analysis = para_str.replace("<span>【解析】</span>", "")
            if "【结束】" in para_str:
                mm = {}
                # NOTE(review): `== 'subjective' or 'english'` is always
                # truthy (precedence), so the else branch below is dead code;
                # the intent was likely `in ('subjective', 'english')`.
                if style['%s' % str(style_no+1)] == 'subjective' or 'english':
                    mm['type'] = style['%s' % str(style_no+1)]
                    mm['question'] = question
                    mm['answer'] = answer
                    mm['analysis'] = analysis
                    print(mm)
                    Questions.append(mm)
                    question = ""
                    option = ""
                    answer = ""
                    analysis = ""
                else:
                    mm['type'] = style['%s' % str(style_no+1)]
                    mm['question'] = question
                    mm['option'] = option
                    mm['answer'] = answer
                    mm['analysis'] = analysis
                    print(mm)
                    Questions.append(mm)
                    question = ""
                    option = ""
                    answer = ""
                    analysis = ""
        i += 1
    # print(Questions)
if __name__ == '__main__':
    # Bug fix: the original called read4word(file) with `file` undefined
    # (NameError under Python 3). Restore the commented-out source path;
    # read4word() forwards it to docx.Document(), which accepts a path.
    path = 'C:\\Users\\j20687\\Desktop\\demo.docx'
    # html_path = 'C:\\Users\\j20687\\Desktop\\HTML'
    read4word(path)
| true
|
b93202eedaa6cc4132009afc3ff0aaf0d4c4d21a
|
Python
|
i7677181/mars_rover
|
/test_solution.py
|
UTF-8
| 1,247
| 3.09375
| 3
|
[] |
no_license
|
import unittest
from solution import go
class TestSolution(unittest.TestCase):
    """ A bunch of test cases for our solution """

    def test_solution_example(self):
        """Testing the example input"""
        # Rover stays put: no command line at all.
        assert '0 0 N' == go(
            # no movement
            '''1 1
0 0 N
'''
        )
        # Both rovers of the classic Mars Rover example, one output per line.
        assert '1 3 N\n5 1 E' == go(
            # the test example
            '''5 5
1 2 N
LMLMLMLMM
3 3 E
MMRMMRMRRM
'''
        )
        assert '1 3 N' == go(
            # the first test case of the test example
            '''5 5
1 2 N
LMLMLMLMM'''
        )
        assert '5 1 E' == go(
            # the second test case of the test example
            '''5 5
3 3 E
MMRMMRMRRM
'''
        )

    def test_out_of_bounds(self):
        """Testing incorrect input"""
        # NOTE(review): hand-rolled try/except/else; unittest's
        # assertRaises(IndexError) would express the same intent directly.
        try:
            go(
                # an example that will go out of bounds
                '''5 5
3 3 E
MMMMM'''
            )
        except IndexError:
            print('invalid input, out of bounds exception caught')
        else:
            raise RuntimeError("out of bounds not detected")
        try:
            go(
                # an example that places the robot outside of the map
                '''0 0
1 1 E
'''
            )
        except IndexError:
            print('invalid input, out of bounds exception caught')
        else:
            raise RuntimeError("out of bounds not detected")


if __name__ == '__main__':
    unittest.main()
| true
|
8aecd6ad500908e431f26ea8b21e8df39e10dbe7
|
Python
|
kivy/pyjnius
|
/jnius/signatures.py
|
UTF-8
| 2,766
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
'''
signatures.py
=============
A handy API for writing JNI signatures easily
Author: chrisjrn
This module aims to provide a more human-friendly API for
wiring up Java proxy methods in PyJnius.
You can use the signature function to produce JNI method
signautures for methods; passing PyJnius JavaClass classes
as return or argument types; provided here are annotations
representing Java's primitive and array times.
Methods can return just a standard primitive type:
>>> signature(jint, ())
'()I'
>>> s.signature(jvoid, [jint])
'(I)V'
Or you can use autoclass proxies to specify Java classes
for return types.
>>> from jnius import autoclass
>>> String = autoclass("java.lang.String")
>>> signature(String, ())
'()Ljava/lang/String;'
'''
__version__ = '0.0.1'
from . import JavaClass
from . import java_method
''' Type specifiers for primitives '''
class _JavaSignaturePrimitive(object):
_spec = ""
def _MakeSignaturePrimitive(name, spec):
class __Primitive(_JavaSignaturePrimitive):
''' PyJnius signature for Java %s type ''' % name
_name = name
_spec = spec
__Primitive.__name__ = "j" + name
return __Primitive
# Concrete helpers for each JVM primitive and its one-letter JNI type spec.
jboolean = _MakeSignaturePrimitive("boolean", "Z")
jbyte = _MakeSignaturePrimitive("byte", "B")
jchar = _MakeSignaturePrimitive("char", "C")
jdouble = _MakeSignaturePrimitive("double", "D")
jfloat = _MakeSignaturePrimitive("float", "F")
jint = _MakeSignaturePrimitive("int", "I")
jlong = _MakeSignaturePrimitive("long", "J")
jshort = _MakeSignaturePrimitive("short", "S")
jvoid = _MakeSignaturePrimitive("void", "V")
def JArray(of_type):
    ''' Signature helper for identifying arrays of a given object or
    primitive type. '''
    # A JNI array spec is simply '[' prepended to the element's spec.
    return _MakeSignaturePrimitive("array", "[" + _jni_type_spec(of_type))
def with_signature(returns, takes):
    ''' Alternative version of @java_method that takes JavaClass
    objects to produce the method signature. '''
    # Build the JNI signature string, then delegate to the decorator factory.
    sig = signature(returns, takes)
    return java_method(sig)
def signature(returns, takes):
    ''' Produces a JNI method signature, taking the provided arguments
    and returning the given return type. '''
    arg_specs = "".join(_jni_type_spec(arg) for arg in takes)
    return "(%s)%s" % (arg_specs, _jni_type_spec(returns))
def _jni_type_spec(jclass):
    ''' Produces a JNI type specification string for the given argument.
    If the argument is a jnius.JavaClass, it produces the JNI type spec
    for the class. Signature primitives return their stored type spec.

    Raises TypeError for anything else: the original fell off the end and
    returned None, which callers silently concatenated as the string "None",
    producing corrupt signatures.
    '''
    if issubclass(jclass, JavaClass):
        return "L" + jclass.__javaclass__ + ";"
    elif issubclass(jclass, _JavaSignaturePrimitive):
        return jclass._spec
    raise TypeError("cannot produce a JNI type spec for %r" % (jclass,))
| true
|
8664dd494208e6f124e6758513927ace632cebb1
|
Python
|
hi0t/Outtalent
|
/Leetcode/564. Find the Closest Palindrome/solution1.py
|
UTF-8
| 624
| 3
| 3
|
[
"MIT"
] |
permissive
|
class Solution:
    def nearestPalindromic(self, n: str) -> str:
        """Return the palindrome closest to the integer string `n`,
        excluding `n` itself; ties break toward the smaller value.

        Bug fix: the original used the bare name `inf`, a NameError outside
        environments that pre-import math wholesale; float('-inf') is used
        as the sentinel instead (its key sorts worse than any candidate).
        """
        evenPal = lambda sp: int(sp + sp[::-1])
        oddPal = lambda sp: int(sp + sp[::-1][1:])
        sn, n = n, int(n)
        if len(sn) == 1:
            return str(n - 1)
        ans = float('-inf')
        mid = len(sn) // 2
        # Candidate prefixes: left half, left half plus middle digit, and the
        # half scaled by 10 -- the last presumably covers digit-count changes
        # like 99 -> 101 (generated via the p+1 perturbation below).
        for sp in sn[:mid], sn[:mid + 1], str(int(sn[:mid]) * 10):
            p = int(sp)
            for pal in evenPal, oddPal:
                for d in -1, 0, 1:
                    val = pal(str(p + d))
                    if val == n:
                        continue
                    # Keep the candidate with smaller (distance, value).
                    ans = min(ans, val, key=lambda x: (abs(x - n), x))
        return str(ans)
| true
|
efd1c6d7f845950f4d4ca616a38b70e6d6ac6e0b
|
Python
|
tmu-nlp/100knock2016
|
/aron/chapter07/knock63.py
|
UTF-8
| 565
| 2.671875
| 3
|
[] |
no_license
|
# knock63.py
# coding = utf-8
# Load every artist record from artist.json and store each artist's tags as
# a Redis hash: key = artist name, field = tag value, value = tag count.
import sys, json
import redis

# db=1 keeps this exercise's data separate from the default db 0.
r = redis.Redis(host="localhost", port=6379, db=1)

with open("artist.json", "r") as file:
    for line in file:
        # dic = defaultdict(lambda : 0)
        dic = json.loads(line.rstrip())
        # Only artists that have both a name and at least one tag list.
        if "name" in dic.keys() and "tags" in dic.keys():
            name = dic["name"]
            tags = dic["tags"]
            # print(name)
            for tag in tags:
                # print("\t", tag["count"], tag["value"])
                r.hset(name.encode('utf-8'), tag["value"].encode('utf-8'), int(tag["count"]))
                # r.set(name.encode('utf-8'), tags.encode('utf-8'))
# hgetall
| true
|
f2f1d776db6c0ebece20fe733dc12af6f79aa562
|
Python
|
goldenhairs/Convertible_bond-1
|
/var.py
|
UTF-8
| 5,223
| 2.984375
| 3
|
[] |
no_license
|
from tiingo import TiingoClient
import pandas as pd
import datetime
import numpy as np
import matplotlib.pyplot as plt
import warnings
import scipy
import scipy.stats
import matplotlib.pyplot as plt
# Tiingo API is returning a warning due to an upcoming pandas update
warnings.filterwarnings('ignore')
# User Set Up
data = {'Stocks': ['600939'], 'Quantity': [600]} # Define your holdings
ScenariosNo = 500 # Define the number of scenarios you want to run
# Percentile = 80 # Define your confidence interval
VarDaysHorizon = 1 # Define your time period
info = 0 # 1 if you want more info returned by the script
# Create a DataFrame of holdings
df = pd.DataFrame(data)
# print('[INFO] Calculating the max amount of money the portfolio will lose within',
# VarDaysHorizon, 'days', Percentile, 'percent of the time.')
today = datetime.date.today() - datetime.timedelta(days=1)
low=111
high=113
fee=15
def is_business_day(date):
    """Return True when `date` falls on a pandas business day (Mon-Fri)."""
    return len(pd.bdate_range(date, date)) > 0
def dateforNoOfScenarios(date):
    """Walk backwards from the global `today` until ScenariosNo business
    days have been counted, then return the corresponding start date.

    NOTE(review): the `date` parameter is accepted but never used; the
    function reads the module globals `today` and `ScenariosNo` instead.
    """
    i = 0  # business days counted so far
    w = 0  # calendar days walked back
    while i < ScenariosNo:
        if (is_business_day(today - datetime.timedelta(days=w)) == True):
            i = i+1
            w = w+1
        else:
            w = w+1
            continue
    #print('gotta go back these many business days',i)
    #print('gotta go back these many days',w)
    # remember to add an extra day (days +1 = scenario numbers)
    # 4% is an arbitary number i've calculated the holidays to be in 500days.
    return(today - datetime.timedelta(days=w*1.04 + 1))
def SourceHistoricPrices():
    """Fetch daily close prices for the configured tickers from Tiingo and
    store them in the module-level global `HistData` (also returned)."""
    if info == 1:
        print('[INFO] Fetching stock prices for portfolio holdings')
    # Set Up for Tiingo
    config = {}
    config['session'] = True
    config['api_key'] = '填写你自己的 Tiingo api_key'
    client = TiingoClient(config)
    # Create a list of tickers for the API call
    Tickers = []
    i = 0
    # NOTE(review): `i` is not reset between dict keys, so only the first
    # key's items are ever appended; with the current data layout only the
    # 'Stocks' list is consumed (apparently by accident) -- confirm intent.
    for ticker in data:
        while i < len(data[ticker]):
            Tickers.append(data[ticker][i])
            i = i+1
    if info == 1:
        print('[INFO] Portfolio Holdings determined as', Tickers)
    if info == 1:
        print('[INFO] Portfolio Weights determined as', data['Quantity'])
    # Call the API and store the data
    global HistData
    HistData = client.get_dataframe(
        Tickers, metric_name='close', startDate=dateforNoOfScenarios(today), endDate=today)
    print(HistData)
    if info == 1:
        print('[INFO] Fetching stock prices completed.', len(HistData), 'days.')
    return(HistData)
def ValuePortfolio():
    """Add a 'PortValue' column to the global HistData: the daily sum of
    each holding's close price times its configured quantity."""
    HistData['PortValue'] = 0
    i = 0
    if info == 1:
        print('[INFO] Calculating the portfolio value for each day')
    while i < len(data['Stocks']):
        stock = data['Stocks'][i]
        quantity = data['Quantity'][i]
        # Accumulate this holding's contribution into the running total.
        HistData['PortValue'] = HistData[stock] * \
            quantity + HistData['PortValue']
        i = i+1
def Calculate(Percentile, low, high, fee):
    """Compute historical-simulation VaR (and Expected Shortfall) at the
    given confidence `Percentile` and print one tab-separated table row.

    Mutates the globals HistData (adds Perc_Change / DollarChange columns)
    and SortedHistData. The last two printed columns apparently translate
    VaR into a convertible-bond price at the `low`/`high` conversion values
    net of `fee` -- TODO confirm the pricing formula.
    """
    if info == 1:
        print('[INFO] Calculating Daily % Changes')
    # calculating percentage change
    HistData['Perc_Change'] = HistData['PortValue'].pct_change()
    # calculate money change based on current valuation
    HistData['DollarChange'] = HistData.loc[HistData.index.max()]['PortValue'] * \
        HistData['Perc_Change']
    if info == 1:
        print('[INFO] Picking', round(HistData.loc[HistData.index.max()]['PortValue'], 2), ' value from ',
              HistData.index.max().strftime('%Y-%m-%d'), ' as the latest valuation to base the monetary returns')
    # Index of the loss sitting at the requested percentile.
    ValueLocForPercentile = round(len(HistData) * (1 - (Percentile / 100)))
    if info == 1:
        print('[INFO] Picking the', ValueLocForPercentile, 'th highest value')
    global SortedHistData
    SortedHistData = HistData.sort_values(by=['DollarChange'])
    if info == 1:
        print('[INFO] Sorting the results by highest max loss')
    # Scale the 1-day figure to the configured horizon (sqrt-of-time rule).
    VaR_Result = SortedHistData.iloc[ValueLocForPercentile + 1,
                                     len(SortedHistData.columns)-1] * np.sqrt(VarDaysHorizon)
    # print('The portfolio\'s VaR is:', round(VaR_Result, 2))
    # ES = mean of the losses beyond the VaR cutoff.
    ES_Result = round(SortedHistData['DollarChange'].head(
        ValueLocForPercentile).mean(axis=0), 2) * np.sqrt(VarDaysHorizon)
    # print('The portfolios\'s Expected Shortfall is', ES_Result)
    print('%s%%\t%s\t%s\t%s' % (Percentile,VaR_Result,(low-100)*10+VaR_Result-fee,(high-100)*10+VaR_Result-fee))
def Output(low, high, fee):
    """Print a VaR table over confidence levels 5%..99%.

    The Chinese header row reads: confidence interval, VaR, convertible-bond
    price at `low`, convertible-bond price at `high`.
    """
    cis = [5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,99]
    print('置信区间\tVaR\t转债价格@%s\t转债价格@%s' % (low,high))
    for ci in cis:
        Calculate(ci,low,high,fee)


# Script body: fetch prices, value the portfolio, then print the VaR table.
SourceHistoricPrices()
ValuePortfolio()
Output(low,high,fee)
def plotme():
    """Plot the histogram of daily % changes from the global HistData with a
    fitted zero-mean normal overlay (sample standard deviation)."""
    data1 = HistData['Perc_Change']
    num_bins = 50
    # the histogram of the data
    # Fix: `normed` was removed in Matplotlib 3.x; `density=True` is the
    # equivalent normalisation so the normal pdf overlay remains comparable.
    n, bins, patches = plt.hist(
        data1, num_bins, density=True, facecolor='green', alpha=0.5)
    # add a 'best fit' line
    sigma = HistData['Perc_Change'].std()
    data2 = scipy.stats.norm.pdf(bins, 0, sigma)
    plt.plot(bins, data2, 'r--')
    plt.xlabel('Percentage Change')
    plt.ylabel('Probability/Frequency')
    # Tweak spacing to prevent clipping of ylabel
    plt.subplots_adjust(left=0.15)
    plt.show()
# Show the distribution plot after the table has been printed.
plotme()
| true
|
6c6df7b1223423cfb3ff6e360a162c417f01e3a1
|
Python
|
Dimaed90800/Python_Y
|
/Калькулятор 1.0.py
|
UTF-8
| 182
| 2.640625
| 3
|
[] |
no_license
|
import sys
if __name__ == "__main__":
if len(sys.argv) >= 2:
a = (''.format(sys.argv[1], sys.argv[-1]))
print(sum(a.split()))
else:
print("0")
| true
|
615a44d5f2be5ce57f5da2598fb5f35325b79adf
|
Python
|
iCarrrot/Python
|
/l2/z2.py
|
UTF-8
| 2,909
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
from abc import abstractmethod
from itertools import product
class Formula():
    """Abstract base class for binary propositional formulas.

    Subclasses set `sign` (the connective symbol) and implement `oblicz`
    (evaluate). Subformulas stored in `self.formuly` may be Formula
    instances or bare bools.
    """
    sign = '?'

    def __str__(self):
        # Fully parenthesised infix rendering.
        return ' ('+self.formuly[0].__str__() + self.sign +self.formuly[1].__str__()+') '

    @abstractmethod
    def oblicz(self, zmienne):
        """Evaluate the formula under the variable assignment `zmienne`
        (a dict mapping variable name -> bool)."""
        pass

    def zmienne(self):
        """Return the set of variable names occurring in this formula;
        bool leaves contribute nothing."""
        wynik = set()
        if(type(self.formuly[0]) != bool):
            wynik = wynik | self.formuly[0].zmienne()
        if(type(self.formuly[1]) != bool):
            wynik = wynik | self.formuly[1].zmienne()
        return wynik

    def obliczformuly(self, zmienne):
        """Evaluate both subformulas, passing bool leaves through unchanged;
        returns the pair (left_value, right_value)."""
        wyn1,wyn2=(0,0)
        if type(self.formuly[0]) == bool:
            wyn1 = self.formuly[0]
        else:
            wyn1 = self.formuly[0].oblicz(zmienne)
        if type(self.formuly[1]) == bool:
            wyn2 = self.formuly[1]
        else:
            wyn2 = self.formuly[1].oblicz(zmienne)
        return (wyn1, wyn2)

    def __init__(self, f1, f2):
        # Store the two subformulas (Formula instances or bools).
        self.formuly=[]
        self.formuly.append(f1)
        self.formuly.append(f2)
class Zmienna(Formula):
    """Leaf of the formula tree: a named variable or a boolean literal."""
    def __str__(self):
        return str(self.val)
    def __init__(self, v):
        # ``v`` is either a variable name (str) or a bool constant.
        self.val = v
    def zmienne(self):
        """Return the set of variable names referenced by this leaf."""
        if(type(self.val) != bool):
            return set([self.val])
        # Bug fix: the original fell through and returned None for bool
        # literals, which breaks the set-union in Formula.zmienne().
        return set()
    def oblicz(self, zmienne):
        """Evaluate the leaf under the assignment dict ``zmienne``."""
        if(type(self.val) == bool):
            # Bug fix: the original returned the undefined global ``val``
            # (NameError) instead of ``self.val``.
            return self.val
        else:
            return zmienne[self.val]
class Imp(Formula):
    """Logical implication: a false antecedent makes the formula true."""
    sign = '=>'
    def oblicz(self, zmienne):
        antecedent, consequent = self.obliczformuly(zmienne)
        # p => q: vacuously true when p is false, otherwise the value of q.
        return True if antecedent == False else consequent
class And(Formula):
    """Logical conjunction of the two sub-formulas."""
    sign = '&'
    def oblicz(self, zmienne):
        operands = self.obliczformuly(zmienne)
        return operands[0] and operands[1]
class Or(Formula):
    """Logical disjunction of the two sub-formulas."""
    sign = '|'
    def oblicz(self, zmienne):
        operands = self.obliczformuly(zmienne)
        return operands[0] or operands[1]
class Iff(Formula):
    """Logical equivalence: true when both operands agree."""
    sign = '<=>'
    def oblicz(self, zmienne):
        left, right = self.obliczformuly(zmienne)
        both = left and right
        neither = (not left) and (not right)
        return both or neither
class Not(Formula):
    """Unary negation node; wraps a single operand in ``self.val``."""
    sign = '!'
    def __str__(self):
        return self.sign + self.val.__str__()
    def __init__(self, f1):
        # Unary: overrides Formula.__init__ and stores one operand only.
        self.val = f1
    def zmienne(self):
        """Return the variable names of the negated subtree."""
        return self.val.zmienne()
    def oblicz(self, zmienne):
        """Return the negation of the operand's value."""
        if(type(self.val) == bool):
            return not self.val
        else:
            return not self.val.oblicz(zmienne)
def tautologia(formula):
    """Return True iff ``formula`` evaluates to True under every assignment.

    A bare bool is its own answer; otherwise every True/False combination
    over the formula's variables is tried exhaustively.
    """
    if type(formula) == bool:
        return formula
    names = list(formula.zmienne())
    assignments = product([True, False], repeat=len(names))
    return all(formula.oblicz(dict(zip(names, values))) for values in assignments)
# Ad-hoc smoke tests exercised at import time.
n = Zmienna(True)
print(Imp(Zmienna('x'), True))
print(Not(True))
print(Not(Zmienna('x')).oblicz({'x': True}))
print(tautologia(Or(Not(Zmienna('y')), Zmienna('y'))))
| true
|
a99aed51eba6f7aa6c15877d6f1960887bb163d5
|
Python
|
Nukker/HttpServer
|
/log.py
|
UTF-8
| 1,218
| 2.859375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# Time:2012-02-12 17:11:45
__author__ = 'Kun'
import os,datetime,sys
try:
import config
except ImportError:
print 'Can\'t find the config file!!'
sys.exit(0)
def get_log_filename():
    """Build a timestamped name such as ``server.2012-02-12-17-11-45.log``."""
    stamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    return 'server.' + stamp + '.log'
class log():
    """Minimal file logger (Python 2): appends timestamped lines and echoes
    them to stdout.  Reads the log directory from the ``config`` module."""
    def __init__(self):
        # Create the log directory on first use; one file per process start.
        self.log_file_path = config.logpath
        self.log_file = config.logpath + os.sep + get_log_filename()
        print self.log_file
        if not os.path.exists(self.log_file_path):
            os.makedirs(self.log_file_path)
    def write(self,msg):
        # Re-open per call so partially written logs survive crashes.
        self.file = open(self.log_file,'ab')
        self.file.write(msg)
        self.file.close()
        print msg
    def info(self, msg):
        msg = self.getTime() + ' [INFO] ' + msg
        self.write(msg)
    def error(self, msg):
        msg = self.getTime() + ' [ERROR] ' + msg
        self.write(msg)
    def warning(self, msg):
        msg = self.getTime() + ' [WARNING] ' + msg
        self.write(msg)
    def getTime(self):
        # Bracketed timestamp prefix, e.g. ``[2012-02-12 17:11:45]``.
        return datetime.datetime.now().strftime('[%Y-%m-%d %H:%M:%S]')
if __name__ == '__main__':
    # Smoke test.  Note the instance deliberately shadows the class name.
    log = log()
    log.info('helloboy\n')
    log.error('testerror\n')
| true
|
ecb91c34d37b690d409e069da43e9edcc7dd60a8
|
Python
|
OscarPerez0/OLC1_Proyecto1_201213498
|
/Grafo.py
|
UTF-8
| 5,336
| 2.984375
| 3
|
[] |
no_license
|
import pydot
from PIL import Image
class Grafo:
    """Renders hand-built token automata (DFAs) as pydot sub-graphs.

    Each recognised token name in ``l_er`` maps to one cluster; the combined
    digraph is written to Reportes/ and displayed with PIL.
    """
    def __init__(self, ers):
        # ``ers`` is a dict-like whose keys are token-class names.
        self.l_er = ers

    def generar_grafo(self):
        """Build one sub-graph per requested token class and render the file."""
        if len(self.l_er) < 1:
            return
        callgraph = pydot.Dot(graph_type='digraph')
        keys = list(self.l_er)
        for text in keys:
            if text == 'id':
                callgraph.add_subgraph(self.grafo_id())
            elif text == 'entero':
                callgraph.add_subgraph(self.grafo_numero())
            elif text == 'decimal':
                callgraph.add_subgraph(self.grafo_decimal())
            elif text == 'cadena':
                callgraph.add_subgraph(self.grafo_cadena())
            elif text == 'cadena_s':
                callgraph.add_subgraph(self.grafo_cadena_s())
            elif text == 'comentario_m':
                callgraph.add_subgraph(self.grafo_comentario_m())
            elif text == 'comentario_s':
                callgraph.add_subgraph(self.grafo_comentario_s())
        callgraph.write_raw('Reportes/automata.dot')
        callgraph.write_png('Reportes/automata.png')
        im = Image.open('Reportes/automata.png')
        im.show()

    def _build_cluster(self, name, label, nodes, edges):
        """Assemble a pydot.Cluster from (id, label, color) node tuples and
        (src, dst, label) edge tuples.  ``color`` may be None for the default
        styling; all nodes are added before any edge, matching the original
        hand-written construction order."""
        cluster = pydot.Cluster(name, label=label)
        for node_id, node_label, color in nodes:
            if color is None:
                cluster.add_node(pydot.Node(node_id, label=node_label))
            else:
                cluster.add_node(pydot.Node(node_id, label=node_label, color=color))
        for src, dst, edge_label in edges:
            cluster.add_edge(pydot.Edge(src, dst, label=edge_label))
        return cluster

    def grafo_id(self):
        # Identifier: a letter followed by letters/digits/underscores.
        return self._build_cluster('id', 'Identificador',
            [('S0', 'S0', None), ('S1', 'S1', 'green')],
            [('S0', 'S1', 'L'), ('S1', 'S1', 'L'),
             ('S1', 'S1', 'N'), ('S1', 'S1', '_')])

    def grafo_numero(self):
        # Integer: one or more digits.
        return self._build_cluster('entero', 'Entero',
            [('B0', 'B0', None), ('B1', 'B1', 'green')],
            [('B0', 'B1', 'N'), ('B1', 'B1', 'N')])

    def grafo_decimal(self):
        # Decimal: digits, a dot, then one or more digits.
        return self._build_cluster('decimal', 'Decimal',
            [('C0', 'C0', None), ('C1', 'C1', None),
             ('C2', 'C2', None), ('C3', 'C3', 'green')],
            [('C0', 'C1', 'N'), ('C1', 'C2', '.'),
             ('C2', 'C3', 'N'), ('C3', 'C3', 'N')])

    def grafo_cadena(self):
        print('cadena')
        # Double-quoted string: " any-chars "
        return self._build_cluster('cadena', 'Cadena',
            [('D0', 'D0', None), ('D1', 'D1', None), ('D2', 'D2', 'green')],
            [('D0', 'D1', '"'), ('D1', 'D1', 'CC'), ('D1', 'D2', '"')])

    def grafo_cadena_s(self):
        print('cadena_s')
        # Single-quoted string: ' any-chars '
        return self._build_cluster('cadena_s', 'Cadena Simple',
            [('E0', 'E0', None), ('E1', 'E1', None), ('E2', 'E2', 'green')],
            [('E0', 'E1', "'"), ('E1', 'E1', 'CC'), ('E1', 'E2', "'")])

    def grafo_comentario_m(self):
        print('comentario_m')
        # Multi-line comment: /* any-chars */
        return self._build_cluster('comentario_m', 'Comentario Multilinea',
            [('F0', 'F0', None), ('F1', 'F1', None), ('F2', 'F2', None),
             ('F3', 'F3', None), ('F4', 'F4', 'green')],
            [('F0', 'F1', '/'), ('F1', 'F2', '*'), ('F2', 'F2', 'CC'),
             ('F2', 'F3', '*'), ('F3', 'F4', '/')])

    def grafo_comentario_s(self):
        print('comentario_s')
        # Single-line comment: // any-chars
        return self._build_cluster('comentario_s', 'Comentario Simple',
            [('G0', 'G0', None), ('G1', 'G1', None), ('G2', 'G2', 'green')],
            [('G0', 'G1', '/'), ('G1', 'G2', '/'), ('G2', 'G2', 'CC')])
| true
|
ab66c2291f41ba1ef7793f140a99e63a58a84ed1
|
Python
|
appsjit/testament
|
/LeetCode/soljit/s239_slidingWindowMax.py
|
UTF-8
| 394
| 2.84375
| 3
|
[] |
no_license
|
class Solution(object):
    def maxSlidingWindow(self, nums, k):
        """
        Return the maximum of every contiguous window of size ``k``.

        :type nums: List[int]
        :type k: int
        :rtype: List[int]

        Keeps a monotonically decreasing deque of candidate indices so the
        whole scan is O(n), instead of the original O(n*k) ``max`` per slice.
        Also guards k <= 0, which previously raised on an empty slice.
        """
        from collections import deque  # local import: file has no import section
        if not nums or k <= 0 or len(nums) < k:
            return []
        window = deque()  # indices into nums, values strictly decreasing
        result = []
        for i, value in enumerate(nums):
            # Drop the front index once it slides out of the window.
            if window and window[0] <= i - k:
                window.popleft()
            # Smaller-or-equal trailing values can never be a future maximum.
            while window and nums[window[-1]] <= value:
                window.pop()
            window.append(i)
            if i >= k - 1:
                result.append(nums[window[0]])
        return result
| true
|
577d5cf0186b68219579c0db83dff6ac614652f5
|
Python
|
LYleonard/Algorithm
|
/PyLeetCode/SortingAlgorithms/quicksort/lambda_quickwort.py
|
UTF-8
| 737
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'LYLeonard'
__mtime__ = '2018/4/13'
# Code Description: 使用python的lambda表达式(匿名函数)实现快速排序
"""
import random
'''
匿名函数,一行代码实现快速排序
'''
def quick_sort(array):
    """Classic quicksort: first element as pivot, recursing on partitions."""
    if len(array) <= 1:
        return array
    pivot, rest = array[0], array[1:]
    smaller = [item for item in rest if item <= pivot]
    larger = [item for item in rest if item > pivot]
    return quick_sort(smaller) + [pivot] + quick_sort(larger)
def random_sqe(n):
    """Build a random test case.

    :param n: number of elements to generate
    :return: a list of ``n`` random integers drawn from [0, 2*n]
    """
    return [random.randint(0, 2 * n) for _ in range(n)]
# Demo: sort a random sample (Python 2 print statements).
n = 10
L = random_sqe(n)
print L
print quick_sort(L)
| true
|
91bce0ed390f570c7c9f9c63bbf7b492cb1a7176
|
Python
|
JunchuangYang/PythonInterView
|
/Python剑指Offer/032_把数组排成最小的数/032.py
|
UTF-8
| 974
| 3.78125
| 4
|
[] |
no_license
|
__author__ = 'lenovo'
"""
一般的 sorted 排序函数 都有相应的 cmp函数,用来定制化排序的比较方法。
然而 python 3中的 sorted( ) 除去的cmp 参数,推荐使用 key。
Python中有相应的函数 支持将 cmp函数转化为key的值。
cmp指定一个定制的比较函数,这个函数接收两个参数(iterable的元素),
如果第一个参数小于第二个参数,返回一个负数;
如果第一个参数等于第二个参数,返回零;
如果第一个参数大于第二个参数,返回一个正数。默认值为None
"""
import functools
def cmp(a , b):
    """Three-way comparator: order numbers by which digit-concatenation is
    smaller.  ``a`` sorts after ``b`` exactly when ``str(a)+str(b)`` is
    lexicographically greater than ``str(b)+str(a)``."""
    ab = str(a) + str(b)
    ba = str(b) + str(a)
    # (x > y) - (x < y) is the standard three-way comparison idiom.
    return (ab > ba) - (ab < ba)
def solution(nums):
    """Concatenate ``nums`` in the order producing the smallest number string."""
    def _concat_cmp(a, b):
        # Inlined comparator: a before b iff str(a)+str(b) < str(b)+str(a).
        ab, ba = str(a) + str(b), str(b) + str(a)
        if ab > ba:
            return 1
        if ab < ba:
            return -1
        return 0
    ordered = sorted(nums, key=functools.cmp_to_key(_concat_cmp))
    return ''.join(str(item) for item in ordered)
if __name__ == "__main__":
print(solution([3,32,321]))
| true
|
67df813999501f939e3e402246bd35e10e5be12e
|
Python
|
sholmbo/misfits
|
/misfits/gui/tools/base/intervals.py
|
UTF-8
| 3,024
| 2.65625
| 3
|
[] |
no_license
|
from matplotlib import cm
from .base import Base
from ...plot import ErrorSnake
class BaseIntervals (Base) :
    """Interactive manager of colored x-axis intervals drawn over a spectrum.

    Wires matplotlib mouse events: left-click/drag on ax[0] creates and
    resizes an interval, hovering selects one, right-click deletes it.
    """
    def __init__(self, gui, spectrum, method):
        self.fig, self.ax = gui.fig, gui.ax
        self.gui, self.spectrum, self.method = gui, spectrum, method
        self.artists = dict()
        self.artists['spectrum'] = ErrorSnake(gui.ax[0], spectrum)
        self.intervals = list()
        # active: being dragged; selected: last shown; current: under cursor.
        self._active_interval, self._selected_interval, self._current_interval = None, None, None
        self.fig.canvas.mpl_connect('button_press_event', lambda e: self.add_interval(e))
        self.fig.canvas.mpl_connect('motion_notify_event', lambda e: self.set_interval(e))
        self.fig.canvas.mpl_connect('motion_notify_event', lambda e: self.pick_interval(e))
        self.fig.canvas.mpl_connect('button_press_event', lambda e: self.del_interval(e))
    def new_interval(self):
        """Factory hook; subclasses may return a specialised interval type."""
        return BaseInterval(self)
    def add_interval(self, e):
        """Left-click on ax[0]: start a new zero-width interval at the cursor."""
        if not e.inaxes is self.ax[0] or \
                not e.button == 1:
            return
        for interval in self.intervals:
            interval.set_visible(False)
        interval = self.new_interval()
        interval.set_data(x0=e.xdata, xx=e.xdata)
        self.intervals.append(interval)
        self.intervals.sort(key=lambda i: i.x0)
        self._active_interval = interval
        self.set_interval(e)
    def set_interval(self, e):
        """Drag with button 1 held: stretch the active interval's far edge."""
        if not self._active_interval or \
                not e.inaxes is self.ax[0]:
            return
        if not e.button == 1:
            # Button released: the drag is finished.
            self._active_interval = None
            return
        self._active_interval.set_data(xx=e.xdata)
        self._update_colors()
        self.gui.set_limits(self.ax[1])
        self.fig.canvas.draw()
    def pick_interval(self, e):
        """Hover: make the interval nearest under the cursor the current one."""
        if not e.inaxes is self.ax[0] or \
                self._active_interval:
            return
        intervals = [interval for interval in self.intervals if interval.in_interval(e.xdata)]
        intervals.sort(key=lambda i: e.xdata - i.x0)
        if not intervals:
            self._current_interval = None
            return
        if self._current_interval is intervals[0]:
            return
        self.show_interval(intervals[0])
    def show_interval(self, interval):
        """Display only ``interval``, hiding every other one."""
        self._current_interval = interval
        self._selected_interval = interval
        for interval in self.intervals:
            interval.set_visible(False)
        self._current_interval.set_visible(True)
        self.gui.set_limits(self.ax[1])
        self.fig.canvas.draw()
    def del_interval(self, e):
        """Right-click: delete the interval currently under the cursor."""
        if not self._current_interval or \
                not e.inaxes is self.ax[0] or \
                not e.button == 3:
            return
        self.intervals.remove(self._current_interval)
        self._current_interval.delete()
        self._current_interval = None
        self._selected_interval = None
        self._update_colors()
        self.gui.set_limits(self.ax[1])
        self.fig.canvas.draw()
    def _update_colors(self):
        """Spread the intervals evenly over the gist_rainbow colormap."""
        N = len(self.intervals)
        for i, interval in enumerate(self.intervals):
            color = cm.get_cmap('gist_rainbow')(1.*i/N)
            interval.set_color(color)
    def __iter__(self):
        return iter(self.intervals)
| true
|
33cf99052759cb48f6de9b512c7464c49a93ffa6
|
Python
|
ebergstein/DojoAssignments
|
/Python/Flask_MySQL/Wall/server.py
|
UTF-8
| 4,226
| 2.515625
| 3
|
[] |
no_license
|
from flask import Flask, render_template, redirect, request, session, flash
from mysqlconnection import MySQLConnector
from datetime import datetime
app = Flask(__name__)
mysql = MySQLConnector(app,'wall') #BAD NAME, DON'T DO THIS
# the "re" module will let us perform some regular expression operations
import re
import md5
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
# NOTE(review): ``app`` is re-created here, discarding the instance that
# ``mysql`` above was bound to -- confirm this is intentional.
app = Flask(__name__)
app.secret_key = "ThisIsSecret!"
@app.route('/')
def index():
    """Login/registration landing page; drops any existing session."""
    session.clear()
    return render_template('index.html')
@app.route('/login', methods = ["POST"])
def login():
    """Check credentials against the users table and start a session."""
    email = request.form['email']
    ##print "*"*80
    ##print email
    # NOTE(review): md5 is not a safe password hash -- bcrypt/argon2 advised.
    password = md5.new(request.form['password']).hexdigest()
    query = "SELECT * FROM users WHERE users.email = :email LIMIT 1"
    data = {'email': email }
    user = mysql.query_db(query, data)
    ##print "*"*80
    ##print user
    if user != []:
        if user[0]['password'] == password:
            # Successful login: remember who the user is for later routes.
            session['name'] = user[0]['first_name']
            session['id'] = user[0]['id']
            return redirect ('/wall')
        else:
            return redirect ('/')
    else:
        return redirect ('/')
@app.route('/register', methods = ["POST"])
def register():
    """Validate the signup form, insert the user, and start a session."""
    # Reject empty fields or a malformed email before touching the DB.
    if len(request.form['email']) < 1 or len(request.form['first_name']) < 1 or len(request.form['last_name']) < 1 or len(request.form['password']) < 1:
        flash("You forgot something.")
        return redirect ('/')
    elif not EMAIL_REGEX.match(request.form['email']):
        flash("Email not valid.")
        return redirect ('/')
    data = {'first_name': request.form['first_name'],
            'last_name': request.form['last_name'],
            'email': request.form['email'],
            'password': md5.new(request.form['password']).hexdigest()
            }
    query = "INSERT INTO users (first_name, last_name, email, password, created_at, updated_at) VALUES (:first_name, :last_name, :email, :password, NOW(), NOW())"
    mysql.query_db(query, data)
    session['name'] = request.form['first_name']
    # Fetch the freshly inserted row to learn its auto-increment id.
    newquery = "SELECT * FROM users WHERE users.email = :email LIMIT 1"
    newdata = {'email': request.form['email'] }
    user = mysql.query_db(newquery, newdata)
    session['id'] = user[0]['id']
    return redirect ('/wall')
@app.route('/wall')
def wall():
    """Render the wall: every message, its comments, and a comment form.

    NOTE(review): HTML is assembled server-side from DB values and pushed
    through flash() unescaped -- an XSS risk; the template should escape or
    render structured data instead.
    """
    message_query = mysql.query_db("SELECT messages.id, users.first_name, users.last_name, messages.created_at, messages.message FROM users JOIN messages ON users.id = messages.user_id ORDER BY messages.created_at DESC")
    for data in message_query:
        temp = data['created_at'].strftime('%c')
        flash("<div class = 'message'><h3>" + data['first_name'] + " " + data['last_name'] + " - " + temp + "</h3><p>" + data['message'] + "</p></div>")
        # Pull this message's comments, oldest first.
        query = "SELECT users.first_name, users.last_name, comments.created_at, comments.comment FROM users JOIN comments ON users.id = comments.user_id JOIN messages ON messages.id = comments.message_id WHERE messages.id = :id ORDER BY comments.created_at ASC;"
        message_id = {'id': data['id']}
        comment_query = mysql.query_db(query, message_id)
        for comment in comment_query:
            temp = comment['created_at'].strftime('%c')
            flash("<div class = 'comment'><h4>" + comment['first_name'] + " " + comment['last_name'] + " - " + temp + "</h4><p>" + comment['comment'] + "</p></div>")
        flash("<p>Post a comment</p><form action = '/comment' method = 'post'><textarea name = 'comment'></textarea><input type = 'hidden' name = 'id' value = {}><input type = 'submit' value = 'Post a comment'></form>".format(data['id']))
    return render_template('wall.html')
@app.route('/post', methods = ["POST"])
def post():
    """Insert a new wall message authored by the logged-in user."""
    query = "INSERT INTO messages (user_id, message, created_at, updated_at) VALUES (:id, :message, NOW(), NOW())"
    data = {'id': session['id'],
            'message': request.form['message']
            }
    mysql.query_db(query, data)
    return redirect('/wall')
@app.route('/comment', methods = ["POST"])
def comment():
    """Attach a comment by the logged-in user to an existing message."""
    query = "INSERT INTO comments (message_id, user_id, comment, created_at, updated_at) VALUES (:message_id, :user_id, :comment, NOW(), NOW())"
    data = {'message_id': int(request.form['id']),
            'user_id': session['id'],
            'comment': request.form['comment']
            }
    # Debug output (Python 2 print statements).
    print "*"*80
    print data['message_id']
    mysql.query_db(query, data)
    return redirect('/wall')
@app.route('/logout', methods = ["POST"])
def logout():
    """End the user's session and return to the landing page."""
    # Improvement: clear the session here rather than relying on the '/'
    # route's side effect to do it after the redirect.
    session.clear()
    return redirect('/')
app.run(debug=True)
| true
|
c81c8774f3a85bc723067e5f20038ab3fa20e8bd
|
Python
|
Gio-giouv/breakthru-check
|
/Breakthruengine.py
|
UTF-8
| 7,826
| 2.828125
| 3
|
[] |
no_license
|
import pygame
from tkinter import messagebox
import numpy as np
class State_Game:
    """Full game state for Breakthru: board, fleets, turn/move bookkeeping,
    and move generation.  Gold pieces are ``Gf`` plus the flagship ``GB``;
    silver pieces are ``Sf``; ``__`` marks an empty square."""
    def __init__(self):
        # 11x11 starting position.
        self.board =[
            ["__", "__", "__", "__", "__", "__", "__", "__", "__", "__", "__"],
            ["__", "__", "__", "Sf", "Sf", "Sf", "Sf", "Sf", "__", "__", "__"],
            ["__", "__", "__", "__", "__", "__", "__", "__", "__", "__", "__"],
            ["__", "Sf", "__", "__", "Gf", "Gf", "Gf", "__", "__", "Sf", "__"],
            ["__", "Sf", "__", "Gf", "__", "__", "__", "Gf", "__", "Sf", "__"],
            ["__", "Sf", "__", "Gf", "__", "GB", "__","Gf", "__", "Sf", "__"],
            ["__", "Sf", "__", "Gf", "__", "__", "__", "Gf", "__", "Sf", "__"],
            ["__", "Sf", "__", "__", "Gf", "Gf", "Gf", "__", "__", "Sf", "__"],
            ["__", "__", "__", "__", "__", "__", "__", "__", "__", "__", "__"],
            ["__", "__", "__", "Sf", "Sf", "Sf", "Sf", "Sf", "__", "__", "__"],
            ["__", "__", "__", "__", "__", "__", "__", "__", "__", "__", "__"]]
        # Dispatch from the piece's second character to its move generator.
        self.function_move = {"f": self.fleet_moves,"B":self.fleet_moves}
        # 0/1/2: how many half-moves of the current double-move were played.
        self.secondmove=0
        self.silverfleet=20
        self.goldfleet=12
        self.swin=False
        self.gwin=False
        self.mobility=0
        self.bigship=1
        self.piece_captured=[]
        self.gold=10
        self.silver=10
        self.Goldmove = True
        self.movetrack =[]
        self.DEBUG = True
        self.draw=False
    # Apply ``move`` to the board, updating captures, win flags, and whose
    # turn it is.  The flagship moving ends Gold's turn immediately.
    def make_move(self,move):
        if move.piece_move == "GB" and self.secondmove==0:
            self.board[move.start_row][move.start_col] = "__"
            if self.board[move.end_row][move.end_col] != "__":
                self.piece_captured.append(self.board[move.end_row][move.end_col])
                # NOTE(review): pieces are 2-char strings, so indexing [1]
                # yields 'f'/'B'; comparing it to 'Sf' can never be True --
                # likely meant to compare the whole string.  Confirm.
                if self.piece_captured[-1][1] == 'Sf':
                    self.silverfleet -= 1
                    if self.silverfleet == 0:
                        self.gwin=True
            self.board[move.end_row][move.end_col] = move.piece_move
            self.secondmove = 2
            self.movetrack.append(move)# track the move so we can undo it later
            # Flagship reaching any board edge wins for Gold.
            if (move.end_col == 10 or move.end_col == 0) or (move.end_row == 0 or move.end_row == 10):
                self.gwin=True
                self.gold= 10*self.goldfleet
            if self.secondmove == 2:
                self.secondmove = 0
                self.Goldmove = not self.Goldmove #swap players
        elif move.piece_move == "Gf"or move.piece_move =="Sf":
            self.board[move.start_row][move.start_col] = "__"
            if self.board[move.end_row][move.end_col] != "__":
                self.piece_captured.append(self.board[move.end_row][move.end_col])
                # NOTE(review): same [1] == 'Gf'/'Sf'/'GB' dead comparisons
                # as above -- confirm intended behaviour.
                if self.piece_captured[-1][1] == 'Gf':
                    self.goldfleet-=1
                elif self.piece_captured[-1][1] == 'Sf':
                    self.silverfleet -= 1
                elif self.piece_captured[-1][1] == 'GB':
                    self.silverfleet=1000
                    self.swin=True
                    self.silver = 10*self.silverfleet
            self.board[move.end_row][move.end_col] = move.piece_move
            self.movetrack.append(move)
            self.secondmove = self.secondmove + 1
            # A capture consumes the whole turn.
            if move.piece_captured !="__":
                self.secondmove =2
            if self.secondmove == 2 :
                self.secondmove=0
                self.Goldmove = not self.Goldmove
    #undo the last move
    def undo_move(self):
        if len(self.movetrack) != 0:
            move =self.movetrack.pop()
            self.board[move.start_row][move.start_col] = move.piece_move
            self.board[move.end_row][move.end_col] = move.piece_captured
            self.Goldmove = not self.Goldmove # switch to silver
    # consider valid moves
    def valid_moves(self):
        moves= self.possible_moves()# for now
        #for i in range(len(moves),-1,-1):
            #self.make_move((moves[i]))
        # NOTE(review): ``moves`` is a list; ``moves == 0`` is never True,
        # so the draw flag below cannot be set here.  Confirm intent.
        if moves ==0 :
            self.draw=True
        return moves
    def possible_moves(self):
        """Collect every candidate move for the side to play, honouring the
        no-same-piece-twice rule of the double move."""
        moves = []
        for r in range(len(self.board)): # number of row
            for c in range(len(self.board[r])): # number of col in the row
                turn = self.board[r][c][0]
                if (turn == "G" and self.Goldmove) or (turn == "S" and not self.Goldmove):
                    piece = self.board[r][c][1]
                    if piece == 'f':
                        if self.secondmove == 1 :
                            lastPiece = self.movetrack[-1]
                            if r != lastPiece.end_row or c != lastPiece.end_col: # avoid 2-times moving of the same piece
                                self.function_move[piece](r, c, moves)
                        else:
                            self.function_move[piece](r, c, moves)# call move function
                    else:
                        self.function_move[piece](r, c, moves)
        self.mobility=len(moves)
        return moves
        # moves = []
        # for r in range(len(self.board)):# number of row
        #     for c in range(len(self.board[r])): # number of col in the row
        #         turn = self.board[r][c][0]
        #         if (turn == "G" and self.Goldmove) or (turn == "S" and not self.Goldmove):
        #             piece = self.board[r][c][1]
        #             # call move function
        #             if piece == "f":
        #                 if self.secondmove == 1:
        #                     lastpiece = self.movetrack[-1]
        #                     if r != lastpiece.end_row and c!= lastpiece.end_col:
        #                         self.function_move[piece](r,c,moves)
        #                 else:
        #                     self.function_move[piece](r,c,moves)
        #             elif piece =="B" and self.secondmove == 0:
        #                 self.function_move[piece](r,c,moves)
    #get all the fleet moves add moves on list
    def fleet_moves(self,r,c,moves):
        # Slides orthogonally any distance over empty squares; diagonal
        # captures are only generated on the first half-move of a turn.
        directions = ((-1,0),(0,-1),(1,0),(0,1))
        colorenemy = "S" if self.Goldmove else "G"
        for d in directions:
            for i in range(1,11):
                endrow = r + d[0]*i
                endcol = c + d[1]*i
                if 0 <= endrow < 10 and 0 <= endcol <10:
                    endpiece = self.board[endrow][endcol]
                    #if self.Goldmove: # gold fleet moves
                    if endpiece == "__":
                        moves.append(Moves((r,c),(endrow,endcol),self.board))
                    elif self.secondmove==0:
                        if self.board[r+1][c-1][0] == colorenemy :
                            moves.append(Moves((r,c),(r+1,c-1),self.board))
                        elif self.board[r-1][c-1][0] == colorenemy :
                            moves.append(Moves((r, c), (r - 1, c - 1),self.board))
                        elif self.board[r - 1][c + 1][0] == colorenemy :
                            moves.append(Moves((r, c), (r - 1, c + 1),self.board))
                        elif self.board[r + 1][c + 1][0] == colorenemy :
                            moves.append(Moves((r, c), (r + 1, c + 1),self.board))
                        break
                    else:break
                else:break
        # if c-1>= 0 :# captures to the up left
        #     if self.board[r-1][c-1][0] == colorenemy:
        #         moves.append(Moves((r,c),(r-1,c-1),self.board))
        # if c+1<=10:# captures up to right
        #     if self.board[r-1][c+1][0] == colorenemy:
        #         moves.append(Moves((r,c),(r-1,c+1),self.board))
        # if c - 1 >= 0: # captures to the down left
        #     if self.board[r+1][c-1][0] == colorenemy:
        #         moves.append(Moves((r, c), (r+1, c-1), self.board))
        # if c + 1 <= 10: # captures down to right
        #     if self.board[r+1][c + 1][0] == colorenemy:
        #         moves.append(Moves((r, c), (r + 1, c + 1), self.board))
    #def bigf_moves(self,r,c,moves):
    #    pass
class Moves():
    """A single move between two squares, with chess-like notation helpers."""
    #map keys to values: board rows/cols <-> rank/file labels
    ranks_to_rows = {"1":10,"2":9,"3":8, "4":7,"5":6,"6":5,"7":4,"8":3,"9":2,"10":1,"11":0}
    row_to_ranks = {v:k for k,v in ranks_to_rows.items()}
    ranks_to_cols = {"a":0,"b":1,"c":2, "d":3,"e":4,"f":5,"g":6,"h":7,"i":8,"j":9,"k":10}
    col_to_ranks = {v:k for k,v in ranks_to_cols.items()}
    def __init__(self,start_sq,end_sq,board):
        """Record the start/end squares and the pieces involved.

        ``IDmove`` packs both coordinates into a single int so moves can be
        compared cheaply in ``__eq__``.
        """
        self.start_row = start_sq[0]
        self.start_col = start_sq[1]
        self.end_row = end_sq[0]
        self.end_col = end_sq[1]
        self.piece_move = board[self.start_row][self.start_col]
        self.piece_captured = board[self.end_row][self.end_col]
        self.IDmove = self.start_row*1000000+self.start_col*10000+self.end_row*100+self.end_col
        # Bug fix: removed the leftover debug ``print(self.IDmove)`` that
        # spammed stdout for every candidate move constructed.
    #overriding equal methods
    def __eq__(self,other):
        if isinstance(other,Moves):
            return self.IDmove == other.IDmove
        return False
    def get_notation(self):
        """Return e.g. ``'c10e8'`` for a move from (1, 2) to (3, 4)."""
        return self.get_rank(self.start_row,self.start_col)+self.get_rank(self.end_row,self.end_col)
    def get_rank(self,r,c):
        """Translate a (row, col) pair into file+rank notation."""
        return self.col_to_ranks[c]+self.row_to_ranks[r]
| true
|
8b25d0f10151273cc9faaac4c9d1cb4531b96b4c
|
Python
|
DevAnuragGarg/Python-Learning-Basics
|
/listsTuples03/sorting.py
|
UTF-8
| 888
| 4.46875
| 4
|
[
"Apache-2.0"
] |
permissive
|
pangram = "The quick brown fox jumps over the lazy dog"
# it creates the list from the iterables
letters = sorted(pangram)
print(letters)
numbers = [2.3, 4.5, 31., 9.1, 1.6]
# here we are passing the list and get the new list in return
sorted_numbers = sorted(numbers)
print(sorted_numbers)
# the sort function sort the original list and doesn't create new list
numbers.sort()
print(numbers)
# added a argument key which make the capital T to be shown with the small ones
missing_letter = sorted("The quick brown fox jumped over the lazy dog", key=str.casefold)
print(missing_letter)
names = ["Graham",
"John",
"terry",
"eric",
"Terry",
"Micheal"]
# this will print the names starting with small letters at the end
names.sort()
# this will print the names starting with small letters at the end
names.sort(key=str.casefold)
print(names)
| true
|
9c3eb5a60c99ae8c9de3aca31325d6b182beaae5
|
Python
|
Zaccheaus90/holbertonschool-web_back_end-1
|
/0x07-Session_authentication/api/v1/auth/auth.py
|
UTF-8
| 2,117
| 3
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
""" Module of auth
"""
from os import getenv
from flask import request
from typing import List, TypeVar
class Auth:
    """Base authentication helper: path filtering plus header/cookie access."""

    def __init__(self):
        """No per-instance state; subclasses implement the real auth logic."""

    def require_auth(self, path: str, excluded_paths: List[str]) -> bool:
        """
        Decide whether ``path`` requires authentication.

        Paths are compared slash-terminated; an excluded entry ending in
        ``*`` matches any path sharing its prefix.

        Return:
            True when the path must be authenticated, False when excluded.
        """
        if path is None or excluded_paths is None or len(excluded_paths) == 0:
            return True
        # Normalise to a trailing slash.  Bug fix: the original used
        # ``path[-1] is not '/'`` -- an identity comparison against a str
        # literal (fragile, SyntaxWarning on CPython 3.8+) -- and raised
        # IndexError on an empty path.
        if not path.endswith('/'):
            path += '/'
        for excluded in excluded_paths:
            if excluded.endswith('*'):
                # Wildcard entry: prefix match.
                if path.startswith(excluded[:-1]):
                    return False
            elif path == excluded:
                return False
        return True

    def authorization_header(self, request=None) -> str:
        """
        Return the request's Authorization header, or None when absent.
        """
        if request is None:
            return None
        return request.headers.get('Authorization', None)

    def current_user(self, request=None) -> TypeVar('User'):
        """
        Placeholder: subclasses resolve and return the authenticated user.
        """
        return None

    def session_cookie(self, request=None):
        """
        Return the request cookie named by $SESSION_NAME, or None.
        """
        if request is None:
            return None
        session_env = getenv('SESSION_NAME', None)
        cookie_sess = request.cookies.get(session_env, None)
        return cookie_sess
| true
|
1f72f55c29bf0f871f804ca9016414fc5b56970c
|
Python
|
scinext/fh-kapfenberg-ss2013-raspberry-pi
|
/programs/thermometer-command_line_client-itm11-g1.py
|
UTF-8
| 1,875
| 2.984375
| 3
|
[] |
no_license
|
#! /usr/bin/python
import sys, socket
from sensors.simulation import Thermometer
class ThermoProxy():
    """Connects with a server"""
    def __init__(self, host="127.0.0.1", port=1024):
        # Open a TCP connection; abort the whole program if it fails.
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            self._sock.connect((host, port))
        except:
            sys.exit("Host unreachable")
    def send_msg(self, msg):
        # The protocol has no empty frames; substitute a sentinel instead.
        if msg == "": msg = "EMPTY"
        self._sock.send(msg)
    def receive_msg(self):
        # Read at most 1 KiB; returns '' once the peer closed the connection.
        return self._sock.recv(1024)
if __name__ == '__main__':
    # Interactive command-line client loop (Python 2: uses raw_input).
    default = "127.0.0.1:1024"
    host_str = raw_input("<HOST>:<PORT> [%s] " % (default))
    # use default address if not specified
    if (host_str == ''): host_str = default
    host_str_array = host_str.split(":")
    # use specified host address
    HOST = host_str_array[0]
    if len(host_str_array) > 1:
        # use specified port
        PORT = int(host_str_array[1])
    else:
        # use default port
        PORT = int(default.split(":")[1])
    tp = ThermoProxy(HOST, PORT)
    # Print the server's greeting.
    print(tp.receive_msg())
    while True:
        # prefix that will be shown as long as you are connected to the server
        prefix = "client@%s# " % (tp._sock.getpeername()[0])
        # let the client set a command
        cmd = raw_input(prefix)
        # send message
        #tp.send_msg(cmd.upper())
        tp.send_msg(cmd.upper())
        # receive message
        rcv = tp.receive_msg()
        # end program if connection closed by server
        if not rcv:
            print("Connection closed by server.")
            break
        # print the received message of the server
        print(rcv)
    print("--- END OF PROGRAM ---")
| true
|
d84ca0c73a094f49b83012d3598cfc14c9122844
|
Python
|
brandonjp/SyncSettings
|
/tests/libs/test_gist_api.py
|
UTF-8
| 3,475
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
from sync_settings.libs.gist_api import Gist
from sync_settings.libs.exceptions import GistException
from tests import *
from unittest import TestCase
class TestGistAPI(TestCase):
    """Integration tests for the Gist API wrapper.

    NOTE(review): these hit the live GitHub API using ``opts['access_token']``
    and create/delete real gists -- they are network-dependent.
    """
    def __init__(self, *args, **kwargs):
        super(TestGistAPI, self).__init__(*args, **kwargs)
        # One shared, authenticated client per test instance.
        self.api = Gist(opts.get('access_token'))
    def test_access_token(self):
        """An empty token must be rejected; a real one yields a client."""
        with self.assertRaises(GistException):
            Gist('')
        self.assertIsInstance(self.api, Gist)
        self.assertIsNotNone(self.api)
    def test_create_gist(self):
        """Creation fails on malformed payloads and succeeds on valid ones."""
        data = {'description': 'test description'}
        # Try to create a gist with invalid data
        # Without files object
        with self.assertRaises(GistException):
            self.api.create(data)
        # With a wrong 'files' object
        with self.assertRaises(GistException):
            self.api.create({
                'description': 'some description',
                'files': 'some files'
            })
        # Create a gist with empty content
        with self.assertRaises(GistException):
            self.api.create({
                'description': 'some description',
                'files': {
                    'file_test.txt': {
                        'content': ''
                    }
                }
            })
        # Create a gist without description
        gist = self.api.create({
            'files': {
                'file.txt': {
                    'content': 'content for this file.txt'
                }
            }
        })
        self.assertIsNotNone(gist.get('id'))
        # Delete test gist
        self.assertTrue(self.api.delete(gist.get('id')))
        # Create a gist with description and files
        data.update({ 'files': {
            'someFile': {
                'content': 'Content of this file'
            }
        }})
        gist = self.api.create(data)
        self.assertIsNotNone(gist.get('id'))
        gist_id = gist.get('id')
        # Get a list with all public gists
        gist_items = self.api.list()
        # Check if the created gist isn't public gist list
        for gist_item in gist_items:
            if gist_id == gist_item.get('id'):
                self.assertFalse(gist_item.get('public'))
        # Delete test gist
        self.assertTrue(self.api.delete(gist_id))
    def test_edit_gist(self):
        """Edits reject bad payloads and can add files to an existing gist."""
        test_gist = self.api.create({
            'files': {
                'test_gist.txt': { 'content': 'Gist test content' }
            }
        })
        # Get possible errors
        # Passing wrong parameters
        test_gist_id = test_gist.get('id')
        with self.assertRaises(GistException):
            self.api.edit('some_id', {})
        # With wrong files object
        with self.assertRaises(GistException):
            self.api.edit(test_gist_id, {
                'files': 'some content'
            })
        # Updating without changes
        gist = self.api.edit(test_gist_id, {})
        self.assertIsNotNone(gist.get('id'))
        self.assertEqual(len(gist.get('files')), 1)
        # Adding a new file
        gist = self.api.edit(test_gist_id, {
            'files': { 'other_file.txt': {
                'content': 'Some content'
            }}
        })
        self.assertIsNotNone(gist.get('id'))
        self.assertEqual(len(gist.get('files')), 2)
        self.assertTrue(self.api.delete(test_gist_id))
    def test_get_gist(self):
        """Fetching a bogus id fails; fetching a real one round-trips."""
        test_gist = self.api.create({
            'files': {
                'test_gist.txt': { 'content': 'Gist test content' }
            }
        })
        test_gist_id = test_gist.get('id')
        # Getting errors
        with self.assertRaises(GistException):
            self.api.get('---')
        # Getting gist
        gist = self.api.get(test_gist_id)
        self.assertIsNotNone(gist.get('id'))
        self.assertEqual(test_gist_id, gist.get('id'))
        self.assertTrue(self.api.delete(test_gist_id))
| true
|
f9695e2279f3341f7168404c26d55496e288ebad
|
Python
|
nushackers/code-golf-nov-16
|
/question2/b.py
|
UTF-8
| 163
| 2.71875
| 3
|
[] |
no_license
|
# Code-golf helper: recursively collect both neighbours of every 0 in ``s``.
def x(s, i):
    if i == len(s):
        return []
    else:
        a=x(s,i+1)
        # NOTE(review): at i == 0 ``s[i-1]`` wraps around to the last
        # element, and a trailing 0 makes ``s[i+1]`` raise IndexError --
        # presumably accepted golf trade-offs; confirm against the puzzle.
        return a + [s[i-1],s[i+1]] if s[i] == 0 else a
# Largest neighbour of any zero in ``s``.
r = lambda s: max(x(s, 0))
| true
|
e16d6b3e3bd1f875f92c03ef080b588585070f8a
|
Python
|
FlaxBear/BookMarkGroup
|
/server/ver1_0/BookMarkGroupServer/Validate/groupFolderEditUpdValidate.py
|
UTF-8
| 1,689
| 2.59375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
from flask_wtf import FlaskForm
from wtforms import HiddenField, StringField, TimeField, ValidationError
class GroupFolderUpdValidate(FlaskForm):
    """Validation form for the group-folder edit/update screen."""

    state = HiddenField()
    group_folder_id = HiddenField()
    group_folder_name = StringField()
    group_folder_version = StringField()
    json_directory_path = StringField()
    group_folder_memo = StringField()
    create_time = TimeField()
    update_time = TimeField()

    def validate_group_folder_id(self, group_folder_id):
        """Require a selected group-folder id."""
        if group_folder_id.data == "":
            raise ValidationError("グループフォルダーIDが選択されていません")
        if group_folder_id.data != "0":
            # existence check (not implemented yet)
            pass

    def validate_group_folder_name(self, group_folder_name):
        """Require a non-empty name of at most 20 chars with no special chars."""
        if group_folder_name.data == "":
            raise ValidationError("グループフォルダー名を入力してください")
        if len(group_folder_name.data) > 20:
            raise ValidationError("グループフォルダー名は20文字以内にしてください")
        # Bug fix: the original tested whether the literal regex text
        # "[^.!#$%&'*+\/=?^_`{|}~-]" occurred as a *substring* of the name,
        # which is essentially never true, so the special-character rule was
        # dead code. Per the error message, reject names containing any of
        # these characters.
        forbidden = ".!#$%&'*+\\/=?^_`{|}~-"
        if any(ch in forbidden for ch in group_folder_name.data):
            raise ValidationError(r"グループフォルダー名に.!#$%&'*+\/=?^_`{|}~-は含められません。")

    def validate_group_folder_version(self, group_folder_version):
        """Require a version string."""
        if group_folder_version.data == "":
            raise ValidationError("グループフォルダーバージョンを入力してください")
        # NOTE(review): a commented-out numeric check existed here whose
        # condition looked inverted (raised "must be numeric" when the value
        # *was* numeric); it is left unimplemented — confirm the intended rule.

    def validate_group_folder_memo(self, group_folder_memo):
        """Limit the memo to 200 characters."""
        if len(group_folder_memo.data) > 200:
            raise ValidationError("備考は200文字内で入力してください")
| true
|
3062da9b5ccb35a82541ad46c520369ff1acabc0
|
Python
|
kun-cockpit-tech/nlp
|
/rs/session_1/minist.py
|
UTF-8
| 2,166
| 2.734375
| 3
|
[] |
no_license
|
from sklearn import datasets
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn.metrics import accuracy_score
import pickle
def read_data(data_file):
    """Load a gzipped MNIST pickle and return (train_x, train_y, test_x, test_y).

    The validation split stored in the file is read but not returned,
    matching the original behaviour.
    """
    import gzip
    handle = gzip.open(data_file, "rb")
    # _Unpickler with encoding="bytes" lets Python 3 read pickles that were
    # written by Python 2 (the stock loader chokes on them).
    unpickler = pickle._Unpickler(file=handle, fix_imports=True,
                                  encoding="bytes", errors="strict")
    train, val, test = unpickler.load()
    handle.close()
    return train[0], train[1], test[0], test[1]
# Path to the gzipped MNIST pickle (adjust to your environment).
data_file = "F:/py_workspace/nlp_session/nlp_data/mnist.pkl.gz"
train_x, train_y, test_x, test_y = read_data(data_file)
# Earlier experiment with the small sklearn digits dataset, kept for reference:
# digits = datasets.load_digits()
# data = digits.data
# target = digits.target
# print(data.shape)
# print(digits.images[0])
# print(digits.target)
# # plt.gray()
# # plt.imshow(digits.images[0])
# # plt.show()
# train_x,test_x,train_y,test_y = train_test_split(data,target,test_size=25,random_state=33)
# Standardise features. Bug fix: fit the scaler on the training data only and
# reuse its statistics for the test data — the original called fit_transform
# on the test set, which leaks test statistics and mismatches the train scale.
ss = preprocessing.StandardScaler()
train_ss_x = ss.fit_transform(train_x)
test_ss_x = ss.transform(test_x)
# Logistic regression (alternative models below, kept for experimenting)
model = LogisticRegression()
# Naive Bayes
# from sklearn.naive_bayes import MultinomialNB
# model = MultinomialNB(alpha=0.01)
# kNN
# from sklearn.neighbors import KNeighborsClassifier
# model = KNeighborsClassifier()
# Random forest
# from sklearn.ensemble import RandomForestClassifier
# model = RandomForestClassifier(n_estimators=8)
# Decision tree
# from sklearn import tree
# model = tree.DecisionTreeClassifier()
# GBDT
# from sklearn.ensemble import GradientBoostingClassifier
# model = GradientBoostingClassifier(n_estimators=200)
# SVM
# from sklearn.svm import SVC
# model = SVC(kernel='rbf', probability=True)
# Bug fix: train on the scaled features so training and prediction use the
# same representation (the original fitted on raw train_x but predicted on
# scaled test_ss_x).
model.fit(train_ss_x, train_y)
predict_y = model.predict(test_ss_x)
print(test_y)
print(predict_y)
accuracy = accuracy_score(test_y, predict_y)
print(accuracy)
| true
|
d59e19866e8d8d1cfacb65d3ff1d935bb6e72cd1
|
Python
|
shreyb/decisionengine_modules
|
/util/testutils.py
|
UTF-8
| 1,003
| 3.296875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
'''
Utils to simplify testing
'''
# These imports needed for the `eval` blocks
from classad import classad # noqa
import datetime # noqa
import pandas as pd # noqa
def input_from_file(fname):
    """Read *fname* and return the result of evaluating its contents.

    The module-level imports (classad, datetime, pandas) exist so that the
    evaluated expressions may reference them.
    """
    with open(fname) as handle:
        contents = handle.read()
    return eval(contents)
def raw_input_from_file(fname):
    """Return the raw text contents of *fname* without evaluating them."""
    with open(fname) as handle:
        return handle.read()
def compare_dfs(df1, df2):
    """
    Return True when two data frames have identical shape and cell values.

    df.equals proved unreliable for these frames, so cells are compared
    one by one with ``==`` (note: NaN != NaN under this comparison).

    :type df1: :class:`pd.DataFrame`
    :arg df1: data frame instance
    :type df2: :class:`pd.DataFrame`
    :arg df2: data frame instance
    :rtype: :obj:`bool` - True if equal
    """
    if df1.shape != df2.shape:
        return False
    rows, cols = df1.shape
    return all(df1.iloc[r, c] == df2.iloc[r, c]
               for r in range(rows)
               for c in range(cols))
| true
|
9972efb39ccb2b49b4df3a50cc0825ef17045f87
|
Python
|
zx2229/web-scraping-with-python
|
/python3/chapter4/testUrllib2.py
|
UTF-8
| 920
| 2.890625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
__author__ = 'hstking hstking@hotmail.com'
import urllib.request, urllib.error, urllib.parse
def clear():
    """Pause for three seconds, then clear the terminal (cls on Windows,
    clear elsewhere).

    Bug fix: the module only imports urllib, so the original raised
    NameError on time/platform/os when called; import them locally here.
    """
    import os
    import platform
    import time
    print('内容较多,显示3秒后翻页')
    time.sleep(3)
    if platform.system() == 'Windows':
        os.system('cls')
    else:
        os.system('clear')
def linkBaidu():
    """Fetch http://www.baidu.com, store the body in ./baidu.txt and print
    the response URL, status code and headers."""
    url = 'http://www.baidu.com'
    try:
        response = urllib.request.urlopen(url, timeout=3)
    except urllib.error.URLError:
        print("网络地址错误")
        exit()
    # Bug fix: response.read() returns bytes, so the file must be opened in
    # binary mode ('wb'); writing bytes to a text-mode handle raises TypeError.
    with open('./baidu.txt', 'wb') as fp:
        fp.write(response.read())
    print(("获取url信息,response.geturl() \n: %s" % response.geturl()))
    print(("获取返回代码,response.getcode() \n: %s" % response.getcode()))
    print(("获取返回信息,response.info() \n: %s" % response.info()))
    print("获取的网页内容已存入当前目录的baidu.txt中,请自行查看")
if __name__ == '__main__':
linkBaidu()
| true
|
1bffd9c1c7d06144dec9a950321a41df272af715
|
Python
|
antiHiXY/OOP_and_Num_Methods
|
/Third_Task/main.py
|
UTF-8
| 1,631
| 3.15625
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt
import numpy as np
import sympy as smp
from scipy import integrate
import num_integr as ni
figure_size_const = (13, 6.7)
a = 0
x0 = 2
def make_plot(name, steps, errors):
    """Show the integration error against the block count on a log-log scale.

    name   -- plot title (usually the integrand's formula)
    steps  -- numbers of integration blocks (x axis)
    errors -- corresponding absolute errors (y axis)
    """
    plt.figure(figsize=figure_size_const)
    plt.title(name)
    plt.loglog(steps, errors, '-o', linewidth=2, markersize=4)
    plt.grid(True)
    plt.xlabel('Number of Blocks')
    plt.ylabel('Error')
    plt.show()
x_sym = smp.Symbol ('x')
list_of_symbolic_functions = [5 / (2 + 3 * x_sym * x_sym), 2 / (5 + smp.cos (x_sym)),
(3 + 4 * x_sym * x_sym)**1/3, smp.exp (-x_sym * x_sym) * 2 / (np.pi**(1/2)),
smp.log (x_sym + 2) / (x_sym + 2)]
functions = [smp.lambdify(x_sym, f) for f in list_of_symbolic_functions]
list_of_analytical_integrals = [smp.lambdify (x_sym, smp.integrate (f, x_sym))(x0) for f in list_of_symbolic_functions]
number_of_blocks = [2**(n + 1) for n in range(15)]
list_of_function_names = ["5 / (2 + 3 * x^2)","2 / (5 + cos (x))", "(3 + 4 * x^2)^(1/3)",
"2 / sqrt (pi) * exp (-x^2)", "ln (x + 2) / (x + 2)"]
list_of_integrals = [ni.IntegrateRightRect, ni.IntegrateLeftRect, ni.IntegrateMidRect, ni.IntegrateTrapez, ni.IntegrateSimpson]
for i, function in enumerate (functions):
for Constr in list_of_integrals:
list_of_errors = list ()
for blocks in number_of_blocks:
numerical_integral = Constr (a = a, f = function, num_blocks = blocks)
list_of_errors.append (abs (numerical_integral (x0) - list_of_analytical_integrals[i]))
make_plot (list_of_function_names[i], number_of_blocks, list_of_errors)
| true
|
280491a08ce14be7d4d92753511ddc862c669fe4
|
Python
|
KerinPithawala/Visualizing
|
/main2.py
|
UTF-8
| 298
| 2.609375
| 3
|
[] |
no_license
|
import pandas as pd
import matplotlib.pyplot as mp
# Behavioural-risk dataset for cervical cancer (sobar-72).
df = pd.read_csv('sobar-72.csv')
#df.plot('behavior_eating','attitude_consistency','attitude_spontaneity')
#scatter-plot for behaviour eating with cervix cancer
# NOTE(review): despite the comment above, the plot below actually uses
# attitude_spontaneity on the x-axis, not behavior_eating.
df.plot(kind = 'scatter', x = 'attitude_spontaneity', y = 'ca_cervix')
mp.show()
| true
|
31a0b363bba5bad91f1d55078a25caf128148bb5
|
Python
|
missionpinball/mpf
|
/mpf/core/bcp/bcp_client.py
|
UTF-8
| 1,085
| 2.625
| 3
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
"""Base class for all bcp clients."""
import abc
from mpf.core.mpf_controller import MpfController
class BaseBcpClient(MpfController, metaclass=abc.ABCMeta):
    """Abstract base class for all BCP clients.

    Concrete clients implement the connection, messaging and shutdown
    primitives declared below.
    """

    __slots__ = ["name", "bcp", "exit_on_close"]

    def __init__(self, machine, name, bcp):
        """Initialise client state (does not connect)."""
        super().__init__(machine)
        self.name = name    # identifier of this client
        self.bcp = bcp      # owning BCP object
        # presumably: exit MPF when this client's link closes — confirm
        self.exit_on_close = False

    async def connect(self, config):
        """Actively connect client."""
        raise NotImplementedError("implement")

    async def read_message(self):
        """Read one message from client."""
        raise NotImplementedError("implement")

    def accept_connection(self, receiver, sender):
        """Handle incoming connection from remote client."""
        raise NotImplementedError("implement")

    def send(self, bcp_command, kwargs):
        """Send data to client."""
        raise NotImplementedError("implement")

    def stop(self):
        """Stop client connection."""
        raise NotImplementedError("implement")
| true
|
c13b1cb7908e6a2250ff2e031e708d57d1d40aab
|
Python
|
mirittw/WorldOfGames
|
/WorldOfGames/MainScores.py
|
UTF-8
| 1,071
| 2.640625
| 3
|
[] |
no_license
|
from flask import Flask
import Utils
app = Flask(__name__)
@app.route('/scores', methods=['GET', 'POST', 'DELETE'])
def score_server():
    """Return an HTML page with the sum of all scores in the scores file.

    On any error an HTML page showing the exception arguments is returned
    instead (the original best-effort behaviour is preserved).
    """
    try:
        fullscore = 0
        # Bug fix: 'with' guarantees the handle is closed; the original
        # leaked an open file on every request.
        with open(Utils.SCORES_FILE_NAME, "r") as scores:
            for line in scores.readlines():
                if line != "" and line != "\n":
                    fullscore += int(line)
        return ('<html>' + '\n' +
                '<head>' + '\n' +
                '<title>Scores Game</title>' + '\n' +
                '</head>' + '\n' +
                '<body>' + '\n' +
                f'<h1>The score is <div id="score">{fullscore}</div></h1>' + '\n' +
                '</body>' + '\n' +
                '</html>')
    except BaseException as e:
        return ('<html>' + '\n' +
                '<head>' + '\n' +
                '<title>Scores Game</title>' + '\n' +
                '</head>' + '\n' +
                '<body>' + '\n' +
                f'<h1><div id="score" style="color:red">{e.args}</div></h1>' + '\n' +
                '</body>' + '\n' +
                '</html>')
app.run(host="0.0.0.0", port=5001, debug=True)
| true
|
7b88b365878ba33c5397e4011304926d4b5ab212
|
Python
|
lazarow/decisiontrees-algs-test
|
/models/breast-cancer.CHAID.py
|
UTF-8
| 7,696
| 2.546875
| 3
|
[] |
no_license
|
def findDecision(obj): #obj[0]: 0, obj[1]: 1, obj[2]: 2, obj[3]: 3, obj[4]: 4, obj[5]: 5, obj[6]: 6, obj[7]: 7, obj[8]: 8
# {"feature": "3", "instances": 114, "metric_value": 23.7104, "depth": 1}
if obj[3] == '30-34':
# {"feature": "4", "instances": 26, "metric_value": 8.8135, "depth": 2}
if obj[4] == '0-2':
# {"feature": "1", "instances": 16, "metric_value": 8.6232, "depth": 3}
if obj[1] == '50-59':
return 'no'
elif obj[1] == '40-49':
# {"feature": "8", "instances": 5, "metric_value": 4.2426, "depth": 4}
if obj[8] == 'right_up':
# {"feature": "6", "instances": 2, "metric_value": 2.8284, "depth": 5}
if obj[6]>2:
return 'no'
elif obj[6]<=2:
return 'yes'
else:
return 'yes'
elif obj[8] == 'left_low':
return 'yes'
elif obj[8] == 'right_low':
return 'no'
elif obj[8] == 'left_up':
return 'yes'
else:
return 'yes'
elif obj[1] == '60-69':
return 'no'
elif obj[1] == '30-39':
return 'no'
else:
return 'no'
elif obj[4] == '6-8':
# {"feature": "8", "instances": 4, "metric_value": 2.8284, "depth": 3}
if obj[8] == 'right_low':
# {"feature": "6", "instances": 2, "metric_value": 2.8284, "depth": 4}
if obj[6]<=2:
return 'yes'
elif obj[6]>2:
return 'no'
else:
return 'no'
elif obj[8] == 'right_up':
return 'no'
elif obj[8] == 'left_low':
return 'no'
else:
return 'no'
elif obj[4] == '9-11':
return 'yes'
elif obj[4] == '3-5':
# {"feature": "1", "instances": 2, "metric_value": 2.8284, "depth": 3}
if obj[1] == '50-59':
return 'no'
elif obj[1] == '60-69':
return 'yes'
else:
return 'yes'
elif obj[4] == '15-17':
return 'no'
else:
return 'no'
elif obj[3] == '25-29':
# {"feature": "4", "instances": 20, "metric_value": 10.2593, "depth": 2}
if obj[4] == '0-2':
# {"feature": "8", "instances": 15, "metric_value": 8.1801, "depth": 3}
if obj[8] == 'left_low':
# {"feature": "0", "instances": 7, "metric_value": 3.7236, "depth": 4}
if obj[0] == 'no-recurrence-events':
# {"feature": "1", "instances": 6, "metric_value": 4.0, "depth": 5}
if obj[1] == '40-49':
# {"feature": "2", "instances": 2, "metric_value": 2.8284, "depth": 6}
if obj[2] == 'premeno':
return 'yes'
elif obj[2] == 'ge40':
return 'no'
else:
return 'no'
elif obj[1] == '50-59':
return 'no'
elif obj[1] == '60-69':
return 'no'
else:
return 'no'
elif obj[0] == 'recurrence-events':
return 'yes'
else:
return 'yes'
elif obj[8] == 'left_up':
return 'no'
elif obj[8] == 'right_up':
return 'no'
elif obj[8] == 'right_low':
return 'no'
else:
return 'no'
elif obj[4] == '3-5':
return 'yes'
elif obj[4] == '6-8':
return 'yes'
elif obj[4] == '15-17':
return 'no'
elif obj[4] == '9-11':
return 'no'
else:
return 'no'
elif obj[3] == '20-24':
# {"feature": "8", "instances": 16, "metric_value": 6.6563, "depth": 2}
if obj[8] == 'left_up':
# {"feature": "1", "instances": 6, "metric_value": 6.6921, "depth": 3}
if obj[1] == '50-59':
return 'no'
elif obj[1] == '40-49':
return 'no'
elif obj[1] == '30-39':
return 'yes'
elif obj[1] == '60-69':
return 'no'
else:
return 'no'
elif obj[8] == 'left_low':
# {"feature": "1", "instances": 5, "metric_value": 5.4142, "depth": 3}
if obj[1] == '50-59':
return 'no'
elif obj[1] == '40-49':
return 'no'
elif obj[1] == '60-69':
return 'yes'
else:
return 'yes'
elif obj[8] == 'central':
return 'no'
elif obj[8] == 'right_up':
# {"feature": "0", "instances": 2, "metric_value": 2.8284, "depth": 3}
if obj[0] == 'recurrence-events':
return 'yes'
elif obj[0] == 'no-recurrence-events':
return 'no'
else:
return 'no'
else:
return 'yes'
elif obj[3] == '15-19':
# {"feature": "5", "instances": 12, "metric_value": 5.5777, "depth": 2}
if obj[5] == 'no':
# {"feature": "1", "instances": 10, "metric_value": 7.2779, "depth": 3}
if obj[1] == '60-69':
# {"feature": "6", "instances": 4, "metric_value": 3.8637, "depth": 4}
if obj[6]<=2:
return 'no'
elif obj[6]>2:
return 'yes'
else:
return 'yes'
elif obj[1] == '50-59':
return 'no'
elif obj[1] == '40-49':
return 'no'
elif obj[1] == '30-39':
return 'no'
else:
return 'no'
elif obj[5] == 'yes':
return 'yes'
else:
return 'yes'
elif obj[3] == '40-44':
# {"feature": "1", "instances": 11, "metric_value": 8.4734, "depth": 2}
if obj[1] == '50-59':
return 'no'
elif obj[1] == '40-49':
# {"feature": "4", "instances": 3, "metric_value": 4.2426, "depth": 3}
if obj[4] == '3-5':
return 'yes'
elif obj[4] == '15-17':
return 'yes'
elif obj[4] == '0-2':
return 'no'
else:
return 'no'
elif obj[1] == '30-39':
return 'no'
elif obj[1] == '60-69':
return 'yes'
elif obj[1] == '70-79':
return 'no'
else:
return 'no'
elif obj[3] == '10-14':
return 'no'
elif obj[3] == '35-39':
# {"feature": "4", "instances": 9, "metric_value": 5.501, "depth": 2}
if obj[4] == '0-2':
# {"feature": "5", "instances": 7, "metric_value": 4.8783, "depth": 3}
if obj[5] == 'no':
return 'no'
elif obj[5] == 'yes':
return 'yes'
else:
return 'yes'
elif obj[4] == '9-11':
return 'yes'
elif obj[4] == '6-8':
return 'no'
else:
return 'no'
elif obj[3] == '50-54':
# {"feature": "7", "instances": 6, "metric_value": 3.4142, "depth": 2}
if obj[7] == 'right':
# {"feature": "6", "instances": 4, "metric_value": 3.8637, "depth": 3}
if obj[6]<=2:
return 'yes'
elif obj[6]>2:
return 'no'
else:
return 'no'
elif obj[7] == 'left':
return 'no'
else:
return 'no'
elif obj[3] == '5-9':
# {"feature": "1", "instances": 2, "metric_value": 2.8284, "depth": 2}
if obj[1] == '40-49':
return 'yes'
elif obj[1] == '30-39':
return 'no'
else:
return 'no'
elif obj[3] == '45-49':
return 'no'
elif obj[3] == '0-4':
return 'no'
else:
return 'no'
| true
|
65d40584ffbab14cc3476803d678aafe83b6ef81
|
Python
|
decodyng/mlgroup5
|
/Kaggle/makeSubmission.py
|
UTF-8
| 998
| 3
| 3
|
[] |
no_license
|
__author__ = 'kensimonds'
def makeSubmission(clf, name):
    """Create a Kaggle submission file from a fitted classifier.

    clf  -- fitted estimator exposing .predict
    name -- basename of the generated "<name>.csv" file

    Reads the Kaggle test set from ../data/test.tsv and writes one
    "PhraseId,Sentiment" row per test phrase.
    """
    with open("../data/test.tsv") as tsv_in, open("%s.csv" % name, "w+") as csv_out:
        phrase_ids = []
        phrases = []
        for raw_line in tsv_in:
            fields = raw_line.replace('\n', '').split('\t')
            phrase_ids.append(fields[0])
            phrases.append(fields[2])
        # Drop the header row from both columns.
        phrase_ids = phrase_ids[1:]
        phrases = phrases[1:]
        predictions = clf.predict(phrases)
        csv_out.write("PhraseId,Sentiment\n")
        for phrase_id, prediction in zip(phrase_ids, predictions):
            csv_out.write(phrase_id + "," + str(prediction) + "\n")
| true
|
a72f097e2a3fc29c0ceb5430e90e4bfb29ff83f8
|
Python
|
Brewgarten/storm-bolt
|
/storm/bolt/configuration.py
|
UTF-8
| 6,094
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
"""
Copyright (c) IBM 2015-2017. All Rights Reserved.
Project name: storm-bolt
This project is licensed under the MIT License, see LICENSE
Serializable configuration functionality
Cluster DSL
-----------
We support the following cluster syntax that includes optional deployments.
.. code-block:: bash
cluster {
name: storm-cluster
nodes: 3
disks: [100,100,100]
}
deployments: [
ssh.AddAuthorizedKey: {
publicKeyPath: ~/.ssh/id_rsa.pub
}
software.UpdateKernel
]
"""
import logging
import os
import re
import time
from c4.utils.hjsonutil import HjsonSerializable
from c4.utils.jsonutil import JSONSerializable
from c4.utils.logutil import ClassLogger
from storm.thunder.configuration import (DeploymentInfos,
getTypedParameter)
log = logging.getLogger(__name__)
DEFAULT_CPUS = 2
DEFAULT_IMAGE_ID = "centos-7.2"
DEFAULT_NUMBER_OF_NODES = 3
DEFAULT_RAM = 2048
@ClassLogger
class ConfigurationInfo(HjsonSerializable, JSONSerializable):
"""
Container for cluster provisioning and deployment information
:param clusterInfo: cluster information
:type clusterInfo: :class:`~ClusterInfo`
:param deploymentInfos: deployments information
:type deploymentInfos: :class:`~DeploymentInfos`
"""
def __init__(self, clusterInfo=None, deploymentInfos=None):
self.cluster = clusterInfo if clusterInfo else ClusterInfo()
self.deploymentInfos = deploymentInfos if deploymentInfos else DeploymentInfos([])
@classmethod
def fromHjson(cls, hjsonString, objectHook=None):
"""
Load object from the specified Hjson string
:param cls: the class to deserialize into
:type cls: class
:param hjsonString: a Hjson string
:type hjsonString: str
:param objectHook: a function converting a Hjson dictionary
into a dictionary containing Python objects. If ``None``
then the default :py:meth:`fromHjsonSerializable` is used
:type objectHook: func
:returns: object instance of the respective class
"""
deploymentInfos = None
# check for deployment infos
deployments = re.search(r"^(?P<deployments>deployments\s*:.*)", hjsonString, re.MULTILINE | re.DOTALL)
if deployments:
deploymentInfos = DeploymentInfos.fromHjson(deployments.group("deployments"), objectHook=objectHook)
hjsonString = hjsonString.replace(deployments.group("deployments"), "")
clusterInfo = ClusterInfo.fromHjson(hjsonString, objectHook=objectHook)
return cls(clusterInfo, deploymentInfos=deploymentInfos)
class ClusterInfo(HjsonSerializable, JSONSerializable):
"""
Cluster information
:param name: cluster name
:type name: str
:param cpus: number of cpus per node
:type cpus: int
:param disks: list of disks capacities in GB
:type disks: list
:param imageId: id of the image to use for the nodes
:type imageId: str
:param nodes: list of node names
:type nodes: [str]
:param numberOfNodes: number of nodes
:type numberOfNodes: int
:param ram: ram in MB per node
:type ram: int
"""
def __init__(
self,
name=None,
cpus=DEFAULT_CPUS,
disks=None,
imageId=DEFAULT_IMAGE_ID,
locationId=None,
nodes=None,
numberOfNodes=DEFAULT_NUMBER_OF_NODES,
ram=DEFAULT_RAM
):
self.name = name if name else "{}-{}".format(os.getlogin(), int(time.time()))
self.cpus = cpus
# include the default OS disk in the size
self.disks = [100] + disks if disks else [100]
self.image = imageId
self.location = locationId
# if specified use node names otherwise generate them
if nodes:
self.nodes = nodes
self.numberOfNodes = len(self.nodes)
else:
self.nodes = [
"node{}".format(i+1)
for i in range(numberOfNodes)
]
self.numberOfNodes = numberOfNodes
self.ram = ram
@classmethod
def fromHjsonSerializable(cls, hjsonDict):
"""
Convert a dictionary from Hjson into a respective Python
objects. By default the dictionary is returned as is.
:param cls: the class to deserialize into
:type cls: class
:param hjsonDict: the Hjson dictionary
:type hjsonDict: dict
:returns: modified dictionary or Python objects
"""
if "cluster" in hjsonDict:
clusterInfoDict = hjsonDict.pop("cluster")
clusterParameters = {
"cpus": getTypedParameter(clusterInfoDict, "cpus", int, default=DEFAULT_CPUS),
"disks": getTypedParameter(clusterInfoDict, "disks", [int]),
"imageId": getTypedParameter(clusterInfoDict, "imageId", str, default=DEFAULT_IMAGE_ID),
"locationId": getTypedParameter(clusterInfoDict, "locationId", str),
"name": getTypedParameter(clusterInfoDict, "name", str, default="{}-{}".format(os.getlogin(), int(time.time()))),
"ram": getTypedParameter(clusterInfoDict, "ram", int, default=DEFAULT_RAM)
}
disks = clusterInfoDict.get("disks", None)
if disks:
clusterParameters["disks"] = getTypedParameter(clusterInfoDict, "disks", [int])
nodes = clusterInfoDict.get("nodes", None)
if nodes:
if isinstance(nodes, list):
clusterParameters["nodes"] = getTypedParameter(clusterInfoDict, "nodes", [str])
else:
clusterParameters["numberOfNodes"] = getTypedParameter(clusterInfoDict, "nodes", int)
for key, value in clusterInfoDict.items():
cls.log.warn("Key '%s' with value '%s' is not a valid cluster config parameter", key, value)
return cls(**clusterParameters)
return hjsonDict
| true
|
e2e7f4d78e2c59680935937521d1f695a542bc23
|
Python
|
vetscience/Tools
|
/Utils/fasta.py
|
UTF-8
| 2,720
| 3.09375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env python
'''
Oct 10, 2017: Pasi Korhonen, The University of Melbourne
A very basic FASTA file reader.
'''
from __future__ import print_function
import sys
from . import base
###############################################################################
class Fasta(base.Base):
    '''A very basic FASTA file reader.

    Headers and sequences are kept in two parallel lists; self.idxs is an
    indirection table that allows reordering without moving the data.
    '''

    ###########################################################################
    def __init__(self, fname=None):
        '''Read and index *fname* when given; otherwise start empty.'''
        self.fname = fname
        self.headers = []   # headers without the leading '>'
        self.seqs = []      # one concatenated sequence string per header
        self.idxs = None    # gives an option to reorder the sequences
        self.totalLen = 0   # total length of all sequences
        if fname is not None:
            self._read()
            self._check()
            # range() works on both Python 2 and 3 (the original used the
            # Python-2-only xrange).
            self.idxs = list(range(len(self.headers)))

    ###########################################################################
    def cnt(self):
        '''Return the number of sequences.'''
        return len(self.headers)

    ###########################################################################
    def header(self, i):
        '''Return the i-th header, honouring the reorder table.'''
        return self.headers[self.idxs[i]]

    ###########################################################################
    def seq(self, i):
        '''Return the i-th sequence, honouring the reorder table.'''
        return self.seqs[self.idxs[i]]

    ###########################################################################
    def __repr__(self):
        '''Return the records as FASTA-formatted text.

        Bug fix: the original printed inside __repr__ and returned None,
        which makes repr(obj) / print(obj) raise TypeError.
        '''
        lines = []
        for i in range(len(self.headers)):
            lines.append(">%s" % self.header(i))
            lines.append(self.seq(i))
        return "\n".join(lines)

    ###########################################################################
    def _read(self):
        '''Parse self.fname, filling headers, seqs and totalLen.'''
        with open(self.fname) as handle:
            seq, found = "", False
            for line in handle:
                line = line.strip()
                if not line:
                    continue
                if line[0] == '>':
                    # Flush the previous record before starting a new one.
                    if found:
                        self.seqs.append(seq)
                        self.totalLen += len(seq)
                    self.headers.append(line[1:])
                    seq = ""
                    found = True
                else:
                    seq += line
            if found:
                self.seqs.append(seq)
                self.totalLen += len(seq)

    ###########################################################################
    def _check(self):
        '''Warn on stderr when header and sequence counts disagree.'''
        if len(self.headers) != len(self.seqs):
            print("## WARNING! Fasta: header count (%d) != sequence count (%d)" % (len(self.headers), len(self.seqs)), file=sys.stderr)
| true
|
3faf7d50cef0b39de2921ef319563b4c676f7e68
|
Python
|
cs-ox-wolf3794/SentimentAnalysis
|
/filterJson.py
|
UTF-8
| 1,591
| 3.03125
| 3
|
[] |
no_license
|
import sys
import json
import difflib
# Input argument is the filename of the JSON ascii file from the Twitter API
filename = sys.argv[1]
filtered_file = sys.argv[2]
tweets_text = [] # We will store the text of every tweet in this list
tweets_location = [] # Location of every tweet (free text field - not always accurate or given)
tweets_timezone = [] # Timezone name of every tweet
# Loop over all lines
f = file(filename, "r")
lines = f.readlines()
fileData = open(filtered_file,'w')
for line in lines:
try:
tweet = json.loads(line)
# Ignore retweets!
if tweet.has_key("retweeted_status") or not tweet.has_key("text"):
continue
# Fetch text from tweet
text = tweet["text"].lower()
# Ignore 'manual' retweets, i.e. messages starting with RT
if text.find("rt ") > -1:
continue
#print text + '\n'
if str(text).__len__() > 10:
print str(text).__len__()
fileData.write(text +'\n')
#tweets_text.append( text )
#tweets_location.append( tweet['user']['location'] )
#tweets_timezone.append( tweet['user']['time_zone'] )
except ValueError:
pass
#fileData.write(tweets_text)
fileData.close()
# Show result
#print tweets_text.newline()
#print tweets_location
#print tweets_timezone
| true
|
c75b961737d6d875958f34368e59d092424b021c
|
Python
|
Haimchen/tu
|
/mpgi4/aufgabe01_src/tomograph.py
|
UTF-8
| 4,713
| 3.34375
| 3
|
[] |
no_license
|
import ellipse
import numpy as np
import matplotlib.pyplot as plt
import math
from numpy import dot
import GaussianElimination as gauss
"""
Sarah Koehler (sarah.c.koehler@gmail.com)
Dora Szuecs (szuucs.dora@gmail.com)
"""
def orthogonalVector(v):
    """
    Return a vector perpendicular to the 2-D vector v
    (v rotated by +90 degrees).

    inputs:
        v :: 2-dimensional vector
    outputs:
        the orthogonal vector as an np.array
    """
    # (-1.0 * ...) keeps the result a float array even for integer input.
    return np.array([-1.0 * v[1], v[0]])
def rotateVector(v, phi):
    """
    Rotate the 2-D vector v counter-clockwise by phi radians.
    """
    c = np.cos(phi)
    s = np.sin(phi)
    rotation = np.array([[c, -s], [s, c]])
    return np.dot(rotation, v)
def calculateStartPoints(shotIndex, nrays, nshots):
"""
Calculates the start points and the direction vector for all rays of one shot
inputs:
shotIndex :: the index of the shot
nrays :: number of rays per shots
nshots :: number of shots
outputs:
startPoints :: an array containing all startPoints of the rays in this shot (nshots x 2)
shotDirVector :: the direction vector of all rays in this shot
"""
# the angle between two shots
phi = np.deg2rad(180 / nshots)
# first value of direction vector
dirVector = np.array([-1, 0])
# distance between two rays
rayOffset = 2 / nrays
shotDirVector = rotateVector(dirVector, shotIndex * phi)
orthVector = orthogonalVector(shotDirVector)
dirHelpVector = np.multiply(shotDirVector, - np.sqrt(2))
startPoint = np.add(dirHelpVector, orthVector)
startPoints = np.ndarray((nrays, 2), float)
for k in range(0, nrays):
v = np.multiply(orthVector, - k * rayOffset)
start = np.add(startPoint, v)
startPoints[k,:] = start
return startPoints, shotDirVector
def sinogramm(nshots, nrays):
    """
    Aufgabe 3 a) -- compute and display the sinogram.

    Inputs:
        nshots :: number of shots
        nrays  :: number of rays per shot
    Output:
        sino :: trace values as an (nshots x nrays) np.array; also shown
                as a grayscale image via matplotlib.
    """
    sino = np.zeros((nshots, nrays))
    for shot in range(nshots):
        # calculateStartPoints encapsulates the per-shot geometry; the
        # original recomputed phi/dirVector/rayOffset here without using them
        # (dead locals, removed).
        starts, direction = calculateStartPoints(shot, nrays, nshots)
        for ray, start in enumerate(starts):
            sino[shot, ray] = ellipse.trace(direction, start)
    # visualize sino
    plt.matshow(sino, cmap=plt.cm.gray)
    plt.show()
    # sino has to be returned for 3 c)
    return sino
def equalMatrix(nshots, nrays, ngrid):
    """
    Aufgabe 3 b) -- build the least-squares system matrix
    (Ausgleichsmatrix) of ray/grid-cell intersection lengths.

    Inputs:
        nshots :: number of shots
        nrays  :: number of rays per shot
        ngrid  :: size of the raster grid (ngrid*ngrid cells)
    Output:
        A :: (nshots*nrays x ngrid*ngrid) np.array of cut lengths
    """
    A = np.zeros((nshots * nrays, ngrid * ngrid))
    for shot in range(nshots):
        starts, direction = calculateStartPoints(shot, nrays, nshots)
        I, G, dt, px, py = ellipse.grid_intersect(ngrid, starts, direction)
        # Row = global ray index, column = grid cell, value = cut length.
        for k in range(I.size):
            A[shot * nrays + I[k], G[k]] = dt[k]
    return A
def solveTg(nshots, nrays, ngrid):
    """
    Aufgabe 3 c) -- reconstruct the tomograph image from the sinogram
    and display it.

    Inputs:
        nshots :: number of shots
        nrays  :: number of rays per shot
        ngrid  :: size of the raster grid (ngrid*ngrid cells)
    Output:
        shows the reconstructed densities via matplotlib
    """
    # Intensities and right-hand side vector b.
    sino = sinogramm(nshots, nrays)
    b = np.reshape(sino, (-1, 1))
    # Solve the normal equations A^T A x = A^T b with Gaussian elimination.
    A = equalMatrix(nshots, nrays, ngrid)
    AT = np.transpose(A)
    M, rhs = gauss.gaussianElimination(np.dot(AT, A), np.dot(AT, b),
                                       use_pivoting=True)
    x = gauss.backSubstitution(M, rhs)
    # Reshape the solution vector into the grid and visualize it.
    densities = np.array(x).reshape((ngrid, ngrid))
    plt.matshow(densities, cmap=plt.cm.gray)
    plt.show()
# example for sinogram in 64 * 64
sinogramm(64, 64)
# example for sinogram in 128 * 128
sinogramm(128, 128)
# example for tomograph graphic with 64*64 Grid
solveTg(128, 128, 64)
| true
|
0e7fa08e310c295764d402886950d1fbe78f11f9
|
Python
|
kennethgoodman/lazynumpy
|
/lazynumpy/internals/evals.py
|
UTF-8
| 735
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
""" file to hold internal _Evals class """
from lazynumpy.util import optimal_eval
# pylint: disable=fixme
class _Evals():
    """Holds delayed matrix-product operands until the chain is forced."""

    def __init__(self, val):
        # Accept either a single operand or an already-built operand list.
        self.vals = val if isinstance(val, list) else [val]

    def __mul__(self, other):
        # TODO: should we use extend instead so it overwrites the value in memory instead of a copy?
        return _Evals(self.vals + other.vals)

    def __call__(self):
        # Pick the cheapest multiplication order, then collapse the chain
        # into a single product and cache it as the sole remaining operand.
        order, _ = optimal_eval.get_cost(self.vals, backtrack=True)
        product = optimal_eval.reduce_tree(order, lambda x, y: x.dot(y))
        self.vals = [product]
        return product
# pylint: enable=fixme
| true
|
f3e64f3d48cbb31fd42de7fc2baf2460147fb372
|
Python
|
MiguelTeixeiraUFPB/PythonM2
|
/algoritmos/PythonM2/Cinema2.py
|
UTF-8
| 796
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
# Price table: snack-combo prices and ticket prices per projection type.
SIMPLES = 17
COMPLETO = 22
FILME2D = 10.50
FILME3D = 17.50

TipoFilme = str(input('digite o tipo de filme [2D] ou [3D] : ')).upper()
lanche = str(input('deseja comprar lanche? ')).upper()

if lanche == 'SIM' and TipoFilme in ('2D', '3D'):
    # Ticket price depends only on the projection type.
    preco_filme = FILME2D if TipoFilme == '2D' else FILME3D
    combo = input('seu combo é simples ou completo: ').upper()
    if combo == 'SIMPLES':
        print('o valor a ser pago é {}'.format(SIMPLES + preco_filme))
    elif combo == 'COMPLETO':
        print('o valor a ser pago é {}'.format(COMPLETO + preco_filme))

print('Bom filme Professora Ana Liz')
| true
|
d5dae998c756ca744ad7e92153e4da9fd9b212d9
|
Python
|
Neeraj-kaushik/Geeksforgeeks
|
/Array/Smallest_Distinct_Window.py
|
UTF-8
| 183
| 3.4375
| 3
|
[] |
no_license
|
def smallest_window(str):
    """Print the length of the smallest substring of *str* containing every
    distinct character of *str* ("smallest distinct window").

    Bug fix: the original printed only the number of distinct characters,
    which under-reports whenever those characters are spread apart (e.g.
    "axxxxb" has 3 distinct characters but its smallest covering window is
    the whole string, length 6). A sliding window computes the true answer.

    Note: the parameter name shadows the builtin ``str``; it is kept
    unchanged for backward compatibility with existing callers.
    """
    text = str
    if not text:
        print(0)
        return
    need = len(set(text))   # every distinct char must appear in the window
    counts = {}             # char -> occurrences inside the current window
    formed = 0              # distinct chars currently covered by the window
    best = len(text)        # the full string is always a valid window
    left = 0
    for right, ch in enumerate(text):
        counts[ch] = counts.get(ch, 0) + 1
        if counts[ch] == 1:
            formed += 1
        # Shrink from the left while the window still covers everything.
        while formed == need:
            best = min(best, right - left + 1)
            out_ch = text[left]
            counts[out_ch] -= 1
            if counts[out_ch] == 0:
                formed -= 1
            left += 1
    print(best)
str = input()
smallest_window(str)
| true
|
95afc782b40b009feeccd2e5cc8976fb2a4c886c
|
Python
|
ejbeaty/scraper
|
/Craigslist_Scraper.py
|
UTF-8
| 10,545
| 2.53125
| 3
|
[] |
no_license
|
from bs4 import BeautifulSoup
from urllib2 import urlopen
import sys
import smtplib
import MySQLdb as mdb
import MySQLdb.cursors
import time
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import configparser
import datetime
import os
import logging
def parse_results(search_term,min_price,max_price,must_have_image,url_prefix,title_only):
    """Scrape today's Craigslist 'for sale' results for one search/region.

    :param search_term: query string (spaces are converted to '+')
    :param min_price: minimum asking price for the URL filter
    :param max_price: maximum asking price for the URL filter
    :param must_have_image: 1 to restrict results to postings with pictures
    :param url_prefix: Craigslist subdomain for the region (e.g. 'sfbay')
    :param title_only: 1 to search post titles only
    :return: list of dicts with keys 'url', 'create_date', 'title', 'price'
    """
    # Craigslist search URL
    BASE_URL = ('http://{4}.craigslist.org/search/'
                'sss?sort=rel&postedToday=1&minAsk={1}&maxAsk={2}&query={0}{3}{5}')
    results = []
    # Translate the numeric flags into URL query-string fragments.
    if must_have_image == 1:
        must_have_image = '&hasPic=1'
    else:
        must_have_image = ''
    if title_only ==1:
        title_only='&srchType=T'
    else:
        title_only=''
    search_term = search_term.strip().replace(' ', '+')
    search_url = BASE_URL.format(search_term,min_price,max_price,must_have_image,url_prefix,title_only)
    logger.info('search url: {0}'.format(search_url))
    #logger.info('')
    soup = BeautifulSoup(urlopen(search_url).read())
    rows = soup.find('div', 'content').find_all('p', 'row')
    for row in rows:
        # Skip absolute and protocol-relative hrefs (postings from other
        # regions); keep only site-relative links and rebuild a full URL.
        if row.a['href'].startswith('http'):
            pass
        elif row.a['href'].startswith('//'):
            pass
        else:
            url = 'http://{0}.craigslist.org'.format(url_prefix) + row.a['href']
            #logger.info('The URL I found on the page is: {0}'.format(url))
            #price = row.find('span', class_='price').get_text()
            # NOTE(review): price scraping is disabled (see the commented
            # line above); an empty string is stored instead.
            price = ''
            create_date = row.find('time').get('datetime')
            title = row.find_all('a')[1].get_text()
            title = title.encode("utf-8")
            d = dict({'url': url, 'create_date': create_date,'title': title,'price': price})
            results.append(d)
    return results
def get_active_searches():
    """Fetch all active, verified searches from the v_searches view.

    :return: tuple of dict rows (id, description, keywords, price bounds,
        image/title flags, email) from MySQL.
    """
    try:
        con = mdb.connect(host=db_host, db=db_database, passwd=db_pass, user=db_user, port=db_port,charset='utf8', cursorclass=MySQLdb.cursors.DictCursor);
        sql = "SELECT `id`,`description`,`keywords`,`price_max`,`price_min`,`must_have_image`,`title_only`,`email` FROM v_searches WHERE `status` = 'active' AND `is_verified`=1"
        #sql = "SELECT `id`,`description`,`keywords`,`price_max`,`price_min`,`must_have_image`,`title_only`,`email` FROM v_searches WHERE `status` = 'active' AND `is_verified`=1 and `Id`=24"
        with con:
            cur = con.cursor()
            cur.execute(sql)
            active_searches = cur.fetchall()
    except Exception as ex:
        logger.error(str(ex))
    # NOTE(review): if the connect/query above failed, active_searches was
    # never assigned and this return raises NameError.
    return active_searches
def interate_through_searches(active_searches):
    """For each active search, scan every configured Craigslist region,
    email any previously unseen postings, and persist them.

    (Function name kept as-is -- 'interate' [sic] -- because callers use it.)

    :param active_searches: iterable of search rows from get_active_searches()
    """
    logger.info(' entering loop for each search')
    new_records = []
    for search in active_searches:
        try:
            search_id=search['id']
            description=search['description']
            keywords = search['keywords']
            min_price=search['price_min']
            max_price=search['price_max']
            must_have_image=search['must_have_image']
            title_only=search['title_only']
            email =search['email']
        except Exception as ex:
            logger.info("Unable to initialize search variables")
            sys.exit(1)
        locations = get_search_locations(search_id)
        #logger.info('locations:{0} '.format(locations))
        for location in locations:
            url_prefix = location['url_prefix']
            #logger.info("Scanning craigslist for search_id: {0}".format(search_id))
            print''
            results = parse_results(keywords,min_price,max_price,must_have_image,url_prefix,title_only)
            logger.info(" Getting results for search id: {0}, location: {1}".format(search_id,location['url_prefix']))
            print''
            new_records = get_new_records(results,search_id)
            try:
                if len(new_records) > 0:
                    #logger.info("Sending email")
                    send_email(email,description,new_records)
                    #logger.info('writing new records for search id: {0}'.format(search_id))
                    write_results(new_records,search_id)
                    logger.info(' records successfully saved')
                else:
                    pass
            except Exception as ex:
                logger.error(str(ex))
        # NOTE(review): this logs only the record count from the *last*
        # location processed, not the total across all locations.
        logger.info(' {0} new records found for search id {1}'.format(len(new_records),search_id))
def get_search_locations(search_id):
    """Return the Craigslist region prefixes configured for one search.

    :param search_id: primary key of the search row
    :return: tuple of dict rows with key 'url_prefix'
    """
    con = mdb.connect(host=db_host, db=db_database, passwd=db_pass, user=db_user, port=db_port,charset='utf8', cursorclass=MySQLdb.cursors.DictCursor);
    # NOTE(review): search_id is interpolated straight into the SQL string;
    # it is an internal integer id here, but a parameterized query would be
    # safer.
    sql = "SELECT `url_prefix` FROM v_search_locations WHERE `search_id` ={0}".format(search_id)
    #logger.info('location sql: {0}'.format(sql))
    print''
    with con:
        cur = con.cursor()
        cur.execute(sql)
        search_locations = cur.fetchall()
    return search_locations
#save new postings
def write_results(new_records,search_id):
    """Bulk-insert newly found postings for a search into ``postings``.

    :param new_records: list of dicts with url/create_date/title/price keys
    :param search_id: search the postings belong to
    """
    #database connection
    con = mdb.connect(host=db_host, db=db_database, passwd=db_pass, user=db_user, port=db_port,charset='utf8', cursorclass=MySQLdb.cursors.DictCursor);
    # Column values come from executemany's dict parameters; only search_id
    # is formatted directly into the statement.
    sql = "INSERT INTO postings(`url`,`date_posted`,`title`,`price`,`search_id`)VALUES(%(url)s,%(create_date)s,%(title)s,%(price)s,{0})".format(search_id)
    # NOTE(review): the format arguments look swapped relative to the
    # message text ("search_id {0}: {1}" receives new_records first).
    logger.info('Saving new postings for search_id {0}: {1}'.format(new_records,search_id))
    with con:
        cur = con.cursor()
        cur.executemany(sql,new_records)
def get_new_records(results,search_id):
    """Filter scraped results down to postings not yet stored for a search.

    :param results: list of posting dicts from parse_results()
    :param search_id: search the postings belong to
    :return: list of posting dicts whose URL is not already in the DB
    """
    new_records =[]
    #database connection
    con = mdb.connect(host=db_host, db=db_database, passwd=db_pass, user=db_user, port=db_port,charset='utf8', cursorclass=MySQLdb.cursors.DictCursor);
    sql = "SELECT `url` FROM postings where `search_id` = {0}".format(search_id)
    with con:
        cur=con.cursor()
        cur.execute(sql);
        seen_posts = cur.fetchall()
    # NOTE(review): is_new is assigned but never used.
    is_new = False
    for post in results:
        # do any of the already seen posts match this post?
        if any(seen_post['url'] == post['url'] for seen_post in seen_posts):
            #logger.info("this post exists {0} ".format(post))
            pass
        else:
            #logger.info("NEW POST: {0}".format(post))
            new_records.append(post)
    #logger.info('')
    return new_records
def send_email(email,description,new_records):
    """Email the list of new postings for one search as text + HTML.

    :param email: recipient address
    :param description: human-readable search description (used in subject)
    :param new_records: list of posting dicts with 'title' and 'url' keys
    """
    # Not actually sure how this gets used or if it does
    me = "MuffinB0t"
    # Create message container - the correct MIME type is multipart/alternative.
    msg = MIMEMultipart('alternative')
    msg['Subject'] = "New results for {0}".format(description)
    msg['From'] = me
    msg['To'] = email
    text = "Hi!Here is the list of new postings for: {0}".format(description)
    html = """\
<html>
<head></head>
<body>
<p>Here is a list of new postings for:
"""
    html=html+description+" <br><ul>"
    logger.info(' building email')
    # One list item per posting; failures on a single row are logged and
    # skipped so one bad record does not kill the whole email.
    for row in new_records:
        try:
            text = text+row['title']+": "+row['url']+""
            html = html+"<br><li><a href='"+row['url']+"'>"+row['title']+"</a></li>"
        except Exception as ex:
            logger.error(str(ex))
    html = html+"""\
</ul>
</p>
</body>
</html>
"""
    logger.info(' done building email')
    # Create the body of the message (a plain-text and an HTML version).
    # Record the MIME types of both parts - text/plain and text/html.
    part1 = MIMEText(text, 'plain')
    part2 = MIMEText(html, 'html')
    # Attach parts into message container.
    # According to RFC 2046, the last part of a multipart message, in this case
    # the HTML message, is best and preferred.
    msg.attach(part1)
    msg.attach(part2)
    # Send the message via local SMTP server.
    server = smtplib.SMTP(email_server)
    server.starttls()
    logger.info(' logging in to smtp server')
    server.login(email_email,email_pass)
    # sendmail function takes 3 arguments: sender's address, recipient's address
    # and message to send - here it is sent as one string.
    logger.info(' sending email')
    try:
        server.sendmail(me, email, msg.as_string())
    except Exception as ex:
        logger.error(str(ex))
    logger.info(' email successfully sent')
    server.quit()
    logger.info(' smtp server successfully quit')
if __name__ == '__main__':
    # Entry point: configure file logging, load config.ini (DB + SMTP
    # credentials), then run one pass over all active searches.
    start_time = time.time()
    # NOTE(review): `global` at module level is a no-op; these names are
    # module globals anyway.
    global logger
    global local_path
    global log_file
    global config_file
    local_path = os.path.dirname(os.path.abspath(__file__))
    log_file = os.path.join(local_path, 'scraper.log')
    config_file = os.path.join(local_path, 'config.ini')
    logger = logging.getLogger('scraper')
    logger.setLevel(logging.DEBUG)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    # create file handler which logs even debug messages
    fh = logging.FileHandler(log_file)
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    try:
        #get config settings
        config = configparser.ConfigParser()
        #database config
        global db_host
        global db_database
        global db_user
        global db_pass
        global db_port
        #smtp settings
        global email_server
        global email_email
        global email_pass
        config.read(config_file)
        db_host = config.get('databaseInfo','host')
        db_database = config.get('databaseInfo','database')
        db_user = config.get('databaseInfo','user')
        db_pass = config.get('databaseInfo','pass')
        db_port = config.getint('databaseInfo','port')
        email_server = config.get('smtpInfo','server')
        email_email = config.get('smtpInfo','email')
        email_pass = config.get('smtpInfo','pass')
    except Exception as ex:
        logger.info("Unable to get config values: {0}".format(ex))
        sys.exit()
    logger.info("||||||| Starting new search {0} |||||||".format(time.strftime("%c")))
    logger.info(" getting active searches")
    active_searches = get_active_searches()
    #logger.info(active_searches)
    interate_through_searches(active_searches)
    end_time = time.time()
    execute_duration = end_time - start_time
    logger.info("Execution duration: {0} seconds\n\n".format(int(execute_duration)))
| true
|
d045f090fcc096fa961ac4a9039708dd0f45cc72
|
Python
|
santhoshramesh2919/CodeKata--Hunter
|
/SubStrIndex_48.py
|
UTF-8
| 191
| 3.5
| 4
|
[] |
no_license
|
def substr(s1, s2):
    """Return the index of the first occurrence of ``s2`` in ``s1``, or -1.

    The original hand-rolled scan had a for-else that returned -1 after
    examining only the first start position, never matched length-1
    substrings (the inner slice was always >= 2 chars), and skipped the
    last start index. ``str.find`` implements the intended contract
    directly (and returns 0 for an empty needle, like find()).
    """
    return s1.find(s2)
# Driver: read the haystack and the needle from stdin, print the match index.
a=input()
b=input()
print(substr(a,b))
| true
|
38feb1d0a92bf846928844bf1839ca7f1c2d055a
|
Python
|
nmichiels/adventofcode2020
|
/day10/day10.py
|
UTF-8
| 1,554
| 2.953125
| 3
|
[] |
no_license
|
from itertools import combinations
import numpy as np
# Part 1: sort the adapter joltages, chain them all starting from the
# 0-jolt outlet, and count the 1- and 3-jolt gaps.
data = np.loadtxt('input.txt', delimiter='\n', dtype=np.int64)
data = np.sort(data, axis=-1)
effective_jolts = 0
chain = []
differences = {1: 0, 2:0, 3:0}
for i, jolts in enumerate(data):
    # NOTE(review): adapters more than 3 jolts above the current level are
    # silently skipped (cannot occur in valid AoC day-10 input).
    if jolts <= effective_jolts + 3:
        differences[jolts-effective_jolts] += 1
        chain.append(jolts)
        effective_jolts = jolts
# The built-in device is always rated 3 jolts above the highest adapter.
chain.append(jolts+3)
effective_jolts += 3
differences[3] += 1
print(chain)
print("Result part 1: ", differences[1] * differences[3])
# Memo table shared across recursive calls, keyed by "(index,prev_jolt)".
prev_results = {}


def get_arrangements(chain, i, prev_jolt):
    """Count the distinct valid adapter arrangements of ``chain[i:]``.

    ``prev_jolt`` is the joltage reached so far; an adapter may follow if
    it is at most 3 jolts higher. The final element of ``chain`` (the
    device) is always kept. Results are memoized in ``prev_results``.
    """
    # Base case: the last element always contributes exactly one path.
    if i + 1 >= len(chain):
        return 1

    def memoized(next_prev):
        # Look up / compute the count for position i+1 with the given
        # preceding joltage, caching under the same key format as before.
        key = "(%d,%d)" % (i + 1, next_prev)
        if key not in prev_results:
            prev_results[key] = get_arrangements(chain, i + 1, next_prev)
        return prev_results[key]

    # Branch 1: keep chain[i]; the next element sees chain[i] as its
    # predecessor.
    total = memoized(chain[i])
    # Branch 2: skip chain[i], allowed only if the next element is still
    # reachable from the previous joltage.
    if chain[i + 1] <= prev_jolt + 3:
        total += memoized(prev_jolt)
    return total
# Part 2: total number of distinct valid adapter arrangements.
num_arrangements = get_arrangements(chain, 0, 0)
# Fix: this prints the part-2 answer; the original label said "part 1" twice.
print("Result part 2: ", num_arrangements)
| true
|
7a53e24993445e7a855070ba6fb7af37af981e08
|
Python
|
s-light/OLA_test_pattern_generator
|
/pattern/gradient.py
|
UTF-8
| 12,566
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python2
# coding=utf-8
"""
gradient pattern.
generates a test pattern:
gradient
logic:
start at: _calculate_step()
calc (new) current position. (position means time)
calls: _calculate_pixels_for_position()
for every pixel:
calc pixel_position
call _calculate_current_pixel_channel_values()
find stops
interpolate between stops
separate 16bit to 2x8bit
set output array with repeating
history:
see git commits
todo:
~ all fine :-)
"""
import pattern
import array
import colorsys
##########################################
# globals
##########################################
# functions
##########################################
# classes
class Gradient(pattern.Pattern):
    """Gradient Pattern Class.

    Renders a color gradient (defined by a list of stops) that scrolls
    across the pixel chain over ``cycle_duration`` seconds, writing 8- or
    16-bit channel values into an output array for OLA.
    """
    def __init__(self, config, config_global):
        """Init pattern.

        :param config: pattern-specific config (merged over config_defaults)
        :param config_global: shared global configuration
        """
        # Defaults: white -> red -> green -> blue over one full cycle.
        # Each stop holds a 0..1 position plus 0..1 channel intensities.
        self.config_defaults = {
            "cycle_duration": 10,
            "position_current": 0,
            "type": "channel",
            "stops": [
                {
                    "position": 0,
                    "red": 1,
                    "green": 1,
                    "blue": 1,
                },
                {
                    "position": 0.3,
                    "red": 1,
                    "green": 0,
                    "blue": 0,
                },
                {
                    "position": 0.7,
                    "red": 0,
                    "green": 1,
                    "blue": 0,
                },
                {
                    "position": 1,
                    "red": 0,
                    "green": 0,
                    "blue": 1,
                },
            ],
        }
        # python3 syntax
        # super().__init__()
        # python2 syntax
        # super(Pattern, self).__init__()
        # explicit call
        pattern.Pattern.__init__(self, config, config_global)

    def _interpolate_channels(self, pixel_position, stop_start, stop_end):
        """Interpolate with channels.

        Linearly interpolates every color channel between two stops at
        ``pixel_position`` and returns a stop-shaped dict.
        """
        # print("interpolate_channels")
        result = {}
        # check for exact match
        if pixel_position == stop_start["position"]:
            result = stop_start.copy()
        else:
            # interpolate all colors
            for color_name in self.color_channels:
                result[color_name] = pattern.map(
                    pixel_position,
                    stop_start["position"],
                    stop_end["position"],
                    stop_start[color_name],
                    stop_end[color_name],
                )
            result["position"] = pixel_position
        return result

    def _interpolate_hsv(self, pixel_position, stop_start, stop_end):
        """Interpolate with hsv.

        Interpolates hue/saturation/value between two stops, scales value
        by the global 'high' brightness, and converts to red/green/blue.
        Stops must carry 'hue', 'saturation' and 'value' keys in this mode.
        """
        # NOTE(review): these prints fire for every pixel on every step --
        # debug output left enabled.
        print("interpolate_hsv")
        result = {}
        # check for exact match
        if pixel_position == stop_start["position"]:
            print("exact")
            red, green, blue = colorsys.hsv_to_rgb(
                stop_start["hue"],
                stop_start["saturation"],
                stop_start["value"]
            )
            result["red"] = red
            result["green"] = green
            result["blue"] = blue
            result["position"] = pixel_position
        else:
            # interpolate all colors
            print("interpolate")
            hsv_values = {}
            for hsv_name in ["hue", "saturation", "value"]:
                hsv_values[hsv_name] = pattern.map(
                    pixel_position,
                    stop_start["position"],
                    stop_end["position"],
                    stop_start[hsv_name],
                    stop_end[hsv_name],
                )
            # multiply with global brightness value:
            global_value = pattern.map_16bit_to_01(self.values['high'])
            hsv_values["value"] = hsv_values["value"] * global_value
            red, green, blue = colorsys.hsv_to_rgb(
                hsv_values["hue"],
                hsv_values["saturation"],
                hsv_values["value"]
            )
            result["red"] = red
            result["green"] = green
            result["blue"] = blue
            result["position"] = pixel_position
        return result

    def _calculate_current_pixel_channel_values(self, pixel_position):
        """Calculate current pixel values.

        Clamps positions outside the stop range to the first/last stop,
        otherwise finds the two surrounding stops and interpolates with
        the configured interpolation function.
        """
        # calculate value:
        # input:
        # current position
        # list of way points
        stops_list = self.stops_list
        result = {}
        # print("_calculate_current_pixel_channel_values:")
        # print("pixel_position:", pixel_position)
        # check bounds
        if pixel_position <= stops_list[0]["position"]:
            # print("min.")
            result = stops_list[0].copy()
        elif pixel_position >= stops_list[len(stops_list)-1]["position"]:
            # print("max.")
            result = stops_list[len(stops_list)-1].copy()
        else:
            # print("search:")
            # we search for the correct stops
            list_index = 1
            while pixel_position > stops_list[list_index]["position"]:
                list_index += 1
            # now list_index contains the first stop
            # where position is < pixel_position
            # interpolate between stops:
            stop_start = stops_list[list_index-1]
            stop_end = stops_list[list_index]
            result = self.interpolation_function(
                pixel_position,
                stop_start,
                stop_end
            )
        return result

    def _calculate_repeat_pixel_index(self, pixel_index, repeate_index):
        """Map a logical pixel index to its channel offset inside one
        repeat block, reversing odd repeats when snake mode is enabled."""
        pixel_offset = (
            self.pixel_count *
            self.color_channels_count *
            repeate_index
        )
        local_pixel_index = pixel_offset + (
            pixel_index * self.color_channels_count
        )
        if self.repeat_snake:
            # every odd index
            if ((repeate_index % 2) > 0):
                # total_pixel_channel_count = (
                # self.pixel_count * self.color_channels_count
                # )
                # local_pixel_index = local_pixel_index
                # Mirror the pixel order within this repeat block.
                local_pixel_index = pixel_offset + (
                    ((self.pixel_count - 1) - pixel_index) *
                    self.color_channels_count
                )
        # print("local_pixel_index", local_pixel_index)
        return local_pixel_index

    def _set_data_output_w_repeat(
        self,
        data_output,
        pixel_index,
        pixel_values_16bit
    ):
        """Write one pixel's channel bytes into ``data_output`` for every
        repeat block. In 16-bit mode each channel occupies two bytes
        (high, low); otherwise only the high byte is written."""
        mode_16bit = self.mode_16bit
        for repeate_index in xrange(0, self.repeat_count):
            local_pixel_index = self._calculate_repeat_pixel_index(
                pixel_index,
                repeate_index
            )
            # set colors for pixel:
            for pixel_values_index in xrange(self.color_channels_count):
                color_offset = pixel_values_index
                if mode_16bit:
                    color_offset = color_offset * 2
                # print("color_offset", color_offset)
                output_channel_index = local_pixel_index + color_offset
                # write data
                if mode_16bit:
                    data_output[output_channel_index + 0] = (
                        pixel_values_16bit[pixel_values_index][0]
                    )
                    data_output[output_channel_index + 1] = (
                        pixel_values_16bit[pixel_values_index][1]
                    )
                else:
                    data_output[output_channel_index + 0] = (
                        pixel_values_16bit[pixel_values_index][0]
                    )

    def _calculate_pixels_for_position(
        self,
        data_output,
        position_current
    ):
        """Fill ``data_output`` for every pixel: each pixel samples the
        gradient at its own offset from ``position_current`` (wrapping at
        1.0), so the whole gradient is spread across the chain."""
        pixel_count = self.pixel_count
        color_channels = self.color_channels
        for pixel_index in xrange(0, pixel_count):
            # map gradient to pixel position
            pixel_position_step = 1.0 * pixel_index / pixel_count
            pixel_position = position_current + pixel_position_step
            # check for wrap around
            if pixel_position > 1.0:
                pixel_position -= 1.0
                # print("handle wrap around")
            # print("pixel_position", pixel_position)
            # calculate current values
            pixel_values = self._calculate_current_pixel_channel_values(
                pixel_position
            )
            # pixel_values_16bit = []
            # # pre calculate 16bit values
            # for color_name in color_channels:
            #     # calculate high and low byte
            #     value = pattern.calculate_16bit_parts(
            #         pattern.map_01_to_16bit(
            #             pixel_values[color_name]
            #         )
            #     )
            #     pixel_values_16bit.append(value)
            # is list-comprehension faster?:
            # https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions
            # pixel_values_16bit = [pattern.calculate_16bit_parts(
            #     pattern.map_01_to_16bit(
            #         pixel_values[color_name]
            #     )
            # ) for color_name in color_channels]
            # try without the function call
            # Scale 0..1 channel value to 0..65535 and split into bytes.
            pixel_values_16bit = [pattern.calculate_16bit_parts(
                int(65535 * pixel_values[color_name])
            ) for color_name in color_channels]
            # print(debug_string)
            # print("0:", data_output)
            self._set_data_output_w_repeat(
                data_output,
                pixel_index,
                pixel_values_16bit
            )
            # print("1:", data_output)

    def _calculate_step(self, universe):
        """Calculate single step.

        Advances the gradient position by one update interval and returns
        the freshly rendered channel byte array for the universe.
        """
        # pattern.Pattern._calculate_step(self)
        # available attributes:
        # global things (readonly)
        # self.channel_count
        # self.pixel_count
        # self.repeat_count
        # self.repeat_snake
        # self.color_channels
        # self.update_interval
        # self.mode_16bit
        # self.values['off']
        # self.values['low']
        # self.values['high']
        # self.config_global[]
        self.update_config()
        # pattern specific updates:
        # NOTE(review): the default type "channel" matches neither prefix
        # below, so it reaches the else branch -- which also selects
        # channel interpolation, so behavior is still correct.
        interpolation_type = self.config['type']
        if interpolation_type.startswith("hsv"):
            self.interpolation_function = self._interpolate_hsv
        elif interpolation_type.startswith("channels"):
            self.interpolation_function = self._interpolate_channels
        else:
            self.interpolation_function = self._interpolate_channels
        self.stops_list = self.config["stops"]
        # prepare temp array
        data_output = array.array('B')
        data_output.append(0)
        # multiply so we have a array with total_channel_count zeros in it:
        # this is much faster than a for loop!
        data_output *= self.total_channel_count
        # fill array with meaningfull data according to the pattern :-)
        position_current = self.config["position_current"]
        # in milliseconds
        cycle_duration = self.config["cycle_duration"] * 1000
        # calculate stepsize
        # step_count = cycle_duration / update_interval
        # cycle_duration = 1.0
        # update_interval = position_stepsize
        position_stepsize = 1.0 * self.update_interval / cycle_duration
        # calculate new position
        position_current = position_current + position_stepsize
        # check for upper bound
        if position_current >= 1:
            position_current -= 1
        # write position_current back:
        self.config["position_current"] = position_current
        # print("position_current", position_current)
        # print("****")
        # generate values for every pixel
        self._calculate_pixels_for_position(
            data_output,
            position_current
        )
        return data_output
##########################################
if __name__ == '__main__':
    # Informational banner only; the module has no standalone behavior.
    import sys  # fix: ``sys`` is used below but never imported at module top

    print(42 * '*')
    print('Python Version: ' + sys.version)
    print(42 * '*')
    print(__doc__)
    print(42 * '*')
    print("This Module has no stand alone functionality.")
    print(42 * '*')
##########################################
| true
|
336a1ebe8cf0b70a74a26e2c65145e1b4afd70c0
|
Python
|
danmenza/hackathon-dashboard
|
/parsePrices.py
|
UTF-8
| 1,113
| 2.640625
| 3
|
[] |
no_license
|
import os
import json
import datetime as dt
import pandas as pd
# Load the client list and build a reverse map from ticker symbol to the
# client key it belongs to.
with open('clientList.json') as f:
    client_list = json.load(f)

ticker_map = {v['ticker']: k for k, v in client_list.items()}
def dt_compile(d, t):
    """Combine a ``MM/DD/YYYY`` date string and an ``HH:MM AM/PM`` time
    string into a single ``datetime`` object."""
    combined = "{} {}".format(d, t)
    return dt.datetime.strptime(combined, '%m/%d/%Y %I:%M %p')
class DataFile:
    """Wraps one per-ticker CSV price file found under the ``data`` dir."""

    # Class-level location of all price files.
    data_dir = 'data'
    fq_path = os.path.join(os.path.curdir, data_dir)

    def __init__(self, file):
        # ``file`` is the bare filename, e.g. "AAPL.csv".
        self.file = file
        self.fq_file = os.path.join(DataFile.fq_path, file)

    @property
    def symbol(self):
        """Ticker symbol: the filename minus its 4-char ".csv" suffix."""
        return self.file[:-4]

    def __repr__(self):
        return self.symbol

    @property
    def fullname(self):
        """Client name looked up from the module-level ``ticker_map``."""
        return ticker_map[self.symbol]

    @property
    def df(self):
        """Load this file's CSV as a DataFrame, adding a combined
        ``datetime`` column built from the Date and Time columns."""
        frame = pd.read_csv(self.fq_file)
        frame['datetime'] = frame.apply(
            lambda row: dt_compile(row['Date'], row['Time']), axis=1)
        return frame

    @staticmethod
    def collect():
        """Return a dict mapping each symbol to a DataFile, one per file
        present in the data directory."""
        entries = (DataFile(name) for name in os.listdir(DataFile.fq_path))
        return {entry.symbol: entry for entry in entries}
| true
|
7d94ff3cb76a7af29af3f8339d5035f9f0b67c2b
|
Python
|
LubergAlexander/pywal-homeassistant
|
/pywal-homeassistant.py
|
UTF-8
| 1,303
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import json
import requests
# Home Assistant instance and credentials; colors come from pywal's cache.
BASE_URL = "http://snowflake:8123"
TOKEN = "" # created a long lived token in HA
WAL_CACHE_FILE = "/home/alex/.cache/wal/colors.json"
def hex_to_rgb(value):
    """Convert a hex color string (with or without a leading '#') into a
    tuple of integer channel values, e.g. "#1a2b3c" -> (26, 43, 60)."""
    digits = value.lstrip("#")
    # Each channel occupies one third of the hex digits.
    width = len(digits) // 3
    channels = []
    for start in range(0, len(digits), width):
        channels.append(int(digits[start:start + width], 16))
    return tuple(channels)
def call_service(domain, service, data):
    """POST a Home Assistant service call via its REST API.

    :param domain: HA domain, e.g. "light"
    :param service: service name within the domain, e.g. "turn_on"
    :param data: JSON-serializable service payload
    :return: the ``requests.Response`` object
    """
    return requests.post(
        "{base_url}/api/services/{domain}/{service}".format(
            base_url=BASE_URL, service=service, domain=domain
        ),
        json=data,
        headers={"Authorization": "Bearer {token}".format(token=TOKEN)},
    )
def set_light_color(entity_id, color, brightness_pct=30):
    """Turn on one light entity with the given RGB color and brightness.

    :param entity_id: HA entity id, e.g. "light.office_desk"
    :param color: (r, g, b) tuple of 0-255 ints
    :param brightness_pct: brightness percentage (default 30)
    :return: the response from call_service()
    """
    payload = {
        "entity_id": entity_id,
        "brightness_pct": brightness_pct,
        "rgb_color": color,
    }
    return call_service("light", "turn_on", payload)
def main():
    """Read the current pywal palette and push its colors to the lights."""
    with open(WAL_CACHE_FILE, "r") as theme_file:
        theme = json.loads(theme_file.read())
    primary = hex_to_rgb(theme["colors"]["color1"])
    secondary = hex_to_rgb(theme["colors"]["color3"])
    # NOTE(review): `third` is computed but never used.
    third = hex_to_rgb(theme["colors"]["color5"])
    set_light_color("light.office_desk", primary, brightness_pct=40)
    set_light_color("light.nanoleaf", secondary, brightness_pct=70)


if __name__ == "__main__":
    main()
| true
|
40ac69bc595c373be08b2923ba4648a6cc79b994
|
Python
|
venkatram64/python3_work
|
/day04/fib017.py
|
UTF-8
| 106
| 3.390625
| 3
|
[] |
no_license
|
# Print the Fibonacci numbers below 10: 0 1 1 2 3 5 8.
a, b = 0, 1
while True:
    if a >= 10:
        break
    print(a)
    # Advance the pair: a takes b's value, b becomes the next term.
    a, b = b, a + b
| true
|
631276caf10dc23be6c969ce9d70285c912ea101
|
Python
|
knuu/competitive-programming
|
/atcoder/corp/tenka1_2018_d.py
|
UTF-8
| 414
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
# Tenka1 2018 D: split the numbers 1..N into k+1 groups of size k.
# A valid split exists only when N == k*(k+1)/2 for some integer k.
N = int(input())
# NOTE(review): yes_set is never used.
yes_set = []
k = 1
# Find the smallest k with k*(k+1) >= 2N.
while k * (k + 1) < 2 * N:
    k += 1
if k * (k + 1) != 2 * N:
    print("No")
    quit()
ans = []
x = 1
# Row i first copies one element from each earlier row j (ans[j][i-1]),
# then appends k-i fresh values -- presumably so every pair of rows shares
# exactly one element (construction pattern; verify against the editorial).
for i in range(k + 1):
    row = []
    for j in range(i):
        row.append(ans[j][i - 1])
    for j in range(k - i):
        row.append(x)
        x += 1
    ans.append(row)
print("Yes")
print(k + 1)
for row in ans:
    print("{} {}".format(k, ' '.join(map(str, row))))
| true
|
b586b86deb93a392c4a7c518048b86c2604f5e6d
|
Python
|
evanthebouncy/class_and_style
|
/steelbox/subset_selection/other_selections.py
|
UTF-8
| 1,273
| 2.640625
| 3
|
[] |
no_license
|
import numpy as np
import random
import time
import math
from copy import deepcopy
from sklearn.cluster import KMeans
from .knn import score_subset, update_weight
# ----------------- cluster in ( raw_input / embedded class ) space ----------------
def sub_select_cluster(X, Y, n_samples):
    """Select a representative subset of ``n_samples`` points via k-means.

    Fits k-means with ``n_samples`` clusters on ``X`` and returns, for
    each cluster, the data point closest to its centroid (plus its label).

    Dead code removed: the original also ran ``kmeans.predict`` and built
    a per-cluster ``counts`` list (O(n*k) via list.count) that was never
    used.

    :param X: array-like of shape (n_points, n_features)
    :param Y: labels aligned with X (indexable by integer position)
    :param n_samples: number of clusters / selected points
    :return: (X_sub, Y_sub, closest) -- arrays of the selected points and
        labels, and the indices into X of the chosen points
    """
    kmeans = KMeans(n_clusters=n_samples).fit(X)
    # Local import kept from the original, avoiding a hard module-level
    # dependency on sklearn.metrics.
    from sklearn.metrics import pairwise_distances_argmin_min
    # Index (into X) of the sample nearest each cluster centroid.
    closest, _ = pairwise_distances_argmin_min(kmeans.cluster_centers_, X)
    X_sub = [X[i] for i in closest]
    Y_sub = [Y[i] for i in closest]
    return np.array(X_sub), np.array(Y_sub), closest
if __name__ == '__main__':
    # Smoke test: compare kNN scores of a random 100-point subset vs the
    # k-means-selected subset (requires the project's data_raw package).
    def test1():
        from data_raw.artificial import gen_data
        X, Y, X_t, Y_t = gen_data(2000)
        W = np.ones(1000)
        X_rsub, Y_rsub = X[:100, :], Y[:100]
        X_sub, Y_sub, _ = sub_select_cluster(X, Y, 100)
        print ("score of rand subset\n", score_subset(X_rsub, Y_rsub, X, Y, W))
        print ("score of cluster subset\n", score_subset(X_sub, Y_sub, X, Y, W))
    test1()
| true
|
a635aaf53a45999cbaabfd528bcebc736b22a294
|
Python
|
Rodrigodp/leetPy
|
/1281. Subtract the Product and Sum of Digits of an Integer.py
|
UTF-8
| 517
| 3.3125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 5 19:10:39 2020
@author: Rodrigo
"""
import unittest
class Solution:
    """LeetCode 1281: difference between digit product and digit sum."""

    def subtractProductAndSum(self, n: int) -> int:
        """Return (product of n's digits) - (sum of n's digits)."""
        digits = [int(ch) for ch in str(n)]
        product = 1
        for d in digits:
            product *= d
        return product - sum(digits)
class Teste(unittest.TestCase):
    """Sanity check for Solution.subtractProductAndSum."""

    def test(self):
        result = Solution().subtractProductAndSum(4421)
        self.assertEqual(result, 21)
# Run the unittest suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| true
|
d5649950e187c86ef11e965682040a4c85a3d14b
|
Python
|
Quantumgame/dronestorm
|
/runtime_modules/estimate/run_estimate_kalman.py
|
UTF-8
| 1,680
| 2.75
| 3
|
[] |
no_license
|
"""Estimate state from sensor measurements during runtime
run from terminal with
`python run_estimate_kalman.py`
"""
from __future__ import print_function
from dronestorm.comm.redis_util import DBRedis
import dronestorm.comm.redis_util as redis_util
from dronestorm.comm.redis_util import (
REDIS_IMU_CHANNEL, REDIS_SONAR_CHANNEL, REDIS_GPS)
from dronestorm.estimate.kalman import EKF
def run_estimate_kalman():
    """Function to compute the control signals for attitude control
    Reads IMU data from the redis database
    Reads sonar data from the redis database
    Reads gps data from the redis database
    Writes state estimate data to the redis database
    """
    # NOTE(review): numpy is never imported in this module, so `np` raises
    # NameError here.
    R = np.eye(12)
    estimator = EKF(R)
    estimator.initialize(Zgps=0, Zimu=0, Zagl=0)
    db_redis = DBRedis()
    # NOTE(review): the import at the top of the file brings in REDIS_GPS,
    # not REDIS_GPS_CHANNEL -- this line raises NameError as written.
    db_sub = db_redis.subscribe(
        [REDIS_IMU_CHANNEL, REDIS_SONAR_CHANNEL, REDIS_GPS_CHANNEL])
    try:
        print("Running state estimate...Ctrl-c to stop")
        while True:
            # check for new receiver or attitude data
            db_notice = db_sub.get_message(timeout=10)
            if db_notice is not None:
                # NOTE(review): these four readings are fetched but never
                # passed to the estimator.
                rx_data = redis_util.get_rx(db_redis)
                imu = redis_util.get_imu(db_redis)
                agl = redis_util.get_sonar(db_redis)
                gps = redis_util.get_gps(db_redis)
                estimator.step()
                # NOTE(review): x, y, z, roll, ... are undefined names;
                # presumably they should come from the estimator's state
                # after step() -- confirm against the EKF API.
                state_est = [x, y, z, roll, pitch, yaw, dx, dy, dz, omega_x, omega_y, omega_z]
                redis_util.set_cmd(db_redis, state_est)
    except KeyboardInterrupt:
        print("\nInterrupt received: stopping state estimation...")
# Run the estimator loop when invoked as a script.
if __name__ == "__main__":
    run_estimate_kalman()
| true
|
190f5d14d824be3e384730d91d6b67e46ccc03aa
|
Python
|
sahanal-2603/Hacktoberfest-2021
|
/PYTHON/longest_bitonc_subarray.py
|
UTF-8
| 1,093
| 4
| 4
|
[
"MIT"
] |
permissive
|
# Function to find the longest bitonic subarray in a list
def findBitonicSublist(A):
    """Find the longest bitonic (increasing then decreasing) subarray of A.

    Prints the length and the subarray, and also returns them as a
    ``(length, subarray)`` tuple so callers can use the result
    programmatically (the original computed both and returned None).
    Returns None for an empty input.
    """
    if len(A) == 0:
        return None
    # I[i]: length of the longest increasing run ending at A[i].
    I = [1] * len(A)
    for i in range(1, len(A)):
        if A[i - 1] < A[i]:
            I[i] = I[i - 1] + 1
    # D[i]: length of the longest decreasing run starting at A[i].
    D = [1] * len(A)
    for i in reversed(range(len(A) - 1)):
        if A[i] > A[i + 1]:
            D[i] = D[i + 1] + 1
    # Treat each index as the peak; the bitonic run through peak i has
    # length I[i] + D[i] - 1 (the peak is counted in both runs).
    lbs_len = 1
    beg = end = 0
    for i in range(len(A)):
        if lbs_len < I[i] + D[i] - 1:
            lbs_len = I[i] + D[i] - 1
            beg = i - I[i] + 1
            end = i + D[i] - 1
    # print the longest bitonic subarray
    print("The length of the longest bitonic subarray is", lbs_len)
    print("The longest bitonic subarray is", A[beg:end + 1])
    return lbs_len, A[beg:end + 1]
if __name__ == '__main__':
    # Demo input from the original exercise.
    A = [3, 5, 8, 4, 5, 9, 10, 8, 5, 3, 4]
    findBitonicSublist(A)
| true
|
8e360c4843b2b2264e105b6188e2628b27ac2eb4
|
Python
|
kaitlinfrani/Python_Labs
|
/lab-08-kaitlinfrani-main/test.py
|
UTF-8
| 3,460
| 4.3125
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Kaitlin Frani
CPSC 223P-01
Wed Apr 7, 2021
kaitlinfrani@fullerton.edu
"""
import random
lowercase = "abcdefghijklmnopqrstuvwxyz"
# Hangman class
class Hangman:
    """State for one game of hangman: the secret word, tries remaining,
    the partially revealed display word, and the letters guessed so far."""

    def __init__(self, word, triesAllowed):
        self.word = word
        self.triesAllowed = triesAllowed
        # One dash per letter; revealed in place as letters are guessed.
        self.show_word = ["-"] * len(word)
        self.letters_used = ""

    def Guess(self, letter):
        """Pass in a letter (length = 1) and check to see if it is in the word.
        If it is, replace blanks in display word with letter and return True.
        If not, decrement the number of tries left and return False.
        Invalid input (not a single, unused, lowercase letter) is rejected
        without costing a try.
        """
        if len(letter) != 1:
            print("Please enter one leter")
            return False
        # Validate against a fixed alphabet plus our own guess record,
        # rather than the mutable module-level ``lowercase`` string the
        # original depended on.
        if letter not in "abcdefghijklmnopqrstuvwxyz" or letter in self.letters_used:
            print("Enter a letter you havent used")
            return False
        # Fix: record the guess exactly once. The original appended the
        # letter once per character of the word, corrupting the record.
        self.letters_used += letter
        letterFound = False
        for index in range(len(self.word)):
            if self.word[index] == letter:
                self.show_word[index] = letter
                letterFound = True
        if not letterFound:
            self.triesAllowed -= 1
        return letterFound

    def GetNumTriesLeft(self):
        """Return the number of tries left"""
        return self.triesAllowed

    def GetDisplayWord(self):
        """Return the display word (with dashes where letters have not been guessed)
        i.e. the word happy with only the letter 'p' guessed so far would be '--pp-'"""
        return ''.join(self.show_word)

    def GetLettersUsed(self):
        """Return a string with the list of letters that have been used"""
        return self.letters_used

    def GetGameResult(self):
        """Return True (printing the outcome) when the game is over: no
        tries remain (loss) or the word is fully revealed (win); return
        False while the game is still in progress."""
        if self.GetNumTriesLeft() == 0:
            print("You lost.")
            return True
        if self.word == self.GetDisplayWord():
            print("You won.")
            return True
        return False

    def DrawGallows(self):
        """Optional: Return string representing state of gallows"""
        pass
# implement the logic of your game below
if __name__=="__main__":
# Read all the words from the hangman_words.txt file
wordFile = open("hangman_words.txt", "r")
wordFileText = wordFile.read()
wordFile.close()
# Seed the random number generator with current system time
random.seed()
# Convert the string of words in wordFile to a list,
# then get a random word using
# randomIndex = random.randint(min, max)
wordList = wordFileText.split()
randomIndex = random.randint(0, 1)
# Instantiate a game using the Hangman class
game = Hangman(wordList[randomIndex], 9)
# Use a while loop to play the game
while game.GetGameResult() is False:
print(game.GetNumTriesLeft())
print("Please guess a letter.")
print(game.GetDisplayWord())
letter = input()
game.Guess(letter)
lowercase = lowercase.replace(letter,"")
print("")
print("The word was " + game.word)
| true
|
a2c11309be944e310f8905f767117c0c44289a45
|
Python
|
sinister6000/cs599_project
|
/src/prepdata/myCorpus.py
|
UTF-8
| 3,617
| 2.703125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
__metaclass__ = type
import codecs
import os
import types
import prepdata.twokenize as twokenize
from gensim import corpora, utils
from nltk.corpus import stopwords
from nltk.tokenize import TweetTokenizer
# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
# logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
class MyCorpus(object):
    """
    Class that creates a gensim-compatible corpus (i.e., an iterable that yields Bag-of-Words versions of documents,
    one at a time). All linguistic pre-processing needs to happen before transforming a document into a BOW.
    """
    def __init__(self, *args, **kwargs):
        # Dispatch: a single string argument is treated as a serialized
        # corpus file name; anything else as (top_dir, tokenizer) args.
        if len(args) == 1:
            foundOneArg = True
            theOnlyArg = args[0]
        else:
            foundOneArg = False
            theOnlyArg = None
        # NOTE(review): types.StringType is Python 2 only.
        if foundOneArg and isinstance(theOnlyArg, types.StringType):
            self.initializeFromFile(theOnlyArg)
        else:
            self.initializeFromArgs(*args)
    def initializeFromFile(self, fname):
        # NOTE(review): object.__init__ takes no extra arguments, so this
        # super() call raises TypeError as written -- confirm intended base.
        super(MyCorpus, self).__init__(fname)
        # NOTE(review): magic slice -- appears to extract the tokenizer
        # name from a path shaped '<16-char prefix><tokenizer><10-char
        # suffix>'; verify against the corpus file naming scheme.
        self.tokenizer = fname[16:-10]
    def initializeFromArgs(self, *args):
        self.top_dir = args[0]
        self.tokenizer = args[1]
        # Build the token dictionary by streaming over all documents once.
        self.dictionary = corpora.Dictionary(self.iter_documents())
        # remove tokens that appear in only one document
        self.dictionary.filter_extremes(no_below=2, no_above=1.0, keep_n=None)
        self.dictionary.compactify()
    def __iter__(self):
        # Yield one Bag-of-Words vector per document.
        for tokens in self.iter_documents():
            yield self.dictionary.doc2bow(tokens)
    def iter_documents(self):
        """
        Helper function for MyCorpus.__iter__()
        Iterate over all documents in top_directory, yielding a document (=list of utf8 tokens) at a time.
        """
        ven_ids = sorted(os.listdir('../../data/ven'))
        make_ven_index()
        for doc_file in ven_ids:
            with codecs.open('../../data/ven/' + doc_file, 'r', encoding='utf-8') as fin:
                doc = fin.read()
            tokens = tokenize(doc, self.tokenizer)
            yield tokens
# END class MyCorpus
def make_ven_index():
    """
    Create a file that links venue.ID with the offset in MmCorpus.docbyoffset.
    Row format: "<venue id (filename minus '.txt')>\\t<document offset>".

    :return: None
    :rtype: None
    """
    with codecs.open('../../data/ven_id2i.txt', 'w', encoding='utf-8') as ven_id2i:
        # Enumerate in the same sorted order used by iter_documents(), so
        # offsets line up with the corpus document order.
        for i, doc in enumerate(sorted(os.listdir('../../data/ven'))):
            ven_id2i.write('{0}\t{1}\n'.format(doc[:-4], i))
def tokenize(s, tokenizer):
    """
    Tokenizes a string. Returns a different list of tokens depending on which tokenizer is used.
    :param s: string to be tokenized
    :type s: str
    :param tokenizer: identifies tokenizer to use ('twokenize', 'gensim',
        anything else falls back to NLTK's TweetTokenizer)
    :type tokenizer: str
    :return: list of lowercased tokens with punctuation and English
        stopwords removed
    :rtype: []
    """
    # Use `==`, not `is`, to compare strings: identity of string literals
    # is an interning accident and breaks unpredictably (SyntaxWarning on
    # Python 3.8+).
    if tokenizer == 'twokenize':
        tokens = twokenize.tokenize(s)
    elif tokenizer == 'gensim':
        tokens = utils.tokenize(s, lower=True)
    else:
        tokens = TweetTokenizer(preserve_case=False).tokenize(s)
    # list of symbols that can end sentences. twokenize has found these to not be attached to another token.
    # (safe to remove)
    punct = r'.,!!!!????!:;'
    # NLTK english stopwords
    stoplist = stopwords.words('english')
    # lowercase, then drop punctuation tokens and stopwords
    result = [tok.lower() for tok in tokens if tok not in punct]
    result = [tok for tok in result if tok not in stoplist]
    return result
if __name__ == '__main__':
pass
| true
|
86b40094c697ab0e255080ba589fbfbbd7687ef5
|
Python
|
laetitia-teo/replique-v2
|
/blocks.py
|
UTF-8
| 7,275
| 2.90625
| 3
|
[] |
no_license
|
"""
This module defines the different residual blocks for composing our generative
and discriminative model.
"""
import torch
import torch.nn.functional as F
from torch.nn import ModuleList
import utils as ut
### Residual Blocks ###
class ResBlockG(torch.nn.Module):
    """
    Base residual block for the generator.
    """
    def __init__(self, in_ch, h, out_ch):
        """
        Initializes the block.
        The block is mainly composed of two 3x3 convolutions, with 1x1
        convolutions at the beginning and end to change the numbers of
        channels as appropriate.
        h is the "hidden" internal number of channels, smaller than input or
        output channels in general, to have less parameters.
        """
        super(ResBlockG, self).__init__()
        # number of channels to drop in skip connexion
        self.ch_diff = out_ch - in_ch
        self.conv1 = torch.nn.Conv2d(in_ch, h, 1)  # 1x1: in_ch -> h
        self.conv2 = torch.nn.Conv2d(h, h, 3, padding=1)
        self.conv3 = torch.nn.Conv2d(h, h, 3, padding=1)
        self.conv4 = torch.nn.Conv2d(h, out_ch, 1)  # 1x1: h -> out_ch
        # NOTE(review): batchnorm1 is created but its use in forward() is
        # commented out, so it carries unused parameters/buffers.
        self.batchnorm1 = torch.nn.BatchNorm2d(in_ch)
        self.batchnorm2 = torch.nn.BatchNorm2d(h)
        self.batchnorm3 = torch.nn.BatchNorm2d(h)
        self.batchnorm4 = torch.nn.BatchNorm2d(h)
        # When the block widens the channel count, 'cast' produces the
        # extra channels needed for the skip connexion.
        if self.ch_diff > 0:
            self.cast = torch.nn.Conv2d(in_ch, self.ch_diff, 1)
    def forward(self, f_map):
        # convolutional path:
        # ReLU -> 1x1 -> BN -> ReLU -> 3x3 -> BN -> ReLU -> 3x3 -> BN -> ReLU -> 1x1
        # out = self.batchnorm1(f_map)
        out = F.relu(f_map)
        # out = F.tanh(out)
        # change the number of channels
        out = self.conv1(out)
        out = self.batchnorm2(out)
        out = F.relu(out)
        # first regular convolution
        out = self.conv2(out)
        out = self.batchnorm3(out)
        out = F.relu(out)
        # second one
        out = self.conv3(out)
        out = self.batchnorm4(out)
        out = F.relu(out)
        # change, again, the number of channels
        out = self.conv4(out)
        # skip connexion: slice channels when the block narrows,
        # append cast() channels when it widens
        if self.ch_diff <= 0:
            skip = f_map[:, :out.shape[1], ...]
        else:
            skip = torch.cat([f_map, self.cast(f_map)], 1)
        out = out + skip
        return out
class ResBlockD(torch.nn.Module):
    """
    Residual block for the discriminator.

    Same bottleneck layout as ResBlockG but without batch norm, with an
    optional 2x2 average-pool downsampling applied to both paths.
    """
    def __init__(self, in_ch, h, out_ch, downsample=False):
        super(ResBlockD, self).__init__()
        self.downsample = downsample
        # number of channels the skip path must add (when positive)
        self.ch_diff = out_ch - in_ch
        self.conv1 = torch.nn.Conv2d(in_ch, h, 1)  # 1x1: in_ch -> h
        self.conv2 = torch.nn.Conv2d(h, h, 3, padding=1)
        self.conv3 = torch.nn.Conv2d(h, h, 3, padding=1)
        self.conv4 = torch.nn.Conv2d(h, out_ch, 1)  # 1x1: h -> out_ch
        if self.ch_diff > 0:
            # produces the extra skip channels when the block widens
            self.conv5 = torch.nn.Conv2d(in_ch, self.ch_diff, 1)
        if self.downsample:
            self.avgpool = torch.nn.AvgPool2d(2)
    def forward(self, f_map):
        """
        Forward pass.

        NOTE(review): unlike ResBlockG, the skip path never *drops*
        channels, so out_ch < in_ch would make the final addition fail —
        presumably blocks are only instantiated with out_ch >= in_ch;
        confirm at call sites.
        """
        out = F.relu(f_map)
        # change number of channels
        out = self.conv1(out)
        out = F.relu(out)
        # regular convolutions
        out = self.conv2(out)
        out = F.relu(out)
        out = self.conv3(out)
        out = F.relu(out)
        # average pooling
        if self.downsample:
            out = self.avgpool(out)
        # change again the number of channels
        out = self.conv4(out)
        # skip connexion (downsampled to match when needed)
        if self.downsample:
            add = self.avgpool(f_map)
        else:
            add = f_map
        if self.ch_diff > 0:
            add2 = self.conv5(add)
            add = torch.cat([add, add2], 1)
        out = out + add
        return out
### Misc ###
def mlp_fn(layer_list):
    """Return a factory building an MLP with hidden widths *layer_list*.

    The returned callable mlp(f_in, f_out) builds a fresh
    torch.nn.Sequential of Linear+ReLU hidden layers followed by a final
    Linear output layer.
    """
    def mlp(f_in, f_out):
        # Build a fresh layer list on every call. The original closed over
        # a single shared list, so each successive call to mlp() kept
        # appending to the layers of all previously built networks.
        layers = []
        width = f_in
        for f in layer_list:
            layers.append(torch.nn.Linear(width, f))
            layers.append(torch.nn.ReLU())
            width = f
        layers.append(torch.nn.Linear(width, f_out))
        return torch.nn.Sequential(*layers)
    return mlp
class AggBlock(torch.nn.Module):
    """
    Aggregation block.
    No learnable parameters: the last channel of the feature map is used
    as a per-pixel weight to average the remaining channels over space.
    Input (n, c, h, w) -> output (n, c - 1).
    """
    def __init__(self):
        super(AggBlock, self).__init__()
    def forward(self, f_map):
        # Keep the channel dimension (shape (n, 1, h, w)); the original
        # indexed with [:, -1, ...], which dropped the dim and made the
        # subsequent sum over dims (2, 3) raise on a 3-D tensor.
        a_map = f_map[:, -1:, ...]
        denom = torch.sum(a_map, (2, 3))                        # (n, 1)
        f_map = torch.sum(f_map[:, :-1, ...] * a_map, (2, 3))   # (n, c-1)
        return f_map / denom
class AggBlockv2(torch.nn.Module):
    """
    Second version of the aggregation block.
    In this version we perform a 1x1 conv on all vectors of the feature map
    to predict an attention map used as weights in the mean.
    We also use spatial (x, y) information as input to the aggregation
    convolution.
    """
    def __init__(self, in_ch):
        super(AggBlockv2, self).__init__()
        # maps (features + x + y) to a single-channel attention logit map
        self.conv = torch.nn.Conv2d(in_ch + 2, 1, 1)
    def forward(self, f_map):
        n, c, h, w = f_map.shape
        # assumes ut.make_yx returns per-pixel coordinate grids of shape
        # (n, 1, h, w) so the cat below yields in_ch + 2 channels — TODO
        # confirm against utils.
        y, x = ut.make_yx(f_map, h, w, n)
        f_map2 = torch.cat((f_map, x, y), 1)
        a_map = torch.sigmoid(self.conv(f_map2))  # (n, 1, h, w) weights in (0, 1)
        denom = torch.sum(a_map, (2, 3))          # (n, 1)
        f_map = torch.sum(f_map * a_map, (2, 3))  # (n, c)
        return f_map / denom                      # attention-weighted spatial mean
class Cut(torch.nn.Module):
    """
    Trims a fixed number of trailing rows and columns from a feature map.

    out_img_shape is a (dw, dh) pair: dw elements are removed from the end
    of the third dimension, dh from the end of the fourth.
    """
    def __init__(self, out_img_shape):
        super(Cut, self).__init__()
        self.outs = out_img_shape
    def forward(self, f_map):
        dw, dh = self.outs
        width, height = f_map.shape[2], f_map.shape[3]
        return f_map[..., :width - dw, :height - dh]
### Full Nets ###
class GeneratorConv(torch.nn.Module):
    """
    This class defines the conv net used in the generator.

    The net is not a simple sequential stacking of the residual blocks:
    the conditioning inputs (img, x, y — 6 extra channels in total;
    presumably a 4-channel image+mask plus one-channel x and y grids,
    TODO confirm) are re-concatenated to the running feature map before
    every residual block, hence the `in_ch + 6` below.
    """
    def __init__(self, feature_list):
        """
        Takes as input the list of (in_ch, h, out_ch) tuples defining the
        inputs to the residual block constructor, one tuple per block.
        """
        super(GeneratorConv, self).__init__()
        self.mlist = ModuleList(
            [ResBlockG(in_ch + 6, h, out_ch)
             for in_ch, h, out_ch in feature_list])
    def forward(self, inpt, img, x, y):
        """
        Run the blocks, feeding (out, img, x, y) concatenated on the
        channel dimension into each, and squash the result with tanh.
        """
        out = inpt
        # Debug print of the block index removed from this loop.
        for block in self.mlist:
            out = block(torch.cat((out, img, x, y), 1))
        return torch.tanh(out)
| true
|
6dfe759fb7ee7cf01365b0f6321ae3244db3afb6
|
Python
|
sudeep0901/python
|
/numpy3.py
|
UTF-8
| 4,364
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#%%
import numpy as np
a = np.array([1, 2, 3])
print(a, type(a), type(np.ndarray), type(type))
angles = np.array([0, 30, 45, 60, 90])
print(angles)
print(np.sin(angles))
print(np.sin(angles))
# convert in radians
angles_radians = angles * np.pi/180
print(angles_radians)
print(np.sin(angles_radians))
print(np.radians(angles))
print(np.degrees(angles_radians))
print(np.mean(angles))
print(np.median(angles))
# statistics
salaries = np.genfromtxt('data/salary.csv')
print(salaries)
np.mean(salaries)
np.median(salaries)
np.std(salaries)
salaries.shape
np.var(salaries)
a = np.arange(11) ** 2
a[-2]
a[3:5]
a[:7]
a[:11:2]
a[::-1] # reverse an array
students = np.array([['Alice', 'Balice', 'Calice ', ' Dalice '], [54, 56, 232, 22], [34, 343, 11, 343]])
print(students)
#%%
students[0]
#%%
students[1]
#%%
students[2]
#%%
students[0:2, 2:4]
#%%
students[:,1:2]
#%%
students[:,1:3]
#%%
students[-1,:]
#%%
students[0,...]
#%%
for i in students:
print("i =:",i)
#%%
for element in students.flatten():
print(element)
#%%
for elem in students.flatten(order="F"):
print(elem)
#%%
x = np.arange(12).reshape(3,4)
x
#%%
for i in np.nditer(x):
print(i)
for i in np.nditer(x, order="F"):
print(i)
#%%
for arr in np.nditer(x, op_flags = ["readwrite"]):
arr[...] = arr * arr
x
#%%
a = np.array([['Germany', 'Frane', 'Hungary', "Austria"],
['Berlin', 'Paris','Budapest', 'Vienna']])
a
#%%
a.shape
#%%
a.ravel() #copy made if needed
#%%
a.T # transposed Matrix
#%%
a.T.ravel()
#%%
a.reshape(2,4)
#%%
np.arange(15).reshape(3,5)
#%%
np.arange(15).reshape(5,3)
#%%
a.reshape(-1, 2) # -1 special vlaue signified we do ont know rows
#%%
#splitting arrays
arr = np.arange(20)
arr
#%%
np.split(arr, [1, 7])
#%%
p1, p2 = np.hsplit(arr, 2)
#%%
print(p1, p2)
#%% [markdown]
# # Image Manipulation
#%%
from scipy import ndimage
from scipy import misc
#%%
f = misc.face()
f
#%%
f.shape
print(type(f))
#%%
import matplotlib.pyplot as plt
plt.imshow(f)
#%%
plt.imshow(f[100:450,500:1000])
#%%
s1, s2 = np.split(f, 2)
plt.imshow(s1)
#%%
plt.imshow(s2)
#%%
b1 , b2 = (np.split(f, 2,axis=1))
plt.imshow(b1)
#%%
plt.imshow(b2)
#%%
plt.imshow(np.concatenate((b1, b2)))
#%%
plt.imshow(np.concatenate((b1, b2), axis=1))
#%%
# shallow copy of arrays edit will refelect in origional array
fruits = np.array(['Apple', 'Mango', 'WaterMelon', 'Guava'])
f_1 = fruits.view()
f_2 = fruits.view()
print(f_1)
print(f_2)
#%%
id(f_1)
#%%
id(f_2)
#%%
f_1 is fruits
#%%
f_2[2]="Strawberry"
fruits
print(f_1)
#%%
f_1 = np.array(["Chikoo", "papaya"])
#%%
fruits
#%%
np.append(f_1, "New Fruit1")
np.append(f_2, "New Fruit2")
#%%
fruits
#%%
f_2.reshape(2,2)
#%%
# Deep copy
basket = fruits.copy()
#%%
#deep copied
basket
basket.base is fruits
#%% [markdown]
# # Complex Indexing Using Numpy
#%%
import csv
#%%
a = np.arange(12)**2
a
#%%
indx_1 = [2, 6, 8]
#%%
a[indx_1]
#%%
indx_2 = np.array([[2,4], [8,10]])
indx_2
#%%
a[indx_2] # resulting array is shape is index array
#%%
import pandas as pd
gdp_16 = pd.read_csv("data/gdp_pc.csv")["2016"].values
type(gdp_16)
#%%
gdp_16.shape
#%%
plt.plot(gdp_16)
plt.show()
#%%
np.median(gdp_16)
#%%
gdp_16
#%%
gdp_16 = gdp_16[~np.isnan(gdp_16)] #not nan
#%%
gdp_16
#%%
np.median(gdp_16)
#%%
np.sort(gdp_16)
#%%
np.count_nonzero(gdp_16[gdp_16 > 40000])
#%%
print(np.mean(gdp_16))
#%%
# Indexing withh Boolean arrays
#%%
a = np.arange(16).reshape(4,4)
a
#%%
indx_bool = a > 9
#%%
indx_bool
#%%
a[indx_bool]
#%%
a[indx_bool][0:1]
#%%
np.count_nonzero(a > 6)
#%%
np.sum(a < 6)
#%%
np.sum(a < 6, axis=1)
#%%
np.sum(a < 6, axis=0)
#%%
np.any(a > 8)
#%%
np.all(a < 10)
#%%
np.all(a < 100)
#%%
# indexing structure data
name = ["Alice", "Beth", "Cathy", "Dorothy"]
student_Id = [1,2, 3,4]
score = [85.4, 90.4, 87.66,78.9 ]
#%%
student_data = np.zeros(4, dtype = {'names':('name', 'studentId','score'),
'formats':('U10', 'i4', 'f8')})
#%%
student_data
#%%
student_data['name'] = name
student_data['studentId'] = student_Id
student_data['score'] = score
#%%
student_data
#%%
student_data[1]
#%%
student_data['name']
#%%
student_data[student_data['score'] > 90]
#%%
# Broadcasting Scalers
# Array multiplying with scaler
| true
|
9472dd00f66e53c67a28fb518174f796ebcd08df
|
Python
|
germanbrunini/Microseismic-Tools-Julia-Python
|
/gathers.py
|
UTF-8
| 9,998
| 2.671875
| 3
|
[] |
no_license
|
import os
from scipy.signal import butter, lfilter
from matplotlib import rc, font_manager
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.colors as colors
rc('text', usetex=True)
plt.rc('font', family='serif')
path = 'gathers/';
fdr_fig_ = "gathers/"
filenames = os.listdir(path) ; # list of all file names in folder in path
num_files = np.shape(filenames)[0]; # length of the list: number of files
num_gathers = int(num_files/3);
##########################################
##########################################
for i in range(num_gathers):
iname = str(i+1);
print(iname)
filenamex = path+'datax.txt';
filenamey = path+'datay.txt';
filenamez = path+'dataz.txt';
datax = np.loadtxt(filenamex) ; # extract x-data.
datay = np.loadtxt(filenamey) ; # extract y-data.
dataz = np.loadtxt(filenamez) ; # extract z-data.
[nsx,ntx] = np.shape(datax) ; # dimension of x-data.
print()
print("dimension of x-data")
print("number of samples : ", nsx);
print("number of traces : ", ntx);
[nsy,nty] = np.shape(datay) ; # dimension of y-data.
print()
print("dimension of y-data")
print("number of samples : ", nsy);
print("number of traces : ", nty);
[nsz,ntz] = np.shape(dataz) ; # dimension of z-data.
print()
print("dimension of z-data")
print("number of samples : ", nsz);
print("number of traces : ", ntz);
porc_norm = 0.99 # porcentage of normalize data.
maximx = np.amax(abs(datax)) ; # extract data maximum x normalization.
maximy = np.amax(abs(datay)) ; # extract data maximum x normalization.
maximz = np.amax(abs(dataz)) ; # extract data maximum x normalization.
max = np.amax([maximx,maximy,maximz]) ;
datax = datax/max ; # normalize
datay = datay/max ; # normalize
dataz = dataz/max ; # normalize
datax = datax*porc_norm ; # take a porcentage of normalize data.
datay = datay*porc_norm ; # take a porcentage of normalize data.
dataz = dataz*porc_norm ; # take a porcentage of normalize data.
ts = 0.0 ; # time begins in 0.0 ms
te = 300.0;#nsx ; # time ends at m ms
# Because data is normalize to 1 (or porc of 1)
# each trace occupies 2 vertical units (maximum).
# we need at least 2*nt vertical space in ylim
first_trace = 0 ; # first trace is positioned at zero.
last_trace = 2*(ntx-1); # (nt-1) : number of intervals between nt traces
# 2* : time 2, vertical space.
minY = (first_trace - 1) - 0.2 ;
maxY = (last_trace + 1) + 0.2 ;
lw = 1.5; # linewidth
ls = '-'; # linestyle
alp = 0.6; # alpha ransparency for line
xlab_s = 15; # x-label size
ylab_s = 15; # y-label size
tit_s = 15; # title size
tic_s = 12; # tics size
scale = 3.0; # scales data (1.0: 0 scaling). Warn: makes data bigger.
yticksnum = np.linspace(first_trace,last_trace,ntx); # y tick numbers
ytickslabels = np.tile(((np.linspace(1,ntx,ntx)).astype(np.int64)),1); # y tick labels
fig2,ax2 = plt.subplots(figsize=(6,8));
for i in range(ntx):
# x-plot
gath_X = scale*datax[:,i];
gath_Y = scale*datay[:,i];
gath_Z = scale*dataz[:,i];
plt.subplot(3,1,1)
plt.plot(gath_X + 2*i,
linestyle = ls,
linewidth = lw,
color = 'black',
alpha = alp)
left,right = plt.xlim()
plt.xlim(left = ts) # adjust the left leaving right unchanged
plt.xlim(right = te) # adjust the right leaving left unchanged
plt.tick_params(
axis = "y",
width = 1,
length = 2.5,
direction = "in",
color = "black",
pad = 2.5,
labelsize = tic_s,
labelcolor = "black",
colors = "black",
zorder = 20,
bottom = "on", top = "off", left = "on", right = "off",
labelbottom = "on", labeltop = "off", labelleft = "on", labelright = "off");
bottom, top = plt.ylim()
plt.ylim(bottom = minY)
plt.ylim(top = maxY)
plt.yticks(yticksnum,ytickslabels);
plt.tick_params(
axis = "x",
width = 1,
length = 2.5,
direction = "in",
color = "black",
pad = 2.5,
labelsize = tic_s,
labelcolor = "black",
colors = "black",
zorder = 20,
bottom = "on", top = "off", left = "on", right = "off",
labelbottom = "on", labeltop = "off", labelleft = "on", labelright = "off");
plt.ylabel(r"receiver",fontsize = ylab_s)
plt.title(r"x-component",fontsize = tit_s)
plt.subplot(3,1,2)
# y-plot
plt.plot(gath_Y + 2*i,
linestyle = ls,
linewidth = lw,
color = 'black',
alpha = alp)
left,right = plt.xlim()
plt.xlim(left = ts) # adjust the left leaving right unchanged
plt.xlim(right = te) # adjust the right leaving left unchanged
plt.tick_params(
axis = "y",
width = 1,
length = 2.5,
direction = "in",
color = "black",
pad = 2.5,
labelsize = tic_s,
labelcolor = "black",
colors = "black",
zorder = 20,
bottom = "on", top = "off", left = "on", right = "off",
labelbottom = "on", labeltop = "off", labelleft = "on", labelright = "off");
bottom, top = plt.ylim()
plt.ylim(bottom = minY)
plt.ylim(top = maxY)
plt.yticks(yticksnum,ytickslabels);
plt.tick_params(
axis = "x",
width = 1,
length = 2.5,
direction = "in",
color = "black",
pad = 2.5,
labelsize = tic_s,
labelcolor = "black",
colors = "black",
zorder = 20,
bottom = "on", top = "off", left = "on", right = "off",
labelbottom = "on", labeltop = "off", labelleft = "on", labelright = "off");
plt.ylabel(r"receiver",fontsize = ylab_s)
plt.title(r"y-component",fontsize = tit_s)
plt.subplot(3,1,3)
# z-plot
plt.plot(gath_Z + 2*i,
linestyle = ls,
linewidth = lw,
color = 'black',
alpha = alp)
left,right = plt.xlim()
plt.xlim(left = ts) # adjust the left leaving right unchanged
plt.xlim(right = te) # adjust the right leaving left unchanged
plt.tick_params(
axis = "y",
width = 1,
length = 2.5,
direction = "in",
color = "black",
pad = 2.5,
labelsize = tic_s,
labelcolor = "black",
colors = "black",
zorder = 20,
bottom = "on", top = "off", left = "on", right = "off",
labelbottom = "on", labeltop = "off", labelleft = "on", labelright = "off");
bottom, top = plt.ylim()
plt.ylim(bottom = minY)
plt.ylim(top = maxY)
plt.yticks(yticksnum,ytickslabels);
plt.tick_params(
axis = "x",
width = 1,
length = 2.5,
direction = "in",
color = "black",
pad = 2.5,
labelsize = tic_s,
labelcolor = "black",
colors = "black",
zorder = 20,
bottom = "on", top = "off", left = "on", right = "off",
labelbottom = "on", labeltop = "off", labelleft = "on", labelright = "off");
plt.xlabel(r"time (ms)",fontsize = xlab_s)
plt.ylabel(r"receiver",fontsize = ylab_s)
plt.title(r"z-component",fontsize = tit_s)
plt.tight_layout(w_pad=0.0,h_pad=0.2)
fname = fdr_fig_+'gather_xyz.png'
plt.savefig(fname, bbox_inches='tight',
dpi=300,
facecolor='w',
edgecolor='w',
orientation='portrait',
papertype=None,
transparent=False,
pad_inches=0.1,
frameon=None,
metadata=None)
# fname = fdr_fig_+'gather_xyz_'+iname+'.eps'
# plt.savefig(fname)
# plt.show()
plt.close(fig2)
| true
|
35de5599a8d4f843a9e0bbfc32a58cec545667b0
|
Python
|
HYUNMIN-HWANG/LinearAlgebra_Study
|
/LinearAlgebra_Function/5.05.DifferentialEquation.py
|
UTF-8
| 637
| 3.5625
| 4
|
[] |
no_license
|
# Differential Equation
# http://allman84.blogspot.com/2018/10/sympy-1.html
from sympy import *
x, y, z, t = symbols('x y z t')
f, g, h = symbols('f, g, h', cls=Function)
# Solve a single first-order ODE: y'(t) = -2*y(t)
y = symbols('y', cls=Function)
deq = Eq( y(t).diff(t), -2*y(t) )
result = dsolve( deq, y(t) )
print(result)
# Eq(y(t), C1*exp(-2*t))
# Solve a coupled system of first-order linear ODEs
y1, y2 = symbols('y1 y2', cls=Function)
eq1 = Eq( y1(t).diff(t), -0.02*y1(t)+0.02*y2(t) )
eq2 = Eq( y2(t).diff(t), 0.02*y1(t)-0.02*y2(t) )
result = dsolve( [ eq1, eq2 ] )
print(result)
# [Eq(y1(t), -1.0*C1*exp(-0.04*t) + 1.0*C2), Eq(y2(t), 1.0*C1*exp(-0.04*t) + 1.0*C2)]
| true
|
d47487dd5c9ebc640d3f5a49930887413d584934
|
Python
|
jpjuvo/HEnorm_python
|
/normalizeStaining.py
|
UTF-8
| 5,407
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
import os
import numpy as np
import cv2
def normalizeStaining(imgPath, saveDir='normalized/', unmixStains=False, Io=240, alpha=1, beta=0.15):
    ''' Normalize staining appearence of H&E stained images.
    Produces a normalized copy of the input RGB image to the saveDir path.
    If unmixStains=True, separate H and E images are also saved.
    This is a modified version of the original https://github.com/schaugf/HEnorm_python
    optimized for multiprocessing - June '19 Joni Juvonen
    Speed optimization (~8x speed improvements) by Mikko - https://github.com/mjkvaak/HEnorm_python
    Example use:
    normalizeStaining('image.png', saveDir='normalized/')
    Example use with multiprocessing:
    with Pool(8) as p:
    p.map(normalizeStaining, ImagePathList)
    Input:
    imgPath (string): Path to an RGB input image
    saveDir (string): A directory path where the normalized image copies are saved. If this is None, the function returns the images (default='normalized/'))
    unmixStains (bool): save also H and E stain images
    Io (int): transmitted light intensity (default=240)
    alpha (default=1)
    beta (default=0.15)
    Output (returns only if savePath=None):
    Inorm: normalized image
    H: hematoxylin image
    E: eosin image
    Reference:
    A method for normalizing histology slides for quantitative analysis. M.
    Macenko et al., ISBI 2009
    '''
    # extract name for the savefile
    base = os.path.basename(imgPath)
    name_wo_ext = os.path.splitext(base)[0]
    fn = os.path.join(saveDir, name_wo_ext)
    # create output directory if it doesn't exist
    if not os.path.isdir(saveDir):
        os.mkdir(saveDir)
    # skip if this file already exists
    if (os.path.isfile(fn+'.png')):
        return
    # read image with OpenCV (faster than PIL)
    img = cv2.imread(imgPath)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # reference H&E OD matrix and reference maximum stain concentrations
    HERef = np.array([[0.5626, 0.2159],
                      [0.7201, 0.8012],
                      [0.4062, 0.5581]])
    maxCRef = np.array([1.9705, 1.0308])
    # define height and width of image
    h, w, c = img.shape
    # reshape image to a list of RGB pixels.
    # np.float64 replaces the deprecated np.float alias, which was removed
    # in NumPy 1.24 and made this line raise AttributeError.
    rimg = np.reshape(img.astype(np.float64), (-1,3))
    # calculate optical density
    OD = -np.log((rimg+1)/Io)
    # remove transparent pixels
    ODhat = OD[~np.any(OD<beta, axis=1)]
    # compute eigenvectors and handle some of the common errors that are caused by image colors with unattainable eigenvectors
    eigvals, eigvecs = None, None
    try:
        eigvals, eigvecs = np.linalg.eigh(np.cov(ODhat.T))
    except AssertionError:
        print('Failed to normalize {0}, copying this to output file unaltered.'.format(imgPath))
        cv2.imwrite(fn+'.png', img)
        return
    except np.linalg.LinAlgError:
        print('Eigenvalues did not converge in {0}, copying this to output file unaltered.'.format(imgPath))
        cv2.imwrite(fn+'.png', img)
        return
    # project on the plane spanned by the eigenvectors corresponding to the two
    # largest eigenvalues
    That = ODhat.dot(eigvecs[:,1:3])
    phi = np.arctan2(That[:,1],That[:,0])
    minPhi = np.percentile(phi, alpha)
    maxPhi = np.percentile(phi, 100-alpha)
    vMin = eigvecs[:,1:3].dot(np.array([(np.cos(minPhi), np.sin(minPhi))]).T)
    vMax = eigvecs[:,1:3].dot(np.array([(np.cos(maxPhi), np.sin(maxPhi))]).T)
    # a heuristic to make the vector corresponding to hematoxylin first and the
    # one corresponding to eosin second
    if vMin[0] > vMax[0]:
        HE = np.array((vMin[:,0], vMax[:,0])).T
    else:
        HE = np.array((vMax[:,0], vMin[:,0])).T
    # rows correspond to channels (RGB), columns to OD values
    Y = np.reshape(OD, (-1, 3)).T
    # determine concentrations of the individual stains
    C = np.linalg.lstsq(HE,Y, rcond=None)[0]
    # normalize stain concentrations against the reference maxima
    maxC = np.array([np.percentile(C[0,:], 99), np.percentile(C[1,:],99)])
    tmp = np.divide(maxC,maxCRef)
    C2 = np.divide(C,tmp[:, np.newaxis])
    # recreate the image using reference mixing matrix
    Inorm = np.multiply(Io, np.exp(-HERef.dot(C2)))
    Inorm[Inorm>255] = 254
    Inorm = np.reshape(Inorm.T, (h, w, 3)).astype(np.uint8)
    # unmix hematoxylin and eosin into separate single-stain images
    H = np.multiply(Io, np.exp(np.expand_dims(-HERef[:,0], axis=1).dot(np.expand_dims(C2[0,:], axis=0))))
    H[H>255] = 254
    H = np.reshape(H.T, (h, w, 3)).astype(np.uint8)
    E = np.multiply(Io, np.exp(np.expand_dims(-HERef[:,1], axis=1).dot(np.expand_dims(C2[1,:], axis=0))))
    E[E>255] = 254
    E = np.reshape(E.T, (h, w, 3)).astype(np.uint8)
    if saveDir is not None:
        Inorm = cv2.cvtColor(Inorm, cv2.COLOR_BGR2RGB)
        cv2.imwrite(fn+'.png', Inorm)
        if unmixStains:
            H = cv2.cvtColor(H, cv2.COLOR_BGR2RGB)
            E = cv2.cvtColor(E, cv2.COLOR_BGR2RGB)
            cv2.imwrite(fn+'_H.png', H)
            cv2.imwrite(fn+'_E.png', E)
        return
    else:
        # construct return tuple
        returnTuple = (Inorm,)
        if unmixStains:
            returnTuple = returnTuple + (H, E)
        return returnTuple
| true
|
fbb0cd72d1ce618ff605a9e938df866749634241
|
Python
|
RichyRaj/cpsc-8620-benchmark
|
/test.py
|
UTF-8
| 7,009
| 3.046875
| 3
|
[] |
no_license
|
import duckdb
from time import time
import mysql.connector
class BenchmarkResult:
    '''
    Holds the wall-clock timings (seconds) of one database benchmark run.
    '''
    def __init__(self):
        self.db_name = ""
        self.create = 0
        self.s_insert = 0
        self.m_insert = 0
        self.s_select = 0
        self.c_select = 0
    def __str__(self):
        # Stringify every field first, then fill the report template.
        values = tuple(str(v) for v in (
            self.db_name, self.create, self.s_insert,
            self.m_insert, self.s_select, self.c_select))
        report = """
        =================== TigerDB Benchmark ========================
        ===================== Database : %s =====================
        Create Table : %s s
        Single Insert : %s s
        Multiple Inserts (Across Tables) : %s s
        Single Select : %s s
        Complex Select : %s s
        ======================== GO TIGERS ==========================
        =================== TigerDB Benchmark ========================
        """ % values
        return report
def test_duck_db():
    '''
    Runs the benchmark on DUCK DB.

    Times table creation, a single insert, a batch of inserts across two
    tables, simple selects, and a join+aggregate query against an
    in-memory DuckDB database, and returns the timings (in seconds) as a
    BenchmarkResult.
    '''
    result = BenchmarkResult()
    result.db_name = "Duck DB"
    # Create an in-memory database
    con = duckdb.connect(':memory:')
    # To perform SQL commands
    c = con.cursor()
    # Create Statements
    s = time()
    c.execute("CREATE TABLE sensors(id INTEGER PRIMARY KEY NOT NULL, type VARCHAR(20), location VARCHAR(30))")
    c.execute("CREATE TABLE sensor_data(s_id INTEGER NOT NULL, temp DOUBLE, cpu DOUBLE)")
    st = time()
    result.create = st - s
    # Single Insert
    s = time()
    c.execute("INSERT INTO sensors VALUES (1, 'a', 'floor')")
    st = time()
    result.s_insert = st - s
    # Multiple Inserts - Across Tables
    s = time()
    c.execute("INSERT INTO sensors VALUES (2, 'b', 'ceiling')")
    c.execute("INSERT INTO sensors VALUES (3, 'a', 'floor')")
    c.execute("INSERT INTO sensors VALUES (4, 'a', 'ceiling')")
    c.execute("INSERT INTO sensors VALUES (5, 'b', 'ceiling')")
    # Second Table
    c.execute("INSERT INTO sensor_data VALUES (1, 92.23, 0.87222)")
    c.execute("INSERT INTO sensor_data VALUES (2, 52.23, 0.37222)")
    c.execute("INSERT INTO sensor_data VALUES (3, 22.23, 0.57222)")
    c.execute("INSERT INTO sensor_data VALUES (4, 12.23, 0.27222)")
    c.execute("INSERT INTO sensor_data VALUES (5, 32.23, 0.17222)")
    c.execute("INSERT INTO sensor_data VALUES (1, 12.23, 0.17222)")
    c.execute("INSERT INTO sensor_data VALUES (2, 22.23, 0.57222)")
    c.execute("INSERT INTO sensor_data VALUES (3, 12.23, 0.27222)")
    c.execute("INSERT INTO sensor_data VALUES (4, 12.23, 0.57222)")
    c.execute("INSERT INTO sensor_data VALUES (5, 52.23, 0.67222)")
    st = time()
    result.m_insert = st - s
    # Simple Select
    s = time()
    c.execute("SELECT * FROM sensors")
    c.execute("SELECT * FROM sensor_data")
    st = time()
    result.s_select = st - s
    # JOIN + AGGREGATE + SELECT: per-location maxima over the joined tables
    complex_query = """
    SELECT
        sensors.location,
        MAX(sensor_data.temp) as total_temp,
        MAX(sensor_data.cpu) as total_cpu
    From
        sensors
    LEFT JOIN sensor_data
        ON sensors.id = sensor_data.s_id
    GROUP BY location;
    """
    s = time()
    c.execute(complex_query)
    st = time()
    result.c_select = st - s
    print(c.fetchall())
    return result
def test_mysql_db():
    '''
    Runs the benchmark on MySQL (the original docstring wrongly said
    DUCK DB).

    Same workload as test_duck_db, run against a local MySQL server's
    `cpsc8620` database; returns the timings as a BenchmarkResult.
    '''
    result = BenchmarkResult()
    result.db_name = "MySQL"
    # Create the SQL connection.
    # NOTE(review): credentials are hard-coded here (the comment below
    # claims the password is not real) — confirm before publishing.
    mydb = mysql.connector.connect(
    host="localhost",
    user="root",
    passwd="password" # Not the correct password. Obviously!.
    )
    # To perform SQL commands (buffered so results are fetched eagerly)
    c = mydb.cursor(buffered=True)
    # c.execute("CREATE DATABASE cpsc8620;")
    c.execute("USE cpsc8620;")
    # Drop child table first to satisfy the foreign-key constraint.
    c.execute("DROP TABLE IF EXISTS sensor_data;")
    c.execute("DROP TABLE IF EXISTS sensors;")
    # Create Statements
    s = time()
    c.execute("CREATE TABLE sensors(id INTEGER NOT NULL, type VARCHAR(20), location VARCHAR(30), PRIMARY KEY(id))")
    c.execute("CREATE TABLE sensor_data(s_id INTEGER NOT NULL, temp DOUBLE, cpu DOUBLE, FOREIGN KEY(s_id) REFERENCES sensors(id))")
    st = time()
    result.create = st - s
    # Single Insert
    s = time()
    c.execute("INSERT INTO sensors VALUES (1, 'a', 'floor')")
    st = time()
    result.s_insert = st - s
    # Multiple Inserts - Across Tables
    s = time()
    c.execute("INSERT INTO sensors VALUES (2, 'b', 'ceiling')")
    c.execute("INSERT INTO sensors VALUES (3, 'a', 'floor')")
    c.execute("INSERT INTO sensors VALUES (4, 'a', 'ceiling')")
    c.execute("INSERT INTO sensors VALUES (5, 'b', 'ceiling')")
    # Second Table
    c.execute("INSERT INTO sensor_data VALUES (1, 92.23, 0.87222)")
    c.execute("INSERT INTO sensor_data VALUES (2, 52.23, 0.37222)")
    c.execute("INSERT INTO sensor_data VALUES (3, 22.23, 0.57222)")
    c.execute("INSERT INTO sensor_data VALUES (4, 12.23, 0.27222)")
    c.execute("INSERT INTO sensor_data VALUES (5, 32.23, 0.17222)")
    c.execute("INSERT INTO sensor_data VALUES (1, 12.23, 0.17222)")
    c.execute("INSERT INTO sensor_data VALUES (2, 22.23, 0.57222)")
    c.execute("INSERT INTO sensor_data VALUES (3, 12.23, 0.27222)")
    c.execute("INSERT INTO sensor_data VALUES (4, 12.23, 0.57222)")
    c.execute("INSERT INTO sensor_data VALUES (5, 52.23, 0.67222)")
    st = time()
    result.m_insert = st - s
    # Simple Select
    s = time()
    c.execute("SELECT * FROM sensors")
    c.execute("SELECT * FROM sensor_data")
    st = time()
    result.s_select = st - s
    # JOIN + AGGREGATE + SELECT: per-location maxima over the joined tables
    complex_query = """
    SELECT
        sensors.location,
        MAX(sensor_data.temp) as total_temp,
        MAX(sensor_data.cpu) as total_cpu
    From
        sensors
    LEFT JOIN sensor_data
        ON sensors.id = sensor_data.s_id
    GROUP BY location;
    """
    s = time()
    c.execute(complex_query)
    st = time()
    result.c_select = st - s
    print(c.fetchall())
    return result
if __name__ == "__main__":
r = test_duck_db()
print(r)
r = test_mysql_db()
print(r)
| true
|
dc62f0f58ca4d851a4431fc5f55d783ebe6b1444
|
Python
|
hagarbarakat/LeetCode
|
/238. Product of Array Except Self.py
|
UTF-8
| 520
| 2.921875
| 3
|
[] |
no_license
|
class Solution(object):
    def productExceptSelf(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]

        Classic two-pass prefix/suffix product: for each index i, the
        answer is (product of nums[0..i-1]) * (product of nums[i+1..]).
        O(n) time, O(1) extra space beyond the output list, no division.
        Removed the leftover debug print of the intermediate list.
        """
        n = len(nums)
        prod = [1] * n
        # forward pass: prod[i] = product of all elements left of i
        prefix = 1
        for i in range(n):
            prod[i] = prefix
            prefix *= nums[i]
        # backward pass: fold in the product of all elements right of i
        suffix = 1
        for i in range(n - 1, -1, -1):
            prod[i] *= suffix
            suffix *= nums[i]
        return prod
| true
|
b022190cc3cebb049dc6fbf377099829026a9616
|
Python
|
Zhao-HP/PythonCode
|
/ObjectUtil.py
|
UTF-8
| 663
| 3.015625
| 3
|
[] |
no_license
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
# Convert a database field name to lowerCamelCase
def changeFieldName(fieldName):
    """Convert a snake_case database column name to lowerCamelCase.

    e.g. 'user_name' -> 'userName', 'id' -> 'id'.

    The original popped the *last* underscore-separated part as the
    leading word (producing 'nameUser'), contradicting the stated
    lowerCamelCase intent; the first part must lead.
    """
    parts = str(fieldName).split("_")
    # first word stays as-is, every following word is capitalized
    return parts[0] + ''.join(word.capitalize() for word in parts[1:])
# Parse a query result row into the supplied object
def parseResultToDomain(result, field, Object):
    """Populate *Object*'s attributes from one database row.

    `result` is a row of values and `field` the matching cursor
    description (the first element of each entry is the column name).
    Column names are converted to lowerCamelCase via changeFieldName()
    and matched against the object's non-dunder attribute names; only
    non-None values are assigned. Returns the (mutated) object.
    """
    values = {changeFieldName(field[i][0]): cell
              for i, cell in enumerate(result)}
    for attr in dir(Object):
        if attr.startswith('__'):
            continue
        cell = values.get(attr)
        if cell is not None:
            setattr(Object, attr, cell)
    return Object
| true
|
a4c3191766f05eb98fb1092c3fc5480de2c6ab1a
|
Python
|
wmfxly/emqx_restart_resume_v2
|
/plugin/Test.py
|
UTF-8
| 2,119
| 2.6875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import paho.mqtt.client as mqtt
import time
import traceback
client = mqtt.Client(client_id='emqx_test', clean_session=False)
# Callback for the broker's CONNACK response; rc is 0 when the
# connection was established normally.
def on_connect(mqttClient, userdata, flags, rc):
    print("Connection returned with result code:" + str(rc))
def on_subscribe():
    # time.sleep(0.5)
    client.subscribe("test", 1) # topic "test", QoS 1
    client.subscribe("test123123123", 1)
    client.subscribe("testtopic", 1)
    client.on_message = on_message_come # incoming-message handler
# Incoming-message handler
def on_message_come(mqttClient, userdata, msg):
    print("产生消息", msg.payload.decode("utf-8"))
# Callback invoked when the connection drops; prints the result code
def on_disconnect(mqttClient, userdata, rc):
    print("Disconnection returned result:" + str(rc))
flag = True
# NOTE(review): this module-level retry loop references `mqttClient`,
# which is never defined at module scope (the client object is named
# `client`). The resulting NameError is swallowed by the bare `except`,
# so importing this module loops forever here and everything below is
# unreachable — confirm whether this block is dead code duplicated from
# the __main__ section below.
while flag:
    try:
        # Connect to the broker.
        # connect() blocks until it succeeds or fails; use
        # connect_async() for an asynchronous, non-blocking connect.
        mqttClient.connect('192.168.3.163', 1883, 60)
        flag = False
    except:
        traceback.print_exc()
        time.sleep(1)
mqttClient.loop_start()
def on_socket_close(client, userdata):
    print(client, userdata)
if __name__ == '__main__':
    client.on_connect = on_connect
    client.on_disconnect = on_disconnect
    client.on_socket_close = on_socket_close
    flag = True
    while flag:
        try:
            # Connect to the broker (blocking; see note above about
            # connect_async for a non-blocking variant), retrying every
            # second until it succeeds.
            client.connect('192.168.3.163', 1883, 60)
            flag = False
        except:
            traceback.print_exc()
            time.sleep(1)
    client.loop_start()
    on_subscribe()
    while not flag:
        # Idle forever while the network loop thread handles messages.
        time.sleep(5) # wait for message processing to finish
| true
|