blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
286a75d8d83a1394971efd152e638f58862d1252 | Python | rafaelperazzo/programacao-web | /moodledata/vpl_data/309/usersdata/299/73585/submittedfiles/atm.py | UTF-8 | 161 | 2.78125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from __future__ import division
import math
#COMECE SEU CODIGO AQUI
#entrada
# Read the amount to dispense.
# BUG FIX: the original line was missing the closing parenthesis of int(...),
# so the file did not even parse.
valor = int(input('Digite o valor: '))
# Available banknote denominations, largest first.
a = 20
b = 10
c = 5
d = 2
e = 1
| true |
4eac959e01cab4d612d24a1003ee76393acd3cfc | Python | rolandstaring/machikoro | /content/dice/show_multi_dice.py | UTF-8 | 2,301 | 2.765625 | 3 | [] | no_license |
import sys
import random
from PyQt5 import QtWidgets as qtw
from PyQt5 import QtGui as qtg
from PyQt5 import QtNetwork as qtn
from PyQt5 import QtCore as qtc
class Dice(qtw.QWidget):
    """Widget that displays `nr_of_dices` dice and animates a random roll.

    The roll is animated by re-randomizing every die face each
    `interval_seconds` for 10 rounds, then freezing on the final values
    (readable by indexing, e.g. ``dice[0]``) and closing 2 seconds later.
    """

    def __init__(self, nr_of_dices):
        super().__init__()
        self.title = 'Roll the dice'
        # Window geometry; total width scales with the number of dice.
        self.left = 10
        self.top = 10
        self.width = 150
        self.height = 150
        self.rounds = 0            # animation rounds completed so far
        self.finish = False
        self.nr_of_dices = nr_of_dices
        self.dice_list = []        # one QLabel per die
        self._rnd_list = []        # last rolled values, one int (1-6) per die
        self.interval_seconds = 0.2
        self.initUI()

    def initUI(self):
        """Build the window, the re-roll timer and one QLabel per die."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width * self.nr_of_dices, self.height)
        self.timer = qtc.QTimer()
        # QTimer.setInterval expects milliseconds as an int; the original
        # passed a float (0.2 * 1000 == 200.0), which PyQt5 rejects.
        self.timer.setInterval(int(self.interval_seconds * 1000))
        self.timer.timeout.connect(self.roll_dices)
        self.gridlayout = qtw.QGridLayout()
        self.setLayout(self.gridlayout)
        # Pre-load the six face images (dice/dice_1.jpg .. dice/dice_6.jpg).
        self.pm_list = []
        for n in range(1, 7):
            file_name = 'dice/dice_' + str(n) + '.jpg'
            self.pm_list.append(qtg.QPixmap(file_name))
        for n in range(self.nr_of_dices):
            self.dice = qtw.QLabel(self)
            self.dice_list.append(self.dice)
        for col, dice in enumerate(self.dice_list, start=1):
            self.gridlayout.addWidget(dice, 1, col)
            # BUG FIX: the original called self.dice.setPixmap(...) here, which
            # only ever painted the LAST label; all other dice started blank.
            dice.setPixmap(self.pm_list[0])
        self.show()

    def run(self):
        """Start the roll animation."""
        self.roll_dices()

    def stop(self):
        """Freeze every die on its final value, then close after 2 seconds."""
        self.timer.stop()
        for count, dice in enumerate(self.dice_list):
            dice.setPixmap(self.pm_list[self._rnd_list[count] - 1])
        self.counter = 0
        self.show()
        qtc.QTimer.singleShot(2 * 1000, self.close)

    def roll_dices(self):
        """One animation step; after 10 rounds hand over to stop()."""
        if self.rounds == 10:
            self.stop()
            return None
        self.randomize_all_dices()
        self.rounds += 1
        self.timer.start()
        self.show()

    def randomize_all_dices(self):
        """Give every die a fresh random face and remember the values."""
        rnd_round_l = []
        for dice in self.dice_list:
            dice.clear()
            rnd_int = random.randrange(1, 7, 1)
            dice.setPixmap(self.pm_list[rnd_int - 1])
            rnd_round_l.append(rnd_int)
        self._rnd_list = rnd_round_l

    def __getitem__(self, position):
        """Return the last rolled value of die *position* (0-based)."""
        return self._rnd_list[position]
if __name__ == '__main__':
app = qtw.QApplication(sys.argv)
dice = Dice(1)
sys.exit(app.exec_()) | true |
d05708c7060f7101dcedd9cab999bfa5feeed119 | Python | lioneleoy/FlaskAPI | /Basics/basicClass.py | UTF-8 | 236 | 3.15625 | 3 | [] | no_license | class test:
    def __init__(self):
        # Seed the instance with fixed demo data used by total() below.
        self.name ="lionel"
        self.age = 25
        self.numbers= (1,2,3,4,5)
    def total(self):
        # Sum of the fixed tuple set in __init__ (1+2+3+4+5 == 15).
        result = sum(self.numbers)
        return result
test1 = test()  # module-level demo instance (note: this file is Python 2)
print test1.total() | true |
866268df2d7924f71edde6c7fa631e085cf0bc26 | Python | facebookresearch/vizseq | /vizseq/scorers/_cider/__init__.py | UTF-8 | 6,749 | 2.6875 | 3 | [
"MIT",
"BSD-2-Clause-Views"
] | permissive | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Adapted from
# https://github.com/tylin/coco-caption/blob/master/pycocoevalcap/cider/cider_scorer.py
# (authored by Tsung-Yi Lin <tl483@cornell.edu> and Ramakrishna Vedantam
# <vrama91@vt.edu>)
from typing import List, Dict, Tuple
from collections import defaultdict
import math
from concurrent.futures import ProcessPoolExecutor, as_completed
import numpy as np
from tqdm import tqdm
def _batch(a_list: list, n_batches: int):
batch_size = len(a_list) // n_batches + int(len(a_list) % n_batches > 0)
for i in range(0, len(a_list), batch_size):
yield a_list[i: min(i + batch_size, len(a_list))]
def _extract_sentence_n_grams(s: str, n: int = 4) -> Dict[Tuple[str], int]:
"""
:param s: str : sentence to be converted into n-grams
:param n: int : number of n-grams for which representation is calculated
:return: term frequency vector for n-grams
"""
words = s.split()
counts = defaultdict(int)
for k in range(1, n + 1):
for i in range(len(words) - k + 1):
ngram = tuple(words[i:i + k])
counts[ngram] += 1
return counts
def _batch_extract_n_grams(
    sentences: List[str], n: int = 4
) -> List[Dict[Tuple[str], int]]:
    # Sequential helper: one n-gram frequency dict per input sentence.
    return [_extract_sentence_n_grams(s, n) for s in sentences]
def _multiprocess_batch_extract_n_grams(
    sentences: List[str], n: int = 4, n_workers: int = 1,
    verbose: bool = False
) -> List[Dict[Tuple[str], int]]:
    """Like _batch_extract_n_grams, optionally fanned out over worker processes.

    Results are re-assembled in the original sentence order regardless of the
    order in which batches complete.
    """
    if n_workers == 1:
        return _batch_extract_n_grams(sentences, n)
    else:
        batches = list(_batch(sentences, n_batches=n_workers))
        with ProcessPoolExecutor(max_workers=n_workers) as executor:
            # Map each future back to its batch index so order can be restored.
            futures = {
                executor.submit(_batch_extract_n_grams, b, n): i
                for i, b in enumerate(batches)
            }
            progress = as_completed(futures)
            if verbose:
                progress = tqdm(progress)
            tmp = {futures[future]: future.result() for future in progress}
        result = []
        # Concatenate batch results in ascending batch-index order.
        for k in sorted(tmp):
            result.extend(tmp[k])
        return result
class _CIDErScorer(object):
    # Sentence-level CIDEr scorer: tf-idf weighted n-gram cosine similarity
    # between a hypothesis and its references, with a Gaussian length penalty.
    def __init__(
        self, n: int = 4, sigma: float = 6.0, n_workers: int = 1,
        verbose: bool = False
    ):
        """
        :param n: int : number of n-grams for which representation is calculated
        :param sigma: float : std deviation of the Gaussian length penalty
        :param n_workers: int : worker processes for n-gram extraction
        :param verbose: bool : show a progress bar during extraction
        """
        self.n = n
        self.sigma = sigma
        self.n_workers = n_workers
        self.verbose = verbose
        self.doc_freq = defaultdict(float)  # n-gram -> #examples whose refs contain it
        self.n_examples = 0
        self.ref_len = 1.                   # log(#examples), set in get_sent_scores
        self.refs = None                    # per-reference-set n-gram counts
    def _counts_to_vec(
        self, counts: Dict[Tuple[str], int]
    ) -> Tuple[List[Dict[Tuple[str], float]], List[float], int]:
        """
        Function maps counts of ngram to vector of tf-idf weights.
        The function returns vec, an array of dictionary that store mapping of
        n-gram and tf-idf weights. The n-th entry of array denotes length of
        n-grams.
        :param counts: n-gram -> term frequency, as built by _extract_sentence_n_grams
        :return: vec (array of dict), norm (array of float), length (int)
        """
        vec = [defaultdict(float) for _ in range(self.n)]
        length = 0
        norm = [0.0 for _ in range(self.n)]
        for ngram, term_freq in counts.items():
            # Give word count 1 if it doesn't appear in reference corpus
            cur_doc_freq = np.log(max(1.0, self.doc_freq[ngram]))
            # ngram index
            n = len(ngram) - 1
            # tf (term_freq) * idf (precomputed idf) for n-grams
            vec[n][ngram] = float(term_freq) * (self.ref_len - cur_doc_freq)
            # Compute vector norm, use it for computing similarity
            norm[n] += pow(vec[n][ngram], 2)
            if n == 1:
                length += term_freq
        norm = [np.sqrt(n) for n in norm]
        return vec, norm, length
    def _get_sim(
        self, vec_h: List[Dict[Tuple[str], float]],
        vec_r: List[Dict[Tuple[str], float]], norm_h: List[float],
        norm_r: List[float], len_h: int, len_r: int, sigma: float
    ) -> np.ndarray:
        """
        Compute the cosine similarity of two vectors.
        :param vec_h: array of dictionary for vector corresponding to hypothesis
        :param vec_r: array of dictionary for vector corresponding to reference
        :param norm_h: array of float for vector corresponding to hypothesis
        :param norm_r: array of float for vector corresponding to reference
        :param len_h: int containing length of hypothesis
        :param len_r: int containing length of reference
        :param sigma: float
        :return: array of score for each n-grams cosine similarity
        """
        delta = float(len_h - len_r)
        # measure cosine similarity (clipped: min() caps hyp counts at ref counts)
        val = np.array([0.0 for _ in range(self.n)])
        for n in range(self.n):
            for ngram, count in vec_h[n].items():
                val[n] += min(
                    vec_h[n][ngram], vec_r[n][ngram]
                ) * vec_r[n][ngram]
            if norm_h[n] != 0 and norm_r[n] != 0:
                val[n] /= norm_h[n] * norm_r[n]
            assert not math.isnan(val[n])
            # Gaussian penalty on the hypothesis/reference length difference.
            val[n] *= np.e ** (-(delta ** 2) / (2 * sigma ** 2))
        return val
    def _get_idf(self, references: List[List[str]]):
        # Document frequency: for each n-gram, count how many examples have at
        # least one reference containing it.
        self.refs = [
            _multiprocess_batch_extract_n_grams(
                r, n=self.n, n_workers=self.n_workers, verbose=self.verbose
            ) for r in references
        ]
        for cur_refs in zip(*self.refs):
            for ngram in set(ngram for r in cur_refs for ngram, c in r.items()):
                self.doc_freq[ngram] += 1
    def get_sent_scores(
        self, hypothesis: List[str], references: List[List[str]]
    ) -> List[float]:
        # One CIDEr score per hypothesis, averaged over its references and
        # scaled by 10 (the conventional CIDEr scaling).
        self.n_examples = len(hypothesis)
        self.ref_len = np.log(self.n_examples)
        self._get_idf(references)
        scores = []
        hypo = _multiprocess_batch_extract_n_grams(
            hypothesis, self.n, self.n_workers, self.verbose
        )
        for h, cur_refs in zip(hypo, self.refs):
            vec_h, norm_h, len_h = self._counts_to_vec(h)
            score = np.array([0.0 for _ in range(self.n)])
            for r in cur_refs:
                vec_r, norm_r, len_r = self._counts_to_vec(r)
                score += self._get_sim(
                    vec_h, vec_r, norm_h, norm_r, len_h, len_r, self.sigma
                )
            # Mean over n-gram orders, then over references.
            score_avg = np.mean(score) / len(cur_refs) * 10.0
            scores.append(score_avg)
        return scores
| true |
f93f1ed2b88f7c102d44add6cff0c1a9697250e5 | Python | Hrishi29/anwala.github.io | /Assignments/A3/read.py | UTF-8 | 1,122 | 2.71875 | 3 | [] | no_license | import justext
import re

# Inverted index: word -> list of 1-based document ids that contain it.
d = {}
count = 1
while (count < 1001):
    # reading the files for raw html content ('with' also closes the handle)
    with open('raw_html/%s.html' % count, 'r', encoding='utf-8') as fp:
        wordstring = fp.read()
    # extracting linguistic sentences from the content by excluding the
    # boilerplate content
    paragraphs = justext.justext(wordstring, justext.get_stoplist("English"))
    # BUG FIX: the original accumulated `para` across every document and kept
    # one global `inv` word list, then appended the current document id to
    # EVERY word seen so far -- so each word was credited to all later
    # documents too.  The text and the seen-set are now rebuilt per document.
    para = ''
    for paragraph in paragraphs:
        if not paragraph.is_boilerplate:
            para = para + ' ' + (paragraph.text)
    seen = set()  # O(1) duplicate check (was an O(n) list membership test)
    for w in para.split():
        # excluding non-alphanumeric characters
        w = re.sub(r'\W+', '', w)
        # record non-empty, first-occurrence words for this document only
        if w and w not in seen:
            seen.add(w)
            d.setdefault(w, []).append(count)
    count = count + 1
# writing the result in invertedindex.txt file in ascii format
with open('invertedindex.txt', 'w', encoding='ascii') as pp:
    for k, v in d.items():
        pp.write(str(k) + ' >>> '+ str(v) + '\n\n')
| true |
36e4215c4f645afadedb979e072c1714196de3c4 | Python | byi649/Design-Automation-Optimisation-for-Multi-Material-3D-Printing | /MultiMaterialBeam/heuristicVoxels.py | UTF-8 | 2,338 | 2.6875 | 3 | [] | no_license | import blackbox
import numpy as np
import matplotlib.pyplot as plt
import algos
from toolkit import *
N = 6  # number of natural-frequency modes compared against the benchmark
#ElementOrder = 1
if __name__ == '__main__':
    # GA hyper-parameters for the voxel material optimisation.
    NGEN = 10
    verbose = True
    nVoxels = 40
    nPop = 10
    algorithm = "GA_voxel"
    if algorithm == "GA_voxel":
        # NOTE(review): `bin` shadows the builtin; it holds the best voxel
        # material assignment found by the GA.
        (bin, fbest, best) = algos.GA_voxel_uniform(verbose, NGEN, nVoxels, nPop)
    print("Best solution:", bin)
    # Evaluate the solution's natural frequencies with the FE blackbox.
    freq = blackbox.blackbox_voxel(bin)
    print("Natural frequencies:")
    print('\n'.join('{}: {} Hz'.format(*k) for k in enumerate(freq, 1)))
    # Per-mode percentage error against the benchmark frequencies.
    true_freq = np.loadtxt('benchmark_frequencies.txt')
    true_freq = true_freq[:N]
    errors = []
    for i in range(N):
        errors.append(abs(freq[i] - true_freq[i]) / true_freq[i] * 100)
    errors = np.array(errors)
    print("Average error: {0:.3e}%".format(np.average(errors)))
    gs = plt.GridSpec(4, 4)
    # X-axis = generation
    x = list(range(NGEN))
    plt.figure()
    plt.subplot(gs[0:2,2:4])
    plt.bar(range(1,7), freq)
    plt.title("Natural frequencies")
    ax = plt.gca()
    ax.set_ylabel('Frequency (Hz)')
    ax.set_xlabel('Mode')
    plt.subplot(gs[0:2,0:2])
    plt.plot(x, fbest, "-c")
    plt.grid(True)
    plt.title("Average percentage error")
    ax = plt.gca()
    ax.set_ylabel('Error(%)')
    ax.set_xlabel('Generation number')
    # Dead toggle: flip to True to plot voxel layouts instead of frequencies.
    if False:
        plt.subplot(gs[2,0:2])
        plt.imshow([bin[3::4], bin[2::4], bin[1::4], bin[0::4]])
        plt.title("Beam voxels: yellow = AL, blue = PLA")
        plt.axis("off")
        goal = np.loadtxt('material_array.txt')
        plt.subplot(gs[3,0:2])
        plt.imshow([goal[3::4], goal[2::4], goal[1::4], goal[0::4]])
        plt.title("True voxels: yellow = AL, blue = PLA")
        plt.axis("off")
    else:
        plt.subplot(gs[2:4,0:2])
        plt.scatter(range(1,7), freq)
        plt.plot(range(1,7), true_freq, 'r')
        plt.title("Natural frequencies vs goal")
        ax = plt.gca()
        ax.set_xlabel("Mode")
        ax.set_ylabel("Frequency (Hz)")
        plt.subplot(gs[2:4,2:4])
        plt.bar(x=range(1, N + 1), height=errors)
        print(errors)
        plt.title("Percentage error for each mode")
        ax = plt.gca()
        ax.set_ylim(bottom=min(errors)-np.std(errors), top=max(errors)+np.std(errors))
    plt.tight_layout()
    plt.savefig('Heuristic output')
    #plt.show()
| true |
5d9e70e7feacd2c208ef1afc8d9431d1f3e0d0a0 | Python | KenAdeniji/WordEngineering | /IIS/WordEngineering/Python/flask.pocoo.org/BibleBookFile.py | UTF-8 | 1,618 | 3.359375 | 3 | [] | no_license | """
2020-02-16 Created. john.zelle@wartburg.edu; Postal address: Department of Math/CS/Physics Wartburg College 100 Wartburg Blvd. Waverly, IA 50677
http://127.0.0.1:5000
http://127.0.0.1:5000/bibleBooks
http://127.0.0.1:5000/bibleBook/John
https://www.pythonforbeginners.com/dictionary/how-to-use-dictionaries-in-python
https://stackoverflow.com/questions/3294889/iterating-over-dictionaries-using-for-loops
line = infile.readline()
while line != "":
columns = line.split(",")
line = infile.readline()
2020-02-17
https://stackoverflow.com/questions/6579876/how-to-match-a-substring-in-a-string-ignoring-case
"""
from flask import Flask
app = Flask(__name__)
import re
@app.route("/")
def hello():
return "Hello World!"
@app.route('/bibleBooks')
def bibleBooks():
    # List every book: an empty filter matches all titles in query().
    return query()
@app.route('/bibleBook/<bookTitle>')
def bibleBook(bookTitle):
    # Filter the table to titles containing <bookTitle> (case-insensitive).
    return query(bookTitle)
def readFile():
    """Load BibleBook.txt (CSV rows: id,title,chapters) into `bibleBooks`.

    Populates the module-level `bibleBooks` dict mapping title -> chapters.
    """
    # 'with' guarantees the handle is closed; the original leaked it.
    with open("BibleBook.txt", "r") as infile:
        for line in infile:
            columns = line.split(",")
            bookTitle = columns[1]
            chapters = columns[2]
            bibleBooks[bookTitle] = chapters
def query(bookTitle = ""):
infoSet = "<table border='1'><caption>Bible Books</caption><thead><tr><th>ID</th><th>Title</th><th>Chapters</th></tr></thead><tbody>"
bookID = 0
for the_key, the_value in bibleBooks.items():
bookID += 1
if (re.search(bookTitle, the_key, re.IGNORECASE)):
infoSet += "<tr><td>" + str(bookID) + "</td><td>" + the_key + "</td><td>" + the_value + "</td></tr>"
infoSet += "</tbody></table>"
return infoSet
bibleBooks = {}  # title -> chapter count, filled once at import time
readFile()
| true |
546387a5f92bbfd273f7cfedfbb9a2064df2800f | Python | geekori/numpy | /src/chapter02/demo10.py | UTF-8 | 354 | 3.84375 | 4 | [] | no_license | # NumPy数组:分割数组
# NumPy arrays: splitting arrays (hsplit / vsplit / dsplit demo).
# Explicit imports instead of the original star import, so it is clear
# exactly which NumPy names the script uses.
from numpy import arange, hsplit, vsplit, dsplit

# Horizontal split: cut a 3x3 array into 3 column blocks.
a = arange(9).reshape(3, 3)
print(a)
b = hsplit(a, 3)
print(b[0])
print(b[2])
# Vertical split: cut the same array into 3 row blocks.
print("---------")
c = vsplit(a, 3)
print(c)
print(c[0])
print(c[2])
# Depth split: cut a 3x3x3 array along its third axis.
a = arange(27).reshape(3, 3, 3)
print(a)
b = dsplit(a, 3)
print(b)
| true |
4265f15f595de0b0123642eae840c30225b25e5e | Python | SMEISEN/pyWATTS | /examples/example_keras.py | UTF-8 | 4,044 | 2.859375 | 3 | [
"MIT"
] | permissive | # -----------------------------------------------------------
# This example presents the code used in the advanced example
# guide in the pyWATTS documentation.
# -----------------------------------------------------------
import pandas as pd
from sklearn.preprocessing import StandardScaler
from tensorflow.keras import layers, Model
from pywatts.callbacks import LinePlotCallback
# From pyWATTS the pipeline is imported
from pywatts.core.computation_mode import ComputationMode
from pywatts.core.pipeline import Pipeline
# Import the pyWATTS pipeline and the required modules
from pywatts.modules import ClockShift, LinearInterpolater, SKLearnWrapper, KerasWrapper
from pywatts.summaries import RMSE
from tensorflow.keras import backend as K
def get_keras_model():
    """Build a two-input feed-forward Keras model (2 inputs -> 10 hidden -> 1 out).

    The Input layers are named after the lagged series they receive
    ('ClockShift_Lag1' / 'ClockShift_Lag2') and the output layer is named
    'target', because pyWATTS matches model layers to data by layer name.
    """
    # write the model with the Functional API, Sequential does not support multiple input tensors
    D_in, H, D_out = 2, 10, 1  # input dimension, hidden dimension, output dimension
    input_1 = layers.Input(shape=(1,),
                           name='ClockShift_Lag1')  # layer name must match time series name
    input_2 = layers.Input(shape=(1,),
                           name='ClockShift_Lag2')  # layer name must match time series name
    merged = layers.Concatenate(axis=1)([input_1, input_2])
    hidden = layers.Dense(H,
                          input_dim=D_in,
                          activation='tanh',
                          name='hidden')(merged)
    output = layers.Dense(D_out,
                          activation='linear',
                          name='target')(hidden)  # layer name must match time series name
    model = Model(inputs=[input_1, input_2], outputs=output)
    return model
if __name__ == "__main__":
keras_model = get_keras_model()
pipeline = Pipeline(path="../results")
# Deal with missing values through linear interpolation
imputer_power_statistics = LinearInterpolater(method="nearest", dim="time",
name="imputer_power")(x=pipeline["load_power_statistics"])
# Scale the data using a standard SKLearn scaler
power_scaler = SKLearnWrapper(module=StandardScaler(), name="scaler_power")
scale_power_statistics = power_scaler(x=imputer_power_statistics)
# Create lagged time series to later be used in the regression
# sampler_module -> 2D-Zeitreihe
shift_power_statistics = ClockShift(lag=1, name="ClockShift_Lag1")(x=scale_power_statistics)
shift_power_statistics2 = ClockShift(lag=2, name="ClockShift_Lag2")(x=scale_power_statistics)
keras_wrapper = KerasWrapper(keras_model,
custom_objects={"<lambda>": lambda x, y: K.sqrt(K.mean(K.square(x - y)))},
fit_kwargs={"batch_size": 8, "epochs": 1},
compile_kwargs={"loss": lambda x, y: K.sqrt(K.mean(K.square(x - y))),
"optimizer": "Adam",
"metrics": ["mse"]}) \
(ClockShift_Lag1=shift_power_statistics,
ClockShift_Lag2=shift_power_statistics2,
target=scale_power_statistics)
inverse_power_scale_dl = power_scaler(x=keras_wrapper,
computation_mode=ComputationMode.Transform,
use_inverse_transform=True,
callbacks=[LinePlotCallback("prediction")])
rmse_dl = RMSE()(keras_model=inverse_power_scale_dl, y=pipeline["load_power_statistics"])
# Now, the pipeline is complete
# so we can load data and train the model
data = pd.read_csv("../data/getting_started_data.csv",
index_col="time",
parse_dates=["time"],
infer_datetime_format=True,
sep=",")
pipeline.train(data)
pipeline.to_folder("../results/pipe_keras")
pipeline = Pipeline.from_folder("../results/pipe_keras")
pipeline.train(data)
| true |
26d7125708f548872f408eb52b99d186f4d5245c | Python | nelhage/taktician | /python/tak/ptn/ptn.py | UTF-8 | 3,184 | 2.609375 | 3 | [
"MIT"
] | permissive | import re
import attr
import tak
from . import tps
@attr.s
class PTN(object):
    """A parsed PTN (Portable Tak Notation) game: header tags plus move list."""
    tags = attr.ib()   # dict of header tags, e.g. {"Size": "5", "TPS": ...}
    moves = attr.ib()  # list of tak.Move in game order
    @classmethod
    def parse(cls, text):
        """Parse PTN *text* (header section, blank line, move text) into a PTN."""
        head, tail = text.split("\n\n", 1)
        # Header lines look like: [Tag "value"]
        tags_ = re.findall(r'^\[(\w+) "([^"]+)"\]$', head, re.M)
        tags = dict(tags_)
        # Strip {...} comments from the move text.
        tail = re.sub(r"{[^}]+}", " ", tail)
        moves = []
        tokens = re.split(r"\s+", tail)
        for t in tokens:
            if t == "--":
                continue
            # Skip game-result tokens (1-0, R-0, 1/2-1/2, ...).
            if re.search(r"\A(0|R|F|1|1/2)-(0|R|F|1|1/2)\Z", t):
                continue
            # Skip move numbers ("1.", "2.", ...).
            if re.match(r"\A\d+\.\Z", t):
                continue
            if t == "":
                continue
            # Drop trailing annotation marks ('!', '?', "'").
            t = re.sub(r"['!?]+$", "", t)
            m = parse_move(t)
            moves.append(m)
        return cls(tags=tags, moves=moves)
    def initial_position(self):
        """Return the starting tak.Position: from the TPS tag if present,
        otherwise an empty board of the tagged Size."""
        if "TPS" in self.tags:
            return tps.parse_tps(self.tags["TPS"])
        return tak.Position.from_config(tak.Config(size=int(self.tags["Size"])))
# PTN direction suffix -> slide move type.
slide_map = {
    "-": tak.MoveType.SLIDE_DOWN,
    "+": tak.MoveType.SLIDE_UP,
    "<": tak.MoveType.SLIDE_LEFT,
    ">": tak.MoveType.SLIDE_RIGHT,
}
# Inverse of slide_map, used when formatting moves back to PTN.
slide_rmap = dict((v, k) for (k, v) in slide_map.items())
# PTN stone prefix -> placement type ('' and 'F' both mean a flat stone).
place_map = {
    "": tak.MoveType.PLACE_FLAT,
    "S": tak.MoveType.PLACE_STANDING,
    "C": tak.MoveType.PLACE_CAPSTONE,
    "F": tak.MoveType.PLACE_FLAT,
}
# Placement type -> canonical PTN prefix (flat stones print with no prefix).
place_rmap = {
    tak.MoveType.PLACE_FLAT: "",
    tak.MoveType.PLACE_STANDING: "S",
    tak.MoveType.PLACE_CAPSTONE: "C",
}
def parse_move(move):
    """Parse a single PTN move token into a tak.Move.

    Handles placements (e.g. "a1", "Sc3", "Cc3") and slides
    (e.g. "3c3>111").  Raises BadMove on malformed or inconsistent tokens.
    """
    # Groups: stone prefix, pickup count, file, rank, direction, drop counts.
    m = re.search(r"\A([CFS]?)([1-8]?)([a-h])([1-8])([<>+-]?)([1-8]*)[CFS]?\Z", move)
    if not m:
        raise BadMove(move, "malformed move")
    stone, pickup, file, rank, dir, drops = m.groups()
    # Convert file/rank characters to 0-based board coordinates.
    x = ord(file) - ord("a")
    y = ord(rank) - ord("1")
    if pickup and not dir:
        raise BadMove(move, "pick up but no direction")
    typ = None
    if dir:
        typ = slide_map[dir]
    else:
        typ = place_map[stone]
    slides = None
    if drops:
        slides = tuple(ord(c) - ord("0") for c in drops)
    if (drops or pickup) and not dir:
        raise BadMove(move, "pickup/drop without a direction")
    # A bare slide like "c3>" implicitly moves one stone.
    if dir and not pickup and not slides:
        pickup = "1"
    if pickup and not slides:
        slides = (int(pickup),)
    # The declared pickup count must equal the total of the drop counts.
    if pickup and int(pickup) != sum(slides):
        raise BadMove(
            move, "inconsistent pickup and drop: {0} v {1}".format(pickup, drops)
        )
    return tak.Move(x, y, typ, slides)
def format_move(move):
    """Format a tak.Move back into its canonical PTN token (inverse of parse_move)."""
    bits = []
    bits.append(place_rmap.get(move.type, ""))
    if move.type.is_slide():
        # Emit the pickup count only when more than one stone moves.
        pickup = sum(move.slides)
        if pickup != 1:
            bits.append(pickup)
    # 0-based coordinates back to file letter and rank digit.
    bits.append(chr(move.x + ord("a")))
    bits.append(chr(move.y + ord("1")))
    if move.type.is_slide():
        bits.append(slide_rmap[move.type])
        # Per-square drop counts are only written for multi-square slides.
        if len(move.slides) > 1:
            bits += [chr(d + ord("0")) for d in move.slides]
    return "".join(map(str, bits))
class BadMove(Exception):
    """Raised when a PTN move token cannot be parsed or is inconsistent."""

    def __init__(self, move, error):
        super().__init__("{0}: {1}".format(error, move))
        self.move = move    # the offending PTN token
        self.error = error  # human-readable reason
| true |
01d34de19d09ddd4405a0ef79c1664d25c4a0847 | Python | PacktPublishing/OpenCV-4-for-Secret-Agents | /Section3/deep-learning-opencv/imageClassification.py | UTF-8 | 1,049 | 2.890625 | 3 | [
"MIT"
] | permissive | import numpy as np
import cv2
import time

# Classify an image with GoogLeNet (Caffe weights) via OpenCV's dnn module.
# load the input image from disk
image = cv2.imread("images/image_02.jpg")
# load the class labels from disk (drop the synset id, keep the first synonym)
rows = open("synset_words.txt").read().strip().split("\n")
classes = [r[r.find(" ") + 1:].split(",")[0] for r in rows]
# Mean-subtracted 224x224 blob, as GoogLeNet expects.
blob = cv2.dnn.blobFromImage(image, 1, (224, 224), (104, 117, 123))
net = cv2.dnn.readNetFromCaffe("bvlc_googlenet.prototxt", "bvlc_googlenet.caffemodel")
net.setInput(blob)
preds = net.forward()
# sort the predictions in order of probability and grab the first 3.
idxs = np.argsort(preds[0])[::-1][:3]
for (i, idx) in enumerate(idxs):
    # draw the top prediction on the input image
    if i == 0:  # the first in the list will be the prediction with most probability
        text = "Label: {}, {:.2f}%".format(classes[idx],
                                           preds[0][idx] * 100)
        cv2.putText(image, text, (5, 25), cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, (0, 0, 255), 2)
    # display
    # BUG FIX: the original format string had only two placeholders for three
    # arguments, so the class name was silently dropped and the rank number
    # printed where the label should be.
    print("Predicted label {}: {}, probability: {:.5}".format(i + 1,
                                                              classes[idx], preds[0][idx]))
cv2.imshow("Image", image)
cv2.waitKey(0) | true |
3b63626bd15c9a6f82fcece252b245be5dc98d71 | Python | tathyam/bots_for_some_indian_shopping_sites | /snapdeal1.py | UTF-8 | 1,058 | 2.59375 | 3 | [] | no_license | #! /usr/bin/python2.7
#imports for parsing
# NOTE: Python 2 script (print statements, urllib2).
import urllib2
from bs4 import BeautifulSoup
import json
snapdeal="http://www.snapdeal.com"
print "opening site :"+snapdeal
#req=urllib2.Request("http://www.snapdeal.com/offers/best-discounts",headers={'User-Agent':"Magic Browser"})
#flip=urllib2.urlopen(req);
#"http://www.snapdeal.com/offers/featured-deals"
#if flip.code is not 200:
#    print "error in fetching site \n Error code :"+str(flip.code)
#    exit(0)
# The live fetch above is commented out; a locally cached copy is parsed instead.
flip=open('snap.html','r')
print "Reading HTML\n";
html=flip.read();
print "Parsing in BeautifulSoup\n";
bt=BeautifulSoup(html);
print "Done Parsing";
# Every offer card on the page carries this CSS class.
offer_list=bt.find_all(attrs={'class':'OffersContentBox flt'});
f=open('snapdeal.json','w');
j=0
json_list={}
for i in offer_list:
    # Extract the offer text, target link and thumbnail for each card.
    text=i.text
    link=i.find('a').attrs['href']
    #link=flipkart+link
    image_link=i.find('img').attrs['src']
    offer_no="offer %d"%(j)
    json_list.update({offer_no:({'text':text},{'link':link},{'image-link':image_link})})
    j+=1
# Dump all offers as one JSON document.
to_json=json.dumps(json_list)
f.write(str(to_json))
f.close()
print "retrieval succedded"
| true |
65a59d5150f77daeed370a0d2e6ad70007d98eee | Python | johnhungerford/hackerrank-solutions | /project-euler/prime-factors-binomial-coefficient.py | UTF-8 | 3,657 | 3.28125 | 3 | [] | no_license | import sys
all_primes = [2]       # Shared cache of primes found so far by any generator.
non_primes = {4: [2]}  # Maps upcoming composites to their known prime factors.

def gen_primes():
    """Yield primes 2, 3, 5, ... indefinitely.

    Uses an incremental sieve whose state (all_primes / non_primes) is shared
    at module level, so a second generator replays cached primes before
    extending the sieve.
    """
    idx = 0        # position in the shared prime cache
    candidate = 2  # next integer to test once the cache is exhausted
    while True:
        # Serve any primes already discovered by a previous generator.
        if idx < len(all_primes):
            yield all_primes[idx]
            candidate += 1
            idx += 1
            continue
        factors = non_primes.get(candidate)
        if factors is None:
            # candidate is a new prime: yield it and schedule its first
            # interesting multiple (its square) for future marking.
            yield candidate
            all_primes.append(candidate)
            non_primes[candidate * candidate] = [candidate]
        else:
            # candidate is composite: push each known prime factor forward to
            # the next multiple, then forget this entry.
            for p in factors:
                non_primes.setdefault(candidate + p, []).append(p)
            del non_primes[candidate]
        candidate += 1
        idx += 1
def append_combinations(p_map, p_list, res_list, current, k):
    """Append to *res_list* every multiset extension of *current* by k primes.

    *current* is a non-decreasing list of primes drawn from the sorted
    *p_list*; each prime may be used at most p_map[prime] times in total.
    Recursion depth shrinks k by one per added element; completed
    combinations are appended to *res_list*.
    """
    if len(current) > 0:
        most_rec = current[-1]
        num_most_rec = 0
        #print(f'current: {current}')
        # Count how many times the most recent prime already appears.
        for i in current:
            #print(f'i: {i}, most_rec: {most_rec}')
            if i == most_rec:
                num_most_rec += 1
        #print(f'most_rec: {most_rec}; num_most_rec: {num_most_rec}')
        # Resume from the last-used prime; skip past it if its multiplicity
        # in p_map is exhausted (keeps combinations non-decreasing/unique).
        for i in range(0, len(p_list)):
            if p_list[i] == most_rec:
                start = i + 1 if num_most_rec == p_map[most_rec] else i
                break
        if start >= len(p_list):
            return
    else:
        start = 0
    for i in range(start, len(p_list)):
        new_curr = current[::]
        new_curr.append(p_list[i])
        if k == 1:
            res_list.append(new_curr)
        else:
            append_combinations(p_map, p_list, res_list, new_curr, k - 1)
def get_sum(p_map, k):
    """Return the sum of all distinct products of k primes drawn from p_map.

    p_map maps each prime to its available multiplicity; no prime is used
    more often than that in a single product.
    """
    factors = sorted(int(key) for key in p_map)
    # Enumerate every size-k multiset of factors (respecting multiplicities).
    combos = []
    append_combinations(p_map, factors, combos, [], k)
    total = 0
    for combo in combos:
        term = 1
        for factor in combo:
            term *= factor
        total += term
    return total
def get_primes(x):
    """Return the distinct prime factors of x (without multiplicities).

    Divides out each prime from gen_primes() in turn.  If the primes tried so
    far make further small factors impossible (i * (i+1) * product > x), the
    remaining cofactor is taken to be prime and appended directly.
    Returns False only if the generator is exhausted (it never is).
    """
    rem = x
    product = 1  # product of the distinct primes divided out so far
    out = []
    for i in gen_primes():
        if rem == 1:
            return out
        if rem % i == 0:
            # BUG FIX: the original used true division (rem = rem / i), which
            # turns rem into a float in Python 3 and loses precision for
            # large inputs; floor division keeps it an exact int.
            while rem % i == 0:
                rem = rem // i
            product *= i
            out.append(i)
        elif i * (i + 1) * product > x:
            # Early exit: the leftover cofactor must itself be prime.
            out.append(rem)
            return out
    return False
def get_binomial_primes(N, M):
    """Build a prime -> count map for C(N, M) = N! / (M! * (N-M)!).

    Counts the distinct prime factors of each integer in the numerator range
    (above the larger of the two denominator factorials) and cancels those of
    the smaller denominator range.
    NOTE(review): get_primes returns primes WITHOUT multiplicity, so prime
    powers (e.g. 8 contributing 2^3) are undercounted here -- confirm against
    expected output before relying on the exponents.
    """
    pr_dict = {}
    # Numerator: the larger factorial cancels everything below max(M+1, N-M+1).
    for i in range(max(M+1, N-M+1), N+1):
        for j in get_primes(i):
            if j in pr_dict:
                pr_dict[j] += 1
            else:
                pr_dict[j] = 1
    # Denominator: cancel the smaller factorial's prime factors.
    for i in range(2, min(M+1, N-M+1)):
        for j in get_primes(i):
            if pr_dict[j] > 1:
                pr_dict[j] -= 1
            else:
                del pr_dict[j]
    # Removed a leftover debug print(pr_dict) that corrupted stdout.
    return pr_dict
if __name__ == '__main__':
    # Input: one line "N M K".  Output: for each k in 1..K, the sum of all
    # distinct products of k primes from the factorization of C(N, M).
    inp = input()
    inp = inp.split(' ')
    N = int(inp[0])
    M = int(inp[1])
    K = int(inp[2])
    p = get_binomial_primes(N, M)
    for k in range(1, K + 1):
        print(get_sum(p, k))
| true |
6d4b352769fa443601861d1d5578aefbdea8382d | Python | kasteph/schemepy | /scheme_test.py | UTF-8 | 4,253 | 3.328125 | 3 | [] | no_license | import unittest
from parser import tokenize, parser, read
from environment import Environment
from evaluator import Evaluator
class TestParser(unittest.TestCase):
    """Tests for tokenize/parser/read from the scheme parser module."""
    # Shared fixture: the identity lambda applied to a string literal.
    line = '((lambda(x) x) "Lisp")'
    def test_x(self):
        self.assertEqual(tokenize('x'), ['x'])
    def test_x_read(self):
        self.assertEqual(parser(tokenize('x')), 'x')
    def test_tokenize(self):
        self.assertEqual(tokenize(self.line), ['(', '(', 'lambda', '(', 'x', ')', 'x', ')', '"Lisp"', ')'])
    def test_parser(self):
        self.assertEqual(parser(tokenize(self.line)), [['lambda', ['x'], 'x'], '"Lisp"'])
    def test_read_number_list(self):
        self.assertEqual(read('(1 2 3.14 22.22)'), [1, 2, 3.14, 22.22])
    def test_read_simple_lambda(self):
        self.assertEqual(read(self.line), [['lambda', ['x'], 'x'], '"Lisp"'])
class TestEnvironment(unittest.TestCase):
    """Tests for the scoped variable Environment."""
    def test_environment(self):
        env = Environment([{}])
        self.assertEqual(env.get('x'), None)
        env.set('x', 1)
        self.assertEqual(env.get('x'), 1)
        env.set('y', 'foo')
        self.assertEqual(env.get('y'), 'foo')
        self.assertEqual(env.get('x'), 1)
        # An inner scope shadows x; removing it restores the outer binding.
        env.add_scope()
        env.set('x', 2)
        self.assertEqual(env.get('x'), 2)
        env.remove_scope()
        self.assertEqual(env.get('x'), 1)
class TestEval(unittest.TestCase):
    """Tests for the Evaluator: atoms, special forms, lambdas, let and define."""
    def test_undefined_variable(self):
        exp = Evaluator().eval('x')
        self.assertEqual(exp, None)
    def test_getting_variable_value(self):
        exp = Evaluator().eval('x', Environment([{'x': 1}]))
        self.assertEqual(exp, 1)
    def test_eval_string(self):
        exp = Evaluator().eval('"foo"')
        self.assertEqual(exp, '"foo"')
    def test_eval_int(self):
        exp = Evaluator().eval(-5)
        self.assertEqual(exp, -5)
    def test_eval_float(self):
        exp = Evaluator().eval(3.14)
        self.assertEqual(exp, 3.14)
    def test_eval_quote(self):
        exp = Evaluator().eval(['quote', '"foo"'])
        self.assertEqual(exp, '"foo"')
    def test_eval_plus(self):
        exp = Evaluator().eval(['+', 1, 1])
        self.assertEqual(exp, 2)
    def test_eval_if(self):
        exp = Evaluator().eval(['if', ['<', 1, 2], '#t', '#f'])
        self.assertEqual(exp, True)
    # Lambdas should evaluate to Python callables.
    def test_lambda_is_func_and_evals_exp(self):
        exp = Evaluator().eval(['lambda', ['x'], 'x'])
        self.assertIsInstance(exp, type(lambda: None))
        self.assertEqual(exp(1), 1)
    def test_lamda_expression(self):
        exp = Evaluator().eval(read('(lambda (x) (+ x 2))'))
        self.assertIsInstance(exp, type(lambda: None))
    def test_eval_lambda_expression(self):
        exp = Evaluator().eval(read('((lambda(x) (+ x 2)) 42)'))
        self.assertEqual(exp, 44)
    def test_eval_simple_let(self):
        exp = Evaluator().eval(read('(let ((x 4)) x)'))
        self.assertEqual(exp, 4)
    def test_eval_nested_let(self):
        exp = Evaluator().eval(read('(let ((x 4)) (let ((x 3)) x))'))
        self.assertEqual(exp, 3)
    def test_eval_let_with_two_vals_in_last_sexp(self):
        exp = Evaluator().eval(read('(let ((x 4)) (let ((x 3)) "foo" x))'))
        self.assertEqual(exp, 3)
    # define should mutate the evaluator's environment.
    def test_define_var(self):
        e = Evaluator()
        exp = e.eval(read('(define x 4)'))
        self.assertEqual(e.env.get('x'), 4)
    def test_define_lambda(self):
        e = Evaluator()
        exp = e.eval(read('(define square (lambda (x) (* x x)))'))
        next_exp = e.eval(read('(square 5)'))
        self.assertEqual(next_exp, 25)
    def test_define_func(self):
        e = Evaluator()
        exp = e.eval(read('(define (square a) (* a a))'))
        self.assertIsInstance(exp, type(lambda: None))
    def test_eqv_special_form(self):
        e = Evaluator()
        exp = e.eval(read('(eqv? 1 1)'))
        self.assertEqual(exp, True)
    def test_complex_eqv_special_form(self):
        e = Evaluator()
        exp = e.eval(read('(let ((p (lambda (x) x))) (eqv? p p)'))
        self.assertEqual(exp, True)
    def test_falsy_eqv_special_form(self):
        e = Evaluator()
        exp = e.eval(read('(eqv? 1 2)'))
        self.assertEqual(exp, False)
if __name__ == '__main__':
unittest.main() | true |
97894fe3902d076924854cc8d2b0015bcde2efef | Python | vanpact/pyradius | /imageConverter.py | UTF-8 | 3,191 | 2.890625 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Oct 12, 2012
@author: yvesremi
"""
from PyQt4 import QtGui
import numpy, cv2
import gc, sys, debugsp
class ImageConverter(object):
    """Class to convert a numpy array to QImage and a QImage to a Numpy array."""
    def __init__(self):
        """Constructor"""
    @staticmethod
    def qimageToNdarray(img, colorToGray=False):
        """Convert a QImage to a Numpy array
        :param img: The QImage to be converted
        :type img: QImage
        :param colorToGray: if true, convert the image to grayScale
        :type colorToGray: bool
        :return: The converted image
        :rtype: Numpy array
        """
        if(isinstance(img, QtGui.QImage)):
            imageShape = (img.height(), img.width())
            # Row stride can exceed the visible width; keep the padded shape
            # for reshaping, then crop back to imageShape below.
            temporaryShape = (img.height(), img.bytesPerLine() * 8 / img.depth())
            if img.format() in (QtGui.QImage.Format_ARGB32_Premultiplied, QtGui.QImage.Format_ARGB32, QtGui.QImage.Format_RGB32):
                imageShape += (4, )
                temporaryShape += (4, )
            elif (img.format()==QtGui.QImage.Format_RGB888):
                imageShape += (3, )
                temporaryShape += (3, )
            else:
                raise ValueError("Only 32 and 24 bits RGB and ARGB images are supported.")
            buf = img.bits().asstring(img.numBytes())
            ndimg = numpy.frombuffer(buf, numpy.uint8).reshape(temporaryShape)
            # Qt stores pixels as BGR(A); swap to RGB channel order.
            ndimg = ndimg[:, :, (2, 1, 0)]
            if imageShape != temporaryShape:
                ndimg = ndimg[:,:imageShape[1]]
            if img.format() == QtGui.QImage.Format_RGB32:
                ndimg = ndimg[...,:3]
            if(colorToGray):
                # ITU-R BT.601 luma weights.
                return ndimg[:, :, 0]*0.299+ndimg[:, :, 1]*0.587+ndimg[:, :, 2]*0.114
            return ndimg
        else:
            raise TypeError('Argument 1 must be a QtGui.QImage')
        # NOTE(review): unreachable -- both branches above return or raise.
        return 0
    @staticmethod
    def ndarrayToQimage(ndimg, form=QtGui.QImage.Format_RGB888):
        """Convert a Numpy array to a QImage
        :param ndimg: The Numpy array to be converted
        :type ndimg: Numpy array
        :param form: The QImage pixel format
        :type form: int
        :return: The converted image
        :rtype: QImage
        """
        if(isinstance(ndimg, numpy.ndarray)):
            ndimg1 = numpy.asarray(ndimg, numpy.uint8)
            if(len(ndimg1.shape)==2):#Grayscale images
                # Replicate the single channel three times to fake RGB.
                ndimg1 = numpy.dstack((ndimg1, numpy.copy(ndimg1), numpy.copy(ndimg1)))#cv2.cvtColor(ndimg1, cv2.cv.CV_GRAY2RGB)
#            ndimg = numpy.resize(ndimg,(ndimg.shape[0], ndimg.shape[1], 3))
            shape=ndimg1.shape
            ndimg3 = numpy.ravel(ndimg1)
            # NOTE(review): the tostring() result is discarded -- this line
            # appears to be dead code; confirm before removing.
            ndimg3.tostring()
            return QtGui.QImage(ndimg3.data, shape[1], shape[0], form)
        else:
            raise TypeError('Argument 1 must be a numpy.ndarray')
        # NOTE(review): unreachable -- both branches above return or raise.
        return None
#if __name__ == '__main__':
# app = QtGui.QApplication(sys.argv)
# image = QtGui.QImage('testGoodSize.png')
# ndarray=ImageConverter.qimageToNdarray(image, True)
# ImageConverter.ndarrayToQimage(ndarray).save('testConversion.png') | true |
c9f77eb3cd63f6b994cdcf334641066f370c188e | Python | mfong92/compbio | /nussinov/nussinov.py | UTF-8 | 2,383 | 3.609375 | 4 | [] | no_license | #! /usr/bin/python
import sys
import random
import re
import time
#timeStart = time.time(); #time check statement
'''
Implementation of the Nussinov Algorithm given an input of RNA as either upper or lower case.
Returns the number of maximum basepairs, assuming that there are no pseudoknots.
Input format: python nussinov.py augcaugc
(augcaugc is an arbitrary RNA sequence)
'''
#check to see if an RNA sequence is input, assign seq and lenSeq to the sequence
# Require exactly one command-line argument: the RNA sequence
if len(sys.argv) < 2:
    print "No sequence provided!"
    sys.exit()
seq = str(sys.argv[1]).upper()  # normalise to upper case for wc()/isValid()
lenSeq = len(seq)  # module-level length consumed by nussinov()
#check of input RNA sequence is valid
def isValid(seq):
    """Return 1 when seq contains only RNA bases (acgu, any case).

    On invalid input, print a message and terminate the process -- callers
    rely on this never returning a falsy value.

    Fix: the original used the Python 2 ``print`` statement; the
    parenthesized single-argument form below behaves identically on
    Python 2 and is valid Python 3.
    """
    if re.search(r"[^acguACGU]", seq):
        print("\nNot valid sequence!")
        sys.exit()
    return 1
#function that is called to give any WC match a score of 1
def wc(pair):
    """Score a base pair: 1 for a Watson-Crick pair (AU/UA/GC/CG), else 0.

    ``pair`` is a two-character string of upper-case RNA bases.
    """
    return 1 if pair in ('AU', 'UA', 'GC', 'CG') else 0
def nussinov(seq):
    """Return the maximum number of pseudoknot-free base pairs in seq.

    Implements the Nussinov dynamic program: arr[i][j] holds the best
    pairing score for the subsequence seq[i..j].

    Fixes in this revision:
    * length is taken from the argument instead of the module-level
      ``lenSeq`` global, so the function works for any sequence
    * Watson-Crick scoring is a local helper, removing the hidden
      dependency on the module-level ``wc``
    * sequences shorter than two bases return 0 instead of raising
    """
    n = len(seq)
    if n < 2:
        return 0

    def _wc(pair):
        # 1 for a Watson-Crick pair, 0 otherwise (same table as wc())
        return 1 if pair in ('AU', 'UA', 'GC', 'CG') else 0

    # n x n table initialised to zeroes
    arr = [[0] * n for _ in range(n)]

    for span in range(1, n):            # distance between i and j
        for j in range(span, n):
            i = j - span                # fill the table along diagonals
            best = max(
                arr[i][j - 1],                               # j unpaired
                arr[i + 1][j],                               # i unpaired
                arr[i + 1][j - 1] + _wc(seq[i] + seq[j]),    # i pairs with j
            )
            # Bifurcation: split [i, j] into two independent subproblems
            for k in range(i + 1, j):
                split = arr[i][k] + arr[k + 1][j]
                if split > best:
                    best = split
            arr[i][j] = best

    return arr[0][n - 1]
#actually run nussinov on input
if isValid(seq):
    # isValid() exits the process on bad input, so this branch always runs
    print "\nMaximum number of basepairs is " + str(nussinov(seq))
#time check statement (left disabled)
#timeEnd = time.time()-timeStart
#print "\n"+ str(timeEnd) + " seconds"
| true |
cf92713678353951de5c959551c0957b805bfac1 | Python | Machele-codez/todo | /apps/tasks/forms.py | UTF-8 | 1,220 | 2.640625 | 3 | [] | no_license | from .models import Task
from django import forms
from django.http import request
import datetime, pytz
class TaskForm(forms.ModelForm):
    """ModelForm for Task with HTML5 date/time widgets for the due datetime."""

    due_date = forms.DateField(widget=forms.DateInput(attrs={'type': 'date'}))
    due_time = forms.TimeField(widget=forms.TimeInput(attrs={'type': 'time'}))

    class Meta:
        model = Task
        fields = ('text', 'priority')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Placeholder shown in the empty task text input
        self.fields['text'].widget.attrs['placeholder'] = 'Add Task Here'

    def clean(self):
        """Reject due datetimes earlier than now (both treated as UTC).

        Fixes two defects in the original implementation:
        * ``self.cleaned_data['due_date']`` raised KeyError when the field
          itself failed validation -- use ``.get`` and skip the check then;
        * ``datetime.datetime.today()`` returns naive *local* time, and
          stamping it with ``tzinfo=utc`` mislabeled the current moment --
          use an aware ``now(utc)`` instead.
        """
        cleaned_data = super().clean()
        due_date = cleaned_data.get('due_date')
        due_time = cleaned_data.get('due_time')
        if due_date is not None and due_time is not None:
            # combine() replaces the manual date+timedelta construction
            due_datetime = datetime.datetime.combine(
                due_date, due_time, tzinfo=datetime.timezone.utc)
            if due_datetime < datetime.datetime.now(datetime.timezone.utc):
                raise forms.ValidationError(
                    'due date and time cannot occur before current date',
                    'backdating'
                )
        return cleaned_data
e482aa24f3abe617c2590c7f6bd93de313223e36 | Python | wewe89/Tools | /知识库.py | UTF-8 | 5,674 | 2.765625 | 3 | [] | no_license | import xlrd
import mysql.connector
import re
import string
def connectdb():
    """Open and return a connection to the local ``itkb`` MySQL database."""
    print('连接到mysql服务器...')
    # Connection parameters for the local MySQL server; adjust the
    # credentials to match your own installation
    config = {
        'host': '127.0.0.1',
        'user': 'root',
        'password': '123456',
        'port': 3306,
        'database': 'itkb',
        'charset': 'utf8'
    }
    db = mysql.connector.connect(**config)
    print('连接上了!')
    return db
def createtable(db):
    """(Re)create the demo ``Student`` table, dropping any existing copy."""
    # Obtain an operation cursor
    cursor = db.cursor()
    # Drop the table first if it already exists
    cursor.execute("DROP TABLE IF EXISTS Student")
    sql = """CREATE TABLE Student (
    ID CHAR(10) NOT NULL,
    Name CHAR(8),
    Grade INT )"""
    # Create the Student table
    cursor.execute(sql)
def insertdb(db, createtime, systemid, issue, causes, solution, ps, status, annex):
    """Insert one knowledge-base row; rolls back the transaction on failure."""
    # Obtain an operation cursor
    cursor = db.cursor()
    param = (createtime, systemid, issue, causes, solution, ps, status, annex)
    # Parameterised INSERT -- values are bound by the driver, not formatted in
    sql = 'INSERT INTO knowdedge_info(createtime,systemid,issue,causes,solution,ps,status,annex) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)'
    try:
        # Execute the statement
        cursor.execute(sql, param)
        # Commit the transaction
        db.commit()
    except mysql.connector.Error as e:
        # Roll back so a failed insert does not leave an open transaction
        print('query error!{}'.format(e))
        db.rollback()
def querydb(db):
    """Fetch and print every row of the ``Student`` table.

    Fix: the original bare ``except:`` also hid programming errors such as
    NameError; only database errors are caught now, and the (typo'd)
    message includes the actual error.
    """
    cursor = db.cursor()
    sql = "SELECT * FROM Student"
    try:
        cursor.execute(sql)
        # Fetch the complete result set
        results = cursor.fetchall()
        for row in results:
            ID = row[0]
            Name = row[1]
            Grade = row[2]
            print("ID: %s, Name: %s, Grade: %d" % (ID, Name, Grade))
    except mysql.connector.Error as e:
        print("Error: unable to fetch data: {}".format(e))
def cleardb(db):
    """Delete all rows from ``knowdedge_info`` (TRUNCATE also resets counters)."""
    cursor = db.cursor()
    sql = "truncate knowdedge_info"
    try:
        cursor.execute(sql)
        db.commit()
    # NOTE(review): bare except hides real errors; narrow this to
    # mysql.connector.Error when next touching this code
    except:
        print ('删除数据失败!')
        # Roll back on error
        db.rollback()
def deletedb(db):
    """Delete every Student row whose Grade equals 100."""
    cursor = db.cursor()
    # NOTE(review): Grade is an INT but the value is quoted here; MySQL
    # coerces the string, so this works -- confirm before changing
    sql = "DELETE FROM Student WHERE Grade = '%d'" % (100)
    try:
        cursor.execute(sql)
        db.commit()
    # NOTE(review): bare except hides real errors; narrow when revisiting
    except:
        print ('删除数据失败!')
        # Roll back on error
        db.rollback()
def updatedb(db):
    """Add 3 points to the Grade of the student with ID '003'."""
    cursor = db.cursor()
    sql = "UPDATE Student SET Grade = Grade + 3 WHERE ID = '%s'" % ('003')
    try:
        cursor.execute(sql)
        db.commit()
    # NOTE(review): bare except hides real errors; narrow when revisiting
    except:
        print( '更新数据失败!')
        # Roll back on error
        db.rollback()
def closedb(db):
    """Close the MySQL connection."""
    db.close()
def handleChar(issue):
    """Split a raw knowledge-base cell into ``[issue, cause, solution]``.

    The cell may contain the Chinese markers for "problem" (问题:),
    "cause" (问题原因:) and "solution" (解决办法:).  Whitespace and
    newlines are stripped first; sections that are absent come back as
    empty strings.
    """
    text = issue.replace(' ', '').replace("\n", '')
    parts = re.split("问题:|问题原因:|解决办法:", text)
    result = ['', '', '']
    count = len(parts)
    if count == 1:
        # No markers at all: treat the whole cell as the issue text
        result[0] = parts[0]
    elif count == 2:
        # One marker: issue text followed by a solution
        result[0] = parts[0]
        result[2] = parts[1]
    elif count == 3:
        # Leading marker present: parts[0] is the empty prefix
        result[0] = parts[1]
        result[2] = parts[2]
    elif count == 4:
        # All three sections present
        result[0] = parts[1]
        result[1] = parts[2]
        result[2] = parts[3]
    return result
def main():
    """Load the Excel knowledge base and mirror its rows into MySQL."""
    db = connectdb()  # connect to the MySQL database
    cleardb(db)       # start from an empty knowdedge_info table
    filename = '信息科技服务台晨报(知识库)-2018.6.12.xls'
    # Open the source Excel workbook
    sourcedata = xlrd.open_workbook(filename)
    # First worksheet, selected by index
    sourcetable = sourcedata.sheets()[0]
    # The sheet uses merged cells: empty date/system cells inherit the
    # value from the row above, tracked via lasttime / lastSystemid
    lasttime = ''
    lastSystemid = ''
    index = 0
    while index < sourcetable.nrows:
        # data_list holds one row of raw cell values
        data_list = []
        data_list.extend(sourcetable.row_values(index))
        if (data_list[0] != ""):
            lasttime = data_list[0]
            createtime = lasttime
        else:
            createtime = lasttime
        if (data_list[1] != ""):
            lastSystemid = data_list[1]
            systemid = lastSystemid
        else:
            systemid = lastSystemid
        # Split the free-text cell into issue / cause / solution
        data = handleChar(data_list[2])
        issue = data[0]
        causes = data[1]
        solution = data[2]
        ps = ""
        status = data_list[3]
        annex = ''
        insertdb(db, createtime, systemid, issue, causes, solution, ps, status, annex)  # insert one row
        index += 1
    # createtable(db)  # demo helpers, left disabled
    querydb(db)
    # deletedb(db) / updatedb(db) demos intentionally disabled
    closedb(db)  # close the connection
if __name__ == '__main__':
main() | true |
7101f3b4a37bc55891656437da8d73e15299cf2d | Python | RessCris2/algorithm020 | /Week4/跳跃游戏.py | UTF-8 | 440 | 2.984375 | 3 | [] | no_license | # 跳跃游戏
class Solution(object):
    """LeetCode "Jump Game": can the last index be reached?"""

    def canJump(self, nums):
        """
        Greedy scan: keep the furthest index reachable so far.  Succeed as
        soon as the last index comes within reach; fail if the scan ever
        passes beyond what is reachable.

        :type nums: List[int]
        :rtype: bool
        """
        furthest = 0
        last = len(nums) - 1
        for idx, step in enumerate(nums):
            if idx > furthest:
                # This position can never be reached
                return False
            furthest = max(furthest, idx + step)
            if furthest >= last:
                return True
        # Empty input falls through to here
        return False
return False | true |
373945d1b16a2081006d2fdeeb75c187ed2b06f5 | Python | rack-leen/python-tutorial-notes | /chapter5_data_structures/list_queue.py | UTF-8 | 347 | 3.5625 | 4 | [] | no_license | #!/usr/bin/env python
# coding=utf-8
from collections import deque  # deque: fast appends and pops at both ends

queue = deque(["Eric", "Join", "Michae"])
# Enqueue at the right-hand end
queue.append("Terry")
print(queue)
queue.append("Graham")
print(queue)
# Dequeue from the left-hand end (FIFO order)
queue.popleft()
print(queue)
queue.popleft()
print(queue)
| true |
61d7226da68de2e20f027e6dc72076446b264971 | Python | achuthasubhash/digital-makeup-using-opencv | /bread.py | UTF-8 | 1,971 | 2.890625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 08:00:26 2020
@author: DELL
"""
import cv2
import face_recognition
from PIL import Image, ImageDraw
import numpy

jewel_img = cv2.imread("jewelery.png")
frame = cv2.imread('priyanka.jpeg')
frame = cv2.resize(frame, (432, 576))  # resize frame to a fixed working size

# List of facial-landmark dicts, one per face found in the frame
face_landmarks_list = face_recognition.face_landmarks(frame)
# The demo image contains exactly one face
face_landmarks = face_landmarks_list[0]
shape_chin = face_landmarks['chin']  # chin outline points

# Anchor the jewelry at the first chin point
x = shape_chin[0][0]
y = shape_chin[0][1]

img_width = abs(shape_chin[0][0] - shape_chin[17][0])
# NOTE(review): the height is scaled by the x *coordinate*, not by the
# jewelry aspect ratio -- kept as-is, but verify against the asset size
img_height = int(x * img_width)
jewel_img = cv2.resize(jewel_img, (img_width, img_height), interpolation=cv2.INTER_AREA)

jewel_gray = cv2.cvtColor(jewel_img, cv2.COLOR_BGR2GRAY)
# Pixels brighter than 230 become white (background), everything else black
thresh, jewel_mask = cv2.threshold(jewel_gray, 230, 255, cv2.THRESH_BINARY)
# Blank out the jewelry background so additive blending works
jewel_img[jewel_mask == 255] = 0
# Crop the target region from the original frame
jewel_area = frame[y:y+img_height, x:x+img_width]
# bitwise_and keeps the frame only where the mask is set
masked_jewel_area = cv2.bitwise_and(jewel_area, jewel_area, mask=jewel_mask)
# Adding lets each image show through the other's black regions
final_jewel = cv2.add(masked_jewel_area, jewel_img)
# Paste the composited jewelry region back into the frame
frame[y:y+img_height, x:x+img_width] = final_jewel

# BUG FIX: the original ended with plt.show(frame), but matplotlib was
# never imported (NameError at runtime); display with OpenCV instead.
cv2.imshow("result", frame)
cv2.waitKey(0)
cv2.destroyAllWindows()
236e3b07c2f127236715738b14ab82ecaa359046 | Python | sciglio/Python-Scripts | /Function_fit.py | UTF-8 | 2,295 | 3.484375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 20 13:54:16 2018
@author: marco
"""
"""FIT PROCEDURE IN PYTHON"""
"""VERSION 0.5"""
#GENERATE RANDOM DATA
import numpy as np  # math / array library
np.random.seed(0)  # seed the RNG for reproducibility
x_data = np.linspace(-5, 5, num=50)  # 50 evenly spaced samples in [-5, 5]
y_data = np.sin(x_data) + 0.2*np.random.normal(size=50)  # noisy sine samples

# FIT A FUNCTION TO THE DATA
from scipy import optimize  # optimization library

def test_func(x, a, b):
    # Model: amplitude a, angular frequency b
    return a*np.sin(b*x)

# Fit the model with initial parameter guesses (1, 1)
p, p_cov = optimize.curve_fit(test_func, x_data, y_data, p0=[1, 1])
# The square roots of the covariance diagonal approximate the 1-sigma
# uncertainties of the fitted parameters
p_err = (p_cov[0][0]**0.5, p_cov[1][1]**0.5)
print(p, p_err)

# PLOT THE DATA, THE FIT AND THE RESIDUES
import matplotlib.pyplot as plt  # plotting library
fig1 = plt.figure(1)
frame1 = fig1.add_axes((.1, .3, .8, .6))  # xstart, ystart, xend, yend (figure fractions)
plt.scatter(x_data, y_data, label='Data')
plt.plot(x_data, test_func(x_data, p[0], p[1]), label='Best fit function')
# BUG FIX: the extreme curves previously used p_err[0] for *both*
# parameters; the second parameter's error is p_err[1]
plt.plot(x_data, test_func(x_data, p[0]+p_err[0], p[1]+p_err[1]), label='Upper extreme')
plt.plot(x_data, test_func(x_data, p[0]-p_err[0], p[1]-p_err[1]), label='Lower extreme')
plt.legend(loc='best')
frame1.set_xticklabels([])  # hide x tick labels on the upper frame
plt.grid()

difference = y_data-test_func(x_data, p[0], p[1])  # residuals
y_zeros = np.zeros(50)  # zero-residual reference line
frame2 = fig1.add_axes((.1, .1, .8, .2))  # lower frame for the residuals
plt.plot(x_data, difference, 'or')  # residual scatter
plt.plot(x_data, y_zeros)
plt.grid()
plt.show()
| true |
3b428aa9bab973bb8da7d704e4d3a521ae3790fb | Python | harmsm/linux-utilities | /combine_data.py | UTF-8 | 1,759 | 3.296875 | 3 | [
"Unlicense"
] | permissive | #!/usr/bin/env python
__description__ = \
"""
Combine a set of R-compatible files, re-indexing line counter and adding a
column for the input file. All comments and blank lines are removed. The
program assumes that each data file has a single-line header and a %10i first
column containing line numbers.
"""
__author__ = "Michael J. Harms"
__date__ = "080205"
import os, sys
def combineFiles(to_combine):
    """
    Combine files in to_combine.

    Each input file must have a single-line header and a %10i first column
    of line numbers.  Comments ('#' lines) and blank lines are removed, the
    counter column is replaced by the file root (text before the first '_'),
    a 'type' column header is spliced in, and a global counter is rebuilt.

    Fixes: files are closed via context managers, ``is None`` replaces
    ``== None``, and the per-file header line is now stripped from *every*
    file (previously headers of files after the first leaked into the data).
    """
    header = None
    out = []

    for file in to_combine:
        with open(file, 'r') as f:
            contents = f.readlines()

        # Remove comments and blank lines
        contents = [l for l in contents if l[0] != "#" and l.strip() != ""]

        # Strip each file's single-line header; keep the first one seen,
        # splicing in a 'type' column after the 10-char counter field
        file_header = contents[0]
        contents = contents[1:]
        if header is None:
            header = "%s%10s%s" % (file_header[:10], "type", file_header[10:])

        # Grab file name (splitting at "_")
        file_root = file.split("_")[0]
        if file_root == "":
            file_root = file

        # Replace the 10-char counter column with the file root
        contents = ["%10s%s" % (file_root, l[10:]) for l in contents]
        out.extend(contents)

    # Re-index with a global counter and re-attach the header
    out = ["%10i%s" % (i, l) for i, l in enumerate(out)]
    out.insert(0, header)

    return out
def main():
    """
    If called from command line: combine the files named in sys.argv and
    print the merged table to stdout.

    Fixes: the original printed the undefined name ``__usage__`` (the
    module defines ``__description__``) inside an ``except IndexError``
    that could never fire -- slicing ``sys.argv[1:]`` never raises.
    Prints use the parenthesized form, valid on Python 2 and 3.
    """
    to_combine = sys.argv[1:]
    if not to_combine:
        print(__description__)
        sys.exit()

    for file in to_combine:
        if not os.path.isfile(file):
            print("%s does not exist!" % file)
            sys.exit()

    out = combineFiles(to_combine)
    print("".join(out))

if __name__ == "__main__":
    main()
| true |
de455dd6eede20d398af3a7b84fe2dc1f732bcac | Python | edgarinvillegas/deep-learning-v2-pytorch | /wasserstein-loss-gradient-2022/solution/tests.py | UTF-8 | 1,367 | 2.765625 | 3 | [
"MIT"
] | permissive | from typing import Callable
import torch
FAKE_LOGITS = torch.tensor([-0.2990, 1.6582, 0.0880, -0.2078, 0.1899, 1.9609, 0.1763, 0.9490,
-0.1863, 0.0479, -0.6072, 0.7648, 0.0189, 0.3165, 0.4645, -0.9585])
REAL_LOGITS = torch.tensor([-0.2219, 2.3058, -0.7330, -1.3858, -0.1590, 0.2727, -1.0026, 0.1778,
-0.3092, 1.1968, 1.4372, 1.2228, 1.3839, -0.7218, 1.5818, -1.1987])
def check_gen_w_loss(loss_fn: Callable):
    """Validate a generator Wasserstein-loss implementation.

    ``loss_fn`` receives the fake logits and must return a value close to
    the known-good result; a failed comparison raises via
    ``torch.testing.assert_close``.
    """
    want = torch.tensor(-0.2735)
    torch.testing.assert_close(
        loss_fn(FAKE_LOGITS),
        want,
        atol=1e-4,
        rtol=1e-4,
        msg='There is something wrong with your implementation.',
    )
    print('Congratulations, you successfully implemented the W-Loss for the generator')
def check_disc_w_loss(loss_fn: Callable):
    """Validate a critic (discriminator) Wasserstein-loss implementation.

    ``loss_fn`` receives real and fake logits and must return a value close
    to the known-good result; a failed comparison raises via
    ``torch.testing.assert_close``.
    """
    want = torch.tensor(0.0331)
    torch.testing.assert_close(
        loss_fn(REAL_LOGITS, FAKE_LOGITS),
        want,
        atol=1e-4,
        rtol=1e-4,
        msg='There is something wrong with your implementation.',
    )
    print('Congratulations, you successfully implemented the W-Loss for the discriminator')
ec462f886cae35f0fcf779a5c37a45d440e0eab2 | Python | csaddison/Physics-129L | /Homework_7/exercise1.py | UTF-8 | 2,158 | 3.265625 | 3 | [] | no_license | #------------------------------------------------------------
# Conner Addison 8984874
# Physics 129L
#------------------------------------------------------------
# Homework 7, Exercise 1
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
import ccHistStuff as cc
# Loading data
txt_file = 'straightTracks.txt'
raw = np.loadtxt(txt_file)

# Separate each event into tracks 0 and 1, each stored as
# [(x1, x2, x3, x4), (y1, y2, y3, y4)]
x = (2, 3, 5, 7)  # fixed sensor x positions shared by all tracks
data = []
X0 = []  # true x intercepts, first column of the file
Y0 = []  # true y intercepts, second column of the file
for r in raw:
    track0 = [x, (r[2], r[3], r[4], r[5])]
    track1 = [x, (r[6], r[7], r[8], r[9])]
    data.append([track0, track1])
    X0.append(r[0])
    Y0.append(r[1])
# Per-track fitted intercepts and their average, filled by the fit loop below
X1 = []
X2 = []
Xavg = []
def func(x, m, b):
    """Straight-line model y = m*x + b used by scipy.optimize.curve_fit."""
    return m * x + b
# Fit a straight line to each track; opt[0][1] is the fitted intercept b
for trial in data:
    opt1 = optimize.curve_fit(func, trial[0][0], trial[0][1])
    X1.append(opt1[0][1])
    opt2 = optimize.curve_fit(func, trial[1][0], trial[1][1])
    X2.append(opt2[0][1])
    Xavg.append((opt1[0][1] + opt2[0][1]) / 2)
# Preparing data for histograms: residuals of the fitted intercepts
# against the true intercept X0
x1_x0 = []
x2_x0 = []
xa_x0 = []
for i in range(len(X0)):
    x1_x0.append(X1[i] - X0[i])
    x2_x0.append(X2[i] - X0[i])
    xa_x0.append(Xavg[i] - X0[i])
# Adding plot of tracks (first 10 events only)
fig = plt.figure(figsize = (14,6))
plot = fig.add_subplot(131)
fig.subplots_adjust(wspace = .5)
for n in range(10):
    # NOTE(review): both calls use data[n][1][0] (track 1 x positions) for
    # the vertical axis -- presumably intentional since x is shared; verify
    plot.plot(data[n][0][1], data[n][1][0], color = '.6', linestyle = '--')
    plot.plot(data[n][1][1], data[n][1][0], color = '.6', linestyle = '--')
plot.set(title = 'First 10 collisions', xlabel = r'Sensor $x$ position', ylabel = r'Sensor $y$ position')
# Adding histograms of the intercept errors
hist1 = fig.add_subplot(132)
hist2 = fig.add_subplot(133)
(n1, bins1, patches1) = hist1.hist((x1_x0, x2_x0), 20, stacked = True)
(n2, bins2, patches2) = hist2.hist(xa_x0, 20)
hist1.set(xlim = (-.05, .05), title = r'$(x_1/x_2 - x_0 )$', xlabel = r'$\Delta x$', ylabel = 'Counts')
hist2.set(xlim = (-.05, .05), title = r'$x_a - x_0$', xlabel = r'$\Delta x$', ylabel = 'Counts')
# Adding stat boxes (mean/RMS overlays from ccHistStuff)
cc.statBox(hist1, x1_x0, bins1)
cc.statBox(hist2, xa_x0, bins2)
# Saving/showing figure
#plt.savefig('tracks.png')
plt.show()
1f5e932b52314df0b3d6ed1ab97db055312c511f | Python | cadbane/rfactor-udp | /pyfactor/hd44780.py | UTF-8 | 1,186 | 2.640625 | 3 | [] | no_license | import RPi.GPIO as GPIO
from time import sleep
class HD44780:
    """Minimal 4-bit driver for an HD44780 character LCD on Raspberry Pi GPIO."""

    def __init__(self, pin_rs=7, pin_e=8, pins_db=[25, 24, 23, 18]):
        # NOTE(review): mutable default argument pins_db is shared across
        # instances; harmless here because it is only read
        GPIO.setwarnings(False)
        self.pin_rs = pin_rs      # register-select pin (command vs data)
        self.pin_e = pin_e        # enable (latch) pin
        self.pins_db = pins_db    # four data pins
        GPIO.setmode(GPIO.BCM)    # use Broadcom pin numbering
        GPIO.setup(self.pin_e, GPIO.OUT)
        GPIO.setup(self.pin_rs, GPIO.OUT)
        for pin in self.pins_db:
            GPIO.setup(pin, GPIO.OUT)
        self.clear()

    def clear(self):
        """Initialise the display into 4-bit, 2-line mode and clear it."""
        self.cmd(0x33)  # init handshake (8-bit -> 4-bit switch, part 1)
        self.cmd(0x32)  # init handshake, part 2
        self.cmd(0x28)  # function set: 4-bit bus, 2 lines, 5x8 font
        self.cmd(0x0c)  # display on, cursor off
        self.cmd(0x06)  # entry mode: increment cursor, no display shift
        self.cmd(0x01)  # clear display

    def cmd(self, bits, char_mode=False):
        """Clock one byte to the display as two 4-bit nibbles.

        bits      -- command byte, or character code when char_mode is True
        char_mode -- drive RS high to write data RAM instead of a command
        """
        sleep(0.001)  # give the controller time to finish the previous op
        bits = bin(bits)[2:].zfill(8)  # 8-char binary string, MSB first
        GPIO.output(self.pin_rs, char_mode)
        for pin in self.pins_db:
            GPIO.output(pin, False)
        # High nibble first; pins_db is reversed so its last entry is the MSB
        for i in range(4):
            if bits[i] == "1":
                GPIO.output(self.pins_db[::-1][i], True)
        # Pulse enable to latch the nibble
        GPIO.output(self.pin_e, True)
        GPIO.output(self.pin_e, False)
        for pin in self.pins_db:
            GPIO.output(pin, False)
        # Low nibble
        for i in range(4, 8):
            if bits[i] == "1":
                GPIO.output(self.pins_db[::-1][i-4], True)
        GPIO.output(self.pin_e, True)
        GPIO.output(self.pin_e, False)

    def message(self, text):
        """Write a string; a newline moves output to the second line (0xC0)."""
        for char in text:
            if char == "\n":
                self.cmd(0xc0)
            else:
                self.cmd(ord(char), True)
| true |
cf095fb7330e279d18bc45c92e7ee35f59896405 | Python | David-Papworth/Flask_practice | /flask_routes_excercise/app.py | UTF-8 | 305 | 3.03125 | 3 | [] | no_license | from flask import Flask
app = Flask(__name__)

@app.route('/')
@app.route('/home')
def home():
    # Landing page, reachable at both "/" and "/home"
    return 'This is the home page'

@app.route('/square/<int:number>')
def about(number):
    # The <int:number> converter guarantees `number` arrives as an int
    return f'The square of {number} is {number**2}'

if __name__ == "__main__":
    # host 0.0.0.0 exposes the development server on all interfaces
    app.run(debug=True, host='0.0.0.0')
91543af6cbc4d2982d6b22b139b6bdc57219d7bb | Python | wkslearner/skill_advance | /parameter_advance/arg_parse.py | UTF-8 | 849 | 2.71875 | 3 | [] | no_license | import argparse
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.cross_validation import train_test_split
df=pd.DataFrame({'col1':[1,2,3,4],'col2':['a','b','a','c'],'col3':[7,8,9,10],'col4':[87,83,98,19]})
df=pd.pivot_table(df,index='col1',columns='col2',values=['col3','col4']).reset_index(level='col1',col_level=0,col_fill='d')
df.columns.names=['col','cate']
df=df.swaplevel('cate','col',axis=1)
# df=df.sortlevel(level=[0,1],ascending=[0,0],axis=1)
# df=df.sortlevel(level=[1],ascending=[0],axis=1)
# df=df.reset_index(level='col1',col_level=1)
df.columns.names=[None,None]
print(df)
excel_writer=pd.ExcelWriter('',engine='xlsxwriter')
df.to_excel(excel_writer,index=False)
excel_writer.save()
# parser = argparse.ArgumentParser()
# parser.add_argument("echo",default=10)
# args = parser.parse_args()
# print (args)
| true |
0afe6711476703df52b7a023a0123bbc154bc446 | Python | jaime7981/Arduino_EFI | /EFI_map.py | UTF-8 | 1,111 | 2.796875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
# Axis label values: engine speed (RPM) and throttle position.
# NOTE(review): rpms has 8 entries and throttle 9 for a 7x7 map, so the
# tick labels cannot all line up with the grid -- confirm intended values.
rpms = np.array([4000, 3500, 3000, 2500, 2000, 1500, 1000, 500])
throttle = np.array([0, 0, 10, 20, 40, 60, 80, 100, 120])
# 7x7 target air/fuel-ratio table rendered by ShowEFIMap()
efi_map = np.array([[17.2, 16.8, 15.5, 14.8, 13.8, 13.0, 12.2],
                    [17.0, 16.5, 15.0, 14.0, 13.4, 13.0, 12.4],
                    [16.8, 16.0, 14.6, 14.2, 13.6, 13.2, 12.6],
                    [16.6, 15.8, 14.8, 14.4, 13.8, 13.4, 12.8],
                    [16.4, 15.5, 15.0, 14.6, 14.0, 13.6, 13.0],
                    [16.2, 15.6, 15.2, 14.8, 14.2, 13.8, 13.2],
                    [16.0, 15.8, 15.5, 15.1, 14.6, 14.0, 13.5]])
def ShowEFIMap():
    """Render the module-level efi_map as an annotated heat map."""
    plt.figure(figsize = (6, 6))
    ax = plt.subplot(111)
    ax.set_ylabel("RPM")
    ax.set_xlabel("Throttle")
    plt.imshow(efi_map, cmap = "autumn")
    # NOTE(review): set_xticklabels without set_xticks relies on default
    # tick positions, and the label arrays do not match the 7x7 grid size
    ax.set_xticklabels(throttle)
    ax.set_yticklabels(rpms)
    for a in range(len(efi_map)):
        for b in range(len(efi_map[a])):
            # text(x, y, ...): here `a` is the column and `b` the row,
            # hence the transposed efi_map[b, a] lookup
            ax.text(a, b, efi_map[b, a], ha = "center", va = "center", color = "b")
    ax.set_title("EFI MAP")
    plt.colorbar()
    plt.show()

ShowEFIMap()
0b537dce803fc0ce2d760e611bbd7c2bd672cf45 | Python | Jay23Cee/100_Day_Challenge | /Python/Merge_two_array.py | UTF-8 | 749 | 3.5 | 4 | [] | no_license | def mergeArrays(arr1, arr2, n1,n2):
i=0;
j=0;
k=0;
arr3 = [None] * (n1+n2)
while i < n1 and j < n2:
if arr1[i] > arr2[j]:
arr3[k] = arr2[j]
j= j+1
k =k+1
elif arr1[i] < arr2[j]:
arr3[k] = arr1[i]
i =i+1
k =k+1
else:
arr3[k] = arr2[i]
j= j+1
i =i+1
k =k+1
while i < n1:
arr3[k] = arr1[i]
k =k+1
i =k+1
while j<n2:
arr3[k] = arr1[j]
k =k+1
i =i+1
print(arr3, end = " " )
# Driver code: merge two sorted demo arrays
arr1 = [1, 3, 5, 7, 9]
n1 = len(arr1)
arr2 = [2, 4, 6, 8]
n2 = len(arr2)
mergeArrays(arr1, arr2, n1, n2);  # prints the merged array
91210c24d407cab9a2e323df32cab7e71d39f88f | Python | Incarnation/home_risk_competition_kaggle | /LightGbmOneFold/LightGbmOneFold.py | UTF-8 | 3,566 | 2.765625 | 3 | [] | no_license | # coding:utf-8
import re
import os
import sys
import numpy as np
import pandas as pd
from category_encoders import TargetEncoder
from lightgbm import LGBMClassifier
from sklearn.metrics import roc_auc_score
np.random.seed(7)
class LightGbmOneFold(object):
    """Single-fold LightGBM pipeline for the Home Credit competition.

    Reads pre-selected feature CSVs from ``input_path``, target-encodes the
    categorical columns, fits one LGBMClassifier on the full training set,
    then writes feature importances and a submission file to ``output_path``.
    """

    def __init__(self, *, input_path, output_path):
        self.__input_path, self.__output_path = input_path, output_path

        # data prepare -- populated by data_prepare()
        self.__sample_submission = None
        self.__train, self.__test = [None for _ in range(2)]
        self.__train_feature, self.__test_feature = [None for _ in range(2)]
        self.__train_label = None
        self.__categorical_columns = None
        self.__encoder = None

        # model fit -- populated by model_fit()
        self.__folds = None
        self.__train_preds = None
        self.__test_preds = None
        self.__gbm = None

    def data_prepare(self):
        """Load the CSVs, drop ID columns and target-encode categoricals."""
        self.__sample_submission = pd.read_csv(os.path.join(self.__input_path, "sample_submission.csv"))
        # selected feature
        self.__train = pd.read_csv(
            os.path.join(self.__input_path, "train_select_feature_df.csv"))
        self.__test = pd.read_csv(
            os.path.join(self.__input_path, "test_select_feature_df.csv"))

        self.__train_label = self.__train["TARGET"]
        # Drop the target plus every SK_ID* bookkeeping column
        self.__train_feature = self.__train.drop(
            ["TARGET"] + [col for col in self.__train.columns.tolist() if re.search(r"SK_ID", col)], axis=1)
        self.__test_feature = self.__test[self.__train_feature.columns.tolist()]

        self.__categorical_columns = self.__train_feature.select_dtypes("object").columns.tolist()
        # Target encoding is fitted on train only, then applied to both sets
        self.__encoder = TargetEncoder()
        self.__encoder.fit(self.__train_feature.loc[:, self.__categorical_columns], self.__train_label)
        self.__train_feature.loc[:, self.__categorical_columns] = (
            self.__encoder.transform(self.__train_feature.loc[:, self.__categorical_columns])
        )
        self.__test_feature.loc[:, self.__categorical_columns] = (
            self.__encoder.transform(self.__test_feature.loc[:, self.__categorical_columns])
        )

    def model_fit(self):
        """Fit the classifier on all training rows and record predictions."""
        feature_importance_df = pd.DataFrame()
        # Hyper-parameters presumably come from a prior tuning run -- verify
        self.__gbm = LGBMClassifier(
            n_estimators=5000,
            learning_rate=0.0128,
            max_depth=8,
            num_leaves=11,
            min_split_gain=0.0018,
            min_child_weight=2.6880,
            colsample_bytree=0.5672,
            subsample=0.6406,
            reg_alpha=3.5025,
            reg_lambda=0.9549,
            n_jobs=-1
        )
        self.__gbm.fit(self.__train_feature, self.__train_label, verbose=True)

        self.__train_preds = self.__gbm.predict_proba(self.__train_feature)[:, 1]
        self.__test_preds = self.__gbm.predict_proba(self.__test_feature)[:, 1]

        feature_importance_df["feature"] = pd.Series(self.__train_feature.columns)
        feature_importance_df["importance"] = self.__gbm.feature_importances_
        feature_importance_df.to_csv(os.path.join(self.__output_path, "feature_importance.csv"), index=False)

        # AUC evaluated on the training data itself (optimistic estimate)
        print("Train AUC score %.6f" % roc_auc_score(self.__train_label, self.__train_preds))

    def model_predict(self):
        """Write the test-set probabilities into the sample submission file."""
        self.__sample_submission["TARGET"] = self.__test_preds
        self.__sample_submission.to_csv(os.path.join(self.__output_path, "sample_submission.csv"), index=False)
if __name__ == "__main__":
lgof = LightGbmOneFold(
input_path=sys.argv[1],
output_path=sys.argv[2]
)
lgof.data_prepare()
lgof.model_fit()
lgof.model_predict() | true |
a03a5db1b46461a8cadfea756fd6779974cc9b6d | Python | MaartenGr/KeyBERT | /keybert/_maxsum.py | UTF-8 | 2,287 | 3.421875 | 3 | [
"MIT"
] | permissive | import numpy as np
import itertools
from sklearn.metrics.pairwise import cosine_similarity
from typing import List, Tuple
def max_sum_distance(
    doc_embedding: np.ndarray,
    word_embeddings: np.ndarray,
    words: List[str],
    top_n: int,
    nr_candidates: int,
) -> List[Tuple[str, float]]:
    """Extract keywords with the Max Sum Distance strategy.

    The ``nr_candidates`` words most similar to the document are taken
    first; from those, the ``top_n``-sized combination whose members are
    *least* similar to each other (by pairwise cosine similarity) is kept.
    This enumerates all combinations and is therefore not advised for a
    large ``top_n``.

    Arguments:
        doc_embedding: The document embeddings
        word_embeddings: The embeddings of the candidate keywords/phrases
        words: The candidate keywords/keyphrases
        top_n: The number of keywords/keyphrases to return
        nr_candidates: The number of candidates to consider

    Returns:
        List[Tuple[str, float]]: keywords with their document similarity
    """
    if nr_candidates < top_n:
        raise Exception(
            "Make sure that the number of candidates exceeds the number "
            "of keywords to return."
        )
    elif top_n > len(words):
        return []

    # Similarity of every candidate to the document and to each other
    doc_sims = cosine_similarity(doc_embedding, word_embeddings)
    word_sims = cosine_similarity(word_embeddings, word_embeddings)

    # Keep the nr_candidates words closest to the document
    top_idx = list(doc_sims.argsort()[0][-nr_candidates:])
    top_words = [words[idx] for idx in top_idx]
    candidate_sims = word_sims[np.ix_(top_idx, top_idx)]

    # Among those, find the top_n combination with minimal mutual similarity
    best_combo = None
    best_score = 100_000
    for combo in itertools.combinations(range(len(top_idx)), top_n):
        score = sum(
            candidate_sims[i][j] for i in combo for j in combo if i != j
        )
        if score < best_score:
            best_combo = combo
            best_score = score

    return [
        (top_words[i], round(float(doc_sims[0][top_idx[i]]), 4))
        for i in best_combo
    ]
| true |
5193cb544250d1c78a0fc63a44b507c2f6297bd0 | Python | CORE-GATECH-GROUP/hydep | /src/hydep/internal/cram.py | UTF-8 | 6,946 | 2.96875 | 3 | [
"MIT"
] | permissive | """
Shadow of OpenMC CRAM solvers
Inspired and, in some places, copied from OpenMC
Copyright: 2011-2020 Massachusetts Institute of Technology and
OpenMC collaborators
https://docs.openmc.org
https://docs.openmc.org/en/stable/pythonapi/deplete.html
https://github.com/openmc-dev/openmc
"""
import numbers
import numpy
import scipy.sparse
from scipy.sparse.linalg import spsolve
from hydep.typed import IterableOf, TypedAttr
class IPFCramSolver:
    r"""CRAM depletion solver based on an incomplete partial factorization.

    Evaluates the matrix exponential through the Chebyshev Rational
    Approximation Method (CRAM) using the incomplete partial factorization
    (IPF) described in M. Pusa, "`Higher-Order Chebyshev Rational
    Approximation Method and Application to Burnup Equations
    <https://doi.org/10.13182/NSE15-26>`_," Nucl. Sci. Eng., 182:3, 297-318.

    Parameters
    ----------
    alpha : numpy.ndarray
        Complex residues of poles used in the factorization. Must have
        the same number of items as ``theta``.
    theta : numpy.ndarray
        Complex poles of the rational approximation.
    alpha0 : float
        Limit of the approximation at infinity.

    Attributes
    ----------
    alpha : numpy.ndarray
        Complex residues :math:`\tilde{\alpha}` of the poles :attr:`theta`.
    theta : numpy.ndarray
        Complex poles :math:`\theta` of the rational approximation.
    alpha0 : float
        Limit of the approximation at infinity.
    """

    alpha = IterableOf("alpha", numbers.Complex)
    theta = IterableOf("theta", numbers.Complex)
    alpha0 = TypedAttr("alpha0", numbers.Real)

    def __init__(self, alpha, theta, alpha0):
        self.alpha = numpy.asarray(alpha)
        self.theta = numpy.asarray(theta)
        if self.alpha.size != self.theta.size:
            raise ValueError(
                "Input vectors must be of same size. Alpha: {}. "
                "Theta: {}".format(self.alpha.size, self.theta.size))
        self.alpha0 = alpha0

    def __call__(self, A, n0, dt):
        """Solve the depletion equations with IPF CRAM.

        Parameters
        ----------
        A : scipy.sparse.csr_matrix
            Sparse transmutation matrix where ``A[j, i]`` is the rate at
            which isotope ``i`` transmutes to isotope ``j``
        n0 : numpy.ndarray
            Starting compositions, e.g. atoms or atom densities
        dt : float
            Length of the depletion step [s]

        Returns
        -------
        numpy.ndarray
            Compositions after ``dt``
        """
        system = scipy.sparse.csr_matrix(A * dt, dtype=numpy.float64)
        vec = numpy.asarray(n0, dtype=numpy.float64)
        identity = scipy.sparse.eye(system.shape[0])
        # Each pole contributes twice the real part of a sparse solve
        # against (A*dt - theta*I); conjugate pairs are folded into the
        # factor of two
        for residue, pole in zip(self.alpha, self.theta):
            vec += 2 * numpy.real(residue * spsolve(system - pole * identity, vec))
        return vec * self.alpha0
# Coefficients for IPF Cram 16
# Residues (alpha) of the 8 conjugate-pair poles for the order-16
# approximation.  Values are transcribed constants; do not reformat.
c16_alpha = numpy.array([
    +5.464930576870210e+3 - 3.797983575308356e+4j,
    +9.045112476907548e+1 - 1.115537522430261e+3j,
    +2.344818070467641e+2 - 4.228020157070496e+2j,
    +9.453304067358312e+1 - 2.951294291446048e+2j,
    +7.283792954673409e+2 - 1.205646080220011e+5j,
    +3.648229059594851e+1 - 1.155509621409682e+2j,
    +2.547321630156819e+1 - 2.639500283021502e+1j,
    +2.394538338734709e+1 - 5.650522971778156e+0j],
    dtype=numpy.complex128)
# Complex poles (theta) matching c16_alpha entry-for-entry.
c16_theta = numpy.array([
    +3.509103608414918 + 8.436198985884374j,
    +5.948152268951177 + 3.587457362018322j,
    -5.264971343442647 + 16.22022147316793j,
    +1.419375897185666 + 10.92536348449672j,
    +6.416177699099435 + 1.194122393370139j,
    +4.993174737717997 + 5.996881713603942j,
    -1.413928462488886 + 13.49772569889275j,
    -10.84391707869699 + 19.27744616718165j],
    dtype=numpy.complex128)
# Limit of the rational approximation at infinity.
c16_alpha0 = 2.124853710495224e-16
# Module-level, ready-to-use order-16 solver instance.
Cram16Solver = IPFCramSolver(c16_alpha, c16_theta, c16_alpha0)
# Intermediates deleted to keep the module namespace clean.
del c16_alpha, c16_alpha0, c16_theta
# Coefficients for 48th order IPF Cram
# Real and imaginary parts of the 24 conjugate-pair poles; combined into
# complex arrays below.  Values are transcribed constants; do not reformat.
theta_r = numpy.array([
    -4.465731934165702e+1, -5.284616241568964e+0,
    -8.867715667624458e+0, +3.493013124279215e+0,
    +1.564102508858634e+1, +1.742097597385893e+1,
    -2.834466755180654e+1, +1.661569367939544e+1,
    +8.011836167974721e+0, -2.056267541998229e+0,
    +1.449208170441839e+1, +1.853807176907916e+1,
    +9.932562704505182e+0, -2.244223871767187e+1,
    +8.590014121680897e-1, -1.286192925744479e+1,
    +1.164596909542055e+1, +1.806076684783089e+1,
    +5.870672154659249e+0, -3.542938819659747e+1,
    +1.901323489060250e+1, +1.885508331552577e+1,
    -1.734689708174982e+1, +1.316284237125190e+1])
theta_i = numpy.array([
    +6.233225190695437e+1, +4.057499381311059e+1,
    +4.325515754166724e+1, +3.281615453173585e+1,
    +1.558061616372237e+1, +1.076629305714420e+1,
    +5.492841024648724e+1, +1.316994930024688e+1,
    +2.780232111309410e+1, +3.794824788914354e+1,
    +1.799988210051809e+1, +5.974332563100539e+0,
    +2.532823409972962e+1, +5.179633600312162e+1,
    +3.536456194294350e+1, +4.600304902833652e+1,
    +2.287153304140217e+1, +8.368200580099821e+0,
    +3.029700159040121e+1, +5.834381701800013e+1,
    +1.194282058271408e+0, +3.583428564427879e+0,
    +4.883941101108207e+1, +2.042951874827759e+1])
c48_theta = numpy.array(theta_r + theta_i * 1j, dtype=numpy.complex128)
# Real and imaginary parts of the matching residues.
alpha_r = numpy.array([
    +6.387380733878774e+2, +1.909896179065730e+2,
    +4.236195226571914e+2, +4.645770595258726e+2,
    +7.765163276752433e+2, +1.907115136768522e+3,
    +2.909892685603256e+3, +1.944772206620450e+2,
    +1.382799786972332e+5, +5.628442079602433e+3,
    +2.151681283794220e+2, +1.324720240514420e+3,
    +1.617548476343347e+4, +1.112729040439685e+2,
    +1.074624783191125e+2, +8.835727765158191e+1,
    +9.354078136054179e+1, +9.418142823531573e+1,
    +1.040012390717851e+2, +6.861882624343235e+1,
    +8.766654491283722e+1, +1.056007619389650e+2,
    +7.738987569039419e+1, +1.041366366475571e+2])
alpha_i = numpy.array([
    -6.743912502859256e+2, -3.973203432721332e+2,
    -2.041233768918671e+3, -1.652917287299683e+3,
    -1.783617639907328e+4, -5.887068595142284e+4,
    -9.953255345514560e+3, -1.427131226068449e+3,
    -3.256885197214938e+6, -2.924284515884309e+4,
    -1.121774011188224e+3, -6.370088443140973e+4,
    -1.008798413156542e+6, -8.837109731680418e+1,
    -1.457246116408180e+2, -6.388286188419360e+1,
    -2.195424319460237e+2, -6.719055740098035e+2,
    -1.693747595553868e+2, -1.177598523430493e+1,
    -4.596464999363902e+3, -1.738294585524067e+3,
    -4.311715386228984e+1, -2.777743732451969e+2])
c48_alpha = numpy.array(alpha_r + alpha_i * 1j, dtype=numpy.complex128)
# Limit of the rational approximation at infinity for order 48.
c48_alpha0 = 2.258038182743983e-47
# Module-level, ready-to-use order-48 solver instance.
Cram48Solver = IPFCramSolver(c48_alpha, c48_theta, c48_alpha0)
# Intermediates deleted to keep the module namespace clean.
del c48_alpha, c48_alpha0, c48_theta, alpha_r, alpha_i, theta_r, theta_i
| true |
40313fa1029d5146bd1db0dd8857c60f6f5583d8 | Python | chinitacode/Python_Learning | /Concurrent_Programming/multhread/Daemon/test_daemon.py | UTF-8 | 1,865 | 4.09375 | 4 | [] | no_license | '''
If you set a thread as a daemon thread, you are declaring that the thread
is "not important": when the process exits, there is no need to wait for
that thread to finish.
If your main thread should exit without waiting for its child threads,
set those threads' daemon attribute, i.e. before the thread starts
(thread.start()), call setDaemon() to set the daemon flag
(thread.setDaemon(True)) to mark the thread as "unimportant".
If you do want to wait for child threads before exiting, do nothing,
or explicitly call thread.setDaemon(False).
Newly created child threads inherit the parent thread's daemon flag.
The main thread only ends after every non-daemon thread has exited,
i.e. the process terminates once no non-daemon threads remain.
'''
from threading import Thread
import time
def hello(name):
    """Greet as *name*, idle for two seconds, then announce completion."""
    greeting = '%s says hello.' % name
    print(greeting)
    time.sleep(2)
    print('Thread ended.')
if __name__ == '__main__':
    # The non-daemon case, kept here commented out for comparison:
    # print('主线程')
    # t = Thread(target = hello, args = ('Nick',))
    # t.start()
    # #time.sleep(3)
    # print(t.is_alive())
    # print('主线程结束')
    # The bare triple-quoted string below records the expected output of
    # the commented-out run; it is a no-op expression, not a docstring.
    '''
    【输出结果】:
    主线程
    Nick says hello.
    True
    主线程结束
    Thread ended.
    '''
    # Start a daemon thread: the process exits without waiting for it.
    print('主线程')
    t1 = Thread(target = hello, args = ('Nick',), daemon = True) # equivalent to calling t1.setDaemon(True) separately
    t1.start()
    print(t1.is_alive())
    print('主线程结束')
    # Expected output of the daemon run: the thread is killed with the
    # process, so 'Thread ended.' is never printed.
    '''
    【输出结果】:
    主线程
    Nick says hello.
    True
    主线程结束
    【Note】:
    可看出设置为了守护线程后,线程虽然未运行完毕,但是在主线程结束后也就结束了。
    并未来得及打印'Thread ended.'。
    '''
| true |
84f6519b65e3978b5a68e9e5f14b3d7873701555 | Python | catrina-brunkow/RA_Boyce_Astro | /tool/forms.py | UTF-8 | 2,507 | 2.75 | 3 | [] | no_license | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, IntegerField, SelectField, FloatField, RadioField
from wtforms.validators import Optional, NumberRange, Regexp
class QueryForm(FlaskForm):
    '''
    QueryForm class
    Used to generate a form with various fields that work as filters for searching the WDS-GAIA dataset
    Prevents users from entering invalid number ranges
    Also uses a regex to validate the wds_name field
    '''
    # WDS designation, e.g. '12345+1234' (five digits, sign, four digits).
    # The pattern is anchored with '^...$' and only allows '+' or '-' as the
    # separator: the previous pattern '[0-9]{5}[+,-]{1}[0-9]{4}' also accepted
    # a comma and, because WTForms' Regexp validator uses re.match (which only
    # anchors the start), accepted arbitrary trailing characters as well.
    WDS_name = StringField('WDS_name', validators=[Optional(), Regexp('^[0-9]{5}[+-][0-9]{4}$', message='Invalid format.')])
    min_mag = FloatField('Min primary component magnitude', validators=[Optional(), NumberRange(0, 50, message='Mag invalid')])
    max_mag = FloatField('Max primary component magnitude', validators=[Optional(), NumberRange(0, 50, message='Mag invalid')])
    # Right ascension as HHMMSS, ex: 120000
    min_ra = FloatField('Min RA', validators=[Optional(), NumberRange(0, 240000, message='RA invalid')])
    max_ra = FloatField('Max RA', validators=[Optional(), NumberRange(0, 240000, message='RA invalid')])
    # Declination in degrees, ex: -75
    min_dec = FloatField('Min DEC', validators=[Optional(), NumberRange(-90, 90, message='Dec invalid')])
    max_dec = FloatField('Max DEC', validators=[Optional(), NumberRange(-90, 90, message='Dec invalid')])
    # Angular separation, ex: 8
    min_sep = FloatField('Min Separation', validators=[Optional(), NumberRange(0, 100, message='Sep invalid')])
    max_sep = FloatField('Max Separation', validators=[Optional(), NumberRange(0, 100, message='Sep invalid')])
    max_delta_mag = FloatField('Max Delta Mag', validators=[Optional(), NumberRange(0, 12, message='Delta Mag invalid')])
    # TODO(review): 'nobs' should probably become 'max_nobs' to pair with
    # 'min_nobs'; kept as-is because templates/routes reference these names.
    min_nobs = FloatField('Minimum Number Observations', validators=[Optional(), NumberRange(0,100, message ='Number observations invalid')])
    nobs = FloatField('Number Observations', validators=[Optional(), NumberRange(0,100, message='Number observations invalid')])
    last_obs = FloatField('Last Observation', validators=[Optional(), NumberRange(1782, 2030, message='Last observation invalid')])
    # Change in separation between observations (min and max bounds).
    min_d_sep = FloatField('Minimum Delta Separation', validators=[Optional(), NumberRange(-10,10, message='Delta separation invalid')])
    max_d_sep = FloatField('Maximum Delta Separation', validators=[Optional(), NumberRange(-10,10, message='Delta separation invalid')])
    submit = SubmitField('Search')
| true |
a84b8449fcb995a9606f56cd782e8e07c4c91e7b | Python | IdrisovRI/python_test_application_backend | /helper/validate.py | UTF-8 | 248 | 2.890625 | 3 | [] | no_license | class PackageDataValidator(object):
@staticmethod
async def validate(package: str):
result = False
if "Symbol" in package and "Bid" in package and "Ask" in package:
result = True
return result
| true |
a374ae6b0f0d910c0d009ff9527102d2a844629b | Python | LuckyTiger66/py-scraping-analysis-book | /ch5/sqlite.py | UTF-8 | 1,334 | 3.1875 | 3 | [
"MIT"
] | permissive | import sqlite3
import csv
def execute_db(fname, sql_cmd):
    """Run a single write statement against the SQLite database *fname*.

    Commits on success.  The connection is always closed, even when the
    statement raises — the previous version leaked the connection on error.
    """
    conn = sqlite3.connect(fname)
    try:
        # Connection.execute creates and consumes a cursor internally.
        conn.execute(sql_cmd)
        conn.commit()
    finally:
        conn.close()
def select_db(fname, sql_cmd):
    """Run a query against the SQLite database *fname* and return all rows.

    The connection is always closed, even when the query raises — the
    previous version leaked the connection on error.
    """
    conn = sqlite3.connect(fname)
    try:
        return conn.execute(sql_cmd).fetchall()
    finally:
        conn.close()
if __name__ == '__main__':
    # Demo script: builds db.sqlite in the working directory and walks
    # through create / insert / update / bulk-load / select.
    db_name = 'db.sqlite'
    # Create the database file and the 'record' table.
    print('建立資料庫及資料表')
    cmd = 'CREATE TABLE record (id INTEGER PRIMARY KEY AUTOINCREMENT, item TEXT, price INTEGER, shop TEXT)'
    execute_db(db_name, cmd)
    # Insert one test row.
    print('插入測試資料')
    cmd = 'INSERT INTO record (item, price, shop) VALUES ("PS4測試機", 1000, "測試賣家")'
    execute_db(db_name, cmd)
    # Update the test row's shop name.
    print('更新資料')
    cmd = 'UPDATE record SET shop="EZ賣家" where shop="測試賣家"'
    execute_db(db_name, cmd)
    # Bulk-insert rows from a scraped CSV (expects ezprice.csv next to
    # this script, with 品項/價格/商家 columns).
    print('插入多筆資料')
    with open('ezprice.csv', 'r', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            # NOTE(review): SQL built via string formatting — fine for this
            # local demo, but parameterized queries ("VALUES (?, ?, ?)")
            # would be safer if the CSV content were untrusted.
            cmd = 'INSERT INTO record (item, price, shop) VALUES ("%s", %d, "%s")' % (row['品項'], int(row['價格']), row['商家'])
            execute_db(db_name, cmd)
    # Query back the rows for one shop and print them.
    print('選擇資料')
    cmd = 'SELECT * FROM record WHERE shop="friDay購物"'
    for row in select_db(db_name, cmd):
        print(row)
| true |
d7bbb36fd22d0cdbb9faffdd04f7206c16448f8e | Python | pluemthnn/ITCS451-AI | /assign1/checkpoint1.py | UTF-8 | 1,462 | 3.21875 | 3 | [] | no_license | import numpy as np
from hw1.envutil import gen_maze, render_maze
from hw1.pathfinding import MazeState, TreeNode
from hw1.pqueue import SimplePriorityQueue
# Smoke-test script for the hw1 maze environment: exercises transitions,
# costs, goal detection and the heuristic on a freshly generated maze.
maze1 = gen_maze(10, add_mud=True)
state1 = MazeState(maze1)
print(render_maze(maze1))
# try to perform action:
# Apply every legal action once from the start state and show the result.
for action in state1.actions:
    print('-' * 50)
    print('ACTION: ', action)
    state2 = MazeState.transition(state1, action)
    cost = MazeState.cost(state1, action)
    if state2 is None:
        # transition() returns None for actions blocked by a wall.
        print('Impossible!')
    else:
        print(render_maze(state2.grid))
        print('Cost: ', cost)
print('=' * 50)
# try random actions for a while:
cur_state = state1
print(render_maze(cur_state.grid))
for __ in range(10):
    action = np.random.choice(cur_state.actions)
    print('-' * 50)
    print('ACTION: ', action)
    next_state = MazeState.transition(cur_state, action)
    cost = MazeState.cost(cur_state, action)
    if next_state is None:
        print('Impossible!')
    else:
        print(render_maze(next_state.grid))
        cur_state = next_state
        print('Cost: ', cost)
# check goal
# Build a goal configuration by hand: clear the start cell (1,1) and put
# the agent marker (3) at the cell just inside the far corner.
print('=' * 50)
y = np.array(maze1)
y[1, 1] = 0
y[-2, -2] = 3
goal_state = MazeState(y)
print('Start state is goal?', MazeState.is_goal(state1))
print(render_maze(goal_state.grid))
print('Goal state is goal?', MazeState.is_goal(goal_state))
# check heuristics
# ('Heristics' is a typo in the output label; left unchanged because the
# string is runtime output.)
print('=' * 50)
print('Heristics:', MazeState.heuristic(state1))
print('Heristics:', MazeState.heuristic(goal_state)) | true |
d09b14129c9aca67d2611b6ff8ca0223aaae381a | Python | deangroom/Demo-JSON | /main.py | UTF-8 | 543 | 3.046875 | 3 | [] | no_license | import json
with open('Stats.json') as stats:
#data = json.load(stats)
print(data)
Strength = input("How much strength? ")
data["Strength: "] = Strength
Constitution = input("How much constituion? ")
data["Constitution: "] = Constitution
Dexterity = input("How much Dexterity? ")
data["Dexterity: "] = Dexterity
Intelligence = input("How much Intelligence? ")
data["Intelligence: "] = Intelligence
Wisdom = input("How much Wisdom? ")
data["Wisdom: "] = Wisdom
Charisma = input("How much Charisma? ")
data["Charisma: "] = Charisma
print(data) | true |
a7cf226615af2e28fac60db046d5b8d5c17cda41 | Python | FionnMarf/ProjectEuler | /Euler003.py | UTF-8 | 1,069 | 3.6875 | 4 | [] | no_license | import math as math
#Need a reasonably efficient (sub exponential?) factorization algorithm for the largest prime factor
#ideas
#begin at sqrt(n) and work backwards until a prime factor is found (still exponential)
def factorize(n):
    """Return the distinct prime factors of *n* not exceeding sqrt(n).

    Prints floor(sqrt(n)) and each factor as it is found, like the
    original.  Fixes in this revision:
    - the scan could fall off the end of the while loop and implicitly
      return None instead of the accumulated list;
    - 2 and 3 were never reported, because checkprime() rejects them,
      so they are now tested directly by trial division.

    Note: a prime factor larger than sqrt(n) (e.g. n itself when n is
    prime) is still not reported, matching the original contract.
    """
    limit = math.floor(math.sqrt(n))
    print(limit)
    primelist = []
    # Handle 2 and 3 explicitly; the main loop then only scans odd i >= 5.
    for small in (2, 3):
        if n % small == 0:
            primelist.append(small)
            print(small)
    i = 5
    while i <= limit:
        if n % i == 0 and checkprime(i):
            primelist.append(i)
            print(i)
        i += 2
    return primelist
def checkprime(p):
    """Return True if *p* is prime, using 6k+-1 trial division.

    Fixes in this revision:
    - 2 and 3 themselves were rejected (only their multiples should be);
    - 1 was wrongly reported as prime;
    - negative inputs crashed in math.sqrt; anything below 2 is now False.
    """
    if p < 2:
        return False
    if p in (2, 3):
        return True
    # All primes > 3 are congruent to 1 or 5 mod 6.
    if p % 6 in (1, 5):
        stop = math.floor(math.sqrt(p))
        i = 5
        while i <= stop:
            # Test the 6k-1 and 6k+1 candidates in one pass.
            if p % i == 0 or p % (i + 2) == 0:
                return False
            i += 6
        return True
    return False
if __name__ == "__main__":
#print factorize(600851475143)
print factorize(3646740004375894379)
| true |
99c426825645e581d9cb83d07fadccc5bdf954a8 | Python | RasaHQ/rasa | /rasa/core/evaluation/marker_tracker_loader.py | UTF-8 | 3,658 | 2.796875 | 3 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"MIT"
] | permissive | import random
from rasa.shared.exceptions import RasaException
from rasa.shared.core.trackers import DialogueStateTracker
from typing import Any, Iterable, List, Text, Optional, AsyncIterator
from rasa.core.tracker_store import TrackerStore
import rasa.shared.utils.io
STRATEGY_ALL = "all"
STRATEGY_FIRST_N = "first_n"
STRATEGY_SAMPLE_N = "sample_n"
def strategy_all(keys: List[Text], count: int) -> Iterable[Text]:
    """Selects all keys from the set of keys (``count`` is ignored)."""
    return keys
def strategy_first_n(keys: List[Text], count: int) -> Iterable[Text]:
    """Selects the first ``count`` keys, preserving their order."""
    selected = keys[:count]
    return selected
def strategy_sample_n(keys: List[Text], count: int) -> Iterable[Text]:
    """Draws ``count`` distinct keys uniformly at random (module RNG)."""
    return random.sample(keys, count)
class MarkerTrackerLoader:
    """Represents a wrapper over a `TrackerStore` with a configurable access pattern."""

    # Maps the public strategy names to their selector functions.
    _STRATEGY_MAP = {
        "all": strategy_all,
        "first_n": strategy_first_n,
        "sample_n": strategy_sample_n,
    }

    def __init__(
        self,
        tracker_store: TrackerStore,
        strategy: str,
        count: Optional[int] = None,
        seed: Any = None,
    ) -> None:
        """Creates a MarkerTrackerLoader.

        Args:
            tracker_store: The underlying tracker store to access.
            strategy: The strategy to use for selecting trackers,
                can be 'all', 'sample_n', or 'first_n'.
            count: Number of trackers to return, can only be None if strategy
                is 'all'.
            seed: Optional seed to set up random number generator,
                only useful if strategy is 'sample_n'.

        Raises:
            RasaException: If the strategy is unknown, or if ``count`` is
                missing or non-positive for a strategy that requires it.
        """
        self.tracker_store = tracker_store
        # Default: no limit.  Previously self.count was never assigned on the
        # (strategy='all', count=None) path, so load() crashed with
        # AttributeError; initializing it here fixes that.
        self.count: Optional[int] = None

        if strategy not in MarkerTrackerLoader._STRATEGY_MAP:
            raise RasaException(
                f"Invalid strategy for loading markers - '{strategy}' was given, \
                options 'all', 'sample_n', or 'first_n' exist."
            )

        self.strategy = MarkerTrackerLoader._STRATEGY_MAP[strategy]

        if strategy != STRATEGY_ALL:
            if not count:
                raise RasaException(
                    f"Desired tracker count must be given for strategy '{strategy}'."
                )

            if count < 1:
                # If count is ever < 1, user has an error, so issue exception
                raise RasaException("Parameter 'count' must be greater than 0.")

            self.count = count

        if count and strategy == STRATEGY_ALL:
            rasa.shared.utils.io.raise_warning(
                "Parameter 'count' is ignored by strategy 'all'."
            )
            self.count = None

        if seed:
            if strategy == STRATEGY_SAMPLE_N:
                # Seeds the module-level RNG used by strategy_sample_n.
                random.seed(seed)
            else:
                rasa.shared.utils.io.raise_warning(
                    f"Parameter 'seed' is ignored by strategy '{strategy}'."
                )

    async def load(self) -> AsyncIterator[Optional[DialogueStateTracker]]:
        """Loads trackers according to strategy."""
        stored_keys = list(await self.tracker_store.keys())

        if self.count is not None and self.count > len(stored_keys):
            # Warn here as user may have overestimated size of data set
            rasa.shared.utils.io.raise_warning(
                "'count' exceeds number of trackers in the store -\
                all trackers will be processed."
            )
            self.count = len(stored_keys)

        keys = self.strategy(stored_keys, self.count)

        for sender in keys:
            yield await self.tracker_store.retrieve_full_tracker(sender)
| true |
cf1a659cf8fa34bb193b6eaece5f58043f417497 | Python | wgolling/BreathOfFire3Notes | /scripts/objects/test_datatracker.py | UTF-8 | 15,621 | 2.921875 | 3 | [] | no_license | from datatracker import *
import unittest
#
#
# Enums
class TestCharacter(unittest.TestCase):
    """Sanity checks for the Character enum."""

    def test_character_enum(self):
        # unittest assertions (unlike bare ``assert``) survive ``python -O``
        # and produce informative failure messages.
        self.assertTrue(Character)
        self.assertEqual(len(list(Character)), 7)
class TestSkillInk(unittest.TestCase):
    """Sanity checks for the SkillInk enum."""

    def test_skill_ink_enum(self):
        self.assertTrue(SkillInk)
        self.assertEqual(len(list(SkillInk)), 4)
class TestZenny(unittest.TestCase):
    """Sanity checks for the Zenny enum."""

    def test_zenny_enum(self):
        self.assertTrue(Zenny)
        self.assertEqual(len(list(Zenny)), 6)
class TestWeapon(unittest.TestCase):
    """Sanity checks for the Weapon enum."""

    def test_weapon_enum(self):
        self.assertTrue(Weapon)
        self.assertEqual(len(list(Weapon)), 16)
#
#
# Helper methods
class TestHelperMethods(unittest.TestCase):
    """Covers the module-level dict helper functions."""

    #
    # Add key/value to dict
    def test_add_key_value_to_dict(self):
        d = {"buh": 1, DAGGER: 2}
        add_key_value_to_dict(d, 7, 8)
        add_key_value_to_dict(d, DAGGER, 1)
        self.assertEqual(d, {"buh": 1, DAGGER: 3, 7: 8})

    def test_add_key_value_to_dict_wrong_dict_type(self):
        # NOTE(review): this calls add_dicts, not add_key_value_to_dict as
        # the test name suggests; the TypeError may come from the arity
        # mismatch (three args) rather than from argument validation.
        with self.assertRaises(TypeError):
            add_dicts(0, 0, 0)

    def test_add_key_value_to_dict_wrong_value_type(self):
        # NOTE(review): likewise calls add_dicts with three arguments.
        with self.assertRaises(TypeError):
            add_dicts(dict(), 0, "buh")

    #
    # Add dicts
    def test_add_dicts(self):
        d1 = {"buh": 1, DAGGER: 2}
        d2 = {DAGGER: 1, 7: 8}
        d3 = add_dicts(d1, d2)
        self.assertEqual(d3, {"buh": 1, DAGGER: 3, 7: 8})

    def test_add_dicts_wrong_type(self):
        with self.assertRaises(TypeError):
            add_dicts(0, dict())
        with self.assertRaises(TypeError):
            add_dicts(dict(), 0)
#
#
# DataTracker
class TestConstructor(unittest.TestCase):
    """DataTracker can be constructed with no arguments."""

    def test_constructor(self):
        dt = DataTracker()
        self.assertTrue(dt)
class TestStaticFields(unittest.TestCase):
    """Checks the class-level constant tables on DataTracker."""

    def test_starting_levels(self):
        levels = DataTracker.STARTING_LEVELS
        self.assertEqual(levels[RYU], 1)
        self.assertEqual(levels[REI], 5)
        self.assertEqual(levels[TEEPO], 1)
        self.assertEqual(levels[NINA], 5)
        self.assertEqual(levels[MOMO], 10)
        self.assertEqual(levels[PECO], 1)
        self.assertEqual(levels[GARR], 13)

    def test_weapon_requirements(self):
        reqs = DataTracker.WEAPON_REQUIREMENTS
        for w in Weapon:
            expected = 2 if w in (DAGGER, BALLOCK_KNIFE, BENT_SWORD, POINTED_STICK) else 1
            # subTest pinpoints the offending weapon on failure instead of
            # stopping at the first bad entry.
            with self.subTest(weapon=w):
                self.assertEqual(reqs[w], expected)
class TestCharacterInterface(unittest.TestCase):
    """Covers gaining, losing, and levelling party members."""

    def setUp(self):
        self.dt = DataTracker()

    def test_get_party_levels(self):
        pl = self.dt.get_party_levels()
        for c in Character:
            with self.subTest(character=c):
                self.assertIn(c, pl)
                self.assertEqual(pl[c], DataTracker.STARTING_LEVELS[c])

    def test_gain_character(self):
        self.dt.gain_character(NINA)
        self.assertIn(NINA, self.dt.get_party())
        self.assertEqual(self.dt.get_party_levels()[NINA], 5)

    def test_gain_duplicate_character(self):
        # RYU is already in the starting party.
        with self.assertRaises(KeyError):
            self.dt.gain_character(RYU)

    def test_gain_character_wrong_type(self):
        with self.assertRaises(TypeError):
            self.dt.gain_character("buh")

    def test_lose_character(self):
        self.dt.level_up(RYU)
        self.dt.lose_character(RYU)
        self.assertNotIn(RYU, self.dt.get_party())
        self.assertEqual(self.dt.get_party_levels()[RYU], 2)
        # Regaining the character restores them at their retained level.
        self.dt.gain_character(RYU)
        self.assertIn(RYU, self.dt.get_party())
        self.assertEqual(self.dt.get_party_levels()[RYU], 2)

    def test_lose_character_wrong_type(self):
        with self.assertRaises(TypeError):
            self.dt.lose_character("buh")

    def test_lose_character_missing(self):
        with self.assertRaises(KeyError):
            self.dt.lose_character(NINA)

    def test_level_up(self):
        self.dt.level_up(RYU)
        self.assertEqual(self.dt.get_party_levels()[RYU], 2)
        self.dt.level_up(RYU, levels=4)
        self.assertEqual(self.dt.get_party_levels()[RYU], 6)

    def test_level_up_wrong_type(self):
        with self.assertRaises(TypeError):
            self.dt.level_up("buh")

    def test_level_up_missing_character(self):
        with self.assertRaises(KeyError):
            self.dt.level_up(NINA)

    def test_level_up_nonpositive_level(self):
        with self.assertRaises(ValueError):
            self.dt.level_up(RYU, levels=0)
class TestSkillInkInterface(unittest.TestCase):
    """Covers the skill-ink pick-up/buy/use counters."""

    def setUp(self):
        self.dt = DataTracker()

    def test_pick_up_skill_ink(self):
        self.dt.pick_up_skill_ink()
        self.assertEqual(self.dt.get_current(SkillInk.PICK_UP), 1)

    def test_buy_skill_ink(self):
        self.dt.buy_skill_ink()
        self.assertEqual(self.dt.get_current(SkillInk.BUY), 1)

    def test_buy_skill_ink_nonpositive_amount(self):
        with self.assertRaises(ValueError):
            self.dt.buy_skill_ink(amt=0)

    def test_use_skill_ink(self):
        self.dt.use_skill_ink()
        self.assertEqual(self.dt.get_current(SkillInk.USE), 1)

    def test_current_skill_ink(self):
        # current = picked up + bought - used
        self.dt.pick_up_skill_ink()
        self.dt.buy_skill_ink()
        self.dt.use_skill_ink()
        self.assertEqual(self.dt.get_current(SkillInk.CURRENT), 1)
class TestZennyInterface(unittest.TestCase):
    """Covers the zenny (currency) bookkeeping methods."""

    def setUp(self):
        self.dt = DataTracker()

    def test_pick_up_zenny(self):
        self.dt.pick_up_zenny(100)
        self.dt.pick_up_zenny(50)
        self.assertEqual(self.dt.get_current_raw(Zenny.PICK_UP), [100, 50])

    def test_pick_up_zenny_nonpositive_amount(self):
        with self.assertRaises(ValueError):
            self.dt.pick_up_zenny(amt=0)

    def test_boss_drop_zenny(self):
        self.dt.boss_drop_zenny(100)
        self.dt.boss_drop_zenny(50)
        self.assertEqual(self.dt.get_current_raw(Zenny.BOSS_DROP), [100, 50])

    def test_boss_drop_zenny_nonpositive_amount(self):
        with self.assertRaises(ValueError):
            self.dt.boss_drop_zenny(amt=0)

    def test_sell(self):
        self.dt.sell(100)
        self.dt.sell(50)
        self.assertEqual(self.dt.get_current_raw(Zenny.SALES), [100, 50])

    def test_sell_nonpositive_amount(self):
        with self.assertRaises(ValueError):
            self.dt.sell(amt=0)

    def test_buy(self):
        self.dt.buy(100)
        self.dt.buy(50)
        self.assertEqual(self.dt.get_current_raw(Zenny.BUY), [100, 50])

    def test_buy_nonpositive_amount(self):
        with self.assertRaises(ValueError):
            self.dt.buy(amt=0)

    def test_set_current_zenny(self):
        # NOTE(review): this test was previously shadowed by a duplicate
        # method of the same name below and therefore never ran.
        self.dt.set_current_zenny(100)
        self.assertEqual(self.dt.get_current(Zenny.CURRENT), 100)
        self.dt.set_current_zenny(50)
        self.assertEqual(self.dt.get_current(Zenny.CURRENT), 50)

    def test_set_current_zenny_negative_amount(self):
        # Renamed from a duplicate ``test_set_current_zenny`` definition
        # that silently replaced the method above.
        with self.assertRaises(ValueError):
            self.dt.set_current_zenny(amt=-1)

    def test_get_enemy_drop(self):
        # Enemy drops are derived: current - (pickups + boss drops + sales - buys).
        self.dt.pick_up_zenny(100)   # running total 100
        self.dt.boss_drop_zenny(50)  # running total 150
        self.dt.sell(5)              # running total 155
        self.dt.buy(75)              # running total 80
        self.dt.set_current_zenny(100)
        self.assertEqual(self.dt.get_current(Zenny.ENEMY_DROP), 20)
class TestWeaponInterface(unittest.TestCase):
    """Covers weapon acquisition and the completion check."""

    def setUp(self):
        self.dt = DataTracker()

    def test_pick_up_weapon(self):
        self.dt.pick_up_weapon(DAGGER)
        self.assertEqual(self.dt.get_weapons()[DAGGER], 2)

    def test_pick_up_weapon_wrong_type(self):
        with self.assertRaises(TypeError):
            self.dt.pick_up_weapon("buh")

    def test_buy_weapon(self):
        self.dt.buy_weapon(DAGGER, 50)
        self.assertEqual(self.dt.get_weapons()[DAGGER], 2)
        # Buying a weapon also records the purchase as a zenny expense.
        self.assertEqual(self.dt.get_current(Zenny.BUY), 50)
        self.assertEqual(self.dt.get_current_raw(Zenny.BUY), [50])

    def test_buy_weapon_wrong_type(self):
        # NOTE(review): buy_weapon is called elsewhere with two arguments,
        # so this single-argument call may raise TypeError from the arity
        # mismatch rather than from type validation.
        with self.assertRaises(TypeError):
            self.dt.buy_weapon("buh")

    def test_buy_weapon_nonpositive_amount(self):
        with self.assertRaises(ValueError):
            self.dt.buy_weapon(DAGGER, cost=0)

    def test_have_all_weapons(self):
        dt = self.dt
        self.assertFalse(dt.have_all_weapons())
        for w in Weapon:
            # Still incomplete before the final weapon's count is topped up.
            self.assertFalse(dt.have_all_weapons())
            while dt.get_weapons()[w] < DataTracker.WEAPON_REQUIREMENTS[w]:
                dt.pick_up_weapon(w)
        self.assertTrue(dt.have_all_weapons())
class TestSplitting(unittest.TestCase):
    """Covers split() bookkeeping: per-split gains vs. running totals for
    party levels, skill ink, zenny, and weapons."""

    def setUp(self):
        self.dt = DataTracker()
    def test_number_of_splits(self):
        for x in range(10):
            assert(self.dt.number_of_splits() == x)
            self.dt.split(str(x), 0)
        assert(self.dt.number_of_splits() == 10)
    def test_split_name(self):
        dt = self.dt
        dt.split("Test split", 0)
        assert(dt.get_name(0) == "Test split")
    def test_party_levels(self):
        dt = self.dt
        assert(RYU in dt.current_entry.party)
        dt.level_up(RYU)
        assert(dt.get_party_levels()[RYU] == 2)
        dt.split("Level Up Ryu", 0)
        assert(RYU in dt.current_entry.party)
        assert(dt.get_party_levels()[RYU] == 2)
        dt.level_up(RYU)
        assert(dt.get_party_levels()[RYU] == 3)
    #
    # Test Skill Ink
    # Helper parameter abbreviations: c*=current-split value, t*=total,
    # g*=gain within a past split; suffixes c/p/b/u = CURRENT/PICK_UP/BUY/USE.
    def skill_ink_helper(self, cc=0, tc=0, cp=0, tp=0, cb=0, tb=0, cu=0, tu=0):
        assert(self.dt.get_current(SkillInk.CURRENT) == cc)
        assert(self.dt.get_total(SkillInk.CURRENT) == tc)
        assert(self.dt.get_current(SkillInk.PICK_UP) == cp)
        assert(self.dt.get_total(SkillInk.PICK_UP) == tp)
        assert(self.dt.get_current(SkillInk.BUY) == cb)
        assert(self.dt.get_total(SkillInk.BUY) == tb)
        assert(self.dt.get_current(SkillInk.USE) == cu)
        assert(self.dt.get_total(SkillInk.USE) == tu)
    def skill_ink_helper_with_split(self, split, gc=0, tc=0, gp=0, tp=0, gb=0, tb=0, gu=0, tu=0):
        assert(self.dt.get_gain(SkillInk.CURRENT, split) == gc)
        assert(self.dt.get_total(SkillInk.CURRENT, split) == tc)
        assert(self.dt.get_gain(SkillInk.PICK_UP, split) == gp)
        assert(self.dt.get_total(SkillInk.PICK_UP, split) == tp)
        assert(self.dt.get_gain(SkillInk.BUY, split) == gb)
        assert(self.dt.get_total(SkillInk.BUY, split) == tb)
        assert(self.dt.get_gain(SkillInk.USE, split) == gu)
        assert(self.dt.get_total(SkillInk.USE, split) == tu)
    def test_skill_ink(self):
        dt = self.dt
        # Check default state.
        self.skill_ink_helper()
        # Pick up a skill ink.
        dt.pick_up_skill_ink()
        self.skill_ink_helper(cc=1, tc=1, cp=1, tp=1)
        # Split 1.
        dt.split("Pick Up Skill Ink", 0)
        self.skill_ink_helper(tc=1, tp=1)
        # Buy skill ink
        dt.buy_skill_ink()
        self.skill_ink_helper(cc=1, tc=2, tp=1, cb=1, tb=1)
        # Split 2
        dt.split("Buy Skill Ink", 0)
        self.skill_ink_helper(tc=2, tp=1, tb=1)
        # Use skill ink
        dt.use_skill_ink()
        self.skill_ink_helper(cc=-1, tc=1, tp=1, tb=1, cu=1, tu=1)
        # Split
        dt.split("Use Skill Ink", 0)
        self.skill_ink_helper(tc=1, tp=1, tb=1, tu=1)
        # Check previous splits
        self.skill_ink_helper_with_split(0, gc=1, tc=1, gp=1, tp=1)
        self.skill_ink_helper_with_split(1, gc=1, tc=2, tp=1, gb=1, tb=1)
        self.skill_ink_helper_with_split(2, gc=-1, tc=1, tp=1, tb=1, gu=1, tu=1)
    #
    # Test Zenny
    def test_zenny(self):
        dt = self.dt
        dt.pick_up_zenny(100)
        dt.pick_up_zenny(50) # 150
        dt.boss_drop_zenny(500) # 650
        dt.sell(120)
        dt.sell(50) # 820
        dt.buy(600) # 220
        dt.split("Pick up zenny", 250)
        assert(dt.get_gain_raw(Zenny.PICK_UP , 0) == [100, 50])
        assert(dt.get_total(Zenny.PICK_UP , 0) == 150)
        assert(dt.get_gain_raw(Zenny.BOSS_DROP , 0) == [500])
        assert(dt.get_total(Zenny.BOSS_DROP , 0) == 500)
        assert(dt.get_gain_raw(Zenny.SALES , 0) == [120, 50])
        assert(dt.get_total(Zenny.SALES , 0) == 170)
        assert(dt.get_gain_raw(Zenny.BUY , 0) == [600])
        assert(dt.get_total(Zenny.BUY , 0) == 600)
        assert(dt.get_gain_raw(Zenny.CURRENT , 0) == 250)
        assert(dt.get_total(Zenny.CURRENT , 0) == 250)
        assert(dt.get_gain_raw(Zenny.ENEMY_DROP , 0) == 30)
        assert(dt.get_total(Zenny.ENEMY_DROP, 0) == 30)
        assert(dt.get_current_raw(Zenny.PICK_UP ) == [])
        assert(dt.get_total(Zenny.PICK_UP , 1) == 150)
        assert(dt.get_current_raw(Zenny.BOSS_DROP ) == [])
        assert(dt.get_total(Zenny.BOSS_DROP , 1) == 500)
        assert(dt.get_current_raw(Zenny.SALES ) == [])
        assert(dt.get_total(Zenny.SALES , 1) == 170)
        assert(dt.get_current_raw(Zenny.BUY ) == [])
        assert(dt.get_total(Zenny.BUY , 1) == 600)
        assert(dt.get_current_raw(Zenny.CURRENT ) == 0)
        assert(dt.get_total(Zenny.CURRENT , 1) == 250)
        assert(dt.get_current_raw(Zenny.ENEMY_DROP ) == 0)
        assert(dt.get_total(Zenny.ENEMY_DROP, 1) == 30)
        dt.split("Another split", 250)
        assert(dt.get_gain_raw(Zenny.PICK_UP , 1) == [])
        assert(dt.get_total(Zenny.PICK_UP , 1) == 150)
        assert(dt.get_gain_raw(Zenny.BOSS_DROP , 1) == [])
        assert(dt.get_total(Zenny.BOSS_DROP , 1) == 500)
        assert(dt.get_gain_raw(Zenny.SALES , 1) == [])
        assert(dt.get_total(Zenny.SALES , 1) == 170)
        assert(dt.get_gain_raw(Zenny.BUY , 1) == [])
        assert(dt.get_total(Zenny.BUY , 1) == 600)
        assert(dt.get_gain_raw(Zenny.CURRENT , 1) == 0)
        assert(dt.get_total(Zenny.CURRENT , 1) == 250)
        assert(dt.get_gain_raw(Zenny.ENEMY_DROP , 1) == 0)
        assert(dt.get_total(Zenny.ENEMY_DROP, 1) == 30)
    #
    # Test Weapons
    def test_weapons(self):
        dt = self.dt
        dt.pick_up_weapon(DAGGER)
        dt.buy_weapon(BALLOCK_KNIFE, 50)
        dt.split("Get stuff", 0)
        dt.pick_up_weapon(DAGGER)
        weapons = dt.get_weapons()
        assert(weapons[DAGGER] == 3)
        assert(weapons[BALLOCK_KNIFE] == 1)
        assert(dt.get_total(Zenny.BUY) == 50)
class TestGetterMethodErrors(unittest.TestCase):
    """Error-path coverage for the getter methods: out-of-range split
    indices raise IndexError and bad attribute keys raise KeyError."""

    def setUp(self):
        self.dt = DataTracker()
    #
    # Helper methods
    # ``function`` is a one-argument callable taking a split index.
    def out_of_bounds(self, function):
        with self.assertRaises(IndexError):
            function(-1)
        with self.assertRaises(IndexError):
            function(0)
        self.dt.split("Test split", 0)
        with self.assertRaises(IndexError):
            function(1)
    # ``function`` is a one-argument callable taking an attribute key.
    def wrong_key_type(self, function):
        with self.assertRaises(KeyError):
            function("buh")
    #
    # Split name
    def test_split_name_out_of_bounds(self):
        self.out_of_bounds(self.dt.get_name)
    #
    # Party levels
    def test_party_levels_out_of_bounds(self):
        self.out_of_bounds(lambda split: self.dt.get_party_levels(split=split))
    #
    # Weapons
    def test_get_weapons_out_of_bounds(self):
        self.out_of_bounds(lambda split: self.dt.get_weapons(split=split))
    #
    # Gain
    def test_get_gain_out_of_bounds(self):
        self.out_of_bounds(lambda split: self.dt.get_gain(Zenny.PICK_UP, split))
    def test_get_gain_wrong_type(self):
        self.dt.split("Test", 0)
        self.wrong_key_type(lambda att: self.dt.get_gain(att, 0))
    #
    # Total
    # get_total accepts index == number_of_splits (the running total), so
    # the generic out_of_bounds helper does not apply here.
    def test_get_total_out_of_bounds(self):
        with self.assertRaises(IndexError):
            self.dt.get_total(Zenny.PICK_UP, -1)
        with self.assertRaises(IndexError):
            self.dt.get_total(Zenny.PICK_UP, 1)
        self.dt.split("Test split", 0)
        with self.assertRaises(IndexError):
            self.dt.get_total(Zenny.PICK_UP, 2)
    def test_get_total_wrong_type(self):
        self.wrong_key_type(lambda att: self.dt.get_total(att, 0))
    #
    # Current
    def test_get_current_wrong_type(self):
        self.wrong_key_type(self.dt.get_current)
    #
    # Strings
    # Not an error-path test: checks the rendered per-split gain strings.
    def test_get_strings(self):
        dt = self.dt
        dt.gain_character(NINA)
        dt.level_up(RYU)
        dt.split("First split", 0)
        strings = dt.get_strings()
        gains = strings[0]['party_levels']['gain']
        assert(gains[RYU] == '2')
if __name__ == "__main__":
unittest.main()
| true |
b6f9ca285f6f99f1f5f18a9d30afd3e3908340fd | Python | geometer/sandbox | /python/sandbox/rules/abstract.py | UTF-8 | 2,759 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | class AbstractRule:
    @classmethod
    def priority(clazz):
        """Default rule priority (2); subclasses override to reorder rule
        application (lower values appear to rank earlier -- TODO confirm)."""
        return 2
class SyntheticPropertyRule(AbstractRule):
    """Singleton rule associated with synthetically produced properties."""

    __instance = None

    @staticmethod
    def instance():
        """Return the lazily created shared instance."""
        cached = SyntheticPropertyRule.__instance
        if cached is None:
            cached = SyntheticPropertyRule()
            SyntheticPropertyRule.__instance = cached
        return cached
class PredefinedPropertyRule(AbstractRule):
    """Singleton rule associated with predefined properties."""

    __instance = None

    @staticmethod
    def instance():
        """Return the lazily created shared instance."""
        cached = PredefinedPropertyRule.__instance
        if cached is None:
            cached = PredefinedPropertyRule()
            PredefinedPropertyRule.__instance = cached
        return cached

    @classmethod
    def priority(clazz):
        """Predefined properties rank at 0.5 instead of the default 2."""
        return 0.5
class Rule(AbstractRule):
    """A rule bound to a solving context.

    Subclasses supply ``sources()`` and ``apply()``; ``generate()`` then
    yields every reason produced from the accepted sources.
    """

    def __init__(self, context):
        self.context = context

    def accepts(self, src):
        """Filter hook; accepts every source unless a subclass overrides."""
        return True

    def generate(self):
        """Yield all reasons from applying the rule to each accepted source."""
        for candidate in self.sources():
            if not self.accepts(candidate):
                continue
            yield from self.apply(candidate)
class source_type:
    """Class decorator that injects a ``sources()`` method listing every
    context property of the given type.

    Usage: ``@source_type(SomeProperty)`` on a ``Rule`` subclass that does
    not already define ``sources()``.
    """

    def __init__(self, property_type):
        # Import deferred to decoration time to avoid a circular import
        # between the rules and property modules -- TODO confirm.
        from ..property import Property
        assert issubclass(property_type, Property), 'Source type must be subclass of Property'
        self.property_type = property_type

    def __call__(self, clazz):
        assert not hasattr(clazz, 'sources'), 'Cannot use @%s on class with sources() method' % type(self).__name__
        # Build a same-named subclass whose sources() pulls the matching
        # properties out of the rule's context.
        return type(
            clazz.__name__,
            (clazz,),
            {'sources': lambda inst: inst.context.list(self.property_type)}
        )
class source_types:
    """Class decorator like :class:`source_type`, but accepts several
    property types and concatenates their context listings."""

    def __init__(self, *property_types):
        # Import deferred to decoration time to avoid a circular import
        # between the rules and property modules -- TODO confirm.
        from ..property import Property
        assert all(issubclass(t, Property) for t in property_types), 'Source type must be subclass of Property'
        self.property_types = property_types

    def sources(self, inst):
        """Return the concatenated context listings for all property types."""
        full = []
        for t in self.property_types:
            full += inst.context.list(t)
        return full

    def __call__(self, clazz):
        assert not hasattr(clazz, 'sources'), 'Cannot use @%s on class with sources() method' % type(self).__name__
        return type(
            clazz.__name__,
            (clazz,),
            {'sources': lambda inst: self.sources(inst)}
        )
class processed_cache:
    """Class decorator attaching a shared ``processed`` cache object.

    Produces a same-named subclass whose ``processed`` class attribute is
    the given container, typically consumed by :func:`accepts_auto`.
    """

    def __init__(self, cache_object):
        self.cache_object = cache_object

    def __call__(self, clazz):
        namespace = {'processed': self.cache_object}
        return type(clazz.__name__, (clazz,), namespace)
def accepts_auto(clazz):
    """Class decorator: generated accepts() admits a source only when it is
    not already present in the instance's 'processed' cache."""
    # (An original guard against pre-existing accepts() was left disabled.)
    namespace = {'accepts': lambda inst, src: src not in inst.processed}
    return type(clazz.__name__, (clazz,), namespace)
| true |
f4707040c0b2aa51cfb2e2926293ee883fece27a | Python | sayan1995/BFS-2 | /problem3.py | UTF-8 | 1,154 | 3.625 | 4 | [] | no_license | '''
Iterative:
Time Complexity: O(n)
Space Complexity: O(n)
Did this code successfully run on Leetcode : Yes
Explanation: Add all the elements to the hashmap and use bfs using queue to iterate through the hashmap and add importance
'''
"""
# Employee info
class Employee:
def __init__(self, id: int, importance: int, subordinates: List[int]):
# It's the unique id of each node.
# unique id of this employee
self.id = id
# the importance value of this employee
self.importance = importance
# the id of direct subordinates
self.subordinates = subordinates
"""
class Solution:
    def getImportance(self, employees: 'List[Employee]', id1: int) -> int:
        """Return the importance of employee `id1` plus that of all direct
        and indirect subordinates, via breadth-first traversal.

        Time O(n), space O(n).  Fixes in this revision: the annotation is a
        fully quoted string (the old ``List['Employee']`` evaluated ``List``
        eagerly and raised NameError without a typing import), and the old
        ``queue = queue[1:]`` O(n) dequeue is replaced with a deque.
        """
        from collections import deque

        # Index employees by id for O(1) lookup during traversal.
        by_id = {emp.id: emp for emp in employees}
        total = 0
        queue = deque([id1])
        while queue:
            emp = by_id[queue.popleft()]
            total += emp.importance
            queue.extend(emp.subordinates)
        return total
| true |
a764af339e51c416b5018cf854989fcfe2cba63d | Python | DTuscher/TA | /driveur.py | UTF-8 | 747 | 2.578125 | 3 | [] | no_license | import urx
from time import sleep
# Connect to a Universal Robots controller (presumably a URSim simulator VM —
# verify the address) and configure tool-centre-point, payload and simulation
# mode before issuing motion commands.
rob = urx.Robot("172.16.174.128")
rob.set_tcp((0, 0, 0.1, 0, 0, 0))
rob.set_payload(2, (0, 0, 0.1))
rob.set_simulation(True)
sleep(0.2) #leave some time to robot to process the setup commands
#rob.movel((0,0,0.1,0,0,0), relative=True)
try:
    # l: displacement (m), v: velocity, a: acceleration for the moves below.
    l = 0.05
    v = 0.05
    a = 0.3
    pose = rob.getl()
    print("robot tcp is at: ", pose)
    print("absolute move in base coordinate ")
    pose[2] += l
    rob.movel(pose, acc=a, vel=v)
    print("relative move in base coordinate ")
    rob.translate((0, 0, -l), acc=a, vel=v)
    print("relative move back and forth in tool coordinate")
    rob.translate_tool((0, 0, -l), acc=a, vel=v)
    rob.translate_tool((0, 0, l), acc=a, vel=v)
# The finally block guarantees the robot connection is closed on any error.
finally:
rob.close() | true |
d8df627b266082ef08ba975f27733c253cd5d26f | Python | richardmitic/waview | /waview/waview.py | UTF-8 | 12,667 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python3
import curses
import sys
import argparse
import logging
import wave
import numpy as np
from scipy.io import wavfile
from scipy.signal import resample
INT16_MAX = int(2**15)-1
LOG = logging.getLogger("waview")
SAMPLE_FORMATS = {
"S16_LE": np.int16
}
def log_call(func):
    """Decorator that logs every call (callee repr, args, kwargs) at DEBUG.

    Fixes: removed the dead ``ret = None`` local, and added
    ``functools.wraps`` so the wrapped function keeps its name/docstring.
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        LOG.debug("{0!r} {1} {2}".format(func, args, kwargs))
        return func(*args, **kwargs)
    return wrapper
def clip(n, _min, _max):
    """Clamp n into the inclusive range [_min, _max] (upper bound wins)."""
    bounded_below = max(n, _min)
    return min(bounded_below, _max)
def absmax(arr):
    """Largest absolute value in arr; 0 for an empty array (max() would raise)."""
    if len(arr) == 0:
        return 0
    return max(np.abs(arr))
class Wave:
    """An audio clip held in memory as one row of samples per channel.

    Fixes in this revision: the archaic ``property(**locals())`` idiom for
    ``nsamples`` is replaced with a plain ``@property`` (same read-only
    behaviour), and the frame count in ``load_pcm_file`` uses integer
    division instead of float division before ``np.split``.
    """

    def __init__(self):
        self.samples = np.empty((1,0)) # 1 channel, 0 samples
        self.nchannels = 1

    @property
    def nsamples(self):
        """Number of samples per channel."""
        return len(self.samples[0])

    @staticmethod
    def frames_to_channels(samples):
        "Convert frame-based samples to channel-based"
        return np.transpose(samples)

    def load_file(self, filename, sample_format=None, channels=1):
        """Load a raw PCM file when sample_format is given, else a WAV file."""
        if sample_format:
            self.load_pcm_file(filename, sample_format, channels)
        else:
            self.load_wav_file(filename)

    def load_pcm_file(self, filename, sample_format, channels):
        """Load headerless PCM data with the given format and channel count."""
        self.nchannels = channels
        with open(filename, "rb") as pcm:
            np_format = SAMPLE_FORMATS[sample_format]
            self.samples = np.frombuffer(pcm.read(), dtype=np_format)
        if channels == 1:
            self.samples = np.array([self.samples])
        else:
            # De-interleave: split the flat frame stream into per-channel rows.
            num_frames = len(self.samples) // channels
            self.samples = np.transpose(np.split(self.samples, num_frames))

    def load_wav_file(self, filename):
        """Load a RIFF/WAV file via scipy, normalising to channel-major layout."""
        _, self.samples = wavfile.read(filename)
        if isinstance(self.samples[0], np.ndarray):
            self.nchannels = len(self.samples[0])
            self.samples = self.frames_to_channels(self.samples)
        else:
            self.nchannels = 1
            self.samples = np.array([self.samples]) # So we can still index the only channel

    def get_samples(self, offset=0, num_samples=None, channel=0, num_chunks=1):
        """Return num_chunks arrays covering [offset, offset+num_samples) of a
        channel, zero-padding any part that lies outside the waveform."""
        samps = self.samples[channel]
        num_samples = len(samps)-offset if num_samples is None else num_samples
        # Pad samples with zeros if we go outside the range of the wave
        pre_padding = np.zeros(-offset if offset < 0 else 0)
        stop = offset + num_samples
        post_padding = np.zeros(stop-len(samps) if stop >= len(samps) else 0)
        start = clip(offset, 0, len(samps))
        stop = clip(stop, 0, len(samps))
        samples_to_display = np.concatenate((pre_padding, samps[start:stop], post_padding))
        return np.array_split(samples_to_display, num_chunks)

    @log_call
    def get_peaks(self, offset, num_samples, channel, num_peaks):
        """Per-chunk peak (absolute maximum) over the requested sample range."""
        return list(map(absmax, self.get_samples(offset, num_samples, channel, num_chunks=num_peaks)))
class ChannelDisplay():
    """Curses sub-window that renders one channel of a Wave, either as
    individual samples or as peak columns depending on signal density."""
    def __init__(self, parent, window_h, window_w, begin_y, begin_x):
        self.width = window_w
        self.height = window_h
        self.begin_y = begin_y
        self.begin_x = begin_x
        self.screen = parent.subwin(window_h, window_w, begin_y, begin_x)
        self.border_size = 1
        # The free space we have available for drawing, i.e. inside the borders
        self.draw_width = self.width - (2 * self.border_size)
        self.draw_height = self.height - (2 * self.border_size)
    def set_wave(self, wave, channel):
        "Wave object to draw. A window can only draw 1 channel."
        self.wave = wave
        self.channel = channel
    def scale_sample(self, samp):
        # Map an int16 sample to a row: 0 at mid-height, extremes at edges.
        half_height = self.draw_height / 2
        return int(((samp/INT16_MAX) * half_height) + half_height)
    @staticmethod
    def zero_crossings(arr):
        # Count sign changes between consecutive samples.
        return len(np.where(np.diff(np.sign(arr)))[0])
    def should_draw_peaks(self, offset, nsamples):
        """ Decide whether to draw individual samples or peaks based on the number
        of zero crossings in the references section. Avoid aliasing by only
        drawing if there are roughly 4 draw columns per zero crossing.
        """
        # NOTE(review): the name is inverted relative to its use — draw() calls
        # draw_samples() when this returns True and draw_peaks() otherwise.
        samples = self.wave.get_samples(offset, nsamples, self.channel)[0]
        zero_crossings_per_column = self.zero_crossings(samples) / self.draw_width
        return zero_crossings_per_column < 0.25, zero_crossings_per_column
    # @staticmethod
    # def gradient_to_symbol(gradient):
    #     if gradient == 0:
    #         return curses.ACS_S1
    #     elif gradient == 1:
    #         return '\\'
    #     elif gradient == -1:
    #         return '/'
    #     elif (gradient >= 2) or (gradient <= -2):
    #         return curses.ACS_VLINE
    #     else:
    #         raise Exception("This should never happen")
    def sample_to_point(self, sample, n):
        # Column n / scaled sample row, offset inside the border.
        x = n + self.border_size
        y = self.scale_sample(sample) + self.border_size
        return x,y
    def draw_samples(self, offset, nsamples):
        # Make sure we don't try to draw outside the drawing area
        samples = self.wave.get_samples(offset, nsamples, self.channel)[0] # get_samples returns a list of chunks
        t = np.array(range(offset, offset+nsamples))
        samples, re_t = resample(samples, self.draw_width+1, t=t) # we don't actually draw the last point
        points = [self.sample_to_point(s, n) for n,s in enumerate(samples)]
        t_diff = np.diff(re_t.astype(int)) # This will be 1 when we have an actual sample, 0 for interpolated samples.
        for point, is_real_sample in zip(points, t_diff):
            x,y = point
            # Real samples get an 'o' when zoomed in; interpolated points a bullet.
            symbol = 'o' if nsamples < self.draw_width and is_real_sample else curses.ACS_BULLET
            try:
                self.screen.addch(y, x, symbol)
            except curses.error as e:
                LOG.error("addch error {!r}: {} {} {} {} {}".format(e,y,x,symbol,self.draw_width,self.draw_height))
    def scale_peak(self, peak):
        # Convert a peak value to (top row, half-length) for a mirrored column.
        half_height = self.draw_height / 2
        length = int((peak/INT16_MAX) * half_height)
        top = int(half_height-length)
        return top, length
    @log_call
    def draw_peaks(self, offset, nsamples):
        # Make sure we don't try to draw outside the drawing area
        peaks = self.wave.get_peaks(offset, nsamples, self.channel, self.draw_width)
        for x, peak in enumerate(peaks, self.border_size):
            top, length = self.scale_peak(peak)
            top += self.border_size
            if length == 0:
                # Zero-height peak: draw the two half-line glyphs around centre.
                self.screen.addch(top, x, curses.ACS_S1)
                self.screen.addch(top-1, x, curses.ACS_S9)
            else:
                reflected_length = 2 * length
                self.screen.vline(top, x, curses.ACS_CKBOARD, reflected_length)
    def draw(self, start, end):
        """ Draw the given section of the wave
        start: Starting point as proportion of total length, i.e. from 0. to 1.
        end: Ending point as proportion of total length, i.e. from 0. to 1.
        """
        self.screen.box()
        offset = int(self.wave.nsamples * start)
        nsamples = int(self.wave.nsamples * (end-start))
        _should_draw_peaks, zero_crossings_per_column = self.should_draw_peaks(offset, nsamples)
        # Debug header showing the sample window and zoom information.
        info = "samples[{0}:{1}] {2:.2} {3:.4}:{4:.4}:{5:.4}".format( \
            offset, nsamples, zero_crossings_per_column, start, end, end-start)
        self.screen.addstr(info)
        if _should_draw_peaks:
            self.draw_samples(offset, nsamples)
        else:
            self.draw_peaks(offset, nsamples)
class App():
    """Top-level waview application: owns the Wave, the view state
    (centroid + zoom) and the curses event loop."""
    def __init__(self, zoom=1.):
        self.wave = Wave()
        self.wave_centroid = 0.5 # Point of the wave at the centre of the screen
        self.wave_centroid_delta = 0.2 # Proportion of the displayed area to move
        self.zoom = zoom # 1. means entire wave
        self.zoom_delta_multipler = 0.2 # change in zoom value for each key press
        self.running = True
    def quit(self):
        self.running = False
    def shift_left(self):
        # NOTE(review): "left" increases the centroid (view moves towards the
        # end of the wave) — confirm this matches the intended key direction.
        current_range = 1. / self.zoom
        self.wave_centroid += (current_range*self.wave_centroid_delta)
        LOG.info("shift left {}".format(self.wave_centroid))
    def shift_right(self):
        current_range = 1. / self.zoom
        self.wave_centroid -= (current_range*self.wave_centroid_delta)
        LOG.info("shift right {}".format(self.wave_centroid))
    def zoom_in(self):
        # Zoom is clamped to [1, inf): 1 shows the whole wave.
        coeff = 1+self.zoom_delta_multipler
        self.zoom = clip(self.zoom * coeff, 1., float('inf'))
        LOG.info("zoom in {}".format(self.zoom))
    def zoom_out(self):
        coeff = 1-self.zoom_delta_multipler
        self.zoom = clip(self.zoom * coeff , 1., float('inf'))
        LOG.info("zoom out {}".format(self.zoom))
    def reset_view(self):
        self.zoom = 1.
        self.wave_centroid = 0.5
    def get_window_rect(self, screen, channel):
        "Calculate x, y, width, height for a given channel window"
        # Channels stack vertically; each gets an equal slice of the height.
        total_h, total_w = screen.getmaxyx()
        begin_x, begin_y = 0, int( (total_h / self.wave.nchannels) * channel )
        end_x, end_y = 0, int( (total_h / self.wave.nchannels) * (channel+1) )
        window_w, window_h = total_w, end_y - begin_y
        return begin_x, begin_y, window_w, window_h
    def get_channel_windows(self, screen):
        # Yield one freshly constructed ChannelDisplay per channel.
        for n in range(self.wave.nchannels):
            begin_x, begin_y, window_w, window_h = self.get_window_rect(screen, n)
            yield ChannelDisplay(screen, window_h, window_w, begin_y, begin_x)
    def handle_key_press(self, key):
        # 'q' deliberately uses a separate `if` so quit is always honoured.
        if key == "q":
            self.quit()
        if key == "r":
            self.reset_view()
        elif key == "KEY_LEFT":
            self.shift_left()
        elif key == "KEY_RIGHT":
            self.shift_right()
        elif key == "KEY_UP":
            self.zoom_in()
        elif key == "KEY_DOWN":
            self.zoom_out()
    def load(self, filename, sample_format, channels):
        "Load a file by automatically detecting the type"
        # Try WAV first; a parse failure falls back to raw PCM.
        try:
            self.wave.load_wav_file(filename)
        except ValueError:
            self.wave.load_pcm_file(filename, sample_format, channels)
    def draw(self, screen):
        screen.clear()
        screen.border()
        _, max_width = screen.getmaxyx()
        for channel, window in enumerate(self.get_channel_windows(screen)):
            window.set_wave(self.wave, channel)
            # Visible span is centred on wave_centroid, width 1/zoom.
            wave_start = self.wave_centroid - (1./(self.zoom*2))
            wave_end = self.wave_centroid + (1./(self.zoom*2))
            window.draw(wave_start, wave_end)
        screen.refresh()
    def main(self, stdscr):
        # Blocking event loop: redraw after every key press until quit.
        self.draw(stdscr)
        while self.running:
            self.handle_key_press(stdscr.getkey())
            self.draw(stdscr)
    def __call__(self, stdscr):
        # Makes the App instance usable directly with curses.wrapper().
        self.main(stdscr)
class LoggerWriter(object):
    """File-like adapter that forwards writes to a logging callable, so
    sys.stdout/sys.stderr can be redirected into the logger.

    Fix: the original filter ``if msg != "\\n"`` compared the *stripped*
    message against a newline, which can never match — empty writes (e.g.
    print()'s line terminator) slipped through as blank log records.
    """
    def __init__(self, level):
        # `level` is a logging method such as LOG.debug or LOG.error.
        self.level = level

    def write(self, message):
        msg = message.strip()
        if msg:
            self.level(msg)

    def flush(self):
        # File-protocol no-op; logging handlers manage their own flushing.
        pass
def get_argparser():
    """Build the waview command-line parser (input file plus display options)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("inputfile", help="File to display. The format will be determined automatically.")
    parser.add_argument("-f", "--format", help="Sample format for raw files", choices=SAMPLE_FORMATS.keys(), default="S16_LE")
    parser.add_argument("-c", "--channels", help="Number of channels for raw files", default=1, type=int)
    parser.add_argument("-z", "--zoom", help="Initial zoom value", default=1, type=float)
    parser.add_argument("-l", "--logfile", help="Log file path")
    parser.add_argument("-v", help="Log verbosity", action="count", default=0)
    return parser
def get_log_format():
    """Log record format: timestamp - logger name - level - message."""
    return "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
def setup_logging(logfile, verbosity):
    """Configure file logging and capture stdout/stderr into the logger.

    No-op when logfile is falsy.  Each -v lowers the threshold one level:
    default=error, -v=warn, -vv=info, -vvv=debug.
    """
    if not logfile:
        return
    log_level = logging.ERROR - (verbosity * 10)
    logging.basicConfig(level=log_level, format=get_log_format(), filename=logfile)
    sys.stdout = LoggerWriter(LOG.debug)
    sys.stderr = LoggerWriter(LOG.error)
def main():
    """Entry point: parse CLI args, set up logging, load the file, run curses."""
    argparser = get_argparser()
    args = argparser.parse_args()
    setup_logging(args.logfile, args.v)
    app = App(zoom=args.zoom)
    app.load(args.inputfile, args.format, args.channels)
    # curses.wrapper restores the terminal state even if App raises.
    curses.wrapper(app)
if __name__ == '__main__':
main() | true |
f2f462185a604beaf2364d558906c591fc834f5f | Python | IlidanNaga/317_Hometask_Practicum | /Big_task_1/completing_task_2.py | UTF-8 | 2,003 | 2.984375 | 3 | [] | no_license | from cross_validation import knn_cross_val_score
from cross_validation import kfold
from sklearn.datasets import fetch_mldata
from random import seed
from random import shuffle
import numpy as np
# part_2
# Compare k-NN cross-validation accuracy under euclidean vs cosine metrics on
# a fixed random 3500-sample subset of MNIST, for k = 1..10 with 3 folds.
mnist = fetch_mldata("MNIST-original")
data = mnist.data / 255.0
target = mnist.target.astype("int0")
# the last parameter is the number of test samples whose neighbours are computed in one batch
# i'd take smaller subsets for faster calculations
seed(1024)
indixes_list = np.arange(70000)
shuffle(indixes_list)
sub_data = data[indixes_list[:3500]]
sub_target = target[indixes_list[:3500]]
cv = kfold(3500, 3, False)
result_euclidean = knn_cross_val_score(sub_data,
                                       sub_target,
                                       [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                                       "accuracy",
                                       cv,
                                       strategy="brute",
                                       metric="euclidean",
                                       weights=False,
                                       test_block_size=0)
result_cosine = knn_cross_val_score(sub_data,
                                    sub_target,
                                    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                                    "accuracy",
                                    cv,
                                    strategy="my_own",
                                    metric="cosine",
                                    weights=False,
                                    test_block_size=0)
print("Euclidean results: ", result_euclidean)
print("Cosine results: ", result_cosine)
# Vote per k: +1 when euclidean beats cosine for that k, -1 otherwise.
better = 0
for item in range(1, 11):
    if np.sum(result_euclidean[item]) > np.sum(result_cosine[item]):
        better += 1
    else:
        better -= 1
if better > 0:
    print("Euclidean worked better")
else:
    print("Cosine worked better")
| true |
fd3ed4e46570584aa95f908dcd533885f7f9ff8b | Python | abhayprakash/competitive_programming | /spoj/BISHOPS-5354458-src.py | UTF-8 | 93 | 2.984375 | 3 | [] | no_license | try:
    # SPOJ BISHOPS (Python 2): the maximum number of mutually non-attacking
    # bishops on an n x n board is 1 when n == 1, otherwise 2*(n-1).
    while 1:
        n=int(raw_input())
        if n==1:
            print 1
        else:
            print 2*(n-1)
# NOTE(review): the bare except serves as the EOF/blank-input exit condition,
# but it also silently swallows every other error.
except:0
| true |
79aea6a6a2e4984da3c648fda977b87feac4f8b1 | Python | alexnwang/SketchEmbedNet-public | /util/utils.py | UTF-8 | 4,565 | 2.515625 | 3 | [] | no_license | import os
import numpy as np
import tensorflow as tf
from multiprocessing import Process, Queue
from PIL import Image
from .quickdraw_utils import stroke_three_format, scale_and_rasterize
def process_write_out(write_fn, fn_args, max_queue_size=5000):
    """
    Begins a parallelized writer that runs write_fn. Need to disable cuda devices when beginning Process.
    :param write_fn:
    :param fn_args:
    :param max_queue_size:
    :return:
    """
    # Hide GPUs so the spawned writer process does not initialise CUDA.
    os.environ['CUDA_VISIBLE_DEVICES'] = ''
    write_queue = Queue(maxsize=max_queue_size)
    # The queue is appended as the last positional argument to write_fn.
    process = Process(target=write_fn, args=fn_args + (write_queue,))
    process.start()
    # NOTE(review): this restores a hard-coded '0' rather than whatever
    # CUDA_VISIBLE_DEVICES held before — confirm this is intended.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    return process, write_queue
def gaussFilter(fx, fy, sigma):
    """
    Creates a filter with gaussian blurring
    :param fx:
    :param fy:
    :param sigma:
    :return:
    """
    # Zero-centred coordinate grid spanning the kernel size.
    x = tf.range(-int(fx / 2), int(fx / 2) + 1, 1)
    y = x
    Y, X = tf.meshgrid(x, y)
    # Reuse `sigma` as the Gaussian denominator -2*sigma^2.
    sigma = -2 * (sigma ** 2)
    z = tf.cast(tf.add(tf.square(X), tf.square(Y)), tf.float32)
    # The leading factor of 2 cancels out in the normalisation below.
    k = 2 * tf.exp(tf.divide(z, sigma))
    # Normalise so the kernel sums to 1.
    k = tf.divide(k, tf.reduce_sum(k))
    return k
def gaussian_blur(image, filtersize, sigma):
    """
    Applies gaussian blur to image based on provided parameters
    :param image:
    :param filtersize:
    :param sigma:
    :return:
    """
    n_channels = image.shape[-1]
    fx, fy = filtersize[0], filtersize[1]
    filt = gaussFilter(fx, fy, sigma)
    # Replicate the 2-D kernel per channel for a depthwise convolution.
    filt = tf.stack([filt] * n_channels, axis=2)
    filt = tf.expand_dims(filt, 3)
    # Zero-pad, convolve, then crop the padding back off.
    padded_image = tf.pad(image, [[0, 0], [fx, fx], [fy, fy], [0, 0]], constant_values=0.0)
    res = tf.nn.depthwise_conv2d(padded_image, filt, strides=[1, 1, 1, 1], padding="SAME")
    return res[:, fx:-fx, fy:-fy, :]
def bilinear_interpolate_4_vectors(vectors, interps=10):
    """
    Bilinear interpolation between 4 vectors placed on the corners of a unit
    square: vectors[0]=(0,0), vectors[1]=(1,0), vectors[2]=(0,1), vectors[3]=(1,1).
    For a (4, k) input, returns an (interps, interps, k) grid of blends.
    """
    weights = np.zeros((interps, interps, 4))
    step = 1.0 / (interps - 1)
    for row in range(interps):
        for col in range(interps):
            x, y = col * step, row * step
            weights[row, col, :] = np.array(
                [(1 - x) * (1 - y), x * (1 - y), (1 - x) * y, x * y])
    # Contract the corner axis against the 4 input vectors.
    return np.einsum('ija,ak->ijk', weights, vectors)
def interpolate(model, test_dataset, result_name, steps=1, generation_length=64, interps=20):
    """
    Used to generate 2D interpolated embeddings.
    :param model:
    :param test_dataset:
    :param result_name:
    :param steps:
    :param generation_length:
    :param interps:
    :return:
    """
    # os.makedirs without exist_ok — fails if the output directory exists.
    sampling_dir = os.path.join(model._sampling_dir, result_name)
    os.makedirs(sampling_dir)
    test_dataset, _ = test_dataset
    # Begin Writing Child-Process
    for step, entry in enumerate(test_dataset):
        if step == steps:
            break
        # Entries are either (image, labels) or a longer tuple whose first
        # four elements are (sketch_gt, sketch_teacher, image, labels).
        if len(entry) == 2:
            x_image, class_names = entry
        else:
            y_sketch_gt, y_sketch_teacher, x_image, class_names = entry[0:4]
        z, _, _ = model.embed(x_image, training=False)
        # Take embeddings in groups of 4 and span a 2-D interpolation grid
        # between them, decoding every grid point back into a sketch image.
        for idx in range(0, z.shape[0], 4):
            embeddings = z[idx: idx+4].numpy()
            classes = class_names[idx: idx+4].numpy()
            interpolated_embeddings = bilinear_interpolate_4_vectors(embeddings, interps=interps)
            flattened_embeddings = np.reshape(interpolated_embeddings, (-1, z.shape[-1])).astype(np.float32)
            # NOTE(review): this unpacks the first axis of a numpy array into
            # two values — confirm model.decode's return layout supports it.
            _, flattened_strokes = model.decode(flattened_embeddings, training=False, generation_length=generation_length).numpy()
            flattened_images = []
            for strokes in flattened_strokes:
                stroke_three = stroke_three_format(strokes)
                flattened_images.append(scale_and_rasterize(stroke_three, (28, 28), 1).astype('uint8'))
            flattened_images = np.array(flattened_images, dtype=np.uint8)
            # Reassemble the flat decode results back into the grid, then tile
            # rows and columns into one composite image per group of 4.
            interpolated_images = np.reshape(flattened_images, list(interpolated_embeddings.shape[:2]) + list(flattened_images.shape[1:]))
            image_rows = []
            for row in interpolated_images:
                concat_row = np.concatenate(row, axis=1)
                image_rows.append(concat_row)
            np_image = np.concatenate(image_rows, axis=0)
            Image.fromarray(np_image).save(os.path.join(sampling_dir, "{}-{}_{}_{}_{}.png".format(idx//4, *classes)))
| true |
1bc4d43c44fbe332748fa7b743adc3b647dca9fd | Python | phrodrigue/URI-problems | /iniciante/1005/main.py | UTF-8 | 107 | 3.140625 | 3 | [] | no_license | n1 = float(input())
n2 = float(input())
# Weighted average: the first exam weighs 3.5 and the second 7.5 (sum 11).
media = (n1 * 3.5 + n2 * 7.5) / 11
print(f"MEDIA = {media:0.5f}") | true |
5bbcf2a37e2e39247e3efd46b5699262e671a043 | Python | rusianh/cours_python_basic_advance_dllt | /Unit7. FileIO - List/40.list.py | UTF-8 | 394 | 3.875 | 4 | [] | no_license | colors = ["red", "green", "blue"]
# Basic list operations demo: indexing, length, append, negative indexing.
print(colors)
print(colors[0])
print(colors[1])
print(colors[2])
print(len(colors))
colors.append("yellow")
print(colors)
print(colors[3])
for i in range(4): # hard-coded: should use len(colors) instead of assuming 4
    print(colors[i])
last_index = len(colors) - 1
print(colors[-1]) # -1 is the position of the last element
print(colors[last_index]) | true |
69d59e3f864b7181e836fcbc39236eefc16fa1db | Python | GJumge/lab01 | /strings.py | UTF-8 | 203 | 3.140625 | 3 | [] | no_license | strings = ['This', 'list', 'is', 'now', 'all', 'together']
# str.join builds the sentence in linear time with no leading space,
# replacing the quadratic += concatenation loop and its trailing [1:] trim.
sentence = ' '.join(strings)
print(sentence)
| true |
29516da2744665313717d7cf4229e0f352a81297 | Python | pedrorault/discordBot | /ballBot/ballCog.py | UTF-8 | 1,456 | 2.859375 | 3 | [] | no_license | import discord
from discord.ext import commands
from ballBot.ball import randomResponse
import random
class BallCog(commands.Cog):
    """Novelty chat commands (magic 8-ball, random letter, meme spam)."""
    def __init__(self,bot):
        self.bot = bot
    @commands.command(name='8ball')
    async def _8ball(self,ctx):
        # Magic 8-ball: reply with a canned random response.
        msg = randomResponse()
        await ctx.send(msg)
    @commands.command(name='random')
    async def _random(self,ctx):
        # Pick a random multiple-choice alternative (A-E).
        alternativas = ['A','B','C','D','E']
        msg = f'Escolha {random.choice(alternativas)}'
        await ctx.send(msg)
    @commands.command(name='vino')
    async def _vino(self,ctx):
        # Spam the same reaction message five times.
        msg = f'O QUEEE???!?!?!! VOCÊ PEGOU ESSE PERSONAGEM?!!?!?! NAO ACREDITO'
        for i in range(5):
            await ctx.send(msg)
        await ctx.send(f"🤯🤯🤯🤯🤯🤯🤯🤯🤯")
    @commands.command(name='oniv')
    async def _oniv(self,ctx):
        msg = f'Ainda bem que você pegou esse personagem!!! 😊😊😊'
        for i in range(3):
            await ctx.send(msg)
    @commands.command(name='vinok')
    async def _vinok(self,ctx):
        await ctx.send('O QUE!? EU NUNCA PEGO KAKERA')
        await ctx.send('NÃO ACREDITO, COMO VCS PEGAM TÃO RÁPIDO?!?!?!')
        await ctx.send('O KAKERA BRANCO SEMPRE SÓ VEM 3 ROXOS PRA MIM')
        # await ctx.send('$profile 205005381032869888')
        await ctx.send('VAI SE FUDER MARCOS!!!')
        await ctx.send('🤬🤬🤬🤬🤬🤬🤬🤬🤬🤬🤬')
| true |
1cfbed4f3226e8f697dd8ad78f2c5bcaad95dc16 | Python | ORNL/AADL | /model_zoo/TestFunctions_models.py | UTF-8 | 1,736 | 2.84375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 10:30:19 2020
@author: 7ml
"""
import torch
###############################################################################
class Paraboloid(torch.nn.Module):
    """Ill-conditioned quadratic test problem: forward() returns A @ weight,
    where the rows of A span `condition_number` in magnitude."""
    def __init__(self, dim, condition_number=1.e3, device='cpu'):
        super().__init__()
        self.device = device
        # Start from a random orthogonal basis (QR of a random matrix), then
        # scale row i by condition_number**(i/dim) to control conditioning.
        basis, _ = torch.qr(torch.rand(dim, dim))
        for row in range(dim):
            basis[row, :] *= condition_number ** (row / dim)
        self.register_buffer('A', basis)
        self.weight = torch.nn.Parameter(torch.rand(dim))
    def forward(self, x):
        # Input x is ignored; the value depends only on the current weights.
        return self.A.mv(self.weight)
    def get_model(self):
        return self
    def get_weight(self):
        return self.weight
    def get_device(self):
        return self.device
class Rosenbrock(torch.nn.Module):
    """The Rosenbrock test function; its global minimum of 0 is at all-ones."""
    def __init__(self, dim, device='cpu', initial_guess=None):
        super().__init__()
        self.device = device
        if initial_guess is None:
            # No starting point supplied: initialise randomly.
            self.weight = torch.nn.Parameter(torch.rand(dim))
            return
        initial_guess = torch.tensor(initial_guess, dtype=torch.float).view(-1)
        assert initial_guess.numel()==dim, "initial_guess has wrong dimension, need "+str(dim)+", got "+str(initial_guess.numel())
        self.weight = torch.nn.Parameter(initial_guess)
    def forward(self, x):
        # Input x is ignored; evaluates sum_i 100*(w[i+1]-w[i]^2)^2 + (1-w[i])^2.
        w = self.weight
        return sum(100 * (w[i + 1] - w[i] ** 2) ** 2 + (1 - w[i]) ** 2
                   for i in range(w.numel() - 1))
    def get_model(self):
        return self
    def get_weight(self):
        return self.weight
    def get_device(self):
        return self.device
556e661d7e79e28fbe563e37c0f17babc7b8ea73 | Python | pcidale/coffee-shop-udacity | /backend/src/api.py | UTF-8 | 4,473 | 2.515625 | 3 | [] | no_license | from flask import Flask, request, jsonify, abort
import json
from flask_cors import CORS
from .database.models import db_drop_and_create_all, setup_db, Drink
from .auth.auth import AuthError, requires_auth
app = Flask(__name__)
setup_db(app)
# Enable CORS for all routes (the frontend runs on a different origin).
CORS(app)
'''
@TODO uncomment the following line to initialize the database
!! NOTE THIS WILL DROP ALL RECORDS AND START YOUR DB FROM SCRATCH
!! NOTE THIS MUST BE UNCOMMENTED ON FIRST RUN
'''
# Destructive: drops and recreates every table each time the module loads.
db_drop_and_create_all()
# ROUTES
@app.route('/drinks')
def get_drinks_short():
    """
    Public endpoint returning the short representation of every drink.
    :return: 200 with json {'success': True, 'drinks': [...]}
    """
    payload = {
        'success': True,
        'drinks': [d.short() for d in Drink.query.all()],
    }
    return jsonify(payload), 200
@app.route('/drinks-detail')
@requires_auth('get:drinks-detail')
def get_drinks_long():
    """
    Endpoint restricted to users with the 'get:drinks-detail' permission;
    returns the long representation of every drink.
    :return: 200 with json {'success': True, 'drinks': [...]}
    """
    payload = {
        'success': True,
        'drinks': [d.long() for d in Drink.query.all()],
    }
    return jsonify(payload), 200
@app.route('/drinks', methods=['POST'])
@requires_auth('post:drinks')
def post_drink():
    """
    Endpoint restricted to users with the 'post:drinks' permission;
    creates a new drink from the JSON request body.
    :return: json containing the newly created drink
    :raises 422: when the body lacks 'title' or 'recipe'
    """
    body = request.json or {}
    # Previously a missing field raised a bare KeyError and surfaced as a 500.
    if 'title' not in body or 'recipe' not in body:
        abort(422, 'Request body must contain "title" and "recipe"')
    drink = Drink(
        title=body['title'],
        recipe=json.dumps(body['recipe'])
    )
    drink.insert()
    return jsonify({
        'success': True,
        'drinks': [drink.long()]
    })
@app.route('/drinks/<int:drink_id>', methods=['PATCH'])
@requires_auth('patch:drinks')
def patch_drink(drink_id):
    """
    Endpoint restricted to users with the 'patch:drinks' permission;
    updates the title and/or recipe of an existing drink.
    :param drink_id: drink unique id in the database
    :return: json containing the updated drink, or 404 if unknown
    """
    drink = Drink.query.get(drink_id)
    if not drink:
        abort(404, f'Drink id {drink_id} not found')
    payload = request.json
    if 'title' in payload:
        drink.title = payload['title']
    if 'recipe' in payload:
        drink.recipe = json.dumps(payload['recipe'])
    drink.update()
    return jsonify({
        'success': True,
        'drinks': [drink.long()]
    })
@app.route('/drinks/<int:drink_id>', methods=['DELETE'])
@requires_auth('delete:drinks')
def delete_drink(drink_id):
    """
    Endpoint restricted to users with the 'delete:drinks' permission;
    removes a drink from the database.
    :param drink_id: drink unique id in the database
    :return: json containing the removed drink id, or 404 if unknown
    """
    drink = Drink.query.get(drink_id)
    if not drink:
        abort(404, f'Drink id {drink_id} not found')
    drink.delete()
    return jsonify({'success': True, 'delete': drink_id})
# Error Handling
@app.errorhandler(422)
def unprocessable(error):
    """
    Handler for 422 (unprocessable entity) errors.
    :param error:
    :return: status code and json with error message
    """
    payload = {"success": False, "error": 422, "message": "unprocessable"}
    return jsonify(payload), 422
@app.errorhandler(404)
def not_found(error):
    """
    Handler for 404 (resource not found) errors.
    :param error:
    :return: status code and json with error message
    """
    payload = {"success": False, "error": 404, "message": "resource not found"}
    return jsonify(payload), 404
@app.errorhandler(AuthError)
def handle_auth_error(exception):
    """
    Authentication error handler for scoped endpoints
    :param exception: AuthError instance
    :return: response: status code and the error description
    """
    # AuthError carries a JSON-serialisable payload plus its own HTTP status.
    response = jsonify(exception.error)
    response.status_code = exception.status_code
    return response
@app.errorhandler(500)
def internal_server_error(error):
    """
    Handler for internal errors
    :param error:
    :return: status code and json with error message
    """
    # Generic catch-all body; details stay server-side.
    return jsonify(
        {
            'success': False,
            'message': 'internal error',
            'error': 500
        }
    ), 500
| true |
3d3757e11b539c07dc8059de34ae4ceacda5a5f7 | Python | BrunoAfonsoHenrique/CursoEmVideoPython | /AlgoritmosPython_parte1/059.py | UTF-8 | 136 | 4.09375 | 4 | [] | no_license | num = int(input('Digite um numero: '))
# Report whether the number ends in zero (i.e. is divisible by 10).
is_multiple_of_10 = num % 10 == 0
print('É divisivel por 10.' if is_multiple_of_10 else 'Não é divisivel por 10.')
| true |
c267cd41680d406de0d2562950877552b492276f | Python | dw/py-bulksms | /old/Commander.py | UTF-8 | 2,511 | 2.796875 | 3 | [] | no_license | """
BulkSMS/Commander.py: Interactive curses interface to BulkSMS.
"""
__author__ = 'David Wilson'
import os, time, sys
import curses, curses.textpad
import BulkSMS, BulkSMS.PhoneBook
def real_interactive(screen):
    '''
    Act as an interactive curses application. This is called from
    curses.wrapper.
    '''
    # Run a full-screen Textbox editor, then flash a debug banner.
    screen.erase()
    t = curses.textpad.Textbox(screen)
    res = t.edit()
    screen.erase()
    screen.addstr(0, 0, "TESTING!\r\n", curses.A_REVERSE)
    screen.addstr(0, 0, "%r" % repr(curses.color_pair(1)))
    screen.refresh()
    time.sleep(1)
    # Returns the text the user entered in the Textbox.
    return res
class Keys:
    """Namespace of key codes: every curses KEY_* constant (minus the
    prefix) plus CTRL_A..CTRL_Z, generated at class-creation time."""
    def findkey(self, key):
        # Reverse lookup: return the name bound to a key code.
        # NOTE(review): vars(self) only sees *instance* attributes while the
        # constants live on the class — confirm this ever finds anything.
        # (iteritems() is Python 2 only.)
        for name, value in vars(self).iteritems():
            if value == key:
                return name
    # Populate the class namespace directly through locals() — a
    # CPython-specific trick that only works inside a class body.
    this = locals()
    for name in dir(curses):
        if name.startswith('KEY_'):
            this[name[4:]] = getattr(curses, name)
    this.update({
        'CTRL_A': 0x01, 'CTRL_B': 0x02, 'CTRL_C': 0x03,
        'CTRL_D': 0x04, 'CTRL_E': 0x05, 'CTRL_F': 0x06,
        'CTRL_G': 0x07, 'CTRL_H': 0x08, 'CTRL_I': 0x09,
        'CTRL_J': 0x0a, 'CTRL_K': 0x0b, 'CTRL_L': 0x0c,
        'CTRL_M': 0x0d, 'CTRL_N': 0x0e, 'CTRL_O': 0x0f,
        'CTRL_P': 0x10, 'CTRL_Q': 0x11, 'CTRL_R': 0x12,
        'CTRL_S': 0x13, 'CTRL_T': 0x14, 'CTRL_U': 0x15,
        'CTRL_V': 0x16, 'CTRL_W': 0x17, 'CTRL_X': 0x18,
        'CTRL_Y': 0x19, 'CTRL_Z': 0x1a
    })
    del this
keys = Keys()
class LineInput(object):
    '''
    A line editing widget for curses. Has the same sort of goals as
    curses.textpad, except keybindings are different.
    '''
    def __init__(self, target):
        '''
        Initialise this LineInput object. <target> is a window to draw
        on to.
        '''
        self.target = target
        self.active = False
        self.X, self.Y = 0, 0
        # Copy so per-instance rebinding never mutates the class defaults.
        self.bindings = dict(self._default_bindings)
    def read(self):
        # Event loop: dispatch bound keys to their action, everything else
        # to process_key().  NOTE(review): process_key is not defined here.
        while 1:
            ch = self.target.getch()
            action = self.bindings.get(ch, None)
            if action is not None:
                action(self)
            else:
                self.process_key(ch)
    # All editing actions are unimplemented stubs aliased to move_left.
    # NOTE(review): _default_bindings stores raw classmethod objects, which
    # are not directly callable — action(self) above likely fails; verify.
    def move_left(self):
        pass
    move_left = classmethod(move_left)
    move_right = move_left
    history_last = move_left
    history_next = move_left
    delete_word = move_left
    delete_line = move_left
    reverse_search = move_left
    complete = move_left
    _default_bindings = [
        ( keys.LEFT, move_left, ),
        ( keys.RIGHT, move_right, ),
        ( keys.UP, history_last, ),
        ( keys.DOWN, history_next, ),
        ( keys.CTRL_W, delete_word, ),
        ( keys.CTRL_U, delete_line, ),
        ( keys.CTRL_R, reverse_search, ),
        ( keys.CTRL_I, complete, ),
    ]
def interactive(argv):
    '''
    Act as an interactive curses application.
    '''
    # curses.wrapper sets up and restores the terminal around real_interactive.
    # (Python 2 print statement; argv is currently unused.)
    print repr(curses.wrapper(real_interactive))
| true |
3ee925dd40e12495e1aa1aeeb8bfc4a8e52462c9 | Python | ipcoo43/algorithm | /lesson170.py | UTF-8 | 275 | 3.53125 | 4 | [] | no_license | print('''메모리공간을 동적으로 사용하여 데이터 관리하기''')
# Read `num` scores one by one, then report the total and integer average.
total = 0
num = int(input('입력 할 수 >> '))
for i in range(num):
    total += int(input('{} 번 점수 ? '.format(i+1)))
print('총점 = {}, 평균 = {}'.format(total, total//num))
782ba656b72436b57e36bbe663db8e805e7c816b | Python | zivadinac/pmf-master | /src/utils.py | UTF-8 | 2,512 | 2.734375 | 3 | [] | no_license | """Utility functions and variables"""
import tensorflow as tf
import gym
import numpy as np
from skimage.transform import resize
from skimage.color import rgb2grey, gray2rgb
import matplotlib.pyplot as plt
import time
import pickle
def getTerminalStates(env):
    """ Get terminals states of a given env. """
    # env.unwrapped.P maps state -> action -> list of transition tuples; per
    # the gym toy-text convention each tuple is (prob, next_state, reward,
    # done) — confirm for the env in use.
    pS = env.unwrapped.P
    terminalStates = []
    for state in range(env.observation_space.n):
        for action in range(env.action_space.n):
            psa = pS[state][action]
            for prob in psa:
                if prob[3]:
                    terminalStates.append(prob[1])
    # Deduplicate: a terminal state can be reached via many transitions.
    return set(terminalStates)
def oneHot(lentgh, hot):
    """Return a (1, lentgh) row vector of zeros with a 1 at index `hot`."""
    encoded = np.zeros((1, lentgh))
    encoded[0, hot] = 1
    return encoded
def extractPolicyFromQ(q):
    """Greedy policy: for each state (row of q) pick the best-valued action."""
    return np.argmax(q, axis=1)
def extractPolicyFromApprox(approx, allStates):
    """ Extract policy given aproximator approx. """
    policy = np.zeros(len(allStates))
    for state in allStates:
        # approx receives a one-hot state encoding and is assumed to return a
        # scalar action.  NOTE(review): confirm the approximator's output shape.
        policy[state] = approx(oneHot(len(allStates), state))
    return policy
def epsGreedyProbs(probs, eps):
    """Epsilon-greedy distribution: uniform eps mass, plus 1-eps on argmax."""
    n = len(probs)
    dist = np.ones(n) * eps / n
    # The greedy action gets the remaining probability mass.
    dist[np.argmax(probs)] += 1 - eps
    return dist
def getMinibatchInds(bs, allInds):
    """Split allInds into consecutive minibatches of size bs; the final batch
    may be smaller, and empty batches are never emitted."""
    batches = []
    for start in range(0, len(allInds), bs):
        batch = allInds[start:start + bs]
        if len(batch) > 0:
            batches.append(batch)
    return batches
def preprocess(state):
    """Normalise a raw RGB frame to a zero-mean 84x84x1 grayscale float32."""
    s = state.astype(np.float32) / 255.0
    resized = resize(s, (110, 84, 3))
    # Crop rows 17..100 (84 rows) — presumably trims the score banner; verify
    # against the environment's frame layout.
    cropped = resized[17:101, :, :]
    grey = rgb2grey(cropped).reshape((84, 84, 1))
    return (grey - np.mean(grey)).astype(np.float32)
def createAdvDiffFrame(advDiff):
    """Embed an 84x84 advantage-difference map back into a 210x160 RGB frame."""
    f = np.zeros((110, 84, 3))
    # Place the map into the same cropped window that preprocess() extracts.
    f[17:101, :, 0] = advDiff
    f[17:101, :, 1] = advDiff
    f[17:101, :, 2] = advDiff
    #return resize(gray2rgb(f), (210, 160, 3)).astype(np.uint8)
    # resize() returns floats in [0, 1]; rescale to 8-bit before casting.
    return (resize(f, (210, 160, 3)) * 255).astype(np.uint8)
def pushframe(currentFrames, frame):
    """Shift a (1, H, W, 4) frame stack left by one slot and append `frame`
    (H, W, 1) as the newest channel; returns a new float32 array."""
    stacked = np.zeros_like(currentFrames, dtype=np.float32)
    stacked[0, :, :, :3] = currentFrames[:, :, :, 1:]
    stacked[0, :, :, 3] = frame[:, :, 0]
    return stacked
def save(obj, path):
    """Pickle ``obj`` to ``path`` using protocol 4.

    Uses a context manager so the file is closed even if pickling fails
    (the original leaked the handle on error).
    """
    with open(path, "wb") as f:
        pickle.dump(obj, f, protocol=4)
def load(path):
    """Unpickle and return the object stored at ``path``.

    Uses a context manager so the file is closed even if unpickling fails
    (the original leaked the handle on error).
    """
    with open(path, "rb") as f:
        return pickle.load(f)
| true |
64db8a8fa4f5b764cc488d884eaa9d7101fcf1b8 | Python | harshilpatel312/dbgame | /background.py | UTF-8 | 842 | 3.71875 | 4 | [] | no_license | import pygame
class Background:
    """Horizontally scrolling background made of two image copies drawn side by side."""
    # scrolling background logic: https://www.youtube.com/watch?v=PjgLeP0G5Yw
    def __init__(self, image):
        self.bg = pygame.image.load(image)
        self.bg_x = 0 # position of first background image
        self.bg_x2 = self.bg.get_width() # position of second background image
        self.bg_speed = 1.4  # NOTE(review): never read inside this class - confirm external use
    def scroll(self, speed):
        # scroll background before the end of the race
        self.bg_x -= speed
        self.bg_x2 -= speed
        # wrap a copy to the right edge once it has moved fully off screen (x < -width)
        if self.bg_x < self.bg.get_width() * -1:
            self.bg_x = self.bg.get_width()
        if self.bg_x2 < self.bg.get_width() * -1:
            self.bg_x2 = self.bg.get_width()
    def update(self, screen):
        # Draw both copies; the second one is blitted horizontally mirrored.
        screen.blit(self.bg, (self.bg_x, 0))
        screen.blit(pygame.transform.flip(self.bg, True, False), (self.bg_x2, 0))
| true |
b479ea2025f8d3d427fa045df67325c83d2f0faf | Python | MarkDM/red-python-scripts | /windows10-wifi_ptBr.py | UTF-8 | 1,159 | 2.671875 | 3 | [
"MIT"
] | permissive | import subprocess
import re
# Windows-only helper (pt-BR locale): list saved Wi-Fi profiles via ``netsh``
# and print the SSID plus stored key of every password-protected network.
command_output = subprocess.run(
    ["netsh", "wlan", "show", "profiles"],
    capture_output=True).stdout.decode(encoding='iso-8859-1')
# Profile names are the last non-separator token of each output line.
profile_names = (re.findall(r"[^:|\s]+$", command_output, flags=re.MULTILINE))
wifi_list = []
if len(profile_names) != 0:
    for name in profile_names:
        wifi_profile = {}
        profile_info = subprocess.run(
            ["netsh", "wlan", "show", "profile", name], capture_output=True).stdout.decode()
        profile_info = profile_info.replace(r'\\s+', '')
        # Only keep profiles that actually store a security key ("Presente").
        if re.search("Chave de segu.+:\s+Presente", profile_info):
            wifi_profile["ssid"] = name
            profile_info_pass = subprocess.run(
                ["netsh", "wlan", "show", "profile", name, "key=clear"], capture_output=True).stdout.decode()
            password = re.search(
                "(?<=Conte.do da Chave:).+\r", profile_info_pass)
            if password == None:
                wifi_profile["password"] = None
            else:
                # BUG FIX: the pattern has no capture group, so the original
                # ``password[1]`` raised IndexError ("no such group") whenever
                # a key was found; the whole match is group 0.
                wifi_profile["password"] = password[0]
            wifi_list.append(wifi_profile)
for x in range(len(wifi_list)):
    print(wifi_list[x])
| true |
47bf658db25a11f27909ad69fb9b3d0006958752 | Python | AufzumAtem/ItProg | /IT Prog 2/2.3.py | UTF-8 | 1,320 | 3.59375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 2 13:14:00 2018
@author: Romeo
"""
class bankaccount:
    """Simple bank account with an owner name, a currency and a balance.

    Note: the instance attributes ``balance`` and ``currency`` shadow the
    identically named methods, so those methods are only reachable through
    the class (e.g. ``bankaccount.balance(acc)``) as done in the demo below.
    """
    def __init__(self, currency, first , last):
        self.first = first
        self.last = last
        self.balance = 0
        self.currency = currency
    def deposit(self, amount):
        """Add ``amount`` to the balance; negative amounts are rejected."""
        # BUG FIX: the original compared ``amount < self.balance``, which
        # rejected every deposit smaller than the current balance (only the
        # very first deposit on an empty account could ever succeed).
        if amount < 0:
            return "invalid amount entered"
        else:
            self.balance = self.balance + amount
            return self.balance
    def withdraw(self, amount):
        """Remove ``amount`` from the balance if sufficient funds exist."""
        if amount > self.balance:
            return "invalid amount entered"
        else:
            self.balance = self.balance - amount
            return self.balance
    def owner(self):
        """Return a ("first last", balance) tuple."""
        return '{} {}'.format(self.first, self.last), self.balance
    def currency(self):
        # Returns the shadowing instance attribute when called via the class.
        return self.currency
    def balance(self):
        # Returns the shadowing instance attribute when called via the class.
        return self.balance
# Demo: the getter methods are invoked through the class because the instance
# attributes ``balance``/``currency`` shadow the same-named methods.
acc1 = bankaccount("USD","Romeo", "Moritzi")
acc2 = bankaccount("CHF","Test", "User")
print(bankaccount.balance(acc1))
print(bankaccount.deposit(acc1,1000))
print(bankaccount.withdraw(acc1, 1200))
print(bankaccount.balance(acc1))
print(bankaccount.withdraw(acc1, 200))
print(bankaccount.balance(acc1))
print(bankaccount.currency(acc1))
print(bankaccount.currency(acc2))
print(bankaccount.owner(acc1))
print(bankaccount.owner(acc2)) | true |
8464f88da72e5d80e9d3f3b3dad37faed276a841 | Python | SIMELLI25/esercizi_python | /pasta.py | UTF-8 | 2,359 | 2.640625 | 3 | [] | no_license | from spalla import Verifica
# Sign the submission; the helper calls and the quoted-out exercise blocks
# below were enabled one at a time while solving them.
Verifica.firma("Simone Melli")
#Verifica.stampa_esercizi()
#Verifica.stampa_voto()
'''
es = Verifica.inizia_esercizio(1)
print(es)
print(es.dati)
es.consegna(es.dati.lower())
'''
'''
es = Verifica.inizia_esercizio(2)
print(es)
print(es.dati)
es.consegna(es.dati*es.dati)
'''
'''
es = Verifica.inizia_esercizio(3)
print(es)
print(es.dati)
es.consegna(es.dati["cognome"])
'''
'''
es = Verifica.inizia_esercizio(4)
print(es)
print(es.dati)
es.consegna(len(es.dati))
'''
'''
es = Verifica.inizia_esercizio(5)
print(es)
print(es.dati)
lista = []
for i in es.dati:
s = i.upper()
lista.append(s)
es.consegna(lista)
'''
'''
es = Verifica.inizia_esercizio(6)
print(es)
print(es.dati)
es.consegna(sum(es.dati))
'''
'''
es = Verifica.inizia_esercizio(7)
print(es)
print(es.dati)
somma = 0
for i in es.dati:
if i > 5:
somma += i
es.consegna(somma)
'''
'''
es = Verifica.inizia_esercizio(8)
print(es)
print(es.dati)
somma = 0
for i in es.dati:
if es.dati.index(i) % 2 == 0:
somma += i
es.consegna(somma)
incompleto
'''
'''
es = Verifica.inizia_esercizio(9)
print(es)
print(es.dati)
somma = 0
for i in es.dati:
if i % 2 != 0:
somma += i
es.consegna(somma)
'''
'''
es = Verifica.inizia_esercizio(10)
print(es)
print(es.dati)
es.consegna(sorted(es.dati))
'''
'''
es = Verifica.inizia_esercizio(11)
print(es)
print(es.dati)
lista = []
for i in es.dati:
e = i.lower()
lista.append(e)
es.consegna(sorted(lista))
'''
'''
es = Verifica.inizia_esercizio(12)
print(es)
print(es.dati)
lista = []
for i in es.dati:
e = i-1
lista.append(e)
es.consegna(lista)
'''
'''
es = Verifica.inizia_esercizio(13)
print(es)
print(es.dati)
lista = []
for i in es.dati:
if es.dati[i] == len(es.dati)-1:
e = i
else:
e = i + es.dati[es.dati.index(i)+1]
lista.append(e)
print(lista)
incompleto
'''
'''
es = Verifica.inizia_esercizio(14)
print(es)
print(es.dati)
diz = {}
count_zeri = 0
count_pos = 0
count_neg = 0
for i in es.dati:
if i == 0:
count_zeri += 1
elif i > 0:
count_pos += 1
elif i < 0:
count_neg += 1
diz["zeri"] = count_zeri
diz["positivi"] = count_pos
diz["negativi"] = count_neg
es.consegna(diz)
'''
| true |
e98702cbadbb064f457b83a89be85cb7b80a1cc8 | Python | RFloTeo/RoboStuff | /test.py | UTF-8 | 480 | 2.828125 | 3 | [] | no_license | import brickpi3
import time
# BrickPi robot: drive straight for ~4.6 s, then turn in place, four times
# over (presumably tracing a square - turn duration is hand-tuned; confirm
# on hardware).  Ctrl-C stops the motors cleanly.
BP = brickpi3.BrickPi3()
turn_dps = 100   # wheel speed (degrees per second) while turning
move_dps = -100  # wheel speed while driving straight
def TurnLeft():
    # Spin the wheels in opposite directions for 1.42 s, then stop everything.
    BP.set_motor_dps(BP.PORT_A, turn_dps)
    BP.set_motor_dps(BP.PORT_B, -turn_dps)
    time.sleep(1.42)
    BP.reset_all()
try:
    for i in range(4):
        BP.set_motor_dps(BP.PORT_A, move_dps)
        BP.set_motor_dps(BP.PORT_B, move_dps)
        time.sleep(4.62)
        TurnLeft()
    BP.reset_all()
except KeyboardInterrupt:
    # Always stop the motors on Ctrl-C.
    BP.reset_all()
| true |
72c137ae0e8b5ea65f28765935993b1482212976 | Python | wheatmushi/projectEuler | /euler_021 (Amicable numbers).py | UTF-8 | 427 | 3.171875 | 3 | [] | no_license | from math import ceil, sqrt
def d(n):
    """Return the sum of proper divisors of ``n`` (with d(1) == 1, as before).

    Fixes two defects of the original: the loose ``ceil(sqrt(n)) + 1`` loop
    bound re-added already-counted divisor pairs (d(6) came out as 11), and
    the square root of a perfect square was counted twice (d(4) came out 5).
    The d(1) == 1 convention is kept on purpose - the amicable scan below
    indexes ``n_div[d(n)]`` and would hit a missing key 0 otherwise.
    """
    total = 1
    for i in range(2, int(sqrt(n)) + 1):
        if n % i == 0:
            total += i
            partner = n // i
            if partner != i:  # don't count sqrt(n) twice for perfect squares
                total += partner
    return total
# Project Euler 21: sum of all amicable numbers below 10000.
maximum=10000
amicables=[]
n_div={}
# Precompute the proper-divisor sum for every n in range.
for n in range(1,maximum):
    n_div[n] = d(n)
# n is amicable when d(n) is in range, d(d(n)) == n, and d(n) != n.
for n in range(1,maximum):
    if n_div[n]<maximum and n_div[n_div[n]]==n and n_div[n] != n:
        amicables.append(n)
print(sum(amicables))
| true |
eb0a19c684bbcbc0236d8077667dd08d93ee711b | Python | lobhaum/python | /py2zumbis/arquivoPratica01.py | UTF-8 | 160 | 3.296875 | 3 | [] | no_license | f = open('arquivoPratica01.txt', 'w')
# ``f`` was opened above on 'arquivoPratica01.txt' in write mode.  Close it
# even if a write raises (the original leaked the handle on failure).
try:
    f.write('Gravando uma única linha no arquivo')
    for l in range(1, 101):
        f.write(f'Gravando a linha {l} \n')
finally:
    f.close()
| true |
44620fbe6ec751547c32650876eef0f6e0f32bf0 | Python | PraneshASP/LeetCode-Solutions-2 | /904 - Fruit into Baskets.py | UTF-8 | 3,028 | 3.4375 | 3 | [] | no_license | # Solution 1: Relatively straight forward, just make sure to handle all the edges cases
class Solution(object):
    """LC 904 Fruit Into Baskets: longest subarray with at most two distinct values."""
    def totalFruit(self, tree):
        """
        :type tree: List[int]
        :rtype: int

        Classic O(n) sliding window: count each fruit type inside the
        window and shrink from the left whenever a third type appears.
        Replaces the original backwards rescan on every basket switch,
        whose worst case was O(n^2).
        """
        counts = {}
        left = 0
        best = 0
        for right, fruit in enumerate(tree):
            counts[fruit] = counts.get(fruit, 0) + 1
            while len(counts) > 2:
                # evict from the left until only two types remain
                counts[tree[left]] -= 1
                if counts[tree[left]] == 0:
                    del counts[tree[left]]
                left += 1
            best = max(best, right - left + 1)
        return best
# Solution 2: Avoid backtracking to guarantee O(n) solution
class Solution(object):
    def totalFruit(self, tree):
        """
        :type tree: List[int]
        :rtype: int
        """
        # Same two-basket scan as Solution 1, but ``new_fruit_one`` (the run
        # length of identical fruits ending just before position i) is kept
        # incrementally instead of being recomputed with a backwards scan,
        # removing the quadratic worst case.
        if len(tree) < 2:
            return len(tree)
        fruit_one = 0
        fruit_one_type = None
        fruit_two = 0
        fruit_two_type = None
        max_fruit = 0
        new_fruit_one = 1
        new_fruit_one_type = tree[0]
        for i in range(len(tree)):
            # Maintain the length of the run of tree[i-1]'s value ending at i-1.
            if i > 1 and tree[i-1] == new_fruit_one_type:
                new_fruit_one += 1
            else:
                new_fruit_one_type = tree[i-1]
                new_fruit_one = 1
            if fruit_one_type is None:
                fruit_one += 1
                fruit_one_type = tree[i]
                continue
            if tree[i] == fruit_one_type:
                fruit_one += 1
                continue
            if fruit_two_type is None:
                fruit_two += 1
                fruit_two_type = tree[i]
                continue
            if tree[i] == fruit_two_type:
                fruit_two += 1
                continue
            # If we get to this point, we've hit a new fruit type
            # Check if we have a bigger max and reset
            max_fruit = max(fruit_one + fruit_two, max_fruit)
            fruit_one = new_fruit_one
            fruit_one_type = new_fruit_one_type
            fruit_two = 1
            fruit_two_type = tree[i]
            max_fruit = max(fruit_one + fruit_two, max_fruit)
        return max_fruit
b89127dbc34619542e8db0960036d3c05ad66529 | Python | grchristensen/django-app | /functional_tests.py | UTF-8 | 551 | 2.984375 | 3 | [] | no_license | from selenium import webdriver
import unittest
class TestToDo(unittest.TestCase):
    """Selenium functional-test skeleton for a to-do web app."""
    def setUp(self):
        # A fresh Firefox session for every test.
        self.browser = webdriver.Firefox()
    def tearDown(self):
        self.browser.quit()
    def test_can_start_a_list_and_retrieve_it_later(self):
        # Bob has heard about this awesome new to-do app.
        self.browser.get('http://localhost:8000')
        # He notices 'to-do' in the title.
        self.assertIn('To-Do', self.browser.title)
        self.fail('Finish the test!')
if __name__ == '__main__':
unittest.main() | true |
327af45312be5354133bbb7c1cc271667680c660 | Python | silky/bell-ppls | /env/lib/python2.7/site-packages/observations/r/monica.py | UTF-8 | 2,200 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def monica(path):
  """WHO Monica Data
  The `monica` data frame has 6357 rows and 12 columns. Note that
  `mifem` is the female subset of this data frame.
  This data frame contains the following columns:
  outcome
      mortality outcome, a factor with levels `live`, `dead`
  age
      age at onset
  sex
      m = male, f = female
  hosp
      y = hospitalized, n = not hospitalized
  yronset
      year of onset
  premi
      previous myocardial infarction event, a factor with levels `y`,
      `n`, `nk` not known
  smstat
      smoking status, a factor with levels `c` current, `x` ex-smoker,
      `n` non-smoker, `nk` not known
  diabetes
      a factor with levels `y`, `n`, `nk` not known
  highbp
      high blood pressure, a factor with levels `y`, `n`, `nk` not
      known
  hichol
      high cholesterol, a factor with levels `y`, `n` `nk` not known
  angina
      a factor with levels `y`, `n`, `nk` not known
  stroke
      a factor with levels `y`, `n`, `nk` not known
  Newcastle (Australia) centre of the Monica project; see the web site
  http://www.ktl.fi/monica
  Args:
    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there.
      Filename is `monica.csv`.
  Returns:
    Tuple of np.ndarray `x_train` with 6367 rows and 12 columns and
    dictionary `metadata` of column headers (feature names).
  """
  # pandas is imported lazily so importing this module stays cheap.
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'monica.csv'
  # Download the CSV on first use; later calls read the cached local copy.
  if not os.path.exists(os.path.join(path, filename)):
    url = 'http://dustintran.com/data/r/DAAG/monica.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='monica.csv',
                               resume=False)
  data = pd.read_csv(os.path.join(path, filename), index_col=0,
                     parse_dates=True)
  x_train = data.values
  metadata = {'columns': data.columns}
  return x_train, metadata
| true |
c0437c7405b57b734606c9a48135291d1b0651a8 | Python | hitanshkadakia/cool_text_making | /cooltext.py | UTF-8 | 208 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | import os
from pyfiglet import Figlet
def cool(text):
    """Return ``text`` rendered as ASCII art with pyfiglet's "slant" font.

    Also clears the console first; note that "cls" is Windows-only.
    """
    cooltext=Figlet(font="slant")
    os.system("cls")
    return str(cooltext.renderText(text))
if __name__ == "__main__":
print(cool("Happy_coding01")) | true |
40c58a95ba8611443dbb3c78d1bd7bf9142465b8 | Python | mentecatoDev/python | /diccionarios/03.py | UTF-8 | 554 | 3.828125 | 4 | [] | no_license | """
Ejercicio 03
Escribir un programa que lea un log de correo construya un histograma usando
un diccionario para contar cuántos mensajes se recibieron de cada dirección
de correo electrónico y visualice los resultados.
"""
try:
    fhandle = open(input("Introduzca el nombre del fichero: "))
except IOError:
    print("El fichero no existe")
    exit()
# Histogram of senders: count the "From " header lines per e-mail address.
mails = dict()
with fhandle:  # ensure the file is closed (the original leaked the handle)
    for line in fhandle:
        if line.startswith("From "):
            sender = line.split()[1]
            mails[sender] = mails.get(sender, 0) + 1
for mail in mails:
    print(mail, mails[mail])
| true |
66aa4c6e162ffcaefa351728e6020354bc658815 | Python | AIRI-Institute/GENA_LM | /downstream_tasks/promoter_prediction/dataset_generator.py | UTF-8 | 1,703 | 2.859375 | 3 | [
"MIT"
] | permissive | from Bio import SeqIO
import numpy as np
import pandas as pd
np.random.seed(42)  # fixed seed so the negative-example shuffles are reproducible
print('Please, input fasta filename: ')
path = input()
# The file name is expected to embed the sequence length after 'len';
# the suffix starting there is reused in the output CSV name below.
ind = path.find('len')
name = path.split('.')[0]
length = name[ind:]
count = 0
sequences = [] # Here we are setting up an array to save our sequences for the next step
for seq_record in SeqIO.parse(path, "fasta"):
    sequences.append(seq_record.seq)
    count = count + 1
# Positive examples: the sequences themselves, truncated to 16 kb if longer.
if len(sequences[0]) > 16000:
    positive_seqs = [str(sequence[:16000]) for sequence in sequences]
else:
    positive_seqs = [str(sequence) for sequence in sequences]
def sample_negative(sequence):
    # Build a negative example: cut the sequence into 20 equal chunks, pick
    # 12 of them at random and shuffle those 12 among their own positions,
    # while the other 8 chunks are re-inserted in ascending-index order
    # (NOTE(review): that order relies on small-int set iteration order).
    n = len(sequence)
    assert n % 20 == 0, f'Sequence length {n} should be divisible by 20. E.g. 300 = epdnew from -249 to 50'
    step = n//20
    subs = [ sequence[i:i+step] for i in range(0, n, step) ]
    selected_inds = list(np.random.choice(20, 12, replace=False))
    selected = [subs[i] for i in selected_inds]
    not_selected_inds = list(set(range(20)).difference(selected_inds))
    not_selected = [subs[i] for i in not_selected_inds]
    new_s = ''
    np.random.shuffle(selected)
    for i in range(20):
        if i in selected_inds:
            new_s += selected.pop(0)
        else:
            new_s += not_selected.pop(0)
    return new_s
# Negatives: one shuffled counterpart per positive sequence.
negative_seqs = []
for s in positive_seqs:
    negative_seqs.append(sample_negative(s))
# Generate dataset
l = len(positive_seqs)
all_seqs = positive_seqs.copy()
all_seqs.extend(negative_seqs)
len(all_seqs)  # NOTE(review): no-op expression, likely a notebook leftover
# Labels: 1 for the first l rows (positives), 0 for the negatives; then
# shuffle rows deterministically and write the CSV.
df = pd.DataFrame.from_dict({'sequence' : all_seqs, 'promoter_presence' : [1]*l + [0]*l})
df = df.sample(frac=1, random_state=42).reset_index(drop=True)
df.to_csv(f'hg38_promoters_{length}_dataset.csv', index=False)
| true |
fc932596eaa40b26550dad42f2ae3eaeb8f829bb | Python | JaneHQ1/Path-to-Python3-v2 | /thirteen/c1.py | UTF-8 | 988 | 3.078125 | 3 | [
"MIT"
] | permissive | """
13-1 Analyse the scraping goal and decide which pages to scrape
"""
# Our crawler is only a very simple demo case, not a complete crawler.
# A full crawler is much more complex: anti-scraping counter-measures,
# automatic login, proxy IP pools and similar support features are tedious.
# This example uses no crawler framework at all; it scrapes data purely
# with the knowledge covered so far.
# Goals of this mini crawler case study:
#   consolidate previously learned material
#   learn sensible coding conventions and style
#   understand the basic principles of web crawling
# Step 1
# Before writing a crawler you must know exactly what you want, because the
# amount of data and information on the web is enormous.
# Goal: crawl the streamer popularity ranking in the League of Legends
# category on Panda TV.
# How do we crawl it?
# The data we want lives on the website - how do we get it into our program?
# Step 2
# Get thoroughly familiar with the site structure.
# The viewer count is the piece of data we want to scrape.
# Scrape every streamer's viewer count into the program, then compare and
# sort at the end to obtain the popularity ranking.
| true |
a65f7a30c2b65ebfea3a0d1e834497766c2b0f51 | Python | IvanWoo/coding-interview-questions | /puzzles/same_tree.py | UTF-8 | 956 | 3.703125 | 4 | [] | no_license | # https://leetcode.com/problems/same-tree/description/
"""
Given the roots of two binary trees p and q, write a function to check if they are the same or not.
Two binary trees are considered the same if they are structurally identical, and the nodes have the same value.
Example 1:
Input: p = [1,2,3], q = [1,2,3]
Output: true
Example 2:
Input: p = [1,2], q = [1,null,2]
Output: false
Example 3:
Input: p = [1,2,1], q = [1,1,2]
Output: false
Constraints:
The number of nodes in both trees is in the range [0, 100].
-104 <= Node.val <= 104
"""
from typing import Optional
from puzzles.utils import TreeNode
def is_same_tree(p: "Optional[TreeNode]", q: "Optional[TreeNode]") -> bool:
    """Return True iff trees ``p`` and ``q`` are structurally identical with equal values.

    Improvements over the original: the nested ``helper`` closure that was
    rebuilt on every call is gone (direct recursion), and the annotations
    are string-quoted (PEP 563 style) so defining this function no longer
    requires ``TreeNode`` to be resolvable at import time.
    """
    if p is None and q is None:
        return True
    if p is None or q is None:
        return False
    if p.val != q.val:
        return False
    return is_same_tree(p.left, q.left) and is_same_tree(p.right, q.right)
| true |
0440473de32317a2a441c2c88efe29710497f744 | Python | dla1434/book_python_effective_flow_control | /chapter5/multiprocessing/daemon_process_join.py | UTF-8 | 400 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | ###################################
# File Name : daemon_process_join.py
###################################
#!/usr/bin/python3
import time
import multiprocessing
def daemon():
    # Worker: announce start, simulate 5 s of work, announce exit.
    print ("Start")
    time.sleep(5)
    print ("Exit")
def main():
    d = multiprocessing.Process(name="daemon", target=daemon)
    d.daemon = True
    d.start()
    # join() blocks until the worker finishes, so despite the daemon flag
    # "Exit" is printed before the parent terminates.
    d.join()
if __name__ == "__main__":
    main()
| true |
b5b5924bd82f8a79c13c04a16ee2ef1901f89589 | Python | rigved-sanku/Covid-19-Prediction | /initializations.py | UTF-8 | 808 | 2.703125 | 3 | [] | no_license |
import numpy as np
class Layer:
    """A fully-connected network description with randomly initialised parameters."""
    def __init__(self,no_layers,num_nodes:list,activation:list):
        """Store the architecture and initialise weights/biases immediately."""
        self.layers = no_layers
        self.nodes = num_nodes
        self.activation = activation
        self.params = self.intialize_weight()
        self.grads = {}
    def intialize_weight(self):
        """Return {'W1': ..., 'b1': ..., ...} with small random weights and zero biases.

        The RNG is seeded with a fixed value so results are reproducible.
        """
        np.random.seed(3)
        parameters = {}
        for idx in range(1, len(self.nodes)):
            fan_out, fan_in = self.nodes[idx], self.nodes[idx - 1]
            parameters['W' + str(idx)] = np.random.randn(fan_out, fan_in) * 0.01
            parameters['b' + str(idx)] = np.zeros((fan_out, 1))
            # Sanity checks on the shapes just created.
            assert parameters['W' + str(idx)].shape == (fan_out, fan_in)
            assert parameters['b' + str(idx)].shape == (fan_out, 1)
        return parameters
2db7dbd201fe3c70b1f593d27383d3422ad7342c | Python | humuhimi/wkwk_nlp | /Chapter2/excecise2_7__.py | UTF-8 | 1,507 | 3.40625 | 3 | [] | no_license | """
16. ファイルをN分割する
自然数Nをコマンドライン引数などの手段で受け取り,入力のファイルを行単位でN分割せよ.同様の処理をsplitコマンドで実現せよ.
1.分割するファイル数を入力
2.分割ファイル数より分割行数を計算する
3.リストから分割行分別ファイルに書き込み 書き込んだ分はリストから削除する
3.5リストのループを終わらせる。
4.最後分割数以下のファイルを全て書き出す。(残ったリストを全て書き出す)
"""
# import math
# with open(path) as file:
# lines = data.readlines()
# count = len(lines)
# unit = math.ceil(count / n)
# for i ,offset in enumerate(range(0,count,unit),1):
# with open('child_{02d}.txt'.format(i),mode='w') as f:
# for line in lines[offset:offset + unit]:
# f.write(line)
def make_list(path):
path_i = path
with open(path_i,'r') as I:
for i in I:
lists.append(i)
return lists;
# ファイル数と分割行数を決める
def decide_size():
N = int(input('分割ファイル数を入力してください:'))
# N1 = int(input('分割ファイル数を入力してください:'))
# N2 =
if __name__ == '__main__':
import string
a_Z = string.ascii_letters
path_I = "hightemp.txt"
path_O = "path_f_splited.txt"
lists = []
lists = make_list(path_I)
print(lists)
| true |
a239c5b64aedbbddc0506934e0719cc6f7b9133b | Python | zakkitao/python-turtle-for-fun | /用turtle画python的logo.py | UTF-8 | 1,684 | 3.125 | 3 | [] | no_license | import turtle as t
# Draw the word "python" plus a winding snake with turtle graphics.
t.getscreen().bgcolor("black")
t.setup(650,350,200,200)
t.pensize(15)
t.pencolor("yellow")
t.hideturtle()
t.speed(10)
# letter "p"
t.pu()
t.goto(-150,50)
t.pd()
t.seth(-90)
t.forward(110)
t.pu()
t.goto(-150,0)
t.seth(-30)
t.pd()
t.forward(15)
t.circle(25,120)
t.forward(30)
t.seth(110)
t.circle(25,120)
# letter "y"
t.pu()
t.goto(-70,55)
t.seth(-90)
t.pd()
t.forward(40)
t.circle(25,120)
t.pu()
t.goto(-25,55)
t.seth(-90)
t.pd()
t.fd(90)
t.seth(90)
t.circle(25,-40)
t.left(150)
t.forward(15)
# letter "t" (pen switches to white from here on)
t.pencolor("white")
t.pu()
t.goto(10,70)
t.seth(-90)
t.pd()
t.fd(80)
t.circle(5,120)
t.pu()
t.goto(0,55)
t.seth(0)
t.pd()
t.forward(20)
# letter "h"
t.pu()
t.goto(45,80)
t.pd()
t.seth(-90)
t.forward(95)
t.pu()
t.goto(45,30)
t.pd()
t.circle(25,-150)
t.seth(-90)
t.fd(56)
# letter "o" (a closed curve whose step length grows then shrinks)
t.pu()
t.goto(145,-15)
t.seth(-180)
t.pd()
foot = 1.2
for a in range(2):
    for i in range(60):
        if i<30:
            foot += 0.03
            t.right(3)
            t.fd(foot)
        else:
            foot -= 0.03
            t.right(3)
            t.fd(foot)
# letter "n"
t.pu()
t.goto(200,55)
t.seth(-90)
t.pd()
t.forward(70)
t.goto(200,30)
t.circle(25,-150)
t.seth(-90)
t.fd(58)
# the snake: alternating arcs form the wavy body, then the head
t.pu()
t.goto(0,-20)
t.seth(-40)
t.pd()
t.pencolor("#082E54")
t.circle(15,80)
t.circle(-15,80)
t.circle(15,80)
t.circle(-15,80)
t.circle(15,80)
t.circle(-15,80)
t.circle(15,80)
t.circle(-15,80)
t.circle(15,80)
t.circle(-15,80)
t.circle(15,80)
t.circle(-15,80)
t.circle(15,80)
t.circle(-15,80)
t.seth(0)
t.fd(15)
t.circle(10,180)
t.seth(-180)
t.fd(15)
t.pensize(1)
t.pencolor("black")
t.circle(1)
t.done() | true |
4ca0b6d157002a34a16a08333719560fa23f1785 | Python | iTrauco/list_exercises | /hello_python/loop_choose.py | UTF-8 | 574 | 3.8125 | 4 | [] | no_license | whole_game = True
while whole_game:
    stil_playing = True
    # Inner loop: keep asking until the player guesses the number 3.
    while stil_playing == True:
        try:
            user_number = int(input('Give me a number '))
            if user_number == 3:
                print('Well done')
                stil_playing = False
            else:
                print('Try again')
        except ValueError:
            # Non-numeric input: re-prompt instead of crashing.
            print('Please type a number. Thank you. ')
    to_continue = input('Would you like to keep playing? y/n ')
    # Any answer other than 'y'/'n' leaves whole_game unchanged (keeps playing).
    if to_continue == 'n':
        whole_game = False
    if to_continue == 'y':
whole_game = True | true |
942df48c51d298b43b216782297c6e23f9615c47 | Python | patilakshay227/Complex-Term-Project | /sectionExtract.py | UTF-8 | 1,749 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 8 20:45:43 2018
@author: Ashwin
"""
import simplejson as json
import pickle
import datetime
import sqlite3
import traceback
import os
import dateutil.parser as dp
# Remove any stale integrity log from a previous run (Python 2 script).
if os.path.exists("sectionintegritylog"):
    os.remove("sectionintegritylog")
inputFile = "/home/akshay/IIT KGP/SEM 2/Complex Network/Term Project/articleJSON.txt"
# Module-level DB handle and cursor shared by the functions below.
db = sqlite3.connect('../commentsData.db')
c = db.cursor()
c.execute('CREATE TABLE IF NOT EXISTS ArticleSection(id text,section text,PRIMARY KEY(id,section) ) ')
def writeArticleInDB(article):
    # Insert (article id, section name) into ArticleSection; articles with an
    # empty or missing section name are skipped.
    ID = article['_id']
    secName = article['section_name']
    sqlStat = "INSERT INTO ArticleSection VALUES(?,?)"
    if secName!=None and secName!='':
        c.execute(sqlStat,(ID,secName))
def parseFile():
    # Stream the article dump line by line; each line is one JSON API
    # response whose ['response']['docs'] entries are written to the DB.
    noOfLinesParsed = 0
    with open(inputFile) as f:
        for line in f:
            try:
                noOfLinesParsed += 1
                line = json.loads(line)
                for record in line['response']['docs']:
                    writeArticleInDB(record)
                if (noOfLinesParsed % 1000 == 0):
                    print "No of lines Parsed : ", noOfLinesParsed
            except sqlite3.IntegrityError as i:
                # Duplicate (id, section) primary key: record only the line number.
                with open("sectionintegritylog", "a") as ilog:
                    ilog.write("Line no : " + str(noOfLinesParsed) + "\n")
            except Exception as e:
                # Lines without a 'response' key are expected and ignored;
                # anything else is logged with a traceback.
                if e.message!='response':
                    with open("log", "a") as log:
                        log.write("Error on line " + str(noOfLinesParsed) + "\n")
                        log.write(traceback.format_exc())
    db.commit()
if __name__ == "__main__":
parseFile()
db.close()
| true |
de1784513fcb81341213164df6f9824bf80d74b7 | Python | compjour/compjour-class-site | /source/files/code/answers/json-quiz/5.py | UTF-8 | 833 | 3.125 | 3 | [] | no_license | import requests
import json
# Answers to a JSON-parsing quiz over a single archived tweet.
data_url = 'http://www.compjour.org/files/code/json-examples/single-tweet-librarycongress.json'
data = json.loads(requests.get(data_url).text)
print('A.', data['created_at'])
print('B.', data['user']['created_at'])
print('C.', data['text'])
print('D.', data['user']['screen_name'])
print('E.', data['id'])
print('F.', len(data['entities']['user_mentions']))
### For G.
hashtag_objs = data['entities']['hashtags']
hashtag_texts = []
for h in hashtag_objs:
    hashtag_texts.append(h['text'])
print('G.', ','.join(hashtag_texts))
# alternatively, you could also use the list comprehension syntax:
# hashtag_texts = [h['text'] for h in data['entities']['hashtags']]
### For H
urls = data['entities']['urls']
urltxts = []
for u in urls:
    urltxts.append(u['display_url'])
# BUG FIX: this is the answer for question H but was labelled 'G.'.
print('H.', ','.join(urltxts))
| true |
27c0e344ba4da1d22e7aac1c3f6b0a644982c19c | Python | PratikshaDanti/APS | /Code Library/111.Self_dividing_numbers.py | UTF-8 | 613 | 3.5625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 10:48:20 2020
@author: Pratiksha
"""
def selfDividingNumbers(left,right):
    """Return every number in [left, right] that is divisible by each of its
    own digits; numbers containing a 0 digit are excluded."""
    result = []
    for num in range(left, right + 1):
        digits = str(num)
        if all(d != '0' and num % int(d) == 0 for d in digits):
            result.append(num)
    return result
# Read the inclusive range as two integers on one line, e.g. "1 22".
a,b=map(int,input('Enter range : ').split())
arr=selfDividingNumbers(a,b)
print(arr) | true |
5604a9a252ac2f69d5160ba563dd8f7e4c222d98 | Python | dyf102/LC-daily | /binary-search/python/minimum-limit-of-balls-in-a-bag.py | UTF-8 | 726 | 3.4375 | 3 | [] | no_license | class Solution:
def minimumSize(self, nums: List[int], maxOperations: int) -> int:
"""LC 1760. Minimum Limit of Balls in a Bag
Args:
nums (List[int]): [description]
maxOperations (int): [description]
Returns:
int: [description]
"""
left = 1
right = 10**9
nums.sort(reverse=True)
n = len(nums)
while left < right:
middle = (left + right) // 2
count = 0
for num in nums:
count += (num - 1) // middle
if count > maxOperations:
left = middle + 1
else:
right = middle
return right | true |
4fa0eb2c306b95241dc939046e1801d0c203ea28 | Python | svmyhome/ToDoProg | /count_letter.py | UTF-8 | 311 | 3.578125 | 4 | [] | no_license | word_list = ['python', 'c++', 'c', 'scala', 'java']
l_task = input('Введите букву: ')
def count_letter(letter, word_list):
    """Print each word in ``word_list`` containing ``letter`` and return the count.

    BUG FIX: the original tested ``l_task in word`` (the module-level input
    value), silently ignoring the ``letter`` parameter, so calls with any
    other letter returned the wrong result.
    """
    result = 0
    for word in word_list:
        if letter in word:
            print(word)
            result += 1
    return result
print(count_letter(l_task, word_list))
| true |
da59716d09afbababa736a1b81d07fa783578749 | Python | Nesqwik/SVL | /TP3/carte.py | UTF-8 | 2,650 | 3.546875 | 4 | [] | no_license | import caisse
from caisse import SoldeInsuffisantError
from caisse import NbTicketInsuffisantError
class Carte:
    """Prepaid transit card holding a money balance and a number of ride tickets."""
    def __init__(self, solde, nb_ticket, prix_ticket):
        """Create a card with an initial balance, ticket count and ticket price."""
        self.m_solde = solde
        self.m_nb_tickets = nb_ticket
        self.m_prix_ticket = prix_ticket
    def nb_ticket(self):
        """Return the number of tickets left on the card."""
        return self.m_nb_tickets
    def solde(self):
        """Return the remaining money balance."""
        return self.m_solde
    def prix_ticket(self):
        """Return the price of a single ticket."""
        return self.m_prix_ticket
    def debiter(self, amount):
        """Withdraw ``amount`` from the balance.

        Raises ValueError for a non-positive amount and
        SoldeInsuffisantError when the balance is too low; withdrawing the
        exact balance is allowed.
        """
        if amount <= 0:
            raise ValueError
        if amount > self.m_solde:
            raise SoldeInsuffisantError
        self.m_solde -= amount
    def utiliser_ticket(self):
        """Consume one ticket; raises NbTicketInsuffisantError when none are left."""
        if not self.m_nb_tickets:
            raise NbTicketInsuffisantError
        self.m_nb_tickets -= 1
| true |
9d34133cc54538b8e471cf9cb837f72f02253c24 | Python | jennac/candidate_classifier | /splitfile.py | UTF-8 | 1,843 | 3.015625 | 3 | [] | no_license | from argparse import ArgumentParser
from csv import DictReader, DictWriter
def get_args():
    # Command line: -f input file name, -n number of output chunks,
    # -p optional directory containing the file.  (Python 2 script.)
    parser = ArgumentParser(description='splits infile into n smaller chunks')
    parser.add_argument('-f', '--filename',
                        help='file to import and split')
    parser.add_argument('-n', '--number',
                        help='number of output files to split into')
    parser.add_argument('-p', '--path',
                        help='path to file')
    return parser.parse_args()
def read_file(infile):
    # Return (fieldnames, rows-as-dicts) for a CSV file.  Mode 'rU'
    # (universal newlines) is Python 2 era - it was removed in Python 3.11,
    # so this file is tied to the py2 runtime it was written for.
    with open(infile, 'rU') as f:
        reader = DictReader(f)
        fields = reader.fieldnames
        data = [row for row in reader]
    return fields, data
def write_file(outfile, fields, data):
    # Write ``data`` (a list of dicts) to ``outfile`` as CSV with a header row.
    with open(outfile, 'w') as f:
        writer = DictWriter(f, fieldnames=fields)
        writer.writeheader()
        for d in data:
            writer.writerow(d)
def split():
args = get_args()
split_num = int(args.number)
infile = args.filename
if args.path:
path = args.path
else:
path = ''
# path = '/Users/jcolazzi/Dropbox/BIP Production/candidates/reports/social/'
# path = '/Users/jcolazzi/bip/candidate_classifier/web/srsplit/'
path = '/Users/jcolazzi/bip/candidate_classifier/twitter/srsplit/'
fields, data = read_file(path+infile)
# x and y are indices for the m sized split files
# R is the remainder and will be tacked on the final chunk
m = len(data) / split_num
R = len(data) % split_num
x = 0
y = x + m
print 'SPLITS WILL BE LEN {}'.format(m)
for i in range(split_num):
outfile = 'SPLIT_{}_{}'.format(i, infile)
write_file(path+outfile, fields, data[x:y])
x = y
y = x + m
if i == (split_num - 2):
y += R
if __name__ == '__main__':
split()
| true |
cfa81530171b7ca0c9d735e36ea8cab2bab61035 | Python | PeterKoka1/SP500_Twitter_Sentiment_Compiler | /daily_update.py | UTF-8 | 3,567 | 3.09375 | 3 | [] | no_license | """
The script that will update (i.e., append a new row) the SPX_sentiment.csv file based on today's date
"""
import requests
import bs4 as bs
import pickle
import datetime
import pandas as pd
import warnings
from twitter_scraper_api import SPX_Twitter_Scraper
class Daily_Update(object):
    """Appends today's per-ticker Twitter sentiment row to SPX_sentiment.csv."""
    def __init__(self):
        # Today's date doubles as the row index written by ticker_sentiment().
        self.today = datetime.datetime.now().date()
        self.csv_path = 'C:\\Users\\PeterKokalov\\lpthw\\SUMMER2018\\Twitter_SPX_sentiment\\SPX_sentiment.csv'
    def wiki_scrape(self):
        # Scrape the S&P 500 constituents table from Wikipedia and pickle both
        # the ticker symbols and the company names for later runs.
        r = requests.get(
            'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
        )
        soup = bs.BeautifulSoup(r.text, 'lxml')
        SP500 = soup.find(
            'table',
            {
                'class': 'wikitable sortable'
            }
        )
        tickers = []; full_names = []
        for ticker in SP500.find_all('tr')[1:]:
            tickers.append(ticker.find_all('td')[0].text)
            full_names.append(ticker.find_all('td')[1].text)
        with open('SP500tickers.pickle', 'wb') as f:
            pickle.dump(tickers, f)
        with open('SP500names.pickle', 'wb') as f:
            pickle.dump(full_names, f)
        return tickers
    def compile_pandas_dataframe(self, update):
        # Build an all-zero (date x ticker) frame covering the next 300 days
        # and save it as the sentiment CSV.  ``update=True`` rescrapes the
        # ticker list; otherwise the pickled list is reused.
        if update:
            tickers = self.wiki_scrape()
        else:
            with open('SP500tickers.pickle', 'rb') as f:
                tickers = pickle.load(f)
        todays_date = datetime.datetime.now().date()
        index = pd.date_range(todays_date + datetime.timedelta(), periods=300, freq='D')
        columns = tickers
        df_ = pd.DataFrame(index=index, columns=columns)
        df_ = df_.fillna(0)
        try:
            df_.to_csv(path_or_buf=self.csv_path)
            print("csv safed successfully")
        except Exception as e:
            print(e)
    def ticker_sentiment(self, day):
        # For each ticker/company-name pair, query sentiment percentages by
        # symbol and by name, sum the two, and store the dict in row ``day``.
        # NOTE(review): return_percentages appears to yield a 3-tuple of
        # positive/negative/neutral values, or 0 on no data - confirm in
        # twitter_scraper_api.
        with open('SP500tickers.pickle', 'rb') as t:
            tickers = pickle.load(t)
        with open('SP500names.pickle', 'rb') as n:
            company_names = pickle.load(n)
        df = pd.read_csv(self.csv_path, index_col=[0])
        df.index = pd.to_datetime(df.index)
        api = SPX_Twitter_Scraper()
        warnings.filterwarnings('ignore')
        for tick, name in zip(tickers, company_names):
            df_entry = {
                'positive_tweets': 0,
                'negative_tweets': 0,
                'neutral_tweets': 0
            }
            tick_sentiment = api.return_percentages(tick)
            name_sentiment = api.return_percentages(name)
            if tick_sentiment == 0:
                pass
            else:
                df_entry['positive_tweets'] += tick_sentiment[0]
                df_entry['negative_tweets'] += tick_sentiment[1]
                df_entry['neutral_tweets'] += tick_sentiment[2]
            if name_sentiment == 0:
                pass
            else:
                df_entry['positive_tweets'] += name_sentiment[0]
                df_entry['negative_tweets'] += name_sentiment[1]
                df_entry['neutral_tweets'] += name_sentiment[2]
            df.loc[day, tick] = [df_entry]
        df.to_csv(self.csv_path)
def main():
    # Entry point: append today's sentiment row.  The one-off setup steps
    # (scraping the ticker list, building the empty CSV) are kept commented.
    Daily_Update_api = Daily_Update()
    # Daily_Update_api.wiki_scrape()
    # Daily_Update_api.compile_pandas_dataframe(update=False)
    Daily_Update_api.ticker_sentiment(Daily_Update_api.today)
if __name__ == "__main__":
    main()
| true |
34666e6cbe79a31db8d854adf5bb45a39e932661 | Python | DiliaoM/metaphor-recognition | /sen-embedding.py | UTF-8 | 4,902 | 2.671875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 29 15:05:44 2018
@author: Miranda
"""
import tensorflow as tf
from numpy.random import RandomState
import xlrd
import matplotlib.pyplot as plt
import numpy as np
import os
import xlsxwriter
from tensorflow.contrib import rnn
# 重置
tf.reset_default_graph()
data_1 = xlrd.open_workbook(r'F:\大学\科研立项\18年3月\3-30\词向量_最终准备.xlsx')
# data_2 = xlrd.open_workbook(r'F:\大学\科研立项\18年3月\词向量10\Dataset2.xlsx')
table_1 = data_1.sheets()[0]
# table_2 = data_2.sheets()[0]
# Y = table.col_values(0)
X = []
Y = []
for i in range(table_1.nrows-1):
# for i in range(20000):
xx = table_1.row_values(i+1)
xx = xx[4:table_1.ncols]
X.append(xx)
yy = table_1.cell_value(i+1,2)
Y.append([yy,1-yy])
dataset_size = table_1.nrows
'''
X_test = []
Y_test = []
for i in range(20000):
xx = table_1.row_values(i+20001)
xx = xx[3:table_1.ncols]
X_test.append(xx)
yy = table_1.cell_value(i+20001,2)
Y_test.append([yy,1-yy])
rdm = RandomState(1)
dataset_size = 129
X = rdm.rand(dataset_size,1000)
# Y = [[int(x1 < 2)] for x1 in X]
Y = [[int(x1<0.5),1-int(x1<0.5)]for x1 in X[:,1]]
'''
# ---------------------------------------------------------------------
# Hyperparameters
lr = 0.001
training_iters = 1000
batch_size = 100
n_inputs = 100
n_steps = 10
n_hidden_units = 200
n_classes = 2
# tf Graph input
x = tf.placeholder(tf.float32,shape = (None,1000), name ='x-input')
y_ = tf.placeholder(tf.float32,shape = (None,2), name = 'y-input')
# Define weights
weights = {
# (10,200)
'in':tf.Variable(tf.random_normal([n_inputs,n_hidden_units])),
# (200,2)
'out': tf.Variable(tf.random_normal([n_hidden_units,n_classes]))
}
biases = {
'in':tf.Variable(tf.constant(0.1,shape=[n_hidden_units,])),
'out':tf.Variable(tf.constant(0.1,shape=[n_classes,]))
}
# def RNN(X,weights,biases):
# hidden layer for input to cell
# X (8 batch, 10 steps, 100 inputs)
# -->(8*10, 100 inputs)
X1 = tf.reshape(x,[-1,n_inputs])
# X_in ==> (8 batch* 10 steps, 200 hidden)
X_in = tf.matmul(X1,weights['in'])+biases['in']
# X_in ==> (8 batch, 10 steps, 200 hidden)
X_in = tf.reshape(X_in,[-1,n_steps,n_hidden_units])
# cell
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units,forget_bias=1.0,state_is_tuple=True,reuse=tf.AUTO_REUSE)
# lstm cell is divided into two parts (c_state,m_state)
_init_state = lstm_cell.zero_state(batch_size,dtype=tf.float32)
outputs,states = tf.nn.dynamic_rnn(lstm_cell,X_in,initial_state = _init_state,time_major=False)
# hidden layer for outputs as the final results
results = tf.matmul(states[1],weights['out'])+biases['out']
# return results
# pred = RNN(x,weights,biases)
pred = results
cost = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=pred,labels=y_))
# tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_))
train_op = tf.train.AdamOptimizer(lr).minimize(cost)
correct_pred = tf.equal(tf.argmax(pred,1),tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred,tf.float32))
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
step = 0
ac = []
for i in range(training_iters):
start = (i*batch_size) % dataset_size
end = min(start+batch_size,dataset_size)
if end == dataset_size:
start = -batch_size
accu = sess.run(accuracy,feed_dict = {x:X[start:end], y_ :Y[start:end]})
if i % 10 == 0:
ac.append(accu)
# Create the sen2vec
workbook = xlsxwriter.Workbook('sentence1.xlsx')
sheet1 = workbook.add_worksheet()
for i in range(dataset_size//batch_size +1 ):
start = (i*batch_size) % dataset_size
q = dataset_size - start
end = min(start+batch_size,dataset_size)
if end == dataset_size:
start = -batch_size
sen1 = sess.run(states,feed_dict = {x:X[start:end], y_ :Y[start:end]})
sen1 = np.array(sen1)
sen2 = sen1[1]
if q>= batch_size:
for j in range(batch_size):
for k in range(200):
sheet1.write(start+j,k,sen2[j,k])
else:
q = batch_size - q
for j in range(batch_size):
if q+j+1 >=batch_size:
break
for k in range(200):
sheet1.write(dataset_size-batch_size+q+j,k,sen2[q+j+1,k])
workbook.close()
accux = range(0, 100)
plt.plot(accux,ac,label="accuracy",color='r')
plt.xlabel('train step (unit:*100)')
plt.legend()
plt.show() | true |
b1f46446034177239f960268921d933eb471305f | Python | rivereasterling/Space-Invaders-with-a_Twist | /thetestCase.py | UTF-8 | 998 | 3.0625 | 3 | [] | no_license | import unittest
from Lab10 import Laser
from Lab10 import Player
from Lab10 import Ship
from Lab10 import Enemy
from Lab10 import collide
class All_Test(unittest.TestCase):
    """Smoke tests for the Space Invaders classes imported from Lab10.

    NOTE(review): unittest only discovers methods whose names start with
    'test'; apart from test_will_work, none of the methods below is ever
    run.  Rename them (e.g. test_laser) for them to take effect.  Also,
    each assertTrue is handed the class object itself, which is always
    truthy, so these checks cannot fail even when renamed.
    """
    def test_will_work(self):
        # Intentionally empty placeholder test that always passes.
        pass
    #the above test wouldn't work for me so I know that somethings up with how pycharm does unit tests in pygame
    # I tried doing unit test examples outside of pygame and they worked fine so I am completely stumped the above works when put outside pygame but won't even run a test when its in here.
    def Laser_test(self):
        self.assertTrue(Laser, "there are no lasers")
    def Ship_test(self):
        self.assertTrue(Ship, "There are no ships")
    def Enemy_test(self):
        self.assertTrue(Enemy, "There are no enemies on screen")
    def Player_test(self):
        self.assertTrue(Player, "The player isn't on screen")
    def Collide_Test(self):
        # NOTE(review): collide() is called with the class objects, not
        # instances -- verify against Lab10.collide's expectations.
        self.assertTrue(collide(obj1=Player,obj2= Enemy), "The Player can not collide with an enemie")
# Standard script entry point: run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| true |
fca1ac8f599bba174ecc1222b05cea867d669fa6 | Python | zhenchentl/Machine_Learning | /datatest20151218.py | UTF-8 | 486 | 2.546875 | 3 | [] | no_license | #coding=utf-8
# NOTE: displaying Chinese text still comes out garbled (mojibake) -- unresolved.
import pandas as pd
#from scipy import stats as ss
import matplotlib.pyplot as plt
# Python 2 script: uses the builtin file() and print statements.
csvfile=file('d:\Pycharm\\forpython.csv','rb')
df=pd.read_csv(csvfile)
# Quick exploratory dump of the data set.
print df.head()
print df.columns
print df.index
# NOTE(review): DataFrame.ix is deprecated -- use .loc/.iloc in modern pandas.
print df.ix[:].head(4)
print df.describe()
#print ss.ttest_1samp(a=df.ix[:,3],popmean=14000)
# Box plot of all numeric columns.
plt.show(df.plot(kind='box'))
#? pd.options.display.mpl_style='defeaut'
#? df.plot(kind='box')
| true |
d816cb5cbe65c466b8d40cc985720e415e787bf8 | Python | alineat/python-exercicios | /desafio051.py | UTF-8 | 416 | 4.21875 | 4 | [] | no_license | # Desenvolva um programa q leia o primeiro termo e a razão de uma PA. No final, mostre os 10 primeiros termos dessa
# progressão.
# Read the first term and common difference of an arithmetic progression
# (PA) and print its first 10 terms.
print('=' * 6, '10 TERMOS DE UMA PA', '=' * 6)
first_term = int(input('Primeiro termo: '))
difference = int(input('Razão: '))
# n-th term formula: a_n = a_1 + (n - 1) * d -> the 10th term here.
tenth_term = first_term + 9 * difference
for term in range(first_term, tenth_term + difference, difference):
    print(f'{term}', end=' -> ')
print('Acabou.')
| true |
960a7c00c4725b38e4923b85cc0d19629f4e5192 | Python | shhuan/algorithms | /leetcode/easy/CountandSay.py | UTF-8 | 1,262 | 3.6875 | 4 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
created by huash at 2015-04-11 19:11
The count-and-say sequence is the sequence of integers beginning as follows:
1, 11, 21, 1211, 111221, ...
1 is read off as "one 1" or 11.
11 is read off as "two 1s" or 21.
21 is read off as "one 2, then one 1" or 1211.
Given an integer n, generate the nth sequence.
Note: The sequence of integers will be represented as a string.
"""
__author__ = 'huash'
import sys
import os
class Solution:
    # @return a string
    def countAndSay(self, n):
        """Return the n-th term of the count-and-say sequence ('' if n < 1)."""
        if n < 1:
            return ''
        term = '1'
        for _ in range(n - 1):
            pieces = []
            start = 0
            while start < len(term):
                # Find the end of the current run of equal digits.
                stop = start
                while stop < len(term) and term[stop] == term[start]:
                    stop += 1
                # Emit "<count><digit>" for this run.
                pieces.append(str(stop - start))
                pieces.append(term[start])
                start = stop
            term = ''.join(pieces)
        return term
s = Solution()
for i in range(25):
print('f({}) = {}'.format(i, s.countAndSay(i))) | true |
67009179574febaacd2329500b1af39ca421c37b | Python | rasooll/Python-Learning | /mian-term/barname2-adad-aval.py | UTF-8 | 473 | 3.6875 | 4 | [] | no_license | # Type your code here
def list_of_primes(n):
    """Return all primes strictly below n, in increasing order."""
    def _is_prime(candidate):
        # Mirror the original contract: negatives/floats are normalised first.
        candidate = abs(int(candidate))
        if candidate < 2:
            return False
        if candidate == 2:
            return True
        if candidate % 2 == 0:
            return False
        # Trial division by odd numbers up to sqrt(candidate).
        divisor = 3
        while divisor * divisor <= candidate:
            if candidate % divisor == 0:
                return False
            divisor += 2
        return True
    return [value for value in range(1, n) if _is_prime(value)]
print(list_of_primes(99)) | true |
51c6c113c03407b6eccb2d59338fa25231ff138c | Python | sibis-platform/ncanda-data-integration | /datadict/datadict_update.py | UTF-8 | 5,598 | 2.8125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
# """
# Given a CSV with the current data dictionary and a list of patch files with
# updated / newly inserted variables, produce a full patched data dictionary.
# """
from __future__ import print_function
from __future__ import absolute_import
import sys
import pandas as pd
import csv
import argparse
from datadict_utils import load_datadict, insert_rows_at
parser = argparse.ArgumentParser(
description="Apply patches to the current data dictionary.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('-c', '--current',
help="CSV file with the current data dictionary",
action="store", required=True)
parser.add_argument('-o', '--output',
help="CSV file to write output in.",
action="store",
default=sys.stdout)
parser.add_argument('patch_files', help="CSV file(s) with patch for datadict",
nargs='+',
action="store")
parser.add_argument('-v', '--verbose',
help="Write to stdout what the script is doing",
action="store_true")
parser.add_argument('--update-only',
help="Do not add any new variables.",
action="store_true")
# TODO: Instead of "do not update", enhance logic to "do not overwrite"
parser.add_argument('--skip-branching',
help="Do not update branching logic information.",
action="store_true")
parser.add_argument('--skip-section-headers',
help="Do not update section headers.",
action="store_true")
parser.add_argument('--skip-field-notes',
help="Do not update field notes.",
action="store_true")
# TODO: Implement
parser.add_argument('--keep-options',
help=("Prevent the patch from downgrading Field Type to "
"text and/or removing options"),
action="store_true")
# TODO: Trimming options
args = parser.parse_args()
dd = load_datadict(args.current)
dd_columns = dd.columns.tolist() # To preserve order
# 0. For each patch file:
for patch_file in args.patch_files:
patch_df = load_datadict(patch_file, trim_all=True)
existing_rows = dd.index.intersection(patch_df.index)
new_rows = patch_df.index.difference(dd.index)
if args.verbose:
print("\nProcessing %s:" % patch_file.name)
print("Updating the following columns:")
print(existing_rows.tolist())
if args.update_only:
print("Ignoring the following new columns:")
else:
print("Inserting the following new columns:")
print(new_rows.tolist())
# 1. In the patch, find the entries that already exist and simply rewrite
# them
#
# TODO: Implement overwriting only a subset of values
overwrite_columns = set(dd.columns)
if args.skip_branching:
overwrite_columns = overwrite_columns - set(["Branching Logic (Show field only if...)"])
if args.skip_section_headers:
overwrite_columns = overwrite_columns - set(["Section Header"])
if args.skip_field_notes:
overwrite_columns = overwrite_columns - set(["Field Note"])
if len(existing_rows) > 0:
dd.loc[existing_rows, overwrite_columns] = patch_df.loc[existing_rows, overwrite_columns]
# 2. If there were new entries:
if (len(new_rows) > 0) and (not args.update_only):
# 2a. If there were existing entries, try smart placement of the new
# variables
if len(existing_rows) > 0: # Try smart placement of new entries
buffer_new = []
last_old = None
for colname, _ in patch_df.iterrows():
# Check if it's an existing row; if it is, mark it
if colname in existing_rows:
if len(buffer_new) > 0:
if last_old is None:
# We must insert before this variable
insert_before = True
else:
# We can insert after the last found variable
insert_before = False
# Insert buffer_new
dd = insert_rows_at(dd, colname,
patch_df.loc[buffer_new],
insert_before)
buffer_new = []
# Reset last_old
last_old = colname
else:
# It's a new one -> put it in the buffer
buffer_new.append(colname)
# 2b. If there were no already-existing entries, append the new entries
# to the end of the form (or whatever CLI says)
else: # No existing entries to append to
forms = patch_df['Form Name'].unique().tolist()
# Find the shared form name (if possible) and append to its end
for form in forms:
if dd['Form Name'].str.contains(form).any():
insertion_point = dd[dd['Form Name'] == form].index[-1]
else:
insertion_point = dd.index[-1]
dd = insert_rows_at(dd, insertion_point,
patch_df[patch_df['Form Name'] == form])
# Write out the updated data dictionary (with correctly ordered entries)
dd[dd_columns].to_csv(args.output, quoting=csv.QUOTE_NONNUMERIC)
| true |
e49083ded17e4a107419d1467593ba06b21d6387 | Python | finiteV/backuptool | /dbman.py | UTF-8 | 3,703 | 3.0625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import sqlite3
class DBMan():
    """SQLite-backed registry mapping backed-up files to their content hashes.

    Table ``filehash``: id, absname (absolute path), hash, status.
    Status codes: 0 = initial, 1 = newly added file, 2 = updated file,
    3 = existing and unchanged; entries still at 0 after a backup pass
    are considered stale and deleted.
    """
    def __init__(self,dbname):
        '''Open (or create) <dbname>.db and reset all statuses to 0.

        Run once per backup session.
        '''
        self.dbname = dbname+'.db';
        # check_same_thread=False: the connection is shared across threads.
        self.conn = sqlite3.connect(self.dbname,check_same_thread = False);
        self.conn.text_factory = bytes# avoid decode errors with Chinese file names
        #if not exits, create it
        #if not os.path.exists(self.dbname):
        ## 1. Create the table if it does not exist yet.
        sqls = ("create table if not exists filehash("
                "id integer not null primary key autoincrement,"
                "absname char(100) not null,"
                "hash char(200) not null,"
                ## status: 0 initial, 1 newly added, 2 updated, 3 unchanged;
                ## rows still at 0 after one backup pass get deleted.
                "status integer default 0"
                ")");
        self.conn.execute(sqls)
        self.conn.commit()
        ## 2. Reset every row's status for the new backup pass.
        sqls = "update filehash set status=0"
        self.conn.execute(sqls)
        self.conn.commit()
    def query(self,sha1str,path=''):
        '''Look up a file by hash (or by path when ``path`` is given).

        Returns the single-row result list when exactly one match exists,
        otherwise False.
        NOTE(review): SQL is built by string interpolation (injection-prone)
        and ``res`` is unbound if execute() raises -- consider parameterised
        queries.
        '''
        if path=='':
            sqls = 'select id,absname from filehash where hash = "%s"' % sha1str
        else:
            sqls = 'select id from filehash where absname = "%s"' % path
        try:
            cur = self.conn.cursor()
            cur.execute(sqls)
            res = cur.fetchall()
            cur.close()
        except Exception as e:
            print e
        if len(res)==1:
            return res
        else:
            return False
    def insert(self,abssrc,sha1str):
        '''Register a new file (status 1) with its absolute path and hash.'''
        # 1. path + file name
        sqls = 'insert into filehash(absname,hash,status) values ("%s","%s",%d)' % (abssrc,sha1str,1)
        try:
            self.conn.execute(sqls)
            self.conn.commit()
        except Exception as e:
            print e
    def update(self,iid,abssrc='',hash_str=''):
        '''Update the row ``iid`` depending on what changed.

        - hash_str given: content changed -> new hash, status 1.
        - neither given: file unchanged -> status 3.
        - abssrc given: file moved (content unchanged) -> new path, status 2.
        Always returns True, even after a printed error.
        '''
        if hash_str!='':
            sqls = 'update filehash set hash="%s",status=%d where id=%d' % (hash_str,1,iid)
        elif abssrc=='':## file already exists unchanged: just refresh status
            sqls = 'update filehash set status=%d where id=%d' % (3,iid)
        else:## file moved/renamed, content unchanged
            sqls = 'update filehash set absname="%s",status=%d where id=%d' % (abssrc,2,iid)
        try:
            self.conn.execute(sqls)
            self.conn.commit()
        except Exception as e:
            print e
        return True
    def query_rbsh(self):
        '''Return stale rows (status still 0) as (id, absname) tuples.

        Returns False when there are none.  Run once per backup pass.
        '''
        sqls = 'select id,absname from filehash where status = 0'
        cur = self.conn.cursor()
        cur.execute(sqls)
        res = cur.fetchall()
        cur.close()
        if len(res)>0:
            return res
        else:
            return False
    def delete(self,iid):
        '''Delete the row ``iid`` (a file that no longer exists on disk).'''
        sqls = 'delete from filehash where id = %d' % iid
        try:
            self.conn.execute(sqls)
            self.conn.commit()
        except Exception as e:
            print e
        return True
    def __del__(self):
        # Close the connection when the manager is garbage collected.
        self.conn.close()
if __name__=='__main__':
dbman = DBMan()
#dbman.insert('/test', '11111111')
#print dbman.query('11111112')
print dbman.query_rbsh()
#print dbman.update(2)
#print dbman.delete(3)
| true |
72382a9c63d92b144ca635258abd48e14b221c5e | Python | drtierney/hyperskill-EasyRiderBusCompany-Python | /Easy Rider Bus Company/task/easyrider/easyrider.py | UTF-8 | 2,142 | 2.953125 | 3 | [
"MIT"
] | permissive | import json
import re
# Easy Rider bus company validator: reads one line of JSON from stdin
# (a list of stop records) and reports the number of stops per bus line.
# Stage-1/2 error counters are still computed but their reports are
# commented out below.
all_keys = ['bus_id', 'stop_id', 'stop_name', 'next_stop', 'stop_type', 'a_time']
format_keys = ['stop_name', 'stop_type', 'a_time']
type_error_counter = dict.fromkeys(all_keys, 0)
format_error_counter = dict.fromkeys(format_keys, 0)
# NOTE(review): bus_id_values is never used in this stage.
bus_id_values = {128, 256, 512}
# Capitalised word(s) ending in a valid suffix, e.g. "Elm Street".
stop_name_regex = r'[A-Z]\w+\s?\w+?\s(Road|Avenue|Boulevard|Street)$'
# Valid stop types: Start, On-demand, Final, or empty.
stop_type_values = {'S', 'O', 'F', ''}
# 24-hour HH:MM time.
a_time_regex = r"([01]\d|2[0-3]):([0-5]\d)$"
bus_lines = dict()
data = json.loads(input())
for entry in data:
    bus_id = entry['bus_id']
    stop_id = entry['stop_id']
    stop_name = entry['stop_name']
    next_stop = entry['next_stop']
    stop_type = entry['stop_type']
    a_time = entry['a_time']
    # Stage 1 - Type and Required
    if type(bus_id) != int:
        type_error_counter['bus_id'] += 1
    if type(stop_id) != int:
        type_error_counter['stop_id'] += 1
    if type(stop_name) != str:
        type_error_counter['stop_name'] += 1
    if type(next_stop) != int:
        type_error_counter['next_stop'] += 1
    if type(stop_type) != str:
        type_error_counter['stop_type'] += 1
    if type(a_time) != str:
        type_error_counter['a_time'] += 1
    # Stage 2 - Format
    if not re.match(stop_name_regex, stop_name):
        format_error_counter['stop_name'] += 1
    if entry['stop_type'] not in stop_type_values:
        format_error_counter['stop_type'] += 1
    if not re.match(a_time_regex, entry['a_time']):
        format_error_counter['a_time'] += 1
    # Stage 3 - Bus line info
    if bus_id in bus_lines.keys():
        bus_lines[bus_id] += 1
    else:
        bus_lines[bus_id] = 1
# type_error_count = sum(type_error_counter.values())
# print(f"Type and required field validations: {format_error_count} errors")
# for key in all_keys:
#     print(f"{key}: {format_error_counter[key]}")
# format_error_count = sum(format_error_counter.values())
# print(f"Format validations: {format_error_counter} errors")
# for key in format_keys:
#     print(f"{key}: {format_error_counter[key]}")
print("Line names and number of stops:")
for k in bus_lines:
    print(f"bus_id: {k}, stops: {bus_lines[k]}")
| true |
8767d2baae90d75e3fa37c7f86c411686a676e5c | Python | thisisparthjoshi/Basic-Programs | /factorial.py | UTF-8 | 187 | 3.890625 | 4 | [] | no_license | n=int(input("Please enter any number here--"))
# Compute n! without destroying n.  The previous version multiplied by n
# while counting n itself down to 1, so the final "Value of N" line
# always printed 1 instead of the number the user entered.
Prod = 1
for i in range(2, n + 1):
    Prod = Prod * i
print(" The factorial is:", Prod)
print(" The Value of N is:", n)
| true |
cc57d1fb8d19c971fb5e7be5093240e22ca0e7cd | Python | vincentmartin/m1la-python | /c3_3_fonctions.py | UTF-8 | 3,835 | 4.125 | 4 | [] | no_license | # -*-coding:utf-8 -*
'''
Created on 13 mars 2014
@author: vincent
Course 3: Functions.  (Python 2 teaching script; user-facing strings
are intentionally kept in French.)
'''
#######################################
# 1. Defining and using functions.
#######################################
# Definition of a function that prints a greeting.
# 0 parameters, no return value.
def bonjour():
    '''This is the function's documentation (docstring).'''
    print "Bonjour"
# Usage
bonjour()
# Definition of a function taking one parameter.
def bonjour_a_vous(nom):
    '''Function printing a greeting followed by the name.'''
    print "Bonjour " + nom
bonjour_a_vous("Toto")
# EXERCISE 1: Write a function taking your birth date as a parameter and
# printing your age.  Test it by calling it.
#######################################
# 2. Return values.
#######################################
# Function returning a value.
def retourner_bonjour_a_vous(nom):
    '''Function returning a greeting followed by the name.'''
    return ("Bonjour " + nom)
retourner_bonjour_a_vous("Tata") # the function prints nothing, as expected: no 'print' inside it.
# Capture the function's return value in a variable.
texte = retourner_bonjour_a_vous("Tata")
print texte
def retourner_bonjour_nom_prenom(prenom, nom):
    '''Function returning a greeting followed by first and last name.'''
    return ("Bonjour " + prenom + " " + nom)
texte = retourner_bonjour_nom_prenom("Franck", "Dupond")
print texte
# EXERCISE 2: Change the function from EXERCISE 1 so that it returns your age.
print "\n"
#######################################
# 3. Default parameter values.
#######################################
def afficher_table_multiplication(n, debut=1, fin=9):
    '''Print the multiplication table of n, from debut to fin inclusive.'''
    compteur = debut
    while compteur <= fin:
        print str(n) + " * " + str(compteur) + " = " + str(n * compteur)
        compteur += 1
afficher_table_multiplication(2) # multiples of 2, from 1 up to 9
print "\n"
afficher_table_multiplication(2, 2) # multiples of 2, from 2 up to 9
print "\n"
afficher_table_multiplication(2, fin=15) # multiples of 2, from 1 up to 15
print "\n"
#######################################
# 4. Beware of parameter types.
#######################################
# The program crashes if the following line is uncommented.
#texte = retourner_bonjour_nom_prenom(2, "Dupond")
def retourner_bonjour_nom_prenom2(prenom, nom):
    '''Greeting function with an explicit type check on its parameters.'''
    if type(prenom) != str or type(nom) != str:
        print "Les paramètres nom et prenom doivent etre de type string (str)"
    else:
        return ("Bonjour " + prenom + " " + nom)
# The program no longer crashes and reports the error to the user instead.
# Later we will see a more elegant way to catch errors via exception handling.
texte = retourner_bonjour_nom_prenom2(2, "Dupond")
print "\n"
#######################################
# 5. Using one function inside another.
#######################################
# Define a first function.
def verifier_parametres(p1, p2):
    '''Check that both parameters are of type string (str).'''
    if type(p1) != str or type(p2) != str:
        return False
    return True
# Then use it inside a second one.
def retourner_bonjour_nom_prenom3(prenom, nom):
    '''Function returning a greeting followed by the name.'''
    params_ok = verifier_parametres(prenom, nom) # call the verifier_parametres function
    if params_ok == False:
        print "Les paramètres nom et prenom doivent etre de type string (str)"
    else:
        return ("Bonjour " + prenom + " " + nom)
print retourner_bonjour_nom_prenom3("Marie", 8)
print retourner_bonjour_nom_prenom3("Marie", "Durand") | true |
5d6aa84e048957f706f197d529c1d80928700547 | Python | gregnordin/parampool | /parampool/utils/__init__.py | UTF-8 | 7,305 | 2.78125 | 3 | [] | no_license | def legal_variable_name(name):
"""
Make a variable name from the string name.
Replace space by underscore and remove all illegal
characters in a Python variable name.
"""
var_name = name.replace(' ', '_').replace('/', '__')
for char in r'''[]{}\^%$#@!+-<>?|'"=~`,.;:''':
if char in var_name:
var_name = var_name.replace(char, '')
for char in var_name:
if ord(char) > 127: # remove non-ascii characters
var_name = var_name.replace(char, '')
return var_name
def save_png_to_str(plt, plotwidth=400):
    """
    Given a matplotlib.pyplot object plt, the current figure
    is saved to a PNG string which is embedded in an HTML
    image tag and returned.

    NOTE(review): Python 2 only -- relies on StringIO.StringIO and its
    .buf attribute; under Python 3 this would be io.BytesIO/.getvalue().
    """
    from StringIO import StringIO
    figfile = StringIO()
    # Render the current figure into the in-memory buffer as PNG.
    plt.savefig(figfile, format='png')
    figfile.seek(0) # rewind to beginning of file
    figdata_png = figfile.buf # extract string
    import base64
    # Base64-encode so the image can be inlined as a data: URI.
    figdata_png = base64.b64encode(figdata_png)
    html_text = '<img src="data:image/png;base64,%(figdata_png)s" width="%(plotwidth)s">' % vars()
    return html_text
def fenicsxml2pool(xmlfile, pool=None):
    """Return a Pool tree from an XML file with FEniCS parameters.

    Walks the XML recursively: each <parameters name=...> element becomes
    a subpool, each <parameter key/value/type> becomes a data item.  When
    ``pool`` is given, menus are added at the pool's current position;
    otherwise a fresh Pool rooted at 'Main menu' is created.
    """
    if pool is None:
        from parampool.pool.Pool import Pool
        pool = Pool()
        pool.subpool('Main menu')
    # else: add menus wherever we are in a pool tree
    import xml.etree.ElementTree as ET
    tree = ET.parse(xmlfile)
    root = tree.getroot()
    def iterate(element):
        # Depth-first traversal; mirrors the XML nesting in the pool.
        if element.tag == 'parameters':
            # New subpool of parameters
            pool.subpool(element.attrib['name'])
        elif element.tag == 'parameter':
            # Add data item
            value = element.attrib['value']
            if value == '':
                value = 'emptystring' # just a magic code, the value gets transformed back to '' in set_fenics_prm
            widget = 'textline'
            # NOTE(review): an empty value with type double/int would make
            # str2type('emptystring') raise below -- confirm inputs never
            # combine empty values with numeric types.
            if element.attrib['type'] == 'double':
                str2type = float
                value = str2type(value)
                widget = 'float'
            elif element.attrib['type'] == 'int':
                str2type = int
                value = str2type(value)
                widget = 'integer'
            elif element.attrib['type'] == 'string':
                str2type = str
                value = str2type(value)
            elif element.attrib['type'] == 'bool':
                value = value.capitalize() # True/False, not true/false
                # Short call to make sure bools are correctly handled
                pool.add_data_item(
                    name=element.attrib['key'],
                    default=value,
                    widget='checkbox')
            else:
                raise ValueError('Not impl element.attrib["type"]=%s' % element.attrib['type'])
            if element.attrib['type'] != 'bool':
                pool.add_data_item(
                    name=element.attrib['key'],
                    default=value,
                    str2type=str2type,
                    widget=widget)
        for child in element:
            iterate(child)
        # Leaving a <parameters> element: step back up one subpool level.
        if element.tag == 'parameters' and \
           pool.get_current_subpool().name != 'Main menu':
            pool.change_subpool('..')
    iterate(root)
    return pool
def set_dolfin_prm(path, level, item, fenics_parameters):
    """
    Fill parameters dict in FEniCS from a leaf in the Pool tree.
    Callback function for leaf in Pool tree in a FEniCS program:
    pool.traverse(set_fenics_prm, user_data=fenics.parameters).

    Only values that differ from the item's recorded default are written
    back; parameters unknown to FEniCS are silently skipped.
    """
    submenu = path[2:] # drop considering Main menu, dolfin
    value = item.get_value()
    changed_value = False
    # Booleans arrive as the strings 'True'/'False'; compare against the
    # default before converting to a real bool.
    if value in ('False', 'True'):
        if value != item.data['default']:
            changed_value = True
        value = value == 'True'
    if value == 'emptystring': # code for empty string that GUIs don't like
        value = ''
        changed_value = False
    if (not isinstance(value, bool)) and value != item.data['default']:
        #print('parameter %s changed from %s to %s' % (item.name, item.data['default'], value))
        changed_value = True
    if not changed_value:
        return
    # Write into the (possibly nested) FEniCS parameters dict; the nesting
    # depth follows the remaining menu path.
    if len(submenu) == 0:
        try:
            fenics_parameters[item.name] = value
        except KeyError:
            pass # user's parameter, not in FEniCS's parameters
    elif len(submenu) == 1:
        try:
            fenics_parameters[submenu[0]][item.name] = value
        except KeyError:
            pass # user's parameter, not in FEniCS's parameters
    elif len(submenu) == 2:
        try:
            fenics_parameters[submenu[0]][submenu[1]][item.name] = value
        except KeyError:
            pass # user's parameter, not in FEniCS's parameters
    elif len(submenu) == 3:
        try:
            fenics_parameters[submenu[0]][submenu[1]][submenu[2]][item.name] = value
        except KeyError:
            pass # user's parameter, not in FEniCS's parameters
    else:
        raise ValueError('FEniCS XML parameter trees are not so deeply nested')
def pydiff(text1, text2, text1_name='text1', text2_name='text2',
           prefix_diff_files='tmp_diff', n=3):
    """
    Use Python's ``difflib`` module to compute the difference
    between strings `text1` and `text2`.

    Produce text and html diff in files with `prefix_diff_files`
    as prefix. The `text1_name` and `text2_name` arguments can
    be used to label the two texts in the diff output files.
    No files are produced if the texts are equal.

    Returns True when the texts differ (and files were written),
    otherwise False.
    """
    if text1 == text2:
        return False
    # Else:
    import difflib
    text1_lines = text1.splitlines()
    text2_lines = text2.splitlines()
    diff_html = difflib.HtmlDiff().make_file(
        text1_lines, text2_lines, text1_name, text2_name,
        context=True, numlines=n)
    diff_plain = difflib.unified_diff(
        text1_lines, text2_lines, text1_name, text2_name, n=n)
    # Use context managers so the files are closed even if writing fails
    # (the original left handles open until garbage collection).
    with open(prefix_diff_files + '.txt', 'w') as f:
        # unified_diff yields lines without trailing newlines when the
        # inputs were split with splitlines(); add them back explicitly.
        f.writelines(line + '\n' for line in diff_plain)
    with open(prefix_diff_files + '.html', 'w') as f:
        f.writelines(diff_html)
    return True
def assert_equal_text(text1, text2,
                      text1_name='text1', text2_name='text2',
                      prefix_diff_files='tmp_diff',
                      msg=''):
    """Assert the two texts are equal; on failure point at the diff files."""
    if msg:
        # Terminate the user-supplied message with punctuation, then break
        # the line before the standard hint.
        if msg[-1] not in ('.', '?', ':', ';', '!'):
            msg = msg + '.'
        msg = msg + '\n'
    msg = msg + 'Load tmp_diff.html into a browser to see differences.'
    texts_differ = pydiff(text1, text2, text1_name, text2_name,
                          prefix_diff_files, n=3)
    assert not texts_differ, msg
def assert_equal_files(file1, file2,
                       text1_name='text1', text2_name='text2',
                       prefix_diff_files='tmp_diff',
                       msg=''):
    """Assert that the contents of two files are equal.

    See :func:`assert_equal_text` for the failure behaviour.
    NOTE(review): as in the original, the file names (not text1_name /
    text2_name) label the texts in the diff output.
    """
    # Read via context managers so the handles are closed promptly
    # (the original left both files open until garbage collection).
    with open(file1, 'r') as f:
        text1 = f.read()
    with open(file2, 'r') as f:
        text2 = f.read()
    assert_equal_text(text1, text2,
                      text1_name=file1, text2_name=file2,
                      prefix_diff_files=prefix_diff_files,
                      msg=msg)
| true |
6aa9fda03c5980cad1529b6f2f5bebef07fc38db | Python | YuriyKozhev/PythonCourse | /Lecture_8/fast_mode.py | UTF-8 | 1,285 | 3.453125 | 3 | [
"MIT"
] | permissive | import itertools
def puzzle_input() -> dict:
    """Parse input.txt lines of the form 'depth: range' into a dict."""
    scanners = {}
    with open('input.txt') as fh:
        for line in fh:
            depth, scan_range = (int(part) for part in line.strip().split(': '))
            scanners[depth] = scan_range
    return scanners
def calc_scanner_pos(scanner_height: int, time_step: int) -> int:
    """Position of a back-and-forth scanner of the given range at time_step.

    The scanner sweeps 0 .. scanner_height-1 and back again: a triangle
    wave with period 2 * (scanner_height - 1).
    """
    period = 2 * (scanner_height - 1)
    phase = time_step % period
    if phase > scanner_height - 1:
        # Second half of the cycle: the scanner is on its way back up.
        phase = period - phase
    return phase
def caught_crossing(firewall: dict, width: int, t_start: int) -> bool:
    """True if a packet entering at t_start is caught at any layer 0..width-1.

    The packet reaches layer ``pos`` at time ``t_start + pos`` and is
    caught whenever that layer's scanner sits at position 0.
    """
    return any(
        calc_scanner_pos(firewall[layer], t_start + layer) == 0
        for layer in range(width)
        if layer in firewall)
def find_start(firewall: dict) -> int:
    """Smallest start delay for which the packet crosses without being caught."""
    width = max(firewall) + 1
    delay = 0
    while caught_crossing(firewall, width, delay):
        delay += 1
    return delay
print(find_start(puzzle_input())) | true |