| blob_id (stringlengths 40–40) | language (stringclasses 1 value) | repo_name (stringlengths 5–133) | path (stringlengths 2–333) | src_encoding (stringclasses 30 values) | length_bytes (int64 18–5.47M) | score (float64 2.52–5.81) | int_score (int64 3–5) | detected_licenses (listlengths 0–67) | license_type (stringclasses 2 values) | text (stringlengths 12–5.47M) | download_success (bool 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
bfc9e074c0972cbde593666e2d08a3a0d32ede2e
|
Python
|
luizfirmino/python-labs
|
/Python II/Assignment 4/210426_Filho_Luiz_q6.py
|
UTF-8
| 2,303
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env python3
# Assignment 4 - Q6
# Author: Luiz Firmino
# Scope
# In the accounts.txt file:
# update the name 'Zoltar' to 'Robert'
# create a tempfile with the new data
# remove accounts.txt file from the directory
# rename the tempfile to a new file called myaccounts.txt
import os
from os import path
# CONST
CONST_WORKING_DIR = os.path.dirname(os.path.abspath(__file__))
CONST_ACCOUNTS_FILE = "accounts.txt"
CONST_TEMP_FILE ="_temp.txt"
def renameFile(filePath, newName):
if path.exists(filePath):
os.rename(filePath, newName)
else:
print("Cannot rename it! File or path does not exist\n")
def deleteFile(filePath):
if path.exists(filePath):
os.remove(filePath)
else:
print("Cannot delete! File or path does not exist\n")
def main():
# check if accounts file exists
if path.exists(os.path.join(CONST_WORKING_DIR, CONST_ACCOUNTS_FILE)):
print(CONST_ACCOUNTS_FILE, "exists")
# open Accounts.txt file
f = open(os.path.join(CONST_WORKING_DIR, CONST_ACCOUNTS_FILE), "r")
print(CONST_ACCOUNTS_FILE, "opened")
# open temp file
f_temp = open(os.path.join(CONST_WORKING_DIR, CONST_TEMP_FILE), "w")
print(CONST_TEMP_FILE, "created")
        file1 = f.read().splitlines()  # splitlines() avoids writing a spurious trailing blank line
for line in file1:
if "Zoltar" in line:
data = line.replace('Zoltar', 'Robert')
print(line, " replaced by ", data)
f_temp.write(data + '\n')
print(data, " written in", CONST_TEMP_FILE)
else:
f_temp.write(line + '\n')
print(line, " written in", CONST_TEMP_FILE)
# close files
f.close()
print(CONST_ACCOUNTS_FILE, "closed")
f_temp.close()
print(CONST_TEMP_FILE, "closed")
# delete file Accounts.txt
deleteFile(os.path.join(CONST_WORKING_DIR, CONST_ACCOUNTS_FILE))
print(CONST_ACCOUNTS_FILE, "deleted")
# rename temp file
        renameFile(os.path.join(CONST_WORKING_DIR, CONST_TEMP_FILE), os.path.join(CONST_WORKING_DIR, "myaccounts.txt"))
        print(CONST_TEMP_FILE, "renamed to", "myaccounts.txt")
else:
print("File", CONST_ACCOUNTS_FILE, "does not exist!")
if __name__ == "__main__":
main()
| true
|
00a858882440a44a7e5ffaace7db2e414ad7020b
|
Python
|
zachwill/cookiecutter-scrapy
|
/{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/pipelines.py
|
UTF-8
| 840
| 2.78125
| 3
|
[] |
no_license
|
"""
Save ModelItem's to a local SQLite database.
"""
from collections import defaultdict
from peewee import chunked
from {{cookiecutter.repo_name}}.items import ModelItem
class ModelPipeline(object):
"The pipeline stores scraped data in a database."
def open_spider(self, spider):
self.models = defaultdict(list)
def close_spider(self, spider):
if len(self.models):
for model_name, list_of_models in self.models.items():
model = list_of_models[0].model
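                # peewee's chunked() yields the rows in batches of 25, keeping each
                # bulk INSERT comfortably under SQLite's bound-parameter limit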
for batch in chunked(list_of_models, 25):
model.insert_many(batch).on_conflict_replace().execute()
def process_item(self, item, spider):
if isinstance(item, ModelItem):
model_name = type(item)
self.models[model_name].append(item)
return item
| true
|
955c1ef179e3b8ab274f94fc55e3cca2d281eba2
|
Python
|
bianchimro/genepi
|
/genepi/tests/unit_tests/ga_test.py
|
UTF-8
| 4,563
| 2.640625
| 3
|
[] |
no_license
|
import os
import unittest
from genepi.core.ga import GeneticAlgorithm
from genepi.core.gene import IntGene
from genepi.core.protogene import ProtoGene
from genepi.core.protogenome import ProtoGenome
from genepi.core.population import POPULATION_DEFAULT_SIZE
from genepi.core.stopcriteria import convergence_stop, raw_score_stop
from genepi.cache.base import NoCache
from genepi.storage.sqlite import SqliteStorage
def fitness_evaluator(genome):
v = genome.get_value('a')
return v
class GeneticAlgorithmTest(unittest.TestCase):
def setUp(self):
self.protogene_a = ProtoGene(IntGene, 'a', min_value=0, max_value=100)
self.protogenome = ProtoGenome([self.protogene_a])
def tearDown(self):
pass
def test_init(self):
algo = GeneticAlgorithm(self.protogenome, fitness_evaluator, cache_instance=NoCache())
assert algo.population.size == POPULATION_DEFAULT_SIZE
def test_initialize(self):
algo = GeneticAlgorithm(self.protogenome, fitness_evaluator)
algo.initialize()
def test_should_terminate(self):
protogene = ProtoGene(IntGene, 'a', min_value=0, max_value=100, value=1)
protogenome = ProtoGenome([protogene], mutation_probability=1)
algo = GeneticAlgorithm(protogenome, fitness_evaluator, termination_criteria=convergence_stop)
algo.initialize()
for individual in algo.population.individuals:
individual.score = 1
for x in range(11):
stats = {'min_score' : 0}
algo.population_stats.append(stats)
algo.generation = 10
st = algo.should_terminate()
assert st == True
algo.generation = 1
st = algo.should_terminate()
assert st == False
def test_evaluate_population(self):
protogene = ProtoGene(IntGene, 'a', min_value=0, max_value=100)
protogenome = ProtoGenome([protogene])
algo = GeneticAlgorithm(protogenome, fitness_evaluator, termination_criteria=convergence_stop)
algo.initialize()
algo.evaluate_population()
for individual in algo.population.individuals:
assert individual.score == fitness_evaluator(individual)
def test_best_individual(self):
protogene = ProtoGene(IntGene, 'a', min_value=0, max_value=100)
protogenome = ProtoGenome([protogene])
algo = GeneticAlgorithm(protogenome, fitness_evaluator, termination_criteria=convergence_stop)
algo.initialize()
for i, individual in enumerate(algo.population.individuals):
individual.set_value('a', i)
algo.evaluate_population()
bi = algo.best_individual()
assert bi.score == 0
def test_evolve_population(self):
protogene = ProtoGene(IntGene, 'a', min_value=0, max_value=10)
protogenome = ProtoGenome([protogene])
algo = GeneticAlgorithm(protogenome, fitness_evaluator, termination_criteria=convergence_stop)
algo.initialize()
algo.evaluate_population()
g1 = algo.generation
algo.evolve_population()
g2 = algo.generation
assert g1 == 0 and g2 == 1
def test_evolve(self):
protogene = ProtoGene(IntGene, 'a', min_value=0, max_value=10)
protogenome = ProtoGenome([protogene], mutation_probability=0.1)
algo = GeneticAlgorithm(
protogenome,
fitness_evaluator,
termination_criteria=[raw_score_stop,convergence_stop],
termination_criteria_options = [{'stop_score':0}]
)
algo.evolve()
def test_evolve_storage(self):
storage_instance = SqliteStorage("test.sqlite")
protogene = ProtoGene(IntGene, 'a', min_value=0, max_value=10)
protogenome = ProtoGenome([protogene], mutation_probability=0.1)
algo = GeneticAlgorithm(protogenome, fitness_evaluator,
termination_criteria=convergence_stop, storage_instance=storage_instance)
algo.evolve()
def test_evolve_2(self):
protogene = ProtoGene(IntGene, 'a', min_value=0, max_value=100)
protogenome = ProtoGenome([protogene], mutation_probability=0.1)
algo = GeneticAlgorithm(
protogenome,
fitness_evaluator,
termination_criteria=[raw_score_stop,convergence_stop],
termination_criteria_options = [{'stop_score':0},{'num_generations':20}]
)
algo.evolve()
| true
|
7fe18b72b214b314ef9865d166236751a3d901e1
|
Python
|
suchita-25/ameritradestockdata
|
/streamingdatatodb.py
|
UTF-8
| 2,475
| 2.84375
| 3
|
[] |
no_license
|
"""
This program gets the streaming Data from TD Ameritrade and store in Timescale DB
"""
import asyncio
import psycopg2
from tda.auth import easy_client
from tda.streaming import StreamClient
import pandas as pd
import config
CONN = psycopg2.connect(host=config.DB_HOST,
database=config.DB_NAME,
user=config.DB_USER,
password=config.DB_PASS)
CURSOR = CONN.cursor()
CLIENT = easy_client(
api_key=config.api_key,
redirect_uri=config.redirect_uri,
token_path=config.token_path)
STREAM_CLIENT = StreamClient(CLIENT, account_id=config.account_id)
def order_book_handler(msg):
"""
This is the Message Handler, and store streaming Data in Timescale DB
"""
count = len(msg['content'])
CONN.commit()
try:
for i in range(count):
dict2 = {**msg['content'][i]}
# print(dict2)
cols = dict2.keys()
cols_str = ','.join(cols)
vals = [dict2[k] for k in cols]
# print("Vals", vals)
vals_str = ','.join(["%s" for j in range(len(vals))])
sql_str = """INSERT INTO stocksdata({0},{1}) VALUES ({2}, {3})""" \
.format('timestamp', cols_str, msg['timestamp'], vals_str)
# print(sql_str)
CURSOR.execute(sql_str, vals)
except KeyboardInterrupt:
print('Halted')
CONN.commit()
CONN.commit()
async def read_stream():
"""
This method reads the input csv file and creates the streaming connection to TD Ameritrade.
"""
await STREAM_CLIENT.login()
await STREAM_CLIENT.quality_of_service(StreamClient.QOSLevel.EXPRESS)
# Always add handlers before subscribing because many streams start sending
# data immediately after success, and messages with no handlers are dropped.
# stream_client.add_nasdaq_book_handler(
# lambda msg: print(json.dumps(msg, indent=4)))
# await stream_client.nasdaq_book_subs(['GOOG']) #Nasdaq
# await stream_client.listed_book_subs(['GOOG']) #NYSE Bid & ASk
ftp1 = pd.read_csv(config.ticker_path, usecols=['Symbol'])
list1 = ftp1.Symbol.to_list()
print(list1)
    STREAM_CLIENT.add_level_one_equity_handler(order_book_handler)
    await STREAM_CLIENT.level_one_equity_subs(list1)
# stream_client.add_listed_book_handler(order_book_handler)
while True:
await STREAM_CLIENT.handle_message()
asyncio.run(read_stream())
| true
|
4f6f43c5d2160af6aaa98cfe9a42ccbe0990aa9b
|
Python
|
ucfilho/Metodos_Numericos_2021
|
/GaussSeidel.py
|
UTF-8
| 1,356
| 3.046875
| 3
|
[] |
no_license
|
import numpy as np
############################################################
## Implementation of the Gauss Seidel algorithm
## A matrix of the linear system
## f right hand side
## x0 initial guess of the solution
############################################################
def gauss_seidel(A,f,x0,ITER_MAX = 100, tol = 1E-8,_debug_=1):
# size of the system
n = A.shape[0]
# initialize the residual
res = np.linalg.norm(f-np.dot(A,x0))
# init the new vector
x_new = np.zeros(n)
# copy the guess
x = np.array(x0,copy=True)
# init niter
niter = 0
    # iterate until the residual drops below tol or ITER_MAX is reached
while (res>tol) and (niter<ITER_MAX):
# loop over all the lines
for i in range(n):
# initialize the sums
sum1, sum2 = 0.0, 0.0
# loop over the line elements
for j in range(n):
# if j<i we use the new values
if j<i:
sum1 += A[i,j]*x_new[j]
# else we use the old ones
elif j>i:
sum2 += A[i,j]*x[j]
# we store the new values
x_new[i] = (f[i]-sum1-sum2)/A[i,i]
# change the old solution to the new one
        # copy the new solution into the old one (a plain assignment would alias
        # the two arrays, so the "old" values would never actually be old)
        x = x_new.copy()
# compute the new residual
res = np.linalg.norm(f-np.dot(A,x))
# increment niter
niter += 1
# print the final status of the algorithm
if niter == ITER_MAX:
info = 0
else:
info = 1
return x,info, niter
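# A minimal usage sketch (added for illustration; the 3x3 system below is made up):
if __name__ == '__main__':
    A = np.array([[4.0, 1.0, 0.0],
                  [1.0, 4.0, 1.0],
                  [0.0, 1.0, 4.0]])  # strictly diagonally dominant, so the iteration converges
    f = np.array([1.0, 2.0, 3.0])
    x, info, niter = gauss_seidel(A, f, np.zeros(3))
    print(x, info, niter)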
| true
|
c7f0e21f583b94220ac49a3392e331392e45ed3e
|
Python
|
Premnath08/GUVI-Python-Class
|
/Problems/Hangman Game.py
|
UTF-8
| 644
| 3.640625
| 4
|
[] |
no_license
|
import random
country=["india","australia","america","brazil","england"]
place=random.choice(country)
a=input("Play Game!!! Press Enter ")
count=0
i=''
letter=''
length=len(place)
while(count<5):
chance=0
char=input("\nEnter letter : ")
letter=char+letter
for i in place:
if i in letter:
print(i,end=" ")
else:
print(end=" _ ")
chance=chance+1
if chance==0:
print("\nWin")
print("Word:",place)
break
    if char not in place:
        print("Try Again")
        count = count + 1
        if count == 5:
            print("Lost")
| true
|
a19b4d0aa690fb5dfe0c6e3d0241d3bbf2783c1c
|
Python
|
vshmyhlo/similarity-learning
|
/transforms.py
|
UTF-8
| 192
| 2.875
| 3
|
[] |
no_license
|
class CheckSize(object):
def __init__(self, size):
self.size = size
def __call__(self, input):
assert input.size == (self.size[1], self.size[0])
return input
| true
|
93f85345856800abd360daf4a6237a8e01d9abc8
|
Python
|
jcass8695/Interview-Prep
|
/middle_elem_ll.py
|
UTF-8
| 1,113
| 4.03125
| 4
|
[] |
no_license
|
class Node:
def __init__(self, val):
self.val = val
self.next = None
class LL:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, val):
new_node = Node(val)
if self.head is None:
self.head = new_node
if self.tail != None:
self.tail.next = new_node
self.tail = new_node
def find_middle_elem(self):
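        # 'lag' is advanced only while it trails half of 'curr's index, so when
        # 'curr' runs off the end, 'lag' is left sitting on the middle node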
lag = self.head
curr = self.head
lag_index = 0
curr_index = 0
while curr:
if lag_index < curr_index // 2:
lag = lag.next
lag_index += 1
curr = curr.next
curr_index += 1
return lag.val
def print_list(self):
curr = self.head
while curr:
print(curr.val)
curr = curr.next
if __name__ == '__main__':
linked_list = LL()
linked_list.insert_node(1)
linked_list.insert_node(2)
linked_list.insert_node(3)
linked_list.insert_node(4)
linked_list.insert_node(5)
print(linked_list.find_middle_elem())
| true
|
c6d3a2c9f4d525bd2f3fab16cea0eb7bc78f4850
|
Python
|
hakbailey/advent-of-code-2020
|
/05/05a.py
|
UTF-8
| 564
| 2.96875
| 3
|
[] |
no_license
|
import math
file = "day-5-input.txt"
ids = []
def pick_half(half, r):
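    # 'F' (front) and 'L' (left) keep the lower half of the range; 'B'/'R' keep
    # the upper half -- a binary search over the seat rows/columns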
dist = (r[1] - r[0])/2
if half == "F" or half == "L":
r[1] = r[1] - math.floor(dist)
else:
r[0] = r[0] + math.ceil(dist)
return r
with open(file, "r") as f:
passes = f.readlines()
for p in passes:
p = p.rstrip()
row = [0, 127]
col = [0, 7]
for c in p[:7]:
row = pick_half(c, row)
for c in p[7:]:
col = pick_half(c, col)
id = row[0] * 8 + col[0]
ids.append(id)
print(row[0], col[0], id)
print(max(ids))
| true
|
7d16d3875ff5144360ab6c4f66a9385802862152
|
Python
|
rjairath/python-algos
|
/graphs/graphRepresentUsingLL.py
|
UTF-8
| 2,199
| 3.96875
| 4
|
[] |
no_license
|
class Node:
def __init__(self, value):
self.value = value
self.next = None
class Graph:
def __init__(self, vertexCount):
self.vertexCount = vertexCount
self.adjList = [None] * self.vertexCount
self.visitedArray = [False] * self.vertexCount
def addEdge(self, from_val, to_val):
if self.adjList[from_val] is None:
from_node = Node(from_val)
self.adjList[from_val] = from_node
to_node = Node(to_val)
# insert at the head of LL as it is an O(1) operation
# to_node.next = self.adjList[from_val]
# self.adjList[from_val] = to_node
# try inserting at end now
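        # note: appending at the tail costs O(k) in the list length per edge;
        # keeping a tail pointer per vertex would restore O(1) insertion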
temp = self.adjList[from_val]
while(temp.next):
temp = temp.next
temp.next = to_node
def printGraph(self):
for i in range(self.vertexCount):
temp = self.adjList[i]
print('vertex ', i, ': ', end="")
while(temp):
print(temp.value, end="->")
temp = temp.next
print()
def DFS(self, start_node_num):
start_node = self.adjList[start_node_num]
if self.visitedArray[start_node_num] == True:
return
self.visitedArray[start_node_num] = True
print(start_node_num)
temp = start_node.next
while(temp):
# print(temp.value, 'val.......')
self.DFS(temp.value)
temp = temp.next
def BFS(self, start_node_num):
start_node = self.adjList[start_node_num]
queue = [start_node_num]
self.visitedArray[start_node_num] = True
while(len(queue) > 0):
# item is a number
item = queue.pop(0)
print(item)
temp = self.adjList[item]
temp = temp.next
while(temp):
if self.visitedArray[temp.value] == False:
queue.append(temp.value)
self.visitedArray[temp.value] = True
temp = temp.next
g = Graph(4)
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(1, 2)
g.addEdge(2, 0)
g.addEdge(2, 3)
g.addEdge(3, 3)
g.printGraph()
print('BFS: ')
g.BFS(2)
| true
|
fd6e860e285e45d7d414f84cec5c685d1cabdf5f
|
Python
|
yuceltoluyag/pythondersnotlarim
|
/lessons3/ders5.py
|
UTF-8
| 162
| 2.625
| 3
|
[] |
no_license
|
import requests
import json
result = requests.get("https://jsonplaceholder.typicode.com/todos")
result = json.loads(result.text)
for i in result:
print(i)
| true
|
87c046f5d40eb789739a416830da332c3931736e
|
Python
|
sourcery-ai-bot/library-python
|
/concepts/strings/multi_line_string.py
|
UTF-8
| 278
| 4.25
| 4
|
[] |
no_license
|
text_str = "multi-line text string"
str_method = "Python's f strings"
my_string = (
f"This is an example of a {text_str} with interpolated variables " +
f"displayed within placeholders using {str_method} method."
)
# Prints entire string on one line.
print(my_string)
| true
|
7f827eb4bcfcb878d5ae5fe5dbae08868a36609a
|
Python
|
nasigh/assignment-2
|
/rock, paper,scissor.py
|
UTF-8
| 1,218
| 3.515625
| 4
|
[] |
no_license
|
import random
user=0
computer=0
print ("*lets play game*","\U0001F60E")
print ("you have 5 set to play whith computer")
print ("if you are ready choose 1 and if you are not choose 2")
print ("1-yes","2-no")
agreement = input("tel me:")
if agreement == '1':
for i in range(1,6):
print("round:" ,+i )
print("computer:",+computer)
print("user:", +user)
gamelist = ["1-rock","2-paper","3-scissor"]
for x in gamelist:
print(x)
ch = input ("enter your choice:")
pc = random.randint(1,3)
print("pc number:",+pc)
if pc==ch :
print("equal")
elif pc==1 and ch=='2':
user += 1
elif pc==1 and ch=='3':
computer += 1
elif pc==2 and ch=='1':
computer += 1
elif pc==2 and ch=='3':
user += 1
elif pc==3 and ch=='1':
user += 1
elif pc==3 and ch=='2':
computer += 1
else:
if computer == user:
print("equal")
elif computer > user:
print("computer won")
elif user > computer:
print ("user won")
elif agreement == '2' :
print("good bye")
| true
|
e5fc2c080258694dba1686fe9a6cff537793068e
|
Python
|
Tech4AfricaRHoK/Education
|
/backend/learnervoice/teacherfeedback/serializers.py
|
UTF-8
| 2,333
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
"""
Serializers for the teacher feedback API.
"""
from teacherfeedback.models import Profile
from rest_framework import serializers
class TeacherSubjectSerializer(serializers.Serializer):
"""
Allows us to serialize/deserialize the grades/subjects that a teacher teaches.
"""
grade = serializers.IntegerField(required=True, min_value=1, max_value=12)
subject = serializers.CharField(required=True, max_length=255)
class UserSerializer(serializers.Serializer):
"""
Serializer for the different types of users.
"""
id = serializers.IntegerField(read_only=True)
name = serializers.CharField(required=True, max_length=255)
surname = serializers.CharField(required=True, max_length=255)
mobile = serializers.CharField(required=False, allow_null=True, max_length=100)
password = serializers.CharField(max_length=255, required=False, allow_null=True)
profile_type = serializers.ChoiceField(choices=Profile.TYPES, default=Profile.STUDENT)
email = serializers.EmailField(required=False, max_length=255, allow_null=True)
# for when we're creating a teacher - we need a subject list of what they teach
subjects = serializers.ListField(
child=TeacherSubjectSerializer(),
required=False,
allow_null=True
)
# for when we're creating a student and/or teacher - the pk of the school
school_id = serializers.IntegerField(required=False, allow_null=True)
# for when we're creating students
grade = serializers.IntegerField(required=False, allow_null=True, min_value=1, max_value=12)
def validate(self, data):
"""
Checks if the data we've received is actually valid.
"""
# if we're adding a student here
if data['profile_type'] == Profile.STUDENT:
            if 'mobile' not in data or \
                    'password' not in data or \
                    'school_id' not in data or \
                    'grade' not in data:
                raise serializers.ValidationError("Missing field(s) in incoming request")
elif data['profile_type'] == Profile.TEACHER:
if 'mobile' in data:
if 'password' not in data or 'subjects' not in data:
raise serializers.ValidationError("Missing field(s) in incoming request")
return data
| true
|
59e25c37fca73f8aa794124e6ea43010ff5d860c
|
Python
|
HenryPaik1/WalmartDemandForecast
|
/utils.py
|
UTF-8
| 9,092
| 2.796875
| 3
|
[] |
no_license
|
import warnings
import pandas as pd
import numpy as np
import pickle
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
from sklearn.decomposition import PCA
from pmdarima.arima import auto_arima
from dateutil.relativedelta import relativedelta
from datetime import *
from dateutil import rrule
from slacker import Slacker
warnings.filterwarnings('ignore')
def init_df(df, test):
"""
return: df, test
"""
print('loading data'+ '...')
#df_train = df.groupby(['Store', 'Date']).agg(np.mean).drop(columns=['Dept', 'IsHoliday'])
#df_t = df_train.unstack().T
df['Date'] = pd.to_datetime(df['Date'])
test['Date'] = pd.to_datetime(test['Date'])
#test.drop(columns='IsHoliday', inplace=True)
test['Weekly_Sales'] = 0
df['check'] = df.apply(lambda x: str(x['Store']) + '_' + str(x['Dept']), axis=1)
test['check'] = test.apply(lambda x: str(x['Store']) + '_' + str(x['Dept']), axis=1)
print('loading data'+ '...' * 2)
def supplement_data(df, test, store, dept):
"""
- Add the ID which is in test but not in train
- Fill df with weekly_sales of consecutive date to meet consistency
"""
df_s = pd.DataFrame({'Store': store,
'Dept': dept,
'Date': all_date,
'Weekly_Sales': 0,
'check': str(store) + '_' + str(dept)})
return df_s
# make complete data
all_date = df['Date'].unique()
all_test_check = test['check'].unique()
all_train_check = df['check'].unique()
check_tag = np.where(~np.isin(all_test_check, all_train_check))[0]
need_to_add = all_test_check[check_tag]
print('loading data'+ '...' * 3)
for tag in need_to_add:
store = int(tag.split('_')[0])
dept = int(tag.split('_')[1])
df = df.append(supplement_data(df, test, store, dept), ignore_index=True).fillna(0)
return df, test
def get_value(data, store=1, dept=1):
"""
return values of specific store and dept
"""
c = data["Store"] == store
c2 = data["Dept"] == dept
return data[c&c2].reset_index(drop=True)
def pivot_df(df, dept=1):
"""
pivot dataframe and fillna(0)
"""
c = df['Dept'] == dept
df_pivot = df[c].pivot(index='Date', columns='Store', values='Weekly_Sales').fillna(0)
start = df_pivot.index[0]
end = df_pivot.index[-1]
    idx = pd.date_range(start=start, end=end, freq='W-FRI')  # date_range replaces the removed DatetimeIndex(start=...) constructor
df_pivot = df_pivot.merge(pd.DataFrame(idx).rename(columns={0:'Date'}), how='outer', on='Date').fillna(0)
df_pivot = df_pivot.sort_index()
return df_pivot.set_index('Date')
def reframe_df(previous_df, processed_data):
"""
convert array to pivot_table
"""
idx = previous_df.index
col = previous_df.columns
df = pd.DataFrame(data=processed_data, index=idx, columns=col)
return df
def pca_decomposition(data, dept, n_components=12):
"""
PCA deomposition according to the Dept
"""
try:
df_svd = pivot_df(data, dept)
pca = PCA(n_components=n_components)
df_low = pca.fit_transform(df_svd)
df_inverse = pca.inverse_transform(df_low)
# re-frame
df_inverse = reframe_df(previous_df=df_svd, processed_data=df_inverse)
return df_inverse
except:
# if pca fail,
return pivot_df(data, dept)
def get_fcst_len(store, dept, data, data_test):
"""
Get the length of periods to forecast
"""
def weeks_between(start_date, end_date):
weeks = rrule.rrule(rrule.WEEKLY, dtstart=start_date, until=end_date)
return weeks.count()
c = data_test['Store'] == store
c2 = data_test['Dept'] == dept
start = pivot_df(data, dept).index[-1] + relativedelta(weeks=1)
end = data_test[c&c2]['Date'].iloc[-1]
fcst_len = weeks_between(start, end)
return fcst_len
def custom_seasonal_adjust(denoise):
"""
- Average sales of each week num
- It is used to do diff to adjust seasonality
"""
df_adjust = pd.DataFrame()
df_adjust[0] = denoise.values
df_adjust[1] = denoise.shift(-52).values
df_adjust[2] = denoise.shift(-104).values
seasonality = df_adjust.mean(axis=1)[:52]
start_idx = denoise.index[0]
df_seasonality = seasonality.append(seasonality, ignore_index=True).append(seasonality[:39], ignore_index=True)
    idx = pd.date_range(start=start_idx, freq='W-FRI', periods=len(df_seasonality))
df_seasonality.index = idx
seasonal_adjust = (denoise - df_seasonality).dropna()
df_seasonality = df_seasonality.reset_index().\
assign(week_num = lambda x: x['index'].dt.week).\
drop_duplicates('week_num').\
drop(columns='index').rename(columns={0:'Weekly_Sales'})
return df_seasonality, seasonal_adjust
def fill_test_form(data, data_test, fcst, store, dept):
"""
fill test form with fcst data
"""
c = data_test['Store'] == store
c2 = data_test['Dept'] == dept
fcst = pd.DataFrame(fcst).rename(columns={0: 'Weekly_Sales'})
try:
fcst_for_test = data_test[c&c2].set_index('Date').join(fcst, on='Date', how='left', lsuffix='_0', rsuffix='_1').drop(columns='Weekly_Sales_0').Weekly_Sales_1
except:
start = pivot_df(data, dept).index[-1] + relativedelta(weeks=1)
        idx = pd.date_range(start=start, periods=len(fcst), freq='W-FRI')
fcst.index = idx
fcst_for_test = data_test[c&c2].set_index('Date').join(fcst, on='Date', how='left', lsuffix='_0', rsuffix='_1').drop(columns='Weekly_Sales_0').Weekly_Sales_1
data_test.loc[c&c2,'Weekly_Sales'] = fcst_for_test.values
c = np.where(data_test['Weekly_Sales'] > 0)[0]
return data_test
def send_message(arg, name, store=False, fail=False):
"""
send slack message of the result of modeling
"""
with open('token.pkl', 'rb') as f:
token = pickle.load(f)
slack = Slacker(token)
attachments_dict = dict()
attachments_dict['pretext'] = name
if store:
if fail:
text = '! fail: store {}'.format(arg)
else:
text = 'store {} success'.format(arg)
else:
if fail:
text = '! fail: dept {}'.format(arg)
else:
text = 'dept {} success'.format(arg)
attachments_dict['text'] = text
slack.chat.post_message('#random', text=None, attachments=[attachments_dict])
def send_text(name, text):
with open('token.pkl', 'rb') as f:
token = pickle.load(f)
slack = Slacker(token)
attachments_dict = dict()
attachments_dict['pretext'] = name
attachments_dict['text'] = text
slack.chat.post_message('#random', text=None, attachments=[attachments_dict])
# submission related code below:
def make_Id_check(df):
"""
make 'check' columns: 'store_dept'
- eg. store1 dept1 = '1_1'
"""
df['check'] = df.apply(lambda x: str(x['Store']) + '_' + str(x['Dept']), axis=1)
df = df.drop(columns=['Store', 'Dept'])
return df
def fill_test(answer_filename):
name = answer_filename
# get answer csv
ans = pd.read_csv(name)
try:
ans['Date'] = pd.to_datetime(ans['Date'])
except:
ans = ans.rename(columns={'index':'Date'})
ans['Date'] = pd.to_datetime(ans['Date'])
ans = make_Id_check(ans)
# get test form
test = pd.read_csv('test.csv')
test['Date'] = pd.to_datetime(test['Date'])
test = test.drop(columns=['IsHoliday'])
test = make_Id_check(test)
# fill test form
test = test.merge(ans, how='left', on=['check', 'Date']).fillna(0)
return test
def shift_2_5(sub):
def modify_sub(sub):
sub['Date'] = sub['Id'].apply(lambda x: x.split('_')[2])
sub['check'] = sub['Id'].apply(lambda x: x.split('_')[0] + '_' + x.split('_')[1])
sub['Date'] = pd.to_datetime(sub['Date'])
sub['Week_num'] = sub['Date'].dt.week
return sub
# prepare apply shift to submission file
modified_df = modify_sub(sub)
c52 = modified_df['Week_num'] == 52
c51 = modified_df['Week_num'] == 51
len_ = len(modified_df['check'].unique())
len_ = int(len_ * 0.1)
print('total number of IDs: ', len_); i = 0;
for Id in modified_df['check'].unique():
i += 1
if not i % len_:
print('complete: ', int(i / (len_ * 10) * 100), '%')
c = modified_df['check'] == Id
try:
val1 = modified_df.loc[c&c51].Weekly_Sales.values[0] * (2.5/7)
val2 = modified_df.loc[c&c52].Weekly_Sales.values[0] * (2.5/7)
modified_df.loc[c&c51, 'Weekly_Sales'] = modified_df.loc[c&c51, 'Weekly_Sales'] - val1 + val2
modified_df.loc[c&c52, 'Weekly_Sales'] = modified_df.loc[c&c52, 'Weekly_Sales'] - val2 + val1
except:
pass
return modified_df.drop(columns=['Date', 'check', 'Week_num'])
| true
|
d01b339c724d084d417837fe3de6a41269cc98f5
|
Python
|
tylerharter/caraza-harter-com
|
/tyler/cs301/fall18/materials3/code/lec-08-loops/code06_simple_loop.py
|
UTF-8
| 165
| 4.0625
| 4
|
[] |
no_license
|
num = input('Enter a number: ')
num = int(num)
counter = 1
while counter <= num:
print(counter)
counter += 1
print('counter =', counter)
print('Goodbye!')
| true
|
236d584a47acc264236ef5c2573449ceab09956b
|
Python
|
Rayan-arch/flask_api
|
/api/repositories.py
|
UTF-8
| 3,074
| 2.609375
| 3
|
[] |
no_license
|
from db import get_connection
from psycopg2 import extras
from auth import User
class AuthorsRepository:
def __init__(self):
self.connection = get_connection()
self.cursor = self.connection.cursor(cursor_factory=extras.RealDictCursor)
def check_exists(self, author_id):
self.cursor.execute('SELECT id, first_name, last_name FROM authors WHERE id = %s;', (author_id,))
return self.cursor.fetchone()
def get_authors(self):
self.cursor.execute('SELECT id, first_name, last_name FROM authors;')
return self.cursor.fetchall()
def add_author(self, *args):
self.cursor.execute('INSERT INTO authors (first_name, last_name) VALUES (%s,%s) RETURNING id;', args)
data = self.cursor.fetchone()
self.connection.commit()
return data['id']
def delete_author(self, id):
self.cursor.execute('DELETE FROM authors WHERE id=%s', (id,))
self.connection.commit()
class BooksRepository:
def __init__(self):
self.connection = get_connection()
self.cursor = self.connection.cursor(cursor_factory=extras.RealDictCursor)
def check_exists(self, id):
self.cursor.execute('SELECT id, title, description FROM books WHERE id=%s;', (id,))
return self.cursor.fetchone()
def get_books(self):
self.cursor.execute('SELECT id, title, author_id, description FROM books;')
return self.cursor
def add_one(self, *args):
self.cursor.execute('INSERT INTO books (title, author_id, description) VALUES (%s,%s,%s) RETURNING id;', args)
book_id = self.cursor.fetchone()
self.connection.commit()
return book_id['id']
def delete_books(self, book_id):
self.cursor.execute('DELETE FROM books WHERE id=%s', (book_id,))
self.connection.commit()
class UsersRepository:
def __init__(self):
self.connection = get_connection()
self.cursor = self.connection.cursor(cursor_factory=extras.RealDictCursor)
def map_row_to_user(self, row):
user = User()
if row is not None:
user.id = row['id']
user.username = row['user_name']
user.password = row['password']
else:
user.id = None
user.username = None
user.password = None
return user
def get_by_id(self, user_id):
        self.cursor.execute('SELECT id, user_name, password FROM users WHERE id=%s', (user_id,))  # note the one-element tuple: psycopg2 requires a sequence of parameters
return self.map_row_to_user(
self.cursor.fetchone()
)
def get_by_username(self, username):
self.cursor.execute('SELECT id, user_name, password FROM users WHERE user_name=%s;', (username,))
return self.map_row_to_user(
self.cursor.fetchone()
)
def save_new(self, username, password):
self.cursor.execute('INSERT INTO users(user_name,password) VALUES (%s,%s) RETURNING id;', (username, password))
user_id = self.cursor.fetchone()
self.connection.commit()
return user_id['id']
| true
|
dc4898d454a7f19d817c95cc0f578e385c97c4f0
|
Python
|
ademmy/Academic-Performance-in-Maths
|
/math_students.py
|
UTF-8
| 6,764
| 2.796875
| 3
|
[] |
no_license
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, VotingClassifier, BaggingClassifier
from sklearn.metrics import accuracy_score
import warnings
warnings.filterwarnings('ignore')
sns.set_style('darkgrid')
df_math = pd.read_csv("student-mat.csv", sep=";", names=['school', 'sex', 'age', 'address', 'famsize', 'Pstatus',
'Medu',
'Fedu', 'Mjob', 'Fjob', 'reason', 'guardian', 'traveltime',
'studytime', 'failures', 'schoolsup', 'famsup', 'paid',
'activities', 'nursery', 'higher', 'internet', 'romantic',
'famrel', 'freetime', 'goout', 'Dalc', 'Walc', 'health',
'absences', 'G1', 'G2', 'G3'])
# data exploration
maths = df_math.drop(index=0, axis=0)
print()
print('the data for the people offering maths')
print(maths.head())
# maths
print('the info from maths', '\n', maths.info())
print('the description from maths', '\n', maths.describe())
print(maths.isnull().sum().sort_values(ascending=False))
print()
for cols in maths.columns:
pass
# print(maths[cols].value_counts())
# print()
# print('Checking for missing values')
# print(maths.isnull().sum().sort_values())
# print(maths.columns)
cols = ['school', 'sex', 'age', 'address', 'famsize', 'Pstatus', 'Medu', 'Fedu', 'Mjob', 'Fjob', 'reason', 'guardian',
'traveltime', 'studytime', 'failures', 'schoolsup', 'famsup', 'paid', 'activities', 'nursery',
'higher', 'internet', 'romantic', 'famrel', 'freetime', 'goout', 'Dalc', 'Walc', 'health', 'absences',
'G1', 'G2', 'G3']
# for i in cols:
# plt.figure(figsize=(8, 8))
# sns.countplot(maths[i], palette=sns.color_palette("cubehelix"))
# plt.show()
df = maths.copy()
int_features = ['age', 'Fedu', 'failures', 'famrel', 'freetime', 'goout', 'Dalc', 'Walc', 'health', 'absences', 'G1',
'G2', 'G3', 'freetime', 'goout', 'health', 'Medu', 'studytime', 'traveltime']
print(len(int_features))
for vals in int_features:
df[vals] = df[vals].astype(int)
print('the number of numerical features we have in this dataset is set at', df.info())
# df['G1'] = df['G1'].astype(int)
# df['G2'] = df['G2'].astype(int)
# df['G3'] = df['G3'].astype(int)
# df['age'] = df['age'].astype(int)
score_map = {0: 'Below Average', 1: 'Below Average', 2: 'Below Average', 3: 'Below Average', 4: 'Below Average',
5: 'Below Average', 6: 'Below Average', 7: 'Below Average', 8: 'Below Average', 9: 'Below Average',
10: 'Average', 11: 'Above Average', 12: 'Above Average', 13: 'Above Average', 14: 'Above Average',
15: 'Above Average', 16: 'Above Average', 17: 'Above Average', 18: 'Above Average', 19: 'Above Average',
20: 'Above Average',
}
print()
# for cols in df.columns:
# if df[cols].dtypes == 'object':
# cat_cols = cols
cat_cols = df.select_dtypes(exclude=int)
print()
num_cols = df.select_dtypes(include=int)
# explore the numerical side of things
df_corr = num_cols.corr()
print(df_corr['G3'].sort_values(ascending=False))
# for i in df_corr.columns:
# plt.figure(figsize=(8, 8))
# sns.regplot(x=df_corr['G3'], y=df_corr[i])
# plt.show()
for cols in num_cols.columns:
print(num_cols[cols].value_counts())
print()
print(df['G3'].value_counts())
print()
print(df['G2'].value_counts())
data = df.copy()
data['G1'] = data['G1'].map(score_map)
data['G2'] = data['G2'].map(score_map)
data['G3'] = data['G3'].map(score_map)
print(data['G2'].unique())
print()
print(data['G1'].unique())
print()
print(data['G3'].unique())
print(data.isnull().sum().sort_values(ascending=False))
# for cols in cat_cols:
# plt.figure(figsize=(8, 8))
# sns.countplot(cat_cols[cols], hue=data['G3'], palette=sns.color_palette("cubehelix"))
# plt.show()
# Categorical Features
print(cat_cols.columns)
cat_list = ['school', 'sex', 'address', 'famsize', 'Pstatus', 'Mjob', 'Fjob', 'reason', 'guardian',
'schoolsup', 'famsup', 'paid', 'activities', 'nursery', 'higher', 'internet', 'romantic']
lb = LabelEncoder()
data['G1'] = lb.fit_transform(data['G1'])
data['G2'] = lb.fit_transform(data['G2'])
data['G3'] = lb.fit_transform(data['G3'])
print(data['G1'].value_counts())
print()
for cols in cat_cols:
cat_cols[cols] = lb.fit_transform(cat_cols[cols])
print(cat_cols.info())
# merging the two data frames together; drop the raw string columns from `data`
# so the label-encoded versions in `cat_cols` are not duplicated
full_df = pd.concat([cat_cols, data.drop(columns=cat_list)], axis=1)
print(type(full_df))
print()
# separate the target from the data
label = full_df['G3']
df_ = full_df.drop('G3', axis='columns')
print(label.head())
print(df_.info())
x_train, x_test, y_train, y_test = train_test_split(df_, label, test_size=0.3, random_state=41)
print('the length of the train data is', len(x_train))
print()
print('the length of the test data is', len(x_test))
log_reg = LogisticRegression()
log_reg.fit(x_train, y_train)
print()
print('the accuracy of logistic regression is set at')
print(log_reg.score(x_train, y_train))
prediction = log_reg.predict(x_test)
print()
print('the accuracy score of the test data for logistic regression is set at')
print(accuracy_score(y_test, prediction))
print()
print('Next up is the Support Vector Machines')
svc = SVC()
svc.fit(x_train, y_train)
print('the train accuracy for SVC is set at')
print(svc.score(x_train, y_train))
svc_prediction = svc.predict(x_test)
print()
print('the accuracy for the test is set at')
print(accuracy_score(y_test, svc_prediction))
print()
print('Random Forest')
rfc = RandomForestClassifier()
rfc.fit(x_train, y_train)
print()
print('The train accuracy is set at')
print(rfc.score(x_train, y_train))
print('the test accuracy is set at')
rfc_prediction = rfc.predict(x_test)
print(accuracy_score(y_test, rfc_prediction))
print()
print('Decision Trees')
dsc = DecisionTreeClassifier()
dsc.fit(x_train, y_train)
print('the train accuracy of decision tree is set at')
print(dsc.score(x_train, y_train))
print()
print('the test accuracy is set at')
dsc_prediction = dsc.predict(x_test)
print(accuracy_score(y_test, dsc_prediction))
| true
|
4fe959e6fe0d46326bce7ab9fce381fba5a763bd
|
Python
|
bozege/Functions.py
|
/karatsuba.py
|
UTF-8
| 982
| 3.125
| 3
|
[] |
no_license
|
#This function returns the product of the given parameters using one level of the Karatsuba split.
def karatsuba(n,m):
    strn = str(n)
    strm = str(m)
    # pad both operands to the same even length so the halves line up
    # (padding each string to its own even length breaks the recombination step
    # whenever the two operands have different lengths)
    size = max(len(strn), len(strm))
    if size % 2 != 0:
        size += 1
    strn = strn.zfill(size)
    strm = strm.zfill(size)
nhalf1 = ""
nhalf2 = ""
mhalf1 = ""
mhalf2 = ""
A = 0
B = 0
C = 0
for i in range (int(len(strn))):
if len(nhalf1) < len(strn)/2:
nhalf1 += strn[i]
else:
nhalf2 += strn[i]
for i in range (int(len(strm))):
if len(mhalf1) < len(strm)/2:
mhalf1 += strm[i]
else:
mhalf2 += strm[i]
A = (int(nhalf1)*int(mhalf1)) #* ((10**(int(len(nhalf1))))**2)
B = (int(nhalf2)*int(mhalf2))
C = (int(nhalf1) + int(nhalf2)) * (int(mhalf1) + int(mhalf2))
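    # recombine: n*m = A*10^(2h) + (C - A - B)*10^h + B, where h = len(nhalf1)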
result = (A * ((10**(int(len(nhalf1))))**2)) + ((C - (A + B)) * (10**(int(len(nhalf1))))) + B
return result
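# A quick sanity check (illustrative; the operands are arbitrary):
if __name__ == '__main__':
    assert karatsuba(1234, 5678) == 1234 * 5678
    assert karatsuba(56, 1234) == 56 * 1234  # operands of different lengths
    print(karatsuba(1234, 5678))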
| true
|
f125902465661669acaa2b8a303a9d86cc95946a
|
Python
|
timkl/alexa-runlog
|
/alexa-runlog.py
|
UTF-8
| 2,386
| 2.53125
| 3
|
[] |
no_license
|
import logging
import csv
from datetime import datetime, timedelta
from flask import Flask, render_template
from flask_ask import Ask, question, statement
app = Flask(__name__)
ask = Ask(app, "/")
log = logging.getLogger()
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
@ask.launch
def launch():
welcome_msg = render_template('welcome')
return question(welcome_msg)
@ask.intent("DurationIntent", convert={'duration': 'timedelta'})
def log_minutes(duration):
s = duration.total_seconds()
hours, remainder = divmod(s, 3600)
minutes, seconds = divmod(remainder, 60)
minutes = int(minutes)
seconds = int(seconds)
logged_msg = render_template('run_logged',
minutes=minutes,
seconds=seconds)
personal_best_msg = render_template('personal_best',
minutes=minutes,
seconds=seconds)
personal_worst_msg = render_template('personal_worst',
minutes=minutes,
seconds=seconds)
# Parse CSV file and get the personal best/worst.
get_data = open('data.csv', 'r')
reader = csv.reader(get_data)
    run_results = list(filter(None, sum(list(reader), [])))
    # parse before comparing: lexicographic min()/max() on the raw strings would
    # order "2:00:00" after "10:00:00"
    durations = []
    for r in run_results:
        t = datetime.strptime(r, "%H:%M:%S")
        durations.append(timedelta(hours=t.hour, minutes=t.minute, seconds=t.second))
    personal_best = min(durations)
    personal_worst = max(durations)
    get_data.close()
# Write duration to CSV file
write_data = open('data.csv', 'a')
write_data.write("{0},\n".format(duration))
write_data.close()
# Determine what message to play
if(personal_best > duration):
return statement(personal_best_msg)
if(personal_worst < duration):
return statement(personal_worst_msg)
else:
return statement(logged_msg)
@ask.session_ended
def session_ended():
log.debug("Session ended!")
return "", 200
if __name__ == '__main__':
app.run(debug=True)
| true
|
28b4fcc999f1c39a48845e8d62848ccce787fb6b
|
Python
|
Evgeny-Ivanov/mathcup_server
|
/answers/services.py
|
UTF-8
| 461
| 3.015625
| 3
|
[] |
no_license
|
class СheckAnswersService(object):
@staticmethod
def normalize_answer(answer):
answer = answer.strip()
answer = answer.lower()
answer = answer.replace(',', '.')
return answer
@staticmethod
def check_answer(user_answer):
answer1 = СheckAnswersService.normalize_answer(user_answer.answer)
answer2 = СheckAnswersService.normalize_answer(user_answer.task.answer)
return answer1 == answer2
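# A minimal sketch of the normalization behaviour (illustrative values):
if __name__ == '__main__':
    assert СheckAnswersService.normalize_answer('  3,14 ') == '3.14'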
| true
|
aade309916bde2e10cf786c0267fcb981891531b
|
Python
|
pooja1909/ai-ml-projects-data-science
|
/project-iris-dataset/final/q4/node.py
|
UTF-8
| 448
| 2.703125
| 3
|
[] |
no_license
|
#This class simply stores a single node of the decision tree.
class decisionnode:
    def __init__(self,col=-1,value=None,results=None,tb=None,fb=None):
        self.col=col # column index of the criterion being tested
        self.value=value # value necessary to get a true result
        self.results=results # dict of results for a branch; None for everything except leaf nodes
self.tb=tb # true decision nodes
self.fb=fb # false decision nodes
| true
|
50fdcb058fe2c5ff11277ac97b4fda2f8bb2c72a
|
Python
|
fenrrir/pug-pb-17032018
|
/descriptors/descr1.py
|
UTF-8
| 715
| 3.484375
| 3
|
[] |
no_license
|
class selfclsmethod(object): # a new kind of method for Python
    # the method automatically receives both the instance and the current class
def __init__(self, method):
self.method = method
def __get__(self, obj, type):
def new_method( *args, **kwargs ):
return self.method( obj, type, *args, **kwargs)
return new_method
class Exemplo:
@selfclsmethod
def test(self, cls):
return self, cls
@classmethod
def test_class(cls):
return cls
def test():
exm = Exemplo()
assert exm.test_class() == Exemplo
assert exm.test() == (exm, Exemplo)
if __name__ == '__main__':
test()
| true
|
c6e88925309196a3436ad6121900e3c70fc23b99
|
Python
|
FerCremonez/College-1st-semester-
|
/lista3e6.py
|
UTF-8
| 552
| 3.765625
| 4
|
[] |
no_license
|
import math
print('enter the coefficients of a quadratic equation:')
a=float(input('a='))
b=float(input('b='))
c=float(input('c='))
if a==0:
    print('this is not a quadratic equation')
else:
    delta=b**2 -4*a*c
    if delta<0:
        print('There is no real root')
    else:
        if delta==0:
            x=(-b)/(2*a)
            print('root= {}. single root'.format(x))
        else:
            x1= (-b+math.sqrt(delta))/(2*a)
            x2= (-b-math.sqrt(delta))/(2*a)
            print('roots x1={} and x2={}'.format(x1,x2))
| true
|
aa4d2632eb4dda1dc7a0d7be9665bc99fb0927a2
|
Python
|
sungminoh/algorithms
|
/leetcode/solved/2432_Number_of_Zero-Filled_Subarrays/solution.py
|
UTF-8
| 1,749
| 3.671875
| 4
|
[] |
no_license
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 sungminoh <smoh2044@gmail.com>
#
# Distributed under terms of the MIT license.
"""
Given an integer array nums, return the number of subarrays filled with 0.
A subarray is a contiguous non-empty sequence of elements within an array.
Example 1:
Input: nums = [1,3,0,0,2,0,0,4]
Output: 6
Explanation:
There are 4 occurrences of [0] as a subarray.
There are 2 occurrences of [0,0] as a subarray.
There is no occurrence of a subarray with a size more than 2 filled with 0. Therefore, we return 6.
Example 2:
Input: nums = [0,0,0,2,0,0]
Output: 9
Explanation:
There are 5 occurrences of [0] as a subarray.
There are 3 occurrences of [0,0] as a subarray.
There is 1 occurrence of [0,0,0] as a subarray.
There is no occurrence of a subarray with a size more than 3 filled with 0. Therefore, we return 9.
Example 3:
Input: nums = [2,10,2019]
Output: 0
Explanation: There is no subarray filled with 0. Therefore, we return 0.
Constraints:
1 <= nums.length <= 10^5
-10^9 <= nums[i] <= 10^9
"""
from typing import List
import pytest
import sys
class Solution:
def zeroFilledSubarray(self, nums: List[int]) -> int:
ret = 0
zero_cnt = 0
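        # a maximal zero run of length k contributes k*(k+1)//2 zero-filled subarrays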
for n in nums:
if n != 0:
ret += zero_cnt * (zero_cnt+1) // 2
zero_cnt = 0
else:
zero_cnt += 1
ret += zero_cnt * (zero_cnt+1) // 2
return ret
@pytest.mark.parametrize('args', [
(([1,3,0,0,2,0,0,4], 6)),
(([0,0,0,2,0,0], 9)),
(([2,10,2019], 0)),
])
def test(args):
assert args[-1] == Solution().zeroFilledSubarray(*args[:-1])
if __name__ == '__main__':
sys.exit(pytest.main(["-s", "-v"] + sys.argv))
| true
|
fb7c5588b97591642ee14751e6e81ae880c33195
|
Python
|
wanghongjuan/intel-iot-refkit
|
/meta-iotqa/lib/oeqa/runtime/nodejs/ocfdemoapp/led.py
|
UTF-8
| 3,238
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
import os
import sys
import time
import subprocess
from uiautomator import device as d
from oeqa.oetest import oeRuntimeTest
sys.path.append(os.path.dirname(__file__))
from appmgr import AppMgr
import data_settings
class CordovaPluginOCFDemoAppLedTest(oeRuntimeTest):
    '''Automate the Cordova plugin OCF demo app tests, e.g.
    checking that the resources are found and the resource information is read-only.
    '''
pkg_id = 'com.example.CordovaPluginOcfDemo'
led_item = None
device_found = False
resource_found = False
details_btn = None
id_item = None
led_value_item = None
led_value = None
appmgr = AppMgr()
@classmethod
def setUpClass(cls):
'''
Launch the app and find the OCF resources.
'''
cls.appmgr.kill_app(cls.pkg_id)
cls.appmgr.launch_app(cls.pkg_id)
time.sleep(data_settings.app_launch_and_wait)
def init_led_sensor(self):
'''
Go to the detailed page of the OCF resource.
'''
self.led_item = d(className='android.view.View', descriptionContains='Path: /a/led')
self.led_item.click()
time.sleep(1)
self.details_btn = d(className='android.widget.Button')
self.id_item = d(className='android.view.View', descriptionStartsWith='id')
self.led_value_item = d(className='android.view.View', index=14).child(className='android.view.View', index=1)
self.led_value = self.led_value_item.description
def test_led_resource_found(self):
'''Check if the led resources can be found.'''
self.appmgr.go_to_resources_for_ocfdemo()
time.sleep(data_settings.app_found_res_and_wait)
self.resource_found = d.exists(className='android.view.View', descriptionContains='/a/led')
self.assertTrue(self.resource_found, 'The led resource is not found.')
def test_led_resource_has_properties(self):
'''Check if the led resource has properties like id and value.'''
self.init_led_sensor()
self.assertEqual(len(self.details_btn), 2)
self.assertEqual(self.id_item.description.split(':')[-1].strip(), 'led',
'Id of led resource not found.')
self.assertEqual(self.led_value, 'false', 'Initial led value is not false!')
def test_led_z_device_found(self):
''''Check if the OCF device can be found.'''
self.appmgr.go_to_devices_for_ocfdemo()
time.sleep(data_settings.app_found_dev_and_wait)
self.device_found = d.exists(descriptionContains='UUID')
self.device_found = self.device_found and d.exists(descriptionContains='URL')
self.device_found = self.device_found and d.exists(descriptionContains='Name')
self.device_found = self.device_found and d.exists(descriptionContains='Data models')
self.device_found = self.device_found and d.exists(descriptionContains='Core spec version')
self.device_found = self.device_found and d.exists(descriptionContains='Role')
self.assertTrue(self.device_found, 'OCF device is not found.')
@classmethod
def tearDownClass(cls):
'''Terminate the app.'''
cls.appmgr.kill_app(cls.pkg_id)
| true
|
ea8ecf1081db1f353e4e19aea0daa212b9dd77c0
|
Python
|
mateusgruener/cursopython
|
/Aulas/1/funcoes.py
|
UTF-8
| 176
| 3.0625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 15 17:20:09 2021
@author: Usuario
"""
def importada(x):
importada=x**2
return importada
print(importada(8))
| true
|
31627118778e05e3ecf828ec2fe7b939a0b91eb7
|
Python
|
junbinding/algorithm-notes
|
/recs.construct-binary-tree-from-preorder-and-inorder-traversal.py
|
UTF-8
| 980
| 4.15625
| 4
|
[] |
no_license
|
from typing import List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
"""
105. 从前序与中序遍历序列构造二叉树
https://leetcode-cn.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/
根据一棵树的前序遍历与中序遍历构造二叉树。
前序遍历 preorder = [3,9,20,15,7],根 + 左 + 右
中序遍历 inorder = [9,3,15,20,7],左 + 根 + 右
"""
def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
if not preorder or not inorder:
return
        # the first preorder element is the root
root = TreeNode(preorder[0])
        # split the inorder list around the root to separate the left and right subtrees
idx = inorder.index(preorder[0])
root.left = self.buildTree(preorder[1:idx+1], inorder[:idx])
root.right = self.buildTree(preorder[1+idx:], inorder[idx+1:])
return root
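# A small check using the example from the docstring:
if __name__ == '__main__':
    root = Solution().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
    print(root.val, root.left.val, root.right.val)  # expected: 3 9 20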
| true
|
ce237ad7a825624bb9184006e2d229269e8c80f2
|
Python
|
sqlconsult/byte
|
/Python/binaryTree.py
|
UTF-8
| 1,998
| 3.4375
| 3
|
[] |
no_license
|
import math
import sys
#import pyodbc
import datetime
class Node:
def __init__(self, data):
self.left = None
self.right = None
self.data = data
class Tree:
def __init__(self):
self.root = None
def insert(self, root, data):
#print('data=', data)
if self.root == None:
#print(' self.root == None')
self.root = Node(data)
elif root == None:
#print(' root == None')
root = Node(data)
else:
if data <= root.data:
#print(' data <= root.data')
root.left = self.insert(root.left, data)
elif data > root.data:
#print(' data > root.data')
root.right = self.insert(root.right, data)
return root
def inorder(self, root):
if root == None:
return
self.inorder(root.left)
print(root.data)
self.inorder(root.right)
def findNode(self, node, val):
if node is None:
return False
elif val == node.data:
return True
elif val < node.data:
return self.findNode(node.left, val)
else:
return self.findNode(node.right, val)
# Define a main() function
def main():
tree = Tree()
tree.insert(tree.root, 8)
tree.insert(tree.root, 3)
tree.insert(tree.root, 10)
tree.insert(tree.root, 1)
tree.insert(tree.root, 6)
tree.insert(tree.root, 4)
tree.insert(tree.root, 7)
tree.insert(tree.root, 14)
tree.insert(tree.root, 13)
tree.inorder(tree.root)
findVal = tree.findNode(tree.root, 5)
print('5', findVal)
findVal = tree.findNode(tree.root, 6)
print('6', findVal)
# This is the standard boilerplate that calls main() function
if __name__ == '__main__':
main()
| true
|
13996dc2d1c7c901a0b084c59c60e1c4a4a0c211
|
Python
|
muralidhar8190/Royal
|
/if.py
|
UTF-8
| 102
| 2.984375
| 3
|
[] |
no_license
|
inp=[]
if len(inp)==0:
inp.append(10)
print("i am inside if condition")
print(inp)
| true
|
52779a40c092d18ee9d80296d1a87d88b2339bfe
|
Python
|
GLAMOS/dataflow
|
/dataflow/DataReaders/DatabaseReaders/VolumeChangeReader.py
|
UTF-8
| 3,656
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
'''
Created on 12.07.2018
@author: yvo
'''
from dataflow.DataReaders.DatabaseReaders.GlamosDatabaseReader import GlamosDatabaseReader
from dataflow.DataObjects.VolumeChange import VolumeChange
from dataflow.DataObjects.Enumerations.HeightCaptureMethodEnumeration import HeightCaptureMethodEnum
from dataflow.DataObjects.Enumerations.VolumeChangeEnumerations import AnalysisMethodEnum
import uuid
class VolumeChangeReader(GlamosDatabaseReader):
'''
Reader object to retrieve volume change data stored in the GLAMOS PostGIS database.
Attributes:
_TABLE_VOLUME_CHANGE str Absolute name of the table or view to retrieve the volume-change data from (<schema>.<table | view>).
'''
_TABLE_VOLUME_CHANGE = "volume_change.vw_volume_change"
def __init__(self, accessConfigurationFullFileName):
'''
Constructor
@type accessConfigurationFullFileName: string
@param accessConfigurationFullFileName: Path to the private database access configuration file.
'''
super().__init__(accessConfigurationFullFileName)
def getData(self, glacier):
'''
Retrieves all volume change measurement of the given glacier. As identification
of the glacier, the uuid-based primary key of the glacier will be used.
The measurements are stored in the volumeChange dictionary of the glacier instance.
@type glacier: DataObject.Glacier.Glacier
@param glacier: Glacier of which the time series of volume changes has to be retrieved.
'''
statement = "SELECT * FROM {0} WHERE pk_glacier = '{1}';".format(self._TABLE_VOLUME_CHANGE, glacier.pk)
results = super().retriveData(statement)
for result in results:
glacier.addVolumeChange(self._recordToObject(result))
def _recordToObject(self, dbRecord):
'''
Converts a single record of the database into a glacier object.
@type dbRecord: list
@param dbRecord: List with all values of one database record.
@rtype: DataObjects.VolumeChange.VolumeChange
@return: VolumeChange object of the database record.
'''
# Converting the PostgreSQL data types into Python data types.
pk = uuid.UUID(dbRecord[0])
dateFrom = dbRecord[11]
dateFromQuality = None
dateTo = dbRecord[12]
dateToQuality = None
areaFrom = float(dbRecord[13])
areaTo = float(dbRecord[14])
heightCaptureMethodFrom = HeightCaptureMethodEnum(int(dbRecord[5]))
heightCaptureMethodTo = HeightCaptureMethodEnum(int(dbRecord[7]))
analysisMethod = AnalysisMethodEnum(int(dbRecord[9]))
elevationMaximumFrom = float(dbRecord[15])
elevationMinimumFrom = float(dbRecord[16])
elevationMaximumTo = float(dbRecord[17])
elevationMinimumTo = float(dbRecord[18])
volumeChange = float(dbRecord[20])
heightChangeMean = float(dbRecord[19])
return VolumeChange(
pk,
dateFrom, dateFromQuality,
dateTo, dateToQuality,
areaFrom, areaTo,
heightCaptureMethodFrom, heightCaptureMethodTo,
analysisMethod,
elevationMaximumFrom, elevationMinimumFrom,
elevationMaximumTo, elevationMinimumTo,
volumeChange,
heightChangeMean)
| true
|
49cfc313ff708e8f868ade60e91b20cccece3200
|
Python
|
karishmachawla4/PersonalDevelopment
|
/WordCounter.py
|
UTF-8
| 406
| 4.0625
| 4
|
[] |
no_license
|
# Count words in a sentence, case-insensitively (each word is capitalized before counting)
def wordCounter(string):
    counts = {}
words = string.split()
# print(string)
for word in words:
if str.capitalize(word) in counts:
counts[str.capitalize(word)] += 1
else:
counts[str.capitalize(word)] = 1
return counts
print(wordCounter('My Name is Akshay Kamra and I am the first of my name'))
| true
|
d90cf9e2a902c2799e38c256fbbb3555f203044b
|
Python
|
khushigupta515/contentaggregator
|
/webscrapingfinal.py
|
UTF-8
| 3,525
| 2.984375
| 3
|
[] |
no_license
|
import requests
from bs4 import BeautifulSoup
import sqlite3
import schedule
import time
import smtplib, ssl
conn = sqlite3.connect('scraping.db')
global finalstr=""
def scrapequotes():
conn.execute('''CREATE TABLE if not exists tablenew
(counter int,date text)''')
count=0
finalstr=""
for j in range(3):
url = "http://quotes.toscrape.com/tag/love/page/{}/".format(j)
stuff = requests.get(url)
        soup = BeautifulSoup(stuff.content, "html.parser")
firstQuote = soup.findAll("span",attrs={"class","text"})
for i in range(len(firstQuote)):
count+=1
k=firstQuote[i].text
ar=[count,k]
conn.execute("INSERT INTO tablenew VALUES (?,?)",ar)
print("LOVE QUOTES FROM quotes.toscrape.com")
for row in conn.execute('SELECT * FROM tablenew'):
print(row)
finalstr=finalstr+"\n"+row[1]
conn.commit()
global finalstr2=""
def scrapeBooks(url,count=1,urlNum=1):
conn.execute('''CREATE TABLE if not exists table2
(data1 text,data2 text)''')
bookInfo = requests.get(url)
soup2 = BeautifulSoup(bookInfo.content)
anotherSoup = soup2.findAll("li",attrs={"class", "col-xs-6 col-sm-4 col-md-3 col-lg-3"})
for i in range(len(anotherSoup)):
print(count,')', 'Title: ', anotherSoup[i].h3.a['title'])
print(' Price:',anotherSoup[i].find('p',attrs='price_color').text)
print()
title1=anotherSoup[i].h3.a['title']
price1=anotherSoup[i].find('p',attrs='price_color').text
ar=[title1,price1]
conn.execute("INSERT INTO table2 VALUES (?,?)",ar)
finalstr2=finalstr2+title1+price1+"\n"
conn.commit()
count+=1
if(len(soup2.findAll('li',attrs={'class','next'}))==1):
if urlNum>1:
url="http://books.toscrape.com/catalogue/"+soup2.findAll('li',attrs={'class','next'})[0].a['href']
print('---- URL Being Scraped ---- ')
print(url)
print('--------------------------')
print()
scrapeBooks(url,count,urlNum)
else:
url="http://books.toscrape.com/"+soup2.findAll('li',attrs={'class','next'})[0].a['href']
print('---- URL Being Scraped ---- ')
print(url)
print('---------------------------')
print()
urlNum+=1
scrapeBooks(url,count,urlNum)
def sendingdatabasethroughmail():
port = 465 # For SSL
smtp_server = "smtp.gmail.com"
mailid=input("Enter e-mail id\n")
sender_email = "khushigupta515@gmail.com" # Enter your address
receiver_email = mailid # Enter receiver address
password = input("Type your password and press enter: ")
message = """\
Subject: Hi there
This message is sent from Python."""+finalstr+finalstr2
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, message)
scrapequotes()
url = "http://books.toscrape.com/"
scrapeBooks(url)
sendingdatabasethroughmail()
schedule.every().day.at("00:00").do(scrapequotes)
schedule.every().day.at("00:00").do(lambda : scrapeBooks(url))
while True:
schedule.run_pending()
time.sleep(1)
conn.close()
| true
|
a9ad3e6e260d1689cf6378c257606a1fd67a85fe
|
Python
|
financo/learn-ml
|
/04-kNN/02-kNN-in-scikit-learn/kNN_function/kNN.py
|
UTF-8
| 1,293
| 2.890625
| 3
|
[] |
no_license
|
import numpy as np
from math import sqrt
from collections import Counter
def kNN_classify(k, X_train, y_train, x):
assert 1 <= k <= X_train.shape[0], "k must be valid"
assert X_train.shape[0] == y_train.shape[0], "the size of X_train must equal to the size of y_train"
assert X_train.shape[1] == x.shape[0], "the feature number of x must be equal to X_train"
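    # rank the training points by Euclidean distance to x and take a majority
    # vote among the labels of the k nearest neighbours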
distance = [sqrt(sum((x_train - x) ** 2)) for x_train in X_train]
most_common = Counter(y_train[np.argsort(distance)[:k]]).most_common(1)
predict = most_common[0][0]
return predict
if __name__ == '__main__':
raw_data_X = [[3.393533211, 2.331273381],
[3.110073483, 1.781539638],
[1.343808831, 3.368360954],
[3.582294042, 4.679179110],
[2.280362439, 2.866990263],
[7.423436942, 4.696522875],
[5.745051997, 3.533989803],
[9.172168622, 2.511101045],
[7.792783481, 3.424088941],
[7.939820817, 0.791637231]
]
raw_data_y = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
X_train = np.array(raw_data_X)
y_train = np.array(raw_data_y)
x = np.array([8.093607318, 3.365731514])
print(kNN_classify(6, X_train, y_train, x))
| true
|
19edfbb154a0a62b5aefc94544372ec3a9a2472b
|
Python
|
cyy-hub/InterviewCoding
|
/cyy_xiecheng_1.py
|
UTF-8
| 863
| 2.9375
| 3
|
[] |
no_license
|
import sys
str1 = sys.stdin.readline().strip()
str2 = sys.stdin.readline().strip()
def find_b(string):
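    # Build the shortest prefix in which every distinct character has appeared
    # count/min_count times, where min_count is the smallest per-character count.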
char_dict = {}
for char in string:
if not char_dict.get(char):
char_dict[char] = 1
else:
char_dict[char] += 1
small_num = len(string)+1
for key in char_dict:
small_num = min(small_num, char_dict[key])
for key in char_dict:
char_dict[key] /= small_num
need_dit = {}
match = 0
b = ""
for char in string:
if not need_dit.get(char):
need_dit[char] = 1
else:
need_dit[char] += 1
if need_dit[char] == char_dict[char]:
match +=1
b+=char
if match == len(char_dict):
break
return b
b1 = find_b(str1)
b2 = find_b(str2)
if b1 == b2:
print(b1)
else:
print("")
| true
|
0f436a7c18600670ab489534e16fa7d7fa09b349
|
Python
|
wkwkgg/atcoder
|
/abc/problems150/142/c.py
|
UTF-8
| 184
| 2.828125
| 3
|
[] |
no_license
|
N = int(input())
A = list(map(int, input().split()))
xs = []
for i in range(N):
xs.append((i+1, A[i]))
xs = sorted(xs, key=lambda x: x[1])
print(" ".join(str(x[0]) for x in xs))
| true
|
95a2b5ab5823c267a7e9e41600da87c8eaffdf62
|
Python
|
1horstmann/Calculo-Numerico
|
/main.py
|
UTF-8
| 1,013
| 3.15625
| 3
|
[] |
no_license
|
import classes
# Data definition
x = [2.5 ,3.9 ,2.9, 2.4, 2.9, 0.8, 9.1, 0.8, 0.7, 7.9, 1.8, 1.9, 0.8, 6.5, 1.6, 5.8, 1.3, 1.2, 2.7] # independent-variable data
y = [211, 167, 131, 191, 220, 297, 7, 211, 300, 107, 167, 266, 227, 86, 207, 115, 285, 199, 172] # dependent-variable data
# Instantiating the class
a = classes.AjustamentoDeCurvas(x,y)
# Using the class
a.calculo_r() # compute the value of r
a.calculo_r2() # compute the value of r^2
a.ajustamento_linear() # compute the linear fit
a.ajustamento_quadratico() # compute the quadratic fit
a.ajustamento_exponencial() # compute the exponential fit
a.ajustamento_hiperbolico() # compute the hyperbolic fit
a.ajustamento_loglog() # compute the log-log fit
a.melhor_expressão_ajustamento(mostrar=True) # check which fit is best
a.calculo_ajustamento(x=10,ajustamento='Linear') # compute a y value for a given x under a chosen fit
| true
|
9c7891985ca22558d166c96bdbe9884b36ddd039
|
Python
|
hunseok329/programmers
|
/소수 찾기.py
|
UTF-8
| 468
| 3
| 3
|
[] |
no_license
|
from itertools import permutations
def solution(numbers):
sumP = []
count = 0
for s in range(1, len(numbers)+1):
p = list(permutations(numbers, s))
for num in set(p):
sumP.append(int(''.join(num)))
sumP = set(sumP)
for w in sumP:
if w == 1 or w == 0:
continue
for n in range(2, w//2+1):
if w % n == 0:
break
else:
count += 1
return count
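# Illustrative example: from the digit string "17" we can form 1, 7, 17 and 71,
# of which 7, 17 and 71 are prime, so the expected answer is 3.
if __name__ == '__main__':
    print(solution("17"))  # 3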
| true
|
8b6f6a40755e49c97a993cd35334d7072a139024
|
Python
|
liyi0206/leetcode-python
|
/170 two sum III - data structure design.py
|
UTF-8
| 954
| 4.09375
| 4
|
[] |
no_license
|
class TwoSum(object):
    # The trade-off in this problem is worth considering:
    # if adds must be fast, use an array to hold the numbers;
    # if finds must be fast, use a hashmap to hold the numbers.
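    # With the hashmap used below, add() is O(1) and find() is O(n) over distinct numbers.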
def __init__(self):
"""
initialize your data structure here
"""
self.mp={}
def add(self, number):
"""
Add the number to an internal data structure.
:rtype: nothing
"""
if number in self.mp: self.mp[number]+=1
else: self.mp[number]=1
def find(self, value):
"""
Find if there exists any pair of numbers which sum is equal to the value.
:type value: int
:rtype: bool
"""
for i in self.mp:
j = value-i
if i==j and self.mp[i]>1 or i!=j and j in self.mp: return True
return False
twoSum = TwoSum()
twoSum.add(1)
twoSum.add(3)
twoSum.add(5)
print(twoSum.find(4))  # True
print(twoSum.find(7))  # False
| true
|
70d117ddf2073c55e469ab7eed808662db6e5ad1
|
Python
|
nashtash/python_covid19_nrw
|
/get_data_rki_ndr_districts.py
|
UTF-8
| 2,554
| 2.609375
| 3
|
[] |
no_license
|
from functools import lru_cache
from io import BytesIO
import requests
import pandas as pd
from utils.storage import upload_dataframe
url = 'https://ndrdata-corona-datastore.storage.googleapis.com/rki_api/current_cases_regions.csv'
@lru_cache
def get_data():
# Download website
response = requests.get(url)
assert bool(response), 'Laden der RKI-NDR-Daten fehlgeschlagen'
# Parse into data frame
df = pd.read_csv(BytesIO(response.content))
return df, response
def clear_data():
df, response = get_data()
# Clean up data here
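    # Berlin arrives as individual districts; sum them (excluding any existing
    # city-wide row, ID 11000) into a single Berlin row that replaces the
    # district rows further down.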
grouped = df[df['IdLandkreis'] != 11000].groupby('Bundesland').sum()
def sum_of(column):
return grouped[column]['Berlin']
population = 3644826
berlin = {
'IdLandkreis': 11000,
'Bundesland': 'Berlin',
'IdBundesland': 11,
'Landkreis': 'Berlin',
'Faelle': sum_of('Faelle'),
'FaelleDelta': sum_of('FaelleDelta'),
'Todesfaelle': sum_of('Todesfaelle'),
'TodesfaelleDelta': sum_of('TodesfaelleDelta'),
'Genesen': sum_of('Genesen'),
'GenesenDelta': sum_of('GenesenDelta'),
'population': population,
'Inzidenz': sum_of('Faelle') / population * 100_000,
'Todesrate': sum_of('Todesfaelle') / population * 100_000,
'NeueFaelleLetzte7Tage': sum_of('NeueFaelleLetzte7Tage'),
'InzidenzLetzte7Tage': sum_of('NeueFaelleLetzte7Tage') / population * 100_000,
}
# Drop old Berlin rows and add new one
df = df[df['Bundesland'] != 'Berlin']
df = df.append(berlin, ignore_index=True).sort_values(by='IdLandkreis')
    int_cols = ['IdLandkreis', 'IdBundesland', 'Faelle', 'FaelleDelta',
                'Todesfaelle', 'TodesfaelleDelta', 'Genesen', 'GenesenDelta',
                'population', 'NeueFaelleLetzte7Tage']
    df[int_cols] = df[int_cols].astype('int')
df = df.rename(columns={"IdLandkreis": "ID"})
return df
def write_data_rki_ndr_districts():
df = clear_data()
filename = 'rki_ndr_districts.csv'
upload_dataframe(df, filename)
# If the file is executed directly, print cleaned data
if __name__ == '__main__':
df = clear_data()
print(df)
# print(df.to_csv(index=False))
| true
|
10ccb3f85e2814f6f6c75376b259ec518bac1daa
|
Python
|
ppitu/Python
|
/Studia/Cwiczenia/Zestaw4/zadanie2.py
|
UTF-8
| 756
| 4.15625
| 4
|
[] |
no_license
|
# Exercise 3.5 (Zadanie 3.5)
def rysuj(n):
string = '|' + '....|' * n + '\n'
string += str(0)
for x in range(n):
string += str(x + 1).rjust(5)
return string
print("Zadanie 3.5:")
dlugosc = input('Podaj dlugosc: ')
dlugosc = int(dlugosc)
print(rysuj(dlugosc))
# Exercise 3.6 (Zadanie 3.6)
def pobierz_liczbe(string):
number = input(string)
try:
number = int(number)
except ValueError:
print('Nie podano liczby')
else:
return number
def rysuj1():
string = ''
wiersz = pobierz_liczbe('Podaj ile wierszy: ')
kolumny = pobierz_liczbe('Podaj ile kolumn: ')
for x in range(wiersz):
string += '+' + '----+' * (kolumny) + '\n' + '|' + ' |' * (kolumny) + '\n'
string += '+' + '----+' * (kolumny) + '\n'
return string
print("Zadanie 3.6:")
print(rysuj1())
| true
|
8639ffb6f43df4d6c8a92fb805982a3b1d92fe16
|
Python
|
kelvin926/korea_univ_python_1
|
/210516과제/210516_과제.py
|
UTF-8
| 12,088
| 3.84375
| 4
|
[] |
no_license
|
# 2021271424 장현서 - Python assignment submitted 2021-05-16. github: @kelvin926
##########################################################################################################
'''
<Problem 2> - A positive integer that is divisible only by itself and 1 is called a prime. Given N, write a program that finds the primes smaller than N.
'''
'''
# Problem 2 code
N = int(input("양의 정수 값 N을 입력 : "))
sosu = []
for i in range(2, N + 1):
is_Sosu = 1
for j in range(2, ((i // 2) + 1)):
if i % j == 0:
is_Sosu = 0
break
if is_Sosu == 1:
sosu.append(i)
print(sosu)
'''
#####################################################################################################
'''
<Problem 4> - There is a milk bottle. We want to know how many people can drink the water in this bottle if it is divided into equal amounts.
We also want to know how many bottles are needed for N people to drink.
Assume the size of the bottle and the capacity of a glass are variable.
Work out which length measurements are needed, and write a program that computes the volume of water, the number of glasses, and so on.
'''
'''
# Problem 4 code
w = float(input("가로 길이는? :" ))
d = float(input("세로 길이는? :" ))
h = float(input("높이 길이는? :" ))
v = w*d*h
p = int(input("몇 사람이 나눠마실 예정? : "))
print("1병기준으로, {}명이 {}ml씩 나눠 마실 수 있습니다.".format(p, v//p))
need_p = int(input("사람 수는? : "))
cup = float(input("잔의 크기는? (ml) : "))
if (need_p*cup) <= v:
print("{}명이 가득 채운 잔으로 1인 1잔을 마신다고 하였을 때, 1병이 필요합니다.".format(need_p))
else:
i=2
while True:
if(need_p*cup) <= (i*v):
break
else:
i+=1
print("{}명이 가득 채운 잔으로 1인 1잔을 마신다고 하였을 때, {}병이 필요합니다.".format(need_p, i))
'''
#####################################################################################################
'''
<Problem 6> - An element symbol is the notation for a chemical element. It is written as the capitalised first letter
of the element's Latin or Greek name; when first letters clash, a middle letter is chosen
and written in lowercase after the first letter.
There are 92 naturally occurring elements, including hydrogen (H), carbon (C) and nitrogen (N); the rest are man-made.
Using a dictionary, write a quiz program for repeatedly practising element names and symbols, with functions to add, edit and delete element symbols.
'''
'''
import random
element = [{'한글':'수소','기호':'H'}, {'한글':'헬륨','기호':'He'}, {'한글':'리튬','기호':'Li'}, {'한글':'베릴륨','기호':'Be'}, {'한글':'붕소','기호':'B'},
{'한글':'탄소','기호':'C'},{'한글':'질소','기호':'N'},{'한글':'산소','기호':'O'},{'한글':'플루오린','기호':'F'},{'한글':'네온','기호':'Ne'},
{'한글':'나트룸','기호':'Na'},{'한글':'마그네슘','기호':'Mg'}, {'한글':'알루미늄','기호':'Al'}, {'한글':'규소','기호':'Si'},{'한글':'인','기호':'P'},
{'한글':'황','기호':'S'},{'한글':'염소','기호':'Cl'},{'한글':'아르곤','기호':'Ar'},{'한글':'칼륨','기호':'K'},{'한글':'칼슘','기호':'Ca'},
{'한글':'스칸듐','기호':'Sc'},{'한글':'타이타늄','기호':'Ti'},{'한글':'바나듐','기호': 'V'},{'한글':'크로뮴','기호': 'Cr'}, {'한글':'망가니즈','기호':'Mn'},
{'한글':'철','기호':'Fe'},{'한글':'코발트','기호':'Co'},{'한글':'니켈','기호':'Ni'},{'한글':'구리','기호':'Cu'},{'한글':'아연','기호':'Zn'},
{'한글':'갈륨','기호':'Ga'},{'한글':'저마늄','기호':'Ge'}, {'한글':'비소','기호':'As'}, {'한글':'샐래늄','기호':'Se'}, {'한글':'브로민','기호':'Br'},
{'한글':'크립톤','기호':'Kr'},{'한글':'루비듐','기호':'Rb'},{'한글':'스트론튬','기호':'Sr'},{'한글':'이트륨','기호':'Y'},{'한글':'지르코늄','기호':'Zr'},
{'한글':'나이오븀','기호':'Nb'},{'한글':'몰리브데넘','기호':'Mo'},{'한글':'테크네듐','기호':'Tc'},{'한글':'루테늄','기호':'Ru'},{'한글':'루듐','기호':'Rh'},
{'한글':'팔라듐','기호':'Pd'},{'한글':'은','기호':'Ag'}, {'한글':'카드뮴','기호':'Cd'}, {'한글':'인듐','기호':'In'}, {'한글':'주석','기호':'Sn'},
{'한글':'안티모니','기호':'Sb'},{'한글':'텔루륨','기호':'Te'}, {'한글':'아이오딘','기호':'I'},{'한글':'제논','기호':'Xe'},{'한글':'세슘','기호':'Cs'},
{'한글':'바륨','기호':'Ba'},{'한글':'란타넘','기호':'La'},{'한글':'세륨','기호':'Ce'},{'한글':'프라세오디뮴','기호':'Pr'}, {'한글':'네오디뮴','기호':'Nd'},
{'한글':'프로메튬','기호':'Pm'}, {'한글':'사마륨','기호':'Sm'},{'한글':'유로퓸','기호':'Eu'},{'한글':'가돌리늄','기호':'Gd'}, {'한글':'터븀','기호':'Tb'},
{'한글':'디스프로슘','기호':'Dy'}, {'한글':'홀뮴','기호':'Ho'}, {'한글':'어븀','기호':'Er'}, {'한글':'툴륨','기호':'Tm'}, {'한글':'이터븀','기호':'Yb'},
{'한글':'루테튬','기호':'Lu'},{'한글':'하프늄','기호':'Hf'},{'한글':'탄탈럼','기호':'Ta'},{'한글':'텅스텐','기호':'W'},{'한글':'레븀','기호':'Re'},
{'한글':'오스뮴','기호':'Os'},{'한글':'이라듐','기호':'Ir'},{'한글':'백금','기호':'Pt'},{'한글':'금','기호':'Au'},{'한글':'수은','기호':'Hg'},{'한글':'딜륨','기호':'Tl'},
{'한글':'납','기호':'Pb'},{'한글':'비스무트','기호':'Bi'}, {'한글':'폴로늄','기호':'Po'},{'한글':'아스타틴','기호':'At'},{'한글':'라돈','기호':'Rn'},
{'한글':'프랑슘','기호':'Fr'},{'한글':'라듐','기호':'Ra'},{'한글':'악티늄','기호':'Ac'},{'한글':'토륨','기호':'Th'},{'한글':'프로트악티늄','기호':'Pa'},
{'한글':'우라늄','기호':'U'}, {'한글':'넵투늄','기호':'Np'},{'한글':'플루토늄','기호':'Pu'},{'한글':'아메리슘','기호':'Am'}, {'한글':'퀴륨','기호':'Cm'},
{'한글':'버클륨','기호':'Bk'},{'한글':'캘리포늄','기호':'Cf'},{'한글':'아인슈타이늄','기호':'Es'}, {'한글':'페르뮴','기호':'Fm'}, {'한글':'멘델레븀','기호':'Md'},
{'한글':'노벨륨','기호':'No'}, {'한글':'로렌슘','기호':'Lr'},{'한글':'러더포듐','기호':'Rf'}, {'한글':'더브늄','기호':'Db'},{'한글':'시보귬','기호':'Sg'},
{'한글':'보륨','기호':'Bh'},{'한글':'하슘','기호':'Hs'},{'한글':'마이트너륨','기호':'Mt'},{'한글':'다름슈타튬','기호':'Ds'},{'한글':'뢴트게늄','기호':'Rg'},
{'한글':'코르페르니슘','기호':'Cn'},{'한글':'니호늄','기호':'Nh'},{'한글':'플레로븀','기호':'Fl'},{'한글':'모스코븀','기호':'Mc'},{'한글':'리버모륨','기호':'Lv'},
{'한글':'테네신','기호':'Ts'},{'한글':'오가네손','기호':'Og'}]
while True:
print("=========현재 입력된 원소들=======")
print(element)
print("========================================================================================")
print("1. 원소 기호 맞히기 게임\n2. 원소 기호 입력\n3. 원소 기호 수정\n4. 원소 기호 삭제\n5. 그만하기")
select_num = int(input("수행할 기능을 선택해주세요(1,2,3,4,5번) : "))
    if select_num == 1: # quiz game
while True:
question = element[random.randint(0,92)]
answer = input("{}의 원소 기호는? : ".format(question['한글']))
if answer == question['기호']:
print("정답입니다!")
else:
print("오답입니다!")
question = element[random.randint(0,92)]
answer = input("{}의 한글 이름은? : ".format(question['기호']))
if answer == question['한글']:
print("정답입니다!")
else:
print("오답입니다!")
keep_going = int(input("더 하시겠으면 1을, 그만하고 싶으시면 0을 입력해주세요 : "))
if keep_going == 1:
continue
elif keep_going == 0:
break
else:
print("오류! 정확한 값을 입력해주세요.")
print("초기 화면으로 돌아갑니다.")
continue
    elif select_num == 2: # add
korean_element = input("추가 할 원소의 한글 이름은? : ")
english_element = input("추가 할 원소의 기호는? : ")
element.append({'한글':korean_element,'기호':english_element})
print("추가 완료!")
print("초기 화면으로 돌아갑니다.")
continue
    elif select_num == 3: # edit
remake_num = int(input("원소의 한글 이름을 수정하시려면 1번을, 원소 기호를 수정하시려면 2번을 입력해주세요 : "))
if remake_num == 1:
remake_element = input("수정 할 원소의 한글 이름을 입력해주세요. : ")
for i in range(0, len(element)):
confirm_list = element[i-1]
if remake_element == confirm_list['한글']:
remake_korean = input("수정할 한글 이름을 입력해주세요 : ")
element[i-1] = {'한글':remake_korean,'기호':confirm_list['기호']}
print("수정 완료!")
print("초기 화면으로 돌아갑니다.")
continue
elif remake_num == 2:
remake_element = input("수정 할 원소의 기호를 입력해주세요. : ")
for i in range(0, len(element)):
confirm_list = element[i-1]
if remake_element == confirm_list['기호']:
remake_giho = input("수정할 기호를 입력해주세요 : ")
element[i-1] = {'한글':confirm_list['한글'],'기호':remake_giho}
print("수정 완료!")
print("초기 화면으로 돌아갑니다.")
continue
else:
print("오류! 정확한 값을 입력해주세요.")
print("초기 화면으로 돌아갑니다.")
continue
    elif select_num == 4: # delete
del_element = input("삭제 할 원소의 한글 이름을 입력해주세요. : ")
for i in range(0, len(element)):
confirm_list = element[i-1]
if del_element == confirm_list['한글']:
del element[i-1]
print("삭제 완료!")
print("초기 화면으로 돌아갑니다.")
continue
    elif select_num == 5: # quit
break
else:
print("오류! 1~4 사이의 값을 입력해주세요!")
'''
########################################################################################################
'''
<Problem 7> - A fun dice game program ~~
'''
'''
# Problem 7 code
import random
lose_list = [2, 4, 6, 8]
re_list = [3, 5, 9, 10, 11, 12]
while True:
print("주사위 굴러가유~")
a1 = random.randint(1,6)
a2 = random.randint(1,6)
a_sum = a1 + a2
print(a1, a2)
if a_sum == 7:
print("플레이어 승리!")
break
elif a_sum in lose_list:
print("플레이어 패배!")
break
elif a_sum in re_list:
print("다시 던집니다!")
b1 = random.randint(1,6)
b2 = random.randint(1,6)
b_sum = b1 + b2
print(b1, b2)
if a_sum == b_sum:
print("플레이어 승리!")
break
else:
print("처음부터 주사위를 다시 던집니다!")
continue
'''
#############################################################################################
| true
|
0b772bee8c68261a45a16c1e6345d0719d474f7f
|
Python
|
UsacDmitriy/first
|
/base_types/useful_operator.py
|
UTF-8
| 229
| 3.703125
| 4
|
[] |
no_license
|
# print(list(range(1,123,3)))
# for i in range(1,123,3):
# print(i)
my_string = 'abfgsde'
for key, letter in enumerate(my_string):
print(str(letter) + " " + str(key))
from random import randint
print(randint(12,123))
| true
|
0bd2f22cf2bb0ff3d804c4bb44d2a3be029e6b52
|
Python
|
amey-joshi/am
|
/optim/or-tools/cp/cryptarithmetic.py
|
UTF-8
| 1,711
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/python
from ortools.sat.python import cp_model
# Is there an assignment of digits to the letter such that the equation
# CP + IS + FUN = TRUE is true?
line = 'CP + IS + FUN = TRUE'
chars = set(c for c in line if c.isalpha())
base = 10 # We are looking at decimals
if len(chars) > base:
print('No assignment is possible')
exit(0)
model = cp_model.CpModel()
# Decision variables
c = model.NewIntVar(1, base - 1, 'C')
p = model.NewIntVar(0, base - 1, 'P')
i = model.NewIntVar(1, base - 1, 'I')
s = model.NewIntVar(0, base - 1, 'S')
f = model.NewIntVar(1, base - 1, 'F')
u = model.NewIntVar(0, base - 1, 'U')
n = model.NewIntVar(0, base - 1, 'N')
t = model.NewIntVar(1, base - 1, 'T')
r = model.NewIntVar(0, base - 1, 'R')
e = model.NewIntVar(0, base - 1, 'E')
allVars = [c, p, i, s, f, u, n, t, r, e]
# All letters need to be assigned a different number.
model.AddAllDifferent(allVars)
# Add the constraint in the line
model.Add(c * base + p + i * base + s + f * base * base + u * base +
n == t * base * base * base + r * base * base + u * base + e)
solver = cp_model.CpSolver()
class SolutionPrinter(cp_model.CpSolverSolutionCallback):
def __init__(self, variables):
cp_model.CpSolverSolutionCallback.__init__(self)
self._variables = variables
self._solnCount = 0
def on_solution_callback(self):
self._solnCount += 1
for v in self._variables:
print(f'{v}={self.Value(v)}', end=' ')
print()
def solution_count(self):
return self._solnCount
solutionPrinter = SolutionPrinter(allVars)
status = solver.SearchForAllSolutions(model, solutionPrinter)
print(f'status = {solver.StatusName(status)}')
| true
|
93bf07247949ba45904ecc8bd455dc3316f98cd7
|
Python
|
zhongh/ampo-ink
|
/scripts/read_inks.py
|
UTF-8
| 3,803
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
__author__ = 'Hao'
import openpyxl
import json
from string import Template
# Initialize the inks list
inks = {
"DI_Water": {
"filepath": "../data/Inkjet Printing Process File Repository/Droplet Ejection/DI Water Ink-A/Fluid Properties-DI Water.xlsx"
},
"Fifty_Glycerol": {
"filepath": "../data/Inkjet Printing Process File Repository/Droplet Ejection/50 Glycerol Ink-A/Fluid Properties-50 Glycerol.xlsx"
},
"Eighty_Glycerol": {
"filepath": "../data/Inkjet Printing Process File Repository/Droplet Ejection/80 Glycerol Ink-A/Fluid Properties-80 Glycerol.xlsx"
},
"TangoBlack": {
"filepath": "../data/Inkjet Printing Process File Repository/Droplet Ejection/TangoBlack Ink-A/Fluid Properties-TangoBlack.xlsx"
},
"VeroClear": {
"filepath": "../data/Inkjet Printing Process File Repository/Droplet Ejection/VeroClear Ink-A/Fluid Properties-VeroClear.xlsx"
}
}
# Helper function
def process_empty_values(x):
try:
return float(x)
except:
return ""
# Keys to use in constructing inks dictionary and template matching dictionary
d_keys = ["ink_id", "ink_label", "density", "dynamic_viscosity", "color", "surface_tension"]
# Populate inks list
for ink in inks:
wb = openpyxl.load_workbook(inks[ink]["filepath"], data_only=True)
sheet = wb.active
inks[ink]["ink_id"] = ink
inks[ink]["ink_label"] = sheet["B1"].value
inks[ink]["density"] = process_empty_values(sheet["B2"].value)
inks[ink]["dynamic_viscosity"] = process_empty_values(sheet["B3"].value)
inks[ink]["surface_tension"] = process_empty_values(sheet["B4"].value)
inks[ink]["color"] = sheet["B5"].value
prefix_ampo = "https://tw.rpi.edu/web/project/ampo-ink#"
tt = """### https://tw.rpi.edu/web/project/ampo-ink#${ink_id}
:${ink_id} rdf:type :Ink ;
rdfs:label "${ink_label}"^^xsd:string ;
ampo:hasAttribute [ rdf:type :Ink_Density ;
rdfs:label "Density"^^xsd:string ;
qudt:quantityValue [ rdf:type qudt:QuantityValue ;
qudt:numericValue "${density}"^^xsd:double ;
qudt:unit qudt-unit:KilogramPerCubicMeter
]
] ,
[ rdf:type :Ink_DynamicViscosity ;
rdfs:label "Dynamic Viscosity"^^xsd:string ;
qudt:quantityValue [ rdf:type qudt:QuantityValue ;
qudt:numericValue "${dynamic_viscosity}"^^xsd:double ;
qudt:unit qudt-unit:PascalSecond
]
] ,
[ rdf:type :Ink_Color ;
rdfs:label "Color"^^xsd:string ;
ampo:descriptiveValue "${color}"^^xsd:string
] ,
[ rdf:type :Ink_SurfaceTension ;
rdfs:label "Surface Tension"^^xsd:string ;
qudt:quantityValue [ rdf:type qudt:QuantityValue ;
qudt:numericValue "${surface_tension}"^^xsd:double ;
qudt:unit qudt-unit:NewtonPerMeter
]
] .
"""
f = open("../output/inks.ttl", "w+")
for ink in inks:
t = Template(tt)
d = {key: inks[ink][key] for key in d_keys}
# print(t.substitute(d))
f.write(t.substitute(d) + "\n\n\n")
f.close()
print("All inks read.")
| true
|
dc9d0274ab7646eccc4b3ee613918f1d96f4eb0f
|
Python
|
bk-anupam/PatientClinicProximityFinderPublic
|
/src/PatientClinicProximityFinder.py
|
UTF-8
| 21,299
| 2.734375
| 3
|
[] |
no_license
|
import pandas as pd
from geopy.extra.rate_limiter import RateLimiter
from geopy.geocoders import Nominatim
import os.path
from functools import partial
import json
import logging.config
import yaml
import pygtrie
import requests
from retry import retry
from geolib import geohash
from geopy.exc import GeocoderServiceError, GeopyError
import math
with open(r"./../config/logging_config.yml", 'r') as config:
config = yaml.safe_load(config)
logging.config.dictConfig(config)
logger = logging.getLogger(__name__)
with open("./../config/app_config.json", "r") as config:
app_config = json.load(config)
country_codes = app_config["country_code"]
retry_count = int(app_config["retry_count"])
delay = int(app_config["delay"])
backoff = int(app_config["backoff"])
# These two setting are used to generate proximity geo hashes within a specific radius and specific precision
# proximity radius is in meters
proximity_radius = float(app_config["proximity_radius"])
# No of digits in the generated proximity hash
geohash_precision = int(app_config["geohash_precision"])
def in_circle_check(latitude, longitude, centre_lat, centre_lon, radius):
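    """Return True if the point lies within radius of the centre (plain Euclidean check)."""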
x_diff = longitude - centre_lon
y_diff = latitude - centre_lat
if math.pow(x_diff, 2) + math.pow(y_diff, 2) <= math.pow(radius, 2):
return True
return False
def get_centroid(latitude, longitude, height, width):
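    """Return the centre (x, y) of the grid cell anchored at (latitude, longitude)."""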
y_cen = latitude + (height / 2)
x_cen = longitude + (width / 2)
return x_cen, y_cen
def convert_to_latlon(y, x, latitude, longitude):
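    """Convert metre offsets (y northwards, x eastwards) around a point into latitude/longitude."""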
pi = 3.14159265359
r_earth = 6371000
lat_diff = (y / r_earth) * (180 / pi)
lon_diff = (x / r_earth) * (180 / pi) / math.cos(latitude * pi/180)
final_lat = latitude+lat_diff
final_lon = longitude+lon_diff
return final_lat, final_lon
def create_geohash(latitude, longitude, radius, precision):
"""
Generates a list of geohashes within the specified radius (in meters) of a geolocation with
specified precision
:param latitude: location latitude
:param longitude: location longitude
:param radius: proximity radius in meters
:param precision: No of digits in the generated proximity hash
:return: comma separated string of geohashes
"""
x = 0.0
y = 0.0
points = []
geohashes = []
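    # Approximate geohash cell width/height in metres for precision levels 1-12.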
grid_width = [5009400.0, 1252300.0, 156500.0, 39100.0, 4900.0, 1200.0, 152.9, 38.2, 4.8, 1.2, 0.149, 0.0370]
grid_height = [4992600.0, 624100.0, 156000.0, 19500.0, 4900.0, 609.4, 152.4, 19.0, 4.8, 0.595, 0.149, 0.0199]
height = (grid_height[precision - 1])/2
width = (grid_width[precision-1])/2
    lat_moves = int(math.ceil(radius / height))
    lon_moves = int(math.ceil(radius / width))
for i in range(0, lat_moves):
temp_lat = y + height*i
for j in range(0,lon_moves):
temp_lon = x + width*j
if in_circle_check(temp_lat, temp_lon, y, x, radius):
x_cen, y_cen = get_centroid(temp_lat, temp_lon, height, width)
lat, lon = convert_to_latlon(y_cen, x_cen, latitude, longitude)
points += [[lat, lon]]
lat, lon = convert_to_latlon(-y_cen, x_cen, latitude, longitude)
points += [[lat, lon]]
lat, lon = convert_to_latlon(y_cen, -x_cen, latitude, longitude)
points += [[lat, lon]]
lat, lon = convert_to_latlon(-y_cen, -x_cen, latitude, longitude)
points += [[lat, lon]]
for point in points:
geohashes += [geohash.encode(point[0], point[1], precision)]
return ','.join(set(geohashes))
# Twp way dictionary mapping column names in csv (or dataframe) to search criteria column names
# and vice versa (for nominatim API)
def get_pat_col_searchcol_map():
pat_map = {"Postal Code": "postalcode",
"FSA": "postalcode",
"postalcode": "Postal Code",
"Address": "street",
"street": "Address",
"City": "city",
"city": "City",
"Province": "state",
"state": "Province"}
return pat_map
def get_clinic_col_searchcol_map():
clinic_map = {"Postal Code": "postalcode",
"FSA": "postalcode",
"postalcode": "Postal Code",
"Clinic Address": "street",
"street": "Clinic Address",
"Clinic City": "city",
"city": "Clinic City",
"Province": "state",
"state": "Province"}
return clinic_map
def address_postalcode_search(geocode, search_params, initial_call=False):
"""
Makes a call to Nominatim api using address and postal code as search criteria
:param geocode: geopy RateLimiter object to do throttled search using Nominatim api
:param search_params: Dictionary with search criteria
:param initial_call: bool value indicating if this is the first call to the method
:return: geopy.location.Location object or None if no search result found
"""
address = search_params["street"]
postal_code = search_params["postalcode"] if "postalcode" in search_params.keys() else ""
if not initial_call:
if not address:
return None
# remove the last word from address
address_tokens = address.split(" ")[0:-1]
address = " ".join(address_tokens) if len(address_tokens) > 0 else ""
search_params["street"] = address
if postal_code:
search_params.pop("postalcode")
locations = geocode(search_params)
search_params["postalcode"] = postal_code
if locations is not None and len(locations) > 0 and postal_code:
# if address or one of its substrings returns a location, check if adding postalcode search criteria
# refines the search
refined_locations = geocode(search_params)
if refined_locations is not None and len(refined_locations) > 0:
return refined_locations[0]
else:
return locations[0]
if locations is None and search_params["street"]:
return address_postalcode_search(geocode, search_params)
else:
return None
def no_address_search(geocode, search_params):
"""
This method is called when address and postal code combination yields no search results.
The search criteria used are postal_code and fsa combination followed by only fsa. As last
resort only fixed search criteria of province and city is used.
:param geocode: geopy RateLimiter object to do throttled search using Nominatim api
:param search_params: Dictionary with search criteria
:return: geopy.location.Location object or None if no search result found
"""
locations = geocode(search_params)
if locations is None:
return None
elif len(locations) > 0:
return locations[0]
@retry((GeopyError, GeocoderServiceError), tries=retry_count, delay=delay, backoff=backoff, logger=logger)
def get_geocode(type, geocode, df_row, fixed_search_cols, var_search_cols=[], initialCall=False):
"""
Gets the geocode for either a clinic or a patient record
:param type: string "patient" or "clinic"
:param geocode: geopy RateLimiter object to do throttled search using Nominatim api
:param df_row: dataframe row (from patient or clinic dataframe)
:param fixed_search_cols: list of search columns used as search criteria in all searches (Province and City)
:param var_search_cols: list of lists containing column names for more precise search
:param initialCall: bool value indicating if this is the first call to the method
:return: pandas series object containing 'Geo_Cols', 'Geo_Code' and 'Geo_Hash'
"""
var_search_cols = get_var_search_cols(initialCall, type, var_search_cols)
prefix = 'Pat_' if type.lower() == 'patient' else 'Clinic_'
id_col = "ID" if type.lower() == 'patient' else 'Clinic ID'
col_searchcol_map = get_pat_col_searchcol_map() if type.lower() == 'patient' else get_clinic_col_searchcol_map()
id = df_row[id_col]
col_labels = [prefix+x for x in ['Geo_Cols', 'Geo_Code', 'Geo_Hash']]
search_params = get_search_params(df_row, fixed_search_cols, var_search_cols, col_searchcol_map, type, id)
if "street" in search_params.keys():
location = address_postalcode_search(geocode, search_params, True)
else:
location = no_address_search(geocode, search_params)
if location is not None:
search_cols = ",".join(["FSA" if "FSA" in var_search_cols else col_searchcol_map[key]
for key in search_params.keys()])
lat = location.latitude
lon = location.longitude
logger.info(f"For {type}ID: {id} with {search_params} => latitude = {lat}, longitude = {lon}")
loc_geohash = geohash.encode(lat, lon, 12)
return pd.Series([search_cols, (lat, lon), loc_geohash], index=col_labels)
elif len(var_search_cols) > 0 and location is None:
# Remove the most precise search criteria which is at the top of search criteria stack
var_search_cols.pop()
return get_geocode(type, geocode, df_row, fixed_search_cols, var_search_cols=var_search_cols)
else:
# Neither variable nor fixed search criteria yield a geocode
return pd.Series([None, (None, None), None], index=col_labels)
def get_var_search_cols(initialCall, type, var_search_cols):
"""
Initialize a list of lists containing column names for more precise search
:param initialCall: bool value indicating if this is the first call to the method
:param type: string "patient" or "clinic"
:param var_search_cols: list of lists containing column names for more precise search
:return: list of lists containing column names for more precise search
"""
if initialCall and type.lower() == 'clinic':
var_search_cols = [["FSA"],
["Postal Code"],
["Postal Code", "Clinic Address"]]
elif initialCall and type.lower() == 'patient':
var_search_cols = [["FSA"],
["Postal Code"],
["Postal Code", "Address"]]
return var_search_cols
def get_search_params(df_row, fixed_search_cols, var_search_cols, col_searchcol_map, type, id):
"""
Creates a dictionary of search parameters using both fixed and precise search criteria
:param df_row: dataframe row
:param fixed_search_cols: list of search columns used as search criteria in all searches (Province and City)
:param var_search_cols: list of lists containing column names for more precise search
:param col_searchcol_map: Dictionary mapping column names in csv (or dataframe) to search criteria column names
and vice versa (for nominatim API)
:param type: string "patient" or "clinic"
:param id: id of the dataframe row
:return: dictionary with search parameters
"""
if len(var_search_cols) == 0:
var_search_params = []
logger.critical(f"Exhausted all variable geocode search criteria for {type}id: {id}. No geocode found. "
f"Geocode will be returned on the basis of fixed search cols")
else:
var_search_params = [(col_searchcol_map[df_col], df_row[df_col]) for df_col in var_search_cols[-1]]
fixed_search_params = [(col_searchcol_map[df_col], df_row[df_col]) for df_col in fixed_search_cols]
search_params = dict(var_search_params + fixed_search_params)
return search_params
def get_clinic_geocode(geolocator, df_clinics):
"""
Get the geocode for a clinic record
:param geolocator: geopy RateLimiter object to do throttled search using Nominatim api
:param df_clinics: clinics dataframe
:return: clinics dataframe with Clinic_Geo_Cols, Clinic_Geo_Code and Clinic_Geo_Hash columns
"""
fixed_search_cols = ["Province", "Clinic City"]
df_clinics_geocode = df_clinics.apply(
lambda row: get_geocode("clinic", geolocator, row, fixed_search_cols, initialCall=True), axis=1)
return pd.concat([df_clinics, df_clinics_geocode], axis=1)
def get_patient_geocode(geolocator, df_patients):
"""
Get the geocode for a patient record
:param geolocator: geopy RateLimiter object to do throttled search using Nominatim api
:param df_patients: clinics dataframe
:return: patients dataframe with Pat_Geo_Cols, Pat_Geo_Code and Pat_Geo_Hash columns
"""
fixed_search_cols = ["Province", "City"]
df_patients_geocode = df_patients.apply(
lambda row: get_geocode("patient", geolocator, row, fixed_search_cols, initialCall=True), axis=1)
return pd.concat([df_patients, df_patients_geocode], axis=1)
def get_geohash_nearby_clinics(pat_geohash, clinic_geohash_trie):
"""
Perform a string prefix search to find clinic with geohashes that match a substring of patient geohash
:param pat_geohash: one of patient's location geohash
:param clinic_geohash_trie: Trie containing all clinic geohashes
:return: list of matching geohashes
"""
if not pat_geohash:
return None
try:
nearest_clinics = clinic_geohash_trie.keys(pat_geohash)
except KeyError:
pat_geohash = pat_geohash[0:-1]
return get_geohash_nearby_clinics(pat_geohash, clinic_geohash_trie)
if nearest_clinics is None or len(nearest_clinics) == 0:
pat_geohash = pat_geohash[0:-1]
return get_geohash_nearby_clinics(pat_geohash, clinic_geohash_trie)
else:
return nearest_clinics
def get_pat_nearby_clinics(pat_gh, gh_to_match, clinic_gh_trie):
"""
Perform a string prefix search to find clinic with geohashes that match a substring of
one of the patient geohash search set
:param pat_gh: patient's geohash
:param gh_to_match: set containing geohashes within a proximity radius of patient's geohash as well as
the patient's 8 immediate neighbour geohashes
:param clinic_gh_trie: Trie containing all clinic geohashes
:return: list of matching clinic geohashes
"""
pat_nearby_clinics = set()
gh_to_match.append(pat_gh)
for gh in gh_to_match:
gh_nearby_clinic = get_geohash_nearby_clinics(gh, clinic_gh_trie)
if gh_nearby_clinic is not None:
pat_nearby_clinics.update(gh_nearby_clinic)
logger.info(f"For patient geohash = {pat_gh} nearby clinics geohashes are = {pat_nearby_clinics}")
return pat_nearby_clinics
@retry(requests.exceptions.ConnectionError, tries=retry_count, delay=delay, backoff=backoff, logger=logger)
def get_osrm_clinic_travel_distance(df_pat_row, df_clinic_row):
"""
Get the shortest travel distance between a patient's and a clinic's geolocation using OSRM api
:param df_pat_row: row of patient dataframe
:param df_clinic_row: row of clinic dataframe
:return: travel distance in km
"""
pat_lat, pat_lon = df_pat_row["Pat_Geo_Code"]
clinic_lat, clinic_lon = df_clinic_row["Clinic_Geo_Code"]
# call the OSRM API
r = requests.get(f"http://router.project-osrm.org/route/v1/car/{pat_lon},{pat_lat};"
f"{clinic_lon},{clinic_lat}?overview=false""")
# then you load the response using the json library
# by default you get only one alternative so you access 0-th element of the `routes`
routes = json.loads(r.content)
fastest_route = routes.get("routes")[0]
travel_distance = float(fastest_route["distance"] / 1000)
logger.info(f"Travel distance between patient id = {df_pat_row['ID']} with geohash = {df_pat_row['Pat_Geo_Hash']}"
f" and clinic id = {df_clinic_row['Clinic ID']} with geohash = {df_clinic_row['Clinic_Geo_Hash']}"
f" is => {travel_distance}")
return travel_distance
def get_pat_nearest_clinic(df_pat_row, df_clinics, clinic_gh_trie):
"""
For a patient record finds the clinic with shortest travel distance
:param df_pat_row: row of patient dataframe
:param df_clinics: clinic dataframe
:param clinic_gh_trie: Trie containing all clinic geohashes
:return: pandas series object with columns in output.csv
"""
pat_gh = df_pat_row["Pat_Geo_Hash"]
# To handle the edge case of a when proximity searches are done near the Greenwich Meridian or the equator,
# because in those points the MSB of x and y will be 0 or 1, so their geohashes won't share a common prefix.
# To work around this we get the neighbours of a location's geohash and proximity search will then need to
# find a list of geohashes that are prefixed by the original location and the neighbour's geohash
pat_gh_neighbors = list(geohash.neighbours(pat_gh))
pat_lat, pat_lon = df_pat_row["Pat_Geo_Code"]
# list of geohashes that are with in a specific radius of the patient location. To handle the edge case
# of a location being on the boundary of a geohash bounding box, we need to broaden the geohash
# search space
gh_in_search_radius = create_geohash(pat_lat, pat_lon, proximity_radius, geohash_precision).split(",")
gh_to_match = pat_gh_neighbors + gh_in_search_radius
pat_nearby_clinics = get_pat_nearby_clinics(pat_gh, gh_to_match, clinic_gh_trie)
# Filter the clinic dataframe using nearby clinic geohashes
    # .copy() avoids pandas chained-assignment warnings when the distance column is added below
    df_clinics_nearby = df_clinics[df_clinics["Clinic_Geo_Hash"].isin(pat_nearby_clinics)].copy()
# From the nearby clinics use the shortest travel distance ( google maps ) or shortest travel time criteria
# ( OSRM for open street maps ) to find the closest clinic
df_clinics_nearby["Clinic_Pat_Dist"] = df_clinics_nearby.apply(
lambda row: get_osrm_clinic_travel_distance(df_pat_row, row), axis=1
)
# select the clinic row with shortest travel distance
min_distance = df_clinics_nearby["Clinic_Pat_Dist"].min()
df_nearest_clinic = df_clinics_nearby.loc[df_clinics_nearby["Clinic_Pat_Dist"] == min_distance]
nearest_clinic = df_nearest_clinic.iloc[0]
# join the patient row with selected clinic row to return a series object
result_cols = ["Patient_ID", "Pat_Geo_Cols", "Pat_Geo_Code", "Pat_Address", "Pat_Postal_Code",
"Pat_FSA", "Nearest_Clinic_ID", "Clinic_Geo_Cols", "Clinic_Geo_Code", "Clinic_Address",
"Clinic_Postal Code", "Clinic_FSA", "Clinic_Distance"]
nearest_result = pd.Series([df_pat_row["ID"], df_pat_row["Pat_Geo_Cols"], df_pat_row["Pat_Geo_Code"],
df_pat_row["Address"], df_pat_row["Postal Code"], df_pat_row["FSA"],
nearest_clinic["Clinic ID"], nearest_clinic["Clinic_Geo_Cols"],
nearest_clinic["Clinic_Geo_Code"], nearest_clinic["Clinic Address"],
nearest_clinic["Postal Code"], nearest_clinic["FSA"],
nearest_clinic["Clinic_Pat_Dist"]],index=result_cols)
return nearest_result
def get_nearest_clinics(df_patients, df_clinics):
"""
For each patient record find the clinic with shortest travel distance
:param df_patients: patient dataframe
:param df_clinics: clinic dataframe
:return: dataframe with patient and nearest clinic data
"""
# Trie to hold the geohashes of all the clinics.
clinic_geohash_trie = pygtrie.CharTrie()
df_clinics_with_geohash = df_clinics[df_clinics.Clinic_Geo_Hash.notnull()]
for clinic_gh in df_clinics_with_geohash["Clinic_Geo_Hash"]:
clinic_geohash_trie[clinic_gh] = 1
df_pat_nearest_clinic = df_patients[df_patients.Pat_Geo_Hash.notnull()].apply(
lambda row: get_pat_nearest_clinic(row, df_clinics, clinic_geohash_trie), axis=1
)
return df_pat_nearest_clinic
def process_clinic_data(geocode):
df_clinics = pd.read_csv('./data/clinics.csv')
df_clinics = get_clinic_geocode(geocode, df_clinics)
df_clinics.to_pickle("./data/clinics_with_geocode.pkl")
return df_clinics
def process_patients_data(geocode):
df_patients = pd.read_csv('./data/patients.csv')
df_patients = get_patient_geocode(geocode, df_patients)
df_patients.to_pickle("./data/patients_with_geocode.pkl")
return df_patients
df_clinics = None
df_patients = None
geolocator = Nominatim(user_agent="test-app", timeout=5)
# Country = Canada is a fixed search criteria. We are only interested in locations in Canada
geocode_partial = partial(geolocator.geocode, exactly_one=False, country_codes=country_codes)
geocode = RateLimiter(geocode_partial, min_delay_seconds=1)
if os.path.isfile("./data/clinics_with_geocode.pkl"):
df_clinics = pd.read_pickle("./data/clinics_with_geocode.pkl")
else:
df_clinics = process_clinic_data(geocode)
if os.path.isfile("./data/patients_with_geocode.pkl"):
df_patients = pd.read_pickle("./data/patients_with_geocode.pkl")
else:
df_patients = process_patients_data(geocode)
print(df_clinics.to_string())
print("==========================================================================")
print(df_patients.to_string())
df_pat_nearest_clinic = get_nearest_clinics(df_patients, df_clinics)
print("==========================================================================")
print(df_pat_nearest_clinic.to_string())
df_pat_nearest_clinic.to_csv("./output/output.csv")
print("")
| true
|
5cc858373c9fd680456327f7344f6c132c979c3f
|
Python
|
langtodu/learn
|
/algorithm/prime.py
|
UTF-8
| 1,533
| 2.890625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import numpy as np
class Prim(object):
def __init__(self, graph=None, outset=0):
self.graph = np.array(graph)
self.outset = outset
self.selectd_point = [self.outset]
self.selectd_edge = [[self.outset, self.outset]]
self.all_points = self.graph.shape[1]
self.all_edges = self.graph.shape[0]
print "init"
print self.graph
    def edged(self):
        print("edge")
        # Grow the tree until every vertex has been selected.
        while len(self.selectd_point) != self.all_points:
            record_edge = []
            min_weight = float("inf")  # smallest candidate edge weight seen so far
            for i in self.selectd_point:
                for j in range(self.all_points):
                    num = self.graph[i][j]
                    # choose the lightest edge leaving the selected set (0 means "no edge")
                    if (num < min_weight) and (num != 0) and ((i not in self.selectd_point) or (j not in self.selectd_point)):
                        min_weight = num
                        record_edge = [i, j]
            self.selectd_point.append(record_edge[1])
            self.selectd_edge.append(record_edge)
            print(self.selectd_point, self.selectd_edge)
        print(self.selectd_point, self.selectd_edge)
if __name__ == "__main__":
graphs = [[0, 7, 0, 5, 0, 0, 0],
[7, 0, 8, 9, 7, 0, 0],
[0, 8, 0, 0, 5, 0, 0],
[5, 9, 0, 0, 15, 6, 0],
[0, 7, 5, 15, 0, 8, 9],
[0, 0, 0, 6, 8, 0, 11],
[0, 0, 0, 0, 9, 11, 0]
]
a = Prim(graph=graphs, outset=0)
a.edged()
| true
|
8c4ed9f5961ae0333decc36d25e9d10e11d241ee
|
Python
|
MauricioD13/Proyecto1_Codigos
|
/Python/grafica_procesamiento.py
|
UTF-8
| 4,459
| 2.828125
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt
import numpy as np
import sys
import math
import statistics
import mplcursors
def graphics(nombres_archivo,separator,number_separator,organization,name,x_label,y_label,samples,axis):
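    """Plot one or more two-column data files.

    `organization` groups the files into subplots (';'-separated groups of
    ','-separated entries), `samples` caps how many rows are plotted, and
    axis == "logaritmico" puts the x axis on a log scale.
    """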
files=[]
legend_names=[]
temp=[]
colors=["g","r","b","c","m","k"]
for i in range(len(nombres_archivo)):
temp=nombres_archivo[i].split("/")
temp=temp[len(temp)-1].split(".")
legend_names.append(temp[0])
for i in range(len(nombres_archivo)):
archivo=open(nombres_archivo[i],"r")
files.append(archivo)
x_array=[]
y_array=[]
for file in files:
guardar(file,x_array,y_array,number_separator,separator)
columns=[]
per_graph=[]
columns, per_graph=graphics_organization(organization,per_graph,columns)
lengths=[]
for i in x_array:
lengths.append(len(i))
min_samples=min(lengths)
if(int(samples)<min_samples):
min_samples=int(samples)
if(len(columns)==1):
fig, ax=plt.subplots()
for i in range(per_graph[0]):
ax.plot(x_array[i][0:min_samples],y_array[i][0:min_samples],colors[i],label=legend_names[i]) # Plot some data on the axes.
ax.grid(True)
ax.set_ylabel(y_label,fontsize=16)
ax.set_xlabel(x_label,fontsize=16)
ax.set_title(name,fontsize=16)
if(axis=="logaritmico"):
ax.semilogx()
#ax.set(ylim=(0,1000))
plt.legend(loc='upper right')
mplcursors.cursor(multiple=True).connect("add", lambda sel: sel.annotation.draggable(False))
plt.show()
else:
fig, ax=plt.subplots(1,len(columns))
k=0
for j in range(0,len(columns)):
for i in range(per_graph[j]):
                ax[j].plot(x_array[k][0:min_samples],y_array[k][0:min_samples],colors[i],label=legend_names[i]) # Plot some data on the axes.
ax[j].grid(True)
ax[j].set_ylabel(y_label,fontsize=16)
ax[j].set_xlabel(x_label,fontsize=16)
ax[j].set_title(name,fontsize=16)
if(axis=="logaritmico"):
ax[j].semilogx()
k+=1
plt.legend(loc='upper right')
mplcursors.cursor(multiple=True).connect("add", lambda sel: sel.annotation.draggable(False))
plt.show()
def guardar(archivo,x_array,y_array,number_separator,separator):
labels=[]
temp_x=[]
temp_y=[]
if(separator=="tab"):
separator="\t"
labels=archivo.readline().split(separator)
#labels[1]=labels[1].replace("\n","")
if(number_separator==","):
for line in archivo:
linea=line.split(separator)
temp=linea[0].split(",")
try:
temp_x.append(float(temp[0]+"."+temp[1]))
temp=linea[1].split(",")
temp_y.append(float(temp[0]+"."+temp[1]))
except:
continue
x_array.append(np.array(temp_x))
y_array.append(np.array(temp_y))
if(number_separator=="."):
for line in archivo:
linea=line.split(separator)
temp_x.append(float(linea[0]))
temp_y.append(float(linea[1]))
x_array.append(np.array(temp_x))
y_array.append(np.array(temp_y))
def graphics_organization(organization,per_graph,columns):
columns=organization.split(";")
x=[]
for i in columns:
x=i.split(",")
per_graph.append(len(x))
return columns, per_graph
| true
|
ca0f2214145296312a8098fe371e38202bd03ac0
|
Python
|
KujouNozom/LeetCode
|
/python/2021_01/Question0239.py
|
UTF-8
| 1,622
| 3.796875
| 4
|
[] |
no_license
|
# 239. Sliding Window Maximum
#
# You are given an integer array nums and a sliding window of size k that moves from the
# leftmost side of the array to the rightmost. Only the k numbers inside the window are
# visible, and the window slides one position to the right at a time.
# Return the maximum value for each window position.
#
# Example 1:
# Input: nums = [1,3,-1,-3,5,3,6,7], k = 3
# Output: [3,3,5,5,6,7]
# Explanation:
# Window position                 Max
# ---------------                -----
# [1  3  -1] -3  5  3  6  7        3
#  1 [3  -1  -3] 5  3  6  7        3
#  1  3 [-1  -3  5] 3  6  7        5
#  1  3  -1 [-3  5  3] 6  7        5
#  1  3  -1  -3 [5  3  6] 7        6
#  1  3  -1  -3  5 [3  6  7]       7
#
# Example 2:
# Input: nums = [1], k = 1
# Output: [1]
#
# Example 3:
# Input: nums = [1,-1], k = 1
# Output: [1,-1]
#
# Example 4:
# Input: nums = [9,11], k = 2
# Output: [11]
#
# Example 5:
# Input: nums = [4,-2], k = 2
# Output: [4]
#
# Constraints:
# 1 <= nums.length <= 10^5
# -10^4 <= nums[i] <= 10^4
# 1 <= k <= nums.length
import collections
from typing import List
class Solution:
def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
n = len(nums)
q = collections.deque()
for i in range(k):
while q and nums[i] >= nums[q[-1]]:
q.pop()
q.append(i)
ans = [nums[q[0]]]
for i in range(k, n):
while q and nums[i] >= nums[q[-1]]:
q.pop()
q.append(i)
while q[0] <= i - k:
q.popleft()
ans.append(nums[q[0]])
return ans
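# Quick check against Example 1 above.
if __name__ == '__main__':
    print(Solution().maxSlidingWindow([1, 3, -1, -3, 5, 3, 6, 7], 3))  # [3, 3, 5, 5, 6, 7]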
| true
|
7f1552e819abc71af3f1c2b7f923d5f7ea458942
|
Python
|
olisim/eks
|
/eks/eks.py
|
UTF-8
| 3,277
| 2.890625
| 3
|
[] |
no_license
|
import socket, threading, time
class EKSResponse:
def __init__(self, command, status, payload):
self.command = command
self.status = status
self.payload = payload
def __eq__(self, other):
if other == None:
return False
return self.command == other.command \
and self.status == other.status \
and self.payload == other.payload
class EKSConnector(object):
def __init__ (self, host, port=2444, timeout=5):
self.host = host
self.port = port
self.poll_interval = 1
self.socket_timeout = timeout
def __poll(self):
last_response = None
while self.polling_enabled:
self.read_key_state(self.callback)
time.sleep(self.poll_interval)
def __send_to_socket(self, msg):
bytes_sent = self.eks_socket.send(msg)
return bytes_sent
def __read_from_socket(self):
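        # Frame layout, as parsed below: byte 0 = total message length,
        # bytes 1-2 = command, byte 6 = status; the payload starts at byte 8.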
chunks = []
bytes_recd = 0
msglen = 123
command = ''
status = -1
start = 0
length = 0
while bytes_recd < msglen:
chunk = self.eks_socket.recv(1)
if chunk == '':
raise RuntimeError("socket connection broken")
chunks.append(chunk.encode("hex"))
if bytes_recd == 0:
msglen = ord(chunk)
if bytes_recd == 1:
command += chunk
if bytes_recd == 2:
command += chunk
if bytes_recd == 6:
status = ord(chunk)
bytes_recd = bytes_recd + len(chunk)
payload = ''.join(chunks[8:len(chunks)])
return EKSResponse(command, status, payload)
def read_key_state(self, callback):
self.eks_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.eks_socket.settimeout(self.socket_timeout)
self.eks_socket.connect((self.host, self.port))
response = self.__read_from_socket()
self.__handle_response(response, callback)
if response.command == "Ek" and response.status == 1:
self.__send_to_socket("\x07TL\x01\x01\x73\x09")
response = self.__read_from_socket()
self.__handle_response(response, callback)
self.eks_socket.close()
return response
def start_listening(self, callback, interval=1):
self.polling_enabled = True
self.callback = callback
self.poll_interval = interval
self.__poll()
def stop_listening(self):
self.polling_enabled = False
def __handle_response(self, response, callback):
if response.command == "Ek":
if response.status == 1:
callback.did_insert_key()
elif response.status == 2:
callback.did_remove_key()
elif response.status == 3:
raise RuntimeError
elif response.command == "RL":
callback.did_read_key(response.payload)
class EKSCallback: #abstract
def did_insert_key(self):
raise NotImplementedError
def did_remove_key(self):
raise NotImplementedError
def did_read_key(self, data):
raise NotImplementedError
| true
|
cc8a0817d942735491475dea340e9148e7a55d99
|
Python
|
preintercede/ds-a
|
/ch1/is_Unique.py
|
UTF-8
| 542
| 3.390625
| 3
|
[] |
no_license
|
def isUnique(string):
letters = {}
for letter in string:
if letter in letters:
return False
letters[letter] = True
return True
print(isUnique('abc'))
print(isUnique('abcdc'))
print(isUnique('abcdefghijklmnopqrstuvwxyz'))
print(isUnique('abcdefghijklmnopqrstuvwxyzz'))
print(isUnique('dsfadsfas'))
print(isUnique(''))
| true
|
1dfb960ce024c875fb45d3596a3c5df12850b5ab
|
Python
|
yo-han/HandleBar
|
/lib/guessit/language.py
|
UTF-8
| 13,698
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import UnicodeMixin, base_text_type, u, s
from guessit.fileutils import load_file_in_same_dir
from guessit.country import Country
import re
import logging
__all__ = [ 'is_iso_language', 'is_language', 'lang_set', 'Language',
'ALL_LANGUAGES', 'ALL_LANGUAGES_NAMES', 'UNDETERMINED',
'search_language', 'guess_language' ]
log = logging.getLogger(__name__)
# downloaded from http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
#
# Description of the fields:
# "An alpha-3 (bibliographic) code, an alpha-3 (terminologic) code (when given),
# an alpha-2 code (when given), an English name, and a French name of a language
# are all separated by pipe (|) characters."
_iso639_contents = load_file_in_same_dir(__file__, 'ISO-639-2_utf-8.txt')
# drop the BOM from the beginning of the file
_iso639_contents = _iso639_contents[1:]
language_matrix = [ l.strip().split('|')
for l in _iso639_contents.strip().split('\n') ]
# update information in the language matrix
language_matrix += [['mol', '', 'mo', 'Moldavian', 'moldave'],
['ass', '', '', 'Assyrian', 'assyrien']]
for lang in language_matrix:
# remove unused languages that shadow other common ones with a non-official form
if (lang[2] == 'se' or # Northern Sami shadows Swedish
lang[2] == 'br'): # Breton shadows Brazilian
lang[2] = ''
# add missing information
if lang[0] == 'und':
lang[2] = 'un'
if lang[0] == 'srp':
lang[1] = 'scc' # from OpenSubtitles
lng3 = frozenset(l[0] for l in language_matrix if l[0])
lng3term = frozenset(l[1] for l in language_matrix if l[1])
lng2 = frozenset(l[2] for l in language_matrix if l[2])
lng_en_name = frozenset(lng for l in language_matrix
for lng in l[3].lower().split('; ') if lng)
lng_fr_name = frozenset(lng for l in language_matrix
for lng in l[4].lower().split('; ') if lng)
lng_all_names = lng3 | lng3term | lng2 | lng_en_name | lng_fr_name
lng3_to_lng3term = dict((l[0], l[1]) for l in language_matrix if l[1])
lng3term_to_lng3 = dict((l[1], l[0]) for l in language_matrix if l[1])
lng3_to_lng2 = dict((l[0], l[2]) for l in language_matrix if l[2])
lng2_to_lng3 = dict((l[2], l[0]) for l in language_matrix if l[2])
# we only return the first given english name, hoping it is the most used one
lng3_to_lng_en_name = dict((l[0], l[3].split('; ')[0])
for l in language_matrix if l[3])
lng_en_name_to_lng3 = dict((en_name.lower(), l[0])
for l in language_matrix if l[3]
for en_name in l[3].split('; '))
# we only return the first given french name, hoping it is the most used one
lng3_to_lng_fr_name = dict((l[0], l[4].split('; ')[0])
for l in language_matrix if l[4])
lng_fr_name_to_lng3 = dict((fr_name.lower(), l[0])
for l in language_matrix if l[4]
for fr_name in l[4].split('; '))
# contains a list of exceptions: strings that should be parsed as a language
# but which are not in an ISO form
lng_exceptions = { 'unknown': ('und', None),
'inconnu': ('und', None),
'unk': ('und', None),
'un': ('und', None),
'gr': ('gre', None),
'greek': ('gre', None),
'esp': ('spa', None),
'español': ('spa', None),
'se': ('swe', None),
'po': ('pt', 'br'),
'pb': ('pt', 'br'),
'pob': ('pt', 'br'),
'br': ('pt', 'br'),
'brazilian': ('pt', 'br'),
'català': ('cat', None),
'cz': ('cze', None),
'ua': ('ukr', None),
'cn': ('chi', None),
'chs': ('chi', None),
'jp': ('jpn', None),
'scr': ('hrv', None)
}
def is_iso_language(language):
return language.lower() in lng_all_names
def is_language(language):
return is_iso_language(language) or language in lng_exceptions
def lang_set(languages, strict=False):
"""Return a set of guessit.Language created from their given string
representation.
if strict is True, then this will raise an exception if any language
could not be identified.
"""
return set(Language(l, strict=strict) for l in languages)
class Language(UnicodeMixin):
"""This class represents a human language.
You can initialize it with pretty much anything, as it knows conversion
from ISO-639 2-letter and 3-letter codes, English and French names.
You can also distinguish languages for specific countries, such as
Portuguese and Brazilian Portuguese.
There are various properties on the language object that give you the
representation of the language for a specific usage, such as .alpha3
to get the ISO 3-letter code, or .opensubtitles to get the OpenSubtitles
language code.
>>> Language('fr')
Language(French)
>>> s(Language('eng').french_name)
'anglais'
>>> s(Language('pt(br)').country.english_name)
'Brazil'
>>> s(Language('Español (Latinoamérica)').country.english_name)
'Latin America'
>>> Language('Spanish (Latin America)') == Language('Español (Latinoamérica)')
True
>>> s(Language('zz', strict=False).english_name)
'Undetermined'
>>> s(Language('pt(br)').opensubtitles)
'pob'
"""
_with_country_regexp = re.compile('(.*)\((.*)\)')
_with_country_regexp2 = re.compile('(.*)-(.*)')
def __init__(self, language, country=None, strict=False, scheme=None):
language = u(language.strip().lower())
with_country = (Language._with_country_regexp.match(language) or
Language._with_country_regexp2.match(language))
if with_country:
self.lang = Language(with_country.group(1)).lang
self.country = Country(with_country.group(2))
return
self.lang = None
self.country = Country(country) if country else None
# first look for scheme specific languages
if scheme == 'opensubtitles':
if language == 'br':
self.lang = 'bre'
return
elif language == 'se':
self.lang = 'sme'
return
elif scheme is not None:
log.warning('Unrecognized scheme: "%s" - Proceeding with standard one' % scheme)
# look for ISO language codes
if len(language) == 2:
self.lang = lng2_to_lng3.get(language)
elif len(language) == 3:
self.lang = (language
if language in lng3
else lng3term_to_lng3.get(language))
else:
self.lang = (lng_en_name_to_lng3.get(language) or
lng_fr_name_to_lng3.get(language))
# general language exceptions
if self.lang is None and language in lng_exceptions:
lang, country = lng_exceptions[language]
self.lang = Language(lang).alpha3
self.country = Country(country) if country else None
msg = 'The given string "%s" could not be identified as a language' % language
if self.lang is None and strict:
raise ValueError(msg)
if self.lang is None:
#log.debug(msg)
self.lang = 'und'
@property
def alpha2(self):
return lng3_to_lng2[self.lang]
@property
def alpha3(self):
return self.lang
@property
def alpha3term(self):
return lng3_to_lng3term[self.lang]
@property
def english_name(self):
return lng3_to_lng_en_name[self.lang]
@property
def french_name(self):
return lng3_to_lng_fr_name[self.lang]
@property
def opensubtitles(self):
if self.lang == 'por' and self.country and self.country.alpha2 == 'br':
return 'pob'
elif self.lang in ['gre', 'srp']:
return self.alpha3term
return self.alpha3
@property
def tmdb(self):
if self.country:
return '%s-%s' % (self.alpha2, self.country.alpha2.upper())
return self.alpha2
def __hash__(self):
return hash(self.lang)
def __eq__(self, other):
if isinstance(other, Language):
return self.lang == other.lang
if isinstance(other, base_text_type):
try:
return self == Language(other)
except ValueError:
return False
return False
def __ne__(self, other):
return not self == other
def __nonzero__(self):
return self.lang != 'und'
def __unicode__(self):
if self.country:
return '%s(%s)' % (self.english_name, self.country.alpha2)
else:
return self.english_name
def __repr__(self):
if self.country:
return 'Language(%s, country=%s)' % (self.english_name, self.country)
else:
return 'Language(%s)' % self.english_name
UNDETERMINED = Language('und')
ALL_LANGUAGES = frozenset(Language(lng) for lng in lng_all_names) - frozenset([UNDETERMINED])
ALL_LANGUAGES_NAMES = lng_all_names
def search_language(string, lang_filter=None):
"""Looks for language patterns, and if found return the language object,
its group span and an associated confidence.
you can specify a list of allowed languages using the lang_filter argument,
as in lang_filter = [ 'fr', 'eng', 'spanish' ]
>>> search_language('movie [en].avi')
(Language(English), (7, 9), 0.8)
>>> search_language('the zen fat cat and the gay mad men got a new fan', lang_filter = ['en', 'fr', 'es'])
(None, None, None)
"""
# list of common words which could be interpreted as languages, but which
# are far too common to be able to say they represent a language in the
    # middle of a string (where they most likely carry their common meaning)
lng_common_words = frozenset([
# english words
'is', 'it', 'am', 'mad', 'men', 'man', 'run', 'sin', 'st', 'to',
'no', 'non', 'war', 'min', 'new', 'car', 'day', 'bad', 'bat', 'fan',
'fry', 'cop', 'zen', 'gay', 'fat', 'cherokee', 'got', 'an', 'as',
'cat', 'her', 'be', 'hat', 'sun', 'may', 'my', 'mr',
# french words
'bas', 'de', 'le', 'son', 'vo', 'vf', 'ne', 'ca', 'ce', 'et', 'que',
'mal', 'est', 'vol', 'or', 'mon', 'se',
# spanish words
'la', 'el', 'del', 'por', 'mar',
# other
'ind', 'arw', 'ts', 'ii', 'bin', 'chan', 'ss', 'san', 'oss', 'iii',
'vi', 'ben'
])
sep = r'[](){} \._-+'
if lang_filter:
lang_filter = lang_set(lang_filter)
slow = ' %s ' % string.lower()
confidence = 1.0 # for all of them
for lang in lng_all_names:
if lang in lng_common_words:
continue
pos = slow.find(lang)
if pos != -1:
end = pos + len(lang)
# make sure our word is always surrounded by separators
if slow[pos - 1] not in sep or slow[end] not in sep:
continue
language = Language(slow[pos:end])
if lang_filter and language not in lang_filter:
continue
# only allow those languages that have a 2-letter code, those who
# don't are too esoteric and probably false matches
if language.lang not in lng3_to_lng2:
continue
# confidence depends on lng2, lng3, english name, ...
if len(lang) == 2:
confidence = 0.8
elif len(lang) == 3:
confidence = 0.9
else:
# Note: we could either be really confident that we found a
# language or assume that full language names are too
# common words
confidence = 0.3 # going with the low-confidence route here
return language, (pos - 1, end - 1), confidence
return None, None, None
def guess_language(text):
"""Guess the language in which a body of text is written.
This uses the external guess-language python module, and will fail and return
Language(Undetermined) if it is not installed.
"""
try:
from guess_language import guessLanguage
return Language(guessLanguage(text))
except ImportError:
log.error('Cannot detect the language of the given text body, missing dependency: guess-language')
log.error('Please install it from PyPI, by doing eg: pip install guess-language')
return UNDETERMINED
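
# A hedged usage sketch (not part of the original module), mirroring the
# doctests above; it assumes this module's language tables are loaded as usual.
if __name__ == '__main__':
    lang, span, conf = search_language('movie [en].avi')
    print('%s %s %.1f' % (lang.english_name, span, conf))  # English (7, 9) 0.8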
| true
|
42f8b8f56618f8f70dd04ab2b4e822a904acf812
|
Python
|
ashwins-code/neural-network-library
|
/framework/tensor.py
|
UTF-8
| 5,661
| 3.0625
| 3
|
[] |
no_license
|
import numpy as np
class Tensor(object):
"""Tensor class"""
def __init__(self, value):
self.value = np.array(value)
if self.value.ndim < 2:
while self.value.ndim < 2:
self.value = np.expand_dims(self.value, axis=0)
self.parents = []
self.backward = None
self.visited_parents = set()
self.path_cache = {}
self.ones = np.ones(self.value.shape)
def has_path(self, tensor):
if self == tensor:
return True
queue = self.parents.copy()
while queue != []:
current_tensor = queue[0]
queue = queue[1:]
if current_tensor == tensor:
self.path_cache[tensor] = True
return True
queue = queue + current_tensor.parents
return False
def gradient(self, tensor, ignore_visited=True):
current_tensor = self
grad = Tensor(self.ones)
while current_tensor != tensor:
if current_tensor.parents[0].has_path(tensor) and ((current_tensor.parents[0] not in current_tensor.visited_parents and ignore_visited == False) or ignore_visited):
grad = current_tensor.backward(current_tensor.parents[0], current_tensor.parents, grad)
if ignore_visited:
current_tensor.visited_parents = set()
current_tensor.visited_parents.add(current_tensor.parents[0])
current_tensor = current_tensor.parents[0]
elif current_tensor.parents[1].has_path(tensor) and ((current_tensor.parents[1] not in current_tensor.visited_parents and ignore_visited == False) or ignore_visited):
grad = current_tensor.backward(current_tensor.parents[1], current_tensor.parents, grad)
if ignore_visited:
current_tensor.visited_parents = set()
current_tensor.visited_parents.add(current_tensor.parents[1])
current_tensor = current_tensor.parents[1]
else:
return Tensor([[0]])
grad = add(grad, self.gradient(tensor, ignore_visited=False))
for i,d in enumerate(tensor.value.shape):
if d == 1:
grad.value = np.sum(grad.value, axis=i, keepdims=True)
return grad
def __str__(self):
return f"Tensor(value={self.value})"
def __add__(self, other):
return add(self, other)
def __sub__(self, other):
return sub(self, other)
def __mul__(self, other):
return mul(self, other)
def __truediv__(self, other):
return div(self, other)
def __matmul__(self, other):
return matmul(self, other)
def __pow__(self, other):
return pow(self, other)
@staticmethod
def randn(*args):
value = np.random.randn(*args)
return Tensor(value)
def dispose(self):
del self
def add(t1, t2):
if type(t1) != Tensor:
t1 = Tensor(t1)
if type(t2) != Tensor:
t2 = Tensor(t2)
t = Tensor(t1.value + t2.value)
t.parents = [t1, t2]
def add_backward(v, parents, grad):
local_grad = 0
local_grad = int(v == parents[0]) + int(v == parents[1])
grad.value = grad.value * local_grad
return grad
t.backward = add_backward
return t
def sub(t1, t2):
if type(t1) != Tensor:
t1 = Tensor(t1)
if type(t2) != Tensor:
t2 = Tensor(t2)
t = Tensor(t1.value - t2.value)
t.parents = [t1, t2]
def sub_backward(v, parents, grad):
local_grad = 0
local_grad = int(v == parents[0]) + int(v == parents[1]) * -1
grad.value = grad.value * local_grad
return grad
t.backward = sub_backward
return t
def mul(t1, t2):
if type(t1) != Tensor:
t1 = Tensor(t1)
if type(t2) != Tensor:
t2 = Tensor(t2)
t = Tensor(t1.value * t2.value)
t.parents = [t1, t2]
def mul_backward(v, parents, grad):
local_grad = 0
if v == parents[0]:
local_grad += parents[1].value
if v == parents[1]:
local_grad += parents[0].value
grad.value = grad.value * local_grad
return grad
t.backward = mul_backward
return t
def div(t1, t2):
if type(t1) != Tensor:
t1 = Tensor(t1)
if type(t2) != Tensor:
t2 = Tensor(t2)
    t = Tensor(t1.value / t2.value)
t.parents = [t1, t2]
def div_backward(v, parents, grad):
local_grad = 0
if v == parents[0]:
local_grad += 1 / parents[1].value
if v == parents[1]:
            local_grad += -(parents[0].value / (parents[1].value ** 2))
grad.value = grad.value * local_grad
return grad
t.backward = div_backward
return t
def matmul(t1, t2):
t = Tensor(t1.value.dot(t2.value))
t.parents = [t1, t2]
def matmul_backward(v, parents, grad):
local_grad = 0
if v == parents[0]:
local_grad = np.dot(grad.value, parents[1].value.T)
elif v == parents[1]:
local_grad = np.dot(parents[0].value.T, grad.value)
grad.value = local_grad
return grad
t.backward = matmul_backward
return t
def pow(t1, t2):
t = Tensor(t1.value ** t2.value)
t.parents = [t1, t2]
def pow_backward(v, parents, grad):
local_grad = 0
if v == parents[0]:
local_grad = t2.value * (t1.value ** (np.subtract(t2.value, 1)))
grad.value = local_grad
return grad
t.backward = pow_backward
return t
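
# A hedged usage sketch of the reverse-mode autodiff above (not part of the
# original module): differentiate y = x * w with respect to w.
if __name__ == "__main__":
    x = Tensor([[2.0]])
    w = Tensor([[3.0]])
    y = x * w
    print(y.gradient(w))  # d(x*w)/dw = x, so this prints Tensor(value=[[2.]])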
| true
|
dfac8853b44f5073a714f67b0931627b13570974
|
Python
|
verzep/MLDS
|
/tools.py
|
UTF-8
| 1,653
| 3.421875
| 3
|
[] |
no_license
|
import numpy as np
def _MFNN_t(X, Y, n):
'''
Compute the Mutual False Nearest Neighbors for time intex n.
The data should be given as matrices where the first dimension is time s.t X[t] is a point in space.
:param X: A matrix with dimension (time_steps, X_space_dimension)
:param Y: A matrix with dimension (time_steps, Y_space_dimension)
:param n: The time index at which you want to compute the MFNN
:return: the MFNN(n)
'''
x_n = X[n]
y_n = Y[n]
# find the NN of the drive
n_NND = np.argpartition(np.linalg.norm(X - x_n, axis=1), 1)[1]
x_n_NND = X[n_NND]
y_n_NND = Y[n_NND]
    # find the NN of the response
n_NNR = np.argpartition(np.linalg.norm(Y - y_n, axis=1), 1)[1]
x_n_NNR = X[n_NNR]
y_n_NNR = Y[n_NNR]
R = (np.linalg.norm(y_n - y_n_NND) * np.linalg.norm(x_n - x_n_NNR)
/ (np.linalg.norm(x_n - x_n_NND) * np.linalg.norm(y_n - y_n_NNR))
)
return R
def MFNN(X, Y, transient_length=None):
'''
Compute the Mutual False Nearest Neighbors doing the temporal average.
:param X: A matrix with dimension ( X_space_dimension, time_steps)
:param Y: A matrix with dimension ( Y_space_dimension, time_steps)
    :param transient_length: the number of initial points to discard. If None, the first 10% is discarded.
:return: MFNN
'''
data = []
stop = X.shape[1]
if transient_length is None:
start = stop // 10
else:
start = transient_length
for i in range(start, stop):
# Note that `_MFNN_t` uses the transpose data matrix!!!
data.append(_MFNN_t(X.T, Y.T, i))
return data
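
# A hedged usage sketch (assumed random data, not part of the original file):
# average MFNN statistic of two independent trajectories.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    X = rng.standard_normal((3, 200))  # (X_space_dimension, time_steps)
    Y = rng.standard_normal((3, 200))
    print(np.mean(MFNN(X, Y)))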
| true
|
bab4728695e18fb72cb3e9006dcad6dd210be7db
|
Python
|
maizijun/study
|
/leetcode/#461 hanming-dis.py
|
UTF-8
| 319
| 3.265625
| 3
|
[] |
no_license
|
class Solution:
def hammingDistance(self, x, y):
        ## & is the bitwise AND operator
        ## | is the bitwise OR operator
        ## ^ is the bitwise XOR operator
return bin(x^y)[2:].count('1')
# print(list(bin(11))[2:],list(bin(31))[2:])
a = Solution()
print(a.hammingDistance(11,14))
| true
|
14b51fc25473660a4799ee07e23f31594db29358
|
Python
|
Divisekara/Python-Codes-First-sem
|
/Project Euler Problems/03/project euler 3(This is my method).py
|
ISO-8859-3
| 5,661
| 4
| 4
|
[] |
no_license
|
# -*- coding: cp1252 -*-
"""
The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143 ?
"""
x=600851475143
i=2
while x!=1:
while x%i==0:
x=x/i
print i,' and ' , x
i=i+1
print 'The biggest prime factor is' ,i-1
"""
The answer is 6857
"""
"""
from itertools import *
firstPrimeGen = lambda magicnum: ifilter(lambda x: magicnum%x==0, chain((i for i in [2]),count(3,2)))
def mainIter(magicnum):
while magicnum > 1:
n = firstPrimeGen(magicnum).next()
yield n
magicnum /= n
print max(list(mainIter(600851475143)))
"""
"""
sayi = 600851475143
asalBolen = 2
while sayi != 1:
if sayi%asalBolen == 0:
sayi /=asalBolen
else:
asalBolen+=1
continue
print("En byk asal blen =",asalBolen)
"""
"""
number = 600851475143
x = 0
prime = 0
check = 1
loop = True
while loop == True:
x = x + 1
if (number / x + 0.0) % 1 == 0.0:
print(x)
prime = x
check = check * x
if check == number:
loop = False
if x >= (number / 2):
loop = False
print("")
print("The biggest prime factor equals " + str(prime))
"""
"""
#find the divisors in the form of a list
divisors=[]
n = 600851475143
i=1
while i<=n:
if n%i==0:
n/=i
divisors.append(i)
i+=1
#find the list of prime divisors
pm=[]
for x in divisors:
list=[]
for i in range(2,int(sqrt(n))+1):
while x%i==0 and len(list)<2:
list.append(i)
if len(list)==0:
pm.append(x)
#Find the max prime
max(pm)
"""
"""
list1 = []
for n in range (2,600851475143):
if 600851475143%n == 0:
list1.append(n)
print(list1)
"""
"""
In Python3:
def problem3(num):
list_num = []
for i in range(2,math.floor(math.sqrt(num))+1):
while num % i == 0:
list_num.append(i)
num /= i
print(max(list_num))
problem3(600851475143)
"""
"""
umber = 600851475143
for prime in range(2,600851475143):
if (number %prime == 0):
number = number/prime
print(prime)
elif (number == 1.0):
break
"""
"""
from math import sqrt
def isPrimer(num):
k = 0
for i in range(2, num + 1):
if num % i == 0:
k += 1
if k == 1:
return 'Yes'
def largerPrimer(num):
listP = []
for i in range(int(sqrt(num)), 1, -1):
if num % i == 0 and isPrimer(i) == 'Yes':
listP += [i]
return listP[0]
print('The largest prime factor of the number 600851475143: \n' + str(largerPrimer(600851475143)))
"""
"""
import math
def MaxPrime(Prime):
isPrime=lambda x: all(x % i != 0 for i in range(int(x**0.5)+1)[2:])
for i in range(int(math.sqrt(Prime)),0,-1):
if(Prime%i==0):
if(isPrime(i)):
return i
"""
"""
class prime_generator():
'''
prime_generator(num) -> generator
Returns a generator that generates prime numbers up to num.
'''
def __init__(self, num):
self.current_primes = set([])
self.maximum = num
self.working_num = 2
def __iter__(self):
return self
def next(self):
self.working_num += 1
while not is_prime(self.working_num):
if self.working_num in self.current_primes:
return self.working_num
self.working_num += 1
self.current_primes.add(self.working_num)
return self.working_num
def is_prime(num):
'''
is_prime(int) -> bool
Finds if a number is a prime number.
'''
x = 3
while x < num:
if num % x == 0:
return False
x += 2
return True
def largest_prime(num):
'''
largest_prime(int) -> int
Returns the largest prime that a number is divisible by.
'''
x = num
largest = 1
while True:
primes = prime_generator(x)
for prime in primes:
if x == prime:
return x
if not x % prime:
x = x / prime
largest = prime
break
if prime > x:
break
return largest
print largest_prime(600851475143)
"""
"""
My inefficient solution (8 minutes). If anyone knows a way to optimize this solution, please help. Thanks in advance.
import math
from time import *
# Find the prime numbers up to a given value
def numPrimos(tope):
primos = [2]
for num in range(3, tope+1, 2):
aux = (num-1)/2
if not any(num%x == 0 for x in primos):
primos.append(num)
return primos
def factPrimos(num):
raizCuad = int(round(math.sqrt(num)))
primosHastaNum = numPrimos(raizCuad)
for primo in primosHastaNum[::-1]:
if num%primo == 0:
return primo
tiempoIni = time()
print (factPrimos(600851475143))
tiempoFin = time()
print (tiempoFin-tiempoIni) #8 minutes!!!!
"""
"""
def isPrime(n):
for i in range(2,int(n**0.5)+1):
if n%i==0:
return False
return True
n = 1
while n <= 600851475143:
if 600851475143 % n == 0:
if isPrime(n) == True:
print n
n += 2
"""
"""
def factor(n):
from math import sqrt
Factor=list()
while n%2==0:
Factor.append(2)
n=n//2
factor=3
maxfactor=sqrt(n)
while n>1 and factor<=maxfactor:
while n%factor==0:
Factor.append(factor)
n=n//factor
maxfactor=sqrt(n)
factor+=2
else:
if n!=1:
Factor.append(n)
return Factor
print(factor(600851475143)[-1])
"""
"""
number = 600851475143
def is_prime_number(x):
for j in range(2,i):
isPrime = True
if i%j == 0:
isPrime = False
break
return isPrime
for i in xrange(2,number):
if number%i == 0 and is_prime_number(i) == True:
print i
"""
| true
|
6ae18b6337ef835d0e12be8aed60c28ca20ce9f6
|
Python
|
eryilmazysf/assignments-
|
/odev.py
|
UTF-8
| 223
| 2.71875
| 3
|
[] |
no_license
|
x="ProgramlamaÖdeviİleriSeviyeVeriYapılarıveObjeleripynb"
f=dict()
for karakter in x:
if (karakter in f):
f[karakter]+=1
else:
f[karakter]=1
for i,j in f.items():
print (i,":",j )
| true
|
3df815dd1ce62f4dec3d91101fcce3f5925bb3d3
|
Python
|
OmarMWarraich/Assignments
|
/05-Days_Between_Date.py
|
UTF-8
| 416
| 4.0625
| 4
|
[] |
no_license
|
# Ai Assignment 05 - calculate number of days between two dates
import datetime
date1 = input("Enter First Date [DD/MM/YYYY] : ")
date2 = input("Enter Second Date [DD/MM/YYYY] : ")
d1, m1, y1 = map(int, date1.split('/'))
d2, m2, y2 = map(int, date2.split('/'))
date1 = datetime.date(y1, m1, d1)
date2 = datetime.date(y2, m2, d2)
period = date2 - date1
print("Number of Days................ : ",period.days)
| true
|
453f3ce9dfa7aa5cdb50893082c007684ad768ec
|
Python
|
TPiazza21/Thesis
|
/linreg.py
|
UTF-8
| 687
| 3.078125
| 3
|
[] |
no_license
|
# for vanilla linear regression. This is the nonprivate version
import numpy as np
def linreg(X,y,epsilon,delta):
[n,d] = X.shape
XTy = X.T.dot(y)
# identity matrix added for numerical stability --> WHICH CHANGES IT TO RIDGE WITH LAMBDA=1
XTX = (X.T).dot(X) + np.eye(d)
theta_hat = np.linalg.inv(XTX).dot(XTy)
return theta_hat
def ridgereg(X,y, epsilon, delta, lamb):
[n,d] = X.shape
XTy = X.T.dot(y)
# maybe be careful about how you sometimes have lamb = 1 for the linear regression part, so maybe add 1 to lambda, if being consistent
XTX = (X.T).dot(X) + lamb * np.eye(d)
theta_hat = np.linalg.inv(XTX).dot(XTy)
return theta_hat
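
# A hedged usage sketch (epsilon/delta are unused placeholders above): recover
# known coefficients from noiseless data with a small ridge penalty.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.standard_normal((200, 3))
    theta = np.array([[1.0], [-2.0], [0.5]])
    y = X @ theta
    print(ridgereg(X, y, None, None, lamb=0.1).ravel())  # approx [1, -2, 0.5]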
| true
|
6845144bc64a215fb56d00d1634f521636ba3a90
|
Python
|
macabeus/IA
|
/MINIMAX/TicTacToe.py
|
UTF-8
| 6,562
| 3.390625
| 3
|
[] |
no_license
|
import numpy as np
import copy
class AIplayer:
def __init__(self, board, my_mark, opponent_mark):
self.board = board
class MinMaxPlay(AIplayer):
def __init__(self, board, my_mark, opponent_mark):
super(MinMaxPlay, self).__init__(board, my_mark, opponent_mark)
self.my_mark = my_mark
self.opponent_mark = opponent_mark
def searchEmptyPlaces(self, board):
empty_places = []
for row in range(3):
for col in range(3):
if board.checkEmpty(row, col):
empty_places.append((row, col))
return empty_places
def minmax(self, board, my_turn, depth):
scores = {}
empty_places = self.searchEmptyPlaces(board)
for place in empty_places:
row, col = place[0], place[1]
virt_board = copy.deepcopy(board)
if my_turn:
virt_board.setMark(row, col, self.my_mark)
else:
virt_board.setMark(row, col, self.opponent_mark)
score = self.score(virt_board, my_turn, depth)
if score is not None:
scores[(row, col)] = score
else:
score = self.minmax(virt_board, not my_turn, depth + 1)[0]
scores[(row, col)] = score
# print "scores:", scores # debug print
if my_turn:
return (max(scores.values()), max(scores, key=(lambda x: scores[x])))
else:
return (min(scores.values()), min(scores, key=(lambda x: scores[x])))
def getInput(self):
board = copy.deepcopy(self.board) # virtual board
ret = self.minmax(board, True, 0)
print()
return ret[1]
def score(self, board, my_turn, depth):
goal_flg = board.checkGoal()
if goal_flg == 1:
if my_turn:
return 10 - depth
else: # opponent wins
return depth - 10
elif goal_flg == -1: # draw
            return 0
        else:  # game not finished yet
            return None
class Board:
def __init__(self, initial_turn=True):
self.spaces = np.array([[" ", " ", " "], [" ", " ", " "], [" ", " ", " "]])
self.cnt_mark = 0
def reset(self):
self.spaces = np.array([[" ", " ", " "], [" ", " ", " "], [" ", " ", " "]])
self.cnt_mark = 0
def checkEmpty(self, row, col):
if self.spaces[row][col] == " ":
return True
else:
return False
def checkInput(self, row, col):
if (not row in [0, 1, 2]) or (not col in [0, 1, 2]):
return (False, "incorrect input, over/under range.")
elif not self.checkEmpty(row, col):
return (False, "incorrect input, it have been placed.")
else:
return (True, "")
def setMark(self, row, col, mark):
self.spaces[row][col] = mark
self.cnt_mark += 1
    # 0: not finished, 1: finished (a player won), -1: draw
def checkGoal(self):
if self.checkGoalOnRow() or self.checkGoalOnColumn() or \
self.checkGoalOnDiagonal():
return 1
elif self.cnt_mark == 9:
return -1
else:
return 0
def checkGoalOnRow(self):
for row in range(3):
if not self.spaces[row][0] == " " and \
self.spaces[row][0] == self.spaces[row][1] == self.spaces[row][2]:
return True
return False
def checkGoalOnColumn(self):
for col in range(3):
if not self.spaces[0][col] == " " and \
self.spaces[0][col] == self.spaces[1][col] == self.spaces[2][col]:
return True
return False
def checkGoalOnDiagonal(self):
if not self.spaces[1][1] == " ":
if self.spaces[0][0] == self.spaces[1][1] == self.spaces[2][2]:
return True
if self.spaces[0][2] == self.spaces[1][1] == self.spaces[2][0]:
return True
return False
# CUI
def dump(self):
print("-------------")
for row in range(3):
print('|', end='')
for col in range(3):
print(self.spaces[row][col], end='|',)
print("\n-------------")
class TicTacToe:
def __init__(self, initial_turn=True):
self.board = Board()
self.board.reset()
self.initial_turn = initial_turn
self.player_mark, self.ai_mark = None, None
self.ai = None
def setAI(self, mode, my_mark, opponent_mark):
if mode == 'minmax':
self.ai = MinMaxPlay(self.board, my_mark, opponent_mark)
else:
print("Unknown ai mode is input.")
exit()
self.ai_mark = my_mark
def setPlayerMark(self, mark):
self.player_mark = mark
def resetBoard(self):
self.board.reset()
def getInputFromStdin(self):
print("It's your turn, please input the next position. ie 0 0.")
while True:
user_input = input().split()
if len(user_input) != 2:
print("incorrect input, incorrect input size.")
continue
elif not user_input[0].isdigit() or not user_input[1].isdigit():
print("incorrect input, not integer input.")
continue
row, col = map(int, user_input)
ret = self.board.checkInput(row, col)
if ret[0]:
return row, col
else:
print(ret[1])
continue
def play(self):
player_turn = self.initial_turn # if true: player, false: ai
while True:
if player_turn:
row, col = self.getInputFromStdin()
self.board.setMark(row, col, self.player_mark)
else: # ai turn
row, col = self.ai.getInput()
print(row, col, "is input.")
self.board.setMark(row, col, self.ai_mark)
self.board.dump()
flg = self.board.checkGoal()
            if flg == 1:  # one of the players won
if player_turn:
print("You win")
else:
print("You loose")
break
elif flg == -1:
print("Draw")
break
player_turn = not player_turn
if __name__ == '__main__':
game = TicTacToe(True)
game.setPlayerMark('o')
game.setAI("minmax", 'x', 'o')
game.play()
| true
|
3e5b9ff16ca688047db0973d5022bf5f56b3c9bb
|
Python
|
Guiller1999/CursoPython
|
/BBDD/Prueba.py
|
UTF-8
| 1,813
| 3.578125
| 4
|
[] |
no_license
|
import sqlite3
def create_connection():
try:
connection = sqlite3.connect("Test.db")
return connection
except Exception as e:
print(e.__str__())
def create_table(connection, cursor):
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS USUARIOS" +
        "(Nombre VARCHAR(20), Apellido VARCHAR(20), Edad INTEGER)"
    )
    connection.commit()
def insert_data(connection, cursor, name, last_name, years):
data_user = [name, last_name, years]
    cursor.execute(
        "INSERT INTO USUARIOS VALUES(?, ?, ?)", data_user
    )
    connection.commit()
def get_data(connection, cursor):
    rows = cursor.execute("SELECT * FROM USUARIOS")
    return rows.fetchall()
connection = create_connection()
cursor = connection.cursor()
create_table(connection, cursor)
answer = "s"
while(answer != "n" and answer != "N"):
name = input(">> Ingrese nombre: ")
last_name = input(">> Ingrese apellido: ")
years = input(">> Ingrese edad: ")
insert_data(connection, cursor, name, last_name, years)
print("------------------------------------------------\n")
answer = input(">> Ingresar otro usuario. Presione S(si) / N(no)....")
print("\n------------------------------------------------\n")
rows = get_data(connection, cursor)
print("\n------------------------------------------------\n")
print("\t\t MOSTRANDO DATOS \n")
for row in rows:
print(f" >> Nombre: {row[0]}")
print(f" >> Apellido: {row[1]}")
print(f" >> Edad: {row[2]}")
print("_______________________________________________________\n")
cursor.close()
connection.close()
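
# A hedged sketch (not part of the original script): the same insert with the
# connection used as a context manager, which commits automatically on success
# (the connection itself still has to be closed).
with sqlite3.connect("Test.db") as conn:
    conn.execute("INSERT INTO USUARIOS VALUES(?, ?, ?)", ("Ana", "Diaz", 30))
conn.close()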
| true
|
56531a6da673657546d71bd35c0c91c9a7cc6a39
|
Python
|
deadiladefiatri/Deadila-Defiatri_I0320023_Aditya-Mahendra_Tugas5
|
/I0320023_Deadila Defiatri_Soal2.py
|
UTF-8
| 664
| 3.6875
| 4
|
[] |
no_license
|
# Grade conversion
Nama = str(input('Nama Lengkap : '))
Nilai = int(input('Nilai Anda skala 1-100: '))
info = 'Halo ' + Nama + '!' + ' Nilai anda setelah dikonversi adalah '
# check the score
if Nilai <= 100 and Nilai >= 85:
print(info + 'A')
elif Nilai <= 84 and Nilai >= 80:
print(info + 'A-')
elif Nilai <=79 and Nilai >= 75:
print(info + 'B+')
elif Nilai <= 74 and Nilai >= 70:
print(info + 'B')
elif Nilai <=69 and Nilai >= 65:
print(info + 'C+')
elif Nilai <= 64 and Nilai >= 60:
print(info + 'C')
elif Nilai <=59 and Nilai >= 0:
print(info + 'E')
else:
    print('Halo ' + Nama + '! ' + 'Nilai kamu tidak dapat dikonversi')
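
# A hedged table-driven equivalent of the chain above (same cutoffs; assumes
# 0 <= nilai <= 100).
def konversi(nilai):
    for batas, huruf in [(85, 'A'), (80, 'A-'), (75, 'B+'), (70, 'B'),
                         (65, 'C+'), (60, 'C'), (0, 'E')]:
        if nilai >= batas:
            return huruf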
| true
|
031478efa49a6b7e2311da9f24d23bcbb3f0bdf7
|
Python
|
reon/SmartCardDecoder
|
/t1ApduDecoder.py
|
UTF-8
| 5,278
| 2.625
| 3
|
[] |
no_license
|
##########################################################
# t1ApduDecoder.py
# Author : Bondhan Novandy
# Date : 15-16 May 2011
#
# License : Creative Commons Attribution-ShareAlike 3.0 Unported License.
# http://creativecommons.org/licenses/by-sa/3.0/
# Publish : http://bondhan.web.id (For education purpose only)
# Version : v0.1
#
# Fixes : 16 May 2011, Initial release
#
#
##########################################################
import sys
import re
# Constants
T1_ERR_MSG = {}
T1_ERR_MSG[0x500] = 'T1 stream bytes are invalid, the length is not even!'
T1_ERR_MSG[0x501] = 'T1 stream bytes are invalid. Unknown chars found!'
T1_ERR_MSG[0x502] = 'Incomplete T1 stream bytes'
T1_ERR_MSG[0x503] = 'T1 stream bytes are more than 32 bytes'
T1_ERR_MSG[0x504] = 'Unknown bytes found after EDC'
def pause():
raw_input('press Enter to continue..')
# This function checks whether the stream bytes belong
# to an I, R or S block and describes the entities of each component
def decodeT1(streamBytes):
    # matches any character that is not a hex digit
pattern = '[^a-fA-F0-9]'
strBytes = ''.join(streamBytes)
print '_________________T=1__APDU_________________'
print 'T1 = '+strBytes
print
lRet = 0
if (len(strBytes) % 2) != 0:
return T1_ERR_MSG[0x500]
elif len(re.findall(pattern, strBytes)) != 0:
return T1_ERR_MSG[0x501]
# Prologue
# Consists of NAD | PCB | LEN
# NAD
byte = 0
if byte+2 > len(strBytes):
return T1_ERR_MSG[0x502]
NAD_MSB = (strBytes[byte:(byte+1)]).upper()
NAD_LSB = (strBytes[(byte+1):(byte+2)]).upper()
NAD = NAD_MSB+NAD_LSB
print 'NAD = '+ NAD
if NAD == '00':
print ' -> NAD is ignored'
elif NAD == 'FF':
print ' -> NAD is invalid'
NAD_MSB_INT = int(NAD_MSB, 16)
NAD_LSB_INT = int(NAD_LSB, 16)
print ' -> SAD (Source Address) '+ str(NAD_LSB_INT & 0b0111)
print ' -> DAD (Destination Address) '+ str(NAD_MSB_INT & 0b0111)
# PCB
byte += 2
if byte+2 > len(strBytes):
return T1_ERR_MSG[0x502]
PCB_MSB = (strBytes[byte:(byte+1)]).upper()
PCB_LSB = (strBytes[(byte+1):(byte+2)]).upper()
PCB = PCB_MSB+PCB_LSB
print 'PCB = '+ PCB
PCB_INT = int(PCB, 16)
PCB_MSB_INT = int(PCB_MSB,16)
PCB_LSB_INT = int(PCB_LSB,16)
if (PCB_MSB_INT & 0b1000) == 0:
print ' -> I Block Identifier'
if (PCB_MSB_INT & 0b0100) != 0:
print ' -> N(S) = 1'
else:
print ' -> N(S) = 0'
if (PCB_MSB_INT & 0b0010) != 0:
print ' -> M = 1 (Chaining on progress)'
else:
print ' -> M = 0'
elif (PCB_MSB_INT & 0b1100) == 0b1000:
print ' -> R Block Identifier'
if (PCB_MSB_INT & 0b0001) == 0b0001:
print ' -> N(R) = 1'
else:
print ' -> N(R) = 0'
if PCB_LSB_INT == 0x00:
print ' -> No Error'
elif PCB_LSB_INT == 0x01:
print ' -> EDC or parity error'
elif PCB_LSB_INT == 0x02:
print ' -> Other Error'
elif (PCB_MSB_INT & 0b1100) == 0b1100:
print ' -> S Block Identifier'
if (PCB_INT & 0xFF) == 0b11000000:
print ' -> Resync Request (only from terminal)'
elif (PCB_INT & 0xFF) == 0b11100000:
print ' -> Resync Response (only from smart card)'
elif (PCB_INT & 0xFF) == 0b11000001:
print ' -> Request change to information field size'
elif (PCB_INT & 0xFF) == 0b11100001:
print ' -> Response to Request change to information field size'
elif (PCB_INT & 0xFF) == 0b11000010:
print ' -> Request Abort'
elif (PCB_INT & 0xFF) == 0b11100010:
print ' -> Response to Abort Request'
elif (PCB_INT & 0xFF) == 0b11000011:
print ' -> Request waiting time extension (only from smart card)'
elif (PCB_INT & 0xFF) == 0b11100011:
print ' -> Response to waiting time extension (only from terminal)'
elif (PCB_INT & 0xFF) == 0b11100100:
print ' -> Vpp Error Response (only from smart card)'
# LEN
byte += 2
if byte+2 > len(strBytes):
return T1_ERR_MSG[0x502]
LEN_MSB = (strBytes[byte:(byte+1)]).upper()
LEN_LSB = (strBytes[(byte+1):(byte+2)]).upper()
LEN = LEN_MSB+LEN_LSB
print 'LEN = '+ LEN
    intLen = int(LEN, 16)
print ' -> Inf Length = (Dec) ' + str(intLen)
# INF
byte += 2
if byte+intLen > len(strBytes):
return T1_ERR_MSG[0x502]
if intLen > 0:
print 'INF = '+strBytes[byte:(byte+(intLen*2))]
else:
print 'INF = No INF'
# EDC
byte += (intLen*2)
if byte+2 > len(strBytes):
return T1_ERR_MSG[0x502]
EDC_MSB = (strBytes[byte:(byte+1)]).upper()
EDC_LSB = (strBytes[(byte+1):(byte+2)]).upper()
EDC = EDC_MSB+EDC_LSB
print 'EDC = '+ EDC
xored = 0
for i in range (2, len(strBytes)-2, 2):
var = (strBytes[i]+strBytes[i+1])
xored ^= int(var, 16)
#print var + '->' + str(xored)
print ' -> Calculated EDC: (Dec) '+str(xored)+' or '+str(hex(xored))
if xored == int(EDC,16):
print ' -> EDC Sequence is valid!'
else:
print ' -> EDC Sequence is invalid!'
byte += 2
if byte < len(strBytes):
print 'UNKNOWN = '+ strBytes[byte:len(strBytes)]
return T1_ERR_MSG[0x504]
return lRet
## Main function
if __name__ == '__main__':
streamBytes = ''
if len(sys.argv) < 2:
streamBytes = raw_input('Enter the T1 stream bytes> ')
streamBytes = streamBytes.split()
else:
streamBytes = sys.argv[1:]
# Let's decode it
lRet = decodeT1(streamBytes)
if ( lRet != 0 ):
print
print 'Error Message: '
print lRet
| true
|
4f9510c7c5d00bd358bf0080f0675ba0f88658fc
|
Python
|
lazaropd/ai-residency
|
/Módulo 2 - Data Analysis/Curso 5 - Classificação/analise_residuos.py
|
UTF-8
| 2,642
| 3.140625
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
def calc_rss(residuo):
return float(((residuo) ** 2).sum())
def calc_r2(y, y_hat):
    return r2_score(y, y_hat)
def analise_residuos(y, y_hat, graph=False):
"""sendo conhecidos y (pandas column) e y_hat (numpy array Nx1)"""
size = len(y_hat)
if isinstance(y, pd.DataFrame): y = np.array(y.values.ravel())
y_hat = np.array(y_hat)
res = y - y_hat
obs = np.arange(1, size+1)
    # print a statistical summary of model performance
print('***********************************************')
print('Número de observações: ', size)
print('RSS: %.2f'%calc_rss(res))
print('R2: %.2f'%calc_r2(y, y_hat))
print('***********************************************\n')
if graph:
        # figure layout for the residual analysis
fig, ax = plt.subplots(2, 2, figsize=(16,8))
fig.suptitle('Análise de Resíduos', fontsize=20)
plt.subplots_adjust(hspace=0.3, wspace=0.3)
        # residuals in observation order, to assess whether they behave stably
ax[0][0].scatter(obs, res, marker='o', c= 'r', alpha=0.8, edgecolors='none')
ax[0][0].plot(obs, res, c= 'k', lw=0.5, alpha=0.8)
ax[0][0].plot([0, size], [0, 0], c='k')
ax[0][0].set_title('Resíduos', size=16)
        # residual normality check: a well-fitted model should leave fully stochastic, normal residuals
a, result = stats.probplot(res, plot=ax[0][1], dist='norm')
        # statistical test of whether the residuals follow a normal distribution
_, p = stats.normaltest(res)
ax[0][1].text(-2, 0.8*res.max(), 'r=%.2f\np-value=%.4f'%(result[2], p))
ax[0][1].set_title('Normalidade (pp-plot)', size=16)
        # check that the homoscedasticity assumption holds (constant residual variance over the whole domain)
ax[1][0].scatter(y_hat, res, marker='o', c= 'r', alpha=0.8, edgecolors='none')
ax[1][0].plot([0, y_hat.max()], [0, 0], c='k')
ax[1][0].set_title('Resíduos vs Ajustado', size=16)
        # residual distribution: visual check that the residuals form a normal (Gaussian) shape
ax[1][1].hist(res, density=True, facecolor='b', alpha=0.5, edgecolor='gray')
rv = stats.norm(res.mean(), res.std())
x = np.linspace(res.min(), res.max(), 100)
        ax[1][1].plot(x, rv.pdf(x), c='b', lw=2)
ax[1][1].set_title('Histograma', size=16)
plt.show()
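
# A hedged usage sketch with synthetic data (assumed values).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    y_true = rng.standard_normal(100)
    y_pred = y_true + 0.1 * rng.standard_normal(100)
    analise_residuos(y_true, y_pred, graph=False)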
| true
|
081b1cd807565906c2400487a95edec7a15225dc
|
Python
|
YoungBear/LearningNotesYsx
|
/python/code/collections_learn.py
|
UTF-8
| 844
| 3.578125
| 4
|
[] |
no_license
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
from collections import namedtuple
# namedtuple
Point = namedtuple('Point', ['x', 'y'])
p = Point(1, 2)
print(p.x)
print(p.y)
print(isinstance(p, Point))
print(isinstance(p, tuple))
# deque: a double-ended queue with fast appends/pops at both ends
from collections import deque
q = deque(['a', 'b', 'c'])
q.append('x')
q.appendleft('y')
print(q)
# defaultdict
from collections import defaultdict
dd = defaultdict(lambda: 'N/A')
dd['key1'] = 'abc'
print(dd['key1'])
print(dd['key2'])
# OrderedDict
from collections import OrderedDict
d = dict([('a', 1), ('b', 2), ('c', 3)])
print(d)
od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
print(od)
od = OrderedDict()
od['z'] = 1
od['y'] = 2
od['x'] = 3
print(list(od.keys()))
# Counter
from collections import Counter
c = Counter()
for ch in 'programming':
c[ch] = c[ch] + 1
print(c)
| true
|
d76a972aa48d071093e9efc1b8f35951b98d3864
|
Python
|
vipinvkmenon/canddatastructures_python
|
/chapter11/example3.py
|
UTF-8
| 167
| 3.671875
| 4
|
[] |
no_license
|
#Chapter 11.3
#Register Variables
def main():
i = 0
for i in range(2):
print("Value of i is " + str(i))
main() # Main function entry
| true
|
b0bae682bdda8b4bbbdf052d0f0542eaaf2da794
|
Python
|
apatel16/Deep_Learning_Projects
|
/Neural_Style_Transfer.py
|
UTF-8
| 6,267
| 2.65625
| 3
|
[] |
no_license
|
import numpy as np
from keras.preprocessing.image import load_img, img_to_array
from keras.applications import vgg19
from keras.preprocessing import image
#from google.colab import files
#uploaded = files.upload()
target_image_path = 'portrait.jpg'
style_reference_image_path = 'transfer_style_reference.jpg'
width, height = load_img(target_image_path).size
print(width, height)
img_height = 400
img_width = int(width * img_height / height)
print(img_height, img_width)
def preprocess_image(image_path):
img = load_img(image_path, target_size=(img_height, img_width))
img = img_to_array(img)
img = np.expand_dims(img, axis=0)
img = vgg19.preprocess_input(img)
return img
def deprocess_image(x):
# zero-centering by removing the mean pixel value from ImageNet
# this reverses a transformation done by vgg19.preprocess_input
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.680
    # Converts image from 'BGR' to 'RGB'; this is also part of reversing vgg19.preprocess_input
x = x[:, :, ::-1]
x = np.clip(x, 0, 255).astype('uint8')
return x
from keras import backend as K
target_image = K.constant(preprocess_image(target_image_path))
style_reference_image = K.constant(preprocess_image(style_reference_image_path))
# placeholder that will contain the generated image
combination_image = K.placeholder((1, img_height, img_width, 3))
# combines the three images in a single batch
input_tensor = K.concatenate([target_image,
style_reference_image,
combination_image], axis=0)
# builds the VGG19 network with the batch of three images as input.
# the model will be loaded with pretrained imagenet weights
model = vgg19.VGG19(input_tensor=input_tensor,
weights='imagenet',
include_top=False)
print('Model loaded.')
def content_loss(base, combination):
return K.sum(K.square(combination - base))
def gram_matrix(x):
features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
gram = K.dot(features, K.transpose(features))
return gram
def style_loss(style, combination):
S = gram_matrix(style)
C = gram_matrix(combination)
channels = 3
size = img_height * img_width
return K.sum(K.square(S - C)) / (4.0 * (channels ** 2) * (size ** 2))
def total_variation_loss(x):
a = K.square(
x[:, :img_height - 1, :img_width - 1, :] -
x[:, 1:, :img_width - 1, :])
b = K.square(
x[:, :img_height - 1, :img_width - 1, :] -
x[:, :img_height - 1, 1:, :])
return K.sum(K.pow(a + b, 1.25))
# dictionary that maps layer names to activation tensors
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
# layer used for content loss
content_layer = 'block5_conv2'
# layers used for style loss
style_layers = ['block1_conv1',
'block2_conv1',
'block3_conv1',
'block4_conv1',
'block5_conv1']
# weights in the weighted average
total_variation_weight = 1e-4
style_weight = 1.0
content_weight = 0.025
# define loss by adding all components to this scalar variable
loss = K.variable(0.0)
layer_features = outputs_dict[content_layer]
target_image_features = layer_features[0, :, :, :]
combination_features = layer_features[2, :, :, :]
loss = loss + (content_weight * content_loss(target_image_features,
combination_features))
# adds a style loss component for each target layer
for layer_name in style_layers:
layer_features = outputs_dict[layer_name]
style_reference_features = layer_features[1, :, :, :]
combination_features = layer_features[2, :, :, :]
sl = style_loss(style_reference_features, combination_features)
loss += (style_weight / len(style_layers)) * sl
# adds the total variation loss
loss = loss + (total_variation_weight * total_variation_loss(combination_image))
# gets the gradients of the generated image with regard
# to the loss
grads = K.gradients(loss, combination_image)[0]
# function to fetch the values of the current loss and the
# current gradients
fetch_loss_and_grads = K.function([combination_image], [loss, grads])
# this class wraps fetch_loss_and_grads
# in a way that lets you retrieve the losses and
# gradients via two separate method calls, which is
# required by the SciPy optimizer you'll use
class Evaluator(object):
def __init__(self):
self.loss_value = None
self.grad_values = None
def loss(self, x):
assert self.loss_value is None
x = x.reshape((1, img_height, img_width, 3))
outs = fetch_loss_and_grads([x])
loss_value = outs[0]
grad_values = outs[1].flatten().astype('float64')
self.loss_value = loss_value
self.grad_values = grad_values
return self.loss_value
def grads(self, x):
assert self.loss_value is not None
grad_values = np.copy(self.grad_values)
self.loss_value = None
self.grad_values = None
return grad_values
evaluator = Evaluator()
from scipy.optimize import fmin_l_bfgs_b
from scipy.misc import imsave
import time
result_prefix = 'my_result'
iterations = 25
# this is the initial state: the target image
x = preprocess_image(target_image_path)
# you flatten image because L-BFGS can only process flat vectors
x = x.flatten()
# run L-BFGS optimization over the pixels of the
# generated image to minimize the neural style
# loss. NOTE: we pass the function to evaluate
# the loss and gradients as two separate args
for i in range(iterations):
print('Start of iteration', i)
start_time = time.time()
x, min_val, info = fmin_l_bfgs_b(evaluator.loss,
x,
fprime=evaluator.grads,
maxfun=20)
print('Current loss value:', min_val)
img = x.copy().reshape((img_height, img_width, 3))
img = deprocess_image(img)
fname = result_prefix + '_at_iteration_%d.png' % i
imsave(fname, img)
print('Image saved as', fname)
end_time = time.time()
print('Iteration %d completed in %ds' % (i, end_time - start_time))
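
# A hedged wrap-up (assumes the loop above finished): display the final frame.
from matplotlib import pyplot as plt
plt.imshow(deprocess_image(x.copy().reshape((img_height, img_width, 3))))
plt.axis('off')
plt.show()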
| true
|
24e040f1ff8a9db830ed8f00f2615fba21dae4e3
|
Python
|
Stark101001/Snake-Game
|
/game_py.py
|
UTF-8
| 4,319
| 3.421875
| 3
|
[] |
no_license
|
import pygame
import random
pygame.init()
# =======Color Codes======
black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
# ========Title==========
title = "Snake Game"
pygame.display.set_caption(title)
#============Adding Game Icon=========
gameIcon = pygame.image.load('snake.png')
pygame.display.set_icon(gameIcon)
# =====Creating Display=============
scr_width = 800
scr_height = 600
screen = pygame.display.set_mode((scr_width, scr_height))
# ========Image Load===========
bg_img = pygame.image.load('bg.jpg').convert()
# =======Clock Function =========
FPS = 10
clk = pygame.time.Clock()
# ========Font Import======
font = pygame.font.SysFont("Times new Roman", 34)
def Snake(React, snakelist):
for XnY in snakelist:
pygame.draw.rect(screen, red, [XnY[0], XnY[1], React, React])
def End_text(msg, color):
scr_text = font.render(msg, True, color)
screen.blit(scr_text, [scr_width - 790, scr_height - 400])
def Loop():
# ==========Snake Position==========
start_x = int(scr_width / 2)
start_y = int(scr_height / 2)
# ======= Updation into Movement ====
React = 20
update_x = 0
update_y = 0
# ======= Snake List===========
snakeList = []
snakeLength = 1
# =====Creating Food ============
rFoodX = round(random.randrange(0, scr_width - React) / 20) * 20
rFoodY = round(random.randrange(0, scr_height - React) / 20) * 20
# =====Event Handling===========
Game_quit = False
Game_over = False
while not Game_quit:
while Game_over == True:
screen.fill(black)
End_text("Game Over Press\n 'Space' To Retry Press\n 'Esc' To Quit", white)
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.QUIT: # Call Quit Event
Game_quit = True # Quit game
Game_over = False # Game End
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
Game_quit = True
Game_over = False
if event.key == pygame.K_SPACE:
Loop()
for event in pygame.event.get(): # import All Event By Get method
if event.type == pygame.QUIT: # Call Quit Event
Game_quit = True # Quit game
# Event Handling =======================
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
update_x = -React
update_y = 0
if event.key == pygame.K_RIGHT:
update_x = +React
update_y = 0
if event.key == pygame.K_UP:
update_y = -React
update_x = 0
if event.key == pygame.K_DOWN:
update_y = +React
update_x = 0
# If Snake Touch Boundary Then game is over Logic ==========
if start_x >= scr_width or start_x < 0 or start_y >= scr_height or start_y < 0:
Game_over = True
start_y += update_y
start_x += update_x
screen.blit(bg_img, [0, 0]) # image load method
pygame.draw.rect(screen, blue, [rFoodX, rFoodY, React, React]) # draw rectangle as Snake Food
pygame.draw.rect(screen, black, [0, 0, scr_width, scr_height], 10) # draw rectangle as boundary
snakHead = [start_x, start_y]
snakeList.append(snakHead)
if len(snakeList) > snakeLength:
del (snakeList[0])
for eachsegment in snakeList[:-1]:
if eachsegment == snakHead:
# Game_over = True
pass
Snake(React, snakeList) # Calling Snake Function
pygame.display.update()
        if start_x == rFoodX and start_y == rFoodY:  # snake head reached the food
            rFoodX = round(random.randrange(0, scr_width - React) / 20) * 20  # respawn food on the grid
            rFoodY = round(random.randrange(0, scr_height - React) / 20) * 20
            snakeLength += 1  # grow the snake
clk.tick(FPS) # Snake movement Frame Per Seconds (FPS)
Loop()
| true
|
721ae487dc8476085bde8a3bd5c8fe5bfc170cfd
|
Python
|
vbloise3/cleanJson
|
/KinesisClient.py
|
UTF-8
| 1,612
| 2.671875
| 3
|
[] |
no_license
|
import boto3
import json
import time
# define your stream name
kpl_stream = 'kinesis-kpl-demo2'
# create the Kinesis service reference for your region
kinesis_client = boto3.client('kinesis', region_name='us-west-2')
# get the description of your Kinesis Data Stream
response = kinesis_client.describe_stream(StreamName=kpl_stream)
# use the Kinesis Data Stream description to get the shard ID
shard_id = response['StreamDescription']['Shards'][0]['ShardId']
# create your shard iterator
shard_iterator = kinesis_client.get_shard_iterator(StreamName=kpl_stream,
ShardId=shard_id,
ShardIteratorType='LATEST')
shard_iterator = shard_iterator['ShardIterator']
# retrieve your first Kinesis Data Streams record
record_response = kinesis_client.get_records(ShardIterator=shard_iterator,
Limit=2)
# loop until you have received all of the Kinesis Data Streams records
while 'NextShardIterator' in record_response:
record_response = kinesis_client.get_records(ShardIterator=record_response['NextShardIterator'],
Limit=2)
# add your deaggregation logic here
# where you will deaggregate the user records from each Kinesis Data Streams record
# you will then perform actions on your user records, such as storing them on S3, or copying them to a Redshift table
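    # A hedged sketch of per-record handling; it assumes plain UTF-8 payloads
    # (KPL-aggregated records would first need deaggregation, e.g. with the
    # aws-kinesis-agg package)
    for record in record_response['Records']:
        print(record['Data'].decode('utf-8', errors='replace'))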
print (record_response, "\n")
# wait for 1 second before retrieving the next Kinesis Data Streams record
time.sleep(1)
| true
|
5ef286e16d65714f66510eacc7bed4253c55381e
|
Python
|
pmackenz/PyPCFD
|
/src/Main.py
|
UTF-8
| 3,068
| 2.515625
| 3
|
[] |
no_license
|
'''
Created on Nov 21, 2015
@author: pmackenz
'''
from Domain import *
import subprocess
import ButcherTableau as integrator
from math import floor
from Mappings import *
def Main():
    # define the Reynolds number
Re = 1000
Re = 1
# set sliding velocity
velocity = 1.0
# mass density of the fluid
density = 1000.
# set side-length of the analysis domain
edgeDomain = 1.
# set the number of cells per edge
numCellsPerEdge = 16
numCellsPerEdge = 8
numCellsPerEdge = 4
#numCellsPerEdge = 2
# viscosity of the fluid
viscosity = density * velocity * edgeDomain / Re
# create an analysis domain
domain = Domain(edgeDomain, edgeDomain, numCellsPerEdge, numCellsPerEdge, mappingFunction=IdentityMap())
#domain = Domain(edgeDomain, edgeDomain, numCellsPerEdge, numCellsPerEdge, mappingFunction=FineEdgeMap())
domain.createParticles(2,2)
# configure the analysis type
doInit = False
solveVstar = True
solveP = True
solveVtilde = True
solveVenhanced = False
updatePosition = True
updateStress = False
addTransient = True
domain.setAnalysis(doInit,
solveVstar,
solveP,
solveVtilde,
solveVenhanced,
updatePosition,
updateStress,
addTransient)
domain.setParameters(Re, density, velocity)
domain.setInitialState()
CFL = 1.0
dt = domain.getTimeStep(CFL)
print(u"CFL=1 equals to \u0394t={:f}".format(dt))
print(domain)
# define load history and print interval
dt1 = 0.01000
target1 = 0.1
dt2 = 0.1
target2 = 1.0
# ************* don't mess with stuff below *************
domain.particleTrace(True)
# defining plot settings
domain.setPlotInterval(dt1)
# defining output settings
domain.setWriteInterval(-1)
# initializing starting time
time = 0.0
# run first segment
#domain.setTimeIntegrator(integrator.ExplicitEuler())
domain.setTimeIntegrator(integrator.RungeKutta4())
domain.plotParticleTrace('tracePlot{:04d}.png'.format(floor(time*100)))
dt = dt1
while (time+dt <= target1 + 0.1 * dt):
time += dt
domain.runAnalysis(time)
domain.plotParticleTrace('tracePlot{:04d}.png'.format(floor(time*100)))
# run second segment
domain.setTimeIntegrator(integrator.RungeKutta4())
dt = dt2
while (time + dt <= target2 + 0.1 * dt):
time += dt
domain.runAnalysis(time)
if (time % 1.0 < 0.5*dt): # write 1.0 sec duration trace plots
domain.plotParticleTrace('tracePlot{:04d}.png'.format(floor(time*100)))
domain.particleTrace(False) # this wipes old trace
domain.particleTrace(True) # this restarts trace
# generate the animation
subprocess.run('./makeAnim.sh')
if __name__ == '__main__':
Main()
| true
|
7eae5de424681c8c4b850d7e0701f5978ca3a2b4
|
Python
|
komalupatil/Leetcode_Solutions
|
/Easy/Average Salary Excluding the Minimum and Maximum Salary.py
|
UTF-8
| 640
| 4.46875
| 4
|
[] |
no_license
|
#Leetcode 1491. Average Salary Excluding the Minimum and Maximum Salary
from typing import List

class Solution1:
def average(self, salary: List[int]) -> float:
salary.sort()
total = 0
for i in range(1, len(salary)-1):
total += salary[i]
return total/(len(salary)-2)
class Solution2:
def average(self, salary: List[int]) -> float:
minS = float('inf')
maxS = float('-inf')
total = 0
for i in range(len(salary)):
total += salary[i]
minS = min(minS, salary[i])
maxS = max(maxS, salary[i])
return (total-minS-maxS)/(len(salary)-2)
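
# A hedged alternative using builtins only (same arithmetic as Solution2).
class Solution3:
    def average(self, salary: List[int]) -> float:
        return (sum(salary) - min(salary) - max(salary)) / (len(salary) - 2)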
| true
|
ddacfab2e966ad2ef562f9aeb783192c145f3e44
|
Python
|
goohooh/fastcampus_wps1
|
/KimHanwool/5th_week_Algorithm/1373.py
|
UTF-8
| 524
| 3.71875
| 4
|
[] |
no_license
|
"""
Binary to Octal
Problem set
Time limit / Memory limit / Submissions / Accepted / Solvers / Ratio
1 s / 128 MB / 2448 / 742 / 569 / 36.662%
Problem
Given a binary number, write a program that converts it to octal.
Input
The first line contains a binary number. Its length does not exceed 1,000,000.
Output
Print the given number converted to octal on the first line.
Sample input
11001100
Sample output
314
"""
n = input()
n1 = int(n, 2)
print(oct(n1)[2:])
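
# A hedged alternative: convert by 3-bit grouping instead of the int() round
# trip, so no million-digit integer is built.
def to_octal(bits):
    bits = bits.zfill((len(bits) + 2) // 3 * 3)  # left-pad to a multiple of 3
    digits = ''.join(str(int(bits[i:i+3], 2)) for i in range(0, len(bits), 3))
    return digits.lstrip('0') or '0'
# to_octal('11001100') -> '314'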
| true
|
fc784517da0e3d8e19e53cc7c19890c52a0765f8
|
Python
|
Anirud2002/flappybird
|
/game.py
|
UTF-8
| 4,225
| 2.84375
| 3
|
[] |
no_license
|
import pygame, random
pygame.init()
pygame.display.set_caption("Flappy Bird - Anirud")
pygame.display.set_icon(pygame.image.load("assets/bluebird-midflap.png"))
game_font = pygame.font.Font("04B_19.TTF", 30)
score = 0
high_score = 0
screen = pygame.display.set_mode((376, 624))
clock = pygame.time.Clock()
# game variables
gravity = 0.25
bird_movement = 0
game_active = True
bg_surface = pygame.image.load("assets/background-day.png").convert()
bg_surface = pygame.transform.scale(bg_surface, (376, 624))
floor_surface = pygame.image.load("assets/base.png").convert()
floor_surface = pygame.transform.scale(floor_surface, (376, 100))
floor_x_pos = 0
bird_surface = pygame.image.load("assets/bluebird-midflap.png").convert_alpha()
bird_rect = bird_surface.get_rect(center=(80, 272))
pipe_surface = pygame.image.load("assets/pipe-green.png").convert()
pipe_surface = pygame.transform.scale(pipe_surface, (60, 400))
pipe_list = []
SPAWNPIPE = pygame.USEREVENT
pygame.time.set_timer(SPAWNPIPE, 1200)
pipe_height = [250, 350, 400, 470]
def draw_floor():
screen.blit(floor_surface, (floor_x_pos, 524))
screen.blit(floor_surface, (floor_x_pos + 376, 524))
def create_pipe():
random_pipe_pos = random.choice(pipe_height)
bottom_pipe = pipe_surface.get_rect(midtop=(588, random_pipe_pos))
top_pipe = pipe_surface.get_rect(midbottom=(588, random_pipe_pos - 150))
return bottom_pipe, top_pipe # it returns tuple
def move_pipes(pipes):
for pipe in pipes:
pipe.centerx -= 2
return pipes
def draw_pipes(pipes):
for pipe in pipes:
if pipe.bottom >= 524:
screen.blit(pipe_surface, pipe)
else:
flip_pipe = pygame.transform.flip(pipe_surface, False, True)
screen.blit(flip_pipe, pipe)
def check_collision(pipes):
for pipe in pipes:
if bird_rect.colliderect(pipe):
return False
if bird_rect.top <= -10 or bird_rect.bottom >= 524:
return False
return True
def rotate_bird(bird):
new_bird = pygame.transform.rotozoom(bird, -bird_movement * 3, 1)
return new_bird
def score_display(game_state):
if game_state == "main_text":
score_surface = game_font.render(str(int(score)), True, (255, 255, 255))
score_rect = score_surface.get_rect(center=(188, 70))
screen.blit(score_surface, score_rect)
if game_state == "game_over":
score_surface = game_font.render(str(int(score)), True, (255, 255, 255))
score_rect = score_surface.get_rect(center=(188, 70))
screen.blit(score_surface, score_rect)
high_score_surface = game_font.render(f"High Score: {int(high_score)}", True, (255, 255, 255))
high_score_rect = score_surface.get_rect(center=(88, 470))
screen.blit(high_score_surface, high_score_rect)
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
bird_movement = 0
bird_movement -= 7
if event.key == pygame.K_SPACE and not game_active:
game_active = True
pipe_list.clear()
bird_rect.center = (80, 200)
bird_movement = 0
score = 0
if event.type == SPAWNPIPE:
pipe_list.extend(create_pipe())
screen.blit(bg_surface, (0, 0))
if game_active:
# bird
bird_movement += gravity
bird_rect.centery += bird_movement
rotated_bird = rotate_bird(bird_surface)
screen.blit(rotated_bird, bird_rect)
game_active = check_collision(pipe_list)
# pipes
pipe_list = move_pipes(pipe_list)
draw_pipes(pipe_list)
score += 0.01
score_display("main_text")
else:
score_display("game_over")
        high_score = max(high_score, score)
# floor
floor_x_pos -= 1
draw_floor()
if floor_x_pos <= -376:
floor_x_pos = 0
pygame.display.update()
clock.tick(120)
| true
|
68e08f904a2de45b90dab3de7f13d65a64f9708b
|
Python
|
AakashOfficial/ChallengeTests
|
/challenge_23/python/slandau3/BTtoLLs.py
|
UTF-8
| 2,462
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
import random
from collections import deque
"""
Given a binary tree, design an algorithm which creates a linked list of all the nodes at
each depth (e.g., if you have a tree with depth D, you'll have D linked lists).
"""
class Node:
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def __repr__(self):
return str(self.data)
class NodeLL:
def __init__(self, data, next=None):
self.data = data
self.next = next
def __repr__(self):
return self.data
class LinkedList:
def __init__(self):
self.head = None
def insert(self, data):
temp = self.head
if self.head is None:
self.head = NodeLL(data)
return
while temp.next is not None:
temp = temp.next
temp.next = NodeLL(data)
def printAll(self):
temp = self.head
while temp is not None:
print(str(temp.data))
temp = temp.next
class BinaryTree:
def __init__(self, data=None):
self.root = Node(data)
def insert(self, data):
self.root = self.__insert(data, self.root)
def __insert(self, data, root):
if root is None:
return Node(data)
elif root.data < data:
root.right = self.__insert(data, root.right)
else:
root.left = self.__insert(data, root.left)
return root
def createTree():
bt = BinaryTree(50)
for _ in range(100):
bt.insert(random.randint(0,1000))
return bt
def list_linkify(btree):
# Need to do a level order traversal
currentLevel = deque()
nextLevel = deque()
linked_lists = [LinkedList()]
currentLevel.append(btree.root)
while len(currentLevel) != 0:
current = currentLevel.pop()
linked_lists[-1].insert(current.data)
if current.left is not None:
nextLevel.append(current.left)
if current.right is not None:
nextLevel.append(current.right)
if len(currentLevel) == 0:
            if len(nextLevel) == 0:  # if there is nothing in the next level, we are done
break
currentLevel = nextLevel.copy()
nextLevel = deque()
linked_lists.append(LinkedList())
return linked_lists
bt = createTree()
l = list_linkify(bt)
for i in l:
i.printAll()
print()
| true
|
03a0633453d3d803ab304b9c5ede2046edafa661
|
Python
|
jwyx3/practices
|
/leetcode/binary-search/bs-answer/arranging-coins.py
|
UTF-8
| 553
| 3.25
| 3
|
[] |
no_license
|
# Binary search on the answer: find the largest row count whose coin total is <= n
class Solution(object):
def arrangeCoins(self, n):
"""
:type n: int
:rtype: int
"""
start, end = 0, n
while start + 1 < end:
            mid = (start + end) // 2
total = self.get_total(mid)
if total <= n:
start = mid
else:
end = mid - 1
if self.get_total(end) <= n:
return end
return start
def get_total(self, x):
        return (1 + x) * x // 2
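
# A hedged quick check of the binary search above.
if __name__ == '__main__':
    print(Solution().arrangeCoins(8))  # 3, since 1+2+3 = 6 <= 8 < 10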
| true
|
52ebd873eb6120b7d2bc5a1050d94b4343524353
|
Python
|
keltecc/ructf-2019-olymp-quals
|
/tasks/forensics-300/decoder.py
|
UTF-8
| 819
| 3.28125
| 3
|
[] |
no_license
|
#!/usr/bin/python3
import sys
from PIL import Image
def sum_pixels(img, x, y, area):
result = 0
for dx in range(area):
for dy in range(area):
result += img.getpixel((x + dx, y + dy))
return result // 255
def decode_image(img):
area = 2
correct = [0, area ** 2]
result = Image.new('1', (img.width // area, img.height // area))
for x in range(result.width):
for y in range(result.height):
color = 255 if sum_pixels(img, x*area, y*area, area) in correct else 0
result.putpixel((x, y), color)
return result
def main():
if len(sys.argv) < 2:
print('usage: {} <image.png>'.format(sys.argv[0]))
sys.exit(1)
image = Image.open(sys.argv[1])
decode_image(image).show()
if __name__ == '__main__':
main()
| true
|
09a82fb190ee3bdcfeb9d2ee3a43273f50e0e60b
|
Python
|
BoobooWei/python-cx_Oracle
|
/samples/tutorial/solutions/soda.py
|
UTF-8
| 1,538
| 2.65625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
#------------------------------------------------------------------------------
# soda.py (Section 11.2)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
#------------------------------------------------------------------------------
from __future__ import print_function
import cx_Oracle
import db_config
con = cx_Oracle.connect(db_config.user, db_config.pw, db_config.dsn)
soda = con.getSodaDatabase()
collection = soda.createCollection("friends")
content = {'name': 'Jared', 'age': 35, 'address': {'city': 'Melbourne'}}
doc = collection.insertOneAndGet(content)
key = doc.key
doc = collection.find().key(key).getOne()
content = doc.getContent()
print('Retrieved SODA document dictionary is:')
print(content)
myDocs = [
{'name': 'Gerald', 'age': 21, 'address': {'city': 'London'}},
{'name': 'David', 'age': 28, 'address': {'city': 'Melbourne'}},
{'name': 'Shawn', 'age': 20, 'address': {'city': 'San Francisco'}}
]
collection.insertMany(myDocs)
filterSpec = { "address.city": "Melbourne" }
myDocuments = collection.find().filter(filterSpec).getDocuments()
print('Melbourne people:')
for doc in myDocuments:
print(doc.getContent()["name"])
filterSpec = {'age': {'$lt': 25}}
myDocuments = collection.find().filter(filterSpec).getDocuments()
print('Young people:')
for doc in myDocuments:
print(doc.getContent()["name"])
| true
|
b9f6ca8c3db77fd61971d2aec34f78f8ae200818
|
Python
|
franklingg/LittleGames
|
/JotunsPath/Content/Assets.py
|
UTF-8
| 1,407
| 2.546875
| 3
|
[] |
no_license
|
import pygame.font
import pygame.image
from Content import Path
class Color(object):
black = (0, 0, 0)
light_black = (50, 51, 51)
light_grey = (153, 150, 165)
pearl = (208, 240, 192)
sky_blue = (93, 142, 193)
dark_blue = (24, 48, 100)
light_green = (140, 204, 76)
jade = (0, 168, 107)
forest_green = (40, 78, 56)
brown = (75, 83, 32)
beige = (138, 154, 91)
test = (198, 139, 78)
def icon():
return pygame.image.load(Path.icon)
def create_font(font, size):
if font == "karma":
return pygame.font.Font(Path.font_karma, size)
elif font == "fonBold":
return pygame.font.Font(Path.font_fonBold, size)
elif font == "papercut":
return pygame.font.Font(Path.font_papercut, size)
def home_background():
return pygame.image.load(Path.home_background)
def home_button_image():
return pygame.image.load(Path.home_button)
def level_one():
return pygame.image.load(Path.level_one)
def char_idle():
return (pygame.image.load(Path.char_idle), 4)
def char_run():
return (pygame.image.load(Path.char_run), 6)
def char_attack():
return (pygame.image.load(Path.char_attack), 6)
def char_airattack():
return (pygame.image.load(Path.char_airattack), 7)
def char_jump():
return (pygame.image.load(Path.char_jump), 2)
def char_crouch():
return (pygame.image.load(Path.char_crouch), 4)
| true
|
23a966f9a2beb45c9c296f71a391110f8b9e1140
|
Python
|
anthonyozerov/log
|
/code.py
|
UTF-8
| 9,897
| 2.515625
| 3
|
[] |
no_license
|
#imports
from datetime import date, timedelta, datetime
from dateutil.parser import parse
import sys
from netCDF4 import Dataset
from ftplib import FTP
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches #used to create legends in plots
from mpl_toolkits.basemap import Basemap
#import data
log_pd = pd.read_csv("/home/aozerov/Dropbox/programming/jupyter/log_analysis/log_analysis_db3.csv")
#delete unneeded columns
coldel = ['Unnamed: 8']
log_pd = log_pd.drop(columns = coldel)
#remove empty rows
log_pd = log_pd[np.isfinite(log_pd['spentint'])]
#sleep pie chart
sleeps = []
for i in range (0, log_pd.shape[0]):
if (log_pd.iloc[i]['Activity'] == 'sleep'):
sleeps.append([log_pd.iloc[i]['spentint'],log_pd.iloc[i]['detail']])
npsleeps = np.array(sleeps)
unique, counts = np.unique(npsleeps[:,1], return_counts=True)
print(np.asarray((unique, counts)).T)
labels = ['Alarm','No alarm', 'No data']
sizes = [113,60,19]
colors = ['lightcoral', 'yellowgreen', 'lightgray']
plt.pie(sizes, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=False, startangle=180)
plt.axis('equal')
plt.savefig("sleepalarmprops.png", dpi=300, transparent=True)
plt.show()
#sleep histogram (note: this code is quite poor, the code for the transit histogram is much better)
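# NOTE: alarmsleeplengths, noalarmsleeplengths, nodatasleeplengths and binslocs are
# used below but never defined in this script; presumably they were built from
# npsleeps by 'detail' category, mirroring the transit histogram code further down,
# e.g. (the detail strings here are a guess, not taken from the data):
# alarmsleeplengths = [float(s[0]) * 24 for s in npsleeps if s[1] == 'alarm']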
alarmweights = np.ones_like(alarmsleeplengths) / (len(alarmsleeplengths))
noalarmweights = np.ones_like(noalarmsleeplengths) / (len(noalarmsleeplengths))
nodataweights = np.ones_like(nodatasleeplengths) / (len(nodatasleeplengths))
red_patch = mpatches.Patch(color='red', label='Alarm')
green_patch = mpatches.Patch(color='green', label='No alarm')
blue_patch = mpatches.Patch(color='blue', label='No data')
plt.legend(handles=[red_patch,green_patch,blue_patch],prop={'size':10})
plt.hist(alarmsleeplengths, bins=binslocs,alpha=0.5, color = 'red', weights = alarmweights * 113/189)
# last part adjusts for probability of alarm so that it reflects probability of that length of sleep and an alarm
plt.hist(noalarmsleeplengths, bins=binslocs,alpha=0.5, color = 'green', weights = noalarmweights * 60/189)
plt.hist(nodatasleeplengths, bins=binslocs,alpha=0.5, color = 'blue', weights = nodataweights * 19/189)
plt.savefig("sleepalarms.png", dpi=300, transparent=True)
plt.show()
#transit pie chart
relocations = []
for i in range (0, log_pd.shape[0]):
if (log_pd.iloc[i]['Activity'] == 'relocation'):
relocations.append([log_pd.iloc[i]['spentint'],log_pd.iloc[i]['detail']])
nprelocations = np.array(relocations)
unique, counts = np.unique(nprelocations[:, 1], return_counts=True)
labels = ['Car','Ferry', 'Plane','Public Transit','Schoolbus','Taxi','Walk']
sizes = counts
cmap = plt.get_cmap('Pastel2')
colors = cmap(np.linspace(0, 1, len(labels)))
plt.pie(sizes, labels=None, colors=colors,
autopct=None, shadow=False, startangle=150, rotatelabels=True)
legends = []
for i in range (0, len(labels)):
new_patch = mpatches.Patch(color=colors[i], label=labels[i])
legends.append(new_patch)
plt.axis('equal')
plt.legend(handles=legends, prop={'size':10})
plt.savefig("relocations.png", dpi=300, transparent=True)
plt.show()
#transit histogram
binslocs = []
for i in range (0, 40):
binslocs.append(i*0.075)
weights=[]
binned=[]
colorarray=[]
for i in range (0, len(unique)):
transportlengths = []
for k in range(0, log_pd.shape[0]):
if (log_pd.iloc[k]['Activity'] == 'relocation' and log_pd.iloc[k]['detail'] == unique[i]):
transportlengths.append(log_pd.iloc[k]['spentint'] * 24)
transportweights = np.ones_like(transportlengths) / (len(transportlengths))
binned.append(transportlengths)
weights.append(transportweights * sizes[i]/sum(sizes))
colorarray.append(colors[i])
# plt.hist(transportlengths, bins=binslocs,alpha=0.5, color = colors[i], weights = transportweights * sizes[i]/sum(sizes))
plt.hist(binned, bins=binslocs, color = colorarray, weights = weights, stacked=True)
plt.savefig("relocations_hist.png", dpi=300, transparent=True)
plt.show()
#adjusted transit pie chart
relocations = []
for i in range (0, log_pd.shape[0]):
if (log_pd.iloc[i]['Activity'] == 'relocation'):
relocations.append(log_pd.iloc[i]['detail'])
nprelocations = np.array(relocations)
unique = np.unique(nprelocations)
relocationsums = []
for i in range (0, len(unique)):
relocationsum = 0
for k in range (0, log_pd.shape[0]):
if (log_pd.iloc[k]['detail'] == unique[i]):
relocationsum += (log_pd.iloc[k]['spentint'])
relocationsums.append(relocationsum)
labels = ['Car','Ferry', 'Plane','Public Transit','Schoolbus','Taxi','Walk']
sizes = relocationsums
cmap = plt.get_cmap('Pastel2')
colors = cmap(np.linspace(0, 1, len(unique)))
plt.pie(sizes, labels=labels, colors=colors,
autopct=None, shadow=False, startangle=150, rotatelabels=True, pctdistance = 0.8,)
legends = []
for i in range (0, len(labels)):
new_patch = mpatches.Patch(color=colors[i], label=labels[i])
legends.append(new_patch)
plt.axis('equal')
#plt.legend(handles=legends, prop={'size':10})
plt.savefig("relocationsadjusted.png", dpi=300, transparent=True, bbox_inches="tight")
plt.show()
#cumulative sums of some school-related activities
activities = ['class','schoolwork','colappwork','work','ec','test']
labels = []
labeldict = {}
cmap = plt.get_cmap('Pastel2')
colors = cmap(np.linspace(0, 1, len(activities)))
plt.figure(dpi=600)
for i in range (0, len(activities)):
new_patch = mpatches.Patch(color=colors[i], label=activities[i])
labels.append(new_patch)
x = [datetime(2018,4,18,19,2,0)]
y = [0]
for k in range (0, log_pd.shape[0]-1):
if (log_pd.iloc[k]['Activity'] == activities[i] or log_pd.iloc[k]['by;with;for;about'] == activities[i] or log_pd.iloc[k]['detail'] == activities[i]):
datetimestr = log_pd.iloc[k]['Start date/time (UTC)']
datetimeobj = parse(datetimestr)
x.append(datetimeobj)
y.append(0)
datetimestr = log_pd.iloc[k+1]['Start date/time (UTC)']
datetimeobj = parse(datetimestr)
x.append(datetimeobj)
y.append(log_pd.iloc[k]['spentint'].astype(np.float))
x.append(datetime(2018,10,20,16,54,38))
ycumsum = np.array(y).cumsum()
yprop = []
for k in range (0, len(ycumsum)):
yprop.append(ycumsum[k]/ycumsum[len(ycumsum)-1])
yprop.append(1)
plt.plot(x,yprop,'-',color = colors[i], linewidth = 1)
plt.legend(handles=labels, prop={'size':6})
x = [(parse(log_pd.iloc[0]['Start date/time (UTC)'])),(parse(log_pd.iloc[log_pd.shape[0]-1]['Start date/time (UTC)']))]
y = [0,1]
plt.plot(x, y,'--',linewidth = 0.5, color = "black")
plt.axvline(x=datetime(2018,6,20,8,45,16), linestyle='--', ymin=0, ymax = 1, linewidth=1, color='black')
plt.axvline(x=datetime(2018,8,21,5,18,56), linestyle='--', ymin=0, ymax = 1, linewidth=1, color='black')
plt.suptitle('Cumulative sums')
plt.savefig("schoolworkcumsums.png", dpi=300, transparent=True)
plt.show()
#cumulative sums of some non-school-related activities
activities = ['sleep','food','hygiene','log','language','relocation',';raina','vg']
labels = []
labeldict = {}
cmap = plt.get_cmap('Pastel2')
colors = cmap(np.linspace(0, 1, len(activities)))
plt.figure(dpi=600)
for i in range (0, len(activities)):
labeldictkey = activities[i]
new_patch = mpatches.Patch(color=colors[i], label=activities[i])
labels.append(new_patch)
x = [datetime(2018,4,18,19,2,0)]
y = [0]
for k in range (0, log_pd.shape[0]-1):
if (log_pd.iloc[k]['Activity'] == activities[i] or log_pd.iloc[k]['by;with;for;about'] == activities[i]):
datetimestr = log_pd.iloc[k]['Start date/time (UTC)']
datetimeobj = parse(datetimestr)
x.append(datetimeobj)
y.append(0)
datetimestr = log_pd.iloc[k+1]['Start date/time (UTC)']
datetimeobj = parse(datetimestr)
x.append(datetimeobj)
y.append(log_pd.iloc[k]['spentint'].astype(np.float))
x.append(parse(log_pd.iloc[log_pd.shape[0]-1]['Start date/time (UTC)']))
ycumsum = np.array(y).cumsum()
yprop = []
for k in range (0, len(ycumsum)):
yprop.append(ycumsum[k]/ycumsum[len(ycumsum)-1])
yprop.append(1)
plt.plot(x,yprop,'-',color = colors[i], linewidth = 1)
plt.legend(handles=labels, prop={'size':6})
x = [(parse(log_pd.iloc[0]['Start date/time (UTC)'])),(parse(log_pd.iloc[log_pd.shape[0]-1]['Start date/time (UTC)']))]
y = [0,1]
plt.plot(x, y,'--',linewidth = 0.5, color = "black")
plt.axvline(x=datetime(2018,6,20,8,45,16), linestyle='--', ymin=0, ymax = 1, linewidth=1, color='black')
plt.axvline(x=datetime(2018,8,21,5,18,56), linestyle='--', ymin=0, ymax = 1, linewidth=1, color='black')
plt.suptitle('Cumulative sums')
plt.savefig("nonschoolcumsums.png", dpi=300, transparent=True)
plt.show()
# orthographic globe projection
map = Basemap(projection='ortho',lat_0=45,lon_0=-20,resolution='l')
map.drawcoastlines(linewidth=0.25)
map.drawcountries(linewidth=0.25)
map.fillcontinents(color='palegreen',lake_color='lightblue', alpha = 0.5)
map.drawmapboundary(fill_color='lightblue')
map.drawmeridians(np.arange(0,360,30))
map.drawparallels(np.arange(-90,90,30))
msklon = 37.6
msklat = 55.75
map.drawgreatcircle(-74,40.75,msklon,msklat,linewidth=0.5,color='red') #ny-msk
map.drawgreatcircle(24.93,60.169,msklon,msklat,linewidth=0.5,color='red') #hel-msk
map.drawgreatcircle(16.372,48.208,msklon,msklat,linewidth=0.5,color='red') #vienna-msk
map.drawgreatcircle(17.106,48.148,16.372,48.208,linewidth=0.5,color='red') #bratislava-vienna
map.drawgreatcircle(-74,40.75,-74.8478298,46.2229071,linewidth=0.5,color='red') #ny-laclabelle
map.drawgreatcircle(-74,40.75,-75.16379,39.952,linewidth=0.5,color='red') #ny-philadelphia
plt.savefig("map.png", dpi=300, transparent=True)
plt.show()
| true
|
ee77e0d077296be4710a32a4558f8e4d87991f23
|
Python
|
tlcs11/Gold_Challenge
|
/ch4/komodo_insurance.py
|
UTF-8
| 786
| 3.28125
| 3
|
[] |
no_license
|
badges = {}  # badge id as the key, list of door numbers as the value
while True:
print("1 to make badge",
"3 to print all badges", "2 to edit a badge")
op = input("> ")
if op == "1":
b_id = int(input("enter en id:"))
        add_door = input("Add a door to badge Y or N: ")
        badges.update({b_id:[]})
        if add_door in ["Y", "y"]:
b_doors = input("Door Num: " )
badges[b_id].append(b_doors)
if op == "2":
b_id = int(input("Enter badge id:"))
remove_door = input("ARE YOU SURE YOU WOULD LIKE TO REMOVE ACCESS?")
badges.update({b_id:[NONE]})
elif op == "3":
print(badges)
elif op == "4":
exit()
| true
|
4c75e8100a6ecc25dd3acfb6ecb6c9d429bcf011
|
Python
|
rjmarshall17/trees
|
/hacker_rank_height_of_a_binary_tree.py
|
UTF-8
| 3,919
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env python3
import os
"""
The height of a binary tree is the number of edges between the tree's root and its furthest leaf.
For example, the following binary tree is of height 2:
4
/ \
/ \
2 6
/ \ / \
1 3 5 7
Function Description
Complete the getHeight or height function in the editor. It must return the height of a binary tree as an integer.
getHeight or height has the following parameter(s):
root: a reference to the root of a binary tree.
Note -The Height of binary tree with single node is taken as zero.
Input Format
The first line contains an integer n, the number of nodes in the tree.
Next line contains n space separated integer where i-th integer denotes node[i].data.
Note: Node values are inserted into a binary search tree before a reference to the tree's root
node is passed to your function. In a binary search tree, all nodes on the left branch of a node
are less than the node value. All values on the right branch are greater than the node value.
Constraints
1 <= node.data[i] <= 20
1 <= n <= 20
Output Format
Your function should return a single integer denoting the height of the binary tree.
Sample Input
3
/ \
/ \
2 5
/ / \
1 4 6
\
7
Sample Output
3
Explanation
The longest root-to-leaf path is shown below:
3
/ \ <--
/ \ <--
2 5 <--
/ / \ <--
1 4 6 <--
\ <--
7 <--
There are 4 nodes in this path that are connected by 3 edges, meaning our binary
tree's height = 3.
"""
class Node:
def __init__(self, info):
self.info = info
self.left = None
self.right = None
def __str__(self):
return str(self.info)
class BinarySearchTree:
def __init__(self):
self.root = None
def create(self, val):
if self.root is None:
self.root = Node(val)
else:
current = self.root
while True:
if val < current.info:
if current.left:
current = current.left
else:
current.left = Node(val)
break
elif val > current.info:
if current.right:
current = current.right
else:
current.right = Node(val)
break
else:
break
def height(root):
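    # Count edges rather than nodes: the recursion starts at -1 so a single-node tree has height 0.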
def __height__(current_node, current_height):
if current_node is None:
return current_height
left_height = __height__(current_node.left, current_height + 1)
right_height = __height__(current_node.right, current_height + 1)
return max(left_height, right_height)
return __height__(root,-1)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
expected_output = os.environ['OUTPUT_PATH'].replace('output', 'expected_output')
tree = BinarySearchTree()
t = int(input())
arr = list(map(int, input().split()))
for i in range(t):
tree.create(arr[i])
results = height(tree.root)
fptr.write(str(results))
fptr.write('\n')
fptr.close()
expected_results = open(expected_output, 'r').read().rstrip()
# print(" Output: >>%s<< %s" % (str(results), type(results)))
# print("Expected output: >>%s<< %s" % (str(expected_results), type(expected_results)))
assert str(results) == str(expected_results)
print("Tests passed for: %s" % os.environ['OUTPUT_PATH'])
| true
|
211db5bf8a870187431457805d2d7be898a47a5f
|
Python
|
jiyatu/dropoutconnect
|
/utils.py
|
UTF-8
| 12,042
| 3.296875
| 3
|
[] |
no_license
|
"""
Source Code for Homework 3 of ECBM E6040, Spring 2016, Columbia University
This code contains implementation of several utility funtions for the homework.
Instructor: Prof. Aurel A. Lazar
This code is based on
[1] http://deeplearning.net/tutorial/logreg.html
"""
import os
import sys
import numpy
import scipy.io
import theano
import theano.tensor as T
import cPickle, gzip, bz2
import copy
from numpy import linspace
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch everytime
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
# When storing data on the GPU it has to be stored as floats
# therefore we will store the labels as ``floatX`` as well
# (``shared_y`` does exactly that). But during our computations
# we need them as ints (we use labels as index, and if they are
# floats it doesn't make sense) therefore instead of returning
# ``shared_y`` we will have to cast it to int. This little hack
# lets ous get around this issue
return shared_x, T.cast(shared_y, 'int32')
def random_crop_image(image, crop_size, seed):
''' given the image and final size after random cropping, returns the cropped image
:type image: numpy.array (2D)
:the image to be cropped
:type crop_size: tuple or list
:the size of the image after cropping
'''
img_size = image.shape
u_end = img_size[0] - crop_size[0]
l_end = img_size[1] - crop_size[1]
rng = numpy.random.RandomState(seed)
top = rng.random_integers(0, u_end)
left = rng.random_integers(0, l_end)
img_cropped = image[top:(top + crop_size[0]), left:(left + crop_size[1])]
return img_cropped
def training_data_shuffle(img_matrix):
''' Given 2D matrix of data, where rows are individual examples, shuffles the order
of rows
'''
numpy.random.shuffle(img_matrix)
def flip_img_horiz(img_matrix):
''' Given 2D image matrix, flips the image horizontally
:type img_matrix: numpy.array
:image matrix values
'''
return numpy.fliplr(img_matrix)
def image_rescale_down(img_matrix, final_size):
''' Given 2D image matrix, 'rescales' by subsampling
'''
img_shape = img_matrix.shape
    assert final_size[0] < img_shape[0] and final_size[1] < img_shape[1]
range1 = linspace(0, img_shape[0], final_size[0]).astype(int).tolist()
range2 = linspace(0, img_shape[1], final_size[1]).astype(int).tolist()
image2 = img_matrix[range1]
return image2[:, range2]
def image_rotate(img_matrix, seed):
''' Given 2D image matrix, rotates 90 deg counterclockwise random # of times
'''
return numpy.rot90(img_matrix, k = seed)
def subtract_per_pixel_mean(img_matrix):
''' Given 2D matrix with each row a flattened image, subtracts the per pixel mean
from all the rows
:type img_matrix: numpy.array
:image matrix values
'''
row_mean = numpy.mean(img_matrix, 0)
img_matrix = img_matrix - row_mean
return img_matrix
def preprocess_data(dataset, source,crop=False,rotate=False,scaling= False):
rng = numpy.random.RandomState(99)
data_shape = dataset.shape
if source == 'MNIST':
        # MNIST images are 28 x 28 (the optional rescale to 20 x 20 is commented out below)
dataset = dataset.reshape(data_shape[0], 28, 28)
final = dataset
if crop == True:
final = numpy.empty((data_shape[0], 24, 24))
for i in range(data_shape[0]):
seed = rng.random_integers(0, 100)
if crop == True:
final[i,:,:] = random_crop_image(dataset[i,:,:],
(24, 24), seed)
if rotate == True:
final[i,:,:] = image_rotate(final[i,:,:],seed)
# final[i,:,:] = image_rescale_down(dataset[i,:,:], (20, 20))
if crop == True:
final = final.reshape(data_shape[0], 24*24)
else:
final = final.reshape(data_shape[0],28*28)
elif source == 'CIFAR-10':
#reshape to (n_examples, 3, 32, 32)
dataset = dataset.reshape(data_shape[0], 3, 32, 32)
#randomly crop to 24 x 24, randomly do horizontal flip
final = numpy.empty((data_shape[0], 3, 24, 24))
        for i in range(data_shape[0]):
for j in range(3):
seed = rng.random_integers(0, 100)
final[i,j,:,:] = random_crop_image(dataset[i,j,:,:],
(24, 24), seed)
seed = rng.random_integers(0, 1)
if seed == 0:
final[i,j,:,:] = flip_img_horiz(final[i,j,:,:])
final = final.reshape(data_shape[0], 3*24*24)
elif source == 'SVHN':
#reshape to (n_examples, 3, 32, 32)
dataset = dataset.reshape(data_shape[0], 3, 32, 32)
#randomly crop to 28 x 28, rotate, 'scale' by 85%
final = numpy.empty((data_shape[0], 3, 28, 28))
        for i in range(data_shape[0]):
for j in range(3):
seed = rng.random_integers(0, 100)
final[i,j,:,:] = random_crop_image(dataset[i,j,:,:],
(28, 28), seed)
seed = rng.random_integers(1, 4)
final[i,j,:,:] = image_rotate(final[i,j,:,:], seed)
final[i,j,:,:] = image_rescale_down(final[i,j,:,:], (24, 24))
final = final.reshape(data_shape[0], 3*28*28)
elif source == 'NORB':
#downsample from 108 x 108 to 48 x 48 (FXN NOT DONE YET), rotate, scale (FXN NOT DONE YET)
pass
return final
def load_data(ds_rate, theano_shared, source,crop = False, rotate = False,scaling = False):
''' Loads the dataset according to the source
:type ds_rate: float
:param ds_rate: downsample rate; should be larger than 1, if provided.
:type theano_shared: boolean
:param theano_shared: If true, the function returns the dataset as Theano
shared variables. Otherwise, the function returns raw data.
'''
if ds_rate is not None:
assert(ds_rate > 1.)
if source == 'SVHN':
# copied from implementation in previous homework assignment
train_set = scipy.io.loadmat('data/train_32x32_SVHN.mat')
test_set = scipy.io.loadmat('data/test_32x32_SVHN.mat')
# Convert data format from (3, 32, 32, n_samples) to (n_samples, 3*32*32)
# Also normalizes data matrix from range [1, 255] to [0, 1] by dividing by 255
# SVHN data is 3-channels (R, G, B)
def convert_data_format(data):
X = data['X'].transpose(2,0,1,3)
X = numpy.reshape(data['X'],
(numpy.prod(data['X'].shape[:-1]), data['X'].shape[-1]),
order='C').T / 255.
y = data['y'].flatten()
y[y == 10] = 0
return (X,y)
train_set = convert_data_format(train_set)
test_set = convert_data_format(test_set)
# Downsample the training dataset if specified
train_set_len = len(train_set[1])
if ds_rate is not None:
train_set_len = int(train_set_len // ds_rate)
train_set = [x[:train_set_len] for x in train_set]
# Extract validation dataset from train dataset
valid_set = [x[-(train_set_len//10):] for x in train_set]
train_set = [x[:-(train_set_len//10)] for x in train_set]
elif source == 'MNIST':
# see http://deeplearning.net/tutorial/gettingstarted.html for details
# 1 black/white channel only, each image is 1D ndarray w/28x28 values in [0,1]
f = gzip.open('data/mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
train_set = list(train_set)
valid_set = list(valid_set)
test_set = list(test_set)
# Downsample the training dataset if specified
train_set_len = len(train_set[1])
if ds_rate is not None:
train_set_len = int(train_set_len // ds_rate)
train_set = [x[:train_set_len] for x in train_set]
print 'crop :',crop,'rotate :',rotate
train_set[0] = preprocess_data(train_set[0],'MNIST',crop= crop,rotate= rotate,scaling = scaling)
valid_set[0] = preprocess_data(valid_set[0],'MNIST',crop= crop,rotate= rotate,scaling = scaling)
test_set[0] = preprocess_data(test_set[0],'MNIST',crop= crop,rotate= rotate,scaling = scaling)
elif source == 'CIFAR-10':
# see https://www.cs.toronto.edu/~kriz/cifar.html for details
# images stored as R,R,...,R, G,G,...,G, B,B,...,B
filenames = ['data/data_batch_1', 'data/data_batch_2', 'data/data_batch_3',
'data/data_batch_4', 'data/data_batch_5', 'data/test_batch']
i = 0
for item in filenames:
i += 1
fo = open(item, 'rb')
dict = cPickle.load(fo)
fo.close()
# normalize to [0,1] range
data_array = dict['data']/255.
label_array = dict['labels']
if i == 1:
train_set = [data_array, label_array]
elif i <= 5:
train_set[0] = numpy.concatenate((train_set[0], data_array))
train_set[1] = numpy.concatenate((train_set[1], label_array))
else:
test_set = [data_array, label_array]
# Downsample the training dataset if specified
train_set_len = len(train_set[1])
if ds_rate is not None:
train_set_len = int(train_set_len // ds_rate)
train_set = [x[:train_set_len] for x in train_set]
# Extract validation dataset from train dataset
valid_set = [x[-(train_set_len//10):] for x in train_set]
train_set = [x[:-(train_set_len//10)] for x in train_set]
elif source == 'NORB':
# see http://www.cs.nyu.edu/~ylclab/data/norb-v1.0-small/ for details
# labels are in {0, 1, 2, 3, 4}
train_set_x = scipy.io.loadmat('data/smallnorb-5x46789x9x18x6x2x96x96-training-dat.mat')
test_set_x = scipy.io.loadmat('data/smallnorb-5x01235x9x18x6x2x96x96-testing-dat.mat')
train_set_y = scipy.io.loadmat('data/smallnorb-5x46789x9x18x6x2x96x96-training-cat.mat')
test_set_y = scipy.io.loadmat('data/smallnorb-5x01235x9x18x6x2x96x96-testing-cat.mat')
else:
print('Invalid dataset!')
exit()
# train_set, valid_set, test_set format: tuple(input, target)
# input is a numpy.ndarray of 2 dimensions (a matrix)
# where each row corresponds to an example. target is a
# numpy.ndarray of 1 dimension (vector) that has the same length as
# the number of rows in the input. It should give the target
# to the example with the same index in the input.
if theano_shared:
test_set_x, test_set_y = shared_dataset(test_set)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
else:
rval = [train_set, valid_set, test_set]
return rval
| true
|
a36eb95ef4dc832403fc94c9aad907b04bbc2721
|
Python
|
fmidev/stac-builder
|
/stac_builder/catalog_builder.py
|
UTF-8
| 13,541
| 2.875
| 3
|
[] |
no_license
|
import json
import os
from dateutil import rrule
from datetime import datetime, timedelta
from calendar import monthrange
from dateutil.relativedelta import relativedelta
import copy
import helpers as h # import help functions from helpers.py
def dataset_collection_builder(conf):
'''
Function reads items of a dataset and builds a dataset collection.
Finds start- and enddate, extent (bbox) and available bands of dataset collection.
Input: configuration file of dataset.
Output: dataset collection object.
'''
dataset_collection_object = {
"extent": {
"spatial": {
"bbox": None
},
"temporal": {
"interval": None
}
},
"summaries": {
"datetime": {
"minimum": None,
"maximum": None
},
"bands": []
},
"links": [
{
"rel": "self",
"href": conf["destination"]["catalogBaseUrl"] + conf["datasetId"] + ".json"
}
]
}
min_time = datetime.max
max_time = datetime.min
coords = []
existing_bands = []
itemsDestination = conf["destination"]["localItemPath"]
if "selectItemsMatching" in conf["dataset"]:
dest = list(conf["dataset"]["selectItemsMatching"].keys())[0]
val = conf["dataset"]["selectItemsMatching"]["properties.orbit"]
print("Choosing items where", dest, "has the value", val)
for entry in os.scandir(itemsDestination):
item_path = open(entry.path)
item = json.load(item_path)
# Processing for S1 asc/desc catalogs
if "selectItemsMatching" in conf["dataset"]:
dest = list(conf["dataset"]["selectItemsMatching"].keys())[0]
val = conf["dataset"]["selectItemsMatching"]["properties.orbit"]
compare_val = copy.deepcopy(item)
dest_list = dest.split(".")
for key in dest_list:
try:
compare_val = compare_val[key]
except:
print("Error:", key, "is missing in item", item["id"])
continue
if compare_val == val:
pass # Continue with processing if rule is true
else:
continue
# Find biggest starttime and smallest endtime of all items
time = item["properties"]["datetime"]
starttime = item["properties"]["start_datetime"]
endtime = item["properties"]["end_datetime"]
min_time, max_time = h.FindMinMaxTime(time, starttime, endtime, min_time, max_time)
# Collect all coordinate points to a list
polygon_coordinates = item["geometry"]["coordinates"]
for coord_list in polygon_coordinates:
for coordinate in coord_list:
coords.append(coordinate)
# Write available bands to collection object
bands = list(item["assets"].keys())
for band in bands:
if band not in existing_bands:
existing_bands.append(band)
dataset_collection_object["summaries"]["bands"].append({"name": band})
bbox = h.GetBoundingBox(coords) # Find bounding box
# Update object values
dataset_collection_object["extent"]["spatial"]["bbox"] = [bbox]
dataset_collection_object["extent"]["temporal"]["interval"] = [[min_time.isoformat(timespec='seconds') + 'Z' , max_time.isoformat(timespec='seconds') + 'Z']]
dataset_collection_object["summaries"]["datetime"]["minimum"] = min_time.isoformat(timespec='seconds') + 'Z'
dataset_collection_object["summaries"]["datetime"]["maximum"] = max_time.isoformat(timespec='seconds') + 'Z'
# Merge with template
dataset_template = conf["dataset"]["template"]
dataset_collection_object_merged = h.merge(dict(dataset_template), dataset_collection_object)
return dataset_collection_object_merged
def dataset_time_collection_step1(conf, dataset_collection_object):
'''
Function builds a first draft of dataset-time collection based on
datasets start and endtime.
Input: configuration file and dataset collection of a dataset.
Output: First version of dataset-time collection. The dataset-time collection is completed in step 2.
'''
dataset_time_collection_list = []
dataset_time_collection_template = {
"extent": {
"spatial": {
"bbox": None
},
"temporal": {
"interval": None
}
},
"summaries": {
"datetime": {
"minimum": None,
"maximum": None
},
"bands": []
},
"links": [
]
}
collection_starttime_str = dataset_collection_object["summaries"]["datetime"]["minimum"]
collection_starttime = datetime.strptime(collection_starttime_str, '%Y-%m-%dT%H:%M:%SZ')
collection_endtime_str = dataset_collection_object["summaries"]["datetime"]["maximum"]
collection_endtime = datetime.strptime(collection_endtime_str, '%Y-%m-%dT%H:%M:%SZ')
# Create empty dataset-time objects based on timeframe
timeFrame = conf["dataset-time"]["timeFrame"]
if timeFrame == "week":
rule = rrule.WEEKLY
collection_starttime_iter = collection_starttime - timedelta(days=collection_starttime.weekday()) #first day of week
collection_starttime_iter = collection_starttime_iter.replace(hour=0, minute=0, second=0)
collection_endtime_iter = collection_endtime + timedelta(days=6) # last day of week
elif timeFrame == "month":
rule = rrule.MONTHLY
collection_starttime_iter = collection_starttime.replace(day=1) # first day of month
collection_starttime_iter = collection_starttime_iter.replace(hour=0, minute=0, second=0)
collection_endtime_iter = collection_endtime.replace(day = monthrange(collection_endtime.year, collection_endtime.month)[1])
elif timeFrame == "year":
rule = rrule.YEARLY
collection_starttime_iter = datetime(collection_starttime.year, 1, 1) # first day of year
collection_endtime_iter = datetime(collection_endtime.year, 12, 31) # last day of year
else:
raise ValueError("Unrecognized time frame in configuration file")
# Add dataset-time collections to list, when iterating over time frame
for dt in rrule.rrule(rule, dtstart=collection_starttime_iter, until=collection_endtime_iter):
if timeFrame == "week":
id = conf["datasetId"] +"_"+ str(dt.year) +"-"+ '{:02d}'.format(dt.month) +"-"+ '{:02d}'.format(dt.day)
end = dt + timedelta(days=6) # last day of week
elif timeFrame == "month":
id = conf["datasetId"] +"_"+ str(dt.year) +"-"+ '{:02d}'.format(dt.month)
end = dt.replace(day = monthrange(dt.year, dt.month)[1]) # last day of month
else: # if timeFrame == "year"
id = conf["datasetId"] +"_"+ str(dt.year)
end = dt + relativedelta(years=+1, days=-1) # last day of year
# Update dataset-time object
dataset_time_collection_new = copy.deepcopy(dataset_time_collection_template)
dataset_time_collection_new["id"] = id
dataset_time_collection_new["summaries"]["datetime"]["minimum"] = dt
dataset_time_collection_new["summaries"]["datetime"]["maximum"] = end
dataset_time_collection_new["extent"]["temporal"]["interval"] = [[dt, end]]
link = conf["destination"]["catalogBaseUrl"] + id + ".json"
dataset_time_collection_new["links"].append({"rel": "self", "href": link})
# Merge with template
dataset_time_template = copy.deepcopy(conf["dataset-time"]["template"])
dataset_time_object_merged = h.merge(dict(dataset_time_template), dataset_time_collection_new)
dataset_time_collection_list.append(dataset_time_object_merged)
return dataset_time_collection_list
def dataset_time_collection_step2(conf, dataset_time_collection_list, dataset_collection_object):
'''
Loops over dataset's items and adds them to correct dataset-time collection objects.
Writes dataset-time collections to given location.
Input: configuration file of dataset, dataset-time collection list, dataset collection object.
Output: dataset collection object.
'''
itemsDestination = conf["destination"]["localItemPath"]
# Loop over "empty" dataset-time collections
for dataset_time_collection in dataset_time_collection_list:
dataset_start = dataset_time_collection["summaries"]["datetime"]["minimum"]
dataset_end = dataset_time_collection["summaries"]["datetime"]["maximum"]
min_time = datetime.max
max_time = datetime.min
bands = []
coords = []
# Loop over items
for entry in os.scandir(itemsDestination):
item_path = open(entry.path)
item = json.load(item_path)
# Processing for S1 asc/desc catalogs
if "selectItemsMatching" in conf["dataset"]:
dest = list(conf["dataset"]["selectItemsMatching"].keys())[0]
val = conf["dataset"]["selectItemsMatching"]["properties.orbit"]
compare_val = copy.deepcopy(item)
dest_list = dest.split(".")
for key in dest_list:
try:
compare_val = compare_val[key]
except:
print("Error:", key, "is missing in item", item["id"])
continue
if compare_val == val:
pass # Continue with processing if rule is true
else:
continue
assets = list(item["assets"].keys())
item_start_str = item["properties"]["start_datetime"]
item_end_str = item["properties"]["end_datetime"]
item_date_str = item["properties"]["datetime"]
if item_start_str:
item_start = datetime.strptime(item_start_str, '%Y-%m-%dT%H:%M:%SZ')
item_end = datetime.strptime(item_end_str, '%Y-%m-%dT%H:%M:%SZ')
elif item_date_str:
item_start = datetime.strptime(item["properties"]["datetime"], '%Y-%m-%dT%H:%M:%SZ')
item_end = item_start
else:
print("Error. All timestamps are None.")
item_coords = item["geometry"]["coordinates"]
# Add item to dataset-time collection
if item_end >= dataset_start and item_start <= dataset_end:
for i in item["links"]:
if "self" in str(i):
item_link = i["href"]
dataset_time_collection["links"].append({'rel': 'item', 'href': item_link, 'time': {'time_start': item_start.isoformat(timespec='seconds') + 'Z', 'time_end': item_end.isoformat(timespec='seconds') + 'Z'}})
min_time, max_time, bands, coords = h.UpdateDatasetTime(item_start, item_end, min_time, max_time, assets, bands, item_coords, coords)
else:
continue # item does not fit this dataset-time collection
if len(dataset_time_collection["links"]) > 2: # check if any items have been added to collection
# Update dataset-time collection's min and max time
dataset_time_collection["summaries"]["datetime"]["minimum"] = min_time.isoformat(timespec='seconds') + 'Z'
dataset_time_collection["summaries"]["datetime"]["maximum"] = max_time.isoformat(timespec='seconds') + 'Z'
dataset_time_collection["extent"]["temporal"]["interval"] = [[min_time.isoformat(timespec='seconds') + 'Z', max_time.isoformat(timespec='seconds') + 'Z']]
# Add info of available bands
for band in bands:
dataset_time_collection["summaries"]["bands"].append({"name": band})
bbox_collection = h.GetBoundingBox(coords) # Get bbox of items in collection
dataset_time_collection["extent"]["spatial"]["bbox"] = bbox_collection
id = dataset_time_collection["id"]
path = conf["destination"]["localCatalogPath"] + id + ".json"
# Write to location
with open(path, 'w') as outfile:
json.dump(dataset_time_collection, outfile)
# Update dataset collection
for i in dataset_time_collection["links"]:
if "self" in str(i):
child_link = i["href"]
dataset_collection_object["links"].append({'rel': 'child', 'href': child_link, 'time':{'time_start': dataset_start.isoformat(timespec='seconds') + 'Z', 'time_end': dataset_end.isoformat(timespec='seconds') + 'Z'}})
return dataset_collection_object
def main(conf):
dataset_collection_object = dataset_collection_builder(conf)
dataset_time_collection_list = dataset_time_collection_step1(conf, dataset_collection_object)
dataset_collection_object = dataset_time_collection_step2(conf, dataset_time_collection_list, dataset_collection_object)
# Write dataset -collection object
path = conf["destination"]["localCatalogPath"] + dataset_collection_object["id"] + ".json"
with open(path, 'w') as outfile:
json.dump(dataset_collection_object, outfile)
| true
|
1c98bfe8a8634a3f40d67dda486ed1628373065b
|
Python
|
andrejmoltok/13d_rendszeruz_2020
|
/szotarak.py
|
UTF-8
| 1,540
| 3.625
| 4
|
[] |
no_license
|
import random as rnd
# Dictionary data structure
# The dictionary data structure stores key-value pairs. A key may appear only once in the dictionary.
magassagok={}
# Assigning values
magassagok['Zoltán']=175
magassagok['Imre']=188
magassagok['Ágnes']=171
magassagok['Jolán']=166
# Accessing a value
print(magassagok['Zoltán'])
print(magassagok['Ágnes'])
# Printing the keys
print(magassagok.keys())
print(magassagok.values())
print(magassagok.items())
# Printing with a for loop
for i in magassagok:
print(i+","+str(magassagok[i]))
for i in magassagok.keys():
print(i)
# modifying the value stored under a key
magassagok['Zoltán']=195
# printing based on the items
for i,j in magassagok.items():
print("Kulcs:{},érték:{}".format(i,j))
# removing elements
magassagok.pop('Zoltán')
print(magassagok.items())
# removing the last inserted item
magassagok.popitem()
print(magassagok.items())
magassagok.clear()
print(magassagok.items())
nevek=["Tamás","Jolán","Elek","Imre","Róbert","Mihály","Anita","Zoltán"]
szamok=[12,120,44,55,66,77,88,99,66]
legm = list(filter(lambda x: x == max(szamok), szamok))
print("legm:" + str(legm))
soknev=[]
elemszam=1000
for i in range(0,elemszam):
soknev.append(nevek[rnd.randint(0,len(nevek)-1)])
print(soknev)
# Show how many times each name occurs
nevgyujt={}
for i in soknev:
if i in nevgyujt:
nevgyujt[i]+=1
else:
nevgyujt[i]=1
for i,j in nevgyujt.items():
print("{}:{}".format(i,j))
| true
|
88e9c9243dc3a940bcd183e7e07b9986e9d999af
|
Python
|
sinnuswong/learnpy
|
/su.py
|
UTF-8
| 268
| 3.515625
| 4
|
[] |
no_license
|
from math import sqrt
def f(a):
    n = int(sqrt(a))
    for i in range(2, n + 1):
        if a % i == 0:
            return False
    return True
a=int(input("please input a:"))
for i in range(2,a):
if f(i):
print(i,end=' ')
| true
|
61c25c44871a6bf8599ec788d2480f8eb54ab293
|
Python
|
net-lisias-ksph/KerbinSideGAP
|
/geometry.py
|
UTF-8
| 4,978
| 3.3125
| 3
|
[] |
no_license
|
from math import sqrt, sin, cos, tan, asin, acos, pi
KERBIN_RADIUS = 600.0
MAX_ROUTE_STEP = 25.0
class Vector(object):
DIMENSION_ERROR = 'Can not combine Vectors with different dimensions'
@classmethod
def cross(cls, fst, sec):
assert len(fst) == len(sec), Vector.DIMENSION_ERROR
if len(fst) == 2:
return (fst[0] * sec[1] - sec[0] * fst[1])
if len(fst) == 3:
return cls(
fst[1] * sec[2] - sec[1] * fst[2],
fst[2] * sec[0] - sec[2] * fst[0],
fst[0] * sec[1] - sec[0] * fst[1],
)
raise NotImplementedError
@classmethod
def normalize(cls, obj):
normalizator = 1.0 / abs(obj)
return cls(coord * normalizator for coord in obj)
def __init__(self, *args):
if len(args) == 1:
args = list(args[0])
self.data = tuple(args)
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
def __getitem__(self, key):
return self.data[key]
def __eq__(self, other):
return self.data == other.data
def __add__(self, other):
if isinstance(other, self.__class__):
assert len(self) == len(other), Vector.DIMENSION_ERROR
return self.__class__(pair[0] + pair[1] for pair in zip(self, other))
raise NotImplementedError
def __mul__(self, other):
if isinstance(other, self.__class__):
assert len(self) == len(other), Vector.DIMENSION_ERROR
return sum(pair[0] * pair[1] for pair in zip(self, other))
return self.__class__(coord * other for coord in self)
def __neg__(self):
return self * -1
def __sub__(self, other):
return self + (-other)
def __rmul__(self, other):
return self * other
def __abs__(self):
return sqrt(self * self)
def __repr__(self):
return self.__class__.__name__ + repr(self.data)
def svg_form(self):
return ','.join(str(round(coord, 2)) for coord in self.data)
def deg_to_rad(deg):
return pi * deg / 180
def rad_to_deg(rad):
return 180 * rad / pi
def bound(func, arg):
"""Applies arc function (acos or asin) with correct bounds."""
return func(max(-1, min(1, arg)))
def point_on_sphere(pt):
"""Returns point on a sphere given by angle coordinates."""
theta, phi = map(deg_to_rad, pt[:2])
return Vector(cos(phi) * cos(theta), sin(phi) * cos(theta), sin(theta))
def angles_from_sphere(pt):
"""Returns angle coordinates of the point on a sphere."""
theta = bound(asin, pt[2])
phi = bound(acos, pt[0] / cos(theta))
if pt[1] * cos(theta) < 0:
phi = -phi
return map(rad_to_deg, (theta, phi))
def chord_to_tangent(pt1, pt2):
"""
Returns the tangent vector for sphere in the direction of the given chord.
"""
chord = pt2 - pt1
coef = chord * pt1
tang_dir = chord - coef * pt1
return Vector.normalize(tang_dir)
def step_to(pt1, pt2, dist):
"""
Returns point lying on the line along the surface from the first point to
the second, with specified distance from the first point (in kilometres).
"""
pt1 = point_on_sphere(pt1)
pt2 = point_on_sphere(pt2)
if abs(dist) + MAX_ROUTE_STEP > KERBIN_RADIUS * pi / 2:
raise ValueError(
'Too big distance {}, can not provide acceptable accuracy'.format(dist)
+ ' (consider dividing step into several parts)'
)
tang_distance = tan(dist / KERBIN_RADIUS)
pt = Vector.normalize(pt1 + chord_to_tangent(pt1, pt2) * tang_distance)
return angles_from_sphere(pt)
def distance(pt1, pt2):
"""Calculates distance between points along the surface."""
pt1 = map(deg_to_rad, pt1[:2])
pt2 = map(deg_to_rad, pt2[:2])
ang_cos = sin(pt1[0]) * sin(pt2[0]) + cos(pt1[0]) * cos(pt2[0]) * cos(pt1[1] - pt2[1])
return KERBIN_RADIUS * bound(acos, ang_cos)
def make_route_points(pt1, pt2, include_first=True, include_last=True):
"""
Yields evenly distributed points lying not too far from each other on the
line along the surface from the first point to the second.
"""
dist = distance(pt1, pt2)
steps = int(dist / MAX_ROUTE_STEP + 0.95)
step = dist / steps
if include_first:
yield pt1
for _ in xrange(steps - 1):
pt1 = step_to(pt1, pt2, step)
yield pt1
if include_last:
yield pt2
def heading(pt1, pt2):
"""
Returns heading at the first point of the direction from the first point to
the second.
"""
pt1 = point_on_sphere(pt1)
pt2 = point_on_sphere(pt2)
dir_tang = chord_to_tangent(pt1, pt2)
north_pole = point_on_sphere((90, 0))
pole_tang = chord_to_tangent(pt1, north_pole)
heading = rad_to_deg(bound(acos, dir_tang * pole_tang))
if pt1 * Vector.cross(dir_tang, pole_tang) < 0:
return 360 - heading
return heading
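# Example usage (the coordinates below are illustrative placeholders only):
# start, target = (-0.0972, -74.5577), (20.0, -60.0)   # (latitude, longitude) in degrees
# print distance(start, target)   # surface distance in km
# print heading(start, target)    # initial heading in degrees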
| true
|
fbed05440f6898ee3ce6d121bda76df0a3644d45
|
Python
|
RsTaK/password-manager
|
/python/interaction.py
|
UTF-8
| 1,424
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
import backend_logic
print('='*30)
print('Welcome to this Password Manager')
if input('Type 1 to continue: ') == '1':
pass_manager_obj = backend_logic.pass_manager('pass_manager.db')
print('='*30)
print('These are the options available')
print('st -> Store Password')
print('ge -> Get Password')
print('vw -> View Records')
print('dl -> Delete Record')
response = input('Choose your options: ')
if response == 'st':
service, username = input('Enter Service and Username here: ').split()
key = pass_manager_obj.store_password(service, username)
print('Your Encrypted password for service {} with username {} : {}'.format(service, username, key))
    elif response == 'ge':
service, username = input('Enter Service and Username here: ').split()
password = pass_manager_obj.get_password(service, username)
if password:
print('Your Encrypted password for service {} with username {} : {}'.format(service, username, password))
else:
print('Record for service {} and username{} not found'.format(service, username))
    elif response == 'vw':
for keys in pass_manager_obj.get_table():
print(keys)
    elif response == 'dl':
service, username = input('Enter Service and Username here: ').split()
res = pass_manager_obj.delete_row(service, username)
print(res)
else:
print('Try Again Later On')
'''
Todo :
1.Collision Control
2.Multiple User Access
'''
| true
|
37ab6a660b6870ac76437d36aab77ea98e7982d7
|
Python
|
zanghu/gitbook_notebook
|
/assets/code/python_code/python_md5/cal_md5.py
|
UTF-8
| 853
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
import hashlib
def md5_by_line(pth):
    """Compute a file's MD5 by feeding it in line by line."""
    m = hashlib.md5()
    with open(pth,'rb') as f:  # open the file in binary read mode
        for line in f:  # feed in one "line" at a time
            m.update(line)  # update the running md5 digest
    md5_value = m.hexdigest()  # convert to a hex string
    print(md5_value)
def md5_by_chunk(file):
    md5_value=hashlib.md5()
    with open(file,'rb') as f:
        while True:
            data_flow=f.read(8096)  # read 8096 bytes into memory at a time
            if not data_flow:  # read() returns empty bytes at EOF, which is falsy
                break
            md5_value.update(data_flow)
    print(md5_value.hexdigest())
if __name__ == '__main__':
#md5_by_chunk('/home/zanghu/123.jpg')
md5_by_chunk("/home/zanghu/pylearn2_data/stl10_matlab/unlabeled.mat")
| true
|
16f0d3815bfeba90a3c9b16750c6669d07cc3b63
|
Python
|
nanthony21/PWSCalibrationSuite
|
/src/pws_calibration_suite/application/_ui/scorevisualizer.py
|
UTF-8
| 4,122
| 2.53125
| 3
|
[] |
no_license
|
from PyQt5 import QtCore
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QWidget, QGridLayout, QFormLayout, QDoubleSpinBox, QSlider
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
class RadarPlot(QWidget):
"""
Creates a Radar plot from the values in a pandas series.
Args:
data: A `Series` containing values for a number of different parameters.
"""
def __init__(self, parent: QWidget, data: pd.Series):
super().__init__(parent=parent)
interactive = False
if plt.isinteractive():
interactive = True
plt.ioff()
fig, ax = plt.subplots(subplot_kw={'polar': True})
self._ax = ax
if interactive:
plt.ion() # Set back to interactive if it originally was.
fig.suptitle("Calibration Results")
self._canv = FigureCanvasQTAgg(figure=fig)
self._canv.setFocusPolicy(QtCore.Qt.ClickFocus)
self._canv.setFocus()
self._bar = NavigationToolbar2QT(self._canv, self)
self._optionsPane = OptionDialog(self._ax, self._canv, self)
# self._optionsButton = QPushButton("Options", self)
# self._optionsButton.released.connect(self._optionsDlg)
l = QGridLayout()
l.addWidget(self._canv, 0, 0)
l.addWidget(self._bar, 1, 0)
l.addWidget(self._optionsPane, 0, 1)
# l.addWidget(self._optionsButton, 1, 1)
self.setLayout(l)
self.setData(ax, data)
@staticmethod
def setData(ax: plt.Axes, data: pd.Series):
assert isinstance(data, pd.Series), f"`data` must be a Pandas Series, not: {type(data)}"
# number of variable
categories = list(data.index)
N = len(categories)
# What will be the angle of each axis in the plot? (we divide the plot / number of variable)
angles = [n / float(N) * 2 * np.pi for n in range(N)]
angles += angles[:1]
# If you want the first axis to be on top:
ax.set_theta_offset(np.pi / 2)
ax.set_theta_direction(-1)
# Draw one axe per variable + add labels labels yet
plt.xticks(angles[:-1], categories, color='grey', size=8)
# Draw ylabels
ax.set_rlabel_position(0)
rMax = max(data) # if rMax is None else rMax
ax.set_ylim(0, rMax)
# Ind1
values = data.values.flatten().tolist()
values += values[:1]
ax.plot(angles, values, color='blue', linewidth=2, linestyle='solid')
ax.fill(angles, values, color='blue', alpha=0.4)
def _optionsDlg(self):
dlg = OptionDialog(self._ax, self._canv, self)
dlg.exec()
class OptionDialog(QWidget):
def __init__(self, ax: plt.Axes, canv: plt.FigureCanvasBase, parent: QWidget):
super().__init__(parent)#, flags=QtCore.Qt.FramelessWindowHint)
self._ax = ax
self._canv = canv
self._timer = QTimer(self)
self._timer.setInterval(10)
self._timer.setSingleShot(False)
self._timer.timeout.connect(self._evalTimer)
self._timer.start()
self.slider = QSlider(QtCore.Qt.Horizontal, self)
self.slider.sliderReleased.connect(lambda: self.slider.setValue(0))
self.slider.setMinimum(-10)
self.slider.setMaximum(10)
self.sliderVal = self._ax.get_ylim()[1]
self.radiusBox = QDoubleSpinBox(self)
self.radiusBox.setValue(self.sliderVal)
self.radiusBox.valueChanged.connect(self._valChanged)
l = QFormLayout()
l.addRow(self.slider)
l.addRow("Radius: ", self.radiusBox)
self.setLayout(l)
def _valChanged(self, value: float):
self._ax.set_ylim(0, value)
self._canv.draw_idle()
def _evalTimer(self):
value = self.slider.value()
# Convert value
neg = -1 if value < 0 else 1
value = neg * (2**(abs(value)/10) - 1)
val = self._ax.get_ylim()[1] + value
self.radiusBox.setValue(val)
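# Minimal usage sketch (assumes a running Qt application; the series values below
# are illustrative only):
# import pandas as pd
# from PyQt5.QtWidgets import QApplication
# app = QApplication([])
# w = RadarPlot(None, pd.Series({"tune": 0.9, "coupling": 0.4, "chroma": 0.7}))
# w.show()
# app.exec_()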
| true
|
1bf8cc44e7f21e7a35602d49821e479de982099c
|
Python
|
jumbokh/MCSH-Class
|
/Python-src/connect.py
|
UTF-8
| 394
| 2.546875
| 3
|
[] |
no_license
|
import network
import time
import ubinascii
SSID='CHT-10-5'
KEY='0953313123'
sta = network.WLAN(network.STA_IF)
print(sta.active(True))
print(sta.active())
sta.connect(SSID,KEY)
mac = ubinascii.hexlify(sta.config('mac'),':').decode()
print(mac)
print(sta.ifconfig())
print(sta.isconnected())
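# Poll for up to ~10 s (20 iterations x 0.5 s) until the station associates.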
for i in range(20):
time.sleep(0.5)
if sta.isconnected():
break
print(sta.ifconfig())
| true
|
48279d1deed992e09e67174f7130470009d821db
|
Python
|
shredderzwj/NLP-work-commit
|
/lesson01/waduanzi/waduanzi/spiders/duanzi.py
|
UTF-8
| 2,285
| 2.671875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import scrapy
from waduanzi.items import WaduanziItem
from urllib.parse import urlparse
class DuanziSpider(scrapy.Spider):
name = 'duanzi'
allowed_domains = ['www.waduanzi.com']
start_urls = [
'http://www.waduanzi.com/joke/page/1',
'http://www.waduanzi.com/joke/page/300',
'http://www.waduanzi.com/joke/page/600',
'http://www.waduanzi.com/joke/page/900',
'http://www.waduanzi.com/joke/page/1200',
'http://www.waduanzi.com/joke/page/1500',
'http://www.waduanzi.com/joke/page/1800',
'http://www.waduanzi.com/joke/page/2100',
]
def parse(self, response):
item = WaduanziItem()
divs = response.xpath('//div[@class="panel panel20 post-item post-box"]')
for div in divs:
try:
title = div.xpath('.//a[@class="cd-title-link"]/@title').extract_first()
url = div.xpath('.//a[@class="cd-title-link"]/@href').extract_first()
id = url.split('/')[-1]
content = div.xpath('.//div[@class="item-content"]/text()').extract_first()
content = content.replace('\n', '').replace('\t', '').replace(' ', '')
likes = int(div.xpath('.//a[@data-score="1"]/text()').extract_first().strip())
unlikes = int(div.xpath('.//a[@data-score="-1"]/text()').extract_first().strip())
item['id'] = id
item['url'] = url
item['title'] = title
item['content'] = content
item['likes'] = likes
item['unlikes'] = unlikes
yield item
except:
pass
        # Pagination: follow the "next" page link
root_url = "%s://%s" % (urlparse(response.url)[0], urlparse(response.url)[1])
next_url_path = response.xpath('//ul[@id="yw1"]//li[@class="next"]//a/@href').extract_first()
if next_url_path is not None:
next_num = next_url_path.split('/')[-1]
if next_num not in ['300', '600', '900', '1200', '1500', '1800', '2100']:
next_url = root_url + next_url_path
yield scrapy.Request(next_url, callback=self.parse)
print(next_num)
| true
|
3d56e16b2b949c62182caa88e1a47645b8c89714
|
Python
|
andreydymko/Yandex-song-title-to-file
|
/sources/nativeApp/Get_Song_Title_To_File.py
|
UTF-8
| 3,772
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import sys
import json
import struct
import os
import logging
import unicodedata
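# Native-messaging framing: each message is a 4-byte native-endian unsigned length
# ('@I') followed by that many bytes of UTF-8 encoded JSON.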
logging.basicConfig(filename=os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "latest_error.txt"),
                    level=logging.DEBUG,
                    format='%(asctime)s, %(levelname)-8s [%(filename)s:%(module)s:%(funcName)s:%(lineno)d] %(message)s')
logger = logging.getLogger(__name__)
try:
# Python 3.x version
# Read a message from stdin and decode it.
def getMessage():
try:
rawLength = sys.stdin.buffer.read(4)
if len(rawLength) == 0:
return json.loads('null')
#sys.exit(0)
messageLength = struct.unpack('@I', rawLength)[0]
message = sys.stdin.buffer.read(messageLength).decode('utf-8')
except Exception as e:
logger.error(e)
message = 'error'
return json.loads(message)
# Encode a message for transmission,
# given its content.
def encodeMessage(messageContent):
try:
encodedContent = json.dumps(messageContent).encode('utf-8')
encodedLength = struct.pack('@I', len(encodedContent))
except Exception as e:
logger.error(e)
encodedContent = json.dumps("Error encoding symbol.").encode('utf-8')
encodedLength = struct.pack('@I', len(encodedContent))
return {'length': encodedLength, 'content': encodedContent}
# Send an encoded message to stdout
def sendMessage(encodedMessage):
try:
sys.stdout.buffer.write(encodedMessage['length'])
sys.stdout.buffer.write(encodedMessage['content'])
sys.stdout.buffer.flush()
except Exception as e:
logger.error(e)
# Write message to file
def writeMessage(message):
try:
path = os.path.dirname(os.path.realpath(sys.argv[0])) + "\currentSong.txt"
file = open(path, "w+")
file.write(message)
sendMessage(encodeMessage("written 3 " + message))
file.close()
except Exception as e:
logger.error(e)
while True:
receivedMessage = getMessage()
writeMessage(unicodedata.normalize('NFKD', receivedMessage))
except AttributeError:
# Python 2.x version (if sys.stdin.buffer is not defined)
# Read a message from stdin and decode it.
def getMessage():
rawLength = sys.stdin.read(4)
if len(rawLength) == 0:
sys.exit(0)
messageLength = struct.unpack('@I', rawLength)[0]
message = sys.stdin.read(messageLength)
return json.loads(message)
# Encode a message for transmission,
# given its content.
def encodeMessage(messageContent):
try:
encodedContent = json.dumps(messageContent).encode('utf-8')
encodedLength = struct.pack('@I', len(encodedContent))
except UnicodeError as e:
encodedContent = json.dumps("Error encoding symbol.").encode('utf-8')
encodedLength = struct.pack('@I', len(encodedContent))
return {'length': encodedLength, 'content': encodedContent}
# Send an encoded message to stdout
def sendMessage(encodedMessage):
sys.stdout.write(encodedMessage['length'])
sys.stdout.write(encodedMessage['content'])
sys.stdout.flush()
# Write message to file
def writeMessage(message):
path = os.path.dirname(os.path.realpath(sys.argv[0])) + "\currentSong.txt"
file = open(path, "w+")
file.write(message)
sendMessage(encodeMessage("writed 2 " + message))
file.close()
while True:
receivedMessage = getMessage()
writeMessage(receivedMessage)
| true
|
599f8e86baf4334711acf7498ed693bc0da8932d
|
Python
|
kalkidan999/AirBnB_clone
|
/tests/test_models/test_user.py
|
UTF-8
| 837
| 2.53125
| 3
|
[] |
no_license
|
#!/usr/bin/python3
"""Test User"""
import unittest
from models.base_model import BaseModel
from models.city import City
from models.place import Place
from models.amenity import Amenity
from models.state import State
from models.review import Review
from models.user import User
class Testuser(unittest.TestCase):
"""unit test"""
def test_User(self):
"""
Test Class Use
"""
my_user = User()
self.assertTrue(hasattr(my_user, "first_name"))
self.assertEqual(my_user.first_name, "")
self.assertTrue(hasattr(my_user, "last_name"))
self.assertEqual(my_user.last_name, "")
self.assertTrue(hasattr(my_user, "email"))
self.assertEqual(my_user.email, "")
self.assertTrue(hasattr(my_user, "password"))
self.assertEqual(my_user.password, "")
| true
|
52a13fc816e63739dc3be544d7f04f321ba72191
|
Python
|
LvXueshuai/Python1
|
/1.py
|
UTF-8
| 103
| 3
| 3
|
[] |
no_license
|
import hashlib
obj = hashlib.md5()
obj.update("hello".encode("utf8"))
print(obj.hexdigest())
| true
|
47ef1b2789a22ae49370aeaadae20dd24f8ae11e
|
Python
|
iisdd/Courses
|
/python_fishc/35.0.py
|
UTF-8
| 834
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
import easygui as g
import sys
while 1:
g.msgbox("嗨,欢迎进入第一个界面小游戏^_^")
msg ="请问你希望在鱼C工作室学习到什么知识呢?"
title = "小游戏互动"
choices = ["谈恋爱", "编程", "OOXX", "琴棋书画"]
choice = g.choicebox(msg, title, choices)
# 注意,msgbox的参数是一个字符串
# 如果用户选择Cancel,该函数返回None
g.msgbox("你的选择是: " + str(choice), "结果")
msg = "你希望重新开始小游戏吗?"
title = "请选择"
    # Pop up a Continue/Cancel dialog
if g.ccbox(msg, title):
        pass  # if the user chooses Continue
else:
        sys.exit(0)  # if the user chooses Cancel
| true
|
4298c8440889801d92b55a59bdf9b3fc9fe5a34b
|
Python
|
rajatmann100/rosalind-bioinformatics-stronghold
|
/p9-long.py
|
UTF-8
| 360
| 2.953125
| 3
|
[] |
no_license
|
########## BASE FASTA CODE - START ##########
from lib.FastaReader import FASTA
file = open("./data/data.txt", "r")
input_str = file.read()
gene_arr = input_str.split(">")
gene_arr = gene_arr[1:]
########## BASE FASTA CODE - END ##########
def processDNA(c, dna):
print(c, dna)
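# Placeholder: for Rosalind LONG, processDNA is presumably where the greedy
# shortest-superstring assembly of the reads would be implemented.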
for g in gene_arr:
fs = FASTA(g)
processDNA(fs.code(), fs.dna())
| true
|
4f519d52864a0277db94c8743ddcd63e17cc0d27
|
Python
|
arnoldliaoILMN/LaunchSpace
|
/bin/Tracker.py
|
UTF-8
| 4,070
| 2.609375
| 3
|
[] |
no_license
|
"""
Tracks the status of apps. Designed to be run on a cron, but can be run manually for debugging purposes.
"""
import os
import sys
import logging
from collections import defaultdict
# Add relative path libraries
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.abspath(os.path.sep.join([SCRIPT_DIR, "..", "lib"])))
import Repository
import AppServices
import SampleServices
import ConfigurationServices
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='update status of sample/apps')
parser.add_argument('-i', '--id', type=str, dest="id", help='update just a specific SampleApp id')
parser.add_argument('-s', '--safe', dest="safe", default=False, action="store_true", help='safe mode - say what you would do without doing it')
parser.add_argument('-l', '--logtostdout', dest="logtostdout", default=False, action="store_true", help="log to stdout instead of default log file")
parser.add_argument("-L", "--loglevel", dest="loglevel", default="INFO", help="loglevel, default INFO. Choose from WARNING, INFO, DEBUG")
args = parser.parse_args()
if args.safe or args.logtostdout:
logging.basicConfig(level=args.loglevel, format=ConfigurationServices.GetConfig("LogFormat"))
else:
logfile = ConfigurationServices.GetConfig("TRACKER_LOG_FILE")
if not os.access(os.path.dirname(logfile), os.W_OK):
print "log directory: %s does not exist or is not writeable" % (logfile)
sys.exit(1)
logging.basicConfig(filename=logfile, level=args.loglevel, format=ConfigurationServices.GetConfig("LogFormat"))
#logging.basicConfig(level=args.loglevel)
pl = logging.getLogger("peewee")
pl.setLevel(logging.INFO)
logging.debug("Starting tracker")
if args.id:
        sampleApps = [ Repository.GetSampleAppByID(args.id) ]
else:
# get all the SampleApps with statuses that the Tracker will be able to update
# these represent "live" statuses on BaseSpace
constraints = { "status" : [ "submitted", "pending", "running" ] }
sampleApps = Repository.GetSampleAppByConstraints(constraints)
logging.debug("Working on %i samples" % len(sampleApps))
# there's quite a lot code shared here with QCChecker.py, to iterate over SampleApps and update them
# record what transitions we make (state -> state for each SampleApp) so we can report at the end
# all SampleApps will end up in either "qc-failed" or "qc-passed" states
transitions = defaultdict(list)
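# transitions maps (old_status, new_status) -> list of BaseSpace ids, e.g.
# transitions[("running", "complete")] == [12345, 67890] (hypothetical values)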
for sampleApp in sampleApps:
# unpack the SampleApp a little
sampleName = Repository.SampleAppToSampleName(sampleApp)
appName = Repository.SampleAppToAppName(sampleApp)
sampleAppId = Repository.SampleAppToBaseSpaceId(sampleApp)
logging.debug("working on: %s %s" % (sampleName, appName))
if not sampleAppId:
logging.warning("No BaseSpace Id for SampleApp: %s" % Repository.SampleAppSummary(sampleApp))
continue
# get the new status
newstatus = AppServices.GetAppStatus(sampleAppId)
if args.safe:
logging.info("would update %s to: %s" % (Repository.SampleAppSummary(sampleApp), newstatus))
else:
# record the transition and update in the db
transition = (Repository.SampleAppToStatus(sampleApp), newstatus)
Repository.SetSampleAppStatus(sampleApp, newstatus)
transitions[transition].append(sampleAppId)
# log how many of each transition we've made. If the number is low enough, report which apps have had each transition type
for transition in sorted(transitions):
if len(transitions[transition]) > 40:
logging.info("%s : %i" % (transition, len(transitions[transition])))
else:
logging.info(
"%s : %i (%s)" % (
transition, len(transitions[transition]), ", ".join([str(x) for x in transitions[transition]])))
logging.debug("Finished tracker")
| true
|
ce1a80431fdddd39527aeb86b43117bd894135aa
|
Python
|
fsoubelet/PyhDToolkit
|
/pyhdtoolkit/cpymadtools/coupling.py
|
UTF-8
| 14,126
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
"""
.. _cpymadtools-coupling:
Betatron Coupling Utilities
---------------------------
Module with functions to perform ``MAD-X`` actions through a `~cpymad.madx.Madx` object that
relate to betatron coupling in the machine.
"""
from typing import Dict, Sequence, Tuple
import numpy as np
import tfs
from cpymad.madx import Madx
from loguru import logger
from optics_functions.coupling import check_resonance_relation, closest_tune_approach, coupling_via_cmatrix
from scipy import stats
from pyhdtoolkit.cpymadtools.constants import MONITOR_TWISS_COLUMNS
from pyhdtoolkit.cpymadtools.lhc import get_lhc_tune_and_chroma_knobs
from pyhdtoolkit.cpymadtools.matching import match_tunes_and_chromaticities
from pyhdtoolkit.cpymadtools.twiss import get_pattern_twiss, get_twiss_tfs
# ----- General Use ----- #
def get_closest_tune_approach(
madx: Madx,
/,
accelerator: str = None,
sequence: str = None,
varied_knobs: Sequence[str] = None,
telescopic_squeeze: bool = True,
run3: bool = False,
explicit_targets: Tuple[float, float] = None,
step: float = 1e-7,
calls: int = 100,
tolerance: float = 1e-21,
) -> float:
"""
.. versionadded:: 0.16.0
Provided with an active `~cpymad.madx.Madx` object, tries to match the tunes to the midpoint of their
fractional parts, i.e. tries to bring them together. The difference between the final reached fractional
tunes is the closest tune approach. This should not have any effect on the user's simulation, as the varied
knobs are restored to their previous values after performing the CTA. This uses
`~.matching.match_tunes_and_chromaticities` under the hood.
.. note::
This assumes the sequence has previously been matched to the user's desired working point; if targets
are not explicitly given, the appropriate ones will be determined from the ``MAD-X`` internal tables.
Args:
madx (cpymad.madx.Madx): an instantiated `~cpymad.madx.Madx` object. Positional only.
accelerator (Optional[str]): name of the accelerator, used to determine knobs if *varied_knobs* is not given.
Automatic determination will only work for `LHC` and `HLLHC`.
sequence (str): name of the sequence you want to activate for the tune matching.
varied_knobs (Sequence[str]): the variable names to ``VARY`` in the ``MAD-X`` ``MATCH`` routine. An input
could be ``["kqf", "kqd", "ksf", "ksd"]`` as these are common names used for quadrupole and sextupole
strengths (focusing / defocusing) in most examples.
telescopic_squeeze (bool): ``LHC`` specific. If set to `True`, uses the ``(HL)LHC`` knobs for Telescopic
Squeeze configuration. Defaults to `True` since `v0.9.0`.
run3 (bool): if set to `True`, uses the `LHC` Run 3 `*_op` knobs. Defaults to `False`.
explicit_targets (Tuple[float, float]): if given, will be used as matching targets for `(Qx, Qy)`.
Otherwise, the target is determined as the middle of the current fractional tunes. Defaults to
`None`.
step (float): step size to use when varying knobs.
calls (int): max number of varying calls to perform.
tolerance (float): tolerance for successful matching.
Returns:
The closest tune approach, in absolute value.
Example:
.. code-block:: python
# Say we have set the LHC coupling knobs to 1e-3
dqmin = get_closest_tune_approach(
madx,
"lhc", # will find the knobs automatically
sequence="lhcb1",
telescopic_squeeze=True, # influences the knobs definition
run3=True, # influences the knobs definition (LHC Run 3)
)
# returns 0.001
"""
if accelerator and not varied_knobs:
logger.trace(f"Getting knobs from default {accelerator.upper()} values")
lhc_knobs = get_lhc_tune_and_chroma_knobs(
accelerator=accelerator, beam=int(sequence[-1]), telescopic_squeeze=telescopic_squeeze, run3=run3
)
tune_knobs, _ = lhc_knobs[:2], lhc_knobs[2:] # first two for tune & last two for chroma, not used
logger.debug("Running TWISS to update SUMM and TWISS tables")
madx.command.twiss()
logger.debug("Saving knob values to restore after closest tune approach")
varied_knobs = varied_knobs or tune_knobs # if accelerator was given we've extracted this already
saved_knobs: Dict[str, float] = {knob: madx.globals[knob] for knob in varied_knobs}
logger.trace(f"Saved knobs are {saved_knobs}")
if explicit_targets:
q1, q2 = explicit_targets # the integer part is used later on
else:
logger.trace("Retrieving tunes and chromaticities from internal tables")
q1, q2 = madx.table.summ.q1[0], madx.table.summ.q2[0]
logger.trace(f"Retrieved values are q1 = {q1}, q2 = {q2}")
logger.trace("Determining target tunes for closest approach")
middle_of_fractional_tunes = (_fractional_tune(q1) + _fractional_tune(q2)) / 2
qx_target = int(q1) + middle_of_fractional_tunes
qy_target = int(q2) + middle_of_fractional_tunes
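# e.g. q1 = 62.31 and q2 = 60.32 (illustrative values) give a middle fractional
# tune of 0.315 and therefore targets Qx = 62.315 and Qy = 60.315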
logger.debug(f"Targeting tunes Qx = {qx_target} | Qy = {qy_target}")
logger.debug("Performing closest tune approach routine, matching should fail at DeltaQ = dqmin")
match_tunes_and_chromaticities(
madx,
accelerator,
sequence,
q1_target=qx_target,
q2_target=qy_target,
varied_knobs=varied_knobs,
telescopic_squeeze=telescopic_squeeze,
run3=run3,
step=step,
calls=calls,
tolerance=tolerance,
)
logger.debug("Retrieving tune separation from internal tables")
dqmin = madx.table.summ.q1[0] - madx.table.summ.q2[0] - (int(q1) - int(q2))
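# dqmin is the difference of the fractional tunes, e.g. Q1 = 62.315 and Q2 = 60.310
# (illustrative values) give dqmin = 62.315 - 60.310 - (62 - 60) = 0.005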
cminus = abs(dqmin)
logger.debug(f"Matching got to a Closest Tune Approach of {cminus:.5f}")
logger.debug("Restoring saved knobs")
with madx.batch():
madx.globals.update(saved_knobs)
madx.command.twiss() # make sure TWISS and SUMM tables are returned to their original state
return cminus
def get_cminus_from_coupling_rdts(
madx: Madx,
/,
patterns: Sequence[str] = [""],
method: str = "teapot",
qx: float = None,
qy: float = None,
filtering: float = 0,
) -> float:
"""
.. versionadded:: 0.20.0
Computes and returns the :math:`|C^{-}|` from the machine's coupling RDTs. The
closest tune approach is computed thanks to functionality from `optics_functions.coupling`.
.. hint::
A quick estimate of the :math:`|C^{-}|` is available in ``MAD-X`` as the ``dqmin``
variable in the ``SUMM`` table. However, this estimate is not accurate in all situations,
and is the norm of a complex vector, which is not appropriate for comparisons or for
normalizations, which are the use cases of this function.
.. note::
If using the ``calaga``, ``teapot``, ``teapot_franchi`` or ``franchi`` method, then the returned
value will be a real number.
Args:
madx (cpymad.madx.Madx): an instantiated `~cpymad.madx.Madx` object. Positional only.
patterns (Sequence[str]): the different patterns (such as ``MQX`` or ``BPM``) of elements
to use when computing the coupling RDTs. Defaults to `[""]` which will select and use
all elements in the ``TWISS`` outputs.
method (str): the method to use for the calculation of the :math:`C^{-}`. Defaults to
`teapot`, which is the default of `~optics_functions.coupling.closest_tune_approach`.
qx (float): the horizontal tune. Defaults to `None`, in which case the value will be taken
from the ``SUMM`` table.
qy (float): the vertical tune. Defaults to `None`, in which case the value will be taken
from the ``SUMM`` table.
filtering (float): if a non-zero value is given, applies outlier filtering of BPMs based on
the abs. value of the coupling RDTs before computing the :math:`C^{-}`. The given value
corresponds to the std. dev. :math:`\\sigma` outside of which a BPM is filtered out.
Defaults to 0, which means no filtering.
Returns:
The calculated :math:`|C^{-}|` value.
Examples:
To compute the :math:`|C^{-}|` taking into account all elements in the sequence:
.. code-block:: python
complex_cminus = get_cminus_from_coupling_rdts(madx)
To simulate the calculation from a measurement, with RDTs computed at BPMs only:
.. code-block:: python
complex_cminus = get_cminus_from_coupling_rdts(madx, patterns=["^BPM.*B[12]$"])
"""
logger.debug("Getting coupling RDTs at selected elements thoughout the machine")
twiss_with_rdts = get_pattern_twiss(madx, patterns=patterns, columns=MONITOR_TWISS_COLUMNS)
twiss_with_rdts.columns = twiss_with_rdts.columns.str.upper() # optics_functions needs capitalized names
twiss_with_rdts[["F1001", "F1010"]] = coupling_via_cmatrix(twiss_with_rdts, output=["rdts"])
# Get tune values from the SUMM table if not provided
qx = qx or madx.table.summ.q1[0]
qy = qy or madx.table.summ.q2[0]
# The LENGTH header must be set here: if set above, merging the model erases the headers
logger.debug("Filtering out BPMs that do not respect the resonance relation")
twiss_with_rdts.headers["LENGTH"] = 26659 # LHC length, will be needed later
twiss_with_rdts = check_resonance_relation(twiss_with_rdts, to_nan=True).dropna()
if filtering:
logger.debug(f"Filtering out BPMs with RDTs outside of {filtering:f} std. dev.")
twiss_with_rdts = _filter_outlier_bpms_from_coupling_rdts(twiss_with_rdts, filtering)
# Now we do the closest tune approach calculation -> adds DELTAQMIN column to df
logger.debug(f"Calculating CTA via optics_functions, with method '{method}'")
dqmin_df = closest_tune_approach(twiss_with_rdts, qx=qx, qy=qy, method=method)
# If we use a method that returns complex values, we have to average on the abs of these values!!
if method not in ["calaga", "teapot", "teapot_franchi", "franchi"]:
logger.debug(f"Taking module of values, as method '{method}' returns complex values")
dqmin_df = dqmin_df.abs()
# Now we can take the mean of the DELTAQMIN column
return dqmin_df.DELTAQMIN.mean()
def match_no_coupling_through_ripkens(
madx: Madx, /, sequence: str = None, location: str = None, vary_knobs: Sequence[str] = None
) -> None:
"""
.. versionadded:: 0.16.0
Matching routine to get cross-term Ripken parameters :math:`\\beta_{12}` and :math:`\\beta_{21}`
to be 0 at a given location.
Args:
madx (cpymad.madx.Madx): an instantiated `~cpymad.madx.Madx` object. Positional only.
sequence (str): name of the sequence to activate for the matching.
location (str): the name of the element at which one wants the cross-term Ripkens to be 0.
vary_knobs (Sequence[str]): the variable names to ``VARY`` in the ``MAD-X`` routine.
Example:
.. code-block:: python
match_no_coupling_through_ripkens(
madx, sequence="lhcb1", location="IP5", vary_knobs=["kqsx.3l5", "kqsx.3r5"]
)
"""
logger.debug(f"Matching Ripken parameters for no coupling at location {location}")
logger.debug("Creating macro to update Ripkens")
madx.input("do_ripken: macro = {twiss, ripken=True;}") # cpymad needs .input for macros
logger.debug("Matching Parameters")
madx.command.match(sequence=sequence, use_macro=True)
for knob in vary_knobs:
madx.command.vary(name=knob)
madx.command.use_macro(name="do_ripken")
madx.input(f"constraint, expr=table(twiss, {location}, beta12)=0") # need input else includes " and fails
madx.input(f"constraint, expr=table(twiss, {location}, beta21)=0") # need input else includes " and fails
madx.command.lmdif(calls=500, tolerance=1e-21)
madx.command.endmatch()
def get_coupling_rdts(madx: Madx, /, **kwargs) -> tfs.TfsDataFrame:
"""
.. versionadded:: 0.20.0
Computes the coupling Resonance Driving Terms (RDTs) :math:`f_{1001}` and :math:`f_{1010}`
at all elements in the currently active sequence from a ``TWISS`` call.
Args:
madx (cpymad.madx.Madx): an instantiated `~cpymad.madx.Madx` object. Positional only.
**kwargs: any keyword argument will be transmitted to the ``TWISS`` command in ``MAD-X``.
Returns:
A `~tfs.TfsDataFrame` with columns of the ``TWISS`` table, and two complex columns for the
``F1001`` and ``F1010`` RDTs.
Example:
.. code-block:: python
twiss_rdts = get_coupling_rdts(madx)
"""
twiss_tfs = get_twiss_tfs(madx, **kwargs)
twiss_tfs[["F1001", "F1010"]] = coupling_via_cmatrix(twiss_tfs, output=["rdts"])
return twiss_tfs
# ----- Helpers ----- #
def _fractional_tune(tune: float) -> float:
"""
Returns only the fractional part of *tune*.
Args:
tune (float): tune value.
Returns:
The fractional part.
Example:
.. code-block:: python
_fractional_tune(62.31)
# returns 0.31
"""
return tune - int(tune)  # fine for positive tunes, as int() truncates toward zero
def _filter_outlier_bpms_from_coupling_rdts(twiss_df: tfs.TfsDataFrame, stdev: float = 3) -> tfs.TfsDataFrame:
"""Only keep BPMs for which the abs. value of coupling RDTs is no further than `stdev` sigma from its mean.Example:
.. note::
This expects the `twiss_df` to have ``F1001`` and ``F1010`` complex columns.
"""
logger.debug("Filtering out outlier BPMs based on coupling RDTs")
df = twiss_df.copy(deep=True)
df = df[np.abs(stats.zscore(df.F1001.abs())) < stdev]
df = df[np.abs(stats.zscore(df.F1010.abs())) < stdev]
removed = len(twiss_df) - len(df)
if removed > 0:
logger.debug(f"{removed} BPMs removed due to outlier coupling RDTs")
return df
| true
|
0d42135d9faa594df5a3ce416cac23db310555ac
|
Python
|
des-learning/struktur-data
|
/src/06/test_slice.py
|
UTF-8
| 1,591
| 3.65625
| 4
|
[] |
no_license
|
import unittest
from doublylinkedlist import DoublyLinkedList
def reduce(function, iterable, start):
result = start
for i in iterable:
result = function(result, i)
return result
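# minimal re-implementation of functools.reduce(function, iterable, start)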
def equalList(list1, list2):
pairs = zip(list1, list2)
sameItem = lambda x, y: x and (y[0] == y[1])
return (len(list1) == len(list2) and
reduce(sameItem, pairs, True))
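# e.g. equalList([1, 2], [1, 2]) -> True, equalList([1, 2], [1, 3]) -> False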
def setupList():
l = DoublyLinkedList()
data = [0, 7, 5, 3, 8, 2, 9, 1, 9, 4]
for i in data:
l.add(i)
return l, data
class TestSlice(unittest.TestCase):
def test_slice(self):
l, data = setupList()
self.assertTrue(equalList(l.asList(), data))
self.assertTrue(equalList(l.slice(0, len(l)).asList(), data))
self.assertTrue(equalList(l.slice(0, 1).asList(), data[0:1]))
self.assertTrue(equalList(l.slice(0, 5).asList(), data[0:5]))
self.assertTrue(equalList(l.slice(5, len(l)).asList(), data[5:]))
def test_slice_negative(self):
l, data = setupList()
self.assertTrue(equalList(l.slice(0, -1).asList(), data[0:-1]))
self.assertTrue(equalList(l.slice(5, -3).asList(), data[5:-3]))
self.assertTrue(equalList(l.slice(-3, -1).asList(), data[-3:-1]))
self.assertTrue(equalList(l.slice(0, -5).asList(), data[0:-5]))
def test_invalid_slice(self):
l, data = setupList()
self.assertTrue(equalList(l.slice(100, 200).asList(), []))
self.assertTrue(equalList(l.slice(-1, -3).asList(), []))
self.assertTrue(equalList(l.slice(0, len(l)+1).asList(), []))
| true
|
cc2a5515dc53319c287948841ed71a96fa07b353
|
Python
|
Johnny-kiv/Python
|
/напоминальщик/time 3.py
|
UTF-8
| 1,467
| 3.34375
| 3
|
[] |
no_license
|
# This is a reminder app
# version 2
# Author: johnny-kiv
# Import the tkinter and time modules plus the extra messagebox module
from tkinter import *
from tkinter import messagebox
import time
root=Tk()
# Set up the Canvas widget
c=Canvas(root,bg="grey",width=800,height=600)
c.grid(row=4,column=0,columnspan=2)
a2=IntVar()
b2=IntVar()
c2=IntVar()
def begin():
# Default to 20 minutes whenever a field is left at 0
a4=(a2.get() or 20)*60
b4=(b2.get() or 20)*60
c.create_text(500,50,text="Working",fill="green")
# Note: time.sleep blocks the tkinter main loop while it runs
for r in range(c2.get()):
time.sleep(a4)
messagebox.showinfo("Reminder","Time to rest")
time.sleep(b4)
messagebox.showinfo("Reminder","Back to work")
a2l=Label(text="Enter work minutes: ")
a2l.grid(row=0,column=0,sticky="w")
a2e=Entry(textvariable=a2)
a2e.grid(row=0,column=1,sticky="e")
b2l=Label(text="Enter break minutes: ")
b2l.grid(row=1,column=0,sticky="w")
b2e=Entry(textvariable=b2)
b2e.grid(row=1,column=1,sticky="e")
c2l=Label(text="Enter how many work/break rounds you want: ")
c2l.grid(row=2,column=0,sticky="w")
c2e=Entry(textvariable=c2)
c2e.grid(row=2,column=1,sticky="e")
btn=Button(text="Start",command=begin)
btn.grid(row=3,column=0,sticky="w")
root.mainloop()
| true
|