blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e313161b474761d72693f7e992c7fb5456afd159 | Python | CFhM/tedukuri-train | /Problems/0x00「基本算法」例题/0101 a^b/gen.py | UTF-8 | 250 | 2.703125 | 3 | [] | no_license | import random
def main():
    """Write one random test case "a b p" (each in [1, 10**9]) to gen.in."""
    with open('gen.in', 'w') as f:
        # BUG FIX: random.randint requires integer bounds; 1e9 is a float,
        # which raises on modern Python.  Use the exact integer 10 ** 9.
        a = random.randint(1, 10 ** 9)
        b = random.randint(1, 10 ** 9)
        p = random.randint(1, 10 ** 9)
        f.write('%d %d %d\n' % (a, b, p))


if __name__ == '__main__':
    main()
| true |
cefdd9a664673e6881b7f9efee0ae03722a89b9f | Python | ruthdevgpwr/introducao-programacao-uniesp | /atividade-A04/Q06.py | UTF-8 | 732 | 3.453125 | 3 | [] | no_license | numero1 = float(input('Digite o 1º número: '))
numero2 = float(input('Digite o 2º número: '))
numero3 = float(input('Digite o 3º número: '))
numero4 = float(input('Digite o 4º número: '))
numero5 = float(input('Digite o 5º número: '))
# Count how many of the five readings exceed 10 (numero1 was read above).
valores = (numero1, numero2, numero3, numero4, numero5)
quantidade_maiores_10 = sum(1 for valor in valores if valor > 10)
print("A quantidade de números maiores que 10 é: " + str(quantidade_maiores_10))
| true |
050748907b0a688c926cd37f29d21d8553d548f7 | Python | Yllcaka/Random.python | /GAME/automate.py | UTF-8 | 1,296 | 3.671875 | 4 | [] | no_license | import random_word
import random  # BUG FIX: 'random' is used below but was never imported ('import random_word' does not provide it)

string = '''The French lettering company Letraset manufactured a set of dry-transfer
sheets which included the lorem ipsum filler text in a variety of fonts, sizes, and layouts.
These sheets of lettering could be rubbed on anywhere and were quickly adopted by graphic
artists, printers, architects, and advertisers for their professional look and ease of use.'''
lst = string.split()
# print(lst)
# Build a same-length "sentence" of words sampled (with replacement) from lst.
r_stuff = [random.choice(lst) for _ in range(len(lst))]
a = " ".join(r_stuff)
print(len(a))
print(len(string))
def reverseWords(input):
    """Return *input* with its space-separated words in reverse order."""
    # NOTE: the parameter name shadows the builtin input(); kept for
    # interface compatibility with existing callers.
    words = input.split(" ")
    words.reverse()
    return ' '.join(words)
print(reverseWords(string)) | true |
34dd2d1e9b929ab709ae6245b467438e6ce225b6 | Python | mrgsfl/python_lessons | /max_min_rest.py | UTF-8 | 765 | 3.15625 | 3 | [] | no_license | maxnum = int(input())
minnum = int(input())
rest = int(input())
# maxnum was read just above; order the three values instead of the original
# cascade of pairwise swaps, then report largest / smallest / middle.
minnum, rest, maxnum = sorted((maxnum, minnum, rest))
print('max', maxnum, '\nmin', minnum, '\nrest', rest)
| true |
338d208931900ec5b0d0922ab68d11cec40b841f | Python | vieirinhasantana/boilerplate-aws-sam-python | /{{ cookiecutter.project_name }}/default/app.py | UTF-8 | 3,259 | 2.609375 | 3 | [
"MIT-0",
"MIT"
] | permissive | import json
import boto3
from aws_lambda_powertools import Logger, Metrics, Tracer
# https://awslabs.github.io/aws-lambda-powertools-python/#features
# Powertools helpers live at module level so they are created once per cold
# start and shared across warm invocations.
tracer = Tracer()
logger = Logger()
metrics = Metrics()
# Global variables are reused across execution contexts (if available)
session = boto3.Session()
@metrics.log_metrics(capture_cold_start_metric=True)
@logger.inject_lambda_context
@tracer.capture_lambda_handler
def lambda_handler(event, context):
    """Minimal API Gateway Lambda proxy handler.

    Parameters
    ----------
    event: dict, required
        API Gateway Lambda proxy input (resource, path, httpMethod, headers,
        queryStringParameters, pathParameters, stageVariables, requestContext,
        body, isBase64Encoded).  See:
        https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html

    context: object, required
        Lambda context runtime methods and attributes
        (aws_request_id, function_name, memory_limit_in_mb, ...).  See:
        https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html

    Returns
    -------
    dict
        API Gateway Lambda proxy output: 'statusCode' plus a JSON 'body'.
    """
    try:
        payload = {"hello": "world"}
        return {
            "statusCode": 200,
            "body": json.dumps(payload)
        }
    except Exception as exc:
        # Log the full traceback, then let Lambda mark the invocation failed.
        logger.exception(exc)
        raise
| true |
03e2962dfa5cb1dffdb99525719203c790a3c5f0 | Python | Ahmer-444/ZeroMQ_Python | /ZMQ_PUB_SUB_Model/PrintHeader.py | UTF-8 | 970 | 2.71875 | 3 | [] | no_license | __author__ = "Ahmer Malik"
__copyright__ = "Copyright (C) 2016 Linux IoT"
__revision__ = "$Id$"
__version__ = "0.1"
class _header:
    # Banner printer for process metadata (file, author, copyright, version).
    # NOTE(review): uses Python 2 print statements; this module will not run
    # on Python 3 as-is.
    def __init__(self,__file__,__author__,__copyright__,__version__):
        # Store the strings to display plus simple layout constants.
        self._marker = '-------------------------------------------'
        self._n = '\n'
        self.__file__=__file__
        self.__author__=__author__
        self.__copyright__=__copyright__
        self.__version__ =__version__
    def _print(self):
        # Emit the framed banner to stdout.
        print self._n + self._marker
        print "Process name:" + self.__file__ + self._n
        print "Author: " + self.__author__ + self._n
        print "Copyright: " + self.__copyright__ + self._n
        print "Version: " + self.__version__ + self._n
        print self._marker + self._n
if __name__ == '__main__':
    # Demo: build a banner from this module's own metadata and print it.
    x = _header(__file__,__author__,__copyright__,__version__)
    x._print()
| true |
e3862cd3b4b30bb87788da6eb9f6c29de978e1a6 | Python | zhangct79/py_learn | /01basic/string.py | UTF-8 | 142 | 2.828125 | 3 | [] | no_license | print("hello {0}, 你的成绩提升了{1:.1f}" . format("小明", 17.125))
print("hello %s, 你的成绩提升了%.1f" % ('小明', 17.125)) | true |
f16930fa9cc66bad2f960247a39bb54b1d079905 | Python | rogthedodge/redux | /source/create_DB.py | UTF-8 | 3,151 | 2.734375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
import time
import psycopg2
from os.path import dirname
from get_config import config
from import_campaigns import import_campaigns
from import_members import import_members
from import_users import import_users
def create_tables():
    """(Re)create the application schema in the PostgreSQL database.

    Drops any existing tables/types, then creates groups, members,
    campaigns, calls (with an 'outcome' enum) and users.  Connection
    parameters are read via config().
    """
    # Ordered DDL: drops first (children before parents), then creates.
    commands = [
        """DROP TABLE IF EXISTS groups CASCADE;""",
        """DROP TABLE IF EXISTS members CASCADE;""",
        """DROP TABLE IF EXISTS campaigns CASCADE;""",
        """DROP TABLE IF EXISTS calls CASCADE;""",
        """DROP TABLE IF EXISTS users CASCADE;""",
        """DROP TYPE IF EXISTS outcome;""",
        """CREATE TABLE groups (
            group_id SERIAL PRIMARY KEY,
            group_name VARCHAR(255) NOT NULL UNIQUE
        );""",
        """CREATE TABLE members (
            member_id SERIAL PRIMARY KEY,
            group_id INTEGER NOT NULL,
            member_name VARCHAR(255) NOT NULL,
            member_tel VARCHAR(30),
            FOREIGN KEY (group_id)
                REFERENCES groups (group_id)
                ON UPDATE CASCADE ON DELETE CASCADE
        );""",
        """CREATE TABLE campaigns (
            campaign_id SERIAL PRIMARY KEY,
            group_id INTEGER NOT NULL,
            campaign_name VARCHAR(255) NOT NULL,
            campaign_desc VARCHAR(255) NOT NULL,
            campaign_start_date TIMESTAMP,
            campaign_end_date TIMESTAMP,
            campaign_global BOOLEAN,
            FOREIGN KEY (group_id)
                REFERENCES groups (group_id)
                ON UPDATE CASCADE ON DELETE CASCADE
        );""",
        """CREATE TYPE outcome AS ENUM ('SKIPPED', 'UNANSWERED', 'ANSWERED');""",
        """CREATE TABLE calls (
            call_id SERIAL PRIMARY KEY,
            member_id INTEGER NOT NULL,
            campaign_id INTEGER NOT NULL,
            call_outcome outcome NOT NULL,
            call_notes VARCHAR(255),
            call_date TIMESTAMP NOT NULL,
            FOREIGN KEY (member_id)
                REFERENCES members (member_id)
                ON UPDATE CASCADE ON DELETE CASCADE,
            FOREIGN KEY (campaign_id)
                REFERENCES campaigns (campaign_id)
                ON UPDATE CASCADE ON DELETE CASCADE
        );""",
        """CREATE TABLE users (
            user_id SERIAL PRIMARY KEY,
            user_email VARCHAR(255),
            user_CLP VARCHAR(255)
        );"""
    ]
    # need to sleep for a few seconds to allow postgres container to start up
    time.sleep(10)
    #read the connection parameters
    params = config()
    # connect to the PostgreSQL server
    # NOTE(review): this prints the raw connection parameters (which may
    # include a password) to stdout -- consider removing for production.
    print(params)
    conn = psycopg2.connect(**params)
    conn.autocommit = True
    cur = conn.cursor()
    # create table one by one
    for command in commands:
        cur.execute(command)
    # close communication with the PostgreSQL database server
    cur.close()
    conn.close()
if __name__ == '__main__':
    # Rebuild the schema, then seed it from the bundled test CSVs.
    create_tables()
    # NOTE(review): dirname(dirname(__file__)) is concatenated with
    # '../test/...' without a path separator -- verify these paths resolve
    # as intended on the target layout.
    import_members(dirname(dirname(__file__)) + '../test/test_members.csv')
    import_campaigns(dirname(dirname(__file__)) + '../test/test_campaigns.csv')
    import_users(dirname(dirname(__file__)) + '../test/test_users.csv')
| true |
393e9b2adc645191dad3c90bb741a3442fae5b14 | Python | 19mateusz92/pythonPrograms | /ex.py | UTF-8 | 557 | 3.171875 | 3 | [] | no_license | from datetime import datetime
def getDivSum(num):
    """Return the sum of the proper divisors of num."""
    return sum(d for d in range(1, num) if num % d == 0)
def isPerf(num):
    """Print num when it is perfect (equals the sum of its proper divisors)."""
    if getDivSum(num) == num:
        print(num)
# BUG FIX: the original sampled datetime.now().microsecond at start/stop, which
# wraps every second (the difference can even be negative).  Measure real
# elapsed time with full datetime subtraction instead.
start = datetime.now()
lista = []
lista2 = list(range(1, 1001))
for i in range(1, 1001):
    isPerf(i)                     # print i if it is a perfect number
    lista.append(getDivSum(i))    # lista[i-1] = sum of proper divisors of i
# NOTE(review): this loop matches each j whose divisor-sum equals a 1-based
# position in lista and removes that position value -- the intent is unclear;
# the logic is preserved as-is.
for j in lista2:
    pos = 0
    if j in lista:
        pos = lista.index(j) + 1
        if getDivSum(j) == pos and j != pos:
            print(pos, j)
            lista.remove(pos)
stop = datetime.now()
print(int((stop - start).total_seconds() * 1000000), "[micro kurwa sekund]")
| true |
dc47fb7ec8f1a3048cd1eafd834a7754b5021fdf | Python | zerosum99/python_basic | /myPython/time_date/jul_ord_test.py | UTF-8 | 640 | 3.34375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 03 16:59:06 2016
@author: 06411
"""
import datetime
def cal_julian(year,month,day) :
    # Return (start_date, number of days from start_date to today).
    # NOTE(review): Python 2 print statements throughout this module.
    start = datetime.date(year,month,day)
    print ' start day : ', start
    end = datetime.date.today()
    print ' end day : ', end
    time_delta = end - start
    print ' julian day : ', time_delta.days
    return (start,time_delta.days)
def cal_ordinal(start) :
    # Print today's date reconstructed from its proleptic-Gregorian ordinal.
    # NOTE(review): the 'start' parameter is never used -- the function always
    # recomputes days since 0001-01-01; confirm whether that is intentional.
    x, y = cal_julian(1,1,1)
    ordinal = y +1
    print " today :", datetime.date.fromordinal(ordinal)
if __name__ == "__main__" :
start,julian_day = cal_julian(2015,2,3)
cal_ordinal(start) | true |
34c5c05ed6220388b8d9bca8e5666a417384a5b5 | Python | gregorylull/human-sound-classification | /src/utilities/data.py | UTF-8 | 6,353 | 2.53125 | 3 | [] | no_license | import glob
import pandas as pd
import numpy as np
import re
import json
import pickle
from matplotlib import pylab as plt
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.metrics import classification_report, f1_score, make_scorer, accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
# At this point i do not know how to get the root folder programmatically,
# if this file/folder is moved around the rel path needs to be updated
ROOT_FOLDER = '../../'
# Folder holding the CHiME csv/pickle artifacts.  (The f-prefix below is
# unnecessary -- the string has no placeholders.)
CHIME_FOLDER = f'analysis/chime/'
def get_params(model='logreg', **kwargs):
    """Return the grid-search parameter space for the given model key.

    Pass fast=True to get a trivial one-point grid for pipeline smoke tests.
    Returns None for an unrecognised model key.
    """
    # exit early and use model defaults when testing the pipeline or
    # introducing new models
    if kwargs.get('fast'):
        print(f'\n Fast modeling for {model}\n')
        return {
            'pca__n_components': [1]
        }

    # Expressed as explained-variance ratios, not component counts: keep
    # enough PCA components to explain 30%..90% of the variation.
    pca_component_range = np.arange(0.3, 1.0, 0.1)

    grids = {
        'logreg': {
            # the higher the C the more important the features;
            # the lower the C the more powerful the regularization
            'logreg__C': np.arange(1, 1000, 100),
            'pca__n_components': pca_component_range,
        },
        'svm': {
            'svm__kernel': ['poly', 'rbf'],
            # C relates to the penalty term; high values tend to overfit
            'svm__C': np.arange(1, 3, 0.25),
            # degree only applies to the polynomial kernel
            'svm__degree': [3, 4],
            'pca__n_components': pca_component_range,
        },
        'knn': {
            'knn__n_neighbors': np.arange(5, 30, 5),
            'pca__n_components': pca_component_range,
        },
        'randomforest': {
            'pca__n_components': pca_component_range,
            'randomforest__n_estimators': np.arange(10, 150, 15),
        },
    }
    return grids.get(model)
def get_grid_pipeline(model_type, params_=None):
    """Build a closure pipeline(X, y, **kwargs) that grid-searches model_type.

    If params_ is given it is used as the search grid; otherwise the grid is
    looked up via get_params (honouring kwargs such as fast=True).
    """
    def pipeline(X, y, **kwargs):
        search_grid = params_ if params_ else get_params(model_type, **kwargs)
        return train_fit_test_pipeline(
            X,
            y,
            model_type,
            search_grid,
            **kwargs
        )
    return pipeline
def get_model(model='logreg'):
    """Instantiate an untrained sklearn classifier for the given model key.

    Returns None for an unrecognised key.
    """
    factories = {
        'logreg': lambda: LogisticRegression(solver='lbfgs', max_iter=1000),
        'svm': lambda: svm.SVC(gamma='auto', cache_size=1000, max_iter=5000),
        'knn': lambda: KNeighborsClassifier(),
        'randomforest': lambda: RandomForestClassifier(n_estimators=10),
    }
    factory = factories.get(model)
    return factory() if factory else None
# returns scorer(estimator, X_, y_)
def get_scoring_metric(**kwargs):
    """Build a sklearn scorer from kwargs['metric'].

    Supported metrics: 'f1' and 'accuracy' (the default when the key is
    missing or falsy).  Any other value yields None.
    """
    metric = kwargs.get('metric') or 'accuracy'
    if metric == 'f1':
        return make_scorer(f1_score, greater_is_better=True)
    if metric == 'accuracy':
        return make_scorer(accuracy_score, greater_is_better=True)
def get_pipeline(model_type):
    """Assemble the scale -> PCA -> classifier Pipeline for model_type."""
    steps = [
        ('standard_scaler', StandardScaler()),
        ('pca', PCA()),
        (model_type, get_model(model_type)),
    ]
    return Pipeline(steps)
def train_fit_test_pipeline(X, y, model_type='logreg', params={}, cv=3, **kwargs):
    """
    Fit a GridSearchCV over the model's pipeline and return it.

    The preprocessing lives inside the Pipeline so each CV fold is scaled
    and PCA-transformed independently, preventing cross-fold leakage:
    https://towardsdatascience.com/pre-process-data-with-pipeline-to-prevent-data-leakage-during-cross-validation-e3442cca7fdc
    """
    grid_search = GridSearchCV(
        get_pipeline(model_type),
        params,
        cv=cv,
        scoring=get_scoring_metric(**kwargs),
    )
    grid_search.fit(X, y)
    return grid_search
def split(X, y, test_size=0.2):
    """
    Split data into stratified Train, Validate, Test.

    Returns a dict holding the originals, the three stratified splits, and
    the train+validate remainder ('X_cv'/'y_cv') for cross-validation.
    """
    # First carve off the held-out Test set, stratified on the target.
    X_rest, X_test, y_rest, y_test = train_test_split(
        X, y, test_size=test_size, stratify=y
    )
    # Split what is left into Train and Validate, stratified again.
    X_train, X_validate, y_train, y_validate = train_test_split(
        X_rest, y_rest, test_size=test_size, stratify=y_rest
    )
    return {
        # original
        'X': X,
        'y': y,
        # train test split
        'X_train': X_train,
        'y_train': y_train,
        'X_validate': X_validate,
        'y_validate': y_validate,
        'X_test': X_test,
        'y_test': y_test,
        # this is from the first split, can use cross validation
        'X_cv': X_rest,
        'y_cv': y_rest
    }
def get_train_val_test(use_saved=False):
    """
    Load (or build and cache) the stratified splits for the chime data set.

    When use_saved is True the pickled splits are loaded from disk;
    otherwise they are rebuilt from chime_mfcc.csv and re-pickled.
    Returns the dict produced by split().
    """
    tvt_filename = f'{CHIME_FOLDER}train_val_test.pkl'
    print('\nTrainValTest - using cached pkl :', use_saved)
    if use_saved:
        with open(tvt_filename, 'rb') as readfile:
            train_val_test = pickle.load(readfile)
    else:
        # clean and remove NaN
        chime_mfcc_filename = f'{CHIME_FOLDER}chime_mfcc.csv'
        raw_df = pd.read_csv(chime_mfcc_filename)
        df = raw_df.dropna(axis='columns')
        # get features and target (drop label/bookkeeping columns)
        X = df.drop(columns=['Unnamed: 0', 'has_child',
                             'has_male', 'has_female', 'has_human', 'chunkname'])
        y = df['has_human']
        train_val_test = split(X, y)
        # Cache for the next run with use_saved=True.
        with open(tvt_filename, 'wb') as writefile:
            pickle.dump(train_val_test, writefile)
    print('\nloading train_val_test pkl for chime:\n',
          train_val_test.keys(), '\n')
    for key, val in train_val_test.items():
        print(key, val.shape)
    return train_val_test
return train_val_test
class NaiveModel:
    """Baseline "model" that always predicts the same class for every sample."""

    def __init__(self, y, predicting_class=1):
        # One constant prediction per element of y.
        self.results = [predicting_class for _ in range(len(y))]

    def predict(self, *args, **kwargs):
        """Return the constant predictions; all arguments are ignored."""
        return self.results
| true |
109e1ffa0102c45d752a8b50d1313923095d3fc4 | Python | rraj29/ProgramFlow | /contrived.py | UTF-8 | 382 | 3.734375 | 4 | [] | no_license | numbers = [1,45, 32, 12, 60]
#ELSE in a LOOP.
#HERE, the use is different than that in IF statement.
#The ELSE STATEMENT will COME TO WORK ONLY IF THE WHOLE LOOP WORKS FULLY WITHOUT BREAKING ANYWHERE.
# BUG FIX: the 'else' was paired with the 'if', so "acceptable" printed once per
# non-multiple-of-8 element.  A for/else -- which is what the comments above
# describe -- needs the 'else' aligned with 'for': it then runs exactly once,
# and only when the loop finishes without hitting 'break'.
for number in numbers:
    if number % 8 == 0:
        #reject the list
        print("The numbers are unacceptable.")
        break
else:
    print("The numbers are acceptable.")
dbe80ee312ef48e90588562a2763cecbecd540af | Python | gffryclrk/ThinkPython2e | /test/test_ch15_9_1_geometry.py | UTF-8 | 3,548 | 3.78125 | 4 | [] | no_license | """Unit tests for shape objects created for Chapter 15 Exercise 1"""
import math
import unittest
import sys
sys.path.append('ch15/')
from ex_15_9_1 import Point, Rectangle, Circle, rect_in_circle, rect_circle_overlap, LineSegment, Line
class TestGeometry(unittest.TestCase):
    """Unit tests for the Point/Rectangle/Circle/Line/LineSegment shapes."""

    def setUp(self):
        # Fresh 100x100 rectangle anchored at the origin for each test.
        self.rectangle = Rectangle(Point(0, 0), 100, 100)

    def test_point_distance(self):
        """Point.distance returns the Euclidean distance (3-4-5 triangle)."""
        pointa = Point(0,0)
        pointb = Point(3,4)
        self.assertTrue(pointa.distance(pointb) == 5)

    def test_rectangle_has_4_corners(self):
        """created rectangle has 4 points"""
        self.assertTrue(len(self.rectangle.vertices()) == 4)

    def test_rectangle_diagonals(self):
        """ Assert lengths of two diagonals in rectangle are equal """
        vl = self.rectangle.vertices()
        # Opposite corner pairs: (0,2) and (1,3).
        d1 = math.sqrt((vl[0].x - vl[2].x)**2 + (vl[0].y - vl[2].y)**2)
        d2 = math.sqrt((vl[1].x - vl[3].x)**2 + (vl[1].y - vl[3].y)**2)
        self.assertTrue(d1 == d2)

    def test_rect_in_circle(self):
        """ Test rect_in_circle returns correct result """
        r1 = Rectangle(Point(0,0), width = 5, height = 3)
        c1 = Circle(Point(0, 0), radius = 6)
        self.assertTrue(rect_in_circle(c1, r1))
        # Shifting the rectangle's bottom-left corner pushes it outside.
        r1.bl = Point(3, 0) # TODO: Create a shift method that moves shapes
        self.assertFalse(rect_in_circle(c1, r1))

    def test_line_distance(self):
        """ Test Line object distance method """
        l = Line(Point(0,0), Point(1,1))
        xy = Point(0, 2.5)
        # Expected: perpendicular distance from (0, 2.5) to the line y = x.
        half_hypoteneuse = math.sqrt((2.5 ** 2) + (2.5 ** 2)) / 2
        self.assertTrue(
            math.isclose(
                l.distance(xy), half_hypoteneuse
            )
        )
        xy = Point(0, 2)
        d = 2 * math.sin( math.pi / 4 )
        self.assertTrue(
            math.isclose(
                l.distance(xy), d
            )
        )

    def test_rect_circle_overlap(self):
        """ Test some expected values for this overlap function"""
        c = Circle(Point(), 5)
        r = Rectangle(Point(0, 5), 1, 1)
        self.assertTrue(rect_circle_overlap(c, r))
        r.bl = Point(4,3)
        self.assertTrue(rect_circle_overlap(c, r))
        # (3.75, 3.75) lies just outside the radius-5 circle.
        r.bl = Point(3.75, 3.75)
        self.assertFalse(rect_circle_overlap(c, r))
        r.bl = Point(5, -0.5)
        self.assertTrue(rect_circle_overlap(c, r))

    def test_line_segment_dot_product(self):
        """ Test vector dot product implementation from https://www.geeksforgeeks.org/minimum-distance-from-a-point-to-the-line-segment-using-vectors/"""
        a = Point(0, 0)
        b = Point(2, 0)
        e = Point(4, 0)
        ab = LineSegment(a, b)
        be = LineSegment(b, e)
        ae = LineSegment(a, e)
        # Collinear segments along the x axis: (2,0).(2,0)=4, (2,0).(4,0)=8.
        self.assertTrue(ab.dot_product(be) == 4)
        self.assertTrue(ab.dot_product(ae) == 8)

    def test_line_segment_shortest_distance(self):
        """Shortest distance from a point to a finite segment."""
        a = Point(0, 0)
        b = Point(2, 0)
        e = Point(4, 0)
        ab = LineSegment(a, b)
        be = LineSegment(b, e)
        ae = LineSegment(a, e)
        # e lies beyond b on the same line: distance is to endpoint b.
        self.assertTrue(ab.distance(e) == 2)
        b = Point(2, 2)
        e = Point(2, 0)
        ab = LineSegment(a, b)
        be = LineSegment(b, e)
        ae = LineSegment(a, e)
        d = 2 * math.sin( math.pi / 4 )
        self.assertTrue(
            math.isclose(
                ab.distance(e), d
            )
        )
if __name__ == '__main__':
    # Allow running this test module directly (unittest/pytest discovery also works).
    unittest.main()
| true |
f90f235a4ef626b40a9cf040de00523f035752d9 | Python | phoebe4561/week2HW | /hw2.py | UTF-8 | 1,366 | 3.84375 | 4 | [] | no_license | def calculate(min, max):
    # Accumulate and print the inclusive sum min..max.
    # NOTE(review): the parameters and local shadow builtins min/max/sum.
    sum = 0
    for x in range(min, max+1):
        sum = sum+x
    print(sum)
# Demo calls: 1+2+3 = 6 and 4+5+6+7+8 = 30.
calculate(1, 3)
calculate(4, 8)
def avg(data):
    """Print the mean salary: total of data['employees'] salaries / data['count']."""
    total = sum(employee['salary'] for employee in data['employees'])
    print(total / data['count'])


avg({
    "count": 3,
    "employees": [
        {"name": "John", "salary": 30000},
        {"name": "Bob", "salary": 60000},
        {"name": "Jenny", "salary": 50000}
    ]
})
def maxProduct(nums):
    """Print the largest product of two elements at distinct positions of nums."""
    best = float('-inf')
    for i, first in enumerate(nums):
        for second in nums[i + 1:]:
            product = first * second
            if product > best:
                best = product
    print(best)


maxProduct([5, 20, 2, 6])
maxProduct([10, -20, 0, 3])
def twoSum(nums, target):
    """Return [i, j] for the first pair (i < j) with nums[i] + nums[j] == target.

    Returns None when no such pair exists.
    """
    for i, left in enumerate(nums):
        needed = target - left
        for j in range(i + 1, len(nums)):
            if nums[j] == needed:
                return [i, j]


result = twoSum([2, 11, 7, 15], 9)
print(result)
def maxZeros(nums):
    """Print the length of the longest run of consecutive zeros in nums."""
    longest = 0
    run = 0
    for value in nums:
        if value == 0:
            run += 1
            longest = max(longest, run)
        else:
            run = 0
    print(longest)


maxZeros([0, 1, 0, 0])
maxZeros([1, 0, 0, 0, 0, 1, 0, 1, 0, 0])
maxZeros([1, 1, 1, 1, 1])
| true |
c509bbfb2ab796ab5d5362b45efc99b268cd606a | Python | LukeTownsendJCU/CP1804-Practicals | /prac_01/sales_bonus.py | UTF-8 | 501 | 4.4375 | 4 | [] | no_license | """
Program to calculate and display a user's bonus based on sales.
If sales are under $1,000, the user gets a 10% bonus.
If sales are $1,000 or over, the bonus is 15%.
"""
def main():
    """Read sales amounts until a negative value is entered; print each bonus.

    Bonus is 10% for sales under $1,000 and 15% for $1,000 or more.
    """
    sales = float(input("Please enter sales: $"))
    while sales >= 0:
        rate = 0.1 if sales < 1000 else 0.15
        print("{:.2f}".format(sales * rate))
        sales = float(input("Please enter sales: $"))


main()
| true |
71b598b813b9f97d8ba336cdd2def5c1ea6f60be | Python | nacro90/user-activity-generator | /src/data/dataset.py | UTF-8 | 8,330 | 2.703125 | 3 | [
"MIT"
] | permissive | from abc import ABC, abstractmethod
from enum import Enum, auto
from math import sqrt
from pathlib import Path
from typing import Callable, ClassVar, Dict, Optional, Tuple, Type
import pandas
from pandas import DataFrame, Series
from ..util.integrity import recursive_sha256
from .filetype import Csv, FileType
from .reader import CsvReader, Reader
class Activity(Enum):
    """Canonical activity labels shared by every dataset adapter below.

    Values are auto-assigned integers; only identity/mapping matters.
    """
    WALKING = auto()
    JOGGING = auto()
    UPSTAIRS = auto()
    DOWNSTAIRS = auto()
    SITTING = auto()
    STANDING = auto()
class Dataset(ABC):
    """Abstract adapter for an activity dataset stored on disk.

    Concrete subclasses declare the activity-label mapping, the column
    schema (including generated columns), the sampling frequency, and how
    the raw files are read.
    """

    ACTIVITY_COLUMN: ClassVar[str] = NotImplemented
    ACTIVITIES: ClassVar[Dict[Activity, str]] = NotImplemented
    COLUMNS: ClassVar[Dict[str, Reader.DataType]] = NotImplemented
    FREQUENCY: ClassVar[int] = NotImplemented
    TRIAL_COLUMN: ClassVar[str] = NotImplemented
    SUBJECT_COLUMN: ClassVar[str] = NotImplemented

    @classmethod
    @abstractmethod
    def generators(cls) -> Optional[Dict[str, Callable[[DataFrame], Series]]]:
        """Return derived-column factories keyed by column name, or None."""
        raise NotImplementedError

    @classmethod
    def is_columns_valid(cls) -> bool:
        """True when every generated column name is declared in COLUMNS."""
        generated = cls.generators()
        return not generated or all(name in cls.COLUMNS for name in generated)

    def __init__(self, path: Path):
        # Fail fast on a subclass whose generators do not match its schema.
        if not self.is_columns_valid():
            raise ValueError("All generator keys must be specified in column field")
        self.path = path

    @property
    def hash(self) -> str:
        """SHA-256-based content hash of the dataset path (via recursive_sha256)."""
        return recursive_sha256(self.path)

    @classmethod
    def enumerate_activities(cls) -> Dict[str, int]:
        """Map each activity label to a stable integer id (alphabetical order)."""
        return {
            label: index
            for index, label in enumerate(sorted(cls.ACTIVITIES.values()))
        }

    @abstractmethod
    def read(self) -> DataFrame:
        """Load the raw dataset into a DataFrame."""
class Wisdm(Dataset):
    """Adapter for the WISDM dataset: a single CSV read via CsvReader."""
    ACTIVITIES = {
        Activity.WALKING: "Walking",
        Activity.JOGGING: "Jogging",
        Activity.UPSTAIRS: "Upstairs",
        Activity.DOWNSTAIRS: "Downstairs",
        Activity.SITTING: "Sitting",
        Activity.STANDING: "Standing",
    }
    ACTIVITY_COLUMN = "activity"
    TRIAL_COLUMN = "trial"
    SUBJECT_COLUMN = "subject"
    # Raw accelerometer columns plus the derived (generated) ones below.
    COLUMNS = {
        "user": Reader.DataType.CATEGORY,
        "activity": Reader.DataType.CATEGORY,
        "timestamp": Reader.DataType.INT64,
        "xaccel": Reader.DataType.FLOAT64,
        "yaccel": Reader.DataType.FLOAT64,
        "zaccel": Reader.DataType.FLOAT64,
        "magnitude": Reader.DataType.FLOAT64,
        "xaccel_norm": Reader.DataType.FLOAT64,
        "yaccel_norm": Reader.DataType.FLOAT64,
        "zaccel_norm": Reader.DataType.FLOAT64,
        "magnitude_norm": Reader.DataType.FLOAT64,
    }
    FREQUENCY = 20

    @classmethod
    def generators(cls) -> Optional[Dict[str, Callable[[DataFrame], Series]]]:
        """Factories for the derived columns declared in COLUMNS."""
        def magnitude(df: DataFrame) -> Series:
            # Euclidean norm of the raw acceleration vector, minus 10 --
            # presumably removing ~1g of gravity (g ~ 9.8 m/s^2); confirm
            # against the raw data's units.
            xacc = df["xaccel"]
            yacc = df["yaccel"]
            zacc = df["zaccel"]
            euclidean = (xacc ** 2 + yacc ** 2 + zacc ** 2) ** 0.5
            return Series(abs(euclidean - 10))

        def normalize(series: Series) -> Series:
            # Mean-centred, range-scaled copy of the series.
            return Series((series - series.mean()) / (series.max() - series.min()))

        return {
            "magnitude": magnitude,
            "xaccel_norm": lambda df: normalize(df["xaccel"]),
            "yaccel_norm": lambda df: normalize(df["yaccel"]),
            "zaccel_norm": lambda df: normalize(df["zaccel"]),
            "magnitude_norm": lambda df: normalize(magnitude(df)),
        }

    def __init__(self, path: Path) -> None:
        Dataset.__init__(self, path)

    def read(self) -> DataFrame:
        """Read the single WISDM CSV into a typed DataFrame."""
        reader = CsvReader(self.path)
        return reader.read(self.COLUMNS)
class MotionSense(Dataset):
    """Adapter for the MotionSense dataset.

    On disk: one folder per "<activity>_<trial>", containing one CSV per
    subject named like "<prefix>_<subject>.csv".
    """
    ACTIVITIES = {
        Activity.WALKING: "wlk",
        Activity.JOGGING: "jog",
        Activity.UPSTAIRS: "ups",
        Activity.DOWNSTAIRS: "dws",
        Activity.SITTING: "sit",
        Activity.STANDING: "std",
    }
    # Raw device-motion columns plus the derived (generated) ones below.
    COLUMNS = {
        "subject": Reader.DataType.INT64,
        "trial": Reader.DataType.INT64,
        "activity": Reader.DataType.CATEGORY,
        "attitude.roll": Reader.DataType.FLOAT64,
        "attitude.pitch": Reader.DataType.FLOAT64,
        "attitude.yaw": Reader.DataType.FLOAT64,
        "gravity.x": Reader.DataType.FLOAT64,
        "gravity.y": Reader.DataType.FLOAT64,
        "gravity.z": Reader.DataType.FLOAT64,
        "rotationRate.x": Reader.DataType.FLOAT64,
        "rotationRate.y": Reader.DataType.FLOAT64,
        "rotationRate.z": Reader.DataType.FLOAT64,
        "xrot_norm": Reader.DataType.FLOAT64,
        "yrot_norm": Reader.DataType.FLOAT64,
        "zrot_norm": Reader.DataType.FLOAT64,
        "userAcceleration.x": Reader.DataType.FLOAT64,
        "userAcceleration.y": Reader.DataType.FLOAT64,
        "userAcceleration.z": Reader.DataType.FLOAT64,
        "magnitude": Reader.DataType.FLOAT64,
        "xaccel_norm": Reader.DataType.FLOAT64,
        "yaccel_norm": Reader.DataType.FLOAT64,
        "zaccel_norm": Reader.DataType.FLOAT64,
        "magnitude_norm": Reader.DataType.FLOAT64,
        "gravity.x.real": Reader.DataType.FLOAT64,
        "gravity.y.real": Reader.DataType.FLOAT64,
        "gravity.z.real": Reader.DataType.FLOAT64,
        "userAcceleration.x.real": Reader.DataType.FLOAT64,
        "userAcceleration.y.real": Reader.DataType.FLOAT64,
        "userAcceleration.z.real": Reader.DataType.FLOAT64,
    }
    FREQUENCY = 50
    ACTIVITY_COLUMN = "activity"
    SUBJECT_COLUMN = "subject"
    TRIAL_COLUMN = "trial"

    @classmethod
    def generators(cls) -> Dict[str, Callable[[DataFrame], Series]]:
        """Factories for the derived columns declared in COLUMNS."""
        def magnitude(df: DataFrame) -> Series:
            # Euclidean norm of the user (gravity-free) acceleration vector.
            xacc = df["userAcceleration.x"]
            yacc = df["userAcceleration.y"]
            zacc = df["userAcceleration.z"]
            euclidean = (xacc ** 2 + yacc ** 2 + zacc ** 2) ** 0.5
            return Series(euclidean)

        def normalize(series: Series) -> Series:
            # Mean-centred, range-scaled copy of the series.
            return Series((series - series.mean()) / (series.max() - series.min()))

        def ms_squarize(series: Series) -> Series:
            # Scale by 10 -- presumably converting g-units to roughly m/s^2
            # (g ~ 9.8); confirm against the raw data's units.
            return series.multiply(10)

        return {
            "magnitude": magnitude,
            "xaccel_norm": lambda df: normalize(df["userAcceleration.x"]),
            "yaccel_norm": lambda df: normalize(df["userAcceleration.y"]),
            "zaccel_norm": lambda df: normalize(df["userAcceleration.z"]),
            "xrot_norm": lambda df: normalize(df["rotationRate.x"]),
            "yrot_norm": lambda df: normalize(df["rotationRate.y"]),
            "zrot_norm": lambda df: normalize(df["rotationRate.z"]),
            "magnitude_norm": lambda df: normalize(magnitude(df)),
            "gravity.x.real": lambda df: ms_squarize(df["gravity.x"]),
            "gravity.y.real": lambda df: ms_squarize(df["gravity.y"]),
            "gravity.z.real": lambda df: ms_squarize(df["gravity.z"]),
            "userAcceleration.x.real": lambda df: ms_squarize(df["userAcceleration.x"]),
            "userAcceleration.y.real": lambda df: ms_squarize(df["userAcceleration.y"]),
            "userAcceleration.z.real": lambda df: ms_squarize(df["userAcceleration.z"]),
        }

    def __init__(self, path: Path) -> None:
        Dataset.__init__(self, path)

    def read(self) -> DataFrame:
        """Load every trial CSV under self.path into one typed DataFrame.

        FIX: frames are collected in a list and concatenated once, instead of
        re-concatenating the accumulator inside the loop, which copied all
        previously-read rows on every iteration (quadratic behaviour).
        """
        pandas_columns = {
            name: type_enum.value for name, type_enum in self.COLUMNS.items()
        }
        # Seed with an empty, fully-columned frame so the result always
        # carries every declared column, exactly as the incremental
        # version did.
        frames = [DataFrame(columns=pandas_columns)]
        for folder in self.path.iterdir():
            activity, trial = self.split_activity_and_trial(folder.name)
            for file in folder.iterdir():
                df = CsvReader(file).read(self.COLUMNS)
                df.drop(columns="Unnamed: 0", inplace=True)  # stray index column
                df["subject"] = self.strip_subject_no(file.name)
                df["trial"] = trial
                df["activity"] = activity
                frames.append(df)
        return pandas.concat(frames).astype(pandas_columns)

    def strip_subject_no(self, fname: str) -> int:
        """Extract N from a file name shaped like '<prefix>_<N>.<ext>'."""
        return int(fname.split("_")[1].split(".")[0])

    def split_activity_and_trial(self, fname: str) -> Tuple[str, int]:
        """Split a folder name '<activity>_<trial>' into its two parts."""
        split = fname.split("_")
        return split[0], int(split[1])
| true |
9e63f6af003a87bb2e8d118af3864829277eea62 | Python | wpilibsuite/supervisely | /supervisely_lib/collection/str_enum.py | UTF-8 | 116 | 2.53125 | 3 | [] | no_license | # coding: utf-8
from enum import Enum
class StrEnum(Enum):
def __str__(self):
return str(self.value)
| true |
41d36559c0e2240b8eec04ebf4dd9747473c64c7 | Python | InduPriya-pokuri/Python_Advance_Topics | /Python Advanced Workshop/files/multiplication table.py | UTF-8 | 1,080 | 3.28125 | 3 | [] | no_license | ''' n=int(input("Enter a number:"))
for i in range(1,11):
print(n,'*',i,'=',n*i)
n2=int(input("Enter the number:"))
mul=[print(n2,'*',i,'=',n2*i) for i in range(1,11)]
print(mul) '''
dic={'int':123,'float':87.67,'str':'name'}
#print(dic)
dic2={'st':'name',id:238,'cgpa':8.7}
#print(dic2)
'''print(dic.keys())
print(dic.values())
print(dic.items())
for key,val in dic.items():
print(key,val,sep=" ")
l=[]
l.append(dic)
l.append(dic2)
for key,val in dic.items():
print(key,val,sep=",")
for s in l:
for key,value in s.items():
print(key,value,sep=':')
print()
for key,val in zip(dic.items(),dic2.items()):
print(key,val,sep=",")
dic={}
st=input()
for ch in st:
dic[ch]=ord(ch)
print(dic)
dic2={ch:ord(ch) for ch in st if ord(ch)%2==1}
print(dic2) '''
def is_perfect(n):
    """Return True when n equals the sum of its proper divisors."""
    total = sum(d for d in range(1, n) if n % d == 0)
    return total == n
# Map each perfect number below 200 to its square (6 and 28 qualify).
dic={num:num**2 for num in range(1,200) if is_perfect(num)}
print(dic)
| true |
0ed34afd4d6c317f82595655a171a17984efc3ee | Python | howard/serdisp-python | /libdisplay.py | UTF-8 | 7,937 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python
from ctypes import *
import characters as chrs
import subprocess
class Display:
    def __init__(self, driver, port, options="", lib_path="/usr/local/lib/libserdisp.so.1"):
        """Load the serdisp C library via ctypes and open/initialise the display.

        driver/port/options are passed straight through to the C API;
        geometry and colour capabilities are then queried and cached.
        """
        self.lib = CDLL(lib_path)
        self.driver = driver
        # The following is ugly and C-ish
        self.disp_conn = self.lib.SDCONN_open(port)
        self.disp = self.lib.serdisp_init(self.disp_conn, self.driver, options) # display descriptor
        self.width = self.lib.serdisp_getwidth(self.disp)
        self.height = self.lib.serdisp_getheight(self.disp)
        self.colors = self.lib.serdisp_getcolours(self.disp)
        self.color_depth = self.lib.serdisp_getdepth(self.disp)
        #self.pixel_aspect_ratio = self.lib.serdisp_getaspect(self.disp)
    def close(self):
        """Shut the display down via serdisp_quit.

        NOTE(review): the commented-out calls suggest serdisp_close /
        SDCONN_close were once used; serdisp_quit presumably releases the
        connection as well -- confirm against the serdisplib docs.
        """
        self.lib.serdisp_quit(self.disp)
        #self.lib.serdisp_close(self.disp)
        #self.lib.SDCONN_close(self.disp_conn)
def reset(self, full=False):
if full:
self.lib.serdisp_fullreset(self.disp)
else:
self.lib.serdisp_reset(self.disp)
    def clear_buffer(self):
        """Clear the in-memory display buffer (does not redraw the screen)."""
        self.lib.serdisp_clearbuffer(self.disp)
def backlight(self, state=2):
"""Puts the backlight in the given state. Use 0, 1, or 2 for toggle (default)."""
if not state in (0, 1, 2):
raise "Invalid bacaklight state: it must be either 0, 1 or 2 for toggle."
else:
self.lib.serdisp_setoption(self.disp, 'BACKLIGHT', state)
def invert(self):
"""Inverts the colors."""
self.lib.serdisp_setoption(self.disp, 'INVERT', 2)
def rotate(self, n):
"""Rotates the display by n degrees."""
self.lib.serdisp_setoption(self.disp, 'ROTATE', n)
def see(self, x, y):
"""Inspects the color of a pixel at the position (x/y)."""
return self.lib.serdisp_getcolour(self.disp, x, y)
def draw(self, x, y, color=0xFF000000, update=True):
"""
Changegs a pixel's color to the third argument. Default is 0xFF000000.
The last argument decides whether the display is updated after setting
the pixel or not. Default is True.
"""
self.lib.serdisp_setcolour(self.disp, x, y, color)
if update:
self.update()
def draw_pattern(self, x_offset, y_offset, pattern="", update=True):
"""
Draws a b/w pattern, which has to be defined like this:
--++--++
++--++--
...wereas - is white and + is black. It's important that each line has the
same length.
"""
lines = pattern.split('\n')
width = len(lines[0])
height = len(lines)
for y in range(0, height):
for x in range(0, width):
try:
px = lines[y][x]
if px == '+':
color = 0xFF000000
else:
color = 0xFFFFFFFF
self.draw(x_offset+x, y_offset+y, color, False)
except IndexError:
pass # Ignore this one silently.
if update:
self.update()
def erase(self, x, y, update=True):
"""Erases a given pixel."""
self.draw(x, y, 0xFFFFFFFF, update)
def write(self, x=0, y=0, update=True, string=""):
"""
Writes a string on screen, starting at a given position. Currently,
only b/w text is available, and the font size is bound to be 10px.
"""
def get_p(what):
"""Gets a pattern specified in the first argument from the characters file."""
try:
return getattr(chrs, what)
except AttributeError:
return getattr(chrs, 'NONE')
special_chrs = {
' ':get_p('SPACE'),
'_':get_p('UNDERSCORE'),
'-':get_p('DASH'),
'.':get_p('DOT'),
':':get_p('COLON'),
',':get_p('COMMA'),
';':get_p('SEMICOLON'),
'!':get_p('EXCL_MARK'),
'?':get_p('QUES_MARK'),
'&':get_p('AMPERSAND'),
'#':get_p('HASH'),
'"':get_p('QUOT_MARK'),
'$':get_p('DOLLAR'),
'%':get_p('PERCENT'),
'\'':get_p('APOSTROPHE'),
'(':get_p('PARENTHESIS1'),
')':get_p('PARENTHESIS2'),
'*':get_p('ASTERISK'),
'+':get_p('PLUS'),
'/':get_p('SLASH'),
'\\':get_p('BACKSLASH'),
'<':get_p('SMALLER'),
'>':get_p('BIGGER'),
'=':get_p('EQUAL'),
'[':get_p('SQUAR_BRACKET1'),
']':get_p('SQUAR_BRACKET2'),
'@':get_p('AT'),
'^':get_p('CARRET'),
'`':get_p('GRAVE'),
'|':get_p('PIPE'),
'~':get_p('TILDE'),
'{':get_p('CURL_BRACKET1'),
'}':get_p('CURL_BRACKET2'),
'0':get_p('ZERO'),
'1':get_p('ONE'),
'2':get_p('TWO'),
'3':get_p('THREE'),
'4':get_p('FOUR'),
'5':get_p('FIVE'),
'6':get_p('SIX'),
'7':get_p('SEVEN'),
'8':get_p('EIGHT'),
'9':get_p('NINE')
}
x_offset = x
y_offset = y
for c in string:
if c == '\n':
x_offset = x
y_offset += 10
else:
try:
if c in special_chrs.keys():
char = special_chrs[c]
else:
char = getattr(chrs, c)
self.draw_pattern(x_offset, y_offset, char, False)
char_width = len(char.split('\n')[0])
if x_offset > self.width-15:
x_offset = x
y_offset += 10
else:
x_offset += char_width
except AttributeError:
raise "Sorry, this character is not available at this time."
if update:
self.update()
def image(self, path):
"""Draws an image on sceen."""
pass
def clear(self):
"""Clears whole display."""
self.lib.serdisp_clear(self.disp)
def update(self):
self.lib.serdisp_update(self.disp)
def rewrite(self):
self.lib.serdisp_rewrite(self.disp)
def blink(self, method, n=1, t=1):
"""
Blinks display content in the way given in method, n times in
intervals of t seconds. Methods: backlight, reverse.
"""
if not method in ('backlight', 'reverse'):
raise "Invalid blink method: it must be either 'backlight' or 'reverse'."
else:
self.lib.serdisp_blink(self.disp, method.upper(), n, t)
def test(self):
self.write(0,0,True," AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPp\nQqRrSsTtUuVvWwXxYyZz0123456789\n\
_-.:,;!?&#\"$%'()*+/\\<>=[]@^`|~{}")
self.draw_pattern(self.width/2-73, self.height/2-23, chrs.AGF_HUGE)
def get_output(command, stdout = True, stderr = False):
"""
Runs a program specified in the first argument and returns its output
as a string. Code borrowed from P1tr.
"""
if (stdout or stderr) and not (stdout and stderr):
pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if stdout:
return pipe.stdout.read()
else:
return pipe.stderr.read()
elif stdout and stderr:
return subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).stdout().read()
else:
try:
return bool(subprocess.Popen(command))
except OSError:
return "Output unavailable."
| true |
9ae3ca509bc4d67f82cfb0ea1ffa1a923bd6bb19 | Python | LauraPeraltaV85/AirBnB_clone_v2 | /web_flask/5-number_template.py | UTF-8 | 1,172 | 3.015625 | 3 | [] | no_license | #!/usr/bin/python3
"""Hello Flask"""
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def hello():
"""prints hello"""
strict_slashes = False
return 'Hello HBNB!'
@app.route('/hbnb')
def _hbnb():
"""prints hbnb"""
strict_slashes = False
return 'HBNB'
@app.route('/c/<text>')
def _text_random(text):
"""prints random text"""
strict_slashes = False
if '_' in text:
text = text.replace('_', ' ')
return 'C {}'.format(text)
@app.route('/python/<text>')
@app.route('/python')
@app.route('/python/')
def _text_random_python(text='is cool'):
"""prints random text python"""
strict_slashes = False
if '_' in text:
text = text.replace('_', ' ')
return 'Python {}'.format(text)
@app.route('/number/<int:n>')
def _number(n):
"""prints random number"""
strict_slashes = False
return '{} is a number'.format(n)
@app.route('/number_template/<int:n>')
def _number_template(n):
"""prints random number template"""
strict_slashes = False
return render_template('5-number.html', num=n)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000)
| true |
666ca10e62d558a22ff106a0473d5b9459c91302 | Python | wheejoo/PythonCodeStudy | /5주차 DP,그래프/백준/최소스패닝트리/김휘주.py | UTF-8 | 1,002 | 3.5 | 4 | [] | no_license | # https://www.acmicpc.net/problem/1197
# 크루스칼 - 가장 적은 비용으로 노드 연결 / 사이클 X
# 이것이 코딩테스트다 책 참고
v,e = map(int,input().split())
n = []
for _ in range(e):
a,b,c = map(int,input().split())
n.append((c,a,b)) #c는 가중치
n.sort() #가중치 순으로 정렬
parent = [0] * (v+1)
result = 0
# 특정 원소 속한 집합 찾기
def find_parent(parent, x):
if parent[x] != x:
parent[x] = find_parent(parent, parent[x])
return parent[x]
# 두 원소가 속한 집합 합치기
def union_parent(parent,a,b):
a = find_parent(parent,a)
b = find_parent(parent,b)
if a < b:
parent[b] = a
else:
parent[a] = b
# 자기자신을 초기화
for i in range(1, v+1):
parent[i] = i
for edge in n:
c,a,b = edge
# 사이클이 발생하지 않으면 집합에 포함
if find_parent(parent, a) != find_parent(parent,b):
union_parent(parent,a,b)
result += c
print(result) | true |
6297f8831ac59953f4a6fd428e79f6c8c9efed04 | Python | ddtkra/atcoder | /aising2019/B/main.py | UTF-8 | 697 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python3
import sys
sys.setrecursionlimit(10000)
INF = 1<<32
def solve(N: int, A: int, B: int, P: "List[int]"):
P.sort()
from bisect import bisect_left, bisect_right
x = bisect_right(P, A)
y = bisect_right(P, B)
print(min(x, y-x, N-y))
return
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
A = int(next(tokens)) # type: int
B = int(next(tokens)) # type: int
P = [int(next(tokens)) for _ in range(N)] # type: "List[int]"
solve(N, A, B, P)
if __name__ == '__main__':
main()
| true |
425a82f8a1d69b6679369dc3924e14c63139e38a | Python | Abhishek-MR/SAM | /Python/cart.py | UTF-8 | 348 | 3.765625 | 4 | [] | no_license | cart = []
def cartop( str ):
if str.lower() in ['add']:
nxt_item = raw_input("what do you want to add? ")
cart.append(nxt_item)
elif str.lower() in ['show']:
for x in range(len(cart)):
print cart[x]
return
flag=True
while flag:
opt = raw_input("hat do you want to do")
if(opt=="exit"):
flag=False
else:
cartop(opt)
| true |
0a01c2817249a93a2ed64f7353880bb42532924c | Python | charlesthomas/introspect | /introspect/introspect.py | UTF-8 | 984 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
from datetime import datetime
from os import makedirs, path
from time import sleep
from window_title import get_window_title
NOW_TO_THE_HOUR = '%Y-%m-%d-%H'
EPOCH = '%s'
def logger(run_forever=True, stdout=False):
write_path = setup_write_dir()
run = True
while run:
now = datetime.now()
file_end = now.strftime(NOW_TO_THE_HOUR)
epoch = now.strftime(EPOCH)
output = "%s: %s" % (epoch, get_window_title())
if stdout:
print output
else:
with open('%s_%s.log' % (write_path, file_end), 'a') as log_file:
log_file.write(output + '\n')
run = run_forever
if run:
sleep(1)
def main():
logger(stdout=True)
def setup_write_dir():
partial = path.join(path.expanduser('~'), '.introspect')
if not path.isdir(partial):
makedirs(partial)
return path.join(partial, 'introspect')
if __name__ == '__main__':
main()
| true |
eff4f7ce9f276c4dc5aa2c433fefbce5690bdf90 | Python | GuilhermeCaeiro/wisard | /Wisard.py | UTF-8 | 15,918 | 3.1875 | 3 | [] | no_license | """
Module that implements the WiSARD classifier.
"""
import pyximport; pyximport.install()
from Discriminator import Discriminator
from Utils import DataPreprocessor
import random
import math
import copy
import time
import pickle
import multiprocessing
class Wisard:
"""
WiSARD "neural network". A weightless, RAM-based classifier.
"""
def __init__(self, tuple_size = 2, bleaching = False, seed = 0, shuffle_observations = True, type_mem_alloc = "dalloc"):
"""
Constructor for the WiSARD class.
:param tuple_size: a number (integer) that defines the address size to be used by the memories.
:param bleaching: a boolean indicating if the bleaching functionality is active or not. It defaults to False.
:param seed: a integer to be used as seed for the random number generator, to allow reproducibility. It defaults to 0.
:param shuffle_observations: a boolean to activate the shuffling of an observation, based on the param "seed". It
defaults to True.
:param type_mem_alloc: a string indicating what type of memory allocation shoud be used. The
accepted values are "dalloc" (for dynamically allocation) and "palloc" (for pre allocation).
The consequences of that choice are that "dalloc" is expected to consume less memory and be
slower than "palloc", while "palloc" is expected to consume more memory and be faster. The
default value is "dalloc".
"""
self.seed = seed
self.shuffle_observations = shuffle_observations
self.bleaching = bleaching
self.tuple_size = tuple_size
self.observation_length = 0
self.ram_size = math.pow(2, self.tuple_size)
self.number_of_rams = 0
self.discriminators = {}
self.type_mem_alloc = type_mem_alloc
self.data_preprocessor = DataPreprocessor(
self.tuple_size,
self.ram_size,
self.shuffle_observations,
self.seed
)
"""
def random_mapping(self, observation):
"
Prepares the observation by appling to it the random mapping of the bits in the input sequence,
based on the seed provided during the class creation.
:param observation: list of binary values (lists with zeros and ones as integers).
Returns:
-> A shuffled version of "observation" if "self.shuffle_observations" is
True, or the unmodified "observation" otherwise.
"
if self.shuffle_observations:
observation = copy.deepcopy(observation)
random.seed(self.seed)
random.shuffle(observation)
return observation
"""
def train_bulk(self, observations_and_classes):
"""
Trains the WiSARD classifier based on the provide inputs and its expected outputs.
:param observations_and_classes: a matrix of binary input sequences
(lists with zeros and ones as integers) and their expected outputs.
"""
for observation_and_classe in observations_and_classes:
self.train([observation_and_classe[0]], [observation_and_classe[1]])
def train(self, observations, classes):
"""
Trains the WiSARD classifier based on the provide inputs and its expected outputs.
:param observations: list of binary input sequences (lists with zeros and ones as integers).
:param classes: list of expected outputs (preferrably as strings)
"""
#check lists sizes
if(len(observations) != len(classes)):
raise Exception("Lengths of \"observations\" and \"classes\" must be equal.")
transformed_observations = self.data_preprocessor.prepare_observations(observations, "train")
if self.observation_length == 0:
self.observation_length = self.data_preprocessor.observation_length
self.number_of_rams = self.data_preprocessor.number_of_rams
for i in range(len(transformed_observations)):
observation = transformed_observations[i]
observation_class = classes[i]
discriminator = None
if observation_class not in self.discriminators:
discriminator = Discriminator(observation_class, self.observation_length,
self.tuple_size, self.bleaching, self.type_mem_alloc)
self.discriminators[observation_class] = discriminator
else:
discriminator = self.discriminators[observation_class]
discriminator.write(observation)
def predict_single_proc(self, observations, detailed = False):
"""
Evaluates an observation and returns its predicted class.
:param observation: binary input sequence (list with zeros and ones as integers).
Returns: the class that returned the biggest discriminator response.
"""
predictions = []
start_time = time.time()
transformed_observations = self.data_preprocessor.prepare_observations(observations, "predict")
print("Time taken to prepare observations:", time.time() - start_time)
for observation in transformed_observations:
result_achieved = False
predicted_classes = []
last_scores = []
confidence = 0 # can be optional
current_threshold = 0
bleaching_actions = 0 # can be removed
discriminators_to_evaluate = self.discriminators.keys()
previously_evaluated_discriminators = [] # can be removed
while not result_achieved:
predicted_classes = [{"discriminator": None, "score": 0}]
for discriminator in sorted(discriminators_to_evaluate):
score = self.discriminators[discriminator].evaluate(observation, current_threshold)
last_scores.append(score)
if score > predicted_classes[0]["score"] or predicted_classes[0]["discriminator"] == None:
predicted_classes = [{"discriminator": self.discriminators[discriminator], "score": score}]
elif score == predicted_classes[0]["score"]:
predicted_classes.append({"discriminator": self.discriminators[discriminator], "score": score})
exit_condition = None
if not self.bleaching:
exit_condition = 1
result_achieved = True
confidence = self.calculate_confidence(last_scores)
elif self.bleaching and len(predicted_classes) > 1:
exit_condition = 2
if predicted_classes[0]["score"] == 0:
result_achieved = True
else:
bleaching_actions = bleaching_actions + 1
current_threshold = current_threshold + 1
previously_evaluated_discriminators = discriminators_to_evaluate
last_scores = []
discriminators_to_evaluate = []
for predicted_class in predicted_classes:
discriminators_to_evaluate.append(predicted_class["discriminator"].input_class)
elif self.bleaching and len(predicted_classes) == 1:
exit_condition = 3
result_achieved = True
confidence = self.calculate_confidence(last_scores)
else:
raise Exception("Unable to reach valid stopping criteria.")
break
# If the method ends with more than one class as possible, it just returns the first one.
predictions.append({
"class": predicted_classes[0]["discriminator"].input_class,
"score": predicted_classes[0]["score"],
"confidence": confidence,
"bleaching_actions": bleaching_actions,
"draw": True if len(predicted_classes) > 1 else False
})
if not detailed:
self.simplify_predictions(predictions)
return predictions
def predict_multi_proc(self, observations, detailed, process_id, manager_dict):
predictions = self.predict_single_proc(observations, detailed)
manager_dict[process_id] = predictions
def predict(self, observations, detailed = False, multi_proc = False, num_proc = 1):
"""
Evaluates an observation and returns its predicted class.
:param observation: binary input sequence (list with zeros and ones as integers).
Returns: the class that returned the biggest discriminator response.
"""
predictions = []
if not multi_proc:
predictions = self.predict_single_proc(observations, detailed)
else:
if num_proc <= 0:
raise Exception("\"num_proc\" must be an integer >= 1.")
num_observations = len(observations)
observations_per_chunk = math.ceil(num_observations / num_proc)
processes = {}
processes_predictions = multiprocessing.Manager().dict()
for i in range(num_proc):
observations_chunk = []
start_position = i * observations_per_chunk
end_position = start_position + observations_per_chunk
if i < (num_proc - 1):
observations_chunk = observations[start_position: end_position]
else:
observations_chunk = observations[start_position:]
process = multiprocessing.Process(
target = self.predict_multi_proc,
args=(
observations_chunk,
detailed,
i,
processes_predictions
)
)
process.start()
processes[i] = process
for process in processes:
processes[process].join()
for i in range(num_proc):
predictions = predictions + processes_predictions[i]
return predictions
"""
def prepare_observations(self, observations, caller):
transformed_observations = []
for observation in observations:
if caller == "train" and self.observation_length == 0:
observation_length = len(observation)
if ((observation_length % self.tuple_size) != 0):
raise Exception("Observation length MUST be multiple of tuple size.")
self.observation_length = observation_length
self.number_of_rams = int(self.observation_length / self.tuple_size)
if len(observation) != self.observation_length:
raise Exception("Observation length MUST be %s." % (str(self.observation_length)))
observation = self.random_mapping(observation)
if self.type_mem_alloc == "dalloc":
#observation = self.get_observation_as_bin_strings(observation)
observation = self.get_observation_as_ints(observation) # string consumes way, waaay too much memory here.
elif self.type_mem_alloc == "palloc":
observation = self.get_observation_as_ints(observation)
transformed_observations.append(observation)
return transformed_observations
"""
"""
# eats a lot of memory
def get_observation_as_bin_strings(self, observation):
observation_as_bin_strings = []
for i in range(self.number_of_rams):
address = observation[i * self.tuple_size: (i * self.tuple_size) + self.tuple_size]
address = "".join(str(k) for k in address)
observation_as_bin_strings.append(address)
return observation_as_bin_strings
def get_observation_as_ints(self, observation):
observation_as_ints = []
for i in range(self.number_of_rams):
observation_as_ints.append(
self.get_address_as_int(
observation,
i * self.tuple_size, (i * self.tuple_size) + self.tuple_size
)
)
return observation_as_ints
def get_address_as_int(self, pattern, start, end):
address = pattern[start: end]
address = int("".join(str(i) for i in address), 2)
return address
"""
def simplify_predictions(self, predictions):
for i in range(len(predictions)):
predictions[i] = predictions[i]["class"]
def calculate_confidence(self, scores):
ordered_scores = sorted(scores, reverse = True)
if len(ordered_scores) < 2 or ordered_scores[0] == 0:
return 0.0
elif len(ordered_scores) >= 2:
return (ordered_scores[0] - ordered_scores[1]) / ordered_scores[0]
def deactivate_bleaching(self):
"""
Dectivates bleaching, if that functionality is active. Does nothing otherwise.
"""
self.bleaching = False
def activate_bleaching(self):
"""
Activates bleaching, if that functionality is deactivated.
It is only relevant if the model was trained with bleaching activated,
having that functionality deactivated only afterwards.
"""
self.bleaching = True
def get_mental_images(self, desired_class = None):
"""
Generates a mental image for each discriminator. Each mental imagem is
a 1D list that can be converted to an array and reshaped to the desired
dimensions. If "desired_class" is provided, only the mental image of the
discriminator for the desired class will be generated.
:param desired_class: class of a given discriminator.
Returns: a dictionare of mental images indexed by discriminator's classes.
"""
shuffled_mental_images = {}
unshuffled_mental_images = {}
for discriminator_class in self.discriminators:
if desired_class != None and discriminator_class != desired_class:
continue
pixels = self.discriminators[discriminator_class].generate_mental_image()
shuffled_mental_images[discriminator_class] = pixels
original_positions = list(range(0,self.observation_length))
if self.shuffle_observations:
random.seed(self.seed)
random.shuffle(original_positions)
for mental_image_class in sorted(shuffled_mental_images):
shuffled_mental_image = shuffled_mental_images[mental_image_class]
unshuffled_mental_image = [0] * len(shuffled_mental_image)
for i in range(len(original_positions)):
unshuffled_mental_image[original_positions[i]] = shuffled_mental_image[i]
unshuffled_mental_images[mental_image_class] = unshuffled_mental_image
return unshuffled_mental_images
def save(self, file_name):
"""
Saves the object to disk.
:param file: file URI.
"""
output_file = open(file_name, "wb")
pickle.dump(self, output_file)
output_file.close()
@classmethod
def load(cls, file_name):
"""
Loads object from disk.
:param file: file URI.
Returns: a Wisard object loaded from disk.
"""
input_file = open(file_name, "rb")
loaded_object = pickle.load(input_file)
input_file.close()
return loaded_object
| true |
8d9e30031470bb98ba8073ef6bf0187fea22fc55 | Python | peteut/ramda.py | /src/ramda/internal/__init__.py | UTF-8 | 23,700 | 2.625 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import collections
import types
import functools
import builtins
import math
import inspect
import fastnumbers
try:
from collections.abc import Callable, Iterable, Mapping, Sequence
except ImportError:
from collections import Callable, Iterable, Mapping, Sequence
class _Placeholder():
    """Sentinel type for the curry-argument placeholder."""


# The canonical placeholder instance; compared by identity (see _is_placeholder).
__ = _Placeholder()
def _assign(target, *sources):
    """Shallow-merge every mapping in *sources* into *target*, in order.

    Later sources win on key collisions.  Returns the mutated *target*.
    """
    for source in sources:
        target.update(source)
    return target
def _keys(obj):
    """Keys of a mapping, indices of a sequence, or ``[]`` otherwise."""
    if _is_object(obj):
        return obj.keys()
    if _is_seq(obj):
        return list(range(len(obj)))
    return []
def _get_arity(fn):
    """Number of parameters *fn* declares (via ``inspect.signature``)."""
    signature = inspect.signature(fn)
    return len(signature.parameters)
def _fix_arity(fn):
    """Wrap *fn* so any surplus positional arguments are silently dropped."""
    arity = _get_arity(fn)

    def fixed(*args):
        return fn(*args[:arity])

    return fixed
def _identical(a, b):
    """SameValue-style identity (Ramda ``identical`` semantics).

    NaN is identical to NaN, ``0.0`` is distinct from ``-0.0``, strings and
    same-typed numbers compare by value, everything else by object identity.
    """
    if isinstance(a, float) and math.isnan(a):
        return isinstance(b, float) and math.isnan(b)
    if isinstance(a, str) and isinstance(b, str):
        return a == b
    # Numbers must compare by value: id() equality is an implementation
    # accident (CPython small-int/constant caching) and fails for equal
    # but distinct number objects.  bool is excluded so True != 1 here.
    if type(a) == type(b) and isinstance(a, (int, float)) \
            and not isinstance(a, bool):
        if a == 0 and b == 0:
            # Distinguish signed zeros, mirroring the JS SameValue algorithm.
            return math.copysign(1.0, a) == math.copysign(1.0, b)
        return a == b
    return id(a) == id(b)
def _has(prop, obj):
    """Whether *obj* exposes *prop* as a key (mappings) or index (sequences).

    Returns a bool; never raises for an inapplicable prop/obj combination.
    """
    if _is_object(obj):
        return prop in obj
    try:
        obj[prop]
        return True
    # TypeError covers e.g. a string index into a list; KeyError covers
    # mapping-likes that fall through the _is_object check.  The original
    # caught only IndexError and leaked the others to the caller.
    except (IndexError, KeyError, TypeError):
        return False
def _is_function(x):
    """True when *x* is callable (per the ``Callable`` ABC)."""
    return isinstance(x, Callable)
def _equals(a, b, stack_a=None, stack_b=None):
    """Deep structural equality (Ramda ``equals`` semantics).

    Handles NaN, custom ``equals`` methods (must hold symmetrically), and
    cyclic structures via the two parallel ancestor stacks, which are also
    threaded through recursive calls.

    :param stack_a: ancestors of *a* already being compared (internal).
    :param stack_b: ancestors of *b* already being compared (internal).
    """
    # Fresh stacks per top-level call.  The original used mutable default
    # arguments, which are shared across invocations and not reentrant.
    if stack_a is None:
        stack_a = []
    if stack_b is None:
        stack_b = []
    if _identical(a, b):
        return True
    if type(a) != type(b):
        return False
    # Defensive: a lone None can only reach here if _identical changes.
    if a is None or b is None:
        return False
    if _is_function(getattr(a, "equals", None)) or \
            _is_function(getattr(b, "equals", None)):
        return _is_function(getattr(a, "equals", None)) and \
            a.equals(b) and \
            _is_function(getattr(b, "equals", None)) and \
            b.equals(a)
    if isinstance(a, (int, float, str)):
        # Compare primitives by value.  The original re-ran _identical,
        # whose id()-based check rejects equal but distinct number objects
        # (e.g. _equals(0.5, float("0.5")) was False).  Types match here.
        return a == b
    if _is_function(a) and _is_function(b):
        return id(a) == id(b)
    keys_a = _keys(a)
    if len(keys_a) != len(_keys(b)):
        return False
    # Cycle detection: if *a* is one of its own ancestors, the structures
    # are equal iff *b* is the corresponding ancestor on the other side.
    for item_a, item_b in reversed(list(builtins.zip(stack_a, stack_b))):
        if id(item_a) == id(a):
            return id(item_b) == id(b)
    stack_a.append(a)
    stack_b.append(b)
    for key in keys_a:
        if not (_has(key, b) and _equals(b[key], a[key], stack_a, stack_b)):
            return False
    stack_a.pop()
    stack_b.pop()
    return True
def _is_placeholder(x):
    """True only for the canonical placeholder object ``__`` (identity test)."""
    return x is __
def _curry1(fn):
    """Curry a unary function: calling with no args (or the placeholder)
    returns the curried function itself."""
    @functools.wraps(fn)
    def curried(*args):
        if not args or _is_placeholder(args[0]):
            return curried
        return fn(args[0])
    return curried
def _curry2(fn):
    """Curry a binary function with placeholder (``__``) support."""
    @functools.wraps(fn)
    def curried(*args):
        if not args:
            return curried
        if len(args) == 1:
            a = args[0]
            if _is_placeholder(a):
                return curried
            return _curry1(lambda _b: fn(a, _b))
        a, b = args
        if _is_placeholder(a) and _is_placeholder(b):
            return curried
        if _is_placeholder(a):
            return _curry1(lambda _a: fn(_a, b))
        if _is_placeholder(b):
            return _curry1(lambda _b: fn(a, _b))
        return fn(a, b)
    return curried
def _curry3(fn):
    """Curry a ternary function with placeholder (``__``) support."""
    @functools.wraps(fn)
    def curried(*args):
        count = len(args)
        if count == 0:
            return curried
        if count == 1:
            a, = args
            if _is_placeholder(a):
                return curried
            return _curry2(lambda _b, _c: fn(a, _b, _c))
        if count == 2:
            a, b = args
            if _is_placeholder(a) and _is_placeholder(b):
                return curried
            if _is_placeholder(a):
                return _curry2(lambda _a, _c: fn(_a, b, _c))
            if _is_placeholder(b):
                return _curry2(lambda _b, _c: fn(a, _b, _c))
            return _curry1(lambda _c: fn(a, b, _c))
        a, b, c = args
        ph_a, ph_b, ph_c = (_is_placeholder(x) for x in (a, b, c))
        if ph_a and ph_b and ph_c:
            return curried
        if ph_a and ph_b:
            return _curry2(lambda _a, _b: fn(_a, _b, c))
        if ph_a and ph_c:
            return _curry2(lambda _a, _c: fn(_a, b, _c))
        if ph_b and ph_c:
            return _curry2(lambda _b, _c: fn(a, _b, _c))
        if ph_a:
            return _curry1(lambda _a: fn(_a, b, c))
        if ph_b:
            return _curry1(lambda _b: fn(a, _b, c))
        if ph_c:
            return _curry1(lambda _c: fn(a, b, _c))
        return fn(a, b, c)
    return curried
def _concat(set1, set2):
    """Concatenate two sequences (or strings) with ``+``."""
    return set1 + set2
def _arity(n, fn):
    """Return a wrapper of *fn* declaring exactly ``n`` parameters.

    Every parameter defaults to the placeholder ``__`` so partially
    applied calls still work.  One explicit lambda per arity is required
    because ``*args`` would hide the parameter count from
    ``inspect.signature`` (see ``_get_arity``).

    :param n: desired arity, 0..10 inclusive.
    :param fn: function to wrap.
    :raises ValueError: if ``n`` is negative or greater than ten.
    """
    if n == 0:
        return lambda: fn()
    elif n == 1:
        return lambda a0=__: fn(a0)
    elif n == 2:
        return lambda a0=__, a1=__: fn(a0, a1)
    elif n == 3:
        return lambda a0=__, a1=__, a2=__: fn(a0, a1, a2)
    elif n == 4:
        return lambda a0=__, a1=__, a2=__, a3=__: fn(a0, a1, a2, a3)
    elif n == 5:
        return lambda a0=__, a1=__, a2=__, a3=__, a4=__: fn(a0, a1, a2, a3, a4)
    elif n == 6:
        return lambda a0=__, a1=__, a2=__, a3=__, a4=__, a5=__: \
            fn(a0, a1, a2, a3, a4, a5)
    elif n == 7:
        return lambda a0=__, a1=__, a2=__, a3=__, a4=__, a5=__, a6=__: \
            fn(a0, a1, a2, a3, a4, a5, a6)
    elif n == 8:
        return lambda a0=__, a1=__, a2=__, a3=__, a4=__, a5=__, a6=__, a7=__: \
            fn(a0, a1, a2, a3, a4, a5, a6, a7)
    elif n == 9:
        return lambda a0=__, a1=__, a2=__, a3=__, a4=__, a5=__, a6=__, a7=__, a8=__: \
            fn(a0, a1, a2, a3, a4, a5, a6, a7, a8)
    elif n == 10:
        return lambda a0=__, a1=__, a2=__, a3=__, a4=__, a5=__, a6=__, a7=__, a8=__, a9=__: \
            fn(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9)
    else:
        raise ValueError("First argument to _arity must be a non-negative "
                         "integer no greater than ten")
def _curry_n(length, received, fn):
    """Internal n-ary curry: merge *received* (possibly holding
    placeholders) with new call arguments until *length* real values have
    been supplied, then invoke *fn*; otherwise return a re-curried wrapper
    of the remaining arity.
    """
    @functools.wraps(fn)
    def _fn(*args):
        collected = []
        pending = list(args)
        missing = length
        while len(collected) < len(received) or pending:
            # Reuse a previously received value unless it is a placeholder
            # that a fresh argument can now fill.
            use_received = len(collected) < len(received) and (
                not _is_placeholder(received[len(collected)]) or not pending)
            value = received[len(collected)] if use_received else pending.pop(0)
            collected.append(value)
            if not _is_placeholder(value):
                missing -= 1
        if missing <= 0:
            return fn(*collected)
        return _arity(missing, _curry_n(length, collected, fn))
    return _fn
def _is_integer(x):
    """True when *x* is an ``int`` (note: this includes ``bool``)."""
    return isinstance(x, int)
def _xwrap(fn):
    """Wrap a plain reducing function in the transformer protocol."""
    class _Wrapped():
        def __init__(self, step_fn):
            # Kept as attribute ``f`` to match the transformer convention.
            self.f = step_fn

        @staticmethod
        def _transducer_init():
            raise NotImplementedError("init not implemented in Xwrap")

        def _transducer_step(self, acc, value):
            return self.f(acc, value)

        @staticmethod
        def _transducer_result(acc):
            return acc

    return _Wrapped(fn)
def _reduce(fn, acc, xs):
    """Reduce *xs* with *fn* (plain function or transformer), honouring the
    early-exit "reduced" sentinel and delegating to ``xs.reduce`` when the
    collection provides one.

    :raises ValueError: if *xs* is not iterable (and has no ``reduce``).
    """
    if _is_function(fn):
        fn = _xwrap(fn)
    if _is_object(xs) and _is_function(getattr(xs, "reduce", None)):
        return fn._transducer_result(xs.reduce(fn._transducer_step, acc))
    if isinstance(xs, Iterable):
        for item in xs:
            acc = fn._transducer_step(acc, item)
            # A truthy reduced-sentinel aborts the fold early.
            if acc and getattr(acc, "_transducer_reduced", False):
                acc = acc._transducer_value
                break
        return fn._transducer_result(acc)
    raise ValueError("reduce: xs must be an iterable")
def _reduced(x):
    """Mark *x* as "reduced" (early fold termination); idempotent for
    already-reduced truthy values."""
    if x and getattr(x, "_transducer_reduced", False):
        return x
    return types.SimpleNamespace(_transducer_value=x, _transducer_reduced=True)
def _force_reduced(x):
    """Unconditionally wrap *x* in a reduced sentinel (even if already one)."""
    return types.SimpleNamespace(_transducer_value=x, _transducer_reduced=True)
class _XFBase():
    """Base class for transformers: delegates init/result to the wrapped
    downstream transformer stored on ``self.xf``."""

    def init(self):
        """Forward initialisation downstream."""
        return self.xf._transducer_init()

    def result(self, result):
        """Forward completion downstream."""
        return self.xf._transducer_result(result)
@_curry2
def _xall(f, xf):
    """Transducer for ``all``: emit a single boolean downstream.

    Short-circuits (via the reduced sentinel) with ``False`` on the first
    input failing predicate *f*; otherwise emits ``True`` at completion.
    """
    class _Xall(_XFBase):
        def __init__(self, f, xf):
            self.xf = xf
            self.f = f
            # Stays True until some input fails the predicate.
            self.all = True

        def _transducer_init(self):
            return super().init()

        def _transducer_result(self, result):
            # No failure seen: flush the final True before completing.
            if self.all:
                result = self.xf._transducer_step(result, True)
            return super().result(result)

        def _transducer_step(self, result, input):
            if not self.f(input):
                self.all = False
                # Emit False and abort the fold early.
                result = _reduced(self.xf._transducer_step(result, False))
            return result
    return _Xall(f, xf)
@_curry2
def _xany(f, xf):
    """Transducer for ``any``: emit a single boolean downstream.

    Short-circuits (via the reduced sentinel) with ``True`` on the first
    input satisfying predicate *f*; otherwise emits ``False`` at completion.
    """
    class _Xany(_XFBase):
        def __init__(self, f, xf):
            self.xf = xf
            self.f = f
            # Stays False until some input satisfies the predicate.
            self.any = False

        def _transducer_init(self):
            return super().init()

        def _transducer_result(self, result):
            # No match seen: flush the final False before completing.
            if not self.any:
                result = self.xf._transducer_step(result, False)
            return super().result(result)

        def _transducer_step(self, result, input):
            if self.f(input):
                self.any = True
                # Emit True and abort the fold early.
                result = _reduced(self.xf._transducer_step(result, True))
            return result
    return _Xany(f, xf)
@_curry2
def _xmap(f, xf):
    """Transducer for ``map``: feed ``f(input)`` to the downstream step."""
    class _MapTransformer(_XFBase):
        def __init__(self, mapper, downstream):
            self.f = mapper
            self.xf = downstream

        def _transducer_init(self):
            return super().init()

        def _transducer_result(self, result):
            return super().result(result)

        def _transducer_step(self, acc, value):
            return self.xf._transducer_step(acc, self.f(value))

    return _MapTransformer(f, xf)
@_curry2
def _xfilter(f, xf):
    """Transducer for ``filter``: only inputs satisfying *f* pass downstream."""
    class _FilterTransformer(_XFBase):
        def __init__(self, predicate, downstream):
            self.f = predicate
            self.xf = downstream

        def _transducer_init(self):
            return super().init()

        def _transducer_result(self, result):
            return super().result(result)

        def _transducer_step(self, acc, value):
            if self.f(value):
                return self.xf._transducer_step(acc, value)
            return acc

    return _FilterTransformer(f, xf)
def _is_transformer(obj):
    """True when *obj* implements the transformer protocol (has a callable
    ``_transducer_step``)."""
    step = getattr(obj, "_transducer_step", None)
    return _is_function(step)
def _identity(x):
    """Return *x* unchanged."""
    return x
@_curry3
def _dispatchable(method_names, xf, fn):
    """Build a function that dispatches on its last argument: a matching
    method on the object wins, then the transducer *xf* for transformers,
    falling back to *fn* itself."""
    @functools.wraps(fn)
    def dispatcher(*args):
        if not args:
            return fn()
        target = args[-1]
        for name in method_names:
            candidate = getattr(target, name, None)
            if _is_function(candidate):
                return candidate(*args[:-1])
        if _is_transformer(target):
            transducer = xf(*args[:-1])
            return transducer(target)
        return fn(*args)
    return dispatcher
def _step_cat(obj):
    """Pick a "concatenating" transformer matching accumulator *obj*:
    a transformer is returned as-is, mappings merge, strings append via
    ``str()``, sequences append.

    :raises ValueError: when no transformer applies to *obj*.
    """
    # Appends each input to a list accumulator.
    _step_cat_array = types.SimpleNamespace(
        _transducer_init=lambda: [],
        _transducer_step=lambda a, b: a.append(b) or a,
        _transducer_result=_identity)
    # Concatenates the str() of each input.
    _step_cat_string = types.SimpleNamespace(
        _transducer_init=lambda: "",
        _transducer_step=lambda a, b: a + str(b),
        _transducer_result=_identity)
    # Merges each input into a dict; a non-mapping input is treated as a
    # (key, value) pair (only its first two elements are used).
    _step_cat_obj = types.SimpleNamespace(
        _transducer_init=lambda: {},
        _transducer_step=lambda result, input: _assign(
            result, dict([input[:2]]) if not _is_object(input) else input),
        _transducer_result=_identity)

    if _is_transformer(obj):
        return obj
    elif _is_object(obj):
        return _step_cat_obj
    elif isinstance(obj, str):
        return _step_cat_string
    elif _is_seq(obj):
        return _step_cat_array
    raise ValueError("Cannot create transformer for {}".format(obj))
@_curry2
def _check_for_method(method_name, fn):
    """Wrap *fn* so that, when the last argument exposes a callable
    *method_name*, that method is invoked with the leading arguments
    instead of *fn*."""
    @functools.wraps(fn)
    def checked(*args):
        if not args:
            return fn()
        target = args[-1]
        method = getattr(target, method_name, None)
        if not _is_function(method):
            return fn(*args)
        return method(*args[:-1])
    return checked
def _pipe(f, g):
    """Left-to-right composition: ``_pipe(f, g)(*a) == g(f(*a))``."""
    def piped(*args):
        return g(f(*args))
    return piped
@_curry2
def _xtake(n, xf):
    """Transducer for ``take``: pass at most *n* inputs downstream, then
    abort via the reduced sentinel.  A negative *n* takes everything.
    """
    class _XTake(_XFBase):
        def __init__(self, n, xf):
            self.xf = xf
            self.n = n
            # Count of inputs seen so far.
            self.i = 0

        def _transducer_init(self):
            return super().init()

        def _transducer_result(self, result):
            return super().result(result)

        def _transducer_step(self, result, input):
            self.i += 1
            # take(0) forwards nothing at all.
            ret = result if self.n == 0 else self.xf._transducer_step(
                result, input)
            # Stop the fold once the quota is reached (never for n < 0).
            return _reduced(ret) if self.n >= 0 and self.i >= self.n else ret
    return _XTake(n, xf)
@functools.partial(_curry_n, 4, [])
def _xreduce_by(value_fn, value_acc, key_fn, xf):
    """Transducer for ``reduceBy``: group inputs by ``key_fn`` and fold each
    group with ``value_fn`` starting from ``value_acc``; the accumulated
    ``[key, value]`` pairs are emitted downstream only at completion.
    """
    class _XReduceBy(_XFBase):
        def __init__(self, value_fn, value_acc, key_fn, xf):
            self.value_fn = value_fn
            self.value_acc = value_acc
            self.key_fn = key_fn
            self.xf = xf
            # key -> [key, folded value]; built up during the steps.
            self.inputs = {}

        def _transducer_init(self):
            return super().init()

        def _transducer_result(self, result):
            # Flush one [key, value] pair per group, honouring early exit.
            for key, value in self.inputs.items():
                result = self.xf._transducer_step(result, value)
                if getattr(result, "_transducer_reduced", False):
                    result = result._transducer_value
                    break
            self.inputs = None
            return super().result(result)

        def _transducer_step(self, result, input):
            # Accumulate internally; nothing is emitted downstream yet.
            key = self.key_fn(input)
            self.inputs[key] = self.inputs.get(key, [key, self.value_acc])
            self.inputs[key][1] = self.value_fn(self.inputs[key][1], input)
            return result
    return _XReduceBy(value_fn, value_acc, key_fn, xf)
@_curry2
def _xaperture(n, xf):
    """Transducer: emit sliding windows of ``n`` consecutive inputs."""
    class _XAperture(_XFBase):
        def __init__(self, n, xf):
            self.xf = xf
            self.full = False  # True once n inputs have been buffered
            self.acc = collections.deque([], n)  # ring buffer of last n inputs
        def _transducer_init(self):
            return super().init()
        def _transducer_result(self, result):
            self.acc = None
            return self.xf._transducer_result(result)
        def _transducer_step(self, result, input):
            self.store(input)
            # Only forward once a complete window is available.
            return self.xf._transducer_step(result, self.get_copy()) \
                if self.full else result
        def store(self, input):
            self.acc.append(input)
            if len(self.acc) == self.acc.maxlen:
                self.full = True
        def get_copy(self):
            # Snapshot the deque so downstream holds an independent list.
            return list(self.acc)
    return _XAperture(n, xf)
def _aperture(n, xs):
idx = 0
limit = len(xs) - (n - 1)
acc = []
while idx < limit:
acc.append(xs[idx: idx + n])
idx += 1
return acc
def _make_flat(recursive):
    """Build a flattening function: fully recursive when ``recursive`` is
    true, otherwise flattening a single level only."""
    def flatten(xs):
        out = []
        for item in xs:
            if _is_seq(item):
                out.extend(flatten(item) if recursive else item)
            else:
                out.append(item)
        return out
    return flatten
def _xcat(xf):
    """Transducer: concatenate each input sequence into the downstream
    transformer (flattening one level)."""
    class _PreservingReduced(_XFBase):
        """Re-wraps an early-termination value so the inner ``_reduce``
        does not unwrap it before the outer reduction can see it."""
        def __init__(self, xf):
            self.xf = xf
        def _transducer_init(self):
            return super().init()
        def _transducer_result(self, result):
            return self.xf._transducer_result(result)
        def _transducer_step(self, result, input):
            ret = self.xf._transducer_step(result, input)
            return _force_reduced(ret) if getattr(ret, "_transducer_reduced", False) \
                else ret
    class _XCat(_XFBase):
        def __init__(self, xf):
            self.rxf = _PreservingReduced(xf)
        def _transducer_init(self):
            return super().init()
        def _transducer_result(self, result):
            return self.rxf._transducer_result(result)
        def _transducer_step(self, result, input):
            # Non-sequence inputs are treated as single-element sequences.
            return _reduce(self.rxf, result, [input]) \
                if not isinstance(input, Sequence) \
                else _reduce(self.rxf, result, input)
    return _XCat(xf)
@_curry2
def _xchain(f, xf):
    """Transducer for chain/flat-map: map each input with ``f`` and
    concatenate the resulting sequences."""
    # NOTE(review): function-scope import, presumably to avoid a circular
    # module dependency -- confirm. This is the package's map, not the builtin.
    from ..list import map
    return map(f, _xcat(xf))
def _index_of(xs, a, idx):
    """Return the index of the first element of ``xs[idx:]`` equal to ``a``
    according to ``_equals``, or -1 when no element matches."""
    for pos in range(idx, len(xs)):
        if _equals(xs[pos], a):
            return pos
    return -1
def _contains(a, xs):
    """Return True when ``a`` occurs in ``xs`` (per ``_equals``)."""
    return _index_of(xs, a, 0) > -1
@_curry2
def _xdrop(n, xf):
    """Transducer: discard the first ``n`` inputs, forward the rest."""
    class _XDrop(_XFBase):
        def __init__(self, n, xf):
            self.xf = xf
            self.n = n  # remaining number of inputs to skip
        def _transducer_init(self):
            return super().init()
        def _transducer_result(self, result):
            return super().result(result)
        def _transducer_step(self, result, input):
            # Swallow inputs until the skip budget is spent.
            if self.n > 0:
                self.n -= 1
                return result
            return self.xf._transducer_step(result, input)
    return _XDrop(n, xf)
@_curry2
def _xdrop_last(n, xf):
    """Transducer: forward all inputs except the final ``n``.

    The last ``n`` inputs are held in a ring buffer; once it is full each
    new input evicts (and forwards) the oldest one, so whatever is still
    buffered when the reduction ends is never emitted."""
    class _XDropLast(_XFBase):
        def __init__(self, n, xf):
            self.xf = xf
            self.n = n
            self.full = False  # True once the ring holds n items
            self.acc = collections.deque([], n)
        def _transducer_init(self):
            return super().init()
        def _transducer_result(self, result):
            return super().result(result)
        def _transducer_step(self, result, input):
            # Forward the element about to be evicted, then buffer the new one.
            if self.full:
                result = self.xf._transducer_step(result, self.acc[0])
            self.store(input)
            return result
        def store(self, input):
            self.acc.append(input)
            if len(self.acc) == self.acc.maxlen:
                self.full = True
    return _XDropLast(n, xf)
@_curry2
def _xdrop_last_while(fn, xf):
    """Transducer: drop the trailing run of inputs satisfying ``fn``.

    Matching inputs are retained tentatively; a later non-matching input
    proves the retained run was not trailing, so it is flushed downstream.
    Whatever is still retained at the end is discarded."""
    class _XDropLastWhile(_XFBase):
        def __init__(self, fn, xf):
            self.f = fn
            self.xf = xf
            self.retained = []
        def _transducer_init(self):
            return super().init()
        def _transducer_result(self, result):
            # Retained items at the end are the dropped trailing run.
            self.retained = None
            return super().result(result)
        def _transducer_step(self, result, input):
            return self.retain(result, input) if self.f(input) else \
                self.flush(result, input)
        def flush(self, result, input):
            # Emit the retained run, then the current (non-matching) input.
            result = _reduce(
                self.xf._transducer_step, result, self.retained)
            self.retained = []
            return self.xf._transducer_step(result, input)
        def retain(self, result, input):
            self.retained.append(input)
            return result
    return _XDropLastWhile(fn, xf)
@_curry2
def _xdrop_repeats_with(pred, xf):
    """Transducer: drop inputs that ``pred`` deems equal to the
    immediately preceding input."""
    class _XDropRepeatsWith(_XFBase):
        def __init__(self, pred, xf):
            self.pred = pred
            self.xf = xf
            self.last_value = None
            # Distinguishes "no input yet" from a genuine None value.
            self.seen_first_value = False
        def _transducer_init(self):
            return super().init()
        def _transducer_result(self, result):
            return super().result(result)
        def _transducer_step(self, result, input):
            same_as_last = False
            if not self.seen_first_value:
                # The very first input is always forwarded.
                self.seen_first_value = True
            elif self.pred(self.last_value, input):
                same_as_last = True
            self.last_value = input
            return result if same_as_last else self.xf._transducer_step(
                result, input)
    return _XDropRepeatsWith(pred, xf)
@_curry2
def _xdrop_while(pred, xf):
    """Transducer: discard inputs while ``pred`` holds; from the first
    failure on, forward everything (the predicate is never re-tested)."""
    class _XDropWhile(_XFBase):
        def __init__(self, pred, xf):
            self.pred = pred
            self.xf = xf
        def _transducer_init(self):
            return super().init()
        def _transducer_result(self, result):
            return super().result(result)
        def _transducer_step(self, result, input):
            if self.pred:
                if self.pred(input):
                    return result
                else:
                    # First failing input: clear the predicate so all
                    # subsequent inputs pass straight through.
                    self.pred = None
            return self.xf._transducer_step(result, input)
    return _XDropWhile(pred, xf)
@_curry2
def _xfind(pred, xf):
    """Transducer: emit the first input matching ``pred`` and terminate
    early; emits ``None`` at the end when nothing matched."""
    class _XFind(_XFBase):
        def __init__(self, pred, xf):
            self.pred = pred
            self.xf = xf
            self.found = False
        def _transducer_init(self):
            return super().init()
        def _transducer_result(self, result):
            # No match over the whole input: report None downstream.
            if not self.found:
                result = self.xf._transducer_step(result, None)
            return self.xf._transducer_result(result)
        def _transducer_step(self, result, input):
            if self.pred(input):
                self.found = True
                # Wrap in _reduced to stop the reduction at the first match.
                result = _reduced(self.xf._transducer_step(result, input))
            return result
    return _XFind(pred, xf)
@_curry2
def _xfind_index(pred, xf):
    """Transducer: emit the index of the first input matching ``pred`` and
    terminate early; emits -1 at the end when nothing matched."""
    class _XFindIndex(_XFBase):
        def __init__(self, pred, xf):
            self.pred = pred
            self.xf = xf
            self.idx = -1  # index of the most recently seen input
            self.found = False
        def _transducer_init(self):
            return super().init()
        def _transducer_result(self, result):
            # No match over the whole input: report -1 downstream.
            if not self.found:
                result = self.xf._transducer_step(result, -1)
            return self.xf._transducer_result(result)
        def _transducer_step(self, result, input):
            self.idx += 1
            if self.pred(input):
                self.found = True
                # Wrap in _reduced to stop the reduction at the first match.
                result = _reduced(self.xf._transducer_step(result, self.idx))
            return result
    return _XFindIndex(pred, xf)
@_curry2
def _xfind_last(pred, xf):
    """Transducer: remember the last input matching ``pred`` and emit it
    (``None`` when nothing matched) once the reduction finishes."""
    class _XFindLast(_XFBase):
        def __init__(self, pred, xf):
            self.pred = pred
            self.xf = xf
            self.last = None
        def _transducer_init(self):
            return super().init()
        def _transducer_result(self, result):
            # Emit the remembered match (or the initial None) at the end.
            return self.xf._transducer_result(
                self.xf._transducer_step(result, self.last))
        def _transducer_step(self, result, input):
            if self.pred(input):
                self.last = input
            return result
    return _XFindLast(pred, xf)
@_curry2
def _xfind_last_index(pred, xf):
    """Transducer: emit the index of the last input matching ``pred``
    (-1 when nothing matched) once the reduction finishes."""
    class _XFindLastIndex(_XFBase):
        def __init__(self, pred, xf):
            self.pred = pred
            self.xf = xf
            self.idx = -1       # index of the most recently seen input
            self.last_idx = -1  # index of the last match seen so far
        def _transducer_init(self):
            return super().init()
        def _transducer_result(self, result):
            # Emit the remembered index (or -1) at the end.
            return self.xf._transducer_result(
                self.xf._transducer_step(result, self.last_idx))
        def _transducer_step(self, result, input):
            self.idx += 1
            if self.pred(input):
                self.last_idx = self.idx
            return result
    return _XFindLastIndex(pred, xf)
def _complement(f):
return lambda *args: not f(*args)
# Numeric-type check; num_only restricts the test to number objects
# (numeric strings are not accepted) -- see fastnumbers.isint.
_is_number = functools.partial(fastnumbers.isint, num_only=True)
def _is_object(x):
    # Mapping (dict-like) values are treated as "objects".
    return isinstance(x, Mapping)
def _is_seq(x):
    # Any Sequence, including str.
    return isinstance(x, Sequence)
def _is_array(x):
    # A sequence that is not a string (list/tuple-like).
    return _is_seq(x) and not isinstance(x, str)
def _is_string(x):
    return isinstance(x, str)
def _of(x):
    # Wrap a value in a singleton list.
    return [x]
| true |
aaeec1d6d647c94088b0199934bf065522efafe3 | Python | abdulkk49/Per-protein-lightning | /prepare_data.py | UTF-8 | 3,228 | 2.515625 | 3 | [] | no_license | import csv
import os
import sys
from itertools import groupby
from Bio import SeqIO
import random
from sklearn import preprocessing
test_loc = []
test_mem = []
test_seq = []
trainval_loc = []
trainval_mem = []
trainval_seq = []
def load_dataset(path_fasta):
    """
    Loads dataset into memory from fasta file.

    Each record description is expected to look like
    ``<id> <loc>-<mem>[ test]``. Parsed entries are appended to the
    module-level ``test_*`` / ``trainval_*`` lists as a side effect; a
    third description token marks the record as test data.
    """
    fasta_sequences = SeqIO.parse(open(path_fasta), 'fasta')
    for fasta in fasta_sequences:
        desc = fasta.description.split(" ")
        labels = desc[1].split("-")
        # Skip malformed labels. The original `> 2` check let one-part
        # labels fall through and crash on labels[1]; require exactly two.
        if len(labels) != 2:
            continue
        loclabel, memlabel, sequence = labels[0], labels[1], str(fasta.seq)
        if len(desc) > 2:
            test_loc.append(loclabel)
            test_mem.append(memlabel)
            test_seq.append(sequence)
        else:
            trainval_loc.append(loclabel)
            trainval_mem.append(memlabel)
            trainval_seq.append(sequence)
def save_dataset(save_dir, seq, loc, mem):
    """Write sequences and their labels to text files under ``save_dir``."""
    print("Saving in {}...".format(save_dir))
    # Create the output directory on first use.
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    seq_path = os.path.join(save_dir, 'sequences.txt')
    mem_path = os.path.join(save_dir, 'memlabels.txt')
    loc_path = os.path.join(save_dir, 'loclabels.txt')
    with open(seq_path, 'w') as seq_file, open(mem_path, 'w') as mem_file, \
            open(loc_path, 'w') as loc_file:
        for sequence, loclabel, memlabel in zip(seq, loc, mem):
            # Sequences are stored space-separated, one record per line.
            seq_file.write("{}\n".format(" ".join(sequence)))
            mem_file.write("{}\n".format(memlabel))
            loc_file.write("{}\n".format(loclabel))
    print("- done.")
def fasta_iter(path_fasta):
    """
    given a fasta file. yield tuples of header, sequence
    """
    with open(path_fasta) as fa:
        # groupby alternates between runs of header lines (starting ">")
        # and runs of sequence lines; ditch the boolean key and keep the
        # group iterators, which we consume pairwise.
        faiter = (x[1] for x in groupby(fa, lambda line: line[0] == ">"))
        for header in faiter:
            # drop the ">". Python 3: iterators use next(it), not the
            # removed Python 2 .next() method the original called.
            header = next(header)[1:].strip()
            # join all sequence lines to one.
            seq = "".join(s.strip() for s in next(faiter))
            yield header, seq
def encodeLabels(labels):
    """Integer-encode ``labels``; returns (encoded array, fitted encoder)
    and prints the class -> code mapping."""
    encoder = preprocessing.LabelEncoder()
    encoded = encoder.fit_transform(labels)
    name_mapping = dict(zip(encoder.classes_, encoder.transform(encoder.classes_)))
    print(name_mapping)
    return encoded, encoder
# Parse the fasta file; this fills the module-level test_*/trainval_* lists.
load_dataset('deeploc_data.fasta')
trainval_locmem = list(zip(trainval_loc, trainval_mem))
from sklearn.model_selection import train_test_split
# Hold out 10% of the train+val pool for validation (fixed seed).
train_seq, val_seq, train_locmem, val_locmem = train_test_split(trainval_seq, trainval_locmem, test_size = 0.1, random_state = 32)
train_loc, train_mem = map(list, zip(*train_locmem))
val_loc, val_mem = map(list, zip(*val_locmem))
# NOTE(review): each split is label-encoded independently, so the integer
# codes are only comparable across splits if every split contains the same
# set of classes -- confirm before training on these files.
train_loc, locEncoder = encodeLabels(train_loc)
train_mem, memEncoder = encodeLabels(train_mem)
save_dataset("./train", train_seq, train_loc, train_mem)
val_loc, locEncoder = encodeLabels(val_loc)
val_mem, memEncoder = encodeLabels(val_mem)
save_dataset("./val", val_seq, val_loc, val_mem)
test_loc, locEncoder = encodeLabels(test_loc)
test_mem, memEncoder = encodeLabels(test_mem)
save_dataset("./test", test_seq, test_loc, test_mem) | true |
fda1c099914d401c38f7f679009276b86dcf8146 | Python | rab170/uhh-ml | /01/src/stochastic_gradient_descent.py | UTF-8 | 536 | 2.90625 | 3 | [] | no_license | import numpy as np
from numpy import pi
def sgd(X, Y, alpha=0.005, epsilon=2, max_iter=10000):
    """Fit linear weights by stochastic gradient descent (LMS rule).

    Iterates over (x, y) samples, updating ``theta`` until the sum of
    squared errors drops to ``epsilon`` or ``max_iter`` updates have been
    applied. Returns ``(theta, errors)`` where ``errors`` records the SSE
    at the start of each epoch.
    """
    errors = []
    theta = np.random.normal(-0.1, 0.1, np.shape(X)[1])
    def _sse():
        # Sum of squared errors of the current weights. The original
        # computed `map(model, X) - Y`, which is a TypeError on Python 3;
        # use a vectorized prediction instead.
        return np.sum((np.dot(X, theta) - Y) ** 2)
    cur_iter = 0
    e = _sse()
    while e > epsilon:
        errors.append(e)
        for x, y in zip(X, Y):
            theta += alpha * (y - np.dot(theta, x)) * x
            if cur_iter == max_iter:
                return theta, errors
            e = _sse()
            cur_iter += 1
    return theta, errors
| true |
7060f0eac99d83d75793de8326f0b17f9d6143c5 | Python | tensorflow/probability | /tensorflow_probability/python/internal/broadcast_util.py | UTF-8 | 3,600 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools for manipulating shapes and broadcasting."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import prefer_static as ps
# Public API of this module (names exported by `from ... import *`).
__all__ = [
    'left_justified_expand_dims_like',
    'left_justified_expand_dims_to',
    'left_justified_broadcast_like',
    'left_justified_broadcast_to',
    'right_justified_unsorted_segment_sum',
    'where_left_justified_mask',
]
def left_justified_expand_dims_like(x, reference, name=None):
  """Right pads `x` with `rank(reference) - rank(x)` ones."""
  with tf.name_scope(name or 'left_justified_expand_dims_like'):
    target_rank = ps.rank(reference)
    return left_justified_expand_dims_to(x, target_rank)
def left_justified_expand_dims_to(x, rank, name=None):
  """Right pads `x` with `rank - rank(x)` ones."""
  with tf.name_scope(name or 'left_justified_expand_dims_to'):
    # Number of trailing singleton dims needed; clamped at zero so an
    # input whose rank already meets `rank` keeps its shape.
    expand_ndims = ps.maximum(rank - ps.rank(x), 0)
    # Target shape: the original dims followed by `expand_ndims` ones.
    expand_shape = ps.concat(
        [ps.shape(x),
         ps.ones(shape=[expand_ndims], dtype=tf.int32)],
        axis=0)
    return tf.reshape(x, expand_shape)
def left_justified_broadcast_like(x, reference, name=None):
  """Broadcasts `x` to shape of reference, in a left-justified manner."""
  with tf.name_scope(name or 'left_justified_broadcast_like'):
    target_shape = ps.shape(reference)
    return left_justified_broadcast_to(x, target_shape)
def left_justified_broadcast_to(x, shape, name=None):
  """Broadcasts `x` to shape, in a left-justified manner."""
  with tf.name_scope(name or 'left_justified_broadcast_to'):
    # Pad trailing singleton dims first so broadcasting aligns on the left.
    padded = left_justified_expand_dims_to(x, ps.size(shape))
    return tf.broadcast_to(padded, shape)
def where_left_justified_mask(mask, vals1, vals2, name=None):
  """Like `tf.where`, but broadcasts the `mask` left-justified."""
  with tf.name_scope(name or 'where_left_justified_mask'):
    rank1 = ps.rank(vals1)
    rank2 = ps.rank(vals2)
    bcast_mask = left_justified_expand_dims_to(mask, ps.maximum(rank1, rank2))
    return tf.where(bcast_mask, vals1, vals2)
def right_justified_unsorted_segment_sum(
    data, segment_ids, num_segments, name=None):
  """Same as tf.segment_sum, except the segment ids line up on the right."""
  with tf.name_scope(name or 'right_justified_unsorted_segment_sum'):
    data = tf.convert_to_tensor(data)
    segment_ids = tf.convert_to_tensor(segment_ids)
    # Ranks of the id block vs. the full data tensor; the trailing n_seg
    # dims of `data` are the ones being segmented.
    n_seg = ps.rank(segment_ids)
    n_data = ps.rank(data)
    # Move the rightmost n_seg dimensions to the left, where
    # segment_sum will find them
    perm = ps.concat(
        [ps.range(n_data - n_seg, n_data), ps.range(0, n_data - n_seg)], axis=0)
    data_justified = tf.transpose(data, perm=perm)
    results_justified = tf.math.unsorted_segment_sum(
        data_justified, segment_ids, num_segments)
    # segment_sum puts the segment dimension of the result on the
    # left; move it to the right.
    inverse_perm = ps.concat([ps.range(1, n_data - n_seg + 1), [0]], axis=0)
    return tf.transpose(results_justified, perm=inverse_perm)
| true |
5bb35aae758b1dd9692a796177e19d561c2d5687 | Python | rajlath/rkl_codes | /LeetCodeContests/81/81_822.py | UTF-8 | 1,404 | 3.546875 | 4 | [] | no_license | '''
Input: fronts = [1,2,4,4,7], backs = [1,3,4,1,3]
Output: 2
Explanation: If we flip the second card, the fronts are [1,3,4,4,7] and the backs are [1,2,4,1,3].
We choose the second card, which has number 2 on the back, and it isn't on the front of any card, so 2 is good.
'''
# NOTE(review): leftover drafting stub -- the method body is only its
# docstring, and this class is immediately shadowed by the definition below.
class Solution:
    def flipgame(self, fronts, backs):
        """
        :type fronts: List[int]
        :type backs: List[int]
        :rtype: int
        """
class Solution(object):
    def flipgame(self, fronts, backs):
        """
        :type fronts: List[int]
        :type backs: List[int]
        :rtype: int

        Smallest value that can face down on some card after flips.
        """
        # A value is "blocked" when some card shows it on both sides --
        # flipping that card can never hide it.
        blocked = set()
        seen = set()
        for front, back in zip(fronts, backs):
            if front == back:
                blocked.add(front)
            else:
                seen.add(front)
                seen.add(back)
        # Values are bounded by 2000, so scan upward for the smallest
        # candidate that appears on a mixed card and is never blocked.
        for value in range(1, 2001):
            if value in seen and value not in blocked:
                return value
        return 0
'''
sol by crazymerlin
'''
from collections import defaultdict
class Solution(object):
    def flipgame1(self, fronts, backs):
        """Smallest value visible on some card side but never on both
        sides of a single card; 0 when no such value exists."""
        blocked = {front for front, back in zip(fronts, backs) if front == back}
        candidates = [value for value in fronts + backs if value not in blocked]
        return min(candidates) if candidates else 0
sol = Solution()
print(sol.flipgame1([1,2,4,4,7], [1,3,4,1,3])) | true |
951cb161cb78f01775199b196ac7ff8f5d6f33ea | Python | rakshmitha/similar-minds | /csv_reader.py | UTF-8 | 907 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on
Course work:
@author:
Source:
https://stackoverflow.com/questions/2942889/reading-parsing-excel-xls-files-with-python
GSheet:
https://docs.google.com/spreadsheets/d/1YX4vLv3xj6tQC6Ou-Sx8yC1k5jT7VN4ulTWF1BldL9Y/edit#gid=39068816
'''
#import pandas as pd
import csv
#Local import
import db_service as dbs
def read_csv_and_store_in_db():
    """Read developer scores from the fixed CSV file and insert each row
    into the database via ``dbs.insert_into_db``."""
    columns = ('name', 'LinkedIn content', 'Public coding activities',
               'Github Analysis', 'Stackoverflow Analysis', 'Tech Keys involved')
    with open('developer_score_5_criteria.csv', 'r') as fin:
        reader = csv.DictReader(fin)
        records = [tuple(row[col] for col in columns) for row in reader]
        for name, linkedin, coding, github, stackoverflow, tech_keys in records:
            dbs.insert_into_db(name, linkedin, coding, github, stackoverflow, tech_keys)
def start():
    """Entry point: load the scores CSV and persist it to the database."""
    read_csv_and_store_in_db()
if __name__ == '__main__':
start() | true |
888bf0e70c02a499e123049a8df116fd8e79990d | Python | paulojblack/open-health-inspection-scraper | /scraper/helpers/spare_parts.py | UTF-8 | 693 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | ### Code to parse district page using Item/Field syntax (instead of ItemLoader)
'''
health_district_item = HealthDistrictItem()
health_district_item['district_name'] = district.xpath('a/text()').extract()
health_district_item['district_link'] = district.xpath('a/@href').extract()
health_district_item['district_id'] = district.xpath('a/@id').extract()
yield health_district_item
'''
### Code to parse district with ItemLoader
'''
district_loader = DistrictItemLoader(selector = district)
district_loader.add_xpath('district_name', './a/text()')
district_loader.add_xpath('district_link', './a/@href')
district_loader.add_xpath('district_id', './a/@id')
yield district_loader.load_item()
''' | true |
d5745b87e07c2b41d630fca3296b1ca2897714a3 | Python | haslem/pytest_web_mobile | /tests/screens/folder_screen.py | UTF-8 | 1,504 | 2.5625 | 3 | [] | no_license |
class FolderScreen(object):
    def __init__(self, mobile):
        # `mobile` is the mobile-driver instance used for element lookups.
        self.mobile = mobile
@property
def get_titles(self):
title_list = []
# for i in range(1,7):
# try:
# elem = self.mobile.find_element_by_xpath(f'/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout[2]/android.widget.FrameLayout[2]/android.widget.FrameLayout/android.widget.RelativeLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.view.ViewGroup/android.view.ViewGroup/androidx.recyclerview.widget.RecyclerView/android.view.ViewGroup[{i}]/android.widget.TextView[1]')
# title_list.append(elem.get_attribute('text'))
# except:
# break
elem = self.mobile.find_element_by_xpath(
f'/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout[2]/android.widget.FrameLayout[2]/android.widget.FrameLayout/android.widget.RelativeLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.view.ViewGroup/android.view.ViewGroup/androidx.recyclerview.widget.RecyclerView/android.view.ViewGroup[1]/android.widget.TextView[1]')
return title_list
| true |
41f4adc35293bcac1c10fecb5b3feda3f1e311f1 | Python | saintifly/leetcode | /爬楼梯.py | UTF-8 | 370 | 2.953125 | 3 | [] | no_license | class Solution(object):
def climbStairs(self, n):
"""
:type n: int
:rtype: int
"""
if n==1:
return 1
if n==2:
return 2
sum = [0]*(n+1)
sum[0]=0
sum[1]=1
sum[2]=2
for i in range(3,n+1):
sum[i]= sum [i-1]+sum[i-2]
return sum[n]
| true |
8e21a52839921c4ec9d0ca1c1f551c7b6b43d14c | Python | brenthompson2/Minterface | /src/services/MintHistoryReader.py | UTF-8 | 2,934 | 2.84375 | 3 | [] | no_license | import errno
import os
import os.path
import pandas as pd
class MintHistoryReader(object):
    """Exposes methods for retrieving the saved Intuit Mint data"""
    def __init__(self):
        """
        Initializes the object and gets the path to the saved history data
        (../../history relative to this file), prompting interactively when
        that directory does not exist.
        """
        # TODO: Get the history path from config or find something cleaner
        file_path = os.path.abspath(__file__)
        services = os.path.split(file_path)[0]
        src = os.path.split(services)[0]
        minterface = os.path.split(src)[0]
        self.history_path = os.path.join(minterface, 'history')
        # Keep prompting until a valid directory is supplied or the user
        # bails out with 'x'.
        while not os.path.isdir(self.history_path):
            response = input("Path to history data not found at %s. Please provide the correct path (enter 'x' to exit): " % self.history_path)
            if response in ('x', 'X'):
                raise FileNotFoundError("Path to history data not found at %s" % self.history_path)
            self.history_path = response
    def get_accounts_over_time(self):
        """
        Get a dictionary keyed by date retrieved where the value is the DataFrame of account data retrieved at that time.
        """
        account_history = {}
        for folder_date in os.listdir(self.history_path):
            file_path = os.path.join(self.history_path, folder_date, 'Accounts.csv')
            account_history[folder_date] = pd.read_csv(file_path)
        return account_history
    def get_credit_score_over_time(self):
        """
        Get a dictionary keyed by date retrieved where the value is the credit score retrieved at that time.
        """
        credit_history = {}
        for folder_date in os.listdir(self.history_path):
            file_path = os.path.join(self.history_path, folder_date, 'CreditScore.csv')
            # Context manager closes the handle; the original leaked one
            # open file per folder.
            with open(file_path) as infile:
                credit_history[folder_date] = infile.readline()
        return credit_history
    def get_latest_transactions(self):
        """
        Get the dataframe of transactions from the most recent save
        """
        # NOTE(review): os.listdir order is OS-dependent; this assumes the
        # last entry is the newest date-named folder -- confirm (an explicit
        # sorted() would be safer, but would change existing behavior).
        latest_folder_date = os.listdir(self.history_path)[-1]
        file_path = os.path.join(self.history_path, latest_folder_date, "Transactions.csv")
        return pd.read_csv(file_path)
# region Test the class
# print("\n===================================================================")
# print("MintHistoryReader test")
# print("===================================================================\n")
# reader = MintHistoryReader()
# print("History Path: %s" % reader.history_path)
# accounts_over_time = reader.get_accounts_over_time()
# print("\naccounts_over_time:")
# print(accounts_over_time)
# credit_score_over_time = reader.get_credit_score_over_time()
# print("\ncredit_score_over_time:")
# print(credit_score_over_time)
# latest_transactions = reader.get_latest_transactions()
# print("\nlatest_transactions:")
# print(latest_transactions)
# endregion
| true |
d77eeaedced15dacc0e1aed4d032fd57c3661b0a | Python | abityab/TBG | /Spot Check.py | UTF-8 | 1,872 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# Jupyter-exported script: loads a spreadsheet, one-hot encodes the COLO
# column, and spot-checks six classifiers on predicting 'XL' with 10-fold CV.
# In[1]:
import pandas as pd
df = pd.read_excel ('aaaa.xlsx')
df.head()
# In[2]:
# Drop unused feature columns A-D.
df.drop("A", axis=1, inplace=True)
df.drop("B", axis=1, inplace=True)
df.drop("C", axis=1, inplace=True)
df.drop("D", axis=1, inplace=True)
df.head()
# In[3]:
# One-hot encode the categorical COLO column (first level dropped).
colo = pd.get_dummies(df['COLO'], drop_first=True)
colo.head()
# In[4]:
df=pd.concat([df,colo],axis=1)
df.drop(['COLO'],axis=1,inplace=True)
df.head()
# In[5]:
#spot check algorithm
scoring = 'accuracy'
import pandas
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
# In[7]:
# 'XL' is the target; everything else is a feature.
X= df.drop("XL",axis=1)
y= df["XL"]
# In[14]:
# NOTE(review): no random_state here, so the split differs between runs.
validation_size = 0.2
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size = validation_size)
# In[15]:
models =[]
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))
#evaluate each model in turn
results = []
names = []
for name, model in models:
    kfold = model_selection.KFold(n_splits=10)
    cv_results = model_selection.cross_val_score(model, X_train, y_train, cv = kfold, scoring = scoring)
    results.append(cv_results)
    names.append(name)
    # Report mean accuracy and its standard deviation across the 10 folds.
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
# In[ ]:
| true |
81faf8ff4b18cf54db98a0a995915399e9bca8f0 | Python | NWood-Git/terminal-teller-sql | /tests/app/view.py | UTF-8 | 2,928 | 3.71875 | 4 | [] | no_license |
# Console I/O helpers for the login screen.
def print_login_menu():
    """Show the top-level menu (create account / log in / quit)."""
    print("""Welcome to Terminal Teller!:
    1) create account
    2) log in
    3) quit
    """)
def login_prompt():
    """Read and return the user's raw menu selection."""
    return input("Your choice:")
def bad_login_input():
    """Tell the user their menu selection was not understood."""
    print("Input not recognized\n")
def goodbye():
    """Print a short farewell.

    NOTE(review): shadowed by the second ``goodbye`` defined later in this
    module, so this version is dead code.
    """
    print(" Goodbye!\n")
def print_main_menu(user):
    """Show the account menu for the signed-in ``user`` (expects
    ``first_name``, ``last_name`` and ``account_number`` attributes)."""
    #print(type(user))
    print(f"""Hello, {user.first_name} {user.last_name} ({user.account_number})
    1 Check balance
    2 Withdraw funds
    3 Deposit funds
    4 Sign out
    """)
#NW added 11/21 - 11/22
def main_prompt():
    """Read and return the user's main-menu selection."""
    return input("Your choice:")
# Prompts and messages used during account creation.
def input_first_name():
    return input("What is your first name? ")
def input_last_name():
    return input("What is your last name? ")
def input_new_pin():
    """Prompt for a new 4-digit PIN; validation happens in the caller."""
    return input("Please input a 4-digit numeric pin for your account. ")
def bad_pin():
    print("The pin you have entered is invalid. Please try again.")
def inconsistent_pin():
    print("Those were not the same. Please try again. ")
def good_pin(pin):
    print(f'Your pin: {pin}, has been submitted.\nPlease save it.')
def new_acct_num(account_number):
    print(f"This is your account number: {account_number}, please save it somewhere safe.")
def invalid_credentials():
    print("Invalid credentials, please try again.")
def user_login_attempt():
    """Prompt for credentials; returns (account_number, pin) as strings."""
    print("Please enter your credentials ")
    login_acct = input("Account Number: ")
    login_pin = input("Pin: ")
    return (login_acct, login_pin)
def goodbye():
    """Farewell shown on exit (this definition wins over the earlier one)."""
    print('Thanks for using Terminal Teller. Have a great day!')
def show_balance(user):
    """Print the current balance (expects ``user.balance``)."""
    print(f"Your balance is: ${user.balance}")
def withdrawal_amount():
    """Prompt for a withdrawal amount; float() raises ValueError on
    non-numeric input."""
    amount = float(input("How much would you like to withdraw: "))
    return amount
def insufficient_funds():
    print("Sorry you have insufficient funds for this transaction.")
def not_positive():
    print("Sorry, you can't enter a negative number. the amount you entered needs to be positive!")
def post_withdrawal(amount, bal):
    print(f"You have withdrawn ${amount} your balance is now {bal}")
def deposit_amount():
    '''Prompt for a deposit amount; float() raises ValueError on
    non-numeric input.'''
    amount = float(input("How much would you like to deposit: "))
    return amount
def post_deposit(amount, bal):
    print(f"You have deposited ${amount} your balance is now {bal}")
bd633896bd5346a9b5e60a6e8632a36c135d8246 | Python | cwr1518/origin | /在线二分匹配程序/print_state.py | UTF-8 | 335 | 3.234375 | 3 | [] | no_license |
def print_state(left,right,edge,i):
print("第",i,"轮")
print(("左边"))
for ii in range(len(left)):
print(left[ii][0])
print("右边")
for ii in range(len(right)):
print(right[ii][0])
print("边值")
for ii in range(len(edge)):
print(edge[ii][0]," ",edge[ii][1]," ",edge[ii][2])
| true |
cfaead2d37bf1c3962105b51261f99808a85afd6 | Python | MLsmaller/sfd_hand | /utils/to_json.py | UTF-8 | 1,002 | 2.8125 | 3 | [] | no_license | #-*- coding:utf-8 -*-
import json
import numpy as np
import os
import cv2
#-----save the keypoint of finger to json
def to_json(point_l,point_r,res,path):
    """Append one image record (filename plus left/right finger keypoints)
    to ``res`` and return ``res``.

    ``None`` entries are preserved so missing keypoints stay explicit.
    The record carries separate lists for the left and right hand.
    """
    record = {
        'filename': path,
        # The original branched on `point is None` but appended the value
        # unchanged either way, so a plain copy is equivalent. It also
        # shadowed the builtin `dict` and left an unused `num = 0` behind.
        'keypoint_l': list(point_l),
        'keypoint_r': list(point_r),
    }
    res.append(record)
    return res
#------可以存数组进去,也可以存字典进去----
def saveJsonFile(res,json_name):
    """Dump the records in ``res`` to ``json_name`` under a top-level
    'value' key (pretty-printed, keys sorted)."""
    # Copy into a fresh list instead of the original append loop, and
    # avoid shadowing the builtin `dict`.
    payload = {'value': list(res)}
    with open(json_name,'w') as f:
        json.dump(payload,f,sort_keys=True,indent=4,separators=(', ',': '))
    print("the json file has done")
cee68f98d32ba811cd9e28ca7968cbf4c097064d | Python | PotentialPie/leetcode-python | /algorithm/leetcode-23.py | UTF-8 | 839 | 3.421875 | 3 | [] | no_license | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
#coding=utf-8
class Solution:
    def swapPairs(self, head):
        """
        :type head: ListNode
        :rtype: ListNode

        Swap every two adjacent nodes of a singly linked list in place and
        return the new head. NOTE: relies on the LeetCode-provided ListNode
        class (commented out above), so this file is not runnable standalone.
        """
        ## Edge cases! Edge cases! Edge cases! Always handle the empty and
        ## single-node list first (self-reminder kept from the original).
        if not head or head.next is None:
            return head
        # After the first swap the second node becomes the new head.
        return_head = head.next
        p = head
        q = head.next
        # Dummy predecessor so re-linking is uniform for the first pair too.
        temp_node = ListNode(-1)
        temp_node.next = head
        while q is not None:
            # Swap the pair (p, q): p jumps over q, q points back to p...
            p.next = q.next
            q.next = p
            # ...and the previous pair's tail is re-linked to q.
            temp_node.next = q
            temp_node = p
            if temp_node.next is None:
                break
            p = p.next
            q = p.next
        return return_head
| true |
f096ecbe4a74f3537d927981dfba70ba82358c3a | Python | erishabh/restaurantClosuresCovid19 | /race_cleaning.py | UTF-8 | 1,856 | 3.109375 | 3 | [] | no_license | import pandas as pd
# Importing the original data file (two-row header, first column as index)
race_orig = pd.read_excel('raw_data/ACS_data/SF_race_ethnicity.xlsx', sheet_name = 1, header = [0, 1], index_col = 0)
# Compressing the two-level headers into single space-joined strings
race_orig.columns = [' '.join(col).strip() for col in race_orig.columns.values]
# Dropping MOE (margin of error) columns and the percent columns
race_drop = race_orig[race_orig.columns.drop(list(race_orig.filter(regex = r'((Percent)|(Margin of Error))')))]
# Pivoting rows and columns
race_pivot = race_drop.transpose()
# Reformatting data: strip thousands separators, then cast to int
race_int = race_pivot.replace(regex = r'(,)', value = '')
race_int = race_int.astype(int)
# Creating [bipoc] and [indigenous] columns from the component populations
race_int['bipoc_pop'] = race_int['Total population'] - race_int['White alone']
race_int['indigenous_pop'] = race_int['American Indian and Alaska Native alone'] + race_int['Native Hawaiian and Other Pacific Islander alone']
# Drop the source columns now folded into the derived ones
race_drop_calc = race_int.drop(columns = ['Total population', 'White alone', 'American Indian and Alaska Native alone', 'Native Hawaiian and Other Pacific Islander alone'])
# Renaming columns for ease of use
race_clean = race_drop_calc.rename(columns = {'Hispanic or Latino (of any race)' : 'hispanic_pop', 'Black or African American alone' : 'afam_pop', 'Asian alone' : 'asian_pop'})
race_clean = race_clean[['hispanic_pop', 'afam_pop', 'asian_pop', 'indigenous_pop', 'bipoc_pop']]
# Resetting index and renaming columns
race_clean = race_clean.reset_index()
race_clean = race_clean.rename(columns = {'index' : 'census_tract'})
# Extracting the numeric census tract from the index label
race_clean['census_tract'] = race_clean['census_tract'].str.extract(r'((\d+\.\d+)|(\d+))')
# Exporting dataframe as csv file
race_clean.to_csv('clean_data/ACS/SF_race_clean.csv', index = False, header = True)
print('********************** EXPORT COMPLETE **********************') | true |
b00ae33159a9ff9955b1436ba1f12e5969e0fcd0 | Python | peitalin/cryptometrics | /poloniex_api.py | UTF-8 | 1,457 | 2.921875 | 3 | [] | no_license |
import pandas as pd
import seaborn as sb
import requests
from requests import get
from datetime import datetime
import pickle
def get_json_data(json_url, cache_path):
    '''Download and cache JSON data, return as a dataframe.

    Loads the pickled DataFrame from ``cache_path`` when it exists;
    otherwise fetches ``json_url``, pickles the result, and returns it.
    '''
    try:
        # Context manager closes the handle; the original leaked it.
        with open(cache_path, 'rb') as f:
            df = pickle.load(f)
        print('Loaded {} from cache'.format(json_url))
    except (OSError, IOError):
        # Cache miss: download, then persist for next time.
        print('Downloading {}'.format(json_url))
        df = pd.read_json(json_url)
        df.to_pickle(cache_path)
        print('Cached {} at {}'.format(json_url, cache_path))
    return df
def get_crypto_data(poloniex_pair):
    '''Retrieve cryptocurrency data from poloniex'''
    url = base_polo_url.format(
        poloniex_pair, start_date.timestamp(), end_date.timestamp(), pediod)
    chart = get_json_data(url, poloniex_pair)
    return chart.set_index('date')
# Poloniex public API endpoint for OHLC chart data.
# fix: the query string had been mangled by an HTML-entity round-trip
# ("&curren" had collapsed into the currency-sign character, destroying
# the currencyPair parameter).  Restored to the documented parameter name.
base_polo_url = 'https://poloniex.com/public?command=returnChartData&currencyPair={}&start={}&end={}&period={}'
start_date = datetime.strptime('2015-01-01', '%Y-%m-%d') # get data from the start of 2015
end_date = datetime.now() # up until today
pediod = 86400 # pull daily data (86,400 seconds per day)

altcoins = ['ETH','LTC','XRP','ETC','STR','DASH','SC','XMR','XEM']

# Download (or load from cache) the daily BTC-denominated history of each coin.
altcoin_data = {}
for altcoin in altcoins:
    coinpair = 'BTC_{}'.format(altcoin)
    crypto_price_df = get_crypto_data(coinpair)
    altcoin_data[altcoin] = crypto_price_df
| true |
51c2f1f1d00c9182f735be2802d066c0f90b1a28 | Python | tok41/opencv_sample | /bin/sample_08_2.py | UTF-8 | 2,571 | 2.796875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import cv2
import cv2.cv as cv
##### 動画から顔を検出してみる
# トラックバーを動かしたときに呼び出されるコールバック関数の定義
def onTrackbarSlide(pos):
    # Trackbar callback: seek the capture to frame *pos*.
    # fix: declare the module-level lock flag as global -- without this the
    # two assignments below only created locals, so the main loop's
    # "if (updatelock): continue" guard could never fire.
    global updatelock
    updatelock = True
    cap.set(cv.CV_CAP_PROP_POS_FRAMES, pos)
    updatelock = False
## Cascade file (Haar frontal-face classifier shipped with OpenCV)
cascade_path = "/usr/local/Cellar/opencv/2.4.11_2/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"
updatelock = False # lock flag: True while the trackbar callback is running
windowname_in = 'inframe' # name of the (input frame) window
trackbarname = 'Position' # name of the trackbar
# input video path
video_path = "out/camera_sample.m4v"
out_video_path = "out/face_detect_sample.m4v"
# codec selection
fourcc = cv2.cv.CV_FOURCC('m','p','4','v')
# open the input video file and an output writer
cap = cv2.VideoCapture(video_path)
out = cv2.VideoWriter("face_output.m4v", fourcc, 30.0, (1280,720))
color = (0, 187, 254) # rectangle colour (BGR)
# set up the cascade classifier
cascade = cv2.CascadeClassifier(cascade_path)
# create a named window
cv2.namedWindow(windowname_in, cv2.WINDOW_NORMAL)
# get the number of frames in the video file
frames = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
# if there is at least one frame, attach a seek trackbar to the window
if (frames > 0):
    cv2.createTrackbar(trackbarname, windowname_in, 0, frames, onTrackbarSlide)
frame_num = 0
img_cnt = 0
# per-frame processing loop
while(cap.isOpened()):
    # skip drawing while the trackbar is being updated
    if (updatelock):
        continue
    ## grab the next frame
    ret, frame = cap.read()
    if (ret == False):
        break
    ## convert to grayscale for detection
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    facerect = cascade.detectMultiScale(
        frame_gray, scaleFactor=1.1, minNeighbors=1, minSize=(1, 1))
    # face detection results
    #print("frame : %d" % frame_num)
    if len(facerect) > 0:
        # draw a rectangle around every detected face
        for (x,y,w,h) in facerect:
            cv2.rectangle(frame, (x,y),(x+w,y+h), color, thickness=7)
            img_cnt += 1
    #out.write(cv2.resize( frame, (1280, 720)) )
    # show the frame on screen
    cv2.imshow(windowname_in,frame)
    # get the current frame number
    curpos = int(cap.get(cv.CV_CAP_PROP_POS_FRAMES))
    # push it to the trackbar (this invokes the callback)
    cv2.setTrackbarPos(trackbarname, windowname_in, curpos)
    frame_num += 1
cap.release()
cv2.destroyAllWindows()
out.release()
| true |
a6b95e5a5a02dab2cf16d6d1222ed23768470298 | Python | lingomaniac88/ktaneKlaxon | /Assets/Models/fix_button.py | UTF-8 | 3,470 | 2.734375 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | # A script to fix the normals from the exported button.obj.
# This script mainly exists because I have more experience with Python than I do with Blender. :-p
# Note: the top of the button is in the +Y direction.
import collections
import functools
import math
import pywavefront
# Vertex container and classification of the exported button mesh.
Point3 = collections.namedtuple('Point3', ['x', 'y', 'z'])
FILE_NAME = 'button.obj'
button = pywavefront.Wavefront(FILE_NAME, collect_faces=True, parse=True)
V = [Point3(*xyz) for xyz in button.vertices]
# Tests corresponding to what types of vertices we have
SHAFT = 1 << 0
BOTTOM_CASE = 1 << 1
TOP_CASE = 1 << 2
OUTER_LIP = 1 << 3
INNER_LIP = 1 << 4
DOME = 1 << 5
# Predicates (with small tolerances) deciding which surface(s) a vertex lies
# on; a vertex can satisfy several at once (edges shared between surfaces).
tests = {
    SHAFT: lambda p: 0.24 < (p.x**2 + p.z**2) < 0.26,
    BOTTOM_CASE: lambda p: 1.24 < p.y < 1.26,
    TOP_CASE: lambda p: 1.34 < p.y < 1.36,
    OUTER_LIP: lambda p: 3.99 < (p.x**2 + p.z**2) < 4.01,
    INNER_LIP: lambda p: 3.79 < (p.x**2 + p.z**2) < 3.81,
    DOME: lambda p: 3.79 < (p.x**2 + p.z**2 + (p.y - 1.35)**2) < 3.81,
}
# Bitmask of every test each vertex passes.
classes = [sum((k for k in tests if tests[k](v))) for v in V]
expected_vertex_classes = [
    SHAFT,
    SHAFT | BOTTOM_CASE,
    BOTTOM_CASE | OUTER_LIP,
    TOP_CASE | OUTER_LIP,
    TOP_CASE | INNER_LIP | DOME,
    DOME
]
# Sanity check: only the expected vertex classes occur in the model.
assert set(classes) == set(expected_vertex_classes)
tris = button.mesh_list[0].faces
# A face's class is the AND of its vertices' classes (the surface they share).
tri_classes = [functools.reduce(lambda x, y: x & y, [classes[v] for v in tri]) for tri in tris]
expected_tri_classes = [
    SHAFT,
    BOTTOM_CASE,
    OUTER_LIP,
    TOP_CASE,
    TOP_CASE | INNER_LIP | DOME,
    DOME
]
def normalize(p):
    """Scale the vector *p* to unit length, returning a new Point3."""
    length = math.sqrt(p.x**2 + p.y**2 + p.z**2)
    return Point3(p.x / length, p.y / length, p.z / length)
# normal_calculations: class -> (vertex -> normal)
normal_calculations = {
    SHAFT: lambda p: normalize(Point3(p.x, 0, p.z)),
    BOTTOM_CASE: lambda p: Point3(0, -1, 0),
    OUTER_LIP: lambda p: normalize(Point3(p.x, 0, p.z)),
    TOP_CASE: lambda p: Point3(0, 1, 0),
    TOP_CASE | INNER_LIP | DOME: lambda p: Point3(0, 1, 0),
    DOME: lambda p: normalize(Point3(p.x, p.y - 1.35, p.z))
}
# Now, get ready to replace the normals
f = open(FILE_NAME)
lines = list(f)
f.close()
# This inefficient, but whatever
vn_in_lines = [(i, line) for i, line in enumerate(lines) if line.startswith('vn')] # These will be contiguous
f_in_lines = {i: line for i, line in enumerate(lines) if line.startswith('f')} # These will all be after vn_lines
normals_to_new_vn_indices = {}
f_out_lines = {}
# Rewrite each face ("f") line: recompute a normal for every vertex, give it
# a deduplicated new "vn" index, and rebuild the v/vt/vn triplets.
for i in f_in_lines:
    f_line = f_in_lines[i]
    vtn_indices = map(lambda vtn: map(int, vtn.split('/')), f_line.split()[1:])
    v, vt, vn = tuple(zip(*vtn_indices))
    # OBJ indices are 1-based, hence the "- 1" lookups below.
    face_class = functools.reduce(lambda x, y: x & y, [classes[i - 1] for i in v])
    normals = [normal_calculations[face_class](V[i - 1]) for i in v]
    vn_new = []
    for n in normals:
        if n not in normals_to_new_vn_indices:
            normals_to_new_vn_indices[n] = len(normals_to_new_vn_indices) + 1
        vn_new.append(normals_to_new_vn_indices[n])
    out_line = 'f ' + ' '.join(['%d/%d/%d' % items for items in zip(v, vt, vn_new)])
    f_out_lines[i] = out_line
# Emit the patched file to stdout: everything before the old "vn" block ...
for i in range(vn_in_lines[0][0]):
    print(lines[i])
# ... then the new, deduplicated normals in index order ...
normals = {normals_to_new_vn_indices[k]: k for k in normals_to_new_vn_indices}
for i in range(len(normals_to_new_vn_indices)):
    point = normals[i + 1]
    print('vn %f %f %f' % point)
# ... then the remainder, substituting the rewritten face lines.
for i in range(vn_in_lines[-1][0], len(lines)):
    if lines[i].startswith('f '):
        print(f_out_lines[i])
    else:
        print(lines[i])
| true |
675c0538ad2ef0d7ba280e08afccad0eaaa50506 | Python | linzai1992/myai | /NLP/Old/data_batcher.py | UTF-8 | 3,343 | 2.578125 | 3 | [] | no_license | import json
import random
import numpy as np
class DataBatcher:
    """Loads a {label: [sentence, ...]} JSON corpus and serves shuffled
    batches of one-hot sentence tensors paired with one-hot label vectors."""
    def __init__(self, data_path):
        # Special vocabulary markers: start/end of sentence, unknown, padding.
        self.sos_token = "<S>"
        self.eos_token = "<E>"
        self.unk_token = "<U>"
        self.pad_token = "<P>"
        with open(data_path, "r") as json_file:
            raw_json = json.load(json_file)
        # Next ids to hand out; 0-3 are reserved for the special tokens above.
        index = 4
        key_index = 0
        self.word_map = {self.sos_token: 3, self.eos_token: 1, self.unk_token: 2, self.pad_token: 0}
        self.key_map = {}
        self.max_sentence_len = 0
        # Build the word and label vocabularies; track the longest sentence.
        for key, sentences in raw_json.items():
            if not key in self.key_map:
                self.key_map[key] = key_index
                key_index += 1
            for sentence in sentences:
                tokens = sentence.split(" ")
                if len(tokens) > self.max_sentence_len:
                    self.max_sentence_len = len(tokens)
                for token in tokens:
                    if not token in self.word_map:
                        self.word_map[token] = index
                        index += 1
        self.total_samples = 0
        for key in raw_json:
            self.total_samples += len(raw_json[key])
        # +2 leaves room for the <S>/<E> markers wrapped around each sentence.
        self.max_sentence_len += 2
        # Reverse lookups: id -> word and id -> label.
        self.index_map = dict([reversed(i) for i in self.word_map.items()])
        self.index_key_map = dict([reversed(i) for i in self.key_map.items()])
        self.total_words = len(self.word_map)
        self.total_classes = len(self.key_map)
        # Flat (sentence, label) pairs; epoch_data is consumed batch by batch.
        self.data = [(sent, label) for label, sentences in raw_json.items() for sent in sentences]
        self.epoch_data = list(self.data)
    def prepare_epoch(self):
        """Refill the epoch pool with every (sentence, label) pair."""
        self.epoch_data = list(self.data)
    def epoch_finished(self):
        """Return True once every sample has been consumed from the pool."""
        return len(self.epoch_data) == 0
    def generate_batch(self, size):
        """Draw up to *size* random samples (without replacement within the
        epoch) and return (sentence_tensors, label_tensors) numpy arrays."""
        if size > len(self.epoch_data):
            size = len(self.epoch_data)
        batch = [self.epoch_data.pop(random.randrange(len(self.epoch_data))) for _ in range(size)]
        sentence_tensors = []
        label_tensors = []
        for sentence, label in batch:
            tensor = self.sentence_to_tensor(sentence)
            sentence_tensors.append(tensor)
            # One-hot label vector.
            label_tensor = np.zeros(self.total_classes)
            label_tensor[self.key_map[label]] = 1
            label_tensors.append(label_tensor)
        return np.array(sentence_tensors), np.array(label_tensors)
    def generate_full_batch(self):
        """Like generate_batch, but over the entire dataset in corpus order."""
        batch = list(self.data)
        sentence_tensors = []
        label_tensors = []
        for sentence, label in batch:
            tensor = self.sentence_to_tensor(sentence)
            sentence_tensors.append(tensor)
            label_tensor = np.zeros(self.total_classes)
            label_tensor[self.key_map[label]] = 1
            label_tensors.append(label_tensor)
        return np.array(sentence_tensors), np.array(label_tensors)
    def generate_batch_embeddings(self, size, vocab_handler):
        # NOTE(review): looks like an unfinished stub -- it consumes samples
        # from the epoch pool but returns nothing.  Confirm intended behaviour
        # before relying on it.
        if size > len(self.epoch_data):
            size = len(self.epoch_data)
        batch = [self.epoch_data.pop(random.randrange(len(self.epoch_data))) for _ in range(size)]
        for sentence, label in batch:
            break
    def sentence_to_tensor(self, sentence):
        """One-hot encode *sentence* as a (max_sentence_len, total_words, 1)
        float32 tensor, wrapped in <S>/<E> markers."""
        tokens = sentence.split(" ")
        tokens.insert(0, self.sos_token)
        tokens.append(self.eos_token)
        # The pad token's id is 0, so the fill value is effectively 0.0.
        tensor = np.full((self.max_sentence_len, self.total_words, 1), self.word_map[self.pad_token], dtype=np.float32)
        for index, token in enumerate(tokens):
            tensor[index][self.word_map[token]][0] = 1
        return np.array(tensor)
    def preprocess_string(self, strg):
        """Lower-case *strg*, strip basic punctuation, and replace words that
        are not in the vocabulary with the <U> (unknown) token."""
        strg = strg.lower().replace(".", "").replace("?", "").replace("!", "").replace(",", "").strip()
        tokens = strg.split(" ")
        final = ""
        for token in tokens:
            if token in self.word_map:
                final += token + " "
            else:
                final += self.unk_token + " "
        return final[:-1]
| true |
f619e75a48ebe00d260b97c83ccd7809436d7e3c | Python | 6188506/LearnPythonHardWay | /tower_builder(debug).py | UTF-8 | 316 | 2.890625 | 3 | [] | no_license | def tower_builder(n):
tower = []
for i in range(1, n*2,2):
tower.append(' '*(((n*2-1)-i)/2)+i*'*'+' '*(((n*2-1)-i)/2))
print '['
for i in tower:
print " " + "\'%s\'" %i + ','
print ']'
#=> I think this is correct, but the judging system didn't accept it; it prints fine when run in IDLE
4412a70c0e1b29b0689991b96256b5d675391a86 | Python | PeterZhu514/PythonCrawlerStudy | /Regx_Practice/正则表达式练习.py | UTF-8 | 601 | 3.15625 | 3 | [] | no_license |
#正则表达式
#匹配以".com"和".cn"结尾的域名
import re
pattern ="[a-zA-Z]+://[^\s]*[.com|.cn]"
string="<a href='http://www.baidu.com'>百度首页</a>"
result=re.search(pattern,string)
print(result)
#匹配电话号码
import re
pattern="\d{4}-\d{7}|\d{3}-\d{8}"
string="021-6728263652341"
result=re.search(pattern,string)
print(result)
#匹配电子邮件地址
import re
pattern="\w+([.+-]\w+)*@\w+([.-]\w+)*\.\w+([.-]\w+)*"
string="<a href='http://www.baidu.com'>百度首页</a><br><a href='mailto:c-e+o@iqi-anyue.com.cn'>电子邮件地址</a>"
result=re.search(pattern,string)
print(result)
| true |
fe54ebd30705a07091ecf9b4f511845a735833ff | Python | blueboy1593/algorithm | /SSAFY알고리즘정규시간 Problem Solving/10월 Problem Solving/1015/5248그룹나누기re.py | UTF-8 | 943 | 2.796875 | 3 | [] | no_license | from pprint import pprint
import sys
# Redirect stdin so input() reads the contest's sample-input file.
sys.stdin = open("5248_input.txt", "r")
# T: number of test cases.
T = int(input())
for tc in range(1, T + 1):
    # N people, M wish pairs; the wishes arrive as a flat list of 2*M ints.
    N, M = map(int, input().split())
    wish_list = list(map(int, input().split()))
    # wish_arr is a symmetric adjacency matrix over people 1..N.
    wish_arr = [ [0] * (N + 1) for _ in range(N + 1) ]
    for i in range(len(wish_list)):
        # Even index: (a, b) pair start; odd index: mirror edge (b, a).
        if not i % 2:
            wish_arr[wish_list[i]][wish_list[i + 1]] = 1
        if i % 2:
            wish_arr[wish_list[i]][wish_list[i - 1]] = 1
    visited = [0] * (N + 1)
    def BFS(i):
        # Breadth-first search marking every member of i's connected group.
        queue = [i]
        while queue:
            for _ in range(len(queue)):
                a = queue.pop(0)
                visited[a] = 1
                for j in range(1, N + 1):
                    if wish_arr[a][j] == 1 and visited[j] == 0:
                        queue.append(j)
    # Every BFS started from an unvisited node discovers one new group.
    couple = 0
    for i in range(1, N + 1):
        if visited[i] == 0:
            BFS(i)
            couple += 1
    print("#%d %d" %(tc, couple))
| true |
d41063ed971abbe71b41283b709e6420ed646fee | Python | joshpaulchan/banksim | /main.py | UTF-8 | 3,577 | 3.40625 | 3 | [] | no_license | #
# main.py
# Bank simulator
#
# Written by Joshua Paul A. Chan
import argparse
import math
from banksim.customer import Customer
from banksim.bank import Bank
# set up argument parsing
# (positional: customer count; options: teller count, verbose logging)
parser = argparse.ArgumentParser()
parser.add_argument("c", type=int, help="the number of customers to visit the \
bank")
parser.add_argument("-t", type=int, default=1, help="the number of tellers at \
the bank")
parser.add_argument("-v", "--verbose", help="increase the level of output \
logging", action="store_true")
def distribute(items, ticks):
    """
    `distribute(items, ticks)`

    Groups the items over the specified number of ticks so that they're evenly
    distributed

    @param : items : items to group together into buckets
    @param : ticks : the number of groups/buckets to group items into
    @return : list : a list of lists, with some items grouped into each
    individual list
    """
    # isinstance instead of `type(x) == T`: same accepted values, idiomatic.
    assert isinstance(items, list)
    assert isinstance(ticks, int)
    assert ticks > 0

    # Every bucket except possibly the last holds ceil(len/ticks) items, so
    # plain slicing reproduces the original manual bucket-switching loop
    # (trailing buckets come out empty when items run out).
    items_per_bucket = math.ceil(len(items) / ticks)
    return [
        items[k * items_per_bucket:(k + 1) * items_per_bucket]
        for k in range(ticks)
    ]
def main():
    """Parse the command-line arguments, run the teller/customer queue
    simulation one time step at a time, and print summary statistics."""
    args = parser.parse_args()
    # Logger that only prints when --verbose was given.
    def log(s):
        if args.verbose: print(s)
    # Clamp nonsensical argument values up to at least 1.
    N = args.c if args.c > 0 else 1
    n_tellers = args.t if args.t > 0 else 1
    # instantiate the bank
    bank = Bank(n_tellers)
    # set up simulation
    bank.open()
    t = 0
    wait_time = 0
    # Spread the N customers' arrivals evenly over 10 time steps.
    visitors_over_time = distribute(
        [Customer(str(n).zfill(3)) for n in range(N)],
        10
    )
    while bank.is_open():
        log("=" * 32 + " timestep: {} ".format(str(t).zfill(4)) + "=" * 32)
        # update waiting times
        for customer in bank.customers:
            customer.wait_a_little()
        # get new customers
        if t < len(visitors_over_time):
            visitors = visitors_over_time[t]
            for visitor in visitors:
                bank.receive_customer(visitor)
            log("[visitors that came in] {}".format(len(visitors)))
        # update internal bank state
        bank.update()
        # get available tellers
        available_tellers = list(
            filter(lambda t: t.is_available(), bank.tellers)
        )
        # Pair waiting customers with free tellers, accumulating wait time.
        while len(available_tellers) > 0 and len(bank.customers) > 0:
            # get teller
            free_teller = available_tellers.pop(0)
            # move a customer from queue to available teller
            next_customer = bank.customers.get_next_customer()
            free_teller.serve(next_customer)
            wait_time += next_customer.has_waited
            # print("{} is serving: {}".format(free_teller, next_customer))
        log("[visitors left to serve] {}".format(len(bank.customers)))
        # increment time step
        t += 1
        # if no more customers visiting, break
        if t >= len(visitors_over_time) - 1 and len(bank.customers) == 0:
            bank.close()
    print("=" * 80)
    print("[stats]")
    print("total number of tellers = {}".format(n_tellers))
    print("total number of customers served = {}".format(N))
    print("total number of unit time steps = {}".format(t - 1))
    print("average wait time per customer = {}".format(wait_time / N))
    print("=" * 80)
# Standard script entry point: only run the simulation when executed directly.
if __name__ == '__main__':
    main()
| true |
babf47ad0b8d2b6c29dc0314d18f4ee216af8e09 | Python | cunconbkhp/Deep_learning_basic | /SB_PRACTICE/CH7/kNN.py | UTF-8 | 1,144 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 17 22:58:42 2019
@author: huyquang
"""
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from simplepreprocessor import SimplePreprocessor
from simpledatasetloader import SimpleDatasetLoader
from imutils import paths
# Load the animal image dataset, flatten the images to raw-pixel feature
# vectors, and evaluate a 1-nearest-neighbour classifier on a 75/25 split.
print("[INFO] loading images ...")
imagePaths = list(paths.list_images("datasets\\animals"))
sp = SimplePreprocessor(32, 32)
sdl = SimpleDatasetLoader(preprocessors=[sp])
(data, labels) = sdl.load(imagePaths, verbose=500)
# Flatten each image into a single 3072-dimensional vector (32 * 32 * 3).
data = data.reshape((data.shape[0], 3072))
print("[INFO] feature matrix: {:.1f}MB".format(data.nbytes / (1024 * 1024.0)))
# Encode the string class labels as integers.
le = LabelEncoder()
labels = le.fit_transform(labels)
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)
print("[INFO] evaluating k-NN classifier...")
model = KNeighborsClassifier(n_neighbors=1, n_jobs=-1)
# fix: the model was fitted twice with identical data; once is enough.
model.fit(trainX, trainY)
print(classification_report(testY, model.predict(testX), target_names=le.classes_))
| true |
edff8949922bf3f162b5344bf39d5c3f392193ca | Python | Parthdoshi04/Python-basics-5 | /code3.py | UTF-8 | 319 | 3.234375 | 3 | [] | no_license | from bs4 import BeautifulSoup
import requests
# Running total of <a> (anchor) tags found on the page.
count=0
url = input("Enter a website to extract the URL's form: ")
# NOTE(review): "https://" is prepended unconditionally -- assumes the user
# typed a bare host name, not a full URL.  Confirm expected input format.
r=requests.get("https://"+url)
data=r.text
soup = BeautifulSoup(data,'html.parser')
print(soup)
# Print every anchor tag and count it.
for link in soup.find_all('a'):
    print(link)
    count=count+1
print("Total number of anchors:",count)
4ea9fbc636993a226016131d7f9a1f781cb0f0ad | Python | wheeler2000/mpg2kpl-python | /converters.py | UTF-8 | 222 | 3.734375 | 4 | [] | no_license | # do the simplest thing that could possibly work
# ask the user to input an MPG value
mpg = input("What is the MPG? ")
# convert the input into a numeric (float) value
mpg = float(mpg)
# miles->km (1.609344) times gallons->litres reciprocal (0.2641720524),
# i.e. print the value converted to kilometres per litre
print(mpg * 1.609344 * 0.2641720524)
| true |
071b587323f6263c39f6109b66e6c95c4c91f1b5 | Python | Capitains/Flint | /tests/test_ingest.py | UTF-8 | 965 | 2.78125 | 3 | [] | no_license | from unittest import TestCase
from CapitainsFlint.ingest import MyCapytainLocalParser
class TestCapitainParser(TestCase):
    """Integration test for MyCapytainLocalParser against a bundled TEI XML."""
    def test_passages(self):
        """ Test that passage retrieving works well
        """
        parsed = MyCapytainLocalParser(
            "xml_resources/1294.002.xml",
            "urn:cts:latinLit:phi1294.phi002:perseus-lat2"
        )
        passages = parsed.getPassages()
        # Each passage is expected to be a (urn, text) pair.
        self.assertEqual(
            passages[0],
            ('urn:cts:latinLit:phi1294.phi002:perseus-lat2:1.pr.1', 'Spero me secutum in libellis meis tale temperamen-'),
            "Passages should be formatted well"
        )
        self.assertEqual(
            passages[4],
            ('urn:cts:latinLit:phi1294.phi002:perseus-lat2:1.1.2', 'Toto notus in orbe Martialis '),
            "tei:note should be removed"
        )
        self.assertEqual(
            len(passages),
            11,
            "All passages should be found"
        )
2c3fac9ba0b1175e0401c4df277653a3e8122f4d | Python | ivangar/COMP479 | /invertedIndex.py | UTF-8 | 5,205 | 2.6875 | 3 | [] | no_license | from nltk import word_tokenize
from bs4 import BeautifulSoup
from nltk.corpus import PlaintextCorpusReader
import re
import string
import json
import itertools
def generate_token_list():
    """Tokenize every Reuters .sgm file in ./reuters and persist the full
    (token, doc_id) list to files/unsorted_token_list.{txt,json}."""
    # load reuters files with the help of NLTK's PlaintextCorpusReader
    sgm_files = PlaintextCorpusReader("reuters", '.*\.sgm')
    token_list = []
    # use for pretty print the list
    # fix: both file handles below were previously leaked; use `with`.
    with open("files/unsorted_token_list.txt", 'w') as unsorted_token_list:
        for fileid in sgm_files.fileids():
            with open("reuters" + '/' + fileid) as f:
                sgm_file = f.read()
            parsed_sgm = BeautifulSoup(sgm_file, 'html.parser')
            for document_text in parsed_sgm.find_all('reuters'):
                block_tokenizer(document_text, token_list, unsorted_token_list)
    with open("files/unsorted_token_list.json", mode="w", encoding="utf-8") as myFile:
        json.dump(token_list, myFile)
def block_tokenizer(document_text, token_list, unsorted_token_list):
    """Tokenize one <REUTERS> document: strip markup, control characters and
    punctuation, then append (token, doc_id) pairs to *token_list* and echo
    each pair as JSON to the pretty-print file."""
    doc_id = int(document_text['newid'])
    doc_text = str(document_text.find('text'))
    raw = BeautifulSoup(doc_text, 'html.parser').get_text()
    # Remove stray STX/ETX control characters present in the corpus.
    raw = raw.replace("\u0002", '')
    raw = raw.replace("\u0003", '')
    # Replace punctuation with spaces so it never glues tokens together.
    for c in string.punctuation:
        raw = raw.replace(c, " ")
    tokens = word_tokenize(raw)
    for token in tokens:
        token_list.append((token, doc_id))
        print(json.dumps([token, doc_id]), file=unsorted_token_list)
def sort_tokens(unsorted_list):
    """Read the (token, doc_id) pairs from the JSON file *unsorted_list*,
    sort them by token, and write the result to files/sorted_tokens.json."""
    # fix: the input file handle was previously leaked; parse via json.load.
    with open(unsorted_list) as f:
        pairs = json.load(f)
    # Sort by term only (stable, so equal terms keep their relative order).
    sorted_list = sorted(pairs, key=lambda pair: pair[0])
    with open("files/sorted_tokens.json", mode="w", encoding="utf-8") as myFile:
        json.dump(sorted_list, myFile)
def remove_duplicates(sorted_list):
    """Collapse repeated (term, doc_id) pairs from *sorted_list* (a JSON file
    of pairs sorted by term) and write the unique pairs, in order, to
    files/sorted_unique_tokens.txt (pretty print) and .json.

    fix: the old implementation compared each pair only against the most
    recently kept one, so duplicates that were not adjacent slipped through
    (the input is sorted by term only, not by doc id).  It also crashed on
    an empty input list and leaked two file handles.
    """
    with open(sorted_list) as f:
        pairs = json.load(f)
    unique_tokens = []
    seen = set()
    # print to txt to pretty print
    with open("files/sorted_unique_tokens.txt", 'w') as unique_list:
        for term, doc_id in pairs:
            if (term, doc_id) in seen:
                continue
            seen.add((term, doc_id))
            unique_tokens.append((term, doc_id))
            print(json.dumps([term, doc_id]), file=unique_list)
    with open("files/sorted_unique_tokens.json", mode="w", encoding="utf-8") as myFile:
        json.dump(unique_tokens, myFile)
def generate_posting_list(sorted_list):
    """Turn the deduplicated, term-sorted (term, doc_id) pairs in the JSON
    file *sorted_list* into a naive inverted index of entries
    [term, [document_frequency, [doc_id, ...]]], written to
    files/naive_indexer.{txt,json}.

    fix: the old itertools.cycle() one-token lookahead wrapped around at the
    end of the list, so the final term was silently merged with the first
    term whenever the two matched.  itertools.groupby over the already
    term-sorted pairs is both simpler and correct; it also closes the
    previously leaked file handles.
    """
    with open(sorted_list) as f:
        pairs = json.load(f)
    naive_indexer = []
    # print to txt to pretty print
    with open("files/naive_indexer.txt", 'w') as naive_indexer_text:
        for term, group in itertools.groupby(pairs, key=lambda pair: pair[0]):
            postings = [doc_id for _, doc_id in group]
            entry = [term, [len(postings), postings]]
            naive_indexer.append(entry)
            print(json.dumps(entry), file=naive_indexer_text)
    with open("files/naive_indexer.json", mode="w", encoding="utf-8") as myFile:
        json.dump(naive_indexer, myFile)
#Use this helper function to test a small amount of data
def test_token_list():
    """Debug variant of generate_token_list: processes only documents with
    newid <= 4 from a single .sgm file, and additionally strips digits."""
    # load reuters files with the help of NLTK's PlaintextCorpusReader
    sgm_files = PlaintextCorpusReader("reuters", '.*\.sgm')
    token_list = []
    f = open("reuters/reut2-000.sgm")
    sgm_file = f.read()
    parsed_sgm = BeautifulSoup(sgm_file, 'html.parser')
    unsorted_token_list = open("files/unsorted_token_list.txt", 'w') #use for pretty print the list
    for document_text in parsed_sgm.find_all('reuters'):
        doc_id = int(document_text['newid'])
        # Only keep the first few documents for a quick test run.
        if doc_id > 4:
            continue
        doc_text = str(document_text.find('text'))
        raw = BeautifulSoup(doc_text, 'html.parser').get_text()
        raw = raw.replace("\u0002", '')
        raw = raw.replace("\u0003", '')
        for c in string.punctuation:
            raw = raw.replace(c, " ")
        # Digits are dropped here (unlike block_tokenizer).
        raw = re.sub(r"\d", "", raw)
        tokens = word_tokenize(raw)
        for token in tokens:
            token_list.append((token, doc_id))
            print(json.dumps([token, doc_id]), file=unsorted_token_list)
    with open("files/unsorted_token_list.json", mode="w", encoding="utf-8") as myFile:
        json.dump(token_list, myFile)
# Pipeline: tokenize the corpus, then sort, deduplicate, and build postings.
generate_token_list()
sort_tokens("files/unsorted_token_list.json")
remove_duplicates("files/sorted_tokens.json")
generate_posting_list("files/sorted_unique_tokens.json")
#test_token_list()
58eed63ed352549a8bca231ea4afb9fd3218b884 | Python | lyy-leticia/PythonTest1211 | /PythonProject/PythonTest1211/prac/prac13.py | UTF-8 | 867 | 4.875 | 5 | [] | no_license | # 迭代器
# Iteration is one of Python's most powerful features and is a way of
# accessing the elements of a collection.
# An iterator is an object that remembers its position during traversal.
# It starts from the first element of the collection and finishes once every
# element has been visited; an iterator only moves forward, never backward.
# Iterators have two basic building blocks: iter() and next().
# Strings, lists and tuples can all be used to create iterators:
# alist=[1,2,3,4,5]
# it=iter(alist)# create an iterator object
# print(next(it))# output the iterator's next element
# print(next(it))
# print(next(it))
# list=[1,2,3,4]
# it = iter(list) # create an iterator object
# for x in it:
#     print (x, end=" ")
import sys # import the sys module
# NOTE(review): the name "list" below shadows the built-in list type.
list=[1,2,3,4]
it = iter(list) # create an iterator object
# Drive the iterator by hand; exit the script when it is exhausted.
while True:
    try:
        print (next(it))
    except StopIteration:
        sys.exit()
087d979c1e112727a123c5af1e48da88de04d292 | Python | brookicv/machineLearningSample | /keras/pyimagesearch/nn/conv/lenet.py | UTF-8 | 1,236 | 2.71875 | 3 | [] | no_license | # -*- coding:utf-8 -*-
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dense
from keras import backend as K
class LeNet:
    """LeNet-style CNN: two CONV=>RELU=>POOL stages, one FC=>RELU stage,
    and a softmax classifier head."""
    @staticmethod
    def build(width,height,depth,classes):
        """Assemble and return the (uncompiled) Keras Sequential model.

        width/height/depth describe the input images; *classes* is the
        number of output categories.
        """
        model = Sequential()
        inputShape = (height,width,depth)
        # Respect the backend's channel ordering.
        if K.image_data_format() == "channels_first":
            inputShape = (depth,height,width)
        # first set of CONV => RELU => POOL
        model.add(Conv2D(20,(5,5),padding="same",input_shape=inputShape))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))
        # second set of CONV => RELU => POOL_layers
        model.add(Conv2D(50,(5,5),padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))
        # set of FC => RELU layers
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation("relu"))
        # softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model
3c382e181f3d24bdd1ad0754513f6937c57bd35e | Python | NestorMonroy/Courses-coursera | /Django-for-Everybody/src/other/excercies/week5/01.py | UTF-8 | 732 | 3.078125 | 3 | [] | no_license | import sqlite3
# Build a small SQLite database of names and ages, then print the rows as
# hex(name || age) to show how SQLite orders the concatenated text values.
conn = sqlite3.connect('week5.sqlite')
cur = conn.cursor()

# Start from a clean table on every run.
cur.execute('DROP TABLE IF EXISTS Ages')
cur.execute('''CREATE TABLE Ages( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL UNIQUE,
                                 name VARCHAR(128),
                                 age INTEGER) ''')

# Insert the sample rows (parameterized placeholders, one executemany call).
people = [('Winnifred', 16), ('Kaysey', 23), ('Meagan', 25), ('Joan', 18)]
cur.executemany('INSERT INTO Ages (name, age) VALUES (?, ?)', people)

cur.execute('SELECT hex(name || age) AS X FROM Ages ORDER BY X')
for row in cur:
    print(row)

conn.commit()  # fix: without a commit the inserts were rolled back on close
cur.close()
conn.close()   # fix: the connection itself was never closed
| true |
cd3be04adeb712ad763dfb2e80dd93d618f901d2 | Python | JannaKim/PS | /etc/14467_소가길을건너간이유1.py | UTF-8 | 411 | 3.203125 | 3 | [] | no_license | cow = []
# One observation bucket per id 1..10 (ids above come 1-based in the input).
[cow.append([]) for _ in range(10)]
# N observations follow, each "a b": bucket id and a 0/1 value.
N = int(input())
for _ in range(N):
    a, b = [int(i) for i in input().split()]
    cow[a-1].append(str(b))
# Count, per bucket, how many times consecutive recorded values change.
cnt=0
for cross in cow:
    if cross:
        now = cross[0]
        for road in cross:
            if now==road:
                pass
            else:
                cnt+=1
                now=road
print(cnt)
'''
8
3 1
3 0
6 0
2 1
4 1
3 0
4 0
3 1
'''
62a02043a9989c4af99db5e11f6b38171cfc5a69 | Python | seenu-g/Experiments | /py_examples/host_basic_API.py | UTF-8 | 2,497 | 2.875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from http.server import HTTPServer, BaseHTTPRequestHandler
import json
#{"1": "First", "2": "Second", "3": "Third", "4": "Fourth"}
# Load the key/value store that the request handlers read and mutate.
with open("data.json") as data_file:
    data = json.load(data_file)
#Defining a HTTP request Handler class
class ServiceHandler(BaseHTTPRequestHandler):
    """Tiny JSON key/value HTTP API backed by the module-level `data` dict
    (the mutating verbs persist it back to data.json)."""
    #sets basic headers for the server
    def _set_headers(self):
        """Send a 200 response with JSON headers, read the request body, and
        return it (with the bytes-literal wrapping stripped) as a string."""
        self.send_response(200)
        self.send_header('Content-type','text/json')
        length = int(self.headers['Content-Length'])
        #reads the contents of the request
        content = self.rfile.read(length)
        temp = str(content).strip('b\'')
        self.end_headers()
        return temp
    def do_GET(self):
        """Return the entire store as JSON."""
        #defining all the headers
        self.send_response(200)
        self.send_header('Content-type','text/json')
        self.end_headers()
        #prints all the keys and values of the json file
        self.wfile.write(json.dumps(data).encode())
    def do_VIEW(self):
        """Custom VIEW verb: return the single entry named by the body."""
        #dict var. for pretty print
        display = {}
        temp = self._set_headers()
        #check if the key is present in the dictionary
        if temp in data:
            display[temp] = data[temp]
            #print the keys required from the json file
            self.wfile.write(json.dumps(display).encode())
        else:
            error = "NOT FOUND!"
            self.wfile.write(bytes(error,'utf-8'))
            # NOTE(review): _set_headers already sent a 200 -- this late 404
            # cannot reach the client as a status line.  Confirm intent.
            self.send_response(404)
    def do_POST(self):
        """Store the request body under the next numeric key and persist."""
        temp = self._set_headers()
        key=0
        #getting key and value of the data dictionary
        # (the loop only serves to leave `key` holding the last key)
        for key,value in data.items():
            pass
        index = int(key)+1
        data[str(index)]=str(temp)
        #write the changes to the json file
        with open("data.json",'w+') as file_data:
            json.dump(data,file_data)
        self.wfile.write(json.dumps(data[str(index)]).encode())
    def do_PUT(self):
        """Overwrite one entry: byte 0 of the body is the (single-character)
        key, bytes 2 onward are the new value."""
        temp = self._set_headers()
        #seprating input into key and value
        x = temp[:1]
        y = temp[2:]
        #check if key is in data
        if x in data:
            data[x] = y
            #write the changes to file
            with open("data.json",'w+') as file_data:
                json.dump(data,file_data)
            self.wfile.write(json.dumps(data[str(x)]).encode())
        else:
            error = "NOT FOUND!"
            self.wfile.write(bytes(error,'utf-8'))
            self.send_response(404)
    def do_DELETE(self):
        """Delete the entry named by the body and persist the store."""
        temp = self._set_headers()
        #check if the key is present in the dictionary
        if temp in data:
            del data[temp]
            #write the changes to json file
            with open("data.json",'w+') as file_data:
                json.dump(data,file_data)
        else:
            error = "NOT FOUND!"
            self.wfile.write(bytes(error,'utf-8'))
            self.send_response(404)
# Serve the handler on localhost:8080 until interrupted.
server = HTTPServer(('127.0.0.1',8080),ServiceHandler)
server.serve_forever()
fef40f64eda50d4a3eb7191d18e9e891c8a23683 | Python | rebeccabilbro/orlo | /selection/classi.py | UTF-8 | 4,373 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# classi.py
# Title: Apply Classification to UCI Datasets for ROC Curves Tour
# Author: Rebecca Bilbro
# Date: 3/9/16
# Organization: District Data Labs
"""
Apply different classification methods to applicable datasets from UCI.
Use highest-scoring-estimator wins approach.
"""
#####################################################################
# Imports
#####################################################################
import csv
import numpy as np
from sklearn.svm import SVC
# from sklearn.lda import LDA
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.feature_extraction import DictVectorizer
from sklearn import metrics
from sklearn import cross_validation as cv
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
#####################################################################
# Global Variables
#####################################################################
estimators = [LogisticRegression(),GaussianNB(),KNeighborsClassifier(),\
DecisionTreeClassifier(),RandomForestClassifier()]
#####################################################################
# Classification
#####################################################################
def openFile(fname):
    """
    Opens data file.

    Reads *fname* with the csv module, sniffing the delimiter from the
    contents (';', ',' or tab), and returns the rows as a list of lists
    of strings.
    """
    # fix: open in text mode -- csv.reader requires str rows under Python 3
    # (binary mode crashed there); plain 'r' also still works on Python 2.
    with open(fname, 'r') as csvfile:
        dialect = csv.Sniffer().sniff(csvfile.read(), delimiters=';,\t')
        csvfile.seek(0)
        reader = csv.reader(csvfile, dialect)
        data = list(reader)
    return data
def labelFind(dataset):
    """
    Performs unique value count on each column in dataset.
    assumes that the column with the fewest unique values contains the labels
    for classification.

    Returns a (features, targets) tuple, where *features* is the dataset with
    the label column removed from every row (rows are modified in place) and
    *targets* is the list of label values.
    """
    # Collect the unique values seen in each column, preserving column order.
    counts = {}
    for line in dataset:
        for key in range(len(line)):
            if key not in counts:
                counts[key] = []
            if line[key] not in counts[key]:
                counts[key].append(line[key])
    # The column(s) with the fewest distinct values; ties go to the leftmost.
    fewest = min(len(values) for values in counts.values())
    label_col = [k for k in counts.keys() if len(counts[k]) == fewest]
    col = label_col[0]
    targets = [row[col] for row in dataset]
    features = []
    for row in dataset:
        # fix: delete by index -- the old row.remove(row[col]) removed the
        # first cell with a *matching value*, which could be a different
        # column whenever the label value also appeared earlier in the row.
        del row[col]
        features.append(row)
    return tuple([features, targets])
def classi(features, targets):
    """
    Takes data as input and runs different classifiers.
    Outputs a dict where the classifier name is the key, and the
    values are the expected and predicted values.

    (More precisely: maps each fitted estimator object to a
    (precision, recall, accuracy, f1) tuple on an 8% hold-out split.)
    """
    splits = cv.train_test_split(features, targets, test_size=0.08)
    X_train, X_test, y_train, y_test = splits
    results = {}
    for estimator in estimators:
        model = estimator
        model.fit(X_train, y_train)
        expected = y_test
        predicted = model.predict(X_test)
        # NOTE(review): the default averaging of these sklearn metrics
        # presumes a binary target -- confirm for multiclass datasets.
        precision = metrics.precision_score(expected, predicted)
        recall = metrics.recall_score(expected, predicted)
        accuracy = metrics.accuracy_score(expected, predicted)
        f1 = metrics.f1_score(expected, predicted)
        results[model] = (precision,recall,accuracy,f1)
    return results
# Demo driver (Python 2 era -- note the print statement below): one-hot
# encode the tic-tac-toe dataset and race the classifiers on it.
if __name__ == '__main__':
    bundle = labelFind(openFile("data/tic-tac-toe.data"))
    labels = bundle[1]
    label_enc = LabelEncoder()
    encoded_labels = label_enc.fit_transform(labels)
    features = bundle[0]
    # Re-shape each row into a {column_index: value} dict for DictVectorizer.
    mapping = []
    for instance in range(len(features)):
        D = dict()
        for f in range(len(features[instance])):
            D[f] = features[instance][f]
        mapping.append(D)
    data_enc = DictVectorizer(sparse=False)
    encoded_data = data_enc.fit_transform(mapping)
    print classi(encoded_data, encoded_labels)
    # bundle = labelFind(openFile("data/breast-cancer-wisconsin.data"))
    # print bundle[0][0]
    # print bundle[1][0]
    #
    # bundle = labelFind(openFile("data/balance-scale.data"))
    # print bundle[0][0]
    # print bundle[1][0]
    #
    # bundle = labelFind(openFile("data/isolet5.data"))
    # print bundle[0][0]
    # print bundle[1][0]
| true |
d33f75f0aa04690a3192cecca0be97c9f49fc6c3 | Python | dareryl/exploring | /fx.py | UTF-8 | 1,599 | 2.78125 | 3 | [] | no_license | #!/Users/darylchan/opt/anaconda3/bin/python3
import sys
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Silence the InsecureRequestWarning triggered by the verify=False calls below.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
import csv
import pandas as pd
import datetime
# FX movement report: fetch today's exchange rates for a base currency,
# find the most recent prior day whose rates differ, and write each
# currency's rise/fall and percentage change to latest_currency.csv.
if len(sys.argv) != 2:
    print('Usage: '+sys.argv[0]+' currency')
    exit(1)
currency = sys.argv[1]
main_url = 'https://api.exchangeratesapi.io/'
base = '?base='+currency
## Latest currency
today = datetime.date.today()
today_url = main_url + str(today) + base
r0 = requests.get(today_url, verify=False)
j0 = r0.json()
newrates = j0['rates']
## Previous currency: walk back up to 30 days until the payload differs
## (the API repeats the latest data over weekends/holidays).
for i in range(1, 31):
    prev = today - datetime.timedelta(days=i)
    prev_url = main_url + str(prev) + base
    r1 = requests.get(prev_url, verify=False)
    j1 = r1.json()
    if j1 != j0:
        break
prevrates = j1['rates']
# Bug fix: use a context manager so the CSV file is closed even if a rate
# lookup below raises (the original open()/close() pair leaked the handle
# on any error).
with open('latest_currency.csv', 'w', encoding="utf8", newline='') as fp:
    new_writer = csv.DictWriter(fp, fieldnames=['Country', 'Rate to '+currency, 'Rise/Fall', 'percentage change'])
    new_writer.writeheader()
    for row in newrates:
        if row != currency:
            percent = ((prevrates[row] - newrates[row])/prevrates[row])*100
            # NOTE(review): currencies with an unchanged rate (percent == 0)
            # are omitted from the report -- confirm this is intended.
            if percent > 0:
                new_writer.writerow({'Country': row, 'Rate to '+currency: newrates[row], 'Rise/Fall': 'Rise', 'percentage change': "%.2f" % percent})
            if percent < 0:
                new_writer.writerow({'Country': row, 'Rate to '+currency: newrates[row], 'Rise/Fall': 'Fall', 'percentage change': "%.2f" % percent})
df = pd.read_csv('latest_currency.csv')  # re-read for inspection; currently unused
| true |
3b6e73b9b8923f9e0d4883a59d833fe8807207dc | Python | dochead/LeagueTable | /tests/test_matchParser.py | UTF-8 | 1,223 | 2.765625 | 3 | [
"BSD-3-Clause"
] | permissive | import unittest
from leaguetable import match_parser
from leaguetable.league import League
class TestMatchParser(unittest.TestCase):
    """Exercise MatchParser output against the League scoring rules."""

    def test_parse(self):
        # Feed a small season of fixtures through the parser into a league.
        parser = match_parser.MatchParser()
        league = League('Futbolico')
        fixtures = (
            u'Lions 3, Snakes 3',
            u'Tarantulas 1, FC Awesome 0',
            u'Lions 1, FC Awesome 1',
            u'Tarantulas 3, Snakes 1',
            u'Lions 4, Grouches 0',
        )
        for fixture in fixtures:
            league.add_match(parser.parse(fixture))
        expected_table = [
            {u'points': 6, u'position': 1, u'team': u'Tarantulas'},
            {u'points': 5, u'position': 2, u'team': u'Lions'},
            {u'points': 1, u'position': 3, u'team': u'FC Awesome'},
            {u'points': 1, u'position': 3, u'team': u'Snakes'},
            {u'points': 0, u'position': 5, u'team': u'Grouches'},
        ]
        self.assertEqual(league.get_league_table, expected_table)

    def test_parse_nonsense(self):
        # Unparseable input must surface as a ValueError.
        parser = match_parser.MatchParser()
        league = League(u'Futbolico')
        with self.assertRaises(ValueError):
            league.add_match(parser.parse(u'No football for you!'))
# Allow running this test module directly with `python test_matchParser.py`.
if __name__ == u'__main__':
    unittest.main()
| true |
5462c969510762a2a7b8a004809a27abdbe40585 | Python | jaredparmer/ThinkPythonRepo | /time.py | UTF-8 | 764 | 3.765625 | 4 | [] | no_license | import time
# this function returns the time of day in hrs, min, and secs, plus the
# number of days since the epoch
def clock_from_epoch(total_secs):
    """Split epoch seconds into (hours, minutes, seconds, days_since_epoch).

    Pure helper (exposed for testing) so the arithmetic can be verified
    without reading the real clock.
    """
    # divmod chains replace the original modulo arithmetic, which raised
    # ZeroDivisionError during hour 0 of the day (`mins_today % (hrs_today * 60)`)
    # and during minute 0 of any hour (`secs_today % (mins_today * 60)`).
    total_days, secs_today = divmod(total_secs, 24 * 60 * 60)
    curr_hrs, rem = divmod(secs_today, 60 * 60)
    curr_mins, curr_secs = divmod(rem, 60)
    return curr_hrs, curr_mins, curr_secs, total_days


def time_report():
    """Print the current GMT time (h:m:s) and the days elapsed since the epoch."""
    curr_hrs, curr_mins, curr_secs, total_days = clock_from_epoch(int(time.time()))
    print(f'The current time is {curr_hrs}:{curr_mins}:{curr_secs} GMT.')
    print(f'It has been {total_days} days since the epoch.')
def main():
    """Script entry point: print the current time report."""
    time_report()
# Run only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| true |
4e67b6c941b6229cd561bdaa5eaddac1da0590d9 | Python | Maxondria/python-flask-rest-api | /fundamentals/inheritence.py | UTF-8 | 661 | 3.796875 | 4 | [] | no_license | class Student:
    def __init__(self, name, school):
        """Create a student with no marks recorded yet."""
        self.name = name
        self.school = school
        self.marks = []  # grades; appended externally
    def average(self):
        """Return the mean mark (raises ZeroDivisionError when no marks exist)."""
        return sum(self.marks) / len(self.marks)
    @classmethod
    def friend(cls, origin, friend_name, salary):
        """Build a friend attending the same school as `origin`.

        NOTE(review): this passes three positional args to cls(...);
        Student.__init__ only accepts (name, school), so it works only on
        subclasses taking a third argument, and WorkingStudent's reversed
        (school, name) order means name/school arrive swapped -- confirm.
        """
        return cls(friend_name, origin.school, salary)
class WorkingStudent(Student):
    """A Student who also earns a salary.

    NOTE(review): the constructor takes (school, name) -- the reverse of
    Student's (name, school) -- and `salary` is stored as the raw *args
    tuple rather than a number. Both look accidental; confirm before
    relying on these attributes.
    """
    def __init__(self, school, name, *args):
        super().__init__(name, school)
        self.salary = args  # tuple of extra positional args, e.g. (20.0,)
# Demo. WorkingStudent's signature is (school, name, *salary).
maxon = WorkingStudent('Oxford', 'Maxon', 20.00)
# print(maxon.salary)
# NOTE(review): Student.friend passes (name, school) order into
# WorkingStudent's reversed (school, name) signature, so the attributes
# come out swapped: this prints 'Timothy' as the school. Likely a bug.
maxon_friend = WorkingStudent.friend(maxon, "Timothy", 45.0)
print(maxon_friend.school)
print(maxon_friend.salary)
| true |
92ca31202382aa297986137e7e2e47a5fc19e1c9 | Python | trietptm/loadbalancer-finder | /loadbalancer-finder/src/updater.py | UTF-8 | 2,590 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python
'''
This code is taken from GoLISMERO - Simple web mapper. Copyright (C) 2011
Original idea: Daniel Garcia aka cr0hn - dani@iniqua.com
Author and code writer: Henri Halo - henri@nerv.fi
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
try:
import sys
import git
import httplib
except ImportError, e:
sys.exit(e)
# FQDN of SCM (host part only, no scheme)
scm_url = 'code.google.com'
# Location of project in FQDN (path appended to the host)
scm_url_location = '/p/loadbalancer-finder/'
def check_scm_url_aliveness(scm_url, scm_url_location):
    """Return True if https://<scm_url><scm_url_location> answers HTTP 200.

    Returns False for any other status, and None when either parameter is
    missing. Note: the HTTPS certificate is not validated.
    """
    # Bug fix: the original `not scm_url and scm_url_location` only rejected
    # a falsy URL combined with a truthy location; we must reject the call
    # whenever EITHER argument is missing.
    if not (scm_url and scm_url_location):
        print('Error. Not all parameters defined in check_scm_url_aliveness.')
        return
    print('Testing for URL aliveness: %s' % 'https://' + scm_url + scm_url_location)
    conn = httplib.HTTPSConnection(scm_url)
    conn.request('GET', scm_url_location)
    res = conn.getresponse()
    if res.status == 200:  # was int('200'); same value, no string round-trip
        print('URL alive: %s' % 'https://' + scm_url + scm_url_location)
        return True
    else:
        # Everything else (4xx, 5xx, redirects, ...) counts as not alive.
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
        print('URL not alive: %s' % 'https://' + scm_url + scm_url_location)
        return False
def update_git_repo(scm_url, scm_url_location):
    """Verify the upstream URL, then force-checkout the local GIT repo.

    Returns None; aborts early when a parameter is missing or the URL does
    not answer HTTP 200.
    """
    # Bug fix: reject the call when EITHER argument is missing (the original
    # precedence only fired for a falsy url with a truthy location). The
    # message also named the wrong function (copy-paste from the checker).
    if not (scm_url and scm_url_location):
        print('Error. Not all parameters defined in update_git_repo.')
        return
    if not check_scm_url_aliveness(scm_url, scm_url_location):
        print('Update failed as URL did not return error code 200.')
        return
    full_url = scm_url + scm_url_location
    print('Updating from URL: %s' % 'https://' + full_url)
    # Discard local modifications and check the current branch out again.
    git.Git("./").checkout(force=True)
def update():
    """Verify the module-level SCM URL and update the local GIT repo."""
    update_git_repo(scm_url, scm_url_location)
d145727b3fedf2f7669ac60a500fabc1ed8e1d9c | Python | DiegoCacau/infinitygames | /running.py | UTF-8 | 3,564 | 2.765625 | 3 | [] | no_license | from PPlay.sprite import *
from PPlay.sound import *
import random
import time
class running():
    """Two-lane car-dodging minigame built on PPlay sprites.

    The world is a matrix of 0/1 cells read from Running/running.txt; rows
    are rotated each frame to scroll the track while enemy cars fall toward
    the player, who switches between the two lanes at
    x = 14*wall_x + delta and x = 18*wall_x + delta.
    """
    def __init__(self, window):
        # Read the world matrix (rows of space-separated 0/1 ints).
        a = open("Running/running.txt", "r")
        self.matrizMundo = []
        for i in a:
            linha = []
            for j in i.split():
                linha.append(int(j))
            self.matrizMundo.append(linha)
        a.close()
        # One reusable "wall" sprite drawn at every matrix cell equal to 1.
        self.wall = Sprite("Running/pixel.png")
        self.wall.set_total_duration(0)
        # Cell size in pixels, derived from window/sprite dimensions.
        self.wall_x = int(window.width / self.wall.width) / 2 - 3
        self.wall_y = int(window.height / self.wall.height) / 2 - 3
        self.car_user = Sprite("Running/car1_1.png", 1)
        self.car_user.set_total_duration(0)
        self.car_enemy = Sprite("Running/car2_1.png", 1)
        self.car_enemy.set_total_duration(0)
        self.delta = 10  # horizontal offset applied to both lane positions
        # Player starts in the left lane at the bottom of the window.
        self.car_user.set_position(14 * self.wall_x +
            self.delta, window.height - self.car_user.height)
        self.car_enemy.set_position(14 * self.wall_x +
            self.delta, -self.car_enemy.height)
        # Difficulty timer (first bump 15s after the +10 head start below).
        self.tempoAnterior = time.time()+10
        self.intialTime = time.time()  # score = seconds survived since here
        self.audio = Sound("Sounds/shot.ogg")
        self.audio.set_volume(100)
        self.audio_end = Sound("Sounds/end.ogg")
        self.audio_end.set_volume(100)
        # Pre-spawn enemy cars above the screen, randomly assigned to a lane,
        # spaced by at least two car heights plus 30-50 px.
        self.car_enemies = []
        xx = 0
        while xx < window.height:
            ene = Sprite("Running/car2_1.png", 1)
            ene.set_total_duration(0)
            xx = xx + ene.height * 2 + random.randint(30, 50)
            if (random.randint(0, 1) == 0):
                ene.set_position(14 * self.wall_x + self.delta, -xx)
                self.car_enemies.append(ene)
            else:
                ene.set_position(18 * self.wall_x + self.delta, -xx)
                self.car_enemies.append(ene)
        self.lastClick = window.last_time  # debounce for lane switching
    def move(self,window,keyboard,delay):
        """Handle LEFT/RIGHT lane switching (debounced) and draw the player."""
        currentTime = window.last_time
        if (currentTime - self.lastClick > delay + 20):
            if keyboard.key_pressed("RIGHT") and \
            self.car_user.x == 14 * self.wall_x + self.delta:
                self.car_user.set_position(18 * self.wall_x + self.delta, self.car_user.y)
                self.audio.play()
                self.lastClick = window.last_time
            elif keyboard.key_pressed("LEFT") and \
            self.car_user.x == 18 * self.wall_x + self.delta:
                self.car_user.set_position(14 * self.wall_x + self.delta, self.car_user.y)
                self.audio.play()
                self.lastClick = window.last_time
        self.car_user.draw()
    def run(self, window, MODE, score):
        """Draw/scroll the track, advance enemies, detect collisions.

        Returns the (possibly updated) MODE and score; MODE becomes 9 when
        the player is hit, and score is the seconds survived.
        """
        # Draw every wall cell of the world matrix.
        for i in range(len(self.matrizMundo)):
            for j in range(len(self.matrizMundo[0])):
                if int(self.matrizMundo[i][j]) == 1:
                    posX = j * self.wall_x
                    posY = window.height - i * self.wall_y
                    self.wall.set_position(posX,posY)
                    self.wall.draw()
        # NOTE(review): `i` here is the last loop index; this rotates the
        # first row to the back only when the matrix has a single row --
        # confirm the intended scroll behaviour.
        if i==0:
            lin = self.matrizMundo[0]
            self.matrizMundo.remove(lin)
            self.matrizMundo.append(lin)
        for car in self.car_enemies:
            # Enemies move down one cell per frame; respawn above when past
            # the bottom, in a random lane.
            car.set_position(car.x, car.y + self.wall_y)
            car.draw()
            if(car.y - car.height > window.height):
                if random.randint(0,1)==0:
                    car.set_position(14 * self.wall_x + self.delta, - car.height+20)
                else:
                    car.set_position(18 * self.wall_x + self.delta, - car.height + 20)
            # Collision only counts when both cars occupy the same lane.
            if car.collided(self.car_user) and self.car_user.x == car.x:
                MODE = 9
                self.audio_end.play()
                score = int(time.time() - self.intialTime)
        self.currentTime = window.last_time
        return MODE,score
    def increaseDificulty(self,GAME_SPEED):
        """Bump GAME_SPEED by 10 every 15 seconds; return the new value."""
        if time.time() - self.tempoAnterior >= 15:
            self.tempoAnterior = time.time()
            GAME_SPEED+=10
        return GAME_SPEED
    def game(self,GAME_SPEED,score,MODE,window,keyboard,delay):
        """One frame of the minigame: input, world update, difficulty."""
        self.move(window,keyboard,delay)
        MODE, score = self.run(window, MODE, score)
        GAME_SPEED = self.increaseDificulty(GAME_SPEED)
        return GAME_SPEED,score,MODE
| true |
f83d54af30e4d04dd0f97bc7daf7404e35de2853 | Python | latchdevel/latch-moneybox | /MoneyBoxPython/mb_unpair.py | UTF-8 | 804 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) ElevenPaths 2016
import latch
'''
Python Script to unpair your latch account, The latch appId and secret ID are necessary
'''
## Set the needed params (redacted credentials for the Latch API)
APP_ID = "******vajh3********"
SECRET_KEY = "****************UEesj********************"
accountid = "****************************rkYC4m*****************************"
## Ask the latch api to unpair (Python 2 syntax throughout this script)
try:
    api = latch.Latch(APP_ID,SECRET_KEY)
    response = api.unpair(accountid)
except Exception, e:
    # NOTE(review): if this branch runs, `response` is never bound and the
    # `if response:` below raises NameError -- confirm intended.
    print "Latch API error :" + str(e)
## Process the Latch response and confirm the unpair process was right.
if response:
    if response.get_data():
        print response.get_data()
        print "Unpair ok"
    else:
        print "error"
        print response.get_error()
else:
    print "Network error"
| true |
6ae27b4361889790638465f7bc0d1e8738765ae9 | Python | keepnad/py_radio | /py_radio.py | UTF-8 | 6,677 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python3
import requests
import vlc
import time
import signal
import sys
# Run a listener to exit nicely on ctrl+c
def exit_handler(signal, frame):
    """SIGINT handler: print a notice and terminate the process cleanly."""
    print(' SIGINT caught -- Exiting...')
    sys.exit(0)
# return a track name given track id
def name_from_id(track_id):
    """Return a track's title from the iHeart QA catalog API given its id."""
    response = requests.get(
        'http://us-qa.api.iheart.com/api/v1/catalog/getTrackByTrackId',
        headers={'Accept': 'application/json'},
        params=(('trackId', track_id),),
    )
    return response.json()['track']['title']
# return an album genre given album id
def genre_from_album(album_id):
    """Return the genre of the album's first track bundle via the iHeart QA API."""
    url = 'http://us-qa.api.iheart.com/api/v1/catalog/getAlbumsByAlbumIds'
    headers = {'Accept': 'application/json'}
    params = (
        ('albumId', album_id),
    )
    # Only the first bundle's genre is reported.
    return requests.get(url, headers=headers, params=params).json()['trackBundles'][0]['genre']
# return all stations in a dictionary
def get_all_stations():
    """Fetch every live station (all markets, no limit) from the iHeart QA API."""
    url = 'http://us-qa.api.iheart.com/api/v2/content/liveStations'
    headers = {'Accept': 'application/json'}
    params = (
        ('allMarkets', 'true'),
        ('limit', '-1'),   # -1 = no result cap
        ('offset', '0'),
    )
    return requests.get(url, headers=headers, params=params).json()
# return stations near a given city
def get_locational_stations(city):
    """Fetch live stations for the given city from the iHeart QA API."""
    url = 'http://us-qa.api.iheart.com/api/v2/content/liveStations'
    headers = {'Accept': 'application/json'}
    params = (
        ('allMarkets', 'false'),
        ('limit', '-1'),   # -1 = no result cap
        ('offset', '0'),
        ('city', city),
    )
    return requests.get(url, headers=headers, params=params).json()
# return stations near the requesting IP
def get_local_stations():
    """Fetch live stations near the caller's IP from the iHeart QA API."""
    query = (
        ('allMarkets', 'false'),
        ('limit', '-1'),
        ('offset', '0'),
        ('useIP', 'true'),
    )
    return requests.get(
        'http://us-qa.api.iheart.com/api/v2/content/liveStations',
        headers={'Accept': 'application/json'},
        params=query,
    ).json()
# get stream urls, names, and descriptions from a bunch of stations
def load_station_dicts(a):
    """Extract per-station stream URLs, names and descriptions.

    a: API payload with 'total' (int) and 'hits' (list of station dicts).
    Returns (station_urls, station_names, station_descs, last_index) where
    each dict is keyed by the station's index; station_urls values map the
    short keys below to the corresponding stream URL or None when absent.
    """
    station_urls = {}
    station_names = {}
    station_descs = {}
    # Map our short keys to the API's stream-field names. This replaces five
    # copy-pasted try/except blocks; stations may lack 'streams' entirely or
    # any subset of the fields, yielding None exactly as before.
    stream_fields = (
        ('sec_shout', 'secure_shoutcast_stream'),
        ('shout', 'shoutcast_stream'),
        ('sec_hls', 'secure_hls_stream'),
        ('hls', 'hls_stream'),
        ('pls', 'pls_stream'),
    )
    total = a['total']
    for x in range(total):
        hit = a['hits'][x]
        streams = hit.get('streams') or {}
        station_urls[x] = {key: streams.get(field) for key, field in stream_fields}
        station_names[x] = hit['name']
        station_descs[x] = hit['description']
        print(x)  # progress/debug output, preserved from the original
    # Bug fix: the original returned the loop variable, which is undefined
    # (NameError) when total == 0; total - 1 is equivalent otherwise.
    return station_urls, station_names, station_descs, total - 1
def main():
    """Interactive terminal radio: list local stations, play one via VLC."""
    print('----- Terminal Radio -----')
    print('\nAvailable local stations:\n')
    signal.signal(signal.SIGINT, exit_handler)
    # create instance of py-vlc (loop the stream, fullscreen flag for video)
    instance = vlc.Instance('--input-repeat=-1', '--fullscreen')
    player = instance.media_player_new()
    # read in live stations near the caller's IP
    a = get_local_stations()
    # dicts to store station values, keyed by station index
    station_urls, station_names, station_descs, max_num = load_station_dicts(a)
    #print(station_urls)
    #return
    # loop to run radio after stopping, until quit
    while True:
        stop = False  # NOTE(review): never used -- candidate for removal
        for k, v in station_names.items():
            print('%d:\t%s - %s' % (k + 1, station_names[k], station_descs[k]))
        # select a station to listen to (1-based for the user, 0-based inside)
        good_input = False
        print('\nPick a station:')
        while good_input == False:
            try:
                channel_num = int(input())
                channel_num -= 1
            except ValueError:
                print('Enter a number corresponding to a station')
                continue
            if channel_num > max_num or channel_num < 0:
                print('Enter a number corresponding to a station')
                continue
            else:
                good_input = True
        #print(requests.get(station_urls[channel_num]))
        #return
        # if it is an hls stream, get metadata: follow the HLS playlist to a
        # variant playlist and scrape title/artist/artwork from its 5th line.
        try:
            b = requests.get(station_urls[channel_num]['hls'])
            #print('works here')
            c = b.text.split('\n')
            d = requests.get(c[2])
            e = d.text.split('\n')
            e[4] = e[4].replace('\\', '')
            #print(e[4])
            title_start = e[4].find('title') + 7
            title_end = e[4].find('"', title_start)
            #print(e[4][title_start])
            #print(e[4][title_end])
            title = e[4][title_start : title_end]
            artist_start = e[4].find('artist') + 8
            artist_end = e[4].find('"', artist_start)
            #print(e[4][artist_start])
            #print(e[4][artist_end])
            artist = e[4][artist_start : artist_end]
            img_url_start = e[4].find('amgArtworkURL') + 15
            img_url_end = e[4].find('"', img_url_start)
            #print(e[4][img_url_start])
            #print(e[4][img_url_end])
            img_url = e[4][img_url_start : img_url_end]
            print('Now Playing:', title,'-', artist)
        except KeyError:
            title = ''
            artist = ''
            img_url = ''
        except IndexError:
            title = ''
            artist = ''
            img_url = ''
        except:
            # NOTE(review): bare except hides all other errors (including
            # network failures) behind a retry prompt -- confirm intended.
            print('Stream failed to open. Press enter to try another.\n')
            sys.stdin.read(1)
            continue
        # play stream
        print(station_names[channel_num], '-', station_descs[channel_num])
        # print('stream URL:', url)
        media = instance.media_new(station_urls[channel_num]['hls'])
        player.set_media(media)
        player.play()
        time.sleep(2)
        print('(q)uit program or (s)top playback')
        # Block on single-character commands until quit or stop.
        while True:
            sig = sys.stdin.read(1)
            if sig == 'q':
                print('Exiting...')
                sys.exit(0)
            elif sig == 's':
                print('Stopping playback...')
                player.stop()
                break
if __name__ == "__main__":
    main()
| true |
7a39c3481ff8050a905479b262fdc3715cdbf8b9 | Python | GustavSB/pepper-code | /mgribb3n-pepper-5bababb73a86/Weather/WeatherAnswers.py | UTF-8 | 3,152 | 3.21875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from time_utilities import *
from weather import WeatherService, OpenWeatherMap
class WeatherAnswers(object):
    """Build spoken weather sentences (English or Norwegian) for a robot.

    Wraps OpenWeatherMap lookups and translates English condition/day words
    to Norwegian via a fixed dictionary. The "\\rspd=80\\" prefix is a
    speech-rate tag for the TTS engine.
    """
    def __init__(self, language):
        self.default_language = language
        self.weather = OpenWeatherMap()
        self.time_util = TimeUtilities()
        # English -> Norwegian lookup for weather conditions and weekdays.
        self.words = {"thunderstorm":"Lyn og Torden",
                      "drizzle":"Yr",
                      "rain":"Regn",\
                      "snow":"Snø",\
                      "atmosphere":"Atmosfære",\
                      "clear":"Klar himmel",\
                      "clouds":"Overskyet",\
                      "extreme":"Ekstrem vær",
                      "monday":"mandag",
                      "tuesday": "tirsdag",
                      "wednesday": "onsdag",
                      "thursday": "torsdag",
                      "friday": "fredag",
                      "saturday": "lørdag",
                      "sunday": "søndag"}
    def set_default_language(self, language):
        """Change the default answer language."""
        self.default_language = language
    def get_current_weather_sentence(self, language, location):
        """Return a spoken-sentence string describing the current weather.

        NOTE(review): the temperature always comes from the hard-coded
        "Tennfjord" lookup in __get_weather, not from `location` -- confirm.
        """
        status, temperature = self.__get_weather()
        print(temperature)
        temperature = int(round(float(temperature)))
        if language == "English":
            weather_sentence = "\\rspd=80\\ The weather in " + str(location) + " is " + status.lower() + \
                               ". The temperature is " + str(temperature) + " degrees. "
        else:
            weather_sentence = "I " + str(location) + " i dag blir det " + self.translate(status) + \
                               ". Det er omtrent " + str(temperature) + " grader ute. "
        return weather_sentence
    def get_forecast_weather_sentence(self, language, location, date=None):
        """Return a spoken forecast sentence for `date` (default: tomorrow)."""
        status, minTemp, maxTemp, date = self.__get_forecast(language, location, date)
        minTemp = int(round(float(minTemp)))
        maxTemp = int(round(float(maxTemp)))
        if language == "English":
            forecast = "\\rspd=80\\ The forecast for " + str(location) + " " + \
                       TimeUtilities.get_day_string(date).lower() + \
                       " is " + status.lower() + ". The temperature will be between " + \
                       str(minTemp) + " and " + str(maxTemp) + " degrees. "
        else:
            forecast = "Værmeldingen for " + str(location) + " på " + self.translate(TimeUtilities.get_day_string(date)) \
                       + " er " + self.translate(status) + "... Temperaturen vil bli mellom " + str(minTemp) \
                       + " og " + str(maxTemp) + " grader."
        return forecast
    def translate(self, word, from_lang="ENG", to_lang="NOR"):
        """Translate an English condition/weekday word to Norwegian.

        NOTE(review): returns None for unknown words, which makes the string
        concatenation above raise TypeError; from_lang/to_lang are unused.
        """
        word = word.lower()
        return self.words.get(word)
    def __get_forecast(self, language, location, date=None):
        # Defaults to tomorrow when no date is supplied.
        if date==None:
            date = self.time_util.get_tomorrow()
        weather_forecast_raw = self.weather.get_raw_forecast(location, date=date)
        return weather_forecast_raw[0], weather_forecast_raw[1], weather_forecast_raw[2], date
    def __get_weather(self):
        # Current conditions for the hard-coded home location.
        weather_raw = self.weather.get_raw_weather("Tennfjord")
        return weather_raw[0], weather_raw[1]
bb4b369d4a7e8fde12b97bcf9ae89d8972f1bbb8 | Python | Le0nerdo/ot-harjoitustyo-1 | /projekti/src/event_queue.py | UTF-8 | 1,065 | 3.265625 | 3 | [] | no_license | import pygame
from collections import deque
class Eventqueue():
    """FIFO of arrow-key presses translated into 30-pixel movement deltas."""
    def __init__(self):
        self.events = deque()  # pending direction codes: "L"/"R"/"U"/"D"
    def get_event(self):
        """Drain pygame's event queue, recording arrow keys; quit on close."""
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    self.events.append("L")
                elif event.key == pygame.K_RIGHT:
                    self.events.append("R")
                elif event.key == pygame.K_UP:
                    self.events.append("U")
                elif event.key == pygame.K_DOWN:
                    self.events.append("D")
            elif event.type == pygame.QUIT or event.type == pygame.K_ESCAPE:
                # NOTE(review): `event.type == pygame.K_ESCAPE` compares an
                # event TYPE to a KEY code, so Escape likely never triggers
                # this branch -- confirm intended.
                pygame.quit()
    def return_event(self):
        """Pop the oldest keypress and return its (dx, dy); None when empty."""
        if len(self.events) > 0:
            keypress = self.events.popleft()
            if keypress == "L":
                return(-30,0)
            elif keypress == "R":
                return(30,0)
            elif keypress == "U":
                return(0,-30)
            elif keypress == "D":
                return(0,30)
f61a376945f6380f37e878530d051e1903a1a930 | Python | algemi/guess-number | /guess-number.py | UTF-8 | 928 | 4.125 | 4 | [] | no_license | import random
# Guess-the-number game: up to 7 attempts at a random 1-100 target.
chances = 0
print('Welcome to the game "Guess a number"')
number = random.randint(1,100)
while chances < 7:
    user_guess = int(input('Guess a number between 1 and 100: '))
    #while True:
    #user_guess = User_Guess()
    #comp_gen = Computer_Generator()
    if user_guess == number:
        print('Congratulations, you win!')
        break
    elif user_guess < number:
        print(user_guess, ' is too low. Try again.')
        #User_Guess()
    elif user_guess > number:
        print(user_guess, ' is too high. Try again.')
        #User_Guess()
    chances += 1
    if chances == 7:
        # NOTE(review): `number` is rebound to a str here just for printing.
        number = str(number)
        print('You lose! The correct number is ' + number + '.')
    # NOTE(review): this prompt runs after EVERY guess, and answering "yes"
    # never resets `chances`/`number`, so "play again" does not restart the
    # game -- confirm intended behaviour.
    user_option = input('Play again? (yes/no) ')
    if user_option in ['yes', 'Yes', 'y', 'Y', '']:
        pass
    elif user_option in ['No', 'no']:
        break
    else:
        break
| true |
5db0ee64e633155f29183661d6bc1c969f36eb55 | Python | hyliush/ccks-2021-task3-baseline | /dataset.py | UTF-8 | 8,533 | 2.734375 | 3 | [] | no_license | import pandas as pd
import torch
from torch.utils.data import (DataLoader,TensorDataset)
import logging
from tqdm import tqdm
import os
from config import Config
from fastNLP import cache_results
# Module-wide logging setup: timestamped INFO-level format shared by all loggers.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, **kwargs):
        """Build an example from keyword fields.

        Recognised keys:
            _id: unique identifier for the example (optional).
            text_a: untokenized text of the first sequence.
            text_b: untokenized text of the second sequence (optional;
                defaults to the empty string for single-sequence tasks).
            label: example label (optional; absent for test examples).
        """
        self.guid = kwargs.get('_id')
        self.text_a = kwargs.get('text_a')
        self.text_b = kwargs.get('text_b', '')
        self.label = kwargs.get('label')
class InputFeatures(object):
    """Tensor-ready features for one example.

    `choices_features` is the (tokens, input_ids, attention_mask,
    token_type_ids) tuple built by the converter; the raw tokens at
    index 0 are intentionally dropped.
    """

    def __init__(self, example_id, choices_features, label):
        self.example_id = example_id
        self.label = label
        self.choices_features = {
            'input_ids': choices_features[1],
            'attention_mask': choices_features[2],
            'token_type_ids': choices_features[3],
        }
class DataSet(object):
    """Turn query/title JSON-lines (or CSV) data into a PyTorch DataLoader
    of BERT-style (input_ids, attention_mask, token_type_ids, label) tensors.
    """
    def __init__(self, tokenizer, verbose=1,use_tqdm=True):
        # tokenizer: a BERT-style tokenizer (tokenize / convert_tokens_to_ids).
        # verbose=1 logs the first converted example; use_tqdm shows progress.
        self.tokenizer = tokenizer
        self.verbose = verbose
        self.use_tqdm = use_tqdm
    def load_dataset(self,file_path):
        """Read a JSON-lines file into a list of dicts."""
        dataset_tmp = []
        import json
        f=open(file_path,'r',encoding='utf8')
        for line in f:
            t = json.loads(line)
            dataset_tmp.append(t)
        f.close()
        return dataset_tmp
    def load_iterator(self,file_path):
        """Yield {'text_a', 'text_b', 'label'} dicts from a JSON-lines file
        whose records carry 'query', 'title' and 'label' fields."""
        dataset_tmp = self.load_dataset(file_path)
        for line in dataset_tmp:
            yield {'text_a': line['query'], 'text_b': line['title'], 'label':line['label']}
    def load_iterator1(self,file_path):
        """Yield example dicts from a CSV with _id/content[/sentiment] columns;
        missing sentiment defaults to 0."""
        df = pd.read_csv(file_path)
        #df=df.sample(400)
        if 'sentiment' not in df.columns:
            df['sentiment'] = 0
        lst = df[['_id', 'content', 'sentiment']].rename(columns={'content':'text_a','sentiment':'label'}).to_dict('records')
        for line in lst:
            yield line
    def _convert_iterator_to_example(self,iterator):
        """Wrap each yielded dict in an InputExample."""
        examples = []
        for val in iterator:
            examples.append(InputExample(**val))
        return examples
    def convert_examples_to_features(self, examples, max_seq_length):
        '''Convert examples into padded `InputFeatures`.
        Args:
            examples : [List] input samples, each with guid, text_a, text_b, label
            max_seq_length: [int] maximum sequence length in tokens
        Returns:
            features: list of InputFeatures where
                input_ids      : [ListOfInt] token ids (each maps to a word vector)
                attention_mask : [ListOfInt] 1 for real tokens, 0 for padding
                token_type_ids : [ListOfInt] 0 for the first sentence, 1 for the second
        '''
        features = []
        if self.use_tqdm:
            converting_bars = tqdm(enumerate(examples), total=len(examples),
                                   desc='converting_examples_to_features')
        else:
            converting_bars = enumerate(examples)
        for example_index, example in converting_bars:
            text_a = self.tokenizer.tokenize(example.text_a)
            # self.tokenizer.encode_plus(example.text_a)
            text_b = self.tokenizer.tokenize(example.text_b)
            # Single-sequence inputs get no trailing [SEP] after text_b.
            if len(text_b) == 0:
                end_token = []
            else:
                end_token = ["[SEP]"]
            # Reserve room for [CLS], the middle [SEP], and the optional end [SEP].
            self._truncate_seq_pair(text_a, text_b, max_seq_length - 2 - len(end_token))
            tokens = ["[CLS]"] + text_a + ["[SEP]"] + text_b + end_token
            token_type_ids = [0] * (len(text_a) + 2) + [1] * (len(text_b) + len(end_token))
            input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            attention_mask = [1] * len(input_ids)
            # Right-pad everything with zeros up to max_seq_length.
            padding_length = max_seq_length - len(input_ids)
            input_ids += ([0] * padding_length)
            attention_mask += ([0] * padding_length)
            token_type_ids += ([0] * padding_length)
            label = example.label
            # Log the first example once for sanity checking.
            if example_index < 1 and self.verbose == 1:
                logger.info("*** Example ***")
                logger.info("idx: {}".format(example_index))
                logger.info("guid: {}".format(example.guid))
                logger.info("tokens: {}".format(' '.join(tokens).replace('\u2581', '_')))
                logger.info("input_ids: {}".format(' '.join(map(str, input_ids))))
                logger.info("attention_mask: {}".format(' '.join(map(str, attention_mask))))
                logger.info("token_type_ids: {}".format(' '.join(map(str, token_type_ids))))
                logger.info("label: {}".format(label))
            features.append(
                InputFeatures(
                    example_id = example.guid,
                    choices_features = (tokens, input_ids, attention_mask, token_type_ids),
                    label = label
                )
            )
        return features
    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
    def _select_field(self, features, field):
        """Collect one named field from every feature's choices_features dict."""
        return [feature.choices_features[field]
                for feature in features]
    def prepare_dataloader_from_iterator(self, iterator, batch_size, max_seq_length, sampler=None):
        """Build a DataLoader of long tensors from an example iterator.

        sampler, when given, is a sampler class instantiated on the dataset
        (e.g. torch RandomSampler).
        """
        examples = self._convert_iterator_to_example(iterator)
        features = self.convert_examples_to_features(examples, max_seq_length)
        all_input_ids = torch.tensor(self._select_field(features, 'input_ids'), dtype=torch.long)
        all_attention_mask = torch.tensor(self._select_field(features, 'attention_mask'), dtype=torch.long)
        all_token_type_ids = torch.tensor(self._select_field(features, 'token_type_ids'), dtype=torch.long)
        all_label = torch.tensor([f.label for f in features], dtype=torch.long)
        dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label)
        sampler_func = sampler(dataset) if sampler is not None else None
        dataloader = DataLoader(dataset, sampler=sampler_func, batch_size=batch_size)
        return dataloader
    @cache_results(_cache_fp='', _refresh=False)
    def prepare_dataloader(self, file_path, batch_size,max_seq_length,sampler=None):
        """Cached (fastNLP cache_results) DataLoader from a JSON-lines file."""
        iterator = self.load_iterator(file_path)
        return self.prepare_dataloader_from_iterator(iterator,batch_size,max_seq_length,sampler)
if __name__ == '__main__':
    # Smoke-test: build a train DataLoader from the configured data dir.
    config = Config()
    args = config.get_default_cofig()
    from transformers import BertTokenizer
    from torch.utils.data import RandomSampler
    tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path, do_lower_case=args.do_lower_case)
    # NOTE(review): `SentimentData` is not defined in this module (the class
    # above is `DataSet`); running this block raises NameError -- confirm
    # whether the class was renamed.
    dt=SentimentData(tokenizer = tokenizer)
    dt.prepare_dataloader(file_path=os.path.join(args.data_dir, 'train.csv'),sampler=RandomSampler,
                          batch_size = args.per_gpu_train_batch_size,max_seq_length=args.max_seq_length)
a1deea796e166cd6fbd74298c32c10b70ef7aeb4 | Python | SimOgaard/YoloDatasetCreator | /YoloDataset/lego-gubbar-detection/TestingFiles/ManualReview.py | UTF-8 | 458 | 2.609375 | 3 | [] | no_license | import cv2
import glob
import os
# Manual image review: show each PNG; ESC quits the whole review, the key
# reported as 0 deletes the image, any other key skips to the next one.
for image in glob.glob("Edit Characters/ImagesNoBackground/*.png"):
    img = cv2.imread(image)
    while(1):
        cv2.imshow('img',img)
        # Poll for a key every 33 ms (-1 means no key pressed).
        k = cv2.waitKey(33)
        if k==27:
            # ESC: stop reviewing this image (and, below, the whole loop).
            break
        elif k==0:
            print("[INFO] deleting {}".format(image))
            os.remove(image)
            break
        elif k!=-1:
            # Any other key: keep the image and move on.
            break
    if k ==27:
        break
cv2.destroyAllWindows()
| true |
413d3944995540305aa3f6a97011855eef703e76 | Python | Li-Evelyn/vanilla-cake-bot | /Files/gaming.py | UTF-8 | 683 | 3.421875 | 3 | [
"MIT"
] | permissive | import random
rps_choices = ["rock", "paper", "scissors"]
rps_process = {"rock": "scissors", "paper": "rock", "scissors": "paper"}
def rps_classic(choice):
computer_choice = rps_choices[random.randint(0, 2)]
victory = f"I picked {computer_choice}.\nLooks like "
if choice.lower() == computer_choice:
victory += "we tied!"
elif rps_process[choice.lower()] == computer_choice:
victory += "you won!"
else:
victory += "I won!"
return victory
# TODO: vote rock paper scissors https://kakegurui.fandom.com/wiki/Vote_Rock-Paper-Scissors
# TODO: nim type zero https://kakegurui.fandom.com/wiki/Nim_Type_Zero
# TODO: m i n e s w e e p e r | true |
4d41388de02d57c16385472a8a46da0cc29a9a7c | Python | LMJW/DSA_UCSD | /Algorithm_tool_box/week3/assignment/task1/task1.py | UTF-8 | 223 | 3.546875 | 4 | [] | no_license | # Uses python3
def michange(x):
    """Return the minimum number of coins (denominations 10, 5, 1) summing to x."""
    remaining = x
    coins = 0
    # Greedy change-making is optimal for this denomination set.
    for denom in (10, 5, 1):
        used, remaining = divmod(remaining, denom)
        coins += used
    return coins
# Read one integer from stdin and print the minimum coin count.
n = int(input())
print(michange(n))
c9a102280cc91a7abeb6e123c65727e7841698aa | Python | mit-plv/riscv-coq | /export/py/ZBitOps.py | UTF-8 | 204 | 3.171875 | 3 | [] | permissive | def bitSlice(w, start, stop):
    # Mask of (stop - start) low-order ones selects the field width.
    mask = (1 << (stop - start)) - 1
    # Shift the field down to bit `start` == bit 0, then mask everything above.
    return (w >> start) & mask
def signExtend(l,n):
    """Interpret the low l bits of n as a two's-complement signed value."""
    sign_bit = (n >> (l - 1)) & 1
    # When the sign bit is set, subtract 2**l; otherwise n is unchanged.
    return n - (sign_bit << l)
| true |
3e1efa9e4d72ad0da01b8d9e4d340261ea586904 | Python | Environmental-Informatics/03-working-with-files-in-python-warre112 | /warre_evaluate_raccoon_lifes.py | UTF-8 | 3,477 | 4.3125 | 4 | [] | no_license |
"""
Created on Fri Jan 31 14:05:46 2020
@author: warre112
"""
"""
Assignment 04- Raccoon Life
Les Warren
Created Friday, January 31, 2020
This program is designed to assess the life of George, the raccoon. Functions
included: creating lists, using a dictionary, summary statistics, and
saving to .txt file
"""
"""Input File"""
george = open( "2008Male00006.txt", "r" )
##opens file. "r= read"
print(george)
first= george.readline()
#specifys to read only first line
lines= george.readlines()
#reads next remaining lines (not including the first)
last= lines[14]
#specifys the last line of data
print(lines)
george.close()
#closes read file
Data = [0]*len(lines) #makes a list with values of "0"
for lidx in range(len(lines)-1):
Data[lidx] = lines[lidx].split(",") #values split by commas
Data[lidx][3] = int(Data[lidx][3]) #changing to integers
Data[lidx][4:6] = map(float,Data[lidx][4:6]) #changing to float
Data[lidx][8:15] = map(float,Data[lidx][8:15]) #chaning to float
# assigning correct type to each value. Starts with row "0" (N-1)
print(Data)
first= first.split(",")
"""Dictionary"""
raccoon= dict() #creating blank dictionary
Data2 = [0]* 15 #blank list of 15
for i in range(15):
Data2[i] = [] #15 blank lists
for i in range(15):
for j in range(14):
Data2[i].append(Data [j][i]) #loop to take first column of 14 rows
for j in range(15):
raccoon[first[j]]= Data2[j]
raccoon #view of data
Data2
"""Summary Statistics"""
from math import sqrt
def listmean(lst):
return sum(lst)/len(lst)
#function to calculate mean of list
listmean(raccoon["Energy Level"]) #test
def listsum(lst):
return sum(lst)
#function to calculate sum of list
listsum(raccoon["Energy Level"]) #test
def distance(x1, x2, y1, y2):
return sqrt((x2-x1)**2+(y2-y1)**2)
#fucntion to calculate distance traveled between x,y of each step
distance(5,15,5,15) #test
Movement= [0]*14 #list of values of 0
for i in range(1,14):
Movement[i]=distance(raccoon[' X'][i-1], raccoon[' X'][i], raccoon[' Y'][i-1], raccoon[' Y'][i])
#computes distance
Movement
raccoon['Movement']= Movement #adds to dictionary
listmean(raccoon["Energy Level"]) #calculating mean Energy Level
def averxy():
    """Return (mean X, mean Y) of the module-level `raccoon` data."""
    return listmean(raccoon[' X']), listmean(raccoon[' Y'])
# function to calculate average X and Y value
averxy()  # testing function
listsum(raccoon['Movement'])  # total distance traveled
# =

"""Creating new .txt file"""
# Write a per-step summary report. The header numbers below were
# computed from this data set and are hard-coded rather than formatted
# in from the variables above.
file = open("Warre112_Georges_life.txt", "w")
file.write("Raccoon name: George \n")
file.write("Average location: 591189.034454322, 4504604.085012094 \n")
file.write("Distance traveled: 593.9382753487247 \n")
file.write("Average energy level: 563.6214285714285 \n")
file.write("Raccoon end state: George number 6 died from starvation \n")
file.write("\n")  # blank line
# Tab-separated header row. (NOTE(review): "Alseep" typo is in the
# emitted text; left untouched to preserve the file format.)
file.write("Date"+"\t"+"Time"+"\t"+"X"+"\t"+"Y"+"\t"+"Alseep"+"\t"+"Behavior"+"\t"+"Distance"+"\n")
# One tab-separated data row per record.
for d in range(14):
    file.write(str(raccoon['Day'][d])+"\t"+str(raccoon['Time'][d])+"\t"+str(raccoon[' X'][d])+"\t"+str(raccoon[' Y'][d])+"\t"+str(raccoon[' Asleep'][d])+"\t"+str(raccoon['Behavior Mode'][d])+"\t"+str(raccoon['Movement'][d])+"\n")
file.close()
| true |
62f9b9a110b7f5121c5faba2f948cf354b27b385 | Python | reesporte/art | /art.py | UTF-8 | 2,620 | 3.125 | 3 | [] | no_license | import random, os, sys
def get_rgb():
    """Build a semi-random ImageMagick rgb(...) literal (quoted).

    At least one channel always ends up at 255, so every colour is
    fully saturated in one component.
    """
    colors = [0, 0, 0]
    colors[0] = random.randint(1, 255)
    if colors[0] == 255:
        colors[1] = random.randint(0, 100)
    else:
        colors[1] = random.randint(0, 255)
    if colors[1] == 255 or colors[0] == 255:
        colors[2] = random.randint(0, 100)
    else:
        colors[2] = random.randint(0, 255)
    # Unless all three channels happen to be 255 already, force a
    # randomly chosen channel to 255.
    if colors[0] != 255 or colors[1] != 255 or colors[2] != 255:
        colors[random.randint(0, 2)] = 255
    rgb = "\"rgb(" + str(colors[0]) + "," + str(colors[1]) + "," + str(colors[2]) + ")\""
    return rgb
def random_circle():
    """Return an ImageMagick draw primitive for a random circle.

    The center lies anywhere on the 1280x1280 canvas; the edge point is
    offset by up to 50px per axis, bounding the radius at ~70px.
    """
    x = random.randint(0, 1280)  # center coordinates
    y = random.randint(0, 1280)
    x2 = x + random.randint(0, 50)  # edge coordinates
    y2 = y + random.randint(0, 50)
    item = "circle " + str(x) + "," + str(y) + " " + str(x2) + "," + str(y2)
    return item
def stroke(strokewidth, stroke_color, coord, dim, angle):
    """Build an ImageMagick ellipse-stroke primitive.

    coord -> (x, y) center, dim -> (width, height) radii,
    angle -> (start, finish) in degrees. Returns the draw-string
    fragment (note the leading space, so fragments concatenate cleanly).
    """
    x, y = coord[0], coord[1]
    width, height = dim[0], dim[1]
    start_angle, finish_angle = angle[0], angle[1]
    line = " fill none stroke-linecap round stroke-width " + str(strokewidth) + " stroke "
    line += str(stroke_color) + " ellipse " + str(x) + "," + str(y) + " "
    line += str(width) + "," + str(height) + " "
    line += str(start_angle) + "," + str(finish_angle)
    return line
def random_stroke():
    """Return an ellipse-stroke primitive with random geometry and width.

    Delegates string assembly to stroke() so the two stay in sync.
    """
    strokewidth = random.randint(5, 200)
    stroke_color = get_rgb()
    x = random.randint(20, 1280)
    y = random.randint(20, 1280)
    width = random.randint(20, 350)
    height = random.randint(20, 350)
    start_angle = random.randint(0, 360)
    finish_angle = random.randint(0, 360)
    return stroke(strokewidth, stroke_color, (x, y), (width, height),
                  (start_angle, finish_angle))
"""
"""
"""
"""
def command(number):
    """Build an ImageMagick `convert` command drawing `number` random strokes
    onto a 1280x1280 canvas, saved as art.jpeg."""
    name = "art"
    thing = "convert -size 1280x1280 xc: -draw \'"
    for i in range(number):
        thing += "fill " + get_rgb() + " " + random_stroke() + " "
    thing += "\' " + name + ".jpeg "
    return thing
if __name__ == "__main__":
    # Usage: python art.py <number_of_strokes>
    print("generating . . . ")
    word = command(int(sys.argv[1]))
    print("executing . . . ")
    # NOTE(review): shells out to ImageMagick via os.system with an
    # interpolated command string -- fine locally, unsafe for untrusted input.
    os.system(word)
| true |
69aabcca644c60fef9c53a316b3046966629430e | Python | legorobo/drive | /Drive.py | UTF-8 | 3,818 | 3.390625 | 3 | [] | no_license | # ------------------------------------------------------------------------
# Drive.py
# ------------------------------------------------------------------------
#
# Alan Li written on March 17, 2015
#
# Waffle Revengeance
#
# ------------------------------------------------------------------------
from .ev3dev import Motor
import time
class Drive(object):
    """
    Unicycle-model drive system: motion is expressed as a single linear
    velocity `v` and an angular velocity `omega`, converted on demand
    into per-wheel linear velocities.
    """

    WHEEL_RADIUS = 0.063  # meters
    WHEELBASE_LENGTH = 0.6969  # meters (also not accurate)
    LEFT_MOTOR = Motor.PORT.A
    RIGHT_MOTOR = Motor.PORT.B

    def __init__(self):
        """Basic constructor."""
        # BUG FIX: class attributes must be qualified to be visible here
        # (the originals were bare names and raised NameError).
        self.left_motor = Motor(port=Drive.LEFT_MOTOR)
        self.right_motor = Motor(port=Drive.RIGHT_MOTOR)
        # BUG FIX: store under private names; `v`/`omega` are read-only
        # properties. The original assigned through the setter-less
        # property (AttributeError) and the getters returned themselves
        # (infinite recursion).
        self._v = 0
        self._omega = 0

    @property
    def v(self):
        """Current commanded linear velocity."""
        return self._v

    @property
    def omega(self):
        """Current commanded angular velocity."""
        return self._omega

    # ------------------------------------------------------------------------
    def drive_left_motor(self, v, time):
        """Run the left motor at linear velocity `v` for `time` seconds,
        or forever when time == -1.

        TODO: Test the functionality
        """
        if time == -1:
            self.left_motor.run_forever(v)
        else:
            self.left_motor.run_time_limited(time, v)

    def drive_right_motor(self, v, time):
        """Run the right motor at linear velocity `v` for `time` seconds,
        or forever when time == -1.

        TODO: Test the functionality
        """
        if time == -1:
            self.right_motor.run_forever(v)
        else:
            self.right_motor.run_time_limited(time, v)

    def drive_left_motor_dist(self, v, dist):
        """Drive the left motor a set number of rotations.

        TODO: Determine the units in run_position_limited.
        """
        self.left_motor.run_position_limited(dist, v)

    def drive_right_motor_dist(self, v, dist):
        """Drive the right motor a set number of rotations."""
        self.right_motor.run_position_limited(dist, v)

    # ------------------------------------------------------------------------
    def drive(self, v, omega, time=-1):
        """Command the unicycle model: convert (v, omega) to per-wheel
        linear velocities and run both motors.

        TODO: Test this
        """
        self._v = v
        self._omega = omega
        # BUG FIX: `L` and `R` were undefined names; use the wheelbase
        # length and wheel radius class constants.
        v_left = ((2.0 * v) - (omega * Drive.WHEELBASE_LENGTH)) / (2.0 * Drive.WHEEL_RADIUS)
        v_right = ((2.0 * v) + (omega * Drive.WHEELBASE_LENGTH)) / (2.0 * Drive.WHEEL_RADIUS)
        self.drive_left_motor(v_left, time)
        self.drive_right_motor(v_right, time)

    def drive_dist(self, v, omega, linear_dist, angular_dist):
        """Drive the robot to a coordinate expressed by delta theta and delta l.

        TODO: Implement (basically do a point turn and then a straight run).
        """
        self._v = v
        self._omega = omega
        self._v = 0
        self._omega = 0

    def stop(self):
        """Stop the drive train and reset both motors."""
        self.drive(0, 0)
        self.left_motor.reset()
        self.right_motor.reset()

    # ------------------------------------------------------------------------
    @staticmethod
    def init_motor(motor):
        """Initialize the behavior of a given motor."""
        motor.reset()
        motor.run_mode = 'forever'
        motor.stop_mode = Motor.STOP_MODE.BRAKE
        motor.regulation_mode = True
        motor.pulses_per_second_sp = 0
        motor.start()
| true |
4c73e5c4c2dbfb767fe4b89b2980639da6477536 | Python | csc202summer19/lectures | /05_stacks/array_stack.py | UTF-8 | 789 | 4.125 | 4 | [] | no_license | class Stack:
""" A last-in, first-out collection of elements """
def __init__(self):
# The backing array:
self.array = [None] * 4
# The length of the backing array:
self.capacity = 4
# The number of elements in the stack:
# NOTE: The top-of-stack is always "size - 1".
self.size = 0
def push(stack, value):
# NOTE: Since the TOS is "size - 1", the next available spot must be
# "size" itself.
# Double the capacity if necessary.
# Set "stack.array[stack.size] = value".
# Increment the size.
pass
def pop(stack):
# Decrement the size.
# Return the element at "stack.array[stack.size]".
pass
def peek(stack):
# Return the element at "stack.array[stack.size - 1]".
pass
| true |
58c1108ebe0c76780b058d653129de0e27ccc114 | Python | cliffeby/Duckpin2 | /blobtestv2.py | UTF-8 | 2,624 | 2.796875 | 3 | [
"MIT"
] | permissive | import os
import credentials
from azure.iot.device import IoTHubDeviceClient
from azure.core.exceptions import AzureError
from azure.storage.blob import BlobClient
# IoT Hub device connection string, pulled from the local credentials module.
CONNECTION_STRING = credentials.loginFree["ConnectionString"]
# Local image uploaded by this sample; machine-specific -- adjust per host.
PATH_TO_FILE = r"/home/cliffeby/Pictures/videoCCEFrame30.jpg"
def store_blob(blob_info, file_name):
    """Upload `file_name` to the SAS-authorised blob described by `blob_info`.

    Returns (True, upload_result) on success, (False, exception) on failure.
    """
    try:
        sas_url = "https://{}/{}/{}{}".format(
            blob_info["hostName"],
            blob_info["containerName"],
            blob_info["blobName"],
            blob_info["sasToken"]
        )
        print("\nUploading file: {} to Azure Storage as blob: {} in container {}\n".format(file_name, blob_info["blobName"], blob_info["containerName"]))
        # Upload the specified file
        with BlobClient.from_blob_url(sas_url) as blob_client:
            with open(file_name, "rb") as f:
                result = blob_client.upload_blob(f, overwrite=True)
                return (True, result)
    except FileNotFoundError as ex:
        # catch file not found and add an HTTP status code to return in notification to IoT Hub
        ex.status_code = 404
        return (False, ex)
    except AzureError as ex:
        # catch Azure errors that might result from the upload operation
        return (False, ex)
def run_sample(device_client):
    """Upload PATH_TO_FILE via the hub-issued SAS URL and report the outcome
    back to IoT Hub."""
    # Connect the client
    device_client.connect()
    # Get the storage info for the blob
    storage_info = device_client.get_storage_info_for_blob("temp.jpg")
    # Upload to blob
    success, result = store_blob(storage_info, PATH_TO_FILE)
    if success:
        print("Upload succeeded. Result is: \n")
        print(result)
        print()
        device_client.notify_blob_upload_status(
            storage_info["correlationId"], True, 200, "OK: {}".format(PATH_TO_FILE)
        )
    else:
        # If the upload was not successful, the result is the exception object
        print("Upload failed. Exception is: \n")
        print(result)
        print()
        device_client.notify_blob_upload_status(
            storage_info["correlationId"], False, result.status_code, str(result)
        )
def main():
    """Entry point: create the device client, run the upload, shut down."""
    device_client = IoTHubDeviceClient.create_from_connection_string(CONNECTION_STRING)
    try:
        print("IoT Hub file upload sample, press Ctrl-C to exit")
        run_sample(device_client)
    except KeyboardInterrupt:
        print("IoTHubDeviceClient sample stopped")
    finally:
        # Graceful exit
        device_client.shutdown()
if __name__ == "__main__":
main() | true |
f2fb1bbb9676bf3e749276216e3cf487e326848d | Python | sirBarbouchi/aircrafts | /aircrafts/spiders/spiderUrl10.py | UTF-8 | 1,667 | 2.703125 | 3 | [] | no_license | import scrapy
from scrapy_splash import SplashRequest
from ..items import AircraftsItem
def listToString(l):
    """Concatenate the strings in `l` (no separator) and strip any
    'Upgraded' markers from the result."""
    return ''.join(l).replace('Upgraded', '')
class TxtavSpider(scrapy.Spider):
    """Scrape pre-owned aircraft listings from txtav.com.

    The listing grid is rendered client-side (Angular), so pages are
    fetched through Splash with a generous timeout.
    """
    name = 'txtav'

    def start_requests(self):
        url = "https://txtav.com/en/preowned"
        splash_args = {'timeout': 85, 'wait': 2.5}
        yield SplashRequest(url=url, callback=self.parse, args=splash_args)

    def parse(self, response):
        aircrafts = response.css('.listing-wrapper')
        print("**************")
        print(len(aircrafts))
        for aircraft in aircrafts:
            href = aircraft.css('.more a').xpath('@href').extract_first()
            if href:
                source = "https://txtav.com" + href
                # Title tail looks like "<year> <make ...> <model>".
                info = aircraft.css('h3::text').extract_first().split('-')[-1].strip().split(' ')
                aircraftsItem = AircraftsItem()
                if len(info) > 1:
                    year = info[0]
                    model = info[-1]
                    make = listToString(info[1:-1])
                    aircraftsItem['make'] = make
                    aircraftsItem['model'] = model
                    aircraftsItem['year'] = year
                serial_number = aircraft.css('strong.ng-binding::text').extract_first()
                time = aircraft.css('span.ng-binding::text').extract_first().replace('hours', '')
                aircraftsItem['source'] = source
                aircraftsItem['time'] = time
                aircraftsItem['serial_Number'] = serial_number
                aircraftsItem['dealer'] = "Textron Aviation"
                yield aircraftsItem
| true |
c458f7b652ccd54c80c425de34d2a56762d50aab | Python | andrevo/covid19-ntnu | /Network models/helge master/network.py | UTF-8 | 904 | 2.828125 | 3 | [] | no_license | import networkx as nx
import numpy as np
import pandas as pd
from classes import *
from utilities import *
from parameters import *
from model import *
def loadNetwork(filename):
    """Load a pickled node dict from data/<filename>.pkl.

    Returns the unpickled object, or None when the file cannot be read.
    (The original initialised `nodes` to the `pickle` module itself, so
    a failed load returned the module object.)
    """
    nodes = None
    try:
        nodes = pickle.load(open('data/{}.pkl'.format(filename), 'rb'))
        print('Loaded nodes from pickle file')
    except Exception:
        print('Failed loading network...')
    return nodes
def saveNetwork(filename, nodes):
    """Pickle `nodes` to data/<filename>.pkl; print a notice on failure.

    The original dumped the file once *outside* the try block (letting
    that failure escape) and then dumped it a second time inside it;
    this writes exactly once, guarded.
    """
    try:
        with open('data/{}.pkl'.format(filename), 'wb') as fh:
            pickle.dump(nodes, fh)
    except Exception:
        print('Failed saving network...')
def createNetwork(nodes):
    """Print infection ancestry (ancestor, descendant count) for every
    recovered ('R') or exposed ('E') node.

    (The original bound an unused -- and unparenthesised -- `nx.Graph`
    reference; that dead local has been dropped.)
    """
    for node in nodes.values():
        if node.state == 'R' or node.state == 'E':
            print(node.infAnc, len(node.infDesc))
def main():
    # NOTE(review): `nodes` is not defined anywhere at module scope in
    # this file -- presumably it is supplied by one of the star imports
    # (parameters/model); confirm before running.
    createNetwork(nodes)


if __name__ == '__main__':
    main()
| true |
a1ee336110450a8368ef09559949f5ca9b0da9b4 | Python | sarahclarke-skc/week_03_homework_music_library | /console.py | UTF-8 | 677 | 2.625 | 3 | [] | no_license | import pdb
from models.artist import Artist
from models.album import Album
import repositories.artist_repository as artist_repository
import repositories.album_repository as album_repository
# Seed script: wipe both repositories, insert sample artists and albums,
# then dump everything that was saved.
album_repository.delete_all()
artist_repository.delete_all()

artist = Artist("Michael Jackson")
artist_repository.save(artist)
artist1 = Artist("Jack White")
artist_repository.save(artist1)

album = Album("Bad", "Pop", artist)
album_repository.save(album)
album1 = Album("Thriller", "Pop", artist)
album_repository.save(album1)
album2 = Album("Blunderbuss", "Rock", artist1)
album_repository.save(album2)

for album in album_repository.select_all():
    print(album.__dict__)

# NOTE(review): debugger breakpoint left in deliberately? Remove before shipping.
pdb.set_trace()
| true |
ad2bf04da0b4763d52aad94ebef0bf43c7b45588 | Python | tommyanthony/textme | /send_sms.py | UTF-8 | 588 | 2.78125 | 3 | [] | no_license | # Download the twilio-python library from http://twilio.com/docs/libraries
from twilio.rest import TwilioRestClient
# Find these values at https://twilio.com/user/account
# Placeholder credentials -- substitute your own account SID / auth token.
account_sid = "ACXXXXXXXXXXXXXXXXX"
auth_token = "YYYYYYYYYYYYYYYYYY"
# Twilio number the SMS is sent from.
FROM_NUMBER = "+15555555555"
client = TwilioRestClient(account_sid, auth_token)
def send_message(to_num, body):
    """Send an SMS with body `body` to `to_num` via the module-level
    Twilio client."""
    client.messages.create(to=to_num, from_=FROM_NUMBER, body=body)
def print_message(to_num, body):
    """Write '<to_num>: <body>' to out.txt, overwriting the file.

    The original opened out.txt inline inside print() and never closed
    the handle; the context manager guarantees flush + close.
    """
    with open("out.txt", 'w') as out:
        print("%s: %s\n\n" % (to_num, body), file=out)
| true |
f1f0aa0f3588530b8048adebed12757ba2d18456 | Python | kevinhuang06/BussinessCardDetection | /util.py | UTF-8 | 2,418 | 2.875 | 3 | [] | no_license | # coding=utf-8
import cv2
import numpy as np
def find_top_k_contours(edges, k):
    """Return (up to) the k longest contours found in a binary edge image.

    NOTE(review): cv2.findContours returns 2 values on OpenCV 2.x/4.x
    but 3 on 3.x -- confirm the targeted version.
    """
    contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # sorted() is stable, so length ties keep their discovery order,
    # matching the original pair-list implementation; slicing past the
    # end simply returns everything.
    return sorted(contours, key=len, reverse=True)[:k]
def similar_line(l1, l2):
    """True when two (rho, theta) lines are within 10px in rho and
    5 degrees in theta."""
    return abs(l1[0] - l2[0]) < 10 and abs(l1[1] - l2[1]) < 5 * np.pi / 180
def merge_similar_lines(lines):
    """Collapse runs of near-identical (rho, theta) lines into averages.

    Lines are sorted by (|rho|, theta); consecutive similar lines are
    merged by a running pairwise average (not a true cluster mean).
    """
    merged_lines = []
    sorted_lines = sorted(lines, key=lambda x: (abs(x[0]), x[1]))
    if sorted_lines:
        last = sorted_lines[0]
        for i in range(1, len(sorted_lines)):
            curr = sorted_lines[i]
            if similar_line(curr, last):
                last = [(last[0] + curr[0]) / 2., (last[1] + curr[1]) / 2.]
            else:
                merged_lines.append(last)
                last = curr
        merged_lines.append(last)
    return merged_lines
def remove_lines_not_perpendicular_to_any_line(v_lines, h_lines):
    """Keep only lines roughly (80-100 degrees) perpendicular to a line
    of the other orientation; returns (kept_vertical, kept_horizontal)."""
    min_rad = 80 * np.pi / 180.0
    max_rad = 100 * np.pi / 180.0
    v_lines_collect = []
    h_lines_collect = []
    for vl in v_lines:
        for hl in h_lines:
            if min_rad < abs(vl[1] - hl[1]) < max_rad:
                v_lines_collect.append(vl)
                break
    # Horizontal lines are checked against the *kept* vertical lines only.
    for hl in h_lines:
        for vl in v_lines_collect:
            if min_rad < abs(vl[1] - hl[1]) < max_rad:
                h_lines_collect.append(hl)
                break
    return v_lines_collect, h_lines_collect
def reduce_lines(lines):
    """Split (rho, theta) lines into horizontal/vertical families, merge
    near-duplicates, then drop lines with no perpendicular partner."""
    h_lines = []
    v_lines = []
    for line in lines:
        theta = line[1]
        # Theta near 0 or pi -> horizontal family; otherwise vertical.
        if (theta < (np.pi / 4.)) or (theta > (3. * np.pi / 4.0)):
            h_lines.append(line)
        else:
            v_lines.append(line)
    return remove_lines_not_perpendicular_to_any_line(
        merge_similar_lines(v_lines), merge_similar_lines(h_lines))
def draw_quadrangle(img, quad):
    """Draw the four edges of `quad` (4 corner points) on `img` in blue,
    3px wide."""
    for i in range(4):
        a = quad[i]
        b = quad[(i + 1) % 4]  # wrap back to the first corner
        cv2.line(img, (a[0], a[1]), (b[0], b[1]), (255, 0, 0), 3)
| true |
25686dfb7b60420c312d79f2cc89d1d720d85a95 | Python | undead404/russianroulette | /get_reaches.py | UTF-8 | 1,035 | 2.578125 | 3 | [] | no_license | import json
import requests
from roulettewheel import RouletteWheel
# NOTE(review): real-looking API credentials committed to source control --
# rotate them and load from the environment instead.
LASTFM_API_KEY = '053c2f4d20bda39f8e353be6e277d6d0'
LASTFM_SHARED_SECRET = '573e5a2995048342d40070134835c0e1'
# tag.getinfo endpoint; filled in with the tag name and API key.
TAG_GETINFO_URL = 'http://ws.audioscrobbler.com/2.0/?method=tag.getinfo&tag={tag}&api_key={api_key}&format=json'
def get_tag_reach(tag):
    """Return last.fm's 'reach' figure for `tag` from the tag.getinfo API."""
    response = requests.get(TAG_GETINFO_URL.format(api_key=LASTFM_API_KEY, tag=tag))
    tag_info = json.loads(response.text)
    return tag_info['tag']['reach']
genres_rw = RouletteWheel()  # NOTE(review): built but never used below -- confirm
data = {}
# genres.txt: one genre per line; '#'/'!' prefixes mark excluded entries.
for genre in (genre.rstrip() for genre in open('genres.txt') if
              not genre.startswith('#') and not genre.startswith('!')):
    genre_reach = get_tag_reach(genre)
    print("{genre}: {genre_reach}".format(genre=genre, genre_reach=genre_reach))
    data[genre] = genre_reach
with open('genres_reaches.json', 'w') as outfile:
    json.dump(data, outfile)
| true |
b2680be43df2ca8b6fb17b6f2590e41b892d55b5 | Python | Taoge123/OptimizedLeetcode | /LeetcodeNew/DFS/LC_721_Accounts_Merge.py | UTF-8 | 9,435 | 3.59375 | 4 | [] | no_license |
"""
Given a list accounts, each element accounts[i] is a list of strings,
where the first element accounts[i][0] is a name,
and the rest of the elements are emails representing emails of the account.
Now, we would like to merge these accounts.
Two accounts definitely belong to the same person if there is some email that is common to both accounts.
Note that even if two accounts have the same name,
they may belong to different people as people could have the same name.
A person can have any number of accounts initially, but all of their accounts definitely have the same name.
After merging the accounts, return the accounts in the following format:
the first element of each account is the name,
and the rest of the elements are emails in sorted order.
The accounts themselves can be returned in any order.
Example 1:
Input:
accounts = [["John", "johnsmith@mail.com", "john00@mail.com"], ["John", "johnnybravo@mail.com"], ["John", "johnsmith@mail.com", "john_newyork@mail.com"], ["Mary", "mary@mail.com"]]
Output: [["John", 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com'], ["John", "johnnybravo@mail.com"], ["Mary", "mary@mail.com"]]
Explanation:
The first and third John's are the same person as they have the common email "johnsmith@mail.com".
The second John and Mary are different people as none of their email addresses are used by other accounts.
We could return these lists in any order, for example the answer [['Mary', 'mary@mail.com'], ['John', 'johnnybravo@mail.com'],
['John', 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com']] would still be accepted.
#Fantastic solution, aslo compared to redundunt connection
http://wulc.me/2017/10/12/LeetCode%20%E8%A7%A3%E9%A2%98%E6%8A%A5%E5%91%8A(684,685,721)-%E5%B9%B6%E6%9F%A5%E9%9B%86%E4%BB%8B%E7%BB%8D%E5%8F%8A%E5%BA%94%E7%94%A8/
http://www.cnblogs.com/grandyang/p/7829169.html
https://www.jianshu.com/p/ccaccc91d58e
https://blog.csdn.net/fuxuemingzhu/article/details/82913712
这个归组类的问题,最典型的就是岛屿问题(例如Number of Islands II),很适合使用Union Find来做,
LeetCode中有很多道可以使用这个方法来做的题,
比如Friend Circles,Graph Valid Tree,Number of Connected Components in an Undirected Graph,
和Redundant Connection等等
都是要用一个root数组,每个点开始初始化为不同的值,如果两个点属于相同的组,
就将其中一个点的root值赋值为另一个点的位置,这样只要是相同组里的两点,通过find函数得到相同的值。
在这里,由于邮件是字符串不是数字,所以root可以用哈希map来代替,我们还需要一个哈希映射owner,
建立每个邮箱和其所有者姓名之前的映射,另外用一个哈希映射来建立用户和其所有的邮箱之间的映射,也就是合并后的结果。
首先我们遍历每个账户和其中的所有邮箱,先将每个邮箱的root映射为其自身,然后将owner赋值为用户名。
然后开始另一个循环,遍历每一个账号,首先对帐号的第一个邮箱调用find函数,得到其父串p,
然后遍历之后的邮箱,对每个遍历到的邮箱先调用find函数,将其父串的root值赋值为p,
这样做相当于将相同账号内的所有邮箱都链接起来了。我们下来要做的就是再次遍历每个账户内的所有邮箱,
先对该邮箱调用find函数,找到父串,然后将该邮箱加入该父串映射的集合汇总,这样就我们就完成了合并。
最后只需要将集合转为字符串数组,加入结果res中,通过owner映射找到父串的用户名,加入字符串数组的首位置
#BFS
然后我们还需要一个visited数组,来标记某个账户是否已经被遍历过,0表示为未访问,1表示已访问。
在建立好哈希map之后,我们遍历所有的账户,如果账户未被访问过,将其加入队列queue,新建一个集合set,
此时进行队列不为空的while循环,取出队首账户,将该该账户标记已访问1,
此时将该账户的所有邮箱取出来放入数组mails中,然后遍历mails中的每一个邮箱,将遍历到的邮箱加入集合set中,
根据映射来找到该邮箱所属的所有账户,如果该账户未访问,则加入队列中并标记已访问。当while循环结束后,
当前账户的所有合并后的邮箱都保存在集合set中,将其转为字符串数组,并且加上用户名在首位置,
最后加入结果res中即可
"""
"""
这道题目虽然也用到了并查集的数据结构,但是与前面的两道题目又有点不同,主要体现在两个方面
节点不再以数字标识,因此标识 parents 的数据结构要从 array 变为 map
不需要判断是否形成闭环,而要返回最终各个集合内的元素;在这个操作中需要注意的是不能直接利用存储各个节点的
parent 的 map 直接为每个节点找到其 parent, 因为并非各个节点都进行了 path compression。
对应有两种方法 (1)借助 find 方法找到各个节点的parent (2)
对存储各个节点的 parent 的 map 再进行一次 path compression,
然后直接在 map 中找到各个节点的 parent 对应的方法入下
"""
import collections
class Solution:
    """Account merge via union-find keyed on email strings."""

    def accountsMerge(self, accounts):
        """
        :type accounts: List[List[str]]
        :rtype: List[List[str]]
        """
        owners, parents = {}, {}
        # Every email starts as its own root; remember the owner name of
        # each account's first email.
        for account in accounts:
            owners[account[1]] = account[0]
            for i in range(1, len(account)):
                parents[account[i]] = account[i]
        # Union every email of an account under the first email's root.
        for account in accounts:
            p = self.find(account[1], parents)
            for i in range(1, len(account)):
                parents[self.find(account[i], parents)] = p
        # Group all emails by their final root.
        unions = {}
        for account in accounts:
            for i in range(1, len(account)):
                p = self.find(account[i], parents)
                unions.setdefault(p, set())
                unions[p].add(account[i])
        result = []
        for k, v in unions.items():
            result.append([owners[k]] + sorted(v))
        return result

    def find(self, email, parents):
        """Path-compressing find."""
        if parents[email] != email:
            parents[email] = self.find(parents[email], parents)
        return parents[email]
class Solution2:
    """Union-find variant: fully path-compresses, then groups straight
    from the parents map instead of re-walking the accounts."""

    def accountsMerge(self, accounts):
        """
        :type accounts: List[List[str]]
        :rtype: List[List[str]]
        """
        owners, parents = {}, {}
        for account in accounts:
            owners[account[1]] = account[0]
            for i in range(1, len(account)):
                parents[account[i]] = account[i]
        for account in accounts:
            p = self.find(account[1], parents)
            for i in range(1, len(account)):
                parents[self.find(account[i], parents)] = p
        # Not all paths are compressed yet -- flatten every entry so the
        # map can be grouped directly.
        for k, v in parents.items():
            if k != v:
                parents[k] = self.find(parents[v], parents)
        unions = {}
        for k, v in parents.items():
            if v not in unions:
                unions[v] = set()
            unions[v].add(k)
        result = []
        for k, v in unions.items():
            result.append([owners[k]] + sorted(v))
        return result

    def find(self, email, parents):
        """Path-compressing find."""
        if parents[email] != email:
            parents[email] = self.find(parents[email], parents)
        return parents[email]
class Solution(object):
    """Account merge as graph traversal: each account's first email is
    linked to every other email of that account; connected components
    are then collected with an iterative DFS."""

    def accountsMerge(self, accounts):
        em_to_name = {}
        graph = collections.defaultdict(set)
        for acc in accounts:
            name = acc[0]
            for email in acc[1:]:
                graph[acc[1]].add(email)
                graph[email].add(acc[1])
                em_to_name[email] = name
        seen = set()
        ans = []
        for email in graph:
            if email not in seen:
                seen.add(email)
                stack = [email]
                component = []
                while stack:
                    node = stack.pop()
                    component.append(node)
                    for nei in graph[node]:
                        if nei not in seen:
                            seen.add(nei)
                            stack.append(nei)
                ans.append([em_to_name[email]] + sorted(component))
        return ans
#DFS
# https://leetcode.com/problems/accounts-merge/discuss/109194/Easy-python-solution-dfs
class Account:
    """Hashable (name, emails) record used by the DFS account-merge solution."""

    def __init__(self, l):
        self.name = l[0]
        self.emails = l[1:]

    def __hash__(self):
        # BUG FIX: the original hashed str(self); the default repr embeds
        # the object id, so equal accounts hashed differently (breaking
        # the __eq__/__hash__ contract and set/dict dedup). Hash the same
        # data __eq__ compares instead.
        return hash((self.name, frozenset(self.emails)))

    def __eq__(self, other):
        return self.name == other.name and len(self.emails) == len(other.emails) \
            and set(self.emails) == set(other.emails)
# NOTE(review): the two functions below take `self` and reference
# self.Account / self.dfs, yet sit at module level in this file -- they
# appear to have been lifted out of a Solution class that also nested
# the Account class above. As copied they cannot run; restructure
# (wrap in a class) before use.
def accountsMerge(self, accounts):
    accounts = [self.Account(a) for a in accounts]
    email_dict, visited, finalres = collections.defaultdict(set), set(), []
    # Map each email to every account that mentions it.
    for acc in accounts:
        for email in acc.emails:
            email_dict[email].add(acc)
    # One DFS per unvisited account collects its merged email set.
    for acc in accounts:
        if acc in visited:
            continue
        res = set()
        self.dfs(acc, email_dict, visited, res)
        finalres.append([acc.name] + sorted(res))
    return finalres


def dfs(self, acc, email_dict, visited, res):
    """Collect every email reachable from `acc` through shared addresses."""
    if acc in visited:
        return
    visited.add(acc)
    for email in acc.emails:
        res.add(email)
        for a in email_dict[email]:
            self.dfs(a, email_dict, visited, res)
| true |
57de5e1b3a98e118cb6eb2e25fdb2a2b78dd74d2 | Python | syo16/python_doc_practice | /functools/singledispatch-sample.py | UTF-8 | 480 | 3.46875 | 3 | [] | no_license | from functools import singledispatch
@singledispatch
def func(arg, verbose=False):
    """Default handler: optionally print an announcement prefix, then the
    argument. Type-specific overloads are registered below."""
    if verbose:
        print('Let me just say', end='')
    print(arg)
@func.register
def _(arg: int, verbose=False):
    """int overload: optionally prefix the value with a marker."""
    if verbose:
        print('Value---->', end='')
    print(arg)
@func.register
def _(arg: list, verbose=False):
    """list overload: print all elements space-separated on one line."""
    if verbose:
        print('Show all elements')
    for element in arg:
        print(element, end=' ')
    print()
func([2, 3, 4, 5])  # dispatches to the list overload
func(33, verbose=True)  # dispatches to the int overload, with prefix
| true |
0ef2a3bdb811bed369097eddbb1d48bdd16e8762 | Python | luyuan2002/PythonLearn | /100/4构造程序逻辑.py | UTF-8 | 2,082 | 4.4375 | 4 | [] | no_license | """
目前学习的内容只是Python的冰山一角,但是这些内容以及该足够我们来构建程序中的逻辑,
现在必须要做的一件事情就是尝试用所学的只是取解决现实中的问题,换句话说就是锻炼自己把用人类自然 语言描述的算法(解决问题的方法和步骤)编译成Python代码的能力
而这件事必须通过大量的联系才能达成
"""
"""
寻找水仙花数
说明:水仙花数也被称为超完全数字不变数、自恋数、自幂数、阿姆斯特朗数
它是一个3位数,该数字每个位上数字的立方之和正好等于它本身,例如:$1^3 + 5^3+ 3^3=153$。
"""
for num in range(100, 1000):
low = num % 10
mid = num // 10 % 10
high = num // 100 % 10
if num == low ** 3 + mid ** 3 + high ** 3:
print(num)
"""
将正整数反转
例如:12345变成54321
"""
num = int(input("num = "))
reversed_num = 0
while num > 0:
reversed_num = reversed_num * 10 + num % 10
num //= 10
print(reversed_num)
"""
百钱百鸡的问题
说明:百钱百鸡是我国古代数学家张丘建在《算经》一书中提出的数学问题:
鸡翁一值钱五,鸡母一值钱三,鸡雏三值钱一。百钱买百鸡,问鸡翁、鸡母、鸡雏各几何?
翻译成现代文是:公鸡5元一只,母鸡3元一只,小鸡1元三只,用100块钱买一百只鸡,问公鸡、母鸡、小鸡各有多少只?
"""
for x in range(0, 20):
for y in range(0, 33):
z = 100 - x - y
if 5 * x + 3 * y + z / 3 == 100:
print('公鸡: %d只, 母鸡: %d只, 小鸡: %d只' % (x, y, z))
"""
上面使用的方法叫做穷举法,也称为暴力搜索法,这种方法通过一项一项的列举备选解决方案中所有可能的候选项并检查每个候选项是否符合问题的描述,最终得到问题的解。
这种方法看起来比较笨拙,但对于运算能力非常强大的计算机来说,通常都是一个可行的甚至是不错的选择,而且问题的解如果存在,这种方法一定能够找到它。
"""
| true |
0c77f688163199691f5f6f928155963efae4407d | Python | omarala/tp_prog_cpp | /Tp3_omar_alex/draw.py | UTF-8 | 1,174 | 2.96875 | 3 | [] | no_license | #!/usr/bin/python3
# # import numpy as np su
import matplotlib.pyplot as plt
import sys
#crée un itérateur de point
def iterator_point(fichier):
    """Yield each line of the open file as a float (one number per line)."""
    for line in fichier:
        yield float(line.strip())
def create_plot():
    """Scatter-plot tirage(n+1) against tirage(n) from ./dataXor.txt
    (a text file with one float per line)."""
    str_file1 = "./dataXor.txt"
    # NOTE(review): alean is seeded with 1, pairing the first sample
    # with x = 1 -- looks intentional for this generator, but confirm.
    alean = [1]
    aleanplus1 = []
    with open(str_file1, "r") as fichier:
        generator = iterator_point(fichier)
        aleanplus1.append(generator.__next__())
        for point in generator:
            # Shift: the previous y becomes the next x.
            alean.append(aleanplus1[-1])
            aleanplus1.append(point)
    fig = plt.figure()
    plt.scatter(alean, aleanplus1, s=10)
    plt.title("Tirage(n+1) en fonction de Tirage(n)")
    ax = fig.add_subplot(111)
    ax.set_xlabel('N eme nombre aleatoire')
    ax.set_ylabel('N+1 eme nombre aleatoire')
    plt.show()
if __name__ == "__main__":
    create_plot()
| true |
899b6c7b64a0c4748e1efeaab5ec06e78ea059ff | Python | Sanved07/Python-Projects | /Reverse No.py | UTF-8 | 492 | 4 | 4 | [] | no_license | def makepalindrome(pali):
def makepalindrome(pali):
    """Print the smallest palindromic number >= pali.

    Single-digit inputs get the "itself" message; everything else counts
    upward until the decimal digits read the same both ways. The original
    recursed once per increment and exceeded Python's recursion limit for
    inputs far from a palindrome (e.g. 12345678); this scans iteratively.
    """
    if pali < 10:
        print(f"the number is itself palindrome i.e. {pali}")
        return
    while str(pali) != str(pali)[::-1]:
        pali += 1
    print(f"the next palindrome is {pali}")
if __name__ == "__main__":
    # Read how many numbers to process, then each number in turn.
    no_ip = int(input("enter the numbers you want to make palindrome\n"))
    for i in range(no_ip):
        pali = int(input("enter the numbers you want to palindrome\n"))
        makepalindrome(pali)
| true |
93b2bba8a8979092a56f904752a3c3d5801f9b52 | Python | CS4311-spring-2020/pick-tool-team03-we-showed-up | /src/Connections/Network.py | UTF-8 | 2,117 | 3.296875 | 3 | [] | no_license | # This Python file uses the following encoding: utf-8
import threading
import socket
class Network:
    """Binds an analyst's client to the lead's server so they can share a session."""

    def __init__(self):
        # Port the session is shared over (fixed for now).
        self.port = 8091
        # Lead socket; replaced by a bound socket when the server starts.
        self.socket = socket.socket()
        self.serverStatus = False

    def start_lead_server(self):
        """Open the port on the lead computer and accept clients on a
        background thread."""
        s = socket.socket()
        print("Socket successfully created")
        s.bind(('', self.port))
        print("socket binded to %s" % (self.port))
        self.socket = s
        self.serverStatus = True
        # 20 connections will be accepted per SRS.
        self.socket.listen(20)
        print("socket is listening")
        thread = threading.Thread(target=self.start_server_thread)
        thread.start()

    def start_server_thread(self):
        """Persistently accept clients while the server flag is set."""
        while self.serverStatus is True:
            print('Hi')
            # Establish connection with client (blocks until one arrives).
            c, addr = self.socket.accept()
            print('Got connection from', addr)
            # BUG FIX: sockets carry bytes, not str -- the original
            # c.send('Thank you for connecting') raised TypeError in Py3.
            c.send(b'Thank you for connecting')
            # NOTE(review): client sockets are never closed here.

    def close_server(self):
        """Close the server socket and reset to a fresh unbound one.

        NOTE(review): closing while the accept thread is blocked makes
        accept() raise in that thread -- confirm intended shutdown path.
        """
        self.socket.close()
        self.serverStatus = False
        self.socket = socket.socket()

    def connect_analyst_to_lead(self, lead_ip):
        """Connect to the lead's server on self.port and print its greeting."""
        s = socket.socket()
        s.connect((lead_ip, self.port))
        # receive data from the server
        print(s.recv(1024))
        print("here?")
| true |
63568284de333af0a74a47c894f3039a8fea8521 | Python | Jiangshan00001/pygerber | /src/pygerber/parser/pillow/apertures/circle.py | UTF-8 | 2,363 | 2.65625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from __future__ import annotations
from functools import cached_property
from typing import Tuple
from PIL import Image, ImageDraw
from pygerber.mathclasses import Vector2D
from pygerber.parser.pillow.apertures.arc_mixin import ArcUtilMixinPillow
from pygerber.parser.pillow.apertures.flash_mixin import FlashUtilMixin
from pygerber.renderer.aperture import CircularAperture
from pygerber.renderer.spec import ArcSpec, LineSpec
class PillowCircle(ArcUtilMixinPillow, FlashUtilMixin, CircularAperture):
    """Circular aperture rendered onto a Pillow canvas.

    Lines and arcs are stroked with width equal to the aperture
    diameter, and the aperture is additionally flashed at both
    endpoints of every stroke.
    """

    canvas: Image.Image
    draw_canvas: ImageDraw.ImageDraw

    @cached_property
    def radius(self) -> float:
        # Aperture radius in canvas units.
        return int(self._prepare_co(self.DIAMETER) / 2)

    @cached_property
    def diameter(self) -> float:
        # Aperture diameter in canvas units.
        return int(self._prepare_co(self.DIAMETER))

    def draw_shape(self, aperture_stamp_draw: ImageDraw.Draw, color: Tuple):
        """Stamp the aperture shape (a filled ellipse) used for flashing."""
        aperture_stamp_draw.ellipse(self.get_aperture_bbox(), color)

    def line(self, spec: LineSpec) -> None:
        """Draw a straight stroke, flashing the aperture at both ends."""
        self.prepare_line_spec(spec)
        self.__line(spec.begin, spec.end)
        self.flash_at_location(spec.begin)
        self.flash_at_location(spec.end)

    def __line(self, begin: Vector2D, end: Vector2D) -> None:
        self.draw_canvas.line(
            [begin.as_tuple(), end.as_tuple()],
            self.get_color(),
            width=self.diameter,
        )

    def arc(self, spec: ArcSpec) -> None:
        """Draw an arc stroke, flashing the aperture at both ends."""
        self.prepare_arc_spec(spec)
        self.__arc(spec)
        self.flash_at_location(spec.begin)
        self.flash_at_location(spec.end)

    def __arc(self, spec: ArcSpec):
        begin_angle, end_angle = self.get_begin_end_angles(spec)
        # Angles are negated for the clockwise case -- presumably to
        # compensate for Pillow's drawing direction; confirm against
        # ImageDraw.arc semantics.
        if self.isCCW:
            self.__draw_arc(spec, begin_angle, end_angle)
        else:
            self.__draw_arc(spec, -begin_angle, -end_angle)

    def __draw_arc(self, spec, begin_angle, end_angle):
        self.draw_canvas.arc(
            self.__get_arc_bbox(spec),
            begin_angle,
            end_angle,
            self.get_color(),
            width=self.diameter,
        )

    def __get_arc_bbox(self, spec: ArcSpec) -> tuple:
        # Bounding box of the arc's outer edge: the path radius plus the
        # aperture's own radius.
        radius = (spec.begin - spec.center).length() + self.radius
        return (
            spec.center.x - radius,
            spec.center.y - radius,
            spec.center.x + radius,
            spec.center.y + radius,
        )
| true |
74189faee9fc1ec850abb5c89022b2ee558483cd | Python | svenzin/ym2149f_player | /convert_ym/ympeek.py | UTF-8 | 3,589 | 2.671875 | 3 | [
"MIT"
] | permissive | import sys
from lhafile import LhaFile
import struct
import os
def to_string(ymdata):
    """Decode YM-file bytes as a cp1252 string."""
    return ymdata.decode('cp1252')
def to_byte(ymdata):
    """Pass through a single byte value (indexing `bytes` yields int in Py3)."""
    assert isinstance(ymdata, int)
    return ymdata
def to_word(ymdata):
    """Decode 2 bytes as a big-endian unsigned 16-bit integer."""
    assert len(ymdata) == 2
    return struct.unpack('>H', ymdata)[0]
def to_dword(ymdata):
    """Decode 4 bytes as a big-endian unsigned 32-bit integer."""
    assert len(ymdata) == 4
    return struct.unpack('>L', ymdata)[0]
def to_dword_littleendian(ymdata):
    """Decode 4 bytes as a little-endian unsigned 32-bit integer
    (used for YM3b's trailing loop-frame field)."""
    assert len(ymdata) == 4
    return struct.unpack('<L', ymdata)[0]
def from_byte(b):
    """Pack one unsigned byte value (0..255) into a single-byte bytes object."""
    return struct.Struct('<B').pack(b)
def usage():
    """Print command-line help and terminate the process with status 1.

    Raises SystemExit(1), exactly like the previous ``exit(1)`` call, but
    via ``sys.exit`` -- the ``exit`` builtin comes from the ``site`` module
    and is not guaranteed to exist (e.g. under ``python -S`` or frozen apps).
    """
    print("YM format parser for hardware player")
    print("ymparser.py YM_FILE YMR_FILE")
    sys.exit(1)
def peek(filename):
    """Print the parsed header of an LHA-compressed YM chiptune file.

    Handles the YM2!/YM3!/YM3b/YM4!/YM5!/YM6! container revisions. Fields
    that older revisions do not store are filled with their historical
    defaults (2 MHz clock, 50 Hz replay rate, empty metadata strings).
    """
    lha = LhaFile(filename)
    files = lha.namelist()
    print(files)
    # A .ym archive is expected to wrap exactly one data stream.
    assert(len(files) == 1)
    ymdata = lha.read(files[0])
    print(ymdata[0:200])
    print(ymdata[-8:])
    ym = {}
    ym['tag'] = to_string(ymdata[0:4])
    if ym['tag'] in ['YM2!', 'YM3!']:
        # No header beyond the tag: 14 register bytes per frame follow.
        ym['frames'] = (len(ymdata) - 4) // 14
        ym['clock'] = 2000000
        ym['rate'] = 50
        ym['song_name'] = ''
        ym['author_name'] = ''
        ym['song_comment'] = ''
        ym['attributes'] = 1
        ym['samples'] = 0
        ym['loop'] = 0
        ym['additions'] = 0
    elif ym['tag'] in ['YM3b']:
        # Same as YM3! plus a trailing little-endian loop frame number.
        ym['frames'] = (len(ymdata) - 8) // 14
        ym['loop'] = to_dword_littleendian(ymdata[-4:])
        ym['clock'] = 2000000
        ym['rate'] = 50
        ym['song_name'] = ''
        ym['author_name'] = ''
        ym['song_comment'] = ''
        ym['attributes'] = 1
        ym['samples'] = 0
        ym['additions'] = 0
    elif ym['tag'] in ['YM4!']:
        # Fixed header, then three NUL-terminated metadata strings.
        ym['check'] = to_string(ymdata[4:12])
        ym['frames'] = to_dword(ymdata[12:16])
        ym['attributes'] = to_dword(ymdata[16:20])
        ym['samples'] = to_dword(ymdata[20:24])
        ym['loop'] = to_dword(ymdata[24:28])
        song_name_end = ymdata.index(b'\0', 28)
        author_name_end = ymdata.index(b'\0', song_name_end + 1)
        song_comment_end = ymdata.index(b'\0', author_name_end + 1)
        ym['song_name'] = to_string(ymdata[28:song_name_end])
        ym['author_name'] = to_string(ymdata[song_name_end + 1:author_name_end])
        ym['song_comment'] = to_string(ymdata[author_name_end + 1:song_comment_end])
        ym['clock'] = 2000000
        ym['rate'] = 50
        ym['additions'] = 0
        n = ym['frames']
        # Whatever follows the 16*frames register data (expected "End!" marker).
        ym['tagend'] = to_string(ymdata[song_comment_end + 1 + 16 * n:])
    elif ym['tag'] in ['YM5!', 'YM6!']:
        # YM5/YM6 add explicit clock, replay rate and loop fields.
        ym['check'] = to_string(ymdata[4:12])
        ym['frames'] = to_dword(ymdata[12:16])
        ym['attributes'] = to_dword(ymdata[16:20])
        ym['samples'] = to_word(ymdata[20:22])
        ym['clock'] = to_dword(ymdata[22:26])
        ym['rate'] = to_word(ymdata[26:28])
        ym['loop'] = to_dword(ymdata[28:32])
        ym['additions'] = to_word(ymdata[32:34])
        song_name_end = ymdata.index(b'\0', 34)
        author_name_end = ymdata.index(b'\0', song_name_end + 1)
        song_comment_end = ymdata.index(b'\0', author_name_end + 1)
        ym['song_name'] = to_string(ymdata[34:song_name_end])
        ym['author_name'] = to_string(ymdata[song_name_end + 1:author_name_end])
        ym['song_comment'] = to_string(ymdata[author_name_end + 1:song_comment_end])
        n = ym['frames']
        ym['tagend'] = to_string(ymdata[song_comment_end + 1 + 16 * n:])
    print(ym)
# CLI entry point: expects exactly one argument, the path to an existing
# LHA-compressed .ym file; otherwise print usage and exit.
if __name__ == '__main__':
    if len(sys.argv) != 2:
        usage()
    if not os.path.isfile(sys.argv[1]):
        usage()
    peek(sys.argv[1])
| true |
b37a16839eb7054056e6a5a91dca51e5c6324f66 | Python | codeplaysoftware/py_gen | /internal/funcs.py | UTF-8 | 2,806 | 3.203125 | 3 | [
"Apache-2.0"
] | permissive | # Copyright (C) Codeplay Software Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use these files except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# For your convenience, a copy of the License has been included in this
# repository.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
def get_space_count(line):
    """Return the length of *line*'s leading-whitespace prefix.

    NOTE: lstrip() removes any whitespace, so tabs count one each,
    not just spaces.
    """
    stripped = line.lstrip()
    return len(line) - len(stripped)
def add_spaces_to_lines(count, string):
    """Prefix every line of *string* except the first with *count* spaces.

    The first line is left untouched because it is assumed to already be
    indented at the insertion point. An empty *string* is returned
    unchanged (the previous implementation raised IndexError on it).
    """
    all_lines = string.splitlines(True)
    if not all_lines:  # '' splits into no lines at all
        return string
    pad = ' ' * count
    return all_lines[0] + ''.join(pad + line for line in all_lines[1:])
def read_from_file(file_name):
    """Read the whole file at *file_name* and return its contents as a str.

    NOTE(review): the old docstring claimed ".in" is appended to the name,
    but the code opens *file_name* exactly as given.
    """
    with open(file_name, 'r') as input_file:
        return input_file.read()
def insert_in_source(file_source, insertion_point, replacement_string):
    """Replace *insertion_point* in *file_source* with *replacement_string*,
    re-indented to match the (last) line that contains the marker.

    Returns the updated source string.
    """
    # Indentation of the last line containing the marker (0 if none found).
    indent = 0
    for line in file_source.splitlines(True):
        if insertion_point in line:
            indent = len(line) - len(line.lstrip())
    # Indent every replacement line after the first, which is already
    # positioned at the marker.
    pieces = replacement_string.splitlines(True)
    pad = ' ' * indent
    indented = pieces[0] + ''.join(pad + piece for piece in pieces[1:])
    return file_source.replace(insertion_point, indented)
def write_to_file(file_name, file_source):
    """Write *file_source* to *file_name*, replacing any existing content."""
    with open(file_name, 'w') as output_file:
        output_file.write(file_source)
def clang_format(file_name, clang_format_script):
    """Calls the input clang formatting script in a subprocess shell call.
    Provides the input filename as the only argument to the script.

    On a non-zero exit code the error is reported to stdout and swallowed
    (formatting failure is treated as non-fatal).
    """
    try:
        # SECURITY NOTE(review): shell=True with string concatenation allows
        # shell injection if file_name/clang_format_script are untrusted;
        # prefer subprocess.check_call([script, str(file_name)]).
        subprocess.check_call(
            clang_format_script + " " + str(file_name), shell=True)
    except subprocess.CalledProcessError as cperror:
        print("Call to " + clang_format_script + " failed")
        print("Exit code: " + str(cperror.returncode))
| true |
15904106131eec9d1f7485bf7a0ff5cabcafc708 | Python | Parthi10/hackerrank | /hackerrank_contests/week_of_code_27/hackonacci-matrix-rotations3.PY | UTF-8 | 1,224 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/bin/python3
import sys
from collections import defaultdict
# Read problem input: matrix size n and number of queries q.
n,q = input().strip().split(' ')
n,q = [int(n),int(q)]
# NOTE(review): this defaultdict is dead code -- it is immediately replaced
# by the plain dict on the next line, which seeds the memo with base cases.
hackonacciDictionary = defaultdict(int)
hackonacciDictionary = {0:0,1:1,2:2,3:3}
def hackonacci(i):
    """Return the i-th Hackonacci number, memoised in the module-level
    ``hackonacciDictionary`` (seeded with the base cases 0..3).

    Recurrence: h(i) = h(i-1) + 3*h(i-3).
    """
    if i in hackonacciDictionary:
        return hackonacciDictionary[i]
    value = hackonacci(i - 1) + 3 * hackonacci(i - 3)
    hackonacciDictionary[i] = value
    return value
# ans[k] = number of cells that differ after rotating by k*90 degrees;
# a 0-degree rotation trivially changes nothing.
ans = [0]
# Pre-fill the memo iteratively to avoid deep recursion later.
# NOTE(review): the limit expression looks suspect -- the else branch
# (n**4 <= 1000) yields a *smaller* bound than n**4; verify intent.
limit = n**4 if n**4>1000 else n**4 - 1000
for i in range(4, limit):
    hackonacciDictionary[i] = hackonacci(i)
# 90-degree count: cell (i, j) maps to (j, n+1-i); compare parities.
count=0
for i in range(1,n+1):
    for j in range(1,n+1):
        if hackonacci((i*j)**2)%2 != hackonacci((j*(n+1-i))**2)%2:
            count+=1
ans.append(count)
#180 degreee count
count=0
for i in range(1,n+1):
    for j in range(1,n+1):
        if hackonacci((i*j)**2)%2 != hackonacci(((n+1-i)*(n+1-j))**2)%2:
            count+=1
ans.append(count)
#270 degreee count
count=0
for i in range(1,n+1):
    for j in range(1,n+1):
        if hackonacci((i*j)**2)%2 != hackonacci(((n+1-j)*(i))**2)%2:
            count+=1
ans.append(count)
print(ans)
# Answer each query from the precomputed table (rotations repeat mod 360).
for a0 in range(q):
    angle = int(input().strip())
    print(ans[(angle%360)//90])
| true |
08a3aef190322a14bb838643da6cc84068aaadcb | Python | 6un9-h0-Dan/GeoInt-Localisator | /web.py | UTF-8 | 3,054 | 2.515625 | 3 | [] | no_license | import bs4
from bs4 import BeautifulSoup
import webbrowser
from sys import stdout
import requests
import sys
import time
from const import *
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def check_element(el, driver, method):
    """Return True if *el* exists on the current page, False otherwise.

    *method* selects the locator strategy: "xpath" or "css".
    Raises ValueError for an unknown *method* (previously an unknown
    method performed no lookup and silently returned True).
    """
    try:
        if method == "xpath":
            driver.find_element_by_xpath(el)
        elif method == "css":
            driver.find_element_by_css_selector(el)
        else:
            raise ValueError("unknown locator method: %r" % method)
    except NoSuchElementException:
        return False
    return True
def get_address(driver,address_list):
    """Scrape up to 20 address snippets from the current Google Maps result
    page, appending them to *address_list* in place.

    Returns (address_list, not_found) where not_found counts result slots
    where neither XPath matched. The hard-coded XPaths target Google Maps'
    generated DOM and will break whenever the markup changes.
    """
    count = 0
    not_found = 0
    time.sleep(3) #1.2
    for i in range(0,20):
        count+=1
        # Two XPath variants for the i-th result row (count+i steps by 2,
        # matching the anchor layout of the result list -- TODO confirm).
        xpath_address ="/html/body/jsl/div[3]/div[9]/div[7]/div/div[1]/div/div/div[4]/div[1]/a["+str(count+i)+"]/div[2]/div[1]/div[2]/span[6]"
        xpath2 = "/html/body/jsl/div[3]/div[9]/div[7]/div/div[1]/div/div/div[2]/div[1]/a["+str(count+i)+"]/div[2]/div[1]/div[2]/span[6]"
        if check_element(xpath_address,driver,"xpath"):
            address_list.append(driver.find_element_by_xpath(xpath_address).text)
        elif check_element(xpath2, driver, "xpath"):
            address_list.append(driver.find_element_by_xpath(xpath2).text)
        else:
            not_found+=1
    return address_list,not_found # [str], int
def change_page(driver):
    """Click Google Maps' "next page" button via JavaScript.

    Returns True if the button was found and clicked, False otherwise.
    """
    # CSS path of the pagination "next" button in Google Maps' generated DOM.
    next_button = "html body.keynav-mode-off.screen-mode jsl div#app-container.vasquette.pane-open-mode div#content-container div#pane div.widget-pane.widget-pane-visible div.widget-pane-content.scrollable-y div.widget-pane-content-holder div.section-layout.section-layout-root div.section-layout.section-scrollbox.scrollable-y.scrollable-show.section-layout-flex-vertical div.n7lv7yjyC35__root div.gm2-caption div div.n7lv7yjyC35__right button#n7lv7yjyC35__section-pagination-button-next.n7lv7yjyC35__button.noprint img.n7lv7yjyC35__button-icon"
    if check_element(next_button, driver,"css"):
        button = driver.find_element_by_css_selector(next_button)
        # JS click avoids "element not interactable" errors on overlaid buttons.
        driver.execute_script("arguments[0].click();", button)
        return True
    else:
        print("# No next button found")
        return False
def scrape_addresses(driver,query: str) -> [str]:
    """Use selenium to return a deduplicated list of addresses found on
    Google Maps for *query*, paging until no "next" button remains or too
    many result slots come back empty.

    Returns None when deduplication removed 10+ entries (a heuristic for a
    corrupted scrape). Quits the driver before returning.
    """
    # FIX: str.replace returns a new string; the original discarded it,
    # so spaces were never URL-encoded.
    query = query.replace(" ", "+")
    url = "https://www.google.fr/maps/search/"+query+"/13z"
    driver.get(url)
    address_list = []
    while True:
        address_list, not_found = get_address(driver, address_list)
        advanced = change_page(driver)
        if not advanced or not_found > 10:
            break
    driver.quit()
    # Heuristic sanity check: too many duplicates means a bad scrape.
    if len(list(set(address_list)))+10 <= len(address_list):
        return None
    else:
        return list(set(address_list))
# found at https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress(count, total, status=''):
    """Render a single-line, carriage-return progress bar on stdout.

    (Adapted from https://gist.github.com/vladignatyev/06860ec2040cb497f0f3)
    """
    width = 60
    filled = int(round(width * count / float(total)))
    percents = round(100.0 * count / float(total), 1)
    bar = '=' * filled + '-' * (width - filled)
    sys.stdout.write(f'[{bar}] {percents}% ...{status}\r')
    sys.stdout.flush()
| true |
dd40c8d143b0aa574cde92b7317ada3edc67e778 | Python | david30907d/iscom_crawler | /iscom.py | UTF-8 | 3,189 | 2.546875 | 3 | [] | no_license | from KCM.__main__ import KCM
import json, sys, requests, os, pyprind
def hybrid(ratioX, ratioY):
    """Blend the correlation ranking (module global ``tmp``) with the
    distance ranking (module global ``distance``) into one score table.

    Each rank position contributes its index as score: fully to a name that
    occupies the same position in both lists, otherwise split between the
    two names in the ratio ratioX:ratioY. Returns {name: score}.
    """
    scores = {}
    total = ratioX + ratioY
    for rank, (by_corr, by_dist) in enumerate(zip(tmp, distance)):
        corr_name, dist_name = by_corr[0], by_dist[0]
        if corr_name == dist_name:
            scores[corr_name] = scores.get(corr_name, 0) + rank
        else:
            scores[corr_name] = scores.get(corr_name, 0) + rank * ratioX / total
            scores[dist_name] = scores.get(dist_name, 0) + rank * ratioY / total
    return scores
# Build the KCM keyword-correlation model backed by a MongoDB instance.
k = KCM('cht', './ptt', uri='mongodb://172.17.0.8:27017')
# k.removeDB()
# k.main()
# Mapping of attraction name -> postal address (Taiwanese attractions).
name2add = json.load(open('台灣所有景點的地址.json', 'r'))
query = sys.argv[1]
print('以Correlation排序:\n')
# Top-N correlated attractions (argv[2]) that have a known address.
tmp = [i for i in k.get(query, 10000) if i[0] in name2add and len(i[0]) > 2 ][:int(sys.argv[2])]
print(tmp[:int(sys.argv[3])])
print('\n以距離排序:\n')
distance = []
# Resolve driving distance from the query location to each candidate,
# caching Google Distance Matrix responses under json/.
# SECURITY NOTE(review): the API key is hard-coded in the URL below.
for i in pyprind.prog_bar(tmp):
    try:
        if os.path.isfile('json/query-' + i[0]+'.json'):
            res = json.load(open('json/query-' + i[0]+'.json','r'))
        else:
            res = requests.get('https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins={}&destinations={}&key=AIzaSyB20qKjF1ePtq9t1luvFd-433J41anlDGU'.format(name2add[query], name2add[i[0]])).json()
            json.dump(res, open('json/query-' + i[0]+'.json','w'))
        if res['rows'][0]['elements'][0]['status'] == 'OK':
            distance.append((i[0], res['rows'][0]['elements'][0]['distance']['value']))
    except Exception as e:
        # Best-effort: skip attractions Google cannot resolve.
        pass
distance = sorted(distance, key=lambda x:x[1])[:int(sys.argv[3])]
print(distance)
print('\nHybrid方法排序,距離 : correlation = 2 : 8\n')
result = hybrid(2, 8)
result = sorted(result.items(), key=lambda x:-x[1])[:int(sys.argv[3])]
print(result)
print('\n印出類別:\n')
# Attach the tourism-bureau category of each recommended attraction.
header = json.load(open('交通局class.json', 'r'))
print([(i[0], header.get(i[0], '未知')) for i in result])
##########################
# 計算loss的區塊 #
##########################
# error = []
# ans = json.load(open('ans.json', 'r'))
# for rationx in pyprind.prog_bar(range(1, 5000)):
# for ratioy in range(1, 5000):
# result = hybrid(rationx, ratioy)
# myans = [i[0] for i in sorted(result.items(), key=lambda x:-x[1])[:10]]
# thisError = 0
# for a, my in zip(ans, myans):
# if os.path.isfile('json/' + a + '-' + my + '.json'):
# res = json.load(open('json/' + a + '-' + my + '.json','r'))
# else:
# res = requests.get('https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins={}&destinations={}&key=AIzaSyB20qKjF1ePtq9t1luvFd-433J41anlDGU'.format(name2add[a], name2add[my])).json()
# json.dump(res, open('json/' + a + '-' + my + '.json','w'))
# try:
# if res['rows'][0]['elements'][0]['status'] == 'OK':
# thisError += res['rows'][0]['elements'][0]['distance']['value']
# except Exception as e:
# # 代表有些景點是因為地址問題或其他因素,導致google查不到距離
# pass
# error.append(thisError)
# json.dump(error, open('error.json', 'w'))
| true |
7f8d1a3e44ec7bd46987ef7ce4a6f130577fa530 | Python | ZehaoDong32/Hierarchical-Risk-Parity-Approach | /Code/Portfolio_Models.py | UTF-8 | 8,430 | 2.671875 | 3 | [] | no_license | from HRP import *
from MVP import *
import matplotlib.pyplot as plt
import seaborn as sns
import pyfolio as pf
import matplotlib
sns.set_context("talk")
sns.set_style("darkgrid")
sns.set_palette(sns.color_palette(palette='Set2'))
matplotlib.rcParams.update({'font.family': 'Arial',
'font.size': 25})
class models:
    """Weight calculators for the portfolio strategies compared in this
    project: HRP, equal-weight, (unconstrained) minimum-variance, inverse
    variance, and random weights."""

    def __init__(self):
        return

    def get_weight(self, data, model):
        """Dispatch to the weight calculator named by *model*.

        Raises ValueError for an unknown model name.
        """
        if model == 'HRP':
            model_weight = self.get_HRP_weights(data)
        elif model == 'EW':
            model_weight = self.get_EW_weights(data)
        elif model == 'MVP':
            model_weight = self.get_MVP_weights(data)
        elif model == 'IVP':
            model_weight = self.get_IVP_weights(data)
        elif model == 'RDM':
            model_weight = self.get_RDM_weights(data)
        elif model == 'UMVP':
            model_weight = self.get_UMVP_weights(data)
        else:
            raise ValueError('No such Model!')
        return model_weight

    def get_vol(self, data, weights):
        """Annualized portfolio volatility: sqrt(w' C w * 252).

        *data* is expected to be a returns DataFrame (not prices); the 252
        factor assumes daily observations.
        """
        # normal volatility calculation
        cov = data.cov().values
        weights = weights.values
        var = np.dot(weights.T.dot(cov), weights)
        return np.sqrt(var * 252)

    def get_HRP_weights(self, price_data):
        """Hierarchical Risk Parity weights (first element of get_HRP_result)."""
        HRP_result = get_HRP_result(price_data)
        return HRP_result[0]

    def get_EW_weights(self, price_data):
        """Equal weights, 1/N per asset."""
        N = price_data.shape[1]
        EW_weights = [1 / N] * N
        return pd.Series(EW_weights)

    def get_IVP_weights(self, price_data):
        """Inverse-variance weights, normalized to sum to 1."""
        return_data = price_data.pct_change().dropna()
        cov = return_data.cov().values
        ivp_weights = 1. / np.diag(cov)
        ivp_weights /= ivp_weights.sum()
        return pd.Series(ivp_weights)

    def get_MVP_weights(self, price_data):
        """Minimum-variance weights from the project MVP solver."""
        mvp_weights = MVP(price_data)[0]
        return mvp_weights

    def get_UMVP_weights(self, price_data):
        """Unconstrained minimum-variance weights from the project UMVP solver."""
        umvp_weights = UMVP(price_data)
        return umvp_weights

    def get_RDM_weights(self, price_data, seed=1):
        """Random weights (reproducible via *seed*), normalized to sum to 1."""
        np.random.seed(seed)
        N = price_data.shape[1]
        rdm = np.random.randint(1, 100, N)
        rdm = rdm / rdm.sum()
        return pd.Series(rdm)
class out_test(models):
    """Out-of-sample backtest: monthly rebalancing with weights estimated on
    a trailing window of *cal_period* observations."""

    def __init__(self, full_p_data, cal_period=0):
        models.__init__(self)
        self.full_p_data = full_p_data      # full price DataFrame
        self.period = cal_period            # look-back window length (rows)
        self.model_weights = {}             # model name -> weights history

    def cal_weight(self, price_data, model):
        """Estimate weights for one rebalance date using the trailing
        *self.period* rows of the full price history ending at the last
        index of *price_data*."""
        last_index = price_data.index[-1]
        last_loc = self.full_p_data.index.get_loc(last_index)
        data = self.full_p_data.iloc[last_loc - self.period:last_loc, :]
        weights = self.get_weight(data, model)
        return weights

    # how to realize rolling with steps in Python ?
    def rebalance_weights(self, price_data, model, freq='BM'):
        """Weights re-estimated at every period end (business month by
        default), shifted one period so each row applies to the *next*
        period's returns."""
        tickers = price_data.columns.tolist()
        weight_df = price_data.resample(freq).apply(
            self.cal_weight, model=model)
        weight_df.columns = tickers
        # Align weights computed from the previous month with the next
        # month's returns.
        weights = weight_df.shift().dropna()
        return weights

    def rebalance_test(self, price_data, model, freq='BM'):
        """Monthly portfolio returns for *model*, as a one-column DataFrame.

        NOTE(review): rebalance_weights is called with freq='BM' regardless
        of the *freq* argument -- confirm that is intentional.
        """
        weights_mon = self.rebalance_weights(price_data, model, freq='BM')
        return_mon = price_data.resample(freq).last().pct_change().dropna()
        self.model_weights[model] = weights_mon
        model_return = return_mon * weights_mon
        mon_return = pd.DataFrame(model_return.sum(axis=1), columns=[model])
        return mon_return

    def run_test(self, models_list):
        """Backtest every model in *models_list*; returns (monthly returns
        DataFrame with one column per model, weights-history dict)."""
        print('Out-of-sample test starts:')
        for i, model in enumerate(models_list):
            # Skip the first self.period rows, which are reserved as the
            # initial estimation window.
            result = self.rebalance_test(
                self.full_p_data.iloc[self.period + 1:, :], model=model)
            print('%s is finished' % model)
            if i == 0:
                self.r_result = result
            else:
                self.r_result = pd.merge(
                    self.r_result, result, left_index=True, right_index=True)
        self.r_result.columns = models_list
        return self.r_result, self.model_weights

    def plot_cum_return(self, r=None):
        """Plot cumulative returns per model (optionally from a supplied
        returns DataFrame *r*)."""
        if r is not None:
            self.r_result = r
        cum_return = (1 + self.r_result).cumprod()
        plt.figure()
        colors = sns.color_palette(
            palette='Set3', n_colors=self.r_result.shape[1])
        for i, n in enumerate(self.r_result.columns):
            cum_return[n].plot(color=colors[i])
        plt.legend(loc=0)
        plt.title('Cumulative returns')

    def plot_SR(self, r=None, Rf=0):
        """Bar plot of per-model Sharpe ratios (pyfolio, monthly data).

        The second y-tick label is highlighted in red -- presumably the HRP
        row; verify the ordering of models_list.
        """
        if r is not None:
            self.r_result = r
        fig, ax = plt.subplots()
        SR = pf.timeseries.sharpe_ratio(
            self.r_result, risk_free=Rf, period='monthly')
        sns.barplot(x=SR, y=self.r_result.columns, ax=ax)
        ax.get_yticklabels()[1].set_color("red")
        plt.title('Sharpe Ratio')

    def plot_ann_vol(self, r=None):
        """Bar plot of per-model annualized volatility (pyfolio, monthly data)."""
        if r is not None:
            self.r_result = r
        fig, ax = plt.subplots()
        ann_vol = pf.timeseries.annual_volatility(
            self.r_result, period='monthly')
        sns.barplot(x=ann_vol, y=self.r_result.columns, ax=ax)
        ax.get_yticklabels()[1].set_color("red")
        plt.title('Annualized Volatility')
class in_test(models):
    """In-sample test: weights estimated once on the full history, with
    annualized return/volatility computed on the same sample."""

    def __init__(self, full_p_data, models_list):
        models.__init__(self)
        self.full_p_data = full_p_data                       # price DataFrame
        self.return_data = self.full_p_data.pct_change().dropna()  # daily returns
        self.r_result = []       # annualized returns, aligned with models_list
        self.vol_result = []     # annualized volatilities, same order
        self.weights = {}        # model name -> weight Series
        self.models_list = models_list

    def run_test(self, models_list):
        """Compute in-sample annualized return and volatility per model.

        Returns (return list, volatility list, weights dict); results are
        also stored on the instance for the plotting helpers.
        """
        print('In-sample test starts:')
        for i, model in enumerate(models_list):
            w = self.get_weight(self.full_p_data, model)
            # Annualized return: w . (mean daily return * 252).
            ann_r = w.values.dot(self.return_data.mean().values * 252)
            ann_vol = self.get_vol(self.return_data, w)
            self.r_result.append(ann_r)
            self.vol_result.append(ann_vol)
            self.weights[model] = w
            print('%s is finished' % model)
        return self.r_result, self.vol_result, self.weights

    def plot_SR(self, Rf=0):
        """Bar plot of per-model Sharpe ratios, (ann_return - Rf) / ann_vol."""
        fig, ax = plt.subplots()
        SR = (np.array(self.r_result) - Rf) / np.array(self.vol_result)
        sns.barplot(x=SR, y=self.models_list, ax=ax)
        # Highlight the second model row in red (presumably HRP -- verify).
        ax.get_yticklabels()[1].set_color("red")
        plt.title('Sharpe Ratio')

    def plot_r_vol(self):
        """Side-by-side bar plots of annualized return and volatility."""
        # plot annualized return plot
        fig, ax = plt.subplots(ncols=2)
        sns.barplot(x=self.r_result, y=self.models_list, ax=ax[0])
        ax[0].get_yticklabels()[1].set_color("red")
        ax[0].set(title='Annualized Return')
        # plot annualized volatility plot
        sns.barplot(x=self.vol_result, y=self.models_list, ax=ax[1])
        ax[1].get_yticklabels()[1].set_color("red")
        ax[1].set(title='Annualized Volatility')

    def plot_frontier(self):
        """Plot the efficient frontier (module-level plot_frontier helper,
        presumably imported from MVP -- verify) and overlay each model's
        (volatility, return) point; HRP is drawn as a red-labelled diamond,
        MVP is skipped because it lies on the frontier itself."""
        plot_frontier(self.full_p_data)
        colors = sns.color_palette(
            palette='Set2', n_colors=len(
                self.models_list))
        for i in range(len(self.models_list)):
            if self.models_list[i] == 'HRP':
                plt.plot(
                    self.vol_result[i],
                    self.r_result[i],
                    'D',
                    color=colors[i],
                    label="point")
                plt.text(
                    self.vol_result[i] + 0.003,
                    self.r_result[i] - 0.004,
                    self.models_list[i],
                    fontsize=18,
                    color='red')
            elif self.models_list[i] != 'MVP':
                plt.plot(
                    self.vol_result[i],
                    self.r_result[i],
                    'ro',
                    color=colors[i],
                    label="point")
                plt.text(
                    self.vol_result[i] + 0.003,
                    self.r_result[i] - 0.004,
                    self.models_list[i],
                    fontsize=18)
        plt.title('Efficient Frontier')
        plt.xlabel('Volatility')
        plt.ylabel('Return')
| true |