blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a35d84c078796a37977697c5e1b81e714406650d
|
Python
|
Boberkraft/Data-Structures-and-Algorithms-in-Python
|
/chapter5/R-5.7.py
|
UTF-8
| 352
| 3.484375
| 3
|
[] |
no_license
|
"""
Let A be an array of size n ≥ 2 containing integers from 1 to n−1, inclusive,
with exactly one repeated. Describe a fast algorithm for finding the
integer in A that is repeated.
I think there was a similar question earlier.
My solution is O(n):
1. a <- sum all the numbers in A
2. b <- sum of 1..(n-1) via the closed form (n^2 - n)/2
3. answer = a - b  (the surplus contributed by the repeated value)
"""
| true
|
a6d6e920937ed3713dc80298a9055fdea9105332
|
Python
|
Bubai-Rahaman/CP_2020_Assignment2
|
/question_8.py
|
UTF-8
| 2,606
| 3.28125
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_bvp as bvp
#1st equation
def fun1(x, y):
    """First-order system for problem 1, y'' = -exp(-2y): returns [y', y'']."""
    first_deriv = y[1]
    second_deriv = -np.exp(-2.0 * y[0])
    return np.vstack((first_deriv, second_deriv))
def bc1(ya, yb):
    """Boundary residuals for problem 1: y(1) = 0 and y(2) = ln 2."""
    left_residual = ya[0]
    right_residual = yb[0] - np.log(2)
    return np.array([left_residual, right_residual])
def y1_true(x):
    """Analytic solution of problem 1: y = ln x."""
    return np.log(x)
#2nd equation
def fun2(x, y):
    """First-order system for problem 2, y'' = y' cos x - y ln y: returns [y', y'']."""
    rhs = y[1] * np.cos(x) - y[0] * np.log(y[0])
    return np.vstack((y[1], rhs))
def bc2(ya, yb):
    """Boundary residuals for problem 2: y(0) = 1 and y(pi/2) = e."""
    left_residual = ya[0] - 1
    right_residual = yb[0] - np.exp(1)
    return np.array([left_residual, right_residual])
def y2_true(x):
    """Analytic solution of problem 2: y = exp(sin x)."""
    return np.exp(np.sin(x))
#3rd equation
def fun3(x, y):
    """First-order system for problem 3, y'' = -(2(y')^3 + y^2 y')/cos x: returns [y', y'']."""
    rhs = -(2.0 * y[1] ** 3 + y[0] ** 2 * y[1]) / np.cos(x)
    return np.vstack((y[1], rhs))
def bc3(ya, yb):
    """Boundary residuals for problem 3: y(pi/4) = 2^(-1/4), y(pi/3) = 12^(1/4)/2."""
    left_residual = ya[0] - 2 ** (-1 / 4)
    right_residual = yb[0] - 12 ** (1 / 4) / 2
    return np.array([left_residual, right_residual])
def y3_true(x):
    """Analytic solution of problem 3: y = sqrt(sin x)."""
    return np.sqrt(np.sin(x))
#4th equation
def fun4(x, y):
    """First-order system for problem 4, y'' = (1 - (y')^2 - y sin x)/2: returns [y', y'']."""
    rhs = 1 / 2 - y[1] ** 2 / 2 - y[0] * np.sin(x) / 2
    return np.vstack((y[1], rhs))
def bc4(ya, yb):
    """Boundary residuals for problem 4: y(0) = 2 and y(pi) = 2."""
    left_residual = ya[0] - 2
    right_residual = yb[0] - 2
    return np.array([left_residual, right_residual])
def y4_true(x):
    """Analytic solution of problem 4: y = 2 + sin x."""
    return 2+np.sin(x)
#1st differential equation
# Coarse initial mesh and a zero initial guess for (y, y').
x = np.linspace(1,2,10)
y = np.zeros((2,x.size))
res = bvp(fun1, bc1, x, y)
# Dense grid for plotting the converged solution.
x_plot1 = np.linspace(1,2,100)
y_plot1 = res.sol(x_plot1)[0]
#2nd differential equation
x =np.linspace(0,np.pi/2,5)
y = np.zeros((2,x.size))
# Start from y = 1 so log(y) in fun2 is defined during iteration.
y[0] = 1
res = bvp(fun2,bc2, x, y)
x_plot2 = np.linspace(0,np.pi/2,100)
y_plot2 = res.sol(x_plot2)[0]
#3rd differential equation
x = np.linspace(np.pi/4,np.pi/3,5)
y = np.zeros((2,x.size))
res = bvp(fun3, bc3, x, y)
x_plot3 = np.linspace(np.pi/4,np.pi/3,100)
y_plot3 = res.sol(x_plot3)[0]
#4th differential equation
x = np.linspace(0,np.pi,5)
y = np.zeros((2,x.size))
res = bvp(fun4, bc4, x, y)
x_plot4 = np.linspace(0,np.pi,100)
y_plot4 = res.sol(x_plot4)[0]
# One 2x2 figure comparing numerical vs analytic solutions.
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
#plot#1
ax1.plot(x_plot1,y_plot1,'g.',label = 'Numerical solution')
ax1.plot(x_plot1,y1_true(x_plot1),'r', label = 'Analytic solution')
ax1.legend()
ax1.set_xlabel('x')
ax1.set_ylabel('y(x)')
ax1.set_title(r"$1st problem$")
#plot#2
ax2.plot(x_plot2,y_plot2,'g.',label = 'Numerical solution')
ax2.plot(x_plot2,y2_true(x_plot2),'r', label = 'Analytic solution')
ax2.legend()
ax2.set_xlabel('x')
ax2.set_ylabel('y(x)')
ax2.set_title(r"$2nd problem$")
#plot#3
ax3.plot(x_plot3,y_plot3,'g.',label = 'Numerical solution')
ax3.plot(x_plot3,y3_true(x_plot3),'r', label = 'Analytic solution')
ax3.legend()
ax3.set_xlabel('x')
ax3.set_ylabel('y(x)')
ax3.set_title(r"$3rd problem$")
#plot#4
ax4.plot(x_plot4,y_plot4,'g.',label = 'Numerical solution')
ax4.plot(x_plot4,y4_true(x_plot4),'r', label = 'Analytic solution')
ax4.legend()
ax4.set_xlabel('x')
ax4.set_ylabel('y(x)')
ax4.set_title(r"$4th problem$")
plt.show()
| true
|
848bf8e1d7225a187e427792f28b54e4326fbb50
|
Python
|
skshoyeb/wop-dev-py
|
/main.py
|
UTF-8
| 2,726
| 2.59375
| 3
|
[] |
no_license
|
from flask import Flask, request
from flask_cors import CORS, cross_origin
import json
import base64
import requests
import logging
from db import add_to_fb, user_signup, get_posts, user_login, get_user_data, update_favs, upload_to_storage, get_post_by_id
from textAnalysis import get_sentiment_info
# WSGI application instance; CORS is enabled for all routes.
app = Flask(__name__)
CORS(app)
def get_as_base64(url):
    """Download *url* and return its body as base64-encoded bytes."""
    body = requests.get(url).content
    return base64.b64encode(body)
@app.route('/')
def default():
    """Root endpoint; doubles as a simple liveness check."""
    return 'WOP!!!'
@app.route('/add-positive-backup', methods=["POST"])
def add_positive_backup():
    """Store the JSON-encoded 'data' card via add_to_fb when its sentiment is
    'positive'; returns the sentiment label either way."""
    payload = json.loads(request.data)
    card = json.loads(payload['data'])
    sentiment = get_sentiment_info(card)
    if sentiment == 'positive':
        add_to_fb(card)
    return sentiment
@app.route('/add-positive', methods=["POST"])
def add_positive():
    """Build a card from multipart form fields and store it (with the optional
    uploaded image) when its sentiment is 'positive'.

    Responds with JSON: {'sentiment_info': ..., 'card': ...}.
    """
    image_file = request.files.get("imageFile")
    form = request.form
    card = {
        'title': form.get("title"),
        'content': form.get("content"),
        'image': form.get("image"),
    }
    sentiment = get_sentiment_info(card)
    if sentiment == 'positive':
        add_to_fb(card, image_file)
    response = {'sentiment_info': sentiment, 'card': card}
    return json.dumps(response)
@app.route('/upload-image', methods=["POST"])
def upload_image():
    """Persist the uploaded 'imageFile' to storage under its original filename."""
    uploaded = request.files['imageFile']
    return upload_to_storage(uploaded, 'file', uploaded.filename)
@app.route('/get-positives', methods=["GET"])
def get_positives():
    """Return all stored posts via db.get_posts."""
    return get_posts()
@app.route('/get-post', methods=["POST"])
def get_post():
    """Fetch one post by the id carried in the request body's 'data' field."""
    payload = json.loads(request.data)
    post_id = payload.get("data")
    print(post_id)  # debug trace of the requested id
    return get_post_by_id(post_id)
@app.route('/sign-up', methods=["POST"])
def sign_up():
    """Register the user described by the JSON-encoded 'user' field."""
    payload = json.loads(request.data)
    new_user = json.loads(payload['user'])
    return user_signup(new_user)
@app.route('/log-in', methods=["POST"])
def log_in():
    """Authenticate the user described by the JSON-encoded 'user' field."""
    payload = json.loads(request.data)
    credentials = json.loads(payload['user'])
    return user_login(credentials)
@app.route('/get-user-info', methods=["POST"])
def get_user_info():
    """Return stored data for the user in the JSON-encoded 'user' field."""
    payload = json.loads(request.data)
    target_user = json.loads(payload['user'])
    return get_user_data(target_user)
@app.route('/update-favorites', methods=["POST"])
def update_favorites():
    """Pass the JSON-encoded 'card' payload to db.update_favs."""
    payload = json.loads(request.data)
    card = json.loads(payload['card'])
    return update_favs(card)
@app.route('/save-image', methods=["POST"])
def save_image():
    """Persist an image identified by the 'url' field of the JSON body.

    NOTE(review): upload_to_storage is called with three arguments in
    upload_image() but only one here -- confirm it supports a single-URL form.
    """
    req = json.loads(request.data)
    url =req['url']
    return upload_to_storage(url)
if __name__ == '__main__':
    # Listen on all interfaces (Flask's default port 5000).
    app.run('0.0.0.0')
| true
|
2410e9f4f01847bdcb964ed9b32d731e39e9d551
|
Python
|
lichao666500/-algorithm015
|
/Week_09/reverseStr.py
|
UTF-8
| 314
| 3.03125
| 3
|
[] |
no_license
|
class Solution(object):
    def reverseStr(self, s, k):
        """
        Reverse the first k characters of every 2k-sized chunk of s,
        leaving the second half of each chunk untouched.
        :type s: str
        :type k: int
        :rtype: str
        """
        pieces = []
        for start in range(0, len(s), 2 * k):
            head = s[start:start + k][::-1]
            tail = s[start + k:start + 2 * k]
            pieces.append(head + tail)
        return ''.join(pieces)
| true
|
a30bed534655cf68cc9caf11b3fbedf775bf078d
|
Python
|
bsets/Distributed_ML_with_PySpark_for_Cancer_Tumor_Classification
|
/Tumor_Gene_Classification_using_Multinomial_Logistic_Regression/csv2libsvm1.py
|
UTF-8
| 1,761
| 3.09375
| 3
|
[
"CC0-1.0"
] |
permissive
|
#!/usr/bin/env python
"""
Convert CSV file to libsvm format. Works only with numeric variables.
Put -1 as label index (argv[3]) if there are no labels in your file.
Expecting no headers. If present, headers can be skipped with argv[4] == 1.
"""
import sys
import csv
import operator
from collections import defaultdict
def construct_line(label, line):
    """Format one CSV row as a libsvm line: '<label> <idx>:<value> ...'.

    Features that are empty or numerically zero are omitted (libsvm is a
    sparse format); the literal string 'NaN' is written out as 0.0.
    Feature indices are 1-based.
    """
    fields = [label]
    for idx, item in enumerate(line):
        if item == '' or float(item) == 0.0:
            continue  # sparse format: skip missing/zero features
        elif item == 'NaN':
            item = "0.0"
        fields.append("%s:%s" % (idx + 1, item))
    return " ".join(fields) + "\n"
# --- command-line driver ---
input_file = sys.argv[1]
try:
    output_file = sys.argv[2]
except IndexError:
    output_file = input_file + ".out"
try:
    label_index = int(sys.argv[3])
except IndexError:
    label_index = 0
try:
    # Bug fix: argv entries are strings, so the string "0" was truthy and
    # still skipped the header row. Convert to int as the docstring implies.
    skip_headers = int(sys.argv[4])
except IndexError:
    skip_headers = 0
# Context managers close both handles even on error (they were leaked before).
with open(input_file, 'rt') as infile, open(output_file, 'wb') as outfile:
    reader = csv.reader(infile)
    if skip_headers:
        next(reader)  # discard the header row (idiomatic form of reader.__next__())
    for line in reader:
        if label_index == -1:
            label = '1'  # no label column in the input
        else:
            label = line.pop(label_index)
        outfile.write(construct_line(label, line).encode('utf-8'))
| true
|
0e3bfdebf524014edcbb94e4b57c0eb091361c66
|
Python
|
bmiltz/cascade-at
|
/src/cascade_at/inputs/utilities/covariate_weighting.py
|
UTF-8
| 7,579
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
from intervaltree import IntervalTree
from cascade_at.core.log import get_loggers
from cascade_at.inputs.utilities.gbd_ids import make_age_intervals, make_time_intervals
from cascade_at.inputs import InputsError
LOG = get_loggers(__name__)
class CovariateInterpolationError(InputsError):
    """Raised when there is an issue with covariate interpolation
    (e.g. the requested age or time span matches no covariate interval)."""
    pass
def values(interval):
    """Unpack an IntervalTree interval into a (begin, end, data) tuple."""
    begin, end, payload = interval.begin, interval.end, interval.data
    return begin, end, payload
def interval_weighting(intervals, lower, upper):
    """
    Compute fractional-overlap weights of a sorted run of intervals against
    the span [lower, upper].

    ``intervals`` is of the form ((begin, end, id), ...), sorted; interior
    intervals get weight 1, while the first and last get the fraction of
    their width that lies inside the requested bounds.

    Returns a 1-D numpy array with one weight per interval.
    """
    if len(intervals) == 1:
        # A single interval covers the whole span.
        return np.asarray([1])
    wts = np.ones(len(intervals))
    lower_limit, upper_limit = intervals[0], intervals[-1]
    # Bug fix: the original used np.diff(...), which returns a 1-element
    # array; assigning that to a scalar slot is deprecated in NumPy >= 1.25
    # (and an error in newer releases). Plain scalar arithmetic is exact
    # and unambiguous.
    wts[0] = (lower_limit[1] - lower) / (lower_limit[1] - lower_limit[0])
    wts[-1] = (upper - upper_limit[0]) / (upper_limit[1] - upper_limit[0])
    return wts
class CovariateInterpolator:
    """Population-weighted interpolation of a covariate onto arbitrary
    age/time spans, backed by interval trees over the covariate's own
    age groups and years."""
    def __init__(self,
                 covariate,
                 population):
        """
        Interpolates a covariate by population weighting.
        :param covariate: (pd.DataFrame)
        :param population: (pd.DataFrame)
        """
        # Covariates must be sorted by both age_group_id and age_lower because age_lower is not unique to age_group_id
        indices = ['location_id', 'sex_id', 'year_id', 'age_group_id']
        sort_order = indices + ['age_lower']
        self.covariate = covariate.sort_values(by=sort_order)
        self.population = population.sort_values(by=sort_order)
        self.location_ids = self.covariate.location_id.unique()
        self.year_min = self.covariate.year_id.min()
        # year_max is exclusive: one past the last observed year.
        self.year_max = self.covariate.year_id.max() + 1
        self.age_intervals = make_age_intervals(df=self.covariate)
        self.time_intervals = make_time_intervals(df=self.covariate)
        # Flat (location, sex, year, age_group) -> value lookup tables for speed.
        self.dict_cov = dict(zip(
            map(tuple, self.covariate[indices].values.tolist()), self.covariate['mean_value'].values
        ))
        self.dict_pop = dict(zip(
            map(tuple, self.population[indices].values.tolist()), self.population['population'].values
        ))
    @staticmethod
    def _restrict_time(time, time_min, time_max):
        """Clamp *time* into the closed range [time_min, time_max]."""
        return max(min(time, time_max), time_min)
    def _weighting(self, age_lower, age_upper, time_lower, time_upper):
        """Return (age_group_ids, year_ids, weight matrix) for the span.

        The weight matrix has shape (len(year_ids), len(age_group_ids)) and
        holds the fraction of each age/time cell covered by the span.
        Raises CovariateInterpolationError when no interval matches.
        """
        if age_lower == age_upper:
            age_groups = sorted(map(values, self.age_intervals[age_lower]))
        else:
            age_groups = sorted(map(values, self.age_intervals[age_lower: age_upper]))
        if not age_groups:
            raise CovariateInterpolationError(
                f"There is no covariate age group for age lower {age_lower} and age upper {age_upper}."
            )
        age_group_ids = [a[-1] for a in age_groups]
        age_wts = interval_weighting(tuple(age_groups), age_lower, age_upper)
        # We are *not* linearly interpolating past the covariate time
        # ranges -- instead we carry over the values from the left
        # or rightmost time point.
        time_lower = self._restrict_time(time_lower, time_min=self.year_min, time_max=self.year_max)
        time_upper = self._restrict_time(time_upper, time_min=self.year_min, time_max=self.year_max)
        # This is to ensure that the time_lower can actually subset
        # an interval. For example, if time_lower = 2012 and time_upper = 2012,
        # but the max interval goes from 2011-2012, it will not be able
        # to select that interval until we decrease time_lower.
        # We don't have to do this on the leftmost end, however,
        # because that's already taken care of by _restrict_time,
        # and the leftmost point of the interval *is* the key for IntervalTrees.
        if not self.time_intervals.overlaps(time_lower):
            time_lower -= 1
        if time_lower == time_upper:
            time_groups = sorted(map(values, self.time_intervals[time_lower]))
        else:
            time_groups = sorted(map(values, self.time_intervals[time_lower: time_upper]))
        if not time_groups:
            raise CovariateInterpolationError(
                f"There is no covariate time group for time lower {time_lower} and time upper {time_upper}."
            )
        year_ids = [t[-1] for t in time_groups]
        time_wts = interval_weighting(tuple(time_groups), time_lower, time_upper)
        # The order of outer must agree with the covariate and population sort order
        wt = np.outer(time_wts, age_wts)
        return age_group_ids, year_ids, wt
    def interpolate(self, loc_id, sex_id, age_lower, age_upper, time_lower, time_upper):
        """
        Main interpolation function.

        Returns the population-weighted average covariate value over the
        requested span, or None when loc_id is absent from the covariate.
        """
        if loc_id not in self.location_ids:
            LOG.warning(f"Covariate is missing for location_id {loc_id},"
                        f"sex_id {sex_id} -- setting the value to None.")
            cov_value = None
        else:
            age_group_ids, year_ids, epoch_weights = self._weighting(
                age_lower=age_lower, age_upper=age_upper,
                time_lower=time_lower, time_upper=time_upper
            )
            shape = epoch_weights.shape
            # This loop indexing order matters, and must agree with the covariate and population sort order
            cov_value = np.asarray([self.dict_cov[(loc_id, sex_id, year_id, age_id)]
                                    for year_id in year_ids for age_id in age_group_ids]).reshape(shape)
            # This loop indexing order matters, and must agree with the covariate and population sort order
            pop_value = np.asarray([self.dict_pop[(loc_id, sex_id, year_id, age_id)]
                                    for year_id in year_ids for age_id in age_group_ids]).reshape(shape)
            weight = epoch_weights * pop_value
            cov_value = np.average(cov_value, weights=weight)
        return cov_value
def get_interpolated_covariate_values(data_df, covariate_dict,
                                      population_df):
    """
    Gets the unique age-time combinations from the data_df, and creates
    interpolated covariate values for each of these combinations by population-weighting
    the standard GBD age-years that span the non-standard combinations.
    :param data_df: (pd.DataFrame)
    :param covariate_dict: Dict[pd.DataFrame] with covariate names as keys
    :param population_df: (pd.DataFrame)
    :return: pd.DataFrame -- a copy of data_df with one new column per covariate
    """
    data = data_df.copy()
    pop = population_df.copy()
    # Each group shares one (location, sex, age span, time span), so the
    # interpolated value is computed once and broadcast to the group's rows.
    data_groups = data.groupby([
        'location_id', 'sex_id', 'age_lower', 'age_upper', 'time_lower', 'time_upper'
    ], as_index=False)
    cov_objects = {cov_name: CovariateInterpolator(covariate=raw_cov, population=pop)
                   for cov_name, raw_cov in covariate_dict.items()}
    num_groups = len(data_groups)
    for cov_id, cov_obj in cov_objects.items():
        LOG.info(f"Interpolating covariate {cov_id}.")
        for i, (k, v) in enumerate(data_groups):
            if i % 1000 == 0:
                LOG.info(f"Processed {i} of {num_groups} data groups.")
            [loc_id, sex_id, age_lower, age_upper, time_lower, time_upper] = k
            cov_value = cov_obj.interpolate(
                loc_id=loc_id, sex_id=sex_id,
                age_lower=age_lower, age_upper=age_upper,
                time_lower=time_lower, time_upper=time_upper
            )
            data.loc[v.index, cov_id] = cov_value
    return data
| true
|
2487a7143d69e90a633d79ce9dd4a23b6d7b707e
|
Python
|
czarjulius/Prime_Factor_py
|
/test_prime_factor.py
|
UTF-8
| 757
| 3.140625
| 3
|
[] |
no_license
|
from unittest import TestCase
from prime_factor import PrimeFactor
class TestFactor(TestCase):
    """Unit tests for PrimeFactor.of: edge cases, primes, and composites.

    Bug fix: assertEquals is a deprecated alias of assertEqual and was
    removed in Python 3.12; all assertions now use assertEqual.
    """
    def test(self):
        self.assertEqual(True, True)
    def test_0(self):
        self.assertEqual(PrimeFactor.of(0), [])
    def test_1(self):
        self.assertEqual(PrimeFactor.of(1), [])
    def test_2(self):
        self.assertEqual(PrimeFactor.of(2), [2])
    def test_3(self):
        self.assertEqual(PrimeFactor.of(3), [3])
    def test_5(self):
        self.assertEqual(PrimeFactor.of(5), [5])
    def test_4(self):
        self.assertEqual(PrimeFactor.of(4), [2, 2])
    def test_9(self):
        self.assertEqual(PrimeFactor.of(9), [3, 3])
    def test_letter(self):
        # Non-numeric input is expected to yield no factors.
        self.assertEqual(PrimeFactor.of('a'), [])
|
adf3b7eb59d104a19edc908a3dbdad7a04abb5b5
|
Python
|
saggarwal98/Practice
|
/Python/sets.py
|
UTF-8
| 95
| 3.109375
| 3
|
[] |
no_license
|
# Demonstrate basic set operations: construction, add, and remove.
set1={1,2,3,4}
print(set1)
print(type(set1))
set1.add(5)  # insert a new element
print(set1)
set1.remove(5)  # remove it again (raises KeyError if absent)
print(set1)
| true
|
64b5a41fd6d42b7a2ddde74a2e927a5a9645cc28
|
Python
|
Deviantjroc710/py_automate_indeed
|
/py_indeed.py
|
UTF-8
| 3,069
| 2.796875
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#######################################################################################
#
# Author: Conner Crosby
# Description:
# The purpose of the code written is to automate the process of applying to jobs
# on Indeed that can be applied via a 'Indeed Resume'.
#
#
#######################################################################################
# Standard Library Imports
import os, logging, sqlite3, configparser
# Third Party Imports
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
# Local Application Imports
from scripts.web import sign_in_to_indeed, search_preference, fetch_all_jobs_from_page,\
parse_apply_w_indeed_resume, apply, next_web_page, construct_container
from scripts.database import *
from scripts.logger import *
# #########CONFIGURATIONS#############################################################
# #########EXTERNAL INI FILE SETUP#########
config_app = configparser.ConfigParser()
config_app.read('app.ini')
py_indeed_configs = config_app['py_indeed']
# #########SETTING UP CHROME DRIVER (OR ANOTHER DRIVER TYPE)#########
# Headless Chrome at a fixed window size; the driver binary is expected
# next to this script (Windows-style path -- this setup is Windows-only).
chrome_option = Options()
chrome_option.add_argument("--headless")
chrome_option.add_argument("--window-size=1366,768")
# chrome_option.binary_location = r'C:\Program Files (x86)\Google\Chrome\Application\chrome.exe'
chrome = webdriver.Chrome(executable_path=(os.getcwd() + r'\chromedriver.exe'),
                          options=chrome_option)
# #########CONNECTING TO DB FILE 'questions__answers_db', INITIAL CREATION OF THE 'data' TABLE#########
connection_to_db = sqlite3.connect("questions__answers_db.db")
cursor = connection_to_db.cursor()
data_table_creation(cursor, connection_to_db)
# #########LOGGERS#########
general_runtime_logger = create_general_logger(__name__, level="INFO")
# #########SPECIFIC CONTAINER CREATION#########
data_container = construct_container()
# #########APPLICATION CONFIGS#########
# NOTE(review): credentials are read from a plaintext ini file -- consider a
# secrets store or environment variables.
email_address = py_indeed_configs['email_address']
password = py_indeed_configs['password']
job_title = py_indeed_configs['job_title']
location = py_indeed_configs['location']
#######################################################################################
def main():
    """ MAIN PROGRAM EXECUTION """
    # Sign in once; on success, keep paging through search results and
    # applying to every job that supports an Indeed Resume.
    signed_in = sign_in_to_indeed(chrome, email_address, password)
    if(signed_in):
        search_preference(chrome, job_title, location)
        # NOTE(review): no explicit exit condition -- presumably
        # next_web_page() raises when there are no more pages; confirm.
        while(True):
            jobs = fetch_all_jobs_from_page(chrome)
            jobs_indeed_resume = parse_apply_w_indeed_resume(jobs)
            apply(chrome, jobs_indeed_resume, cursor, connection_to_db, data_container)
            next_web_page(chrome)
    else:
        general_runtime_logger.error("Could not sign into indeed account...")
def test_main():
    """ THIS MAIN IS MEANT FOR TESTING SPECIFIC JOB APPLICATIONS """
    # Same flow as main() but with a hand-curated (here: empty) job list.
    sign_in_to_indeed(chrome, email_address, password)
    jobs_indeed_resume = []
    apply(chrome, jobs_indeed_resume, cursor, connection_to_db, data_container)
# Run the automation; the try/finally guarantees the SQLite connection and
# the logging subsystem are torn down even when main() raises (the original
# leaked both on any error).
try:
    main()
    # test_main()
finally:
    connection_to_db.close()
    logging.shutdown()
| true
|
e8afc9accb33eeb046cc604b64c35cfade133873
|
Python
|
raulgsalguero82/GithubActions
|
/tests/test_persona.py
|
UTF-8
| 2,548
| 2.828125
| 3
|
[] |
no_license
|
import unittest
import datetime
from Comunidad.Persona import Persona
from Comunidad.Base import Base, Session
class Test_persona(unittest.TestCase):
    """Tests for the Persona model: construction, mutation, and persistence."""
    def test_prueba(self):
        self.assertEqual(1, 1)
    def setUp(self):
        # Fresh fixture objects for every test method.
        self.persona1 = Persona(nombre='Alejandra', edad=25)
        self.persona2 = Persona(nombre='Diego', edad=22)
        self.persona3 = Persona(nombre='Alejandra', edad=25)
        self.persona4 = Persona(nombre='Diana', edad=25)
        self.grupo = [self.persona1, self.persona2, self.persona3]
    def test_constructor(self):
        self.assertEqual(self.persona1.dar_nombre(), 'Alejandra')
        self.assertEqual(self.persona1.dar_edad(), 25)
    def test_anio_nacimiento(self):
        self.assertEqual(self.persona1.calcular_anio_nacimiento(True), datetime.datetime.now().year - 25)
        self.assertNotEqual(self.persona1.calcular_anio_nacimiento(False), datetime.datetime.now().year - 25)
        self.assertEqual(self.persona1.calcular_anio_nacimiento(False), datetime.datetime.now().year - 25 + 1)
        self.assertNotEqual(self.persona1.calcular_anio_nacimiento(True), datetime.datetime.now().year - 25 + 1)
    def test_asignacion(self):
        self.persona2.asignar_edad(28)
        self.persona2.asignar_nombre("Felipe")
        self.assertFalse(self.persona2.dar_nombre() == 'Diego')
        self.assertFalse(self.persona2.dar_edad() == 22)
        self.assertTrue(self.persona2.dar_nombre() == 'Felipe')
        self.assertTrue(self.persona2.dar_edad() == 28)
    def test_objetos_iguales(self):
        persona_nueva = self.persona1
        self.assertIsNot(self.persona1, self.persona3)
        self.assertIs(self.persona1, persona_nueva)
    def test_alamacenar(self):
        # (method name kept as-is to preserve reported test ids)
        self.persona1.almacenar()
        session = Session()
        # Bug fix: a Python `and` inside filter() evaluates to only the last
        # clause; chain two .filter() calls so both conditions apply.
        persona = session.query(Persona).filter(Persona.nombre == 'Alejandra').filter(Persona.edad == 25).first()
        self.assertEqual(persona.dar_nombre(), 'Alejandra')
        self.assertEqual(persona.dar_edad(), 25)
    def test_recuperar(self):
        session = Session()
        session.add(self.persona2)
        session.commit()
        session.close()
        persona = Persona("", 0)
        persona.recuperar("Diego", 22)
        self.assertEqual(persona.dar_nombre(), 'Diego')
        self.assertEqual(persona.dar_edad(), 22)
    def test_todos(self):
        # Bug fix: Session() was instantiated twice, leaking the first session.
        session = Session()
        personas = session.query(Persona).all()
        session.close()
        self.assertIsNotNone(personas)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| true
|
da49b6b31206d77fbf73ee5d7a7d2a143960b382
|
Python
|
f981113587/Python
|
/Aula 14/Desafios/067.py
|
UTF-8
| 530
| 4.09375
| 4
|
[] |
no_license
|
"""
Faça um programa que mostre a tabuada de vários números,
um de cada vez, para cada valor digitado pelo usuário. O
programa será interrompido quando o número solicitado for
negativo.
Fica
Fica, me queira e queira ficar
Fica
Faz o que quiser de mim
Contanto que não falte tempo pra me amar
Fica - Anavitório ♪♫
"""
while True:
n = int(input('Informe o número da tabuada: '))
if n < 0:
break
for c in range(0, 11):
print(f'{n} x {c} = {c*n}')
| true
|
3b7077da5ff8c1106178e76148ea4c170da4a78e
|
Python
|
kaixinhouse/pycollections
|
/utils/xlogger.py
|
UTF-8
| 4,405
| 2.515625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import sys
import os
import os.path
import logging
import logging.handlers
fmt_standard = logging.Formatter('%(asctime)s %(message)s')
fmt_compact = logging.Formatter("%(asctime)s [%(process)d][%(threadName)s] %(message)s")
fmt_full = logging.Formatter("%(asctime)s %(levelname)s [%(process)d][%(thread)d][%(threadName)s] %(message)s")
def setup_logger(log_file_path, error_log_file_path, debug_mode=False, debug_mode_level=0):
    """Configure the root logger: console output plus rotating info/error files.

    :param log_file_path: path for the DEBUG-and-up rotating log file
    :param error_log_file_path: path for the ERROR-and-up rotating log file
    :param debug_mode: when True, add an extra stderr handler
    :param debug_mode_level: level threshold for that extra handler
    """
    logging.raiseExceptions = True  # surface handler errors instead of hiding them
    logger = logging.getLogger()
    setup_standard_logger(logger)
    setup_info_logger(log_file_path, logger)
    setup_error_logger(error_log_file_path, logger)
    if debug_mode:
        setup_debug_logger(debug_mode_level, logger)
def setup_standard_logger(logger=None):
    """Attach a stderr console handler (standard format) to *logger*.

    Installs the handler only when the logger has none yet, so repeated
    calls do not duplicate console output.
    """
    target = logging.getLogger() if logger is None else logger
    if target.handlers:
        return
    target.setLevel(logging.NOTSET)
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    console.setFormatter(fmt_standard)
    target.addHandler(console)
def setup_info_logger(log_file_path, logger=None, auto_stop_time=100, remove_old_handler=False):
    """ detail information output a file """
    # Attaches a rotating DEBUG-level file handler (20 MB x 10 backups).
    # NOTE(review): auto_stop_time and remove_old_handler are accepted but
    # unused here -- see setup_info_logger_new, which does honor them.
    if logger is None:
        logger = logging.getLogger()
    log_file_handler = logging.handlers.RotatingFileHandler(log_file_path, 'a', 1024*1024*20, 10)
    log_file_handler.setLevel(logging.DEBUG)
    log_file_handler.setFormatter(fmt_full)
    logger.addHandler(log_file_handler)
def setup_info_logger_new(log_file_path, logger=None, auto_stop_time=100, remove_old_handler=False):
    """ detail information output a file """
    if auto_stop_time is None:
        # Fall back to the 'auto_stop_time' env var; int(..., 0) auto-detects
        # the numeric base (0x.., 0o.., plain decimal).
        auto_stop_time = int(os.environ['auto_stop_time'], 0)
    # auto_stop_time is 0 equals not set info logger
    if not auto_stop_time:
        return
    if logger is None:
        logger = logging.getLogger()
    if getattr(logger, '_old_handler', None) is not None:
        if remove_old_handler:
            # Detach the previously-installed file handler before adding a new one.
            old_handler = logger._old_handler
            logger._old_handler = None
            logger.removeHandler(old_handler)
        else:
            # A file handler is already installed; keep it and report the timeout.
            return auto_stop_time
    log_file_handler = logging.handlers.RotatingFileHandler(log_file_path, 'a', 1024*1024*20, 10)
    log_file_handler.setLevel(logging.DEBUG)
    log_file_handler.setFormatter(fmt_full)
    logger.addHandler(log_file_handler)
    # Cache the handler so later calls (and the auto-stop timer) can remove it.
    logger._old_handler = log_file_handler
    return start_logger_auto_stop_time(auto_stop_time, logger)
def start_logger_auto_stop_time(auto_stop_time, logger):
    """ delete one logger after auto_stop_time """
    # Schedules removal of the logger's cached '_old_handler' after
    # auto_stop_time seconds; a falsy auto_stop_time disables the timer.
    # Returns the effective auto_stop_time value.
    if auto_stop_time is None:
        # Fall back to the 'auto_stop_time' env var (base auto-detected).
        auto_stop_time = int(os.environ['auto_stop_time'], 0)
    if not auto_stop_time:
        return auto_stop_time
    if logger is None:
        logger = logging.getLogger()
    handler = getattr(logger, '_old_handler', None)
    if handler is not None:
        def _remove_handler():
            # Detach and forget the cached file handler.
            log_file_handler = logger._old_handler
            logger._old_handler = None
            logger.removeHandler(log_file_handler)
        if auto_stop_time > 0:
            import threading
            t = threading.Timer(auto_stop_time, _remove_handler)
            # Bug fix: Thread.setDaemon() is deprecated (removed in new
            # Pythons); assign the daemon attribute instead.
            t.daemon = True
            t.start()
    return auto_stop_time
def setup_error_logger(error_log_file_path, logger=None):
    """Attach a rotating ERROR-level file handler (1 MB x 3 backups)."""
    target = logging.getLogger() if logger is None else logger
    handler = logging.handlers.RotatingFileHandler(error_log_file_path, 'a', 1024*1024*1, 3)
    handler.setLevel(logging.ERROR)
    handler.setFormatter(fmt_full)
    target.addHandler(handler)
def setup_debug_logger(debug_mode_level=0, logger=None):
    """Attach a stderr handler (compact format) filtered at *debug_mode_level*."""
    target = logging.getLogger() if logger is None else logger
    monitor = logging.StreamHandler(sys.stderr)
    monitor.setLevel(debug_mode_level)
    monitor.setFormatter(fmt_compact)
    target.addHandler(monitor)
def main():
    """Smoke test: configure logging and emit one record at each level."""
    setup_logger('./log_file.log', './error_log_file.log', False)
    logging.info('info')
    logging.debug('debug')
    # Bug fix: logging.warn() is a deprecated alias (removed in Python 3.13);
    # use logging.warning().
    logging.warning('warn')
    logging.error('error')
    logging.critical('critical')
if __name__ == '__main__':
    main()
| true
|
6cba87d9e594ca10e849124f2333fdbd9f5675a0
|
Python
|
lucipherM/hackerrank
|
/algo/arrays_and_sorting/counting_sort_3.py
|
UTF-8
| 510
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
def print_int_list(l):
    # Emit the list as space-separated integers (Python 2 print statement;
    # this whole file targets Python 2, cf. raw_input below).
    print " ".join(map(str, l))
def frequences(a):
    """Return occurrence counts of each value 0..max(a) as a list."""
    counts = [0] * (max(a) + 1)
    for value in a:
        counts[value] += 1
    return counts
def starting_points(c):
    """Turn counts into prefix sums in place (counting-sort start offsets);
    returns the same (mutated) list."""
    for pos in range(1, len(c)):
        c[pos] += c[pos - 1]
    return c
if __name__ == '__main__':
    # Python 2 entry point: read n, then n lines of "value label" pairs,
    # keeping only the integer value; print the cumulative start offsets.
    n = input()
    a = []
    for i in range(n):
        j, _ = raw_input().strip().split(" ")
        a.append(int(j))
    c = starting_points(frequences(a))
    print_int_list(c)
| true
|
4a88fb175be8fdeab2e1c8c228466ee4cc09ae4a
|
Python
|
alidashtii/python-assignments
|
/mhmd13.py
|
UTF-8
| 429
| 3.671875
| 4
|
[] |
no_license
|
def count(x):
    """Count character classes in the string *x*.

    Returns a tuple (digits, letters, lowercase_letters, uppercase_letters).
    Other characters (spaces, punctuation, ...) are tallied internally but
    not returned, matching the original interface.
    """
    digit = 0
    letters = 0
    lower = 0
    upper = 0
    # Bug fix: `other` was never initialized, so any non-alphanumeric
    # character raised NameError.
    other = 0
    for ch in x:
        if ch.isalpha():
            letters += 1
            # Bug fix: the original elif chain meant letters could never be
            # counted as lower/upper; classify case inside the alpha branch.
            if ch.islower():
                lower += 1
            elif ch.isupper():
                upper += 1
        elif ch.isnumeric():
            digit += 1
        else:
            other += 1
    return digit, letters, lower, upper
print (count(input('pls enter a text: ')))
| true
|
f25761eb760256d73c003fdf2c10a74352944413
|
Python
|
tejasvm123/Digital_Clock.py
|
/Digital_Clock.py
|
UTF-8
| 708
| 3.265625
| 3
|
[] |
no_license
|
import time
import datetime as dt
import turtle
t = turtle.Turtle()   # writer turtle for the time text
t1 = turtle.Turtle()  # turtle that draws the clock frame
s = turtle.Screen()
s.bgcolor("white")
# Seed the counters from the current wall-clock time; afterwards the loop
# keeps time itself by incrementing once per second.
# NOTE(review): sleep(1) plus increment drifts slowly versus real time, and
# `min` shadows the builtin -- consider re-reading datetime.now() each tick.
sec = dt.datetime.now().second
min = dt.datetime.now().minute
hr = dt.datetime.now().hour
# Draw the rectangular frame (200 x 70) around the clock face.
t1.pensize(5)
t1.color('purple')
t1.goto(-20,0)
t1.pendown()
for i in range(2):
    t1.forward(200)
    t1.left(90)
    t1.forward(70)
    t1.left(90)
t1.hideturtle()
while True:
    # Redraw HH:MM:SS once per second.
    t.hideturtle()
    t.clear()
    t.write(str(hr).zfill(2)+":"+str(min).zfill(2)+":"+str(sec).zfill(2),font=("Arial Narrow", 35, "bold"))
    time.sleep(1)
    sec+=1
    # Carry seconds -> minutes -> hours; 12-hour wrap (13 becomes 1).
    if sec == 60:
        sec=0
        min+=1
    if min == 60:
        min=0
        hr+=1
    if hr == 13:
        hr=1
| true
|
ec5f2dd176365026f0bf90616c422f1562f3a34a
|
Python
|
LArbys/thrumu
|
/wire_matches_extra_tolerances.py
|
UTF-8
| 7,464
| 3.109375
| 3
|
[] |
no_license
|
import math
import numpy as np
def wire_matching_algo(plane1toplane2_tolerance, plane1toplane3_tolerance, plane2toplane3_tolerance):
    """Match wires across three detector planes by (y, z) endpoint proximity.

    Reads per-wire endpoint geometry from 'output_with_y.txt' (whitespace
    separated: instance, plane, wire, y_start, y_end, z_start, z_end) and
    returns a dict mapping a plane-0 wire index (0..2399) to a tuple
    (plane-1 wire, plane-2 wire), where -1 marks "no match" on that plane.

    NOTE(review): the backend (end-point) pass overwrites frontend
    (start-point) matches for the same plane-0 wire, and each pass keeps
    only the last matching plane-1 wire -- confirm both are intended.
    """
    # Bug fix: the file handle was opened without ever being closed; a
    # context manager guarantees cleanup. (The unused str_data_list was
    # also removed.)
    with open("output_with_y.txt") as fin:
        lines = fin.readlines()
    instance_list = []
    plane_num_list = []
    wire_num_list = []
    y_start_list = []
    y_end_list = []
    z_start_list = []
    z_end_list = []
    # Initialize a vector to contain the instance of the two plane "2" (3) hits and to find which
    # one is closer to the hits on the first two planes
    third_plane_hit_list = []
    for l in lines:
        str_data = l.split(' ')  # splits by space
        instance = int(str_data[0])
        plane_num = int(str_data[1])
        wire_num = int(str_data[2])
        y_start = float(str_data[3])
        y_end = float(str_data[4])
        z_start = float(str_data[5])
        z_end = float(str_data[6])
        instance_list.append(instance)
        plane_num_list.append(plane_num)
        wire_num_list.append(wire_num)
        y_start_list.append(y_start)
        y_end_list.append(y_end)
        z_start_list.append(z_start)
        z_end_list.append(z_end)
    # Match the wires on the first plane with the wires from the second and third planes using a dictionary, which is declared here
    wire_match_dict = {}
    # Declare variables for the second plane wire and the first plane wire
    second_plane_wire = 0
    third_plane_wire = 0
    # Begin the activity for frontend hits
    for i in range(0, 2400):
        for j in range(2400, 4800):
            # Set the second plane wire equal to -1 the third plane wire equal to -1 and see if the values are reset at the end of the loop
            # -1 will be a default value if there is no match on the second plane, the third plane, or both
            second_plane_wire = -1
            third_plane_wire = -1
            if math.fabs(y_start_list[i] - y_start_list[j]) <= plane1toplane2_tolerance and math.fabs(z_start_list[i] - z_start_list[j]) <= plane1toplane2_tolerance:
                second_plane_wire = j - 2400
                # Clear out the lists of the two hits on the third plane
                third_plane_hit_list = []
                for k in range(4800, 8256):
                    if math.fabs(y_start_list[k] - y_start_list[i]) <= plane1toplane3_tolerance and math.fabs(y_start_list[k] - y_start_list[j]) <= plane2toplane3_tolerance and math.fabs(z_start_list[k] - z_start_list[i]) <= plane1toplane3_tolerance and math.fabs(z_start_list[k] - z_start_list[j]) <= plane2toplane3_tolerance:
                        third_plane_hit_list.append(k)
                if len(third_plane_hit_list) == 2:
                    # Pick whichever of the two plane-2 candidates is closer to both matched wires.
                    if math.fabs(y_start_list[third_plane_hit_list[0]] - y_start_list[i]) <= math.fabs(y_start_list[third_plane_hit_list[1]] - y_start_list[i]) and math.fabs(y_start_list[third_plane_hit_list[0]] - y_start_list[j]) <= math.fabs(y_start_list[third_plane_hit_list[1]] - y_start_list[j]) and math.fabs(z_start_list[third_plane_hit_list[0]] - z_start_list[i]) <= math.fabs(z_start_list[third_plane_hit_list[1]] - z_start_list[i]) and math.fabs(z_start_list[third_plane_hit_list[0]] - z_start_list[j]) <= math.fabs(z_start_list[third_plane_hit_list[1]] - z_start_list[j]):
                        third_plane_wire = third_plane_hit_list[0] - 4800
                    else:
                        third_plane_wire = third_plane_hit_list[1] - 4800
            # Go on to the next 'j' if there is no match on either of the two planes
            if second_plane_wire == -1 and third_plane_wire == -1:
                continue
            else:
                wire_match_dict[i] = (second_plane_wire, third_plane_wire)
    # Begin the loop for backend hits
    for i in range(0, 2400):
        for j in range(2400, 4800):
            # Set the second plane wire equal to -1 the third plane wire equal to -1 and see if the values are reset at the end of the loop
            # -1 will be a default value if there is no match on the second plane, the third plane, or both
            second_plane_wire = -1
            third_plane_wire = -1
            if math.fabs(y_end_list[i] - y_end_list[j]) <= plane1toplane2_tolerance and math.fabs(z_end_list[i] - z_end_list[j]) <= plane1toplane2_tolerance:
                second_plane_wire = j - 2400
                # Initialize a list to hold all of the hits on the third wire plane of the detector
                third_plane_hit_list = []
                for k in range(4800, 8256):
                    if math.fabs(y_end_list[k] - y_end_list[i]) <= plane1toplane3_tolerance and math.fabs(y_end_list[k] - y_end_list[j]) <= plane2toplane3_tolerance and math.fabs(z_end_list[k] - z_end_list[i]) <= plane1toplane3_tolerance and math.fabs(z_end_list[k] - z_end_list[j]) <= plane2toplane3_tolerance:
                        third_plane_hit_list.append(k)
                # Include a condition for when there are two hits on the third plane recorded to compare the two and identify one as the third hit of the three
                if len(third_plane_hit_list) == 2:
                    if math.fabs(y_end_list[third_plane_hit_list[0]] - y_end_list[i]) <= math.fabs(y_end_list[third_plane_hit_list[1]] - y_end_list[i]) and math.fabs(y_end_list[third_plane_hit_list[0]] - y_end_list[j]) <= math.fabs(y_end_list[third_plane_hit_list[1]] - y_end_list[j]) and math.fabs(z_end_list[third_plane_hit_list[0]] - z_end_list[i]) <= math.fabs(z_end_list[third_plane_hit_list[1]] - z_end_list[i]) and math.fabs(z_end_list[third_plane_hit_list[0]] - z_end_list[j]) <= math.fabs(z_end_list[third_plane_hit_list[1]] - z_end_list[j]):
                        third_plane_wire = third_plane_hit_list[0] - 4800
                    else:
                        third_plane_wire = third_plane_hit_list[1] - 4800
            # Go on to the next 'j' if there is no match on either of the two planes
            if second_plane_wire == -1 and third_plane_wire == -1:
                continue
            # Fill wire_match_dict[i] with (second_plane_wire, third_plane_wire) if there was a match on at least the second plane
            # third_plane_wire = -1 will be a default value meaning that there was no match on the third plane
            else:
                wire_match_dict[i] = (second_plane_wire, third_plane_wire)
    # Return the dictionary for the matching wires
    return wire_match_dict
# Build the wire-match lookup once at import time. Despite the name, this is
# a dict keyed by plane-0 wire index, produced from output_with_y.txt with
# a 0.30 cm tolerance for every plane pair.
wire_match_list = wire_matching_algo(0.30, 0.30, 0.30)
# Define another function to find the matching wires upon being given first_wire, the wire in zero_plane
def wire_assign_func(zero_wire):
    """Return the (first_plane_wire, second_plane_wire) pair matched to the
    given zero-plane wire, as precomputed in wire_match_list.

    A value of -1 in the second slot means no match was found on that plane.
    """
    matched_pair = wire_match_list[zero_wire]
    return matched_pair
# print wire_match_list
| true
|
100c55f3fdb304ad9b1f170946567a8f7dc1e05a
|
Python
|
shenmishajing/minisql
|
/bplustree.py
|
UTF-8
| 11,924
| 3.734375
| 4
|
[] |
no_license
|
import random
#size = 5 # 为节点中存储的记录数
class TreeNode:
    """One node of a B+ tree.

    ``keys`` holds the sorted key values; ``pointers`` holds either record
    pointers (leaf) or child nodes (internal).  A node is considered a leaf
    when it has exactly as many pointers as keys (internal nodes carry one
    extra child pointer).  ``next`` links leaves into a singly linked list.
    """

    def __init__(self, size):
        # Nominal capacity (maximum number of keys) for this node.
        self.__size = size
        self.keys = []
        self.pointers = []
        self.next = None
        self.parent = None

    def is_full(self):
        """True when the node holds exactly its nominal capacity of keys."""
        return self.length() == self.__size

    def is_empty(self):
        """True when the node holds no keys at all."""
        return self.length() == 0

    def is_leaf(self):
        """True when pointer count equals key count (leaf-node invariant)."""
        return len(self.pointers) == len(self.keys)

    def insert_value(self, key, pointer):
        """Insert ``key``/``pointer`` at the position that keeps ``keys``
        sorted ascending.  Duplicate keys are rejected via assertion."""
        position = len(self.keys)
        for slot, existing in enumerate(self.keys):
            assert key != existing, '存在相同的key,无法插入'
            if key < existing:
                position = slot
                break
        self.keys.insert(position, key)
        self.pointers.insert(position, pointer)

    def length(self):
        """Number of keys currently stored in the node."""
        return len(self.keys)
class BPlusTree:
    """B+ tree keyed on comparable values.

    Leaves store (key, record-pointer) pairs and are chained through
    ``TreeNode.next``; ``__data_ptr`` points at the leftmost leaf so the
    whole key range can be scanned in order via get_head().
    """
    def __init__(self, size=3):
        # Maximum number of keys per node.
        self.__size = size
        self.__root = None
        # Head of the leaf linked list (leftmost leaf).
        self.__data_ptr = None
    def get_head(self):
        """Return the leftmost leaf (start of the leaf linked list)."""
        return self.__data_ptr
    def find_height(self):
        """Return the number of levels in the tree (0 for an empty tree)."""
        if self.__root is None:
            return 0
        height = 1
        cur_node = self.__root  # type:TreeNode
        # Descend along the leftmost branch until a leaf is reached.
        while not cur_node.is_leaf():
            cur_node = cur_node.pointers[0]
            height += 1
        return height
    def fill(self, l, root: TreeNode, h):
        """Recursively append each node's key list into l[h], grouping the
        keys of the subtree under ``root`` by depth ``h``."""
        if root is None:
            return
        l[h].append(root.keys)
        for c in root.pointers:
            self.fill(l, c, h + 1)
    def level_order(self):
        """Print the key lists of the internal levels, one level per line."""
        if self.__root is not None:
            h = self.find_height()
            l = []
            for i in range(h):
                l.append([])
            self.fill(l, self.__root, 0)
            # Only the first h-1 levels are printed, i.e. the leaf level is
            # deliberately omitted here.
            for i in range(h - 1):
                for item in l[i]:
                    print(item, end=' ')
                print()
    def print_tree(self):
        """Breadth-first dump of the tree: internal nodes print their keys,
        leaves print keys together with their record pointers."""
        if self.__root is None:
            return
        # Manual BFS queue over the list l, indexed by start/end.
        l = [self.__root]  # type:list[TreeNode]
        start = 0
        end = 1
        while start != end:
            if not l[start].is_leaf():
                for p in l[start].pointers:
                    l.append(p)
                    end += 1
            if l[start].is_leaf():
                print(l[start].keys, l[start].pointers)
            else:
                print(l[start].keys)
            start += 1
    def insert(self, value, pointer):
        """Insert key ``value`` with its record ``pointer``, splitting the
        target leaf (and propagating upward) when it overflows."""
        if self.__root is None:
            # First insert: the root is both the root and the only leaf.
            self.__root = TreeNode(self.__size)
            leaf_node = self.__root
            leaf_node.insert_value(value, pointer)
            self.__data_ptr = leaf_node
            return
        else:
            leaf_node = self.__find_insert_leaf(value)
        if not leaf_node.is_full():
            self.__insert_in_leaf(value, pointer, leaf_node)
        else:
            # Overfill the leaf temporarily, then split it in half.
            leaf_node.insert_value(value, pointer)
            upper = (self.__size + 1) // 2
            new_leaf_node = TreeNode(self.__size)
            new_leaf_node.keys = leaf_node.keys[upper:]
            new_leaf_node.pointers = leaf_node.pointers[upper:]
            leaf_node.keys = leaf_node.keys[0:upper]
            leaf_node.pointers = leaf_node.pointers[0:upper]
            # Splice the new leaf into the leaf linked list.
            new_leaf_node.next = leaf_node.next
            leaf_node.next = new_leaf_node
            # First key of the new right sibling becomes the separator key.
            key = new_leaf_node.keys[0]
            self.__insert_in_parent(leaf_node, new_leaf_node, key)
    def delete(self, value):
        """Remove key ``value`` (and its pointer) if present, rebalancing as
        needed."""
        node = self.find_node(value)
        if node is not None:
            # print("in")
            self.__delete_entry(node, value)
    def find_node(self, value):
        """Return the leaf that holds exactly ``value``, or None if the key
        is absent from the tree."""
        if self.__root is None:
            return None
        else:
            node = self.__find_insert_leaf(value)
            found = False
            for key in node.keys:
                if key == value:
                    found = True
                    break
            if found:
                return node
            else:
                return None
    def find(self, value):
        """Return (pointer, leaf, index) for the first key >= ``value`` in
        the leaf that would contain it, or None if no such key exists in
        that leaf (or the tree is empty)."""
        if self.__root is None:
            return None
        else:
            node = self.__find_insert_leaf(value)
            found = False
            index = 0
            for key in node.keys:
                if key >= value:
                    found = True
                    break
                index += 1
            if found:
                return node.pointers[index], node, index
            else:
                return None
    def __insert_in_leaf(self, value, pointer, node):
        # Simple case: the leaf has room, delegate to the node's sorted insert.
        node.insert_value(value, pointer)
    def __find_insert_leaf(self, value):
        """Descend from the root to the leaf whose key range covers
        ``value`` and return it."""
        if self.__root is not None:
            current_node = self.__root  # type:TreeNode
            while type(current_node) == TreeNode and not current_node.is_leaf():
                # Choose the child whose range contains value: the child to
                # the left of the first key strictly greater than value.
                index = 0
                for i in current_node.keys:
                    if value < i:
                        break
                    index += 1
                current_node = current_node.pointers[index]
            return current_node
    def __insert_in_parent(self, before_node: TreeNode, new_node: TreeNode, value):
        """Register ``new_node`` (right sibling of ``before_node``) in the
        parent under separator key ``value``, splitting the parent — and
        recursing upward — when it overflows."""
        if before_node.parent is None:
            # before_node was the root: grow the tree by one level.
            new_root = TreeNode(self.__size)
            new_root.pointers.append(before_node)
            new_root.pointers.append(new_node)
            new_root.keys.append(value)
            self.__root = new_root
            before_node.parent = new_root
            new_node.parent = new_root
            return
        parent = before_node.parent  # type:TreeNode
        # First place the new child (and separator key) right after
        # before_node inside the parent.
        for i in range(0, len(parent.pointers)):
            if parent.pointers[i] == before_node:
                parent.pointers.insert(i + 1, new_node)
                parent.keys.insert(i, value)
                break
        if len(parent.pointers) <= self.__size + 1:
            # Parent still within capacity after the insertion.
            new_node.parent = parent
        else:
            # Parent overflowed: split it and push the middle key upward.
            upper = (self.__size + 1) // 2
            new_parent = TreeNode(self.__size)
            new_parent.pointers = parent.pointers[upper:]
            parent.pointers = parent.pointers[0:upper]
            # Middle key moves up; it is NOT kept in either half (internal
            # node split, unlike the leaf split in insert()).
            key = parent.keys[upper - 1]
            new_parent.keys = parent.keys[upper:]
            parent.keys = parent.keys[:upper - 1]
            for child in new_parent.pointers:
                child.parent = new_parent
            for child in parent.pointers:
                child.parent = parent
            self.__insert_in_parent(parent, new_parent, key)
    def __delete_entry(self, node: TreeNode, value, child: TreeNode = None):
        """Remove ``value`` (and ``child`` or the pointer at the key's slot)
        from ``node``; then fix any underflow by merging with or borrowing
        from a sibling, recursing into the parent when nodes are merged."""
        # Remove the key and its associated pointer/child.
        temp_index = 0
        for temp in node.keys:
            if value == temp:
                node.keys.remove(temp)
                break
            temp_index += 1
        if child is not None:
            node.pointers.remove(child)
        else:
            node.pointers.remove(node.pointers[temp_index])
        if node.parent is None and len(node.pointers) == 1 and not node.is_leaf():
            # The root shrank to a single child: drop a level.
            self.__root = node.pointers[0]
            self.__root.parent = None
            del node
        elif node.parent is not None and \
                ((node.is_leaf() == False and len(node.pointers) < (self.__size + 1) // 2) or
                 (node.is_leaf() == True and len(node.keys) < self.__size // 2)):
            # Underflow: pick an adjacent sibling to merge with or borrow from.
            # ``previous`` is True when the sibling is to the RIGHT of node
            # (node keeps its position), False when node is the last child and
            # the sibling is to its LEFT.
            previous = True
            parent = node.parent  # type:TreeNode
            child_index = parent.pointers.index(node)
            another_node = None  # type:TreeNode
            key = None
            if child_index == len(parent.pointers) - 1:
                previous = False
                another_node = parent.pointers[child_index - 1]
                key = parent.keys[child_index - 1]
            else:
                another_node = parent.pointers[child_index + 1]
                key = parent.keys[child_index]
            # If the two nodes fit into one, merge them — always keeping the
            # earlier (left) node as the survivor.
            if (another_node.is_leaf() == True and len(node.keys) + len(another_node.keys) <= self.__size) \
                    or (another_node.is_leaf() == False and len(node.pointers) + len(another_node.pointers) <= self.__size + 1):
                if previous:
                    # Swap so that ``another_node`` is the left survivor.
                    temp = node
                    node = another_node
                    another_node = temp
                if not node.is_leaf():
                    # Internal merge pulls the separator key down between halves.
                    another_node.pointers += node.pointers
                    another_node.keys = another_node.keys + \
                                        [key] + node.keys
                    for temp in node.pointers:
                        temp.parent = another_node
                else:
                    # Leaf merge: concatenate and re-link the leaf chain.
                    another_node.keys += node.keys
                    another_node.pointers += node.pointers
                    another_node.next = node.next
                # Remove the separator key (and the merged-away child) upward.
                self.__delete_entry(node.parent, key, node)
                if self.__data_ptr == node:
                    self.__data_ptr = another_node
                del node
            else:
                # Borrow one entry from the sibling instead of merging.
                if not previous:
                    # Sibling is on the LEFT: take its last entry.
                    if not node.is_leaf():
                        temp_child = another_node.pointers[-1]
                        temp_value = another_node.keys[-1]
                        another_node.keys.remove(temp_value)
                        another_node.pointers.remove(temp_child)
                        # Separator key rotates down into node; sibling's key
                        # rotates up into the parent.
                        node.keys.insert(0, key)
                        node.pointers.insert(0, temp_child)
                        temp_child.parent = node
                        parent.keys[child_index - 1] = temp_value
                    else:
                        temp_value = another_node.keys[-1]
                        temp_pointer = another_node.pointers[-1]
                        another_node.keys.remove(temp_value)
                        another_node.pointers.remove(temp_pointer)
                        node.keys.insert(0, temp_value)
                        node.pointers.insert(0, temp_pointer)
                        parent.keys[child_index - 1] = temp_value
                else:
                    # Sibling is on the RIGHT: take its first entry.  Here
                    # child_index also indexes the separator key directly.
                    if not node.is_leaf():
                        temp_child = another_node.pointers[0]
                        temp_value = another_node.keys[0]
                        another_node.pointers.remove(temp_child)
                        another_node.keys.remove(temp_value)
                        node.keys.append(key)
                        node.pointers.append(temp_child)
                        temp_child.parent = node
                        parent.keys[child_index] = temp_value
                    else:
                        temp_value = another_node.keys[0]
                        temp_pointer = another_node.pointers[0]
                        another_node.keys.remove(temp_value)
                        another_node.pointers.remove(temp_pointer)
                        node.keys.append(temp_value)
                        node.pointers.append(temp_pointer)
                        # New first key of the sibling becomes the separator.
                        temp_value = another_node.keys[0]
                        parent.keys[child_index] = temp_value
'''
bpt = BPlusTree(3) #type:BPlusTree
n = 15
for i in range(15, 0, -1):
tp = random.randint(1, n**2)
bpt.insert(i, (0, 0))
# bpt.level_order()
bpt.print_tree()
print("FIND")
print(bpt.find(11))
for i in range(0, 9):
bpt.delete(i)
bpt.print_tree()
print()
head = bpt.get_head() #type:TreeNode
while head is not None:
print(head.keys, head.pointers)
head = head.next
'''
| true
|
fbb447a35ce410e023c0add18df34ffcc3dfb1fe
|
Python
|
eldoria/Reinforcement_learning
|
/drl_sample_project_python/drl_lib/to_do/line_world_mdp.py
|
UTF-8
| 605
| 2.546875
| 3
|
[] |
no_license
|
import numpy as np
def reset_line_world():
    """Build the 5-state Line World MDP.

    States 0 and 4 are terminal (no outgoing transitions).  Action 0 moves
    left, action 1 moves right, always deterministically.  Rewards are taken
    from R = [-1, 0, 1]: entering state 0 yields -1, entering state 4 yields
    +1, every other move yields 0.

    Returns (S, A, R, p, gamma, threshold, pi, V) where p has shape
    (|S|, |A|, |S|, |R|), pi is the uniform random policy and V is the
    zero-initialised value function.
    """
    num_states = 5
    S = np.arange(num_states)
    A = np.array([0, 1])  # 0 = left, 1 = right
    R = np.array([-1, 0, 1])
    p = np.zeros((len(S), len(A), len(S), len(R)))
    # Interior moves (reward index 1, i.e. reward 0).
    for state in range(1, num_states - 2):
        p[state, 1, state + 1, 1] = 1.0
    for state in range(2, num_states - 1):
        p[state, 0, state - 1, 1] = 1.0
    # Transitions into the terminal states carry the -1 / +1 rewards.
    p[1, 0, 0, 0] = 1.0
    p[num_states - 2, 1, num_states - 1, 2] = 1.0
    V = np.zeros((len(S),))
    pi = np.full((len(S), len(A)), 0.5)  # uniform random policy
    gamma = 0.999999  # discount factor
    threshold = 0.000001
    return S, A, R, p, gamma, threshold, pi, V
| true
|
fd94d2ed64bd6e832c4f8320e19654ad0d8d58b0
|
Python
|
12wb/OpenCV
|
/U4/人脸识别.py
|
UTF-8
| 7,806
| 3.09375
| 3
|
[] |
no_license
|
# import cv2
# import os
# import numpy as np
#
#
# # 检测人脸
# def detect_face(img):
# # 将测试图像转换为灰度图像,因为opencv人脸检测器需要灰度图像
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#
# # 加载OpenCV人脸检测分类器Haar
# face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
#
# # 检测多尺度图像,返回值是一张脸部区域信息的列表(x,y,宽,高)
# faces = face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5)
#
# # 如果未检测到面部,则返回原始图像
# if (len(faces) == 0):
# return None, None
#
# # 目前假设只有一张脸,xy为左上角坐标,wh为矩形的宽高
# (x, y, w, h) = faces[0]
#
# # 返回图像的正面部分
# return gray[y:y + w, x:x + h], faces[0]
#
#
# # 该函数将读取所有的训练图像,从每个图像检测人脸并将返回两个相同大小的列表,分别为脸部信息和标签
# def prepare_training_data(data_folder_path):
# # 获取数据文件夹中的目录(每个主题的一个目录)
# dirs = os.listdir(data_folder_path)
#
# # 两个列表分别保存所有的脸部和标签
# faces = []
# labels = []
#
# # 浏览每个目录并访问其中的图像
# for dir_name in dirs:
# # dir_name(str类型)即标签
# label = int(dir_name)
# # 建立包含当前主题主题图像的目录路径
# subject_dir_path = data_folder_path + "/" + dir_name
# # 获取给定主题目录内的图像名称
# subject_images_names = os.listdir(subject_dir_path)
#
# # 浏览每张图片并检测脸部,然后将脸部信息添加到脸部列表faces[]
# for image_name in subject_images_names:
# # 建立图像路径
# image_path = subject_dir_path + "/" + image_name
# # 读取图像
# image = cv2.imread(image_path)
# # 显示图像0.1s
# cv2.imshow("Training on image...", image)
# cv2.waitKey(100)
#
# # 检测脸部
# face, rect = detect_face(image)
# # 我们忽略未检测到的脸部
# if face is not None:
# # 将脸添加到脸部列表并添加相应的标签
# faces.append(face)
# labels.append(label)
#
# cv2.waitKey(1)
# cv2.destroyAllWindows()
# # 最终返回值为人脸和标签列表
# return faces, labels
#
#
# # 调用prepare_training_data()函数
# faces, labels = prepare_training_data("training_data")
#
# # 创建LBPH识别器并开始训练,当然也可以选择Eigen或者Fisher识别器
# face_recognizer = cv2.face.LBPHFaceRecognizer_create()
# face_recognizer.train(faces, np.array(labels))
#
#
# # 根据给定的(x,y)坐标和宽度高度在图像上绘制矩形
# def draw_rectangle(img, rect):
# (x, y, w, h) = rect
# cv2.rectangle(img, (x, y), (x + w, y + h), (128, 128, 0), 2)
#
#
# # 根据给定的(x,y)坐标标识出人名
# def draw_text(img, text, x, y):
# cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (128, 128, 0), 2)
#
#
# # 建立标签与人名的映射列表(标签只能为整数)
# subjects = ["jiaju", "jiaqiang"]
#
#
# # 此函数识别传递的图像中的人物并在检测到的脸部周围绘制一个矩形及其名称
# def predict(test_img):
# # 生成图像的副本,这样就能保留原始图像
# img = test_img.copy()
# # 检测人脸
# face, rect = detect_face(img)
# # 预测人脸
# label = face_recognizer.predict(face)
# # 获取由人脸识别器返回的相应标签的名称
# label_text = subjects[label[0]]
#
# # 在检测到的脸部周围画一个矩形
# draw_rectangle(img, rect)
# # 标出预测的名字
# draw_text(img, label_text, rect[0], rect[1] - 5)
# # 返回预测的图像
# return img
#
#
# # 加载测试图像
# test_img1 = cv2.imread("test_data/test1.jpg")
# test_img2 = cv2.imread("test_data/test2.jpg")
#
# # 执行预测
# predicted_img1 = predict(test_img1)
# predicted_img2 = predict(test_img2)
#
# # 显示两个图像
# cv2.imshow(subjects[0], predicted_img1)
# cv2.imshow(subjects[1], predicted_img2)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
import cv2 as cv
import numpy as np
import argparse
# Command-line options: input source, confidence threshold and the size the
# frame is resized to before being fed to the network.
parser = argparse.ArgumentParser()
parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
parser.add_argument('--thr', default=0.2, type=float, help='Threshold value for pose parts heat map')
parser.add_argument('--width', default=368, type=int, help='Resize input to specific width.')
parser.add_argument('--height', default=368, type=int, help='Resize input to specific height.')
args = parser.parse_args()
# Index of each body part in the network's output heatmap channels.
BODY_PARTS = {"Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
              "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
              "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14,
              "LEye": 15, "REar": 16, "LEar": 17, "Background": 18}
# Pairs of body parts joined by a skeleton segment when both are detected.
POSE_PAIRS = [["Neck", "RShoulder"], ["Neck", "LShoulder"], ["RShoulder", "RElbow"],
              ["RElbow", "RWrist"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
              ["Neck", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"], ["Neck", "LHip"],
              ["LHip", "LKnee"], ["LKnee", "LAnkle"], ["Neck", "Nose"], ["Nose", "REye"],
              ["REye", "REar"], ["Nose", "LEye"], ["LEye", "LEar"]]
inWidth = args.width
inHeight = args.height
# Load the pre-trained OpenPose graph; the .pb file must sit next to the script.
net = cv.dnn.readNetFromTensorflow("graph_opt.pb")
# Fall back to the default camera (device 0) when no input path was given.
cap = cv.VideoCapture(args.input if args.input else 0)
# Process frames until any key is pressed in the display window.
while cv.waitKey(1) < 0:
    hasFrame, frame = cap.read()
    if not hasFrame:
        # End of video: wait for a keypress before exiting the loop.
        cv.waitKey()
        break
    frameWidth = frame.shape[1]
    frameHeight = frame.shape[0]
    # Mean-subtracted, BGR->RGB-swapped blob at the requested network size.
    net.setInput(cv.dnn.blobFromImage(frame, 1.0, (inWidth, inHeight), (127.5, 127.5, 127.5), swapRB=True, crop=False))
    out = net.forward()
    out = out[:, :19, :, :]  # MobileNet output [1, 57, -1, -1], we only need the first 19 elements
    assert (len(BODY_PARTS) == out.shape[1])
    points = []
    for i in range(len(BODY_PARTS)):
        # Slice heatmap of corresponding body's part.
        heatMap = out[0, i, :, :]
        # Originally, we try to find all the local maximums. To simplify a sample
        # we just find a global one. However only a single pose at the same time
        # could be detected this way.
        _, conf, _, point = cv.minMaxLoc(heatMap)
        # Scale the heatmap coordinates back to the original frame size.
        x = (frameWidth * point[0]) / out.shape[3]
        y = (frameHeight * point[1]) / out.shape[2]
        # Add a point if its confidence is higher than threshold.
        points.append((int(x), int(y)) if conf > args.thr else None)
    # Draw a segment and endpoint markers for each detected skeleton pair.
    for pair in POSE_PAIRS:
        partFrom = pair[0]
        partTo = pair[1]
        assert (partFrom in BODY_PARTS)
        assert (partTo in BODY_PARTS)
        idFrom = BODY_PARTS[partFrom]
        idTo = BODY_PARTS[partTo]
        if points[idFrom] and points[idTo]:
            cv.line(frame, points[idFrom], points[idTo], (0, 255, 0), 3)
            cv.ellipse(frame, points[idFrom], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)
            cv.ellipse(frame, points[idTo], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)
    # Overlay the network inference time (in ms) in the top-left corner.
    t, _ = net.getPerfProfile()
    freq = cv.getTickFrequency() / 1000
    cv.putText(frame, '%.2fms' % (t / freq), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
    cv.imshow('OpenPose using OpenCV', frame)
| true
|
bf4da9267496e3fdfeb3f5be0bb1a12143c69882
|
Python
|
635r/CSE
|
/Class notes.py
|
UTF-8
| 1,666
| 4.03125
| 4
|
[] |
no_license
|
# # # Defining a Class
# # class Cat(object):
# # # TWO UNDERSCORES BEFORE AND AFTER
# # def __init__(self, color, personality, pattern):
# # # THINGS THAT A CAT HAS
# # self.color = color
# # self.personality = personality
# # self.pattern = pattern
# # self.state = "happy"
# # self.hungry = False
# #
# #
# # # THINGS THAT CATS CAN DO :
# # def jump(self):
# # self.state = "Scared"
# # print("the cat jumps")
# #
# # def play(self):
# # self.state = "happy"
# # print("you play with the cat")
# #
# #
# # # (Instantiating) two cats
# # cute_cat = Cat("brown", False,"spots")
# # cute_cat2 = Cat("grey", False, "no spots")
# #
# #
# # #getting info about the cats
# # print(cute_cat.color)
# # print(cute_cat2.state)
# # print(cute_cat2.color)
# #
# # cute_cat.jump()
# # print(cute_cat.state)
# # print(cute_cat2.state)
# #
# # cute_cat.play()
# # print(cute_cat.state)
#
#
# class Car(object):
# def __init__(self, color, brand, num_of_cylinder):
# self.color = color
# self.brand = brand
# self.num_of_cylinder = num_of_cylinder
# self.engineOn = False
#
# def turn_on(self):
# if self.engineOn:
# print("Nothing Happens")
# self.engineOn = True
#
# def move_forward(self):`1
# if self.engineOn:
# print("you move forward")
# else:
# print("Nothing Happened")
#
# def turnOff(self):
# if self.engineOn:
# self.engineOff
#
# my_car = Car(4, "Subaru", "Blue")
#
# my_car.turn.on()
# my_car.move_forward()
# my_car.turn_off()
| true
|
2d6f82cdeb18386f821972958debf906078e412f
|
Python
|
alexeipolovin/kids_zadachki
|
/fourth.py
|
UTF-8
| 1,876
| 3.09375
| 3
|
[] |
no_license
|
import os
from os import walk
from os.path import getsize
from os.path import getctime
def fourth_walk():
    """Interactive utility over the current directory: either compare two
    files' contents for equality, or list the files sorted by modification
    time or by size, depending on the user's menu choices (prompts are in
    Russian)."""
    # Collect the file names of the top directory level only (break after
    # the first walk() tuple).
    f = []
    for (dirpath, dirnames, filenames) in walk('./'):
        f.extend(filenames)
        break
    print(sorted(f))
    # NOTE(review): size_list is computed but never used afterwards.
    size_list = []
    for i in f:
        size_list.append(getsize(i))
    for j in range(0, len(f)):
        # NOTE(review): this prints the WHOLE list on every line; f[j] was
        # probably intended — confirm before changing output format.
        print(str(j) + '.' + str(f))
    # NOTE(review): creation_time_list is also computed but never used.
    creation_time_list = []
    for i in f:
        creation_time_list.append(getctime(i))
    # Menu: 1 = compare two files, 2 = sort listing.
    choose_your_detiny = int(input("Сравнить или отсортировать? 1, 2:"))
    if choose_your_detiny == 1:
        # Compare the full contents of two user-named files.
        first_file = input("file name:")
        second_file = input("file name:")
        # NOTE(review): the files are opened but never closed; a `with`
        # block would be safer.
        f_open = open(first_file, 'r')
        s_open = open(second_file, 'r')
        if f_open.read() == s_open.read():
            print("Они одиннаковые")
        else:
            print("Отличаются")
    elif choose_your_detiny == 2:
        # Sub-menu: 1 = sort by size, 2 = sort by modification time.
        choose_param = int(input("Размер, Дата? 1, 2:"))
        if choose_param == 2:
            # Sort by modification time (oldest first).
            search_dir = "./"
            os.chdir(search_dir)
            files = filter(os.path.isfile, os.listdir(search_dir))
            files = [os.path.join(search_dir, f) for f in files]
            files.sort(key=lambda x: os.path.getmtime(x))
            print(files)
        else:
            # Sort by file size, ascending: pair each path with its size,
            # sort on the size, then strip the sizes back off.
            filepaths = []
            for basename in os.listdir('./'):
                filename = os.path.join('./', basename)
                if os.path.isfile(filename):
                    filepaths.append(filename)
            for i in range(len(filepaths)):
                filepaths[i] = (filepaths[i], os.path.getsize(filepaths[i]))
            filepaths.sort(key=lambda filename: filename[1], reverse=False)
            for i in range(len(filepaths)):
                filepaths[i] = filepaths[i][0]
            print(filepaths)
| true
|
09ddf7f1f714223d03fddcf54c27b076dc48f8e9
|
Python
|
Acuf5928/SimpleLanguage
|
/SimpleLanguage/code_helper.py
|
UTF-8
| 1,331
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
import json
import os
from glob import glob
from typing import List
from SimpleLanguage.code_exceptions import DatabaseNotFoundException
def foundDatabasesList(basePath: str) -> List[str]:
    """
    Return the paths of every ``.json`` file directly inside *basePath*
    (sub-folders are not searched).

    :param basePath: Path of the folder where you want search the files
    :rtype: List[str]
    :raises DatabaseNotFoundException: when *basePath* is not a readable folder
    :return: List of all path of all files .json in the folder
    """
    # Normalise the folder path so that it always ends with a separator.
    last_char = basePath[-1]
    if last_char not in ("\\", "/"):
        basePath = basePath + "/"
    if not os.path.isdir(basePath):
        raise DatabaseNotFoundException("Database folder not exist or is not readable")
    return glob(basePath + "*.json", recursive=False)
def LoadDatabaseList(databasesList: List[str]) -> dict:
    """
    Load every JSON database file into a dict keyed by its base file name
    (without the ``.json`` extension).

    :param databasesList: Paths of the .json database files to load
    :rtype: dict
    :raises DatabaseNotFoundException: when a file is missing/unreadable
        ("Database not exist...") or contains invalid JSON ("Database is damaged")
    :return: Mapping of database name -> parsed JSON content
    """
    data = {}
    for element in databasesList:
        # Derive the database name portably.  The original
        # ``element.split("\\")[-1]`` only stripped Windows-style directory
        # separators, so on POSIX the key silently became the full path.
        name = os.path.splitext(os.path.basename(element))[0]
        try:
            with open(element, "r") as read_file:
                data[name] = json.load(read_file)
        except IOError:
            raise DatabaseNotFoundException("Database not exist or is not readable")
        except Exception:
            # json.JSONDecodeError (and anything else) ends up here.
            raise DatabaseNotFoundException("Database is damaged")
    return data
def foundSystemLanguage():
    """Detect the operating system's UI language (not implemented yet)."""
    pass
# TODO: foundSystemLanguage, Scheduled with low priority
|
9eda76f1774c05fe6c8a6d4c76c49b4def2a6db4
|
Python
|
ziemowit141/GeneticAlgorithm
|
/main.py
|
UTF-8
| 3,046
| 3.359375
| 3
|
[] |
no_license
|
from Point import PositivePoint, NegativePoint, get_x, get_y
import matplotlib.pyplot as plt
from Function import Function
from DriverCode import algorithm, NUMBER_OF_POINTS
import numpy as np
def generate_points():
    """Generate NUMBER_OF_POINTS random points with unique coordinates:
    half PositivePoint, half NegativePoint.

    Returns (points_list, positive_points_list, negative_points_list) where
    points_list is the concatenation of the two class-specific lists.
    """
    points_list = []
    positive_points_list = _generate_unique_points(PositivePoint, points_list)
    negative_points_list = _generate_unique_points(NegativePoint, points_list)
    return points_list, positive_points_list, negative_points_list


def _generate_unique_points(point_class, points_list):
    """Create NUMBER_OF_POINTS/2 instances of *point_class* whose (x, y)
    coordinates differ from every point already in *points_list*; each new
    point is appended to *points_list* and to the returned group list."""
    group = []
    for _ in range(int(NUMBER_OF_POINTS / 2)):
        point = point_class()
        # Re-roll until no collision remains.  The original only re-checked
        # against the single conflicting point, so a re-rolled point could
        # still duplicate an EARLIER point in the list.
        while any(other.x == point.x and other.y == point.y for other in points_list):
            point.random_coordinates()
        points_list.append(point)
        group.append(point)
    return group
# Initial GA population: 100 randomly initialised candidate functions.
function_population = []
for _ in range(100):
    function_population.append(Function())
# Random classification points the functions are evolved to separate.
points_list, positive_points_list, negative_points_list = generate_points()
positive_x_coordinates = get_x(positive_points_list)
positive_y_coordinates = get_y(positive_points_list)
negative_x_coordinates = get_x(negative_points_list)
negative_y_coordinates = get_y(negative_points_list)
# plt.plot(positive_x_coordinates, positive_y_coordinates, 'go')
# plt.plot(negative_x_coordinates, negative_y_coordinates, 'ro')
# plt.axis([0, 500, 0, 500])
# plt.show()
# Run the GA 20 times (1 + 19) on the same points and accumulate both the
# per-generation fitness curves and the generation counts.
_, fitness_list, generation_counter = algorithm(function_population, points_list)
for _ in range(19):
    best, temp_fitness_list, temp_generation_counter = algorithm(function_population, points_list)
    # Element-wise accumulation of the fitness curve across runs.
    fitness_list = [x + y for x, y in zip(fitness_list, temp_fitness_list)]
    generation_counter += temp_generation_counter
# Average over the 20 runs.
fitness_list = [x/20 for x in fitness_list]
y_pos = np.arange(len(fitness_list))
plt.bar(y_pos, fitness_list)
plt.xlabel('Number of generation')
plt.ylabel('Fitness score')
plt.title('Average fitness score per generation (20runs)')
print(fitness_list)
print("Average number of generations to find perfect solution: " + str(generation_counter/20))
plt.show()
# def plot_function(function):
# y = []
# for i in range(500):
# y.append(function.get_value(i))
# plt.plot(y, 'b-')
#
#
# plot_function(best)
# print("Winner: " + str(best.coefficients) + " Fitness score: "
# + str(best.fitness_score) + " Generation nr.: " + str(generation))
#
# plt.plot(positive_x_coordinates, positive_y_coordinates, 'go')
# plt.plot(negative_x_coordinates, negative_y_coordinates, 'ro')
# plt.axis([0, 500, 0, 500])
# plt.show()
| true
|
55f45071fc173f27342e87c6de5b990869e2931d
|
Python
|
falconsmilie/Raspberry-Pi-3-Weather
|
/models/weatherRS.py
|
UTF-8
| 8,979
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
from contracts.abstractBaseRS import AbstractBaseRS
from models.weatherRSListItem import WeatherRSListItem
from models.weatherRSListItemForecast16 import (
WeatherRSListItemForecast16
)
from utils.weatherJson import WeatherJson
class WeatherRS(AbstractBaseRS):
    """ Reads a server response, or cached file, weather data.

    Base class of the concrete response types below; subclasses populate
    these members via the property setters (accessed through
    ``super(...).<prop>.fset``) inside their ``set_response`` methods.
    """
    def __init__(self):
        """ Members available to each extending response type """
        self._city_name = None
        self._city_id = None
        self._country = None
        self._coord_lat = None
        self._coord_lon = None
        self._weather_list = None
    def fix_json_string(self, weather_string):
        # Delegate raw-string cleanup to the shared JSON utility before parsing.
        json_utils = WeatherJson()
        return json_utils.fix_json_string(weather_string)
    @property
    def city_name(self):
        return self._city_name
    @city_name.setter
    def city_name(self, name):
        self._city_name = name
        return None
    @property
    def city_id(self):
        return self._city_id
    @city_id.setter
    def city_id(self, city_id):
        self._city_id = city_id
        return None
    @property
    def country(self):
        return self._country
    @country.setter
    def country(self, country):
        """ Country code (GB, JP etc.) """
        self._country = country
        return None
    @property
    def coord_lat(self):
        # Latitude of the city, as reported by the weather service.
        return self._coord_lat
    @coord_lat.setter
    def coord_lat(self, coord_lat):
        self._coord_lat = coord_lat
        return None
    @property
    def coord_lon(self):
        # Longitude of the city, as reported by the weather service.
        return self._coord_lon
    @coord_lon.setter
    def coord_lon(self, coord_lon):
        self._coord_lon = coord_lon
        return None
    @property
    def weather_list(self):
        # Either a single WeatherRSListItem or a list of them, depending on
        # the concrete response subclass.
        return self._weather_list
    @weather_list.setter
    def weather_list(self, weather_list):
        self._weather_list = weather_list
        return None
class WeatherRSWeather(WeatherRS):
    """ Handles the 'weather' (current conditions) type response """
    def __init__(self):
        """ Members of the 'weather' type response """
        # Fix: initialise the base-class members (_city_name, _weather_list,
        # ...) so reading any inherited property before set_response() returns
        # None instead of raising AttributeError.  The original never called
        # the base initialiser.
        super().__init__()
        self._response_list_item = None
        self._sunrise = None
        self._sunset = None
        self._weather_desc_main = None
        self._weather_desc = None
        self._weather_icon_id = None
        self._time_of_weather = None
    def set_response(self, weather):
        """ Sets response variables to local members.

        :param weather: raw JSON string (or dict after cleanup) of a
            'weather' API response
        :raises Exception: when an expected response key is missing
        """
        try:
            weather = self.fix_json_string(weather)
            # Base members (set through the parent's property setters)
            super(WeatherRSWeather, self.__class__). \
                city_name.fset(self, weather['name'])
            super(WeatherRSWeather, self.__class__). \
                city_id.fset(self, weather['id'])
            super(WeatherRSWeather, self.__class__). \
                country.fset(self, weather['sys']['country'])
            super(WeatherRSWeather, self.__class__). \
                coord_lat.fset(self, weather['coord']['lat'])
            super(WeatherRSWeather, self.__class__). \
                coord_lon.fset(self, weather['coord']['lon'])
            self.set_weather_list(weather)
            # 'Weather' response type members
            self._sunrise = weather['sys']['sunrise']
            self._sunset = weather['sys']['sunset']
            self._weather_desc_main = weather['weather'][0]['main']
            self._weather_desc = weather['weather'][0]['description']
            self._weather_icon_id = weather['weather'][0]['icon']
            self._time_of_weather = weather['dt']
        except KeyError as e:
            raise Exception(
                ''.join(['Invalid Response Key: ', '{}'.format(e)])
            )
        return None
    @property
    def sunrise(self):
        return self._sunrise
    @sunrise.setter
    def sunrise(self, sunrise):
        """ Sunrise time, unix, UTC """
        self._sunrise = sunrise
        return None
    @property
    def sunset(self):
        return self._sunset
    @sunset.setter
    def sunset(self, sunset):
        """ sunset time, unix, UTC """
        self._sunset = sunset
        return None
    @property
    def weather_desc_main(self):
        # Short group name of the conditions (e.g. taken from weather[0].main).
        return self._weather_desc_main
    @weather_desc_main.setter
    def weather_desc_main(self, weather_desc_main):
        self._weather_desc_main = weather_desc_main
        return None
    @property
    def weather_desc(self):
        # Longer description of the conditions (weather[0].description).
        return self._weather_desc
    @weather_desc.setter
    def weather_desc(self, weather_desc):
        self._weather_desc = weather_desc
        return None
    @property
    def weather_icon_id(self):
        return self._weather_icon_id
    @weather_icon_id.setter
    def weather_icon_id(self, weather_icon_id):
        self._weather_icon_id = weather_icon_id
        return None
    @property
    def time_of_weather(self):
        return self._time_of_weather
    @time_of_weather.setter
    def time_of_weather(self, time_of_weather):
        """ Time of data calculation, unix, UTC """
        self._time_of_weather = time_of_weather
        return None
    def set_weather_list(self, weather):
        # A 'weather' response carries a single measurement, so weather_list
        # holds one WeatherRSListItem (not a list as in the forecast types).
        list_response = WeatherRSListItem()
        list_response.set_response(weather)
        super(WeatherRSWeather, self.__class__). \
            weather_list.fset(self, list_response)
        return None
class WeatherRSForecast5(WeatherRS):
    """ Handles response for 5 day forecast """
    def __init__(self):
        """ Members of the 'forecast5' response """
        # Fix: initialise base-class members; the original never called
        # the base initialiser.
        super().__init__()
        self._list_count = None
    def set_response(self, weather):
        """ Sets response variables to local members.

        :param weather: raw JSON string (or dict after cleanup) of a
            'forecast5' API response
        :raises Exception: when an expected response key is missing
        """
        try:
            weather = self.fix_json_string(weather)
            # Base Members (set through the parent's property setters)
            super(WeatherRSForecast5, self.__class__). \
                city_name.fset(self, weather['city']['name'])
            super(WeatherRSForecast5, self.__class__). \
                city_id.fset(self, weather['city']['id'])
            super(WeatherRSForecast5, self.__class__). \
                country.fset(self, weather['city']['country'])
            super(WeatherRSForecast5, self.__class__). \
                coord_lat.fset(self, weather['city']['coord']['lat'])
            super(WeatherRSForecast5, self.__class__). \
                coord_lon.fset(self, weather['city']['coord']['lon'])
            self.set_weather_list(weather)
            # Forecast5 members: number of entries in the forecast list.
            self._list_count = weather['cnt']
        except KeyError as e:
            raise Exception(
                ''.join(['Invalid Response Key: ', '{}'.format(e)])
            )
        return None
    @property
    def list_count(self):
        # Fix: the getter previously read self._weather_list_count, an
        # attribute nothing in this class ever assigned (set_response writes
        # self._list_count), so reading the property always raised
        # AttributeError.  Getter and setter now agree on _list_count.
        return self._list_count
    @list_count.setter
    def list_count(self, count):
        self._list_count = count
        return None
    def set_weather_list(self, weather):
        # Each entry of the response 'list' becomes one WeatherRSListItem.
        wlist = []
        for list_item in weather['list']:
            list_response = WeatherRSListItem()
            list_response.set_response(list_item)
            wlist.append(list_response)
        super(WeatherRSForecast5, self.__class__). \
            weather_list.fset(self, wlist)
        return None
class WeatherRSForecast16(WeatherRS):
    """ Handles response for 16 day forecast """
    def __init__(self):
        """ Members of the 'forecast16' response """
        # Fix: initialise base-class members; the original never called
        # the base initialiser.
        super().__init__()
        self._list_count = None
    def set_response(self, weather):
        """ Sets response variables to local members.

        :param weather: raw JSON string (or dict after cleanup) of a
            'forecast16' API response
        :raises Exception: when an expected response key is missing
        """
        try:
            weather = self.fix_json_string(weather)
            # Base Members (set through the parent's property setters)
            super(WeatherRSForecast16, self.__class__). \
                city_name.fset(self, weather['city']['name'])
            super(WeatherRSForecast16, self.__class__). \
                city_id.fset(self, weather['city']['id'])
            super(WeatherRSForecast16, self.__class__). \
                country.fset(self, weather['city']['country'])
            super(WeatherRSForecast16, self.__class__). \
                coord_lat.fset(self, weather['city']['coord']['lat'])
            super(WeatherRSForecast16, self.__class__). \
                coord_lon.fset(self, weather['city']['coord']['lon'])
            self.set_weather_list(weather['list'])
            # Local members: number of entries in the forecast list.
            self.list_count = weather['cnt']
        except KeyError as e:
            raise Exception(
                ''.join(['Invalid Response Key: ', '{}'.format(e)])
            )
        return None
    @property
    def list_count(self):
        # Fix: getter/setter previously used self._weather_list_count while
        # __init__ initialised self._list_count, leaving two disjoint
        # attributes.  Both now use _list_count consistently.
        return self._list_count
    @list_count.setter
    def list_count(self, count):
        self._list_count = count
        return None
    def set_weather_list(self, weather):
        # Each entry of the response 'list' becomes one forecast16 list item.
        wlist = []
        for list_item in weather:
            list_response = WeatherRSListItemForecast16()
            list_response.set_response(list_item)
            wlist.append(list_response)
        super(WeatherRSForecast16, self.__class__). \
            weather_list.fset(self, wlist)
        return None
| true
|
db17154b8bb3c59855a61c3d39ada550f15dd795
|
Python
|
rahuladream/job-hunt-practice-2020
|
/array/monkAndInversion.py
|
UTF-8
| 1,044
| 3.453125
| 3
|
[] |
no_license
|
"""
find out the number of inversion in the matrix M.
defined as the number of unordered pairs of cells
{(i,j), (p,q)} such that M[i][j] & i <=p & j<=q
2 => no of testcase
3 => 3 * 3 matrix input
1 2 3
4 5 6
7 8 9
2 => 2 * 2 matrix input
4 3
1 4
t=int(input())
while(t):
n=int(input())
a=[]
ct=0
for i in range(0,n):
a.append([int(j) for j in input().split()])
for i in range(0,n):
for j in range(0,n):
for k in range(0,n):
for l in range(0,n):
if(i<=k and j<=l and a[i][j]>a[k][l]):
ct=ct+1
print(ct)
t=t-1
"""
# Count matrix "inversions": unordered cell pairs {(i, j), (p, q)} with
# i <= p, j <= q and M[i][j] > M[p][q].  Brute force O(n^4), as in the
# reference implementation quoted in the module docstring above.
t = input()
for _ in range(int(t)):  # fix: don't shadow the testcase count `t`
    n = int(input())
    matrix = []
    inversions = 0
    # Read the n x n matrix, one row per line.
    for _ in range(n):
        matrix.append([int(value) for value in input().split()])
    for i in range(n):
        for j in range(n):
            for p in range(n):
                for q in range(n):
                    if i <= p and j <= q and matrix[i][j] > matrix[p][q]:
                        inversions += 1
    # Fix: the original computed the count but never printed it, so the
    # program produced no output (the docstring reference prints it).
    print(inversions)
| true
|
591426b77134dd196fd0cd0e5f4d138a7bf00054
|
Python
|
HBinhCT/Q-project
|
/hackerearth/Data Structures/Disjoint Data Structures/Basics of Disjoint Data Structures/Students and their arrangements (CAST)/solution.py
|
UTF-8
| 1,084
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
from collections import deque
def find(u, parents):
    """Return the representative (root) of the set containing *u*,
    compressing the path by halving: every visited node is re-pointed at
    its grandparent."""
    node = u
    while True:
        ancestor = parents[node]
        if ancestor == node:
            return node
        # Path halving: skip one level before stepping up.
        parents[node] = parents[ancestor]
        node = parents[node]


def union(u, v, parents, ranks):
    """Merge the sets containing *u* and *v* using union by size:
    the smaller tree is attached under the larger one and the winner's
    size counter is updated.  No-op when both are already in one set."""
    root_u = find(u, parents)
    root_v = find(v, parents)
    if root_u == root_v:
        return
    # Attach the strictly smaller root under the other one (ties keep
    # root_u as the winner, matching the original else-branch).
    if ranks[root_u] < ranks[root_v]:
        smaller, larger = root_u, root_v
    else:
        smaller, larger = root_v, root_u
    parents[smaller] = larger
    ranks[larger] += ranks[smaller]
# n students, m friendship pairs.  Friends are grouped with union-find.
n, m = map(int, input().strip().split())
roots = list(range(n + 1))
# sizes[0] is a dummy so that student ids 1..n index directly.
sizes = [0] + [1] * n
for _ in range(m):
    a, b = map(int, input().strip().split())
    union(a, b, roots, sizes)
queue = deque()
# Process n queries: 'E x' enqueues student x (cutting in right after the
# last queued member of their friend group, if any), 'D' dequeues and prints.
for _ in range(n):
    query = input().strip().split()
    if query[0] == 'E':
        x = int(query[1])
        if m > 0:
            # Scan from the back for the last queued friend of x and insert
            # immediately after them; the for-else appends to the end when
            # no friend is found in the queue.
            for i in range(len(queue) - 1, -1, -1):
                if find(x, roots) == find(queue[i], roots):
                    queue.insert(i + 1, x)
                    break
            else:
                queue.append(x)
        else:
            # No friendships at all: plain FIFO behaviour.
            queue.append(x)
    elif query[0] == 'D':
        print(queue.popleft())
|
03550e7caa00e9b937163a260a4b83c04d852cef
|
Python
|
vbondarevsky/ones_analyzer
|
/analyzer/expression/binary_expression_syntax.py
|
UTF-8
| 879
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
from analyzer.syntax_kind import SyntaxKind
class BinaryExpressionSyntax(object):
    """Syntax node for a binary arithmetic expression: left <operator> right.

    ``kind`` classifies the expression (Add/Subtract/Multiply/Divide/Modulo)
    based on the operator token's kind.
    """

    def __init__(self, left, operator_token, right):
        self.left = left
        self.operator_token = operator_token
        self.right = right
        token_kind = operator_token.kind
        if token_kind == SyntaxKind.MinusToken:
            self.kind = SyntaxKind.SubtractExpression
        elif token_kind == SyntaxKind.PlusToken:
            self.kind = SyntaxKind.AddExpression
        elif token_kind == SyntaxKind.AsteriskToken:
            self.kind = SyntaxKind.MultiplyExpression
        elif token_kind == SyntaxKind.SlashToken:
            self.kind = SyntaxKind.DivideExpression
        elif token_kind == SyntaxKind.PercentToken:
            self.kind = SyntaxKind.ModuloExpression
        # NOTE: as in the original, self.kind stays unset for any other token
        # kind — accessing it then raises AttributeError.

    def __str__(self):
        # Concatenate the textual forms of the three parts, no separators.
        return "".join(str(part) for part in (self.left, self.operator_token, self.right))
|
1cb933f40081286c7e9cd2659ed72c25961069b1
|
Python
|
Smok323/PiSecurityCam
|
/website/picam.py
|
UTF-8
| 368
| 2.640625
| 3
|
[] |
no_license
|
from picamera import PiCamera
from picamera.array import PiRGBArray
import cv2
class Camera:
    """Thin wrapper around the Raspberry Pi camera that yields JPEG frames."""

    def __init__(self):
        self.cam = PiCamera()
        self.cap = PiRGBArray(self.cam)

    def getframe(self):
        """Capture one frame and return it as JPEG-encoded bytes."""
        # picamera expects lowercase format names; "BGR" is rejected.
        self.cam.capture(self.cap, format="bgr")
        image = self.cap.array
        # Bug fix: cv2.imdecode decodes a byte buffer into an image; turning
        # a frame INTO JPEG is cv2.imencode, which returns (ok, buffer).
        ok, jpeg = cv2.imencode('.jpeg', image)
        # Reset the capture buffer so the next capture starts clean
        # (PiRGBArray otherwise errors on reuse).
        self.cap.truncate(0)
        return jpeg.tobytes()
| true
|
07785da6f1f4d3a5e751027f42a676ff939d25a0
|
Python
|
julvei/eth-assertion-protocol
|
/assertion/functions.py
|
UTF-8
| 1,724
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
"""
Author: JV
Date: 2021-04-26
Holds all the functions for validation
"""
from typing import Sequence
class FunctionEntry:
    """Couples a validation callable with its numeric id and display name."""

    def __init__(self, function_id: int, name: str, function):
        self.function_id = function_id
        self.name = name
        self.function = function


class Functions:
    """Registry of FunctionEntry objects, addressable by id or by name."""

    def __init__(self, entries: Sequence[FunctionEntry]):
        # Index the entries by numeric id for O(1) id lookups.
        self.entries = {entry.function_id: entry for entry in entries}

    def get_function_by_id(self, function_id: int):
        """Return the callable registered under function_id (KeyError if absent)."""
        return self.entries[function_id].function

    def get_function_by_name(self, name: str):
        """Return the callable registered under name, or None when unknown."""
        for entry in self.entries.values():
            if entry.name == name:
                return entry.function
        return None

    def get_id_by_name(self, name: str):
        """Return the id registered under name, or -1 when unknown."""
        for entry in self.entries.values():
            if entry.name == name:
                return entry.function_id
        return -1

    def get_name_by_id(self, function_id: int):
        """Return the name registered under function_id (KeyError if absent)."""
        return self.entries[function_id].name
# Data structure which contains all functions
# Each lambda takes (parameter, testcase) and returns a bool; ids are grouped
# by category (0 = dummy, 2xx = arrays, 3xx = double arrays).
FUNCTIONS = Functions([
    # Dummy
    FunctionEntry(function_id = 0, name = "dummy", function = (lambda parameter, testcase: testcase == parameter)),
    # Parameter
    # Arrays
    FunctionEntry(function_id = 200, name = "sorted_array", function = (lambda parameter, testcase: parameter[testcase] > parameter[testcase+1])),
    # Double Arrays
    FunctionEntry(function_id = 300, name = "greater_array", function = (lambda parameter, testcase: parameter[0][testcase[0]] >= parameter[1][testcase[1]]))
])
def main():
    # Nothing to run standalone; the module is meant to be imported.
    pass
if __name__ == '__main__':
    main()
| true
|
7d43bc0d7abd3b054d765314aa3c5826381dd1d8
|
Python
|
wangr0031/mytools
|
/src/myxlsx.py
|
UTF-8
| 3,594
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'wangrong'
import xlrd
import xlwt
import os, re
from lib.logger_def import logger
class myxlsx(object):
    """Collects .xlsx files under a source path and opens them with xlrd."""

    def __init__(self, src_path):
        # Drop a trailing slash so joined paths stay normalized.
        if src_path[-1] == '/':
            src_path = src_path[:-1]
        self.src_file_list = self.list_all_files(src_path, match_postfix=['.xlsx'])

    def remove_empty_from_list(self, list_object):
        """Strip empty strings and empty lists from list_object (in place).

        Returns the cleaned list, or None when None was passed in.
        """
        if list_object is None:
            return list_object
        while [] in list_object:
            list_object.remove([])
        while '' in list_object:
            list_object.remove('')
        return list_object

    def _wanted(self, filename, skip_file):
        """Return True unless filename matches one of the skip_file patterns."""
        if not skip_file:
            return True
        return not any(re.search(key, filename.lower()) for key in skip_file)

    def list_all_files(self, list_dir, skip_file=None, match_postfix=None):
        """Recursively list files under list_dir.

        Args:
            list_dir: file or directory path (backslashes are normalized to '/').
            skip_file: optional regex fragments; a file whose lowercased name
                matches any of them is excluded.
            match_postfix: optional extensions (e.g. ['.xlsx']) to keep.

        Returns:
            List of matching paths using '/' separators.
        """
        all_file_list = []
        list_dir = list_dir.replace('\\', '/')
        if os.path.isfile(list_dir):
            all_file_list.append(list_dir)
        elif os.path.isdir(list_dir):
            # Normalize the filter lists (drop ''/[] entries) once up front.
            match_postfix = self.remove_empty_from_list(match_postfix)
            skip_file = self.remove_empty_from_list(skip_file)
            for dirpath, _, filenames in os.walk(list_dir):
                for file in filenames:
                    if match_postfix and os.path.splitext(file)[1] not in match_postfix:
                        continue
                    # Bug fix: the original appended the file once per
                    # NON-matching skip key, so passing several skip patterns
                    # produced duplicate entries. Now a file is appended at
                    # most once, and only if no skip pattern matches it.
                    if self._wanted(file, skip_file):
                        all_file_list.append((dirpath + '/' + file).replace('\\', '/'))
        return all_file_list

    def open_xlsx(self, xlsx_name):
        """Open a workbook; return the xlrd handle, or False on failure."""
        try:
            return xlrd.open_workbook(xlsx_name)
        except Exception as err:
            logger.error('open file: [{}] error, error msg: [{}]'.format(xlsx_name, err))
            return False

    # TODO: read xlsx sheet data
    def read_data(self, xlsx_handle, sheet_name, read_rows=None):
        pass

    # TODO: read specified sheet
    # TODO: write data
| true
|
7c513574261b2595ea170d7faff1b2207b9fcec0
|
Python
|
JoshOY/DataStructureCourseDesign
|
/PROB10/my_sort/mergeSort.py
|
UTF-8
| 907
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
import copy

# Module-level counter: number of elements moved during merging.
merge_step = 0


def merge_sort(sorting_list):
    """Return a sorted copy of sorting_list using merge sort.

    Side effect: increments the module-level `merge_step` counter once per
    element moved during merging (same accounting as the original version,
    which charged one step per pop).
    """
    global merge_step
    if len(sorting_list) <= 1:
        return sorting_list

    def merge(left, right):
        """Merge two sorted lists in O(len(left) + len(right))."""
        global merge_step
        merged = []
        i = j = 0
        # Index-based merge: the original popped from the FRONT of each list,
        # which is O(n) per pop and made every merge pass quadratic.
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:
                merged.append(left[i])
                i += 1
            else:
                merged.append(right[j])
                j += 1
            merge_step += 1
        # Drain the remainder, still counting one step per element moved.
        merge_step += (len(left) - i) + (len(right) - j)
        merged.extend(left[i:])
        merged.extend(right[j:])
        return merged

    ls = copy.deepcopy(sorting_list)
    middle_index = len(ls) // 2
    return merge(merge_sort(ls[:middle_index]), merge_sort(ls[middle_index:]))
if __name__ == "__main__":
    # Demo: sort a fixed sample and report the accumulated merge step count.
    sList=[13, 14, 94, 33, 82, 25, 59, 94, 65, 23, 45, 27, 73, 25, 39, 10 ]
    print(merge_sort(sList), merge_step)
| true
|
3cba0b3f18eef84b1075d6026693bde60db3c820
|
Python
|
rodrigorahal/advent-of-code-2017
|
/10-14/knot_hash_part_1.py
|
UTF-8
| 1,113
| 3.5625
| 4
|
[] |
no_license
|
from itertools import cycle, islice
def tie_knot(elements, pos, length, skip):
    """Reverse `length` elements of the circular list starting at `pos`.

    Mutates `elements` in place and returns the updated (pos, skip) pair,
    where skip has been incremented by one.
    """
    size = len(elements)
    # Modular indexing replaces the original's manual wrap-around branches,
    # which only handled a single wrap past the end of the list.
    indices = [(pos + i) % size for i in range(length)]
    selected = [elements[i] for i in indices]
    for i, value in zip(indices, reversed(selected)):
        elements[i] = value
    # Advance past the reversed span plus the current skip size, wrapping
    # once as the original did.
    pos = pos + length + skip
    if pos >= size:
        pos -= size
    return pos, skip + 1
def knot_hash(elements, lengths):
    """Run one round of knot-tying and return the product of the first two values."""
    state = (0, 0)  # (current position, skip size)
    for length in lengths:
        state = tie_knot(elements, state[0], length, state[1])
    return elements[0] * elements[1]
def main():
    """Check part 1 against the worked sample, then print the puzzle answer."""
    part_1_test_case_elements = [0, 1, 2, 3, 4]
    part_1_test_case_lengths = [3, 4, 1, 5]
    # Sample from the puzzle statement: result must be 12.
    assert knot_hash(part_1_test_case_elements, part_1_test_case_lengths) == 12
    elements = list(range(256))
    lengths = [189,1,111,246,254,2,0,120,215,93,255,50,84,15,94,62]
    print('Answer: ', knot_hash(elements, lengths))
if __name__ == '__main__':
    main()
| true
|
deaba2fc43d825ff05d75c2fa62451ddd62f9356
|
Python
|
Ilovelibrary/Restaurants-Web-Server-based-on-Python
|
/project.py
|
UTF-8
| 4,987
| 2.578125
| 3
|
[] |
no_license
|
from flask import Flask, render_template, request, redirect, url_for, flash, jsonify
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem
app = Flask(__name__)
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
@app.route('/')
@app.route('/restaurants/')
def restaurants():
restaurants = session.query(Restaurant).all()
return render_template('restaurants.html', restaurants=restaurants)
@app.route('/restaurants/new/',methods=['GET','POST'])
def newRestaurant():
if request.method == 'POST':
newRestaurant = Restaurant(name=request.form['name'])
session.add(newRestaurant)
session.commit()
flash('New Restaurant added!')
return redirect(url_for('restaurants'))
else:
return render_template('newrestaurant.html')
@app.route('/restaurants/<int:restaurant_id>/delete/',methods=['GET','POST'])
def deleteRestaurant(restaurant_id):
selectedRestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
if request.method == 'POST':
session.delete(selectedRestaurant)
session.commit()
flash('A Restaurant deleted!')
return redirect(url_for('restaurants'))
else:
return render_template('deleterestaurant.html',restaurant=selectedRestaurant)
@app.route('/restaurants/<int:restaurant_id>/edit/',methods=['GET','POST'])
def editRestaurant(restaurant_id):
restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
if request.method == 'POST':
restaurant.name = request.form['name']
session.add(restaurant)
session.commit()
flash('Restaurant name changed!')
return redirect(url_for('restaurants'))
else:
return render_template('editrestaurant.html',restaurant=restaurant)
@app.route('/restaurants/JSON')
def restaurantsJson():
restaurants = session.query(Restaurant).all()
return jsonify(Restaurants=[r.serialize for r in restaurants])
@app.route('/restaurants/<int:restaurant_id>/JSON')
def restaurantJson(restaurant_id):
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
return jsonify(Restaurants=restaurant.serialize)
@app.route('/restaurants/<int:restaurant_id>/menu/')
def restaurantMenu(restaurant_id):
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
items = session.query(MenuItem).filter_by(restaurant_id=restaurant.id)
return render_template('menu.html', restaurant=restaurant, items = items)
@app.route('/restaurants/<int:restaurant_id>/new/', methods=['GET','POST'])
def newMenuItem(restaurant_id):
if request.method == 'POST':
newItem = MenuItem(name=request.form['name'],description=request.form['description'], price=request.form['price'],restaurant_id=restaurant_id)
session.add(newItem)
session.commit()
flash('New item added!')
return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))
else:
return render_template('newmenuitem.html', restaurant_id=restaurant_id)
@app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/edit/', methods=['GET','POST'])
def editMenuItem(restaurant_id, menu_id):
editItem = session.query(MenuItem).filter_by(restaurant_id=restaurant_id, id=menu_id).one()
print editItem.name
if request.method == 'POST':
print editItem.name, request.form['name']
editItem.name = request.form['name']
editItem.description = request.form['description']
editItem.price = request.form['price']
session.add(editItem)
session.commit()
flash('Item changed!')
return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))
else:
return render_template('editmenuitem.html', restaurant_id=restaurant_id, item = editItem)
@app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/delete/',methods=['GET','POST'])
def deleteMenuItem(restaurant_id, menu_id):
deleteItem = session.query(MenuItem).filter_by(restaurant_id=restaurant_id, id=menu_id).one()
print deleteItem.name
if request.method == 'POST':
session.delete(deleteItem)
session.commit()
flash('Item deleted!')
return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))
else:
return render_template('deletemenuitem.html', restaurant_id=restaurant_id, item = deleteItem)
@app.route('/restaurants/<int:restaurant_id>/menu/JSON')
def restaurantMenuJson(restaurant_id):
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
items = session.query(MenuItem).filter_by(restaurant_id=restaurant_id).all()
return jsonify(MenuItems=[i.serialize for i in items])
@app.route('/restaurants/<int:restaurant_id>/menu/<int:menu_id>/JSON')
def menuItemJson(restaurant_id,menu_id):
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
item = session.query(MenuItem).filter_by(restaurant_id=restaurant_id,id=menu_id).one()
return jsonify(MenuItems=item.serialize)
if __name__ == '__main__':
app.secret_key = 'super-secret-key'
app.debug = True
app.run(host='0.0.0.0', port=5000)
| true
|
ca2880b761ee3656e3ad941670074f9a56c57cd6
|
Python
|
pelennor/python-holidays
|
/holidays/countries/vietnam.py
|
UTF-8
| 3,510
| 3.09375
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta as rd
# Installation: pip install korean_lunar_calendar
# URL: https://github.com/usingsky/korean_lunar_calendar_py/
from korean_lunar_calendar import KoreanLunarCalendar
from holidays.constants import JAN, APR, MAY, SEP, SAT, SUN
from holidays.holiday_base import HolidayBase
class Vietnam(HolidayBase):
"""
https://publicholidays.vn/
http://vbpl.vn/TW/Pages/vbpqen-toanvan.aspx?ItemID=11013 Article.115
https://www.timeanddate.com/holidays/vietnam/
"""
country = "VN"
def __init__(self, **kwargs):
self.korean_cal = KoreanLunarCalendar()
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
super()._populate(year)
# New Year's Day
name = "International New Year's Day"
first_date = date(year, JAN, 1)
self[first_date] = name
if self.observed:
self[first_date] = name
if first_date.weekday() == SAT:
self[first_date + rd(days=+2)] = name + " observed"
elif first_date.weekday() == SUN:
self[first_date + rd(days=+1)] = name + " observed"
# Lunar New Year
name = [
"Vietnamese New Year", # index: 0
"The second day of Tet Holiday", # index: 1
"The third day of Tet Holiday", # index: 2
"The forth day of Tet Holiday", # index: 3
"The fifth day of Tet Holiday", # index: 4
"Vietnamese New Year's Eve", # index: -1
]
dt = self.get_solar_date(year, 1, 1)
new_year_date = date(dt.year, dt.month, dt.day)
if self.observed:
for i in range(-1, 5, 1):
tet_day = new_year_date + rd(days=+i)
self[tet_day] = name[i]
# Vietnamese Kings' Commemoration Day
# https://en.wikipedia.org/wiki/H%C3%B9ng_Kings%27_Festival
if year >= 2007:
name = "Hung Kings Commemoration Day"
dt = self.get_solar_date(year, 3, 10)
king_hung_date = date(dt.year, dt.month, dt.day)
self[king_hung_date] = name
else:
pass
# Liberation Day/Reunification Day
name = "Liberation Day/Reunification Day"
libration_date = date(year, APR, 30)
self[libration_date] = name
# International Labor Day
name = "International Labor Day"
labor_date = date(year, MAY, 1)
self[labor_date] = name
# Independence Day
name = "Independence Day"
independence_date = date(year, SEP, 2)
self[independence_date] = name
# convert lunar calendar date to solar
def get_solar_date(self, year, month, day):
self.korean_cal.setLunarDate(year, month, day, False)
return date(
self.korean_cal.solarYear,
self.korean_cal.solarMonth,
self.korean_cal.solarDay,
)
class VN(Vietnam):
pass
class VNM(Vietnam):
pass
| true
|
5e9840edaad1e9797434fcb981c2d862eda43599
|
Python
|
abhi-laksh/spotify-clone-react
|
/src/assets/sass.py
|
UTF-8
| 898
| 3.21875
| 3
|
[] |
no_license
|
#--- DATE : April 08, 2019 | 22:33:19
#--- --- By Abhishek Soni
#--- About (also write in below variable): Execute Sass command
about='Execute Sass command'
print('About :' + about)
import os
curDir = os.getcwd()
dirs = os.walk(curDir)
def checkFile(allDirs):
    """Scan (root, dirs, files) walk entries for an SCSS/CSS file pair.

    Returns (scss_path, css_path) when both a main/style .scss and a
    style/main .css file were found (the last match wins), otherwise
    (None, None).
    """
    scss_path = ""
    css_path = ""
    for root, _, files in list(allDirs):
        for name in files:
            if name in ("main.scss", "style.scss"):
                scss_path = os.path.join(root, name)
            if name in ("style.css", "main.css"):
                css_path = os.path.join(root, name)
    if scss_path and css_path:
        return (scss_path, css_path)
    return (None, None)
scssPath , cssPath = checkFile(dirs)
if scssPath !=None and cssPath != None:
try:
os.system("sass --watch " + scssPath + ":" + cssPath)
except Exception as e:
print(e)
else:
print("Error in finding the file ! ")
e = input("Press Enter to exit")
quit()
| true
|
e778d1d3d567c8e022d247c9a7ae263215ec8223
|
Python
|
yangahxu/Python
|
/课堂练习/第4关 收纳的艺术.py
|
UTF-8
| 3,270
| 3.390625
| 3
|
[] |
no_license
|
# students = ['党志文', '浦欣然', '罗鸿朗', '姜信然', '居俊德', '宿鸿福', '张成和', '林景辉', '戴英华', '马鸿宝', '郑翰音', '厉和煦', '钟英纵', '卢信然', '任正真', '翟彭勃', '蒋华清', '双英朗', '金文柏', '饶永思', '堵宏盛', '濮嘉澍', '戈睿慈', '邰子默', '于斯年', '扈元驹', '厍良工', '甘锐泽', '姚兴怀', '殳英杰', '吴鸿福', '王永年', '宫锐泽', '黎兴发', '朱乐贤', '关乐童', '养永寿', '养承嗣', '贾康成', '韩修齐', '彭凯凯', '白天干', '瞿学义', '那同济', '衡星文', '公兴怀', '宫嘉熙', '牧乐邦', '温彭祖', '桂永怡']
# for i in students:
# print(i+'在不在?')
# list1=['小明',18,1.70]
# print(list1)
# print(list1[0])
# list2 = [5,6,7,8,9]
# print(list2[:])
# # 打印出[5,6,7,8,9]
# print(list2[2:])
# # 打印出[7,8.9]
# print(list2[:2])
# # 打印出[5,6]
# print(list2[1:3])
# #打印出[6,7]
# print(list2[2:4])
# #打印出[7,8]
#冒号左边空,就要从偏移量为0的元素开始取;右边空,就要取到列表的最后一个元素。
# 后半句:冒号左边数字对应的元素要拿,右边的不动
#练习
# students = ['小明','小红','小刚']
# print(students[:2])
# print(students[0])
# 请运行以下代码:报错后,可读一下报错信息,然后将第6行注释掉再运行。
list3 = [1,2]
list3.append(3)
print(list3)
#list3.append(4,5)
list3.append([4,5])
print(list3)
# students = ['小明','小红','小刚']
# students.append('小美')
# print(students)
# del students[1]
# print(students)
# students = ['小明','小红','小刚']
# scores = {'小明':95,'小红':90,'小刚':90}
# print(len(students))
# print(len(scores))
# print(scores['小明'])
# print(scores['小红'])
# album = {'周杰伦':'七里香','王力宏':'心中的日月'}
# del album['周杰伦']
# print(album)
# album['周杰伦'] = '十一月的萧邦'
# print(album)
# print(album['周杰伦'])
#修改成绩
# scores = {'小明':95,'小红':90,'小刚':90}
# del scores['小刚']
# print(scores)
# scores['小刚']=92
# print(scores)
# scores['小美']=85
# print(scores)
# students1 = ['小明','小红','小刚']
# students2 = ['小刚','小明','小红']
# print(students1 == students2)
# scores1 = {'小明':95,'小红':90,'小刚':100}
# scores2 = {'小刚':100,'小明':95,'小红':90}
# print(scores1 == scores2)
# list1 = ['小明','小红','小刚','小美']
# list1[1] = '小蓝'
# print(list1)
# dict1 = {'小明':'男'}
# dict1['小明'] = '女'
# print(dict1)
#列表嵌套列表
# students = [['小明','小红','小刚','小美'],['小强','小兰','小伟','小芳']]
# print(students[1][1])
# #字典嵌套字典
# scores = {
# '第一组':{'小明':95,'小红':90,'小刚':100,'小美':85},
# '第二组':{'小强':99,'小兰':89,'小伟':93,'小芳':88}
# }
# print(scores['第一组']['小刚'])
# students = {
# '第一组':['小明','小红','小刚','小美'],
# '第二组':['小强','小兰','小伟','小芳']
# }
# scores = [
# {'小明':95,'小红':90,'小刚':100,'小美':85},
# {'小强':99,'小兰':89,'小伟':93,'小芳':88}
# ]
# print(students['第一组'][2])
# print(scores[0]['小刚'])
| true
|
04c9de054bf3b9225dbcc1d1f09435ff6a413b60
|
Python
|
shreyakarthik1210/Number-guesser
|
/numGuess.py
|
UTF-8
| 401
| 4.34375
| 4
|
[] |
no_license
|
# Number-guessing game: pick a secret in [1, topNum], then keep prompting
# until the user hits it, hinting higher/lower after each wrong guess.
import random
topNum = int(input("Please type the maximum number you would like to have in the game: "))
number = random.randint(1,topNum)  # secret target, inclusive of both ends
while True:
    userInput = input("Please type in your guess for the random number: ")
    if int(userInput) == number:
        print("You got the right number!")
        break;
    elif int(userInput) > number:
        print("The number is lower.")
    else:
        print("The number is higher.")
| true
|
ec065b6f4fc705d2168069054c6a586655a9eeff
|
Python
|
Lingesh2311/Python-Basics
|
/Generators_Python/CHAPTER 2/ch02_01.py
|
UTF-8
| 262
| 3.15625
| 3
|
[] |
no_license
|
# Basic Context Manager Framework
from contextlib import contextmanager
@contextmanager
def simple_context_manager(obj):
    """Temporarily increment obj.some_property by one.

    The decrement runs in the ``finally`` clause, so the attribute is
    restored even when the managed block raises.
    """
    try:
        obj.some_property += 1
        yield
    finally:
        obj.some_property -= 1
| true
|
6d66abd83783f5f028d925db692d2fc069238d64
|
Python
|
arnav8/Bp_Regression_FireworksAlg
|
/Fireworks.py
|
UTF-8
| 6,530
| 3.375
| 3
|
[] |
no_license
|
#encoding=utf-8
#Date 2017.5.19
#Fireworks Algorithm
from Utils import *
'''Firework algorithm
The purpose is to get better initial values of neural network parameters through the firework algorithm, and then use gradient descent for iteration
parameter:
X: training set sample collection, numpy array
Y: training section label collection, numpy array
h: the number of hidden neurons
return value:
The optimized input layer-hidden layer weight, hidden layer threshold, hidden layer-output layer weight, and initial value of the output layer threshold are all numpy arrays
'''
def FWA(X, Y, h):
if shape(X)[0] != shape(Y)[0]:
print("The line of X and Y must be same!")
m = shape(X)[0]; n = shape(X)[1]; l = shape(Y)[1]
#Initialization algorithm parameters
#Sparks total
m = 50
#Upper and lower limit
a = 0.8; b = 0.04
#Explosion amplitude
A = 40
#Number of fireworks Gaussian explosions
mm = 5
#Fireworks dimension
dimension = n*h + h*l + h + l
#Number of fireworks
nn = 5
#Maximum and minimum boundaries
xmin = -5; xmax = 5
#Initialize fireworks
fireworks = zeros([nn, dimension])
for i in range(nn):
for j in range(dimension):
fireworks[i][j] = random.uniform(-5, 5)
#Initialize a new firework
fireworks_new = zeros([nn, 100, dimension])
#Initialize Gaussian spark
fireworks_rbf = zeros([nn, dimension])
#Sparks
#The number of sparks produced by each firework
Si = zeros([nn, 1])
#Explosion radius of each firework
Ai = zeros([nn, 1])
#Spark limit
si = zeros([nn, 1])
#Calculate the fitness function value of each firework
f = zeros([nn, 1])
#Maximum and minimum fitness
fmax = f[0]; fmin = f[nn-1]
#Error function initialization
E = zeros([5000, 1])
#Firework algorithm iteration process
for delta_num in range(5000):
# Total number of sparks produced by ordinary explosions
sum_new_fireworks = 0
# Total fitness
sum = 0
#Calculate fitness and find the maximum and minimum
for i in range(nn):
f[i] = calculatef(X, Y, fireworks[i], n, h, l)
if f[i] > fmax:
fmax = f[i]
if f[i] < fmin:
fmin = f[i]
sum += f[i]
#Find the explosion radius and number of sparks for each firework
for i in range(nn):
#Calculate the number of sparks
Si[i] = m * (fmax - f[i] + 0.0001) / (nn * fmax - sum + 0.0001)
Si[i] = round(Si[i])
if Si[i] < a * m:
si[i] = round(a * m)
elif Si[i] > b * m:
si[i] = round(b * m)
else:
si[i] = round(Si[i])
#Can not exceed the number of sparks limit
if Si[i] > si[i]:
Si[i] = si[i]
#Calculate the total number of sparks produced by ordinary explosions
sum_new_fireworks += int(Si[i])
#Calculate the explosion radius
Ai[i] = A * (f[i] - fmin + 0.0001) / (sum - nn * fmin + 0.0001)
#Create a new spark
for j in range(Si[i]):
#Initialize a new spark
fireworks_new[i][j] = fireworks[i]
#Randomly select z dimensions
z = random.randint(1, dimension)
#Randomly select the first z
zz = range(dimension)
random.shuffle(zz)
# Create a new spark
for k in range(z):
fireworks_new[i][j][zz[k]] += random.uniform(0, Ai[i])
#Generate Gaussian sparks (each firework generates a Gaussian spark)
# Randomly select z dimensions
z = random.randint(1, dimension)
zz = range(dimension)
random.shuffle(zz)
#Gaussian random number
g = random.uniform(-1, 1)
#Gaussian explosion operator
for i in range(mm):
for j in range(z):
fireworks_rbf[i][zz[j]] = g * fireworks[i][zz[j]]
#Construct total fireworks
sum_fireworks = nn + sum_new_fireworks + mm
fireworks_final = zeros([sum_fireworks, dimension])
for i in range(nn):
fireworks_final[i] = fireworks[i]
for j in range(Si[0]):
fireworks_final[nn+j] = fireworks_new[0][j]
for i in range(nn-1):
for j in range(Si[i+1]):
#print 'Si = ',Si[i]
fireworks_final[int(nn+j+Si[i])] = fireworks_new[i+1][j]
for i in range(mm):
fireworks_final[int(nn+sum_new_fireworks+i)] = fireworks_rbf[i]
#Mapping rule
for i in range(sum_fireworks):
for j in range(dimension):
if fireworks_final[i][j] > xmax or fireworks_final[i][j] < xmin:
fireworks_final[i][j] = xmin + mod(abs(fireworks_final[i][j]), \
(xmax - xmin))
#Choose a strategy
#New population fitness after explosion
f_new = zeros([sum_fireworks, 1])
f_new_min = f_new[0]
#print f_new_min
#Initialize the optimal fitness index
min_i = 0
#Select the next generation of n individuals, consisting of the maximum fitness individual and nn-1 individuals farther away
#Find the optimal fitness
for i in range(sum_fireworks):
#print fireworks_final[i]
f_new[i] = calculatef(X, Y, fireworks_final[i], n, h, l)
if f_new[i] < f_new_min:
f_new_min = f_new[i]
min_i = i
#Find the probability of each individual being selected
#Initialize the distance between two bodies
D = zeros([sum_fireworks, sum_fireworks])
#Calculate the distance between two bodies
for i in range(sum_fireworks):
for j in range(sum_fireworks):
D[i][j] = dot((fireworks_final[i] - fireworks_final[j]), \
(fireworks_final[i] - fireworks_final[j])) / 2
#Initialize the sum of the distance between each individual and other individuals
Ri = zeros([sum_fireworks, 1])
#Initialize a copy of the distance matrix
RRi = zeros([sum_fireworks, 1])
#Calculate the sum of the distance between each individual and other individuals
for i in range(sum_fireworks):
for j in range(sum_fireworks):
Ri[i] += D[i][j]
RRi = Ri
#Select nn-1 individuals with the farthest distance, that is, sort the distance matrix
for i in range(sum_fireworks-1):
for j in range(i, sum_fireworks):
if Ri[i] < Ri[j]:
tmp = Ri[i]
Ri[i] = Ri[j]
Ri[j] = tmp
#Construct a new population
fireworks[0] = fireworks_final[min_i]
for i in range(sum_fireworks):
if Ri[0] == RRi[i]:
fireworks[1] = fireworks_final[i]
if Ri[1] == RRi[i]:
fireworks[2] = fireworks_final[i]
if Ri[2] == RRi[i]:
fireworks[3] = fireworks_final[i]
if Ri[3] == RRi[i]:
fireworks[4] = fireworks_final[i]
#After iteration, return the best individual
#Initialize the optimal fitness index
ii = 0
for i in range(nn):
f[i] = calculatef(X, Y, fireworks[i], n, h, l)
fmin = f[0]
if f[i] < fmin:
ii = i
E[delta_num] = f[ii]
return final_weight(fireworks[ii], n, h, l, E)
| true
|
2919f0e83762a656160e508c47dddf734e31cc3b
|
Python
|
HappyRocky/pythonAI
|
/LeetCode/141_Linked_List_Cycle.py
|
UTF-8
| 1,486
| 4.1875
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Given a linked list, determine if it has a cycle in it.
To represent a cycle in the given linked list, we use an integer pos which represents the position (0-indexed) in the linked list where tail connects to.
If pos is -1, then there is no cycle in the linked list.
给定一个链表,判断是否存在一个环。
为了表示这个环,我们使用一个整数 pos 来代表表尾连接的索引位置。
如果 pos = -1,说明没有环。
Example 1:
Input: head = [3,2,0,-4], pos = 1
Output: true
Explanation: There is a cycle in the linked list, where tail connects to the second node.
Example 2:
Input: head = [1,2], pos = 0
Output: true
Explanation: There is a cycle in the linked list, where tail connects to the first node.
Example 3:
Input: head = [1], pos = -1
Output: false
Explanation: There is no cycle in the linked list.
"""
class ListNode:
    """Singly linked list node; passing a list builds a whole chain."""

    def __init__(self, x):
        if isinstance(x, list):
            # Build a chain from the values, keeping self as the head.
            self.val = x[0]
            self.next = None
            head = self
            for i in range(1, len(x)):
                head.next = ListNode(x[i])
                head = head.next
        else:
            self.val = x
            self.next = None


def hasCycle(head):
    """Return True when the linked list starting at `head` contains a cycle.

    Uses Floyd's tortoise-and-hare: O(n) time and O(1) extra space, instead
    of the original visited-set scan which kept every node in memory.
    """
    slow = fast = head
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
        # The fast pointer laps the slow one iff the list loops back.
        if slow is fast:
            return True
    return False
| true
|
c508935cb092339e60428846245b5980296ff387
|
Python
|
freysner/freysner
|
/skip_search.py
|
UTF-8
| 733
| 3.171875
| 3
|
[] |
no_license
|
ASIZE = 256


def ArrayCmp(a, aIdx, b, bIdx, Length):
    """Compare Length items of a (from aIdx) against b (from bIdx).

    Returns 0 on a full in-bounds match, 1 on any mismatch or when either
    sequence runs out before Length items were compared.
    """
    offset = 0
    while offset < Length and aIdx + offset < len(a) and bIdx + offset < len(b):
        if a[aIdx + offset] != b[bIdx + offset]:
            return 1
        offset += 1
    return 0 if offset == Length else 1


def SKIP(x, y):
    """Skip Search: return the start positions of pattern x inside text y."""
    matches = []
    m = len(x)
    n = len(y)
    # Preprocessing: bucket the pattern positions of every character value.
    buckets = [[] for _ in range(ASIZE)]
    for idx, ch in enumerate(x):
        buckets[ord(ch)].append(idx)
    # Searching: probe the text every m characters and verify each candidate
    # alignment suggested by the probed character's bucket.
    for probe in range(m - 1, n, m):
        for pattern_pos in buckets[ord(y[probe])]:
            if ArrayCmp(x, 0, y, probe - pattern_pos, m) == 0:
                matches.append(probe - pattern_pos)
    return matches
| true
|
a05a02bee65ba4708ea45721f89f224cfc0dbc16
|
Python
|
itagaev/webdev2019
|
/10 week/hackerrank/7.py
|
UTF-8
| 267
| 3.21875
| 3
|
[] |
no_license
|
# Python 2 script (uses raw_input): reads n and a list of integers, then
# prints the largest value strictly smaller than the maximum.
# NOTE(review): `max` shadows the builtin; -110 works as a sentinel only
# because HackerRank scores are bounded below by -100.
if __name__ == '__main__':
    n = int(raw_input())
    arr = map(int, raw_input().split())
    # First pass: find the maximum.
    max = -110
    for x in arr:
        if(max < x):
            max = x
    # Second pass: find the largest value not equal to the maximum.
    secmax = -110
    for x in arr:
        if max == x:
            continue
        if secmax < x:
            secmax = x
    print(secmax)
| true
|
db94148e1d11187307d30838842f5432a7153d89
|
Python
|
michael-swift/btreceptor
|
/build/lib/btreceptor/clustering.py
|
UTF-8
| 2,194
| 2.71875
| 3
|
[
"Apache-2.0"
] |
permissive
|
from __future__ import division
import pandas as pd
import numpy as np
import Levenshtein
from scipy.spatial.distance import squareform
from scipy.sparse.csgraph import connected_components
from itertools import combinations
def df_pw_edit(frame):
""" Returns array of pairwise edit distances in square form """
ed = np.zeros(int((frame.shape[0]/2)*(frame.shape[0]-1)), dtype='float')
for c, (x, y) in enumerate(combinations(frame.cdr3aa.values, 2)):
ed[c] = Levenshtein.distance(x, y) / np.max([len(x), len(y)])
sq = squareform(ed)
return sq
def df_lineages_from_subset(frame, similarity_cutoff):
""" Returns an array of lineage membership based on a CDR3 cutoff """
edit_sq = df_pw_edit(frame)
n_groups, labels = connected_components(edit_sq <= round(1 - similarity_cutoff, 4))
return labels
def df_add_lineages(dfin, similarity_cutoff):
""" Returns input dataframe with additional lineage column
Args:
similarity_cutoff (float): e.g. 0.8 for 80% minimum cdr3aa similarity
"""
dfin = dfin.copy()
# unique index required for join
if not dfin.index.is_unique:
print("Input DataFrame index not unique, applying reset_index().")
dfin.reset_index(drop=True, inplace=True)
lincnt = 0
lins = []
for (v, j, _), sub in dfin.groupby(['v_call_no_allele',
'j_call_no_allele',
'cdr3aa_len']):
if sub.shape[0] > 1:
# CDR3 distance comparisoin
sub_lineages = df_lineages_from_subset(sub, similarity_cutoff)
lins += zip(sub.index, sub_lineages + lincnt)
lincnt += np.unique(sub_lineages).shape[0]
else:
# single sequence belongs in its own lineage
lins.append((sub.index.values[0], lincnt))
lincnt += 1
# adds a "lineage" column corresponding to the lineage number for that cell
lins = pd.DataFrame(lins, columns=['index', 'lineage']).set_index('index')
if 'lineage' in dfin.columns:
dfin = dfin.drop('lineage', axis=1).join(lins)
else:
dfin = dfin.join(lins)
return dfin
| true
|
d6307c4a2d7f970c35c17bd542fb55a1d9c0b565
|
Python
|
avados/scrumtools
|
/burnup/features/steps/test_burnup_feature.py
|
UTF-8
| 2,638
| 2.828125
| 3
|
[] |
no_license
|
"""Behave step definitions for the burn-up velocity/forecast feature.

Each step delegates to helpers from ``burnup.utils`` (number-list parsing,
rolling averages over the last six sprints, and best/worst/average
forecasts) and checks the result with PyHamcrest matchers.
"""
from behave import *
from burnup.utils import *
from hamcrest import *
from behave import register_type
import parse

use_step_matcher("parse")

# -- REGISTER: User-defined type converter (parse_type).
# "NumberList" lets scenarios write e.g. "1, 2, 3" and receive a parsed list.
register_type(NumberList=parse_list_of_number)


@given('i have a "{list:NumberList}" of numbers')
def step_impl(context, list):
    # Keep the parsed numbers on the behave context for the following steps.
    context.data = list


@then('the lowest average is "{result:g}"')
def step_impl(context, result):
    assert_that(lowest_average_over_last_six_values(context.data), equal_to(result))


@step("i have more than 6 numbers")
def step_impl(context):
    # Despite the wording, exactly 6 numbers is also accepted (>=).
    assert_that(len(context.data), greater_than_or_equal_to(6))


@then('the highest average is "{result:g}"')
def step_impl(context, result):
    assert_that(highest_average_over_last_six_values(context.data), equal_to(result))


@step('the average is "{result:g}"')
def step_impl(context, result):
    assert_that(average_over_last_six_values(context.data), equal_to(result))


@given("i have a list of past sprint velocity")
def step_impl(context):
    # Fixed fixture of eight past sprint velocities.
    context.data = [3, 34, 21, 7, 42, 9, 13, 21]


@then("i must get the real progress by incrementing each week with the sum of previous weeks")
def step_impl(context):
    # Cumulative (running) sum of the velocities above.
    assert_that(get_real_progress(context.data), equal_to([3, 37, 58, 65, 107, 116, 129, 150]))


@then("i can give a best case forecast for the next 6 iterations based on the best 3 iterations over the last 6 iterations")
def step_impl(context):
    assert_that(get_best_forecast(context.data), equal_to([178, 206, 234, 262, 290,318]))


@then("i can give a worst case forecast for the next 6 iterations based on the worst 3 iterations over the last 6 iterations")
def step_impl(context):
    assert_that(get_worst_forecast(context.data), equal_to([159.67, 169.34, 179.01, 188.68, 198.35, 208.02]))


@then("i can give an average case forecast for the next 6 iterations based on at most the last 6 iterations")
def step_impl(context):
    assert_that(get_average_forecast(context.data), equal_to([168.83, 187.66, 206.49, 225.32, 244.15, 262.98]))


@then(
    "i can give a best case forecast for the next 6 iterations based on the standard deviation on the last 6 iterations")
def step_impl(context):
    assert_that(get_best_forecast(context.data, CalculusType.STANDARDDEV), equal_to([181.61, 213.22, 244.83, 276.44, 308.05, 339.66]))


@then(
    "i can give a worst case forecast for the next 6 iterations based on the the standard deviation on the last 6 iterations")
def step_impl(context):
    assert_that(get_worst_forecast(context.data, CalculusType.STANDARDDEV), equal_to([156.05, 162.1, 168.15, 174.2, 180.25, 186.3]))
| true
|
f1d2d480574e64580ce7b1a9c95a98f40c9c011d
|
Python
|
hamidmoghadam/thesis
|
/tyrion2/lstm.py
|
UTF-8
| 6,572
| 2.875
| 3
|
[] |
no_license
|
'''
A Recurrent Neural Network (LSTM) implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import os
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np
import sys
# Import MNIST data
from data_provider import data_provider
from tensorflow.contrib import learn
#mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
'''
To classify images using a recurrent neural network, we consider every image
row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then
handle 28 sequences of 28 steps for every sample.
'''
# Parameters
learning_rate = 0.0005
batch_size = 200
number_of_post_per_user = int(sys.argv[2])
train_iteration = int(sys.argv[3])
n_embedding = int(sys.argv[5])
# Network Parameters
n_input = 100 # MNIST data input (img shape: 28*28)
n_hidden = int(sys.argv[4]) # hidden layer num of features
n_classes = int(sys.argv[1]) # MNIST total classes (0-9 digits)
#vocab_size = 58000
dp = data_provider(size=n_classes, sent_max_len = n_input, number_of_post_per_user = number_of_post_per_user)
# tf Graph input
x = tf.placeholder(tf.int32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
dropout = tf.placeholder(tf.float32, shape=())
is_training = tf.placeholder(tf.bool)
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([n_classes]))
}
def RNN(x, weights, biases, dropout, is_training):
    """Bidirectional-LSTM classifier head.

    Args:
        x: embedded input batch; assumed shape (batch, n_input, n_embedding)
           -- TODO confirm against the embedding_lookup caller.
        weights: dict with the final projection matrix under 'out'.
        biases: dict with the final projection bias under 'out'.
        dropout: keep probability used by tf.nn.dropout on the inputs.
        is_training: bool tensor; dropout is only applied while training.

    Returns:
        Unnormalised class logits of shape (batch, n_classes).
    """
    # Apply input dropout only in training mode; tf.cond selects the branch
    # at graph-execution time based on the is_training placeholder.
    x = tf.cond(tf.equal(is_training, tf.constant(True)), lambda: tf.nn.dropout(x, dropout), lambda: x)
    #if is_training:
    #    x = tf.nn.dropout(x, dropout)
    # The static RNN APIs need a Python list of per-step tensors rather than
    # one (batch, steps, features) tensor: unstack along the time axis.
    x = tf.unstack(x, n_input, 1)
    # One forward and one backward LSTM cell for the bidirectional pass.
    fw_lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    bw_lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    #outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    outputs, states, _ = rnn.static_bidirectional_rnn(fw_lstm_cell, bw_lstm_cell, x, dtype=tf.float32)
    # Element-wise max over all time steps (max-pooling across the sequence).
    output = outputs[0]
    for i in range(1, len(outputs)):
        output = tf.maximum(output, outputs[i])
    # Final linear projection to class logits.
    return tf.matmul(output, weights['out']) + biases['out']
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [dp.vocab_size, n_embedding], dtype=tf.float32)
inputs = tf.nn.embedding_lookup(embedding, x)
pred = RNN(inputs, weights, biases, dropout, is_training)
softmax_pred = tf.nn.softmax(pred)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
lst_train_cost = []
lst_valid_cost = []
lst_train_accr = []
lst_valid_accr = []
# Launch the graph
with tf.Session() as sess:
sess.run(init)
#sess.run(embedding_init, feed_dict={embedding_placeholder: embedding})
# Keep training until reach max iterations
for i in range(train_iteration):
#print('epoch {0} :'.format(i+1))
train_accr = 0.0
valid_accr = 0.0
train_cost = 0.0
valid_cost = 0.0
step = 0
epoch_size = max(dp.train_size // batch_size, 1)
while step < epoch_size:
batch_x, batch_y= dp.get_next_train_batch(batch_size)
acc, loss, _ = sess.run([accuracy, cost, optimizer], feed_dict={x: batch_x, y: batch_y, dropout: 0.5, is_training: True})
train_accr += acc
train_cost += loss
step += 1
lst_train_cost.append(train_cost/epoch_size)
lst_train_accr.append(train_accr/epoch_size)
print("Training Loss = {:.3f}".format(train_cost/epoch_size) + ", Training Accuracy= {:.3f}".format(train_accr/epoch_size))
valid_data, valid_label = dp.get_next_valid_batch(dp.valid_size)
acc = sess.run(accuracy, feed_dict={x: valid_data, y: valid_label, dropout: 1.0, is_training:False})
loss = sess.run(cost, feed_dict={x: valid_data, y: valid_label, dropout: 1.0, is_training:False})
lst_valid_cost.append(loss)
lst_valid_accr.append(acc)
print("Validation Loss = {:.3f}".format(loss) + ", Validation Accuracy= {:.3f}".format(acc))
accr = 0
accr_per_post = 0
number_of_post = 0
for i in range(n_classes):
#print('for class number {0}'.format(i))
test_data, test_label= dp.get_next_test_batch(i)
loss, acc, prediction = sess.run([cost, accuracy, softmax_pred], feed_dict={x: test_data, y: test_label, dropout: 1.0, is_training:False})
for predict in prediction:
number_of_post += 1
if predict.argmax(axis=0) == i:
accr_per_post += 1
result = np.sum(np.log10(prediction), axis=0)
max_idx = result.argmax(axis=0)
if max_idx == i :
accr += 1
#result = (np.sum(prediction, axis=0)/np.sum(np.sum(prediction, axis=0))).tolist()
#temp = result[i]
#result.sort(reverse=True)
#max_index = result.index(temp)
#print(' '.join([str(k) for k in result[:(max_index+1)]]))
#print("Test Loss = {:.3f}".format(loss) + ", Test Accuracy= {:.3f}".format(acc))
print('accr is {0:.3f} accr per post is {1:.3f}'.format(accr / n_classes, accr_per_post/number_of_post))
#plt.plot(range(len(lst_train_cost)), lst_train_cost, 'g--', range(len(lst_valid_cost)), lst_valid_cost, 'b--')
#plt.figure()
#plt.plot(range(len(lst_train_accr)), lst_train_accr, 'g--', range(len(lst_valid_accr)), lst_valid_accr, 'b--')
#plt.show()
| true
|
f369631d63c295bd45ad116d527361cf495e5559
|
Python
|
wangzimeng/weekends
|
/day4/readCsv2.py
|
UTF-8
| 2,363
| 3.515625
| 4
|
[] |
no_license
|
# 1. Previously the CSV-reading code could not be reused by other test cases,
#    so it should be wrapped in a function.
# 2. Each test case uses a different path, so the path should be passed to the
#    function as a parameter.
#
# 4. A file was opened but never closed, leaking the file handle.
import csv
import os
def read(file_name):
    """Load a CSV file from the project's ``data`` directory.

    The file is located relative to this module: the ``day4`` segment of the
    current file's directory is swapped for ``data/<file_name>``, so the code
    works regardless of where the project is checked out.

    Returns the file contents as a list of rows (each row a list of strings),
    so the data can be consumed by data-driven tests after the file is closed.
    """
    script_dir = os.path.dirname(__file__)
    data_path = script_dir.replace("day4", "data/" + file_name)
    # The with-block guarantees the handle is closed even if reading fails;
    # materialise the rows first, because they vanish once the file closes.
    with open(data_path, "r") as csv_file:
        return list(csv.reader(csv_file))
# If an exception occurs between opening and closing the file, the later code
# (including file.close()) never runs and the file stays open.
# A with...as statement should be used so the file is closed automatically.
if __name__ == '__main__':
    # (Comments translated from Chinese.)
    # A hard-coded absolute path would break for other developers: a project
    # is not checked out at the same location on every machine, so the data
    # file must be found relative to the current source file instead.
    # __file__ is Python's built-in reference to the current file.
    # NOTE(review): current_file_path is computed here but unused — read()
    # derives its own path; kept only as an illustration of the technique.
    current_file_path = os.path.dirname(__file__)
    # Reading the data is not the goal in itself: read() returns it so that
    # data-driven tests can consume it.
    member_info = read("goods_info.csv")
    for row in member_info:
        print(row)
| true
|
ac26959f1d735a1fa54add3ebe69640a2dd8d759
|
Python
|
Sonia22545/python-programs
|
/sum_of_digits.py
|
UTF-8
| 221
| 4.3125
| 4
|
[] |
no_license
|
# Read a number as text and print the sum of its decimal digits.
raw = input(" Enter the integer :")  # asking for the integer from the user
# Use the built-in sum() over a generator instead of manually accumulating
# into a variable named `sum`, which shadowed the builtin in the original.
total = sum(int(digit) for digit in raw)
print(total)
| true
|
b5c158a3ae82d7afd7e21d3ac0b19075d6d9add2
|
Python
|
WallaceLiu/plantask
|
/coreNewAdjMatrix.py
|
UTF-8
| 6,739
| 2.5625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 30 11:43:31 2017
@author: liuning11
"""
from coreNewAdj import coreNewAdj
from nodeAdjMatrix import nodeAdjMatrix
import datetimeUtil
import random
class coreNewAdjMatrix(coreNewAdj):
    """Build a new task graph (adjacency-matrix based).

    (Docstrings and comments translated from Chinese.)
    """
    # (earliest, latest) timestamp pair covering the tasks; set in __init__.
    minmax = None
    # The generated model graph. NOTE(review): this is a class-level default
    # shared by all instances until __create assigns an instance attribute.
    modelGraph = nodeAdjMatrix()

    def __init__(self, g):
        coreNewAdj.__init__(self, g)
        print('--Stage: coreNewAdjMatrix...')
        self.minmax = self.__getMinMax(self.graph.lastOccurTime)
        self.__create(g, self.minmax)
        self.__search(self.modelGraph)
        self.__cut(self.modelGraph)
        print('--Stage: coreNewAdjMatrix End.')

    def __create(self, g, minmax):
        """Add the new candidate task nodes and build the adjacency matrix."""

        def ready(self, g, step, minmax):
            # Collect candidate clones of every non-terminal task, shifted
            # backwards in time by `step` until the window's start is reached.
            if self.config.debug == True:
                print('\t-coreNewAdjMatrix.__create.ready...')
                print('\t\t-Min and Max:<%s,%s>' %
                      (datetimeUtil.timestamp_datetime(minmax[0]),
                       datetimeUtil.timestamp_datetime(minmax[1])))
            no = g.edgenum + g.nodenum + 1
            arr = []
            for i in range(g.nodenum):
                if g.tTask[i] == 1:  # non-terminal task node
                    t = g.findRootTask(g.tasksIndex[i])
                    bDt = t.bDateTime - step
                    win = maxParentConsume(self, g.map, g.nodenum, i)
                    if self.config.debug == True and self.config.detail == True:
                        print(
                            '\t\t-ready:%s win=%s end=%s step=%s'
                            %
                            (t.id, win,
                             datetimeUtil.timestamp_datetime(minmax[0] + win),
                             str(step)))
                    while bDt > minmax[0] + win:
                        nt = t.cloneLocal()
                        nt.no = no
                        nt.id = nt.id + ':' + str(no)  # new ID = original ID + sequence number
                        nt.bDateTime = bDt
                        nt.eDateTime = nt.bDateTime + nt.consume
                        arr.append(nt)
                        if self.config.debug == True and self.config.detail == True:
                            print('\t\t\t-Add New:%s' % nt.toStringRT())
                        bDt = bDt - step
                        no = no + 1
            if self.config.debug == True:
                print('\t-coreNewAdjMatrix.__create.ready:')
                for a in arr:
                    print(a.toStringLP())
            return arr

        def maxParentConsume(self, matrix, nodenum, c):
            # Walk up the chain of maximum-weight parents of column `c`,
            # accumulating their consume values.
            m = 0
            r = c
            consume = 0
            while r >= 0:
                rt = -1
                for i in range(nodenum):
                    if matrix[i][r] > m:
                        m = matrix[i][r]
                        rt = i
                if rt > 0:
                    consume += m
                # NOTE(review): when no heavier parent exists, rt stays -1 and
                # the walk terminates on the next loop test — confirm this
                # matches the original (indentation was lost in transit).
                r = rt
            return consume

        def create(self, g, step, minmax):
            """Add the new candidate task nodes and build the new
            adjacency-matrix task graph."""
            t_arrs = ready(self, g, step, minmax)
            ng = g.clone()
            no = t_arrs[len(t_arrs) - 1].no + 1
            for t_arr in t_arrs:  # add the nodes
                n = t_arr.cloneLocal()
                ng.add(ng.tasks, n)
                no = no + 1
                node = ng.findRootTask(t_arr.realId)
                for c in node.childs.tasks:  # add the node's edges
                    t = ng.findRootTask(c.realId)
                    if len(t.childs.tasks) == 0:
                        ng.add(n.childs, t.cloneLocal())  # edge to a terminal node
                    else:
                        edges = filter(
                            lambda x: t.realId == x.realId and t_arr.eDateTime <= x.bDateTime,
                            t_arrs)
                        for edge in edges:
                            e = edge.cloneLocal()
                            ng.add(n.childs, e)
            ng.createMap()
            return ng

        print('\t-coreNewAdjMatrix.__create...')
        self.modelGraph = create(self, g, self.config.timeStep, minmax)
        print('\t-coreNewAdjMatrix.__create End.')

    def __moving(self, step):
        """Return a randomised time offset.

        Avoids piling every moved task onto the same instant.

        Args:
            step: base time interval.
        """
        seed = random.randint(0, 100)
        return int(step * (1 + seed / 100))

    def __getMinMax(self, lastOccurTime):
        """Return the (earliest, latest) timestamp pair of the tasks."""
        minmax = (lastOccurTime - self.config.period * 3600, lastOccurTime)
        return minmax

    def __search(self, g):
        """Find every path in the adjacency matrix."""

        def _isPath(self, g, s, paths):
            """Is `s` a (new) task path?"""

            def _isExist(self, g, s):
                """Has this path already been recorded?"""
                path = '->'.join(s)
                for p in paths:
                    if path in p:
                        return True
                return False

            def _isRoot(self, g, botomm):
                """Does the path start at a root node?

                Only sequences that begin at a root node count as paths.
                """
                bl = g.isRootTask(botomm.split(':')[0])
                if bl is False:
                    pass  # the edge would be removed here
                return bl

            return _isExist(self, g, s) is False and _isRoot(self, g,
                                                             s[0]) is True

        def m(self, s, r, g, paths):
            # Depth-first walk from row `r`, recording complete paths.
            for i in range(g.nodenum):
                if g.map[r][i] > 0:
                    s.append(g.tasksIndex[i])
                    m(self, s, i, g, paths)
                    s.pop()
                else:
                    if i >= g.nodenum - 1 and _isPath(self, g, s,
                                                      paths) is True:
                        p = '->'.join(s)
                        paths.append(p)

        print('--Stage: coreNewAdjMatrix.__search...')
        s = []
        for r in range(g.nodenum):
            s.clear()
            if g.rTask[r] == 0:
                s.append(g.tasksIndex[r])
                m(self, s, r, g, self.path)
        if self.config.debug == True:
            print(self.path)
        print('--coreNewAdjMatrix.__search End.')

    def __cut(self, g):
        """The freshly built task graph can conflict with the found paths:
        unused edges should be removed and a complete, correct task graph
        rebuilt. (Not yet implemented.)"""
        pass
| true
|
b74513fb6699f6e13f593109b05de3e8ae3b2421
|
Python
|
Code7unner/SuschenkoBot
|
/model.py
|
UTF-8
| 434
| 2.796875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
class Gift:
    """A gift catalogue entry.

    The original version also declared every attribute at class level with a
    dummy default; those class attributes were immediately shadowed by the
    instance attributes set in ``__init__`` and served no purpose, so they
    have been removed.
    """

    def __init__(self, gift_id, sex, name, description, link, mark, mark_count):
        self.gift_id = gift_id          # unique identifier of the gift
        self.sex = sex                  # target-audience code (semantics defined by caller)
        self.name = name                # display name
        self.description = description  # free-text description
        self.link = link                # URL pointing at the gift
        self.mark = mark                # average rating
        self.mark_count = mark_count    # number of ratings behind the average

    def __repr__(self):
        # Compact debug representation; description/link omitted for brevity.
        return (f"Gift(gift_id={self.gift_id!r}, sex={self.sex!r}, "
                f"name={self.name!r}, mark={self.mark!r}, mark_count={self.mark_count!r})")
| true
|
bc7f5cd0cefd39fd59018c6faaee2ea81f0dad8d
|
Python
|
ethyl2/Graphs
|
/projects/ancestor/ancestor.py
|
UTF-8
| 2,525
| 4.09375
| 4
|
[] |
no_license
|
from graph import Graph
def earliest_ancestor(ancestors, starting_node):
    '''
    Given a list of ancestors, such as [(1, 3), (2, 3), (3, 6), (5, 6), (5, 7), (4, 5),
    (4, 8), (8, 9), (11, 8), (10, 1)]
    in the format (parent, child)
    And a starting_node,
    Return the node at the furthest distance from the starting_node.
    If more than one node is found at that furthest distance, return the one with the lowest ID.
    If the starting_node has no parents, return -1.
    '''
    # Put the ancestors into a graph with directed edges pointing from child
    # to parent, so a traversal from starting_node moves toward older ancestors.
    graph = Graph()
    # Add the vertices (each endpoint of each pair, once).
    for pair in ancestors:
        if not pair[0] in graph.vertices:
            graph.add_vertex(pair[0])
        if not pair[1] in graph.vertices:
            graph.add_vertex(pair[1])
    # Add the edges: child -> parent.
    for pair in ancestors:
        graph.add_edge(pair[1], pair[0])
    # Collect every depth-first path starting at starting_node; the earliest
    # ancestor is the tail of the longest such path.
    candidate_paths = graph.get_all_dft_paths(starting_node)
    # If only the starting_node is in the path, it has no parents, so return -1
    if len(candidate_paths[0]) == 1:
        return -1
    # If there is only 1 path, return the tail of the path.
    elif len(candidate_paths) == 1:
        return candidate_paths[0][-1]
    else:
        # Find the longest path length.
        longest_path_length = max([len(path) for path in candidate_paths])
        # Find the paths that have that length.
        longest_paths = [path for path in candidate_paths if len(
            path) == longest_path_length]
        # Ties on distance are broken by the smallest node ID.
        return min([x[-1] for x in longest_paths])
if __name__ == '__main__':
    # Quick manual check: in this tree, node 9's earliest ancestor is 4.
    test_ancestors = [(1, 3), (2, 3), (3, 6), (5, 6), (5, 7),
                      (4, 5), (4, 8), (8, 9), (11, 8), (10, 1)]
    print(earliest_ancestor(test_ancestors, 9))
| true
|
c5c4374592c61c109efb7f2162e0e78342246454
|
Python
|
v-manju/manju.v
|
/stringnum.py
|
UTF-8
| 53
| 2.90625
| 3
|
[] |
no_license
|
# Read a line and report whether it consists solely of decimal digits.
# The variable is renamed so it no longer shadows the built-in `str` type.
number_text = input("enter the number\n")
print(number_text.isdigit())
| true
|
e93e3c1f354f754fddbd1c53612a7986dc22c85b
|
Python
|
daniel-reich/turbo-robot
|
/9AMT6SC4Jz8tExihs_22.py
|
UTF-8
| 641
| 3.765625
| 4
|
[] |
no_license
|
"""
Create a function to generate all nonconsecutive binary strings where
nonconsecutive is defined as a string where no consecutive ones are present,
and where `n` governs the length of each binary string.
### Examples
generate_nonconsecutive(1) ➞ "0 1"
generate_nonconsecutive(2) ➞ "00 01 10"
generate_nonconsecutive(3) ➞ "000 001 010 100 101"
generate_nonconsecutive(4) ➞ "0000 0001 0010 0100 0101 1000 1001 1010"
### Notes
N/A
"""
def generate_nonconsecutive(n):
    """Return all length-n binary strings with no two consecutive ones,
    zero-padded and joined by single spaces, in ascending numeric order."""
    accepted = []
    for value in range(2 ** n):
        bits = format(value, '0{}b'.format(n))
        # Keep only strings that never place two 1-bits side by side.
        if '11' not in bits:
            accepted.append(bits)
    return ' '.join(accepted)
| true
|
8741f7e01fc1097b60867523c68f9f28f8bfd194
|
Python
|
c981890/LTAT.TK.001
|
/3.1 Teksti analyys.py
|
UTF-8
| 775
| 3.109375
| 3
|
[] |
no_license
|
def symbolite_sagedus(jarjend):
    ''' (str) -> dict

    Return a dictionary with the frequency of every character occurring in
    the given string: keys are one-character strings (symbols), values are
    the number of times that symbol appears. Keys are listed in order of
    first occurrence. (Docstring translated from Estonian.)

    >>> symbolite_sagedus("Hommikul silmad on kinni ja huulil on naer")
    {'H': 1, 'o': 3, 'm': 3, 'i': 5, 'k': 2, 'u': 3, 'l': 4, ' ': 7, 's': 1,
    'a': 3, 'd': 1, 'n': 5, 'j': 1, 'h': 1, 'e': 1, 'r': 1}
    '''
    # Single pass over the string: O(n) overall, instead of the original
    # str.count() call per character which made the function O(n^2).
    sonastik = {}
    for mark in jarjend:
        sonastik[mark] = sonastik.get(mark, 0) + 1
    return sonastik
print(symbolite_sagedus("Hommikul silmad on kinni ja huulil on naer"))
| true
|
fc7c2d4a45a56963ef8d344659064880a2bc65ab
|
Python
|
PratikDPatil17/TCS_Digital_Code
|
/max sum of subgroup of given length.py
|
UTF-8
| 298
| 2.71875
| 3
|
[] |
no_license
|
# Split a digit string into consecutive groups of length n and report the
# maximum digit-sum over the groups.
s = input()
n = int(input())
# Build the group list once; the original constructed the identical list
# twice (as `output` and again as `k`).
groups = [s[i:i + n] for i in range(0, len(s), n)]
maxsum = 0
for group in groups:
    # Digit-sum of this group; track the running maximum.
    maxsum = max(maxsum, sum(int(digit) for digit in group))
# Same two output lines as the original (its `k` and `output` were equal).
print(groups, maxsum)
print(groups)
| true
|
6d653739d592a136beec28a1d9901c67b09caacf
|
Python
|
GiovanaPalhares/python-introduction
|
/Letra.py
|
UTF-8
| 177
| 3.203125
| 3
|
[] |
no_license
|
def vogal(z):
    """Return True when z is a vowel letter (either case), else False."""
    # The original bound a local list also named `vogal`, shadowing the
    # function's own name inside its body, and used a redundant if/else to
    # return the boolean; a direct membership test does the same thing.
    return z in ["a", "e", "i", "o", "u", "A", "E", "I", "O", "U"]
rep = vogal("d")
print(rep)
| true
|
33f7687b9a83ba061502a31a03658c86c1c2a299
|
Python
|
akeyi2018/Python3-1
|
/web/testMacro.py
|
UTF-8
| 1,160
| 2.921875
| 3
|
[] |
no_license
|
"""WebIOPi robot-control script: drives four motor pins based on a movement
command selected from the web UI via the MoveForward macro."""
import webiopi

webiopi.setDebug()
GPIO = webiopi.GPIO

# Individual LED pins — presumably BCM numbering; TODO confirm wiring.
LED1PIN = 19
LED2PIN = 26
LED3PIN = 6
# Motor driver pins; each movement pattern below gives one digital level per
# pin, in the same order as pinList.
pinList = [19,26,6,13]
forward = [1,0,1,0]
back = [0,1,0,1]
turnLeft = [0,1,0,0]
turnRight = [0,0,0,1]
stop = [0,0,0,0]
# Globals; g_active is the current movement command, mutated by the macro:
# 1=forward, 2=turn left, 3=turn right, 4=back, 5=stop (see loop()).
g_led1active = 0
g_led2active = 0
g_led3active = 0
g_speed = 50
g_active = 5


def setup():
    # Called once by WebIOPi at startup: configure all motor pins as outputs.
    GPIO.setFunction(pinList[0], GPIO.OUT)
    GPIO.setFunction(pinList[1], GPIO.OUT)
    GPIO.setFunction(pinList[2], GPIO.OUT)
    GPIO.setFunction(pinList[3], GPIO.OUT)


def loop():
    # Called repeatedly by WebIOPi: drive the motors according to g_active.
    if g_active == 1:
        FunMoveMotor(forward, 1)
    if g_active == 2:
        FunMoveMotor(turnLeft, 1)
    if g_active == 3:
        FunMoveMotor(turnRight, 1)
    if g_active == 4:
        FunMoveMotor(back, 1)
    if g_active == 5:
        FunMoveMotor(stop, 0.5)
    webiopi.sleep(0.5)


def FunMoveMotor(direct, tm):
    # Write the level pattern to the motor pins, then hold for tm seconds.
    for pin, val in zip(pinList, direct):
        GPIO.digitalWrite(pin, val)
    webiopi.sleep(tm)


def forwardDrive(val):
    # Pulse LED1 with the given level for one second.
    GPIO.digitalWrite(LED1PIN, val)
    webiopi.sleep(1)
    #GPIO.digitalWrite( LED1PIN, 0 )


@webiopi.macro
def MoveForward(active):
    # Exposed to the web UI: select the current movement command.
    global g_active
    g_active = int(active)
    return active
| true
|
6d431af62191168fe689639ec65419291195edf0
|
Python
|
nikitos219745/lab6
|
/C.py
|
UTF-8
| 1,158
| 3.359375
| 3
|
[] |
no_license
|
from enum import Enum


# The Enum classes are hoisted out of the loop: the original re-created both
# classes on every iteration, which was wasteful and produced distinct class
# objects each time around.
class month(Enum):
    """Calendar months, numbered 1-12 as typed by the user."""
    January = 1
    February = 2
    March = 3
    April = 4
    May = 5
    June = 6
    July = 7
    August = 8
    September = 9
    October = 10
    November = 11
    December = 12


class season(Enum):
    """The four seasons; only the .name is printed."""
    Winter = 1
    Spring = 2
    Summer = 3
    Autumn = 4


# Repeatedly ask for a month number and print the matching season until the
# user declines to continue.
while True:
    try:
        s = int(input('month:'))
    except ValueError:
        print("Напиши шось путнє")
        # Bug fix: the original fell through here, so `s` was unbound on the
        # first bad input (NameError) or stale on later ones; re-prompt instead.
        continue
    if s == month.December.value or s == month.January.value or s == month.February.value:
        print(f'{season.Winter.name}')
    elif s == month.March.value or s == month.April.value or s == month.May.value:
        print(f'{season.Spring.name}')
    elif s == month.June.value or s == month.July.value or s == month.August.value:
        print(f'{season.Summer.name}')
    elif s == month.September.value or s == month.October.value or s == month.November.value:
        print(f'{season.Autumn.name}')
    n = input("Якщо хочеш продлить нажми 1")
    if n == '1':
        continue
    else:
        break
| true
|
2b305ed61cb2278f911bc229e23833bac2dd162b
|
Python
|
AnnaAndropova/intsit_lab1
|
/hierarhical_clustering.py
|
UTF-8
| 393
| 2.703125
| 3
|
[] |
no_license
|
import data_reader
from scipy.cluster.hierarchy import linkage, dendrogram
from scipy.spatial.distance import pdist
import matplotlib.pyplot as plt
def build_graph():
    """Render a dendrogram of the hierarchically clustered data set.

    Loads the labelled observations via data_reader.read_data(), computes the
    condensed pairwise (Euclidean) distance matrix, links clusters with
    Ward's method and displays the resulting dendrogram via matplotlib.
    """
    labels, data = data_reader.read_data()
    # Condensed pairwise distance matrix (Euclidean by default).
    df = pdist(data)
    # Ward linkage merges the pair of clusters minimising within-cluster variance.
    Z = linkage(df, method='ward')
    dendro = dendrogram(Z, labels=labels)
    plt.title('Dendrogram')
    plt.ylabel('Euclidean distance')
    plt.show()
| true
|
de2f0696d77f184c4654b10e164c63b6e57a8640
|
Python
|
DJHyun/Algorithm
|
/SW expert/python/5521_상원이의생일파티.py
|
UTF-8
| 715
| 2.734375
| 3
|
[] |
no_license
|
import sys
# Redirect stdin so input() reads the bundled sample-input file.
sys.stdin = open("5521_상원이의생일파티.txt", "r")

T = int(input())
for test_case in range(1, T + 1):
    # n people in total, m friendship records.
    n, m = map(int, input().split())
    guest = []    # friends-of-friends to invite (second degree)
    friend = []   # direct friends of person 1 (the host)
    xx, yy = [], []
    for i in range(m):
        x, y = map(int, input().split())
        if x == 1:
            friend.append(y)
        else:
            # Defer pairs not involving person 1 until all direct friends
            # are known.
            xx.append(x)
            yy.append(y)
    if xx:
        for i in range(len(xx)):
            # Invite the partner of a direct friend, unless already invited
            # either as a guest or as a direct friend.
            if xx[i] in friend and yy[i] not in guest and yy[i] not in friend:
                guest.append(yy[i])
            if yy[i] in friend and xx[i] not in guest and xx[i] not in friend:
                guest.append(xx[i])
    # Answer: how many people to invite (direct friends + their friends).
    print(f'#{test_case} {len(guest) + len(friend)}')
| true
|
73b1f857e88c39d5f9de524c08fccecb560c3b7d
|
Python
|
papercavalier/ftps3
|
/ftps3.py
|
UTF-8
| 1,341
| 2.75
| 3
|
[] |
no_license
|
import os
import boto3
import ftplib
import tempfile
class Sync:
    """Mirror files from an FTP server into an S3 bucket.

    Connection details come from the environment: SERVER, USER and PASSWORD
    for FTP, BUCKET for S3 (boto3 credentials come from its usual sources).
    Files already present in the bucket (keyed by FTP path) are skipped.
    """

    def __init__(self):
        self.ftp = ftplib.FTP(os.environ['SERVER'])
        self.ftp.login(os.environ['USER'], os.environ['PASSWORD'])
        self.s3 = boto3.client('s3')

    def run(self, dirname):
        """Recursively walk `dirname` on the FTP server, uploading every
        file that the bucket does not already contain."""
        names = self.ftp.nlst(dirname)
        for name in names:
            if self.__is_file(name):
                if not self.__s3_has_key(name):
                    # Spool into a temp file so the FTP download completes
                    # before the S3 upload begins.
                    with tempfile.TemporaryFile() as file:
                        self.ftp.retrbinary('RETR ' + name, file.write)
                        file.seek(0)
                        self.s3.upload_fileobj(file, os.environ['BUCKET'],
                                               name)
                        print(name)
            else:
                # Not a file: treat as a directory and recurse into it.
                self.run(name)

    def __is_file(self, name):
        # FTP has no portable "is this a file?" query; SIZE succeeds for
        # files and raises a permanent error for directories.
        try:
            self.ftp.size(name)
            return True
        except ftplib.error_perm:
            return False

    def __s3_has_key(self, key):
        # HEAD the object: a 404 means it does not exist yet; any other
        # client error is unexpected and re-raised.
        try:
            self.s3.head_object(Bucket=os.environ['BUCKET'], Key=key)
            return True
        except self.s3.exceptions.ClientError as e:
            if e.response['Error']['Code'] == '404':
                return False
            else:
                raise
def lambda_handler(event, context):
    # AWS Lambda entry point: sync the FTP root directory into the bucket.
    Sync().run('.')
| true
|
23fda8154b6b1542de75d3df1c6ced0752764c05
|
Python
|
DrewOrtego/TORK
|
/Commands/GeneralCommands.py
|
UTF-8
| 8,050
| 2.984375
| 3
|
[] |
no_license
|
import os
import sys
import time
sys.path.append([os.sep.join(os.getcwd().split(os.sep)[:-1]), 'Stuff'])
class GeneralCommands:
    """
    Abstract class containing functions for running harness-centric commands.
    """
    # Maps command names to the argument values they accept.
    function_args = {
        'help': ['all', 'assertion', 'browser', 'general', 'page', 'window'],
    }

    @staticmethod
    def end_save(_session):
        """
        Ends a save session, if active.
        :param _session: Current session and its available commands.
        """
        if _session.save_enabled:
            _session.save_enabled = False
            _session.save_file_path = None
            # Drop the '*' marker that save() prepended to the prompt.
            _session.prompt = _session.prompt.replace('*', '')
            print('Save session ended.')
        else:
            print('A save session is not currently enabled.')

    @staticmethod
    def help(_session, default_arg_type='all'):
        """
        See InteractiveSession.InteractiveSession.help() function.
        :param _session: Current session and its available commands.
        :param default_arg_type: Indicates which commands to display (options found in function_args).
        """
        def print_each(iterable, space=''):
            # Print one item per line, prefixed with an optional indent.
            for i in iterable:
                print("{0}{1}".format(space, i))

        # Unknown argument: warn and fall back to listing everything.
        if default_arg_type not in ['browser', 'general', 'assertion', 'page', 'window', 'all']:
            print("Did not recognize argument {0}. Printing all available commands...".format(default_arg_type))
            default_arg_type = 'all'
        if default_arg_type in ['page', 'all']:
            print("\nPage Commands:")
            print_each(sorted(list(_session.current_page_object.page_elements.keys())), ' ')
            if _session.current_page_object.window_elements:
                print("\nContained Elements:")
                for win_elm, elms in _session.current_page_object.window_elements.items():
                    print("*{0}{1}:".format(' ', win_elm))
                    print_each(sorted(list(elms.keys())), ' ')
            if _session.commands.windowed_element_commands:
                print("\nWindow Commands:")
                print_each(sorted(list(_session.commands.windowed_element_commands.keys())), ' ')
        if default_arg_type in ['window']:
            if _session.commands.windowed_element_commands:
                print("\nWindow Commands:")
                print_each(sorted(list(_session.commands.windowed_element_commands.keys())), ' ')
            else:
                print("\nNo windowed commands found. Check 'page' for other options.")
        if default_arg_type in ['general', 'all']:
            print("\nGeneral Commands:")
            print_each(sorted(list(_session.commands.general_commands.keys())), ' ')
        if default_arg_type in ['browser', 'all']:
            print("\nBrowser Commands:")
            print_each(sorted(list(_session.commands.portal_commands.keys())), ' ')
        if default_arg_type in ['assertion', 'all']:
            print("\nAssertion Commands:")
            print_each(sorted(list(_session.commands.assertion_commands.keys())), ' ')
        print("\n")

    @staticmethod
    def pause(_session):
        """
        Stops until the user presses the ENTER key (making this function platform-agnostic).
        Useful in automated mode as a way of pausing a test, interacting with the browser,
        and then starting the test again.
        :param _session: test session object (unused).
        """
        input("Press ENTER to continue...")

    @staticmethod
    def prompt(_session, arb_text):
        """
        Allows user to change the prompt character from the default.
        :param _session: Current session and its available commands.
        :param arb_text: token object containing the text to use as the new prompt.
        """
        _session.prompt = '{0} '.format(arb_text.name.strip('"'))

    @staticmethod
    def save(_session, arb_file_name):
        """
        Enables the save session after verifying file i/o. This begins saving
        interactively-entered commands into the specified file. Makes automated
        testing easier, assuming the commands are valid.
        Updates _session.save_enabled / save_file_path / prompt in place; the
        function itself returns nothing.
        :param _session: Current session and its available commands.
        :param arb_file_name: contains name of the file which the commands will be written to.
        """
        def check_extension(filename):
            """
            If not already provided, append the .txt file extension
            :param filename: user-provided filename or path
            :return: modified filename
            """
            if not filename.endswith('.txt'):
                return '{}.txt'.format(filename)
            else:
                return filename

        def get_full_path(full_filename):
            """
            Verify whether the user provided a full file path or not
            :param full_filename: file or full file path with .txt ext.
            :return: The full file path if the directory exists, otherwise False.
            """
            if len(full_filename.split(os.sep)) > 1:
                # Prevents someone from writing to a specified directory
                print("Invalid file name! Do not use separators in the file name.")
                return False
            else:
                # SavedSessions lives two directories above this module.
                dir_path = os.path.join(
                    os.sep.join(os.path.abspath(__file__).split(os.sep)[:-2]),
                    'SavedSessions'
                )
                full_file_path = os.sep.join([dir_path, full_filename])
                if os.path.exists(full_file_path):
                    print("File already exists: {}".format(full_file_path))
                    return False
                else:
                    return full_file_path

        if _session.save_enabled:
            print('A save session is already enabled.')
        else:
            file_name = arb_file_name
            filename_with_ext = check_extension(file_name)
            full_path = get_full_path(filename_with_ext)
            if full_path:
                # Touch the file so later appends cannot fail on a missing path.
                with open(full_path, 'a') as f:
                    f.close()
                print("Created file {0}".format(full_path))
                _session.save_enabled, _session.save_file_path, _session.prompt = True, full_path, '*{}'.format(_session.prompt)
            else:
                _session.save_enabled, _session.save_file_path = False, ''

    @staticmethod
    def sleep(_, int_time):
        """
        Tells the program to wait for n-number of seconds. Literally sleeps.
        :param int_time: amount of seconds to sleep.
        """
        try:
            time.sleep(int_time)
        except Exception as err:
            # Bad argument (e.g. negative or non-numeric): report, don't crash.
            print(err)

    @staticmethod
    def try_start(_session):
        """
        Starts "try mode" which ignores exceptions.
        This allows for portal-unique commands to coexist with non-unique commands.
        E.g. A test file can include the "extra" workflow needed for AGOL, and
        then turn off "try mode" for commands that it shares with Portal.
        :param _session: Session object.
        """
        if _session.try_mode:
            print("Try mode is already active. To disable, use: try_stop.")
        else:
            _session.try_mode = True

    @staticmethod
    def try_stop(_session):
        """
        Disables "try mode" after try_start has been run.
        :param _session: Session object.
        """
        if not _session.try_mode:
            print("Try mode is already inactive. To enable, use: try_start.")
        else:
            _session.try_mode = False

    @staticmethod
    def quit(_):
        """
        Exits the program. Same as "exit".
        """
        sys.exit()

    @staticmethod
    def xyzzy(_):
        # Easter egg referencing the classic text-adventure magic word.
        print('A hollow voice says, "Quit playing around, this isn\'t a game!"')
| true
|
3f51d3d0b23b64ff6025915af20d0dfbfc2f6439
|
Python
|
jmyh/stepik_autotest
|
/lesson2/step1_task1_use checkbox&radiobutton.py
|
UTF-8
| 947
| 3.015625
| 3
|
[] |
no_license
|
"""Stepik selenium exercise: solve the math captcha on the test page, tick the
checkbox/radiobutton and submit. (Comments translated from Russian.)"""
from selenium import webdriver
import time
import math

link = "http://suninjuly.github.io/math.html"


def calc(x):
    # The page asks for log(|12 * sin(x)|) of the displayed integer,
    # submitted as a string.
    return str(math.log(abs(12*math.sin(int(x)))))


try:
    browser = webdriver.Chrome()
    browser.get(link)
    # Read the challenge value shown on the page and compute the answer.
    x_element = browser.find_element_by_id("input_value")
    x = x_element.text
    y = calc(x)
    answer_element = browser.find_element_by_id("answer")
    answer_element.send_keys(y)
    # Tick the "I am a robot" checkbox and the "robots rule" radiobutton.
    checkbox = browser.find_element_by_id("robotCheckbox")
    checkbox.click()
    radiobutton = browser.find_element_by_id("robotsRule")
    radiobutton.click()
    button = browser.find_element_by_css_selector("button.btn")
    button.click()
finally:
    # Leave 30 seconds to copy the result code from the page.
    time.sleep(30)
    # Close the browser after all interactions.
    browser.quit()
# Remember to leave a blank line at the end of the file.
| true
|
54496452731cf343783173c86e4908d4347e3586
|
Python
|
HoaxShark/comp260-server
|
/Client/Scripts/window.py
|
UTF-8
| 5,867
| 2.90625
| 3
|
[] |
no_license
|
from PyQt5 import QtWidgets, uic, QtCore
from PyQt5.QtCore import Qt
from queue import *
import bcrypt
class LoginWidget(QtWidgets.QWidget):
    """Login form overlaid on the main window until the user signs in."""

    def __init__(self, parent=None):
        # NOTE(review): the default parent=None would raise below when
        # parent.rect() is called — the widget appears to always be built
        # with a real parent window; confirm before relying on the default.
        super(LoginWidget, self).__init__(parent)
        # Load the widget layout produced in Qt Designer.
        self.login_widget = uic.loadUi('login_widget_layout.ui', self)
        # Center the form inside the parent window.
        self.move(parent.rect().center() - self.rect().center())
        # Wire the buttons to the parent window's handlers.
        self.login_widget.login_button.clicked.connect(parent.login_clicked)
        self.login_widget.create_account_button.clicked.connect(parent.create_account_clicked)
class Window(QtWidgets.QMainWindow):
    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)
        self.ui = uic.loadUi('gui_layout.ui', self)
        # Set by the owner after construction; handles all server traffic.
        self.input_manager = ''
        # queue that holds all messages from the server
        self.message_queue = Queue()
        # setup for the timer event function: poll the queue every 100 ms
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.timerEvent)
        self.timer.start(100)
        self.client = ''
        # per-account bcrypt salt received from the server during login
        self.salt = ''
        self.logged_in = False
        self.connected = False
        self.username = ''
        self.password = ''
        # Login overlay shown until the server accepts the credentials.
        self.login_widget = LoginWidget(self)
    def create_account_clicked(self):
        """Validate the entered credentials, hash the password with a fresh
        bcrypt salt and ask the server to create the account."""
        # Set username and password
        self.username = self.login_widget.username_lineEdit.text()
        self.password = self.login_widget.password_lineEdit.text()
        # Check username or password are not blank
        if self.username == '' or self.password == '':
            self.textEdit.append('You must enter a username and password')
            return
        # Check password is at least 6 characters long
        if len(self.password) < 6:
            self.textEdit.append('Password must be at least 6 characters long')
            return
        # Check the username is at least 4 characters long
        if len(self.username) < 4:
            self.textEdit.append('Username must be at least 4 characters long')
            return
        # Generate the salt for a new account (cost factor 12)
        salt = bcrypt.gensalt(12)
        # bcrypt works on bytes: encode, hash, then decode for transport.
        self.password = self.password.encode('utf-8')
        self.password = bcrypt.hashpw(self.password, salt)
        self.password = self.password.decode()
        salt = salt.decode()
        # Clear widget input lines
        self.login_widget.username_lineEdit.clear()
        self.login_widget.password_lineEdit.clear()
        # Form the protocol message and send it via the input manager.
        message = '#create_account ' + self.username + ' ' + self.password + ' ' + salt
        self.input_manager.send_message(message)
def login_clicked(self):
# Set username and password
self.username = self.login_widget.username_lineEdit.text()
self.password = self.login_widget.password_lineEdit.text()
if self.username == '' or self.password == '':
self.textEdit.append('You must enter a username and password')
return
# Send over to the input manager
self.input_manager.set_username_password(self.username, self.password)
# Clear widget input lines
self.login_widget.username_lineEdit.clear()
self.login_widget.password_lineEdit.clear()
# Tell input manager to send username to the server
self.input_manager.send_username()
def set_logged_in(self, logged_in):
self.logged_in = logged_in
def set_connected(self, connected):
self.connected = connected
def window_draw(self):
self.ui.show()
self.login_widget.close()
# runs during alongside the window, use as an update function
def timerEvent(self):
# If not logged in display the log in widget
if self.logged_in == False and self.connected == True:
self.login_widget.show()
else:
self.login_widget.close()
# while messages in the queue print them to client
while self.message_queue.qsize() > 0:
current_input = self.message_queue.get() # Get message out the queue
# split the player input string
split_input = current_input.split(' ', 1)
# stores the first word of the input string (use this across the board)
first_word = split_input[0].lower()
if first_word == '#username_salt':
self.salt = split_input[1]
# Set salt
self.input_manager.set_salt(self.salt)
# Tell input manager to salt and send password
self.input_manager.send_password()
elif first_word == '#login_accepted':
self.logged_in = True
self.login_widget.close()
else:
self.textEdit.append(current_input)
# sends entered text to the input manager if not blank, then clears the text box
def text_enter(self):
if self.lineEdit.text() != '' and self.logged_in:
self.input_manager.send_message(self.lineEdit.text())
self.lineEdit.clear()
def keyPressEvent(self, event):
key = event.key()
if key == Qt.Key_Return:
self.text_enter()
# runs when the pyqt window is closed, shuts down the client and ends current threads
def closeEvent(self, event):
self.client.is_running = False
self.client.is_connected = False
self.client.my_socket.close()
self.client.my_socket = None
if self.client.my_receive_thread is not None:
self.client.my_receive_thread.join
if self.client.my_connection_thread is not None:
self.client.my_connection_thread.join
def set_client(self, this_client):
self.client = this_client
| true
|
e240d11b25b6a4b5781d250007d93f0997ea8a7c
|
Python
|
leonall/algorithms_homework
|
/stack/stack.py
|
UTF-8
| 3,167
| 4.03125
| 4
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Stack Abstract Data Type (ADT)
Stack() creates a new stack that is empty.
It needs no parameters and returns an empty stack.
push(item) adds a new item to the top of the stack.
It needs the item and returns nothing.
pop() removes the top item from the stack.
It needs no parameters and returns the item. The stack is modified.
peek() returns the top item from the stack but does not remove it.
It needs no parameters. The stack is not modified.
isEmpty() tests to see whether the stack is empty.
It needs no parameters and returns a boolean value.
size() returns the number of items on the stack.
It needs no parameters and returns an integer.
'''
import numpy as np
import unittest
class Stack(object):
    """Array-backed LIFO stack.

    Backed by a fixed-size Python list that is doubled on demand, so it
    behaves like a static array (see module docstring for the ADT).
    """
    def __init__(self, size=10):
        """
        Initialize python List with size of 10 or user given input.
        Python List type is a dynamic array, so we have to restrict its
        dynamic nature to make it work like a static array.
        """
        self._array = [None] * size
        self._top = 0  # index of the next free slot == number of stored items

    def size(self):
        """Return the number of items on the stack."""
        return self._top

    def __len__(self):
        return self._top

    def push(self, item):
        """Add *item* to the top of the stack, growing the array if full."""
        if self._top == len(self._array):
            self._expand()
        self._array[self._top] = item
        self._top += 1

    def pop(self):
        """Remove and return the top item; raise IndexError when empty."""
        if self.isEmpty():
            raise IndexError('Stack is empty!')
        value = self._array[self._top - 1]
        self._array[self._top - 1] = None  # drop the reference
        self._top -= 1
        return value

    def peek(self):
        """Return (without removing) the top item; raise IndexError when empty."""
        if self.isEmpty():
            raise IndexError('Stack is empty!')
        return self._array[self._top - 1]

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return self._top == 0

    def _expand(self):
        """Double the capacity of the backing array.

        (Renamed from the misspelt '_expend'; the element-by-element copy
        loop is replaced by a single list concatenation.)
        """
        self._array = self._array + [None] * len(self._array)

    def __iter__(self):
        """Yield items from top to bottom; raise IndexError when empty.

        BUG FIX: the original ended the generator with ``raise
        StopIteration``, which since PEP 479 (Python 3.7) is converted to a
        RuntimeError.  A generator ends cleanly by simply returning.
        """
        if self.isEmpty():
            raise IndexError('Stack is empty!')
        i = self._top
        while i > 0:
            yield self._array[i - 1]
            i -= 1
class TestStack(unittest.TestCase):
    """Randomised check of Stack against a plain Python list as the oracle."""
    def _setUp(self):
        # Not unittest's setUp(): called explicitly once per random trial below.
        self.stack = Stack()
        self._lst = []
        # Push 21 random ints (forces at least one capacity expansion from 10).
        for i in np.random.randint(0, 10, 21):
            self.stack.push(i)
            self._lst.append(i)
        # Reverse so self._lst[0] is the expected top of the stack.
        self._lst = self._lst[::-1]
    def test_LinkedListStack(self):
        for _ in range(10):
            self._setUp()
            self.assertEqual(len(self.stack), len(self._lst))
            self.assertEqual(self.stack.peek(), self._lst[0])
            self.assertEqual(self.stack.pop(), self._lst[0])
            self.assertFalse(self.stack.isEmpty())
            # NOTE(review): this iterates the stack while popping from it;
            # it relies on Stack.__iter__ reading the live backing array
            # rather than a snapshot -- confirm if Stack is ever changed.
            for i, val in enumerate(self.stack):
                self.assertEqual(val, self._lst[i+1])
                self.assertEqual(self.stack.pop(), self._lst[i+1])
            self.assertTrue(self.stack.isEmpty())
if __name__ == '__main__':
    unittest.main()
| true
|
a7730d50c5973f5e0d006218321c0b4513e85202
|
Python
|
somork/plrna
|
/plrna1.0/scripts/rnafold2tab.py
|
UTF-8
| 806
| 2.515625
| 3
|
[] |
no_license
|
# rnafold2tab.py
# S/ren M/rk
# 13/06/2012
#
# Python 2 script: converts multi-record RNAfold output (read on stdin)
# into a tab-separated table "name<TAB>sequence<TAB>structure".  Records
# are paired index-by-index with a names file given as argv[1] and the
# output is sorted by the numeric field embedded in each name.
import sys
data_in=sys.stdin.read()[:-1]
d={}
d_keys=[]
# Each record starts with '>'; drop the empty chunk before the first one.
data=data_in.split('>')[1:]
# NOTE(review): 'input' shadows the builtin; harmless here, but rename if extended.
input=open(sys.argv[1])
names_in=input.read()
input.close()
names=names_in.split('\n')[:-1]
the_list=[]
#i=0
for i in range(len(data)):
    #i+=1
    seq=''
    score=0
    struc=''
    x=data[i].split('\n')
    # Assumes sequence lines and structure lines alternate after the
    # header: odd lines are sequence, even lines are structure -- TODO confirm
    # against the RNAfold output format actually produced.
    for k in x[1::2]:
        seq+=k
    for k in x[2::2]:
        # Structure is the part before the " (energy)" suffix.
        struc+=k.split(' ')[0]
    #for k in x[2::2]:
    #    score+=float(k.split(' ')[1].split('(')[1].split(')')[0])
    #score='score'
    #print 'rna_%s\t%s\t%s\t%s'%(i,seq,struc,score)
    # Sort key: assumes names look like "prefix.N.suffix" -- TODO confirm.
    the_list.append((int(names[i].split('.')[-2]),names[i],seq,struc))
the_list.sort()
for e in the_list:
    #s=e[2].split('\t')
    #seq=e[2].split()
    #ann=e[3].replace(',','').replace('<','(').replace('>',')').replace(':','.')
    print '%s\t%s\t%s'%(e[1],e[2],e[3])
| true
|
fc0e8b92fa80be73fc8474eb7a48eb1bb6964623
|
Python
|
Pioank/python-games
|
/calc-game.py
|
UTF-8
| 3,551
| 3.59375
| 4
|
[] |
no_license
|
import random
import operator
import time

# Interactive mental-arithmetic game: the player must answer `atemp`
# consecutive questions correctly to pass each level.  Type 'stop' at the
# answer prompt to abandon the current level.
pname = input('Choose your player name \n')

calcs = ['+', '-', '+,-', '+,*,/']  # operators available per level
rang = ['0,10', '0,10', '0,10', '0,10', '0,10']  # operand range per level
ncalc = [1, 1, 2, 1]  # number of operations per question, per level
levell = [0, 1, 3]  # level indices actually played
atemp = 3  # consecutive right answers needed to pass a level
scount = 0  # consecutive-right counter; reset to 0 on a wrong answer
i = 0  # current level index (also read by no()/opsfunc() as a global)
n = 0  # counter for the operations within one question
erw = list()  # accumulates numbers/operators so the question can be printed
right = 0  # total correct answers
wrong = 0  # total wrong answers
# Maps operator symbols to their functions so one can be picked at random.
ops = {"+": operator.add, "-": operator.sub, "*": operator.mul, "/": operator.truediv}


def no():
    """Return a random operand within the current level's range (global i)."""
    rng = rang[i]
    com = rng.find(',')
    rs = int(rng[:com])
    rf = int(rng[com + 1:])
    return int(random.randint(rs, rf))


def opsfunc():
    """Return a random operator symbol allowed on the current level (global i)."""
    clc = calcs[i]
    if ',' in clc:
        clcs = clc.split(',')
        return random.choice(clcs)
    else:
        return clc


for i in levell:
    print('Welcome to level', i + 1, ' Answer ', atemp, ' consecutive questions correct and you will pass to the next level \n')
    if i == 0:
        sttime = time.time()  # Timer starts on the first level
    scount = 0
    erw = list()
    while scount < atemp:
        print('You have answered ', scount, 'correct \n')
        numcalc = ncalc[i]
        n = 0
        erw = list()
        # Build the question: first a binary operation, then chain further
        # operations onto the running result (prax).
        while n < numcalc:
            if n == 0:
                no1 = no()
                no2 = no()
                pros = opsfunc()
                proc = ops[pros]
                erw.append(no1)
                erw.append(pros)
                prax = proc(no1, no2)
                erw.append(no2)
                n = n + 1
            else:
                no3 = no()
                pros = opsfunc()
                proc = ops[pros]
                erw.append(pros)
                prax = proc(prax, no3)
                erw.append(no3)
                n = n + 1
        print(*erw, sep=' ')
        print('')
        answer = input('Write your answer here \n')
        # BUG FIX: the original converted the input with float() *before*
        # comparing against 'stop', so typing 'stop' crashed with
        # ValueError and the break branch was unreachable.  Check the raw
        # string first, and treat other non-numeric input as a wrong answer.
        if answer.strip().lower() == 'stop':
            break
        try:
            result = float(answer)
        except ValueError:
            print('Try again \n')
            wrong = wrong + 1
            scount = 0
            continue
        prax = float(prax)
        if result == prax:
            scount = scount + 1
            print('CORRECT \n')
            right = right + 1
        else:
            print('Try again \n')
            wrong = wrong + 1
            scount = 0

fitime = time.time()  # Timer finishes
# BUG FIX: guard against a sub-second game which truncated to ttime == 0
# and crashed the per-question division below.
ttime = max(int(fitime - sttime), 1)
# BUG FIX: the original computed (wrong+right)/ttime, i.e. questions per
# second, but labelled it 'sec per question' -- invert the ratio (guarding
# against zero answered questions when the player stops immediately).
message = ('CONGRATULATIONS', pname, 'You have answered:', right, 'Right. ', wrong, 'Wrong ', 'You have reached level', i + 1, 'You finished in', ttime, 'sec', 'It took you', ttime / max(wrong + right, 1), 'sec per question')
print(*message, sep=' ')
message = str(message)
# NOTE(review): requires a pre-existing 'results/' directory.
filename = 'results/' + pname + 'results.txt'
# Use a context manager so the file is flushed and closed (the original
# leaked the handle).
with open(filename, 'w') as result_file:
    result_file.write(message)
| true
|
c060e1a710283f43955c6edd892fc61b1c2803ae
|
Python
|
lchinmay799/Stock-Market-Prediction-Using-Machine-Learning
|
/Apple/Stock Market Prediction_Apple.py
|
UTF-8
| 4,677
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# coding: utf-8

# Jupyter-notebook export: downloads AAPL closing prices from Tiingo,
# scales them to [0, 1], trains a 3-layer stacked LSTM on sliding 100-day
# windows, reports train/test RMSE and extrapolates 30 days ahead.

# In[1]:

# BUG FIX: this cell was a bare 'conda install -c anaconda pandas-datareader'
# line -- shell syntax, not Python, which made the whole exported file a
# SyntaxError.  Run the install from a terminal/notebook magic instead.

# In[1]:

import pandas_datareader as pdr

# In[2]:

# NOTE(review): hard-coded API key committed to the repository -- rotate it
# and read it from an environment variable instead of source.
df = pdr.get_data_tiingo('AAPL', api_key='600965445e480ded65188f8485444e6643e71e2a')

# In[3]:

df.to_csv('AAPL.csv')

# In[4]:

import pandas as pd

# In[5]:

df = pd.read_csv('AAPL.csv')

# In[6]:

df.head()

# In[8]:

df.tail()

# In[11]:

df

# In[7]:

# Model only the closing price series.
df1 = df.reset_index()['close']

# In[8]:

df1

# In[9]:

df1.shape

# In[10]:

import matplotlib.pyplot as plt

# In[16]:

plt.plot(df1)

# In[11]:

import numpy as np

# In[12]:

# LSTMs are scale-sensitive: squash prices into [0, 1].
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
df1 = scaler.fit_transform(np.array(df1).reshape(-1, 1))

# In[13]:

df1.shape

# In[14]:

# 65/35 chronological split -- no shuffling for time-series data.
training_size = int(len(df1) * 0.65)
test_size = len(df1) - training_size
train_data, test_data = df1[0:training_size, :], df1[training_size:len(df1), :1]

# In[15]:

training_size, test_size

# In[16]:

train_data

# In[17]:

df

# In[18]:

training_size, test_size

# In[19]:

import numpy


def create_dataset(dataset, time_step=1):
    """Slice `dataset` into supervised pairs: X = `time_step` consecutive
    values, y = the value immediately after the window."""
    dataX, dataY = [], []
    for i in range(len(dataset) - time_step - 1):
        a = dataset[i:(i + time_step), 0]
        dataX.append(a)
        dataY.append(dataset[i + time_step, 0])
    return numpy.array(dataX), numpy.array(dataY)

# In[20]:

time_step = 100
x_train, y_train = create_dataset(train_data, time_step)
x_test, y_test = create_dataset(test_data, time_step)

# In[21]:

print(x_train, y_train, x_test, y_test)

# In[22]:

print(x_train.shape), print(y_train.shape)

# In[23]:

print(x_test.shape), print(y_test.shape)

# In[24]:

# Keras LSTM expects (samples, time steps, features).
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)

# In[25]:

print(x_test.shape), print(y_test.shape)

# In[26]:

print(x_train.shape), print(y_train.shape)

# In[27]:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM

# In[28]:

# Three stacked LSTM layers (50 units each) feeding one linear output.
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(100, 1)))
model.add(LSTM(50, return_sequences=True))
model.add(LSTM(50))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')

# In[29]:

model.summary()

# In[30]:

model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=100, batch_size=64, verbose=1)

# In[31]:

import tensorflow as tf

# In[32]:

train_predict = model.predict(x_train)
test_predict = model.predict(x_test)

# In[33]:

# Undo the MinMax scaling so errors are in price units.
train_predict = scaler.inverse_transform(train_predict)
test_predict = scaler.inverse_transform(test_predict)

# In[34]:

import math
from sklearn.metrics import mean_squared_error
math.sqrt(mean_squared_error(y_train, train_predict))

# In[35]:

y_test.shape

# In[36]:

math.sqrt(mean_squared_error(y_test, test_predict))

# In[ ]:


# In[37]:

# Overlay train/test predictions (shifted by the look-back) on the actuals.
look_back = 100
trainPredictPlot = numpy.empty_like(df1)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(train_predict) + look_back, :] = train_predict
testPredictPlot = numpy.empty_like(df1)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(train_predict) + (look_back * 2) + 1:len(df1) - 1, :] = test_predict
plt.plot(scaler.inverse_transform(df1))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
# BUG FIX: 'plt.show' without parentheses never rendered the figure.
plt.show()

# In[38]:

len(test_data)

# In[39]:

# Seed the forecast with the last 100 scaled values.
# NOTE(review): the magic index 341 assumes len(test_data) == 441 for this
# particular download -- recompute as len(test_data) - 100 if reused.
x_input = test_data[341:].reshape(1, -1)

# In[40]:

temp_input = list(x_input)
temp_input = temp_input[0].tolist()

# In[41]:

# Roll the model forward 30 days, feeding each prediction back in as input.
from numpy import array
list_output = []
n_steps = 100
i = 0
while (i < 30):
    if (len(temp_input) > 100):
        x_input = np.array(temp_input[1:])
        print("{} day input {}".format(i, x_input))
        x_input = x_input.reshape(1, -1)
        x_input = x_input.reshape((1, n_steps, 1))
        yhat = model.predict(x_input, verbose=0)
        print("{} day output{}".format(i, yhat))
        temp_input.extend(yhat[0].tolist())
        temp_input = temp_input[1:]
        list_output.extend(yhat.tolist())
        i += 1
    else:
        x_input = x_input.reshape((1, n_steps, 1))
        yhat = model.predict(x_input, verbose=0)
        print(yhat[0])
        temp_input.extend(yhat[0].tolist())
        print(len(temp_input))
        list_output.extend(yhat.tolist())
        i += 1

print(list_output)

# In[42]:

day_new = np.arange(1, 101)
day_pred = np.arange(101, 131)

# In[43]:

import matplotlib.pyplot as plt

# In[44]:

len(df1)

# In[45]:

df3 = df1.tolist()
df3.extend(list_output)

# In[47]:

# NOTE(review): the magic index 1159 assumes len(df1) == 1259 (last 100 days).
plt.plot(day_new, scaler.inverse_transform(df1[1159:]))
plt.plot(day_pred, scaler.inverse_transform(list_output))

# In[48]:

df3 = df1.tolist()
df3.extend(list_output)
plt.plot(df3[1:])

# In[ ]:


# In[ ]:
|
ca4450426e5dad2b5a8e40dcd876d2f6e5fbb38d
|
Python
|
medo5682/Robotics
|
/lab_7/lab7.py
|
UTF-8
| 1,706
| 2.59375
| 3
|
[] |
no_license
|
import argparse
import rospy
from geometry_msgs.msg import Pose, Point, Quaternion, PoseStamped
from std_msgs.msg import Header
# NOTE(review): 'global' at module level is a no-op -- these statements do
# not declare or create anything.  The prev_* defaults are assigned in the
# __main__ block below.
global prev_x
global prev_y
global prev_theta
def check_start(args):
    """Fill any missing goal component of *args* in place from the
    module-level prev_x / prev_y / prev_theta values."""
    if args.x_goal == None:
        print("X goal set to previous")
        args.x_goal = prev_x
    if args.y_goal == None:
        print("Y goal set to previous")
        args.y_goal = prev_y
    if args.theta_goal == None:
        print("Theta goal set to previous")
        args.theta_goal = prev_theta
def main(args):
    """Build a PoseStamped goal from the parsed CLI args and publish it on
    /move_base_simple/goal until ROS shuts down."""
    rospy.init_node('gazebo', anonymous = True)
    goal_publisher = rospy.Publisher('/move_base_simple/goal', PoseStamped, queue_size = 10)
    rospy.sleep(1)
    header= Header()
    header.seq = 1
    header.stamp = rospy.Time.now()
    header.frame_id = 'map'
    # Substitute missing goal components with the previous defaults.
    check_start(args)
    point = Point()
    point.x = float(args.x_goal[0])
    point.y = float(args.y_goal[0])
    point.z = 0.0
    quat = Quaternion()
    # NOTE(review): the quaternion fields are filled from the *position*
    # goals and quat.z is a constant 90 -- this is not a valid unit
    # quaternion; confirm the intended orientation encoding.
    quat.x = float(args.x_goal[0])
    quat.y = float(args.y_goal[0])
    quat.z = 90
    # NOTE(review): theta_goal is parsed with nargs='?', so a user-supplied
    # value is a plain string and [0] takes only its first character -- verify.
    quat.w = float(args.theta_goal[0])
    pose = Pose()
    pose.position = point
    pose.orientation = quat
    posestamped = PoseStamped()
    posestamped.header = header
    posestamped.pose = pose
    print(posestamped)
    # NOTE(review): these create *locals*; without a 'global' statement the
    # module-level prev_* values are never updated (moot in practice, since
    # the publish loop below never returns).
    prev_x = args.x_goal
    prev_y = args.y_goal
    prev_theta = args.theta_goal
    while not rospy.is_shutdown():
        goal_publisher.publish(posestamped)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Gazebo")
    # x/y use nargs=1 (value arrives as a 1-element list, matching the
    # args.x_goal[0] indexing in main()).
    parser.add_argument('-x','--x_goal', nargs=1, help='goal x')
    parser.add_argument('-y','--y_goal', nargs=1, help='goal y ')
    # NOTE(review): nargs='?' is inconsistent with the others -- a supplied
    # value is a bare string, so main()'s args.theta_goal[0] reads only its
    # first character.  Only the list default below behaves as intended.
    parser.add_argument('-theta','--theta_goal', nargs='?', help='goal theta in quaternions')
    args = parser.parse_args()
    prev_x = ['-2.0']
    prev_y = ['-0.5']
    prev_theta = ['0.0']
    main(args)
| true
|
3f140da1b4570fc9243843ae8ec6c1bfcc644425
|
Python
|
lsankar4033/programming_gym
|
/hackerrank/the_quickest_way_up/run.py
|
UTF-8
| 2,660
| 3.65625
| 4
|
[] |
no_license
|
# Challenge here: https://www.hackerrank.com/challenges/the-quickest-way-up
# Solution involves creating a graph and doing BFS. Graph must be altered based on snakes/ladders so that edge
# pointing to the start of a snake/ladder actually points to its end and all start points are just removed.
import sys
BOARD_SIZE = 10  # 10 x 10

# snakes, ladders are both lists of tuples
def build_graph(snakes, ladders):
    """Build adjacency sets for the 100-square board.

    Each die roll (1-6) is an edge; every edge entering the start of a
    snake/ladder is redirected to its end, and start squares are removed.
    """
    adjacency_map = {}
    reverse_adjacency_map = {}  # predecessors, used for the redirection step
    for square in range(1, BOARD_SIZE * BOARD_SIZE + 1):
        adjacency_map[square] = {square + roll for roll in range(1, 7) if square + roll <= 100}
        reverse_adjacency_map[square] = {square - roll for roll in range(1, 7) if square - roll >= 1}
    # NOTE: assumes (as the problem statement does) that the end of any
    # snake/ladder is never the start of another.
    snake_ladders = snakes + ladders
    for (start, end) in snake_ladders:
        # Re-point every edge that entered `start` directly at `end`.
        for pred in reverse_adjacency_map[start]:
            adjacency_map[pred].add(end)
            adjacency_map[pred].remove(start)
    for (start, _) in snake_ladders:
        del adjacency_map[start]
    return adjacency_map
# Return the number of steps in the min path from start -> end. If no path exists, return -1
def get_shortest_path_length(adjacency_map, start, end):
    """Level-by-level breadth-first search over `adjacency_map`.

    Returns the number of edges on a shortest start->end path, or -1 when
    `end` is unreachable from `start`.
    """
    visited = set()
    frontier = {start}
    depth = 0
    while frontier:
        if end in frontier:
            return depth
        # Expand one BFS level: every unvisited neighbour of the frontier.
        visited.update(frontier)
        next_frontier = set()
        for node in frontier:
            next_frontier.update(adjacency_map[node])
        frontier = next_frontier - visited
        depth += 1
    # Frontier emptied without seeing `end`.
    return -1
# NOTE - this method is just copied around in all hackerrank modules
def get_int_from_stdin():
    """Read one line from stdin and return it as an int."""
    return int(sys.stdin.readline().strip())

# NOTE - this method is just copied around in all hackerrank modules
def get_tuple_from_stdin():
    """Read one space-separated line of ints from stdin as a list."""
    l = sys.stdin.readline().strip()
    return [int(i) for i in l.split(" ")]
if __name__ == "__main__":
    # Input format: T test cases; each gives a count of snakes followed by
    # their (start, end) pairs, then a count of ladders and their pairs.
    num_problems = get_int_from_stdin()
    for i in range(num_problems):
        # get snakes
        num_snakes = get_int_from_stdin()
        snakes = []
        for j in range(num_snakes):
            snakes.append(get_tuple_from_stdin())
        # get ladders
        num_ladders = get_int_from_stdin()
        ladders = []
        for j in range(num_ladders):
            ladders.append(get_tuple_from_stdin())
        adjacency_map = build_graph(snakes, ladders)
        # Minimum number of die rolls from square 1 to square 100.
        path_length = get_shortest_path_length(adjacency_map, 1, 100)
        print(path_length)
| true
|
84d7660c17b758d3aff11de48d8aff41cbce6ae6
|
Python
|
Ronel-Mehmedov/dissertation2021
|
/executeEmpty.py
|
UTF-8
| 1,656
| 2.78125
| 3
|
[] |
no_license
|
import os
import csv
import shutil

# Script: cross-references the list of crawled marketplace websites with
# the list of empty output folders, then writes the deduplicated, sorted
# matching website names to emptyWebsites3.csv.

# rootDir = "../data/"
# NOTE(review): the counters/lists below are never used in this script.
empty = 0
notEmpty = 0
singleEntry = 0
emptyFoldersList = []
singleFileFolders = []


def fixName(folderName):
    """Strip a trailing '10532...' suffix from a folder name (unused here)."""
    if "10532" in folderName:
        website = folderName.split("10532")[0]
        return website
    return folderName


websites = []
foldersList = []
# Flat CSV of website names to crawl.
with open('./marketplace/finalListToCrawl.csv') as csv_file:
    # with open('./marketplace/mainMarketplaceList.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    for row in csv_reader:
        for entry in row:
            websites.append(entry)

counter = 0
# Flat CSV of folder names that were found empty.
with open('./emptyFolders.csv') as folders:
    # with open('./marketplace/mainMarketplaceList.csv') as csv_file:
    csv_reader = csv.reader(folders, delimiter=',')
    for row in csv_reader:
        for entry in row:
            foldersList.append(entry)
            counter += 1

print(websites)
print(foldersList)
sorted(foldersList)  # NOTE(review): result discarded; use foldersList.sort() if ordering matters
print(counter)

# Collect every website whose name contains one of the empty folder names.
emptyWebsites = []
for entry in foldersList:
    for website in websites:
        if entry in website:
            emptyWebsites.append(website)

# BUG FIX: the original deduplicated by compacting the sorted list in
# place, but then printed and wrote the *entire* list -- including the
# stale duplicate tail beyond the compacted prefix.  sorted(set(...)) does
# the same job correctly.
sortedWebsites = sorted(set(emptyWebsites))
print(sortedWebsites)

# for subdir, dirs, files in os.walk(rootDir):
with open('emptyWebsites3.csv', 'w') as empties:
    writer = csv.writer(empties, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    writer.writerow(sortedWebsites)
| true
|
42f11f90ff283dac88e335fe814b2b6dd14c59a6
|
Python
|
aquadros1003/Data-Visualization
|
/4/Klasy/Robaczek.py
|
UTF-8
| 649
| 3.03125
| 3
|
[] |
no_license
|
class Robaczek:
    """A little bug ("robaczek") walking on a grid in fixed-size steps.

    Position is (wspolrzedna_x, wspolrzedna_y); `ruch` is the step size.
    """
    # Class-level defaults; each instance gets its own values in __init__.
    wspolrzedna_x = 0
    wspolrzedna_y = 0
    ruch = 1

    def __init__(self, x, y, krok):
        self.wspolrzedna_x = x
        self.wspolrzedna_y = y
        self.ruch = krok

    def idz_w_gore(self, ile_krokow):
        """Move up by ile_krokow steps."""
        # BUG FIX: the original multiplied by an undefined global 'krok'
        # (NameError at runtime); the step size lives on the instance.
        self.wspolrzedna_y += (ile_krokow * self.ruch)

    def do_dolu(self, ile_krokow):
        """Move down by ile_krokow steps."""
        self.wspolrzedna_y -= (ile_krokow * self.ruch)

    def w_prawo(self, ile_krokow):
        """Move right by ile_krokow steps."""
        self.wspolrzedna_x += (ile_krokow * self.ruch)

    def w_lewo(self, ile_krokow):
        """Move left by ile_krokow steps."""
        self.wspolrzedna_x -= (ile_krokow * self.ruch)

    def gdzie_jestes(self):
        """Print the current position."""
        print("X = " + str(self.wspolrzedna_x) + "Y = " + str(self.wspolrzedna_y))
| true
|
5bf08c4f2218439698a423c18f9110045ba4864c
|
Python
|
LAB-Rio/governoaberto-wikilegis
|
/wikilegis/core/templatetags/convert_numbers.py
|
UTF-8
| 2,457
| 2.53125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template import Library
from collections import OrderedDict
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
import string
from wikilegis.core.models import BillSegment
register = Library()
def int_to_letter(number):
    """Convert a positive integer to a lowercase letter sequence.

    1 -> 'a' ... 26 -> 'z', and -- generalised beyond the original
    1..26 dict lookup -- 27 -> 'aa', 28 -> 'ab', ... like spreadsheet
    column labels (bijective base 26).  Returns '' for number <= 0.
    """
    letters = []
    while number > 0:
        number, rem = divmod(number - 1, 26)
        letters.append(string.ascii_lowercase[rem])
    return ''.join(reversed(letters))
def int_to_roman(num):
    """Convert a non-negative integer to its Roman-numeral string.

    Greedy decomposition over the standard value/symbol table, including
    the subtractive pairs (CM, CD, XC, XL, IX, IV).  Returns '' for 0.
    """
    table = (
        (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
        (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
        (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
    )
    parts = []
    for value, symbol in table:
        count, num = divmod(num, value)
        parts.append(symbol * count)
    return "".join(parts)
@register.simple_tag
def segment_numbering(segment):
    """Render the display prefix/number for a bill segment.

    Follows Brazilian legislative drafting conventions (Art., paragrafo,
    inciso, alinea, titulo/livro/capitulo/secao in Roman numerals).
    Returns '' when the segment has no number or its type is unknown.
    """
    if segment.number:
        # Segment type names are Portuguese; slugify strips accents so
        # e.g. 'parágrafo' matches 'paragrafo'.
        type_name = slugify(segment.type.name)
        int_number = int(segment.number)
        if type_name == 'artigo':
            # Articles 1-9 take the ordinal marker 'º'.
            if int_number <= 9:
                return "Art. %dº " % int_number
            else:
                return "Art. %d " % int_number
        elif type_name == 'paragrafo':
            if int_number <= 9:
                # A lone first paragraph is rendered "Sole paragraph"
                # instead of '§ 1º' (extra DB query per render).
                if int_number == 1 and BillSegment.objects.filter(type__name=segment.type.name, parent_id=segment.parent_id).count() == 1:
                    return "%s. " % _("Sole paragraph")
                else:
                    return "§ %dº " % int_number
            else:
                return "§ %d " % int_number
        elif type_name == 'inciso':
            return "%s - " % int_to_roman(int_number)
        elif type_name == 'alinea':
            return "%s) " % int_to_letter(int_number)
        elif type_name == 'titulo':
            return "%s" % int_to_roman(int_number)
        elif type_name == 'livro':
            return "%s" % int_to_roman(int_number)
        elif type_name == 'capitulo':
            return "%s" % int_to_roman(int_number)
        elif type_name == 'secao':
            return "%s" % int_to_roman(int_number)
        elif type_name == 'subsecao':
            return "%s" % int_to_roman(int_number)
    else:
        return ''
| true
|
d1d620d60f38b0a427bafae4a156bd931e970f37
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03626/s390663489.py
|
UTF-8
| 543
| 3.03125
| 3
|
[] |
no_license
|
# Counts colourings of a 2xN board tiled with dominoes, using 3 colours so
# that adjacent dominoes differ; the tiling is read as two strings where
# equal characters in a column mean a vertical domino.  Answer is printed
# modulo 10**9 + 7.  (Appears to be AtCoder ABC 071 D "Coloring Dominoes"
# -- TODO confirm.)
n = int(input())
s1 = input()
s2 = input()
# First domino: a vertical one ("h") can take 3 colours; a horizontal pair
# ("w", spanning two columns) can take 3*2 = 6.
if s1[0] == s2[0]:
    result = 3
    last_pattern = "h"
    place = 1
else:
    result = 6
    last_pattern = "w"
    place = 2
# Walk the remaining columns, multiplying by the number of colour choices
# allowed given the previous domino's orientation.
while place < n:
    if last_pattern == "h":
        if s1[place] == s2[place]:
            # vertical after vertical: 2 choices
            result *= 2
            last_pattern = "h"
            place += 1
        else:
            # horizontal pair after vertical: 2 choices
            result *= 2
            last_pattern = "w"
            place += 2
    else:
        if s1[place] == s2[place]:
            # vertical after horizontal pair: forced (1 choice)
            last_pattern = "h"
            place += 1
        else:
            # horizontal pair after horizontal pair: 3 choices
            result *= 3
            last_pattern = "w"
            place += 2
print(result % (7 + 10**9))
| true
|
0deeb3d911e998c345be69d24930603547461eac
|
Python
|
imsahil007/SudokuSolver
|
/sudoku_grid.py
|
UTF-8
| 2,134
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
import cv2
import numpy as np
def display_rects(in_img, rects, colour=255):
    """Displays rectangles on the image.

    Draws each rect ((x0, y0), (x1, y1)) onto a copy of `in_img` with the
    given greyscale colour and returns the copy; the input is not modified.
    """
    img = in_img.copy()
    for rect in rects:
        # cv2.rectangle needs integer pixel coordinates.
        img = cv2.rectangle(img, tuple(int(x) for x in rect[0]), tuple(int(x) for x in rect[1]), colour)
    return img
def distance_between(p1, p2):
    """Return the Euclidean (scalar) distance between two 2-D points."""
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return np.sqrt(dx ** 2 + dy ** 2)
def crop_and_warp(img, crop_rect):
    """Crops and warps a rectangular section from an image into a square of similar size.

    `crop_rect` is the four corners in order: top-left, top-right,
    bottom-right, bottom-left.  The output square's side is the longest
    side of the input quadrilateral.
    """
    # Rectangle described by top left, top right, bottom right and bottom left points
    top_left, top_right, bottom_right, bottom_left = crop_rect[0], crop_rect[1], crop_rect[2], crop_rect[3]
    # Explicitly set the data type to float32 or `getPerspectiveTransform` will throw an error
    src = np.array([top_left, top_right, bottom_right, bottom_left], dtype='float32')
    # Get the longest side in the rectangle
    side = max([
        distance_between(bottom_right, top_right),
        distance_between(top_left, bottom_left),
        distance_between(bottom_right, bottom_left),
        distance_between(top_left, top_right)
    ])
    # Describe a square with side of the calculated length, this is the new perspective we want to warp to
    dst = np.array([[0, 0], [side - 1, 0], [side - 1, side - 1], [0, side - 1]], dtype='float32')
    # Gets the transformation matrix for skewing the image to fit a square by comparing the 4 before and after points
    m = cv2.getPerspectiveTransform(src, dst)
    # Performs the transformation on the original image
    return cv2.warpPerspective(img, m, (int(side), int(side)))
def infer_grid(img):
    """Infers 81 cell grid from a square image.

    Returns a list of 81 ((x0, y0), (x1, y1)) bounding boxes, produced in
    the same order as the original nested loops (outer index first).
    """
    cell = img.shape[0] / 9  # cell side length (float division, as before)
    return [
        ((outer * cell, inner * cell), ((outer + 1) * cell, (inner + 1) * cell))
        for outer in range(9)
        for inner in range(9)
    ]
| true
|
815391fafaa270b64561229ba75a12f0eb5be410
|
Python
|
cminmins/Pixel_processing
|
/venv/Lib/site-packages/pydicom/tag.py
|
UTF-8
| 7,270
| 2.921875
| 3
|
[] |
no_license
|
# Copyright 2008-2017 pydicom authors. See LICENSE file for details.
"""Define Tag class to hold a DICOM (group, element) tag and related functions.
The 4 bytes of the DICOM tag are stored as an arbitrary length 'long' for
Python 2 and as an 'int' for Python 3. Tags are stored as a single number and
separated to (group, element) as required.
"""
# NOTE: Tags must be not be stored as a tuple internally, as some code logic
# (e.g. in filewriter.write_AT) checks if a value is a multi-value
# element
import traceback
from contextlib import contextmanager
from pydicom import compat
@contextmanager
def tag_in_exception(tag):
    """Use `tag` within a context.

    Used to include the tag details in the traceback message when an exception
    is raised within the context.

    Parameters
    ----------
    tag : pydicom.tag.Tag
        The tag to use in the context.
    """
    try:
        yield
    except Exception as ex:
        stack_trace = traceback.format_exc()
        msg = 'With tag {0} got exception: {1}\n{2}'.format(
            tag,
            str(ex),
            stack_trace)
        # NOTE(review): re-raising via type(ex)(msg) assumes the exception
        # class accepts a single string argument -- not true of every
        # exception type; confirm before relying on this for custom ones.
        raise type(ex)(msg)
def Tag(arg, arg2=None):
    """Create a Tag.

    General function for creating a Tag in any of the standard forms:

    * Tag(0x00100015)
    * Tag('0x00100015')
    * Tag((0x10, 0x50))
    * Tag(('0x10', '0x50'))
    * Tag(0x0010, 0x0015)
    * Tag(0x10, 0x15)
    * Tag(2341, 0x10)
    * Tag('0xFE', '0x0010')

    Parameters
    ----------
    arg : int or str or 2-tuple/list
        If int or str, then either the group or the combined
        group/element number of the DICOM tag. If 2-tuple/list
        then the (group, element) numbers as int or str.
    arg2 : int or str, optional
        The element number of the DICOM tag, required when
        `arg` only contains the group number of the tag.

    Returns
    -------
    pydicom.tag.BaseTag
    """
    if isinstance(arg, BaseTag):
        return arg
    if arg2 is not None:
        arg = (arg, arg2)  # act as if was passed a single tuple
    if isinstance(arg, (tuple, list)):
        if len(arg) != 2:
            raise ValueError("Tag must be an int or a 2-tuple")
        valid = False
        if isinstance(arg[0], compat.string_types):
            valid = isinstance(arg[1], (str, compat.string_types))
            if valid:
                # hex strings like ('0x10', '0x50')
                arg = (int(arg[0], 16), int(arg[1], 16))
        elif isinstance(arg[0], compat.number_types):
            valid = isinstance(arg[1], compat.number_types)
        if not valid:
            raise ValueError("Both arguments for Tag must be the same type, "
                             "either string or int.")

        if arg[0] > 0xFFFF or arg[1] > 0xFFFF:
            raise OverflowError("Groups and elements of tags must each "
                                "be <=2 byte integers")

        # Pack (group, element): group in the high 16 bits, element low.
        long_value = (arg[0] << 16) | arg[1]

    # Single str parameter
    elif isinstance(arg, (str, compat.text_type)):
        long_value = int(arg, 16)
        if long_value > 0xFFFFFFFF:
            raise OverflowError("Tags are limited to 32-bit length; tag {0!r}"
                                .format(long_value))
    # Single int parameter
    else:
        long_value = arg
        if long_value > 0xFFFFFFFF:
            raise OverflowError("Tags are limited to 32-bit length; tag {0!r}"
                                .format(long_value))

    if long_value < 0:
        raise ValueError("Tags must be positive.")

    return BaseTag(long_value)
# Choose the integer base type for BaseTag depending on interpreter version.
if compat.in_py2:
    # May get an overflow error with int if sys.maxsize < 0xFFFFFFFF
    BaseTag_base_class = long
else:
    BaseTag_base_class = int
# Vendored pydicom code -- keep in sync with upstream if updated.
class BaseTag(BaseTag_base_class):
    """Represents a DICOM element (group, element) tag.

    If using python 2.7 then tags are represented as a long, while for python
    3 they are represented as an int.

    Attributes
    ----------
    element : int
        The element number of the tag.
    group : int
        The group number of the tag.
    is_private : bool
        Returns True if the corresponding element is private, False otherwise.
    """

    # Override comparisons so can convert "other" to Tag as necessary
    # See Ordering Comparisons at:
    # http://docs.python.org/dev/3.0/whatsnew/3.0.html
    def __le__(self, other):
        """Return True if `self` is less than or equal to `other`."""
        return self == other or self < other

    def __lt__(self, other):
        """Return True if `self` is less than `other`."""
        # Check if comparing with another Tag object; if not, create a temp one
        if not isinstance(other, BaseTag):
            try:
                other = Tag(other)
            except Exception:
                raise TypeError("Cannot compare Tag with non-Tag item")
        return BaseTag_base_class(self) < BaseTag_base_class(other)

    def __ge__(self, other):
        """Return True if `self` is greater than or equal to `other`."""
        return self == other or self > other

    def __gt__(self, other):
        """Return True if `self` is greater than `other`."""
        return not (self == other or self < other)

    def __eq__(self, other):
        """Return True if `self` equals `other`."""
        # Check if comparing with another Tag object; if not, create a temp one
        if not isinstance(other, BaseTag_base_class):
            try:
                other = Tag(other)
            except Exception:
                raise TypeError("Cannot compare Tag with non-Tag item")
        return BaseTag_base_class(self) == BaseTag_base_class(other)

    def __ne__(self, other):
        """Return True if `self` does not equal `other`."""
        return not self == other

    # For python 3, any override of __cmp__ or __eq__
    # immutable requires explicit redirect of hash function
    # to the parent class
    # See http://docs.python.org/dev/3.0/reference/
    # datamodel.html#object.__hash__
    __hash__ = BaseTag_base_class.__hash__

    def __str__(self):
        """Return the tag value as a hex string '(gggg, eeee)'."""
        return "({0:04x}, {1:04x})".format(self.group, self.element)

    __repr__ = __str__

    @property
    def group(self):
        """Return the tag's group number (high 16 bits)."""
        return self >> 16

    @property
    def element(self):
        """Return the tag's element number (low 16 bits)."""
        return self & 0xffff

    elem = element  # alternate syntax

    @property
    def is_private(self):
        """Return True if the tag is private (has an odd group number)."""
        return self.group % 2 == 1

    @property
    def is_private_creator(self):
        """Return True if the tag is a private creator."""
        return self.is_private and 0x0010 <= self.element < 0x0100
def TupleTag(group_elem):
    """Fast factory for BaseTag object with known safe (group, elem) tuple"""
    # Skips all of Tag()'s validation; the caller guarantees 16-bit values.
    long_value = group_elem[0] << 16 | group_elem[1]
    return BaseTag(long_value)

# Define some special tags:
# See DICOM Standard Part 5, Section 7.5

# start of Sequence Item
ItemTag = TupleTag((0xFFFE, 0xE000))

# end of Sequence Item
ItemDelimiterTag = TupleTag((0xFFFE, 0xE00D))

# end of Sequence of undefined length
SequenceDelimiterTag = TupleTag((0xFFFE, 0xE0DD))
| true
|
88dcd24e83722729d79c1011df1678d4978f7d8e
|
Python
|
sebdiem/euler
|
/64.py
|
UTF-8
| 1,045
| 3.21875
| 3
|
[] |
no_license
|
from fractions import gcd
def period(n, sq_n, p, current, seen):
# p stores the digits of the continued fraction sequence
# current enables to retrieve the current value of the "remainder": a/(sqrt(n)-b)
# seen stores the old values of current to detect periodicity
a, b = current
sq_diff = n-b**2
g = gcd(sq_diff, a)
temp = int(a*(sq_n+b)/(n-b**2))
current = (sq_diff/g , -((a/g)*b) + temp*(sq_diff/g))
if current in seen: return tuple(p)
p.append(temp)
seen.append(current)
return period(n, sq_n, p, current, seen)
#print period(23,24**0.5,[],(1,4))
def continued_frac(n):
    """Return [a0, period] for the continued fraction expansion of sqrt(n).

    The period is an empty tuple when n is a perfect square.
    """
    root = n ** 0.5
    floor_root = int(root)
    if root == floor_root:
        # Perfect square: no periodic part.
        return [floor_root, tuple()]
    return [floor_root, period(n, root, [], (1, floor_root), [])]
#print "\n".join(map(lambda x: str(x), [(i, continued_frac(i)) for i in range(2, 24)]))
def problem64():
    """Count n in [2, 10000] whose sqrt continued fraction has an odd period."""
    odd_periods = 0
    for i in range(2, 10001):
        if len(continued_frac(i)[1]) % 2:
            odd_periods += 1
    return odd_periods
# Use the print() function so the script runs on both Python 2 and 3
# (the original bare 'print' statement is a SyntaxError on Python 3).
print(problem64())
| true
|
f91409c4ecb9e300e1fcb2bb1104792eb7616280
|
Python
|
tepharju/Code1--Harjoitusteht-v-t
|
/CODE1_3_4_Hypotenuusa.py
|
UTF-8
| 283
| 3.515625
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 23 12:27:35 2021
@author: tepha
Code1 3.4 Hypotenuse: read the lengths of the two legs of a right
triangle and print the length of its hypotenuse.
"""
import math
# Read the two legs (prompts are Finnish for "Enter side a/b");
# float() raises ValueError on non-numeric input.
a = float(input("Anna sivu a: "))
b = float(input("Anna sivu b: "))
# Pythagorean theorem: c = sqrt(a^2 + b^2)
c = math.sqrt(a**2+b**2)
# "The length of the triangle's hypotenuse is:"
print("Kolmion hypotenuusan pituus on:", c)
| true
|
991cdc6eb795900cb2e8e9f4976fafea354f3077
|
Python
|
Dandiaz14/invernadero_18B
|
/menuRegistro.py
|
UTF-8
| 869
| 3.25
| 3
|
[] |
no_license
|
from registro import Registro
from datetime import datetime,date
class MenuRegistro:
    """Interactive console menu for the greenhouse 'registro' (record) table."""
    def __init__(self,conexion,cursor):
        # Note: the constructor itself runs the whole menu loop — it blocks
        # on input() until the user chooses option '0' (exit).
        self.registro = Registro(conexion,cursor)
        while True:
            print("1) Crear Registro")
            print("2) Mostrar Registro")
            print("0) Salir")
            op = input()
            if op == '1':
                self.agregar()
            elif op == '2':
                self.buscar()
            elif op == '0':
                break
    def agregar(self):
        """Prompt for one sensor reading and insert it, stamped with today's date."""
        fecha = datetime.now().date()
        ph = input("PH: ")
        luz = input("Luz")
        humedad = input("Humedad: ")
        co2 = input("CO2: ")
        id_planta = input("id_planta: ")
        self.registro.agregar(fecha, ph, luz, humedad, co2,id_planta)
    def buscar(self):
        """Ask for a plant id and print its matching records, one row per line."""
        id_planta = input("Id_planta: ")
        resultados = self.registro.buscar(id_planta)
        for p in resultados:
            print("{0:2} {1:10} {2:10} {3:10} {4:10} {5:10} {6:2} ".format(p[0],str(p[1]),p[2],p[3],p[4],p[5],p[6]))
| true
|
4cbb77eb1f87014a195e270bcf0a861b4f076c2c
|
Python
|
ChangXiaodong/Leetcode-solutions
|
/Introduction_to_algorithm/section_15/LCS_length.py
|
UTF-8
| 1,624
| 3.21875
| 3
|
[] |
no_license
|
def LCS_length(X, Y):
    """Build the longest-common-subsequence DP tables for sequences X and Y.

    Returns (c, b): c[i][j] is the LCS length of X[:i] and Y[:j]; b[i][j]
    is a direction marker ('\\' match, '|' up, '-' left) used to
    reconstruct one LCS.  Also prints that LCS via print_LCS.
    """
    m = len(X)
    n = len(Y)
    b = [["" for i in range(n + 1)] for i in range(m + 1)]
    c = [[0 for i in range(n + 1)] for i in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if X[i - 1] == Y[j - 1]:
                # Characters match: extend the diagonal LCS by one.
                c[i][j] = c[i - 1][j - 1] + 1
                b[i][j] = '\\'
            elif c[i - 1][j] >= c[i][j - 1]:
                # Dropping the last element of X is at least as good.
                c[i][j] = c[i - 1][j]
                b[i][j] = '|'
            else:
                # Dropping the last element of Y is strictly better.
                c[i][j] = c[i][j - 1]
                b[i][j] = '-'
    print_LCS(b, X, m, n)
    return c, b
def print_LCS(b, X, i, j):
    """Print the LCS recorded in direction table b, one element per line.

    Elements come out in reverse order, because each matched element is
    printed before the recursion walks toward the table origin.
    """
    if i == 0 or j == 0:
        return
    if b[i][j] == '\\':
        print(X[i - 1])
        print_LCS(b, X, i - 1, j - 1)
    elif b[i][j] == '|':
        print_LCS(b, X, i - 1, j)
    else:
        print_LCS(b, X, i, j - 1)
def get_LCS_length(X, Y, c, i, j):
    """Memoized recursive LCS length of X[:i] and Y[:j]; fills table c in place.

    c must be an (len(X)+1) x (len(Y)+1) table initialized to zeros; positive
    entries are treated as already-computed results.
    """
    if c[i][j] > 0:
        # Memo hit: this subproblem was solved earlier.
        return c[i][j]
    if i == 0 or j == 0:
        return 0
    if X[i - 1] == Y[j - 1]:
        c[i][j] = get_LCS_length(X, Y, c, i - 1, j - 1) + 1
    else:
        drop_y = get_LCS_length(X, Y, c, i, j - 1)
        drop_x = get_LCS_length(X, Y, c, i - 1, j)
        c[i][j] = drop_x if drop_x >= drop_y else drop_y
    return c[i][j]
def memorized_LCS_length(X, Y):
    """Return the memoized LCS-length table for X and Y."""
    rows, cols = len(X), len(Y)
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    get_LCS_length(X, Y, table, rows, cols)
    return table
# Demo: build the tables for the CLRS example sequences, print both tables,
# then show the memoized variant's length table for the same input.
c, b = LCS_length(['A', 'B', 'C', 'B', 'D', 'A', 'B'], ['B', 'D', 'C', 'A', 'B', 'A'])
for line in c:
    print(line)
for line in b:
    print(line)
for line in memorized_LCS_length(['A', 'B', 'C', 'B', 'D', 'A', 'B'], ['B', 'D', 'C', 'A', 'B', 'A']):
    print(line)
| true
|
2d9cb47647f42f7a768f949b781f4e04eaf4044a
|
Python
|
tf2keras/image-computer-processing
|
/project-1-captcha-recognition/captcha_input.py
|
UTF-8
| 4,523
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
"""
This module contains captcha generator.
"""
import os
import h5py
import multiprocessing
import threading
import generate_data
import numpy as np
import tensorflow as tf
class CaptchaDataManager(object):
    """
    Class for captcha data managment.

    Scans *hdf5_files_dir* for .hdf5 files holding image/label datasets and
    feeds mean-centered batches into a TensorFlow FIFOQueue from background
    producer threads, cycling through files and batches indefinitely.
    """
    def __init__(self, batch_size, captcha_size,
                 hdf5_files_dir, coordinator, data_format, queue_size=32):
        self.batch_size = batch_size
        self.data_format = data_format
        self.coordinator = coordinator
        self.filenames = []
        self.data_size = 0
        # Collect every HDF5 file in the directory and total the sample count.
        for filename in os.listdir(hdf5_files_dir):
            if filename.endswith(".hdf5"):
                self.filenames.append(os.path.join(hdf5_files_dir, filename))
                cur_hdf5 = h5py.File(self.filenames[-1], "r")
                self.data_size += cur_hdf5["data"].maxshape[0]
                cur_hdf5.close()
        # Cursor state: current batch index (i) and current file (file_ind).
        self.i = 0
        self.file_ind = 0
        self.cur_hdf5 = h5py.File(self.filenames[self.file_ind], "r")
        self.images = self.cur_hdf5["data"]
        self.labels = self.cur_hdf5["labels"]
        self.samples_count = self.images.maxshape[0]
        self.im_width = self.images.maxshape[2]
        self.im_height = self.images.maxshape[1]
        # Lock serializes next_batch() across the producer threads.
        self.lock = threading.Lock()
        # NOTE(review): true division — this is a float on Python 3; the
        # comparison in next_batch() still works, but an integer count
        # (//) looks intended.
        self.batches_count = self.samples_count / self.batch_size
        # Init queue parameters
        self.images_pl = tf.placeholder(tf.float32, [
            batch_size, self.im_height, self.im_width, 3])
        self.labels_pl = tf.placeholder(tf.int32,
                                        [batch_size, captcha_size,
                                         len(generate_data.ALPHABET)])
        if self.data_format == "NCHW":
            # Channels-first layout requested: NHWC -> NCHW.
            self.images_pl = tf.transpose(self.images_pl, [0, 3, 1, 2])
        self.queue = tf.FIFOQueue(queue_size,
                                  [self.images_pl.dtype, self.labels_pl.dtype],
                                  [self.images_pl.get_shape(),
                                   self.labels_pl.get_shape()])
        self.threads = []
        self.enqueue_op = self.queue.enqueue([self.images_pl, self.labels_pl])
    def next_batch(self):
        """
        Return next batch. Cyclic.
        """
        with self.lock:
            selection = np.s_[self.i * self.batch_size:
                              (self.i + 1) * self.batch_size]
            if self.i + 1 < self.batches_count:
                self.i = self.i + 1
            else:
                # Wrapped past the last batch: restart from 0 and, if there
                # are several files, rotate to the next one.
                self.i = 0
                if len(self.filenames) > 1:
                    self.file_ind = (self.file_ind + 1) % len(self.filenames)
                    self.cur_hdf5.close()
                    self.cur_hdf5 = h5py.File(self.filenames[self.file_ind],
                                              "r")
                    self.images = self.cur_hdf5["data"]
                    self.labels = self.cur_hdf5["labels"]
                    self.samples_count = self.images.maxshape[0]
                    self.im_width = self.images.maxshape[2]
                    self.im_height = self.images.maxshape[1]
            # Slice the batch, mean-center each image, and scale by 255.
            images_batch = self.images[selection]
            labels_batch = self.labels[selection]
            images_batch = images_batch.astype(np.float32)
            images_batch = (images_batch - np.mean(images_batch, axis=(1, 2, 3),
                                                   keepdims=True)) / 255.0
            labels_batch = labels_batch.astype(np.int32)
            if self.data_format == "NCHW":
                images_batch = np.transpose(images_batch, axes=[0, 3, 1, 2])
            return images_batch, labels_batch
    def size(self):
        # TF op yielding the number of batches currently in the queue.
        return self.queue.size()
    def dequeue(self):
        # TF op yielding one (images, labels) batch from the queue.
        output = self.queue.dequeue()
        return output
    def thread_main(self, session):
        # Producer loop: keep enqueueing batches until the coordinator stops.
        while not self.coordinator.should_stop():
            images, labels = self.next_batch()
            try:
                session.run(self.enqueue_op,
                            feed_dict={self.images_pl: images,
                                       self.labels_pl: labels})
            except tf.errors.CancelledError:
                # Queue was closed during shutdown; exit the thread quietly.
                return
    def start_threads(self, session, n_threads=multiprocessing.cpu_count()):
        # Spawn n_threads daemon producer threads and return the list.
        for _ in range(n_threads):
            thread = threading.Thread(target=self.thread_main, args=(session,))
            thread.daemon = True  # Thread will close when parent quits.
            thread.start()
            self.threads.append(thread)
        return self.threads
| true
|
eb6a8d8d9a2cfe215cf5a23765b441bf31bd7764
|
Python
|
Oswald97/Mapcom-Covid-Programming-Challenge
|
/day 8/telco.py
|
UTF-8
| 1,406
| 3.0625
| 3
|
[] |
no_license
|
# Greedy cost scan over sorted station positions: every gap between adjacent
# stations costs a if it is <= a, b if it is <= b, and makes the layout
# infeasible (answer -1) otherwise.
# NOTE(review): the variable 'min' shadows the builtin min() for the rest of
# this script (the builtin is only needed before the shadowing assignment).
n,c,d = map(int,input().split(" "))
a,b = min(c,d),max(c,d)
stations = list(map(int,input().strip().split(" ")))
stations.sort()
min = 0
for i in range(n):
    if n==1:
        # A single station cannot form a link at all.
        min = -1
        break
    else:
        if i != 0:
            if i != n-1:
                # Interior station: examine the gap to both neighbours.
                if (stations[i] - stations[i-1]) <= a:
                    if (stations[i+1] - stations[i]) <=a:
                        min += a
                    elif (stations[i+1] - stations[i]) <=b:
                        min += b
                    else:
                        min = -1
                        break
                elif (stations[i] - stations[i-1]) <= b:
                    if (stations[i+1] - stations[i]) <=b:
                        min += b
                    else:
                        min = -1
                        break
                else:
                    min = -1
                    break
            else:
                # Last station: only the gap to the previous one matters.
                if (stations[i] - stations[i-1]) <= a:
                    min +=a
                elif (stations[i] - stations[i-1]) <= b:
                    min += b
                else:
                    min = -1
                    break
        else:
            # First station: only the gap to the next one matters.
            if (stations[1] - stations[0]) <=a:
                min += a
            elif (stations[1] - stations[0]) <=b:
                min += b
            else:
                min = -1
                break
print(min)
| true
|
e1522b6b63aa60c0fb2cabb147074d77efde564e
|
Python
|
johnberroa/Finger-Counting-Neural-Network
|
/keras/keras_finger_LargeCNN.py
|
UTF-8
| 4,959
| 2.5625
| 3
|
[] |
no_license
|
#
# LARGE CNN (not really large, but that's how I named it)
#
import os, time
import numpy as np
import cv2
from sklearn.model_selection import train_test_split as split_data
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Conv2D, Flatten, MaxPool2D, Dense, Dropout, BatchNormalization
from tensorflow.python.keras.utils import to_categorical
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
# FIX: 'regularizers' is used throughout the model below but was never
# imported, which raised a NameError at model-construction time.
from tensorflow.python.keras import regularizers

# Boolean to tell the program to resize the images to a smaller resolution
RESIZE = False
print("RESIZE to 200", RESIZE)
print("MORE LAYERS")

# Hyperparameters at top so that they are easily changed in vim
EPOCHS = 200
BATCHSIZE = 100
print("BATCHSIZE:", BATCHSIZE)

if RESIZE:
    size = (200, 200)
else:
    size = (300, 300)

train_datagen = ImageDataGenerator( rescale=1./255,
                                    rotation_range=10.,
                                    width_shift_range=0.1,
                                    height_shift_range=0.1,
                                    zoom_range=0.2,
                                    horizontal_flip=True,
                                    zca_whitening=True
                                    )
valid_datagen = ImageDataGenerator(rescale=1./255, zca_whitening=True) # maybe no whitening on testing sets?

train_gen = train_datagen.flow_from_directory(
    'data/train/',
    target_size=size,
    color_mode='rgb',  # FIX: 'color' is not a valid color_mode; images are 3-channel
    batch_size=BATCHSIZE,
    classes=['1','2','3','4','5'],
    class_mode='categorical',
    save_to_dir='augmented', # SAVING THE IMAGES TO VISUALIZE WHAT THE AUGMENTATION IS DOING
    save_prefix='AUG',
    save_format="jpeg"
)

# FIX: this generator was built from the undefined name 'test_datagen';
# the validation ImageDataGenerator defined above is 'valid_datagen'.
valid_gen = valid_datagen.flow_from_directory(
    'data/val/',
    target_size=size,
    color_mode='rgb',  # FIX: see above
    batch_size=BATCHSIZE,
    classes=['1','2','3','4','5'],
    class_mode='categorical'
)

# Create proper input dimensions
if RESIZE:
    input_dims = (200, 200, 3)
else:
    input_dims = (300, 300, 3)

# Creation of the model
# Only 3 Conv sections because max pool would make the images too small for any useful analysis
# NOTE(review): the comment above says 3 conv sections, but five Conv2D blocks follow.
model = Sequential()
model.add(Conv2D(16, (3, 3), input_shape=input_dims, padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.01),
                activity_regularizer=regularizers.l1(0.01)))
model.add(BatchNormalization())
model.add(MaxPool2D((2,2)))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.01),
                activity_regularizer=regularizers.l1(0.01)))
model.add(BatchNormalization())
model.add(MaxPool2D((2,2)))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.01),
                activity_regularizer=regularizers.l1(0.01)))
model.add(BatchNormalization())
model.add(MaxPool2D((2,2)))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.01),
                activity_regularizer=regularizers.l1(0.01)))
model.add(BatchNormalization())
model.add(MaxPool2D((2,2)))
model.add(Conv2D(16, (3, 3), padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.01),
                activity_regularizer=regularizers.l1(0.01)))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(512, activation='relu', activity_regularizer=regularizers.l1(0.01)))
model.add(Dropout(.5))
model.add(Dense(128, activation='relu', activity_regularizer=regularizers.l1(0.01)))
model.add(Dropout(.5))
model.add(Dense(5, activation='softmax', activity_regularizer=regularizers.l1(0.01)))

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

#one_hots = to_categorical(labels-1, num_classes=5) # due to out of bounds error, must be labels - 1 to make it start at 0
#one_hots_v = to_categorical(v_labels-1, num_classes=5)
#hist = model.fit(train, one_hots, epochs=EPOCHS, batch_size=BATCHSIZE, validation_data=(validation, one_hots_v), verbose=2)

# Keep only the checkpoints that improve validation loss.
callbacks_list = [ModelCheckpoint(filepath='weights.{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', save_best_only=True)]
hist = model.fit_generator(
    train_gen,
    steps_per_epoch=200//BATCHSIZE, # len(train) / batchsize
    epochs=EPOCHS,
    validation_data=valid_gen,
    validation_steps=100//BATCHSIZE, # same as above
    callbacks=callbacks_list,
    verbose=2
)

# Save the training data into npy save files so that they can be plotted separately
np.save('tLb{}-acc.npy'.format(BATCHSIZE), hist.history['acc'])
np.save('tLb{}-loss.npy'.format(BATCHSIZE), hist.history['loss'])
np.save('vLb{}-acc.npy'.format(BATCHSIZE), hist.history['val_acc'])
np.save('vLb{}-loss.npy'.format(BATCHSIZE), hist.history['val_loss'])
# FIX: the original format string had no placeholder, silently dropping BATCHSIZE.
print("FINAL ACCURACY (batch size {}):".format(BATCHSIZE), hist.history['acc'][-1])
| true
|
89d4512533d85b88e0cb891d8e594f04e18221e2
|
Python
|
taymoorkhan/maze_project
|
/maze/controllers/app.py
|
UTF-8
| 5,702
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
# controllers/end.py
# import pygame and required controllers
import datetime
import pygame
import pygame.locals
from controllers.start import StartController
from controllers.end import EndController
from controllers.game import GameController
from models.score_manager import ScoreManager
from models.score import Score
from flask import Flask, request
# Module-level singletons: the Flask application and the shared score manager.
app = Flask(__name__)
score_manager = ScoreManager()
class App:
    """
    This is the app class. It manipulates the controllers and opens the window
    until the game ends.
    """
    def __init__(self):
        pass
    def run(self):
        """
        This is the main method for our application.
        It runs an infinite loop, unless the user decides to quit or the game ends
        """
        # initiate pygame
        # initiates pygame clock
        pygame.init()
        pygame.display.set_caption('Kidd World')
        pygame.font.init()
        clock = pygame.time.Clock()
        # open a basic 800x600 pixel window and fill with white background
        window = pygame.display.set_mode((800, 700))
        window.fill((255, 255, 255))
        # runs window until it ends
        running = True
        # initiates start_page maze, and end_page views from controllers with window as a parameter to manipulate
        start_page = StartController(window)
        end_page = EndController(window)
        game_page = GameController(window)
        # begins by displaying start page
        start_page.display()
        # adds views/sprites to the sprite group
        sprite_group = pygame.sprite.Group()
        sprite_group.add(start_page.view)
        sprite_group.add(end_page.view)
        #Initiate Score and ScoreManager instances
        player = Score(0)
        manager = ScoreManager()
        while running:
            # game's fps
            clock.tick(50)
            # deletes a sprite if the sprite is not alive
            for sprite in sprite_group.sprites():
                if not sprite.alive():
                    del sprite
            # loop running to check for events in pygame
            for event in pygame.event.get():
                # checks if user clicked X on top right, if so, close window
                if event.type == pygame.locals.QUIT:
                    running = False
                # mouse click event, x and y are coordinates of the click
                if event.type == pygame.MOUSEBUTTONDOWN:
                    x, y = event.pos
                    # checks if start sprite is in sprite group
                    if start_page.view in sprite_group:
                        # if the start sprite is still in group and clicked, then it moves to the next page
                        if start_page.view.rect.collidepoint(x, y):
                            # removes start from the sprites so it can't be called again
                            sprite_group.remove(start_page.view)
                            window.fill((0, 0, 0))
                            #runs the game loop through game controller and view, collects win condition when game ends
                            condition, score = game_page.loop()
                            #------------------------------------------------------
                            player._score = score
                            # Create date variable for the day and time the game is played
                            x = datetime.datetime.now()
                            date = x.strftime("%x")
                            time = x.strftime("%X")
                            #---------------------------------------------------------
                            # if x is pressed on game, condition returns kill and window closes
                            if condition == "kill":
                                # FIX: was 'running == False', a no-op comparison;
                                # an assignment is required to actually stop the loop.
                                running = False
                            else:
                                #colors page white with win/loss reply
                                #----------------------------------------------------------------------
                                if condition:
                                    background_image = pygame.image.load('views/images/win.jpg').convert()
                                    window.blit(background_image, (0, 0))
                                    impact_large = pygame.font.SysFont('impact', 60)
                                    congratulations = impact_large.render("Congratulations you won!", True, (107, 140, 255))
                                    window.blit(congratulations, (75, 75))
                                    impact_small = pygame.font.SysFont('impact', 40)
                                    second_line = impact_small.render("Please enter your name in the console.", True, (107, 140, 255))
                                    window.blit(second_line, (100, 500))
                                    pygame.display.update()
                                    player._name = [input('PLEASE ENTER PLAYER NAME '), date, time]
                                    manager.from_json()
                                    manager.add_score(player, True)
                                    manager.to_json()
                                #----------------------------------------------------------------------
                                window.fill((255, 255, 255))
                                end_page.display(condition, score)
                    # if the end view sprite is clicked, then closes the application
                    elif end_page.view.rect.collidepoint(x,y):
                        running = False
            pygame.display.update()
| true
|
31c61c0f4059775cd0a16174fafc15b2af11dcc2
|
Python
|
CHENG-KH/Python
|
/APCS_哆拉A夢_difficult.py
|
UTF-8
| 1,577
| 4.03125
| 4
|
[] |
no_license
|
#跟大雄猜拳,大雄任意出拳(random)
#請使用者請使用者輸入一數字,分別代表以下猜拳的手勢
#石頭 -> 0, 剪刀 -> 1, 布 -> 2
#顯示猜拳結果(輸,贏,平手 )
#大0 -> 你0:平手
#大0 -> 你1:輸
#大0 -> 你2:贏
#大1 -> 你0:贏
#大1 -> 你1:平手
#大1 -> 你2:輸
#大2 -> 你0:輸
#大2 -> 你1:贏
#大2 -> 你2:平手
#五戰三勝(平手不算)
import random
# Rock-paper-scissors vs "Nobita": one sentinel element is appended to a
# player's list per win; the first to three wins ends the match (draws do
# not count toward the total).
nobita_win = list()
usr_win = list()
while len(nobita_win) != 3 and len(usr_win) != 3:
    # NOTE(review): prints whichever win list is non-empty first — appears
    # intended as a running score display.
    print(nobita_win or usr_win)
    # Prompt (Chinese): enter a digit for your move — rock=0, scissors=1, paper=2.
    usr_input = int(input("請使用者請使用者輸入一數字,分別代表以下猜拳的手勢:石頭 -> 0, 剪刀 -> 1, 布 -> 2"))
    nobita_input = random.randint(0,2)
    print("大雄出的是:",nobita_input)  # "Nobita played:"
    if nobita_input == 0:
        # Nobita played rock.
        if usr_input == 0:
            print("平手")  # draw
        if usr_input == 1:
            nobita_win.append("a")
            print("大雄贏")  # Nobita wins
        if usr_input == 2:
            usr_win.append("a")
            print("玩家贏")  # player wins
    if nobita_input == 1:
        # Nobita played scissors.
        if usr_input == 0:
            usr_win.append("b")
            print("玩家贏")
        if usr_input == 1:
            print("平手")
        if usr_input == 2:
            nobita_win.append("b")
            print("大雄贏")
    if nobita_input == 2:
        # Nobita played paper.
        if usr_input == 0:
            nobita_win.append("c")
            print("大雄贏")
        if usr_input == 1:
            usr_win.append("c")
            print("玩家贏")
        if usr_input == 2:
            print("平手")
print("遊戲結束")  # "Game over"
| true
|
49a125da75ff49e3b6756e86abd9bebfee8ad39f
|
Python
|
nickcernis/scancat
|
/scancat/themes.py
|
UTF-8
| 3,844
| 2.765625
| 3
|
[
"Apache-2.0"
] |
permissive
|
"""Probe a WordPress site for theme information."""
import logging
import requests
from bs4 import BeautifulSoup
from . import wordpress as wp
from .message import msg
def is_genesis_child_theme(soup=None):
    """Is the active theme a Genesis child theme?

    :param soup: The parsed HTML, defaults to None
    :param soup: BeautifulSoup, optional
    :return: True if Genesis child theme detected
    :rtype: bool
    """
    if soup is None:
        logging.info('⚠️ No HTML content available.')
        # NOTE(review): returns None here rather than False.
        return
    info, _ = theme_info(soup)
    if info is None:
        msg.send('ℹ️ A Genesis child theme was not found (or may be minified).')
        return False
    # NOTE(review): when a theme is found but its template is not Genesis,
    # the function falls through and implicitly returns None, not False.
    if 'template' in info and info['template'].lower() == 'genesis':
        msg.send('🎨 A Genesis child theme is active.')
        return True
def print_genesis_info(soup=None):
    """Get Genesis parent theme version info if it can be found.

    :param soup: The parsed HTML, defaults to None
    :param soup: BeautifulSoup, optional
    """
    if soup is None:
        logging.info('⚠️ No HTML content available.')
        return
    _, child_theme_style_url = theme_info(soup)
    if child_theme_style_url:
        # Derive the parent theme's stylesheet URL from the site root of the
        # child theme's stylesheet URL.
        genesis_style_url = child_theme_style_url.split(
            '/wp-content/', 1)[0] + '/wp-content/themes/genesis/style.css'
        genesis_theme_info, url = theme_info(None, [genesis_style_url])
        if genesis_theme_info and url:
            msg.send('• Genesis version: ' +
                     genesis_theme_info['version'] + ' <a href="{0}" target="_blank">{0}</a>'.format(url))
def stylesheets(soup=None):
    """Return the href of every <link rel="stylesheet"> tag in the page.

    :param soup: The parsed HTML, defaults to None
    :param soup: BeautifulSoup, optional
    :return: All stylesheet URLs from link tags (None when soup is None)
    :rtype: list
    """
    if soup is None:
        logging.info('⚠️ No HTML content available.')
        return
    return [link["href"]
            for link in soup.findAll('link', attrs={'rel': 'stylesheet'})]
def theme_info(soup=None, theme_stylesheet_urls=None):
    """Get active theme information.

    :param soup: The parsed HTML, defaults to None
    :param soup: BeautifulSoup, optional
    :param theme_stylesheet_urls: List of stylesheet URLs, defaults to None
    :param theme_stylesheet_urls: list, optional
    :return: Tuple of theme info and the stylesheet URL
    :rtype: list, string or None, None
    """
    if soup is None and theme_stylesheet_urls is None:
        logging.info('⚠️ No HTML content available.')
        return None, None
    if theme_stylesheet_urls is None:
        # Only stylesheets served from a theme directory can carry the
        # WordPress theme header, so filter the page's links down to those.
        stylesheet_urls = stylesheets(soup)
        theme_stylesheet_urls = list(
            filter(lambda url: '/themes/' in url, stylesheet_urls))
    for url in theme_stylesheet_urls:
        # Fetch each candidate stylesheet and parse its header block; the
        # first one declaring a theme name wins.
        css = requests.get(url)
        info = wp.parse_stylesheet_header(css.text)
        if 'theme_name' in info:
            return info, url
    return None, None
def print_theme_info(soup=None):
    """Print active theme information.

    :param soup: The parsed HTML, defaults to None
    :param soup: BeautifulSoup, optional
    """
    not_found_message = 'No theme info found. Styles may be minified, in an unexpected place, behind a maintenance mode page, or the site is not using WordPress.'
    if soup is None:
        logging.info('⚠️ No HTML content available.')
        return
    info, url = theme_info(soup)
    if info is None:
        msg.send(not_found_message)
        return
    if 'theme_name' in info:
        msg.send('• Theme name: ' + info['theme_name'])
    else:
        msg.send(not_found_message)
    if 'version' in info:
        msg.send('• Version: ' + info['version'] +
                 ' <a href="{0}" target="_blank">{0}</a>'.format(url))
| true
|
0b5574baac9afa8b98d52750514fc0b6e215faf4
|
Python
|
StepanSZhuk/PythonCore377
|
/CODEWARS/Count of positives_sum of negatives.py
|
UTF-8
| 494
| 4.03125
| 4
|
[] |
no_license
|
#Given an array of integers.
#Return an array, where the first element is the count of positives numbers and the second element is sum of negative numbers.
#If the input array is empty or null, return an empty array.
def count_positives_sum_negatives(arr):
    """Return [count of positive numbers, sum of negative numbers] for *arr*.

    An empty or None input yields an empty list.
    """
    if not arr:
        return []
    positives = sum(1 for value in arr if value > 0)
    negatives = sum(value for value in arr if value < 0)
    return [positives, negatives]
| true
|
32eded7d550c7a8e1ccb61d1894a6c7759a36350
|
Python
|
anchandm/fooof
|
/tutorials/plot_01-ModelDescription.py
|
UTF-8
| 3,303
| 3.90625
| 4
|
[
"Apache-2.0"
] |
permissive
|
"""
01: Model Description
=====================
A theoretical / mathematical description of the FOOOF model.
"""
###################################################################################################
# Introduction
# ------------
#
# A neural power spectrum is fit as a combination of an aperiodic signal and periodic oscillations.
#
# The aperiodic component of the signal displays 1/f like properties.
#
# Putative oscillations (hereafter referred to as 'peaks'), are frequency regions
# in which there are 'bumps' of power over and above the aperiodic signal.
#
# This formulation roughly translates to fitting the power spectrum as:
#
# .. math::
# P = L + \sum_{n=0}^{N} G_n
#
# Where `P` is the power spectrum, `L` is the aperiodic signal, and each :math:`G_n`
# is a Gaussian fit to a peak, for `N` total peaks extracted from the power spectrum.
#
###################################################################################################
# Aperiodic Fit
# -------------
#
# The aperiodic fit uses an exponential function, fit on the semilog power spectrum
# (linear frequencies and :math:`log_{10}` power values).
#
# The exponential is of the form:
#
# .. math::
# L = 10^b * \frac{1}{(k + F^\chi)}
#
# Or, equivalently:
#
# .. math::
# L = b - \log(k + F^\chi)
#
# In this formulation, the parameters `b`, `k`, and :math:`\chi`
# define the aperiodic signal, as:
#
# - `b` is the broadband 'offset'
# - `k` relates to the 'knee'
# - :math:`\chi` is the 'exponent' of the aperiodic fit
# - `F` is the vector of input frequencies
#
# Note that fitting the knee parameter is optional. If used, the knee defines a bend in the
# aperiodic `1/f` like component of the signal.
#
# By default the aperiodic signal is fit with the 'knee' parameter set to zero.
# This fits the aperiodic signal equivalently to fitting a linear fit in log-log space.
#
# Broader frequency ranges typically do not display a single 1/f like characteristic,
# and so for these cases fitting with the knee parameter allows for modelling bends
# in the aperiodic signal.
#
###################################################################################################
# Peaks
# -----
#
# Regions of power over above this aperiodic signal, as defined above, are considered
# to be putative oscillations and are fit in the model by a Gaussian.
#
# For each Gaussian, :math:`G_n`, with the form:
#
# .. math::
# G_n = a * exp (\frac{- (F - c)^2}{2 * w^2})
#
# Each peak is defined in terms of parameters `a`, `c` and `w`, where:
#
# - `a` is the height of the peak, over and above the aperiodic signal
# - `c` is the center frequency of the peak
# - `w` is the width of the peak
# - `F` is the vector of input frequencies
#
# The full power spectrum fit is therefore the combination of the aperiodic fit,
# `L` defined by the exponential fit, and `N` peaks, where each :math:`G_n` is
# formalized as a Gaussian process.
#
# Full method details are available in the paper:
# https://www.biorxiv.org/content/early/2018/04/11/299859
#
###################################################################################################
# This procedure creates a model of the neural power spectrum
# that is fully described by the mathematical formulation above.
#
| true
|
2cc640e07f26cdeaa6089fa31afa9c6c12842897
|
Python
|
noisyoscillator/Statistical-Mechanics-1
|
/anharm_path_integral_montecarlo.py
|
UTF-8
| 2,492
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
%pylab inline
import math, random, pylab
# Define the anharmonic (quartic) potential
def V_anharmonic(x, gamma, kappa):
    """Quartic anharmonic potential: V(x) = x^2/2 + gamma*x^3 + kappa*x^4."""
    quadratic = x ** 2 / 2
    cubic = gamma * x ** 3
    quartic = kappa * x ** 4
    return quadratic + cubic + quartic
def rho_free(x, y, beta):  # free off-diagonal density matrix
    """Gaussian free-particle density matrix element exp(-(x-y)^2 / (2*beta))."""
    displacement = x - y
    return math.exp(-displacement ** 2 / (2.0 * beta))
def read_file(filename):
    """Read two whitespace-separated numeric columns from *filename*.

    Returns (xs, ys) as two lists of floats, one pair per input line.
    """
    xs, ys = [], []
    with open(filename) as fh:
        for row in fh:
            first, second = row.split()
            xs.append(float(first))
            ys.append(float(second))
    return xs, ys
# Path-integral Monte Carlo for the anharmonic oscillator at inverse
# temperature beta, using the Metropolis algorithm on one time slice at a time.
beta = 4.0
g = 1.0 #-cubic and quartic coefficients
T = 1 / beta
N = 16                                 # number of imaginary times slices
dtau = beta / N
delta = 1.0                            # maximum displacement on one slice
n_steps = 1000000                      # number of Monte Carlo steps
x = [0.0] * N                          # initial path
hist_data = []
for step in range(n_steps):
    k = random.randint(0, N - 1)       # random slice
    knext, kprev = (k + 1) % N, (k - 1) % N  # next/previous slices
    x_new = x[k] + random.uniform(-delta, delta)  # new position at slice k
    # Local weights couple slice k only to its two neighbours plus the
    # potential term at the old/new position.
    old_weight = (rho_free(x[knext], x[k], dtau) *
                  rho_free(x[k], x[kprev], dtau) *
                  math.exp(-dtau * V_anharmonic(x[k], -g, g)))
    new_weight = (rho_free(x[knext], x_new, dtau) *
                  rho_free(x_new, x[kprev], dtau) *
                  math.exp(-dtau * V_anharmonic(x_new ,-g, g)))
    # Metropolis acceptance test.
    if random.uniform(0.0, 1.0) < new_weight / old_weight:
        x[k] = x_new
    if step % 10 == 0:
        # Sample the position of slice 0 every 10 sweeps for the histogram.
        hist_data.append(x[0])
# Figure output:
list_x, list_y = read_file('data_anharm_matrixsquaring_beta' + str(beta) + '.dat')
v = [V_anharmonic(a, -g, g) for a in list_x]
pylab.plot(list_x, v, c='gray', linewidth=2.0, label='Anharmonic potential')
pylab.plot(list_x, list_y, c='red', linewidth=4.0, label='path integral Monte Carlo')
# NOTE(review): normed='True' passes a (truthy) string; the 'normed' kwarg is
# deprecated in favour of 'density' in modern matplotlib.
pylab.hist(hist_data, 100, normed = 'True', label='matrix squaring') #histogram of the sample
pylab.ylim(0,1)
pylab.xlim(-2,2)
pylab.title('Position distribution at $T=%.2f$, $\gamma_{cubic}=%.2f$, $\gamma_{quartic}=%.2f$' % (T,-g,g), fontsize = 13)
pylab.xlim(-2.0, 2.0) #restrict the range over which the histogram is shown
pylab.xlabel('$x$', fontsize = 15)
pylab.ylabel('$\pi(x)$', fontsize = 15)
pylab.legend()
pylab.savefig('plot_T_%.2f_anharm_g_%.1f_prob_path_int.png' % (T,g))
pylab.show()
| true
|
9c86da1507d99f90701457869d7dbf26424fc9f7
|
Python
|
soumendrak/demonetization
|
/Demonetization.py
|
UTF-8
| 3,202
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
"""
Created by Soumendra Kumar Sahoo
Date: 26th November 2016
Function: This program will calculate the overall sentiment of public
on the demonetization issue by fetching data from twitter
Future plans:
1. Data extraction from twitter functionality will be added
2. Visualization of the sentiments using seaborn/matplotlib module
3. Performance improvement
4. Converting it to Unsupervised learning
"""
import csv
import re
from nltk.tokenize import word_tokenize
import math
# AFINN-111 is as of June 2011 the most recent version of AFINN
# filenameAFINN = 'AFINN/AFINN-111.txt'
# Load the AFINN word -> valence-score map (tab-separated: word<TAB>score).
afinn = {}
with open('AFINN/sentiments.txt') as SentimentFile:
    for row in SentimentFile:
        afinn[row.split('\t')[0]] = int(row.split('\t')[1].strip())
# Tokenizer patterns: emoticons are listed first so they win over the
# generic word/character alternatives below.
emoticons_str = r'(?:[:=;][oO\-]? [D\)\]\(\]/\\OpP])'
regex_str = [
    emoticons_str,
    r'<[^>]+>', # HTML tags
    r'(?:@[\w_]+)', # @-mentions
    r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags
    # URLs
    r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+',
    r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers
    r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and '
    r'(?:[\w_]+)', # other words
    r'(?:\S)' # anything else
]
# Compiled once at import time; emoticon_re is used to skip lowercasing
# emoticon tokens in preprocess().
tokens_re = re.compile(r'(' + '|'.join(regex_str) + ')',
                       re.VERBOSE | re.IGNORECASE)
emoticon_re = re.compile(r'^' + emoticons_str + '$',
                         re.VERBOSE | re.IGNORECASE)
def sentiment(words):
    """
    Returns a float for sentiment strength based on the input text.
    Positive values are positive valence, negative value are negative valence.
    """
    # Build a concrete list of per-word scores.  The original used map(),
    # which on Python 3 is a lazy iterator: it is always truthy and has no
    # len(), breaking both checks below.
    sentiments = [afinn.get(word, 0) for word in words]
    if sentiments:
        # How should you weight the individual word sentiments?
        # You could do N, sqrt(N) or 1 for example. Here I use sqrt(N)
        sntmnt = float(sum(sentiments)) / math.sqrt(len(sentiments))
    else:
        sntmnt = 0
    return sntmnt
def tokenize(s):
    """Split text into tokens using NLTK's word tokenizer."""
    # The hand-rolled regex tokenizer is kept for reference:
    # return tokens_re.findall(s)
    return word_tokenize(s)
def preprocess(s, lowercase=False):
    """Tokenize *s*; optionally lowercase every token except emoticons."""
    tokens = tokenize(s)
    if not lowercase:
        return tokens
    # Emoticons are case-sensitive (e.g. ':D'), so leave them untouched.
    return [tok if emoticon_re.search(tok) else tok.lower() for tok in tokens]
def filereader(total=0):
    """
    This has been used to read the csv file
    :return read handler
    """
    # NOTE(review): the docstring is stale — this returns the accumulated
    # sentiment total over all tweets in the CSV, not a reader handle.
    with open('demonetization-tweets-Test.csv') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            try:
                tweet = row['text']
                total += sentiment(preprocess(tweet))
            except UnicodeDecodeError:
                # There are some characters which can not be handled by Python
                # We need to ignore those characters
                pass
    return total
def main():
    """
    main paragraph to handle the processes
    :return:
    """
    Total = filereader()
    # Use print() so the script runs on Python 3 as well as Python 2
    # (the original bare print statements are SyntaxErrors on Python 3).
    if Total > 0:
        print("Positive sentiments")
    else:
        print("Negative sentiments")


if __name__ == "__main__":
    main()
| true
|
a8a01ad2a75fe2478e3d9c1e8b33836c48941fcb
|
Python
|
sjbober/Most-Common-Death-Row-Last-Words
|
/clean.py
|
UTF-8
| 1,667
| 3.640625
| 4
|
[] |
no_license
|
import sqlite3
import pandas as pd
import re
from removecontdupes import removeContractionsDuplicates
# Connect to the executions db and create a pandas Series from the
# 'statement' column of the Executions table.
conn = sqlite3.connect('executions.sqlite')
statements = pd.read_sql_query('SELECT statement FROM Executions', conn)
statements = statements['statement']
# Clean up the data: lowercase everything, remove headings like "statement to
# the media", and strip all punctuation EXCEPT apostrophes (contractions are
# expanded later).  The curly quotes are normalized to straight apostrophes
# first so contraction expansion matches reliably.
# NOTE(review): Series.str.replace treats the first pattern below as a regex;
# newer pandas requires regex=True to be passed explicitly.
statements= statements.str.lower()
statements = statements.str.replace("statement to the media:","")
statements = statements.str.replace('spoken:', "")
statements = statements.str.replace('written statement:', "")
statements = statements.str.replace(r'\.|\!|\?|,|-|\(|\)', "")
statements = statements.str.replace(':', "")
statements = statements.str.replace('"', "")
statements = statements.str.replace(';', "")
statements = statements.str.replace('\n', "")
statements = statements.str.replace('\r', "")
statements = statements.str.replace('\’',"\'")
statements = statements.str.replace('\‘',"\'")
# Expand contractions and drop duplicate words in every statement.
for statement in statements:
    new_state = removeContractionsDuplicates(statement)
    statements = statements.str.replace(statement,new_state)
# At this point all "'s" occurrences should be possessive and can be removed.
statements = statements.str.replace("\'s","")
# print(statements)
| true
|
3605eda012bbbdc82374e58cba08581a97dc808f
|
Python
|
lilgaage/lilgaage_scripts
|
/python/05/05Animal_Class.py
|
UTF-8
| 2,009
| 4.53125
| 5
|
[] |
no_license
|
class Animal:
    """Base animal holding public identity fields plus name-mangled private state."""

    def __init__(self, name, age, gender, weight):
        # Public attributes — visible to subclasses and outside callers.
        self.name = name
        self.age = age
        self.gender = gender
        # Double-underscore attribute is name-mangled: not inherited by
        # subclasses and not accessible from outside the class.
        self.__weight = weight

    def __eat(self, food):
        """Private helper (name-mangled); callable only from inside Animal."""
        print("{}不爱吃肉,爱吃{}".format(self.name, food))

    def run(self):
        print("{}吃完饭了喜欢跑来跑去".format(self.name))

    def shower(self):
        print("{}喜欢吃饭前去洗澡".format(self.name))

    def introduce(self):
        """Print a self-introduction, then demonstrate calling the private method."""
        template = "{}是一个{}孩子,小可爱今年才{}岁,但是体重居然已经{}斤了。"
        print(template.format(self.name, self.gender, self.age, self.__weight))
        self.__eat("鬼鬼")  # the private method is reachable from inside the class
#狗类,子类继承父类,获得父类所有非私有的属性和方法
class Dog(Animal):
    """Dog inherits all non-private members of Animal and overrides run()."""

    def drink(self):
        print("{}喜欢喝牛奶".format(self.name))

    def run(self):
        """Override of Animal.run — the subclass version is the one invoked."""
        print("{}吃完饭了就在那躺着,根本就不动".format(self.name))
class Cat(Animal):
    """Cat overrides the constructor and adds getName()/drink()."""

    def __init__(self, name, age, gender, weight):
        # Delegate the shared field setup to the parent constructor first.
        super().__init__(name, age, gender, weight)
        self.name = name
        print("我是{}".format(self.name))

    def getName(self):
        return "Cat" + self.name

    def drink(self):
        print("{}喜欢喝酸奶".format(self.name))
# Demo: exercise the class hierarchy.
a1 = Animal("瑰丝",18,"女",40)
a1.run()
# a1.__eat("鬼鬼") # private methods cannot be called from outside the class
# print(a1.__weight) # private attributes cannot be read from outside the class
d1 = Dog("小瑰",3,"男",20)
# The subclass calls a non-private method inherited from the parent.
d1.introduce()
d1.drink()
# When the subclass has overridden a parent method, its own version runs.
d1.run()
c1 = Cat("娃琳可",2,"女",18)
c1.introduce()
| true
|
af8a4271f1ba012646b1a547352c2abd9c88dcd4
|
Python
|
sererenaa/connected_corridors
|
/AimsunExtractNetwork.py
|
UTF-8
| 17,114
| 2.609375
| 3
|
[] |
no_license
|
from PyANGBasic import *
from PyANGKernel import *
from PyANGGui import *
from PyANGAimsun import *
#from AAPI import *
import datetime
import pickle
import sys
import csv
import os
def ExtractJunctionInformation(model,outputLocation):
    """Dump every junction (GKNode) in the Aimsun model to JunctionInf.txt.

    For each junction writes: a header row, the entrance/exit section IDs,
    per-turn lane mapping and geometry (after refreshing the Left/Through/
    Right labels via UpdateTurningDescription), the left-to-right turn order
    per entrance section, and the junction position in lat/lon degrees.
    Python 2 / Aimsun-scripting code (itervalues, GK* API).  Returns 0.
    """
    #####################Get the junction information#####################
    junctionInfFileName=outputLocation+'\JunctionInf.txt'
    #print junctionInfFileName
    junctionInfFile = open(junctionInfFileName, 'w')
    global DefaultAngle
    # Get the number of nodes
    numJunction=0
    for types in model.getCatalog().getUsedSubTypesFromType(model.getType("GKNode")):
        numJunction = numJunction+ len(types)
    junctionInfFile.write('Number of junctions:\n')
    junctionInfFile.write(('%i\n') % numJunction)
    junctionInfFile.write('\n')
    # Loop for each junction
    for types in model.getCatalog().getUsedSubTypesFromType(model.getType("GKNode")):
        for junctionObj in types.itervalues():
            junctionInfFile.write(
                'Junction ID,Name, External ID, Signalized,# of incoming sections,# of outgoing sections, # of turns\n')
            junctionID = junctionObj.getId() # Get the junction ID
            junctionExtID = junctionObj.getExternalId() # Get the external ID
            junctionName = junctionObj.getName() # Get name of the junction
            numEntranceSections = junctionObj.getNumEntranceSections() # Get the number of entrance sections
            numExitSections = junctionObj.getNumExitSections() # Get the number of exit sections
            entranceSections = junctionObj.getEntranceSections() # Get the list of GKSection objects
            exitSections = junctionObj.getExitSections()
            turns=junctionObj.getTurnings()
            numTurn = len(turns) # Get the number of turns
            signalGroupList = junctionObj.getSignals() # Check whether a junction is signalzied or not
            if len(signalGroupList) == 0:
                signalized = 0
            else:
                signalized = 1
            # Write the first line
            junctionInfFile.write('%i,%s,%s,%i,%i,%i,%i\n' % (
                junctionID, junctionName, junctionExtID, signalized, numEntranceSections, numExitSections, numTurn))
            # Write the entrance sections
            junctionInfFile.write("Entrances links:\n")
            for j in range(numEntranceSections - 1):
                junctionInfFile.write(("%i,") % entranceSections[j].getId())
            junctionInfFile.write(("%i\n") % entranceSections[numEntranceSections - 1].getId())
            # Write the exit sections
            junctionInfFile.write("Exit links:\n")
            for j in range(numExitSections - 1):
                junctionInfFile.write(("%i,") % exitSections[j].getId())
            junctionInfFile.write(("%i\n") % exitSections[numExitSections - 1].getId())
            ## Update the turning description
            UpdateTurningDescription(numEntranceSections, entranceSections, junctionObj, DefaultAngle)
            # Write the turn information
            junctionInfFile.write(
                "Turning movements:turnID,origSectionID,destSectionID,origFromLane,origToLane,destFromLane,destToLane, description, turn speed\n")
            for j in range(numTurn):
                turnObj = turns[j]
                origin=turnObj.getOrigin()
                destination=turnObj.getDestination()
                originObj = model.getCatalog().find(origin.getId()) # Get the section object
                numLanesOrigin=len(originObj.getLanes())
                destinationObj = model.getCatalog().find(destination.getId()) # Get the section object
                numLanesDest = len(destinationObj.getLanes())
                turnAngle=turnObj.calcAngleBridge()
                # FromLane: leftmost lane number (GKTurning)/ rightmost lane number (API/our definition)
                # ToLane: rightmost lane number /leftmost lane number (API/our definition)
                # Note: lanes are organized from right to left in our output!!
                # It is different from the definition in the GKSection function
                # Speed is converted km/h -> mph via *0.621371 (written with %i,
                # so it is truncated); turnAngle is appended as the final %.4f.
                junctionInfFile.write("%i,%i,%i,%i,%i,%i,%i,%s,%i,%.4f\n" % (
                    turnObj.getId(), origin.getId(), destination.getId(), numLanesOrigin-turnObj.getOriginToLane(),
                    numLanesOrigin-turnObj.getOriginFromLane(),numLanesDest-turnObj.getDestinationToLane(),
                    numLanesDest-turnObj.getDestinationFromLane(), turnObj.getDescription(),turnObj.getSpeed()*0.621371,turnAngle))
            # Write the turn orders by section from left to right
            junctionInfFile.write(
                "Turning movements ordered from left to right in a give section: section ID, # of turns, [turn IDs]\n")
            for j in range(numEntranceSections):
                string = str(entranceSections[j].getId()) + ','
                turnInfSection = junctionObj.getFromTurningsOrderedFromLeftToRight(entranceSections[j])
                string = string + str(len(turnInfSection)) + ','
                for k in range(len(turnInfSection) - 1):
                    string = string + str(turnInfSection[k].getId()) + ','
                string = string + str(turnInfSection[len(turnInfSection) - 1].getId()) + '\n'
                junctionInfFile.write(string)
            junctionInfFile.write("\n")
            #Write position of junction
            translator=GKCoordinateTranslator(model)
            coords = translator.toDegrees(junctionObj.getPosition())
            junctionInfFile.write("Coordinates:\n")
            junctionInfFile.write(("%.6f,%.6f\n") % (coords.x, coords.y))
    return 0
def UpdateTurningDescription(numEntranceSections,entranceSections,junctionObj,DefaultAngle):
    """Prefix each turning's description with 'Left Turn'/'Through'/'Right Turn'.

    For every entrance section, orders the turns left-to-right, finds the
    turn whose angle from straight-ahead is smallest, and classifies turns
    around it.  Any pre-existing description (e.g. protected/permissive
    left, U turn) overrides the pure-angle decision for left turns.
    A turn within DefaultAngle degrees of straight counts as Through.

    NOTE(review): descriptions may be None when the model has no label, and
    'Through'+':'+None would raise TypeError — confirm all turns carry a
    description in the models this runs on.
    """
    # This function is used to update the turning description in Aimsun
    # Francois has added descriptions to some turning movements
    # (pertected left, permissive left, U turn, two way stopbar)
    for j in range(numEntranceSections):
        turnInfSection = junctionObj.getFromTurningsOrderedFromLeftToRight(entranceSections[j])
        # Get the turning movements from left to right
        #Returns the angle, in degrees, between the last segment of the origin section and
        # the turn line. When going clockwise the angle will be negative and when going
        # counterclockwise the angle will be positive
        # Get the turn with the minumum angle
        curAddr = 0
        minAngle = abs(turnInfSection[0].calcAngleBridge())
        descriptions=[]
        leftTurnIdx=[]
        lastLeftIdx=[]
        for k in range(len(turnInfSection)):
            individualDescription=turnInfSection[k].getDescription()
            descriptions.append(individualDescription)
            if(individualDescription is not None): # If we have additional descriptions from the model
                # Check whether it is a left-turn movement or not
                idxLeft=False
                if (individualDescription.find("Left")>=0):
                    idxLeft=True
                idxUTurn=False
                if (individualDescription.find("U Turn")>=0):
                    idxUTurn=True
                if(idxLeft or idxUTurn): # If yes
                    leftTurnIdx.append(1)
                    lastLeftIdx=k # Get the index of the last left turn movement
                else: # If no
                    leftTurnIdx.append(0)
            else: # No additional description
                leftTurnIdx.append(0)
            # Get the minimum angle
            if(minAngle>abs(turnInfSection[k].calcAngleBridge())):
                curAddr=k
                minAngle = abs(turnInfSection[k].calcAngleBridge())
        if(sum(leftTurnIdx)==0): # No additional description to help?
            # Classification by angle only.
            if minAngle <=DefaultAngle: # Through movement
                turnInfSection[curAddr].setDescription('Through'+':'+descriptions[curAddr])
                for t in range(curAddr): # Set turns on the left to be Left Turn
                    turnInfSection[t].setDescription('Left Turn'+':'+descriptions[t])
                for t in range(curAddr+1,len(turnInfSection)): # Set turns on the right to be Right Turn
                    turnInfSection[t].setDescription('Right Turn'+':'+descriptions[t])
            else:
                if len(turnInfSection)==3:
                    # It is possible for some special case that Through movement has
                    # a big turning angle, then Overwrite it
                    # In the case of three movements, we consider they are left, through, and right
                    turnInfSection[0].setDescription('Left Turn'+':'+descriptions[0])
                    turnInfSection[1].setDescription('Through'+':'+descriptions[1])
                    turnInfSection[2].setDescription('Right Turn'+':'+descriptions[2])
                elif (turnInfSection[curAddr].calcAngleBridge()>DefaultAngle): # Have a bigger angle to the left
                    for t in range(curAddr+1): # Set turns on the left to be Left Turn
                        turnInfSection[t].setDescription('Left Turn'+':'+descriptions[t])
                    for t in range(curAddr+1,len(turnInfSection)): # Set turns on the right to be Right Turn
                        turnInfSection[t].setDescription('Right Turn'+':'+descriptions[t])
                elif (turnInfSection[curAddr].calcAngleBridge()<-DefaultAngle): # Have a bigger angle to the right
                    for t in range(curAddr): # Set turns on the left to be Left Turn
                        turnInfSection[t].setDescription('Left Turn'+':'+descriptions[t])
                    for t in range(curAddr,len(turnInfSection)): # Set turns on the right to be Right Turn
                        turnInfSection[t].setDescription('Right Turn'+':'+descriptions[t])
        else: # Has additional descriptions
            # lastLeftIdx (index of the rightmost labelled left/U turn) caps
            # how far right the Left Turn classification must extend.
            if minAngle <= DefaultAngle: # It is probably a through movement
                if lastLeftIdx<curAddr: # Yes, it is!
                    for t in range(curAddr): # Set turns on the left to be Left Turn
                        turnInfSection[t].setDescription('Left Turn' + ':' + descriptions[t])
                    turnInfSection[curAddr].setDescription('Through' + ':' + descriptions[curAddr])
                    for t in range(curAddr+1,len(turnInfSection)): # Set turns on the right to be Right Turn
                        turnInfSection[t].setDescription('Right Turn' + ':' + descriptions[t])
                else: # If, it is not! No through movements!
                    for t in range(lastLeftIdx+1): # Set turns on the left to be Left Turn
                        turnInfSection[t].setDescription('Left Turn' + ':' + descriptions[t])
                    for t in range(lastLeftIdx+1,len(turnInfSection)): # Set turns on the right to be Right Turn
                        turnInfSection[t].setDescription('Right Turn' + ':' + descriptions[t])
            else:
                if len(turnInfSection)==3 and lastLeftIdx==0:
                    # It is possible for some special case that Through movement has
                    # a big turning angle, then Overwrite it
                    # In the case of three movements, we consider they are left, through, and right
                    turnInfSection[0].setDescription('Left Turn'+':'+descriptions[0])
                    turnInfSection[1].setDescription('Through'+':'+descriptions[1])
                    turnInfSection[2].setDescription('Right Turn'+':'+descriptions[2])
                elif (turnInfSection[curAddr].calcAngleBridge() > DefaultAngle): # Have a bigger angle to the left
                    if lastLeftIdx>curAddr:
                        curAddr=lastLeftIdx
                    for t in range(curAddr+1): # Set turns on the left to be Left Turn
                        turnInfSection[t].setDescription('Left Turn' + ':' + descriptions[t])
                    for t in range(curAddr+1,len(turnInfSection)): # Set turns on the right to be Right Turn
                        turnInfSection[t].setDescription('Right Turn' + ':' + descriptions[t])
                elif (turnInfSection[curAddr].calcAngleBridge() < -DefaultAngle): # Have a bigger angle to the right
                    if lastLeftIdx >=curAddr:
                        curAddr = lastLeftIdx+1
                    for t in range(curAddr): # Set turns on the left to be Left Turn
                        turnInfSection[t].setDescription('Left Turn' + ':' + descriptions[t])
                    for t in range(curAddr, len(turnInfSection)): # Set turns on the right to be Right Turn
                        turnInfSection[t].setDescription('Right Turn' + ':' + descriptions[t])
def ExtractSectionInformation(model,outputLocation):
    """Dump every road section (GKSection) in the Aimsun model to SectionInf.txt.

    Per section writes: a header row, lane lengths and initial offsets in
    feet (*3.28084 from metres), full-lane flags (identifies turn pockets),
    polyline shape points in lat/lon degrees, and the section speed.
    Python 2 / Aimsun-scripting code.  Returns 0.
    """
    ####################Get the section information#####################
    sectionInfFileName=outputLocation+'\SectionInf.txt'
    sectionInfFile = open(sectionInfFileName, 'w')
    translator=GKCoordinateTranslator(model)
    # Get the number of sections
    numSection=0
    for types in model.getCatalog().getUsedSubTypesFromType(model.getType("GKSection")):
        numSection=numSection+len(types)
    sectionInfFile.write('Number of sections:\n')
    sectionInfFile.write(('%i\n') % numSection)
    sectionInfFile.write('\n')
    for types in model.getCatalog().getUsedSubTypesFromType(model.getType("GKSection")):
        for sectionObj in types.itervalues():
            sectionID = sectionObj.getId() # Get the section ID
            sectionExtID = sectionObj.getExternalId() # Get the section external ID
            sectionName = sectionObj.getName() # Get the section name
            # Write the first line
            lanes=sectionObj.getLanes()
            totLane=len(lanes)
            points = sectionObj.calculatePolyline() # Get the shape files
            totPoint = len(points)
            sectionInfFile.write('Section ID,Name,External ID,# of lanes,# of points\n')
            sectionInfFile.write('%i,%s,%s,%i,%i\n' % (sectionID, sectionName, sectionExtID, totLane,totPoint))
            # Write the lane lengths
            sectionInfFile.write("Lane lengths:\n")
            for j in range(totLane - 1): # Loop for each lane: from leftmost to rightmost
                length = float(sectionObj.getLaneLength(j)) * 3.28084
                sectionInfFile.write(("%.4f,") % length) # Get the lane length in feet
            length = float(sectionObj.getLaneLength(totLane - 1)) * 3.28084
            sectionInfFile.write(("%.4f\n") % length)
            # Write the lane starting point
            sectionInfFile.write("Initial starting point (initial offset):\n")
            #An entry side lane have initialOffset equal to 0.0 and finalOffset equal to the length of the side lane.
            # An exit side lane have initialOffset equal to the length of the side lane (but negative) and finalOffset equal to 0.0.
            for j in range(totLane - 1): # Loop for each lane: from leftmost to rightmost
                sectionLane = sectionObj.getLane(j) # Get the section_lane object
                sectionInfFile.write(("%.4f,") % (sectionLane.getInitialOffset() * 3.28084)) # Get the initial offset
            sectionLane = sectionObj.getLane(totLane - 1) # Get the section_lane object
            sectionInfFile.write(("%.4f\n") % (sectionLane.getInitialOffset() * 3.28084))
            # Write the lane properties
            sectionInfFile.write("Is full lane:\n")
            for j in range(totLane - 1): # Loop for each lane: from leftmost to rightmost
                sectionLane = sectionObj.getLane(j) # Get the section_lane object
                sectionInfFile.write(("%i,") % sectionLane.isFullLane()) # Get the lane status
            sectionLane = sectionObj.getLane(totLane - 1) # Get the section_lane object
            sectionInfFile.write(("%i\n") % sectionLane.isFullLane()) # Get the lane status: To find whether it is a full lane: use to identify left-turn and right-turn pockets
            # Write the shape files
            sectionInfFile.write("Shape points:\n")
            for j in range(totPoint-1):
                point= translator.toDegrees(points[j])
                sectionInfFile.write(("%.6f,%.6f,") % (point.x,point.y))
            point = translator.toDegrees(points[totPoint-1])
            sectionInfFile.write(("%.6f,%.6f\n") % (point.x, point.y))
            #Write the speed
            sectionInfFile.write("Speed:\n")
            speed = sectionObj.getSpeed()
            sectionInfFile.write(("%i\n") % speed)
            sectionInfFile.write("\n")
    return 0
# --- Script entry point: intended to run inside the Aimsun GUI's console ---
# Turns within DefaultAngle degrees of straight-ahead are labelled Through
# (consumed by UpdateTurningDescription via the `global` in the extractor).
DefaultAngle=8
gui=GKGUISystem.getGUISystem().getActiveGui()
model = gui.getActiveModel()
# Destination folder for JunctionInf.txt / SectionInf.txt (Windows path).
outputLocation='C:\Users\Serena\connected_corridors'
# Call to extract junction information
print 'Extract junction information!'
ExtractJunctionInformation(model,outputLocation)
# Call to extract Section information
print 'Extract section information!'
ExtractSectionInformation(model,outputLocation)
print 'Done with network extraction!'
| true
|
f41f5482df71070f71393e47b07ca857cf3372e7
|
Python
|
periyandavart/ty.py
|
/palin.py
|
UTF-8
| 147
| 3.40625
| 3
|
[] |
no_license
|
# Read an integer and report whether its decimal digits form a palindrome.
raw = input()
value = int(raw)
remaining = value
reversed_digits = 0
# Peel digits off the low end and push them onto the reversed number.
while remaining > 0:
    remaining, digit = divmod(remaining, 10)
    reversed_digits = reversed_digits * 10 + digit
print("yes" if value == reversed_digits else "no")
| true
|
4af85253550220a9e178eb7891300d905361821d
|
Python
|
jakestrouse00/image-collection
|
/requestCollect.py
|
UTF-8
| 1,216
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
import requests
import urllib.request
import threading
import os
def download(data, number, fileName):
    """Fetch one Pixabay hit's web-format image and save it as
    imageSets/<fileName>/<number>.jpeg (directory must already exist)."""
    print(f"Downloading image number: {number}")
    response = requests.get(data['webformatURL'])
    with open(f'imageSets/{fileName}/{number}.jpeg', 'wb') as image_file:
        image_file.write(response.content)
# Crawl Pixabay for several search subjects, saving hits under imageSets/<name>/.
fileNames = ['human', 'dog', 'cat', 'chicken', 'baby', 'child']
for fileName in fileNames:
    counter = 1  # running image number, unique within each subject folder
    # NOTE(review): raises FileExistsError if the folder already exists.
    os.makedirs(f'imageSets/{fileName}')
    print(f"Using {fileName}")
    terms = [f"realistic+{fileName}", f"realistic+looking+{fileName}"]
    for term in terms:
        print(f"Searching for: {term}\n\n")
        for i in range(1, 6):  # up to 5 pages of 200 hits per term
            payload = {
                'key': 'API_KEY',  # placeholder — replace with a real Pixabay key
                'q': term,
                'image_type': 'photo',
                'colors': 'rgb',
                'per_page': 200,
                'page': i
            }
            try:
                r = requests.get('https://pixabay.com/api/', params=payload)
                b = r.json()  # probe: stop paging if the body is not valid JSON
            except Exception as e:
                break
            # One (unbounded) thread per hit; download() writes the file.
            for image in r.json()['hits']:
                threading.Thread(target=download, args=(image, counter,fileName)).start()
                counter += 1
| true
|
7c492198edd2e9f68f757c3b58ccacc90ad8352a
|
Python
|
Sandy4321/analysis
|
/univariate_thresholder.py
|
UTF-8
| 1,904
| 2.984375
| 3
|
[] |
no_license
|
import numpy as np
class UnivariateThresholder:
    """Binary-split classifier on a single feature.

    fit() scans every unique value of x as a candidate threshold, assigns
    the majority class to each side of the split, and keeps the threshold
    with the best training accuracy.  predict() then labels samples by
    which side of the stored threshold they fall on.
    """

    def __init__(self):
        self.threshold = None    # best split value found by fit()
        self.left_class = None   # majority class where x <= threshold
        self.right_class = None  # majority class where x >  threshold
        self.classes = None      # unique class labels seen during fit()

    def fit(self, x, y):
        """Learn the best threshold on feature x (1-D, or first column of 2-D)
        for integer/str labels y."""
        classes = np.unique(y)
        self.classes = classes
        # BUG FIX: was `len(x.shape == 2)`, which raises TypeError.
        if len(x.shape) == 2:
            x = x[:, 0]
        thresholds = np.unique(x)
        best_threshold = None
        best_left_class = None
        best_right_class = None
        best_acc = 0
        for t in thresholds:
            gt = x > t
            lte = ~gt
            left = y[lte]
            right = y[gt]
            # BUG FIX: the largest unique value puts every sample on the left
            # side; skip it instead of dividing by a zero-sized right side.
            if len(right) == 0:
                continue
            # Majority class and its count on the left side.
            left_class = None
            left_count = 0
            for c in classes:
                curr_count = np.sum(left == c)
                if curr_count > left_count:
                    left_count = curr_count
                    left_class = c
            n_left = float(len(left))
            left_acc = left_count / n_left
            # Majority class and its count on the right side.
            right_count = 0
            right_class = None
            for c in classes:
                curr_count = np.sum(right == c)
                if curr_count > right_count:
                    right_count = curr_count
                    right_class = c
            n_right = float(len(right))
            right_acc = right_count / n_right
            # Overall training accuracy of this split.
            acc = (n_left * left_acc + n_right * right_acc) / (n_left + n_right)
            if acc > best_acc:
                best_acc = acc
                best_threshold = t
                best_left_class = left_class
                best_right_class = right_class
        # print(...) form works under both Python 2 and 3.
        print("thresh = %s, left_class = %s, right_class = %s, with training accuracy = %s" %
              (best_threshold, best_left_class, best_right_class, best_acc))
        self.threshold = best_threshold
        self.left_class = best_left_class
        self.right_class = best_right_class

    def predict(self, x):
        """Label each sample of x by its side of the fitted threshold."""
        # BUG FIX: same `len(x.shape == 2)` TypeError as in fit().
        if len(x.shape) == 2:
            x = x[:, 0]
        n = len(x)
        y = np.zeros(n, dtype=self.classes.dtype)
        left_mask = x <= self.threshold
        right_mask = ~left_mask
        y[left_mask] = self.left_class
        y[right_mask] = self.right_class
        # BUG FIX: the original returned the input x instead of y.
        return y
| true
|
590ecea73e943641a31e345f66ce79805d95b6ea
|
Python
|
id774/sandbox
|
/python/pandas/demo/by_normal.py
|
UTF-8
| 460
| 3.234375
| 3
|
[] |
no_license
|
from collections import defaultdict
# Aggregate per-product sales from product.csv and print the ten best sellers.
filename = "product.csv"
sales = defaultdict(int)  # unseen products start at zero
with open(filename, 'r') as f:
    next(f, None)  # skip the header row
    for row in f:
        fields = row.split(",")
        sales[fields[0]] += int(fields[1])
top10 = sorted(sales.items(), key=lambda item: item[1], reverse=True)[:10]
print(top10)
| true
|
19bc42ac5223a1a3d782538c50697d6038e40b40
|
Python
|
mycherrylarry/leetcode
|
/python/python-126/singleNumberII.py
|
UTF-8
| 998
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
'''
Solution1. Hashmap
Solution2. convert each number to binary representation, and sum every bit and mod 3(or k)
Result: AC
'''
class Solution:
    """LeetCode 137: every element of A appears three times except one."""

    def singleNumber(self, A):
        """Return the element of A that appears exactly once.

        Sums each binary digit column over all numbers; columns from the
        triplicated values vanish mod 3, leaving the magnitude bits and the
        sign bit of the single element.
        """
        rows = [self.convertToBinary(item) for item in A]
        residues = [sum(col) % 3 for col in zip(*rows)]
        # Rebuild the magnitude from the 63 value bits.  PORTABILITY FIX:
        # the original used the Python 2 builtin reduce(); an explicit loop
        # runs unchanged on Python 2 and 3.
        magnitude = 0
        for bit in residues[1:]:
            magnitude = magnitude * 2 + bit
        # residues[0] is the surviving sign bit.
        return -magnitude if residues[0] == 1 else magnitude

    def convertToBinary(self, n):
        """Encode n as [sign bit] + 63 magnitude bits (list of 0/1 ints)."""
        bits = [0] * 64
        if n < 0:
            bits[0] = 1
            n = abs(n)
        i = 63
        while n != 0:
            bits[i] = n % 2
            n = n >> 1
            i -= 1
        return bits
return li
# Demo runs.  PORTABILITY FIX: single-argument print(...) produces identical
# output under both Python 2 (parenthesized expression) and Python 3.
s = Solution()
print(s.singleNumber([1,1,1,2,2,2,3,3,3,4]))
print(s.singleNumber([-401451,-177656,-2147483646,-473874,-814645,-2147483646,-852036,-457533,-401451,-473874,-401451,-216555,-917279,-457533,-852036,-457533,-177656,-2147483646,-177656,-917279,-473874,-852036,-917279,-216555,-814645,2147483645,-2147483648,2147483645,-814645,2147483645,-216555]))
| true
|
ec56a4ee9d44610f3b1d9060c485dde18d38f30b
|
Python
|
JvN2/NucTool
|
/NucleosomePositionCore.py
|
UTF-8
| 4,759
| 2.734375
| 3
|
[] |
no_license
|
import numpy as nu
import math, re, random, csv
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
from pylab import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
def SavePlot(y, filename, xtitle = '', ytitle = '', title = ''):
    """Plot y against its index and save the figure to filename.

    Renders off-screen through the matplotlib Agg backend (no GUI needed);
    figure is 12x3 inches, written at 120 dpi.
    """
    plot = Figure(figsize=(12, 3))
    ax =plot.add_subplot(111)
    # plot.grid(True)
    ax.set_title(title)
    ax.set_xlabel(xtitle)
    ax.set_ylabel(ytitle)
    # ax.axis(ymax=1, ymin =-3)
    plot.subplots_adjust(left=0.1, bottom=0.2)
    x = range(len(y))
    # ax.plot(x, nu.log2(Y))
    ax.plot(x, y)
    # Attach an Agg canvas to the parent figure and rasterize it to disk.
    FigureCanvasAgg(ax.get_figure()).print_figure(filename, dpi=120)
    return
def Save2DArray(d, filename, names =[] ):
    """Write table *d*, transposed, to *filename* as tab-separated rows.

    *names* is accepted for interface compatibility but is not used.
    BUG FIX: the original ended with a bare `f.close` (missing parentheses),
    so the file handle was never actually closed; `with` guarantees closure
    even if a write fails.
    """
    with open(filename, mode='wb') as f:
        csvwriter = csv.writer(f, dialect='excel-tab')
        for i in transposed2(d):
            csvwriter.writerow(i)
    return
def DisplayPlot(y):
    """Show y against its index in an interactive matplotlib window (blocks)."""
    indices = range(len(y))
    axes = plt.subplot(111)
    plt.subplots_adjust(bottom=0.2)
    plt.plot(indices, y)
    plt.show()
    return
def transposed2(lists, defval=0):
    """Transpose a list of rows into a list of columns, padding ragged rows.

    NOTE(review): relies on Python 2 map() padding unequal-length rows with
    None (the `elem or defval` then turns falsy cells — None, 0, '' — into
    defval); Python 3's map() truncates instead, so confirm before porting.
    """
    if not lists: return []
    return map(lambda *row: [elem or defval for elem in row], *lists)
def CleanSeq(dna):
    """Normalize a sequence string to uppercase DNA containing only G, A, T, C."""
    sequence = dna.upper()                  # case-insensitive input
    sequence = sequence.replace('U', 'T')   # treat RNA input as DNA
    # Keep only the four DNA bases; everything else (whitespace, N, etc.) drops.
    return ''.join(ch for ch in sequence if ch in 'GATC')
def cshift(l, offset):
    """Circularly shift sequence l to the right by offset positions.

    Accepts any array-like and any integer offset (negative, zero, or larger
    than len(l)); always returns an ndarray.  nu.roll performs the same
    wrap-around as the original slice-and-concatenate code but also handles
    empty input without the original's `offset %= 0` ZeroDivisionError.
    """
    return nu.roll(l, offset)
def SaveSequence(dna, filename):
    """Write the sequence string to filename, followed by a newline.

    BUG FIX: the original ended with `retrun` (a typo for `return`) which
    raised NameError on every call, and used the Python 2 `print >>f` form.
    `with` closes the file even on error; f.write(dna + "\n") emits exactly
    what `print >>f, dna` did.
    """
    with open(filename, "w") as f:
        f.write(dna + "\n")
    return
def base2index(base):
    """Map a DNA base letter ('A','C','G','T') to its index 0-3.

    BUG FIX: the original if-chain left `i` unassigned for any other
    character and raised UnboundLocalError; str.index raises a clear
    ValueError instead (no caller in this file catches either).
    """
    return 'ACGT'.index(base)
def getweight(w,p,b):
    """Build the 4x4 dinucleotide probability table for a window of w positions.

    Entry [i][j] is a length-w array giving, at each position x, the
    probability of base j following base i (rows/columns ordered A,C,G,T),
    modulated by a sinusoid of period p and amplitude b around the uniform
    baseline 0.25.  The C row is flat; the other rows trade probability
    between complementary transitions.
    """
    x = nu.arange(w)
    wave = b * nu.sin(2 * math.pi * x / p)   # position-dependent bias term
    flat = x * 0 + 0.25                      # uniform row (used for C)
    up = 0.25 + wave
    down = 0.25 - wave
    up3 = 0.25 + wave / 3
    down3 = 0.25 - wave / 3
    row_a = [up, down3, down3, down3]
    row_c = [flat, flat, flat, flat]
    row_g = [up3, down, up3, up3]
    row_t = [up, down, down, up]
    return [row_a, row_c, row_g, row_t]
def calcE(seq, w, B, p):
    """Per-position binding energy of a w-base window sliding along seq.

    Multiplies position-dependent dinucleotide probabilities (from
    getweight) across the window for both the forward and reverse strands,
    then combines the two strand probabilities into a weighted log-energy.
    Returns an array of length len(seq) - w.
    """
    prob_array = getweight(w, p ,B)
    p_f = []
    p_r = []
    for i in range( len(seq)-w):
        p_s_f = 1.
        p_s_r = 1.
        for s in range(w):
            # NOTE(review): at i=0, s=0 this reads seq[-1], wrapping to the
            # last base of the sequence — confirm the wrap is intended.
            p_s_f = p_s_f * prob_array[base2index(seq[i+s-1])][base2index(seq[i+s])][s]
            # Reverse strand: complement via (3 - index), read right-to-left.
            p_s_r = p_s_r * prob_array[3-base2index(seq[i+w-s])][3-base2index(seq[i+w-s-1])][s]
        p_f = nu.append(p_f,p_s_f)
        p_r = nu.append(p_r,p_s_r)
    # Rescale so a bias-free window (all entries 0.25) scores exactly 1.
    p_f = p_f * 4.**w
    p_r = p_r * 4.**w
    p_r = cshift(p_r,-1)
    E = (p_r * nu.log(p_r) + p_f * nu.log(p_f)) / ( p_r + p_f)
    return E
def smooth(x,window_len):
    """Boxcar-smooth x with a window_len-sample window, mirroring the ends
    so the output has the same length as the input."""
    # Reflect window_len-1 samples at each end to avoid edge artefacts.
    padded = nu.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
    kernel = nu.ones(window_len,'d')
    filtered = nu.convolve(kernel/kernel.sum(), padded, mode='valid')
    pad = len(x[window_len-1:0:-1])
    return filtered[pad:pad+len(x)+1]
def vanderlick(Energy, mu):
    """Forward/backward recursion giving placement probabilities for
    hard rods of length `footprint` on a 1-D energy landscape.

    Energy: per-position energy array.  mu: offset subtracted from every
    position before exponentiation.  Returns P, a probability profile with
    the same length as Energy.
    """
    E_out = Energy - mu
    footprint = 147  # exclusion length in samples (nucleosome-sized)
    forward = nu.zeros(len(Energy))
    for i in range( len(Energy) ):
        # Each forward weight is discounted by the accumulated forward mass
        # of the preceding `footprint` positions (steric exclusion).
        tmp = sum(forward[ max( i - footprint , 0):i])
        forward[i] = nu.exp(E_out[i] - tmp)
    backward = nu.zeros(len(Energy))
    r_forward = forward[::-1]
    for i in range( len(Energy) ):
        # Backward pass runs over the reversed forward weights.
        backward[i] = 1 - sum(r_forward[max(i - footprint , 0):i]* backward[max(i - footprint,0):i])
    P = forward * backward[::-1]
    return P
def CreateDNA(dnalength):
    """Return a DNA string of about dnalength bases: random flanks wrapped
    around the fixed 601 nucleosome-positioning sequence."""
    dna601 = 'ACAGGATGTATATATCTGACACGTGCCTGGAGACTAGGGAGTAATCCCCTTGGCGGTTAAAACGCGGGGGACAGCGCGTACGTGCGTTTAAGCGGTGCTAGAGCTGTCTACGACCAATTGAGCGGCCTCGGCACCGGGATTCTCCAG'
    flanklength = (dnalength - len(dna601))//2
    # Draw the left flank first, then the right, matching the original order.
    left = ''.join(random.choice('ACGT') for _ in range(flanklength))
    right = ''.join(random.choice('ACGT') for _ in range(flanklength))
    return CleanSeq(left + dna601 + right)
def CalcNucPositions(dna, w, mu, B, period):
    """Compute nucleosome energetics and occupancy along *dna*.

    Returns [E_n, E, P, N]: raw window energy from calcE, its smoothed
    form, the per-position probability from vanderlick (zero-padded by
    half a window on each side so it is indexed by absolute position),
    and the coverage N from convolving P with a 146-sample window.
    """
    E_n = calcE(dna, w, B, period)
    E = smooth(E_n,10)  # 10-sample boxcar to suppress single-base noise
    P = vanderlick(E, mu)
    # NOTE(review): math.ceil(w//2) floors first, so both pads equal w//2.
    P = nu.concatenate( (nu.zeros(math.ceil(w//2)), P, nu.zeros(w//2) ) )
    N = nu.convolve(P,nu.ones(146), mode = 'same')
    # print 'Integrated probability', sum(P), ' NLD', dnalength/sum(P)
    #print 'diad', dna.find('GCGCGTACGTGCGTTTAA'), ', max found at ', P.argmax() , P.argmax() - dna.find('GCGCGTACGTGCGTTTAA')
    return [E_n, E, P, N]
# --- Script parameters ---
w = 147          # window length in bases (matches the footprint in vanderlick)
mu = -1.5        # energy offset subtracted inside vanderlick()
B = 0.2          # amplitude of the sinusoidal dinucleotide bias (getweight)
period = 10.1    # sinusoid period in bases (getweight)
dna = CreateDNA(2000)
res = CalcNucPositions(dna, w, mu, B, period)
#DisplayPlot(res[2])
SavePlot(res[3], 'c:\\tmp\\numpytest.jpg' ,'position (bp)', 'P', 'Nucleosome occupancy')
| true
|
60bb1bec05cccbd32540763f9522540a9942a176
|
Python
|
sergeymusienko/bowfast
|
/aligner-compare/scripts/roc-bam.py
|
UTF-8
| 1,547
| 2.53125
| 3
|
[] |
no_license
|
"""
generate qual vs count
"""
import collections
from toolshed import nopen
import sys
def counter(fname):
    """Tally SAM records streamed from fname by mapping quality.

    fname may be a path or a '| cmd' pipeline (handled by toolshed.nopen).
    Returns a defaultdict mapping MAPQ (SAM column 5) -> record count.
    """
    tally = collections.defaultdict(int)
    for record in nopen(fname):
        fields = record.split("\t")
        tally[int(fields[4])] += 1
    return tally
# Usage: roc-bam.py <bam> <regions.bed> <total_read_count>
# samtools view $BAM -L regions.bed
f_bam = sys.argv[1]
f_region = sys.argv[2]
total_reads = float(int(sys.argv[3]))
# Mapped reads (-F 4) overlapping the target regions, binned by MAPQ.
on_target = counter(nopen('| samtools view %s -L %s -F 4' % (f_bam, f_region)))
# Mapped reads that do NOT intersect the regions (-v), binned by MAPQ.
off_target = counter(nopen('| intersectBed -abam %s -b \
        %s -wa -v | samtools view - -F 4' % (f_bam, f_region)))
tot_on_target = on_target.copy()
tot_off_target = off_target.copy()
from matplotlib import pyplot as plt
xs = []
ys = []
print "#mapq\t%on-target\tfalse+\ttrue+"
# Make tot_*[i] cumulative: number of reads with MAPQ >= i.
for i in range(0, 255):
    for j in range(i + 1, 256):
        tot_on_target[i] += on_target[j]
        tot_off_target[i] += off_target[j]
for i in range(1, 257):
    #tot_on_target[i] += tot_on_target[i - 1]
    #tot_off_target[i] += tot_off_target[i - 1]
    on = tot_on_target[i - 1]
    off = tot_off_target[i - 1]
    false_plus = 100. * off / total_reads
    true_plus = 100. * on / total_reads
    # Fraction of on-target reads at this MAPQ cutoff (`or 1` guards /0).
    d = on / (float(on + off) or 1)
    print "\t".join(map(str, (i - 1, d, false_plus, true_plus, off, on)))
    xs.append(false_plus)
    ys.append(true_plus)
plt.plot(xs, ys, 'b.')
# Highlight the MAPQ == 250 point in red on the ROC-style curve.
plt.plot( 100. * off_target[250] / total_reads,
          100. * on_target[250] / total_reads, 'ro')
plt.xlim(0, 8)
plt.ylim(0, 80)
plt.title(f_bam)
plt.show()
| true
|
ed369fec7abf7fbf8a6239349c029c72e4d4157c
|
Python
|
zstall/PythonProjects
|
/Automate the Boring Stuff/Chapter 3/The Collatz Sequence.py
|
UTF-8
| 1,480
| 4.9375
| 5
|
[] |
no_license
|
'''
The Collatz Sequence
Chapter 3 Pg. 77
By Zachary Stall

Asks the user for an integer and prints its Collatz sequence down to 1.
The collatz() function halves the value when it is even and replaces it
with 3*value + 1 when it is odd, printing each intermediate value in a
loop until the sequence reaches one.
'''
print('Enter any integer number: ', end='') # Prompt user to input number (end='' allows user to put num on same line)
number = input() # Get input from user
try:
    num = int(number) # Convert number from string to an int
except ValueError:
    print('Error: Invalid Entry')
    # Retry once; a second invalid entry raises ValueError and exits.
    print('Enter a new integer value: ', end='')
    num = int(input())
def collatz(n):
    """Print the Collatz sequence from n until it reaches 1.

    Each step halves an even value or maps an odd value to 3*n + 1 and
    prints the result.  The step is always applied at least once, so
    collatz(1) prints 4, 2, 1 — matching the original loop's behaviour.
    """
    while True:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        print(n)
        if n == 1:
            break
collatz(num) # Call method with number entered
| true
|