blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b739a61e9f5732aa158cc34d08b5270cb1a52726 | Python | ROMSOC/benchmarks-acoustic-propagation | /source/03_scattering_pu_probe/mpp_impedance.py | UTF-8 | 2,280 | 2.84375 | 3 | [
"MIT"
] | permissive | # ------------------------------------------------------------------ #
# ╦═╗╔═╗╔╦╗╔═╗╔═╗╔═╗
# ╠╦╝║ ║║║║╚═╗║ ║║
# ╩╚═╚═╝╩ ╩╚═╝╚═╝╚═╝
# Reduced Order Modelling, Simulation, Optimization of Coupled Systems
# 2017-2021
#
# Authors :
# Ashwin Nayak, Andres Prieto, Daniel Fernandez Comesana
#
# Disclaimer :
# In downloading this SOFTWARE you are deemed to have read and agreed
# to the following terms: This SOFTWARE has been designed with an
# exclusive focus on civil applications. It is not to be used for any
# illegal, deceptive, misleading or unethical purpose or in any
# military applications. This includes ANY APPLICATION WHERE THE USE
# OF THE SOFTWARE MAY RESULT IN DEATH, PERSONAL INJURY OR SEVERE
# PHYSICAL OR ENVIRONMENTAL DAMAGE. Any redistribution of the software
# must retain this disclaimer. BY INSTALLING, COPYING, OR OTHERWISE
# USING THE SOFTWARE, YOU AGREE TO THE TERMS ABOVE. IF YOU DO NOT
# AGREE TO THESE TERMS, DO NOT INSTALL OR USE THE SOFTWARE.
#
# Acknowledgements:
# The ROMSOC project has received funding from the European Union’s
# Horizon 2020 research and innovation programme under the Marie
# Skłodowska-Curie Grant Agreement No. 765374.
# ------------------------------------------------------------------- #
from math import sqrt
def mpp_impedance(w):
    """
    Compute the acoustic impedance of a Micro-Perforated Plate (MPP)
    in accordance with DY Maa (1987): https://doi.org/10.3397/1.2827694

    :param w: Angular frequency (rad/s)
    :return: complex-type impedance (relative impedance scaled by rho*c)
    """
    # Air properties and plate geometry (SI units).
    viscosity = 1.789e-5   # coefficient of (dynamic) viscosity of air
    density = 1.213        # density of air
    sound_speed = 343.0    # speed of sound
    porosity = 0.57        # ratio of perforated area
    thickness = 50e-6      # thickness of the MPP
    diameter = 77e-6       # diameter of an orifice

    # Perforate constant: orifice diameter relative to the viscous layer.
    k = diameter * sqrt((w * density) / (4 * viscosity))

    # Real (resistive) and imaginary (reactive) parts of the relative impedance.
    resistive = ((32 * viscosity * thickness) / (porosity * density * sound_speed * diameter * diameter)) \
        * (sqrt(1 + k * k / 32) + sqrt(k * diameter / (4 * thickness)))
    reactive = (w * thickness / (porosity * sound_speed)) \
        * (1 + 1 / sqrt(9 + k * k / 2) + 0.85 * diameter / thickness)

    # Scale the dimensionless impedance by the characteristic impedance of air.
    return (resistive + 1j * reactive) * density * sound_speed
#print(mpp_impedance(2*3.141529625*800))
| true |
852ea9b2545d3030cbc0051bf7df62259eea942d | Python | tmkasun/apim_pyclient | /mock_servers/simple_websocket.py | UTF-8 | 2,337 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import asyncio
import ssl
import websockets
from websockets import WebSocketServerProtocol
# Simple WS server for testing APIM WS APIs
"""
http://websockets.readthedocs.io/en/stable/deployment.html#port-sharing
"""
class SimpleServer(WebSocketServerProtocol):
    """Protocol subclass that logs the HTTP handshake before the WS upgrade."""

    async def process_request(self, path, request_headers):
        """Log the request path and every header, then let the handshake proceed."""
        print("TMKASUN:WS:HANDSHAKE:HTTP <<<< path ={path}".format(path=path))
        for header_name in request_headers:
            print("TMKASUN:WS:HANDSHAKE:HTTP <<<< {name} ---- {header}".format(
                name=header_name, header=request_headers[header_name]))
        # return http.HTTPStatus.SWITCHING_PROTOCOLS, [], b'OK\n'
        # Returning None continues the normal WebSocket handshake.
        return None
async def simpleWS(websocket, path):
    """Echo handler: greet each received message, pinging idle connections.

    A client silent for 20s is pinged; if the pong does not arrive within
    10s the handler gives up and closes out.
    """
    # http://websockets.readthedocs.io/en/stable/cheatsheet.html#keeping-connections-open
    while True:
        try:
            name = await asyncio.wait_for(websocket.recv(), timeout=20)
        except asyncio.TimeoutError:
            # No data in 20 seconds: probe the connection with a ping.
            try:
                pong_waiter = await websocket.ping()
                await asyncio.wait_for(pong_waiter, timeout=10)
            except asyncio.TimeoutError:
                # No pong in 10 seconds: treat the peer as gone.
                break
            continue
        print("TMKASUN:WS:WIRE <<<< {name}".format(name=name))
        reply = "Hello {name}".format(name=name)
        await websocket.send(reply)
        print("TMKASUN:WS:WIRE >>>> {reply}".format(reply=reply))
def main():
    """Configure and launch the demo WebSocket server, optionally over TLS."""
    secured = False
    host = "localhost"
    port = 8005

    if secured:
        scheme = "wss"
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        # Certificate chain for the TLS endpoint.
        cert_path = 'certificates/public.crt'
        ssl_context.load_cert_chain(cert_path)
        print("Starting simple Websocket Server with TLS")
    else:
        scheme = "ws"
        ssl_context = None
        print("Starting simple Websocket Server")

    start_server = websockets.serve(
        simpleWS, host, port, create_protocol=SimpleServer, ssl=ssl_context)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(start_server)
    print("Simple Websocket Server started!\nURL => {scheme}://{host}:{port}".format(
        host=host, port=port, scheme=scheme))
    loop.run_forever()
# Start the server when executed as a script (blocks forever in the loop).
if __name__ == '__main__':
    main()
| true |
fed5f7b7337032b65fce37e77c20949996a89877 | Python | Aasthaengg/IBMdataset | /Python_codes/p03130/s322520079.py | UTF-8 | 237 | 2.984375 | 3 | [] | no_license | from collections import Counter
import sys

# Read three pairs of integers from stdin, pooling all six values; print "NO"
# and stop as soon as any value occurs three or more times, otherwise "YES".
values = []
for _ in range(3):
    first, second = map(int, input().split())
    values.extend((first, second))
counts = Counter(values)
if any(count >= 3 for count in counts.values()):
    print('NO')
    sys.exit()
print('YES')
a98c8ea2d6eba37291f97a58d5ad4a635af44aa0 | Python | brunofurmon/challenge-bravo | /src/contracts/currencyconversion/currencyconversionapi.py | UTF-8 | 1,518 | 3.15625 | 3 | [] | no_license | import types
class CurrencyConversionApi(type):
def __new__(cls, name, bases, attr):
# Check upon existence of a string list called 'validCurrencies' for the integration subclass
# Verifies if there are any None or '' or non-str types
if not 'validCurrencies' in attr \
or not isinstance(attr['validCurrencies'], list) \
or len(attr['validCurrencies']) == 0 \
or len(list(filter(lambda objType: not isinstance(objType, str) or not objType, attr['validCurrencies']))) > 0:
raise NotImplementedError(
'Subclass {name} must have a list of (non-empty) strings called ''validCurrencies'' containing all possible currency conversion arguments'
.format(name=name))
if not 'convert' in attr \
or not (attr['convert'], types.MethodType):
# using inspect, get the arguments to test its signature too
raise NotImplementedError(
'Subclass {name} must implement a ''convert(conversionRequest)'' method receiving the correct parameter'
.format(name=name))
return super(CurrencyConversionApi, cls).__new__(cls, name, bases, attr)
# Example class
# class Test(metaclass=CurrencyConversionApi):
# # valid
# validCurrencies = ['USD', 'BTC']
# # invalid
# #validCurrencies = ['', None, [], (), {}, 0, lambda x: x]
# def __init__(self):
# print('hello from subclass')
# t = Test()
| true |
d6bea34b803c3c2e49d755de0df84bad2c0634a6 | Python | MaDITIY/TerminalCalculator | /test/test_parser.py | UTF-8 | 1,684 | 2.796875 | 3 | [] | no_license | """Test module to test parser module."""
import ddt
from unittest import TestCase
from pycalc import exeptions
from pycalc import parser
@ddt.ddt
class TestParser(TestCase):
"""Test class to test parser module."""
@ddt.data(
(('2 + 2', ), ('2 + 2', [])),
(('2 + 2', '-m', 'module'), ('2 + 2', ['module'])),
(('2 + 2', '-m', 'module.py'), ('2 + 2', ['module.py'])),
(
('2 + 2', '-m', 'module1.py', 'module2.py'),
('2 + 2', ['module1.py', 'module2.py'])
),
(
('2 + 2', '-m', 'module1.py', 'module2.py', 'module3.py'),
('2 + 2', ['module1.py', 'module2.py', 'module3.py'])
),
(('2 + 2', '--use_modules', 'module.py'), ('2 + 2', ['module.py'])),
(
('2 + 2', '--use_modules', 'module1.py', 'module2.py'),
('2 + 2', ['module1.py', 'module2.py'])
),
(
(
'2 + 2',
'--use_modules',
'module1.py',
'module2.py',
'module3.py'
),
('2 + 2', ['module1.py', 'module2.py', 'module3.py'])
),
)
@ddt.unpack
def test_parse_arguments(self, arguments, expected_result):
"""Test parse_arguments works as expected."""
self.assertEqual(parser.parse_arguments(arguments), expected_result)
def test_parse_arguments_empty_expression(self):
"""Test 'empty string' is raised in case of empty expression."""
with self.assertRaises(exeptions.GeneralError) as exc:
parser.parse_arguments([''])
self.assertEqual(exc.exception.message, 'empty string')
| true |
f08e253e629f929446edaf9346a0ffefdd6f58d9 | Python | Rajiv-Nayan/HackerRank-Regex | /Introduction/Matching Start & End.py | UTF-8 | 115 | 2.640625 | 3 | [
"MIT"
] | permissive | Regex_Pattern = r"^\d{1}\w{4}[.]{1}$"
import re
# Test stdin against Regex_Pattern (one digit, exactly four word characters,
# then a literal dot, anchored to the whole line) and print "true"/"false".
print(str(bool(re.search(Regex_Pattern, input()))).lower())
| true |
2f94bca8063cdd6f49bd5e24f581f5d01c48594c | Python | snckmykek/magicbox | /contents/budget/reports/report_maker.py | UTF-8 | 2,204 | 2.671875 | 3 | [] | no_license | from kivy.lang import Builder
from kivy.uix.modalview import ModalView
import pandas as pd
import sqlite3
Builder.load_file(r'contents/budget/reports/report_maker.kv')
class ReportMaker(ModalView):
    """Modal dialog that renders the budget report into its 'report' label."""

    def __init__(self, **kwargs):
        super(ReportMaker, self).__init__(**kwargs)

    def get_report(self):
        """Build the report and show shop totals above product totals."""
        shop_totals, product_totals = get_report()
        self.ids.report.text = str(shop_totals) + '\n' + str(product_totals)
class Database(object):
    """Thin wrapper around the sqlite3 budget database.

    Queries are returned as pandas DataFrames via pd.read_sql_query.
    NOTE(review): SQL is assembled by string interpolation, which is only
    acceptable while every input comes from trusted application code.
    """

    def __init__(self):
        self.con = sqlite3.connect('database_havka.db')
        self.cur = self.con.cursor()

    def read_table(self, table_name, columns=None, sort_by=None, values=None,
                   products=None, **wheres):
        """Read rows of *table_name* into a DataFrame.

        :param columns: optional list of column names to select ('*' when None)
        :param sort_by: optional ORDER BY expression
        :param values: unused; kept for interface compatibility
        :param products: optional list of product names, filtered via name IN (...)
        :param wheres: extra column=value equality filters, ANDed together
        :return: pandas.DataFrame with the selected rows
        """
        # Avoid the shared mutable-default-argument pitfall of products=[].
        if products is None:
            products = []

        request = 'SELECT '
        if columns:
            request += ', '.join(columns)
        else:
            request += '*'
        request += ' FROM ' + table_name

        # Collect all filters, then join once — no trailing-separator slicing.
        conditions = ['"{}" = "{}"'.format(col, val) for col, val in wheres.items()]
        if products:
            conditions.append('name IN ({})'.format(
                ', '.join('"{}"'.format(prod) for prod in products)))
        if conditions:
            request += ' WHERE ' + ' AND '.join(conditions)

        if sort_by:
            request += ' ORDER BY ' + sort_by

        df = pd.read_sql_query(request, self.con)
        return df

    def close(self):
        """Close the cursor and the underlying connection."""
        self.cur.close()
        self.con.close()
# Module-level database handle shared by get_report() below.
db = Database()
def get_report():
    """Aggregate purchased products together with their categories.

    Returns a two-element list:
      [0] total price per (date, shop_name)
      [1] total quantity and price per product category
    """
    df = db.read_table('budget_products')
    categories = db.read_table('personal_products', products=list(df['name']), is_category=False,
                               columns=['name', 'category'])
    new_table = pd.merge(df, categories)
    grouped_shop_price = new_table.groupby(['date', 'shop_name'])['price'].sum()
    # Select the two aggregate columns with a list: tuple selection after
    # groupby (df.groupby(...)['a', 'b']) was removed in modern pandas.
    grouped_prod = new_table.groupby(['category'])[['quantity', 'price']].sum()
    return [grouped_shop_price, grouped_prod]
| true |
a62f3e462c9170c44e38bc4bed30dbd933ed0e6b | Python | leohakim/dontforget-backend | /app/server/database.py | UTF-8 | 2,930 | 2.734375 | 3 | [] | no_license | """ Persistence Classes and methods """
from app.config import settings
import motor.motor_asyncio
from bson.objectid import ObjectId
from datetime import datetime
# Async Mongo client plus the collection handle shared by the CRUD helpers.
# NOTE(review): both the database and the collection are named 'dontforget'.
client = motor.motor_asyncio.AsyncIOMotorClient(settings.MONGODB_URL)
database = client.dontforget
task_collection = database.get_collection('dontforget')
# helpers
def task_helper(task) -> dict:
    """Serialize a raw Mongo task document into the API response shape."""
    serialized = {"id": str(task["_id"])}
    for field in ("name", "timestamp", "completed"):
        serialized[field] = task[field]
    return serialized
### CRUD
# Retrieve all tasks present in the database
# TODO: Filter by user (Token JWT)
async def retrieve_tasks():
    """Return every task flagged is_active=True, serialized for the API."""
    tasks = []
    async for task in task_collection.find({"is_active": True}):
        tasks.append(task_helper(task))
    return tasks
# Add a new task into to the database
async def add_task(task_data: dict) -> dict:
    """Insert a new task document and return the stored, serialized copy."""
    task = await task_collection.insert_one(task_data)
    new_task = await task_collection.find_one({"_id": task.inserted_id})
    return task_helper(new_task)
# Retrieve a task with a matching ID
async def retrieve_task(id: str) -> dict:
    """Return the active task with this id, or None when absent/soft-deleted."""
    task = await task_collection.find_one({"_id": ObjectId(id), "is_active": True})
    if task:
        return task_helper(task)
# Update a task with a matching ID
async def update_task(id: str, data: dict):
    """Apply *data* as a partial ($set) update to an active task.

    Returns False for an empty body or a missing task, True otherwise.
    """
    # Return false if an empty request body is sent.
    if len(data) < 1:
        return False
    task = await task_collection.find_one({"_id": ObjectId(id), "is_active": True})
    if task:
        updated_task = await task_collection.update_one(
            {"_id": ObjectId(id)}, {"$set": data}
        )
        # NOTE(review): UpdateResult objects are always truthy, so this reports
        # True whenever the update call itself succeeds, matched or not.
        if updated_task:
            return True
    return False
# Delete a task from the database
async def delete_task(id: str):
    """Soft-delete a task by flagging it inactive.

    Returns True when the task existed and the update was issued,
    False when no task has this id.
    """
    task = await task_collection.find_one({"_id": ObjectId(id)})
    if task:
        # Update only the flag itself: re-sending the whole fetched document
        # in $set would also try to write the immutable _id field.
        updated_task = await task_collection.update_one(
            {"_id": ObjectId(id)}, {"$set": {"is_active": False}}
        )
        if updated_task:
            return True
    return False
# Mark a task as completed in the database
async def complete_task(id: str):
    """Mark a task completed and stamp completed_at with the current time.

    Returns True on success; False when the task is missing (the original
    crashed with a TypeError here) or is already completed.
    """
    task = await task_collection.find_one({"_id": ObjectId(id)})
    if task and not task['completed']:
        # Targeted $set: avoid re-sending the whole document (incl. the
        # immutable _id) back to the server.
        updated_task = await task_collection.update_one(
            {"_id": ObjectId(id)},
            {"$set": {"completed": True, "completed_at": datetime.now()}}
        )
        if updated_task:
            return True
    return False
# Mark a task as uncompleted in the database
async def uncomplete_task(id: str):
    """Clear a task's completed flag and its completion timestamp.

    Returns True on success; False when the task is missing (the original
    crashed with a TypeError here) or is not completed.
    """
    task = await task_collection.find_one({"_id": ObjectId(id)})
    if task and task['completed']:
        # Targeted $set mirrors complete_task and avoids rewriting _id.
        updated_task = await task_collection.update_one(
            {"_id": ObjectId(id)},
            {"$set": {"completed": False, "completed_at": None}}
        )
        if updated_task:
            return True
    return False
| true |
9991faba506a57fa463242c6995bf0117e7061b3 | Python | yunjoon-soh/Spring2017_CSE360_Project1 | /GenerateX.py | UTF-8 | 1,151 | 2.9375 | 3 | [] | no_license | #!/usr/bin/python
import sys
ARGC=len(sys.argv) # python does not have argc
if(ARGC < 2):
print 'Usage: ' + sys.argv[0] + ' [NUMBER OF %08X]'
print 'Usage: ' + sys.argv[0] + ' [STARTING NUMBER I FOR %I$8X] [ENDING NUMBER J FOR %J$8X]'
exit(1)
# base of the string
STR="e "
# NEW_LINE=4 # \n does not work for pretty print, instead adjust the terminal width
# CNT=0
MAX_READ=0x513
MAX_READ_WITHOUT_NL=MAX_READ - 0x1
if(ARGC == 2):
TO=int(sys.argv[1])
for i in range(TO):
STR += "%8X "
elif(ARGC == 3):
FROM=int(sys.argv[1])
TO=int(sys.argv[2])
for i in range(FROM, TO):
# add %N$8x to the string
STR += "%" + str(i) + "$8x "
# L is the added length of STR
L=len(STR)
# if the remaining available length is not enought for next string to add
if( MAX_READ_WITHOUT_NL - L < (5 + len(str(i+1))) + 2):
#print("Preparing " + str(L) + " characters")
while(len(STR) <= MAX_READ_WITHOUT_NL - 1):
STR += " "
#print("Not enough room")
# if not enough room to append something more
print(STR)
#print("Printed " + str(len(STR)) + " characters")
STR="e "
print(STR)
#print( STR )
#print( hex(len(STR)) )
| true |
ec548e8fa5bbf9738f88ba2c82a1dc41c117a9a2 | Python | 15110500442/2017-python- | /day12/多继承.py | UTF-8 | 676 | 2.9375 | 3 | [] | no_license | class Aaimal(object):
    def zu(self):
        """Print the ancestor greeting ('祖宗' = 'ancestor')."""
        print('祖宗')
class Ma(Aaimal):
    """Horse ('Ma'): can fly, laughs, and overrides the ancestor greeting."""
    def __init__(self):
        # NOTE(review): bound to a local name, not self.zi_G — the value is
        # discarded when __init__ returns; confirm whether an attribute was meant.
        zi_G = '有'
    def fly(self):
        # Prints 'fly'.
        print('飞')
    def heihei(self):
        # Prints 'I can laugh heihei'.
        print('我会嘿嘿')
    def zu(self):
        # Overrides Aaimal.zu: 'I am the new ancestor'.
        print('我是新祖宗')
class Lv(Aaimal):
    """Donkey ('Lv'): can swim and has its own laugh."""
    def __init__(self):
        # NOTE(review): local name only — never stored on self; confirm intent.
        BZ = '有'
    def swin(self):
        # Prints 'swim' (method name looks like a typo for "swim"; kept as-is
        # because callers below use .swin()).
        print('游')
    def heihei(self):
        print('我会赫赫')
class LZ(Ma,Lv):
    """Mule: multiple inheritance demo; MRO is LZ -> Ma -> Lv -> Aaimal."""
    def haha(self):
        print('我会哈哈')
    def heihei(self):
        # Overrides both parents' versions.
        print('我呵呵')
    def zu(self):
        # Overrides Ma.zu and Aaimal.zu: 'I am the newest ancestor'.
        print('我是最新的祖宗')
# Demo ('骡子' = mule): swimming comes from Lv, flying from Ma, and the
# mule's own overrides win over every ancestor along the MRO.
骡子 = LZ()
骡子.swin()
骡子.fly()
骡子.haha()
骡子.heihei()
骡子.zu()
| true |
38cbc861a4c94c295fc9562747fcf92f5010c2ec | Python | sjbitcode/panchang | /panchang/helpers/utils.py | UTF-8 | 3,095 | 3.484375 | 3 | [] | no_license | import datetime
import pytz
from panchang.settings import CELERY_TIMEZONE
def get_date_obj():
    """Return the current timezone-aware datetime in the configured Celery timezone."""
    return datetime.datetime.now(pytz.timezone(CELERY_TIMEZONE))
def string_padding(key, width=21):
    '''
    Return the run of spaces used to left-pad a value after *key*.

    The absolute difference keeps the result non-negative even when the
    key is longer than the target width.
    '''
    return ' ' * abs(width - len(key))
def create_string_from_dict(d):
    '''
    Render a dict as newline-terminated "key:<padding>value" lines.

    Ex. {'Red': 'cherry, apple', 'Yellow': 'banana, lemon'} yields
    "Red:<pad>cherry, apple\nYellow:<pad>banana, lemon\n", where the
    padding width comes from string_padding().
    '''
    lines = ['{}:{}{}\n'.format(key, string_padding(key), value)
             for key, value in d.items()]
    return ''.join(lines)
def update_params(query_params):
    '''
    Get datetime object and
    modify query params values with
    year, month, and day.
    Months are 0-based.
    Example of parameters (mypanchang page for September, 2017):
    "?yr=2017&cityhead=New%20York,%20NY&cityname=NewYork-NY&monthtype=0&mn=8"
    '''
    # Mutates the passed dict in place and also returns it for convenience.
    date_obj = get_date_obj()
    query_params['mn'] = date_obj.month-1
    query_params['yr'] = date_obj.year
    return query_params
def military_to_standard(time):
    '''
    Given a time in string format,
    return the standardized 12-hour time string.

    Ex.
    '13:25:34' -> '01:25:34 PM'
    '08:22:41' -> '08:22:41 AM'
    '24:41:22' -> '12:41:22 AM (tomorrow)'
    '75:30:12' -> '03:30:12 AM (3 days from today)'
    '''
    hours, minutes, seconds = time.split(':')
    # Hours of 24 or more roll over into whole extra days.
    days, hour_of_day = divmod(int(hours), 24)

    parsed = datetime.datetime.strptime(
        '{}:{}:{}'.format(hour_of_day, minutes, seconds), '%H:%M:%S')
    formatted_time = parsed.strftime("%I:%M:%S %p")

    if not days:
        return formatted_time
    day_string = '(tomorrow)' if days == 1 else '({} days from today)'.format(days)
    return '{} {}'.format(formatted_time, day_string)
def format_time_ranges(time):
    '''
    Standardize a single military time, or a 'start-end' range of them,
    delegating each component to military_to_standard().
    '''
    if '-' not in time:
        return military_to_standard(time)
    parts = time.split('-')
    return '{} - {}'.format(military_to_standard(parts[0]),
                            military_to_standard(parts[1]))
def get_first_time(time):
    '''
    Return the start of a 'start-end' range, ex. '08:20:10' from
    '08:20:10-10:25:16'; a plain time is returned unchanged.
    '''
    # str.partition yields the whole string when no '-' is present.
    return time.partition('-')[0]
| true |
13078892fcbc2a69517c4af2ac314ba0926e2348 | Python | pabloruancoder/aula-pec-2020 | /atividade002.py | UTF-8 | 130 | 3.390625 | 3 | [] | no_license | def letra(a):
return ord(a)
def main():
    """Read a line from stdin and print its code point via letra() (ord)."""
    a = str(input())
    print(f'{letra(a)}')
if __name__ == "__main__":
main()
| true |
79f60892c667bcc4500651a6c6e10f2667fec889 | Python | victor4107/ddddddddd | /lab4/main.py | UTF-8 | 1,721 | 3.109375 | 3 | [] | no_license | import psycopg2
from tabulate import tabulate
class Psql:
    """Interactive helper around a psycopg2 PostgreSQL connection.

    NOTE(review): queries are interpolated/fed verbatim into execute() —
    acceptable only for trusted, local tooling, never for untrusted input.
    """
    def __init__(self, password, dbname = 'postgres', user ='postgres', host='localhost'):
        self.conn = psycopg2.connect(dbname = dbname,
                            user = user,
                            password = password,
                            host = host)
        self.cursor = self.conn.cursor()
        print('Connect Successed')
    def get_data_command(self):
        """Prompt for one SQL command, run it, and pretty-print the rows."""
        text = input("Input SQL command -->")
        self.cursor.execute(text)
        data = self.cursor.fetchall()
        print(tabulate(data, tablefmt='orgtbl'))
    def get_data_file(self):
        """Run a SQL file line by line; print the result of the last statement.

        NOTE(review): assumes one complete statement per line — confirm the
        expected input format before relying on this.
        """
        file_name = input("Paste absolute SQL file location -->")
        with open(file_name, "r") as f:
            for line in f.readlines():
                self.cursor.execute(line)
        data = self.cursor.fetchall()
        print(tabulate(data, tablefmt='orgtbl'))
    def execute(self, command):
        """Run *command*; return its rows, or False on a programming error."""
        try:
            self.cursor.execute(command)
            data = self.cursor.fetchall()
        except psycopg2.ProgrammingError:
            return False
        return data
    def get_table_data(self, table_name):
        """Return [column_names, rows] for *table_name*, ordered by its id column."""
        self.cursor.execute(f"SELECT * FROM {table_name} ORDER BY id;")
        colnames = [desc[0] for desc in self.cursor.description]
        data = self.cursor.fetchall()
        print([colnames, data])
        return [colnames, data]
    def __del__(self):
        # Best-effort cleanup when the wrapper is garbage-collected.
        self.cursor.close()
        self.conn.close()
if __name__ == "__main__":
psql = Psql('crosspepes123',
dbname='lab2',
user='postgres',
host='localhost')
psql.get_table_data("items")
| true |
dd2ca2c8f6d843ffa4e718c8b4f2825ce65fb8e3 | Python | chuckkang/greatgame | /server.py | UTF-8 | 918 | 2.84375 | 3 | [] | no_license | from flask import Flask, render_template, request, redirect, session
import random
app = Flask(__name__)
# NOTE(review): hard-coded session secret — load from configuration instead.
app.secret_key = "thisisasecret"
@app.route('/', methods=['POST', 'GET'])
def index():
    """Number-guessing game view (Python 2 Flask app).

    GET: picks a fresh random number 0-9 into the session.
    POST: compares the submitted guess against the stored number.
    isCorrect is tri-state: "new" (fresh round), True, or False.
    """
    isCorrect=False
    errMsg = False
    if (request.method=="GET"):
        #create random variable
        session['rnd']= random.randrange(0, 10)
        print session['rnd'], "this is the rnd"
        isCorrect = "new"
    elif (request.method=="POST"):
        guessedvalue = request.form['guess']
        if (guessedvalue==''):
            # Empty guess: show an error instead of comparing.
            errMsg=True
        else:
            if int(request.form['guess'])==session['rnd']:
                isCorrect = True
            else:
                isCorrect = False
    # NOTE(review): a POST arriving before any GET would hit a missing
    # session['rnd'] below — confirm whether that flow is possible.
    print session['rnd'], "this is the rnd"
    return render_template("index.html", isCorrect=isCorrect, rnd=session['rnd'], errMsg=errMsg)
# NOTE(review): debug=True enables the interactive debugger — never in production.
app.run(debug=True) # run our server
9f3f21dfec0c57cdafbc46fb3610deb98119f824 | Python | kaneron676/PyIPS | /end_program.py | UTF-8 | 3,302 | 2.625 | 3 | [] | no_license | import smtplib
import ssl
import os.path
import subprocess
import re
import time
from datetime import date
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
class EndProgram:
    """End-of-run maintenance: prune old iptables rules and email a report."""

    def __init__(self):
        self.port = 465  # SMTP-over-SSL port (465 is also inlined below)
        self.ips_email_address = ""  # sender account; fill in before use
        self.admin_email = ""  # report recipient
        self.iptables_file = 'iptables'  # dump file written by iptables-save
        # NOTE(review): named "two_weeks" but compared against seconds, so this
        # is a 60-second window — confirm the intended retention period.
        self.two_weeks = 60

    def check_firewall(self):
        """Dump the firewall, drop duplicate/expired rules, and restore it.

        Rules carrying a quoted numeric timestamp inside an iptables comment
        are kept only while younger than self.two_weeks seconds. Duplicate
        lines are dropped, with the dedup window reset at each COMMIT line
        (i.e. per table section of the iptables-save output).
        """
        subprocess.check_output('iptables-save > iptables', shell=True)
        if os.path.exists(self.iptables_file):
            lines_seen = []
            outfile = open('iptables-copy', "w+")
            for line in open(self.iptables_file, "r"):
                if line not in lines_seen:
                    if "comment" in line:
                        # Extract the quoted epoch timestamp (expected to start
                        # with "1" then a digit 1-9).
                        # NOTE(review): if no match, m[0] raises IndexError —
                        # confirm every comment line carries a timestamp.
                        m = re.findall(r"\"(1[1-9].*?)\"", line)
                        if time.time() - float(m[0]) < self.two_weeks:
                            outfile.write(line)
                            lines_seen.append(line)
                    else:
                        outfile.write(line)
                        lines_seen.append(line)
                else:
                    # Already-seen line: only COMMIT is kept, and it resets
                    # the dedup window for the next table section.
                    if "COMMIT" in line:
                        lines_seen = []
                        outfile.write(line)
                        lines_seen.append(line)
            outfile.close()
            subprocess.check_output('iptables-restore < iptables-copy', shell=True)

    def make_email_message(self, list_of_events):
        """Email *list_of_events* to the admin as a plain-text + HTML report.

        Prompts interactively for the sender account's password and sends the
        message through Gmail's SMTP-over-SSL endpoint on port 465.
        """
        password = input("Type your password and press enter:")
        message = MIMEMultipart("alternative")
        message["Subject"] = "IPS raport {}".format(date.today().strftime('%d_%m_%y'))
        message["From"] = self.ips_email_address
        message["To"] = self.admin_email
        events = "\n".join(list_of_events)
        # plain-text
        text = """\
        Hi!
        List of today's events:
        {}
        Have a nice day!""".format(events)
        events = "<br>".join(list_of_events)
        # html code
        html = """\
        <html>
        <body style ="font-family: Arial, Helvetica, sans-serif; text-align: center;
        background: linear-gradient(90deg, rgba(2,0,36,1) 0%, rgba(138,61,81,1) 35%, rgba(0,212,255,1) 100%);
        padding: 3%; max-width: 600px;">
        <div style = "position: relative; display:grid; background: rgba(255,255,255,0.94);
        border-radius: 5px; box-shadow: 0px 0px 11px -1px rgba(46,46,46,1); width: 600px;">
        <h2 style=" padding:40px; font-weight:lighter; text-transform:uppercase; color:#414141;"
        >Hi admin</h2>
        <p>List of today's events:<br> </p>
        <p>
        {}
        </p>
        <p style="padding:40px;">Have a nice day!</p>
        <br>
        </div>
        </body>
        </html>
        """.format(events)
        part1 = MIMEText(text, "plain")
        part2 = MIMEText(html, "html")
        message.attach(part1)
        message.attach(part2)
        make_context = ssl.create_default_context()
        with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=make_context) as server:
            server.login(self.ips_email_address, password)
            server.sendmail(
                self.ips_email_address, self.admin_email, message.as_string()
            )
| true |
84101fb2e704b5b743e96e32d9b4347b507709af | Python | moracarlos/Face-recognition-surveillance-system | /recognizer/src/oldMain.py | UTF-8 | 2,240 | 2.640625 | 3 | [] | no_license | import sys
sys.path.append('/usr/local/lib/python2.7/site-packages')
import cv2
import numpy as np
from csvmaker import CSVmaker
def read_csv():
    """Load image paths and integer labels from the faces CSV (path;label lines).

    Returns (images, labels): cv2-loaded BGR arrays and a numpy label array.
    NOTE(review): cv2.imread returns None for unreadable paths — not filtered
    here; confirm every path in the CSV exists.
    """
    images = []
    labels = []
    fileName = "./assets/faces/faces.csv"
    with open(fileName) as f:
        content = f.readlines()
    for l in content:
        pair = l.split(';')
        if pair.__len__() == 2:
            images.append(cv2.imread(pair[0]))
            labels.append(int(pair[1]))
            print pair[0], pair[1]
    print labels
    return images, np.array(labels)
#-----------------------------------------------------------------------------------------------
# --- Training phase: load labeled faces, normalize, train a Fisher model. ---
faceCascade = cv2.CascadeClassifier("./assets/haarcascades/haarcascade_frontalface_alt.xml")
video_capture = cv2.VideoCapture(0)
csv = CSVmaker()
csv.loadFaces()
images = csv.images
labels = np.asarray(csv.labels)
# Shape of the first training image (only used for the debug print below;
# both dimensions are overridden to the fixed 300x300 training size).
imgHeight, imgWidth, channels = images[0].shape
print imgWidth, imgHeight
imgWidth = 300
imgHeight = 300
for i in range(0, images.__len__()):
    images[i] = cv2.resize(images[i], (imgWidth, imgHeight), None, cv2.INTER_CUBIC)
    images[i] = cv2.cvtColor(images[i], cv2.COLOR_BGR2GRAY) #Necessary
    #cv2.imshow("vent", images[i])
    #cv2.waitKey(0)
faceRecognizer = cv2.createFisherFaceRecognizer()
faceRecognizer.train(images, labels)
print "trained"
# --- Live phase: detect faces per webcam frame and predict their labels. ---
while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        face = gray[y:y+h, x:x+w]
        faceResized = cv2.resize(face, (imgWidth, imgHeight), None, cv2.INTER_CUBIC) #Not needed with LBPH
        prediction, confidence = faceRecognizer.predict(faceResized)
        print prediction
    # Display the resulting frame
    # NOTE(review): faceResized is only bound inside the loop above — a frame
    # with no detected face before any detection raises NameError here.
    cv2.imshow('Video', faceResized)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
| true |
17693b0c3bd32a449cc3dc9acf39fedacc817759 | Python | stephenluc/AdventOfCode2017 | /06_code.py | UTF-8 | 1,323 | 3.296875 | 3 | [] | no_license | def memory_reallocation(blocks):
redistribution = 0
states = set()
seen_state = False
while not seen_state:
distribute_memory = max(blocks)
curr_index = blocks.index(distribute_memory)
blocks[curr_index] = 0
while distribute_memory > 0:
curr_index += 1
blocks[curr_index % len(blocks)] += 1
distribute_memory += -1
redistribution += 1
if tuple(blocks) not in states:
states.add(tuple(blocks))
else:
seen_state = True
print redistribution
def part_2(blocks):
    """Redistribute memory banks until a state repeats; print the cycle length.

    Mutates *blocks* in place, exactly like the part-1 routine.
    """
    seen_states = []
    steps = 0
    while True:
        # Empty the fullest bank and deal its blocks out round-robin.
        amount = max(blocks)
        index = blocks.index(amount)
        blocks[index] = 0
        for offset in range(1, amount + 1):
            blocks[(index + offset) % len(blocks)] += 1
        steps += 1
        state = tuple(blocks)
        if state in seen_states:
            break
        seen_states.append(state)
    # Cycle length = steps taken since this state was first recorded.
    print(steps - seen_states.index(tuple(blocks)) - 1)
# Load the puzzle input: one tab-separated line of bank sizes.
# NOTE(review): Python 2 assumed (the solvers use print statements); under
# Python 3, map() returns an iterator that the solvers cannot index.
f = open('06_input.txt', 'r')
blocks = map(int, f.readline().split('\t'))
# Part 1
# memory_reallocation(blocks)
# Part 2
# part_2(blocks)
| true |
310de8aa8be29f6601e53c57a6fd355a8ff018d4 | Python | georgianamaxim/flcd | /lab3/fa.py | UTF-8 | 3,870 | 3.59375 | 4 | [] | no_license | import re
class FiniteAutomata(object):
def __init__(self):
self.__set_of_states = []
self.__alphabet = []
self.__initial_state = ""
self.__final_states = []
self.__transitions = {}
self.read_fa()
def read_fa(self):
with open("fa.txt", "r") as f:
line = f.readline()
self.__set_of_states = [value.strip() for value in
line.strip().split('=')[1].strip()[1:-1].strip().split(',')]
line = f.readline()
self.__alphabet = [value.strip() for value in line.strip().split('=')[1].strip()[1:-1].strip().split(',')]
line = f.readline()
self.__initial_state = \
[value.strip() for value in line.strip().split('=')][1]
line = f.readline().strip()
self.__final_states = \
[value.strip() for value in line.strip().split('=')[1].strip()[1:-1].strip().split(',')]
line = f.readline().strip()
while line != "":
reg = '|'.join(map(re.escape, ["(", ")", "="]))
tokens = re.split(reg, line)
first = tokens[1].split(",")
trans_key = (first[0], first[1])
if trans_key in self.__transitions.keys():
self.__transitions[trans_key].append(tokens[3])
else:
self.__transitions[trans_key] = [tokens[3]]
line = f.readline().strip()
def get_states(self, transitions):
states = []
for t in transitions:
states.append(t[0])
return states
def is_dfa(self):
for trans in self.__transitions:
if len(self.__transitions[trans]) > 1:
return False
return True
def get_set_of_states(self):
return self.__set_of_states
def get_alphabet(self):
return self.__alphabet
def get_final_state(self):
return self.__final_state
def get_initial_state(self):
return self.__initial_state
def get_transitions(self):
return self.__transitions
def get_set_of_final_states(self):
return self.__final_states
def is_accepted(self, seq, current_state):
if self.is_dfa():
accepted = False
seq_elems = list(seq)
if len(seq_elems) > 1 and current_state in self.__final_states and current_state not in self.get_states(self.__transitions):
return False
for tr in self.__transitions.keys():
if tr[0] == current_state and tr[1] == seq_elems[0]:
if len(seq_elems) == 1:
for transition in self.__transitions[tr]:
if transition in self.__final_states:
return True
return False
else:
for _ in self.__transitions[tr]:
accepted = self.is_accepted(seq[1:], self.__transitions[tr][0])
if accepted:
break
return accepted
if __name__ == '__main__':
fa = FiniteAutomata()
fa.read_fa()
ans = True
while ans:
print("""
1.Set Of States
2.Alphabet
3.Initial State
4.Set of final states
5.Exit/Quit
""")
ans = input(">>")
if ans == "1":
print(fa.get_set_of_states())
elif ans == "2":
print(fa.get_alphabet())
elif ans == "3":
print(fa.get_initial_state())
elif ans == "4":
print(fa.get_transitions())
elif ans == "5":
ans = False
break
elif ans != "":
print("\n Not Valid Choice Try again")
| true |
bd5d9fd15f69e833257c2d475e660c26e749d517 | Python | meredytheco/bioagents | /bioagents/bionlg/bionlg_module.py | UTF-8 | 2,795 | 2.59375 | 3 | [
"BSD-2-Clause"
] | permissive | import sys
import json
import logging
logging.basicConfig(format='%(levelname)s: %(name)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger('BIONLG')
from indra.statements import stmts_from_json
from indra.assemblers import EnglishAssembler
from kqml import *
class BioNLGModule(KQMLModule):
def __init__(self, argv):
super(BioNLGModule, self).__init__(argv)
self.tasks = ['INDRA-TO-NL']
for task in self.tasks:
self.subscribe_request(task)
self.ready()
self.start()
def receive_tell(self, msg, content):
tell_content = content[0].to_string().upper()
if tell_content == 'START-CONVERSATION':
logger.info('BioNLG resetting')
def receive_request(self, msg, content):
"""Handle request messages and respond.
If a "request" message is received, decode the task and the content
and call the appropriate function to prepare the response. A reply
message is then sent back.
"""
try:
content = msg.get('content')
task_str = content.head().upper()
logger.info(task_str)
except Exception as e:
logger.error('Could not get task string from request.')
logger.error(e)
self.error_reply(msg, 'Invalid task')
try:
if task_str == 'INDRA-TO-NL':
reply = self.respond_indra_to_nl(content)
else:
self.error_reply(msg, 'Unknown task ' + task_str)
return
except Exception as e:
logger.error('Failed to perform task.')
logger.error(e)
reply = KQMLList('FAILURE')
reply.set('reason', 'NL_GENERATION_ERROR')
reply_msg = KQMLPerformative('reply')
reply_msg.set('content', reply)
self.reply(msg, reply_msg)
def respond_indra_to_nl(self, content):
"""Return response content to build-model request."""
stmts_json_str = content.gets('statements')
stmts = decode_indra_stmts(stmts_json_str)
txts = assemble_english(stmts)
txts_kqml = [KQMLString(txt) for txt in txts]
txts_list = KQMLList(txts_kqml)
reply = KQMLList('OK')
reply.set('NL', txts_list)
return reply
def decode_indra_stmts(stmts_json_str):
stmts_json = json.loads(stmts_json_str)
stmts = stmts_from_json(stmts_json)
return stmts
def assemble_english(stmts):
txts = []
for stmt in stmts:
ea = EnglishAssembler([stmt])
txt = ea.make_model()
if txt and txt[-1] == '.':
txt = txt[:-1]
txts.append(txt)
return txts
if __name__ == "__main__":
BioNLGModule(['-name', 'BIONLG'] + sys.argv[1:])
| true |
fdb27700f14444f7d47a92872d197b9fb46e2acc | Python | nocheacosador/UPLS | /UPLS_Py/utils/determine_shiboken_version.py | UTF-8 | 590 | 2.59375 | 3 | [] | no_license | import os, sys
sys.path.append('/usr/local/bin')
def clean_path(path):
return path if sys.platform != 'win32' else path.replace('\\', '/')
def find_package_path(dir_name):
for p in sys.path:
package = os.path.join(p, dir_name)
if os.path.exists(package):
return clean_path(os.path.realpath(package))
return None
def determine_shiboken_version():
# First check for shiboken6
path = find_package_path('shiboken6')
if path:
print('shiboken6')
path = find_package_path('shiboken2')
if path:
print('shiboken2')
# Runs immediately on import/execution.
determine_shiboken_version()
6336aa5f8b1c7224e60d060c68a9d92bbcfecfa5 | Python | afrench14/PetGame-OOP | /PetGame_MainCode.py | UTF-8 | 2,876 | 3.65625 | 4 | [] | no_license | class Pet:
#constructor
    def __init__(self, petName, petType):
        """Create a pet with default stats.

        petName/petType are display strings; hunger and intelligence start
        at the midpoint (50), boredom at 0, and all status flags are True.
        """
        #setting attributes with an initial value
        self.petName = petName
        self.petType = petType
        self.bored = 0          # 0 = fully content (grows over time)
        self.hunger = 50        # 100 = full, 0 = starved (pet dies)
        self.intelligence = 50
        self.alive = True       # cleared by showHunger when hunger hits 0
        self.entertained = True
        self.educated = True
#show hunger values
def showHunger(self, hunger):
if self.hunger > 100:
self.hunnger = 100
if self.hunger == 100:
print(self.petName, "has", self.hunger, "hunger points remaining and is full, good job")
elif 70 <= self.hunger < 100:
print(self.petName, "has", self.hunger, "hunger points remaining and is satisfied")
elif 50 <= self.hunger < 70:
print(self.petName, "has", self.hunger, "hunger points remaining and is getting hungry")
elif 20 <= self.hunger < 50:
print(self.petName, "has", self.hunger, "hunger points remaining and is a little hungry")
elif 10 <= self.hunger < 20:
print(self.petName, "has", self.hunger, "hunger points remaining and is extremely hungry, feed asap")
elif 0 < self.hunger < 10:
print(self.petName, "has", self.hunger, "hunger points remaining and will soon starve, feed immediately")
elif self.hunger == 0:
print("unfortunately,", self.petName + "'s hunger has reached zero, so", self.petName, "has died")
self.alive = False
else:
print("error returning", self.petName + "'s hunger, sorry :(")
#show bored values
def showBored(self, bored):
if self.bored == 0:
print(self.petName, "is", self.bored, "percent bored, another good job")
elif 0 < self.bored <= 20:
print(self.petName, "is", self.bored, "percent bored and is not too un-entertained just yet")
elif 20 < self.bored <= 50:
print(self.petName, "is", self.bored, "percent bored and will probably get bored some time soon")
elif 50 < self.bored <= 70:
print(self.petName, "is", self.bored, "hunger points remaining and is a little hungry")
elif 10 <= self.bored < 20:
print(self.petName, "is", self.bored, "hunger points remaining and is extremely hungry, feed asap")
elif 0 < self.bored < 10:
print(self.petName, "is", self.bored, "hunger points remaining and will soon starve, feed immediately")
elif self.bored == 0:
print("unfortunately,", self.petName + "'s bored has reached zero")
print("so", self.petName, "has run away to find something more interesting to do")
self.entertained = False
else:
print("error returning", self.petName + "'s bored level, sorry :(")
def outputGreeting(self, petName):
print("hello, i am", self.petName, "and i'm a / an", self.petType)
# Quick demo: create one pet and exercise its reporting methods.
listOfPets = []  # NOTE(review): never populated in this chunk -- presumably for later use
pet1 = Pet("springy", "tiger")
pet1.outputGreeting(pet1.petName)
pet1.showHunger(pet1.hunger)
e2bd49acdebcd032d36f0acca428b118b461d178 | Python | famaxth/Way-to-Coding | /Python/Grades.py | UTF-8 | 256 | 3.65625 | 4 | [] | no_license | mark = int(input("Enter your mark out of 100 : "))
# Map the numeric mark onto a letter grade: >90 A+, 80-90 A, 70-79 B,
# 60-69 C, below 60 D.  `mark` is read from the user above.
if mark > 90:
    grade = "A+"
elif mark >= 80:
    grade = "A"
elif mark >= 70:
    grade = "B"
elif mark >= 60:
    grade = "C"
else:
    grade = "D"
print(grade)
| true |
5c1c85e8a54d724d12a144c522279a8f4a9f7025 | Python | cat-in-the-dark/ludum_43_game | /python/examples/line_prisma_draw.py | UTF-8 | 759 | 2.765625 | 3 | [
"MIT"
] | permissive | import jvcr
import math
base = jvcr.DISPLAY_HEIGHT - 1  # largest drawable coordinate (square canvas assumed -- confirm)
PI_8 = math.pi / 8  # angular step between lines in the rotating bundle
PI_2 = math.pi * 2  # full circle
t = 0  # global animation time, advanced inside update()
# Palette indices (their colors are defined by the jvcr runtime).
GREEN = 11
RED = 8
GREY = 6
BLACK = 0
def update(dt):
    """Per-frame callback: clear the screen and draw the animated line art.

    `dt` is ignored; animation is driven by the module-level accumulator
    `t`, which is advanced a little for every line drawn in the first loop
    (so speed depends on how many lines fit on screen).
    """
    global t
    jvcr.cls(BLACK)
    # Sweeping fan: parallel red/grey lines spaced 8 px apart, phase-shifted
    # by t so they crawl across the screen.
    i = math.fmod(t, 8.0)
    while i < base:
        jvcr.line(i, 0, 0, base - i, RED)
        # NOTE(review): 143 looks like a hard-coded DISPLAY_HEIGHT - 1 that
        # should probably be `base` -- confirm against jvcr's resolution.
        jvcr.line(i, base, base, 143 - i, GREY)
        t += 0.005
        i += 8
    # Rotating bundle: lines from two corners to a moving target point.
    # NOTE(review): cos() is used for both x and y, so the target moves
    # along a diagonal; sin() for y would trace a circle -- confirm intent.
    i = math.fmod(t / 16.0, PI_8)
    while i < PI_2:
        x = base / 2.0 + (base / 4.0) * math.cos(i)
        y = base / 2.0 + (base / 4.0) * math.cos(i)
        jvcr.line(base, 0, x, y, GREEN)
        jvcr.line(0, base, x, y, GREEN)
        i += PI_8
    # Screen border (8 = RED, 6 = GREY palette indices).
    jvcr.line(0, 0, base, 0, 8)
    jvcr.line(0, 0, 0, base, 8)
    jvcr.line(base, 0, base, base, 6)
    jvcr.line(0, base, base, base, 6)
| true |
2ef9cf63b8dcfbd4ef233e2ebc4425d5861dbcfc | Python | portal2312/blog | /docs/develop/N-Z/Python/lib/twisted/study/OReilly.Twisted.Network.Programming.Essentials.2nd.Edition/chapter_7/part_1/logging_echoserver.py | UTF-8 | 668 | 2.546875 | 3 | [] | no_license | # -*- coding:utf8 -*-
from twisted.internet import protocol, reactor
from twisted.python import log
import sys
class Echo(protocol.Protocol):
    """Twisted protocol that logs each received chunk and echoes it back."""
    def dataReceived(self, data):
        # Log the raw bytes, then write them straight back on the same transport.
        log.msg(data)
        self.transport.write(data)
class EchoFactory(protocol.Factory):
    """Builds a fresh Echo protocol instance for every incoming connection."""
    def buildProtocol(self, addr):
        return Echo()
def run():
    """Start an echo server on TCP port 8000, logging to stdout.

    Blocks until the reactor is stopped.  To log to a file instead, use
    log.startLogging(file=open('echo.log', 'w')).
    """
    log.startLogging(sys.stdout)
    factory = EchoFactory()
    reactor.listenTCP(8000, factory)
    reactor.run()
if __name__ == '__main__':
    # Python 2 print statement -- this module targets Python 2 era Twisted.
    print 'ex.7-1(7-2) logging_test.py'
    run()
| true |
5621335f0007b57bf7229bf810690c4eddbb8d8e | Python | 02stevenyang850527/EECS504Final_AVSpeechSeparation | /ICA/ICA.py | UTF-8 | 3,808 | 2.78125 | 3 | [] | no_license | import numpy as np
import sys
import scipy.io.wavfile as wavfile
#########################
### Utility Functions ###
#########################
def mix_audio(wav_list, sr=16000, output_name='mixed.wav'):
    """Load the given wav files, mix them with a random matrix, and save
    the first mixed channel to `output_name`.

    Returns (source, mixed): source is the (sr*3, n) matrix of clean
    3-second clips, mixed is the (sr*3, n) randomly mixed matrix.
    Assumes each input file is mono and at least 3 seconds long at `sr`
    -- TODO confirm with callers.
    """
    n_tracks = len(wav_list)
    source = np.zeros((sr * 3, n_tracks))
    for track, file_name in enumerate(wav_list):
        rate, samples = wavfile.read(file_name)
        source[:, track] = samples[:sr * 3]
    mix_matrix = np.random.random((n_tracks, n_tracks))
    mixed = source @ mix_matrix
    wavfile.write(output_name, sr, mixed[:, 0])
    return source, mixed
def normalize(data):
    """Scale `data` so its largest absolute value becomes 0.99."""
    peak = np.max(np.abs(data))
    return 0.99 * data / peak
def sigmoid(x):
    """Logistic function with overflow-resistant branch selection.

    The exp(-x) form is used for non-negative inputs and the exp(x) form
    for negative ones, so the *selected* branch never overflows (both
    branches are still evaluated, so the unselected one may emit numpy
    overflow warnings for extreme inputs).
    """
    nonneg_form = 1 / (1 + np.exp(-x))
    neg_form = np.exp(x) / (1 + np.exp(x))
    return np.where(x >= 0, nonneg_form, neg_form)
###########################
### ICA ###
###########################
def train_ICA(X):
    """Learn an unmixing matrix W for the mixed signals in X.

    Maximum-likelihood ICA via per-sample gradient ascent with a logistic
    source prior: for each sample x_i,
        W += lr * ((1 - 2*sigmoid(W x_i)) x_i^T + (W^T)^-1)

    Args:
        X: (samples, channels) matrix of mixed observations.
    Returns:
        (channels, channels) unmixing matrix; apply with test_ICA.
    """
    print('Separating tracks ...')
    lr = 1e-3       # fixed step size (the original annealing schedule was dead code)
    max_iter = 100
    M, N = X.shape
    W = np.eye(N)   # start from the identity unmixing
    for it in range(1, max_iter+1):
        for x_i in X:
            tmp = 1 - 2 * sigmoid(np.dot(W, x_i.T))
            W += lr * (np.outer(tmp, x_i) + np.linalg.inv(W.T))
        if it % 10 == 0:
            print("Iteration: %d"%it)
    # Sanity check: show the learned unmixing matrix.
    print("W:")
    print(W)
    return W
def test_ICA(X, W):
return np.dot(X, W.T)
def main():
    """Experiment driver: for idx 1..10, mix data/3.wav with
    '<argv1><idx>.wav', separate with FastICA, and print per-run and mean
    SDR/SIR/SAR/PESQ metrics.

    Usage: python <script> <wav-path-prefix>
    Writes separated channels to result/<i>.wav on every iteration
    (overwriting the previous run's files).
    """
    sdrs, sirs, sars, pesqs = np.zeros(10), np.zeros(10), np.zeros(10), np.zeros(10)
    for idx in range(1, 11):
        sr = 16000
        wav_list = ['data/3.wav', sys.argv[1]+str(idx)+'.wav']
        src, mixed = mix_audio(wav_list)
        """ self-defined ICA
        ## train
        W = train_ICA(mixed)
        ## test
        S = normalize(test_ICA(mixed, W))
        """
        from sklearn.decomposition import FastICA
        W = FastICA(n_components=len(wav_list))
        S = W.fit_transform(mixed)
        ## write audio into file
        for i in range(len(wav_list)):
            output_name = "result/%d.wav"%(i+1)
            wavfile.write(output_name, sr, S[:, i])
        """
        Caluculate sdr, sir, sar
        """
        from mir_eval.separation import bss_eval_sources
        sdr, sir, sar, _ = bss_eval_sources(S.T, src.T) ## shape = (channels, samples)
        print("SDR: ", sdr) ## np.array(channels, )
        print("SIR: ", sir)
        print("SAR: ",sar)
        from pypesq import pesq
        # PESQ is computed on the first channel only (the data/3.wav slot).
        pesq_score = pesq(src[:, 0], S[:, 0], fs=16000)
        print("PESQ: ", pesq_score)
        # Keep only the first channel's scores for the running averages.
        sdrs[idx-1] = sdr[0]
        sirs[idx-1] = sir[0]
        sars[idx-1] = sar[0]
        pesqs[idx-1] = pesq_score
    # Averages over the ten trials.
    print("SDR: ", np.mean(sdrs)) ## np.array(channels, )
    print("SIR: ", np.mean(sirs))
    print("SAR: ", np.mean(sars))
    print("PESQ: ", np.mean(pesqs))
def sample_main():
    """Demo run: mix the five fixed clips under data/, separate them with
    FastICA, write the recovered channels to result/, and print
    SDR/SIR/SAR and PESQ metrics."""
    sample_rate = 16000
    clips = ['data/%d.wav' % k for k in range(1, 6)]
    sources, mixture = mix_audio(clips)
    from sklearn.decomposition import FastICA
    ica = FastICA(n_components=len(clips))
    separated = ica.fit_transform(mixture)
    # Write each recovered channel out as its own wav file.
    for channel in range(len(clips)):
        wavfile.write("result/%d.wav" % (channel + 1), sample_rate, separated[:, channel])
    # Evaluate separation quality (bss_eval expects channels x samples).
    from mir_eval.separation import bss_eval_sources
    sdr, sir, sar, _ = bss_eval_sources(separated.T, sources.T)
    print("SDR: ", sdr)
    print("SIR: ", sir)
    print("SAR: ",sar)
    from pypesq import pesq
    # NOTE(review): passes the full matrices, unlike main() which scores a
    # single channel -- confirm this is what pypesq expects.
    pesq_score = pesq(sources, separated, fs=16000)
    print("PESQ: ", pesq_score)
if __name__ == '__main__':
    ## for displaying
    # sample_main() uses the five fixed clips under data/ and needs no argv.
    sample_main()
    ## for running experiment
    # main() expects sys.argv[1] to be a wav-file path prefix.
    # main()
81d6bb2d5bbe3e8d34b31295f3e170314a7b3423 | Python | Soham-Rakhunde/VInLP | /VideoProcessor.py | UTF-8 | 4,436 | 2.5625 | 3 | [] | no_license | import threading
import cv2
import concurrent.futures
from dataClass import DataClass
from webScraper import Scraper
import numpy as np
class VideoProcessor:
    """Scans a video for licence-plate-like rectangles and hands each new
    detection to a Scraper running in a separate process.

    A background thread started from __init__ drives the frame loop, so
    self.frame / self.plateImage are updated concurrently with the caller.
    """
    def __init__(self, vidPath):
        """Open the video and start the processing thread.

        `vidPath` is assumed to be a file-like object with a .name path
        (e.g. from a file dialog) -- TODO confirm with callers.
        """
        self.capture = cv2.VideoCapture(vidPath.name)
        FPS = self.capture.get(cv2.CAP_PROP_FPS)
        # Process ~5 frames per second of video regardless of source FPS.
        self.FRAME_SKIP = int(FPS / 5)
        # Placeholder images (near-white) shown until real frames arrive;
        # sizes 532x945 and 25x100 presumably match the GUI -- confirm.
        self.frame = np.zeros((532, 945, 3), np.uint8)
        self.plateImage = np.zeros((25, 100, 3), np.uint8)
        self.frame[:] = (250, 250, 255)
        self.plateImage[:] = (250, 250, 255)
        self.isFrameAvailable = True
        thread = threading.Thread(target=self.processFrameWise)
        thread.start()
    def processFrameWise(self):
        """Frame loop: detect plate-like contours and dispatch new ones.

        A candidate must be a 4-vertex contour, wider than twice its
        height, of area >= 500, in the lower part of the cropped region;
        it is accepted only if it differs from the last detection in at
        least three of its four bounding-box coordinates (de-duplication).
        """
        self.isFrameAvailable, self.frame = self.capture.read()
        # pixelTolerance = 12
        pixelTolerance = 120  # max coordinate drift still counted as "same" detection
        count = 0  # frame index used to skip ahead through the video
        d1, d2, d3, d4 = 0, 0, 0, 0  # coordinates of last detection
        with concurrent.futures.ProcessPoolExecutor() as multiProcessExecutor:
            while self.isFrameAvailable:
                self.isFrameAvailable, self.frame = self.capture.read()
                if self.isFrameAvailable:
                    H = self.frame.shape[0]
                    W = self.frame.shape[1]
                    # Search only the region where plates appear: rows
                    # 60-90% of height, columns 20-70% of width.
                    # imgcrop = self.frame[0:h, 0:w]
                    imgcrop = self.frame[int(H * 0.6):int(H * 0.9), int(W * 0.20):int(W * 0.7)]
                    gray = cv2.cvtColor(imgcrop, cv2.COLOR_BGR2GRAY)
                    gray = cv2.bilateralFilter(gray, 13, 17, 17)
                    edged = cv2.Canny(gray, 150, 200)
                    contours, new = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
                    img1 = imgcrop.copy()
                    # Keep only the 30 largest contours.
                    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:30]
                    cv2.drawContours(img1, contours, -1, (255, 0, 0), 2)
                    img3 = imgcrop.copy()
                    for c in contours:
                        peri = cv2.arcLength(c, True)
                        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
                        x, y, w, h = cv2.boundingRect(c)
                        area = cv2.contourArea(c)
                        if len(approx) == 4 and w > 2 * h and area >= 500 and y >= 80:
                            # A..D are True when that edge moved beyond tolerance.
                            A = not -pixelTolerance < (y - d1) < pixelTolerance
                            B = not -pixelTolerance < (y + h - d2) < pixelTolerance
                            C = not -pixelTolerance < (x - d3) < pixelTolerance
                            D = not -pixelTolerance < (x + w - d4) < pixelTolerance
                            if (A and B and (C or D)) or (C and D and (A or B)):
                                # boolean minimized pos equation for selecting ones with 3 differences
                                dataObj = DataClass()
                                cv2.drawContours(img3, c, -1, (0, 255, 0), 2)
                                self.plateImage = imgcrop[y:y + h, x:x + w]
                                dataObj.plateImage = imgcrop[y:y + h, x:x + w]
                                # dataObj.photo = cv2.rectangle(self.frame, (x - 3, y - 3), (x + w + 3, 3 + y + h),
                                #                               (0, 0, 255), 2)
                                # Draw the detection box on the full frame,
                                # offsetting crop coordinates back to frame space.
                                dataObj.photo = cv2.rectangle(self.frame, (int(W * 0.2) + x - 3, int(H * 0.6) + y - 3),
                                                              (int(W * 0.2) + x + w + 3, int(H * 0.6) + 3 + y + h),
                                                              (0, 0, 255), 2)
                                print(y, y + h, x, x + w)
                                print("Diff:", y - d1, y + h - d2, x - d3, x + w - d4, '\n')
                                d1, d2, d3, d4 = y, y + h, x, x + w
                                # while cv2.waitKey(2000) & 0xFF == ord('q'):
                                #     break
                                # Scrape in a worker process; only the first
                                # accepted contour per frame is used.
                                multiProcessExecutor.submit(Scraper, dataObj)
                                break
                    # cv2.imshow("img", img3)
                    # cv2.imshow("imgcrop", imgcrop)
                    cv2.imshow("edg", edged)
                    cv2.waitKey(1)
                    # while cv2.waitKey(1) & 0xFF == ord('q'):
                    #     break
                count += self.FRAME_SKIP # i.e. at 5 fps, this advances one second
                self.capture.set(1, count)
| true |
b4c194f6d9ebdb61111d13898a9176a10a42b005 | Python | niudong1001/word-embed-api | /embed_server.py | UTF-8 | 6,027 | 2.9375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from flask import Flask
from flask.ext.restful import Resource, Api, reqparse
from gensim.models.word2vec import Word2Vec
import argparse
import json
import numpy as np
import random
# Flask application and flask-restful API wrapper; resources are registered
# in the __main__ block at the bottom of the file.
parser = reqparse.RequestParser()  # NOTE(review): unused -- each resource builds its own parser
app = Flask(__name__)
api = Api(app)
def verify_words_exist(words):
    """Check that every word in `words` is present in the global model.

    Args:
        words: list of words to check.
    Returns:
        (True, None) if all words are known, otherwise
        (False, first_missing_word).
    Raises:
        TypeError: if `words` is not a list.  (Was a bare BaseException;
        TypeError is still caught by the resources' `except BaseException`
        handlers.)
    """
    if not isinstance(words, list):
        raise TypeError("Words must be a list!")
    for word in words:
        if word not in model:
            return False, word
    return True, None
def create_error(word):
    """Build the JSON error payload for a word missing from the model."""
    message = "Word '" + word + "' is not exist in the model!"
    return {"code": 400, "error": message}
def create_exception_error(e):
    """Build the JSON error payload from an HTTP-style exception.

    Expects `e` to carry a `code` attribute and a `data` dict with a
    'message' entry (as flask-restful request-parsing errors do --
    TODO confirm).
    """
    return {
        "code": str(e.code),
        "error": str(e.data["message"]),
    }
class MyEncoder(json.JSONEncoder):
    """JSON encoder that understands numpy scalars and arrays."""

    def default(self, obj):
        # Order of the checks is irrelevant: the three numpy types are disjoint.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        # Anything else: defer to the base class (which raises TypeError).
        return super(MyEncoder, self).default(obj)
class Model(Resource):
    """GET /word2vec/model?word=... -> JSON-encoded embedding vector."""

    def get(self):
        try:
            argparser = reqparse.RequestParser()
            argparser.add_argument("word", type=str, required=True, help="Word for query")
            args = argparser.parse_args()
            word = args['word']
            valid, missing = verify_words_exist([word])
            if not valid:
                return create_error(missing)
            # The vector is serialized to a JSON string (numpy-aware encoder).
            return json.dumps(list(model[word]), cls=MyEncoder)
        except BaseException as e:
            print("Exception of model get: ", e)
            return create_exception_error(e)
class Vocab(Resource):
    """GET /word2vec/vocab -> one space-joined page of the vocabulary."""

    def get(self):
        try:
            argparser = reqparse.RequestParser()
            argparser.add_argument("page_number", type=int, help="Page number", default=0)
            argparser.add_argument("page_size", type=int, help="Page size", default=100)
            argparser.add_argument("shuffle", type=str, help="If shuffle the vocab in every request", default="False")
            args = argparser.parse_args()
            # "shuffle" reorders the module-level shuffled copy in place, so
            # repeated shuffled requests see different pages; the canonical
            # `words` list is never reordered.
            if args["shuffle"] == "True":
                random.shuffle(words_shuffle)
                vocab = words_shuffle
            else:
                vocab = words
            start = args["page_number"] * args["page_size"]
            page = vocab[start:start + args["page_size"]]
            return json.dumps(" ".join(page).strip(), ensure_ascii=False)
        except BaseException as e:
            print("Exception of vocab get: ", e)
            return create_exception_error(e)
class VocabSize(Resource):
    """GET /word2vec/vocab_size -> number of words in the loaded model."""

    def get(self):
        try:
            return len(model.index2word)
        except BaseException as e:
            # Log message kept as-is (copy-pasted from Vocab in the original).
            print("Exception of vocab get: ", e)
            return create_exception_error(e)
class MostSimilar(Resource):
    """GET /word2vec/most_similar -> top-n words most similar to the query."""

    def get(self):
        try:
            argparser = reqparse.RequestParser()
            argparser.add_argument("positive_words", type=str, help="Positive words is a sets of words(split with camma) or one word", required=True)
            argparser.add_argument("negative_words", type=str, help="Negative words is a word to query")
            argparser.add_argument("topn", type=int, help="Get top n similar words", default=5)
            args = argparser.parse_args()
            positive_words = args["positive_words"].split(",")
            negatives_raw = args["negative_words"]
            negative_words = negatives_raw.split(",") if negatives_raw is not None else []
            valid, missing = verify_words_exist(positive_words + negative_words)
            if not valid:
                return create_error(missing)
            return model.most_similar(positive=positive_words, negative=negative_words, topn=args["topn"])
        except BaseException as e:
            print("Exception of inference get: ", e)
            return create_exception_error(e)
class Similarity(Resource):
    """GET /word2vec/similarity -> cosine similarity between two words."""

    def get(self):
        try:
            argparser = reqparse.RequestParser()
            argparser.add_argument("word_a", type=str, help="word_a is a required str query", required=True)
            argparser.add_argument("word_b", type=str, help="word_b is a required str query", required=True)
            args = argparser.parse_args()
            word_a = args["word_a"]
            word_b = args["word_b"]
            valid, missing = verify_words_exist([word_a, word_b])
            if not valid:
                return create_error(missing)
            return model.similarity(word_a, word_b)
        except BaseException as e:
            print("Exception of inference get: ", e)
            return create_exception_error(e)
if __name__ == "__main__":
    # `global` at module scope is a no-op statement; kept from the original.
    # These names are the module-level globals read by the Resource classes.
    global model
    global words
    global words_shuffle
    # parse argument
    p = argparse.ArgumentParser()
    p.add_argument("--model", help="Path to the pre-trained model", required=True)
    # NOTE(review): any value passed on the CLI arrives as a non-empty string
    # and is therefore truthy; only the default (False) loads as text.
    p.add_argument("--binary", help="Specifies if the loaded model is binary", default=False) # can't be wrong!
    p.add_argument("--host", help="Host name", default="localhost")
    p.add_argument("--port", help="Host port", default=5555)
    args = p.parse_args()
    # create model
    print("Loading model...")
    model = Word2Vec.load_word2vec_format(args.model, binary=args.binary, unicode_errors='ignore')
    print("Finished load.")
    # get words
    words = model.index2word
    # Separate copy so Vocab's in-place shuffle never reorders `words`.
    words_shuffle = words.copy()
    base_url = "/word2vec"
    api.add_resource(Model, base_url+"/model")
    api.add_resource(Vocab, base_url+"/vocab")
    api.add_resource(VocabSize, base_url + "/vocab_size")
    api.add_resource(MostSimilar, base_url + "/most_similar")
    api.add_resource(Similarity, base_url + "/similarity")
    # start web
    app.run(host=args.host, port=args.port, debug=False) # debug=True
| true |
e54553d13cf2c9af50c3286abf7b45a8e82ca7d4 | Python | ahhnljq/GAN_PID | /simulate_delta_gan.py | UTF-8 | 1,568 | 2.703125 | 3 | [
"MIT"
] | permissive | import numpy as np
from utils_log import MetricSaver
data = 1.       # target value the simulated generator tries to reach
delta_t = 0.01  # Euler integration time step shared by both classes below
class GAN_simualte(object):
    """Toy continuous-time GAN dynamics (the typo'd class name is kept for
    compatibility with existing callers).

    `g` is the generator state and `d` the discriminator state.  d_step
    integrates the controller-shaped error between the module-level target
    `data` and g, minus a damping term; g_step integrates d.  Both steps
    use the module-level time step `delta_t`.
    """

    def __init__(self, gantype, controller_d, damping):
        self.type = gantype
        self.controller_d = controller_d  # callable shaping the error signal
        self.damping = damping
        self.d = 0.   # discriminator state
        self.g = 0.   # generator state

    def d_step(self):
        """Advance the discriminator by one Euler step."""
        raw_error = data - self.g
        shaped_error = self.controller_d(raw_error)
        self.d += shaped_error * delta_t - self.damping * self.d

    def g_step(self):
        """Advance the generator by one Euler step, driven by d."""
        self.g += self.d * delta_t
class PID_controller(object):
    """Discrete PID controller over the module-level time step `delta_t`.

    Stateful: `i_buffer` accumulates the integral of the error and
    `d_buffer` remembers the previous error sample for the
    finite-difference derivative.
    """

    def __init__(self, p, i, d):
        # Proportional / integral / derivative gains.
        self.p = p
        self.i = i
        self.d = d
        self.i_buffer = 0.  # running integral of the error
        self.d_buffer = 0.  # previous error sample

    def __call__(self, error):
        """Return the PID-shaped control signal for this error sample."""
        self.i_buffer += error * delta_t
        derivative = (error - self.d_buffer) / delta_t
        self.d_buffer = error
        return self.p * error + self.i * self.i_buffer + self.d * derivative
p, i, d = 1, 0, 0  # PID gains for the discriminator's error controller
damping = 2.
# Metric savers record the generator/discriminator trajectories under
# ./delta_gan/, tagged with the gain/damping configuration.
saver = MetricSaver("Generator_{}_{}_{}_{}_g".format(p, i, d, damping),
                    "./delta_gan/",
                    save_on_update=False)
saver1 = MetricSaver("Generator_{}_{}_{}_{}_d".format(p, i, d, damping),
                     "./delta_gan/",
                     save_on_update=False)
controller = PID_controller(p, i, d)
gan = GAN_simualte('gan', controller, damping)
# NOTE(review): the loop index reuses `i` (the integral gain) -- harmless
# because the gains were already consumed above, but confusing.
for i in range(200000):
    gan.d_step()
    gan.g_step()
    saver.update(i, gan.g, save=False)
    saver1.update(i, gan.d, save=False)
saver.save()
saver1.save()
| true |
1d87d8a1a672bae9ca6a4c0465208d02add82d73 | Python | aajshaw/Ringable-Ensemble | /Methods.py | UTF-8 | 17,105 | 2.78125 | 3 | [
"Unlicense"
] | permissive | from threading import Thread
import socket
from time import sleep
from sys import byteorder
from Config import Config
import configparser
from Row import Row
import os
import sys
from random import randrange
def bell_indicators(MAX_BELLS,
                    INDICATE_BELL_NUMBER_SHIFT,
                    INDICATE_BELL_HANDSTROKE,
                    INDICATE_BELL_BACKSTROKE,
                    INDICATE_BELL_GRAPHIC_SHOW,
                    INDICATE_BELL_GRAPHIC_CLEAR):
    """Precompute the GUI indicator command value for every bell.

    Each command is the event-type base code OR-ed with the bell index
    shifted left by INDICATE_BELL_NUMBER_SHIFT.  Returns four dicts keyed
    by bell index 0..MAX_BELLS-1, in the order
    (handstroke, backstroke, graphic_show, graphic_clear).
    """
    def encode(base):
        return {ndx: base | (ndx << INDICATE_BELL_NUMBER_SHIFT)
                for ndx in range(MAX_BELLS)}

    return (encode(INDICATE_BELL_HANDSTROKE),
            encode(INDICATE_BELL_BACKSTROKE),
            encode(INDICATE_BELL_GRAPHIC_SHOW),
            encode(INDICATE_BELL_GRAPHIC_CLEAR))
class Row():
    """One row of ringing: which bell strikes in each place, plus any calls
    made during the row.

    NOTE(review): this local class shadows the Row imported from the Row
    module at the top of the file -- presumably intentional; confirm.
    """

    def __init__(self, number_of_bells):
        # positions[place] holds the bell number striking there (None = unset).
        self.positions = [None] * number_of_bells
        self.call_go = False
        self.call_thats_all = False
        self.call_bob = False
        self.call_single = False
        self.call_stand = False

    def __str__(self):
        # Any active calls come first, each followed by a space, then the
        # list of positions.
        calls = []
        if self.call_go:
            calls.append('Go Method')
        if self.call_thats_all:
            calls.append('Thats All')
        if self.call_bob:
            calls.append('Bob')
        if self.call_single:
            calls.append('Single')
        if self.call_stand:
            calls.append('Stand Next')
        prefix = ''.join(call + ' ' for call in calls)
        return prefix + str(self.positions)
class Extent():
    """The full row sequence for one ringable extent of a Method: intro
    rounds, the body built lead-by-lead from the extent definition
    (P/B/S characters per lead), and the closing rounds, with the
    Go/That's All/Bob/Single/Stand calls attached to the proper rows.
    """
    # Lead-type markers remembered so the NEXT lead can apply the matching
    # *_start work (PLAIN_START/BOB_START/SINGLE_START in the method file).
    LEAD_TYPE_PLAIN = 'P'
    LEAD_TYPE_BOB = 'B'
    LEAD_TYPE_SINGLE = 'S'
    def __init__(self, method, extent_id, cover = True, intro_courses = 1, extent_courses = 1, wait_learner = False):
        """Build all rows for `extent_courses` courses of the given extent.

        cover: ring an extra cover bell striking last in every row.
        intro_courses: whole rounds rung before the 'Go Method' call.
        wait_learner: if True the player waits for silenced bells to be
        rung by the learner (consumed in methods()).
        """
        self.name = method.extent_name(extent_id)
        self.length = method.extent_length(extent_id) * extent_courses
        self.definition = method.extent_definition(extent_id)
        self.wait = wait_learner
        # If the extent is mutable it can be shift shuffled
        # The sections that can be shifted are delimited by '-' characters so will be split, shifted and then stuck togather
        if method.extent_mutable(extent_id):
            # Remove all formatting spaces
            self.definition = self.definition.replace(' ', '')
            # Break into sections
            sections = self.definition.split('-')
            for ndx in range(len(sections)):
                s = sections[ndx]
                # Decide how many shifts to perform on the section
                # NOTE(review): randrange(len(s)) raises ValueError if a
                # section is empty -- definitions are assumed non-empty.
                shifts = randrange(len(s))
                for shift in range(shifts):
                    # Rotate the section right by one character.
                    s = s[-1] + s[0:-1]
                sections[ndx] = s
            # Reassemble the sections
            self.definition = ''.join(sections)
        # The number of bells being rung is the number of bells in the method plus the optional cover
        self.number_of_bells = method.number_of_bells()
        self.cover = cover
        if self.cover:
            self.number_of_bells += 1
        # A reference to the parent method is only needed for dumping to text
        self.method = method
        self.rows = []
        # The last lead is 'plain' to force a plain start in the first lead
        last_lead = Extent.LEAD_TYPE_PLAIN
        for courses in range(extent_courses):
            # Build the course
            for lead in self.definition:
                Extent._add_lead_start(last_lead, self.rows, method, self.length, cover)
                if lead in ('p', 'P'):
                    Extent._add_plain(self.rows, method, self.length, cover)
                    last_lead = Extent.LEAD_TYPE_PLAIN
                elif lead in ('b', 'B'):
                    Extent._add_bob(self.rows, method, self.length, cover)
                    last_lead = Extent.LEAD_TYPE_BOB
                elif lead in ('s', 'S'):
                    Extent._add_single(self.rows, method, self.length, cover)
                    last_lead = Extent.LEAD_TYPE_SINGLE
        # Add the intro rounds and the Go Method call to the last backstroke of the intro
        intro = []
        for ndx in range(intro_courses):
            Extent._add_round(intro, self.number_of_bells)
        intro[((intro_courses - 1) * 2) + 1].call_go = True
        self.rows = intro + self.rows
        # Add That's All to the second to last row of the extent
        self.rows[len(self.rows) - 2].call_thats_all = True
        # If the extent ended on a back stroke add the extra half round
        if len(self.rows) % 2 != 0:
            Extent._add_half_round(self.rows, self.number_of_bells)
        # Add the final rounds and the call to Stand
        Extent._add_round(self.rows, self.number_of_bells)
        self.rows[len(self.rows) - 2].call_stand = True
    # The helpers below take no `self`; they are called through the class
    # (Extent._xxx(...)), which works as plain functions in Python 3.
    def _add_half_round(rows, bells):
        """Append one row of rounds (bell n striking in place n)."""
        row = Row(bells)
        for ndx in range(bells):
            row.positions[ndx] = ndx + 1
        rows.append(row)
    def _add_round(rows, bells):
        """Append a whole round: handstroke plus backstroke of rounds."""
        Extent._add_half_round(rows, bells)
        Extent._add_half_round(rows, bells)
    def _add_lead_start(last_lead, rows, method, length, cover):
        """Apply the lead-start work matching the PREVIOUS lead's type."""
        if last_lead == Extent.LEAD_TYPE_PLAIN:
            Extent._apply(rows, method.number_of_bells(), method.plain_start, length, cover)
        elif last_lead == Extent.LEAD_TYPE_BOB:
            Extent._apply(rows, method.number_of_bells(), method.bob_start, length, cover)
        elif last_lead == Extent.LEAD_TYPE_SINGLE:
            Extent._apply(rows, method.number_of_bells(), method.single_start, length, cover)
    def _add_plain(rows, method, length, cover):
        """Append one plain lead: the main tracks then the plain-lead end."""
        Extent._apply(rows, method.number_of_bells(), method.tracks, length, cover)
        Extent._apply(rows, method.number_of_bells(), method.plain, length, cover)
    def _add_bob(rows, method, length, cover):
        """Append one bobbed lead, flagging the Bob call on the row before it."""
        Extent._apply(rows, method.number_of_bells(), method.tracks, length, cover)
        # Call the Bob at the beginning of the last row BEFORE the Bob
        rows[len(rows) - 1].call_bob = True
        Extent._apply(rows, method.number_of_bells(), method.bob, length, cover)
    def _add_single(rows, method, length, cover):
        """Append one singled lead, flagging the Single on the row before it."""
        Extent._apply(rows, method.number_of_bells(), method.tracks, length, cover)
        # Call the Single at the beginning of the last row BEFORE the Single
        rows[len(rows) - 1].call_single = True
        Extent._apply(rows, method.number_of_bells(), method.single, length, cover)
    def _apply(rows, number_of_bells, work, length, cover):
        """Extend `rows` with the given work (dict: track -> place list).

        New rows are created as needed (up to `length`), each pre-filled
        with the cover bell in last place when `cover` is set; then every
        bell is traced from its place in the last pre-existing row through
        the places its track dictates.
        """
        prev = len(rows) - 1
        bells = number_of_bells
        if cover:
            bells += 1
        if len(work) > 0:
            # All tracks are assumed to be the same length as track 0.
            for ndx in range(len(work[0])):
                if length > len(rows):
                    row = Row(bells)
                    if cover:
                        row.positions[bells -1] = bells
                    rows.append(row)
            for track in range(number_of_bells):
                if prev < 0:
                    # No previous row: bells start from rounds order.
                    bell = track + 1
                else:
                    bell = rows[prev].positions[track]
                curr = prev + 1
                for t in work[track]:
                    if curr < length:
                        rows[curr].positions[t - 1] = bell
                        curr += 1
    def to_mdf(self):
        """Print the extent in .mdf format (assumes two intro/extro rounds)."""
        print('[INFO]')
        print('name={} {}'.format(self.method.name, self.name))
        print('bells={}'.format(self.method.number_of_bells()))
        print('rows={}'.format(self.length))
        print()
        # For dump purposes 'assume' there are two intro and two extro rounds
        print('[ROWS]')
        row_id = 1
        for ndx in range(self.length):
            r = self.rows[ndx + 2]
            print('M{:04}='.format(row_id,), end = '')
            if r.call_bob:
                print('(B) ', end = '')
            if r.call_single:
                print('(S) ', end = '')
            for p in range(self.method.number_of_bells()):
                print('{} '.format(r.positions[p]), end = '')
            print()
            row_id += 1
    def dump(self):
        """Debug print of every row, including intro/extro rounds and the
        cover bell (row ids start at -1 so the body starts near 1)."""
        row_id = -1
        for r in self.rows:
            print('M{:04}='.format(row_id,), end = '')
            if r.call_bob:
                print('(B) ', end = '')
            if r.call_single:
                print('(S) ', end = '')
            for p in r.positions:
                print('{} '.format(p), end = '')
            print()
            row_id += 1
class Method():
    """A change-ringing method loaded from a .mcf (configparser) file.

    Exposes the per-bell 'tracks' (the main work) plus the optional
    lead-start and call variants (plain/bob/single), each as a dict
    mapping 0-based bell index -> list of places.
    """

    # Optional sections: (attribute name, config section).  TRACKS is
    # mandatory and loaded separately so a missing section still raises.
    _OPTIONAL_SECTIONS = (
        ('plain_start', 'PLAIN_START'),
        ('plain', 'PLAIN'),
        ('bob_start', 'BOB_START'),
        ('bob', 'BOB'),
        ('single_start', 'SINGLE_START'),
        ('single', 'SINGLE'),
    )

    def __init__(self, file):
        self.definition = configparser.ConfigParser()
        self.definition.optionxform = str  # keep option keys case-sensitive
        self.definition.read(file)
        # NOTE(review): this string attribute shadows the name() method
        # below on instances, so instance.name() is not callable -- kept
        # exactly as in the original.
        self.name = self.definition.get('INFO', 'name')
        self.tracks = self._load_section('TRACKS')
        for attr, section in Method._OPTIONAL_SECTIONS:
            if self.definition.has_section(section):
                setattr(self, attr, self._load_section(section))
            else:
                setattr(self, attr, {})

    def _load_section(self, section):
        """Parse one section into {bell index (0-based): [places...]}."""
        return {int(key) - 1: [int(v) for v in self.definition[section][key].split()]
                for key in self.definition[section]}

    def name(self):
        # Unreachable through instances (see __init__); kept for compatibility.
        return self.name

    def extent_exists(self, extent_id):
        """True if the definition contains a section named EXTENT-<id>."""
        return 'EXTENT-' + str(extent_id) in self.definition

    def number_of_bells(self):
        return self.definition.getint('INFO', 'bells')

    def coverable(self):
        """Whether a cover bell may be added (defaults to False)."""
        return self.definition.getboolean('INFO', 'coverable', fallback = False)

    def extent_name(self, key):
        return self.definition.get(key, 'NAME')

    def extent_length(self, key):
        return self.definition.getint(key, 'LENGTH')

    def extent_size(self, key, cover, intros, courses):
        """Total strikes: the extent body for `courses` courses plus
        `intros` intro rounds and the two closing rounds, all scaled by
        the number of bells (including the cover bell when rung)."""
        bells = self.number_of_bells()
        if self.coverable() and cover:
            bells += 1
        size = self.extent_length(key) * bells * courses
        size += intros * bells * 2
        size += bells * 2  # always two extro rounds
        return size

    def extent_definition(self, key):
        return self.definition.get(key, 'DEFINITION')

    def extent_mutable(self, key):
        """Whether the extent may be shift-shuffled (defaults to False)."""
        return self.definition.getboolean(key, 'MUTABLE', fallback = False)
def methods(conn, ring_addr, ring_port):
    """Method-playing worker: receives comma-separated commands over `conn`
    (a pipe-like object) and rings the loaded extent by sending one-byte
    UDP commands to the ringer at (ring_addr, ring_port) and indicator
    bytes to the GUI event listener.

    Commands: Exit, Start, Stop, Pace,<secs>, Load,<method>,<extent>,
    <cover|nocover>,<intros>,<courses>,<wait>, Play,<bell>,<True|False>,
    Rung,<bell>.
    """
    # When frozen with PyInstaller, data files live under sys._MEIPASS.
    app_path = getattr(sys, '_MEIPASS', os.path.abspath(os.path.dirname(__file__)))
    config = Config()
    MAX_BELLS = config.getint('BELLS', 'bells')
    # One-byte command codes understood by the ringer.
    GO = config.getint('STRIKE_COMMANDS', 'go')
    THATS_ALL = config.getint('STRIKE_COMMANDS', 'thats_all')
    BOB = config.getint('STRIKE_COMMANDS', 'bob')
    SINGLE = config.getint('STRIKE_COMMANDS', 'single')
    STAND = config.getint('STRIKE_COMMANDS', 'stand_next')
    # Base codes for the GUI indicator bytes (bell number is OR-ed in by
    # bell_indicators below).
    INDICATE_BELL_NUMBER_SHIFT = config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_bell_number_shift')
    INDICATE_BELL_HANDSTROKE = config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_type_bell') << config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_type_shift')
    INDICATE_BELL_BACKSTROKE = INDICATE_BELL_HANDSTROKE + \
                               (config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_stroke_mask') << config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_stroke_shift'))
    INDICATE_BELL_GRAPHIC_CLEAR = config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_type_graphic') << config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_type_shift')
    INDICATE_BELL_GRAPHIC_SHOW = INDICATE_BELL_GRAPHIC_CLEAR + \
                                 (config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_graphic_mask') << config.getint('GUI_EVENT_LISTENER_COMMANDS', 'indicate_graphic_shift'))
    handstroke_indicators, backstroke_indicators, graphic_show_indicators, graphic_clear_indicators = bell_indicators(MAX_BELLS,
                                                                                                                      INDICATE_BELL_NUMBER_SHIFT,
                                                                                                                      INDICATE_BELL_HANDSTROKE,
                                                                                                                      INDICATE_BELL_BACKSTROKE,
                                                                                                                      INDICATE_BELL_GRAPHIC_SHOW,
                                                                                                                      INDICATE_BELL_GRAPHIC_CLEAR)
    # Mutable state shared (via closure) with the play thread below.
    bells = [True] * MAX_BELLS        # True = machine rings this bell
    bells_rung = [False] * MAX_BELLS  # set by 'Rung' when a learner strikes
    stop_playing = False
    method = None
    extent = None
    pace = 3.0                 # seconds per whole row
    pause = pace / MAX_BELLS   # gap between individual strikes
    courses = 1        # NOTE(review): unused here -- counts come from the Load command
    intro_rounds = 1   # NOTE(review): unused here as well
    def play(ring_port):
        """Ring the loaded extent row by row (runs on its own thread)."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        ndx = 0  # NOTE(review): unused
        stroke_type = "B"  # toggled before each row, so the first row is "H"
        LOOK_TO = config.getint('STRIKE_COMMANDS', 'look_to')
        sock.sendto(LOOK_TO.to_bytes(1, byteorder), (ring_addr, ring_port))
        sleep(4)
        for row in extent.rows:
            if stop_playing:
                break
            # Announce any calls attached to this row before ringing it.
            if row.call_go:
                sock.sendto(GO.to_bytes(1, byteorder), (ring_addr, ring_port))
            if row.call_thats_all:
                sock.sendto(THATS_ALL.to_bytes(1, byteorder), (ring_addr, ring_port))
            if row.call_bob:
                sock.sendto(BOB.to_bytes(1, byteorder), (ring_addr, ring_port))
            if row.call_single:
                sock.sendto(SINGLE.to_bytes(1, byteorder), (ring_addr, ring_port))
            if row.call_stand:
                sock.sendto(STAND.to_bytes(1, byteorder), (ring_addr, ring_port))
            stroke_type = "H" if stroke_type == "B" else "B"
            for strike in row.positions:
                if stop_playing:
                    break
                sock.sendto(graphic_show_indicators[strike - 1].to_bytes(1, byteorder), (config.get('GUI_EVENT_LISTENER', 'addr',), config.getint('GUI_EVENT_LISTENER', 'port')))
                sleep(pause / 2.0)
                # NOTE(review): on a hand stroke ('H') the BACKstroke
                # indicator is sent, and vice versa -- presumably the GUI
                # shows the bell moving to its next position; confirm.
                if stroke_type == 'H':
                    indicator = backstroke_indicators[strike - 1]
                else:
                    indicator = handstroke_indicators[strike - 1]
                sock.sendto(indicator.to_bytes(1, byteorder), (config.get('GUI_EVENT_LISTENER', 'addr',), config.getint('GUI_EVENT_LISTENER', 'port')))
                if bells[strike - 1]:
                    # Machine-rung bell: send the strike to the ringer.
                    sock.sendto(strike.to_bytes(1, byteorder), (ring_addr, ring_port))
                else:
                    # Learner's bell: optionally wait until it is rung.
                    if extent.wait:
                        while not bells_rung[strike - 1] and not stop_playing:
                            sleep(0.01)
                        bells_rung[strike - 1] = False
                sleep(pause / 2.0)
                sock.sendto(graphic_clear_indicators[strike - 1].to_bytes(1, byteorder), (config.get('GUI_EVENT_LISTENER', 'addr',), config.getint('GUI_EVENT_LISTENER', 'port')))
            if stroke_type == 'B':
                # Hand stroke lead pause
                sleep(pause)
    t = None  # the current play thread, if any
    while True:
        # Commands arrive as comma-separated fields; field 0 is the verb.
        command = conn.recv().split(",")
        if command[0] == "Exit":
            stop_playing = True
            if t and t.is_alive():
                t.join()
            break
        elif command[0] == "Start":
            stop_playing = False
            if method:
                t = Thread(target = play, args = (ring_port,))
                t.start()
        elif command[0] == "Stop":
            stop_playing = True
            if t and t.is_alive():
                t.join()
        elif command[0] == 'Pace':
            pace = float(command[1])
            if extent:
                pause = pace / extent.number_of_bells
            else:
                pause = pace / MAX_BELLS
        elif command[0] == "Load":
            method = Method(app_path + '/data/' + command[1] + '.mcf')
            extent = Extent(method, extent_id = command[2], cover = (command[3] == 'cover'), intro_courses = int(command[4]), extent_courses = int(command[5]), wait_learner = (command[6] == 'True'))
            # extent.dump()
            extent.to_mdf()
            pause = pace / extent.number_of_bells
        elif command[0] == "Play":
            # Toggle whether the machine rings this bell.
            bell = int(command[1])
            bells[bell - 1] = command[2] == "True"
        elif command[0] == "Rung":
            # A learner has struck this bell.
            bell = int(command[1])
            bells_rung[bell - 1] = True
| true |
509c4c2539a8837b6642f2888cf6a41fa4bc87b6 | Python | g1ibby/GA | /genetics/cross.py | UTF-8 | 1,383 | 3.171875 | 3 | [] | no_license | __author__ = 'swaribrus'
import itertools
import random
def one_point_crossover(length):
    """Yield a crossover mask of `length` booleans: True for a randomly
    sized prefix, False for the remainder."""
    cut = random.randint(0, length)
    for pos in range(length):
        yield pos < cut
def two_point_crossover(length):
    """Yield a crossover mask that is True outside a random middle segment
    and False inside it."""
    cut1, cut2 = sorted(random.randint(0, length) for _ in range(2))
    for pos in range(length):
        yield pos < cut1 or pos >= cut2
def three_point_crossover(length):
point1, point2, point3 = sorted(random.randint(0, length) for _ in range(3))
yield from itertools.repeat(True, point1)
yield from itertools.repeat(False, point2 - point1)
yield from itertools.repeat(True, point3 - point2)
yield from itertools.repeat(False, length - point3)
def uniform_point_crossover(length):
return (random.choice((False, True)) for i in range(length))
def ordered_one_point_crossover(genes1, genes2):
length = len(genes1)
point = random.randint(0, length)
child1 = []
child1 = genes1[:point]
for x in genes2:
if x not in child1:
child1.append(x)
child2 = []
child2 = genes2[point:]
for x in genes1:
if x not in child2:
child2.append(x)
return child1, child2
def combine_element_pairs(pairs):
return tuple(zip(*pairs))
| true |
c081947ea15be45988dc840a894c88b557f935e3 | Python | codicuz/gb_python | /Lesson03/task2.py | UTF-8 | 879 | 3.890625 | 4 | [] | no_license | '''
2. Реализовать функцию, принимающую несколько параметров, описывающих данные пользователя:
имя, фамилия, год рождения, город проживания, email, телефон. Функция должна принимать параметры
как именованные аргументы. Реализовать вывод данных о пользователе одной строкой.
'''
def user_function(name, surname, year_of_born, city, mail, phone):
print(f"Имя: {name} Фамилия: {surname} Год рождения: {year_of_born} город проживания: {city} e-mail: {mail} телефон {phone}")
user_function(surname="Иванов", name="Иван", city="Москва", year_of_born="1970", mail="mail@mail.ru", phone="+79139139113")
| true |
3e25b85555634100f33dd6053a4052d589035e16 | Python | Pfliger/Decorators | /main.py | UTF-8 | 2,909 | 3.40625 | 3 | [] | no_license | import json
import hashlib
from datetime import date, datetime
class CountryReader():
def __init__(self, file_name: str):
self.cursor = - 1
with open(file_name, 'r', encoding='utf8') as file:
self.countries = json.load(file)
def __iter__(self):
return self
def __next__(self):
self.cursor += 1
if self.cursor == len(self.countries):
raise StopIteration
return self.countries[self.cursor]['name']['common']
def parameterized_decorator(file_name):
def decorator(old_function):
def new_function(*args, **kwargs):
with open(file_name, 'a+', encoding='utf8') as file:
file.write(f'Имя функции: "{old_function.__name__}" дата и время запуска: {date.today()} {datetime.now().time()}\n'
f'Аргументы функции: {args}, {kwargs}\n')
result = old_function(*args, **kwargs)
file.write(f'Результат выполнения: {result}\n\n')
return result
return new_function
return decorator
def decorator(old_function):
def new_function(*args, **kwargs):
with open('decorator.txt', 'a+', encoding='utf8') as file:
file.write(f'Имя функции: "{old_function.__name__}" дата и время запуска: {date.today()} {datetime.now().time()}\n'
f'Аргументы функции: {args}, {kwargs}\n')
result = old_function(*args, **kwargs)
file.write(f'Результат выполнения: {result}\n\n')
return result
return new_function
if __name__ == '__main__':
countries_reader = CountryReader('countries.json')
with open('result.txt', 'a+', encoding='utf8') as file:
counter = 0
for country in countries_reader:
counter += 1
file.write(f'{country} - https://en.wikipedia.org/wiki/{country.replace(" ", "_")}\n')
file.write(f'\nнайдено {counter} стран')
@decorator
def LineReader(file_name: str):
with open(file_name, 'r', encoding='utf8') as my_file:
while True:
line = my_file.readline()
if line:
yield hashlib.md5(line.encode('utf8')).hexdigest()
else:
break
@parameterized_decorator('parameterized_decorator.txt')
def LineReader_1(file_name: str):
with open(file_name, 'r', encoding='utf8') as my_file:
while True:
line = my_file.readline()
if line:
yield hashlib.md5(line.encode('utf8')).digest()
else:
break
for item in LineReader('result.txt '):
print(item)
for item in LineReader_1('result.txt '):
print(item)
| true |
31c45a21302f826c9d1dfd7ff8696f9727b43215 | Python | Evan1987/BaseML | /Python_ML_and_Kaggle/chap02_linearsvc_svc.py | UTF-8 | 1,724 | 2.796875 | 3 | [] | no_license |
# coding: utf-8
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC, SVC
from sklearn.datasets import load_digits
from sklearn.metrics import classification_report, roc_curve, auc
digits = load_digits()
data = digits.data
label = digits.target
x_train, x_test, y_train, y_test = train_test_split(data, label, test_size=0.25, random_state=33)
ss = StandardScaler()
x_train = ss.fit_transform(x_train)
x_test = ss.transform(x_test)
lsvc = LinearSVC()
lsvc.fit(x_train, y_train)
lsvc_y_predict = lsvc.predict(x_test)
svc = SVC(probability=True)
svc.fit(x_train, y_train)
svc_y_predict = svc.predict(x_test)
prob = svc.predict_proba(x_test)
# confusion matrix analysis for label “1”
fpr, tpr, threhold = roc_curve(y_true=y_test, y_score=prob[:, 1], pos_label=1)
df = pd.DataFrame({"actual": y_test, "pred": svc_y_predict})
newDF = df.applymap(lambda x: 1 if x == 1 else -1)
confusionMatrix = newDF.pivot_table(index="actual",
columns="pred",
fill_value=0,
aggfunc=pd.Series.count)\
.sort_index(axis=0, ascending=False).sort_index(axis=1, ascending=False)
print(auc(fpr, tpr))
print("the ACC of Linear SVC is %f"%lsvc.score(x_test, y_test))
print(classification_report(y_true=y_test,
y_pred=lsvc_y_predict,
target_names=digits.target_names.astype(str)))
print(classification_report(y_true=y_test,
y_pred=svc_y_predict,
target_names=digits.target_names.astype(str)))
| true |
662cf9b9c641c441c2b70a7f1ef7c7f7a23acb06 | Python | 07kshitij/CS60075-Team-11-Task-1 | /Models/NeuralNet.py | UTF-8 | 589 | 2.609375 | 3 | [
"MIT"
] | permissive | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class NN(nn.Module):
def __init__(self, embedding_dim):
super(NN, self).__init__()
self.linear1 = nn.Linear(embedding_dim, 128, bias=True)
self.linear2 = nn.Linear(128, 256, bias=True)
self.linear3 = nn.Linear(256, 64, bias=True)
self.linear4 = nn.Linear(64, 1)
def forward(self, input):
out = torch.tanh(self.linear1(input))
out = torch.tanh(self.linear2(out))
out = torch.tanh(self.linear3(out))
out = torch.sigmoid(self.linear4(out))
return out
| true |
0dbe121f8c8a80919a9a1623d1ca4f49e02c72a9 | Python | dmitry-shaurov/-homework-itmo2018-dmitryshaurov | /task_exception_free_land.py | UTF-8 | 871 | 3.5625 | 4 | [] | no_license | def get_free_land(area, bed):
area_square = area[0] * 100
bed_square = bed[0] * bed[1]
area_lenth_k = int(area[1].split(":")[0])
area_width_k = int(area[1].split(":")[1])
k = area_square / (area_lenth_k * area_width_k)
area_lenth = area_lenth_k * k
area_width = area_width_k * k
if area[0] <= 0:
raise ValueError("Не задана площадь участка")
elif (bed[0] <=0) or (bed[1] <= 0):
raise ValueError("Не задана площадь грядки")
elif (bed[0] > area_lenth) or (bed[0] > area_width) or (bed[1] > area_width) or (bed[1] > area_lenth) or (bed_square > area_square):
raise ValueError("Размер грядки больше размера участка")
else:
return area_square % bed_square
# area1 = (6, "3:2")
# bed1 = (40,28)
# get_free_land(area1, bed1)
| true |
b09785d3a8c194d9205cffd993a07842d045657a | Python | hanseaston/stock-analysis-engine | /analysis_engine/perf/profile_algo_runner.py | UTF-8 | 1,803 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | """
Example tool for to profiling algorithm performance for:
- CPU
- Memory
- Profiler
- Heatmap
The pip includes `vprof for profiling algorithm code
performance <https://github.com/nvdv/vprof>`__
#. Start vprof in remote mode in a first terminal
.. note:: This command will start a webapp on port ``3434``
::
vprof -r -p 3434
#. Start Profiler in a second terminal
.. note:: This command pushes data to the webapp
in the other terminal listening on port ``3434``
::
vprof -c cm ./analysis_engine/perf/profile_algo_runner.py
"""
import datetime
import vprof.runner as perf_runner
import analysis_engine.consts as ae_consts
import analysis_engine.algo_runner as algo_runner
import spylunking.log.setup_logging as log_utils
log = log_utils.build_colorized_logger(
name='profile-algo')
def start():
"""start"""
back_a_few_days = (
datetime.datetime.now() - datetime.timedelta(days=3))
start_date = back_a_few_days.strftime(
ae_consts.COMMON_DATE_FORMAT)
ticker = 'SPY'
s3_bucket = 'perftests'
s3_key = (
f'{ticker}_{start_date}')
algo_config = (
'./cfg/default_algo.json')
history_loc = (
f's3://{s3_bucket}/{s3_key}')
log.info(
f'building {ticker} trade history '
f'start_date={start_date} '
f'config={algo_config} '
f'history_loc={history_loc}')
runner = algo_runner.AlgoRunner(
ticker=ticker,
start_date=start_date,
history_loc=history_loc,
algo_config=algo_config,
verbose_algo=False,
verbose_processor=False,
verbose_indicators=False)
runner.start()
# end of start
if __name__ == '__main__':
perf_runner.run(start, 'cm', args=(), host='0.0.0.0', port=3434)
| true |
b4dd3f5145c7d7e51b4123cf3d22634726050a56 | Python | Onebigbera/Daily_Practice | /test/sorting_algorithm/simple_selection_sort.py | UTF-8 | 2,039 | 4.3125 | 4 | [] | no_license | # -*- coding: utf-8-*-
# ****************Second One ****************
"""
简单选择排序(simple_selection_sort) 时间复杂度O(N^2)
通过n-1次关键字之间的比较,从n-i+1个记录中选出关键字最小的记录,并和第i(1<=i<=n)个元素护换位置
通俗的说,对尚未完成排序的所有元素,从头到尾比较一边,记录下来最小的那个元素的下标,也就是该元素的位置,再把该元素教化到当前遍历的最前面,其效率住处在于:每一轮进行了很多的比较,却只交换一次。因为它的时间复杂度也是O(n^2)但还是要比冒泡排序要好一点。
"""
__author__ = 'George'
__date__ = '2018/10/29'
class SQList(object):
def __init__(self, list=None):
"""
:param list: 传入的可迭代对象
"""
self.list = list
def swap(self, i, j):
"""
定义一个交换元素的方法,方便在之后使用 依据索引交换位置
:param i:
:param j:
:return:
"""
temp = self.list[i]
self.list[i] = self.list[j]
self.list[j] = temp
def select_sort(self):
"""
简单的选择排序,时间复杂度为O(n^2)
:return:
"""
list = self.list
length = len(list)
for i in range(length):
# 默认最小值的索引为i
minimum = i
for j in range(i + 1, length):
if list[minimum] > list[j]:
# 如果后面有更小的 定位最小值得索引
minimum = j
# 如果最小值得位置不是i
if i != minimum:
# 将最小值位置和当前遍历元素护换位置
self.swap(i, minimum)
def __str__(self):
list = []
for i in self.list:
list.append(i)
return str(list)
if __name__ == '__main__':
my_list = SQList([1, 78, 98, 7, 15, 56, 23])
my_list.select_sort()
print(my_list) | true |
e667130e5e4c6bf2ee56b6651b52fdac242fa89d | Python | vishalpatil0/Python-cwh- | /dictionary-1.py | UTF-8 | 283 | 3.421875 | 3 | [] | no_license | #program to take create dictionary and take input (keys) from user and give the result which is value
d1={"vishal":"patil","namrata":"badge","mayur":"dhakane"}
search=input("please give the keys = ")
if(d1.get(search)==None):
print("go to hell")
else:
print(d1[search])
| true |
2ab114caf9609481b12d81d7439c21a08a51b779 | Python | spanneerselvam/Cracking-The-Code-Problems | /DataStructures/ch4_trees_graphs/graphs.py | UTF-8 | 1,033 | 3.765625 | 4 | [] | no_license | """
Graph Implementation in Python
"""
class Graph:
def __init__(self):
self.graph = {}
def add_edge(self, node, neighbor=None):
edges = []
if neighbor != None:
if node not in self.graph:
edges.append(neighbor)
self.graph[node] = edges
self.add_edge(neighbor, node)
if node in self.graph:
edges = self.graph[node]
if neighbor not in edges:
edges.append(neighbor)
else:
if node not in self.graph:
self.graph[node] = edges
def print_graph(self):
return self.graph
def show_vertices(self, mode):
keys = []
keys = self.graph.keys()
return keys
def show_edges(self, node):
return node, self.graph[node]
a = Graph()
a.add_edge("A")
a.add_edge("D")
print(a.print_graph())
a.add_edge("B", "A")
print(a.print_graph())
a.add_edge("C", "A")
print(a.print_graph())
| true |
77850d981ec1ee417031ec89163bcc8ee3876e71 | Python | APY-Plus/API-Jnilib | /test.py | UTF-8 | 193 | 2.78125 | 3 | [] | no_license | from time import sleep
from threading import Thread
def test():
sleep(3)
print('[py]new thread over')
t1 = Thread(target=test, daemon=False)
t1.start()
print('[py]main thread over')
| true |
b666a497a65dbd5f242f374acf2916d20eda5399 | Python | LeGeRyChEeSe/dogsbot | /assets/Games/Chess/classes/chess.py | UTF-8 | 2,783 | 3.203125 | 3 | [] | no_license | from collections import OrderedDict
from discord.ext import commands
from assets.Games.Chess.classes.player import Player
class Chess:
def __init__(self, white_player, black_player, super, ctx: commands.Context):
self.super = super
self.ctx = ctx
self.black = ":black_large_square:"
self.white = ":white_large_square:"
self.white_player = Player(white_player, "white", self.super, self.ctx)
self.black_player = Player(black_player, "black", self.super, self.ctx)
self.chess_board = []
self.over = False
self.set_default_chess_board()
self.game()
def set_default_chess_board(self):
for i in range(8):
self.chess_board.append([])
for j in range(8):
if i % 2 == 0:
if j % 2 == 0:
self.chess_board[i].append(self.black)
else:
self.chess_board[i].append(self.white)
else:
if j % 2 == 0:
self.chess_board[i].append(self.white)
else:
self.chess_board[i].append(self.black)
def set_chess_board(self):
# White set
self.chess_board[7][0] = self.white_player.rook
self.chess_board[7][1] = self.white_player.knight
self.chess_board[7][2] = self.white_player.bishop
self.chess_board[7][3] = self.white_player.queen
self.chess_board[7][4] = self.white_player.king
self.chess_board[7][5] = self.white_player.bishop
self.chess_board[7][6] = self.white_player.knight
self.chess_board[7][7] = self.white_player.rook
for i in range(8):
self.chess_board[6][i] = self.white_player.pawn
# Black set
self.chess_board[0][0] = self.black_player.rook
self.chess_board[0][1] = self.black_player.knight
self.chess_board[0][2] = self.black_player.bishop
self.chess_board[0][3] = self.black_player.queen
self.chess_board[0][4] = self.black_player.king
self.chess_board[0][5] = self.black_player.bishop
self.chess_board[0][6] = self.black_player.knight
self.chess_board[0][7] = self.black_player.rook
for i in range(8):
self.chess_board[1][i] = self.black_player.pawn
def get_chess_board(self):
chess_board = ""
for i in self.chess_board:
for j in i:
chess_board += j
chess_board += "\n"
return chess_board
async def game(self):
self.black_player.set_color_pieces()
self.set_chess_board()
while not self.over:
print("white turns")
await self.white_player.play()
| true |
937610be7ae050424c9cd4c665058e716fd06526 | Python | webclinic017/Backtesting-7 | /test/trade/test_trader.py | UTF-8 | 7,990 | 2.609375 | 3 | [] | no_license | import pytest
import pandas as pd
from backtesting.trade.trader import Trader
@pytest.fixture()
def trader():
return Trader(1000, 'BTC', 'harvir', 0, 0)
def test_long_max(trader):
trade = trader.long(100, pd.Timestamp('2020-01-01 00:00:00'), max=True)
assert trade.type == 'long'
assert trade.new_balance == 0
assert trade.price == 100
assert trade.quantity == 10
assert trade.date == pd.Timestamp('2020-01-01 00:00:00')
def test_long_max_with_fees(trader):
trader.fees = 0.01 # 1%
trade = trader.long(100, pd.Timestamp('2020-01-01 00:00:00'), max=True)
assert trade.type == 'long'
assert trade.new_balance == 0
assert trade.price == 100
assert trade.quantity == 9.9009901
assert trade.date == pd.Timestamp('2020-01-01 00:00:00')
def test_long_too_much(trader):
try:
trader.long(100, pd.Timestamp('2020-01-01 00:00:00'), 1000)
except Exception as e:
assert 'Future balance is -99000' in str(e)
def test_long_quantity(trader):
trade = trader.long(100, pd.Timestamp('2020-01-01 00:00:00'), 5)
assert trade.type == 'long'
assert trade.new_balance == 500
assert trade.price == 100
assert trade.quantity == 5
assert trade.date == pd.Timestamp('2020-01-01 00:00:00')
def test_long_quantity_with_fees(trader):
trader.fees = 0.01 # 1%
trade = trader.long(100, pd.Timestamp('2020-01-01 00:00:00'), quantity=9.9)
assert trade.type == 'long'
assert trade.new_balance == 0.1
assert trade.price == 100
assert trade.quantity == 9.9
assert trade.date == pd.Timestamp('2020-01-01 00:00:00')
def test_long_and_close_max_win(trader):
trader.long(100, pd.Timestamp('2020-01-01 00:00:00'), max=True)
trade = trader.close_long(120, pd.Timestamp('2020-02-01 00:00:00'), max=True)
assert trade.type == 'close long'
assert trade.new_balance == 1200
assert trade.price == 120
assert trade.quantity == 10
assert trade.date == pd.Timestamp('2020-02-01 00:00:00')
assert trader.open_long is None
def test_long_and_close_max_win_with_fees(trader):
trader.fees = 0.01 # 1%
trader.long(100, pd.Timestamp('2020-01-01 00:00:00'), max=True)
trade = trader.close_long(120, pd.Timestamp('2020-02-01 00:00:00'), max=True)
assert trade.type == 'close long'
assert trade.new_balance == 1176.24
assert trade.price == 120
assert trade.quantity == 9.9009901
assert trade.date == pd.Timestamp('2020-02-01 00:00:00')
assert trader.open_long is None
def test_long_and_close_max_loss(trader):
trader.long(100, pd.Timestamp('2020-01-01 00:00:00'), max=True)
trade = trader.close_long(80, pd.Timestamp('2020-02-01 00:00:00'), max=True)
assert trade.type == 'close long'
assert trade.new_balance == 800
assert trade.price == 80
assert trade.quantity == 10
assert trade.date == pd.Timestamp('2020-02-01 00:00:00')
assert trader.open_long is None
def test_long_and_close_max_loss_with_fees(trader):
trader.fees = 0.01 # 1%
trader.long(100, pd.Timestamp('2020-01-01 00:00:00'), max=True)
trade = trader.close_long(80, pd.Timestamp('2020-02-01 00:00:00'), max=True)
assert trade.type == 'close long'
assert trade.new_balance == 784.16
assert trade.price == 80
assert trade.quantity == 9.9009901
assert trade.date == pd.Timestamp('2020-02-01 00:00:00')
assert trader.open_long is None
def test_long_and_close_most_then_max(trader):
trader.long(100, pd.Timestamp('2020-01-01 00:00:00'), max=True)
trade1 = trader.close_long(120, pd.Timestamp('2020-02-01 00:00:00'), quantity=6)
trade2 = trader.close_long(110, pd.Timestamp('2020-03-01 00:00:00'), max=True)
assert trade1.type == 'close long'
assert trade1.new_balance == 720
assert trade1.price == 120
assert trade1.quantity == 6
assert trade1.date == pd.Timestamp('2020-02-01 00:00:00')
assert trade2.type == 'close long'
assert trade2.new_balance == 1160
assert trade2.price == 110
assert trade2.quantity == 4
assert trade2.date == pd.Timestamp('2020-03-01 00:00:00')
def test_short_max(trader):
trade = trader.short(100, pd.Timestamp('2020-01-01 00:00:00'), max=True)
assert trade.type == 'short'
assert trade.new_balance == 1000
assert trade.price == 100
assert trade.quantity == 5
assert trade.date == pd.Timestamp('2020-01-01 00:00:00')
def test_short_too_much(trader):
try:
trader.short(100, pd.Timestamp('2020-01-01 00:00:00'), quantity=8)
except Exception as e:
assert 'Quantity is larger than' in str(e)
def test_short_quantity(trader):
trade = trader.short(100, pd.Timestamp('2020-01-01 00:00:00'), quantity=3)
assert trade.type == 'short'
assert trade.new_balance == 1000
assert trade.price == 100
assert trade.quantity == 3
assert trade.date == pd.Timestamp('2020-01-01 00:00:00')
def test_short_and_close_win(trader):
trader.short(100, pd.Timestamp('2020-01-01 00:00:00'), max=True)
trade = trader.close_short(80, pd.Timestamp('2020-02-01 00:00:00'), max=True)
assert trade.type == 'close short'
assert trade.new_balance == 1100
assert trade.price == 80
assert trade.quantity == 5
assert trade.date == pd.Timestamp('2020-02-01 00:00:00')
def test_short_and_close_win_with_fees(trader):
trader.fees = 0.01
trader.short(100, pd.Timestamp('2020-01-01 00:00:00'), max=True)
trade = trader.close_short(80, pd.Timestamp('2020-02-01 00:00:00'), max=True)
assert trade.type == 'close short'
assert trade.new_balance == 1094.06
assert trade.price == 80
assert trade.quantity == 4.95049505
assert trade.date == pd.Timestamp('2020-02-01 00:00:00')
def test_short_and_close_loss(trader):
trader.short(100, pd.Timestamp('2020-01-01 00:00:00'), max=True)
trade = trader.close_short(120, pd.Timestamp('2020-02-01 00:00:00'), max=True)
assert trade.type == 'close short'
assert trade.new_balance == 900
assert trade.price == 120
assert trade.quantity == 5
assert trade.date == pd.Timestamp('2020-02-01 00:00:00')
def test_short_and_close_loss_with_fees(trader):
trader.fees = 0.01
trader.short(100, pd.Timestamp('2020-01-01 00:00:00'), max=True)
trade = trader.close_short(120, pd.Timestamp('2020-02-01 00:00:00'), max=True)
assert trade.type == 'close short'
assert trade.new_balance == 896.04
assert trade.price == 120
assert trade.quantity == 4.95049505
assert trade.date == pd.Timestamp('2020-02-01 00:00:00')
def test_short_and_close_most_then_max(trader):
trader.short(100, pd.Timestamp('2020-01-01 00:00:00'), max=True)
trade1 = trader.close_short(90, pd.Timestamp('2020-02-01 00:00:00'), quantity=3)
trade2 = trader.close_short(80, pd.Timestamp('2020-03-01 00:00:00'), max=True)
assert trade1.type == 'close short'
assert trade1.new_balance == 1030
assert trade1.price == 90
assert trade1.quantity == 3
assert trade1.date == pd.Timestamp('2020-02-01 00:00:00')
assert trade2.type == 'close short'
assert trade2.new_balance == 1070
assert trade2.price == 80
assert trade2.quantity == 2
assert trade2.date == pd.Timestamp('2020-03-01 00:00:00')
def test_short_and_close_most_then_max_with_fees(trader):
trader.fees = 0.01
trader.short(100, pd.Timestamp('2020-01-01 00:00:00'), max=True)
trade1 = trader.close_short(90, pd.Timestamp('2020-02-01 00:00:00'), quantity=3)
trade2 = trader.close_short(80, pd.Timestamp('2020-03-01 00:00:00'), max=True)
assert trade1.type == 'close short'
assert trade1.new_balance == 1025.05
assert trade1.price == 90
assert trade1.quantity == 3
assert trade1.date == pd.Timestamp('2020-02-01 00:00:00')
assert trade2.type == 'close short'
assert trade2.new_balance == 1064.06
assert trade2.price == 80
assert trade2.quantity == 1.95049505
assert trade2.date == pd.Timestamp('2020-03-01 00:00:00')
| true |
bb2bdbaa7ecd6fea7eecd2e1d37f56e40b269a59 | Python | aryanchandrakar/Blockchain_Chat | /restnode.py | UTF-8 | 3,221 | 2.609375 | 3 | [] | no_license | import socket
import select
import threading
import json
import time
import flask
import requests
import random
import blockchain
def ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("40.114.26.190", 80)) # doesn't actually send traffic
ipa = s.getsockname()[0]
s.close()
return ipa
def getPort():
return random.randint(1024, 65535)
class Node:
def __init__(self, port):
self.peers = []
self.chain = blockchain.Blockchain()
self.chain.genesis()
self.staging = [] # staging data to add to block
# socket stuff
self.port = port
def consensus(self):
chains = []
for peer in peers:
pass # get that peer's chain
for chain in chains:
self.chain.consensus(chain)
def add_block(self):
self.chain.add_block(self.staging)
def add_data(self, data):
self.staging.append(data)
def peer(self, addr, port):
self.peers.append(Peer(addr, port))
def serve_chain(self, app):
app.run("0.0.0.0", self.port)
def check_consensus(self):
while True:
for peer in self.peers:
chain = peer.get_chain()
if self.chain.consensus(chain):
print("Checked chain with {}, ours is right".format(
(peer.addr, peer.port)))
else:
print("Checked chain with {}, theirs is right".format(
(peer.addr, peer.port)))
time.sleep(5)
def add_blocks(self):
while True:
if len(self.staging) > 0:
print("Mining new block...")
self.add_block()
print("Added new block!")
self.staging = []
else:
time.sleep(5)
def handle_input(self):
while True:
cmd = input("> ").split(";")
if cmd[0] == "peer":
self.peer(cmd[1], int(cmd[2]))
if cmd[0] == "txion":
self.staging.append(cmd[1])
if cmd[0] == "chain":
print([block.data for block in self.chain.blocks])
class Peer:
def __init__(self, address, port):
self.addr = address
self.port = port
def get_chain(self):
print("Fetching chain from {}".format((self.addr, self.port)))
message = requests.get("http://{}:{}/chain".format(self.addr,
self.port)).text
return blockchain.Blockchain.fromjson(message)
def start(listen_port):
me = Node(listen_port)
app = flask.Flask(__name__)
@app.route("/chain")
def chain():
return me.chain.jsonrep()
server_thread = threading.Thread(target=me.serve_chain, args=(app,))
consensus_thread = threading.Thread(target=me.check_consensus)
miner_thread = threading.Thread(target=me.add_blocks)
#input_thread = threading.Thread(target=me.handle_input)
server_thread.start()
consensus_thread.start()
miner_thread.start()
#me.handle_input()
return me
| true |
041ec546b8ddd5b0dd8b27d049b075e8647d5b5a | Python | devqazi/roman-urdu | /scripts/visualize_terminals.py | UTF-8 | 340 | 3.015625 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
in_path = "../res/terminal_freq.csv"
with open(in_path) as f:
data = f.read()
data = [int(i) for i in data.split(",")]
labels = [chr(i+97) for i in range(26)]
ticks = range(26)
plt.bar(ticks, data, align="center")
plt.xticks(ticks, labels)
plt.title("Terminal frequencies")
plt.show() | true |
8875d1818713f6fa2dd52b337255b5e87c9b207a | Python | kzkMae/myProject | /code/vmGenymotion/GUI/guiOperate.py | UTF-8 | 2,073 | 3.0625 | 3 | [] | no_license | # coding:utf-8
import os
import time
#GUiでGenyotionの起動・停止を行うためのソースコード
#基本的には「xte」コマンドを用いる
#画面の位置を変数化
#Genymotionのスタートボタン,絶対位置(x,y)
startGeny = ['114','133']
#Genymotionの修了ボタン(x,y)
endGeny = ['642','45']
endGenyKey = ['Alt_L','F4']
#Wait時間(クリックまでの間隔,起動後,終了後)
waiTime = [0.5,25,5]
#xteコマンド(基礎)
xte = 'xte '
#xteコマンドの中身用
sq = '\''
mouseMove = 'mousemove '
mouseLClick = 'mouseclick 1'
keyDown = 'keydown '
keyUp = 'keyup '
keyClick = 'key '
cmd_c = xte + sq + mouseLClick + sq
#Genymotion起動
def startGenymotionClick():
checkNum = 0
startXY = startGeny[0] + ' ' + startGeny[1]
#コマンド作成
cmd_m = xte + sq + mouseMove + startXY + sq
#print cmd_m
checkNum += os.system(cmd_m)
time.sleep(waiTime[0])
#print cmd_c
checkNum += os.system(cmd_c)
#5秒間停止
time.sleep(waiTime[1])
return checkNum
#Genymotion終了
def endGenymotionClick():
checkNum = 0
endXY = endGeny[0] + ' ' + endGeny[1]
#コマンド作成
cmd_m = xte + sq + mouseMove + endXY + sq
#print cmd_m
checkNum += os.system(cmd_m)
time.sleep(waiTime[0])
#print cmd_c
checkNum += os.system(cmd_c)
#5秒間停止
time.sleep(waiTime[2])
return checkNum
def endGenymotionKey():
checkNum = 0
#コマンド作成
cmd_ad = xte + sq + keyDown + endGenyKey[0] + sq
checkNum += os.system(cmd_ad)
time.sleep(0.01)
cmd_k = xte + sq + keyClick + endGenyKey[1] + sq
checkNum += os.system(cmd_k)
time.sleep(0.01)
cmd_au = xte + sq + keyUp + endGenyKey[0] + sq
checkNum += os.system(cmd_au)
time.sleep(waiTime[2])
return checkNum
#Genymotionを起動するMainの関数
def startGenymotionMain():
checkNum = startGenymotionClick()
return checkNum
def endGenymotionMain():
#checkNum = endGenymotionClick()
checkNum = endGenymotionKey()
return checkNum | true |
c54d26b01cd24baba589470b867cc0ab2f82954f | Python | bh0085/compbio | /learning/multi/learner.py | UTF-8 | 4,493 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python
from numpy import *
import numpy as np
import matplotlib.pyplot as plt
import compbio.learning.plots as lplots
import compbio.utils.plots as myplots
import compbio.utils.colors as mycolors
from regression_models import *
from orange_models import *
import itertools as it
import compbio.utils.pbar as pbar
class Learner():
def __init__(self,x_data, y_data, coupling):
'''Inputs: x_data, y_data: [Nx * Nt], [Ny * Nt] arrays'''
assert shape(x_data)[1] == shape(y_data)[1]
self.xvals = x_data
self.yvals = y_data
self.coupling = coupling
self.nt = shape(x_data)[1]
self.nx = shape(x_data)[0]
self.ny = shape(y_data)[0]
self.splitTraining()
def splitTraining(self, seed= -1, train_frac = .6):
if seed != -1: random.seed(seed)
inds = arange(self.nt)
random.shuffle(inds)
self.train_idxs = inds[0:floor(train_frac *self.nt)]
self.test_idxs = inds[floor(train_frac *self.nt):]
def xyTrain(self):
return self.xvals[:,self.train_idxs] ,\
self.yvals[:,self.train_idxs]
def xyTest(self):
return self.xvals[:,self.test_idxs] ,\
self.yvals[:,self.test_idxs]
def setModel(self,model):
self.model = model
def learn(self):
x,y = self.xyTrain()
self.model.learn(x,y,self.coupling)
def predictTraining(self):
x, y = self.xyTrain()
return self.model.predict(x)
def predictTest(self):
x,y = self.xyTest()
return self.model.predict(x)
def predictOther(self, x, y):
return self.model.predict(x)
def makePlots(self, name = 'No Name'):
xtrain,ytrain = self.xyTrain()
xtest,ytest = self.xyTest()
ytrain_predicted = self.predictTraining()
ytest_predicted = self.predictTest()
ny = len(ytrain)
f = plt.figure(1)
f.clear()
ax0 = f.add_subplot('211')
f1 = plt.figure(2)
f1.clear()
ax1 = f1.add_subplot('211')
ct = mycolors.getct(ny)
for actual,predicted, ax ,subtitle in [[ytest,ytest_predicted,ax0,'test predictions'],
[ytrain,ytrain_predicted,ax1,'training predictions']]:
for i in range(len(actual)):
lplots.plotPredictions(actual[i],
predicted[i],
ax,
color = ct[i])
myplots.maketitle(ax, name, subtitle =subtitle )
def testParams(self, model_class,
prediction= 'test', res = 10, dim = 1):
#set up the grid of prediction parameters
if len(shape(res)) == 0: res = (res,) * dim
test_vals =list( it.product(*[[(x,r) for x in arange(r)]
for r in res]))
#shall we predict holdout or training set?
if prediction == 'training':
xyfun = self.xyTrain
predictfun = self.predictTraining
else:
xyfun = self.xyTest
predictfun = self.predictTest
#set initial values for output variables
ntest = len( xyfun()[1][0])
rms = zeros(res)
pdicts = reshape(array([{} for i in range(product(res))]),res)
test_preds = reshape(\
array([ zeros(ntest) for i in range(product(res))]),\
concatenate([res+(ntest,)]))
#test the learning method for each parameter
bar = pbar.simple(len(test_vals))
ct = 0
for t in test_vals:
ct += 1
bar.update(ct)
pdict = {}
idxs = zip(map(lambda x: x[0], t))
self.setModel(model_class(params = t,
pdict = pdicts[idxs][0]))
self.learn()
xtest, ytest = xyfun()
ypred = predictfun()
rms[idxs] = std(ytest - ypred)
test_preds[idxs] = ypred
bar.finish()
#create a dictionary of all of the output variables
out = {}
out['pdicts'] = pdicts
out['test_rms'] = rms
out['test_preds'] = test_preds
out['actual_preds'] = ytest
print ytest
return out
def randomDependent(nx, ny, nt):
xvals = random.random((nx,nt))
cxns = greater(random.random((ny,nx)),.2)
cofs = cxns * random.random((ny,nx))
yvals = np.dot(cofs, xvals)
return xvals, yvals, cxns
def main():
nx, ny, nt = 20,10,100
xvals, yvals, couplings = randomDependent(nx,ny,nt)
#xvals = array([0,1,2,3,4])[newaxis,:]
#yvals =array( [.1,1.3,2.5,2.9,4.1])[newaxis,:]
#couplings = [[1]]
l = Learner(xvals,yvals,couplings)
l.setModel(LassoRegressionModel)
l.learn()
l.makePlots(name = 'Test Data')
if __name__ == '__main__':
main()
exit(0)
| true |
f42038313f7513283d74eddb80085da621c10648 | Python | zhrmrz/pascalTriangle | /pascalTriangle.py | UTF-8 | 265 | 3.09375 | 3 | [] | no_license | class sol:
def pascalTriangle(self,numRows):
list=[[1]]
row=[1]
for i in range(numRows):
row=[1]+[row[i]+row[i+1] for i in range(len(row)-1)]+[1]
list.append(row)
print(list)
p1=sol()
p1.pascalTriangle(4)
| true |
6fc80cbb1ab2b36a3daebb5fc8453117b880583f | Python | bobqywei/Daily-Coding-Problem | /#13.py | UTF-8 | 607 | 3.125 | 3 | [] | no_license | while True:
k = int(input())
s = input()
char_freq = {s[0]: 1}
left = 0
right = 0
distinct_chars = 1
maxlen = 0
start = 0
while right < len(s)-1:
if distinct_chars <= k:
right += 1
freq = char_freq.get(s[right])
if freq is None or freq == 0:
distinct_chars += 1
char_freq[s[right]] = 1
else:
char_freq[s[right]] = freq + 1
if distinct_chars == k and right-left+1 > maxlen:
maxlen = right - left + 1
start = left
else:
char_freq[s[left]] -= 1
if char_freq[s[left]] == 0:
distinct_chars -= 1
left += 1
print(s[start: start+maxlen])
| true |
3d8afb5021c5ccec6a78fd2f47fd516a19b246fe | Python | knighton/babi | /panoptes/ling/parse/parse.py | UTF-8 | 20,165 | 3.328125 | 3 | [] | no_license | from collections import defaultdict
class Token(object):
"""
A single token in a parse.
"""
def __init__(self, index, text, tag, up, downs):
self.index = index # integer index
self.text = text # text
self.tag = tag # tag
self.up = up # (dep, Token or None)
self.downs = downs # list of (dep, Token)
def to_d(self):
return {
'index': self.index,
'text': self.text,
'tag': self.tag,
}
def reassign_parent(node, new_parent):
    """Detach *node* from its current parent (if any) and attach it to
    *new_parent*, keeping the same dependency label and keeping the new
    parent's children sorted by sentence position.
    """
    rel, old_parent = node.up
    if old_parent:
        # Remove the (dep, node) entry from the old parent's child list.
        for i, (_, child) in enumerate(old_parent.downs):
            if child.index == node.index:
                del old_parent.downs[i]
                break
    node.up = rel, new_parent
    new_parent.downs.append((rel, node))
    # FIX: tuple-parameter lambdas (``lambda (dep, child): ...``) are
    # Python 2-only syntax (removed by PEP 3113); indexing the pair is
    # equivalent and works on both Python 2 and 3.
    new_parent.downs.sort(key=lambda pair: pair[1].index)
class Parse(object):
    """
    A dependency parse tree: the flat list of Tokens plus the root Token.

    NOTE: this class is Python 2 code (print statements, ``iteritems`` and
    tuple-parameter lambdas below).
    """
    def __init__(self, tokens, root):
        self.tokens = tokens  # list of Tokens
        self.root = root  # the root Token in tokens
    def fixed(self):
        """
        We completely give up on certain parse shapes.

        Runs a long sequence of in-place tree-surgery passes that normalize
        parser quirks (spacy/bAbI-specific) into canonical shapes; each pass
        is documented inline with an example sentence that motivated it.
        Returns self so calls can be chained.
        """
        # NOTE(review): debug output left enabled here.
        print 'INPUT TO FIXED:'
        self.dump()
        # XX tokens, ugh.
        for t in self.tokens:
            if t.tag == 'XX':
                t.tag = 'NNP'
                t.up = ('nsubj', t.up[1])
                reassign_parent(t, t.up[1])
        # Jason is a proper noun.
        for t in self.tokens:
            if t.text in ('jason', 'antoine', 'sumit', 'yann'):
                t.tag = 'NNP'
        # Tokens that descend from sentence-ending punctuation shall be
        # reassigned to the root.
        #
        # "What is the hallway north of?"
        while True:
            t = self.tokens[-1]
            if t.tag != '.':
                break
            for rel, child in t.downs:
                reassign_parent(child, self.root)
            break
        # "The" is not a direct verb argument.
        #
        # "What is the hallway north of?"
        #
        # Convert
        #
        #   V* -nsubj-> the
        #
        # to
        #
        #   (token after the) -det-> the
        for t in self.tokens:
            rel, parent = t.up
            if not parent:
                continue
            if not parent.tag.startswith('V'):
                continue
            if rel != 'nsubj':
                continue
            if t.text != 'the':
                continue
            if len(self.tokens) < t.index + 1:
                continue
            next_token = self.tokens[t.index + 1]
            next_rel, next_parent = next_token.up
            if next_rel != 'nmod':
                continue
            next_token.up = 'nsubj', next_parent
            reassign_parent(next_token, next_parent)
            t.up = 'det', parent
            reassign_parent(t, self.tokens[t.index + 1])
        # "Does (subject) (verb)"-style questions sometimes get parsed like the
        # (verb) is a noun, compounded to the true subject.  Requires much
        # fiddling to fix.
        while True:
            if self.root.text not in ['do', 'does', 'did']:
                break
            dobj = None
            has_aux = False
            for rel, child in self.root.downs:
                if rel == 'dobj':
                    dobj = child
                elif rel == 'aux':
                    has_aux = True
            if not dobj:
                break
            if has_aux:
                break
            if dobj.tag != 'NN':
                break
            # Fuck you too!
            self.root.up = 'aux', None
            for i, (rel, child) in enumerate(self.root.downs):
                if child.index == dobj.index:
                    del self.root.downs[i]
                    break
            for rel, child in self.root.downs:
                if rel == 'dobj':
                    continue
                reassign_parent(child, dobj)
            reassign_parent(self.root, dobj)
            dobj.tag = 'VB'
            dobj.up = 'ROOT', None
            self.root = dobj
            compound = None
            for i, (rel, child) in enumerate(self.root.downs):
                if rel == 'compound':
                    self.root.downs[i] = 'nsubj', child
                    compound = child
                    break
            for i, (rel, child) in enumerate(self.root.downs):
                if rel == 'det':
                    del self.root.downs[i]
                    if compound:
                        compound.downs.append((rel, child))
                        compound.downs.sort(
                            key=lambda (dep, child): child.index)
                    break
            break
        # Sometimes when there's a stranded preposition at the end, the ending
        # punctuation is made its child.  Annoying.
        #
        # "What is the bedroom east of?"
        while True:
            if not self.tokens:
                break
            t = self.tokens[-1]
            if t.tag != '.':
                break
            rel, orig_parent = t.up
            if rel == 'punct':
                break
            prev_parent = None
            parent = orig_parent
            while parent:
                prev_parent = parent
                _, parent = parent.up
            top_verb = prev_parent
            for i, (_, child) in enumerate(orig_parent.downs):
                if child.index == t.index:
                    del orig_parent.downs[i]
                    break
            t.up = ('punct', top_verb)
            top_verb.downs.append(('punct', t))
            top_verb.downs.sort(key=lambda (dep, child): child.index)
            break
        # Sometimes the parser puts the subject under an acomp for whatever
        # reason.
        #
        # "Is the chocolate bigger than the box?"
        #
        # Got
        #
        #   is -> bigger -> chocolate
        #
        # Want
        #
        #   is -> chocolate
        #   is -> bigger
        for t in self.tokens:
            # We want to transform
            #
            #   verb -acomp-> JJR -nsubj-> anything
            #
            # into
            #
            #   verb -nsubj-> anything
            #   verb -acomp-> JJR
            rel, parent = t.up
            if rel != 'nsubj':
                continue
            if parent is None:
                continue
            if parent.tag != 'JJR':
                continue
            parent_rel, grandparent = parent.up
            if grandparent is None:
                continue
            if parent_rel != 'acomp':
                continue
            if not grandparent.tag.startswith('V'):
                continue
            # Tree surgery.
            for i, (_, child) in enumerate(parent.downs):
                if child.index == t.index:
                    del parent.downs[i]
                    break
            t.up = (rel, grandparent)
            grandparent.downs.append((rel, t))
            grandparent.downs.sort(key=lambda (dep, child): child.index)
        # We don't like adverbial phrases.  We do like prepositional phrases as
        # verb arguments.
        #
        # "Mary went back to the garden."
        #
        # Got
        #
        #   went -> back -> to -> garden -> the
        #
        # Want
        #
        # * went -> back
        # * went -> to -> garden -> the
        for t in self.tokens:
            # We want to transform
            #
            #   verb -advmod-> adverb -prep-> prep
            #
            # into
            #
            #   verb -advmod-> adverb
            #   verb -prep-> prep
            # Do the checks.
            if t.tag != 'IN':
                continue
            rel, parent = t.up
            if rel != 'prep':
                continue
            if parent is None:
                continue
            if parent.tag != 'RB':
                continue
            parent_rel, grandparent = parent.up
            if parent_rel != 'advmod':
                continue
            if grandparent is None:
                continue
            if not grandparent.tag.startswith('V'):
                continue
            # Do the tree surgery.
            for i, (_, child) in enumerate(parent.downs):
                if child.index == t.index:
                    del parent.downs[i]
                    break
            t.up = (rel, grandparent)
            grandparent.downs.append((rel, t))
            grandparent.downs.sort(key=lambda (dep, child): child.index)
        # Usually, preps don't descend from other preps.  If spacy gives us
        # that, attach the child prep to its grandparent instead.
        for t in self.tokens:
            # Transform
            #
            #   verb -prep-> IN-1 -prep-> IN-2
            #
            # into
            #
            #   verb -prep-> IN-1
            #   verb -prep-> IN-2
            if t.tag != 'IN':
                continue
            rel, parent = t.up
            if rel != 'prep':
                continue
            if parent is None:
                continue
            if parent.tag != 'IN':
                continue
            parent_rel, grandparent = parent.up
            if grandparent is None:
                continue
            if parent_rel != 'prep':
                continue
            # Do the surgery.
            for i, (_, child) in enumerate(parent.downs):
                if child.index == t.index:
                    del parent.downs[i]
                    break
            t.up = (rel, grandparent)
            grandparent.downs.append((rel, t))
            grandparent.downs.sort(key=lambda (dep, child): child.index)
        # Break up compounds of the form (determiner) (noun) (direction) (PP).
        #
        # "What is the bathroom east of?"
        for t in self.tokens:
            rel, parent = t.up
            if parent is None:
                continue
            if rel != 'compound':
                continue
            parent_rel, grandparent = parent.up
            if grandparent is None:
                continue
            # Some generic relation for nouns that won't break surface recog.
            guess_rel = 'nsubj'
            # Give the child of the "compound" relation to its grandparent.
            for i, (_, child) in enumerate(parent.downs):
                if child.index == t.index:
                    del parent.downs[i]
                    break
            t.up = (guess_rel, grandparent)
            grandparent.downs.append((guess_rel, t))
            grandparent.downs.sort(key=lambda (dep, child): child.index)
            # Reassign its parent's det to it.
            det = None
            for i, (rel, down) in enumerate(parent.downs):
                if rel == 'det':
                    det = down
                    del parent.downs[i]
                    break
            if not det:
                continue
            t.downs.append(('det', det))
            t.downs.sort(key=lambda (dep, child): child.index)
        # Possibly the worst hack.
        #
        # Example:
        #
        #     "Is [the box of chocolates] [bigger than the box]?"
        while True:
            t = self.tokens[0]
            if t.tag != 'VBZ':
                break
            if self.root.index:
                break
            n = len(self.root.downs)
            if n != 2:  # One for the joined arg, one for ending punct.
                break
            rel, child = self.root.downs[1]
            if rel != 'punct':
                break
            for t in self.tokens:
                if t.tag == 'JJR' and t.up[0] == 'amod':
                    t.up = ('nsubj', t.up[1])
                    reassign_parent(t, self.root)
            break
        # If it starts with a "to be" VBZ, it should be of the form
        #
        #     "(is) (something) (something)"
        #
        # so if you get "(is) (something)" try to split the something.
        #
        # "Is the triangle above the pink rectangle?"
        #
        # and
        #
        # "Is the box bigger than the box of chocolates?"
        #
        # however note this won't handle the following alone:
        #
        #     "Is [the box of chocolates] [bigger than the box]?"
        while True:
            t = self.tokens[0]
            if t.tag != 'VBZ':
                break
            if self.root.index:
                break
            n = len(self.root.downs)
            if n != 2:  # One for punct, the other for the joined arg.
                break
            rel, child = self.root.downs[1]
            if rel != 'punct':
                break
            rel, child = self.root.downs[0]
            if len(child.downs) < 2:  # det, (amod,) prep
                break
            child_rel, grandchild = child.downs[-1]
            if child_rel == 'prep':
                reassign_parent(grandchild, self.root)
            elif child_rel == 'amod':
                child.downs[1] = ('acomp', grandchild)
                grandchild.up = ('acomp', child)
                reassign_parent(grandchild, self.root)
            else:
                break
            break
        # Convert from
        #
        #   verb -prep-> IN -npadvmod-> anything
        #
        # to
        #
        #   verb -prep-> IN
        #   verb -nsubj-> anything
        for t in self.tokens:
            rel, parent = t.up
            if not parent:
                continue
            if rel != 'npadvmod':
                continue
            if parent.tag != 'IN':
                continue
            parent_rel, grandparent = parent.up
            if not grandparent:
                continue
            if parent_rel != 'prep':
                continue
            if not grandparent.tag.startswith('V'):
                continue
            t.up = 'nsubj', parent
            reassign_parent(t, grandparent)
        # Prepositional phrase attachment: should be owned by another arg.
        #
        # "The hallway is south of the bedroom."
        for i in xrange(len(self.tokens) - 1):
            this = self.tokens[i]
            right = self.tokens[i + 1]
            directions = ['north', 'south', 'east', 'west']
            if not (this.text in directions and right.tag == 'IN'):
                continue
            if this.tag != 'NN':
                this.tag = 'NN'
            if right.text != 'of':
                continue
            rel, parent = right.up
            if parent.index == this.index:
                continue
            for i, (_, child) in enumerate(parent.downs):
                if child.index == right.index:
                    del parent.downs[i]
                    break
            right.up = (rel, this)
            this.downs.append((rel, right))
            this.downs.sort(key=lambda (rel, child): child.index)
        # Prepositional phrase attachment: should be its own arg.
        #
        # "Where was the apple before the beach?"
        for t in self.tokens:
            if t.text != 'before':
                continue
            rel, parent = t.up
            if parent is None:
                continue
            if parent.tag != 'NN':
                continue
            parent_rel, grandparent = parent.up
            if grandparent is None:
                continue
            for i, (_, child) in enumerate(parent.downs):
                if child.index == t.index:
                    del parent.downs[i]
                    break
            parent.up = (rel, grandparent)
            grandparent.downs.append((rel, t))
            grandparent.downs.sort(key=lambda (dep, child): child.index)
        # Handle verb args descended from an aux relation.
        for t in self.tokens:
            dep, up = t.up
            if up is None:
                continue
            up_dep, up_up = up.up
            if up_up is None:
                continue
            if up_dep not in ('aux', 'auxpass'):
                continue
            for i, (_, child) in enumerate(up.downs):
                if child.index == t.index:
                    del up.downs[i]
                    break
            t.up = (dep, up_up)
            up_up.downs.append((dep, t))
            up_up.downs.sort(key=lambda (a, b): b.index)
        # Handle advmod descending from a noun (relative clauses?), when at
        # least in bAbi it is always descended from the verb.
        for t in self.tokens:
            dep, up = t.up
            if up is None:
                continue
            if dep != 'advmod':
                continue
            if up.tag != 'NN':
                continue
            # Do the tree surgery.
            for i, (_, child) in enumerate(up.downs):
                if child.index == t.index:
                    del up.downs[i]
                    break
            t.up = dep, up.up[1]
            t.up[1].downs.append((dep, t))
            t.up[1].downs.sort(key=lambda (a, b): b.index)
        # The parser may give us multiple npadvmod links when what we want is
        # just one npadvmod that compound-links to the "other" one.  In other
        # words:
        #
        # Make
        #
        #     "[Yesterday] [evening] Tim moved to the abyss."
        #
        # parse similar to
        #
        #     "[This evening] Tim moved to the abyss."
        verb2npadvmods = defaultdict(list)
        for t in self.tokens:
            rel, parent = t.up
            if not parent:
                continue
            if rel != 'npadvmod':
                continue
            if not parent.tag.startswith('V'):
                continue
            verb2npadvmods[parent.index].append(t.index)
        for verb_x, npadvmod_xx in verb2npadvmods.iteritems():
            if len(npadvmod_xx) == 1:
                continue
            elif len(npadvmod_xx) != 2:
                assert False
            left_x, right_x = npadvmod_xx
            left = self.tokens[left_x]
            right = self.tokens[right_x]
            left.up = ('compound', left.up[1])
            reassign_parent(left, right)
        # At least in the bAbi dataset, the same sentence 'shape' almost always
        # parses one way, but in a few cases it parses the other way.  Normalize
        # those to the common way.
        #
        # "Julie is either in the bedroom or the office."  -- canonical
        # "Mary is either in the school or the office."  -- non-canonical
        #
        # Reassign "cc" and "conj" relations descending from an IN token to its
        # "pobj" child.
        for t in self.tokens:
            if t.tag != 'IN':
                continue
            pobj = None
            for rel, child in t.downs:
                if rel == 'pobj':
                    pobj = child
                    break
            if pobj is None:
                continue
            for rel, child in list(t.downs):
                if rel in ('cc', 'conj'):
                    reassign_parent(child, pobj)
        # Convert
        #
        #   V* -*-> NN(s) -advmod-> RB
        #
        # to
        #
        #   V* -*-> NN(s)
        #   V* -advmod-> RB
        for t in self.tokens:
            if t.tag != 'RB':
                continue
            rel, parent = t.up
            if rel != 'advmod':
                continue
            if not parent:
                continue
            if not parent.tag.startswith('N'):
                continue
            parent_rel, grandparent = parent.up
            reassign_parent(t, grandparent)
        return self
    def dump(self):
        """Pretty-print the tokens and their up/down links for debugging."""
        print 'Parse {'
        print '  ',
        for t in self.tokens:
            print '%d=%s/%s' % (t.index, t.text, t.tag),
        print
        def fix((rel, parent)):
            if parent:
                parent = parent.index
            return ' '.join(map(str, [rel, parent]))
        for t in self.tokens:
            print '  ', fix(t.up), '->', t.index, '->', map(fix, t.downs)
        print '}'
| true |
917ae910bcaf1b34dffb3fe82ee7c632df382bdb | Python | liooil/leetcode | /convert-a-number-to-hexadecimal.py | UTF-8 | 250 | 2.765625 | 3 | [] | no_license | class Solution:
def toHex(self, num: 'int') -> 'str':
ans = ""
for _ in range(8):
num, r = divmod(num, 16)
ans = "0123456789abcdef"[r] + ans
if num == 0:
break
return ans | true |
2269b2373c07331d0ca0b61f3e1339a8fe04b895 | Python | ACSchil/PyAI | /towersofhanoi/search.py | UTF-8 | 14,113 | 3.09375 | 3 | [] | no_license | from collections import deque
from threading import RLock, Thread
from queue import Queue
from search.node import Node
from towersofhanoi.hanoi import immutable_hanoi
def dls_graph(problem, limit):
    """Depth-limited graph search for Hanoi: seeds the explored set with the
    initial state, then delegates to recursive_dls_graph."""
    problem.metrics.start()
    seen = {immutable_hanoi(problem.initial)}
    return recursive_dls_graph(Node(problem.initial), problem, limit, seen)
def dls_forward(problem, limit):
    """Depth-limited search entry point using per-path forward checking."""
    problem.metrics.start()
    problem.metrics.inc_node()
    root = Node(problem.initial)
    return recursive_dls_forward(root, problem, limit)
def bfs_graph(problem):
    """Breadth-first graph search for Hanoi.

    An explored set of immutable states prevents re-enqueueing repeated
    configurations. Returns the solution path, or None if the frontier
    empties without reaching the goal.
    """
    problem.metrics.start()
    root = Node(problem.initial)
    problem.metrics.inc_node()
    problem.metrics.node_size(root)
    if problem.goal_test(root.state):
        return solution(root, problem)
    frontier = deque([root])
    explored = {immutable_hanoi(root.state)}
    # FIFO expansion: shallowest nodes come off the queue first.
    while frontier:
        problem.metrics.update_max_frontier(len(frontier))
        problem.metrics.update_max_explored(len(explored))
        current = frontier.popleft()
        for successor in current.expand(problem):
            problem.metrics.inc_node()
            key = immutable_hanoi(successor.state)
            # Goal test on generation, before the explored check.
            if problem.goal_test(successor.state):
                problem.metrics.stop()
                return solution(successor, problem)
            if key not in explored:
                frontier.append(successor)
                explored.add(key)
    problem.metrics.stop()
    return None
def bidirectional_bfs(problem):
    """Bidirectional BFS for Hanoi: one worker thread searches forward from
    the initial state and another backward from the goal state; the first
    intersection found is pushed onto a shared queue and stitched into a
    full path by bidirectional_solution."""
    problem.metrics.start()
    start_node = Node(problem.initial)
    end_node = Node(problem.end)
    problem.metrics.node_size(start_node)
    problem.metrics.inc_node()
    problem.metrics.inc_node()
    # Trivial case: start already equals the goal.
    if end_node.state == start_node.state:
        return [start_node]
    # locks for threads starting from the start and end (i.e. solution) of the problem
    explored_e_lock = RLock()
    explored_s_lock = RLock()
    # explored sets as dictionaries (state hash -> Node) to recover the
    # solution path later. Use locks as these are shared between threads.
    explored_e = {}
    explored_e[hash(immutable_hanoi(end_node.state))] = end_node
    explored_s = {}
    explored_s[hash(immutable_hanoi(start_node.state))] = start_node
    # queue for saving the solution from the threads
    solutions = Queue()
    # thread starts from initial state
    thread_s = Thread(target=directional_bfs,
                      args=(problem, start_node, explored_s, explored_s_lock,
                            explored_e, explored_e_lock, solutions, 's'),
                      daemon=True)
    # thread starts from goal state
    thread_e = Thread(target=directional_bfs,
                      args=(problem, end_node, explored_e, explored_e_lock,
                            explored_s, explored_s_lock, solutions, 'e'),
                      daemon=True)
    thread_s.start()
    thread_e.start()
    # Blocks until one thread finds an intersection. s is of the form
    # (intersecting_node (from start direction),
    #  intersecting_node.parent (from solution direction))
    s = solutions.get()
    problem.metrics.stop()
    return bidirectional_solution(s[0], s[1], problem)
def iterative_deepening_forward(problem):
    """Iterative-deepening driver over dls_forward: retry with an ever larger
    depth limit until something other than 'cutoff' comes back."""
    problem.metrics.start()
    problem.metrics.node_size(Node(problem.initial))
    depth = 0
    while True:
        outcome = dls_forward(problem, depth)
        if outcome == 'cutoff':
            depth += 1
        else:
            problem.metrics.stop()
            return outcome
def iterative_deepening_graph(problem):
    """Iterative-deepening driver over dls_graph (explored-set variant):
    retry with an ever larger depth limit until the result is not 'cutoff'."""
    problem.metrics.start()
    problem.metrics.node_size(Node(problem.initial))
    depth = 0
    while True:
        outcome = dls_graph(problem, depth)
        if outcome == 'cutoff':
            depth += 1
        else:
            problem.metrics.stop()
            return outcome
##################
# Without explored
##################
def bfs_tree(problem):
    """Breadth-first tree search (no explored set), so repeated states may be
    expanded many times. Returns the solution path or None."""
    problem.metrics.start()
    root = Node(problem.initial)
    problem.metrics.node_size(root)
    problem.metrics.inc_node()
    if problem.goal_test(root.state):
        return solution(root, problem)
    frontier = deque([root])
    # Expand in FIFO order: shallowest nodes first.
    while frontier:
        problem.metrics.update_max_frontier(len(frontier))
        problem.metrics.update_max_explored(0)
        current = frontier.popleft()
        for successor in current.expand(problem):
            problem.metrics.inc_node()
            if not problem.goal_test(successor.state):
                frontier.append(successor)
                continue
            problem.metrics.stop()
            return solution(successor, problem)
    problem.metrics.stop()
    return None
def dls_tree(problem, limit):
    """Run a depth-limited tree search from the problem's initial state."""
    problem.metrics.start()
    root = Node(problem.initial)
    return recursive_dls_tree(root, problem, limit)
def iterative_deepening_tree(problem):
    """Iterative-deepening driver over dls_tree (pure tree search): retry
    with an ever larger depth limit until the result is not 'cutoff'."""
    problem.metrics.start()
    problem.metrics.node_size(Node(problem.initial))
    depth = 0
    while True:
        outcome = dls_tree(problem, depth)
        if outcome == 'cutoff':
            depth += 1
        else:
            problem.metrics.stop()
            return outcome
##################
# Helper Functions
##################
def directional_bfs(problem, node, my_explored, my_lock, their_explored, their_lock, solutions, direction):
    """Runs a bfs in a given direction ('s' from the start, 'e' from the end).
    If the search finds an intersection with the other direction's explored
    dict, it pushes the (start-side node, end-side parent) pair onto the
    shared solution queue and returns."""
    frontier = deque()
    frontier.append(node)
    with my_lock:
        h = hash(immutable_hanoi(node.state))
        my_explored[h] = node
    # perform bfs
    while frontier:
        problem.metrics.update_max_frontier(len(frontier))
        node = frontier.popleft()
        for child in node.expand(problem):
            problem.metrics.inc_node()
            h = hash(immutable_hanoi(child.state))
            if h in my_explored.keys():
                continue
            # Both threads must acquire the two locks in the same global
            # order (start lock first) to avoid deadlock.
            if direction == 's':
                lock_one = my_lock
                lock_two = their_lock
            else:
                lock_one = their_lock
                lock_two = my_lock
            # if we haven't seen this state, check to see if the other direction has seen it
            with lock_one:
                with lock_two:
                    my_keys = my_explored.keys()
                    problem.metrics.update_max_explored(len(my_keys) +
                                                        len(their_explored.keys()))
                    for key in my_keys:
                        # if the other direction has seen the state the searches have intersected
                        if key in their_explored:
                            # The tuple is always ordered (start side, end side
                            # parent) regardless of which thread found it.
                            if direction == 's':
                                solutions.put((my_explored[key], their_explored[key].parent))
                            else:
                                solutions.put((their_explored[key], my_explored[key].parent))
                            return
            # if there wasn't an intersection, add the generated child to the FIFO queue
            frontier.append(child)
            with my_lock:
                h = hash(immutable_hanoi(child.state))
                my_explored[h] = child
def recursive_dls_graph(node, problem, limit, explored, depth=0):
    """Recursive depth limited search for hanoi; uses an explored set.
    Setting the limit to infinity runs a DFS.

    Returns the solution path, 'cutoff' if the depth limit was reached,
    or 'failure' if the subtree was exhausted.
    """
    problem.metrics.update_max_depth(depth)
    # NOTE(review): live debug output below -- this prints every visited
    # state; presumably leftover instrumentation, confirm before removal.
    print()
    print('At depth ', depth, ' and limit', limit)
    problem.print_state(node.state)
    if problem.goal_test(node.state):
        problem.metrics.update_max_explored(len(explored))
        problem.metrics.stop()
        return solution(node, problem)
    elif limit == 0:
        problem.metrics.update_max_explored(len(explored))
        problem.metrics.stop()
        return 'cutoff'
    else:
        cutoff_occurred = False
        for child in node.expand(problem):
            problem.metrics.inc_node()
            c = immutable_hanoi(child.state)
            # Unlike the forward-checking variant, states are never removed
            # from `explored`, so each state is expanded at most once.
            if c not in explored:
                explored.add(c)
            else:
                continue
            # search deeper
            result = recursive_dls_graph(child, problem, limit - 1, explored, depth + 1)
            # decide if we hit our depth limit, or if we found the solution
            if result == 'cutoff':
                cutoff_occurred = True
            elif result != 'failure':
                problem.metrics.update_max_explored(len(explored))
                problem.metrics.stop()
                return result
        # if we didn't hit the depth limit nor did we find a solution, we failed
        if cutoff_occurred:
            problem.metrics.update_max_explored(len(explored))
            problem.metrics.stop()
            return 'cutoff'
        else:
            problem.metrics.update_max_explored(len(explored))
            problem.metrics.stop()
            return 'failure'
def recursive_dls_forward(node, problem, limit, forward_set=None, depth=0):
    """Depth-limited DFS with forward checking: states on the current path
    are kept in *forward_set* so the search never revisits a state already
    on the path (cycle avoidance); entries are removed on backtrack.

    BUG FIX: ``forward_set`` previously defaulted to a *mutable* ``set()``,
    which Python evaluates once at function definition time. The current
    node's own state is added but never removed, so state left behind by one
    top-level call (e.g. via ``dls_forward`` during iterative deepening)
    leaked into every later call and silently pruned reachable states.
    Defaulting to ``None`` and building a fresh set per call restores correct
    behaviour while keeping the signature backward-compatible.

    Returns the solution path, 'cutoff' if the depth limit was reached, or
    'failure' if the subtree was exhausted.
    """
    if forward_set is None:
        forward_set = set()
    problem.metrics.update_max_depth(depth)
    if problem.goal_test(node.state):
        problem.metrics.stop()
        return solution(node, problem)
    if limit == 0:
        problem.metrics.stop()
        return 'cutoff'
    # Mark the current state as being on the active path.
    forward_set.add(immutable_hanoi(node.state))
    problem.metrics.update_max_explored(len(forward_set))
    cutoff_occurred = False
    for child in node.expand(problem):
        problem.metrics.inc_node()
        c = immutable_hanoi(child.state)
        if c in forward_set:
            # Already on the current path: expanding it would cycle.
            continue
        forward_set.add(c)
        # Search deeper with one less depth budget.
        result = recursive_dls_forward(child, problem, limit - 1, forward_set, depth + 1)
        # Backtrack: the child is no longer on the active path.
        forward_set.remove(c)
        if result == 'cutoff':
            cutoff_occurred = True
        elif result != 'failure':
            problem.metrics.stop()
            return result
    problem.metrics.stop()
    return 'cutoff' if cutoff_occurred else 'failure'
def recursive_dls_tree(node, problem, limit, depth=0):
    """Depth-limited DFS without an explored set (pure tree search).

    Returns the solution path if found, 'cutoff' if the depth limit was hit
    somewhere below, or 'failure' if the subtree was exhausted. Setting the
    limit to infinity yields plain DFS.
    """
    problem.metrics.update_max_depth(depth)
    if problem.goal_test(node.state):
        problem.metrics.stop()
        return solution(node, problem)
    if limit == 0:
        problem.metrics.stop()
        return 'cutoff'
    hit_limit = False
    for successor in node.expand(problem):
        problem.metrics.inc_node()
        outcome = recursive_dls_tree(successor, problem, limit - 1, depth + 1)
        if outcome == 'cutoff':
            hit_limit = True
        elif outcome != 'failure':
            problem.metrics.stop()
            return outcome
    problem.metrics.stop()
    return 'cutoff' if hit_limit else 'failure'
#############################
# Solution Recovery Functions
#############################
def solution(node, problem=None):
    """Return a deque of the nodes on the path from the initial state to
    *node*, recovered by walking *node*'s parent chain.

    The walk stops when a node has no ``parent`` attribute or its parent is
    None. When *problem* is supplied, its solution-length metric is updated.

    BUG FIX: ``problem`` defaults to None but the original dereferenced
    ``problem.metrics`` unconditionally, so calling ``solution(node)``
    raised AttributeError; the metrics update is now guarded.
    """
    path = deque()
    n = node
    while True:
        path.appendleft(n)
        # Mirrors the original try/except AttributeError: missing parent
        # attribute terminates the walk.
        n = getattr(n, 'parent', None)
        if n is None:
            break
    if problem is not None:
        problem.metrics.update_solution_steps(len(path))
    return path
def bidirectional_solution(start_node, end_node, problem=None):
    """Return a deque of the nodes on the full path from the initial state
    to the goal state, given the two nodes where a bidirectional search met:
    *start_node* whose ancestors lead back to the initial state, and
    *end_node* (the meeting state's parent on the goal side) whose ancestors
    lead to the goal state.

    BUG FIX: ``problem`` defaults to None but the original dereferenced
    ``problem.metrics`` unconditionally; the metrics update is now guarded.
    """
    path = deque()
    # Walk up from the meeting point toward the initial state (prepended).
    n = start_node
    while True:
        path.appendleft(n)
        n = getattr(n, 'parent', None)
        if n is None:
            break
    # Walk up from the other side toward the goal state (appended).
    n = end_node
    while True:
        path.append(n)
        n = getattr(n, 'parent', None)
        if n is None:
            break
    if problem is not None:
        problem.metrics.update_solution_steps(len(path))
    return path
| true |
005b711f7cb47c17c6dfca91f469c0cdfd67efd8 | Python | jimmy-jing/housing_ml | /jj_dummification.py | UTF-8 | 6,423 | 3 | 3 | [] | no_license | import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
class LabelEncoders():
    '''
    Factory of pre-configured sklearn LabelEncoder instances, one per ordinal
    housing feature; only meant to be used within class HousingCategorical.
    Use self.label_dict to map a dataframe column name to the *_encoder
    method that returns its encoder.
    '''
    def __init__(self):
        # Maps column name -> method-name prefix: the encoder for column c is
        # obtained via getattr(self, self.label_dict[c] + '_encoder')().
        # Not every key has to be used, but new columns must be added manually.
        self.label_dict = {'ExterQual':'quality',
                           'ExterCond': 'quality',
                           'HeatingQC': 'quality',
                           'KitchenQual': 'quality',
                           'BsmtQual': 'bsmt',
                           'BsmtCond': 'bsmt',
                           'BsmtExposure': 'bsmt_exposure',
                           'FireplaceQu': 'fireplace',
                           'GarageCond': 'garage',
                           'GarageQual': 'garage',
                           'GarageFinish': 'garage_finish',
                           'PoolQC': 'pool'}
    @staticmethod
    def _make_encoder(ordered_classes):
        '''Build a LabelEncoder with classes_ pinned to the given ordered
        category levels (worst to best). Factored out of the seven
        near-identical *_encoder methods below.

        NOTE(review): transform() mapping each level to its index in this
        list relies on sklearn's object-dtype encoding path -- verify with
        the pinned sklearn version.
        '''
        encoder = LabelEncoder()
        encoder.classes_ = ordered_classes
        return encoder
    def quality_encoder(self):
        # Po < Fa < TA < Gd < Ex
        return self._make_encoder(['Po','Fa','TA','Gd','Ex'])
    def bsmt_encoder(self):
        return self._make_encoder(['No_Bsmt','Po','Fa','TA','Gd','Ex'])
    def bsmt_exposure_encoder(self):
        return self._make_encoder(['No_Bsmt','No','Mn','Av','Gd'])
    def fireplace_encoder(self):
        return self._make_encoder(['No_FP','Po','Fa','TA','Gd','Ex'])
    def garage_encoder(self):
        return self._make_encoder(['No_G','Po','Fa','TA','Gd','Ex'])
    def garage_finish_encoder(self):
        return self._make_encoder(['No_G','Unf','RFn','Fin'])
    def pool_encoder(self):
        return self._make_encoder(['No_Pool','Fa','TA','Gd','Ex'])
class HousingCategorical():
    '''
    class to deal with all categorical features in dataset either via OHE or LE
    user can init first and overwrite self.label_encode_features and self.ohe_features with new list to test different combinations
    '''
    def __init__(self):
        '''
        all three instance attributes can be overwritten after init for customization.
        please use list_checker if user overwrites to make sure information is compliant
        '''
        # 'Train' mode triggers the SalePrice column reordering after OHE.
        self.mode = 'Train'
        # NOTE(review): reads 'train_imputed.csv' from the working directory
        # at construction time -- confirm that is the intended coupling.
        self.df = pd.read_csv('train_imputed.csv', index_col=0)
        # Ordinal features encoded to integer ranks via LabelEncoders.
        self.label_encode_features = ['ExterQual','ExterCond','BsmtQual','BsmtCond','BsmtExposure',
                                      'HeatingQC','KitchenQual','FireplaceQu','GarageFinish','GarageQual',
                                      'GarageCond','PoolQC']
        # Nominal (and, by default, also the ordinal) features one-hot encoded.
        self.ohe_features = ['MSSubClass','MSZoning','Street','Alley','LotShape','LandContour',
                             'LotConfig','LandSlope','Neighborhood','Condition1','Condition2','BldgType',
                             'HouseStyle','RoofStyle','RoofMatl','Exterior1st','Exterior2nd','MasVnrType',
                             'Foundation','BsmtFinType1','BsmtFinType2','Heating','CentralAir','Electrical',
                             'Functional','GarageType','PavedDrive','Fence','MiscFeature','SaleType',
                             'SaleCondition','MoSold','YrSold','ExterQual','ExterCond','BsmtQual','BsmtCond',
                             'BsmtExposure','HeatingQC','KitchenQual','FireplaceQu','GarageFinish','GarageQual',
                             'GarageCond','PoolQC']
    def one_hot_encode(self):
        #function to create instance of OHE, train on the necessary features, transform those features and replace with transformed results
        # drop='first' removes one dummy per feature to avoid collinearity.
        ohe = OneHotEncoder(drop='first', sparse=False)
        ohe_df=ohe.fit_transform(self.df[self.ohe_features])
        #create a list to store new names after dummification so self.df is still legible
        # Skip each feature's first category (position 0) to mirror drop='first';
        # remaining dummies are named '<feature>_<category>'.
        new_column_names = []
        for i in range(len(ohe.categories_)):
            for j in ohe.categories_[i]:
                if (list(ohe.categories_[i]).index(j)) == 0:
                    pass
                else:
                    new_column_names.append(self.ohe_features[i]+'_'+str(j))
        ohe_df = pd.DataFrame(ohe_df, columns=new_column_names)
        ohe_df.index = self.df.index
        # Replace the raw categorical columns with their dummified versions.
        self.df = pd.merge(self.df.drop(columns=self.ohe_features),ohe_df, left_index=True, right_index=True)
        if self.mode == 'Train':
            self.move_sale_price_to_right()
    def label_encode(self):
        #create instance of LabelEncoders to label encode all necessary features from 0-n
        #user should beware of the addition assumption of linear spacing within each feature
        label_instance = LabelEncoders()
        for i in self.label_encode_features:
            # Resolve the matching factory method by name, e.g. 'quality_encoder'.
            labeler = getattr(label_instance, label_instance.label_dict[i]+'_encoder')()
            self.df[i] = labeler.transform(self.df[i])
    def move_sale_price_to_right(self):
        #moves target variable y (SalePrice) to the last column
        self.df = pd.concat([self.df.drop(columns='SalePrice'), self.df.SalePrice], axis=1)
    def list_checker(self):
        #checks for overlap among the two feature lists as well as if all categorical features are accounted for
        for i in self.label_encode_features:
            if i in self.ohe_features:
                print(i+' is in both feature lists')
        for i in self.ohe_features:
            if i in self.label_encode_features:
                print(i+' is in both feature lists')
        # 45 is the expected total count of categorical features in the dataset.
        if len(self.ohe_features)+len(self.label_encode_features) != 45:
            print('Beware the number of features you are OHE and LE transforming!\n')
            print('Categorical Features might be missed or continuous variables might be overridden!\n')
            print('value_counts() analysis recommended!')
77eff28bd52c7dfc2d93a501aa276fac8140b042 | Python | santb08/statistics-py | /exercises/ic/taller ic/punto_10.py | UTF-8 | 584 | 2.953125 | 3 | [
"MIT"
] | permissive | import sys
sys.path.insert(0, '../../../lib/')
import ic
"""
La Asociación de Finanzas Estudiantiles en Faber Collage está planeando una “Feria primaveral” en la cual intentan
vender camisetas con su logo. El tesorero desea un estimado de la proporción de estudiantes que comprarán una camiseta.
El estimado debe proporcionar un nivel de confianza del 90% y el error no debe exceder del 3%. ¿Qué tan grande debe
tomarse la muestra?
"""
# Inputs: 90% confidence level and a maximum tolerable error of 3%.
nivel = .90
error = 0.03
p = 0.5 # no sample proportion is given, so use the conservative p = 0.5 (maximises the required n)
# Required sample size for estimating the proportion of students who would
# buy a T-shirt (see ic.numero_datos_para_error_proporcion).
n = ic.numero_datos_para_error_proporcion(p, error, nivel)
print(n)
909c67d5b55b056d2f1897154bf7171b49ce06dd | Python | tachylyte/HydroGeoPy | /monte_carlo.py | UTF-8 | 995 | 3.421875 | 3 | [
"BSD-2-Clause"
] | permissive | # Set of functions for generating monte carlo distributions
from random import *
import math
def Single(a, i):
    """Return a degenerate 'distribution': the constant value *a* repeated
    *i* times.

    List replication replaces the original append loop, which also shadowed
    the parameter ``i`` with its loop variable.
    """
    return [a] * i
def Uniform(a, b, i):
    """Draw *i* samples from the uniform distribution on [a, b].

    One RNG call per sample, exactly as before; the comprehension avoids
    shadowing the parameter ``i`` with a loop variable.
    """
    return [uniform(a, b) for _ in range(i)]
def Loguniform(a, b, i):
    """Draw *i* samples from a log-uniform (reciprocal) distribution on
    [a, b]. Requires 0 < a <= b.

    BUG FIX: the original drew uniform(a, b) and then applied ``math.log``
    followed by ``math.exp``, which cancel out, so it returned plain uniform
    samples. A log-uniform variate is obtained by exponentiating a uniform
    draw on [log(a), log(b)].
    """
    log_a, log_b = math.log(a), math.log(b)
    return [math.exp(uniform(log_a, log_b)) for _ in range(i)]
def Triangular(low, high, mode, i):
    """Draw *i* samples from the triangular distribution on [low, high]
    with the given *mode* (the peak of the density).

    Comprehension replaces the original loop that shadowed parameter ``i``.
    """
    return [triangular(low, high, mode) for _ in range(i)]
def Normal(mu, sigma, i):
    """Draw *i* samples from a normal distribution with mean *mu* and
    standard deviation *sigma*.

    Comprehension replaces the original loop that shadowed parameter ``i``.
    """
    return [normalvariate(mu, sigma) for _ in range(i)]
def Lognormal(mu, sigma, i):
    """Draw *i* samples from a log-normal distribution whose underlying
    normal has mean *mu* and standard deviation *sigma*.

    Comprehension replaces the original loop that shadowed parameter ``i``.
    """
    return [lognormvariate(mu, sigma) for _ in range(i)]
| true |
e409ec12b5052a9a69ef6f6e060d45fb64d56713 | Python | KanikaParikh/Streaming-Text-Analytics | /Spark_SentimentAnalysis.py | UTF-8 | 5,206 | 3.140625 | 3 | [] | no_license | # Kanika Parikh 216030215 and Kaumilkumar Patel 216008914
# Assignment 3 : Part B
"""
This Spark app connects to a script running on another (Docker) machine
on port 9009 that provides a stream of raw tweets text. That stream is
meant to be read and processed here, where top trending hashtags are
identified. Both apps are designed to be run in Docker containers.
To execute this in a Docker container, do:
docker run -it -v $PWD:/app --link twitter:twitter eecsyorku/eecs4415
and inside the docker:
spark-submit spark_app.py
For more instructions on how to run, refer to final tutorial 8 slides.
Made for: EECS 4415 - Big Data Systems (York University EECS dept.)
Modified by: Tilemachos Pechlivanoglou
Based on: https://www.toptal.com/apache/apache-spark-streaming-twitter
Original author: Hanee' Medhat
"""
# Reference used- https://www.geeksforgeeks.org/twitter-sentiment-analysis-using-python/
from pyspark import SparkConf,SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import Row,SQLContext
from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
import sys
import os
# create spark configuration
conf = SparkConf()
conf.setAppName("TwitterStreamApp")
# create spark context with the above configuration
sc = SparkContext(conf=conf)
sc.setLogLevel("ERROR")
# create the Streaming Context from spark context, interval size 2 seconds
ssc = StreamingContext(sc, 2)
# setting a checkpoint for RDD recovery (necessary for updateStateByKey)
ssc.checkpoint("checkpoint_TwitterApp")
# read data from port 9009 (host "twitter" is the linked Docker container)
dataStream = ssc.socketTextStream("twitter",9009)
# ---------------------------------------------------------------
# MODIFIED---
# Dictionary of 10 hashtags per topic; key: topic name, values: hashtag list
tweets_dict = {
    'Politics': ['#america', '#vote' ,'#donaldtrump', '#politicalmemes ','#freedom','#politics','#democrats', '#NDA','#obama','#democraticparty'],
    'Amazon': ['#amazon','#amazonprime','#amazondeals','#onlineshopping','#fashion', '#amazonseller', '#amazonfreebies','#ebook','#amazonreviewer','#amazonfba'],
    'Apple': ['#apple', '#iphone', '#applewatch', '#ios','#airpods','#shotoniphone','#macbook','#macbookpro','#ipadpro','#ios'],
    'Shoes': ['#shoes','#fashion','#shoesaddict', '#adidas' ,'#fashionblogger','#nike', '#ootd',' #jordan','#footwear','#shoesforsale'],
    'CryptoCurrency': ['#crypto','#bitcoin','#cryptocurrency','#blockchain','#forex','#cryptotrading','#bitcoins','#investment','#bitcoincash','#bitcoinprice']
}
# Returns boolean value (True: for values in dict otherwise False)
def Filtering_hashtags(line):
    """Return True when any word of the tweet matches a tracked hashtag.

    Words are lower-cased before comparison against the lists in tweets_dict.
    """
    lowered = [word.lower() for word in line.split(" ")]
    for hashtag_list in tweets_dict.values():
        for word in lowered:
            if word in hashtag_list:
                return True
    return False
# Keep only the tweets that contain at least one hashtag tracked in
# tweets_dict (Filtering_hashtags returns a boolean per line).
hashtags = dataStream.filter(Filtering_hashtags)
# Grouping them based on a topic (dictionary key)
def tweets_topic(line):
    """Return the topic (tweets_dict key) matching a word of the tweet.

    Returns "" when nothing matches.  When several words (or overlapping
    hashtag lists, e.g. '#fashion') match, the last match wins — the scan
    deliberately does not stop early, matching the original behaviour.
    """
    topic = ""
    for word in line.split(" "):
        lowered = word.lower()
        for name, hashtag_list in tweets_dict.items():
            if lowered in hashtag_list:
                topic = name
    return topic
def tweets_sentiment(tweet):
    """Classify a tweet as 'Positive', 'Negative' or 'Neutral'.

    Uses the VADER compound polarity score with a +/-0.2 dead band.
    """
    compound = SIA().polarity_scores(tweet)['compound']
    if compound > 0.2:
        return 'Positive'
    if compound < -0.2:
        return 'Negative'
    return 'Neutral'
# -----------------------------------------------------------
# map each tweet to a ("Topic-Sentiment", 1) pair for counting
hashtag_counts = hashtags.map(lambda x: (tweets_topic(x) + "-" + tweets_sentiment(x), 1))
# adding the count of each hashtag to its last count
def aggregate_tags_count(new_values, total_sum):
    """State-update function: add this batch's counts onto the running total.

    total_sum is None on the first batch for a key, hence the `or 0`.
    """
    running = total_sum or 0
    return running + sum(new_values)
# do the aggregation, note that now this is a sequence of RDDs
hashtag_totals = hashtag_counts.updateStateByKey(aggregate_tags_count)
# MODIFIED:
# if values.txt exists from a previous run, remove it before starting
if os.path.exists('values.txt'):
    os.remove('values.txt')
# open output files: values.txt for graph data, PARTB_OUTPUT.txt for the report
values = open('values.txt', 'a+') # keep appending to file
output = open('PARTB_OUTPUT.txt', 'a+')
# process a single time interval
def process_interval(time, rdd):
    """Print and persist the running per-topic totals for one streaming batch.

    Writes each (key, count) pair to the module-level `values` and `output`
    files and echoes it to stdout.
    """
    # print a separator
    print("----------- %s -----------" % str(time))
    output.write("----------- %s -----------\n" % str(time))
    try:
        all_rdd = rdd.take(1000)
        for tag in all_rdd:
            # MODIFIED: write output to file
            values.write('{:<40} {}\n'.format(tag[0], tag[1]))
            output.write('{:<40} {}\n'.format(tag[0], tag[1]))
            print('{:<40} {}'.format(tag[0], tag[1]))
    # BUG FIX: was a bare `except:` which also swallows SystemExit and
    # KeyboardInterrupt; catch only real errors.
    except Exception:
        e = sys.exc_info()[0]
        print("Error: %s" % e)
# Do this for every single interval
hashtag_totals.foreachRDD(process_interval)
# start the streaming computation
ssc.start()
# wait for the streaming to finish
ssc.awaitTermination()
# MODIFIED: close files (only reached once the streaming context terminates)
values.close()
output.close()
a8f3ba8645868e108c4b647e563b0f7cce833266 | Python | vectominist/ZJ-Solutions-in-Python | /Contest/a864.py | UTF-8 | 741 | 3.109375 | 3 | [] | no_license | import sys
# Classify stars by colour index: read "name mB mV" records from stdin until
# the sentinel name END, compute delta = mB - mV, and print the spectral
# class letter (O/B/A/F/G/K/M) for the band delta falls in.
# NOTE(review): the band edges leave tiny gaps (e.g. -0.251 <= delta <= -0.250
# matches no branch and prints nothing) -- presumably a rounding of the
# problem's limits; confirm against the judge's specification.
for s in sys.stdin:
    num = s.split()
    name = num[0]
    if name == 'END':
        break
    mB = float(num[1])
    mV = float(num[2])
    delta = mB - mV
    if delta < -0.251:
        print('%s %.2lf O' % (name, delta))
    elif delta > -0.250 and delta < -0.001:
        print('%s %.2lf B' % (name, delta))
    elif delta > -0.001 and delta < 0.249:
        print('%s %.2lf A' % (name, delta))
    elif delta > 0.249 and delta < 0.499:
        print('%s %.2lf F' % (name, delta))
    elif delta > 0.499 and delta < 0.999:
        print('%s %.2lf G' % (name, delta))
    elif delta > 0.999 and delta < 1.499:
        print('%s %.2lf K' % (name, delta))
    elif delta > 1.499:
        print('%s %.2lf M' % (name, delta))
| true |
2f38cf343b6e1e56b89ff3f07ab9825079cdc5ad | Python | Rishabhchauhan30/Python-- | /function/sumreduce.py | UTF-8 | 99 | 2.65625 | 3 | [] | no_license | from functools import reduce
lst = [10, 20, 30, 40, 50]
# Fold the list with addition, left to right, to obtain the total (150).
result = reduce(lambda acc, item: acc + item, lst)
print(result)
9eb8c80ea7bfcad3f9f332fd46a65150a962880f | Python | Raymond38324/Luffycity | /第三模块/测试代码/ftp/上传下载测试/Client/client.py | UTF-8 | 824 | 2.75 | 3 | [] | no_license | # coding: utf-8
import socket
import os
import struct
import json
from time import sleep
from sys import stdout
# Simple file-upload client: sends a JSON header (name + size, preceded by
# its struct-packed length), then streams the file in 128-byte chunks while
# drawing a textual progress bar.
host = '127.0.0.1'
port = 8080
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((host, port)) # connect to the socket at this ip and port
filename = input(">>>")
file_size = os.path.getsize(filename)
head = json.dumps({"filename":filename,"file_size":file_size}).encode("utf8")
print(head)
client.send(struct.pack("i",len(head)))
client.send(head)
buffer = 128
start = 0
with open(filename,'rb') as f:
    while start <= file_size:
        client.send(f.read(buffer))
        num = int((start/file_size)*100)
        start +=buffer
        stdout.write("*"*num+'\r')
        stdout.write("%d/100"%num)
        stdout.flush()
os.system('clear')
print("100/100"+"*"*100)
client.close()
| true |
7e33687b9ecd243fb57de5686d86ea8476ad62b6 | Python | variito/practicassimulacion1 | /simbarb.py | UTF-8 | 3,095 | 2.53125 | 3 | [] | no_license | import random
import math
import time
#logaritmo natural = math.log()
# Barbershop queue simulation (Python 2): generates exponential arrival times
# and uniform service times, assigns clients to free barbers, then prints a
# summary table and replays the timeline one simulated second at a time.
t_entre_llegada = int(raw_input("INGRESA TIEMPO ENTRE LLEGADA: "))
t_minimo = int(raw_input("TIEMPO MINIMO EN CORTE: "))
t_maximo = int(raw_input("TIEMPO MAXIMO EN CORTE: "))
can_barberos = int(raw_input("CANTIDAD DE BARBEROS: "))
total_de_clientes = int(raw_input("TOTAL DE CLIENTES: "))
t_llegada_total = 0
t_salida_anterior = 0
espera_total_1 = 0
t_corte_1 = 0
tiempo_llegada = []
tiempo_corte = []
cli = []
sali = []
espe = []
tiempo_llegada_entero = []
sali_entero = []
barbero = []
barberos_ocupados = 1
arreglo_barberos = []
# one [busy-flag, busy-until-time, barber-index] entry per barber
for i in range(can_barberos):
    arreglo_barberos.append([0,0,i])
for i in range(total_de_clientes):
    R = random.random()
    t_llegada = abs((((-1)*(t_entre_llegada))*((math.log(R)))))# arrival time of a single client (exponential)
    t_llegada_total = t_llegada + t_llegada_total # cumulative arrival time
    tiempo_llegada.append(t_llegada_total) # record the arrival time
    tiempo_llegada_entero.append(int(t_llegada_total))
    t_corte = ((t_minimo + ((t_maximo - t_minimo) * (R)))) # haircut duration
    tiempo_corte.append(t_corte)
    cli.append(i) # client number added to the list
    espera_total = (t_salida_anterior - t_llegada_total)
    # assign the client to a free barber (wait drops to zero when one is free)
    for b in arreglo_barberos:
        if b[0] == 0:
            espera_total = 0
            b[0] = 1
            b[1] = t_llegada_total + t_corte
            barbero.append(b[2])
        elif b[1] < t_llegada_total:
            espera_total = 0
            b[1] = t_llegada_total + t_corte
            barbero.append(b[2])
    print "TIEMPO DE ESPERA " + str(espera_total)
    espe.append(espera_total)
    t_salida = t_llegada_total + t_corte + espera_total # departure time
    sali.append(t_salida)
    sali_entero.append(int(t_salida))
    t_salida_anterior = t_salida
    espera_total_1 = espera_total + espera_total_1# accumulated waiting time
    t_corte_1 = (t_corte) + (t_corte_1)
# summary statistics over the whole simulated run
long_de_fila = (espera_total_1) / (t_salida_anterior)
t_espera_promedio = (espera_total_1) / (total_de_clientes)
uso_instalacion = (t_corte_1) / (t_salida_anterior)
print ("LONGITUD PROMEDIO DE FILA %.2f" %(long_de_fila))
print ("TIEMPO DE ESPERA PROMEDIO %.2f" %(t_espera_promedio))
print ("USO PROMEDIO DE LA INSTALACION %.2f" %(uso_instalacion))
print "\n\n"
sep = '|{}|{}|{}|{}|{}|{}|'.format('-'*10, '-'*16, '-'*10, '-'*16, '-'*16, '-'*16)
print('{0}\n| CLIENTE  |    LLEGADA     |  CORTE   |     SALIDA     |     ESPERA     |    BARBERO     |\n{0}'.format(sep))
for cliente,llegada,corte,salida,espera,barb in zip(cli,tiempo_llegada, tiempo_corte,sali,espe,barbero):
    #print ('tiempo de llegada:{0:.2f}, tiempo de corte: {0:.2f}'.format(llegada,corte))
    print('| {:>8.2f} | {:>14.2f} | {:>8.2f} | {:>14.2f} | {:>14.2f} | {:>14.2f} |\n{}'.format(cliente,llegada,corte,salida,espera,barb,sep))
raw_input("")
# replay: tick one simulated second at a time, announcing arrivals/departures
clientese = 1
clientess = 1
for t in range(1, int(t_salida_anterior)+1):
    time.sleep(1)
    print t
    if t in tiempo_llegada_entero:
        print ("llego cliente %s" %(clientese))
        clientese += 1
    if t in sali_entero:
        print ("salio cliente %s" %(clientess))
        clientess += 1
| true |
f4377869de4e4ec82a76c7b571c19a16d8ccd12c | Python | SMikolaj99/Miko-aj-Solarz | /zadanie2/zadanie2.pyde | UTF-8 | 596 | 3.734375 | 4 | [] | no_license | def setup():
size(600,600)
frameRate(50)
stroke(150,0,150)
strokeWeight(2)
global x, y, kolor
x = 300
y = 25
kolor = 0
def draw():
    """Per-frame Processing callback: draws the circle, shifts the stroke
    colour, and moves the circle diagonally, reversing once past (300, 300)."""
    global x, y, kolor
    ellipse(x, y, 40, 40)
    kolor = kolor + 1
    stroke(150 + kolor,0 + kolor,150 - kolor)
    x = x + 1
    y = y + 1
    if y > 300:
        x = x - 2
        kolor = kolor - 2
    if x < 300:
        y = y - 2
def mousePressed():
    """Quit the sketch on any mouse click."""
    exit()
# 1,25pkt, brakje kolekcji, co bardzo zmniejsza możliwości w zakresie zmiany kolorów, ale już jest ok, możesz przejść dalej
| true |
b86396f28b9996804d16080bfc5284e38daa22a8 | Python | zaneguqi/shoelace | /shoelace/dataset.py | UTF-8 | 6,171 | 3.046875 | 3 | [
"MIT"
] | permissive | import re
import numpy as np
import pickle
from collections import defaultdict
from chainer.dataset.dataset_mixin import DatasetMixin
class LtrDataset(DatasetMixin):
    """
    Implementation of Learning to Rank data set
    Supports efficient slicing on query-level data. Note that single samples are
    collections of query-document pairs represented as a tuple of matrix of
    feature vectors and a vector of relevance scores
    """
    def __init__(self, feature_vectors, relevance_scores, query_pointer,
                 query_ids, nr_of_queries):
        # query_pointer[i]:query_pointer[i+1] delimits query i's rows in
        # feature_vectors / relevance_scores (CSR-style offsets)
        self.feature_vectors = feature_vectors
        self.relevance_scores = relevance_scores
        self.query_pointer = query_pointer
        self.query_ids = query_ids
        self.nr_queries = nr_of_queries
    def __len__(self):
        """
        Returns the number of queries.
        """
        return self.nr_queries
    def get_example(self, i):
        """
        Returns the i-th example.
        Implementations should override it. It should raise :class:`IndexError`
        if the index is invalid.
        Args:
            i (int): The index of the example.
        Returns:
            The i-th example.
        """
        if i < 0 or i >= self.nr_queries:
            raise IndexError
        start = self.query_pointer[i]
        end = self.query_pointer[i+1]
        # single-query dataset sharing the parent's underlying arrays (views)
        return LtrDataset(self.feature_vectors[start:end, :],
                          self.relevance_scores[start:end], np.zeros(1),
                          [self.query_ids[i]], 1)
    def normalize(self):
        """Min-max normalize the feature columns per query, in place.

        Columns that are constant within a query (max == 0 after the min
        shift) are left untouched by forcing the divisor to 1.
        """
        for i in range(self.nr_queries):
            start = self.query_pointer[i]
            end = self.query_pointer[i+1]
            self.feature_vectors[start:end, :] -= np.min(self.feature_vectors[start:end, :], axis=0)
            maximum = np.max(self.feature_vectors[start:end, :], axis=0)
            maximum[maximum == 0.0] = 1.0
            self.feature_vectors[start:end, :] /= maximum
    @classmethod
    def load_txt(cls, file_handle, normalize=False):
        """
        Loads a learning to rank dataset from a text file source
        :param filepaths: A single file path as a string or a list of file paths
        :return: A `class:dataset.dataset.LtrDataset` object
        """
        # Iterate over lines in the file, grouping data points by query id
        data_set = defaultdict(list)
        for line in file_handle:
            # Extract the data point information
            data_point = LtrDataPoint(line)
            data_set[data_point.qid].append(data_point)
        # Convert feature vectors, relevance scores and query pointer to correct
        # form
        query_ids = list(data_set.keys())
        query_pointer = np.array([len(data_set[query]) for query in data_set])
        query_pointer = np.cumsum(query_pointer)
        query_pointer = np.hstack([np.array([0]), query_pointer])
        nr_of_queries = len(data_set)
        feature_vectors = np.vstack([data_point.feature_vector
                                     for query in data_set
                                     for data_point in data_set[query]])
        relevance_scores = np.vstack([data_point.relevance
                                      for query in data_set
                                      for data_point in data_set[query]])
        # Free memory
        del data_set
        # Generate object to return
        result = LtrDataset(feature_vectors, relevance_scores, query_pointer,
                            query_ids, nr_of_queries)
        # If normalization is necessary, do so
        if normalize:
            result.normalize()
        # Cast to float32 (after normalization) which is typical format in
        # chainer
        result.feature_vectors = result.feature_vectors.astype(dtype=np.float32)
        # Return result
        return result
    def save_txt(self, file_handle):
        """
        Saves the data set in txt format to given file
        :param file_handle: The file to save to
        """
        for i in range(self.nr_queries):
            start = self.query_pointer[i]
            end = self.query_pointer[i + 1]
            for j in range(start, end):
                # NOTE(review): the generator re-uses the name `i` as its own
                # loop variable; the outer query index `i` (used for
                # query_ids[i] below) is unaffected because generator
                # expressions have their own scope, but it reads confusingly.
                features = " ".join('{i}:{v}'.format(i=i,
                                                     v=self.feature_vectors[j, i])
                                    for i in range(len(self.feature_vectors[j])))
                out = '{r} qid:{qid} {features}\n'.format(r=self.relevance_scores[j,0],
                                                          qid=self.query_ids[i],
                                                          features=features)
                file_handle.write(out)
    def save(self, file_handle):
        """
        Saves the data set in binary format to given file
        :param file_handle: The file to save to
        """
        pickle.dump(self, file_handle)
    @classmethod
    def load(cls, file_handle):
        """
        Loads the data set in binary format from given file
        :param file_handle: The file to load from
        :return: A `class:dataset.dataset.LtrDataset` object
        # NOTE(review): pickle.load must only be used on trusted files; it can
        # execute arbitrary code when fed untrusted data.
        """
        return pickle.load(file_handle)
class LtrDataPoint:
    """A single learning-to-rank data point.

    Parses one SVMlight-style line ("<rel> qid:<id> <idx>:<val> ... # comment")
    into a query id, a relevance label and a dense feature vector.
    """
    qid_regex = re.compile(".*qid:([0-9]+).*")
    relevance_regex = re.compile("^[0-9]+")
    feature_regex = re.compile("([0-9]+):([^ ]+)")
    def __init__(self, line):
        # Drop any trailing comment
        hash_pos = line.find("#")
        if hash_pos >= 0:
            line = line[:hash_pos]
        # Query id and relevance label (both kept as strings)
        self.qid = LtrDataPoint.qid_regex.search(line).group(1)
        self.relevance = LtrDataPoint.relevance_regex.search(line).group(0)
        # Dense feature vector spanning the observed index range
        pairs = LtrDataPoint.feature_regex.findall(line)
        indices = [int(idx) for idx, _ in pairs]
        low = min(indices)
        high = max(indices)
        self.feature_vector = np.zeros(1 + high - low)
        for idx, val in pairs:
            self.feature_vector[int(idx) - low] = float(val)
| true |
1f8b5b01a976ac129fd20c6bd0a7a4f2bea7a56c | Python | meatripoli/PythonSandbox | /find_gcd.py | UTF-8 | 452 | 3.34375 | 3 | [] | no_license | def find_gcd(some_list):
gcd_list=[]
m=len(some_list)
gcd=[]
for item in some_list:
for n in range(1,item+1):
if item%n==0:
gcd_list.append(n)
for item in gcd_list:
if gcd_list.count(item)==m:
gcd.append(item)
x=gcd[0]
for l in range(len(gcd)-1):
if x<gcd[l]:
x=gcd[l]
return x
# Demo runs: expected greatest common divisors are 6 and 1.
print(find_gcd([12,24,6,18]))
print(find_gcd([3, 5, 9, 11, 13]))
| true |
6d4a187c4eee84f3e6819a0e5ce97b498dc58b01 | Python | intruedeep/target-data-extraction | /extract/tn_emulator/image.py | UTF-8 | 2,613 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env python2
import numpy as np
from scipy import ndimage
import cv2
import sys
# Colour bounds used to isolate the red target; presumably BGR channel order
# (high third channel = red), matching cv2.imread's default -- TODO confirm.
RED_LOWER = np.array([17, 15, 100])
RED_UPPER = np.array([50, 56, 200])
def get_target_data(img, lowbounds, highbounds):
    """Locate the largest colour-matched blob and return ((x, y), radius).

    A cross and a bounding circle are drawn at the detected centre.
    NOTE(review): the 3-value unpacking of cv2.findContours only matches
    OpenCV 3.x; versions 2.x and 4.x return (contours, hierarchy).
    NOTE(review): `img` is rebound to findContours' first return value, so
    the markers are drawn on that image, not on the caller's input array.
    """
    #isolate colors to binary image
    target_iso = cv2.inRange(img, lowbounds, highbounds)
    #Blur the binary image
    blur_target = ndimage.gaussian_filter(target_iso, 8)
    # Get contours and keep the largest - this should be our target
    img, contours, _ = cv2.findContours(blur_target, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    largest_contour = max(contours, key=lambda x: cv2.contourArea(x))
    #Get x,y position and radius
    (x,y), rad = cv2.minEnclosingCircle(largest_contour)
    xi = int(x)
    yi = int(y)
    tl = (xi-5,yi-5)
    tr = (xi+5,yi-5)
    bl = (xi-5,yi+5)
    br = (xi+5,yi+5)
    # draw two intersecting lines each 10 pixels long and a bounding circle
    cv2.line(img, tl, br, (50, 200, 50), thickness=2)
    cv2.line(img, tr, bl, (50, 200, 50), thickness=2)
    cv2.circle(img, (xi, yi), int(rad), (100,100,100))
    # cv2.imshow("image", img)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    return ((x,y), rad)
def get_tile_location(img, lowbounds, highbounds, tilex, tiley, pixelx, pixely):
    """Map the detected target centre from pixel coordinates to tile indices.

    The image is pixelx by pixely pixels and is divided into a tilex by tiley
    grid; returns the (column, row) tile containing the target centre.
    """
    (px, py), _radius = get_target_data(img, lowbounds, highbounds)
    tile_col = int((px / pixelx) * tilex)
    tile_row = int((py / pixely) * tiley)
    return tile_col, tile_row
"""
RED_LOWER = np.array([17, 15, 100])
RED_UPPER = np.array([50, 56, 200])
def area_containing_target(img, highbounds, lowbounds):
#isolate colors to binary image
target_iso = cv2.inRange(img, lowbounds, highbounds)
#Blur the binary image
blur_target = ndimage.gaussian_filter(target_iso, 8)
# Get contours and keep the largest - this should be our target
contours, _ = cv2.findContours(blur_target, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if len(contours) == 0:
return -2
largest_contour = max(contours, key=lambda x: cv2.contourArea(x))
area = cv2.contourArea(largest_contour)
return area
def image_contains_target(img, highbounds, lowbounds, count_threshold):
area = area_containing_target(img, highbounds, lowbounds)
if area == -2:
return -2
return area >= count_threshold
def get_cv2_image_from_pil_image(pil_img):
open_cv_image = np.array(pil_img)
# Convert RGB to BGR
open_cv_image = open_cv_image[:, :, ::-1].copy()
return open_cv_image
if __name__ == '__main__':
if len(sys.argv) < 2:
print "image.py imagefile"
raise SystemExit
img = cv2.imread(sys.argv[1])
(x,y), radius = get_target_data(img, RED_LOWER, RED_UPPER)
print (x,y), radius
"""
| true |
3573817a45e96f6af3c1bb642e2d239e42259bb4 | Python | ChristopherHubbard/news-stock-predictor | /src/server/Prediction/TransformLayer.py | UTF-8 | 667 | 3.125 | 3 | [] | no_license | import torch
# Layer to transform a tensor in a sequential NN to a different format -- useful to define networks to output correctly shaped tensors
# Also helps include intermediate transformations between layers
class TransformLayer(torch.nn.Module):
    """Reshape layer: views its input tensor as the shape given at construction.

    Useful inside torch.nn.Sequential models to insert an intermediate
    reshape between layers.
    """
    def __init__(self, toSize):
        """toSize: target shape (any rank), e.g. (batch, rows, columns)."""
        # Call the base constructor
        super(TransformLayer, self).__init__()
        self.toSize = toSize
    def forward(self, tensor):
        """Return a view of `tensor` with shape `toSize`.

        GENERALIZED: the original unpacked exactly (batch, rows, columns);
        splatting the shape supports targets of any rank while remaining
        identical for 3-tuples.  An incompatible element count still raises,
        which is the desired behaviour.
        """
        return tensor.view(*self.toSize)
| true |
b6d1f01a74a29e957e2a406edaaed76763f75dde | Python | yyoshiaki/gene2bed | /.ipynb_checkpoints/gene2bed-checkpoint | UTF-8 | 416 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
import sys
import pandas as pd
import argparse
parser = argparse.ArgumentParser(description='Convert a gene list into a bed file.')
# positional string argument: path to the gene list file
parser.add_argument('input', type=str, help='a gene list file', )
# integer option (NOTE(review): the flag "mergin" looks like a typo for
# "margin", but renaming it would change the CLI, so it is only noted here)
parser.add_argument('-m','--mergin', type=int, help='mergin length', default=0)
args = parser.parse_args()
print(args.input)
abbd5d1dd0bc964228e1cdc23788fc6fafd039a2 | Python | JoaoCostaIFG/MNUM | /exams/unknown_date_exam/1_newton_1eq.py | UTF-8 | 280 | 4.03125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# In maxima:
# f: (x - 2.6) + (cos(x + 1.1))^3;
# diff(f, x);
from math import cos, sin
def f(x):
    """Evaluate f(x) = (x - 2.6) + cos(x + 1.1)^3 (the root-finding target)."""
    shifted_cos = cos(x + 1.1)
    return (x - 2.6) + shifted_cos ** 3
def df(x):
    """Analytic derivative of f: 1 - 3 cos^2(x + 1.1) sin(x + 1.1)."""
    c = cos(x + 1.1)
    s = sin(x + 1.1)
    return 1 - 3 * (c ** 2) * s
# One Newton-Raphson step from the initial guess x0 = 1.8:
#   x_{n+1} = x_n - f(x_n) / f'(x_n)
x = 1.8
print(x)
x -= f(x) / df(x)
print(x)
| true |
97e41cc30482d20426233560913bde504b71252c | Python | weida2/practice | /6.py | UTF-8 | 395 | 2.578125 | 3 | [] | no_license | import easygui as eg
def function2():
    """Show an easygui multi-field account form and print the entered values.

    Fields marked with * in the prompt text are required; the strings are
    user-facing Chinese labels and are left untouched.
    """
    title = '账户中心'
    msg = '''
【*真实姓名】为必填项
【*E-mail】为必填项
【*手机号码】为必填项
    '''
    inputs = ['*用户名', '*真实姓名', '电话号码', '*手机号码', 'QQ', '*E-mail']
    print(eg.multenterbox(msg, title, inputs))
function2()
1631ec3dc46d4494944d182bbc2a4e80039f0910 | Python | DenisLamalis/cours-python | /lpp101-work/index_33.py | UTF-8 | 1,229 | 4.21875 | 4 | [] | no_license | # for loops and nesting
# for letter in 'Norwegian blue':
# print(letter)
# for furgle in range(8):
# print(furgle)
# for furgle in range(2,8):
# print(furgle)
# for furgle in range(1, 15, 3):
# print(furgle)
# for name in ['John','Terry','Eric','Michael','George']:
# print(name)
# friends = ['John','Terry','Eric','Michael','George']
# for friend in friends:
# print(friend)
# friends = ['John','Terry','Eric','Michael','George']
# for index in range(len(friends)):
# print(friends[index])
# friends = ['John','Terry','Eric','Michael','George']
# for friend in friends:
# if friend == 'Eric':
# print('Found ' + friend + '!')
# break
# print(friend)
# friends = ['John','Terry','Eric','Michael','George']
# for friend in friends:
# if friend == 'Eric':
# print('Found ' + friend + '!')
# continue
# print(friend)
# friends = ['John','Terry','Eric','Michael','George']
# for friend in friends:
# if friend == 'Eric':
# print('Found ' + friend + '!')
# print(friend)
friends = ['John','Terry','Eric']
# Nested loops: pair every friend with each of the numbers 1..3.
for buddy in friends:
    for n in (1, 2, 3):
        print(buddy, n)
print("For Loop done!")
| true |
9ba6db72572959c4dbe8446a7a6e9b534698dd71 | Python | danhidsan/movie-trailer-classifier | /test/test_classifier.py | UTF-8 | 1,470 | 3.28125 | 3 | [] | no_license | import unittest
import time
import logging
from ml.classifier import TextClassifier
# logging config
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
class ClassifierTest(unittest.TestCase):
    """Unit tests for the project's TextClassifier wrapper (ml.classifier)."""
    # NOTE: this runs once, at class-definition time, not per test
    logging.info("Preparing set up test for Classifier Module")
    def setUp(self):
        # fresh classifier instance for every test method
        self.classifier = TextClassifier()
    def test_get_classifier_name(self):
        """The exposed model name must be one of the supported classifiers."""
        logging.info("Get classifier name test")
        names_list = [
            'MultinomialNB', 'LinearSVC',
            'LogisticRegression', 'RandomForestClassifier'
        ]
        model_name = self.classifier.get_classifier_name
        self.assertTrue(model_name in names_list)
    def test_train(self):
        """Training rejects unknown model ids and accepts 'get_linear_svc'."""
        logging.info("Train model test")
        # train model with wrong model
        self.assertRaises(
            AttributeError, self.classifier.train, 'WrongModel'
        )
        # train model with correct model
        self.classifier.train('get_linear_svc')
        model_name = self.classifier.get_classifier_name
        self.assertEqual(model_name, 'LinearSVC')
    def test_predict(self):
        """Predicting on a single text returns exactly one label."""
        logging.info("Predict text test")
        prediction = self.classifier.predict(
            """It is a curious thing, Harry, but perhaps those who
            are best suited to power are those who have never sought it."""
        )
        self.assertTrue(len(prediction) == 1)
if __name__ == '__main__':
    unittest.main()
| true |
3ef74b4cd72a337b588ff60c71b13559ce759b83 | Python | lesilencieux/python_mongodb_flask | /app/models/mission.py | UTF-8 | 4,624 | 2.609375 | 3 | [
"MIT"
] | permissive | from pymongo import MongoClient
from flask import jsonify, session
from bson import ObjectId
from pymongo.errors import DuplicateKeyError
import dateutil.parser
from datetime import datetime as dt
import datetime
class Mission():
    """MongoDB data-access layer for mission documents (collection
    missions.missions on localhost:27017).

    NOTE(review): the client/db/collection handles are class attributes,
    created once at import time and shared by every instance.
    """
    client = MongoClient("localhost", 27017)
    db = client["missions"]
    missions = db["missions"]
    def get_missions(self):
        # returns the raw pymongo cursor; the jsonify variant is kept below
        # as commented-out history
        result = self.missions.find()
        toreturns = []
        # return [str(mission['_id']) for mission in result]
        return result
        # for mission in result:
        #     mission['_id'] = str(mission['_id'])
        #     toreturns.append(mission)
        # return jsonify(toreturns)
    def get_all_codes_of_missions(self):
        """Return the code_mission field of every mission document."""
        result = self.missions.find()
        toreturns = []
        for mission in result:
            toreturns.append(mission['code_mission'])
        return toreturns
    def get_mission(self, mission_id):
        """Return the single mission with the given ObjectId string, or None."""
        myquery = {"_id": ObjectId(mission_id)}
        return self.missions.find_one(myquery)
    def get_mission_by_code(self, code_mission):
        """Return a cursor over missions matching the given code."""
        myquery = {"code_mission": code_mission}
        missions = self.missions.find(myquery)
        return missions
    def get_mission_by_zone_and_corps(self, zone, corps):
        """Return missions for a zone/corps pair as a JSON response
        (requires an active Flask application context for jsonify)."""
        myquery = {"zone": zone, "corps": corps}
        missions = self.missions.find(myquery)
        toreturns = []
        for mission in missions:
            mission['_id'] = str(mission['_id'])
            toreturns.append(mission)
        return jsonify(toreturns)
    def create_new_mission(self, jsn):
        """Insert a mission document; False when a duplicate key is hit.

        NOTE(review): DuplicateKeyError only fires if the unique index below
        has actually been created; the create_index call is commented out.
        """
        # Create index on code of mission field to prevent duplicated inserting
        # self.missions.create_index([('code_mission', '')], unique=True)
        try:
            self.missions.insert(jsn)
            return True
        except DuplicateKeyError:
            return False
    def update_mission(self, id, newvalues):
        """$set the given fields on the mission with this id."""
        query = {"_id": ObjectId(id)}
        updated = {"$set": newvalues}
        if self.missions.update(query, updated):
            return True
        else:
            return False
    def validate_mission(self, id):
        """Mark the mission as validated (status_mission = "Validee")."""
        query = {"_id": ObjectId(id)}
        newvalues = {"status_mission": "Validee"}
        updated = {"$set": newvalues}
        if self.missions.update(query, updated):
            return True
        else:
            return False
    def delete_mission(self, id):
        """Remove the mission with the given id."""
        query = {"_id": ObjectId(str(id))}
        if self.missions.remove(query):
            return True
        else:
            return False
    def delete_mission_by_code(self, code_mission):
        """Remove every mission carrying the given code."""
        query = {"code_mission": code_mission}
        if self.missions.remove(query):
            return True
        else:
            return False
    def check_if_agent_has_already_on_mission_for_start_date(self, agent_mission, start_date, end_date):
        # NOTE(review): the return statements sit inside the loop, so only
        # the FIRST matching document decides the answer, and an empty result
        # set returns None rather than False -- confirm this is intended.
        query1 = {"date_debut_mission": {'$gte': dateutil.parser.parse(str(start_date)),
                                         '$lt': dateutil.parser.parse(str(end_date))}}
        result1 = self.missions.find(query1)
        for r in result1:
            if str(agent_mission) in r['agents_mission']:
                return True
            else:
                return False
    def check_if_agent_has_already_on_mission_for_end_date(self, agent_mission, start_date, end_date):
        # NOTE(review): same first-document-only / None-on-empty behaviour as
        # the start-date variant above.
        query1 = {"date_fin_mission": {'$gte': dateutil.parser.parse(str(start_date)),
                                       '$lt': dateutil.parser.parse(str(end_date))}}
        result1 = self.missions.find(query1)
        for r in result1:
            if str(agent_mission) in r['agents_mission']:
                return True
            else:
                return False
    def get_mission_by_agent(self, agent):
        """Return every mission whose agents_mission list contains `agent`."""
        missions_for_agent = []
        list_mission = self.missions.find()
        for mission in list_mission:
            if agent in mission['agents_mission']:
                missions_for_agent.append(mission)
        return missions_for_agent
    def chech_mission_for_agent_between_two_dates(self, agent, start_date, end_date):
        # NOTE(review): `d1 >= miss['date_depart_mission'] >= d1` tests for
        # exact equality with d1 -- presumably a typo for `d2 >= ... >= d1`.
        # Also decides from the FIRST of the agent's missions only, and
        # returns None when the agent has no missions.
        d1 = dt.strptime(start_date, "%m/%d/%Y")
        d2 = dt.strptime(end_date, "%m/%d/%Y")
        missions_for_agent = []
        list_mission = self.missions.find()
        for mission in list_mission:
            if agent in mission['agents_mission']:
                missions_for_agent.append(mission)
        for miss in missions_for_agent:
            if (d1 >= miss['date_depart_mission'] >= d1) or (d2 >= miss['date_retour_mission'] >= d1):
                return True
            else:
                return False
| true |
ca6637e9034790a26dc23112f23f6358dfcb6020 | Python | Aasthaengg/IBMdataset | /Python_codes/p02766/s449956528.py | UTF-8 | 209 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python3
def main():
N, K = map(int, input().split())
for i in range(10 ** 9):
if N <= K ** i - 1:
print(i)
break
if __name__ == '__main__':
main()
| true |
ad67b1ab1158cf3c122b164ec34eb68c41289b9c | Python | imoneoi/CarZero | /src/carzero/scripts/movavg.py | UTF-8 | 1,029 | 3.5625 | 4 | [] | no_license | import numpy as np
class MovAvg:
    """Sliding-window moving average that also tracks the standard deviation.

    Keeps the most recent `max_size` samples in a ring buffer together with
    running sums of the values and their squares, so both the mean and the
    (population) standard deviation are O(1) per query.
    """
    def __init__(self, max_size=100):
        self.maxsize = max_size
        self.cache = np.zeros(max_size)  # ring buffer of samples
        self.sum = 0.0                   # running sum of samples
        self.sq_sum = 0.0                # running sum of squared samples
        self.size = 0                    # number of valid samples
        self.pointer = 0                 # next slot to overwrite
    def push(self, item):
        """Insert a sample, evicting the oldest one once the buffer is full."""
        if self.size < self.maxsize:
            self.size += 1
        else:
            evicted = self.cache[self.pointer]
            self.sum -= evicted
            self.sq_sum -= evicted ** 2
        self.cache[self.pointer] = item
        self.pointer = (self.pointer + 1) % self.maxsize
        self.sum += item
        self.sq_sum += item ** 2
    def get(self):
        """Return the mean of the buffered samples (0 when empty)."""
        return self.sum / self.size if self.size else 0
    def std(self):
        """Return the population standard deviation (0 when empty)."""
        if not self.size:
            return 0
        mean = self.sum / self.size
        return np.sqrt((self.sq_sum - self.sum * mean) / self.size)
    def __repr__(self):
        return "Mean: {:.3f} Std {:.3f}".format(self.get(), self.std())
| true |
a1096a0c3cc3d2b916b8994a315d18ce7c8a67d8 | Python | ilayze/Ben-Yehuda-Project-Processor | /src/pageParser.py | UTF-8 | 1,927 | 2.953125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf8 -*-
import argparse
import urllib2
import unicodedata
from BeautifulSoup import BeautifulSoup
class PageParser:
    """Python 2 scraper for a Ben-Yehuda project author page: collects every
    linked creation page and extracts its text spans.

    NOTE(review): the conditions below use bitwise `|` on booleans instead of
    `or`; it happens to work on bools but is unidiomatic.
    """
    def __init__(self, argsparser):
        # register the --url option and parse the command line immediately
        argsparser.add_argument('-u', '--url',
                                help='url to the creator main page',
                                default="http://benyehuda.org/tuvya/",
                                required=False)
        self.args = argsparser.parse_args()
        self.url = self.args.url
    def parseMainPage(self):
        """Return {title: text} for every relative link on the main page."""
        html = urllib2.urlopen(self.url)
        parsed_html = BeautifulSoup(html)
        all_a=parsed_html.findAll("a",href=True)
        creations=dict()
        for a in all_a:
            # skip external, mailto and navigation links
            if a['href'].startswith("http") | a['href'].startswith("mail") | a['href'].startswith('aximan'):
                print 'skipping ' +a['href']
                continue
            title = a.text
            text=self.parseSubPage(self.url+a['href'])
            creations[title]=text
            print title
            print creations[title]
        return creations
    def parseSubPage(self,url):
        """Return the concatenated span texts of one creation page,
        skipping the site's boilerplate navigation spans."""
        print 'parsing '+url
        html = urllib2.urlopen(url)
        parsed_html = BeautifulSoup(html)
        text=""
        for span in parsed_html.findAll("span"):
            spanText=span.text.encode('utf8')
            if ("לתוכן הענינים" in spanText) | ("פרויקט בן-יהודה" in spanText):
                print 'skipping '+span.text
                continue
            spanText=spanText.replace('\r\n',' ')
            print spanText
            text+=spanText+'\n'
        print text
        return text
def main():
    """Parse the author page given by --url and dump all texts to output.txt."""
    argsParser = argparse.ArgumentParser()
    pageParser = PageParser(argsParser)
    creations=pageParser.parseMainPage()
    f = open('output.txt', 'w')
    for key, value in creations.iteritems():
        f.write(value)
if __name__ == '__main__':
    main()
| true |
766bcec5841cc079e5fb0eb02f9bbd73b0ed94b5 | Python | midaslmg94/CodingTest | /Backtracking/15652_N과 M(4).py | UTF-8 | 233 | 2.953125 | 3 | [] | no_license | n, m = map(int, input().split())
result = []
def dfs(idx, count):
    """Print every non-decreasing length-m sequence drawn from 1..n.

    Backtracking for Baekjoon 15652 (N and M (4)): elements may repeat
    because the recursion passes `i` (not i+1) as the next start index.
    """
    if count == m:
        print(*result)
        return
    for i in range(idx, n):
        result.append(i+1)
        dfs(i, count+1)
        result.pop()
dfs(0, 0)
15e7a9da15858d76c5cec6b6f9b1e3477d495af2 | Python | glangsto/analyze | /fitCrossing.py | UTF-8 | 22,026 | 2.734375 | 3 | [] | no_license | """
Read in an observation summary and fit the times of galaxy crossings.
From these measurements estimate the Azimuth and Elevation of the
telescope pointing. Then compute the azimuth and elevation offsets
"""
# Functions to create a grid and place astronomical data on that
# grid with a convolving function
# HISTORY
# 23Apr01 GIL test using 1/2 maximum times to get dAz
# 23Mar31 GIL check dAz
# 22May06 GIL iteratively fit peaks
# 22May04 GIL get azimuth, elevation and telescope position
# 22May02 GIL initial version
import sys
import os
import numpy as np
from matplotlib import pyplot as plt
from pylab import *
from scipy.optimize import curve_fit
import datetime
import gainfactor as gf
import radioastronomy
# define file paths: the crossing-offset table and the Az/El offset log
offsetfilename = "/home/glen/GalacticPlaneCrossingOffsets.txt"
dazdelfilename = "/home/glen/2023-DazDel.txt"
# presumably enable the 2/3/4-component gaussian fits -- TODO confirm usage
doTwo = True
doThree = True
doFour = True
GalacticPolRa = (12+(51.4/60.))*15 # (degrees)
GalacticPolDec = 27.13 # (degrees)
def readGalacticOffsets( offsetfilename):
    """
    Read a table of RA and Dec positions
    input - offset file name
    outputs
    decs - array of declinations for which Galactic Plane is crossed
    ra1s - array of 1st galactic plane crossing RAs
    ra2s - array of 2nd galactic plan crossing RAs
    dRas - array of deltas in RA crossings

    FIXES: the file handle is now closed via a with-block, and blank lines
    are simply skipped instead of aborting after ten of them (the old
    count-to-10 logic could silently truncate a table containing spacing
    lines).  Malformed lines are still reported and skipped.
    """
    decs = []
    ra1s = []
    ra2s = []
    dRas = []
    with open( offsetfilename, "r") as f:
        for aline in f:
            aline = aline.strip()
            if len(aline) < 1:
                # blank separator line; nothing to parse
                continue
            if aline[0] == "#":
                # comment line
                continue
            parts = aline.split()
            if len(parts) == 4:
                decs.append( float(parts[0]))
                ra1s.append( float(parts[1]))
                ra2s.append( float(parts[2]))
                dRas.append( float(parts[3]))
            else:
                print("Error Parsing Galactic Offsets line:")
                print(parts)
    decs = np.asarray( decs)
    ra1s = np.asarray( ra1s)
    ra2s = np.asarray( ra2s)
    dRas = np.asarray( dRas)
    # end of readGalacticOffsets
    return decs, ra1s, ra2s, dRas
def writeDazDel( dazdelfilename, utc, cpuIndex, az, el, dAz, dEl):
    """
    appends a new measurement of the Azimuth and elevation offsets
    inputs
    dazdelfilename - file to append measurements to
    utc - representative time of measurement
    cpuIndex - telescope/cpu identifier
    az, el - recorded azimuth and elevation of observations
    dAz, dEl - offsets to be added to az, el to get true az,el of observation

    FIX: open mode "a" already creates the file when it is missing, so the
    original exists-check branch was redundant; the with-block also
    guarantees the handle is closed even if the write fails.
    """
    outline = "%s %2d %8.2f %8.2f %6.2f %6.2f \r\n" % \
              (utc, cpuIndex, az, el, dAz, dEl)
    with open( dazdelfilename, "a") as f:
        f.write(outline)
    # end of writeDazDel()
    return
def decFromCrossingDelta( dRa, decs, ra1s, ra2s, dRas):
    """
    decFromCrossingDelta returns the closest Declination
    matching the measured offset between two galactic crossing positions
    input
    dRa - measured right ascension difference between two galactic crossings
    decs - array of declinations
    ra1s - 1st right ascension crossing point for each declination
    ra2s - 2nd right ascension crossing point for each declination
    dRas - difference between ra1s and ra2s for each declination
    """
    idec = 0
    n = len( dRas)
    ddRa = 360 - dRa
    # Bounds test FIRST: the original evaluated dRas[idec] before checking
    # idec < n, raising IndexError whenever ddRa was below every table
    # entry.  Stop at the last entry instead.
    while idec < n - 1 and ddRa < dRas[idec]:
        idec = idec + 1
    print( "dRa = %7.1f coresponds to dec %7.1fd (%d) in array %7.1f" %
           (dRa, decs[idec], idec, dRas[idec]))
    return decs[idec]
def gauss(x, mu, sigma, A):
    """Evaluate a single Gaussian: A * exp(-(x - mu)^2 / (2 sigma^2))."""
    return A * np.exp(-((x - mu) ** 2) / (2.0 * sigma ** 2))
def bimodal(x, mu1, sigma1, A1, mu2, sigma2, A2):
    """Intensity of a two-component Gaussian model."""
    components = ((mu1, sigma1, A1), (mu2, sigma2, A2))
    return sum(gauss(x, m, s, a) for m, s, a in components)
def trimodal(x, mu1, sigma1, A1, mu2, sigma2, A2, mu3, sigma3, A3):
    """Intensity of a three-component Gaussian model."""
    components = ((mu1, sigma1, A1), (mu2, sigma2, A2), (mu3, sigma3, A3))
    return sum(gauss(x, m, s, a) for m, s, a in components)
def quadmodal(x, mu1, sigma1, A1, mu2, sigma2, A2,
              mu3, sigma3, A3, mu4, sigma4, A4):
    """Intensity of a four-component Gaussian model."""
    components = ((mu1, sigma1, A1), (mu2, sigma2, A2),
                  (mu3, sigma3, A3), (mu4, sigma4, A4))
    return sum(gauss(x, m, s, a) for m, s, a in components)
def fivemodal(x, mu1, sigma1, A1, mu2, sigma2, A2,
              mu3, sigma3, A3, mu4, sigma4, A4, mu5, sigma5, A5):
    """Intensity of a five-component Gaussian model."""
    components = ((mu1, sigma1, A1), (mu2, sigma2, A2), (mu3, sigma3, A3),
                  (mu4, sigma4, A4), (mu5, sigma5, A5))
    return sum(gauss(x, m, s, a) for m, s, a in components)
def sortParams( inparams, sigmas):
    """
    sortParams re-orders parameters in order of descending peak height.
    The params are sets of three values: time, width and peak; the matching
    sigma (uncertainty) triples are kept aligned with their parameters.
    Returns the (mutated) inparams and sigmas.

    Bug fixed: the original saved the i-th sigma triple into temporaries
    and wrote it into slot j, but never copied the j-th sigma triple into
    slot i -- after a swap the uncertainties of peak i were duplicated and
    peak j's uncertainties were lost.
    """
    npeaks = len( inparams) // 3
    # selection-style pass over all pairs of peaks
    for i in range(npeaks - 1):
        for j in range(i + 1, npeaks):
            # if the later peak is taller, swap both triples in lockstep
            if inparams[(j * 3) + 2] > inparams[(i * 3) + 2]:
                for k in range(3):
                    inparams[(i * 3) + k], inparams[(j * 3) + k] = \
                        inparams[(j * 3) + k], inparams[(i * 3) + k]
                    sigmas[(i * 3) + k], sigmas[(j * 3) + k] = \
                        sigmas[(j * 3) + k], sigmas[(i * 3) + k]
    return inparams, sigmas
def fitCrossing( filename):
    """
    fitCrossing takes the integrated intensities from a sumary file,
    then fits a series of gaussians
    This version fits one gaussian at a time, until the fit fails,
    then simultaneously fits all gaussians

    Returns: firstdate, utcs, tSums, dTs, ng, then parameter/uncertainty
    pairs for the 1..5 gaussian simultaneous fits.  Each gaussian is a
    (time, width, peak) triple; ng is the component count of the last
    simultaneous fit that converged.
    """
    firstdate, utcs, tSums, dTs = gf.readAllValues( filename)
    # create an array of seconds
    nt = len(tSums)
    # now fit two gausians
    # create a temporary array to interatively fit.
    tTemps = np.zeros(nt)
    for i in range(nt):
        tTemps[i] = tSums[i]
    # set number of gaussians to fit and init array of peak values
    NGAUSS = 5
    iMaxs = [0, 0, 0, 0, 0]
    tMaxs = [0., 0., 0., 0., 0.]
    # keep fit results
    utcPeaks = [0., 0., 0., 0., 0.]
    dUtcPeaks = [0., 0., 0., 0., 0.]
    tPeaks = [0., 0., 0., 0., 0.]
    dTPeaks = [0., 0., 0., 0., 0.]
    widths = [0., 0., 0., 0., 0.]
    dWidths = [0., 0., 0., 0., 0.]
    # now fit all gaussians
    # Stage 1: peel off one gaussian at a time -- locate the current
    # maximum, estimate its half-width, fit one gaussian, subtract it.
    for ng in range(NGAUSS):
        # get the indexs to the first intensity peak
        iMax = np.argmax( tTemps)
        tMax = tTemps[iMax]
        iMaxs[ng] = iMax
        tMaxs[ng] = tMax
        # limit search for half max to range near peak
        # but not beyond end of data arrays
        if iMax < nt/2:
            nseek = int(iMax*.4)
        else:
            nseek = int((nt - iMax)*.4)
        # assume width is no more than one hour
        width = 3600.
        tHalf = tMax/2.
        # now find the half width:
        for i in range(nseek):
            j = iMax - i
            k = iMax + i
            # if found the half width
            if tTemps[j] < tHalf:
                width = utcs[iMax] - utcs[j]
                break
            elif tTemps[k] < tHalf:
                width = utcs[k] - utcs[iMax]
                break
        # end searching for width
        print("t%d = %8.1f, y = %8.3f, width = %7.2f " % \
              (ng+1, utcs[iMax], tMax, width))
        # estimate the fit the gaussian
        expected=(utcs[iMax], width, tMax)
        sigma1 = [ 0., 0., 0.]
        try:
            params1,cov1=curve_fit(gauss,utcs,tTemps,expected,sigma=dTs)
            sigma1=sqrt(diag(cov1))
        except:
            print("Error fitting gaussian %d" % (ng+1))
            params1 = expected
        # fit was successful, subtract fit and try again
        # NOTE(review): the subtraction also runs when the fit failed; in
        # that case the initial-guess gaussian is subtracted instead.
        for i in range(nt):
            tTemps[i] = tTemps[i] - gauss( utcs[i], *params1)
        utcPeaks[ng] = params1[0]
        widths[ng] = params1[1]
        tPeaks[ng] = params1[2]
        dUtcPeaks[ng] = sigma1[0]
        dWidths[ng] = sigma1[1]
        dTPeaks[ng] = sigma1[2]
        print("Fit %2d: %9.2f %8.2f %7.2f" % \
              (ng, utcPeaks[ng], tPeaks[ng], widths[ng]))
        print(" +/- : %9.2f %8.2f %7.2f" % \
              (dUtcPeaks[ng], dTPeaks[ng], dWidths[ng]))
    # end for all gaussians
    params1 = [utcPeaks[0], widths[0], tPeaks[0]]
    # prepare to bound fits in time
    tmin = utcs[0] - 10.
    tmax = utcs[nt-1] + 10.
    # now try 1, 2, 3 and 4 gaussians
    # keep the largest number that fits
    # Stage 2: simultaneous fits of 1..5 gaussians against the ORIGINAL
    # intensities, seeded from stage 1.  A failed fit keeps the seed
    # values.  NOTE(review): if every simultaneous fit fails, ng keeps
    # its stage-1 loop value (NGAUSS-1), which is misleading.
    expected1 = ( utcPeaks[0], widths[0], tPeaks[0])
    sigma1 = [0., 0., 0.]
    try:
        params1,cov1=curve_fit(gauss,utcs,tSums,expected1,sigma=dTs)
        sigma1=sqrt(diag(cov1))
        ng = 1
    except:
        print("Error trying a 1 gaussian fit")
        params1 = [utcPeaks[0], widths[0], tPeaks[0]]
    expected2 = ( utcPeaks[0], widths[0], tPeaks[0],
                  utcPeaks[1], widths[1], tPeaks[1])
    bounds2 = [ (tmin, 50., 50., tmin, 50., 50.),
                (tmax, 90000., 300000., tmax, 90000., 300000.)]
    sigma2 = [0., 0., 0., 0., 0., 0.]
    try:
        params2,cov2=curve_fit(bimodal,utcs,tSums,expected2,sigma=dTs,
                               bounds=bounds2)
        sigma2=sqrt(diag(cov2))
        ng = 2
    except:
        print("Error trying a 2 gaussian fit")
        params2 = [ utcPeaks[0], widths[0], tPeaks[0],
                    utcPeaks[1], widths[1], tPeaks[1]]
    # NOTE(review): the third seed reuses utcPeaks[1]; utcPeaks[2] was
    # probably intended (same in the except fallback below).
    expected3 = ( utcPeaks[0], widths[0], tPeaks[0],
                  utcPeaks[1], widths[1], tPeaks[1],
                  utcPeaks[1], widths[2], tPeaks[2])
    bounds3 = [ (tmin, 50., 50., tmin, 50., 50.,
                 tmin, 50., 50.),
                (tmax, 90000., 300000., tmax, 90000., 300000.,
                 tmax, 90000., 300000.)]
    sigma3 = [0., 0., 0., 0., 0., 0., 0., 0., 0.]
    try:
        params3,cov3=curve_fit(trimodal,utcs,tSums,expected3,sigma=dTs,
                               bounds=bounds3)
        sigma3=sqrt(diag(cov3))
        ng = 3
    except:
        print("Error trying a 3 gaussian fit")
        params3 = [ utcPeaks[0], widths[0], tPeaks[0],
                    utcPeaks[1], widths[1], tPeaks[1],
                    utcPeaks[1], widths[2], tPeaks[2]]
    expected4 = ( utcPeaks[0], widths[0], tPeaks[0],
                  utcPeaks[1], widths[1], tPeaks[1],
                  utcPeaks[2], widths[2], tPeaks[2],
                  utcPeaks[3], widths[3], tPeaks[3])
    bounds4 = [ (tmin, 50., 50., tmin, 50., 50.,
                 tmin, 50., 50., tmin, 50., 50.),
                (tmax, 90000., 300000., tmax, 90000., 300000.,
                 tmax, 90000., 300000., tmax, 90000., 300000.)]
    sigma4 = [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]
    try:
        params4,cov4=curve_fit(quadmodal,utcs,tSums,expected4,sigma=dTs,
                               bounds=bounds4)
        sigma4=sqrt(diag(cov4))
        ng = 4
    except:
        print("Error trying a 4 gaussian fit")
        params4 = [utcPeaks[0], widths[0], tPeaks[0],
                   utcPeaks[1], widths[1], tPeaks[1],
                   utcPeaks[2], widths[2], tPeaks[2],
                   utcPeaks[3], widths[3], tPeaks[3]]
    expected5 = ( utcPeaks[0], widths[0], tPeaks[0],
                  utcPeaks[1], widths[1], tPeaks[1],
                  utcPeaks[2], widths[2], tPeaks[2],
                  utcPeaks[3], widths[3], tPeaks[3],
                  utcPeaks[4], widths[4], tPeaks[4])
    bounds5 = [ (tmin, 50., 50., tmin, 50., 50.,
                 tmin, 50., 50., tmin, 50., 50.,
                 tmin, 50., 50.),
                (tmax, 90000., 300000., tmax, 90000., 300000.,
                 tmax, 90000., 300000., tmax, 90000., 300000.,
                 tmax, 90000., 300000.)]
    sigma5 = [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]
    try:
        params5,cov5=curve_fit(fivemodal,utcs,tSums,expected5,sigma=dTs,
                               bounds=bounds5)
        sigma5=sqrt(diag(cov5))
        ng = 5
    except:
        print("Error trying a 5 gaussian fit")
        params5 = [utcPeaks[0], widths[0], tPeaks[0],
                   utcPeaks[1], widths[1], tPeaks[1],
                   utcPeaks[2], widths[2], tPeaks[2],
                   utcPeaks[3], widths[3], tPeaks[3],
                   utcPeaks[4], widths[4], tPeaks[4]]
    # now sort each result in intensity order
    # no need to sort if only 2 peaks
    # params1, sigma1 = sortParams( params1, sigma1)
    params2, sigma2 = sortParams( params2, sigma2)
    params3, sigma3 = sortParams( params3, sigma3)
    params4, sigma4 = sortParams( params4, sigma4)
    params5, sigma5 = sortParams( params5, sigma5)
    return firstdate, utcs, tSums, dTs, ng, params1, sigma1, params2, sigma2, params3, sigma3, params4, sigma4, params5, sigma5
def main():
    """
    Main executable for gridding astronomical data

    For each summary file named on the command line: fit the gaussian
    crossing models, convert the two strongest crossing times to RA/Dec,
    look up the matching declination in the offsets table, derive the
    pointing offsets (dAz, dEl), append them to the DazDel log, and plot
    the data with the fitted models.
    """
    nargs = len(sys.argv)
    if nargs < 2:
        print('Fit Crossings: find the time of crossings of peak intensities')
        print('fitCrossing <Spectrum Summary-files>')
        sys.exit()
    print( "reading %d files" % (nargs))
    # first read through all data and find hot load
    names = sys.argv[1:]
    names = sorted(names)
    print(names)
    firsttime = ""
    lasttime = ""
    firstdate = ""
    lastdate = ""
    count = 0
    utcs = []
    timefmt = "%Y-%m-%d %H:%M:%S"
    rs = radioastronomy.Spectrum()
    # print( "Reading %d files" % (len(names)))
    for filename in names:
        print ("File: %s" % (filename))
        # return the data and three fits
        firstdate, utcs, tSums, dTs, ng, params1, sigma1, params2, sigma2, params3, sigma3, params4, sigma4, params5, sigma5 = fitCrossing( filename)
        print("Fit successful for %d gausians" % (ng))
        lastId, lastaz, lastel = gf.lastazel()
        rs.telaz = lastaz
        rs.telel = lastel
        # retrieve the telescope coordinates
        tellon, tellat, telelev = gf.lonlatelev()
        rs.tellon = tellon
        rs.tellat = tellat
        rs.telelev = telelev
        print("Telescope lon, lat, elev: %7.2f %7.2f %7.1f" %
              (rs.tellon, rs.tellat, rs.telelev))
        print("Telescope Az, El, id : %7.2f %7.2f %2d" %
              (rs.telaz, rs.telel, lastId))
        # crossing times from fitCrossing are seconds since local midnight;
        # firstdate is a 2-digit-year date, hence the "20" prefix
        utcmidnight = datetime.datetime.strptime("20" + firstdate + " 00:00:00",
                                                 timefmt)
        utc1 = utcmidnight
        utc2 = utc1
        # take the two strongest crossings from the best available fit
        if ng == 5:
            utc1 = utcmidnight + datetime.timedelta( seconds = params5[0])
            utc2 = utcmidnight + datetime.timedelta( seconds = params5[3])
        elif ng == 4:
            utc1 = utcmidnight + datetime.timedelta( seconds = params4[0])
            utc2 = utcmidnight + datetime.timedelta( seconds = params4[3])
        elif params3[0] != 0.:
            utc1 = utcmidnight + datetime.timedelta( seconds = params3[0])
            utc2 = utcmidnight + datetime.timedelta( seconds = params3[3])
        elif params2[0] != 0.:
            utc1 = utcmidnight + datetime.timedelta( seconds = params2[0])
            utc2 = utcmidnight + datetime.timedelta( seconds = params2[3])
        rs.utc = utc1
        print("Time of first crossing: %s" % (utc1))
        rs.azel2radec()
        ra1 = rs.ra
        dec1 = rs.dec
        print("RA, Dec of crossing: %7.3fd %7.3fd (%7.3fh)" %
              (rs.ra, rs.dec, rs.ra/15.))
        rs.utc = utc2
        print("Time of second crossing: %s" % (utc2))
        rs.azel2radec()
        ra2 = rs.ra
        dec2 = rs.dec
        print("RA, Dec of crossing: %7.3fd %7.3fd (%7.3fh)" %
              (rs.ra, rs.dec, rs.ra/15.))
        dRa = ra1 - ra2
        if dRa < 0:
            dRa = - dRa
        # read in offsets vs dec
        decs, ra1s, ra2s, dRas = readGalacticOffsets( offsetfilename)
        founddec = decFromCrossingDelta( dRa, decs, ra1s, ra2s, dRas)
        # can't determine el offset if dec > 55.
        avera = (ra1 + ra2)/2.
        dAz = 0
        # NOTE(review): in the dec > 55 branch dAz stays 0 -- confirm that
        # is intended and not an omission.
        if rs.dec > 55.:
            dEl = 0.
            # NOTE(review): "if params5[2] * 2. * params5[5]:" tests the
            # truthiness of a product; a comparison (e.g. ">") between the
            # two peak heights was almost certainly intended.
            if ng == 5:
                if params5[2] * 2. * params5[5]:
                    avera = params5[2]
            elif ng == 4:
                if params4[2] * 2. * params4[5]:
                    avera = params4[2]
            elif ng == 3:
                if params3[2] * 2. * params3[5]:
                    avera = params3[2]
            elif ng == 2:
                if params2[2] * 2. * params2[5]:
                    avera = params2[2]
        else:
            dAz = GalacticPolRa - avera
            # sign of the elevation offset flips with pointing direction
            if rs.telaz > 90. and rs.telaz < 270.:
                dEl = founddec - rs.dec
            else:
                dEl = rs.dec - founddec
        #
        print("Ave RA: %7.3fd (%7.3fh)" %
              (avera, avera/15.))
        print("dAz: %7.3fd, dEl: %7.3fd" %
              (dAz, dEl))
        utc0 = utcmidnight + datetime.timedelta( seconds = utcs[0])
        writeDazDel( dazdelfilename, utc0, lastId,
                     rs.telaz, rs.telel, dAz, dEl)
        aveutc, duration = radioastronomy.aveutcs(utc1, utc2)
        print("Average of crossing Times: %s, Time Interval: %8.2fs" % \
              (aveutc, duration))
        plt.plot(utcs, tSums, color='blue',lw=3,
                 label='Intensities')
        if params2[0] != 0.:
            plt.plot(utcs, bimodal(utcs,*params2),color='red',lw=3,
                     label='2 Gaussians')
        if params3[0] != 0.:
            plt.plot(utcs, trimodal(utcs,*params3),color='green',lw=3,
                     label='3 Gaussians')
        if params4[0] != 0.:
            plt.plot(utcs, quadmodal(utcs,*params4),color='gold',lw=3,
                     label='4 Gaussians')
        if params5[0] != 0.:
            plt.plot(utcs, fivemodal(utcs,*params5),color='orange',lw=3,
                     label='5 Gaussians')
        plt.xlabel( "Time (Seconds since Midnight %s) dAz:%5.1fd dEl:%5.1fd" % (firstdate, dAz, dEl))
        plt.ylabel( "Integrated Intensity (K km/sec)")
        plt.title("%s Galactic Integrated Intensities - Tel:%2d Az:%7.1fd El:%7.1fd" % (
            firstdate, lastId, lastaz, lastel))
        plt.legend()
        # use the covariance matrix to get an estimate of fit uncertainty
        print(" Time +/- Intensity +/- Width +/- ")
        i = 0
        if params2[0] != 0. and ng > 1:
            print("2 Gaussian Fit:")
            print(" %8.1f %7.1f %7.1f %7.1f %7.1f %7.1f" % (
                params2[0+i*3], sigma2[0+i*3],
                params2[2+i*3], sigma2[2+i*3],
                params2[1+i*3], sigma2[1+i*3]))
            i = 1
            print(" %8.1f %7.1f %7.1f %7.1f %7.1f %7.1f" % (
                params2[0+i*3], sigma2[0+i*3],
                params2[2+i*3], sigma2[2+i*3],
                params2[1+i*3], sigma2[1+i*3]))
            print(" %8.1f %7.1f" % (
                params2[0]-params2[3],
                np.sqrt(sigma2[0]**2 + sigma2[3]**2)))
        i = 0
        if params3[0] != 0. and ng > 2:
            print("3 Gaussian Fit:")
            print(" %8.1f %7.1f %7.1f %7.1f %7.1f %7.1f" % (
                params3[0+i*3], sigma3[0+i*3],
                params3[2+i*3], sigma3[2+i*3],
                params3[1+i*3], sigma3[1+i*3]))
            i = 1
            print(" %8.1f %7.1f %7.1f %7.1f %7.1f %7.1f" % (
                params3[0+i*3], sigma3[0+i*3],
                params3[2+i*3], sigma3[2+i*3],
                params3[1+i*3], sigma3[1+i*3]))
            # NOTE(review): this 3-gaussian block uses sigma4; sigma3 was
            # almost certainly intended.
            print(" %8.1f %7.1f" % (
                params3[0]-params3[3], np.sqrt(sigma4[0]**2 + sigma4[3]**2)))
        i = 0
        if params4[0] != 0. and ng > 3:
            print("4 Gaussian Fit:")
            print(" %8.1f %7.1f %7.1f %7.1f %7.1f %7.1f" % (
                params4[0+i*3], sigma4[0+i*3],
                params4[2+i*3], sigma4[2+i*3],
                params4[1+i*3], sigma4[1+i*3]))
            i = 1
            print(" %8.1f %7.1f %7.1f %7.1f %7.1f %7.1f" % (
                params4[0+i*3], sigma4[0+i*3],
                params4[2+i*3], sigma4[2+i*3],
                params4[1+i*3], sigma4[1+i*3]))
            print(" %8.1f %7.1f" % (
                params4[0]-params4[3], np.sqrt(sigma4[0]**2 + sigma4[3]**2)))
        i = 0
        if params5[0] != 0. and ng > 4:
            print("5 Gaussian Fit:")
            print(" %8.1f %7.1f %7.1f %7.1f %7.1f %7.1f" % (
                params5[0+i*3], sigma5[0+i*3],
                params5[2+i*3], sigma5[2+i*3],
                params5[1+i*3], sigma5[1+i*3]))
            i = 1
            print(" %8.1f %7.1f %7.1f %7.1f %7.1f %7.1f" % (
                params5[0+i*3], sigma5[0+i*3],
                params5[2+i*3], sigma5[2+i*3],
                params5[1+i*3], sigma5[1+i*3]))
            print(" %8.1f %7.1f" % (
                params5[0]-params5[3], np.sqrt(sigma5[0]**2 + sigma5[3]**2)))
        plt.show()
        print( "Read %d values from file %s" % (len(utcs), filename))
    return
if __name__ == "__main__":
    main()
| true |
a6a89192203d5d2c327da0843f910299729faefe | Python | nuclearglow/machine-learning | /titanic/titanic.py | UTF-8 | 4,233 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
import os
import pandas as pd
import numpy as np
import math
import joblib
import matplotlib
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import OneHotEncoder, LabelBinarizer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score, cross_val_predict, cross_validate
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import RandomForestClassifier
from transformers.DataFrameSelector import DataFrameSelector
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_curve, roc_auc_score
from util import evaluate_classifier_model
## load data
train_csv = os.path.join("data", "train.csv")
training_data = pd.read_csv(train_csv)
test_csv = os.path.join("data", "test.csv")
test_data = pd.read_csv(test_csv)
# preprocessing
# check for missing data in label -> none
# training_data["Survived"].isna()
# invalid_training_labels = training_data[training_data["Survived"].isna()]
# NOTE(review): "Pclass" appears in BOTH numeric_attributes (scaled) and
# category_attributes (one-hot encoded) -- confirm the duplication is wanted.
numeric_attributes = ["Pclass", "Age", "SibSp", "Parch", "Fare"]
category_attributes = ["Pclass", "Embarked"]
binary_attributes = ["Sex", "Cabin"]
# TODO: move to pipeline, own transformer DataMapper
# prepare Sex category
training_data["Sex"] = training_data["Sex"].map({"male": 0, "female": 1})
test_data["Sex"] = test_data["Sex"].map({"male": 0, "female": 1})
# prepare Cabin data (String or NaN)
# Cabin becomes a has-cabin indicator: 1 when a cabin string is present
training_data["Cabin"] = training_data["Cabin"].map(
    lambda cabin: 1 if isinstance(cabin, str) else 0
)
test_data["Cabin"] = test_data["Cabin"].map(
    lambda cabin: 1 if isinstance(cabin, str) else 0
)
# binary columns are already 0/1, so this pipeline only selects them
category_binarize_pipeline = Pipeline(
    [("selector", DataFrameSelector(binary_attributes)),]
)
category_onehot_pipeline = Pipeline(
    [
        ("selector", DataFrameSelector(category_attributes)),
        ("imputer", SimpleImputer(strategy="most_frequent")),
        ("one_hot_encode", OneHotEncoder(sparse=False)),
    ]
)
numeric_pipeline = Pipeline(
    [
        ("selector", DataFrameSelector(numeric_attributes)),
        ("imputer", SimpleImputer(strategy="median")),
        ("std_scaler", StandardScaler()),
    ]
)
# column-wise concatenation of the three preprocessed feature groups
preprocessing_pipeline = FeatureUnion(
    transformer_list=[
        ("numeric_pipeline", numeric_pipeline),
        ("binarize", category_binarize_pipeline),
        ("one_hot_encode", category_onehot_pipeline),
    ]
)
# extract labels
training_labels = training_data["Survived"].to_numpy()
# test_labels = test_data["Survived"].to_numpy()
# transform the date
titanic_training_data_preprocessed = preprocessing_pipeline.fit_transform(training_data)
# titanic_test_data_preprocessed = preprocessing_pipeline.fit_transform(test_data)
# joblib dump
joblib.dump(
    titanic_training_data_preprocessed, "data/titanic_training_data_preprocessed.pkl"
)
joblib.dump(training_labels, "data/titanic_training_labels.pkl")
# joblib.dump(titanic_test_data_preprocessed, "data/titanic_test_data_preprocessed.pkl")
# joblib.dump(test_labels, "data/titanic_test_labels.pkl")
# SGDClassifier
sgd_clf = SGDClassifier(random_state=42)
sgd_validate = cross_validate(
    sgd_clf,
    titanic_training_data_preprocessed,
    training_labels,
    cv=3,
    scoring="accuracy",
)
sgd_cross_validation = cross_val_score(
    sgd_clf,
    titanic_training_data_preprocessed,
    training_labels,
    cv=3,
    scoring="accuracy",
)
decision_scores = cross_val_predict(
    sgd_clf,
    titanic_training_data_preprocessed,
    training_labels,
    cv=3,
    method="decision_function",
)
sgd_evaluation = evaluate_classifier_model(decision_scores, training_labels)
# Random Forest
random_forest_clf = RandomForestClassifier(random_state=42)
# NOTE(review): this overwrites the SGD result above -- a distinct name
# (e.g. rf_cross_validation) was probably intended.
sgd_cross_validation = cross_val_score(
    random_forest_clf,
    titanic_training_data_preprocessed,
    training_labels,
    cv=3,
    scoring="accuracy",
)
# NOTE(review): no method="decision_function" here, so these are predicted
# class labels, not decision scores -- confirm evaluate_classifier_model
# accepts labels.
decision_scores = cross_val_predict(
    random_forest_clf, titanic_training_data_preprocessed, training_labels, cv=3
)
random_forest_evaluation = evaluate_classifier_model(decision_scores, training_labels)
| true |
b4ac09619d6cc32e6a8fe4ab177385d15bbf6fb5 | Python | roshan9419/AStarPathFindingVisulaizer | /aStarPathFinder.py | UTF-8 | 5,983 | 3.421875 | 3 | [] | no_license | import pygame
import math
from random import randint
from queue import PriorityQueue
pygame.init()
# grid is ROWS x ROWS cells, each WIDTH // ROWS = 14 px square.
# NOTE(review): HEIGHT (500) is smaller than the 700 px the full grid
# needs, so the bottom rows are drawn off-screen -- confirm intended.
ROWS = 50
WIDTH = 700
HEIGHT = 500
WIN = pygame.display.set_mode((WIDTH, HEIGHT)) # Screen Size
pygame.display.set_caption("A* Path Finding Algorithm")
# COLORS
START_COLOR = (124, 32, 49)
END_COLOR = (0, 255, 0)
WALL_COLOR = (12, 53, 71)
VISITED_COLOR = (63, 203, 223)
VISITED_OUTER_COLOR = (197, 114, 255)
UNVISITED_COLOR = (255, 255, 255)
WALL_BOUNDARY_COLOR = (175, 216, 248)
PATH_COLOR = (255, 255, 0)
WIN_BACKGROUND_COLOR = (255, 255, 255)
# defining a font and a text
# NOTE(review): `text` is never blitted anywhere in this file
smallfont = pygame.font.SysFont('Corbel',35)
text = smallfont.render('Start Visualizing' , True , UNVISITED_COLOR)
class Spot:
    """A single cell of the search grid.

    The cell's current color doubles as its search state: wall, open set,
    closed set, start, end, path, or unvisited.
    """

    def __init__(self, row, col, width, total_rows):
        self.row = row
        self.col = col
        # pixel coordinates of the cell's top-left corner
        self.x = row * width
        self.y = col * width
        self.color = UNVISITED_COLOR
        self.neighbors = []
        self.width = width
        self.total_rows = total_rows

    def _paint(self, color):
        # single place where the state color is assigned
        self.color = color

    def get_pos(self):
        """Return the (row, col) grid coordinates of this cell."""
        return self.row, self.col

    def is_closed(self):
        return self.color == VISITED_COLOR

    def is_open(self):
        return self.color == VISITED_OUTER_COLOR

    def is_barrier(self):
        return self.color == WALL_COLOR

    def reset(self):
        self._paint(UNVISITED_COLOR)

    def make_start(self):
        self._paint(START_COLOR)

    def make_closed(self):
        self._paint(VISITED_COLOR)

    def make_open(self):
        self._paint(VISITED_OUTER_COLOR)

    def make_barrier(self):
        self._paint(WALL_COLOR)

    def make_end(self):
        self._paint(END_COLOR)

    def make_path(self):
        self._paint(PATH_COLOR)

    def draw(self, win):
        """Render this cell as a filled square on the window surface."""
        rect = (self.x, self.y, self.width, self.width)
        pygame.draw.rect(win, self.color, rect)

    def update_neighbors(self, grid):
        """Collect traversable 4-neighbors in down, up, right, left order."""
        self.neighbors = []
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            r, c = self.row + dr, self.col + dc
            if 0 <= r < self.total_rows and 0 <= c < self.total_rows:
                candidate = grid[r][c]
                if not candidate.is_barrier():
                    self.neighbors.append(candidate)

    def __lt__(self, other):
        # Spots never order before one another; PriorityQueue ties are
        # broken by the insertion counter instead.
        return False
def hf(p1, p2):
    """Manhattan-distance heuristic between two grid points.

    (Euclidean distance would be an alternative admissible heuristic.)
    """
    dx = abs(p1[0] - p2[0])
    dy = abs(p1[1] - p2[1])
    return dx + dy
def reconstruct_path(grid, came_from, current):
    """Walk the came_from chain back from `current`, painting and
    redrawing each ancestor as part of the final path."""
    node = came_from.get(current)
    while node is not None:
        node.make_path()
        print((node.row, node.col))
        draw(WIN, grid, ROWS, WIDTH)
        node = came_from.get(node)
def performAStartAlgorithm(win, grid, start, end):
    """A* search from start to end over the Spot grid, animating progress.

    Returns True when a path is found (and painted), False when the open
    set empties without reaching end.  Edge cost is uniform (1); `count`
    is a monotonically increasing tiebreaker so equal f-scores pop FIFO.
    """
    count = 0
    open_set = PriorityQueue()
    open_set.put((0, count, start))
    came_from = {}
    g_score = {spot: float("inf") for row in grid for spot in row}
    g_score[start] = 0
    f_score = {spot: float("inf") for row in grid for spot in row}
    f_score[start] = hf(start.get_pos(), end.get_pos())
    # mirrors open_set membership for O(1) "already queued" tests
    open_set_hash = {start}
    while not open_set.empty():
        # keep the window responsive while searching
        # NOTE(review): pygame.quit() here does not stop the search loop;
        # subsequent draw calls will fail -- confirm intended.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
        current = open_set.get()[2]
        open_set_hash.remove(current)
        if current == end:
            reconstruct_path(grid, came_from, end)
            end.make_end()
            start.make_start()
            return True
        for neighbor in current.neighbors:
            temp_g_score = g_score[current] + 1
            if temp_g_score < g_score[neighbor]:
                # found a cheaper route to neighbor: relax it
                came_from[neighbor] = current
                g_score[neighbor] = temp_g_score
                f_score[neighbor] = temp_g_score + hf(neighbor.get_pos(), end.get_pos())
                if neighbor not in open_set_hash:
                    count += 1
                    open_set.put((f_score[neighbor], count, neighbor))
                    open_set_hash.add(neighbor)
                    neighbor.make_open()
        draw(win, grid, ROWS, WIDTH)
        if current != start:
            current.make_closed()
    return False
def performDijkstrasAlgorithm(draw, grid, start, end):
    """Placeholder for a Dijkstra search; not implemented and not called
    anywhere in this file.  Note the signature differs from
    performAStartAlgorithm (takes a draw callable instead of a window)."""
    pass
def make_grid(rows, width):
    """Build a rows x rows matrix of Spot cells, each width // rows pixels."""
    gap = width // rows
    return [[Spot(i, j, gap, rows) for j in range(rows)] for i in range(rows)]
def draw_grid(win, rows, width):
    """Draw the horizontal and vertical grid separator lines.

    The original nested the vertical-line loop inside the horizontal one,
    redrawing every vertical line `rows` times (rows*rows draw calls).
    Drawing each set once is visually identical and O(rows).
    """
    gap = width // rows
    for i in range(rows):
        pygame.draw.line(win, WALL_BOUNDARY_COLOR, (0, i * gap), (width, i * gap))
    for j in range(rows):
        pygame.draw.line(win, WALL_BOUNDARY_COLOR, (j * gap, 0), (j * gap, width))
def draw(win, grid, rows, width):
    """Repaint the background, every cell, and the grid lines, then
    push the frame to the display."""
    win.fill(WIN_BACKGROUND_COLOR)
    for row in grid:
        for cell in row:
            cell.draw(win)
    draw_grid(win, rows, width)
    pygame.display.update()
def get_clicked_pos(pos, rows, width):
    """Map a pixel position to (row, col); pos[0] selects the row."""
    gap = width // rows
    return pos[0] // gap, pos[1] // gap
def main(win, width):
    """Event loop: left-click places start, end, then walls; right-click
    erases a cell; 'a' runs A*; 'c' clears the grid; closing the window
    exits."""
    grid = make_grid(ROWS, width)
    start = None
    end = None
    run = True
    while run:
        draw(win, grid, ROWS, width)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
            # mouse state is polled, so drags paint continuous walls
            if pygame.mouse.get_pressed()[0]: #LEFT
                pos = pygame.mouse.get_pos()
                row, col = get_clicked_pos(pos, ROWS, width)
                spot = grid[row][col]
                # first click sets start, second sets end, later ones walls
                if not start and spot != end:
                    start = spot
                    start.make_start()
                elif not end and spot != start:
                    end = spot
                    end.make_end()
                elif spot != end and spot != start:
                    spot.make_barrier()
            elif pygame.mouse.get_pressed()[2]: #RIGHT
                pos = pygame.mouse.get_pos()
                row, col = get_clicked_pos(pos, ROWS, width)
                spot = grid[row][col]
                spot.reset()
                if spot == start: start = None
                elif spot == end: end = None
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_a and start and end:
                    # refresh neighbor lists so new walls are respected
                    for row in grid:
                        for spot in row:
                            spot.update_neighbors(grid)
                    performAStartAlgorithm(win, grid, start, end)
                if event.key == pygame.K_c:
                    start = None
                    end = None
                    grid = make_grid(ROWS, width)
                    print("Grid Cleared")
    pygame.quit()
    print("Successfully Visualized")
main(WIN, WIDTH)
| true |
613baad711862eb0ecc95d94c087c8235e3c8993 | Python | ekarincodizm/AutomateWemall.com | /Keyword/Portal/storefront_cms/shop_management_page/css_pages_list.py | UTF-8 | 2,434 | 2.953125 | 3 | [] | no_license | import datetime
import json
def get_pages_list_data_from_response(response, view):
    """Parse a CMS pages-list JSON response into a list of page dicts.

    response -- JSON string whose 'data' maps page ids to page records
    view     -- suffix selecting the timestamp columns ('updated_at_<view>'
                and 'published_at_<view>')
    """
    data = json.loads(response)['data']
    status_labels = {'active': 'Active', 'inactive': 'Inactive'}
    pages_list = []
    for item in data.values():
        updated_at = item['updated_at_' + view]
        published_at = item['published_at_' + view]
        updated_by = item['page_updated_by']
        pages_list.append({
            'page_name': item['name'],
            # unexpected raw statuses are surfaced as 'wrong'
            'page_status': status_labels.get(item['page_status'], 'wrong'),
            'content_status': _calculate_page_content_status(updated_at, published_at),
            'live': _calculate_page_live(item['page_status'], published_at),
            'last_updated': '-' if updated_at is None else _convert_time(updated_at),
            'last_published': '-' if published_at is None else _convert_time(published_at),
            'modified_by': '' if updated_by is None else updated_by,
        })
    return pages_list
def _convert_time(str_date):
return datetime.datetime.strptime(str_date, "%Y-%m-%dT%H:%M:%S+07:00").strftime("%d/%m/%Y %H:%M:%S")
def _calculate_page_content_status(content_updated_at, content_published_at):
if content_updated_at is not None:
if content_published_at is not None:
if content_updated_at < content_published_at:
return 'Published'
else:
return 'Modified'
else:
return 'Draft'
else:
return 'Waiting'
def _calculate_page_live(page_status, content_published_at):
if page_status == 'inactive':
return False
elif content_published_at is not None:
return True
else:
return False
def compare_list(list1, list2):
    """Assert both lists have equal length and every item of list1 occurs
    in list2 (duplicates are not counted)."""
    assert len(list1) == len(list2), 'Length of list not equal'
    for item in list1:
        assert item in list2, 'List not equal: ' + str(item)
def should_more_than_as_string(str1, str2, message=None):
    """Assert str1 compares strictly greater than str2 (lexicographically)."""
    assert str2 < str1, message
| true |
324928dd8bca612f5f95bec9428ced8776c21ed4 | Python | S41nz/diakrino | /model/enums/categoria_grado_academico.py | ISO-8859-1 | 564 | 2.671875 | 3 | [] | no_license | # -*- coding: latin-1 -*-
'''
Enumeración que representa los diferentes tipos de grado académico que puede tener un candidato determinado
Created on 18/03/2015
@author: SA1nz
'''
class CategoriaGradoAcademico:
    """Enumeration of the academic-degree levels a candidate may hold.

    Values are the Spanish display strings, title-case by convention.
    """
    #Enumeraciones
    PREESCOLAR = "Preescolar"
    PRIMARIA = "Primaria"
    SECUNDARIA = "Secundaria"
    PREPARATORIA = "Preparatoria"
    # was "LICENCIATURA": every other value is a title-case display
    # string, so the all-caps value broke the convention
    LICENCIATURA = "Licenciatura"
    # accent restored ("Maestra" was latin-1 mojibake of "Maestría")
    MAESTRIA = "Maestría"
    DOCTORADO = "Doctorado"
    POST_DOCTORADO = "Post Doctorado"
    DIPLOMADO = "Diplomado"
    ESPECIALIDAD = "Especialidad"
| true |
368dc6a04942e0b651e8996f8821b1df964ac1dd | Python | home-assistant/supervisor | /tests/utils/test_json.py | UTF-8 | 612 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | """test json."""
from supervisor.utils.json import write_json_file
def test_file_permissions(tmp_path):
    """Files written by write_json_file must end up chmod 600."""
    target = tmp_path / "test.json"
    write_json_file(target, {"test": "data"})
    assert target.is_file()
    assert oct(target.stat().st_mode).endswith("600")
def test_new_file_permissions(tmp_path):
    """write_json_file must tighten permissions of a pre-existing file."""
    target = tmp_path / "test.json"
    target.write_text("test")
    assert not oct(target.stat().st_mode).endswith("600")
    write_json_file(target, {"test": "data"})
    assert oct(target.stat().st_mode).endswith("600")
| true |
88904b6888ddf53e08168e72aaacc43c5c54cd5b | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_97/1658.py | UTF-8 | 751 | 3.109375 | 3 | [] | no_license | cases = int( input() )
index = 1
while index <= cases:
_in = input()
low,high = _in.split()
low = int( low )
high = int( high )
firstNum = ""
secondNum = ""
revNum = ""
counter = 0
newNum = ""
for i in range( low, high + 1 ):
firstNum = str( i )
newNum = firstNum
for j in range( i + 1, high + 1 ):
if( len( firstNum ) == 1 ):
break
secondNum = str( j )
for k in range( 0, len( firstNum ) ):
newNum = newNum[ -1 ] + newNum[ : -1 ]
if not( newNum < firstNum ):
if newNum == secondNum:
counter += 1
break
print( "Case #" + str( index ) + ": " + str( counter ) )
counter = 0
index += 1
| true |
68e84da61f3dba108fc0d30167bbeefa3c8cefa5 | Python | mkbeh/rin-bitshares-arbitry-bot | /src/aiopybitshares/account.py | UTF-8 | 785 | 2.625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from .grambitshares import GramBitshares, default_node
class Account(GramBitshares):
    """Bitshares account RPC helper built on the GramBitshares client."""
    def __init__(self):
        super().__init__()
        # client handle; populated by connect()
        self._gram = None
    async def connect(self, ws_node=default_node):
        # NOTE(review): super().connect() presumably opens the websocket
        # and returns the client object -- confirm against GramBitshares.
        self._gram = await super().connect(ws_node)
        return self
    async def get_account_balances(self, account_id, *args):
        """
        :param account_id: ID of the account to get balances for
        :param args: ID of the asset to get balances of; if empty, get all assets account has a balance in
        :return: the 'amount' of the FIRST balance entry only, or None when
            the response has no 'result' key (the KeyError is swallowed)
        """
        raw_data = await self._gram.call_method('get_account_balances', account_id, args)
        try:
            # NOTE(review): only the first asset's amount is returned even
            # though the call may list several; an error response silently
            # yields None.
            return raw_data['result'][0]['amount']
        except KeyError:
            pass
| true |
6976674424b68a14c404600b599b5bad51bf0eab | Python | sandeep325/python-GUI-calculator | /calculator.py | UTF-8 | 3,262 | 3.171875 | 3 | [] | no_license | from tkinter import *
# Main window setup.  The 800x900 geometry is immediately overridden by
# the fixed 670x500 max/min size, so the window is effectively 670x500.
# NOTE(review): wm_iconbitmap raises if calculator.ico is missing from
# the working directory.
top=Tk()
top.title("calculator")
top.wm_iconbitmap("calculator.ico")
top.geometry("800x900")
top.maxsize(670,500)
top.minsize(670,500)
def click(event):
    """Handle a keypad button press.

    '='  -- evaluate the current screen contents and show the result
    'c'  -- clear the screen
    else -- append the button's label to the screen
    """
    global scvalue
    text = event.widget.cget("text")  # the pressed button's label
    if text == "c":
        scvalue.set("")
    elif text == "=":
        current = scvalue.get()
        # plain digit strings pass through int(); anything else (e.g.
        # "5*3") is evaluated as a Python expression.  eval() on raw user
        # input is acceptable only because input comes from our own keypad.
        result = int(current) if current.isdigit() else eval(cal_screen.get())
        scvalue.set(result)
    else:
        scvalue.set(scvalue.get() + text)
    cal_screen.update()
# Shared display variable and entry widget; click() reads/writes these.
scvalue = StringVar()
scvalue.set("")
# original used the Tk-abbreviated option name "fon" for "font"
cal_screen = Entry(top, textvar=scvalue, font="lucida 35 bold", bg="gray")
cal_screen.pack(fill=X, pady=13, padx=15)
# Keypad built from data instead of ~90 copy-pasted widget lines; the
# rows, labels, styling and packing are identical to the original.
button_rows = [
    ("9", "8", "7"),
    ("6", "5", "4"),
    ("3", "2", "1"),
    ("0", "00", "."),
    ("+", "-", "x"),
    ("c", "=", "/"),
]
for labels in button_rows:
    f = Frame(top, bg="skyblue")
    for label in labels:
        # the wide "00" key used slightly smaller padding originally
        padx = 14.5 if label == "00" else 20
        b = Button(f, text=label, font="lucida 20 bold", padx=padx, pady=4)
        b.pack(side=LEFT, padx=10, pady=5)
        b.bind("<Button-1>", click)
    f.pack()
top.mainloop()
7c2f3bfa1c8cf370a6d706197cd318f29ac76404 | Python | Tony0726/Python-TA-interview-questions | /Image Convolution.py | UTF-8 | 1,490 | 2.96875 | 3 | [] | no_license | import cv2
import numpy as np
def blur(videopath, kernel, savepath):
    """Convolve every frame of the video at *videopath* with *kernel* and
    write the result to *savepath*, previewing frames while processing.

    Press 'q' in the preview window to stop early.
    """
    vid = cv2.VideoCapture(videopath)
    frame_w = int(vid.get(3))   # frame width
    frame_h = int(vid.get(4))   # frame height
    fps = int(vid.get(5))       # frame rate
    # Writer for the output stream (I420 codec, same geometry as the input).
    fourcc = cv2.VideoWriter_fourcc('I', '4', '2', '0')
    out = cv2.VideoWriter(savepath, fourcc, fps, (frame_w, frame_h))
    while vid.isOpened():
        ret, frame = vid.read()
        if not ret:  # no more frames to read
            break
        frame = cv2.filter2D(frame, -1, kernel)   # convolve with the kernel
        out.write(frame)
        preview = cv2.resize(frame, (640, 360))   # shrink so it fits on screen
        cv2.imshow('frame', preview)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    vid.release()       # release the reader
    out.release()       # release the writer
    cv2.destroyAllWindows()
if __name__ == '__main__':
    filename = 'highway_video.mp4'  # path of the input video
    # 5x5 Gaussian-style smoothing kernel, normalised by 159 so the weights
    # sum to one.
    kernel = np.array((
        [2, 4, 5, 4, 2],
        [4, 9, 12, 9, 4],
        [5, 12, 15, 12, 5],
        [4, 9, 12, 9, 4],
        [2, 4, 5, 4, 2]), dtype="float32") / 159
    # Blur the video and save the result.
    blur(filename, kernel, savepath='output.avi')
176b4f9add010c28c74559ac4fac20424854f67c | Python | BCEM-UniAndes/Reproducibility-Guidelines | /codes/Change_header_fasta.py | UTF-8 | 1,303 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
###About###
#Name:Change_header_fasta.py
#Author:Luisa Matiz
#Description:Script to change header of fasta
#Institution:Universidad de los Andes
#email:lf.matiz10@uniandes.edu.co
#Date:10-02-2019
#Version:Python3.0 or more
###Libraries###
import sys
import argparse
import click
###Help###
parser = argparse.ArgumentParser(
    description='''Run this script as python Change_header_fasta.py file. ''',
    epilog="""It will produce a header assignation by sequence position in input fasta""")
parser.add_argument("file", help="input fasta file")
parser.add_argument("version", help="python 3 or more")
args = parser.parse_args()
print(args.file)
print(args.version)

###Commands###
# Derive the output file name from the input name: <stem>_new.fasta
labels = sys.argv[1].split(".")
count = 0  # running 1-based position of the current sequence in the input
# Stream the input line by line and rewrite it; `with` guarantees that both
# files are closed (the original never closed the input file).
with open(sys.argv[1], 'r') as source, \
        open(labels[0] + "_new" + ".fasta", "w") as handle:
    for l in source:
        if l.startswith(">"):
            # Replace the header with "<position>_<input stem>"
            count += 1
            handle.write(">" + str(count) + "_" + labels[0] + "\n")
        else:
            handle.write(str(l))
0e1a3141a72a1293a5a071ed8caa3f632ae5bebc | Python | koikera/JogoPython | /adivinhacao.py | UTF-8 | 1,717 | 3.9375 | 4 | [] | no_license | import random
def jogar():
    """Run one round of the console number-guessing game (messages in
    Portuguese).  The player picks a difficulty, then guesses a secret
    number between 1 and 100; wrong guesses cost points."""
    print("*****************************")
    print("Bem vindo ao jogo Adivinhacao")
    print("*****************************")

    numero_secreto = random.randrange(1, 100)
    pontos = 1000

    print("qual nivel de dificuldade?")
    print("(1) Facil (2)Medio (3)Dificil")
    nivel = int(input("Defina um nivel: "))

    # Harder levels grant fewer tries.
    if nivel == 1:
        total_tentativas = 20
    elif nivel == 2:
        total_tentativas = 10
    else:
        total_tentativas = 5

    for rodada in range(1, total_tentativas + 1):
        print("Tentativa {} de {}".format(rodada, total_tentativas))
        chute_str = input("Digite um numero de 1 a 100: ")
        print("Voce Digitou", chute_str)
        chute_int = int(chute_str)

        # BUG FIX: the original used the bitwise operator '|' here; because
        # '|' binds tighter than the comparisons, the check never rejected
        # out-of-range guesses.  The logical 'or' is what was intended.
        if chute_int < 1 or chute_int > 100:
            print("Voce deve digitar um numero entre 1 e 100")
            continue

        acertou = chute_int == numero_secreto
        maior = chute_int > numero_secreto
        menor = chute_int < numero_secreto

        if acertou:
            print("Voce Acertou e fez {} pontos!".format(pontos))
            break
        else:
            if maior:
                print("seu chute foi maior que o numero secreto")
            elif menor:
                print("seu chute foi menor que o numero secreto")
            # Points lost equal the distance from the secret number.
            pontos_perdidos = abs(numero_secreto - chute_int)
            pontos = pontos - pontos_perdidos

    print("O numero secreto era: {}".format(numero_secreto))
    print("Fim de jogo")
# Start the game only when the file is executed directly, not on import.
if __name__ == "__main__":
    jogar()
| true |
efe7862549a082810c4f7ace229ce4d8353b2bfd | Python | paik11012/Algorithm | /lecture/day02/day02_2.py | UTF-8 | 655 | 2.984375 | 3 | [] | no_license |
import sys
from itertools import combinations
from typing import Any, Union  # kept from the original (unused)

# The sample input is read from a file instead of the console.
sys.stdin = open('sample_2.txt', 'r')

arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]

total_num = int(input())
for tot in range(1, total_num + 1):
    N, K = map(int, input().split())
    # Count the N-element subsets of arr whose elements sum to K.
    # The original enumerated all 2**12 bit-mask subsets and filtered by
    # size afterwards; combinations() generates only the size-N subsets,
    # which gives the same count much faster and avoids shadowing `n`.
    total = sum(1 for combo in combinations(arr, N) if sum(combo) == K)
    print('#{0} {1}'.format(tot, total))
| true |
ca9f7c9caf538039a87a349731ed3da91e6c403f | Python | dimpusagar91/python_tutorials | /3_datatypes_datastructures/functions_demo.py | UTF-8 | 1,873 | 4.125 | 4 | [] | no_license | #!/usr/bin/python
#Multiple assignment applicable
#assign the same value to three names with one statement
vara = varb = varc = 90
# to check the variable values
print("vara :", vara)
print("varb :", varb)
print("varc :", varc)
# assign three different values in one statement (tuple unpacking)
varint, varfloat, varstr = 90,92.75,"john"
#to check the variable value
print("varint :", varint)
print("varfloat :", varfloat)
print("varstr :", varstr)
# methods with numbers
# to convert the value to a plain integer (truncates toward zero)
print("a plain integer :", int(22/7))
# to import the math module used below
import math
# to print absolute value
print("abs(-940.2) :", abs(-940.2))
# to print largest integer not greater than pi
print("math.floor(math.pi)",math.floor(math.pi))
# to print largest of its arguments
print("max(34,23,329):",max(34,23,329))
# to print smallest of its arguments
print("min(34,23,329):",min(34,23,329))
# to print value of 100 to the power of 0 (always 1.0)
print("math.pow(100,0) :",math.pow(100,0))
# to print pi rounded to 3 digits
print("round(math.pi,3) :",round(math.pi,3))
# to print square root of pi
print("math.sqrt(math.pi) :",math.sqrt(math.pi))
# import random module
import random
# to print a random float r, such that 0 is less than or
# equal to r and r is less than 1
print("random() :",random.random())
print("random() :",random.random())
# trigonometric functions with numbers: cosine of 0 radians
print("cos(0) :",math.cos(0))
# to print the sine of 0 radians (not the arc sine)
print("sin(0) :",math.sin(0))
# to convert the value to plain integer
print("a plain integer :", int(22/7))
# to convert the value to a floating-point number
print("a floating point number :", float(22/7))
# to convert value to a complex number with real part 3 and imaginary part zero
print("complex number :", complex(3))
# to convert 3 and 2 to a complex number with real part 3 and imaginary part 2
print("complex number :", complex(3,2))
| true |
643e76af3dde06ec4c1ef6093a2ea4dfb352f693 | Python | scipp/scipp | /tests/core/math_test.py | UTF-8 | 4,825 | 2.921875 | 3 | [
"BSD-3-Clause"
] | permissive | # SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2023 Scipp contributors (https://github.com/scipp)
# @author Jan-Lukas Wynen
import numpy as np
import pytest
import scipy
import scipp as sc
# Each case pairs a scipp elementwise function with its NumPy/SciPy reference
# implementation; results on a scalar input must agree.
@pytest.mark.parametrize(
    'funcs',
    (
        (sc.erf, scipy.special.erf),
        (sc.erfc, scipy.special.erfc),
        (sc.exp, np.exp),
        (sc.log, np.log),
        (sc.log10, np.log10),
        (sc.sqrt, np.sqrt),
    ),
)
def test_unary_math_compare_to_numpy_dimensionless(funcs):
    sc_f, ref = funcs
    assert sc.allclose(sc_f(sc.scalar(0.512)), sc.scalar(ref(0.512)))


@pytest.mark.parametrize('func', (sc.exp, sc.log, sc.log10, sc.sqrt))
def test_unary_math_out(func):
    # Writing through the `out` argument must match the plain return value.
    out = sc.scalar(np.nan)
    func(sc.scalar(0.932), out=out)
    assert sc.identical(out, func(sc.scalar(0.932)))


@pytest.mark.parametrize(
    'funcs', ((sc.sin, np.sin), (sc.cos, np.cos), (sc.tan, np.tan))
)
def test_compare_unary_math_to_numpy_trigonometry(funcs):
    sc_f, ref = funcs
    assert sc.allclose(sc_f(sc.scalar(0.512, unit='rad')), sc.scalar(ref(0.512)))


@pytest.mark.parametrize('func', (sc.sin, sc.cos, sc.tan))
def test_unary_math_trigonometry_out(func):
    out = sc.scalar(np.nan)
    func(sc.scalar(0.932, unit='rad'), out=out)
    assert sc.identical(out, func(sc.scalar(0.932, unit='rad')))


@pytest.mark.parametrize(
    'funcs', ((sc.asin, np.arcsin), (sc.acos, np.arccos), (sc.atan, np.arctan))
)
def test_compare_unary_math_to_numpy_inv_trigonometry(funcs):
    sc_f, ref = funcs
    assert sc.allclose(sc_f(sc.scalar(0.512)), sc.scalar(ref(0.512), unit='rad'))


@pytest.mark.parametrize('func', (sc.asin, sc.acos, sc.atan))
def test_unary_math_inv_trigonometry_out(func):
    out = sc.scalar(np.nan, unit='rad')
    func(sc.scalar(0.932), out=out)
    assert sc.identical(out, func(sc.scalar(0.932)))


@pytest.mark.parametrize('args', ((sc.sqrt, sc.Unit('m^2'), sc.Unit('m')),))
def test_unary_math_unit(args):
    # Units propagate through the function independently of values.
    func, inp, expected = args
    assert func(inp) == expected
def test_abs():
    # Both the free function and the builtin abs() work on variables/units.
    assert sc.identical(sc.abs(sc.scalar(-72)), sc.scalar(72))
    assert sc.identical(abs(sc.scalar(-72)), sc.scalar(72))
    assert sc.abs(sc.Unit('m')) == sc.Unit('m')
    assert abs(sc.Unit('m')) == sc.Unit('m')


def test_abs_out():
    out = sc.scalar(0)
    sc.abs(sc.scalar(-5), out=out)
    assert sc.identical(out, sc.scalar(5))


def test_cross():
    assert sc.identical(
        sc.cross(sc.vector([0, 0, 1]), sc.vector([0, 1, 0])), sc.vector([-1, 0, 0])
    )


def test_dot():
    assert sc.identical(
        sc.dot(sc.vector([1, 0, 2]), sc.vector([0, 1, 3])), sc.scalar(6.0)
    )


def test_midpoints():
    assert sc.allclose(
        sc.midpoints(sc.array(dims=['xy'], values=[0.0, 1.0])),
        sc.array(dims=['xy'], values=[0.5]),
    )
    # Passing the dim explicitly gives the same result.
    assert sc.allclose(
        sc.midpoints(sc.array(dims=['xy'], values=[0.0, 1.0]), dim='xy'),
        sc.array(dims=['xy'], values=[0.5]),
    )


def test_norm():
    assert sc.allclose(sc.norm(sc.vector([1.0, 2.0, 0.0])), sc.scalar(np.sqrt(1 + 4)))


def test_pow():
    assert sc.allclose(sc.pow(sc.scalar(2), sc.scalar(2)), sc.scalar(4))
    assert sc.allclose(sc.pow(sc.scalar(2), 3), sc.scalar(8))
    # Powers of units.
    assert sc.pow(sc.Unit('m'), 2.0) == sc.Unit('m^2')
    assert sc.pow(sc.Unit('m'), 2) == sc.Unit('m^2')


def test_atan2():
    assert sc.allclose(
        sc.atan2(y=sc.scalar(0.5), x=sc.scalar(1.2)),
        sc.scalar(np.arctan2(0.5, 1.2), unit='rad'),
    )


def test_atan2_out():
    out = sc.scalar(np.nan)
    sc.atan2(y=sc.scalar(0.5), x=sc.scalar(1.2), out=out)
    assert sc.allclose(out, sc.scalar(np.arctan2(0.5, 1.2), unit='rad'))


def test_reciprocal():
    assert sc.identical(sc.reciprocal(sc.scalar(2.0)), sc.scalar(0.5))
    assert sc.reciprocal(sc.units.m) == sc.units.one / sc.units.m


def test_reciprocal_out():
    out = sc.scalar(np.nan)
    sc.reciprocal(sc.scalar(2.0), out=out)
    assert sc.identical(out, sc.scalar(0.5))
def test_round():
    # Banker's rounding: both 1.5 and 2.5 round to 2.0.
    x = sc.array(dims=['x'], values=(1.1, 1.5, 2.5, 4.7))
    expected = sc.array(dims=['x'], values=(1.0, 2.0, 2.0, 5.0))
    assert sc.identical(sc.round(x), expected)
    x_out = sc.zeros_like(expected)
    sc.round(x, out=x_out)
    assert sc.identical(x_out, expected)


def test_ceil():
    x = sc.array(dims=['x'], values=(1.1, 1.5, 2.5, 4.7))
    expected = sc.array(dims=['x'], values=(2.0, 2.0, 3.0, 5.0))
    assert sc.identical(sc.ceil(x), expected)
    x_out = sc.zeros_like(expected)
    sc.ceil(x, out=x_out)
    assert sc.identical(x_out, expected)


def test_floor():
    x = sc.array(dims=['x'], values=(1.1, 1.5, 2.5, 4.7))
    expected = sc.array(dims=['x'], values=(1.0, 1.0, 2.0, 4.0))
    assert sc.identical(sc.floor(x), expected)
    x_out = sc.zeros_like(expected)
    sc.floor(x, out=x_out)
    assert sc.identical(x_out, expected)
| true |
6b8a1886e7f6e848e5118e8017e461470e678345 | Python | marius-pop0/CybeDataAnlaytics | /assignment1/plotting.py | UTF-8 | 6,956 | 2.78125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
def statistics(df):
    """Print an exploratory frequency table of the transaction frame.

    Several additional aggregations (df2..df6) are computed exactly as in
    the original exploration session but are not printed; they are kept
    for reference.
    """
    # Frequency of every (country, currency, label) combination.
    df1 = (df.groupby(['shoppercountrycode', 'currencycode', 'simple_journal'])
           .size().reset_index(name='freq')
           .sort_values(by=['freq'], ascending=False).head())
    # Card-type counts for Australian (AUD) transactions.
    df2 = (df[(df['shoppercountrycode'] == 'AU') & (df['currencycode'] == 'AUD')]
           .groupby(['txvariantcode', 'simple_journal']).size().reset_index(name='freq')
           .sort_values(by=['txvariantcode', 'simple_journal', 'freq'], ascending=False))
    # Where the fraudulent (chargeback) transactions come from.
    df3 = (df[df['simple_journal'] == 'Chargeback']
           .groupby(['shoppercountrycode', 'currencycode'])
           .size().reset_index(name='freq').sort_values(by=['freq'], ascending=False))
    # Cards with the most Australian chargebacks.
    df4 = (df[(df['simple_journal'] == 'Chargeback') & (df['shoppercountrycode'] == 'AU')]
           .groupby(['card_id', ]).size().reset_index(name='freq')
           .sort_values(by=['freq'], ascending=False).head(10))
    # Most active legitimate cards.
    df5 = (df[df['simple_journal'] == 'Settled']
           .groupby(['card_id']).size().reset_index(name='freq')
           .sort_values(by=['freq'], ascending=False).head(20))
    # Full history of one hand-picked card.
    df6 = (df[df['card_id'] == 'card182921']
           .groupby(['simple_journal', 'amount', 'creationdate', 'ip_id',
                     'currencycode', 'shopperinteraction', 'shoppercountrycode'])
           .size().reset_index(name='freq').sort_values(by=['creationdate']))
    print(df1)
def time_diff(df):
    """Add a 'diff_time_min' column: minutes (plus one) since the previous
    transaction of the same card; a card's first transaction gets 0.

    Note: like the original, gaps of a day or more only count their
    hour/minute part.  The input frame is modified in place; the helper
    columns are dropped before the frame is returned.
    """
    df['date'] = pd.to_datetime(df['creationdate'])
    # Gap to the previous transaction of the same card, in time order.
    df['diff_time'] = (df.sort_values(['card_id', 'creationdate'])
                       .groupby('card_id')['date'].diff())
    # FIX: the original wrapped the timedeltas in a pd.DatetimeIndex to read
    # .hour/.minute, which current pandas rejects for timedelta64 data; read
    # the components of the timedelta series directly instead.
    comp = df['diff_time'].dt.components
    df['diff_time_min'] = comp['hours'] * 60 + comp['minutes'] + 1
    df['diff_time_min'] = df['diff_time_min'].fillna(0)
    df = df.drop(['date', 'diff_time'], axis=1)
    return df
def plot_time_diff(df):
    """Scatter transaction amount vs minutes since the card's previous
    transaction, for Australian (AUD) transactions only."""
    au = df[(df['shoppercountrycode'] == 'AU') & (df['currencycode'] == 'AUD')]
    au = time_diff(au)
    fraud = au[au['simple_journal'] == 'Chargeback']
    legit = au[au['simple_journal'] == 'Settled']
    # Legitimate points first so fraud is drawn on top.
    legit_pts = plt.scatter(legit['amount'], legit['diff_time_min'], s=4)
    fraud_pts = plt.scatter(fraud['amount'], fraud['diff_time_min'], s=4)
    plt.legend((fraud_pts, legit_pts), ('Fraud', 'Legitimate'))
    plt.xlabel('transaction amount')
    plt.ylabel('time delta minutes')
    plt.show()
def plot_daily_freq(df):
    """Scatter transaction amount against how many transactions the card
    had already made that same day (0 for the day's first transaction)."""
    df['day'] = pd.to_datetime(df['creationdate']).dt.date
    # Rank each (card, day) group's transactions in time order; subtract 1
    # so the first transaction of the day counts as 0.
    df['freq'] = (df.sort_values(['creationdate'])
                  .groupby(['card_id', 'day'])['creationdate_unix']
                  .rank(method='first').astype(int)) - 1
    heavy = df[df['freq'] > 6]
    print(heavy.head())
    legit = df[df['simple_journal'] == 'Settled']
    fraud = df[df['simple_journal'] == 'Chargeback']
    legit_pts = plt.scatter(legit['amount'], legit['freq'], s=4)
    fraud_pts = plt.scatter(fraud['amount'], fraud['freq'], s=4)
    plt.legend((fraud_pts, legit_pts), ('Fraud', 'Legitimate'))
    plt.xlabel('transaction amount')
    plt.ylabel('daily frequency of so far seen transactions')
    plt.show()
def plot_amount_ave_diff(df):
    """Scatter each transaction's deviation from its card's running-average
    amount (y) against the minutes since the card's previous transaction (x).
    """
    df['date'] = pd.to_datetime(df['creationdate'])
    # Y axis: amount minus the card's expanding (running) mean.
    # FIX: pd.expanding_mean was removed from pandas; use the
    # Series.expanding().mean() method via a groupby transform, which keeps
    # the result aligned with the original row index.
    df['ave_amount'] = (df.sort_values(['creationdate'])
                        .groupby(['card_id'])['amount']
                        .transform(lambda amounts: amounts.expanding().mean()))
    df['diff_from_ave_amount'] = df['amount'] - df['ave_amount']
    # X axis: minutes (+1) since the previous transaction of the same card.
    df['diff_time'] = (df.sort_values(['card_id', 'creationdate'])
                       .groupby('card_id')['date'].diff())
    print(df.sort_values(['card_id', 'date']).head(20))
    # FIX: DatetimeIndex cannot hold timedeltas in current pandas; take the
    # hour/minute components of the gap directly from the timedelta series.
    comp = df['diff_time'].dt.components
    df['diff_time_min'] = comp['hours'] * 60 + comp['minutes'] + 1
    df['diff_time_min'] = df['diff_time_min'].fillna(0)
    print(df.head(20))
    # Scatter legitimate vs fraudulent transactions.
    fraud = df[df['simple_journal'] == 'Chargeback']
    legit = df[df['simple_journal'] == 'Settled']
    legit_pts = plt.scatter(legit['diff_time_min'], legit['diff_from_ave_amount'], s=4)
    fraud_pts = plt.scatter(fraud['diff_time_min'], fraud['diff_from_ave_amount'], s=4)
    plt.legend((fraud_pts, legit_pts), ('Fraud', 'Legitimate'))
    plt.xlabel('time delta minutes')
    plt.ylabel('amount delta from average')
    plt.show()
def plot_cards_per_ip(df):
    """Scatter how often each IP address appears, split by fraud label."""
    df.dropna(inplace=True)
    counts = (df.groupby(['ip_id', 'simple_journal'], as_index=False)['card_id']
              .size().reset_index(name='freq'))
    counts = counts[counts['ip_id'] != 'NA']
    # IP ids become floats so they can be used as a plot axis.
    counts['ip_id'] = counts['ip_id'].map(lambda value: float(value))
    print(counts)
    fraud = counts[counts['simple_journal'] == 'Chargeback']
    legit = counts[counts['simple_journal'] == 'Settled']
    legit_pts = plt.scatter(legit['ip_id'], legit['freq'], s=8)
    fraud_pts = plt.scatter(fraud['ip_id'], fraud['freq'], s=8)
    plt.legend((fraud_pts, legit_pts), ('Fraud', 'Legitimate'))
    plt.xlabel('IP ID')
    plt.ylabel('Frequency of IP used')
    plt.show()
def plot_cards_per_mail(df):
    """Scatter how often each e-mail address appears, split by fraud label."""
    rows = (df.groupby(['mail_id', 'simple_journal'], as_index=False)['card_id']
            .size().reset_index(name='freq'))
    print(rows[rows['mail_id'] == 'NA'].head())
    rows.dropna(axis=0, how='any', inplace=True)
    rows = rows[rows['mail_id'] != 'NA']
    # Mail ids become floats so they can be used as a plot axis.
    rows['mail_id'] = rows['mail_id'].map(lambda value: float(value))
    fraud = rows[rows['simple_journal'] == 'Chargeback']
    legit = rows[rows['simple_journal'] == 'Settled']
    print(legit[legit['freq'] > 1].head())
    legit_pts = plt.scatter(legit['mail_id'], legit['freq'], s=8)
    fraud_pts = plt.scatter(fraud['mail_id'], fraud['freq'], s=8)
    plt.legend((fraud_pts, legit_pts), ('Fraud', 'Legitimate'))
    plt.xlabel('E-mail')
    plt.ylabel('Frequency of e-mail used')
    plt.show()
def plots(df):
    """Heat-map of mean transaction amount per (creation date, IP) for one
    hand-picked card.

    Side effect: the 'simple_journal' column is replaced by its factorised
    integer codes.
    """
    df['simple_journal'], labels = pd.factorize(df.simple_journal)
    per_card = df[['creationdate', 'amount', 'ip_id', 'card_id']]
    # Change the card id below to inspect a different card.
    per_card = (per_card[per_card['card_id'] == 'card182921']
                .groupby(['creationdate', 'ip_id'], as_index=False)['amount'].mean())
    pivot = per_card.pivot(index='ip_id', columns='creationdate', values='amount')
    sns.heatmap(pivot)
    plt.show()
b79f8afd34f5bb8fbe323712fe6a67496711b591 | Python | Spferical/bearcart | /bearcart/_compat.py | UTF-8 | 190 | 2.5625 | 3 | [
"MIT"
] | permissive | """For compatibility between Python 2 & 3"""
import sys
# True when running under Python 2; selects the dict-iteration branch below.
PY2 = sys.version_info[0] == 2


def iteritems(d):
    """Iterate over the (key, value) pairs of *d* on both Python 2 and 3."""
    return d.iteritems() if PY2 else iter(d.items())
| true |
88a82ab7ad60d53af6f742400b67cf6708dcacdd | Python | TBespalko19/test-repository | /01_python_part/09_the_in_keyword/code.py | UTF-8 | 1,281 | 3.890625 | 4 | [] | no_license | # # friends = {"Bob", "Rolf", "Anne"}
# # print("Bob" in friends)
# movies_watched = {"The Matrix", "Green Book", "Her"}
# user_movie = input("Enter something you've watched recently: ")
# # print(user_movie in movies_watched)
# if user_movie in movies_watched:
# print(f"I've watched {user_movie} too!")
# else:
# print("I haven't watched yet")
# number = 7
# user_input = input("Enter 'y' if you would like to play: ").lower()
# if user_input == "y":
# # if user_input in ('y', 'Y'):
# user_number = int(input("Guess our number: "))
# if user_number == number:
# print("You guess correctly!")
# # elif number - user_number in (1, -1):
# elif abs(number - user_number) == 1:
# print("You were off by one.")
# else:
# print ("Sorry, it's wrong!")
## Loop
number = 7

# Keep playing until the player answers 'n'.
# FIX: the original read (and discarded) one extra input() before the loop,
# so the player was prompted twice at start-up; the redundant read is gone.
while True:
    user_input = input("Would you like to play? (Y/n) ").lower()
    if user_input == "n":
        break
    user_number = int(input("Guess our number: "))
    if user_number == number:
        print("You guess correctly!")
    elif abs(number - user_number) == 1:
        print("You were off by one.")
    else:
        print("Sorry, it's wrong!")
| true |
0d5f83831da8bfeb50f01eb7c71e7f3743d47bcb | Python | bbw7561135/phd_code | /sync_rotate_sfs.py | UTF-8 | 8,576 | 3.09375 | 3 | [] | no_license | #------------------------------------------------------------------------------#
# #
# This code is a Python script that reads in arrays of simulated synchrotron #
# intensities, and calculates the structure functions of the synchrotron #
# intensity for various angles between the line of sight and the mean magnetic #
# field, for a single value of gamma. This is done to measure the spectral #
# index of the structure on small scales. #
# #
# Author: Chris Herron #
# Start Date: 6/11/2014 #
# #
#------------------------------------------------------------------------------#
# First import numpy for array handling, matplotlib for plotting, and astropy.io
# for fits manipulation
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
# Import the functions that calculate the structure and correlation functions
# using FFT, as well as the function that calculates the radially averaged
# structure or correlation functions.
from sf_fft import sf_fft
from cf_fft import cf_fft
from sfr import sfr
# Number of radial bins used when averaging the 2D structure functions.
num_bins = 25
# Directory containing the simulated magnetic fields and synchrotron maps.
simul_loc = '/Users/chrisherron/Documents/PhD/Madison_2014/Simul_Data/'
# Sub-directory of the specific simulated data set to analyse.  Other runs
# end in e.g. b.1p.1_Oct_Burk, b.1p.01_Oct_Burk, b.1p2_Aug_Burk,
# b1p.1_Oct_Burk, b1p.01_Oct_Burk, b1p2_Aug_Burk, c512b.1p.0049,
# c512b.1p.05, c512b.1p.7, c512b1p.0049, c512b1p.05, c512b1p.7,
# c512b3p.01, c512b5p.01, c512b5p2.
spec_loc = 'fractal_data/'
# Full directory path used in the calculations below.
data_loc = simul_loc + spec_loc
# Gamma (cosmic-ray power-law index) value of each synchrotron map slice.
gamma_arr = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0])
# Index of the gamma value studied by this script (gamma = 2.0).
gamma_index = 2
# Rotation angles (relative to the z axis of the MHD cubes) of the maps.
rot_ang_arr = np.array([0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0])
# Radially averaged structure functions: one row per rotation angle, one
# column per radial bin.
sf_mat = np.zeros((len(rot_ang_arr), num_bins))
# Radius values matching each structure function (same shape as sf_mat).
rad_arr = np.zeros((len(rot_ang_arr), num_bins))
# Compute the radially averaged structure function for every rotation angle.
for rot_index in range(len(rot_ang_arr)):
    print('Starting calculation for rotation angle = {}'.format(rot_ang_arr[rot_index]))
    # Open the FITS cube of simulated synchrotron intensity maps for this
    # angle; slices along the third axis correspond to the gamma values.
    sync_fits = fits.open(data_loc + 'synint_p1-4_{}_frac.fits'.format(rot_ang_arr[rot_index]))
    sync_data = sync_fits[0].data
    print('Simulated synchrotron data loaded')
    sync_shape = np.shape(sync_data)
    print('The shape of the synchrotron data matrix is: {}'.format(sync_shape))
    # 2D structure function of the map for the chosen gamma.  no_fluct=True
    # means the mean is not subtracted from the map first.
    strfn = sf_fft(sync_data[gamma_index], no_fluct = True)
    # Radially average it over the configured number of bins and store the
    # values and radii for this angle.
    rad_sf = sfr(strfn, num_bins)
    sf_mat[rot_index] = rad_sf[1]
    rad_arr[rot_index] = rad_sf[0]
    print('Radially averaged structure function calculated for'
          ' rotation angle = {}'.format(rot_ang_arr[rot_index]))
# Fit a power law to each structure function to measure its spectral index.
for i in range(len(rot_ang_arr)):
    # Only the first third of the bins is used in the fit, as that part of
    # the structure function is close to a straight line in log-log space.
    # FIX: np.ceil returns a float, which is not a valid slice index; cast
    # the bound to int explicitly.
    fit_bins = int(np.ceil(num_bins / 3.0))
    spec_ind_data = np.polyfit(np.log10(rad_arr[i, 0:fit_bins]),
                               np.log10(sf_mat[i, 0:fit_bins]), 1, full = True)
    # Gradient/intercept of the fit, and the sum of squared residuals.
    coeff = spec_ind_data[0]
    residuals = spec_ind_data[1]
    print('Rotation angle = {}: Gradient = {}: m = {}: Residuals = {}'
          .format(rot_ang_arr[i], coeff[0], coeff[0]-1.0, residuals))
# Plot every radially averaged structure function on a single figure.
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
# One curve per rotation angle; colours pair up angles, dashed = the larger
# of each pair.  Same styles and order as before, just driven by a loop.
line_styles = ['b-o', 'b--o', 'r-o', 'r--o', 'g-o', 'g--o', 'c-o', 'c--o', 'm-o', 'm--o']
for idx, style in enumerate(line_styles):
    plt.plot(rad_arr[idx], sf_mat[idx], style, label='Angle = {}'.format(rot_ang_arr[idx]))
# Log-log axes.
ax1.set_xscale('log')
ax1.set_yscale('log')
plt.xlabel('Radial Separation R', fontsize = 20)
plt.ylabel('Structure Function', fontsize = 20)
plt.title('Sync Int Str Fun Frac Gamma {}'.format(gamma_arr[gamma_index]), fontsize = 20)
plt.legend(loc = 4)
# Save the figure, then report success.
plt.savefig(data_loc + 'Sync_Int_Angle_SF_Comp_Gam{}_frac.png'.format(gamma_arr[gamma_index]), format = 'png')
print('Plot of the radially averaged structure functions'
      ' for synchrotron intensity saved')
plt.close(fig1) | true |
778e8f86e43c11946d1868d6c5be214a01d46baa | Python | bigpianist/commitbasedtest | /python/musiclib/harmonypitch/scale.py | UTF-8 | 3,542 | 2.796875 | 3 | [] | no_license | modes = ["ionian", "dorian", "phrygian", "lydian", "mixolydian", "aeolian"]
# Pitch-class sequences (semitone offsets from the tonic) for every scale
# that the Scale class knows about.
scales = {
"ionian": [0, 2, 4, 5, 7, 9, 11],
"dorian": [0, 2, 3, 5, 7, 9, 10],
"phrygian": [0, 1, 3, 5, 7, 8, 10],
"lydian": [0, 2, 4, 6, 7, 9, 11],
"mixolydian": [0, 2, 4, 5, 7, 9, 10],
"aeolian": [0, 2, 3, 5, 7, 8, 10],
"locrian": [0, 1, 3, 5, 6, 8, 10],
"whole-tone": [0, 2, 4, 6, 8, 10],
"pentatonic-major": [0, 2, 4, 7, 9],
"pentatonic-minor": [0, 3, 5, 7, 10]
}
# dict indicating which pentatonic scales work well with the different modes
pentatonicModesMapping = {
"ionian": "major",
"dorian": "minor",
"phrygian": "minor",
"lydian": "major",
"mixolydian": "major",
"aeolian": "minor"
}
# sequence of scale degrees of pentatonic scales mapped onto modes
# (keys match the values of pentatonicModesMapping above)
pentatonicScalesDegreeInModes = {
"major": [0, 1, 2, 4, 5],
"minor": [0, 2, 3, 4, 6],
}
class Scale(object):
    """A musical scale: its pitch-class sequence and, for the modal scales,
    the pentatonic reduction that can be derived from it."""

    def __init__(self, name="ionian"):
        """Create a scale, falling back to 'ionian' for unknown names."""
        super(Scale, self).__init__()
        # Unknown scale names default to 'ionian'.
        if name in scales:
            self.name = name
        else:
            print("Error: '" + name + "' scale does not exist. Defaulting "
                  "to 'ionian'")
            self.name = "ionian"
        self.pitchClassSequence = scales[self.name]
        # BUG FIX: test the resolved name (self.name), not the raw argument,
        # so that the 'ionian' fallback for an unknown name also gets its
        # pentatonicReduction (the original left the attribute unset, which
        # made getPentatonicReductionQuality crash later).
        if self.name in modes:
            self.pentatonicReduction = {
                "quality": self.getPentatonicFromMode(),
                "scaleDegrees": None
            }
            self.pentatonicReduction["scaleDegrees"] = \
                self.getPentatonicScaleDegreesInMode(
                    self.pentatonicReduction["quality"])

    def getPitchClassSequence(self):
        """Return the pitch classes of the scale (offsets from the tonic)."""
        return self.pitchClassSequence

    def getName(self):
        """Return the scale's name."""
        return self.name

    def setName(self, name):
        """Switch to the scale *name*, keeping the current one if unknown."""
        if name in scales:
            self.name = name
            self.pitchClassSequence = scales[self.name]
        else:
            print("Error: '" + name + "' scale does not exist. Keeping '" +
                  self.name + "' scale")

    def expandScaleSequence(self, octave=0):
        """Realize the scale over a number of octaves.

        Args:
            octave (int): Midi octave up to which the scale is realised.

        Returns:
            list: Pitch classes repeated with a +12 offset per octave.
        """
        expandedScaleSeq = []
        for i in range(octave + 1):
            offset = 12 * i
            expandedScaleSeq += [(x + offset) for x in self.pitchClassSequence]
        return expandedScaleSeq

    def getPentatonicFromMode(self):
        """Return the pentatonic quality ('major'/'minor') derived from
        this mode."""
        return pentatonicModesMapping[self.name]

    @staticmethod
    def getPentatonicScaleDegreesInMode(pentatonicType):
        """Return the scale degrees common to a pentatonic scale and its
        relative modal scale, mapped into the modal scale.

        Args:
            pentatonicType (str): Either 'major' or 'minor' (fixed doc:
                the keys are not 'pentatonic-major'/'pentatonic-minor').

        Returns:
            list: The pentatonic scale degrees mapped onto the mode.

        Raises:
            ValueError: If *pentatonicType* is not a known quality.
        """
        if pentatonicType not in pentatonicScalesDegreeInModes:
            raise ValueError("The pentatonic type passed to the method is "
                             "wrong!")
        return pentatonicScalesDegreeInModes[pentatonicType]

    def getPentatonicReductionQuality(self):
        """Return the quality of the scale's pentatonic reduction."""
        return self.pentatonicReduction["quality"]
| true |
dfa010a0145416f20785293bf66c2c0ea2a5a89a | Python | 3nippo/system_of_equations_solving_methods | /tests/test_integral.py | UTF-8 | 676 | 3.078125 | 3 | [] | no_license | import context
from approx import Integral
def func(x):
    """Integrand under test: x^2 / (x^2 + 16)."""
    squared = x * x
    return squared / (squared + 16)
# Integration bounds, step sizes, and the quadrature methods under test.
start = 0
end = 2
h = [0.5, 0.25]
methods = ['rectangle_method', 'trapeze_method', 'Simpson_method']

obj = Integral(start, end, func)

# Evaluate every method at every step size.
for step in h:
    obj.set_table(step)
    for method in methods:
        print(f"{method}, step = {step}")
        print(f"F = {getattr(obj, method)(step)}")
        print()

print("*" * 15)
print("RungeRomberg_method")
print()
def sec_der(x):
    """Second derivative of the integrand: (512 - 96*x^2) * (x^2 + 16)^-3."""
    return (512 - 96 * x ** 2) * (x ** 2 + 16) ** (-3)
# Runge-Romberg refinement of every method's estimate.
for method in methods:
    print(f"{method}")
    F, R = obj.RungeRomberg_method(method, h[0], h[1] / h[0])
    print(f"F = {F}")
    print(f"R = O({R})")
    print()
| true |
52ebe52feda3d46cebe8eec49b83b850a9db762a | Python | virendra2334/plivo-assignment | /assignment/utils/api_client.py | UTF-8 | 802 | 2.578125 | 3 | [] | no_license | import requests
class RequestType(object):
    """Symbolic names for the supported HTTP methods.

    More request types can be added here as and when we have more.
    """

    GET = 'GET'
    POST = 'POST'
    PUT = 'PUT'
    DELETE = 'DELETE'
class APIClient(object):
    """Generic base class to be inherited by the clients of all APIs."""

    # Maps each RequestType value to the matching `requests` call.
    __rmethod_method_map = {
        RequestType.GET: requests.get,
        RequestType.POST: requests.post,
        RequestType.PUT: requests.put,
        RequestType.DELETE: requests.delete,
    }

    def _send_request(self, url, request_type, auth=None, data=None):
        """Issue *request_type* against *url*, forwarding only the optional
        arguments that were actually supplied (truthy)."""
        method = self.__rmethod_method_map[request_type]
        kwargs = {name: value
                  for name, value in (('auth', auth), ('data', data))
                  if value}
        return method(url, **kwargs)
| true |
d45876ef3a1f64a06eb586e65ec7c7e4ef119624 | Python | patterson-dtaylor/python_work | /Chapter_4/odd_numbers.py | UTF-8 | 123 | 3.640625 | 4 | [] | no_license | # 10/1/19 Exercise 4-6: Creating a list of odd numbers between 1-20
# Exercise 4-6: odd numbers from 1 to 20.
# FIX: the original used a step of 3 (1, 4, 7, ...); consecutive odd
# numbers need a step of 2.
odd_numbers = list(range(1, 21, 2))
print(odd_numbers)
| true |
83622aeb2b050fff217c085a3d58dd01c78de1f3 | Python | powerfulaidan/firstproject | /hi.py | UTF-8 | 118 | 2.734375 | 3 | [] | no_license | print "hi"
family = ["Aidan" , "Dad" , "Vitak ", "mom" , "rufus" , "honey"]
for member in family:
print "hi" + member | true |
5518dd79266a4beec1003ab829f98d3798b60822 | Python | cpprhtn/Machine_Learning_Cookbook | /Chapter7/6_요일 인코딩.py | UTF-8 | 267 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 12 23:04:23 2020
@author: cpprhtn
"""
import pandas as pd
# Three consecutive month-end timestamps starting from 2 Feb 2002.
dates = pd.Series(pd.date_range("2/2/2002", periods=3, freq="M"))
# Day of the week as a name (e.g. "Thursday").
dates.dt.day_name()
# Day of the week as a number (Monday == 0).
dates.dt.weekday
6d6a8125f28f8e5d1b5b08062ba825397e095681 | Python | Vivek-M416/Basics | /Array/nparray1.py | UTF-8 | 89 | 2.921875 | 3 | [] | no_license | # creating array with numpy
import numpy

# Build a NumPy array from a plain Python list and show it.
sample_values = [10, 20, 30, 40, 50]
x = numpy.array(sample_values)
print(x)
| true |