blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
de8e5479053aad5c0f42d9ee0d419afc09d9dc11 | Python | Kianqunki/Python_CorePythonApplicationsProgramming | /chapter13-web-services/twitter_app.py | UTF-8 | 5,147 | 3.0625 | 3 | [] | no_license | import importlib
# these should be encrypted -- NOTE(review): hard-coded OAuth credentials
# belong in an environment variable or a secrets store, not in source control.
TWITTER_CONSUMER_KEY = 'DeH9TfrfeV7UeRgK3OSGA'
TWITTER_CONSUMER_SECRET = 'sZGBB28VZcrRfcZvexYydj2Pc2uWW307kP8l7T7yiQo'
TWITTER_OAUTH_TOKEN = '2334856880-zYwvSu8kS7cGfH67lQ64vulTUbY7zxhc39bpnlG'
TWITTER_OAUTH_TOKEN_SECRET = 'RTQ7pzSytCIPsASCkA0Z5rubpHSWbvjvYR3c3hb9QhC3M'

# Map of supported client libraries -> the method name implementing each
# generic Twitter API call.  The 'module' slot is filled in below with the
# imported package object.
CMDs = {
    'twython': {
        # key = Twitter API name
        # value = Twython method name (when null Twitter API name = Twython method name)
        'search': None,
        'verify_credentials': None,
        'user_timeline': 'get_user_timeline',  # this function is named differently in Twython
        'update_status': None,
        'module': None
    },
    # tweepy is not installed just kept for the example
    'tweepy': dict.fromkeys((
        'search',
        'verify_credentials',
        'user_timeline',
        'update_status',
        'module'
    )),
}
# set(CMDs) takes the dict's keys, i.e. the candidate library names.
APIs = set(CMDs)
# remove unavailable APIs
remove = set()
for api in APIs:
    try:
        # __import__(api) # this also works but is meant for use by the Python interpreter
        CMDs[api]['module'] = importlib.import_module(api)  # store the imported package
    except ImportError:
        remove.add(api)
        # NOTE(review): calling APIs.remove(api) here would mutate the set
        # while iterating it (RuntimeError); collecting the names first and
        # using difference_update afterwards is the safe pattern.
APIs.difference_update(remove)  # removes from set "APIs" the values in the set "remove"
if not APIs:
    raise NotImplementedError(
        'No Twitter API found; install one & add to CMDs!')
# at this point APIs has only the names of the modules that exists
# this is a wrapper for calling the real twitter API
class Twitter():
    """Uniform wrapper over whichever Twitter client library is installed.

    The backing library ('twython' or 'tweepy') is selected by name; the
    generic API names are mapped to library methods via the module-level
    CMDs table.
    """

    def __init__(self, api):
        """
        :param api: backing library name, one of the keys of CMDs
                    ('twython' or 'tweepy').
        """
        self.api = api
        if api == 'twython':
            self.twitter = CMDs[api]['module'].Twython(
                TWITTER_CONSUMER_KEY,
                TWITTER_CONSUMER_SECRET,
                TWITTER_OAUTH_TOKEN,
                TWITTER_OAUTH_TOKEN_SECRET)
        elif api == 'tweepy':
            # BUG FIX: the original passed the undefined name
            # 'consumer_secret' (NameError); use the module constant.
            auth = CMDs[api]['module'].OAuthHandler(
                TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
            auth.set_access_token(TWITTER_OAUTH_TOKEN, TWITTER_OAUTH_TOKEN_SECRET)
            self.twitter = CMDs[api]['module'].API(auth)

    def _get_method(self, cmd):
        """Resolve a generic Twitter API name to the bound library method."""
        method_name = CMDs[self.api][cmd]  # the value
        if not method_name:
            # A null entry means the library uses the generic name as-is.
            method_name = cmd
        return getattr(self.twitter, method_name)

    def search(self, str_to_search):
        """Search tweets for *str_to_search*.

        Returns a list of (screen_name, created_at, text) tuples for
        twython, a generator of ResultWrapper objects for tweepy, or
        None for an unknown backend.
        """
        if self.api == 'twython':
            results = self._get_method('search')(count=3, q=str_to_search)['statuses']
            data = ((tweet['user']['screen_name'], tweet['created_at'], tweet['text'])
                    for tweet in results)
            return list(data)
        elif self.api == 'tweepy':
            # BUG FIX: the original called the misspelled 'ResultsWrapper'
            # (NameError); the wrapper class defined below is 'ResultWrapper'.
            return (ResultWrapper(tweet)
                    for tweet in self._get_method('search')(q=str_to_search))
        else:
            return None

    def verify_credentials(self):
        """Return the authenticated account info wrapped in ResultWrapper."""
        f = self._get_method('verify_credentials')
        return ResultWrapper(f())

    def user_timeline(self):
        """Return the raw user-timeline result from the backing library."""
        f = self._get_method('user_timeline')
        # return ResultWrapper(f())
        return f()

    def update_status(self, tweet):
        """Post *tweet* and return the wrapped result."""
        f = self._get_method('update_status')
        # encode for Python v3.x -- NOTE(review): whether the library
        # expects bytes here depends on the installed version; confirm.
        return ResultWrapper(f(status=tweet.encode('utf-8')))
class ResultWrapper():
    """Uniform attribute-style access over an object *or* a mapping.

    If the wrapped value has the requested attribute, it is returned;
    otherwise, if the wrapped value is a container holding the name as a
    key, the corresponding item is returned.  Thus ``foo.bar`` works
    whether ``foo`` wraps an object or a dictionary, and ``foo['bar']``
    behaves identically.
    """

    def __init__(self, obj):
        self.obj = obj

    def __str__(self):
        return str(self.obj)

    def __repr__(self):
        return repr(self.obj)

    def __getattr__(self, attr):
        target = self.obj
        if hasattr(target, attr):
            return getattr(target, attr)
        if hasattr(target, '__contains__') and attr in target:
            return target[attr]
        raise AttributeError(
            '%r has no attribute %r' % (target, attr))

    # Item access delegates to the same lookup logic.
    __getitem__ = __getattr__
if __name__ == '__main__':
    # Demo: exercise every available backend end-to-end (search, verify,
    # timeline, post).  Requires valid credentials and network access.
    for api in APIs:
        twitter = Twitter(api)
        print('*** %s ***' % api)
        print('SEARCH')
        results = twitter.search('python')
        for item in results:
            print('user: ', item[0])
            print('created at: ', item[1])
            # NOTE(review): label reads 'test' but the value is the tweet
            # text -- probably a typo for 'text'.
            print('test: ', item[2])
            print()
        print('VERIFY CREDENTIALS')
        result = twitter.verify_credentials()
        print('user: ', result.screen_name)
        print('created at: ', result.status['created_at'])
        print('status: ', result.status['text'])
        print()
        print('USER TIMELINE')
        results = twitter.user_timeline()
        for item in results:
            print('created at: ', item['created_at'])
            print('status: ', item['text'])
            print()
        print('UPDATE STATUS')
        new_post = 'and yet another tweet using {0}'.format(api)
        result = twitter.update_status(new_post)
        print('created at: ', result.created_at)
        print('current status: ', new_post)
| true |
e0b8fe07917e0d1ff99059bf27c7aa98ac1fe1c4 | Python | herzenuni/design-patterns | /creational/AbstractFactoryByPeter/abstract_factory.py | UTF-8 | 570 | 3.046875 | 3 | [] | no_license | #from abc import ABC, abstractmethod
# Abstract factory: creates combat units and the bullets/shells
# compatible with them.
class AbstractFactory:
    """Interface for factories that produce matched unit/bullet families."""

    # Create a unit.
    def create_unit(self):
        pass

    # Create a bullet/shell.
    def create_bullet(self):
        pass
# Abstract bullet/shell.
class AbstractBullet:
    """Marker base class for ammunition types."""
    pass
# Abstract unit (vehicle).
class AbstractUnit:
    """Base class for units; concrete units fire a compatible bullet."""

    def shoot(self, bullet):
        pass
c38509790600941ae196c8c9f74a3b1c29c27d8a | Python | amitt001/pygmy | /pygmy/tests/test_click_stats.py | UTF-8 | 1,099 | 2.5625 | 3 | [
"MIT"
] | permissive | import os
import tempfile
import unittest
from pygmy.app.link import shorten, link_stats
from pygmy.core.initialize import initialize_test
from pygmy.config import config
class URLClickStatsTest(unittest.TestCase):
    """Test for clickmeta i.e. click stats"""

    # SQLAlchemy URL of the temporary SQLite database; set in setup_class.
    DBPath = None

    @classmethod
    def setup_class(cls):
        # Point the app at the test config and a throwaway SQLite file.
        currdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        config_path = currdir + '/config/pygmy_test.cfg'
        db_path = tempfile.NamedTemporaryFile(suffix='.db').name
        cls.DBPath = "sqlite:///{}".format(db_path)
        initialize_test(config_path, db_url=cls.DBPath)
        cls.long_url = 'https://example.com'

    def test_config(self):
        # Initialisation must have populated the global config and its DB.
        assert config is not None
        assert config.db is not None
        assert self.DBPath is not None

    def test_clickmeta(self):
        # Shorten a URL, then query stats: an unknown code yields None,
        # the real code (suffixed with '+') yields a stats object.
        data = shorten(self.long_url)
        self.assertTrue(isinstance(data, dict) is True)
        self.assertIsNone(link_stats(data['short_code'] + 'abc+'))
        stats = link_stats(data['short_code'] + '+')
        self.assertIsNotNone(stats)
| true |
d13cb66f0f8e1afd57359ae14a96f7a231ca083d | Python | sincerefly/getEastmoneyReport | /sina_guping/statistics-6/sta-3.py | UTF-8 | 2,697 | 2.6875 | 3 | [] | no_license | #!/bin/env python
#encoding:utf-8
from pymongo import MongoClient
import sys
import datetime
# Settings
mongopath = "localhost"  # MongoDB host
startDate = "20150104"  # query start date (overwritten below)
endDate = "20150529"  # query end date (overwritten below)
#endDate = "20150227"  # query end date (three months reserved)
nowDate = datetime.datetime.now().strftime("%Y%m%d")  # today's date
# The fixed dates above are immediately replaced by a rolling window:
# from 720 days ago up to 180 days ago.
startDate = (datetime.datetime.now() + datetime.timedelta(days=-720)).strftime("%Y%m%d")
endDate = (datetime.datetime.now() + datetime.timedelta(days=-180)).strftime("%Y%m%d")
# Functions
def isNotWorkDay(when=None):
    """Return True when *when* (a datetime; default: now) is a weekend day.

    BUG FIX: strftime("%w") returns a *string* ('0' = Sunday ... '6' =
    Saturday), but the original compared it against the integers
    [6, 0], so the weekend test never matched and the function always
    returned None.  The caller relies on a truthy result to skip
    non-trading days, so return a proper bool.

    :param when: datetime to test; defaults to the current local time
                 (optional parameter added for testability, backward
                 compatible with the original zero-argument call).
    """
    if when is None:
        when = datetime.datetime.now()
    weekday = when.strftime("%w")
    return weekday in ("0", "6")
def clientMongo():
    """Connect to MongoDB and return the 'guping' database handle.

    Returns the Database object on success, or False.  NOTE: pymongo 3+
    Database objects do not support truth-value testing (bool(db) raises
    NotImplementedError), so the original ``db if db else False`` would
    raise there; comparing against None is safe on every pymongo version
    and behaves identically on the old ones.
    """
    client = MongoClient(mongopath, 27017)
    db = client.guping
    return db if db is not None else False
def getArticleInfo(db):
    """Return a cursor over every document in the sina_company_ls collection."""
    return db.sina_company_ls.find({})
def startSta(art_list, db):
    """Aggregate per-author article statistics and upsert them into MongoDB.

    For every article in *art_list* (each a dict with 'company', 'author'
    list and 'grow' value), accumulate per author: the article count, the
    list of growth values, and the company of the author's first article.
    Each author then gets one document in db.sina_author_fs with the
    count, the average growth (rounded to 4 decimals) and the company.

    :param art_list: iterable of article documents.
    :param db: MongoDB database handle.
    :return: 0 on completion (kept for backward compatibility).
    """
    # Rebuild the author stats collection from scratch.
    print("移除作者数据")
    db.sina_author_fs.remove({})
    author_dict = {}
    for art in art_list:
        company = art["company"].encode("utf-8")
        for au in art["author"]:
            au = au.encode("utf-8")
            grow = art["grow"]
            # 'in' replaces the Python-2-only dict.has_key().
            if au in author_dict:
                entry = author_dict[au]
                entry["count"] += 1
                entry["grow"].append(grow)
            else:
                # First sighting of this author: remember the company too.
                author_dict[au] = {
                    "count": 1,
                    "grow": [grow],
                    "company": company,
                }
    for author, info in author_dict.items():
        count = info["count"]
        grow_list = info["grow"]
        avgUp = round(sum(grow_list) / len(grow_list), 4)
        company = info["company"]
        print(author + "\t" + str(count) + "\t" + str(avgUp) + "\t" + company)
        doc = {
            "author": author,
            "count": count,
            "avgUp": avgUp,
            "company": company,
        }
        db.sina_author_fs.update({'author': author}, {'$set': doc}, upsert=True)
    return 0
# main function
if __name__ == "__main__":
    # Skip weekends: stock data only changes on trading days (requires
    # isNotWorkDay() to return a truthy value on weekends).
    if isNotWorkDay():
        exit(0)
    db = clientMongo()
    # NOTE(review): pymongo 3+ Database objects do not implement truth
    # testing; 'if db:' relies on older pymongo behaviour -- confirm the
    # installed version.
    if db:
        print "Client Mongo Success"
    else:
        print "Client Mongo failed"
        exit(0)
    article_list = getArticleInfo(db)
    # Aggregate stock gain/loss statistics over the configured date range.
    startSta(article_list, db)
    sys.exit(0)
| true |
2e0be9e4b549ae2a93925cb13e6d4268103f1c93 | Python | syj2908/python_crash_course | /little_things/die.py | UTF-8 | 234 | 3.734375 | 4 | [] | no_license | from random import randint
class Die():
    """A die with a configurable number of faces (six by default)."""

    def __init__(self, sides=6):
        self.sides = sides

    def roll_die(self):
        """Print the outcome of one roll: an integer from 1 to ``sides``."""
        result = randint(1, self.sides)
        print(result)
# Demo: roll a twenty-sided die ninety times.
die = Die(20)
for _ in range(90):
    die.roll_die()
| true |
c1e7e3482520afe4ac3a792636b181333aedfa7b | Python | smlgit/cloud-backup | /sync_drives/sync.py | UTF-8 | 9,760 | 2.671875 | 3 | [] | no_license | import os
import logging
import itertools
from pathlib import Path
import datetime
from common.tree_utils import StoreTree
import providers.provider_list as provider_list
logger = logging.getLogger(__name__)
def _files_dt_out_of_sync(local_mtime, server_mtime):
"""
:param local_mtime: local time modified datetime.
:param server_mtime: server time modified datetime.
:return:
"""
# Greater than one second constitutes out of sync.
# Can't use exact equal because some providers can only
# store to ms resolution.
if (local_mtime - server_mtime) / datetime.timedelta(milliseconds=1) > 1000:
return True
return False
def required_config_is_present(provider_name, config_dir_path, account_name):
return provider_list.get_drive_class(provider_name).required_config_is_present(
config_dir_path, account_name
)
def download_store(server_root_path, provider_name, local_dest_path,
server_user_id, path_to_config_dir, config_pw):
cloud_drive = provider_list.get_drive_class(provider_name)(
server_user_id, path_to_config_dir, config_pw)
# Build remote tree
for res in cloud_drive.get_root_file_tree(root_folder_path=server_root_path):
server_tree = res
# Step through items and download to local
for item in server_tree.get_items_with_parent_path():
item_dir_path = os.path.join(local_dest_path, item['parent_path'])
if item['is_folder'] is True:
os.makedirs(os.path.join(item_dir_path, item['name']), exist_ok=True)
else:
# Download the file from the server
cloud_drive.download_file_by_id(item['id'], item_dir_path,
output_filename=item['name'])
logger.info('Downloaded file {} to {}'.format(
item['name'], item_dir_path
))
yield None
def sync_drives(path_to_local_root, path_to_config_dir,
provider_dict, config_pw, analyse_only=False):
"""
Will check every folder and file in path_to_local_root and, for every
provider in providers_list, upload files that have been modified since
the last upload and delete any files or folders that are no longer on
the local root.
:param path_to_local_root:
:param path_to_config_dir: Directory that stores the config files for
the providers.
:param provider_dict: A {'provider_name': , 'user_id' , 'server_root_path': ,} dict.
provider_name can be 'google', ... user_id is used to find the appropriate
config file in path_to_config_dir - each provide can have its own config
file format and info. server_root_path is the path on the cloud drive to
the store root folder (relative to the drive root).
:param config_pw: Password used to encrypt the config files.
:return: Nothing.
"""
if os.path.exists(path_to_local_root) is False:
raise FileNotFoundError('Local store root {} does not exist.'.format(path_to_local_root))
logging.info('Starting sync to {} drive for account {} and store {}'.format(
provider_dict['provider_name'],
provider_dict['user_id'],
provider_dict['server_root_path']
))
provider_class = provider_list.get_drive_class(provider_dict['provider_name'])
cloud_drive = provider_class(
provider_dict['user_id'], path_to_config_dir, config_pw)
# Build remote tree
for res in cloud_drive.get_root_file_tree(root_folder_path=provider_dict['server_root_path']):
server_tree = res
# We'll build a list of dicts that specify required changes so we can report progess:
# {'name': , 'type': <dir or file>,
# 'op': <'c', 'u', 'd'>,
# 'id': ,
# 'parent_path': ,
# 'local_path': ,
# 'mtime': }
operations = []
# Now cycle through the local store root and do the following:
# 1. for each folder, check the local contents are present on the server and
# if not, or if the file modified date is older on the server, upload to the server.
# 2. for each folder, delete any folders or folders that are on the server but not
# on the local.
#
# NOTE: This assumes pathlib.Path.glob('**') returns parent directories before their children.
local_root = Path(path_to_local_root)
# This chaining will produce all items in the local root (recursive) AND the local root itself.
# It is important we have the local root too for checking deleted items on
# the local.
for item in itertools.chain([local_root], local_root.glob('**/*')):
relative_path = item.relative_to(local_root)
if str(relative_path) != '.':
parent_relative_path = item.parent.relative_to(local_root)
server_item =\
server_tree.find_item_by_path(str(relative_path), is_path_to_file=item.is_file())
local_modified_time = datetime.datetime.fromtimestamp(
item.stat().st_mtime, tz=datetime.timezone.utc)
if server_item is None:
# Not on server, add it
operation = {'name': item.name, 'op': 'c',
'parent_path': str(parent_relative_path),
'mtime': local_modified_time}
if item.is_dir() is True:
operation['type'] = 'dir'
elif item.is_file() is True:
operation['type'] = 'file'
operation['local_path'] = str(item)
operations.append(operation)
elif item.is_file():
# Is on the server. If a file, check date for update
server_item = server_tree.find_item_by_path(str(relative_path), is_path_to_file=True)
hash_different = provider_class.files_differ_on_hash(str(item), server_item['file_hash'])
if (hash_different is True or
(hash_different is None and
_files_dt_out_of_sync(local_modified_time, server_item['modified']))):
operations.append({
'id': server_item['id'],
'type': 'file',
'name': item.name,
'op': 'u',
'local_path': str(item),
'mtime': local_modified_time})
# For each folder on the local store (starting from the root itself),
# check if there are any files or folders on the server tree that don't
# exist on the local (this works because both locations are guaranteed
# to have the root directory).
if item.is_dir():
server_folder = server_tree.find_item_by_path(str(relative_path),
is_path_to_file=False)
if server_folder is not None:
for server_child in (server_folder['folders'] + server_folder['files']):
exists_on_local = False
for local_child in item.iterdir():
if (local_child.name == server_child['name'] and
((local_child.is_dir() and StoreTree.item_is_folder(server_child)) or
(local_child.is_file() and not StoreTree.item_is_folder(server_child)))):
exists_on_local = True
break
if exists_on_local is False:
# Can it on the server
operations.append({'id': server_child['id'], 'op': 'd',
'name': server_child['name']})
yield None
# Now apply the changes
logger.info('Will carry out {} operations for sync...'.format(len(operations)))
for i in range(0, len(operations)):
operation = operations[i]
if operation['op'] == 'c':
logger.info('{} {} {} in {} (operation {}/{})'.format(
'Would create' if analyse_only is True else 'Creating',
operation['type'], operation['name'],
operation['parent_path'], i + 1, len(operations)))
if analyse_only is False:
parent_id = server_tree.find_item_by_path(
operation['parent_path'], is_path_to_file=False)['id']
if operation['type'] == 'dir':
new_id = cloud_drive.create_folder(parent_id, operation['name'])
server_tree.add_folder(new_id, name=operation['name'], parent_id=parent_id)
else:
cloud_drive.create_file(parent_id, operation['name'],
operation['mtime'], operation['local_path'])
elif operation['op'] == 'u':
logger.info('{} file {} with id {} (operation {}/{})'.format(
'Would upload' if analyse_only is True else 'Uploading',
operation['name'], operation['id'],
i + 1, len(operations)
))
if analyse_only is False:
cloud_drive.update_file(operation['id'], operation['mtime'],
operation['local_path'])
elif operation['op'] == 'd':
logger.info('{} file {} with id {} (operation {}/{})'.format(
'Would delete' if analyse_only is True else 'Deleting',
operation['name'], operation['id'],
i + 1, len(operations)
))
if analyse_only is False:
cloud_drive.delete_item_by_id(operation['id'])
server_tree.remove_item(operation['id'])
yield None
| true |
d084a4a40c4659a5eb7d50852acaeaa6831c3309 | Python | RuurdBijlsma/KnightSpider-MainFrame | /point.py | UTF-8 | 2,129 | 3.484375 | 3 | [] | no_license | import utils
class Point3D(object):
    """A 3-D point with component-wise helpers.

    Points are hashable via their string form, so two points hash equal
    whenever their components compare equal.  The rotate_* helpers
    delegate to utils.rotate for the 2-D rotation in the chosen plane.
    """

    def __init__(self, x=0, y=0, z=0):
        self.x = x
        self.y = y
        self.z = z

    def negate_x(self):
        """Return a copy with the x component negated."""
        return Point3D(
            -self.x,
            self.y,
            self.z
        )

    def negate_y(self):
        """Return a copy with the y component negated."""
        return Point3D(
            self.x,
            -self.y,
            self.z
        )

    def negate_z(self):
        """Return a copy with the z component negated."""
        return Point3D(
            self.x,
            self.y,
            -self.z
        )

    def multiply_z(self, multiplier):
        """Return a copy with the z component scaled by *multiplier*."""
        return Point3D(
            self.x,
            self.y,
            self.z * multiplier
        )

    def negate(self):
        """Return a copy with all three components negated."""
        return Point3D(
            -self.x,
            -self.y,
            -self.z
        )

    @staticmethod
    def from_string(str):
        """Parse "x,y,z" (the format produced by __str__) into a Point3D.

        BUG FIX: components are converted to float; the original kept
        them as strings, which broke arithmetic, negation and equality
        on the parsed point.
        """
        arr = str.split(',')
        return Point3D(float(arr[0]), float(arr[1]), float(arr[2]))

    def rotate_around_y(self, rotate_origin=(0, 0), angle=0):
        """Rotate in the x-z plane about *rotate_origin* by *angle*."""
        x, z = utils.rotate(rotate_origin, (self.x, self.z), angle)
        return Point3D(
            x,
            self.y,
            z
        )

    def rotate_around_z(self, rotate_origin=(0, 0), angle=0):
        """Rotate in the y-x plane about *rotate_origin* by *angle*."""
        y, x = utils.rotate(rotate_origin, (self.y, self.x), angle)
        return Point3D(
            x,
            y,
            self.z
        )

    def rotate_around_x(self, rotate_origin=(0, 0), angle=0):
        """Rotate in the z-y plane about *rotate_origin* by *angle*."""
        z, y = utils.rotate(rotate_origin, (self.z, self.y), angle)
        return Point3D(
            self.x,
            y,
            z
        )

    def __add__(self, other):
        return Point3D(
            self.x + other.x,
            self.y + other.y,
            self.z + other.z
        )

    def __str__(self):
        return "{0},{1},{2}".format(self.x, self.y, self.z)

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y and self.z == other.z

    def __hash__(self):
        # Hash on the string form so equal points hash equal.
        return hash(str(self))

    def round(self, rounder=0):
        """Return a copy with each component rounded to *rounder* digits."""
        return Point3D(
            round(self.x, rounder),
            round(self.y, rounder),
            round(self.z, rounder)
        )
OUT_OF_REACH = Point3D(999, 0, 0) | true |
4efdd548fb24facb12ee64fe255fa86c92a70cf2 | Python | MarcosFelipeBC/lfa | /AFN e AFD - Trabalho 2/AFN/afnProcessScreen.py | UTF-8 | 4,048 | 2.78125 | 3 | [] | no_license | import tkinter as tk
from tkinter import messagebox
from PIL import Image, ImageTk
from svglib.svglib import svg2rlg
from reportlab.graphics import renderPM
class AFNProcessScreen:
    """Tkinter GUI that steps an NFA (AFN) through a chain symbol by symbol.

    The non-determinism is handled with an explicit stack of
    (state, position) pairs: each "Next" click pops one configuration,
    pushes its successors, and redraws the screen.  The automaton diagram
    is rendered from an SVG to ./resources/AFN.png once at start-up.
    """

    def __init__(self, automata):
        # Render the diagram first, then seed the stack with the initial
        # configuration (initial state, position 0).
        self.getPng("./resources/AFN.dot.svg")
        self.automata = automata
        self.stack = [(automata.initialState, 0)]
        self.root = tk.Tk()
        self.initialWindow()

    def initialWindow(self):
        """Show the start screen: diagram, chain entry box and Start button."""
        self.cleanScreen()
        self.root.configure(bg='white')
        img = Image.open('./resources/AFN.png')
        pimg = ImageTk.PhotoImage(img)
        size = img.size
        canvas = tk.Canvas(self.root, width=size[0], height=size[1], bg='white')
        canvas.pack()
        canvas.create_image(0, 0, anchor='nw', image=pimg)
        self.root.geometry("1080x720")
        tk.Label(self.root, text="Digite a cadeia a ser consumida", bg='white', font="Verdana 16 bold", pady=10).place(x=2, y=20)
        chainEntry = self.chainEntry()
        self.startButton(chainEntry)
        self.root.mainloop()

    def processingWindow(self, chain):
        """Consume one configuration from the stack and redraw.

        Empty stack means every branch failed -> reject.  A configuration
        that has consumed the whole chain accepts if its state is
        accepting, otherwise the next configuration is tried recursively.
        """
        if len(self.stack) == 0:
            self.endWithFail()
            return
        state, position = self.stack.pop()
        if position == len(chain):
            if state in self.automata.acceptingStates:
                self.endWithSuccess(state)
            else:
                # Dead branch: backtrack to the next stacked configuration.
                self.processingWindow(chain)
        if position != len(chain):
            symbol = chain[position]
            if (state, symbol) in self.automata.transitions:
                # Push every non-deterministic successor.
                for nextState in self.automata.transitions[(state, symbol)]:
                    self.stack.append((nextState, position+1))
            self.cleanScreen()
            self.root.configure(bg='white')
            img = Image.open('./resources/AFN.png')
            pimg = ImageTk.PhotoImage(img)
            size = img.size
            canvas = tk.Canvas(self.root, width=size[0], height=size[1], bg='white')
            canvas.pack()
            canvas.create_image(0, 0, anchor='nw', image=pimg)
            label = tk.Label(self.root, text=f"ESTADO ATUAL: \"{state}\" | Posição: {position} | Símbolo: {chain[position]}", bg='white', font="Verdana 12 bold")
            label.pack(side = tk.LEFT)
            self.root.geometry("1080x720")
            self.nextButton(chain)
            self.root.mainloop()

    def chainEntry(self):
        """Create, place and return the text entry for the input chain."""
        chainEntry = tk.StringVar()
        chainEntry = tk.Entry(self.root, width=30, textvariable=chainEntry)
        chainEntry.place(x=5, y=90)
        return chainEntry

    def nextButton(self, chain):
        # Advances the simulation by one configuration.
        button = tk.Button(self.root, text="Next", command=lambda:self.processingWindow(chain), width=7, height=2)
        button.place(x=10, y=200)

    def startButton(self, chainEntry):
        # Validates and starts processing the typed chain.
        button = tk.Button(self.root, text="Start", command=lambda:self.startButtonAction(chainEntry), width=7, height=2)
        button.place(x=10, y=200)

    def startButtonAction(self, chainEntry):
        """Validate the typed chain against the alphabet, then process it."""
        chain = chainEntry.get()
        if self.automata.validateChain(chain) == False:
            messagebox.showinfo("Cadeia inválida", "A cadeia possui símbolos inválidos")
            chainEntry.delete(0, 'end')
        else:
            self.processingWindow(chain)

    def endWithSuccess(self, state):
        """Report acceptance, then reset the stack and the start screen."""
        messagebox.showinfo("Resultado:", "Estado final: " + state + "\nCADEIA ACEITA!")
        self.stack = [(self.automata.initialState, 0)]
        self.initialWindow()

    def endWithFail(self):
        """Report rejection, then reset the stack and the start screen."""
        messagebox.showinfo("Resultado:", "CADEIA REJEITADA!")
        self.stack = [(self.automata.initialState, 0)]
        self.initialWindow()

    def cleanScreen(self):
        """Remove every widget (recursively) from the root window."""
        _list = self.root.winfo_children()
        for item in _list:
            if item.winfo_children() :
                # Extending the list while iterating walks the whole
                # widget tree breadth-first.
                _list.extend(item.winfo_children())
        for item in _list:
            item.pack_forget()

    def getPng(self, svg_file):
        """Rasterise *svg_file* to ./resources/AFN.png for display."""
        drawing = svg2rlg(svg_file)
        renderPM.drawToFile(drawing, "./resources/AFN.png", fmt="PNG")
13788acde2c6a9ca0eb1dd84889eeaf56c731c1f | Python | Najq/bigdata-cmpt733 | /Assignments/A2/similarity_join.py | UTF-8 | 4,646 | 3 | 3 | [] | no_license |
#author- najeeb qazi
import re
import pandas as pd
import math
class SimilarityJoin:
    """Entity resolution between two CSV datasets via a Jaccard similarity join.

    Pipeline: tokenise the chosen columns into a 'joinkey' list per row,
    keep only candidate pairs that share at least one token (filtering),
    then keep pairs whose Jaccard similarity meets the threshold
    (verification).
    """

    def __init__(self, data_file1, data_file2):
        self.df1 = pd.read_csv(data_file1)
        self.df2 = pd.read_csv(data_file2)

    def removeEmptystr(self,row):
        """Return *row* (a token list) with empty-string tokens removed."""
        emptyList= []
        for val in row:
            if val != "":
                emptyList.append(val)
        return emptyList

    def preprocess_df(self, df, cols):
        """Add a 'joinkey' column: lowercase word tokens from *cols* joined per row."""
        df = df.fillna('')
        # Concatenate the selected columns, split on non-word characters,
        # then drop the empty tokens the split can produce.
        df['joinkey'] = df[cols].apply(lambda row : " ".join(row.values.astype(str)), axis = 1 )
        df['joinkey'] = df['joinkey'].apply(lambda row : re.split(r'\W+', str(row).lower()))
        #remove empty strings from Dataframe
        df['joinkey'] = df['joinkey'].apply(self.removeEmptystr)
        return df

    #splits listed columns into single rows
    def splitListToRows(self,row,row_accumulator,target_column,new_column):
        #create a new record with all the other values duplicated and one value from the joinkey column
        split_row = row[target_column]
        for s in split_row:
            new_row = row.to_dict()
            new_row[new_column] = s
            row_accumulator.append(new_row)

    def filtering(self, df1, df2):
        """Return candidate pairs sharing at least one joinkey token.

        Each dataframe is exploded to one row per token ('flatten'), the
        two are merged on the shared token, and duplicate (id1, id2)
        pairs are collapsed to one candidate.
        """
        new_rows = []
        df1.apply(self.splitListToRows,axis=1,args = (new_rows,"joinkey","flatten"))
        new_df1 = pd.DataFrame(new_rows)
        new_rows = []
        df2.apply(self.splitListToRows,axis=1,args = (new_rows,"joinkey","flatten"))
        new_df2 = pd.DataFrame(new_rows)
        df_inner = pd.merge(new_df1, new_df2, on='flatten', how='left')
        df_inner = df_inner.drop(['flatten'],axis =1)
        df_inner.drop_duplicates(subset=["id_x","id_y"], keep = "first", inplace = True)
        df_inner = df_inner[['id_x','joinkey_x','id_y','joinkey_y']]
        df_inner = df_inner.rename(columns={"id_x": "id1", "joinkey_x": "joinKey1", "id_y":"id2","joinkey_y":"joinKey2"})
        # The left merge leaves NaN rows for tokens with no partner; drop them.
        df_inner = df_inner.dropna()
        return df_inner

    #returns the jaccard similarity
    def computesimilarity(self,row):
        """Return |r ∪ s|: the number of distinct truthy tokens in *row*
        (the concatenation of both joinkeys)."""
        new_dict={}
        for item in row:
            if(item):
                if item not in new_dict:
                    new_dict[item] = 1
                else:
                    new_dict[item] += 1
        rUs = len(new_dict.keys())
        return rUs

    #returns the intersection
    def commonElements(self,row):
        """Return |r ∩ s| between the two joinkey token sets."""
        return len(set(row['joinKey1']) & set(row['joinKey2']))

    def verification(self, cand_df, threshold):
        """Keep candidate pairs whose Jaccard similarity >= *threshold*."""
        #concatenate the joinkey columns
        cand_df['concatedCol'] = cand_df['joinKey1'] + cand_df['joinKey2']
        #count the unique elements in the concatenated col to get r U s
        cand_df['rUs'] = cand_df['concatedCol'].apply(self.computesimilarity)
        #find the intersection of r and s
        cand_df['common'] = cand_df.apply(self.commonElements,axis=1)
        cand_df['jaccard'] = cand_df['common']/cand_df['rUs']
        cand_df = cand_df[cand_df['jaccard'] >= threshold ]
        cand_df = cand_df.drop(['rUs','common','concatedCol'],axis =1)
        return cand_df

    def evaluate(self, result, ground_truth):
        """Return (precision, recall, f-measure) of *result* vs *ground_truth*.

        Both arguments are lists of [id1, id2] pairs.
        """
        R = len(result)
        T= 0
        for item in result:
            if item in ground_truth:
                T += 1
        precision = T/R
        A = len(ground_truth)
        recall = T/A
        fmeasure = (2*precision*recall)/(precision + recall)
        return (precision, recall, fmeasure)

    def jaccard_join(self, cols1, cols2, threshold):
        """Run the full join pipeline and return the matched-pairs dataframe."""
        new_df1 = self.preprocess_df(self.df1, cols1)
        new_df2 = self.preprocess_df(self.df2, cols2)
        print ("Before filtering: %d pairs in total" %(self.df1.shape[0] *self.df2.shape[0]))
        cand_df = self.filtering(new_df1, new_df2)
        print ("After Filtering: %d pairs left" %(cand_df.shape[0]))
        result_df = self.verification(cand_df, threshold)
        print ("After Verification: %d similar pairs" %(result_df.shape[0]))
        return result_df
if __name__ == "__main__":
    # Demo run on the sample Amazon/Google product files, evaluated
    # against the provided perfect mapping.
    er = SimilarityJoin("Amazon_sample.csv", "Google_sample.csv")
    amazon_cols = ["title", "manufacturer"]
    google_cols = ["name", "manufacturer"]
    result_df = er.jaccard_join(amazon_cols, google_cols, 0.5)
    result = result_df[['id1', 'id2']].values.tolist()
    ground_truth = pd.read_csv("Amazon_Google_perfectMapping_sample.csv").values.tolist()
    print ("(precision, recall, fmeasure) = ", er.evaluate(result, ground_truth))
| true |
51d8628167800f0a7970e02a463879832a4021ae | Python | udonmoral/- | /python20200707.py | UTF-8 | 387 | 2.984375 | 3 | [] | no_license | from sklearn.linear_model import LinearRegression
X = [[10.0],[8.0],[13.0],[9.0],[11.0],[14.0],[6.0],[4.0],[12.0],[7.0],[5.0]]
y = [8.04,6.95,7.58,8.81,8.33,9.96,7.24,4.26,10.84,4.82,5.68]
model = LinearRegression()
model.fit(X,y)
print(model.intercept_) # 切片
print(model.coef_) # 傾き
y_pred = model.predict([[0],[1]])
print(y_pred) # x=0,x=1に対する予測結果 | true |
902b3444703f74fddf07e0bbef4ffb3b3c1ec19a | Python | ktsstudio/hackaton-s7-bot | /tools/utils.py | UTF-8 | 1,592 | 2.96875 | 3 | [] | no_license | import re
from datetime import datetime
from dateutil.relativedelta import relativedelta
from tornkts.utils import to_int
MONTH_TITLES = {
'1': 'январь', '2': 'февраль',
'3': 'март', '4': 'апрель', '5': 'май',
'6': 'июнь', '7': 'июль', '8': 'август',
'9': 'сентябрь', '10': 'октябрь', '11': 'ноябрь',
'12': 'декабрь',
}
MONTH_TITLES_GENITIVE = {
'1': 'января', '2': 'февраля',
'3': 'марта', '4': 'апреля', '5': 'мая',
'6': 'июня', '7': 'июля', '8': 'августа',
'9': 'сентября', '10': 'октября', '11': 'ноября',
'12': 'декабря',
}
def month_titles(genitive=False):
result = MONTH_TITLES.values()
if genitive:
result += MONTH_TITLES_GENITIVE.values()
return result
def month_number(name):
    """Return the month number (int) for *name*, accepting either the
    nominative or the genitive form; to_int(None) handles unknown names."""
    months = dict()
    # Invert both name->number maps into one name-keyed lookup table.
    months.update(dict((v, k) for k, v in MONTH_TITLES.items()))
    months.update(dict((v, k) for k, v in MONTH_TITLES_GENITIVE.items()))
    return to_int(months.get(name))
def month_title(num, genitive=False):
    """Return the Russian name of month *num* (int-like), or None when the
    number is out of range; genitive=True selects the genitive form."""
    titles = MONTH_TITLES
    if genitive:
        titles = MONTH_TITLES_GENITIVE
    # The tables are keyed by the stringified month number.
    return titles.get(str(to_int(num)))
def readable_date(date):
    """Render *date* for humans: "<day> <month-genitive>" when the date is
    in the current year, otherwise the full dd.mm.YYYY form."""
    if date.year == datetime.now().year:
        return '{0} {1}'.format(date.day, month_title(date.month, True))
    return date.strftime('%d.%m.%Y')
def add_eleven_month(date=None):
    """Return *date* (default: today) shifted forward by eleven calendar
    months, using dateutil's calendar-aware relativedelta."""
    if date is None:
        date = datetime.now().date()
    return date + relativedelta(months=11)
0ec169b4f8560401dff12dbeb74983b2363ba9ba | Python | tesaho/vehicle_tracking | /utils/image_box_plotter.py | UTF-8 | 2,587 | 2.640625 | 3 | [
"MIT"
] | permissive | from __future__ import division
from utils.utils import *
from utils.datasets import *
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator
def plot_box_images(imgs, img_detections, classes, savePath, img_size=416):
    """
    Draw detection bounding boxes on each image and save the result as PNG.

    imgs: list of image_paths
    img_detections: list of outputs from non_max_suppression; each entry is
        assumed to be a tensor of rows (x1, y1, x2, y2, conf, cls_conf,
        cls_pred) in the model's img_size coordinates -- TODO confirm.
    classes: class labels extracted from class.names file
    savePath: path to save images
    img_size: size of new image (the network input size the boxes were
        predicted at)
    """
    # Bounding-box colors
    cmap = plt.get_cmap("tab20b")
    colors = [cmap(i) for i in np.linspace(0, 1, 20)]

    print("\nSaving images:")
    # Iterate through images and save plot of detections
    for img_i, (path, detections) in enumerate(zip(imgs, img_detections)):
        print("(%d) Image: '%s'" % (img_i, path))
        # Create plot
        img = np.array(Image.open(path))
        plt.figure()
        fig, ax = plt.subplots(1)
        ax.imshow(img)
        # Draw bounding boxes and labels of detections
        if detections is not None:
            # Rescale boxes from img_size coordinates to the original image.
            detections = rescale_boxes(detections, img_size, img.shape[:2])
            # One colour per distinct predicted class in this image.
            unique_labels = detections[:, -1].cpu().unique()
            n_cls_preds = len(unique_labels)
            bbox_colors = random.sample(colors, n_cls_preds)
            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                print("\t+ Label: %s, Conf: %.5f" % (classes[int(cls_pred)], cls_conf.item()))
                box_w = x2 - x1
                box_h = y2 - y1
                color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
                # Create a Rectangle patch
                bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2, edgecolor=color, facecolor="none")
                # Add the bbox to the plot
                ax.add_patch(bbox)
                # Add label
                plt.text(
                    x1,
                    y1,
                    s=classes[int(cls_pred)],
                    color="green",
                    verticalalignment="top",
                    bbox={"color": color, "pad": 0},
                )
        # Save generated image with detections (axes and ticks hidden).
        plt.axis("off")
        plt.gca().xaxis.set_major_locator(NullLocator())
        plt.gca().yaxis.set_major_locator(NullLocator())
        filename = path.split("/")[-1].split(".")[0]
        plt.savefig("%s/%s.png" %(savePath, filename), bbox_inches="tight", pad_inches=0.0)
        plt.close()
736af7c49c8ca2a5f402ad456ce56b9952f07734 | Python | MarcoSapio/cryptography-03lpyov-exercises | /AY2021/py-basics/symmetric/7.stream_json.py | UTF-8 | 1,278 | 3.015625 | 3 | [] | no_license | import base64
import json
from Crypto.Cipher import ChaCha20
from Crypto.Random import get_random_bytes
# Message to protect; ChaCha20 operates on bytes, not str.
plaintext = b'This is the secret message to encrypt'
# Fresh random key (32 bytes = 256 bits) and nonce (8 bytes) for ChaCha20.
key = get_random_bytes(32)
nonce = get_random_bytes(8)
cipher = ChaCha20.new(key=key,nonce=nonce)
ciphertext = cipher.encrypt(plaintext)
# Base64-encode the binary values so they can be carried in JSON, which
# only holds text.
nonceb64 = base64.b64encode(cipher.nonce).decode('utf-8')
ciphertextb64 = base64.b64encode(ciphertext).decode('utf-8')
result = json.dumps({'nonce':nonceb64, 'ciphertext':ciphertextb64})
print(result)
#unpack and decipher
b64 = json.loads(result)
ciphertext2 = base64.b64decode(b64['ciphertext'])
nonce2 = base64.b64decode(b64['nonce'])
print(nonce2)
print(nonce)
# A fresh cipher object is required for decryption: the encrypting one has
# already advanced its keystream.
cipher_dec = ChaCha20.new(key=key,nonce=nonce2)
plaintext_dec = cipher_dec.decrypt(ciphertext2)
# smarter use of JSON objects even more useful when more data are saved
# json_k = [ 'nonce', 'ciphertext']
# json_v = [ base64.b64encode(x).decode('utf-8') for x in (cipher.nonce, ciphertext) ]
# result2 = json.dumps(dict(zip(json_k, json_v)))
# print(result2)
#
# b64 = json.loads(result2)
# json_k = [ 'nonce', 'ciphertext']
# jv = {k:base64.b64decode(b64[k]) for k in json_k}
#
# cipher_dec = ChaCha20.new(key=key,nonce=jv['nonce'])
# plaintext_dec = cipher_dec.decrypt(jv['ciphertext'])
#
print(plaintext_dec)
| true |
8c84509621bda772167c466429e60831e55e5a8a | Python | coutureai/RaWorkflowOrchestrator | /airflow/_vendor/connexion/decorators/produces.py | UTF-8 | 1,261 | 2.640625 | 3 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | # Decorators to change the return type of endpoints
import functools
import logging
from .decorator import BaseDecorator
# Module-level logger for the produces decorators.
logger = logging.getLogger('connexion.decorators.produces')

# special marker object to return empty content for any status code
# e.g. in app method do "return NoContent, 201"
NoContent = object()
class BaseSerializer(BaseDecorator):
    """Base class for decorators that serialize an endpoint's return value.

    Only stores the target mimetype; subclasses implement the wrapping.
    """
    def __init__(self, mimetype='text/plain'):
        """
        :param mimetype: response content type this serializer produces
        :type mimetype: str
        """
        self.mimetype = mimetype
    def __repr__(self):
        """
        :rtype: str
        """
        return '<BaseSerializer: {}>'.format(self.mimetype)  # pragma: no cover
class Produces(BaseSerializer):
    """Decorator that passes the response through while logging the request URL."""
    def __call__(self, function):
        """Wrap *function* so each call is logged with its URL and mimetype.

        :type function: types.FunctionType
        :rtype: types.FunctionType
        """
        @functools.wraps(function)
        def wrapper(request):
            url = request.url
            response = function(request)
            # NOTE(review): the message interpolates the URL only; the mimetype
            # travels in 'extra' for structured log handlers - confirm intended
            logger.debug('Returning %s', url,
                         extra={'url': url, 'mimetype': self.mimetype})
            return response
        return wrapper
    def __repr__(self):
        """
        :rtype: str
        """
        return '<Produces: {}>'.format(self.mimetype)  # pragma: no cover
| true |
95ad06658c990bfb5374474cf5d77ef06e58d628 | Python | philip-shen/note_python | /Audio_File_Format/Pyqt_Qsound.py | UTF-8 | 2,085 | 2.6875 | 3 | [
"MIT"
] | permissive | """
date: 2020/12/20
author: @_kurene
"""
import os
import sys
from PyQt5.QtWidgets import QWidget, QPushButton, QGridLayout, QFileDialog
from PyQt5.QtWidgets import QLabel, QApplication
from PyQt5.QtMultimedia import QSound
class PyAudioPylerGUI(QWidget):
    """Minimal audio player window: pick a .wav file, then play/stop it with QSound."""
    def __init__(self):
        super().__init__()
        # no sound is loaded until the user picks a file via the dialog
        self.qsound = None
        self.init_ui()
        self.show()
    def init_ui(self):
        """Build the button grid (open / play / stop / exit) and filename label."""
        self.setGeometry(100, 100, 250, 250)
        grid = QGridLayout()
        self.setWindowTitle('PyQt AudioPlayer with QSound')
        #grid.setSpacing(10)
        button_play = QPushButton("Play")
        button_stop = QPushButton("Stop")
        button_exit = QPushButton("Exit")
        button_dialog = QPushButton("Open audio-file")
        self.label = QLabel(self)
        # row 0: file chooser spans both columns; row 1: play/stop side by side;
        # row 2: exit spans both columns; row 3: label with the chosen filename
        grid.addWidget(button_dialog, 0, 0, 1, 2)
        grid.addWidget(button_play, 1, 0)
        grid.addWidget(button_stop, 1, 1)
        grid.addWidget(button_exit, 2, 0, 1, 2)
        grid.addWidget(self.label, 3, 0, 1, 2)
        button_dialog.clicked.connect(self.button_openfile)
        button_play.clicked.connect(self.button_play)
        button_stop.clicked.connect(self.button_stop)
        button_exit.clicked.connect(self.button_exit)
        self.setLayout(grid)
    def button_exit(self):
        # quit the whole application, not just this window
        QApplication.quit()
    def button_play(self):
        # silently ignore the click if no file has been loaded yet
        if self.qsound is not None:
            self.qsound.play()
    def button_stop(self):
        if self.qsound is not None:
            self.qsound.stop()
    def button_openfile(self):
        """Let the user pick a .wav file and load it into a new QSound."""
        filepath, _ = QFileDialog.getOpenFileName(self, 'Open file','c:\\',"Audio files (*.wav)")
        filepath = os.path.abspath(filepath)
        self.qsound = QSound(filepath)
        self.filename = os.path.basename(filepath)
        self.label.setText(self.filename)
        self.label.adjustSize()
if __name__ == '__main__':
    app = QApplication(sys.argv)
    w = PyAudioPylerGUI()
    # NOTE(review): sys.exit(app.exec_()) is the conventional form; calling
    # app.exit() after the event loop has already returned is a no-op -
    # confirm this was intended
    app.exit(app.exec_())
c2828fa41a1fcc7f012c00f4abc9b5accf0a7960 | Python | salihozkul/NetworkProgrammingCookBook | /chapter1/1_3_find_service_name.py | UTF-8 | 435 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python
import socket
def find_service_name():
protocolname = 'tcp'
for port in range(1,65535):
try:
print "Port: %s => service name: %s" % (port,socket.getservbyport(port,protocolname))
except socket.error, err_msg:
pass
print "Port: %s => service name: %s" % (53,socket.getservbyport(53,'udp'))
if __name__ == '__main__':
find_service_name() | true |
565bd48c1893b8db8800d3b5007fa302851666e3 | Python | rohans540/python-practice | /noConsOnes.py | UTF-8 | 573 | 4.125 | 4 | [] | no_license | #Given a positive integer N, count all possible distinct binary strings of length N such that there are no consecutive 1’s
def count_Distinct_Strings(n):
    """Count binary strings of length n that contain no two consecutive 1s.

    ends_in_zero / ends_in_one track how many valid strings of the current
    length end in 0 and 1 respectively: any string can be extended with a 0,
    but only strings ending in 0 can be extended with a 1 (the classic
    Fibonacci recurrence), so the answer is Fib(n + 2).

    :param n: number of bits (length of the strings)
    :return: number of valid strings; 1 for n <= 0 (only the empty string -
        the original a[n-1] + b[n-1] wrapped around to index -1 and
        incorrectly returned 2 for n == 0)
    """
    if n <= 0:
        return 1
    # one string of length 1 ends in '0', one ends in '1' - O(1) space
    # instead of the original pair of growing lists
    ends_in_zero, ends_in_one = 1, 1
    for _ in range(1, n):
        ends_in_zero, ends_in_one = ends_in_zero + ends_in_one, ends_in_zero
    return ends_in_zero + ends_in_one
if __name__ == "__main__":
    # simple CLI: read n from stdin and print the count
    n = int(input("Enter n:"))
    print(count_Distinct_Strings(n))
a966c78d2e6f5ec6dacc9d9e7e8f8ac8f184f4b5 | Python | Nevada228/simulated-annealing | /src/tsp/rand.py | UTF-8 | 1,180 | 3.75 | 4 | [] | no_license | import numpy as np
def get_random_matrix(n: int = 5, m: int = 5, low=5, high=10):
    """Return an (n, m) matrix of random integers.

    Uniform samples on [0, 1) are rescaled to [low, high) and truncated to
    int, so n is the number of rows and m the number of columns.

    :param n: number of rows
    :param m: number of columns
    :param low: lower bound of the random values (inclusive)
    :param high: upper bound of the random values (exclusive)
    :return: numpy integer array of shape (n, m)
    """
    span = high - low
    uniform_draws = np.random.sample((n, m))
    return (span * uniform_draws + low).astype(int)
def get_random_vector(n: int = 5, low=0.0, high=1.0):
    """Return a length-n vector of random integers in [low, high).

    :param n: vector length
    :param low: lower bound of the random values
    :param high: upper bound of the random values
    :return: numpy integer vector (note: with the default bounds every
        draw truncates to 0)
    """
    uniform_draws = np.random.rand(n)
    return ((high - low) * uniform_draws + low).astype(int)
def get_symmetric_matrix(n: int = 5, low=5, high=10):
    """Return an (n, n) symmetric matrix of random integers.

    Draws a random integer matrix (same construction as get_random_matrix,
    inlined here) and symmetrises it by averaging with its transpose,
    truncating back to int.
    """
    base = ((high - low) * np.random.sample((n, n)) + low).astype(int)
    return ((base + base.T) / 2).astype(int)
| true |
4043e2da599dc5e78236de12971b6c814789b183 | Python | nitely/http-lazy-headers | /http_lazy_headers/fields/vary.py | UTF-8 | 864 | 2.890625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from ..shared import bases
def vary_any():
    """Return the wildcard ("*") value accepted by the Vary header field."""
    return '*'
class Vary(bases.TokensHeaderBase):
    """
    Sent by server only.
    The ``Vary`` header field in a response\
    describes what parts of a request message,\
    aside from the method, Host header field,\
    and request target, might influence the\
    origin server's process for selecting and\
    representing this response. The value consists\
    of either a single asterisk ("*") or a list\
    of header field names (case-insensitive).
    Example::
        Vary([
            'accept-encoding',
            'accept-language'
        ])
        Vary([
            vary_any()
        ])
        Vary(['*'])
    `Ref. <http://httpwg.org/specs/rfc7231.html#header.vary>`_
    """
    # wire-level header field name (matching is case-insensitive)
    name = 'vary'
    # todo: validate the value is either a single "*" or a list of header names
ccb65bf6f660cea36fc6fce07a9fbfbf5d7fccf7 | Python | mirela99/IMR-FaceFilterApp | /db_utils.py | UTF-8 | 249 | 2.5625 | 3 | [] | no_license | from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
# NOTE(review): four slashes in "sqlite:////my-db.db" place the database file
# at the filesystem root (/my-db.db) - confirm an absolute path is intended
engine = create_engine("sqlite:////my-db.db")
# create the database on first run only
if not database_exists(engine.url):
    create_database(engine.url)
# prints True: the database now exists (it was created above if missing)
print(database_exists(engine.url))
6ecc4bf660178904131c1ccd742c91d49728f73f | Python | daniel-reich/ubiquitous-fiesta | /cMyMt377ReBsoTHnz_23.py | UTF-8 | 117 | 2.96875 | 3 | [] | no_license |
def dict_to_list(d):
    """Return the dictionary's (key, value) pairs as a list sorted by key.

    sorted() accepts the items view directly - the original wrapped it in a
    redundant list comprehension. Tuples compare element-wise, so the pairs
    come back ordered by key.
    """
    return sorted(d.items())
# demo: prints [('B', 2), ('C', 3), ('D', 1)]
print(dict_to_list({"D": 1,"B": 2,"C": 3}))
| true |
7210ebf106b1b151f4529fad7b2c02050a2dda09 | Python | JanMelicharik/baan_python | /04_nelinearni_regresni_model_m-h_algoritmus/mexico_ces.py | UTF-8 | 8,161 | 2.59375 | 3 | [] | no_license | # Nastaveni cesty do domovske slozky
# add the project root to the module search path so the `support` package resolves
import sys
import pdb
sys.path.insert(1, "/".join(sys.path[0].split("/")[:-1]))
# imports of helper functions
from support.progress_info import progress_bar
from support.gamm_rnd_koop2 import gamm_rnd_koop2
from support.norm_rnd import norm_rnd
from support.log_post_ces import log_post_ces
from support.lik_ces import lik_ces
from support.prior_ces import prior_ces
from numpy.random import multivariate_normal as mvn
from numpy.linalg import (inv, det)
from numpy.random import uniform
from numpy.random import normal
from scipy.stats.distributions import chi2
from math import (log, pi, exp)
from pandas import read_csv
from tabulate import tabulate
import warnings
import math
import numpy as np
import matplotlib.pyplot as plt
# two kinds of warnings can occur during the computation - they do not
# affect the script's results
warnings.filterwarnings("ignore", r"overflow encountered in (power|matmul)")
data = read_csv("mexico.csv", delimiter=",")
# ===== 1. Data preparation (rescaled to indices with mean 1) =====
y = np.array([
    data["gdp"] / np.mean(data["gdp"])
]).T
x = np.array([
    [1]* len(y),
    data["labor"] / np.mean(data["labor"]),
    data["capital"] / np.mean(data["capital"]),
]).T
n = y.shape[0]
# ===== 2. Prior densities and prior hyperparameters =====
# p(gamma) ~ N(gamma_0, v_0)
gamma_0 = np.array(
    [
        [1],
        [0.5],
        [0.5],
        [1]
    ]
)
k = gamma_0.size # number of model parameters
v_0 = np.diag(
    [
        0.5**2,
        0.25**2,
        0.25**2,
        0.5**2
    ]
)
# p(h) ~ G(h_0, nu_0)
h_0 = 1 / 0.5**2
nu_0 = 5
# ===== 3. Metropolis within Gibbs - settings =====
s = 50_000 + 1
s_0 = 30_000 + 1
s_1 = s - s_0
# storage for the draws
gamma = np.zeros((k, s))
gamma[:,[0]] = gamma_0
h = np.array([0.0] * s)
# random-walk M-H settings:
# candidate density ~ N(gamma(s-1), sigma)
d = 0.5 # scaling constant
# sigma = d * np.identity(k) #
sigma = d * np.array(
    [
        [ 0.0680, -0.0343, -0.0284, -0.0024],
        [-0.0343, 0.0449, -0.0021, 0.0037],
        [-0.0284, -0.0021, 0.0341, -0.0015],
        [-0.0024, 0.0037, -0.0015, 0.2144],
    ]
)
count = 0 # counter of accepted draws
nu_1 = nu_0 + n # 5.23
# NOTE(review): this rebinds h, so log_A and h alias the SAME array from here
# on; log_A is never read again below - looks like a leftover, confirm before
# removing
log_A = h = np.array([0.0] * s)
# ===== 4. Metropolis within Gibbs =====
print("Metropolis within Gibbs ...")
for i in range(1,s):
    # a) conditional density p(h|gamma,y) ~ G(h_1,nu_1)
    # f_x = CES production function evaluated at the previous gamma draw
    f_x = gamma[0][i-1] * \
        ( gamma[1][i-1] * x[:,[1]]**gamma[3][i-1] + gamma[2][i-1] * x[:,[2]]**gamma[3][i-1] ) ** \
        ( 1 / gamma[3][i-1] )
    h_1 = (1 / nu_1 * ((y - f_x).T @ (y - f_x) + nu_0 / h_0))**(-1)
    h[i] = gamm_rnd_koop2(h_1, nu_1)
    # b) conditional density p(gamma|h,y) -> log of the conditional kernel = external function log_post_ces.py
    # draw a candidate from the random-walk candidate density
    gamma_ast = gamma[:,[i-1]] + norm_rnd(sigma)
    log_accept = min(
        log_post_ces(y, x, gamma_ast, h[i], gamma_0, v_0) - \
        log_post_ces(y, x, gamma[:,[i-1]], h[i], gamma_0, v_0),
        0
    )
    # accept/reject decision
    if log_accept > log(uniform()):
        gamma[:,[i]] = gamma_ast
        count += 1
    else:
        gamma[:,[i]] = gamma[:,[i-1]]
    progress_bar(i,s)
# ===== 5. Presentation of results and posterior analysis =====
# a) discard the first s_0 draws (burn-in)
gamma = gamma[:,s_0:]
h = h[s_0:]
# b) posterior moments
e_gamma = np.mean(gamma, axis=1)
d_gamma = np.var(gamma, axis=1)
std_gamma = np.std(gamma, axis=1)
e_h = np.mean(h)
d_h = np.var(h)
std_h = np.std(h)
# prior moments
e_gamma_0 = gamma_0
d_gamma_0 = np.diag(v_0)
std_gamma_0 = [math.sqrt(var) for var in d_gamma_0]
e_h_0 = h_0
d_h_0 = 2 * e_h_0**2 / nu_0
std_h_0 = math.sqrt(d_h_0)
headers = ["Parametr", "E_prior", "std_prior", "E_post", "std_post"]
table = []
for i in range(len(e_gamma)):
    table.append([
        f"gamma_{i+1}",
        round(e_gamma_0[i][0], 4),
        round(std_gamma_0[i], 4),
        round(e_gamma[i], 4),
        round(std_gamma[i], 4)])
table.append(["h", round(e_h_0, 4), round(std_h_0, 4), round(e_h, 4), round(std_h, 4)])
print("\nApriorni a aposteriorni parametry:")
print(tabulate(table, headers, tablefmt="pretty"))
# ===== 6. Marginal likelihood via the Gelfand-Dey method =====
# a) simplified variant (harmonic-mean style estimator)
gd_simple = 0
print("Statistika Gelfanda a Deye (zjednodusene) ...")
for i in range(s_1):
    gd_simple = gd_simple + 1 / s_1 * 1 / lik_ces(y, x, gamma[:,[i]], h[i])
    progress_bar(i,s_1)
print("\nMarginalni verohodnost CES produkcni funkce (zjednodusena varianta):")
print(f"Marginal likelihood = {1/gd_simple}")
print(f"Marginal likelihood (log) = {math.log(1/gd_simple)}\n")
# b) full variant
# the vector 'h' is one-dimensional - to stack it as a row under the
# gamma matrix it is addressed with a second axis (None) - see NumPy broadcasting
theta = np.r_[gamma, h[None,:]]
theta_hat = np.mean(theta, axis=1)
sigma_hat = np.cov(theta)
kk = theta_hat.size # for a vector, size can be used instead of shape
pp = 0.01 # for the (1 - p) percent quantile of the chi-square distribution
chi_pp = chi2.ppf(1 - pp, kk)
# simulate the integrating constant of the truncated prior
count_g = 0
for i in range(s):
    pom = gamma_0 + norm_rnd(v_0) # p(gamma) ~ N(gamma_0,V_0)
    count_g += min(pom) > 0
fth = np.array([0.0] * s)
pri = np.array([0.0] * s)
lik = np.array([0.0] * s)
int_c = s / count_g
gd = 0 # Gelfand-Dey statistic
print("Statistika Gelfanda a Deye ...")
for i in range(s_1):
    t_theta = (theta[:,[i]] - theta_hat[:, None]).T @ inv(sigma_hat) @ (theta[:,[i]] - theta_hat[:, None])
    # multivariate normal density truncated to the chi-square ellipsoid
    f_theta = 1 / (1 - pp) * 1 / (2 * pi) ** (kk / 2) * \
        det(sigma_hat) ** (- 1 / 2) * exp(- 1 / 2 * t_theta) \
        if t_theta <= chi_pp else 0
    prior = prior_ces(gamma[:,[i]], h[i], gamma_0, v_0, h_0, nu_0)
    like = lik_ces(y, x, gamma[:,[i]], h[i])
    lik[i] = like
    pri[i] = prior
    fth[i] = f_theta
    gd += f_theta / (int_c * prior * like) * 1 / s_1
    progress_bar(i,s_1)
print("\nMarginalni verohodnost CES produkcni funkce (plna varianta):")
print(f"Marginal likelihood = {1/gd}")
print(f"Marginal likelihood (log) = {math.log(1/gd)}\n")
# # ===== 7. Comparison of actual and model-implied moments =====
e_y_ast = np.zeros((1, s_1)) # vector of model means
d_y_ast = np.zeros((1, s_1)) # vector of model variances
std_y_ast = np.zeros((1, s_1)) # vector of model std deviations
# # simulation (generate artificial data from the posterior draws)
for i in range(s_1):
    f_xx = gamma[0][i] * \
        ( gamma[1][i] * x[:,[1]] ** gamma[3][i] + gamma[2][i] * x[:,[2]] ** gamma[3][i] ) ** \
        ( 1 / gamma[3][i] )
    y_ast = f_xx + normal() * math.sqrt(1 / h[i])
    e_y_ast[0, [i]] = np.mean(y_ast)
    d_y_ast[0, [i]] = np.var(y_ast)
    std_y_ast[0, [i]] = np.std(y_ast)
# how many simulated moments fall below the observed moment
a = np.sum(e_y_ast < np.mean(y))
b = np.sum(d_y_ast < np.var(y))
c = np.sum(std_y_ast < np.std(y))
# predictive (one-sided) p-values
# a) for the mean
p_e = (a / s_1) * ((a / s_1) <= 0.5) + (1 - a / s_1) * ((a / s_1) > 0.5)
# b) for the variance
p_d = (b / s_1) * ((b / s_1) <= 0.5) + (1 - b / s_1) * ((b / s_1) > 0.5)
# c) for the std deviation
p_std = (c / s_1) * ((c / s_1) <= 0.5) + (1 - c / s_1) * ((c / s_1) > 0.5)
print("Predikcni p-hodnoty:")
print(f" p_E = {round(p_e, 4)}\n p_D = {round(p_d, 4)}\n p_std = {round(p_std, 4)}")
# histograms of the simulated moments with the observed moment as a dashed line
fix, ax = plt.subplots(ncols=3, figsize=(20,5))
ax[0].set_title("Simulovane stredni hodnoty")
ax[0].hist(e_y_ast[0], bins=30, histtype="bar", rwidth=0.9)
ax[0].axvline(np.mean(y), color="r", linewidth=2, linestyle="dashed")
ax[1].set_title("Simulovane rozptyly")
ax[1].hist(d_y_ast[0], bins=30, histtype="bar", rwidth=0.9)
ax[1].axvline(np.var(y), color="r", linewidth=2, linestyle="dashed")
ax[2].set_title("Simulovane sm. odchylky")
ax[2].hist(std_y_ast[0], bins=30, histtype="bar", rwidth=0.9)
ax[2].axvline(np.std(y), color="r", linewidth=2, linestyle="dashed")
plt.show(block=False)
# drop into the debugger so the figures stay open for inspection
pdb.set_trace()
| true |
7c24eca2c7799caf1d78227c7365446765ac0f5e | Python | nurarenke/hb_price_checker | /accounting.py | UTF-8 | 1,426 | 3.390625 | 3 | [] | no_license | MELON_COST = 1.00
def upexpected_paying_customers(payment_data_filename):
''' Checks all of ubermelon's customers to see if they overpaid or underpaid '''
payment_data = open(payment_data_filename) #open the file
# iterate over lines in file
for line in payment_data:
# for each line, split by |
order = line.split('|')
# get the full name at index 1
customer_name = order[1]
# get the first name after spliting by " "
customer_first = customer_name.split(" ")[0]
# get # of melons and payment amount
customer_melons = float(order[2])
customer_paid = float(order[3])
# calculate expected price
customer_expected = customer_melons * MELON_COST
# check if customer over or under paid
if customer_expected < customer_paid:
print "{} paid {:.2f}, expected {:.2f}".format(
customer_name, customer_paid, customer_expected)
print "{} has over paid for their melons.".format(customer_first)
elif customer_expected > customer_paid:
print "{} paid {:.2f}, expected {:.2f}".format(
customer_name, customer_paid, customer_expected)
print "{} has under paid for their melons.".format(customer_first)
# close the file
payment_data.close()
# call the function
upexpected_paying_customers("customer-orders.txt")
| true |
2a683c6540dc3f608c7cfc11e8be40f79023a122 | Python | nishchitajagadish/HackerRank | /ai/normal-distribution-2.py | UTF-8 | 171 | 2.578125 | 3 | [] | no_license | from scipy.stats import norm
# X ~ N(mu, sigma): print P(X < 19.5) and P(20 < X < 22), 3 decimals each
mu = 20
sigma = 2
prob_below = norm.cdf(19.5, mu, sigma)
print('{:.3f}'.format(prob_below))
prob_between = norm.cdf(22, mu, sigma) - norm.cdf(20, mu, sigma)
print('{:.3f}'.format(prob_between))
| true |
9ace53fb54dcadbcd3f30768d13888d8ab241420 | Python | geeeyeon/Python | /Q파일복사.py | UTF-8 | 429 | 3.578125 | 4 | [] | no_license | ##파일 복사하기
inFp, outFp=None,None
inStr,outStr="",""
inFname,outFname="",""
inFname=input("소스 파일명을 입력하세요: ")
inFp=open(inFname,"r")
outFname=input("타깃 파일명을 입력하세요: ")
outFp=open(outFname,"w")
inList=inFp.readlines()
for inStr in inList:
outFp.writelines(inList)
inFp.close()
outFp.close()
print("--",inFname,"파일이 ",outFname,"으로 정상적으로 복사됨--")
| true |
225317167c056f6e79dbc00537f16923a4e1ae6e | Python | glmack/geo_mozambique | /contributors/lee/geo_mz.py | UTF-8 | 967 | 3.1875 | 3 | [
"BSD-3-Clause"
] | permissive | def get_opendata(event_name, filename):
import requests
import urllib
html = request_opendata_event(event_name)
data = get_opendata_links(html, filename)
return data
def request_opendata_event(event_name):
    """Return the HTTP response for the open-data page of the given event.

    The event name is appended to the DigitalGlobe open-data base URL.
    """
    import requests
    url_base = "https://www.digitalglobe.com/ecosystem/open-data/"
    # the original kept an unused `import urllib` and a dead
    # `event = event_name` assignment; both removed
    html = requests.get(url_base + event_name)
    return html
def get_opendata_links(html, filename):
    """Append every .tif link found in *html* to "<filename>.txt".

    Each href ending in ".tif" is written on its own line. Returns the
    (closed) file handle, mirroring the original's return value.

    Fixes vs. the original: the filename parameter was ignored (it wrote to
    a literal placeholder path - presumably "{filename}.txt" was intended,
    confirm against callers), the output file was re-opened for every link,
    and `return f` raised NameError when the page contained no .tif link.
    """
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(html.text, "html.parser")
    tif_links = [link['href'] for link in soup.find_all('a')
                 if link['href'].endswith('.tif')]
    # open the target once and append all links in a single pass
    with open(f"{filename}.txt", "a") as f:
        f.writelines(href + "\n" for href in tif_links)
    return f
def get_dgopendata(event, filename):
    """Fetch the open-data page for *event* and store its .tif links.

    Same pipeline as get_opendata, kept for backward compatibility.
    """
    html = request_opendata_event(event)
    # BUG FIX: the original called get_opendata_links(html) without the
    # required filename argument, so every call raised TypeError
    data = get_opendata_links(html, filename)
    return data
d598b4b433d44df485919fad76b932b9007ba86f | Python | jura05/peano | /gates-perebor.py | UTF-8 | 1,956 | 2.515625 | 3 | [] | no_license | import json
import logging
from peano.paths import Gate, PathsGenerator
from peano.fast_fractions import FastFraction
from perebor import perebor
logging.basicConfig(level=logging.DEBUG)
#input = 'gates-322.txt.sorted'
# NOTE(review): 'input' shadows the builtin input(); harmless here because
# the script never reads from stdin, but consider renaming to input_fname
input = 'gates-223.txt.sorted'
def get_pt(pt_str):
    """Parse a point string like "(1/2,0,1)" into a tuple of FastFractions.

    Each comma-separated coordinate is either "num/den" or a bare integer.
    """
    coords = []
    for token in pt_str.strip('()').split(','):
        if '/' in token:
            numerator, denominator = token.split('/')
            coords.append(FastFraction(int(numerator), int(denominator)))
        else:
            coords.append(FastFraction(int(token), 1))
    return tuple(coords)
def get_gate(gate_str):
    """Parse an "entrance->exit" string into a Gate of two parsed points."""
    entrance_str, exit_str = gate_str.split('->')
    return Gate(entrance=get_pt(entrance_str), exit=get_pt(exit_str))
# parse the gates file: each line holds a json-like list of gate strings
# (single quotes are normalised to double quotes so json.loads accepts them)
gates_list = []
with open(input) as ifh:
    for line in ifh:
        gate_strs = json.loads(line.replace("'", '"'))
        gates = [get_gate(gs) for gs in gate_strs]
        gates_list.append(gates)
# every line is assumed to hold the same number of patterns as the first one
pattern_count = len(gates_list[0])
def is_vertex(pt):
    """True iff every coordinate of pt is on the unit-cube boundary (0 or 1)."""
    zero, one = FastFraction(0, 1), FastFraction(1, 1)
    return all(not (zero < coord < one) for coord in pt)
def cnt_bnd_coords(pt):
    """Count the coordinates of pt equal to 0 or 1 (boundary coordinates)."""
    zero, one = FastFraction(0, 1), FastFraction(1, 1)
    # sum over a generator instead of len(list(...)): no throwaway list, and
    # the two FastFraction constants are built once rather than per element
    return sum(1 for xj in pt if xj == zero or xj == one)
for idx, gates in enumerate(gates_list):
    # collect all entrance/exit points of this gate configuration
    pts = []
    for g in gates:
        pts += [g.entrance, g.exit]
    # skip configurations where any gate endpoint sits on a cube vertex
    vertex_count = len(list(pt for pt in pts if is_vertex(pt)))
    if vertex_count != 0:
        continue
    # keep only configurations with exactly one boundary coordinate per
    # endpoint (2 per pattern in total)
    total_bnd_coords = sum(cnt_bnd_coords(gate.entrance) + cnt_bnd_coords(gate.exit) for gate in gates)
    add_bnd_coords = total_bnd_coords - 2 * pattern_count
    if add_bnd_coords != 0:
        continue
    # search configuration for the exhaustive curve-ratio search (perebor)
    conf = {
        'dim': 2,
        'div': 2,
        'ratio_func_name': 'linf',
        #'ratio_func_name': 'l2',
        'gates': gates,
        'rel_tol_inv': 10000,
        'upper_bound': FastFraction(5, 1),
        #'upper_bound': FastFraction(51, 10),
    }
    print('GATE:', idx, [str(g) for g in gates])
    perebor(conf)
    print('===')
    print('')
| true |
4e1e38ac063b900682034ca4c969bb1cec104e9b | Python | nataliejpg/Qsim | /qsim/helpers.py | UTF-8 | 1,309 | 3.046875 | 3 | [
"MIT"
] | permissive | import numpy as np
def dagger(A):
    """Return the Hermitian conjugate (conjugate transpose) of the array A.

    Accepts anything np.transpose handles (arrays or nested sequences).
    """
    transposed = np.transpose(A)
    return np.conjugate(transposed)
def nth_root(number, root):
    """Return the real positive root-th root of *number*, rounded to 5 places.

    e.g. given 8, 3 returns 2.0.

    Solves x**root - number = 0 with numpy's polynomial root finder and
    selects the largest real root. The original returned np.roots(...)[0],
    but np.roots returns the complex roots in an unspecified order, so the
    first entry is often one of the complex conjugate pair - and round() on
    a numpy complex scalar raises TypeError.

    Args:
        number: the (positive) number to take the root of
        root: which root to take
    Return:
        the root as a float
    """
    coeff = [0] * (root + 1)
    coeff[-1] = -1 * number
    coeff[0] = 1
    roots = np.roots(coeff)
    # keep only the numerically real roots, then pick the largest one
    real_roots = roots[np.isclose(roots.imag, 0.0)].real
    return round(float(real_roots.max()), 5)
# Spin-1/2 operators in the S_z eigenbasis (units of hbar = 1):
# one half times the Pauli matrices.
Sx = 0.5 * np.array([[0, 1], [1, 0]])
Sy = 0.5 * np.array([[0, -1j], [1j, 0]])
Sz = 0.5 * np.array([[1, 0], [0, -1]])
# raising and lowering (ladder) operators built from Sx and Sy
Splus = Sx + 1j * Sy
Sminus = Sx - 1j * Sy
# 2x2 identity, used when embedding single-qubit operators
I = np.eye(2)
def make_higher_d_mat(mat_creation_fn, kwargnames, kwargvalues,
                      qubit_num=1, **kwargs):
    """Tabulate a matrix-building function over a grid of keyword values.

    For every combination of values (one per name in kwargnames, drawn from
    the parallel lists in kwargvalues), calls
    mat_creation_fn(qubit_num=..., **fixed kwargs, **grid kwargs) and stores
    the resulting (2**qubit_num, 2**qubit_num) matrix.

    Returns a complex array of shape
    (len(kwargvalues[0]), ..., len(kwargvalues[-1]), 2**qubit_num, 2**qubit_num).
    """
    shape_extension = [np.array(l).shape[0] for l in kwargvalues]
    new_shape = shape_extension + [2**qubit_num, 2**qubit_num]
    new_mat = np.zeros(new_shape, dtype=complex)
    # nditer over a dummy array of the grid shape yields every index tuple
    it = np.nditer(np.zeros(shape_extension), flags=['multi_index'])
    # start from the fixed kwargs; grid kwargs are filled in per iteration
    kw_d = dict.fromkeys(kwargnames)
    kw_d.update(kwargs)
    kw_d['qubit_num'] = qubit_num
    while not it.finished:
        for i, j in enumerate(list(it.multi_index)):
            kw_d[kwargnames[i]] = kwargvalues[i][j]
        new_mat[it.multi_index] = mat_creation_fn(**kw_d)
        it.iternext()
    return new_mat
| true |
02c2e4ce97c7e341b08af2891753308d6fb21f86 | Python | twiblek/2020-2-Atom-QA-Python-V-Kuznetsov | /HW1/code/test/test_set.py | UTF-8 | 764 | 3.546875 | 4 | [] | no_license | import pytest
def test_add():
    # adding a new element must leave exactly the two distinct members
    letters = set('a')
    letters.add('b')
    assert letters == {'a', 'b'}
def test_copy():
    original = set('copy')
    duplicate = original.copy()
    # copy() must yield an equal but *distinct* object: an aliasing bug
    # would pass the equality check alone, so also assert non-identity
    assert duplicate == original
    assert duplicate is not original
# parametrized over (pair of strings, expected shared characters)
@pytest.mark.parametrize("input_val, expected", [(['123', '456'], ''), (['2345', '4567'], '45')])
def test_intersection(input_val, expected):
    # intersection of the two strings' character sets
    first = set(input_val[0])
    second = set(input_val[1])
    assert first.intersection(second) == set(expected)
class TestSet:
    """Set-method tests grouped in a class (collected by pytest by naming convention)."""
    def test_pop_len(self):
        # pop() removes an arbitrary element, so only the length is checked
        first = set('len')
        initial_length = len(first)
        first.pop()
        assert len(first) == initial_length - 1
    def test_remove(self):
        # remove() (unlike discard()) raises KeyError for a missing element
        first = set('remove')
        with pytest.raises(KeyError):
            # NOTE(review): the assert is redundant - remove() returns None;
            # the pytest.raises context alone verifies the behaviour
            assert first.remove('b')
| true |
3f54e37da58525fe7e16785fc59022a4f9c21b28 | Python | npmcdn-to-unpkg-bot/data_analytics | /sql/sql.py | UTF-8 | 658 | 2.65625 | 3 | [] | no_license |
# Creating a table
# NOTE(review): scratch notes - the CREATE TABLE statements below are not
# valid as written: "group" is a reserved word, FeatureValue declares a
# PRIMARY KEY twice (on an undefined "id" column and on value_id) and has a
# trailing comma before the closing parenthesis.
Create table FeatureGroupInstance (
featgrp_id integer PRIMARY KEY,
window int, T date,
Symbol varchar );
Create table Feature (
feat_id integer PRIMARY KEY,
name varchar,
group varchar);
Create table FeatureValue (
Primary key (id),
value decimal,
value_id integer PRIMARY KEY,
feat_id integer REFERENCES Feature (feat_id),
featgrp_id integer REFERENCES FeatureGroupInstance (featgrp_id),
);
# Inserting data
# multi-row VALUES insert
INSERT INTO products (product_no, name, price) VALUES
(1, 'Cheese', 9.99),
(2, 'Bread', 1.99),
(3, 'Milk', 2.99);
# Update
# set one employee's salary, selected by primary key
UPDATE COMPANY SET SALARY = 15000 WHERE ID = 3;
| true |
cf861af7247f726fbd1009051d65db6dae263495 | Python | YashashreeWaranashiwar/Udacity_Data_Engineering | /Build Data Lake and ETL Pipelines/etl.py | UTF-8 | 8,696 | 2.625 | 3 | [] | no_license | #Importing directories
import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format
from pyspark.sql.types import TimestampType
import psycopg2
#Reading configuration file dl.cfg
config = configparser.ConfigParser()
config.read('dl.cfg')
#Get AWS IAM user's Key_ID and Access_Key
# NOTE(review): the credentials are exported to the process environment so
# the hadoop-aws S3 connector can pick them up; keep dl.cfg out of version
# control.
os.environ['AWS_ACCESS_KEY_ID']=config.get("AWS","AWS_ACCESS_KEY_ID")
os.environ['AWS_SECRET_ACCESS_KEY']=config.get("AWS","AWS_SECRET_ACCESS_KEY")
#Creating spark session
def create_spark_session():
    """Return a SparkSession configured with the hadoop-aws package for S3 access.

    getOrCreate() reuses an existing session if one is already active.
    """
    builder = SparkSession.builder.config(
        "spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.5"
    )
    return builder.getOrCreate()
def process_song_data(spark, input_data, output_data):
    """
    Build the songs and artists dimension tables from the song dataset.

    The function process_song_data has three arguments.
    spark : Sparksession parameter
    input_data : The input path file which needs to be read and processed.
    output_data : The output location where parquet file needs to be written.
    """
    # get filepath to song data file
    print('Getting song data file path')
    # NOTE(review): the bucket is hard-coded here, so the input_data argument
    # is effectively ignored by this function - confirm before reusing
    song_data = "s3a://udacity-dend/song_data/*/*/*/*.json"
    # the bare triple-quoted strings below are no-op statements used as
    # section markers, not docstrings
    """Step 1 : Loading Dimension Table Songs"""
    # read song data file
    print('Reading song data file')
    df_song_data = spark.read.json(song_data)
    # create temporary view of dataframe (also used later by process_log_data)
    print('Creating temporary view song_data')
    df_song_data.createOrReplaceTempView("song_data")
    # extract columns to create songs table
    print('Extracting columns from song_data dataframe to create song table')
    songs_table = spark.sql("""
                            SELECT distinct song_id, title, artist_id, year, duration
                            FROM song_data""")
    # write songs table to parquet files partitioned by year and artist
    print('Writting song table to parquet files')
    songs_table.write.mode('overwrite').partitionBy("year", "artist_id").parquet( os.path.join(output_data ,'songs'))
    print('Writing song table to parquet files is completed')
    """Step 2 : Loading Dimension Table Artists"""
    # extract columns to create artists table
    print('Extracting data for Artists table')
    artists_table = spark.sql("""
                            SELECT distinct artist_id,artist_name,artist_location,artist_latitude,artist_longitude
                            FROM song_data""")
    # write artists table to parquet files
    print('Writing artists table to parquet files')
    artists_table.write.mode('overwrite').parquet( os.path.join(output_data ,'artists'))
    print('Writing artists table to parquet files is completed')
def process_log_data(spark, input_data, output_data):
    """
    Build the users and time dimension tables and the songplays fact table.

    The function process_log_data has three arguments.
    spark : Sparksession parameter
    input_data : The input path file which needs to be read and processed.
    output_data : The output location where parquet file needs to be written.

    NOTE(review): the songplays query joins against the "song_data" temp
    view, which is created by process_song_data - that function must run
    first in the same Spark session.
    """
    # get filepath to log data file
    print('Getting log data file path')
    log_data = os.path.join(input_data, "log_data/*/*/*.json")
    # read log data file
    print('Reading log data file')
    df_log_data = spark.read.json(log_data)
    # filter by actions for song plays
    print('Filtering log data with action of NextSong')
    df_log_data_filtered = df_log_data[df_log_data.page=="NextSong"]
    print('Creating temporary view of filtered log data.')
    df_log_data_filtered.createOrReplaceTempView("log_data_ftr")
    """Step 3 : Loading Dimension Table Users"""
    # extract columns for users table
    print('Extracting columns for Users table')
    users_table = spark.sql("""
                            SELECT distinct userId,firstName,lastName,gender,level
                            FROM log_data_ftr """)
    # write users table to parquet files
    print('Writing users table to parquet files')
    users_table.write.mode('overwrite').parquet( os.path.join(output_data ,'users'))
    print('Writing users table to parquet files is completed')
    """Step 4 : Loading Dimension Table Time"""
    # create timestamp column from original timestamp column (ts is in ms)
    get_timestamp = udf(lambda x: datetime.fromtimestamp((x/1000.0)), TimestampType())
    # Add new column to dataframe with new timestamp column calculated with the help of above udf
    print('Timestamp conversion')
    df_log_data_filtered = df_log_data_filtered.withColumn("newts", get_timestamp(df_log_data_filtered.ts))
    # create datetime column from original timestamp column
    get_datetime = udf(lambda x: datetime.fromtimestamp((x/1000.0)).strftime('%Y-%m-%d %H:%M:%S'))
    # Add new column to dataframe with new datetime calculated with the help of above udf
    print('Datetime conversion')
    df_log_data_filtered = df_log_data_filtered.withColumn("datetime", get_datetime(df_log_data_filtered.ts))
    # extract columns to create time table
    print('Creating temporary view of time_data.')
    df_log_data_filtered.createOrReplaceTempView("time_data")
    print('Extracting and creating columns for time table')
    time_table = spark.sql("""
                SELECT ts as start_time,
                        hour(datetime) as hour,
                        dayofmonth(datetime) as day,
                        weekofyear(datetime) as week,
                        month(datetime) as month,
                        year(datetime) as year,
                        weekday(datetime) as weekday
                FROM time_data """)
    # write time table to parquet files partitioned by year and month
    print('Writing time table to parquet files partitioned by year and month.')
    time_table.write.mode('overwrite').partitionBy("year", "month").parquet( os.path.join(output_data ,'time'))
    print('Writing time table to parquet files is completed')
    """Step 5 : Loading Fact Table Songplays"""
    # extract columns from joined song and log datasets to create songplays table
    print('Extracting and creating columns for Songplays table')
    songplays_table = spark.sql("""
            SELECT
            ROW_NUMBER() OVER (ORDER BY start_time,user_id,level,song_id,artist_id,session_id,location,user_agent) as songplay_id
            ,start_time
            ,month
            ,year
            ,user_id
            ,level
            ,song_id
            ,artist_id
            ,session_id
            ,location
            ,user_agent
            from
            (select distinct
            to_timestamp(log.ts/1000) as start_time
            ,month(to_timestamp(log.ts/1000)) as month
            ,year(to_timestamp(log.ts/1000)) as year
            ,log.userid as user_id
            ,log.level as level
            ,song.song_id as song_id
            ,song.artist_id as artist_id
            ,log.sessionid as session_id
            ,log.location as location
            ,log.useragent as user_agent
            FROM log_data_ftr log
            left join song_data song
            on log.song = song.title
            and log.length = song.duration
            ) log_join_song
            """)
    # write songplays table to parquet files partitioned by year and month
    print('Writing songplays table to parquet files partitioned by year and month')
    songplays_table.write.mode('overwrite').partitionBy("year","month").parquet(os.path.join(output_data,'songplays'))
    print('Writing songplays table to parquet files is completed')
def main():
    """Run the ETL pipeline: create a Spark session, then build the song
    and log tables on S3.

    Failures are reported and re-raised. There is no database connection in
    this module, so the original ``conn.rollback()`` calls referenced an
    undefined name and would themselves have raised NameError inside the
    exception handlers, masking the real error.
    """
    # source and destination S3 prefixes
    input_data = "s3a://udacity-dend/"
    output_data = "s3a://ywaranass3bucket/DataLake-Project/"
    try:
        print('Executing function create spark session')
        spark = create_spark_session()
        print('Spark session has been created successfully')
    except Exception as e:
        # spark failures are not psycopg2.Error (the original's choice);
        # report and propagate instead of swallowing
        print("error")
        print(e)
        raise
    try:
        print('Excecuting function process_song_data')
        process_song_data(spark, input_data, output_data)
        print('Function process_song_data executed successfully')
    except Exception as e:
        print("error")
        print(e)
        raise
    try:
        print('Excecuting function process_log_data')
        process_log_data(spark, input_data, output_data)
        print('Function process_log_data executed successfully')
    except Exception as e:
        print("error")
        print(e)
        raise

if __name__ == "__main__":
    main()
| true |
48c348b39c58fd07514a0aab22b7dd5911341422 | Python | rmawb/ProjectEuler | /problem4/problem4.py | UTF-8 | 2,743 | 4.28125 | 4 | [] | no_license | """
Given some number n, finds the largest integer palindrome made from the products of
two numbers less than or equal to n. Shown with n = 999.
Original problem found here:
https://projecteuler.net/problem=4
"""
def palfinder(pal, loops):
    """Given an integer palindrome, returns next lowest integer palindrome.

    Works on the decimal string: decrements the first non-zero digit at or
    left of the centre, mirrors the change onto the right half, and turns
    the skipped-over '0' digits into '9' on both sides. `loops` is an
    iteration counter threaded through for reporting; returns the tuple
    (next_palindrome, loops).

    NOTE(review): when the decrement crosses a digit-length boundary the
    leading zero is simply dropped (e.g. 1001 -> 990, skipping 999), so
    some palindromes are missed at those boundaries - acceptable for the
    Euler-4 search this supports, but confirm before reusing elsewhere.
    """
    length = len(str(pal))
    pivot = 0
    index = 0
    # even digit count: the centre lies between the two middle digits
    if length % 2 == 0:
        while True:
            loops += 1
            pivot = int(length/2) - 1 - index
            pal = str(pal)
            if pal[pivot] != '0':
                # decrement the pivot digit, then mirror it on the right side
                pal = pal[:pivot] + str(int(pal[pivot])-1) + pal[pivot+1:]
                pal = pal[:len(pal)-pivot-1] + pal[pivot] + pal[len(pal)-pivot:]
                for i in range(index):
                    loops += 1
                    # digits that were '0' (skipped over) become '9' on both sides
                    pal = pal[:pivot + i + 1] + '9' + pal[pivot + i + 2:]
                    pal = pal[:len(pal)-pivot-2-i] + '9' + pal[len(pal)-pivot-i-1:]
                return int(pal), loops
            else:
                # pivot digit is 0: move one digit further left and retry
                index += 1
    else:
        # odd digit count: the centre is a single middle digit
        while True:
            loops += 1
            pivot = int(length // 2) - index
            pal = str(pal)
            if pal[pivot] != '0':
                pal = pal[:pivot] + str(int(pal[pivot])-1) + pal[pivot+1:]
                pal = pal[:len(pal)-pivot-1] + pal[pivot] + pal[len(pal)-pivot:]
                for i in range(index):
                    loops += 1
                    pal = pal[:pivot + i + 1] + '9' + pal[pivot + i + 2:]
                    pal = pal[:len(pal)-pivot-2-i] + '9' + pal[len(pal)-pivot-i-1:]
                return int(pal), loops
            else:
                index += 1
def highpalfinder(hnum):
    """
    Finds largest integer palindrome that can be made as a product from two integers
    less than or equal to given integer.

    Returns the tuple (factor1, factor2, palindrome, loops), where loops is
    the iteration counter used for reporting.
    """
    loops = 0
    # small inputs: brute-force every candidate below hnum**2, largest first
    if hnum <= 45:
        for x in range(hnum**2, 0, -1):
            loops += 1
            if str(x) == str(x)[::-1]:
                for y in range(hnum, 0, -1):
                    loops += 1
                    if x % y == 0 and x // y <= hnum:
                        return x // y, int(x / (x//y)), x, loops
    # large inputs: find the biggest palindrome below hnum**2 ...
    for x in range(hnum**2, 0, -1):
        loops += 1
        if str(x) == str(x)[::-1]:
            highpal = x
            break
    # ... then walk palindromes downwards via palfinder, testing factors
    while True:
        loops += 1
        # NOTE(review): only divisors in the top ~10% (hnum down to
        # hnum - hnum//10) are tried each round - presumably sufficient for
        # this search; confirm before relying on it for other inputs
        for y in range(hnum, hnum - int(hnum/10) , -1):
            loops += 1
            if highpal % y == 0 and highpal // y <= hnum:
                return highpal // y, int(highpal / (highpal//y)), highpal, loops
        highpal, loops = palfinder(highpal, loops)
# The original also invoked palfinder(2000002, 0) here and discarded the
# result; palfinder is a pure function, so that dead call has been removed.
f1, f2, pal, loops = highpalfinder(999)
print('Palindrome', str(pal) + ',', 'which is', f1, '*', str(f2) + ',', 'was found in', loops, 'loops.')
468414475fcffd211bbe6ff804520a1840baeb12 | Python | jkfran/sublime-text-3-my-config | /Packages/User/ShowPathInStatus.py | UTF-8 | 1,248 | 3 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''
Plugin for Sublime Text to show the path of the current
file in the status bar. The path start from the project
directory or user home directory.
You can also copy this path to the clipboard with the
copy_path command
'''
import sublime
import sublime_plugin
from os.path import expanduser
__author__ = "Francisco Jesús Jiménez Cabrera (jkfran)"
__email__ = "jkfran@gmail.com"
class ShowPathInStatus(sublime_plugin.EventListener):
def get_short_path(self, view):
path = view.file_name()
if path:
if view.window().project_data():
for folder in view.window().project_data()['folders']:
project_path = folder.get('path')
if path.startswith(project_path):
path = path[len(project_path):].lstrip('/')
break
home_path = expanduser("~")
if path.startswith(home_path):
path = path[len(home_path):].lstrip('/')
return path
def on_activated(self, view):
view.set_status('_filename', self.get_short_path(view) or '')
class CopyPathCommand(sublime_plugin.TextCommand):
def run(self, edit):
sublime.set_clipboard(self.view.get_status("_filename"))
| true |
4ba904452b12c9ebecff2162f2d78ff1339cd492 | Python | preetising/Files | /w3-6.py | UTF-8 | 386 | 3.71875 | 4 | [] | no_license | '''Write a Python program to read a file line by line store it into a variable.'''
# my_file=open("w3-6.txt","r")
# a=my_file.read()
# print(a)
# def file_read(fname):
# with open (fname, "r") as myfile:
# data=myfile.readlines()
# print(data)
# file_read('test.txt')
with open("w3-6.txt","r")as my_file:
a=my_file.read()
print(a,end="")
my_file.close()
| true |
cb3d9ccf712253bd01273a6a162c1b997c68dec0 | Python | mariganeshkumar/EEG_Seizure | /src/library/eval_scripts/sys_tools/nedc_text_tools.py | UTF-8 | 2,450 | 2.953125 | 3 | [
"CC0-1.0"
] | permissive | #!/usr/bin/env python
# file: $nedc_nfc/class/python/nedc_sys_tools/nedc_text_tools.py
#
# revision history:
# 20170716 (JP): Upgraded to using the new annotation tools.
# 20170709 (JP): Initial version.
#
# usage:
# import nedc_text_tools as ntt
#
# This file contains some useful Python functions and classes that are used
# in the nedc scripts.
#------------------------------------------------------------------------------
# import required system modules
#
# import required NEDC modules
#
#------------------------------------------------------------------------------
#
# define important constants
#
#------------------------------------------------------------------------------
# define a constant that controls the amount of precision used
# to check floating point numbers
#
MAX_PRECISION = int(10)
# define a constant used to indicate a null choice
#
NULL_CLASS = "***"
#------------------------------------------------------------------------------
#
# functions are listed here
#
#------------------------------------------------------------------------------
# function: nedc_first_substring
#
# arguments:
# strings: list of strings (input)
# substring: the substring to be matched (input)
#
# return: the index of the match in strings
#
# This function finds the index of the first string in strings that
# contains the substring. This is similar to running strstr on each
# element of the input list.
#
def first_substring(strings_a, substring_a):
return next(i for i, string in enumerate(strings_a) if \
substring_a in string)
#
# end of function
# function: nedc_first_string
#
# arguments:
# strings: list of strings (input)
# substring: the string to be matched (input)
#
# return: the index of the match in strings
#
# This function finds the index of the first string in strings that
# contains an exact match. This is similar to running strstr on each
# element of the input list.
#
def first_string(strings_a, tstring_a):
return next(i for i, string in enumerate(strings_a) if \
tstring_a == string)
#
# end of function
#
# end of file
| true |
343442215180d7aff1a47cc22b410888b81ce85e | Python | OrelSokolov/NLM | /vk/handlers/wikihandler.py | UTF-8 | 614 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
#По полученным параметрам строит вики-страницу и сохраняет ее под нужным именем.
##
##
## ДОРАБОТАТЬ!!!!!!! ДОРАБОТАТЬ!!!!!
##
def buildPage(photo="vk.com/id0", url="vk.com/id0", title="Title", authors="authors", descr="empty"):
'''Строит страницу по полученным данным.'''
page='''{|
|-
|[[photo-'''+photo+'''|200px;nolink| Обложка]]
|'''+descr+'''<br/>
|-
!['''+url+'''|Скачать]
|'''+'<b>'+title+'</b>'+authors+'''
|}'''
return page
| true |
a8dbde6cc8198fcd5cdd489436b6147f9950741d | Python | relsqui/wanderer | /wanderer.py | UTF-8 | 432 | 2.625 | 3 | [] | no_license | #!/usr/bin/python
import pygame
from wanderer import game
pygame.init()
print "Welcome! Initializing game ..."
game = game.Game()
try:
game.load()
except IOError:
print "No savefile found or not readable, starting new game."
game.new()
game.init_controls()
game.confirm_start()
while not game.finished:
game.loop()
pygame.quit()
# ^ not usually necessary, but some interpreters hang without it
print "Goodbye!"
| true |
e1a43da6aa24a53687c8e10acbe435a72778183e | Python | TheCDC/Musings | /advent_of_code/2021/day5_2.py | UTF-8 | 1,337 | 2.8125 | 3 | [] | no_license | from functools import reduce
from typing import List, Tuple
def sign(x: int):
if x == 0:
return 0
return -1 if x < 0 else 1
with open("inputs/day5.txt") as f:
lines = f.readlines()
linepoints = [
tuple(tuple(list(map(int, x.split(",")))[:2])[:2] for x in line.split(" -> "))
for line in lines
]
maxs = reduce(
lambda a, b: (max(a[0], b[0]), max(a[1], b[1])),
[j for i in linepoints for j in i],
linepoints[0][0],
)
# print(linepoints)
print(maxs)
heightmap = [[0 for _ in range(maxs[0] + 1)] for _ in range(maxs[1] + 1)]
def apply_lines(
heightmap: List[List[int]],
lines: List[Tuple[Tuple[int, int], Tuple[int, int]]],
depth=0,
):
if len(lines) == 0:
return heightmap
line = lines[0]
pos = line[0]
while True:
heightmap[pos[1]][pos[0]] += 1
diff = (
sign(line[1][0] - pos[0]),
sign(line[1][1] - pos[1]),
) # needs to include ends
pos = (pos[0] + diff[0], pos[1] + diff[1])
condition = [d for d in diff if d != 0]
if not condition:
break
return apply_lines(heightmap, lines[1:], depth=depth + 1)
state_final = apply_lines(heightmap, linepoints)
matching_cells = [col for row in state_final for col in row if col > 1]
print(len(matching_cells))
# 20299 correct
| true |
f694102e8f7df0fc8198c235e2c945bbc95891ff | Python | zackmacharia/PaloAltoNetworks | /panos_upgrade/main.py | UTF-8 | 1,930 | 2.515625 | 3 | [] | no_license | import time
import fw_upgrade
def main():
"""Uses functions in the fw_upgrade module"""
fw_upgrade.download_latest_content()
fw_upgrade.get_cdl_jobid()
print('Downloading content. Please wait...')
while True:
time.sleep(15) # used to reduce the number of GET requests sent
status = fw_upgrade.show_cdl_jobid_status()
if status == 'FIN':
print('Download complete.')
break
print('Starting content install. Please wait...')
time.sleep(3)
fw_upgrade.install_latest_content()
fw_upgrade.get_cinstall_j_id()
while True:
time.sleep(60)
status = fw_upgrade.show_cinstall_j_id_status()
if status == 'FIN':
print('Content install complete.')
break
fw_upgrade.check_now()
fw_upgrade.download_software()
print('Downloading PANOS software. Please wait...')
fw_upgrade.get_sdl_jobid()
while True:
time.sleep(15) # used to reduce the number of GET requests sent
status = fw_upgrade.show_sdl_jobid_status()
if status == 'FIN':
print('Successfully downloaded PANOS software.')
break
print('Starting PANOS software install. Please wait...')
fw_upgrade.install_software()
print('Installing PANOS software. Please wait...')
time.sleep(300) # refactor this line to use a while loop
fw_upgrade.get_sw_install_jobid()
while True:
time.sleep(15) # used to reduce the number of GET requests sent
status = fw_upgrade.show_sw_install_jobid_status()
if status == 'FIN':
print('Successfully installed PAONS software.')
break
print('Rebooting device. Please wait...')
fw_upgrade.device_reboot()
print('Device going down for a reboot')
time.sleep(5)
print('Starting a ping to device every 30 seconds.')
time.sleep(5)
fw_upgrade.ping_device()
main()
| true |
3f3bb83a6a44b20e3ddd6010978af1694bdb312d | Python | DLTroyer/ML | /ml_3.py | UTF-8 | 1,825 | 3.046875 | 3 | [] | no_license | from sklearn.datasets import fetch_california_housing
cali = fetch_california_housing() #bunch object
#print(cali.DESCR)
#print(cali.data.shape)
#print(cali.target.shape)
#print(cali.feature_names)
import pandas as pd
pd.set_option("precision",4)
pd.set_option("max_columns", 9)
pd.set_option("display.width", None)
cali_df = pd.DataFrame(cali.data, columns=cali.feature_names)
cali_df["MedHouseValue"] = pd.Series(cali.target)
#print(cali_df.head())
sample_df = cali_df.sample(frac=.1, random_state=17)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=2)
sns.set_style("whitegrid")
for feature in cali.feature_names:
plt.figure(figsize=(8,4.5))
sns.scatterplot(
data=sample_df,
x=feature,
y="MedHouseValue",
hue="MedHouseValue",
legend=False,
)
#plt.show()
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
'''
split
LinearRegression
fit
compare the split to the regression
'''
x_train, x_test, y_train, y_test = train_test_split(
cali.data, cali.target, random_state=11
)
lr = LinearRegression()
lr.fit(X=x_train,y=y_train)
predicted = lr.predict(x_test)
expected = y_test
print(f"predicted:{predicted[::5]} expected: {expected[::5]}")
df = pd.DataFrame()
df["Expected"] = pd.Series(expected)
df["Predicted"] = pd.Series(predicted)
import matplotlib.pyplot as plt2
figure = plt.figure(figsize=(9,9))
axes = sns.scatterplot(
data=df,
x="Expected",
y="Predicted",
hue="Predicted",
palette="cool",
legend=False
)
start = min(expected.min(), predicted.min())
end = max(expected.max(), predicted.max())
print(start)
print(end)
axes.set_xlim(start,end)
axes.set_ylim(start,end)
line = plt2.plot([start,end], [start,end],"k--")
plt2.show() | true |
7e0862825fac574a73a70eada4db2f2b7b6be9ec | Python | fhahn/luna | /luna/modules/patterns.py | UTF-8 | 7,934 | 3.125 | 3 | [
"BSD-3-Clause"
] | permissive | """
Lua pattern matcher based on a NFA
inspired by
http://swtch.com/~rsc/regexp/regexp1.html
"""
class State(object):
pass
class StateMatch(State):
def __init__(self):
pass
def clone(self, seen):
return StateMatch()
class StateCharRange(State):
def __init__(self, c1, c2, out):
self.start = ord(c1)
self.stop = ord(c2)
self.out = out
def match(self, c):
return ord(c) >= self.start and ord(c) <= self.stop
class StateChar(StateCharRange):
def __init__(self, c, out):
StateCharRange.__init__(self, c, c, out)
class StateDot(StateCharRange):
def __init__(self, out):
StateCharRange.__init__(self, ' ', ' ', out)
def match(self, c):
return True
class StateSplit(State):
def __init__(self, out, out2):
self.out = out
self.out2 = out2
def find2(expr, string, start):
assert isinstance(start, int)
if start < 0:
start = len(string) + start
# if negative offset is bigger than length of string
# start at the beginning
if start < 0:
start = 0
start = int(start)
found = False
i = start
while i < len(string):
match = False
valid = True
j = i
state = expr
backtrack = []
while valid and not match and (j < len(string) or len(backtrack) > 0):
if j >= len(string) and len(backtrack) > 0:
state, j = backtrack.pop()
if isinstance(state, StateCharRange):
if not state.match(string[j]):
if len(backtrack) == 0:
valid = False
else:
state, j = backtrack.pop()
else:
state = state.out
j += 1
elif isinstance(state, StateMatch):
match = True
elif isinstance(state, StateSplit):
backtrack.append((state.out2, j))
state = state.out
else:
valid = False
if j == len(string):
if (isinstance(state, StateMatch) or
(isinstance(state, StateSplit) and
isinstance(state.out2, StateMatch))):
match = True
if match:
found = True
yield (i+1, j)
if j > i:
i = j
else:
i += 1
else:
i += 1
if not found:
yield (-1, -1)
SPECIAL_CHARS = {
'a': ('A', 'z'),
's': (' ', ' ')
}
def set_next(state, next_state, seen, propagate=False):
seen.append(state)
if next_state in seen:
return
if isinstance(state, StateSplit):
if state.out is None:
state.out = next_state
elif state.out2 is None:
state.out2 = next_state
elif propagate:
if state.out not in seen:
set_next(state.out, next_state, seen, propagate=propagate)
if state.out2 not in seen:
set_next(state.out2, next_state, seen, propagate=propagate)
else:
if state.out is None:
state.out = next_state
elif propagate:
if state.out not in seen:
set_next(state.out, next_state, seen, propagate=propagate)
T_CHAR = 0
T_DOT = 1
T_CHAR_RANGE = 2
T_STAR = 3
T_OR = 4
T_GROUP = 5
T_PLUS = 6
class Token(object):
def __init__(self, t_type, value, sub_tokens=[], tokens_right=[],
prop=False):
self.type = t_type
self.value = value
self.sub_tokens = sub_tokens
self.tokens_right = tokens_right
self.prop = prop
def clone(self):
cloned_sub = [t.clone() for t in self.sub_tokens]
cloned_right = [t.clone() for t in self.tokens_right]
return Token(
self.type, self.value, sub_tokens=cloned_sub,
tokens_right=cloned_right, prop=self.prop
)
def tokenize(pattern):
tokens = []
i = 0
prop = False
while i < len(pattern):
c = pattern[i]
if ord(c) >= ord('0') and ord(c) <= ord('z'):
tokens.append(Token(T_CHAR, [c]))
elif c == '.':
tokens.append(Token(T_DOT, [c]))
elif c == '%':
if i+1 < len(pattern):
if pattern[i+1] == '%':
tokens.append(Token(T_CHAR, ['%']))
elif pattern[i+1] in SPECIAL_CHARS:
tokens.append(
Token(T_CHAR_RANGE, list(SPECIAL_CHARS[pattern[i+1]]))
)
else:
raise RuntimeError('Invalid pattern')
i += 1
else:
raise RuntimeError('Invalid pattern')
elif c == '*':
if len(tokens) > 0:
prev = tokens.pop()
tokens.append(Token(T_STAR, [], sub_tokens=[prev]))
else:
raise RuntimeError('Invalid pattern')
elif c == '+':
if len(tokens) > 0:
prev = tokens.pop()
tokens.append(Token(T_STAR, [], sub_tokens=[prev]))
tokens.append(prev.clone())
else:
raise RuntimeError('Invalid pattern')
elif c == '|':
tokens_right = tokenize(pattern[i+1:])
return [
Token(T_OR, [], sub_tokens=tokens, tokens_right=tokens_right)
]
elif c == '(':
i_g = i + 1
open_count = 1
close_count = 0
while open_count > close_count and i_g < len(pattern):
if pattern[i_g] == '(':
open_count += 1
elif pattern[i_g] == ')':
close_count += 1
i_g += 1
end = i_g - 1
assert end >= 0
group_str = pattern[(i+1):end]
tokens.append(Token(T_GROUP, [], sub_tokens=tokenize(group_str)))
i += len(group_str) + 2
# Force propagation of next state after group
prop = True
continue
elif c == '{':
count_str = pattern[i+1:].split('}', 1)[0]
count = int(count_str)
prev = tokens.pop()
for j in range(0, count):
tokens.append(prev.clone())
i += len(count_str) + 1
else:
raise RuntimeError('Invalid pattern')
i += 1
tokens[-1].prop = prop
return tokens
def tokens_to_expression(tokens, top=True):
expr = StateChar('c', None)
start = expr
for t in tokens:
new_expr = None
if t.type == T_CHAR:
new_expr = StateChar(t.value[0], None)
elif t.type == T_DOT:
new_expr = StateDot(None)
elif t.type == T_CHAR_RANGE:
new_expr = StateCharRange(t.value[0], t.value[1], None)
elif t.type == T_STAR:
match_expr = tokens_to_expression(t.sub_tokens, top=False)
new_expr = StateSplit(match_expr, None)
set_next(match_expr, new_expr, [], propagate=True)
elif t.type == T_OR:
expr_left = tokens_to_expression(t.sub_tokens, top=False)
expr_right = tokens_to_expression(t.tokens_right, top=False)
new_expr = StateSplit(expr_left, expr_right)
elif t.type == T_GROUP:
new_expr = tokens_to_expression(t.sub_tokens, top=False)
set_next(expr, new_expr, [], propagate=True)
else:
raise RuntimeError('Invalid pattern')
set_next(expr, new_expr, [], propagate=t.prop)
expr = new_expr
if top:
set_next(expr, StateMatch(), [], propagate=True)
return start.out
def compile_re(pattern, plain=False):
tokens = tokenize(pattern)
return tokens_to_expression(tokens)
| true |
ee70435843a47da2c5ca091abed633424389cb30 | Python | fengyu0712/ailcloudTest | /ws/Common/Serials.py | UTF-8 | 3,315 | 2.609375 | 3 | [] | no_license | import os,re
import winsound
import time
import datetime
import serial
import serial.tools.list_ports
class MySerial():
def __init__(self,baud_bate,serialName=None):
plist = list(serial.tools.list_ports.comports())
if len(plist) <= 0:
print("The Serial port can't find!")
else:
plist_0 = list(plist[0])
if serialName is None:
serialName = plist_0[0]
self.serialFd = serial.Serial(serialName, baud_bate, timeout=60)
print("check which port was really used >", self.serialFd.name)
if self.serialFd.isOpen():
print("Serial port open success")
else:
print("Serial port open failed")
def recvCmd(self, pattern,checktime=None):
if checktime is None:
checktime=10
for i in range(checktime*2):
data = self.serialFd.read_all()
data = str(data, encoding="utf8")
if data == "":
time.sleep(0.5)
continue
else:
break
result_data = re.findall(pattern, data)
try:
result_data=result_data[0]
except:
result_data=None
return result_data
def recvCmd2(self, pattern,mode=None,checktime=None):
if checktime is None:
checktime=2
result_data = {}
for i in range(checktime*2):
data = self.serialFd.read_all()
data = str(data, encoding="utf-8")
if isinstance(pattern,dict):
for k in pattern:
result_data0 = re.findall(pattern[k], data)
if result_data0!=[]:
result_data[k]=result_data0[0]
if mode=="OR":
if result_data == {}:
time.sleep(0.5)
continue
else:
break
elif mode=="AND":
if len(result_data)<len(pattern):
time.sleep(0.5)
continue
else:
break
else:
if len(result_data) < len(pattern):
time.sleep(0.5)
continue
else:
break
else:
result_data = re.findall(pattern, data)
try:
result_data=result_data[0]
except:
result_data=None
if result_data is None:
time.sleep(0.5)
continue
else:
break
return result_data
def close(self):
self.serialFd.close()
if __name__=="__main__":
pre_path = "E:\ws\\002M30_36\\002M30_36_010001.wav"
wakeup_path = "E:/ws/test_audio/002M30_36_010003.wav"
pattern0="\"wakeupWord\"\:\"(.*)\"\,\"major"
pattern1 = "\"asr\": \"(.*)\""
pattern ={"pattern0":pattern0,"pattern1":pattern1}
s=MySerial("921600")
winsound.PlaySound(wakeup_path, winsound.SND_FILENAME)
winsound.PlaySound(pre_path, winsound.SND_FILENAME)
# r=s.recvCmd2(pattern1)
r=s.recvCmd(pattern0)
print(r) | true |
09785b1f1f62ce5fb82c27e3e69e261ca676a228 | Python | Kirchenprogrammierer/Cheats | /CD zum Buch "Einstieg in Python"/Programmbeispiele/zahl_hoch.py | UTF-8 | 168 | 3.5625 | 4 | [] | no_license | z = 5 ** 3
print("5 hoch 3 =", z)
z = 5.2 ** 3
print("5.2 hoch 3 =", z)
z = -5.2 ** 3
print("-5.2 hoch 3 =", z)
z = 5.2 ** 3.8
print("5.2 hoch 3.8 =", z)
| true |
5fb82a25b9a1affa77a0994e2add090060d34900 | Python | a2s3ashok/python-code | /firstfrog/list.py | UTF-8 | 239 | 3.46875 | 3 | [] | no_license | #l = ['potato','lemon','salt','tamoto']
'''
l.sort()
l.reverse()
print(l)
'''
#print('abc DEF'.capitalize())
#print('hello world'.capitalize())
#print('world hello'.count())
i = ['red','blue','yellow','green']
#print(i[2])
print(i.pop(3))
| true |
53268fb0714fa768dcd98c0491337b0836569496 | Python | Shaharking/a-star-robots | /kapal/map_helper.py | UTF-8 | 1,694 | 3.109375 | 3 | [] | no_license | import random
from .shape import Shape
import numpy as np
import kapal
def create_blockable_shape(matrix, w, h, shape):
height = len(matrix)
width = len(matrix[0])
x = random.randint(1, np.floor(width - w - 1))
y = random.randint(1, np.floor(height - h - 1))
if shape == Shape.Rectangle:
for i in range(y, y+h):
for j in range(x, x+w):
matrix[i][j] = kapal.inf
try:
if shape == Shape.Triangle:
for loop1 in range(h):
i = y+loop1
for j in range(x, x + loop1):
matrix[i][j] = kapal.inf
except:
print (x, y, w, h)
if shape == Shape.Rhombus:
for loop1 in range(int(h/2)):
i = y+loop1
middle = int((x+w)/2)
for j in range(middle-loop1, middle+loop1):
matrix[i][j] = kapal.inf
if shape == Shape.Trapezoid:
middle = (x+w)/2
extra = w/h
base = w/h
for i in range(y,y+h):
base = base+extra
for j in range(int(middle-base), int(middle+base)):
matrix[i][j] = kapal.inf
return matrix
def generate_rect_obstacle(num_of_obstacles, matrix):
height = len(matrix)
width = len(matrix[0])
obstacle_width_size = (3, int(0.2 * width))
obstacle_height_size = (3, int(0.2 * height))
for loop in range(num_of_obstacles):
choice = random.choice(list(Shape))
w = random.randint(obstacle_width_size[0], obstacle_width_size[1])
h = random.randint(obstacle_height_size[0], obstacle_height_size[1])
matrix = create_blockable_shape(matrix,w,h,choice)
return matrix | true |
0af12c11e9ab0b1c8e6fcc50b251d9a267fb5cf3 | Python | Mohansmsc/MohanGuvi | /raw.py | UTF-8 | 189 | 3.34375 | 3 | [] | no_license | f=int(raw_input())
fac=int(raw_input())
if(f<0):
print('cant find factorial of given number')
elif(f==0):
print('factorial of a 0 is 1')
else:
for i range(1,f+1):
fac=i*fac
print(fac)
| true |
5d80601f39596ff8886545c479868f7392d6910f | Python | mzp/japan-postal-code | /makejsonpdata.py | UTF-8 | 388 | 2.625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# dataフォルダ内の郵便番号データをJSONP形式にしてzipdataフォルダ内に保存します
import glob
for file in glob.glob('data/*.json'):
print file, '-'*20
f = open('zipdata/' + file[5:-2], "w")
f.write("zipdata(")
for line in open(file, 'r'):
f.write(line)
f.write(");\n")
f.close()
| true |
bb6d79b69c5d70d36d58a7a8478c4eac10a0d607 | Python | bideeen/Web-Development---Udacity | /Module 1/Programming Foundation in Python/take_break.py | UTF-8 | 338 | 3.46875 | 3 | [] | no_license | # step 1 : Wait for two hours
# step 2 : Open your browser
# step 3 : reapeat Step 1 and 2, 3 times
import time
import webbrowser
count = 1
while (count <= 3):
# makes the program wait for 10 seconds
time.sleep(10)
# prompt the web browser
webbrowser.open('https://www.youtube.com')
# Increment
count = count + 1 | true |
10583f7165d23bb655fb8c03cf1f28f8fd5b4af9 | Python | gorkem-oktay/Singleton-Pattern | /src/equipments/weapons/i_weapon.py | UTF-8 | 543 | 3.015625 | 3 | [] | no_license | from ..i_equipment import IEquipment
class IWeapon(IEquipment):
def __init__(self):
super().__init__()
self.__damage = 0
self.__behaviour = None
def set_damage(self, damage):
self.__damage = damage
def get_damage(self):
return self.__damage
def set_behaviour(self, behaviour):
self.__behaviour = behaviour
def get_behaviour(self):
return self.__behaviour
def calculate_damage(self):
return self.get_behaviour().calculate_damage(self.get_damage())
| true |
627ea2c1f85ae76434fd93d5d984d6969f75ea09 | Python | njhazelh/Nearby | /webapi/webapi/resources/observations.py | UTF-8 | 1,787 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | from bottle import request, response
from util.webshared import Message, Error, secure
from data.db_models import Observation, Device
from util.validation import validate_mac, validate_timestamp
from sqlalchemy.orm.exc import NoResultFound
@secure()
def record_observation(db):
"""
Record that a MAC was seen by a device belonging to the current user.
JSON body must include:
{
"timestamp": <datetime string>, // eg. "2016-04-27 12:47"
"mac": <string>, // MAC seen "ab:cd:ef:12:34:56:78:90"
}
:param db: The database session
:returns: Message indicating MAC recognition on success or Error JSON on
parse failure.
"""
user = request.environ.get('user_info')
if user is None:
raise RuntimeError("record_observation should always have user_info")
# Get JSON body
data = request.json
if data is None:
response.status = 400
return Error("Missing JSON body").json
# Validate timestamp and mac
if 'timestamp' not in data or not validate_timestamp(data['timestamp']):
response.status = 400
return Error("Missing or invalid 'timestamp'").json
if 'mac' not in data or not validate_mac(data['mac']):
response.status = 400
return Error("Missing or invalid 'mac'").json
timestamp = data.get('timestamp')
mac = data.get('mac')
# Check if the mac is registered
try:
device = db.query(Device).filter(Device.mac == mac).one()
except NoResultFound:
return Message("MAC not recognized").json
# Send the observation to the database
observation = Observation(
timestamp=timestamp, device_id=device.id, user_id=user.id)
db.add(observation)
db.commit()
return Message("MAC recognized").json
| true |
576f86dbe89e210bd947f118f5dd2663e04cefe7 | Python | bowenzhao123/joint_training | /src/dataConstructor/dataProcessor.py | UTF-8 | 5,615 | 2.703125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2018/9/3 7:47 PM
# @Author : ZHAOBOWEN467@pingan.com.cn
# @File : dataProcessor.py
# @Software: PyCharm
# @desc:
import pandas as pd
import pickle
import numpy as np
from keras.utils import to_categorical
import jieba
from src.dataConstructor.vocbaBuilder import VocabBuilder
import os
from config import *
from src.util.util import readData
class DataProcessor(object):
"""
数据预处理,对训练集和测试集的数据进行onehot编码,label转化为哑变量
Attributes:
vocabBuidMinCount: 可以作为登录词的最小词频
wordVocabPath: 基于词的词典保存路径
charVocabPath: 基于字的词典保存路径
senMaxLen: embedding最大长度
"""
def __init__(self):
self.vocabBuidMinCount = sysParamSetting._wordVocabPath
self.wordVocabPath = sysParamSetting._wordVocabPath
self.charVocabPath = sysParamSetting._charVocabPath
self.senMaxLen = sysParamSetting._senMaxLen
# 调用字典构造模块
vocabInitializer = VocabBuilder()
if not os.path.exists(self.wordVocabPath): # 如果没有字典,则构造字典
vocabInitializer.vocabBuild()
# 载入字典
self.word2id, self.wordVocabSize = vocabInitializer.loadVocab(self.wordVocabPath)
self.char2id, self.charVocabSize = vocabInitializer.loadVocab(self.charVocabPath)
def sentence2id(self,data,lookUpTable):
"""
给句子里的每个词,都找到word2id里对应的序列标号
Args:
data: 预处理的文本
lookUpTable: 字典
Returns: 句子onehot编码
"""
sentenceID = []
for i in range(data.shape[0]):
_tmp = []
sent = data[i]
for word in sent:
if word in [',', '。', '.', '?', '!', ' ']:
continue
if word.isdigit():
word = '<NUM>'
elif ('\u0041' <= word <= '\u005a') or ('\u0061' <= word <= '\u007a'):
word = '<ENG>'
if word not in self.word2id:
word = '<UNK>'
_tmp.append(lookUpTable[word])
sentenceID.append(_tmp)
return sentenceID
def padSequences(self, sequences, pad_mark=0):
"""
如果句子长度不足self.senMaxLen,则补0。如果句子大于self.senMaxLen,则截取
Args:
sequences: onehot编码句子
pad_mark: 填补数字
Returns:
二维列表[字典最大长度,最大长度]
"""
seq_list, seq_len_list = [], []
for seq in sequences:
seq = list(seq)
seq_ = seq[:self.senMaxLen] + [pad_mark] * max(self.senMaxLen - len(seq), 0)
seq_list.append(seq_)
seq_len_list.append(min(len(seq), self.senMaxLen))
return seq_list, seq_len_list
def genW2VDataforModelInput(self,path):
""" 生成网络接收的训练数据,句子长度list和类标
Args:
path: 文本路径
Returns: onehot编码文本, 哑变量label
"""
sequenceData = readData(path)
text1 = self.sentence2id(np.array(sequenceData['text1']), self.word2id)
text1WordID, text1SeqLenList = self.padSequences(text1)
text2 = self.sentence2id(np.array(sequenceData['text2']), self.word2id)
text2WordID, text2SeqLenList = self.padSequences(text2)
return np.asarray(text1WordID), np.asarray(text2WordID), sequenceData['label']
def genCharDataforModelInput(self,path):
"""
生成基于字的网络接收的训练数据,句子长度list
Args:
path: 文本路径
Returns: onehot编码文本
"""
sequenceData = readData(path)
charID1 = self.sentence2id(np.array(sequenceData['text1']),self.char2id)
charID1, text1SeqLenList = self.padSequences(charID1)
charID2 = self.sentence2id(np.array(sequenceData['text2']),self.char2id)
charID2, text1SeqLenList = self.padSequences(charID2)
return np.asarray(charID1),np.asarray(charID2)
def trainDataSetCreator(self, sourcefile):
""" 生成模型训练的输入数据
Args:
sourcefile: 源文件
Return:
基于词向量的onehot编码
基于字向量的onehot编码
数据label
"""
text1, text2, label = self.genW2VDataforModelInput(sourcefile)
char1, char2 = self.genCharDataforModelInput(sourcefile)
label = to_categorical(label,num_classes=2)
return text1,text2, label, char1, char2
def genCombineData(self, textA, textB, CharA, CharB, slabel, index):
train1 = textA[index]
char1 = CharA[index]
train2 = textB[index]
char2 = CharB[index]
starget = slabel[index]
# 在column维度上拼接
# target = np.concatenate([starget,tlabel],axis=-1)
# 0号为source, 1号为target,在column维度上拼接
taskLable1 = np.array([0] * len(index))
taskLabel1 = to_categorical(taskLable1, 2)
taskLabel2 = np.array([1] * len(index))
taskLabel2 = to_categorical(taskLabel2, 2)
# 与原先的分类标签在column维度拼接,先任务类别标签tasklabel,再特定任务下的分类标签label
# trainLabel = np.concatenate([taskLabel,target], axis=-1)
return train1, train2, char1, char2, starget, taskLabel1, taskLabel2
| true |
cb736b6ae0db868f1a79571dc1d9d6bc0530a8c5 | Python | jigsawcoder/Data-Science-Class_AE | /Counting_Substring.py | UTF-8 | 185 | 2.9375 | 3 | [] | no_license | T = int(input())
for x in range(T):
N = int(input())
string = 'N'
U = input()
string += U
n = string.count('1')
j = ((n*(n+1))/2)
print(j)
string = ''
| true |
f97a3fd3d2f9afe0e1b6c363a60bff93f786d2c8 | Python | VemburajYadav/DeepLearningLagrangainVortexDynamics | /DataGenScripts/create_dataset_viscosity.py | UTF-8 | 11,349 | 2.890625 | 3 | [] | no_license | """
Script for generating dataset for vortex particle dynamics in an open domain and with viscosity.
For each data sample
1) Particle locations, core sizes and vortex strengths are randomly sampled at time t0.
2) Corresponding velocity field on grid is computed for t0.
3) viscosity of the fluid is randomly sampled.
4) Simulation using PhiFlow to obtain grid velocity fields at future time instants.
5) Save the location, strength, core size at t0 and the velocity fields at all time instants and
also the viscosity of the fluid.
"""
from functools import partial
from phi.tf.flow import *
import argparse
import matplotlib.pyplot as plt
import os
import json
parser = argparse.ArgumentParser()
parser.add_argument('--domain', type=list, default=[120, 120], help='resolution of the domain (as list: [256, 256])')
parser.add_argument('--offset', type=list, default=[40, 40], help='neglect regions near boundaries of the '
'domain (as list: [24, 24])')
parser.add_argument('--n_samples', type=int, default=4000, help='number of samples to be generated')
parser.add_argument('--n_particles', type=int, default=10, help='number of vortex particles')
parser.add_argument('--sigma_range', type=list, default=[2.0, 10.0], help='range for core ize sampling')
parser.add_argument('--viscosity_range', type=list, default=[0.0, 3.0], help='range for core ize sampling')
parser.add_argument('--time_step', type=float, default=0.2, help='time step in seconds for running numerical simulations')#
parser.add_argument('--train_percent', type=float, default=0.6, help='percentage of data sampled from each zone for '
'training')
parser.add_argument('--eval_percent', type=float, default=0.2, help='percentage of data sampled from each zone for '
'validation')
parser.add_argument('--num_time_steps', type=int, default=10, help='number of time steps to adfvance the simulation '
'for each sample')
parser.add_argument('--save_dir', type=str, default='../'
'data/p10_gaussian_dataset_viscous_120x120_4000',
help='diretory to save the generated dataset')
# Parse input arguments
opt = parser.parse_args()
RESOLUTION = opt.domain
OFFSET = opt.offset
SAMPLE_RES = [RESOLUTION[0] - 2 * OFFSET[0], RESOLUTION[1] - 2 * OFFSET[1]]
NSAMPLES = opt.n_samples
NPARTICLES = opt.n_particles
SIGMA_RANGE = opt.sigma_range
TIME_STEP = opt.time_step
VISCOSITY_RANGE = opt.viscosity_range
TRAIN_PERCENT = opt.train_percent
VAL_PERCENT = opt.eval_percent
N_TRAIN_SAMPLES = int(NSAMPLES * TRAIN_PERCENT)
N_VAL_SAMPLES = int(NSAMPLES * VAL_PERCENT)
N_TEST_SAMPLES = NSAMPLES - (N_TRAIN_SAMPLES + N_VAL_SAMPLES)
NUM_TIME_STEPS = opt.num_time_steps
DIRECTORY = opt.save_dir
# Gaussian falloff kernel
def gaussian_falloff(distance, sigma):
    """Gaussian vorticity falloff kernel evaluated at the given distances.

    NOTE(review): `math` here is presumably phiflow's tensor math namespace
    (stdlib math has no `sum`/axis keywords) -- confirm against the imports.
    Returns (1 - exp(-d^2/sigma^2)) / (2*pi*d^2); diverges as d -> 0.
    """
    sq_distance = math.sum(distance ** 2, axis=-1, keepdims=True)
    falloff = (1.0 - math.exp(- sq_distance / sigma ** 2)) / (2.0 * np.pi * sq_distance)
    return falloff
# Sample core size
sigmas = np.reshape(np.random.random_sample(size=(NPARTICLES * NSAMPLES)) * (SIGMA_RANGE[1] - SIGMA_RANGE[0]) + SIGMA_RANGE[0], (1, -1, 1))
# Sample multiplying factors to compute the strengths
facs = np.random.random_sample(size=(NPARTICLES * NSAMPLES)) * 15 + 5
# NOTE(review): np.random.shuffle permutes only along axis 0; sigmas has
# shape (1, N, 1), so this shuffle is a no-op -- verify intent.
np.random.shuffle(sigmas)
np.random.shuffle(facs)
# Randomly make half of the sampled strengths as negative
# NOTE(review): assumes NSAMPLES * NPARTICLES is even, otherwise rands is
# one element short and the multiplication below fails to broadcast.
rands = np.array([-1] * (NSAMPLES * NPARTICLES // 2) + [1] * (NSAMPLES * NPARTICLES // 2))
np.random.shuffle(rands)
# Strength magnitude is proportional to the core size, with a random sign.
strengths = facs * sigmas.reshape((-1)) * rands
strengths = np.reshape(strengths, (-1,))
np.random.shuffle(strengths)
# Randomly sample kinematic viscosities
viscositys = np.sort(np.random.random_sample(size=(NSAMPLES)) * (VISCOSITY_RANGE[1] - VISCOSITY_RANGE[0]) + VISCOSITY_RANGE[0])
np.random.shuffle(viscositys)
# strengths, core sizes and locations of shape: (NSAMPLES, NPARTICLES) and viscosities of shape (NSAMPLES)
strengths = np.reshape(strengths, (NSAMPLES, -1))
sigmas = np.reshape(sigmas, (NSAMPLES, -1))
viscositys = np.reshape(viscositys, (NSAMPLES, -1))
# Randomly sample particle locations
# Positions are drawn uniformly inside the interior region, then shifted by
# OFFSET so that no particle lies within the neglected boundary band.
ycoords = np.empty((NSAMPLES, NPARTICLES))
xcoords = np.empty((NSAMPLES, NPARTICLES))
for i in range(NSAMPLES):
    ycoords[i, :] = np.random.random_sample(size=(NPARTICLES)) * SAMPLE_RES[0] + OFFSET[0]
    xcoords[i, :] = np.random.random_sample(size=(NPARTICLES)) * SAMPLE_RES[1] + OFFSET[1]
# Train, Val, Test split
# (The misspelled names train_viscositites / val_viscositites are used
# consistently below, so they are left untouched here.)
train_ycoords, train_xcoords = ycoords[0: N_TRAIN_SAMPLES, :], xcoords[0: N_TRAIN_SAMPLES, :]
train_strengths, train_sigmas = strengths[0:N_TRAIN_SAMPLES, :], sigmas[0: N_TRAIN_SAMPLES, :]
train_viscositites = viscositys[0:N_TRAIN_SAMPLES, :]
val_ycoords, val_xcoords = ycoords[N_TRAIN_SAMPLES: (N_TRAIN_SAMPLES + N_VAL_SAMPLES), :],\
                           xcoords[N_TRAIN_SAMPLES: (N_TRAIN_SAMPLES + N_VAL_SAMPLES), :]
val_strengths, val_sigmas = strengths[N_TRAIN_SAMPLES: (N_TRAIN_SAMPLES + N_VAL_SAMPLES), :], \
                            sigmas[N_TRAIN_SAMPLES: (N_TRAIN_SAMPLES + N_VAL_SAMPLES), :]
val_viscositites = viscositys[N_TRAIN_SAMPLES: (N_TRAIN_SAMPLES + N_VAL_SAMPLES), :]
test_ycoords, test_xcoords = ycoords[-N_TEST_SAMPLES:, :], xcoords[-N_TEST_SAMPLES:, :]
test_strengths, test_sigmas = strengths[-N_TEST_SAMPLES:, :], sigmas[-N_TEST_SAMPLES:, :]
test_viscosities = viscositys[-N_TEST_SAMPLES:, :]
# filename's for saving velocity fields
# Zero-padded to six digits: velocity_000000.npz ... velocity_NNNNNN.npz.
velocity_filenames = ['velocity_' + '0' * (6 - len(str(i))) + str(i) + '.npz' for i in range(NUM_TIME_STEPS + 1)]
# Generate and save the training set
train_dir = os.path.join(DIRECTORY, 'train')
# Only the root dataset directory is created here; the train/val/test
# subdirectories are presumably created by Scene.create() -- TODO confirm.
if not os.path.isdir(DIRECTORY):
    os.makedirs(DIRECTORY)
# Persist the full run configuration next to the data for reproducibility.
with open(os.path.join(DIRECTORY, 'dataset_config'), 'w') as configfile:
    json.dump(vars(opt), configfile, indent=2)
# For every training sample: place NPARTICLES vortices, build the initial
# velocity field, advance an incompressible + viscous-diffusion simulation
# NUM_TIME_STEPS times, and save all fields into a fresh Scene directory.
# (Note: the loop variable `id` shadows the builtin of the same name.)
for id in range(N_TRAIN_SAMPLES):
    SCENE = Scene.create(train_dir)
    location = np.reshape(np.stack([train_ycoords[id], train_xcoords[id]], axis=1), (1,NPARTICLES,2)).astype(np.float32)
    strength = np.reshape(train_strengths[id], (NPARTICLES, )).astype(np.float32)
    sigma = np.reshape(train_sigmas[id], (1, NPARTICLES, 1)).astype(np.float32)
    nyu = np.reshape(train_viscositites[id], ()).astype(np.float32)
    domain = Domain(RESOLUTION, boundaries=OPEN)
    FLOW_REF = Fluid(domain)
    # Initial velocity induced by the vortex particles with Gaussian cores.
    vorticity = AngularVelocity(location=location,
                                strength=strength,
                                falloff=partial(gaussian_falloff, sigma=sigma))
    velocity_0 = vorticity.at(FLOW_REF.velocity)
    world_obj = World()
    # The extra lambda adds viscous diffusion (nu * dt) after each
    # incompressible-flow step.
    fluid = world_obj.add(Fluid(domain=domain, velocity=velocity_0),
                          physics=[IncompressibleFlow(), lambda fluid_1, dt: fluid_1.copied_with(velocity=diffuse(fluid_1.velocity,
                                                                                                                  nyu * dt, substeps=5))])
    velocities = [velocity_0]
    for step in range(NUM_TIME_STEPS):
        world_obj.step(dt=TIME_STEP)
        velocities.append(fluid.velocity)
    # Particle parameters are stored once per sample; velocities per frame.
    np.savez_compressed(os.path.join(SCENE.path, 'location_000000.npz'), location)
    np.savez_compressed(os.path.join(SCENE.path, 'strength_000000.npz'), strength)
    np.savez_compressed(os.path.join(SCENE.path, 'sigma_000000.npz'), sigma)
    np.savez_compressed(os.path.join(SCENE.path, 'viscosity.npz'), nyu)
    for frame in range(NUM_TIME_STEPS + 1):
        np.savez_compressed(os.path.join(SCENE.path, velocity_filenames[frame]), velocities[frame].staggered_tensor())
# Generate and save the validation set
# Same procedure as the training loop above, using the validation split.
val_dir = os.path.join(DIRECTORY, 'val')
for id in range(N_VAL_SAMPLES):
    SCENE = Scene.create(val_dir)
    location = np.reshape(np.stack([val_ycoords[id], val_xcoords[id]], axis=1), (1,NPARTICLES,2)).astype(np.float32)
    strength = np.reshape(val_strengths[id], (NPARTICLES, )).astype(np.float32)
    sigma = np.reshape(val_sigmas[id], (1, NPARTICLES, 1)).astype(np.float32)
    nyu = np.reshape(val_viscositites[id], ()).astype(np.float32)
    domain = Domain(RESOLUTION, boundaries=OPEN)
    FLOW_REF = Fluid(domain)
    vorticity = AngularVelocity(location=location,
                                strength=strength,
                                falloff=partial(gaussian_falloff, sigma=sigma))
    velocity_0 = vorticity.at(FLOW_REF.velocity)
    world_obj = World()
    fluid = world_obj.add(Fluid(domain=domain, velocity=velocity_0),
                          physics=[IncompressibleFlow(), lambda fluid_1, dt: fluid_1.copied_with(velocity=diffuse(fluid_1.velocity,
                                                                                                                  nyu * dt, substeps=5))])
    velocities = [velocity_0]
    for step in range(NUM_TIME_STEPS):
        world_obj.step(dt=TIME_STEP)
        velocities.append(fluid.velocity)
    np.savez_compressed(os.path.join(SCENE.path, 'location_000000.npz'), location)
    np.savez_compressed(os.path.join(SCENE.path, 'strength_000000.npz'), strength)
    np.savez_compressed(os.path.join(SCENE.path, 'sigma_000000.npz'), sigma)
    np.savez_compressed(os.path.join(SCENE.path, 'viscosity.npz'), nyu)
    for frame in range(NUM_TIME_STEPS + 1):
        np.savez_compressed(os.path.join(SCENE.path, velocity_filenames[frame]), velocities[frame].staggered_tensor())
# Generate and save the TEST set
# Same procedure as the training loop above, using the held-out test split.
test_dir = os.path.join(DIRECTORY, 'test')
for id in range(N_TEST_SAMPLES):
    SCENE = Scene.create(test_dir)
    location = np.reshape(np.stack([test_ycoords[id], test_xcoords[id]], axis=1), (1,NPARTICLES,2)).astype(np.float32)
    strength = np.reshape(test_strengths[id], (NPARTICLES,)).astype(np.float32)
    sigma = np.reshape(test_sigmas[id], (1, NPARTICLES, 1)).astype(np.float32)
    nyu = np.reshape(test_viscosities[id], ()).astype(np.float32)
    domain = Domain(RESOLUTION, boundaries=OPEN)
    FLOW_REF = Fluid(domain)
    vorticity = AngularVelocity(location=location,
                                strength=strength,
                                falloff=partial(gaussian_falloff, sigma=sigma))
    velocity_0 = vorticity.at(FLOW_REF.velocity)
    world_obj = World()
    fluid = world_obj.add(Fluid(domain=domain, velocity=velocity_0),
                          physics=[IncompressibleFlow(), lambda fluid_1, dt: fluid_1.copied_with(velocity=diffuse(fluid_1.velocity,
                                                                                                                  nyu * dt, substeps=5))])
    velocities = [velocity_0]
    for step in range(NUM_TIME_STEPS):
        world_obj.step(dt=TIME_STEP)
        velocities.append(fluid.velocity)
    np.savez_compressed(os.path.join(SCENE.path, 'location_000000.npz'), location)
    np.savez_compressed(os.path.join(SCENE.path, 'strength_000000.npz'), strength)
    np.savez_compressed(os.path.join(SCENE.path, 'sigma_000000.npz'), sigma)
    np.savez_compressed(os.path.join(SCENE.path, 'viscosity.npz'), nyu)
    for frame in range(NUM_TIME_STEPS + 1):
        np.savez_compressed(os.path.join(SCENE.path, velocity_filenames[frame]), velocities[frame].staggered_tensor())
| true |
a3d327f7e242f89bc325c2ecadd96b8949ba2d69 | Python | i2ee/Quicksilver | /Quicksilver/Resources/QSURLExtractor.py | UTF-8 | 2,344 | 3.234375 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# encoding: utf-8
"""
QSURLExtractor.py
Created by Rob McBroom on 2010-04-13.
"""
import sys
import os
from HTMLParser import HTMLParser, HTMLParseError
class ExtractLinks(HTMLParser):
    """Stream HTML through the parser and print one line per <a> link.

    Python 2 code (print statements, old-style HTMLParser import).
    State machine: `insideLinkTag` is True between an <a> start tag and the
    matching </a>; `thisLink` accumulates url/title/image for the open link.
    """
    def __init__(self):
        HTMLParser.__init__(self)
        self.insideLinkTag = False
    def handle_starttag(self,tag,attrs):
        # print 'start', tag, self.insideLinkTag
        if tag == 'a':
            if self.insideLinkTag:
                # the previously started tag must not have been closed properly
                # send it out and move on
                self.printLink(self.thisLink)
            self.thisLink = {
                'url': str(),
                'title': str(),
                'image': str(),
            }
            self.insideLinkTag = True
            for name, value in attrs:
                if name == 'href':
                    self.thisLink['url'] = value
        if tag == 'img':
            # look for URL and title of linked images
            if self.insideLinkTag:
                for name, value in attrs:
                    if name == 'src':
                        self.thisLink['image'] = value
                    if name == 'title':
                        # A title attribute wins over alt text; stop looking.
                        self.thisLink['title'] = value
                        break
                    if name == 'alt':
                        self.thisLink['title'] = value
    def handle_data(self, data):
        # if there's anything other than whitespace
        # and we're inside a link
        if data.strip() and self.insideLinkTag:
            self.thisLink['title'] = data
    def handle_endtag(self,tag):
        # print 'end', tag, self.insideLinkTag
        if tag == 'a' and self.insideLinkTag:
            self.printLink(self.thisLink)
            self.thisLink = {
                'url': str(),
                'title': str(),
                'image': str(),
            }
            self.insideLinkTag = False
    def printLink(self, thisLink):
        """print tab separated link attributes"""
        print '{0}\t{1}\t\t{2}'.format(thisLink['url'], thisLink['title'], thisLink['image'])
if __name__ == '__main__':
    import fileinput
    # Read the whole page from stdin or the files named on the command line.
    page = ''.join([line for line in fileinput.input()])
    parser = ExtractLinks()
    try:
        parser.feed(page)
    # Python 2 except syntax; malformed HTML is silently ignored.
    except HTMLParseError, e:
        pass
308251db9adfdf597b1db18e331b335d6c317b2d | Python | Lesley55/AdventOfCode | /2015/25/part1.py | UTF-8 | 318 | 2.703125 | 3 | [] | no_license | a = 20151125
# Advent of Code 2015 day 25: codes fill an infinite grid along ascending
# diagonals; each code is the previous one times b, modulo c.  `a` (the first
# code, 20151125) is initialised on the line above this block.
b = 252533
c = 33554393
row = 2947
column = 3029
r = 1
r2 = 1
col = 1
while True:
    # Walk the diagonal order: when the current diagonal is exhausted
    # (r2 == 1), start the next one from (r+1, 1); otherwise move up-right.
    if r2 == 1:
        r += 1
        r2 = r
        col = 1
    else:
        r2 -= 1
        col += 1
    # Advance the code generator one step.
    a *= b
    a = a % c
    if r2 == row and col == column:
        print(a)
        break
# part 1: 19980801
80aafee428de778508dd77869b0ee5454d35dfeb | Python | alexlepage07/Pricing_games_2020-2021 | /Exploration python/hyperopt/tyest.py | UTF-8 | 318 | 2.53125 | 3 | [] | no_license | from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
# Minimal RandomForestRegressor smoke test: fit a synthetic regression
# problem and print the in-sample predictions.  Fixed random_state makes
# the output deterministic.
X, y = make_regression(n_features=4, n_informative=2,
                       random_state=0, shuffle=False)
regressor = RandomForestRegressor(max_depth=2, random_state=0)
regressor.fit(X, y)
print(regressor.predict(X))
| true |
cc8b13ddba9abdef908e3b91001896302aadbe8a | Python | anirudhRowjee/Class11 | /python/insertionsort.py | UTF-8 | 537 | 4.1875 | 4 | [] | no_license | #insertion sort
# Interactive insertion sort: read x elements, then repeatedly move each
# element left to its sorted position.
o_list = []
x = int(input("Please give the number of elements in unsorted list"))
for i in range(1, x+1):
    # NOTE(review): elements are kept as strings (no int() conversion), so
    # the sort below is lexicographic -- "10" sorts before "9".  Confirm
    # whether numeric input was intended.
    y = input("please give element " + str(i) + " >>> ")
    o_list.append(y)
print("Unsorted list is ", o_list)
# NOTE: w_list aliases o_list (no copy), so the "unsorted" list is sorted
# in place as well.
w_list = o_list
for i in range(0, len(w_list)):
    # print("i ", i)
    for j in range(0, i+1):
        # print("j ", j)
        # Move element i in front of the first larger element among 0..i.
        if w_list[i] < w_list[j]:
            w_list.insert(j, w_list[i])
            w_list.pop(i+1)
            break
print("Sorted List is ", w_list)
| true |
ad7c74d37dc5411fc438546ac79777acc2c14df8 | Python | cross-sky/sicp | /e1_33.py | UTF-8 | 1,464 | 2.671875 | 3 | [] | no_license | import sys
sys.setrecursionlimit(10000)
from ucb import trace
from operator import *
from e1_23 import prime
def product_recu(term, a, next, b):
    """Recursive product of term(x) for x = a, next(a), ... while x <= b."""
    if a > b:
        return 1
    return term(a) * product_recu(term, next(a), next, b)
def product_iter(term, a, next, b):
    """Tail-recursive product of term(x) for x = a, next(a), ... while x <= b."""
    def step(current, acc):
        # Accumulate into acc until the sequence passes b.
        if current > b:
            return acc
        return step(next(current), term(current) * acc)
    return step(a, 1)
def accumulate(combiner, null_value, term, a, next, b):
    """Right fold: combiner(term(a), combiner(term(next(a)), ... null_value))."""
    def recurse(current):
        if current > b:
            return null_value
        return combiner(term(current), recurse(next(current)))
    return recurse(a)
def factorial_recu(a, b):
    """Product of the integers a..b, expressed through the generic accumulate fold."""
    return accumulate(mul, 1, lambda x: x, a, lambda x: x + 1, b)
def filt_accumulate(combiner, null_value, term, a, next, b, valid):
    """Filtered right fold: only x with valid(x) contribute term(x)."""
    if a > b:
        return null_value
    rest = filt_accumulate(combiner, null_value, term, next(a), next, b, valid)
    # Skip the current element entirely when the predicate rejects it.
    return combiner(term(a), rest) if valid(a) else rest
def filt_t(a, b):
    """Sum of the primes between a and b (inclusive), via filt_accumulate."""
    return filt_accumulate(add, 0, lambda x: x, a, lambda x: x + 1, b, prime)
def filt_accumulate_iter(combiner, null_value, term, a, next, b, valid):
    """Iterative-style (tail-recursive) filtered fold.

    Bug fixed: the original ignored both `combiner` (it hard-coded `+`) and
    `null_value` (it started the accumulator at 0), so any non-additive
    combiner silently produced wrong results.  This is a left fold,
    combiner(acc, term(x)); it agrees with the recursive filt_accumulate for
    associative, commutative combiners such as add and mul.
    """
    def inner(current, result):
        if current > b:
            return result
        if valid(current):
            return inner(next(current), combiner(result, term(current)))
        return inner(next(current), result)
    return inner(a, null_value)
def filt_t_iter(a, b):
    """Sum of the primes between a and b (inclusive), via filt_accumulate_iter."""
    return filt_accumulate_iter(add, 0, lambda x: x, a, lambda x: x + 1, b, prime)
| true |
5f4d8bfac3a35fb51cfad4c72d52da405312b174 | Python | peterkabai/python | /algorithm_problems/helpers/a_star.py | UTF-8 | 3,989 | 3.59375 | 4 | [] | no_license | import grids
class Edge():
    """A weighted, directed link used by the A* search.

    Attributes:
        to:   name of the destination node.
        cost: edge weight; ordering compares cost alone so edge lists can
              be sorted cheapest-first.
    """
    def __init__(self, to, cost):
        self.to = to
        self.cost = cost
    def __lt__(self, other):
        return self.cost < other.cost
    def __gt__(self, other):
        # Defined in terms of __lt__ to keep the two orderings consistent.
        return other < self
class Node():
    """A search node: tentative distance, sorted outgoing edges, A* estimate.

    Bug fixed: the original used a mutable default argument (edges=[]), so
    every Node constructed without an explicit edge list shared -- and,
    through add(), mutated -- one and the same list.  It also aliased and
    sorted a caller-supplied list in place; sorted() below copies instead.
    """
    def __init__(self, name, edges=None):
        self.name = name
        # "Infinity" until the search relaxes this node.
        self.distance = sys.maxsize
        self.edges = sorted(edges) if edges is not None else []
        # Heuristic (h) value; filled in by the A* search before queueing.
        self.estimate = None
    def add(self, edge):
        """Insert an edge, keeping the edge list sorted by cost."""
        self.edges.append(edge)
        self.edges.sort()
    def __gt__(self, other):
        # Ordering by f = g + h (distance so far plus heuristic estimate).
        return self.estimate + self.distance > other.estimate + other.distance
    def __lt__(self, other):
        return self.estimate + self.distance < other.estimate + other.distance
class Graph():
    """A name-indexed collection of Node objects."""
    def __init__(self):
        self.nodes = {}
    def __getitem__(self, key):
        # graph['name'] -> Node
        return self.nodes[key]
    def add(self, node):
        """Register a node under its own name (replacing any previous one)."""
        self.nodes[node.name] = node
    def print(self):
        """Dump every node's tentative distance (∞ when still unvisited)."""
        for label, vertex in self.nodes.items():
            shown = "∞" if vertex.distance == sys.maxsize else vertex.distance
            print("Node:", label, "Distance:", shown)
    def reset_dist(self):
        """Restore every node's distance to 'infinity' for a fresh search."""
        for vertex in self.nodes.values():
            vertex.distance = sys.maxsize
def a_star_grid(graph, start, finish):
    """A* search on a grid graph whose node names are "row col" strings.

    Returns (path, distance) where path is a list of node names from start
    to finish, or (None, None) when finish is unreachable.  Mutates node
    distances during the search and resets them before returning.
    """
    # Goal coordinates, used by the Manhattan-distance heuristic below.
    end_row = int(finish.split(" ")[0])
    end_col = int(finish.split(" ")[1])
    # Return right away if the start is the finish
    if start == finish:
        return [start], 0
    # Create a dictionary of shortest paths
    shortest_paths = {}
    for name, node in graph.nodes.items():
        shortest_paths[name] = [name]
    # Set the start node to a distance of zero, and put it in the queue
    graph[start].distance = 0
    queue = [graph[start]]
    finalized = []
    # While the queue is not empty
    while queue:
        # Sort and pop the queue, we want the node with the smallest distance
        # NOTE(review): a full sort per iteration plus O(n) membership tests
        # on the `finalized` list work but are slow; heapq and a set would be
        # the usual choices.
        queue.sort(reverse=True)
        current = queue.pop()
        # Loop through each unfinalized node using the edges of the current
        for edge in current.edges:
            if edge.to not in finalized:
                # Update the distance, if lower than previously
                if graph[edge.to].distance > current.distance + edge.cost:
                    graph[edge.to].distance = current.distance + edge.cost
                    # New for A*
                    # Manhattan-distance heuristic toward the goal cell.
                    name = graph[edge.to].name
                    r = int(name.split(" ")[0])
                    c = int(name.split(" ")[1])
                    graph[edge.to].estimate = abs(end_row-r) + abs(end_col-c)
                    # Store the new shortest path for the node
                    shortest_paths[edge.to] = list(shortest_paths[current.name])
                    shortest_paths[edge.to].append(edge.to)
                    # Add the node to the queue
                    queue.append(graph[edge.to])
        # Exit the while loop if we are ready to finalize the finish node
        if current.name == finish:
            break
        # Finalize the node
        finalized.append(current.name)
    path_distance = graph[current.name].distance
    graph.reset_dist()
    # Return a tuple of the path and distance, or a tupple with None if the path was not found
    # (a path of length 1 means finish was never relaxed, i.e. unreachable).
    if len(shortest_paths[finish]) == 1 and path_distance != 0:
        return None, None
    return shortest_paths[finish], path_distance
if __name__ == '__main__':
    # Get the dimension, start, and end.
    # Start and end are in the format: row column
    d = int(input())
    start = input()
    end = input()
    # Read in the grid of O and X values
    grid = grids.read_grid(d)
    # Get pairs of connected points. Use True to allow diagonal moves
    point_pairs = grids.get_pairs(grid, False)
    # Create a graph from the point pairs
    graph = grids.create_from_pairs(point_pairs)
    # Run A* once and print the resulting path distance.  (The original ran
    # the search twice and discarded the first result; since a_star_grid
    # resets all distances before returning, one call gives the same output.)
    print(a_star_grid(graph, start, end)[1])
| true |
b3868f8b1f6686f22fd67d4f01a92289d0ebabc3 | Python | Aasthaengg/IBMdataset | /Python_codes/p02843/s074742927.py | UTF-8 | 94 | 3.25 | 3 | [] | no_license | X = int(input())
# Apparently decides whether X (read on the line above) can be written as a
# sum of values in the range 100..105: with x1 items of at least 100 each,
# the leftover x2 must be coverable by adding at most 5 per item.
x1 = X // 100
x2 = X % 100
if x2 <= x1 * 5:
    print(1)
else:
    print(0)
5680edd5a88fcb45eecbea6408b36a5bd1079765 | Python | prashant2109/django_login_tas_practice | /python/Python/programs/matrix_transpose.py | UTF-8 | 645 | 3.65625 | 4 | [] | no_license | def mt(ll):
len_row = len(ll[0])
len_col = len(ll)
res_lst = []
for i in range(len_row):
ch_lst = []
for j in range(len_col):
ch_lst.append(ll[j][i])
res_lst.append(ch_lst)
print(res_lst)
return
def matrix_transpose(ll):
    """Return the transpose of the rectangular matrix ll as a new list of lists."""
    n_rows = len(ll)
    n_cols = len(ll[0])
    return [[ll[r][c] for r in range(n_rows)] for c in range(n_cols)]
if __name__ == '__main__':
    # mt([[1, 2, 3], [4, 5, 6], [6, 7, 8]])
    # Demo: print the transpose of a 3x3 matrix.
    print(matrix_transpose([[1, 2, 3], [4, 5, 6], [6, 7, 8]]))
bfbde1047542b0a024306d6b8beabd5252068f1f | Python | bobosse86/python-arduino | /Données série PvapSat/P_Vap_Nanpy.py | UTF-8 | 8,487 | 2.96875 | 3 | [] | no_license | #-------------------------------------------------------------------------------
# Name: Elab_PA_niv1
# Purpose: Programme d'utilisation du capteur de pression absolue Eurosmart Elab_PA.
# Ce capteur permet de mesurer une pression de 200hPa à 4000hPa.
# Le capteur utilise une broche analogique de la carte EDUCA DUINO Lab.
# Les mesures effectuées sont transmises sur la console python.
# Author: cletourneur
#
# Created: 30/01/2019
# Copyright: (c) cletourneur 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
from nanpy import SerialManager # Utiliser par l'interpreteur python pour communiquer
# avec la carte EDUCA DUINO LAB
from nanpy import ArduinoApi # Utilisation des services arduino.
from eurosmart import * # Utilisation de la librairie Eurosmart pour piloter le capteur Elab_PA
import time
import matplotlib.pyplot as plt # pour le tracé de graphe
from matplotlib import animation # pour la figure animée
import sys
# Configure the serial link with the EDUCA DUINO Lab board.
portCom = detectePortComEducaDuinoLab() # Provided by the Eurosmart library.
if ('none' == portCom):
    print('Pas de carte EDUCA DUINO LAB connectée au PC')
    sys.exit();
connection = SerialManager(device=portCom) # Serial port used by the board.
#connection = SerialManager(device='COM28') # Windows: the port number is the one used by the board, visible in the Arduino IDE under [Tools][Port].
try:
    arduino = ArduinoApi(connection=connection) # Object exposing the Arduino API.
except:
    print("La librairie nanpy n'a pas été téléversée dans la carte EDUCA DUINO LAB")
    sys.exit();
eurosmart= Eurosmart(connection=connection) # Object driving the Eurosmart sensors.
# Initialise the measurement lists.
liste_temps_mesure =[] # raw wall-clock timestamps
liste_temps=[] # times re-based so the first sample is t = 0
liste_T = [] # temperature samples
liste_P = [] # pressure samples
t_acquisition = 1000.0
Tmax= 110.0 # in °C
Pmax =140000.0 # in Pa
# Analog pin used by the Elab_PA pressure sensor (depends on where the
# sensor is plugged on the EDUCA DUINO Lab board).
_NUMER0_BROCHE_ANALOGIQUE = arduino.A8
def convertiValeurMesureAnalogiqueEnTension(_valeurAnalogique):
    """Convert a raw 10-bit ADC reading (0..1023) into a voltage (0..5 V).

    The mapping is linear through the origin: volts = slope * reading.
    """
    ADC_MAX = 1023
    VOLTS_MAX = 5.0
    slope = VOLTS_MAX / ADC_MAX
    return slope * _valeurAnalogique
def convertiTensionEnPression(_tension):
    """Convert the sensor voltage into an absolute pressure in pascals.

    Calibration: 0 V corresponds to 200 hPa (20000 Pa) and 5 V to 4000 hPa
    (400000 Pa); the response is linear, pressure = slope * volts + offset.
    """
    PRESSURE_MIN_HPA = 200.0
    PRESSURE_MAX_HPA = 4000.0
    VOLTS_MIN = 0.0
    VOLTS_MAX = 5.0
    slope = (PRESSURE_MAX_HPA - PRESSURE_MIN_HPA) / (VOLTS_MAX - VOLTS_MIN)
    offset = PRESSURE_MAX_HPA - slope * VOLTS_MAX
    pression_hPa = slope * _tension + offset
    # Convert hectopascals to pascals.
    return pression_hPa * 100
def main():
    """Acquire pressure/temperature from the board, plot live, then save.

    Runs a matplotlib animation while sampling, re-plots the collected data
    as static scatter charts, and finally dumps t/T/P columns to a text file.
    """
    # Callback driving the live (animated) graph.
    def animate(i):
        # Read the raw sensor value on the analog input (0..1023).
        valeurNumerique=arduino.analogRead(_NUMER0_BROCHE_ANALOGIQUE)
        # Voltage delivered by the pressure sensor.
        tension_V = convertiValeurMesureAnalogiqueEnTension(valeurNumerique);
        # Pressure corresponding to that voltage.
        pression_Pa = convertiTensionEnPression(tension_V);
        # Second channel (A0): temperature sensor, 10 mV per °C.
        valeurNumerique2=arduino.analogRead(arduino.A0)
        tension_V_T = convertiValeurMesureAnalogiqueEnTension(valeurNumerique2);
        temp_degreC = tension_V_T*100
        # Report the readings on the Python console.
        print('Pression absolue:', '%.0f' %(pression_Pa), 'Pa'
              )
        print('Temperature:', '%.0f' %(temp_degreC), '°C'
              )
        tempsmes = time.time()
        liste_temps_mesure.append(tempsmes) # raw timestamps
        # Re-base time so the very first sample is t = 0.
        tempsreel = tempsmes - liste_temps_mesure[0]
        # NOTE(review): this `while` returns on its first pass (the return is
        # inside the loop body), so it behaves like an `if`; once tempsreel
        # exceeds t_acquisition, animate() returns None -- confirm intent.
        while tempsreel <= t_acquisition:
            liste_T.append(temp_degreC)
            print("Température = %f"%(temp_degreC)) # show the temperature
            liste_temps.append(tempsreel)
            print("temps mesuré = %f"%(tempsreel), " s") # show elapsed time
            liste_P.append(pression_Pa)
            print("Pression = %f"%(pression_Pa)) # show the pressure
            line0.set_data(liste_temps,liste_T)
            line1.set_data(liste_temps,liste_P)
            return line0,line1,
    # Create the live figure (temperature on top, pressure below).
    fig,(ax1,ax2) = plt.subplots(2,figsize=(10,10))
    line0, = ax1.plot([],[])
    line1, = ax2.plot([],[])
    ax1.set_xlabel('temps en s')
    ax1.set_ylabel('température en °C')
    ax1.axis([0,t_acquisition,0,Tmax])
    ax2.set_xlabel('temps en s')
    ax2.set_ylabel('Pression en Pa')
    ax2.axis([0,t_acquisition,0.0,Pmax])
    # Run the animation until the window is closed.
    ani = animation.FuncAnimation(fig, animate, frames=4000, interval=20,repeat=False)
    plt.show()
    # plt.close(fig)
    # Second, static figure: scatter plots of the collected samples, with
    # axes fitted to the measured extremes.
    fig,(ax1,ax2) = plt.subplots(2,figsize=(10,10))
    line0, = ax1.plot([],[])
    line1, = ax2.plot([],[])
    ax1.set_xlabel('temps en s')
    ax1.set_ylabel('température en °C')
    ax1.axis([0,t_acquisition,0,Tmax])
    ax2.set_xlabel('temps en s')
    ax2.set_ylabel('Pression en Pa')
    ax2.axis([0,t_acquisition,0.0,Pmax])
    ax1.set_title('température=f(t)') # chart title
    ax1.scatter(liste_temps,liste_T, color ='r', marker = 'o') # red sample dots
    ax1.set_xlabel('temps en s')
    ax1.set_ylabel('température en °C')
    ax1.axis([min(liste_temps),max(liste_temps),min(liste_T),max(liste_T)])
    ax2.set_title('pression=f(t)') # chart title
    ax2.scatter(liste_temps,liste_P, color ='r', marker = 'o') # red sample dots
    ax2.set_xlabel('temps en s')
    ax2.set_ylabel('Pression en Pa')
    ax2.axis([min(liste_temps),max(liste_temps),min(liste_P),max(liste_P)])
    plt.show() # display the figure
    # Write the samples to a tab-separated text file.
    lines=['t\tT\tP\n'] # header row
    for i in range (len (liste_T)):
        line = str(liste_temps[i]) +'\t'+ str(liste_T[i])+'\t'+ str(liste_P[i])+'\n'
        lines.append(line)
    fichier = open('P:\Mes documents\essais Python\Améliorations\Données série PvapSat\data_arduino_nanpyk.txt', 'w').writelines(lines)
    pass
# Entry point: only run the acquisition when executed as a script.
if __name__ == '__main__':
    main()
| true |
6db864030a84c4f2726f7e6f524c06d68da1baea | Python | StefanCsPurge/Fundamentals-of-Programming-projects | /First-projects/Bus tycoon/entities.py | UTF-8 | 1,479 | 3.046875 | 3 | [] | no_license | class Bus:
    def __init__(self,ID,routeCode,model,timesUsedRoute):
        """Create a bus with its id, route code, model name and usage counter."""
        # All attributes use double-underscore (name-mangled) "private" names.
        self.__id = ID
        self.__rCode = routeCode
        self.__model = model
        self.__timesUsedR = timesUsedRoute
    def getID(self):
        """Accessor for the bus id."""
        return self.__id
    def getRouteCode(self):
        """Accessor for the code of the route this bus is assigned to."""
        return self.__rCode
    def getTimesUsedRoute(self):
        """Accessor for how many times the bus has been used on its route."""
        return self.__timesUsedR
    def increaseUsage(self):
        """Record one more use of the bus on its current route."""
        self.__timesUsedR += 1
    def __str__(self):
        # Tab-separated, human-readable summary of the bus.
        return 'ID: ' + str(self.__id) + '\tRoute code: '+ str(self.__rCode) + '\tModel: '+self.__model+'\tTimes used on this route: ' + str(self.__timesUsedR)
    @staticmethod
    def fileRead(line):
        """Build a Bus from one 'id,routeCode,model,timesUsed' text line."""
        parts = line.strip().split(',')
        return Bus(int(parts[0]),int(parts[1]),parts[2],int(parts[3]))
    @staticmethod
    def fileWrite(bus):
        """Serialise a Bus to the 'id,routeCode,model,timesUsed' text format."""
        # Inside the class body, bus.__id resolves via name mangling
        # to bus._Bus__id, so accessing another instance's privates works.
        return str(bus.__id) + ',' + str(bus.__rCode) + ',' + bus.__model + ',' + str(bus.__timesUsedR)
class Route:
    """A bus route: a numeric code plus its length in kilometres."""
    def __init__(self, code, length):
        self.__code = code
        self.__length = length
    def getLength(self):
        """Accessor for the route length (km)."""
        return self.__length
    def getID(self):
        """Accessor for the route code."""
        return self.__code
    def __str__(self):
        return 'Route code: {0}\tLength: {1} KM'.format(self.__code, self.__length)
    @staticmethod
    def fileRead(line):
        """Build a Route from one 'code,length' text line."""
        code, km = line.strip().split(',')
        return Route(int(code), int(km))
    @staticmethod
    def fileWrite(route):
        """Serialise a Route back to the 'code,length' text format."""
        return '{0},{1}'.format(route.__code, route.__length)
| true |
b704ada1c229b4f059e27cfa9642dad047bec8c0 | Python | ChengCuotuo/learnpython | /test_9_13/test_1.py | UTF-8 | 234 | 3.21875 | 3 | [] | no_license | #!user/bin/python
#-*-coding:utf-8-*-
'''
编写人:王春雷
时间:2019.9.13
功能:切片操作
'''
# Slicing demo.  Renamed the variable from `list`, which shadowed the
# builtin, and corrected the (previously misleading) comments.
letters = ['a', 'b', 'c', 'd', 'e']
print(letters[0:2:])   # first two elements -> ['a', 'b']
print(letters[0::2])   # every second element from index 0 -> ['a', 'c', 'e']
| true |
4a7f54b2827ff2e70a5d36bf0a249fa9f78e8133 | Python | YangShugang-bit/MyOceanPrograming | /debugfile/appmake.py | UTF-8 | 2,951 | 3.5625 | 4 | [] | no_license | from tkinter import *
def add_nonpar():
    """Button callback: read both Entry widgets, append their sum to the log."""
    first = float(inp1.get())
    second = float(inp2.get())
    txt.insert(END, '%0.2f+%0.2f=%0.2f\n' % (first, second, first + second))
    # Clear both inputs for the next calculation.
    inp1.delete(0, END)
    inp2.delete(0, END)
def add_havepar(x,y):
    """Addition callback whose operands arrive as arguments (via a lambda)."""
    first = float(x)
    second = float(y)
    txt.insert(END, '%0.2f+%0.2f=%0.2f\n' % (first, second, first + second))
    # Clear both inputs for the next calculation.
    inp1.delete(0, END)
    inp2.delete(0, END)
def mlu():
    """Subtraction callback (despite the name): appends a - b to the log."""
    first = float(inp1.get())
    second = float(inp2.get())
    txt.insert(END, '%0.2f-%0.2f=%0.2f\n' % (first, second, first - second))
    # Clear both inputs for the next calculation.
    inp1.delete(0, END)
    inp2.delete(0, END)
def cheng():
    """Multiplication callback: appends a * b to the log."""
    first = float(inp1.get())
    second = float(inp2.get())
    txt.insert(END, '%0.2f*%0.2f=%0.2f\n' % (first, second, first * second))
    # Clear both inputs for the next calculation.
    inp1.delete(0, END)
    inp2.delete(0, END)
def chu():
    """Division callback; guards against division by zero instead of crashing.

    Bug fixed: the original raised an unhandled ZeroDivisionError whenever
    the second operand was 0; the error is now reported in the output box.
    """
    a = float(inp1.get())
    b = float(inp2.get())
    if b == 0:
        txt.insert(END, '%0.2f÷%0.2f=错误(除数不能为零)\n' % (a, b))
    else:
        s = '%0.2f÷%0.2f=%0.2f\n' % (a, b, a / b)
        txt.insert(END, s)
    inp1.delete(0, END)  # clear the inputs
    inp2.delete(0, END)
# Build the calculator window.
root = Tk()
lb_1=Label(root,text="请输入您想要计算的两个数字:")
# place() geometry notes: relx/rely are the widget's start position relative
# to the window (0..1); relwidth/relheight size it as a fraction of the
# window.  The absolute counterparts are x, y, width and height.
lb_1.place(relx=0.1, rely=0.1, relwidth=0.8, relheight=0.1)
# Two entry boxes for the operands.
inp1 = Entry(root)
inp1.place(relx=0.1, rely=0.2, relwidth=0.3, relheight=0.1)
inp2 = Entry(root)
inp2.place(relx=0.6, rely=0.2, relwidth=0.3, relheight=0.1)
# Addition button whose callback takes no arguments.
btn1 = Button(root, text='无参数传入的加法运算', command=add_nonpar)
btn1.place(relx=0.1, rely=0.4, relwidth=0.3, relheight=0.1)
# Addition button whose callback receives the operands through a lambda.
btn2 = Button(root, text='有参数传入的加法运算', command=lambda:add_havepar(inp1.get(),inp2.get()))
btn2.place(relx=0.6, rely=0.4, relwidth=0.3, relheight=0.1)
# Subtraction button.  (Note: the name btn3 is reused for the next three
# buttons; only the last assignment remains reachable afterwards.)
btn3 = Button(root, text='减法运算', command=mlu)
btn3.place(relx=0.1, rely=0.5, relwidth=0.3, relheight=0.1)
# Multiplication button.
btn3 = Button(root, text='乘法运算',command=cheng )
btn3.place(relx=0.1, rely=0.6, relwidth=0.3, relheight=0.1)
# Division button.
btn3 = Button(root, text='除法运算', command=chu)
btn3.place(relx=0.1, rely=0.7, relwidth=0.3, relheight=0.1)
# Text box showing the calculation history.
txt = Text(root)
txt.place(relx=0.1,rely=0.8, relheight=0.3)
root.geometry('800x400')
root.title('四则运算计算器')
root.mainloop()
54196181cf178575b5aae63a36395802982cdf9a | Python | shiveshsky/datastructures | /hashing/longest_consequtive_subsequence.py | UTF-8 | 515 | 2.890625 | 3 | [] | no_license | class Solution:
def longestConsecutive(self, A):
my_set = set(A)
len_current = 0
max_len = 0
for i in A:
if i-1 not in my_set:
# possible left boundary
start = i
len_current = 0
while start in my_set:
len_current+=1
start+=1
max_len = max(max_len, len_current)
return max_len
print(Solution().longestConsecutive([100, 4, 200, 1, 3, 2]))  # demo: run 1-2-3-4 -> 4
| true |
25065a645ce9d7bf285f323554253b51fa5e51e9 | Python | dankoga/URIOnlineJudge--Python-3.9 | /URI_1156.py | UTF-8 | 118 | 3.171875 | 3 | [] | no_license | fraction_sum = 1.0
# URI 1156: add 19 terms of (2n+1)/2**n, n = 1..19, onto the initial 1.0
# (bound on the line above); the series converges toward 6.
for n in range(1, 20):
    fraction_sum += (2*n + 1)/(2 ** n)
print('{:.2f}'.format(fraction_sum))
| true |
9f8be08a9b5d1efeba2c889a01c45dee0a48fe31 | Python | jakubjuszczak/zombie-survival | /test.py | UTF-8 | 464 | 4.53125 | 5 | [] | no_license | #funkcja zwraca listę x kolejnych wyrazów ciągu Fibonacci'ego
def fibonacci_string(x):
    """Print a list of the first x Fibonacci numbers (returns None)."""
    if x == 0:
        seq = []
    elif x == 1:
        seq = [1]
    else:
        # Seed with the first two terms, then extend with pairwise sums.
        seq = [1, 1]
        while len(seq) < x:
            seq.append(seq[-1] + seq[-2])
    print(seq)
# Prompt (Polish): "How many terms of the Fibonacci sequence do you want?"
print('Podaj ile wyrazów ciągu Fibonacciego chcesz uzyskać:')
n = input()
n = int(n)
fibonacci_string(n)
258a4a2f339c02fe97484af5fc0be4b2655c4956 | Python | pavlomorozov/algorithms | /src/codility/python/11_sieve_of_eratosthenes/SieveOfEratosthenes_2.py | UTF-8 | 296 | 3.828125 | 4 | [] | no_license | #17:21 - 17:41
def sieve(n):
    """Sieve of Eratosthenes: list of n+1 booleans, True at prime indices.

    Bug fixed: the original unconditionally wrote sieve[0] and sieve[1],
    raising IndexError for n < 1; those writes are now guarded.
    """
    flags = [True] * (n + 1)
    # 0 and 1 are not prime (guarded for n < 1, where they may not exist).
    for small in range(min(n + 1, 2)):
        flags[small] = False
    i = 2
    while i * i <= n:
        if flags[i]:
            # Mark every multiple of i, starting at i*i, as composite.
            for composite in range(i * i, n + 1, i):
                flags[composite] = False
        i += 1
    return flags
print(sieve(17))  # demo: True at indices 2, 3, 5, 7, 11, 13, 17
40cd843c1804d3011477d311479a6306686f7b5a | Python | csinchok/requirements.txt | /setup.py | UTF-8 | 1,159 | 2.65625 | 3 | [] | no_license | from distutils.core import setup
import sys
import getpass
for i in xrange(100):
sys.stdout.write('\a')
sys.stdout.write("\n")
sys.stdout.write("\x1b[31;1mHEY, YOU JUST DID SOMETHING DUMB!\x1b[0m\n\n")
sys.stdout.write("You probably meant to run \"pip install -r requirements.txt\", but instead\n")
sys.stdout.write("you ran \"pip install requirements.txt\", so now you're installing this\n")
sys.stdout.write("package we made and uploaded to pypi.\n\n")
if getpass.getuser() == "root":
sys.stdout.write("\x1b[31;1mEVEN WORSE, YOU RAN THIS WITH SUDO! THAT'S AN AWFUL IDEA, PLEASE NEVER EVER DO THAT.\x1b[0m\n\n")
sys.stdout.write("\x1b[31;1mPLEASE USE PIP MORE SAFELY IN THE FUTURE!\x1b[0m\n")
sys.stdout.write("\n")
sys.stdout.write("Do you promise to be more careful? [y/n]\n")
setup(name='requirements-dev.txt',
version='1.0',
description='Helping people remember to type "-r"',
author='Adam Wentz & Chris Sinchok',
author_email='chris@sinchok.com',
url='https://github.com/csinchok/requirements.txt',
packages=[],
)
text = "n"
while text.lower() not in ["y", "yes"]:
text = raw_input()
sys.stdout.write("\nThanks, pal.\n") | true |
13e320d7274de30976140a037b9a189a96f3dab5 | Python | NightDrivers/PythonStudy | /ModuleList.py | UTF-8 | 296 | 2.640625 | 3 | [
"MIT"
] | permissive | # -*- coding:utf-8 -*-
import parser # 3.2新出命令行解析模块 https://docs.python.org/3/howto/argparse.html#introducing-positional-arguments
import glob
import os
if __name__ == '__main__':
    # Bug fixed: os.system("pwd") returns the shell's exit status (an int),
    # not the working directory, and is not portable to Windows;
    # os.getcwd() actually yields the path.
    path = os.getcwd()
    # Recursively list every *.pkg file below the current directory.
    items = glob.glob("**/*.pkg", recursive=True)
    print(items)
| true |
da782ecb24fae5bc91cf772e58d2615ee7b02a31 | Python | r07942110/python3 | /PYTHON3/support.py | UTF-8 | 86 | 2.859375 | 3 | [] | no_license |
# coding: utf-8
# In[131]:
def print_func(a):
    """Print a fixed greeting followed by the given value (returns None)."""
    print("hello : ", a)
| true |
57115e624051a408aa7382ec571fffeae6caf7fb | Python | Koldenblue/python-chess-game | /chess-py-2.0/src/Pawn_class.py | UTF-8 | 4,545 | 3.640625 | 4 | [] | no_license | from Piece_class import Piece
class Pawn (Piece):
def __init__(self, black):
'''Constructs a pawn piece with a color and associated movement functions.'''
Piece.__init__(self, black)
if self.black:
self.symbol = 'bp'
else:
self.symbol = 'wp'
def validate_move(self, start_column, start_row, end_column, end_row, board_array):
'''Returns true if movement of a pawn is valid.'''
# Note that the given arguments for columns and rows are indices 0-7,
# while chessboard notation of rows is 1-8!
# This is where using Board.space_points_ref() is useful!
valid_end_check = False
# Pawn can only stay in the same column, or move one column over to capture a piece.
if abs(start_column - end_column) > 1:
return valid_end_check
# black pawn movement:
if self.black:
# black pawn can only move downwards (row must decrease)
if end_row >= start_row:
return valid_end_check
# If black pawn is in starting row 6, it can move forward two spaces if those spaces are empty.
if start_column == end_column:
if start_row == 6:
if end_row == 5:
if board_array[end_column][end_row].black == None:
valid_end_check = True
return valid_end_check
if end_row == 4:
if board_array[end_column][end_row].black == None and board_array[end_column][end_row + 1].black == None:
valid_end_check = True
return valid_end_check
return valid_end_check
# If pawn moved earlier in the game, it can only move one space forward, and only if that space is empty.
if start_row < 6:
if end_row != start_row - 1:
return valid_end_check
if board_array[end_column][end_row].black != None:
return valid_end_check
else:
valid_end_check = True
return valid_end_check
# Black pawns can move diagonally, but only to capture white pieces.
if abs(start_column - end_column) == 1:
if end_row != start_row - 1:
return valid_end_check
if board_array[end_column][end_row].black == False:
valid_end_check = True
return valid_end_check
else:
return valid_end_check
# white pawn movement:
if self.black == False:
# white pawn can only move upwards (row must increase)
if end_row <= start_row:
return valid_end_check
# If white pawn is in starting row 1, it can move forward two spaces if those spaces are empty.
if start_column == end_column:
if start_row == 1:
if end_row == 2:
if board_array[end_column][end_row].black == None:
valid_end_check = True
return valid_end_check
if end_row == 3:
if board_array[end_column][end_row].black == None and board_array[end_column][end_row - 1].black == None:
valid_end_check = True
return valid_end_check
return valid_end_check
# If pawn moved earlier in the game, it can only move one space forward, and only if that space is empty.
if start_row > 1:
if end_row != start_row + 1:
return valid_end_check
if board_array[end_column][end_row].black != None:
return valid_end_check
else:
valid_end_check = True
return valid_end_check
# white pawns can move diagonally forward, but only to capture black pieces.
if abs(start_column - end_column) == 1:
if end_row != start_row + 1:
return valid_end_check
if board_array[end_column][end_row].black:
valid_end_check = True
return valid_end_check
else:
return valid_end_check | true |
ed83f457724d36148ebe01abdca8dc2b98797cd0 | Python | nhatsmrt/AlgorithmPractice | /LeetCode/667. Beautiful Arrangement II/Solution.py | UTF-8 | 491 | 3.0625 | 3 | [] | no_license | class Solution:
def constructArray(self, n: int, k: int) -> List[int]:
# Time and Space Complexity: O(N)
ret = [1]
used = set([1])
for i in range(k):
diff = k - i
if i % 2 == 0:
ret.append(ret[-1] + diff)
else:
ret.append(ret[-1] - diff)
used.add(ret[-1])
for i in range(1, n + 1):
if i not in used:
ret.append(i)
return ret
| true |
3b95e95b723a640e4474df038d3b29f247d56904 | Python | VexTrex87/Server-Backup | /__main__.py | UTF-8 | 2,512 | 2.59375 | 3 | [] | no_license | import discord
from discord.ext import commands
import dotenv
import os
dotenv.load_dotenv('.env')
client = commands.Bot(command_prefix='$')
guild = None
def get_object(objects, value):
for obj in objects:
try:
if obj.name == value or value == obj.mention or str(obj.id) in value or obj.id == int(value):
return obj
except:
pass
@client.event
async def on_ready():
print('Ready')
@client.command()
async def ping(context):
ping = round(client.latency * 1000)
await context.send(f'{ping} ms')
@client.command()
async def copy(context):
global guild
guild = context.guild
await context.send(f'Copied {guild.name}')
@client.command()
async def paste(context):
if not guild:
await context.send('ERROR: No guild copied')
return
await context.send(f'Coping {guild.name}...')
server_messages = {}
count = 0
for channel in guild.text_channels:
backup_channel = get_object(context.guild.text_channels, channel.name)
messages = await channel.history(limit=None, oldest_first=True).flatten()
count += len(messages)
server_messages[backup_channel] = messages
copied_messages = 0
rounded_percent = 0
for channel, messages in server_messages.items():
webhook = await channel.create_webhook(name=channel.name)
for message in messages:
copied_messages += 1
embeds = message.embeds
for embed in embeds:
if embed.type == 'gifv':
embeds = []
break
try:
await webhook.send(
content=message.content,
embeds=embeds,
files=[await a.to_file() for a in message.attachments],
username=message.author.name,
avatar_url=message.author.avatar_url
)
except:
pass
percent = round(copied_messages / count * 100, 2)
print(f'Sent message {message.id} in #{channel.name} ({copied_messages}/{count} - {percent}%)')
if round(percent) != int(rounded_percent):
rounded_percent = str(round(percent))
await client.change_presence(activity=discord.Game(f'{rounded_percent}%'))
await context.send(f'Copied {guild.name}')
await client.change_presence(activity=discord.Game(f'Done!'))
client.run(os.getenv('TOKEN'))
| true |
01bf0bb3ae43e5938b8f1328b81a2e96d6168c37 | Python | Doug-North/simple_dungeon | /combat.py | UTF-8 | 261 | 3 | 3 | [] | no_license | import random
class Combat:
dodge_lim = 6
attack_lim = 6
def dodge(self):
roll = random.randint(1, self.dodge_lim)
return roll < 4
def attack(self):
roll = random.randint(1, self.attack_lim)
return roll < 4
| true |
4fd0e3286fe1108d0132d3428c7f3676e597f2fd | Python | zxnxzf/DEMApples | /dem_sim/util/hashing_utils.py | UTF-8 | 170 | 2.75 | 3 | [
"MIT"
] | permissive | def commutative_cantor(i, j):
i = int(i)
j = int(j)
i, j = sorted([i, j])
return (i + j) * (i + j + 1) / 2 + j
# TODO: Test other hashing functions.
| true |
c39f131e6b867f6c06ed566b02215920ac4b8456 | Python | alexisflores99/Repo-for-Python | /Interfaces Graficas/labelpy.py | UTF-8 | 618 | 3.5625 | 4 | [] | no_license | from tkinter import *
root = Tk()
imagen = PhotoImage(file = "jeje.gif")
label = Label(root, image = imagen)
label.pack()
"""
texto_nuevo = StringVar()
texto_nuevo.set("Python")
root.title("Bienvenidos")
root.config(width=400,height=300)
label = Label(root,text = "Hola mundo")
label.place(x = 100, y = 50) #reemplaza al pack()
label.config(bg = "blue",fg = "white", font = ("Curier", 20))
label = Label(root,text = "Bienvenidos")
label.place(x = 100, y = 100) #remplaza al pack()
label.config(bg = "red", fg = "white", font = ("Curier", 20), textvariable = texto_nuevo)
"""
root.mainloop() | true |
2386a5f92de1288e58fa9088b34e8006cf23fd05 | Python | dmaynard24/hackerrank | /python/practice/math/triangle_quest_2/triangle_quest_2_test.py | UTF-8 | 421 | 2.640625 | 3 | [] | no_license | from io import StringIO
from unittest.mock import patch
import unittest, triangle_quest_2
class TestTriangleQuest2(unittest.TestCase):
def test_triangle_quest_2(self):
with patch('sys.stdout', new=StringIO()) as fake_out:
triangle_quest_2.triangle_quest_2(5)
self.assertEqual(fake_out.getvalue(), '''1
121
12321
1234321
123454321
''')
if __name__ == '__main__':
unittest.main()
| true |
77396f6a3158dabda9a25dca81c0c6bf3dc9bd91 | Python | bsc-wdc/compss | /tests/sources/local/python/2_default_value/src/modules/testObjects.py | UTF-8 | 1,443 | 2.90625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PyCOMPSs default object value Testbench
=======================================
"""
# Imports
import unittest
from pycompss.api.task import task
from pycompss.api.on_failure import on_failure
from pycompss.api.api import compss_wait_on
# Python 2 and 3 compliant
@on_failure(management="IGNORE", returns=0)
@task(returns=int)
def i_will_fail(value):
raise Exception("Task failed on purpose to test returns default.")
return value + 1
# Python 3 with type hinting
# @on_failure(management="IGNORE", returns=0)
# @task()
# def i_will_fail_type_hint(value: int) -> int:
# raise Exception("Task failed on purpose to test returns default.")
# return value + 1
# class foo(object):
# def __init__(self, value):
# self.value = value
# def increment(self):
# self.value += 1
# def get_value(self):
# return self.value
#
# @on_failure(management="IGNORE", returns=0, my_object=foo(0))
# @task(returns=int, my_object=OUT)
# def i_will_fail_out_parameter(value, my_object=foo(1)):
# raise Exception("Task failed on purpose to test out parameter.")
# return value + 1
class testDefaultObjectValue(unittest.TestCase):
def test_default_return_value(self):
initial_value = 1
result = i_will_fail(initial_value)
result = compss_wait_on(result)
assert result == 0, "ERROR: Result error (%s != 0)" % result
| true |
263cbad101e0f4ed16a3f8f2d2c799430f81dc93 | Python | MasoniteFramework/masonite | /src/masonite/notification/drivers/BroadcastDriver.py | UTF-8 | 692 | 2.546875 | 3 | [
"MIT"
] | permissive | """Broadcast notification driver."""
from .BaseDriver import BaseDriver
class BroadcastDriver(BaseDriver):
def __init__(self, application):
self.application = application
self.options = {}
def set_options(self, options):
self.options = options
return self
def send(self, notifiable, notification):
"""Used to broadcast a notification."""
data = self.get_data("broadcast", notifiable, notification)
channels = notification.broadcast_on() or notifiable.route_notification_for(
"broadcast"
)
event = notification.type()
self.application.make("broadcast").channel(channels, event, data)
| true |
90d7de7796b10d577013113cea33563a6156a805 | Python | tmd9936/ys_study | /python_basic/typing_game1.py | UTF-8 | 1,826 | 3.25 | 3 | [] | no_license | import random
import time
import winsound
import datetime
import sqlite3
words = []
n = 1 # 게임 횟수
correct_cnt = 0 # 정답의 개수
with open('./resource/word.txt', 'r') as f:
for word in f:
words.append(word.strip())
input('엔터키를 누르세요!! 게임 시작됩니다!!')
start = time.time()
while n <= 5:
random.shuffle(words)
question = random.choice(words) # 랜덤으로 하나를 추출
print()
print('********* Question ***********')
print(question) # 문제 출력
answer = input() # 사용자 입력값
print()
if str(question).strip() == str(answer).strip():
print('정답!!')
# 정답 사운드
winsound.PlaySound('./resource/good.wav', winsound.SND_FILENAME)
correct_cnt += 1
else:
print('땡!!')
winsound.PlaySound('./resource/bad.wav', winsound.SND_FILENAME)
n += 1
end = time.time()
game_time = end - start # 게임시간
game_time = format(game_time, '.2f') # 소수점 두 째자리 출력
if correct_cnt >= 3:
print('합격')
else:
print('불합격')
print(f'게임시간 : {game_time}초, 정답수 : {correct_cnt}')
# db 연결
conn = sqlite3.connect('./resource/record.db', isolation_level= None)
# Cursor 연결
cur = conn.cursor()
# 테이블 생성
cur.execute("CREATE TABLE IF NOT EXISTS rec_tbl( \
id INTEGER PRIMARY KEY AUTOINCREMENT,\
cor_cnt INTEGER, \
time_rec TEXT,\
reg_date TEXT)")
# DB 기록하기
cur.execute("INSERT INTO rec_tbl(\
'cor_cnt',\
'time_rec',\
'reg_date' \
) VALUES(?,?,?)",(correct_cnt, game_time,
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
conn.close() | true |
30deb9aac72b2d12c1cf28443f68220280afd7a6 | Python | joycezhou47/2048-ai | /game_interface_text.py | UTF-8 | 355 | 3.078125 | 3 | [] | no_license | from game_core import *
game = GameState()
while(not game.game_ended()):
print(game.text_interface())
available_moves = game.available_moves()
command = input("Enter next move {} :".format(available_moves))
while command not in available_moves:
command = input("Enter next move {} :".format(available_moves))
game = game.receive_command(command)
| true |
2ca90e85caec8b2c8fb4e6611b58b02e47315a55 | Python | melgor/metric_learning.pytorch | /modeling/losses/base_loss.py | UTF-8 | 1,284 | 2.875 | 3 | [
"MIT"
] | permissive | import torch
import torch.nn.functional as F
class BaseMetricLoss(torch.nn.Module):
def __init__(self, normalize_embeddings: bool=True):
self.normalize_embeddings = normalize_embeddings
super(BaseMetricLoss, self).__init__()
def compute_loss(self, embeddings: torch.Tensor, labels: torch.Tensor):
raise NotImplementedError
def forward(self, embeddings, labels: torch.Tensor):
"""
:param embeddings: Can be single Tensor (in case of classification loss)
or pair of Tensor (in case of Pair or Triplet Loss)
:param labels: labels for classification or for pair loss
:return: Tensor
"""
if self.normalize_embeddings:
embeddings = F.normalize(embeddings, p=2, dim=1)
loss, logs = self.compute_loss(embeddings, labels)
assert "loss" in logs.keys(), "Each loss function need to return dict with loss key"
# In case of Nan loss set it to zero
# TODO: Rethink two situation. One: no pair and triplet, then zero is fine
# Second: Just model exploiding
if torch.isnan(loss):
loss = torch.zeros([], requires_grad=True)
logs["loss"] = 0
return loss, logs
| true |
0a7cdbfd2d7db2ab07e0e6354210b3cd9e3334f9 | Python | wisdomtohe/CompetitiveProgramming | /Forks/uvapy-master/geometry/p920.py | UTF-8 | 632 | 3.125 | 3 | [
"MIT"
] | permissive | from math import sqrt
num_tests = int(input())
for t in range(num_tests):
num_points = int(input())
points = []
for i in range(num_points):
points.append(list(map(int, input().split())))
points.sort()
# Keep track of max height seen
sunny_len = 0
hmax = 0
for i in range(len(points)-2, -1, -1):
if points[i][1] > hmax:
# add to sunny len
dist = sqrt( (points[i][0] - points[i+1][0])**2 +(points[i][1] - points[i+1][1])**2 )
side = dist * (points[i][1] - hmax) / (points[i][1] - points[i+1][1])
sunny_len += side
hmax = points[i][1]
print("{:.2f}".format(sunny_len))
| true |
c2e4d2deeb9a14e78dca964a83bfe4ea52868462 | Python | hyeinkim1305/Algorithm | /SWEA/D3/SWEA_5215_햄버거다이어트.py | UTF-8 | 758 | 3.15625 | 3 | [] | no_license | '''
1
5 1000
100 200
300 500
250 300
500 1000
400 400
'''
# 굳이 여기서 부분집합들의 조합을 다시 리스트에 추가할 필요 없음. 그러면 런타임에러나옴
T = int(input())
for tc in range(1, T+1):
N, L = map(int, input().split())
grad = [list(map(int, input().split())) for _ in range(N)]
max_score = 0
for i in range(1<<N):
sum_g = 0 # 칼로리 합
score_g = 0 # 점수 합
for j in range(N):
if i & (1<<j):
sum_g += grad[j][1]
score_g += grad[j][0]
if sum_g <= L: # 칼로리 낮은 것만 따로 뽑는다
if score_g > max_score:
max_score = score_g
print('#{} {}'.format(tc, max_score))
| true |
1976b67cb6131deebbdcf0532dd58ea9be705155 | Python | mansonul/events | /config/key_generator.py | UTF-8 | 420 | 2.671875 | 3 | [
"MIT"
] | permissive | import random
import string
def keygen(size=15, chars=string.ascii_letters + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def create_key(instance, size=15):
new_keygen = keygen(size=size)
EmailAppClass = instance.__class__
qs_exists = EmailAppClass.objects.filter(secret=new_keygen).exists()
if qs_exists:
return create_key(size=size)
return new_keygen
| true |
8a6016edd11f2014773a8081a02d9cea7e955cfb | Python | PFCM/nn_rl | /players/nn_player.py | UTF-8 | 1,871 | 2.875 | 3 | [] | no_license | """Contains some things we can use to play a game using neural networks.
"""
import logging
import tensorflow as tf
import players.nets as nets
import players.replaybuffer as replaybuffer
USE_DEFAULT = 0
class NNPlayer(object):
"""Neural network player, loads up a net and maybe remembers how it's been
going"""
def __init__(self, model, env, trajectory_saver=USE_DEFAULT):
"""Makes a new player.
Args:
model (string): which model to use. For options, see `nets.py`.
env (Environment): the gym environment in which we are to operate.
trajectory_saver (Optional): something we can use to save
transitions as we observe them. If None, transitions are not
saved, if 0 then a default ReplayBuffer is created.
"""
self.input_var = nets.get_input_for(env, 1)
self.action_var = tf.squeeze(nets.get_net(model, self.input_var, env))
if trajectory_saver == USE_DEFAULT:
self.trajectory_saver = replaybuffer.ReplayBuffer(
'/tmp/rl/replays')
else:
self.trajectory_saver = trajectory_saver
self._current_state = None
def act(self, obs, session):
"""act on an observation"""
obs = obs.reshape(self.input_var.get_shape().as_list())
self._last_action = session.run(
self.action_var, {self.input_var: obs})
self._last_state = self._current_state
self._current_state = obs
return self._last_action
def reward(self, reward):
"""receive a reward for the last executed action"""
if self.trajectory_saver:
self.trajectory_saver.store(self._current_state,
self._last_action, reward,
self._last_state)
nn_player = NNPlayer
| true |
74f11cb715925efa93cb8e5cf1cac29c33ed4133 | Python | NSSAC/SEA_IPM_SPREAD | /cellular_automata/scripts/convert_name_to_alpha.py | UTF-8 | 7,692 | 2.5625 | 3 | [] | no_license | ###########################################################################
# Reads a file and converts every occurence from specified input format to
# specified output format.
# tags: unicode encode pycountry iso alpha isNumber string2Number
# stringToNumber
###########################################################################
import pycountry as pc
import argparse
import pdb
import logging
import unicodedata
unresolvedSwitch=0
DESC="""Convert between country names and codes.
---------------------------------------------------------------------------
Version history:
2018-03-05:
- added more names to dictionary
- changed alpha3 (2) to alpha_3 (2)
- added isNumber functionality
"""
EPI="""Will use ZZZ or ZZ for those which cannot be resolved."""
ALTERNATE_NAMES = {"Taiwan": "Taiwan, Province of China",
"China, Taiwan Province of": "Taiwan, Province of China",
"China Taiwan Province of": "Taiwan, Province of China",
"China mainland": "China",
"China Hong Kong": "Hong Kong",
"China Hong Kong SAR": "Hong Kong",
"China Macao SAR": "Macao",
"Cte dIvoire": "Ivory Coast",
"United States of America": "United States",
"China, mainland":"China",
"Burma":"Myanmar",
"Republic of Malta":"Malta",
"Kosovo":"Serbia",
"Russia":"Russian Federation",
"Ghambia":"Gambia",
"China, Hong Kong SAR": "Hong Kong",
"Hong Kong SAR": "Hong Kong",
"Cabo Verde": "Cape Verde",
"Netherlands Antilles (former)":"Netherlands",
"The former Yugoslav Republic of Macedonia":"Macedonia, Republic of",
"Republic of Moldova": "Moldova, Republic of",
"China, Macao SAR":"Macao",
"South Korea":"Korea, Republic of",
"Republic of Korea":"Korea, Republic of",
"North Korea":"Korea, Democratic People's Republic of",
"Democratic People's Republic of Korea":"Korea, Democratic People's Republic of",
"Democratic Peoples Republic of Korea":"Korea, Democratic People's Republic of",
"Democratic Republic of the Congo":"Congo, The Democratic Republic of the",
"Micronesia (Federated States of)":"Micronesia, Federated States of",
"Sudan (former)":"Sudan",
"Tanzania":"Tanzania, United Republic of",
"United Republic of Tanzania":"Tanzania, United Republic of",
"Wallis and Futuna Islands":"Wallis and Futuna",
"Iran (Islamic Republic of)":"Iran, Islamic Republic of",
"Iran":"Iran, Islamic Republic of",
"Venezuela":"Venezuela, Bolivarian Republic of",
"Venezuela (Bolivarian Republic of)":"Venezuela, Bolivarian Republic of",
"Vietnam":"Viet Nam",
"Syria":"Syrian Arab Republic",
"Yugoslavia":"Serbia",
"Antigua & Barbuda":"Antigua and Barbuda",
"Brunei":"Brunei Darussalam",
"East Timor":"Timor-Leste",
"Laos":"Lao People's Democratic Republic",
"Lao Peoples Democratic Republic":"Lao People's Democratic Republic",
"Macedonia":"Macedonia, Republic of",
"Moldova":"Moldova, Republic of",
"St. Kitts and Nevis":"Saint Kitts and Nevis",
"St. Lucia":"Saint Lucia",
"St. Vincent and the Grenadines":"Saint Vincent and the Grenadines",
"British Virgin Islands":"Virgin Islands, British",
"Occupied Palestinian Territory":"Palestine, State of",
"Bolivia":"Bolivia, Plurinational State of",
"Bolivia (Plurinational State of)":"Bolivia, Plurinational State of"}
UNRESOLVED_NAMES=["Ivory Coast", "Cte dIvoire"]
def isNumber(s):
try:
float(s)
return True
except ValueError:
pass
try:
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
def alpha3_to_alpha2(string,ignoreUnresolved):
if isNumber(string):
return string
try:
return pc.countries.get(alpha_3=string).alpha_2
except:
global unresolvedSwitch
unresolvedSwitch=1
if ignoreUnresolved:
return string
else:
return "ZZ" #Unspecified
def name_to_alpha3(string,ignoreUnresolved):
if isNumber(string):
return string
try:
return pc.countries.get(name=string).alpha_3
except:
try:
return pc.countries.get(name=ALTERNATE_NAMES[string]).alpha_3
except:
if string in UNRESOLVED_NAMES:
return "CIV"
if not ignoreUnresolved:
logging.warning('Unresolved: %s' %string)
global unresolvedSwitch
unresolvedSwitch=1
if ignoreUnresolved:
return string
else:
return "ZZZ" #Unspecified
def name_to_alpha2(string,ignoreUnresolved):
if isNumber(string):
return string
try:
return pc.countries.get(name=string).alpha_2
except:
try:
return pc.countries.get(name=UNRESOLVED_NAMES[string]).alpha_2
except:
if string=="Ivory Coast":
return "CI"
if not ignoreUnresolved:
logging.warning('Unresolved: %s' %string)
global unresolvedSwitch
unresolvedSwitch=1
if ignoreUnresolved:
return string
else:
return "ZZ" #Unspecified
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description=DESC,epilog=EPI)
parser.add_argument("input_file",help="the input file that needs to be converted")
parser.add_argument("-f","--from_format",default="name",action="store",help="format \
of input file (currently supports only \'name\' and \'alpha3\')")
parser.add_argument("-t","--to_format",default="alpha3",action="store",help="format \
of output file (currently supports alpha2/alpha3)")
parser.add_argument("-D","--dump_country_names",action="store_true",help="dumps all country names. Useful for debugging. Ignores input file.")
parser.add_argument("-d","--delimiter",default=",",action="store",help="the delimiter which separates each element")
parser.add_argument("-i","--ignore_unresolved",action="store_true",help="If true, suppresses warnings and prints unresolved strings as it is. Else, every unresolved string will be replaced by ZZZ.")
# extract parameters
args = parser.parse_args()
# resolve -D parameter
if args.dump_country_names:
for cntry in pc.countries:
print cntry.name.encode('ascii','ignore')
return
with open(args.input_file) as f:
lines=f.readlines()
for line in lines:
outLine='';
for name in line.rstrip('\n').split(args.delimiter):
asciiName=name
if args.from_format=='name' and args.to_format=='alpha3':
outLine+=name_to_alpha3(asciiName,args.ignore_unresolved)+','
elif args.from_format=='name' and args.to_format=='alpha2':
outLine+=name_to_alpha2(asciiName,args.ignore_unresolved)+','
elif args.from_format=='alpha3' and args.to_format=='alpha2':
outLine+=alpha3_to_alpha2(asciiName,args.ignore_unresolved)+','
else:
logging.error('Unsupported format %s.' %args.to_format)
exit(1)
print(outLine.rstrip(','))
if unresolvedSwitch:
logging.warning("There were unresolved country names.")
if __name__ == "__main__":
main()
| true |
69ea9f5f44b21898e894f3cf213be2e01a4edd5e | Python | pgroupATusc/Augmented-Neural-Network | /performance_prediction/cpu/MV/Matrix_Vector_CPU_Cons.py | UTF-8 | 3,359 | 2.75 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn import preprocessing
FILENAME = "matrix_vector_cpu_500_points_Xeon.csv"
data = np.array(pd.read_csv(FILENAME))
train_data = data[:250]
test_data = data[250:]
train_feature = np.array(train_data[:, [5]])
train_label = np.array(train_data[:, [0]])
test_x = np.array(test_data[:, [5]])
print(test_data.shape)
x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])
train_feature = preprocessing.scale(train_feature)
test_xs = preprocessing.scale(test_x)
print(test_xs.shape)
w = tf.Variable(np.random.normal(), name='W')
b = tf.Variable(np.random.normal(), name='b')
prediction = tf.add(tf.multiply(w,x), b)
loss = tf.reduce_mean(tf.square(y - prediction))
saver = tf.train.Saver()
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
total_parameters = 0
for variable in tf.trainable_variables():
shape = variable.get_shape()
print(shape)
print(len(shape))
variable_parameters = 1
for dim in shape:
print(dim)
variable_parameters *= dim.value
print(variable_parameters)
total_parameters += variable_parameters
print("total parameters: ", total_parameters)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(loss, feed_dict={x: train_feature, y: train_label}))
for i in range(10000):
sess.run(train_step, feed_dict={x: train_feature, y: train_label})
if i % 200 == 0:
print(i)
current_loss, current_A, current_b = sess.run([loss, w, b], feed_dict={
x: train_feature,
y: train_label
})
print(i, current_loss, current_A, current_b)
prd = sess.run(prediction, feed_dict={x: test_xs})
f = open('re.txt', 'w')
for i in range(test_data.shape[0]):
f.writelines(str(prd[i][0]) + "\n")
f.close()
# -----evaluation-----#
import math
import statistics
sum_ae = 0.0
sum_ape = 0.0
sum_aape = 0.0
truth_value_list = []
for i in range(test_data.shape[0]):
truth_value = test_data[:, [0]][i][0]
sum_ae += abs(prd[i][0] - test_data[:, [0]][i][0])
truth_value_list.append(truth_value)
print("MAE: ", sum_ae / test_data.shape[0])
c = 0
# decide the percentage to drop
percentage = 0.3
threshold = sorted(truth_value_list)[int(len(test_data)*percentage) - 1]
median = statistics.median(truth_value_list)
for i in range(test_data.shape[0]):
pred_value = prd[i][0]
truth_value = test_data[:, [0]][i][0]
ape = (abs(prd[i][0] - test_data[:, [0]][i][0]) / test_data[:, [0]][i][0])
aape = math.atan(abs(prd[i][0] - test_data[:, [0]][i][0]) / test_data[:, [0]][i][0])
# valid rule
if truth_value > threshold:
sum_ape += ape
c += 1
sum_aape += aape
print("MAPE: ", sum_ape / c)
print("MAAPE: ", sum_aape / test_data.shape[0])
print("threshold value:", threshold)
print("truth median:", median)
print("range from", min(truth_value_list), "to", max(truth_value_list))
print("valid points (MAPE):", c, "out of", test_data.shape[0])
# ------------------#
saver.save(sess, "model/my-model")
| true |
2eef2e896dcfc9800094b7b3648699287f834b0d | Python | jeeveshlodhi/ImageCrawler | /crawler.py | UTF-8 | 801 | 2.59375 | 3 | [] | no_license | import requests as req
import time
import multiprocessing as mp
def download(s,e):
link = "https://erp.psit.in/assets/img/Simages/"
for i in range(s,e):
r2 = req.get(link+str(i)+".jpg")
if r2.status_code == 200:
with open ("images/"+str(i)+".jpg",'wb')as f:
f.write(r2.content)
else:
print("Link not found")
if __name__=="__main__":
start = time.process_time()
cores = 8
pool = mp.Pool(cores)
tasks = []
l=26900
u=27000
for i in range(u,l,cores):
tasks.append([i,i+cores-1])
for task in tasks:
s,e = task[0],task[1]
pool.apply_async(download, args = (s,e))
pool.close()
pool.join()
print(time.process_time() - start)
| true |
14387d72de14065a52df07f4197b1d644e27de1d | Python | banggeut01/algorithms | /SWEA/d1/2019.py | UTF-8 | 102 | 3.453125 | 3 | [] | no_license | # 더블더블
num = int(input())
for i in range(0, num+1):
print('{} '.format(pow(2, i)), end='') | true |
2e85a0db9076b6e39194fdd2b26fb19553f93822 | Python | inwk6312winter2019/openbookfinal-maulikpatel1711 | /task14.py | UTF-8 | 402 | 3.609375 | 4 | [] | no_license | from collections import Dict
import operator
def character_word_count(Book):
mylist = dict()
mopen = open(Book,"r")
for m in mopen:
m = m.strip()
m = m.split()
for r in m:
if r not in mylist:
mylist[r] = len(r)
return mylist
b = input("Enter book title:")
print("Output is:", character_word_count(b))
print("count character words")
| true |
2568175dfff7ca8c1029bcaab08c49e6c7f9fece | Python | jatinkhurana30/Regex-Flaskapp | /SolvedCodes/Answer2.py | UTF-8 | 1,587 | 2.65625 | 3 | [] | no_license | import re
import sys
def find_answer2():
log_file = open("assignment.log")
log_data = log_file.readlines()
log_list = {
"machine1":[],
"machine2":[]
}
regexp_machine1 = re.compile('Ping Identity')
regexp_machine2 = re.compile('PulseSecure')
for line in log_data:
if regexp_machine1.search(line):
logs1 = []
dvc_host = re.findall('(?<=dvchost=).+?(?= )', line)[0]
src = re.search("(?<=src=)([0-9]{1,3}[.][0-9]{1,3})[.][0-9]{1,3}[.][0-9]{1,3}(?= )", line).group(0)
print('dvchost =' + dvc_host)
logs1.append('dvchost =' + dvc_host)
print('src = ' + src)
logs1.append('src = ' + src)
log_list["machine1"].append(logs1)
print()
elif regexp_machine2.search(line):
logs2 = []
time = re.findall('(?<=time="").+?(?=")', line)[0]
vpn = re.findall('(?<=vpn=).+?(?= )', line)[0]
user = re.findall('(?<=user=).+?(?= )', line)[0]
user_activity = re.findall('(?<=msg="").+(?="")', line)[0]
logs2.append('Time = '+ time)
logs2.append('VPN = '+ vpn)
logs2.append('User = '+ user)
logs2.append('Activity Done '+ user_activity)
print('Time = ', time)
print('VPN = ', vpn)
print('User = ', user)
print('Activity Done ', user_activity)
print()
log_list["machine2"].append(logs2)
else:
continue
log_file.close()
return log_list | true |
7a90dc46b077723f8781995baaef672037c506c7 | Python | LalitGsk/Programming-Exercises | /Leetcode/jumpGameII.py | UTF-8 | 842 | 3.71875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 31 23:24:55 2019
@author: lalit
Given an array of non-negative integers, you are initially positioned at the first index of the array.
Each element in the array represents your maximum jump length at that position.
Your goal is to reach the last index in the minimum number of jumps.
"""
class Solution:
def jump(self, nums):
if nums[0] == 0:
return 0
if len(set(nums)) == 1:
return len(nums)//nums[0]-1
goal = len(nums)-1
time = 0
while goal > 0:
for i,j in list(enumerate(nums)):
if i+j > goal or i+j == goal:
goal = i
time +=1
break
return time
a = [2,3,1,1,4]
s = Solution()
print(s.jump(a)) | true |
b54d23ac2f6aba8170f167333391f4ad335d8d52 | Python | Aasthaengg/IBMdataset | /Python_codes/p02418/s275322427.py | UTF-8 | 99 | 2.828125 | 3 | [] | no_license | def main():
tmp = input()*2
tmq = input()
print('Yes' if tmq in tmp else 'No')
main()
| true |
b54b4fddc268a2a3e13c8de069113a261ab68320 | Python | flyyee/cep-surveyanalysis | /1.0/mf.py | UTF-8 | 13,475 | 3.28125 | 3 | [
"MIT"
] | permissive | import openpyxl
import statistics
from collections import Counter
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
# filters warning that importing gensim for the first time causes
from gensim.summarization import keywords
import datetime
import matplotlib.pyplot as plt
import os
import sys
def is_number(s):  # tests if input is a number
    """Return True if *s* can be parsed as a float, False otherwise.

    Accepts any object: inputs that float() rejects simply yield False.
    Note float(None) (and other non-string, non-numeric objects) raises
    TypeError rather than ValueError, so both are caught — otherwise an
    empty spreadsheet cell passed directly would crash the check.
    """
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        # TypeError: non-string/non-numeric object (e.g. None)
        # ValueError: a string that is not numeric (e.g. "abc")
        return False
class result:
    """Statistics for one spreadsheet column (one survey question).

    Workflow: getdata() records the header and classifies the cell types,
    set() computes the statistics, the reveal*() methods print (and
    optionally plot) them, and exportall() appends a plain-text summary
    of everything to a file.
    """

    def __init__(self):
        # Placeholders until getdata()/set() run.
        self.mean = 0
        self.median = 0
        self.mode = None
        self.optcount = None            # Counter mapping response -> frequency
        self.isnumericaldataset = True  # True while every response parses as a number
        self.isdatetimedataset = False  # True when any response is a datetime
        self.datasetname = ""           # the column header, i.e. the question
        self.datasetlength = 0          # number of responses (header excluded)
        self.keywords = []              # gensim (keyword, score) pairs, or an error message string

    def getdata(self, data):
        """Record the header (data[0]) and classify the responses.

        The type flags decide which viewing options the menu offers later:
        numeric columns get mean/median/mode, free-text columns get
        keywords, datetime columns skip keyword extraction.
        """
        self.datasetname = data[0]
        # Numeric only if every response parses as a float.
        for x in range(1, len(data)):
            if not is_number(str(data[x])):
                self.isnumericaldataset = False
                break
        # Datetime if any response is a datetime instance.
        for x in range(1, len(data)):
            if isinstance(data[x], datetime.datetime):
                self.isdatetimedataset = True
                break
        # Slot 0 holds the question, so the sample size is one less.
        self.datasetlength = len(data) - 1

    def revealmode(self, verbose=True):
        """Print the mode.  *verbose* is accepted only for signature parity
        with the other reveal methods; a lone mode has no plot."""
        print("Mode: " + str(self.mode))
        print("\n")

    def revealmeanandmedianandmode(self, verbose=True):
        """Print mean, median and mode; when *verbose*, also show a bar chart.

        Kept separate from revealmode() because non-numeric datasets may
        have a mode but never a mean or median.
        """
        print("Mean: " + str(self.mean))
        print("Median: " + str(self.median))
        print("Mode: " + str(self.mode))
        print("\n")
        if verbose:
            # NOTE(review): float(self.mode) raises ValueError when the mode
            # holds one of the error-message strings (e.g. "No unique mode.");
            # callers currently use verbose=True only on numeric datasets.
            labels = ["Mean", "Median", "Mode"]
            values = [float(self.mean), float(self.median), float(self.mode)]
            plt.bar(labels, values)
            plt.title("Mean, median and mode of options for: " + self.datasetname + "\n \n")
            plt.show()

    def revealoptcount(self, verbose=True):
        """Print each response's count and percentage of the total; when
        *verbose*, also show a pie chart."""
        print("Popularity of options: ")
        for key in self.optcount.keys():
            # Share of the total responses held by this option.
            percent = int(self.optcount[key]) / self.datasetlength * 100
            percent = '%.2f' % percent  # two decimal places
            print("Option " + str(key) + ": " + str(self.optcount[key]) +
                  " times: " + str(percent) + "% of total responses")
        print("\n")
        if verbose:
            exp_vals = self.optcount.values()
            exp_labels = self.optcount.keys()
            plt.axis("equal")
            plt.pie(exp_vals, labels=exp_labels, autopct='%1.1f%%', radius=1.2)
            plt.title("Popularity of options for: " + self.datasetname + "\n \n")
            plt.show()

    def revealkeywords(self, verbose=True):
        """Print the extracted keywords with their scores, or the reason why
        none are available."""
        print("Keywords: ")
        if self.keywords == "Too few keywords. Index error." or self.keywords == "No keywords for a datetime dataset."\
                or self.keywords == "No keywords. List empty.":
            print(self.keywords)
        else:
            for kw in self.keywords:  # (keyword, score) pairs from gensim
                print("Score of keyword " + kw[0] + ": " + str(kw[1]))
        print("\n")

    def set(self, data):
        """Compute mean/median/mode, option counts and (for free-text
        columns) keywords.  data[0] is the header and is skipped.

        Fix: works on a copy of the responses instead of ``del data[0]``,
        so the caller's list is no longer mutated as a side effect.
        """
        values = data[1:]
        if not self.isnumericaldataset:  # no mean for non-numeric columns
            self.mean = "Unable to get mean from dataset. Type incompatible."
        else:
            self.mean = statistics.mean(values)
            self.mean = '%.2f' % float(self.mean)  # round to two decimal places
        if not self.isnumericaldataset:  # no median either
            self.median = "Unable to get median from dataset. Type incompatible."
        else:
            self.median = statistics.median(values)
        try:
            self.mode = statistics.mode(values)
        except statistics.StatisticsError:  # no single most common value
            self.mode = "No unique mode."
        except TypeError:  # dataset unsuitable for a mode
            self.mode = "Unable to get mode from dataset. Type incompatible."
        if not self.isnumericaldataset and not self.isdatetimedataset:
            # Free-text column: join the responses into one string and let
            # gensim pick out up to ten scored keywords.
            stringifieddata = " "
            for word in values:
                stringifieddata += word + " "
            try:
                self.keywords = keywords(text=stringifieddata, words=10, scores=True, lemmatize=True)
                if not self.keywords:  # empty list -> nothing extracted
                    self.keywords = "No keywords. List empty."
            except IndexError:  # gensim found too few candidates
                self.keywords = "Too few keywords. Index error."
        elif self.isdatetimedataset:  # datetime columns carry no keywords
            self.keywords = "No keywords for a datetime dataset."
        self.optcount = Counter(values)  # frequency of each unique response

    def exportall(self, path, datasetnum):
        """Append this dataset's non-verbose summary to the file at *path*.

        Fix: the original opened the file inside the try block but closed it
        in ``finally``, so a failed open() raised NameError on the undefined
        ``f``; a with-block closes the file safely, and stdout is restored
        in every case.
        """
        stdout = sys.stdout  # remember the real stdout
        try:
            with open(path, "a+") as f:  # append; create if missing
                sys.stdout = f  # redirect print output into the file
                print("Dataset " + str(datasetnum) + ": " + self.datasetname)
                # Non-verbose calls so no matplotlib windows pop up during export.
                if self.isnumericaldataset:
                    self.revealmeanandmedianandmode(False)
                    self.revealoptcount(False)
                else:
                    self.revealkeywords(False)
                    self.revealmode(False)
                    self.revealoptcount(False)
                print("\n\n\n")
        finally:
            sys.stdout = stdout  # always restore stdout
def export_main(results):
    """Write every dataset's non-verbose analysis to
    ``<cwd>/<timestamp>/analysis.txt``.

    A fresh directory named after the current date/time is created under
    the directory the program was started in, and each result object
    appends its summary to the single analysis file inside it.
    """
    currentdir = os.getcwd()  # directory the program was started in
    currentdatetime = datetime.datetime.now()
    # ':' is not allowed in Windows file names, so swap it for '-'.
    currentdatetime = str(currentdatetime).replace(':', "-")
    newfolderpath = os.path.join(currentdir, str(currentdatetime))
    if not os.path.exists(newfolderpath):
        os.makedirs(newfolderpath)
    # Fix: join with os.path.join instead of the hard-coded Windows '\\'
    # separator, so the export also works on non-Windows platforms.
    exportfile = os.path.join(newfolderpath, "analysis.txt")
    for x, dataset_result in enumerate(results):  # renamed: avoid shadowing the result class
        dataset_result.exportall(exportfile, x)
# ---- interactive entry point ----
# Prompt until an existing workbook file name is entered.
while True:
    wbname = input("What is the name of the excel workbook? (.xls or .xlsx): ")
    try:
        with open(wbname) as f:
            break  # file exists -> exit the loop
    except FileNotFoundError:  # workbook does not exist
        print("File does not exist. Check if you entered the right extension.")
wb = openpyxl.load_workbook(wbname)
# Prompt until an existing sheet name (case-sensitive) is entered.
while True:
    sheetname = input("What is the name of the sheet?: ")
    if sheetname in wb.get_sheet_names():
        sheet = wb.get_sheet_by_name(sheetname)  # load the excel sheet
        break  # exits the loop
    else:
        print("No such sheet. Check the capitalisation.")
print("Parsing " + sheetname + " from " + wbname)
# Sheet dimensions: column count from the length of row 1,
# row count from the length of column A.
numdatasets = len(sheet['1'])
numrows = len(sheet['A'])
data = []
results = [result() for x in range(numdatasets)]  # one result object per column
for x in range(1, numdatasets + 1):  # loop over the datasets (columns)
    data.clear()  # clear the dataset from the previous column
    cures = results[x - 1]  # current result object
    for n in range(1, numrows + 1):  # loop over each row in the column
        data.append(sheet.cell(row=n, column=x).value)  # collect the cell value
    # Feed the collected column into the result object.
    cures.getdata(data)
    cures.set(data)
# The loop over the sheet is complete.
print("Parsing of " + str(numdatasets) + " datasets complete!")
# Export everything to a timestamped txt document.
export_main(results)
# Interactive menu: keep prompting the user until they type "exit".
while True:
    print("Which dataset would you like to view more information about?")
    for x in range(numdatasets):
        print("Dataset " + str(x + 1) + ": " + results[x].datasetname)  # list all datasets
    print("\nTyping exit exits the program.")
    datasetchoice = input("Option: ")
    # Sanitise user input: numeric strings become ints, everything else
    # falls through to the "exit"/unknown handling below.
    if is_number(datasetchoice):
        datasetchoice = int(datasetchoice)
    if datasetchoice in range(1, numdatasets + 1):  # a valid dataset number
        while True:
            print("Choice: Dataset " + str(datasetchoice) + ": " + results[datasetchoice - 1].datasetname)
            # User input is one more than the index of the result object.
            if results[datasetchoice - 1].isnumericaldataset:
                # Numeric column: offer mean/median/mode and option counts.
                print("This is a numerical dataset.")
                print("Viewing mean, median and mode (1) are recommended.")
                print("Viewing the number of responses (2) per option is also available.")
                print("Typing change brings you back to the dataset selection menu.")
                choice = input()
                if is_number(choice):  # sanitise input
                    choice = int(choice)
                if choice in range(1, 3):
                    if choice == 1:
                        results[datasetchoice - 1].revealmeanandmedianandmode()
                    elif choice == 2:
                        results[datasetchoice - 1].revealoptcount()
                elif choice == "change":
                    break  # back to the dataset selection menu
                else:
                    print("Unknown option.")
            else:
                # Text column: offer keywords, option counts and the mode.
                print("This dataset contains strings.")
                print("Viewing summary of keywords (1) is recommended.")
                print("Viewing number of responses per option (2) or the mode response (3) are also available.")
                choice = input("Option: ")
                if is_number(choice):  # sanitise input
                    choice = int(choice)
                if choice in range(1, 4):
                    if choice == 1:
                        results[datasetchoice - 1].revealkeywords()
                    elif choice == 2:
                        results[datasetchoice - 1].revealoptcount()
                    elif choice == 3:
                        results[datasetchoice - 1].revealmode()
                elif choice == "change":
                    break  # back to the dataset selection menu
                else:
                    print("Unknown option.")
    elif datasetchoice == "exit":
        break  # leave the outer loop and end the program
    else:
        print("Unknown option.")
print("Program exiting.")
| true |
797ff82e63dedc5f9b015f8f226e4f503f4dde7d | Python | stanfordmlgroup/CheXaid | /eval/below_curve_counter.py | UTF-8 | 2,352 | 3.078125 | 3 | [
"MIT"
] | permissive | """RAD below curve counter"""
import sklearn.metrics as sk_metrics
class BelowCurveCounter(object):
    """Counts how many radiologists' operating points fall below a model's
    ROC / precision-recall curve for one task."""

    def __init__(self, rad_perf, task_name):
        # rad_perf: table of per-radiologist metrics; ROC()/PR() re-index it
        # by its 'Score' column before looking rows up.
        self.rad_perf = rad_perf
        self.task_name = task_name

    def ROC(self, ground_truth, predictions):
        """Return how many of Rad1-3 have their (1 - specificity,
        sensitivity) point on or below the model's ROC curve."""
        self.rad_perf.index = self.rad_perf['Score']
        num_below_roc = 0
        fpr, tpr, threshold = sk_metrics.roc_curve(ground_truth, predictions)
        for rad_name in ['Rad1', 'Rad2', 'Rad3']:
            rad_sensitivity = self.rad_perf.loc[self.task_name + ' Sensitivity', rad_name]
            rad_specificity = self.rad_perf.loc[self.task_name + ' Specificity', rad_name]
            rad_vertical_projection_on_curve, rad_horizontal_projection_on_curve = self._project(fpr, tpr, 1 - rad_specificity, rad_sensitivity)
            # The curve is above the radiologist when its height at the
            # radiologist's FPR meets or exceeds the radiologist's TPR.
            if (rad_vertical_projection_on_curve >= rad_sensitivity):
                num_below_roc += 1
        return num_below_roc

    def PR(self, ground_truth, predictions):
        """Return how many of Rad1-3 have their (recall, precision) point
        on or below the model's precision-recall curve."""
        self.rad_perf.index = self.rad_perf['Score']
        num_below_pr = 0
        precision, recall, threshold = sk_metrics.precision_recall_curve(ground_truth, predictions)
        for rad_name in ['Rad1', 'Rad2', 'Rad3']:
            rad_sensitivity = self.rad_perf.loc[self.task_name + ' Sensitivity', rad_name]
            rad_precision = self.rad_perf.loc[self.task_name + ' Precision', rad_name]
            rad_vertical_projection_on_curve, rad_horizontal_projection_on_curve = self._project(recall, precision, rad_sensitivity, rad_precision)
            if (rad_vertical_projection_on_curve >= rad_precision):
                num_below_pr += 1
        return num_below_pr

    @staticmethod
    def _project(X, Y, rad_x, rad_y):
        """Find the points on the curve (X, Y) closest to (rad_x, rad_y)
        in the vertical and horizontal directions.

        Scans for the curve segment straddling rad_x (resp. rad_y) and
        projects onto it; if no segment straddles the point, the first or
        last segment is used.

        Fix: the original loop conditions parsed as
        ``(in_bounds and both_above) or both_below`` because ``and`` binds
        tighter than ``or``, so when the point lay past the end of the curve
        the scan kept advancing and raised IndexError; parenthesizing as
        ``in_bounds and (both_above or both_below)`` bounds the scan.
        """
        x = 0
        y = 0
        while x + 2 < len(X) and ((X[x] > rad_x and X[x + 1] > rad_x) or
                                  (X[x] < rad_x and X[x + 1] < rad_x)):
            x += 1
        while y + 2 < len(Y) and ((Y[y] > rad_y and Y[y + 1] > rad_y) or
                                  (Y[y] < rad_y and Y[y + 1] < rad_y)):
            y += 1
        # NOTE(review): these projections interpolate without dividing by the
        # segment length (X[x+1] - X[x]) / (Y[y+1] - Y[y]); they are exact
        # only for unit-spaced samples. Confirm whether true linear
        # interpolation was intended before changing the numerics.
        rad_vertical_projection_on_curve = (Y[x + 1] - Y[x]) * (rad_x - X[x]) + Y[x]
        rad_horizontal_projection_on_curve = (X[y + 1] - X[y]) * (rad_y - Y[y]) + X[y]
        return rad_vertical_projection_on_curve, rad_horizontal_projection_on_curve
| true |
820b44e1b40fcd023aa2b29c9049c6b12702adfc | Python | Shinsungjun/MotionPlanner | /scripts/MotionPlanner/local_planner.py | UTF-8 | 4,631 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python3
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Assignments Solution Author: Engin Bozkurt
Motion Planning for Self-Driving Cars
Aug 24, 2019
"""
# Author: Ryan De Iaco
# Additional Comments: Carlos Wang
# Date: October 29, 2018
import numpy as np
import copy
import MotionPlanner.path_optimizer as path_optimizer
import MotionPlanner.collision_checker as collision_checker
import MotionPlanner.velocity_planner as velocity_planner
from math import sin, cos, pi, sqrt
import time
class LocalPlanner:
    """Local motion planner: generates a fan of laterally offset goal
    states around a lookahead waypoint and fits spiral paths to them.
    Owns the path optimizer, collision checker and velocity planner
    built from the supplied tuning parameters."""
    def __init__(self, num_paths, path_offset, circle_offsets, circle_radii,
                 path_select_weight, time_gap, a_max, slow_speed,
                 stop_line_buffer):
        # Number of alternative paths and the lateral spacing between them.
        self._num_paths = num_paths
        self._path_offset = path_offset
        self._path_optimizer = path_optimizer.PathOptimizer()
        # Collision checker: circle approximation of the vehicle footprint.
        self._collision_checker = \
            collision_checker.CollisionChecker(circle_offsets,
                                               circle_radii,
                                               path_select_weight)
        self._velocity_planner = \
            velocity_planner.VelocityPlanner(time_gap, a_max, slow_speed,
                                             stop_line_buffer)
        self._prev_best_path = None
        # Timing bookkeeping used to measure the optimizer's runtime.
        self.current_time = 0
        self.prev_time = 0

    def get_goal_state_set(self, goal_index, goal_state, waypoints, ego_state):
        """Build the set of laterally offset goal states around the goal
        waypoint, expressed in the ego vehicle's local frame.

        goal_index -- index of the chosen goal waypoint in *waypoints*
        goal_state -- [x, y, v] goal in the global frame
        waypoints  -- list of [x, y, v] waypoints (global frame)
        ego_state  -- [x, y, yaw] of the ego vehicle (global frame)
        Returns a list of self._num_paths goal states [x, y, t, v],
        offset perpendicular to the path heading at the goal.
        """
        # Path heading at the goal: from the segment toward the next
        # waypoint, or from the previous one when at the last waypoint.
        if goal_index < len(waypoints)-1:
            delta_x = waypoints[goal_index+1][0] - waypoints[goal_index][0]
            delta_y = waypoints[goal_index+1][1] - waypoints[goal_index][1]
        else:
            delta_x = waypoints[goal_index][0] - waypoints[goal_index-1][0]
            delta_y = waypoints[goal_index][1] - waypoints[goal_index-1][1]
        heading = np.arctan2(delta_y,delta_x)
        # Translate the goal into ego-centred coordinates (copy so the
        # caller's goal_state is left untouched) ...
        goal_state_local = copy.copy(goal_state)
        goal_state_local[0] -= ego_state[0]
        goal_state_local[1] -= ego_state[1]
        # ... then rotate by -yaw into the ego frame.
        # (For smoothing; assumes reasonably dense waypoints.)
        theta = -ego_state[2]
        goal_x = goal_state_local[0] * cos(theta) - goal_state_local[1] * sin(theta)
        goal_y = goal_state_local[0] * sin(theta) + goal_state_local[1] * cos(theta)
        goal_t = heading - ego_state[2]
        # Goal speed is frame-independent.
        goal_v = goal_state[2]
        # Wrap the goal heading to [-pi, pi].
        if goal_t > pi:
            goal_t -= 2*pi
        elif goal_t < -pi:
            goal_t += 2*pi
        # Lay out the goals perpendicular to the goal heading, centred on
        # the original goal (offset 0 is the middle path).
        goal_state_set = []
        for i in range(self._num_paths):
            offset = (i - self._num_paths // 2) * self._path_offset
            x_offset = offset * cos(goal_t + pi/2)
            y_offset = offset * sin(goal_t + pi/2)
            goal_state_set.append([goal_x + x_offset,
                                   goal_y + y_offset,
                                   goal_t,
                                   goal_v])
        return goal_state_set

    def plan_paths(self, goal_state_set,ego_state):
        """Fit a spiral path to every goal state.

        Returns (paths, path_validity): *paths* holds only the spirals that
        converged; *path_validity* has one boolean per goal state, in order.
        """
        self.prev_time = time.time()
        paths = []
        path_validity = []
        for goal_state in goal_state_set:
            path = self._path_optimizer.optimize_spiral(goal_state[0],
                                                        goal_state[1],
                                                        goal_state[2],ego_state)
            # Reject the spiral when its endpoint misses the requested goal
            # by a large margin, i.e. the optimizer failed to converge.
            if np.linalg.norm([path[0][-1] - goal_state[0],
                               path[1][-1] - goal_state[1],
                               path[2][-1] - goal_state[2]]) > 1000:
                path_validity.append(False)
            else:
                paths.append(path)
                path_validity.append(True)
        self.current_time = time.time()
        #print("in path opti : ", 1/(self.current_time - self.prev_time))
        return paths, path_validity
def transform_paths(paths, ego_state):
    """Convert paths from the ego vehicle's local frame to the global frame.

    Each path is [x_points, y_points, t_points] relative to the ego
    vehicle; ego_state is [x, y, yaw] in the global frame.  Returns the
    paths rotated by the ego yaw and translated to the ego position.
    """
    ego_x = ego_state[0]
    ego_y = ego_state[1]
    ego_yaw = ego_state[2]
    cos_yaw = cos(ego_yaw)
    sin_yaw = sin(ego_yaw)
    transformed_paths = []
    for xs, ys, ts in paths:
        global_xs = []
        global_ys = []
        global_ts = []
        for px, py, pt in zip(xs, ys, ts):
            global_xs.append(ego_x + px * cos_yaw - py * sin_yaw)
            global_ys.append(ego_y + px * sin_yaw + py * cos_yaw)
            global_ts.append(pt + ego_yaw)
        transformed_paths.append([global_xs, global_ys, global_ts])
    return transformed_paths
| true |
fe5672255ad3b21d1acec6e8870f0ea833573506 | Python | nado6miri/deeplearning | /LG_learning_TensorFlow/20181129/nm_model/mnist.py | UTF-8 | 3,382 | 2.703125 | 3 | [] | no_license | import tensorflow as tf
import random
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
# Ways to improve accuracy (translated from the original Korean notes):
#  - add hidden layers
#  - use ReLU (sigmoid performs poorly as the network gets deeper, so use
#    ReLU or another activation instead)
#  - initialise the weight variables well - tf.contrib.layers.xavier_initializer()
#  - increase the node count, e.g. 256 -> 512
#  - apply dropout against overfitting (keep 70% of nodes while training,
#    100% while testing)
tf.set_random_seed(777)  # fixed seed so runs are reproducible
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability, fed at run time
# Hyperparameters.
learning_rate = 0.001
training_epochs = 15
batch_size = 100
# Inputs: 28x28 images flattened to 784 values; one-hot labels over 10 digits.
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
# Hidden layers 784 -> 256 -> 256 -> 256 -> 256: Xavier-initialised weights,
# ReLU activations, dropout after each layer.
W1 = tf.get_variable("W1", shape=[784, 256], initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([256]))
L1 = tf.nn.relu(tf.add(tf.matmul(X, W1), b1))
L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
W2 = tf.get_variable("W2", shape=[256, 256], initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([256]))
L2 = tf.nn.relu(tf.add(tf.matmul(L1, W2), b2))
L2 = tf.nn.dropout(L2, keep_prob=keep_prob)
W3 = tf.get_variable("W3", shape=[256, 256], initializer=tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([256]))
L3 = tf.nn.relu(tf.add(tf.matmul(L2, W3), b3))
L3 = tf.nn.dropout(L3, keep_prob=keep_prob)
W4 = tf.get_variable("W4", shape=[256, 256], initializer=tf.contrib.layers.xavier_initializer())
b4 = tf.Variable(tf.random_normal([256]))
L4 = tf.nn.relu(tf.add(tf.matmul(L3, W4), b4))
L4 = tf.nn.dropout(L4, keep_prob=keep_prob)
# Output layer: 256 -> 10 raw logits.
W = tf.get_variable("W", shape=[256, 10], initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.random_normal([10]))
hypo = tf.add(tf.matmul(L4, W), b)
model = tf.nn.softmax(hypo)  # softmax probabilities (not used further below)
# Softmax cross-entropy on the logits, minimised with Adam.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=hypo, labels=Y)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize((cost))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Mini-batch training loop: average the batch costs per epoch.
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feed_dict = { X: batch_xs, Y: batch_ys, keep_prob: 0.7 }  # training: keep only a random 70% of nodes
        c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
        avg_cost += c / total_batch
    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
print('Learning Finished!')
# Evaluate accuracy on the test set (dropout disabled: keep_prob = 1).
prediction = tf.argmax(hypo, 1)
target = tf.argmax(Y, 1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, target), tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={ X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1 }))  # testing uses all nodes
# Classify one random test image and display it.
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r+1], 1)))
print("Prediction: ", sess.run(tf.argmax(hypo, 1), feed_dict={ X: mnist.test.images[r: r+1], keep_prob: 1 }))  # testing uses all nodes
plt.imshow(mnist.test.images[r:r+1].reshape(28, 28), cmap='Greys')
plt.show()
| true |