blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a642f53a3d84dfada3b7a887799b3296040f5233 | Python | zhangjh12492/first_python_test | /time_/time_used.py | UTF-8 | 1,726 | 3.5625 | 4 | [] | no_license | import time
# Current timestamp: seconds since the Unix epoch, as a float.
ticks = time.time()
print("当前时间戳为:", ticks)
# struct_time in the local timezone.
localtime = time.localtime(time.time())
print("本地时间为:", localtime)
# Human-readable form, e.g. 'Sat Mar 28 22:24:24 2016'.
localtime = time.asctime(time.localtime(time.time()))
print("本地时间为:", localtime)
# Format as 2016-03-12 11:45:34.
# Bug fix: the original used "%H:%M:%S:%s" -- "%s" is not a portable strftime
# directive (epoch seconds on glibc, ValueError on Windows) and did not match
# the documented "YYYY-mm-dd HH:MM:SS" output.
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
# Format like 'Sat Mar 28 22:24:24 2016'.
print(time.strftime("%a %b %d %H:%M:%S %Y"))
# Parse a formatted string back into an epoch timestamp.
a = "Sat Mar 28 22:24:24 2016"
print(time.mktime(time.strptime(a, "%a %b %d %H:%M:%S %Y")))
"""
%y 两位数的年份表示(00-99)
%Y 四位数的年份表示(000-9999)
%m 月份(01-12)
%d 月内中的一天(0-31)
%H 24小时制小时数(0-23)
%I 12小时制小时数(01-12)
%M 分钟数(00=59)
%S 秒(00-59)
%a 本地简化星期名称
%A 本地完整星期名称
%b 本地简化的月份名称
%B 本地完整的月份名称
%c 本地相应的日期表示和时间表示
%j 年内的一天(001-366)
%p 本地A.M.或P.M.的等价符
%U 一年中的星期数(00-53)星期天为星期的开始
%w 星期(0-6),星期天为星期的开始
%W 一年中的星期数(00-53)星期一为星期的开始
%x 本地相应的日期表示
%X 本地相应的时间表示
%Z 当前时区的名称
%% %号本身
"""
# Print the calendar of a given month.
import calendar
Nov = calendar.month(2017, 11)
print("输出2017年11月份的日历:")
print(Nov)
print("----------------")
print("time.altzone %d" % time.altzone)
t = time.localtime()
print("time.asctime(t) : %s" % time.asctime(t))
# Bug fix: time.clock() was deprecated since 3.3 and removed in Python 3.8;
# time.perf_counter() is the documented replacement for a performance timer.
print(time.perf_counter())
print("=================")
print(time.ctime())
print(time.localtime(2))
print(time.asctime()) | true |
e3bfb41882fcd469ea254e24ec4b036709c1787a | Python | kishore-krishna/Triangle_star | /triangle1.py | UTF-8 | 84 | 3.3125 | 3 | [] | no_license | def P(n):
for i in range(1, n+1): #specify limit
print(i*'*')
P(5) | true |
185beeb9785431ae48f679db695d3852b552b8d3 | Python | cjamgo/myGames | /SpaceInvaders.py | UTF-8 | 455 | 3.109375 | 3 | [] | no_license | import pygame
# Initialize all imported pygame modules.
pygame.init()
# Create the screen.
# screen = pygame.display.set_mode((800, 600))#need tuple in between parenthesis or it wont work
# NOTE(review): `screen` below is bound to the pygame.display *module*, not to
# the Surface returned by set_mode (that return value is discarded), so
# nothing can be drawn on `screen` later -- confirm this is intentional.
screen = pygame.display
screen.set_mode((800, 600))
screen.set_caption(('Space Invaders: By yours truly'))  # window title
# Main loop: runs until the window's close button posts a QUIT event.
# NOTE(review): the loop never draws or calls display.flip()/update().
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
| true |
141c2907eebef2faaf3c43ea479d9b780e8c4beb | Python | webclinic017/ChatDataMapper | /plot_mentions.py | UTF-8 | 309 | 3.0625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import json
def plot_mentions(data: dict, ticker: str):
    """Plot, per date, how many tickers were mentioned, as red dots.

    data: maps a date to an object exposing get_mentioned_tickers().
    ticker: NOTE(review) -- currently unused; kept for interface
        compatibility.  Presumably intended to filter the counts down to one
        ticker; TODO confirm and implement or drop.
    """
    dates = []
    counts = []
    for date, entry in data.items():
        dates.append(date)
        counts.append(len(entry.get_mentioned_tickers()))
    # Bug fix: dates are plotted on the x-axis, but the original labelled the
    # *y*-axis 'Date'.  Label both axes correctly.
    plt.xlabel('Date')
    plt.ylabel('Mentioned tickers')
    plt.plot(dates, counts, 'ro')
    plt.show()
| true |
959ac41ac394979fb910f7a2c2941cb72b1c0f8e | Python | dvrpc/TIM3AnalysisScripts | /UndocumentedScripts/AllScripts/GetTourRatesByCounty.py | UTF-8 | 1,052 | 2.71875 | 3 | [] | no_license | import pandas as pd
import numpy as np
from subprocess import Popen
print('Reading Files')
tour = {}
tour[2015] = pd.read_csv(r'D:\TIM3.1\000000\scenario\Output\_tour_2.dat', '\t')
tour[2040] = pd.read_csv(r'B:\model_development\TIM_3.1_2040\scenario\Output\_tour_2.dat', '\t')
taz2county = pd.read_csv(r'D:\ref\taz2county.csv', index_col = 0)['County']
print('Processing')
to_by_county = {}
td_by_county = {}
for year in [2015, 2040]:
tour[year]['tocounty'] = tour[year]['totaz'].map(taz2county)
tour[year]['tdcounty'] = tour[year]['tdtaz'].map(taz2county)
to_by_county[year] = tour[year].groupby('tocounty').sum()['toexpfac']
td_by_county[year] = tour[year].groupby('tdcounty').sum()['toexpfac']
print('Writing')
outfile = r'D:\TIM3\TourRatesByCounty.xlsx'
outdata = pd.Panel({'Origins': pd.DataFrame({2015: to_by_county[2015], 2040: to_by_county[2040]}),
'Destinations': pd.DataFrame({2015: td_by_county[2015], 2040: td_by_county[2040]})})
outdata.to_excel(outfile)
Popen(outfile, shell = True)
print('Go') | true |
c5eaa883653ed9be81c6564d931cc2a156fd6a28 | Python | Preetharajendran/python-programming | /Beginner level/count the no of characters.py | UTF-8 | 47 | 2.90625 | 3 | [] | no_license | a=raw_input()
n=len(a)-(a.count(" "))
print n
| true |
1ff64524cbd7d9ab5f1e1b5af7134f0cee4961e8 | Python | andyptt21/Fantasy_Hockey_App_2020 | /scraper.py | UTF-8 | 3,637 | 2.84375 | 3 | [] | no_license | ## I've been using td class "v-top"
import time
import re
import pandas as pd
from pandas.io.html import read_html
from selenium import webdriver
driver = webdriver.Chrome()
driver.get("https://www.fantrax.com/fantasy/league/8i8nwftijzzq6mwq/standings?startDate=2019-10-02&endDate=2020-04-04&hideGoBackDays=true&period=22&timeStartType=PERIOD_ONLY&timeframeType=YEAR_TO_DATE&view=SCHEDULE&pageNumber=1")
time.sleep(5)
## Weekly matchup stats
def matchup_scraper(num):
table = driver.find_element_by_xpath('/html/body/app-root/div/div[1]/div/app-league-standings/div/section/league-standings-tables/div/div[' + num + ']/ultimate-table/div/section/div')
table_html = table.get_attribute('innerHTML')
df = read_html(table_html)[0]
teams = driver.find_element_by_xpath('/html/body/app-root/div/div[1]/div/app-league-standings/div/section/league-standings-tables/div/div[' + num + ']/ultimate-table/div')
teams_html = teams.get_attribute('innerHTML')
# categories = re.findall('">.*?</a></th>',teams_html)
# for x in range(0,len(categories)):
# categories[x] = re.findall(';">.*?</a></th>',categories[x])
# categories[x] = str(categories[x])[6:]
# categories[x] = categories[x][:-11]
# df.columns = categories
teams = re.findall("</figure>.*?<!---->",teams_html)
for x in range(0,len(teams)):
teams[x] = teams[x][10:]
teams[x] = teams[x][:-8]
df['Team'] = teams
df['matchup'] = [num] * len(teams)
#df.columns.values[0:4] = ['CatWins','CatLosses','CatTies','CatPts']
df.columns = ['CatWins','CatLosses','CatTies','CatPts',
'Goals','Assists','Points','PlusMinus',
'PIM','SOG','Hits','PPP','ATOI','SHP',
'Blocks','Wins','GAA','Saves','G.Points',
'G.TOI','G.PIM','Team','matchup']
return(df)
matchup1 = matchup_scraper('1')
matchup_df = matchup1.append(matchup_scraper('2'))
list = range(3,23)
for x in list:
try:
matchup_df = matchup_df.append(matchup_scraper(str(x)))
except:
break
## Calculate season stats and record from matchup stats in R
driver.get('https://www.fantrax.com/fantasy/league/8i8nwftijzzq6mwq/standings?startDate=2019-10-02&endDate=2020-04-04&hideGoBackDays=true&period=5&timeStartType=PERIOD_ONLY&timeframeType=YEAR_TO_DATE&view=SEASON_STATS&pageNumber=1')
time.sleep(5)
table = driver.find_element_by_xpath('/html/body/app-root/div/div[1]/div/app-league-standings/div/section/league-standings-tables/div/div[2]/ultimate-table/div/section/div')
table_html = table.get_attribute('innerHTML')
season_df = read_html(table_html)[0]
teams = driver.find_element_by_xpath('/html/body/app-root/div/div[1]/div/app-league-standings/div/section/league-standings-tables/div/div[2]/ultimate-table/div')
teams_html = teams.get_attribute('innerHTML')
teams = re.findall("</figure>.*?<!---->", teams_html)
# categories = re.findall('">.*?</a></th>', teams_html)
for x in range(0,len(teams)):
teams[x] = teams[x][10:]
teams[x] = teams[x][:-8]
# for x in range(0,len(categories)):
# categories[x] = re.findall(';">.*?</a></th>',categories[x])
# categories[x] = str(categories[x])[6:]
# categories[x] = categories[x][:-11]
# season_df.columns = categories
season_df.columns = ['CatWins','CatLosses','CatTies','CatPts',
'Goals','Assists','Points','PlusMinus',
'PIM','SOG','PPP','SHP','Hits',
'Blocks','ATOI','Wins','GAA','Saves',
'G.PIM','G.TOI','G.Points']
season_df['Team'] = teams
season_df = season_df.iloc[:,4:22]
driver.quit()
| true |
4e5b6c12cd8ba245521cf92e69efd146f58ce81e | Python | michal-au/article-prediction | /lib/corpus.py | UTF-8 | 5,934 | 2.59375 | 3 | [] | no_license | from enum import Enum
import os
import sets
import utils
from .Tree import Tree
class DataType(Enum):
    """Which portion of the corpus a walk should cover."""
    ALL = 0
    TRAIN = 1
    HELDOUT = 2
    TEST = 3
def walk_and_transform(function, input_corpus_path, output_corpus_path):
    """
    Applies the function to all the files from the input corpus together with the
    corresponding files from the output corpus

    function is called as function(old_file, new_file) for every input file;
    the output path mirrors the input's <parent dir>/<file name> layout.
    (Python 2 module: note the print statement below.)
    """
    for r, ds, fs in os.walk(input_corpus_path):
        print r
        # Sorting ds in place also fixes the order in which os.walk descends.
        ds.sort()
        fs.sort()
        for f in fs:
            old_file = os.path.join(r, f)
            new_file = os.path.join(output_corpus_path, os.path.basename(r), f)
            function(old_file, new_file)
def walk_parses(function, data_type=DataType.TRAIN):
    """Apply *function* to every parse Tree in the 'dataParsed' corpus.

    data_type: with DataType.TRAIN the WSJ section dirs 22-24 (heldout/test)
    are skipped; any other value walks everything under the parsed-data path.
    """
    settings = utils.read_settings()
    path = settings.get('paths', 'dataParsed')
    leave_out_dirs = []
    if data_type == DataType.TRAIN:
        leave_out_dirs = [os.path.join(path, dir_nb) for dir_nb in ('22', '23', '24')]
    for r, ds, fs in os.walk(path):
        if r in leave_out_dirs:
            continue
        print r
        ds.sort()
        fs.sort()
        for f in fs:
            f_path = os.path.join(r, f)
            # NOTE(review): `f` (the file name) is rebound here to the open
            # file object -- harmless but confusing; consider renaming.
            with open(f_path, 'r') as f:
                # One parse tree per line.
                for l in f:
                    t = Tree.from_string(l)
                    function(t)
# WAITING FOR
# DELETION::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def walk1(function, corpusFileType, result=None, data_type="train_devel"):
    """Fold *function* over every file of the given corpus type and split.

    function is called as function(file_path, result); its return value is
    threaded through as the accumulator and returned at the end.
    data_type: 'train_devel' keeps all section dirs, 'test_devel' -> ['24'],
    'test' -> ['23']; anything else raises NameError.
    """
    path = _getCorpusFileTypePath(corpusFileType)
    path = os.path.join(path, data_type)
    dirnames = [
        dirname for dirname in os.listdir(path)
        if os.path.isdir(os.path.join(path, dirname))
    ]
    dirnames = sorted(dirnames)
    if data_type == "train_devel":
        # so this option means all the data? TODO redo to something reasonable
        pass
    elif data_type == "test_devel":
        dirnames = ['24']
    elif data_type == "test":
        dirnames = ['23']
    else:
        raise NameError("Unknown data type: one of the following accepted: train_devel, test_devel, test")
    # Turn bare dir names into full paths before walking them.
    dirnames = [os.path.join(path, d) for d in dirnames]
    for d in dirnames:
        print d
        for fname in sorted(os.listdir(d)):
            f = os.path.join(d, fname)
            result = function(f, result)
    return result
def walk2(function, corpusFileType, result=None, restrictToFiles=[], data_type="train_devel"):
    # TODO: merge with the function above (walk1)
    '''
    Used for data preparation; for walking already-parsed sentences use the
    function above (walk1).
    aplies the function to all the corpus files of the given type (orig, raw,
    parsed, ...); if the restrictToFiles argument is given, only the files
    corresponding to the provided number(s) will be searched
    @data_type: {test, test_devel, train_devel} - part of the data that should be considered

    NOTE(review): restrictToFiles=[] is a mutable default argument, and
    sets.Set / the print statements are Python 2 only.
    '''
    path = _getCorpusFileTypePath(corpusFileType)
    if corpusFileType != 'orig' and corpusFileType != 'origP3':
        # files already divided into test, test_devel, train_devel
        paths = [os.path.join(path, dir) for dir in os.listdir(path)]
    else:
        paths = [path]
    restrictToDirs = []
    if restrictToFiles:
        if type(restrictToFiles) is str:
            restrictToFiles = [restrictToFiles]
        # A file number's first two digits name its WSJ section directory.
        restrictToDirs = sets.Set([f[:2] for f in restrictToFiles])
        restrictToFiles = ['wsj_'+f for f in restrictToFiles]
    for path in paths:
        # get list of all the directories the corpus consists of:
        dirnames = [dirname for dirname in os.listdir(path)
                    if os.path.isdir(os.path.join(path, dirname))]
        dirnames = sorted(dirnames)
        # Sections 22-24 (heldout/test splits) are always excluded here.
        if '22' in dirnames:
            dirnames.remove('22')
        if '23' in dirnames:
            dirnames.remove('23')
        if '24' in dirnames:
            dirnames.remove('24')
        if restrictToDirs:
            dirnames = [dir for dir in dirnames if dir in restrictToDirs]
        # create the full paths, not just dir names:
        dirnames = [os.path.join(path, dir) for dir in dirnames]
        print dirnames
        for dir in dirnames:
            print dir
            for fname in sorted(os.listdir(dir)):
                print fname
                if restrictToFiles and fname not in restrictToFiles:
                    continue
                f = os.path.join(dir, fname)
                result = function(f, result)
    return result
def getSaveLocation(f, corpusFileType):
    '''for the given file and desired output corpusFileType, it returns the
    saving path for the file within the corpusFileType folder

    The WSJ section number (the file's parent directory) decides the split:
    00-21 -> train/devel, 23 -> test, 24 -> test/devel; anything else raises.
    '''
    path = _getCorpusFileTypePath(corpusFileType)
    [parPath, fName] = os.path.split(f)
    par = os.path.split(parPath)[1]  # section directory name, e.g. '02'
    parAndFile = os.path.join(par, fName)
    parNb = int(par)
    settings = utils.readSettings()
    if parNb <= 21:
        path = os.path.join(path, settings.get('paths', 'trainDevelDir'),
                            parAndFile)
    elif parNb == 23:
        path = os.path.join(path, settings.get('paths', 'testDir'),
                            parAndFile)
    elif parNb == 24:
        path = os.path.join(path, settings.get('paths', 'testDevelDir'),
                            parAndFile)
    else:
        # Bug fix: the original concatenated the *builtin* `dir` function into
        # the message ("... " + dir), which raised TypeError instead of the
        # intended NameError.  Report the offending section directory instead.
        raise NameError("There should definitely be no directory like: " + par)
    return path
def _getCorpusFileTypePath(corpusFileType):
    '''for the given corpusFileType (orig, pos, parsed, ...), it checks whether
    it is a valid type and returns its full path from the .settings file'''
    corpusFileTypeValues = ['orig', 'raw', 'pos', 'parsed', 'features']
    if corpusFileType not in corpusFileTypeValues:
        raise NameError(
            "undefined corpusFileType, use one of "+str(corpusFileTypeValues)
        )
    # The path is looked up under e.g. 'dataOrig' / 'dataParsed' in settings.
    # NOTE(review): walk_parses uses utils.read_settings() while this uses
    # utils.readSettings() -- confirm which API is current.
    settings = utils.readSettings()
    path = settings.get('paths', 'data'+corpusFileType.capitalize())
    return path
| true |
bd8a73d72306dd740965c3235ee7c0f420e26f7f | Python | jtanadi/robofontScripts | /etchASketch/z-Archive/multilineview-test.py | UTF-8 | 1,348 | 2.5625 | 3 | [] | no_license | from mojo.UI import MultiLineView
from vanilla import *
from mojo.drawingTools import *
f = CurrentFont()
sourcexheight = f.info.xHeight
class MyOwnSpaceCenter:
def __init__(self, font):
self._BuildUI(font)
self.w.open()
def _BuildUI(self, font):
self.w = Window((792, 612))
self.w.editText = EditText((10, 10, -10, 24),
callback=self.editTextCallback)
self.w.lineView = MultiLineView((0, 40, -0, -0),
pointSize=104,
lineHeight=130,
selectionCallback=self.lineViewSelectionCallback)
#self.w.lineView.setFont(font)
self.drawLines()
print self.w.lineView.getDisplayStates()
def drawLines(self):
newPath()
stroke(1,0,0)
moveTo((36, 10))
lineTo((100, 10))
drawPath()
def editTextCallback(self, sender):
letter = sender.get()
glyphlist = []
for glyphs in letter:
glyphlist.append(f[glyphs])
self.w.lineView.set(glyphlist)
def lineViewSelectionCallback(self, sender):
print sender.getSelectedGlyph()
MyOwnSpaceCenter(CurrentFont()) | true |
1d8f16c3ba01c599021939718d41b469ea3110e3 | Python | Dukerider45/guvicode | /factorial.py | UTF-8 | 74 | 3.296875 | 3 | [] | no_license | fact=1
a=int(raw_input())
for i in range(1,a+1):
fact=fact*i
print(fact)
| true |
0da9618a64aa14ec2135bfd2fc1761908320026f | Python | marielesf/devPython | /media_test.py | UTF-8 | 290 | 2.765625 | 3 | [] | no_license | import media
toy_story = media.movie("toy story", "a historia dos brinquedos",
"https://pt.wikipedia.org/wiki/Toy_Story#/media/File:Movie_poster_toy_story.jpg",
"https://www.youtube.com/watch?v=oIANkZ7wTHg")
print(toy_story.storyline)
| true |
d8cf159ef33e0fb1ac7045c90acbe0aa8f7b34b0 | Python | devesh-bhushan/python-assignments | /assignment-8/Q-1 cube.py | UTF-8 | 232 | 4.1875 | 4 | [] | no_license | """
program to find the cube of any number using function
"""
nu = int(input("enter the number whose cube is to be calculated"))
def cube(n):
    """Return the cube (third power) of n."""
    return n ** 3
cu = cube(nu)
print("the cube of entered number is", cu) | true |
7ecf25cacd72d12a8cc59f67b39b5993081bd1d8 | Python | GowthamSingamsetti/Python-Practise | /rough11.py | UTF-8 | 188 | 3.328125 | 3 | [] | no_license | bdaystr=input("Enter date of birth:\n")
bdaylist=bdaystr.split("/")
bday='-'.join(bdaylist)
bdaydict={"birthday":bday}
if 'birthday' in bdaydict:
print(bdaydict['birthday'])
| true |
a01becb61ff75b368a77d9d38cf8a3d0ac32cdd8 | Python | sam1208318697/Leetcode | /Leetcode_env/2019/8_24/Longest_Word_in_Dictionary.py | UTF-8 | 1,757 | 3.84375 | 4 | [] | no_license | # 720. 词典中最长的单词
# 给出一个字符串数组words组成的一本英语词典。从中找出最长的一个单词,该单词是由words词典中其他单词逐步添加一个字母组成。
# 若其中有多个可行的答案,则返回答案中字典序最小的单词。若无答案,则返回空字符串。
# 示例 1:
# 输入: words = ["w","wo","wor","worl", "world"]
# 输出: "world"
# 解释: 单词"world"可由"w", "wo", "wor", 和 "worl"添加一个字母组成。
# 示例 2:
# 输入: words = ["a", "banana", "app", "appl", "ap", "apply", "apple"]
# 输出: "apple"
# 解释: "apply"和"apple"都能由词典中的单词组成。但是"apple"得字典序小于"apply"。
# 注意:
# 所有输入的字符串都只包含小写字母。
# words数组长度范围为[1,1000]。
# words[i]的长度范围为[1,30]。
class Solution:
    def longestWord(self, words) -> str:
        """Return the longest word in *words* buildable one letter at a time,
        i.e. every proper prefix of it is also present in *words*.

        Ties on length are broken by lexicographic order; returns "" when no
        word qualifies.  Fixes vs. the original: the stray debug print is
        removed, the membership set is built once instead of once per prefix,
        and the caller's list is no longer sorted in place.
        """
        word_set = set(words)
        by_length = sorted(words, key=len)
        best_len = 0
        candidates = []
        # Walk from the longest word down; stop once the remaining words are
        # strictly shorter than the best buildable word found so far.
        for word in reversed(by_length):
            if len(word) < best_len:
                break
            if all(word[:i] in word_set for i in range(1, len(word))):
                best_len = len(word)
                candidates.append(word)
        return min(candidates) if candidates else ""
sol = Solution()
print(sol.longestWord(["b","br","bre","brea","break","breakf","breakfa","breakfas","breakfast","l","lu","lun","lunc","lunch","d","di","din","dinn","dinne","dinner"]))
| true |
c588758a0862ee973049ba2edd172289ce2488f6 | Python | sfneal/pdfconduit | /pdfconduit/utils/path.py | UTF-8 | 2,248 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive | # Set directory paths and file names
import os
import sys
from pathlib import Path
if 'pathlib' in sys.modules:
def _add_suffix(file_path, suffix, sep, ext):
p = Path(file_path)
_ext = p.suffix if ext is None else str('.' + ext.strip('.'))
out = p.stem + sep + suffix + _ext # p.suffix is file extension
return os.path.join(os.path.dirname(file_path), out)
else:
def _add_suffix(file_path, suffix, sep, ext):
split = os.path.basename(file_path).rsplit('.', 1)
ext = split[1] if ext is None else str('.' + ext.strip('.'))
return os.path.join(os.path.dirname(file_path), split[0] + sep + suffix + '.' + ext)
def add_suffix(file_path, suffix='modified', sep='_', ext=None):
    """Return file_path with *suffix* appended to the file name.

    suffix: text inserted before the extension (default 'modified').
    sep: separator placed between the original stem and the suffix.
    ext: optional replacement extension; None keeps the original one.
    """
    return _add_suffix(file_path, suffix, sep, ext)
def set_destination(source, suffix, filename=False, ext=None):
    """Build a unique output path inside a 'temp' folder next to *source*.

    source: path the temp file name is derived from.
    suffix: tag appended to the stem, separated by '_'.
    filename: optional replacement stem; falsy -> use source's stem.
    ext: optional replacement extension, used verbatim (callers must include
        the leading dot); None -> keep source's extension.

    If the candidate already exists, '_1', '_2', ... is appended to the stem
    until a free name is found.
    """
    source_dirname = os.path.dirname(source)
    # Do not create nested temp folders (/temp/temp)
    if not source_dirname.endswith('temp'):
        directory = os.path.join(source_dirname, 'temp')
    else:
        directory = source_dirname
    # Create temp dir if it does not exist
    if not os.path.isdir(directory):
        os.mkdir(directory)
    # Parse source filename
    src_file_name = filename if filename else Path(source).stem
    src_file_ext = ext if ext else Path(source).suffix
    # Concatenate new filename
    full_path = os.path.join(directory, src_file_name + '_' + suffix + src_file_ext)
    if not os.path.exists(full_path):
        return full_path
    # Bug fix: the original tested os.path.exists() on the bare file name
    # (relative to the CWD, not to *directory*), so the "unique" name it
    # returned could still collide.  Test the full candidate path instead.
    number = 1
    while True:
        candidate = src_file_name + '_' + suffix + '_' + str(number) + src_file_ext
        full_path = os.path.join(directory, candidate)
        if not os.path.exists(full_path):
            return full_path
        number += 1
| true |
1cb3158c8d8bd65f15d0f85c45debb65454d1451 | Python | sc076/Yonsei | /Programming/lab8/lab8_p3.py | UTF-8 | 450 | 4.15625 | 4 | [] | no_license | import turtle
def drawCircle(myturtle, x, y, r):
    """Draw a circle of radius r using the given turtle.

    myturtle: the turtle.Turtle drawing object to use.
    x, y: where the pen is placed before drawing.  Note: turtle.circle()
        starts drawing *from* the current position with the center r units to
        the pen's left, so (x, y) lies on the circle itself, not at its
        center (the original docstring claimed it was the center).
    r: the radius of the circle.
    All measures are given in units of pixels.
    """
    # Lift the pen, jump to the start point without drawing, then draw.
    myturtle.penup()
    myturtle.setposition(x, y)
    myturtle.pendown()
    myturtle.circle(r)
| true |
c311f5e8346df26c05ae15f3f17de063361403ea | Python | awild82/lrspectrum | /lrspectrum/lrspectrum.py | UTF-8 | 14,662 | 3.015625 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | """
The MIT License (MIT)
Copyright (c) 2018 Andrew Wildman
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
try:
import matplotlib.pyplot as plt
except ImportError: # pragma: no cover
raise ImportError('Matplotlib is required to run LRSpectrum')
try:
import numpy as np
except ImportError: # pragma: no cover
raise ImportError('Numpy is required to run LRSpectrum')
from . import parsers
class LRSpectrum(object):
"""
LRSpectrum generates a linear response spectrum from a Gaussian log file
Attrubutes:
name: Name identifier
string
logfile: Logfiles to be parsed
array<string>
roots: Poles (key, eV) and oscillator strengths (value,
unitless) of linear response
dict<string:float>
freq: Energy range to be plotted (eV)
numpy.ndarray<float>
spect: Spectrum generated by convolving each of the roots with
a given distribution such that the integral over
the distribution gives the oscillator strength
numpy.ndarray<float>
broad: Broadening parameter. HWHM
float
wlim: Sets bounds on energy range to generate
tuple<float>
res: Number of points per eV to evaluate
int
Methods:
parse_log():
Parses a gaussian linear response log file. Fills roots dict.
Called during init, but can be used to regenerate if needed.
gen_spect(broad,wlim,res,meth):
Generates a spectrum in the range given by wlim by convolving a
specified distribution with each of the roots and scaling by the
oscillator strength. Can be called multiple times to generate
spectra with different parameters.
broad: Same definition as above
wlim: Same definition as above
res: Same definition as above
meth: Type of distribution used to broaden. Currently 'lorentz'
or 'gaussian' are supported. Lorentz is for time-energy
uncertainty broadening (lifetime) and gaussian is for
vibronic broadening.
plot(xlim,ylim,xlabel,ylabel,show,lines,**kwargs):
Plots spectrum vs frequency. Built using matplotlib.pyplot, so any
additional arguments can be passed using kwargs
xlim: Limits on x axis tuple<float>
ylim: Limits on y axis tuple<float>
xlabel: Label on x axis string
ylabel: Label on y axis string
show: Whether or not to call plt.show() bool
lines: Whether or not to plot lines showing bool
the roots with the respective
oscillator strengths.
"""
    def __init__(self, *multLogNames, **kwargs):
        """Store the log-file list and parse it immediately.

        Accepts either a single list of log-file paths or several paths as
        positional arguments.  Keyword args: name (str identifier) and
        program (str naming the parser; auto-detected when omitted).
        """
        # Keyword arguments. Has to be this way for 2.7 compatibility
        # (Python 2 does not allow keyword-only arguments after *args).
        name = kwargs.pop('name', None)
        program = kwargs.pop('program', None)
        # Support either one list of logfiles or many logfiles as params
        if isinstance(multLogNames[0], list):
            self.logfile = multLogNames[0]
        elif isinstance(multLogNames[0], str):
            self.logfile = list(multLogNames)
        else:
            raise TypeError(
                'Unexpected type for logfiles: ' +
                '{0}'.format(type(multLogNames[0]))
            )
        # Initialization: spectrum-related attributes stay None/empty until
        # gen_spect() is called.
        self.name = name
        self.roots = {}
        self.freq = None
        self.spect = None
        self.broad = None
        self.wlim = None
        self.res = None
        # Always call parser when initializing
        self.parse_log(program=program)
    def parse_log(self, program=None):
        """
        Parses the logfiles in self.logfile according to 'program' parser

        program: optional parser name (case-insensitive key of
            parsers.progs); when None the parser is auto-detected per file.
        Raises TypeError for a non-string program and ValueError for an
        unknown one.  Fills/updates self.roots.
        """
        for lg in self.logfile:
            if program is not None:
                if not isinstance(program, str):
                    raise TypeError(
                        'Expected string for input "program". ' +
                        'Recieved {0}'.format(type(program))
                    )
                program = program.lower()
                if program not in parsers.progs.keys():
                    raise ValueError(
                        'Specified program {0} not parsable'.format(program)
                    )
            else:  # pragma: no cover
                # We dont consider coverage here; testing of this method occurs
                # separately
                program = parsers.detect(lg)
            # NOTE: If you have degenerate roots across the file boundaries,
            # this will overwrite instead of sum them
            self.roots.update(parsers.progs[program](lg))
def gen_spect(self, broad=0.5, wlim=None, res=100, meth='lorentz'):
""" Generates the broadened spectrum and stores it """
# Input checking
try:
broad * 1.5
except Exception as ex:
print('Caught exception: {0}'.format(ex))
raise TypeError('Input "broad" to LRSpectrum.gen_spect: ' +
'{0}'.format(type(broad)))
if wlim is not None:
try:
wlim[0] * 1.5
wlim[1] * 1.5
except Exception as ex:
print('Exception for input "wlim"')
raise ex
try:
res * 1.5
except Exception as ex:
print('Caught exception: {0}'.format(ex))
raise TypeError('Input "res" to LRSpectrum.gen_spect: ' +
'{0}'.format(type(res)))
try:
meth.lower()
except Exception as ex:
print('Caught exception: {0}'.format(ex))
raise TypeError('Input "meth" to LRSpectrum.gen_spect: ' +
'{0}'.format(type(meth)))
self.broad = broad
# If wlim isn't given, automatically generate it based on the roots
if wlim is None:
print("Spectral range not specified... " +
"Automatically generating spectral range")
percent = 0.930
mn = None
mx = None
for k in self.roots.keys():
if self.roots[k] != 0:
if mn is None or float(k) < mn:
mn = float(k)
if mx is None or float(k) > mx:
mx = float(k)
if mn is None and mx is None:
raise RuntimeError("Cannot automatically determine spectral " +
"range if no root has oscillator strength")
# We are going to use the quantile function of the lorentz
# distribution here, even if the actual distribution is gaussian
lb = broad*np.tan(((1-percent)-0.5)*np.pi)+mn
mb = broad*np.tan((percent-0.5)*np.pi)+mx
wlim = (lb, mb)
self.wlim = wlim
self.res = int(res)
nPts = int((wlim[1]-wlim[0])*res)
self.freq = np.linspace(wlim[0], wlim[1], nPts)
self.spect = np.zeros(nPts)
# Calling .items() is memory inefficent in python2, but this is good
# for python3
for root, osc_str in self.roots.items():
if osc_str != 0:
root = float(root)
if meth.lower() == 'lorentz':
self.spect += self._lorentz(broad, root, osc_str)
elif meth.lower() == 'gaussian':
self.spect += self._gaussian(broad, root, osc_str)
else:
raise ValueError(
'Unsupported distribution "{0}" specified'.format(meth)
)
def plot(self, xlim=None, ylim=None, xlabel='Energy / eV',
ylabel='Arbitrary Units', show=False, do_spect=True, sticks=True,
ax=None, xshift=0, xscale=1, yshift=0, yscale=1, **kwargs):
""" Plots the generated spectrum and roots """
if self.spect is None and do_spect:
print('Spectrum must be generated prior to plotting')
return
if ax is None:
ax = plt.gca()
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if xscale is not None:
# Type checking
try:
xscale * 1.5
except Exception as ex:
print('Caught exception: {0}'.format(ex))
raise TypeError('Input "xscale" to LRSpectrum.plot: ' +
'{0}'.format(type(xscale)))
if xshift is not None:
# Type checking
try:
xshift * 1.5
except Exception as ex:
print('Caught exception: {0}'.format(ex))
raise TypeError('Input "xshift" to LRSpectrum.plot: ' +
'{0}'.format(type(xshift)))
if xlim is not None:
# Type checking
for i in range(2):
try:
xlim[i]
except TypeError as ex:
print('Caught exception: {0}'.format(ex))
raise TypeError('Input "xlim" to LRSpectrum.plot: ' +
'{0}'.format(type(xlim)))
except IndexError as ex:
print('Caught exception: {0}'.format(ex))
raise IndexError('Length of "xlim" to LRSpectrum.plot: ' +
'{0}'.format(len(xlim)))
try:
xlim[i] * 1.5
except TypeError as ex:
print('Caught exception: {0}'.format(ex))
raise TypeError('Elements inside input "xlim" to ' +
'LRSpectrum.plot' +
'{0}'.format(type(xlim[i])))
# Setting xlim
xlim_mod = [x * xscale + xshift for x in xlim]
ax.set_xlim(xlim_mod)
if yscale is not None:
# Type checking
try:
yscale * 1.5
except Exception as ex:
print('Caught exception: {0}'.format(ex))
raise TypeError('Input "yscale" to LRSpectrum.plot: ' +
'{0}'.format(type(yscale)))
if yshift is not None:
# Type checking
try:
yshift * 1.5
except Exception as ex:
print('Caught exception: {0}'.format(ex))
raise TypeError('Input "yshift" to LRSpectrum.plot: ' +
'{0}'.format(type(yshift)))
if ylim is not None:
# Type checking
for i in range(2):
try:
ylim[i]
except TypeError as ex:
print('Caught exception: {0}'.format(ex))
raise TypeError('Input "ylim" to LRSpectrum.plot: ' +
'{0}'.format(type(ylim)))
except IndexError as ex:
print('Caught exception: {0}'.format(ex))
raise IndexError('Length of "ylim" to LRSpectrum.plot: ' +
'{0}'.format(len(ylim)))
try:
ylim[i] * 1.5
except TypeError as ex:
print('Caught exception: {0}'.format(ex))
raise TypeError('Elements inside input "ylim" to ' +
'LRSpectrum.plot' +
'{0}'.format(type(ylim[i])))
# Setting ylim
ylim_mod = [y * yscale + yshift for y in ylim]
ax.set_ylim(ylim_mod)
# Plot spectrum
if do_spect:
x = xscale*self.freq + xshift
y = yscale*self.spect + yshift
ax.plot(x, y, **kwargs)
# Plot poles
if sticks:
for root, osc_str in self.roots.items():
r = float(root)
ax.plot((r, r), (0, osc_str), 'k-', **kwargs)
if show: # pragma: no cover
plt.show()
return ax
def _lorentz(self, broad, root, osc_str):
"""
Calculates and returns a lorentzian
The lorentzian is centered at root, integrates to osc_str, and has a
half-width at half-max of broad.
"""
ones = np.ones(self.freq.shape)
# 1/(pi*broad*(1+((w-root)/broad)^2))
l_denom = broad*np.pi*(1+np.square((self.freq-root*ones)/broad))
return osc_str*np.divide(ones, l_denom)
def _gaussian(self, broad, root, osc_str):
"""
Calculates and returns a gaussian
The gaussian is centered at root, integrates to osc_str, and has a
half-width at half-max of broad.
"""
ones = np.ones(self.freq.shape)
# Convert from HWHM to std dev
stddev = broad/np.sqrt(2.0*np.log(2.0))
# 1/((2*pi*broad^2)^(1/2))*e^(-(w-root)^2/(2*broad^2)
g_power = -1*np.square(self.freq-root*ones) / (2*np.square(stddev))
gauss = 1/(np.sqrt(2*np.pi)*stddev)*np.exp(g_power)
return osc_str*gauss
| true |
ace257dcc836fe88ca7345327c762b4c6ba144ec | Python | codilty-in/math-series | /codewars/src/k_primes_most_upvoted.py | UTF-8 | 527 | 3.46875 | 3 | [
"MIT"
] | permissive | """Module to solve https://www.codewars.com/kata/k-primes/python."""
def count_Kprimes(k, start, end):
    """Return, in ascending order, every n in [start, end] with exactly k
    prime factors counted with multiplicity (a "k-prime")."""
    return [n for n in range(start, end+1) if find_k(n) == k]
def puzzle(s):
    """Count ordered triples (x, y, z) with x + y + z == s where x is a
    1-prime, y a 3-prime and z a 7-prime, each drawn from [0, s]."""
    a = count_Kprimes(1, 0, s)
    b = count_Kprimes(3, 0, s)
    c = count_Kprimes(7, 0, s)
    return sum(1 for x in a for y in b for z in c if x + y + z == s)
def find_k(n):
    """Return the number of prime factors of n, counted with multiplicity."""
    count = 0
    divisor = 2
    # Trial division up to sqrt(n); strip each divisor completely.
    while divisor * divisor <= n:
        while n % divisor == 0:
            n //= divisor
            count += 1
        divisor += 1
    # Whatever remains above 1 is a single prime factor.
    return count + (1 if n > 1 else 0)
| true |
dd3a0cdee66d8eb1bc074e90d447208cfab18987 | Python | UWPCE-PythonCert-ClassRepos/SP_Online_PY210 | /students/randi_peterson/session03/strformat_lab.py | UTF-8 | 1,703 | 3.84375 | 4 | [] | no_license | #-----TASK 1-----
print('Task 1')
test1 = (2,123.4567,10000,12345.67)
output = 'file_{:0>3d} : {:.2f}, {:.2e}, {:.2e}'.format(*test1)
print(output)
#-----TASK 2-----
print('Task 2')
#Alternate method to achieve task 1. This turned out to be a lot more clunky, since I do not know how to use fstring with formatting numbers
filenum = '%03d' %test1[0]
firstval = '%.2f' %test1[1]
secval = '%.2e' %test1[2]
thirdval = '%.2e' %test1[3]
print(f"file_{filenum} : {firstval}, {secval}, {thirdval}")
#-----TASK 3-----
print('Task 3')
def format_my_tuple(numbers):
    """Print the integers as 'the N numbers are: a, b, c'.

    Fixes vs. the original: the count is taken from the input instead of
    being hard-coded to "3", and the parameter no longer shadows the builtin
    ``tuple`` (positional callers are unaffected).
    """
    joined = ', '.join('{:d}'.format(value) for value in numbers)
    print('the {:d} numbers are: {:s}'.format(len(numbers), joined))
test3 = (1,2,3)
format_my_tuple(test3)
#-----TASK 4-----
print('Task 4')
test4 = (4,30,2017,2,27)
print('{3:0>2d} {4:d} {2:d} {0:0>2d} {1:d}'.format(*test4))
#-----TASK 5-----
print('Task 5')
datatoprint = ['oranges',1.3,'lemons',1.1]
#The fruit names are printed to exclude the s at the end
print(f"The weight of an {datatoprint[0][:-1]} is {datatoprint[1]} and the weight of a {datatoprint[2][:-1]} is {datatoprint[3]}")
#-----TASK 6-----
print('Task 6')
header = ['Name', 'Age','Cost']
testlst = [['First',54,3455.23],['Second',52,235.23],['Third',42, 54315.65]]
header_format = "{:<10}" + "{:<10}" + "{:<10}"
row_format ="{:<10}" + "{:^10}" + "${:>10.2f}"
i=0
print(header_format.format(*header))
for row in testlst:
print (row_format.format(*testlst[i]))
i += 1
#-----TASK 6 EXTRA-----
print('Task 6 Extra')
nums = (1,2,3,4,5,6,7,8,9,10)
print(('{:5}'*10).format(*nums))
| true |
0889d4356226650df91647972696477ee8be356f | Python | PetraB1517/python-012021 | /1/program05.py | UTF-8 | 983 | 3.4375 | 3 | [] | no_license | "Vraťme se k software pro našeho nakladatele. Nakladatel má nyní v software dva slovníky, "
"které obsahují informace o prodejích knih v letech 2019 a 2020." \
"Uvažuj, že uživatel se zajímá o prodeje konkrétní knihy." \
"Zeptej se uživatele na název knihy a poté vypiš informaci o tom, kolik se této knihy celkem prodalo." \
"Nezapomeň na to, že některé knihy byly prodávány pouze v jednom roce."
prodeje2019 = {
"Zkus mě chytit": 4165,
"Vrah zavolá v deset": 5681,
"Zločinný steh": 2565,
}
prodeje2020 = {
"Zkus mě chytit": 3157,
"Vrah zavolá v deset": 3541,
"Vražda podle knihy": 2510,
"Past": 2364,
"Zločinný steh": 5412,
"Zkus mě chytit 2": 6671,
}
dotaz = input('Prodeje které knihy Vás zajímají? ')
kusy = 0
if dotaz in prodeje2019:
kusy += prodeje2019[dotaz]
if dotaz in prodeje2020:
kusy += prodeje2020[dotaz]
print('Knihy ' + dotaz + ' se celkem prodalo ' + str(kusy) + ' kusů.') | true |
36e9027d07e4f4c37efcaec6cf1b974ae712bd05 | Python | Jimmy-INL/google-research | /supcon/classification_head.py | UTF-8 | 1,625 | 2.625 | 3 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation for Contrastive classification head."""
import tensorflow.compat.v1 as tf
class ClassificationHead(tf.layers.Layer):
"""A classification head.
Attributes:
num_classes: The number of classes to classify into.
kernel_initializer: An initializer to use for the weights.
name: Name for this object.
"""
def __init__(self,
num_classes,
kernel_initializer=tf.initializers.glorot_uniform(),
name='ClassificationHead',
**kwargs):
super(ClassificationHead, self).__init__(name=name, **kwargs)
self.dense_layer = tf.layers.Dense(
num_classes,
activation=None,
kernel_initializer=kernel_initializer,
kernel_regularizer=None)
def call(self, inputs, training=None):
del training # unused.
if inputs.shape.rank != 2:
raise ValueError(
f'Input shape {inputs.shape} is expected to have rank 2, but does '
'not.')
return self.dense_layer(inputs)
| true |
724705bd45573d35735c4707778766a2bed4d64e | Python | bullet1337/codewars | /katas/Python/6 kyu/IP Validation 515decfd9dcfc23bb6000006.py | UTF-8 | 243 | 2.703125 | 3 | [] | no_license | # https://www.codewars.com/kata/515decfd9dcfc23bb6000006
def is_valid_IP(strng):
    """Return True iff strng is a valid dotted-quad IPv4 address.

    Exactly four dot-separated octets, each a plain decimal number in
    0..255 with no leading zeros (a lone '0' is allowed).
    """
    octets = strng.split('.')
    if len(octets) != 4:
        return False
    for octet in octets:
        if not octet.isdigit():
            return False
        if len(octet) > 1 and octet.startswith('0'):
            return False
        if int(octet) > 255:
            return False
    return True
3f2b1e34859f857b0c87d3f0575636c3a59c2211 | Python | kriegaex/projects | /Python/projectEuler/uint_prime.py | UTF-8 | 179 | 3.25 | 3 | [] | no_license |
def isprime(number):
    """Return True iff number is a prime.

    Bug fix: the original only special-cased ``number == 1``, so 0 and all
    negative values fell through to an empty trial-division loop and were
    reported as prime.  Any value below 2 is now rejected up front.
    """
    if number < 2:
        return False
    # Trial division up to sqrt(number) is sufficient: a composite must
    # have a factor no larger than its square root.
    for i in range(2, int(number ** 0.5) + 1):
        if number % i == 0:
            return False
    return True
ecf4623e4e86d3fa71661a3b177ec0d0c1b643a2 | Python | AsciencioAlex/super-waddle-webscrapping | /web-scrapping/BeautifulSoup/module01.py | UTF-8 | 529 | 2.84375 | 3 | [
"MIT"
] | permissive | from bs4 import BeautifulSoup
# Using a stored HTML file
soup = BeautifulSoup(open("simple.html"))
# Entire HTML doc be passed
#soup = BeautifulSoup("<hmtl>data</html>")
#print soup
#print "==================================="
#print soup.prettify()
print "================================"
print soup.html.body.contents[1]
print "================================"
for tag in soup.find_all(True):
print tag.name
print "=================================="
print soup.get_text('+')
print "=================================="
| true |
0f4d16b58f5d7f31e40fc4b040ca9016b18ff978 | Python | sjnasr/JustDanceRandommizerApp | /Control.py | UTF-8 | 775 | 3.15625 | 3 | [] | no_license | import tkinter
import Model
import View
class Controller:
    """
    The Controller for an app that follows the Model/View/Controller architecture.

    It builds the Model and the View, runs the Tk main loop, and relays
    button presses from the View to the Model, pushing the Model's new
    state back into the View's display labels.
    """
    def __init__(self):
        # A root Tk window must exist before any widgets are created.
        root = tkinter.Tk()
        self.model = Model.Model()
        # The View receives this controller so it can call buttonPressed().
        self.view = View.View(self)
        # Blocks here until the View's window is closed.
        self.view.mainloop()
        root.destroy()

    def buttonPressed(self):
        """Pick a new random song in the Model and refresh the View's labels."""
        self.model.random()
        self.view.songName["text"] = self.model.songName()
        self.view.artist["text"] = self.model.artistName()
        self.view.level["text"] = self.model.level()
        self.view.mode["text"] = self.model.mode()
c = Controller() | true |
e3cf8ade442febc4bf365424894180db2b0275c3 | Python | dabare/graph | /dfs_recursive.py | UTF-8 | 1,106 | 3.375 | 3 | [] | no_license | def Adj(graph,i): #graph , index returns all adjacent vertexes as a list
adjLst = []
for k in range (len(graph[i])):
if (graph[i][k] == 1):
adjLst.append(k)
return adjLst
def DFS(graph):
    """Run depth-first search over an adjacency-matrix graph and print the
    discovery and finishing timestamps of every vertex.

    Fixes: the original nested helpers declared ``global t``, creating and
    mutating a *module-level* variable (the outer local ``t`` was dead), and
    the inner helper shadowed the outer ``DFS`` name.  The shared clock is
    now a proper enclosing-scope local accessed via ``nonlocal``.
    """
    color = []   # "WHITE" = undiscovered, "GRAY" = on the stack, "BLACK" = finished
    pred = []    # predecessor of each vertex in the DFS forest
    disc = []    # discovery timestamps
    fin = []     # finishing timestamps
    t = 0        # DFS clock, shared with the helpers via nonlocal

    def _dfs_all():
        nonlocal t
        for u in range(len(graph)):
            color.append("WHITE")
            pred.append(None)
            disc.append(-1)
            fin.append(-1)
        t = 0
        for u in range(len(graph)):
            if color[u] == "WHITE":
                _dfs_visit(u)

    def _dfs_visit(u):
        nonlocal t
        color[u] = "GRAY"      # vertex u has been discovered
        t += 1
        disc[u] = t
        for v in Adj(graph, u):
            if color[v] == "WHITE":
                pred[v] = u
                _dfs_visit(v)
        color[u] = "BLACK"     # vertex u is finished
        t += 1
        fin[u] = t

    _dfs_all()
    print(disc)
    print(fin)
mat = [[0,1,0,1,0,0],
[0,0,0,0,1,0],
[0,0,0,0,1,1],
[0,1,0,0,0,0],
[0,0,0,1,0,0],
[0,0,0,0,0,0]]
DFS(mat) | true |
5d6336b05334c5304acd60246396f924028a6cc2 | Python | mccolgst/breakout | /levelgen.py | UTF-8 | 705 | 3.28125 | 3 | [] | no_license | #!/usr/bin/python
import os, random
MAX_HEIGHT = 4
MAX_WIDTH = 5
def generate_level():
    """Randomly generate the next level file.

    Counts the existing ``*.lvl`` files under ``levels/`` to choose the next
    level index, then writes a MAX_HEIGHT x MAX_WIDTH grid of random block
    health values (0-2) as comma-separated rows.
    """
    # TODO: match with a regex OR keep levels in their own database/folder
    # (carried over from the original note).
    level_index = len([name for name in os.listdir('levels') if '.lvl' in name])
    # `with` guarantees the handle is closed even if a write raises,
    # unlike the original open()/close() pair.
    with open('levels/level%s.lvl' % level_index, 'w') as level_file:
        for _ in range(MAX_HEIGHT):
            row = [str(random.randint(0, 2)) for _ in range(MAX_WIDTH)]
            level_file.write(','.join(row) + "\n")
if __name__ == '__main__':
generate_level()
| true |
d5f57c7749321fdab7e2174c8097cdad7eed6fa1 | Python | zingesCodingDojo/DojoAssignments | /Python/PythonFundamentals/Carlos_FindCharacters_0508.py | UTF-8 | 986 | 4.90625 | 5 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Assignment: Find Characters
Write a program that takes a list of strings and a string containing a single character,
and prints a new list of all the strings containing that character.
Here's an example:
"""
# input
l = ['hello','world','my','name','is','Anna']
# char = 'o'
# output
n = ['hello','world']
# Copy
# Hint: how many loops will you need to complete this task?
def findcharacters(randolist, char):
    """Return (and print) the strings in randolist that contain char.

    Fixes the original docstring, which wrongly claimed the result held the
    *indices* of the matches -- it has always held the matching strings
    themselves.

    :param randolist: list of strings to search
    :param char: substring (typically a single character) to look for
    :return: new list of the strings that contain char, in original order
    """
    newlist = [item for item in randolist if char in item]
    print(newlist)
    return newlist
return newlist
findcharacters(l, "o")
| true |
d03d0a2a8f3063ea8cb0ef2baf2b8382e1df859e | Python | rafaelperazzo/programacao-web | /moodledata/vpl_data/97/usersdata/247/51836/submittedfiles/lecker.py | UTF-8 | 731 | 3.34375 | 3 | [] | no_license | lista1=[ ]
n=int(input('lista'))
for i in range(1,n+1,1):
v=float(input('v: '))
lista1.append(v)
lista2=[ ]
for i in range(1,n+1,1):
v=float(input('v: '))
lista2.append(v)
def lecker(lista):
    """Return True iff the sequence has exactly one local maximum (peak).

    Bug fix: the original compared the *last* element against lista[i+1],
    which is out of range and raised IndexError on every list with more than
    one element whose loop reached the end; the last element must be compared
    against its *previous* neighbour.  A single-element list (its own peak)
    also crashed and is now handled.
    """
    cont = 0
    for i in range(len(lista)):
        if i == 0:
            # First element: a peak if it beats its right neighbour
            # (a one-element list counts as its own peak).
            if len(lista) == 1 or lista[i] > lista[i + 1]:
                cont = cont + 1
        elif i == len(lista) - 1:
            # Last element: compare against the previous element.
            if lista[i] > lista[i - 1]:
                cont = cont + 1
        else:
            # Interior element: a peak if strictly above both neighbours.
            if lista[i] > lista[i + 1] and lista[i] > lista[i - 1]:
                cont = cont + 1
    return cont == 1
if lecker(lista1):
print('S')
else:
print('N')
if lecker(lista2):
print('S')
else:
print('N') | true |
7847ed9eeffe278b6e8c0ace4dfcc32815dd3949 | Python | BraderLh/ProyectoFC1 | /Project/src/getcsv.py | UTF-8 | 494 | 2.796875 | 3 | [] | no_license | import requests
import shutil
url_csv = "https://www.datosabiertos.gob.pe/sites/default/files/Programas%20de%20Universidades.csv"
path_folder_csv = "C:/Users/BRAYAN LIPE/Documents/UNSA/2020/SEMESTRE B/Proyecto Final de Carrera/Project/files/dataset.csv"
def download_file(url):
    """Stream the resource at `url` to disk at the module-level `path_folder_csv`."""
    with requests.get(url, stream=True) as r:
        # Decode gzip/deflate content encoding while streaming.
        r.raw.decode_content = True
        with open(path_folder_csv, "wb") as file:
            # Copy the raw response stream straight into the file
            # without loading the whole body into memory.
            shutil.copyfileobj(r.raw, file)
download_file(url_csv)
| true |
356e7e5c1d60478abbde440c1974ac6aa7e53ce5 | Python | Mrzhouqifei/offfer | /kuaishou/4.py | UTF-8 | 160 | 2.65625 | 3 | [] | no_license | n, k = list(map(int, input().split()))
lists = []
# number, supplies, neighbour, distance
for i in range(n):
lists.append(list(map(int, input().split())))
| true |
27121d20c527b48f7923452b31b62891716897d7 | Python | gados3/kaggle_MovieRecommendation | /learning_algorithms/simple_hybrid_system.py | UTF-8 | 3,889 | 3.03125 | 3 | [] | no_license | from collections import defaultdict
from core.data_types import Star_Rating
class SimpleHybridSystem:
    """Hybrid recommender that averages a user-based and an item-based estimate.

    Precomputes pairwise similarity lookups for all users and all movies
    (each object is expected to expose a ``compare(other)`` method, and each
    rating to expose ``user_id``, ``movie_id`` and ``rating.value`` -- as
    read by the methods below).
    """
    def __init__(self, users, movies: dict, ratings):
        self.movie_rating_dict = self.__build_movie_rating_dict(ratings)
        self.user_rating_dict = self.__build_user_rating_dict(ratings)
        self.user_similarity = self.__build_user_similarity_dict(users)
        self.movie_similarity = self.__build_movie_similarity_dict(movies)

    def classify(self, user_id, movie_id):
        """Predict a Star_Rating for (user_id, movie_id).

        Averages the user-based and movie-based estimates when both exist;
        falls back to whichever one exists, or to a default of 3
        (presumably the scale's midpoint -- verify against Star_Rating).
        """
        rating_based_on_users = self.__classify_using_users(user_id, movie_id)
        rating_based_on_movies = self.__classify_using_movies(
            user_id, movie_id)
        if rating_based_on_users is None and rating_based_on_movies is None:
            return Star_Rating(3)
        elif rating_based_on_movies is None:
            # int() truncates the weighted average toward zero.
            return Star_Rating(int(rating_based_on_users))
        elif rating_based_on_users is None:
            return Star_Rating(int(rating_based_on_movies))
        else:
            return Star_Rating(int((rating_based_on_movies +
                                    rating_based_on_users) / 2.))

    def __build_user_rating_dict(self, ratings):
        """Group ratings by user: user_id -> list of that user's ratings."""
        user_rating_dict = defaultdict(list)
        for rating in ratings:
            user_rating_dict[rating.user_id].append(rating)
        return user_rating_dict

    def __build_movie_rating_dict(self, ratings):
        """Group ratings by movie: movie_id -> list of that movie's ratings."""
        movie_rating_dict = defaultdict(list)
        for rating in ratings:
            movie_rating_dict[rating.movie_id].append(rating)
        return movie_rating_dict

    def __build_user_similarity_dict(self, users):
        """Build (user_a, user_b) -> similarity for every ordered pair.

        Each unordered pair is compared once; both key orders are stored,
        and self-similarity is fixed at 1.  The shrinking ``users_list``
        copy avoids recomputing pairs already handled.
        """
        similarity_dict = {}
        users_list = list(users.items())
        for user1_id, user1 in users.items():
            for user2_id, user2 in users_list:
                if user1_id == user2_id:
                    similarity_dict[(user1_id, user2_id)] = 1
                else:
                    similarity = user1.compare(user2)
                    similarity_dict[(user1_id, user2_id)] = similarity
                    similarity_dict[(user2_id, user1_id)] = similarity
            users_list.remove((user1_id, user1))
        return similarity_dict

    def __build_movie_similarity_dict(self, movies):
        """Build (movie_a, movie_b) -> similarity; mirrors the user version."""
        similarity_dict = {}
        movies_list = list(movies.items())
        for movie1_id, movie1 in movies.items():
            for movie2_id, movie2 in movies_list:
                if movie1_id == movie2_id:
                    similarity_dict[(movie1_id, movie2_id)] = 1
                else:
                    similarity = movie1.compare(movie2)
                    similarity_dict[(movie1_id, movie2_id)] = similarity
                    similarity_dict[(movie2_id, movie1_id)] = similarity
            movies_list.remove((movie1_id, movie1))
        return similarity_dict

    def __classify_using_movies(self, user_id, movie_id):
        """Similarity-weighted average of this user's ratings, weighted by
        how similar each rated movie is to the target movie.

        Returns None when any similarity is missing or all weights are zero.
        """
        numerator = 0
        denominator = 0
        for rating in self.user_rating_dict[user_id]:
            try:
                similarity = self.movie_similarity[(movie_id, rating.movie_id)]
                numerator += rating.rating.value * similarity
                denominator += similarity
            except KeyError:
                # Unknown movie pair: no basis for an item-based estimate.
                return None
        if denominator == 0:
            return None
        else:
            return float(numerator) / denominator

    def __classify_using_users(self, user_id, movie_id):
        """Similarity-weighted average of this movie's ratings, weighted by
        how similar each rater is to the target user.

        Returns None when any similarity is missing or all weights are zero.
        """
        numerator = 0
        denominator = 0
        for rating in self.movie_rating_dict[movie_id]:
            try:
                similarity = self.user_similarity[(user_id, rating.user_id)]
                numerator += rating.rating.value * similarity
                denominator += similarity
            except KeyError:
                # Unknown user pair: no basis for a user-based estimate.
                return None
        if denominator == 0:
            return None
        else:
            return float(numerator) / denominator
| true |
0551b7383ea776a3d8f7de0ad46234699282071a | Python | qdonnellan/personal | /tests/controllers_jsonify_blog_post_test.py | UTF-8 | 854 | 2.875 | 3 | [
"MIT"
] | permissive | from controllers.jsonify_blog_post import jsonify_blog_post
from controllers.fetch_blog_post import fetch_blog_post
from base_test_handler import TestHandler
import json
class JsonifyBlogPostTest(TestHandler):
'''
test the the controller for turning blog posts into a json object
'''
def test_jsonify_blog_post_for_known_blog_file(self):
'''
test a call to jsonify a known blog posts returns the expected json object
'''
json_blog_object = jsonify_blog_post('2014','01','03')
self.assertIsNotNone(jsonify_blog_post)
blog_data = json.loads(json_blog_object)
self.assertEqual("10 Posts in 10 Days", blog_data['title'])
self.assertEqual('2014', blog_data['year'])
self.assertEqual('01', blog_data['month'])
self.assertEqual('03', blog_data['day'])
| true |
033a995551cefc97ed7bae3cd38c75bd4d60b581 | Python | mayankvik2/kaggle_cdiscount | /data_loader.py | UTF-8 | 1,599 | 2.546875 | 3 | [] | no_license | import numpy as np
import pandas as pd
import torch.utils.data as data
import torch
import utils
num_classes = 5270
class CSVDataset(data.Dataset):
    """Dataset over a dataframe with 'file_name' (image path) and 'class_id' columns."""
    def __init__(self, df, transform=None):
        # Assumes df has 'file_name' and 'class_id' columns -- presumably
        # produced by the train/val CSV split; verify against the caller.
        self.df = df
        self.path = df['file_name'].values.astype(str)
        self.target = df['class_id'].values.astype(np.int64)
        self.transform = transform

    def __len__(self):
        # One sample per dataframe row.
        return self.df.shape[0]

    def __getitem__(self, idx):
        """Return (image, label) for row idx, applying the optional transform."""
        X = utils.load_image(self.path[idx])
        if self.transform:
            X = self.transform(X)
        y = self.target[idx]
        return X, y
def get_loaders(batch_size,
                args,
                train_transform=None,
                valid_transform=None):
    """Build training and validation DataLoaders from the fold-4 CSV splits.

    ``args`` must provide a ``workers`` attribute (DataLoader worker count).
    Returns (train_loader, valid_loader).
    """
    train_df = pd.read_csv(f'data/train4_df.csv')
    train_dataset = CSVDataset(train_df, transform=train_transform)
    # Shuffle only the training set; pin host memory when a GPU is available
    # to speed up host-to-device transfers.
    train_loader = data.DataLoader(train_dataset,
                                   batch_size=batch_size,
                                   shuffle=True,
                                   num_workers=args.workers,
                                   pin_memory=torch.cuda.is_available())
    valid_df = pd.read_csv(f'data/val4_df.csv')
    valid_dataset = CSVDataset(valid_df, transform=valid_transform)
    valid_loader = data.DataLoader(valid_dataset,
                                   batch_size=batch_size,
                                   shuffle=False,
                                   num_workers=args.workers,
                                   pin_memory=torch.cuda.is_available())
    return train_loader, valid_loader
| true |
c5ea310d88299b5c6d0cbc7e553e43c8c13be8c2 | Python | abid-sayyad/py_beginners | /bubble_sort.py | UTF-8 | 396 | 3.703125 | 4 | [] | no_license | def bubble_sort(arr):
swap = True
idxOfLastUnsortedEle = len(arr)-1
while swap:
swap = False
for i in range(0,idxOfLastUnsortedEle):
if arr[i]>arr[i+1]:
arr[i],arr[i+1] = arr[i+1],arr[i]
swap = True
idxOfLastUnsortedEle -= 1
return arr
# testing
A = [1,7,9,4,6,5,2,0,85,42,75,69,94,38,3]
print(*bubble_sort(A))
| true |
14b27629d9a8b7751f136b9e16a78fea2e222379 | Python | abuwildanm/Python-Computer-Vision | /Computer Vision/LetsGo.py | UTF-8 | 3,321 | 3.109375 | 3 | [] | no_license | import numpy as np
import cv2
class Stitcher:
def __init__(self, images):
self.images = images
# convert to grayscale
left_gray = cv2.cvtColor(self.images[0], cv2.COLOR_BGR2GRAY)
right_gray = cv2.cvtColor(self.images[1], cv2.COLOR_BGR2GRAY)
self.gray = (left_gray, right_gray)
def detect_features(self):
sift = cv2.xfeatures2d.SIFT_create()
# SIFT keypoints and descriptors
left_kps, left_des = sift.detectAndCompute(self.gray[0], None)
right_kps, right_des = sift.detectAndCompute(self.gray[1], None)
return ((left_kps, left_des), (right_kps, right_des))
def match_keypoints(self, sift_features, ratio):
matcher = cv2.DescriptorMatcher_create("BruteForce")
# take best 2 matches for each features
all_matches = matcher.knnMatch(sift_features[0][1], sift_features[1][1], 2)
matches = []
for match in all_matches:
# distance from both best match features should be lower than given ratio
if match[0].distance < match[1].distance * ratio:
# save index of matching features
matches.append(match[0])
# draw match keypoints
self.show_matches(matches, sift_features[0][0], sift_features[1][0])
# construct the two sets of points
match_points = np.array([(sift_features[1][0][match.trainIdx].pt, sift_features[0][0][match.queryIdx].pt) for match in matches])
left_points = match_points[:,0]
right_points = match_points[:,1]
# ptsA = np.float32([kpsA[i] for (_, i) in matches])
# ptsB = np.float32([kpsB[i] for (i, _) in matches])
# find homography between points in both image
if len(matches) >= 4:
(H, status) = cv2.findHomography(left_points, right_points, cv2.RANSAC)
print(H)
else:
raise AssertionError('Can’t find enough keypoints.')
return (matches, H)
def show_matches(self, matches, left_kps, right_kps):
# get best matching feature
matches.sort(key=lambda x: x.distance, reverse=False)
matches = matches[:int(len(matches) * 0.5)]
# Draw top matches
vis_matches = cv2.drawMatches(self.images[0], left_kps, self.images[1], right_kps, matches, None)
cv2.imshow("matches.jpg", vis_matches)
def stitch(self):
sift_features = self.detect_features()
matches, H = self.match_keypoints(sift_features, 0.75)
stitched = cv2.warpPerspective(self.images[1], H, (self.images[1].shape[1] + self.images[0].shape[1], self.images[0].shape[0]))
cv2.imshow('result', stitched)
cv2.waitKey(0)
stitched[0:self.images[0].shape[0], 0:self.images[0].shape[1]] = self.images[0]
return stitched
left_img = cv2.imread('../Dataset/rektorat1.jpg')
right_img = cv2.imread('../Dataset/rektorat2.jpg')
width, height = left_img.shape[1]/3, left_img.shape[0]/3
left_img = cv2.resize(left_img, (int(width), int(height)))
right_img = cv2.resize(right_img, (int(width), int(height)))
# cv2.imshow('right', right_img)
# cv2.waitKey(0)
stitcher = Stitcher([left_img, right_img])
result = stitcher.stitch()
cv2.imshow('Panorama', result)
cv2.waitKey(0) | true |
4d0032e996e3edb1e63d8469ed75a528fcd46763 | Python | excelsky/Leet1337Code | /242_valid-anagram.py | UTF-8 | 662 | 3.453125 | 3 | [] | no_license | # https://leetcode.com/problems/valid-anagram
class Solution:
    def isAnagram(self, s: str, t: str) -> bool:
        """Two strings are anagrams iff their sorted character sequences match."""
        return sorted(s) == sorted(t)
### suboptimal solution
'''
class Solution:
def isAnagram(self, s: str, t: str) -> bool:
if len(s) != len(t):
return False
s_dict, t_dict = dict(), dict()
for i in range(len(s)):
if s[i] in s_dict.keys():
s_dict[s[i]] += 1
else:
s_dict[s[i]] = 1
if t[i] in t_dict.keys():
t_dict[t[i]] += 1
else:
t_dict[t[i]] = 1
return s_dict == t_dict
''' | true |
24180ded0170a5d715680d633d37b23cbd2d4709 | Python | amul-code/FLASK_API | /task.py | UTF-8 | 2,303 | 3.078125 | 3 | [] | no_license | import pymongo
from flask import Flask
from pymongo import MongoClient
from datetime import datetime, timedelta
from pymongo.collection import Collection
app = Flask(__name__)
try:
client = MongoClient()
print("DB connected Successfully")
except:
print("Could not connect to Database")
connection = pymongo.MongoClient("localhost")
database = connection['my_database']
collection:Collection = database['flight_management']
collection2:Collection = database['booking_management']
#QUERY ONE
print("1. Flights whose model is 737\n")
model = "737"
flight = collection.find_one({"model":model})
if flight:
print(flight)
else:
print("No such flight with model " + model + " found.")
#QUERY 2
capacity = 40
print("\n\n\n2.Flights whose capacity is "+ str(capacity) +" and above\n")
flight = collection.find()
for i in flight:
if int(i['capacity'])>=40:
print(i['name'])
else:
print("no flights whoes capacity is "+str(capacity)+" and above.")
# QUERY 3
print("\n\n\n3.All flights whose service done 5 or more months back.\n")
months = 5
date_gap = datetime.today() - timedelta(days=30*months)
flight_details = collection.find({"service.date_of_service":{"$lte":date_gap}})
if flight_details:
for i in flight_details:
print(i['name'])
else:
print("no flight serviced 5 or more months back")
#QUERY 4
print("\n\n\n4. Which flight was services more.\n")
all_flights = collection.find()
flight_id = []
ser_len = []
max = {}
for i in all_flights:
flight_id.append(i['_id'])
ser_len.append(len(i['service']))
max = dict(zip(flight_id,ser_len))
temp = sorted(max)
id = temp[-1]
print(id)
#QUERY 5
print("\n\n\n5. to find lousy service?\n")
all_flights = collection.find()
data = []
for flight in all_flights:
data.append(flight)
min = datetime.now() - datetime.strptime("01-01-1970", "%d-%m-%Y")
lousy_team = ""
flight_no = ""
for flight in data:
service = flight["service"]
for i in range(len(service)-1):
time_diff = abs(service[i+1]["date_of_service"] - service[i]["date_of_service"])
if time_diff < min:
min = time_diff
lousy_team = service[i]["service_by"]
flight_no = flight["_id"] + " - " + flight["name"]
print("Most lousy service team is \"" + lousy_team )
| true |
f23de2fe760afe0df6ac1782dfcbf48525ee60d9 | Python | Noughton/LearningPython | /do_sorted.py | UTF-8 | 520 | 4.09375 | 4 | [] | no_license | #'sorted'排序序列
# Demo of sorted(): returns a new sorted list, leaving the input unchanged.
sorted_list = [2,6,3,-1,-26]
sorted_list_01 = sorted(sorted_list)
print(sorted_list_01)
sorted_list_02 = sorted(sorted_list,key = abs) # sorted() orders elements by the value the 'key' function returns (here: absolute value)
print(sorted_list_02)
sorted_list_str = ['dfa','efd','afg']
sorted_list_03 = sorted(sorted_list_str) # strings compare lexicographically by character code point ('ascii' order)
print(sorted_list_03)
sorted_list_04 = sorted(sorted_list_str,reverse = True) # passing reverse=True sorts in descending order
print(sorted_list_04)
f00912978464ee63224a9dd6b0b84763d69256a5 | Python | srounet/pystormlib | /pystormlib/utils.py | UTF-8 | 722 | 2.75 | 3 | [
"MIT"
] | permissive | import ctypes
import pystormlib.winerror
def raise_for_error(func, *args, **kwargs):
"""Small helper around GetLastError
:param func: a function using SetLastError internally
:type func: callable
:param args: Arbitrary Argument Lists
:param kwargs: Keyword Arguments
:return: func result
:raise: PyStormException in case something when wrong with stormlib
"""
ctypes.windll.kernel32.SetLastError(0)
result = func(*args, **kwargs)
error_code = ctypes.windll.kernel32.GetLastError()
if error_code:
exception = pystormlib.winerror.exceptions.get(
error_code, pystormlib.winerror.exceptions
)
raise exception(error_code)
return result | true |
30ee98851a64dda770fc3d490b8a71c1f7adebe3 | Python | biubiubiubiubiubiubiu/netsec_labs_2017 | /lab3/src/ApplicationLayer.py | UTF-8 | 5,014 | 2.734375 | 3 | [] | no_license | from playground.network.packet.fieldtypes import BOOL, STRING
from playground.network.common import PlaygroundAddress
# MessageDefinition is the base class of all automatically serializable messages
from playground.network.packet import PacketType
import playground
import sys, time, os, logging, asyncio
class EchoPacket(PacketType):
"""
EchoProtocolPacket is a simple message for sending a bit of
data and getting the same data back as a response (echo). The
"header" is simply a 1-byte boolean that indicates whether or
not it is the original message or the echo.
"""
# We can use **ANY** string for the identifier. A common convention is to
# Do a fully qualified name of some set of messages.
DEFINITION_IDENTIFIER = "test.EchoPacket"
# Message version needs to be x.y where x is the "major" version
# and y is the "minor" version. All Major versions should be
# backwards compatible. Look at "ClientToClientMessage" for
# an example of multiple versions
DEFINITION_VERSION = "1.0"
FIELDS = [
("original", BOOL),
("message", STRING)
]
class EchoServerProtocol(asyncio.Protocol):
"""
This is our class for the Server's protocol. It simply receives
an EchoProtocolMessage and sends back a response
"""
def __init__(self, loop=None):
self.deserializer = EchoPacket.Deserializer()
self.loop = loop
self.transport = None
def connection_made(self, transport):
print("EchoServer: Received a connection from {}".format(transport.get_extra_info("peername")))
self.transport = transport
def connection_lost(self, reason=None):
print("Lost connection to client. Cleaning up.")
if self.loop:
self.loop.stop()
def data_received(self, data):
self.deserializer.update(data)
for echoPacket in self.deserializer.nextPackets():
if echoPacket.original:
print("Got {} from client.".format(echoPacket.message))
if echoPacket.message == "__QUIT__":
print("Client instructed server to quit. Terminating")
self.transport.close()
return
responsePacket = EchoPacket()
responsePacket.original = False # To prevent potentially infinte loops?
responsePacket.message = echoPacket.message
self.transport.write(responsePacket.__serialize__())
else:
print("Got a packet from client not marked as 'original'. Dropping")
class EchoClientProtocol(asyncio.Protocol):
"""
This is our class for the Client's protocol. It provides an interface
for sending a message. When it receives a response, it prints it out.
"""
def __init__(self, loop=None, callback=None):
self.buffer = ""
self.loop = loop
if callback:
self.callback = callback
else:
self.callback = print
self.transport = None
self.deserializer = EchoPacket.Deserializer()
def close(self):
self.__sendMessageActual("__QUIT__")
def connection_made(self, transport):
print("EchoClient: Connected to {}".format(transport.get_extra_info("peername")))
self.transport = transport
self.send("Hello world!")
def data_received(self, data):
self.deserializer.update(data)
for echoPacket in self.deserializer.nextPackets():
if echoPacket.original == False:
self.callback(echoPacket.message)
else:
print("Got a message from server marked as original. Dropping.")
def connection_lost(self, reason=None):
print("Lost connection to server. Cleaning up.")
if self.loop:
self.loop.stop()
def send(self, data):
print("EchoClientProtocol: Sending echo message...")
echoPacket = EchoPacket(original=True, message=data)
self.transport.write(echoPacket.__serialize__())
class EchoControl:
def __init__(self, loop=None):
self.txProtocol = None
self.loop = loop
def buildProtocol(self):
self.txProtocol = EchoClientProtocol(self.loop, self.callback)
return self.txProtocol
def connect(self, txProtocol):
self.txProtocol = txProtocol
print("Echo Connection to Server Established!")
# self.txProtocol = txProtocol
# sys.stdout.write("Enter Message: ")
# sys.stdout.flush()
# asyncio.get_event_loop().add_reader(sys.stdin, self.stdinAlert)
def callback(self, message):
print("Server Response: {}".format(message))
# self.txProtocol.send("__QUIT__")
print("Closing EchoProtocol...")
self.txProtocol.transport.close()
def stdinAlert(self):
data = sys.stdin.readline()
if data and data[-1] == "\n":
data = data[:-1] # strip off \n
self.txProtocol.send(data)
| true |
0087391483eb6ad2c978ccdf47f372430287472c | Python | Starkli-code/alien_invasion | /game_functions.py | UTF-8 | 6,219 | 2.5625 | 3 | [] | no_license | import sys
from time import sleep
import pygame
from bullet import Bullet
from alien import Alien
# 监听事件(鼠标/键盘)
def check_keydown_events(event, ai_settings, screen, ship, bullets):
if event.key == pygame.K_RIGHT:
ship.moving_right = True
elif event.key == pygame.K_LEFT:
ship.moving_left = True
elif event.key == pygame.K_SPACE:
if len(bullets) < ai_settings.bullets_allowed:
new_bullet = Bullet(ai_settings, screen, ship)
bullets.add(new_bullet)
def check_keyup_events(event, ai_settings, screen, ship, bullets):
if event.key == pygame.K_RIGHT:
ship.moving_right = False
elif event.key == pygame.K_LEFT:
ship.moving_left = False
def check_events(ai_settings, stats, button, screen, ship, aliens, bullets, scoreboard):
for event in pygame.event.get():
# 控制游戏开关
if event.type == pygame.QUIT:
sys.exit()
# 控制子弹
elif event.type == pygame.K_SPACE:
check_keydown_events(event, ai_settings, screen, ship, bullets)
# 控制飞船左右移动
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, ai_settings, screen, ship, bullets)
elif event.type == pygame.KEYUP:
check_keyup_events(event, ai_settings, screen, ship, bullets)
# 控制游戏开始
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_x, mouse_y = pygame.mouse.get_pos()
check_play_button(ai_settings, screen, stats, button, ship, aliens, bullets, mouse_x, mouse_y, scoreboard)
def check_play_button(ai_settings, screen, stats, button, ship, aliens, bullets, mouse_x, mouse_y, scoreboard):
button_clicked = button.rect.collidepoint(mouse_x, mouse_y)
if button_clicked and not stats.game_stats:
pygame.mouse.set_visible(False)
stats.reset_stats()
stats.game_stats = True
ai_settings.initialize_dynamic_settings()
aliens.empty()
bullets.empty()
scoreboard.prep_level()
scoreboard.prep_score()
scoreboard.prep_high_score()
scoreboard.prep_ship()
creat_fleet(ai_settings, screen, aliens)
ship.center_ship()
# 更新屏幕
def update_screen(ai_settings, stats, screen, ship, bullets, aliens, button, scoreboard):
screen.fill(ai_settings.bg_color)
scoreboard.show_score()
for bullet in bullets:
bullet.draw_bullet()
aliens.draw(screen)
ship.blitme()
if not stats.game_stats:
button.draw()
pygame.display.flip()
# 更新子弹
def update_bullets(ai_settings, screen, stats, scoreboard, ship, bullets, aliens):
bullets.update()
for bullet in bullets.copy():
if bullet.rect.bottom <= 0:
bullets.remove(bullet)
# collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
check_alien_destroy(ai_settings, screen, stats, scoreboard, ship, bullets, aliens)
if len(aliens) == 0:
bullets.empty()
creat_fleet(ai_settings, screen, aliens)
ai_settings.increase_speed()
stats.level += 1
scoreboard.prep_level()
# 外星人相关操作
def get_number_aliens_x(ai_settings, alien_width):
available_space_x = ai_settings.screen_width - 2 * alien_width
number_aliens_x = int(available_space_x / (2 * alien_width))
return number_aliens_x
def creat_alien(ai_settings, screen, aliens, alien_number):
alien = Alien(ai_settings, screen)
alien_width = alien.rect.width
alien.x = alien_width + 2 * alien_width * alien_number
alien.rect.x = alien.x
aliens.add(alien)
def creat_fleet(ai_settings, screen, aliens):
alien = Alien(ai_settings, screen)
number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)
for alien_number in range(number_aliens_x):
creat_alien(ai_settings, screen, aliens, alien_number)
def update_aliens(ai_settings, stats, screen, aliens, ship, bullets, scoreboard):
check_fleet_edges(ai_settings, aliens)
aliens.update()
check_fleet_bottom(ai_settings, stats, screen, aliens, ship, bullets, scoreboard)
if pygame.sprite.spritecollideany(ship, aliens):
ship_hit(ai_settings, stats, screen, aliens, ship, bullets, scoreboard)
# for alien in aliens:
# if alien.check_alien_edge():
# change_alien_direction(ai_settings, alien)
# else:
# alien.update()
def check_fleet_edges(ai_settings, aliens):
for alien in aliens:
if alien.check_alien_edge():
change_fleet_direction(ai_settings, aliens)
break
def check_fleet_bottom(ai_settings, stats, screen, aliens, ship, bullets, scoreboard):
screen_rect = screen.get_rect()
for alien in aliens:
if alien.rect.bottom >= screen_rect.bottom:
ship_hit(ai_settings, stats, screen, aliens, ship, bullets, scoreboard)
break
# def change_alien_direction(ai_settings, alien):
# alien.rect.y += ai_settings.fleet_drop_factor
# ai_settings.fleet_direction *= -1
def change_fleet_direction(ai_settings, aliens):
    """Drop every alien by the configured amount, then flip the fleet's
    horizontal direction (fleet_direction toggles between 1 and -1)."""
    for member in aliens:
        member.rect.y += ai_settings.fleet_drop_factor
    ai_settings.fleet_direction *= -1
def check_alien_destroy(ai_settings, screen, stats, scoreboard, ship, bullets, aliens):
    """Remove colliding bullets/aliens and credit points for every kill."""
    # groupcollide returns {bullet: [aliens it hit]}; True/True removes both.
    collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
    if collisions:
        # BUGFIX: the loop variable previously rebound (shadowed) the
        # `aliens` group parameter; it now has its own name.
        for hit_aliens in collisions.values():
            stats.score += ai_settings.alien_score * len(hit_aliens)
        # Re-render the score image once per collision batch instead of
        # once per bullet.
        scoreboard.prep_score()
    check_high_score(stats, scoreboard)
# --- Ship destruction handling ---
def ship_hit(ai_settings, stats, screen, aliens, ship, bullets, scoreboard):
    """Respond to the ship being hit: lose a life and reset, or end the game."""
    stats.ships_left -= 1
    if stats.ships_left <= 0:
        # Out of ships: stop the game and show the mouse cursor again.
        stats.game_stats = False
        pygame.mouse.set_visible(True)
        return
    # Lives remain: clear the board, rebuild the fleet, recentre the ship,
    # refresh the remaining-ships display, and pause briefly.
    aliens.empty()
    bullets.empty()
    creat_fleet(ai_settings, screen, aliens)
    ship.center_ship()
    scoreboard.prep_ship()
    sleep(1)
# --- Scoring ---
def check_high_score(stats, scoreboard):
    """Promote the current score to high score (and redraw it) when beaten."""
    if stats.score <= stats.high_score:
        return
    stats.high_score = stats.score
    scoreboard.prep_high_score()
| true |
3e93a086ded3bee1d0d51fac23fdcb4a3cd7ee87 | Python | HarshaChinni/Leetcode | /frequency-sort.py | UTF-8 | 299 | 3.125 | 3 | [
"MIT"
] | permissive | from collections import Counter
class Solution:
    def frequencySort(self, s: str) -> str:
        """Return s's characters grouped by descending frequency.

        Characters with equal counts keep first-seen order — Counter's
        most_common() documents exactly that tie-breaking, matching the
        stable sort used previously.
        """
        return ''.join(ch * cnt for ch, cnt in Counter(s).most_common())
| true |
71120a860ad20b6fdfa054617a5c54f5656e50a6 | Python | tsui-david/tzu-chi-cs-class | /class15-strings-introduction/answers.py | UTF-8 | 4,231 | 5.03125 | 5 | [] | no_license | def ex1():
"""
- We have seen strings before. Strings can be instantiated with single quote '' or double quote ""
PROBLEM: Try to create a variable with a string value, 'hello' and return it.
"""
return "hello"
def ex2(a):
    """Concatenate the strings in `a` into one string, one space between each.

    str.join places the separator between elements only, so an empty list
    yields "" and a single element comes back unchanged.

    EXAMPLE: ["hello", "world"] -> "hello world"
    """
    return " ".join(a)
def ex3(s):
    """Return the first and last characters of `s` as a 2-tuple.

    Strings index like arrays: s[0] is the first character and s[-1] the
    last (both are the same char for a 1-character string).

    EXAMPLE: "hello" -> ("h", "o")
    """
    first, last = s[0], s[-1]
    return first, last
def ex4(s, i, j):
    """Return the slice of `s` from index i (inclusive) to j (exclusive).

    Identical to list slicing: s[i:j]; an empty range (i == j) yields "".

    EXAMPLE: s = "abc", i = 1, j = 2 -> "b"
    """
    return s[i:j]
def ex5(s1, s2):
    """Return True if `s2` occurs in `s1` as a contiguous substring.

    example:
    s1 = "TreasureIsland" s2 = "Island" --> True
    s1 = "TreasureIsland" s2 = "X!!@"   --> False

    BUGFIX: the original swapped the roles of s1/s2 (it scanned s1 inside
    s2) and its inner loop indexed past the end of s2, raising IndexError
    on the docstring's own example.  This version slides a window of
    len(s2) across s1.  An empty s2 is considered contained, matching the
    semantics of the `in` operator.
    """
    n, m = len(s1), len(s2)
    for start in range(n - m + 1):
        if s1[start:start + m] == s2:
            return True
    return False
def ex6(s1):
    """Return `s1` with '*' inserted between every pair of adjacent characters.

    Strings are immutable, so the result is built in one pass with
    str.join, which places the separator between the characters of s1.

    EXAMPLE: "abc" -> "a*b*c"
    """
    return "*".join(s1)
def ex7(s1, s2):
    """Interleave `s1` and `s2` character by character, starting with s1.

    Once the shorter string runs out, the remainder of the longer one is
    appended unchanged.

    EXAMPLE: s1 = "abc", s2 = "def" -> "adbecf"
    """
    pieces = []
    for a, b in zip(s1, s2):
        pieces.append(a)
        pieces.append(b)
    # zip stops at the shorter string; at most one of these tails is non-empty.
    paired = min(len(s1), len(s2))
    pieces.append(s1[paired:])
    pieces.append(s2[paired:])
    return "".join(pieces)
| true |
c2810f7a371dcf41b145d81dfd3b378154921774 | Python | furlow/EPS-Project | /src/bluetooth_module.py | UTF-8 | 3,388 | 2.890625 | 3 | [] | no_license | import threading
from bluetooth import *
# *** bluetooth_comms ***
# This class deals with the bluetooth communications with the phone application
# it inherits from the threading.Thread class this allows it to be run along
# side other threads.
class bluetooth_comms(threading.Thread):
def __init__(self, data):
threading.Thread.__init__(self)
self.data = data
port = 5
backlog = 1
self.server_sock = BluetoothSocket( RFCOMM )
self.server_sock.bind( ("", port) )
self.server_sock.listen( backlog )
self.client_sock = BluetoothSocket( RFCOMM )
uuid = "df0677bc-5f0b-45e4-8207-122adee18805"
advertise_service( self.server_sock, "alarm",
service_id = uuid,
service_classes = [ uuid, SERIAL_PORT_CLASS],
profiles = [SERIAL_PORT_PROFILE])
# This is the code run in parellel to the thread its called from
# it will continually run until a keyboard interrupt or if it's
# killed from the main thread
def run(self):
try:
while(True):
print "waiting for connection..."
self.client_sock, client_info = self.server_sock.accept()
print "Accepted connection from ", client_info
print "waiting for data..."
raw_data = self.client_sock.recv(1024)
self.data.set_time( int(raw_data[0:4]) )
self.data.set_alarm_time ( int(raw_data[5:9]) )
self.data.set_alarm = ( int(raw_data[10]) )
self.data.set_light( int(raw_data[12]) )
self.client_sock.send ("Data Received")
print self.data
self.client_sock.close()
except KeyboardInterrupt:
self.stop()
#Function to safely stop the bluetooth communications
def stop(self):
self.keepalive = False
stop_advertising (self.server_sock)
self.client_sock.close()
self.server_sock.close()
# *** app_data ***
# Is a class to encapsulate the application data sent form the mobile app
class app_data():
    # Encapsulates the settings packet sent from the mobile app.  The
    # attributes are name-mangled private; use the setters/getters below.
    def __init__(self):
        # BUGFIX: the original listed bare attribute expressions here
        # (e.g. `self.__time`), which raised AttributeError on construction.
        self.__time = None           # current time as sent by the app
        self.__alarm_time = None     # alarm time as sent by the app
        self.__alarm_control = None  # alarm on/off flag
        self.__light_control = None  # light on/off flag
    def set_time(self, time):
        self.__time = time
        # TODO: also set the actual system time here so the time modules can be used
    def set_alarm_time(self, alarm_time):
        self.__alarm_time = alarm_time
    def set_alarm_status(self, control):
        self.__alarm_control = control
    def set_light_status(self, control):
        # BUGFIX: previously wrote __alarm_control, clobbering the alarm state.
        self.__light_control = control
    def time(self):
        return self.__time
    def alarm_time(self):
        return self.__alarm_time
    def alarm_status(self):
        return self.__alarm_control
    def light_status(self):
        # BUGFIX: previously returned __alarm_control.
        return self.__light_control
    def print_settings(self):
        # BUGFIX: the original omitted `self`, so this could never be
        # called as a method.  print() with a single string behaves the
        # same on Python 2 and Python 3.
        print("Time is " + str(self.__time))
        print("Alarm is set for " + str(self.__alarm_time))
        print("Alarm is " + str(self.__alarm_control))
        print("Light is " + str(self.__light_control))
| true |
139d79baefe6580b538fe61e92e854424a0c988a | Python | kvin15/ensemble_methods_projects | /q02_stacking_clf/build.py | UTF-8 | 1,967 | 3.171875 | 3 | [] | no_license | # Default imports
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import accuracy_score
import pandas as pd
import numpy as np
# Loading data
# The last column is the target; everything before it is a feature.
dataframe = pd.read_csv('data/loan_prediction.csv')
X = dataframe.iloc[:, :-1]
y = dataframe.iloc[:, -1]
# Fixed random_state keeps the 70/30 split reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=9)
# Write your code here
def stacking_clf(model, X_train, y_train, X_test, y_test):
    """Train a stacked ensemble and return its accuracy on the test set.

    Each base estimator in `model` is fitted on (X_train, y_train); its
    class-probability predictions on the train and test sets become the
    input features of a logistic-regression meta-classifier.  Both class
    probability columns are kept per base model, mirroring the original
    implementation (a library stacker would use hard classes instead).
    """
    # Seeding with an empty frame keeps the zero-model edge case identical
    # to the original incremental-concat behavior.
    train_frames = [pd.DataFrame()]
    test_frames = [pd.DataFrame()]
    for base in model:
        base.fit(X_train, y_train)
        # Probabilities (not hard labels) feed the meta level.
        train_frames.append(pd.DataFrame(base.predict_proba(X_train)))
        test_frames.append(pd.DataFrame(base.predict_proba(X_test)))
    meta_train = pd.concat(train_frames, axis=1)
    meta_test = pd.concat(test_frames, axis=1)
    # Level-2 model: logistic regression on the stacked probabilities.
    meta_clf = LogisticRegression(random_state=9)
    meta_clf.fit(meta_train, y_train)
    predictions = meta_clf.predict(meta_test)
    return accuracy_score(y_true=y_test, y_pred=predictions)
| true |
17245d8b671b8afa75442d9b704c8e291fa6323b | Python | seanchen513/dcp | /dcp004 - given int array, find first missing pos int.py | UTF-8 | 2,564 | 4.625 | 5 | [] | no_license | """
dcp#4
This problem was asked by Stripe.
Given an array of integers, find the first missing positive integer in linear time and constant space. In other words, find the lowest positive integer that does not exist in the array. The array can contain duplicates and negative numbers as well.
For example, the input [3, 4, -1, 1] should give 2. The input [1, 2, 0] should give 3.
You can modify the input array in-place.
"""
# Naive method: search for all positive integers, starting with 1.
# At worst, search n+1 numbers. This takes O(n^2) worst-case.
# Can use sorting then do linear scan of array.
# This takes O(n log n + n) = O(n log n).
# Can use hashing. Hash all positive integers in array, then scan hash table
# for first missing positive integer.
# This takes O(n) on average, but requires O(n) extra space.
################################################################################
# Solution with O(n) time, O(1) space.
def first_missing_pos_int(a):
    """Return the smallest positive integer absent from `a`.

    O(n) time and O(1) extra space.  `a` is modified in place: positive
    values are partitioned to the front, and some entries are negated as
    presence markers.
    """
    # Phase 1: move every positive value to the front of the list.
    pos_count = 0
    for idx in range(len(a)):
        if a[idx] > 0:
            a[idx], a[pos_count] = a[pos_count], a[idx]
            pos_count += 1
    # Only 1..pos_count can be the answer: with pos_count positives, the
    # first gap is at most pos_count + 1.
    # Phase 2: for each in-range value v, flag presence by negating a[v-1].
    for idx in range(pos_count):
        val = abs(a[idx])  # abs(): this slot may itself already be flagged
        if 1 <= val <= pos_count:
            a[val - 1] = -abs(a[val - 1])  # abs(): avoid double-negating
    # Phase 3: the first slot left positive marks the first missing value.
    for idx, marker in enumerate(a[:pos_count]):
        if marker > 0:
            return idx + 1
    # All of 1..pos_count are present.
    return pos_count + 1
################################################################################
# Ad-hoc test cases.  Each assignment overwrites the previous one, so only
# the LAST list is actually exercised below; the trailing comment on each
# line records the expected answer for that input.
a = [3, 5, -1, 1, 4, 2] # 6
a = [1, 2, 0] # 3
a = [3, 4, -1, 1] # 2
a = [2, 3, 7, 6, 8, -1, -10, 15] # 1
a = [2, 3, -7, 6, 8, 1, -10, 15] # 4
a = [1, 1, 0, -1, -2] # 2 ... solution#1 gives incorrect answer of 1
a = [-1] # 1
a = [0] # 1
a = [1] # 2
a = [1, 1] # 2
a = [1, 1, 1] # 2
a = [1, 1, 1, 1] # 2
a = [1, 1, 1, 1, 1] # 2
print("array = {}".format(a))
f = first_missing_pos_int(a)
# The function mutates `a` in place (partitioning + sign marking).
print("modified array = {}".format(a))
print("first missing positive integer = {}".format(f))
| true |
c8903fa1b0e8b88bae2c2bf4f772ecae129f60fd | Python | SleeplessChallenger/Miyamoto_Musashi_adages | /Ingestors/TXTDecode.py | UTF-8 | 541 | 2.546875 | 3 | [] | no_license | from typing import List
from Engine import IngestorInterface
from Engine import QuoteModel
class TXTclass(IngestorInterface):
    """Ingestor for plain-text quote files with one "<body> - <author>" per line."""
    allowed = ['txt']

    @classmethod
    def parse(cls, fl: str) -> List[QuoteModel]:
        """Parse the .txt file at path `fl` into a list of QuoteModel.

        Blank lines are skipped.  Raises Exception when the extension is
        not in `allowed`.
        """
        if not cls.can_ingest(fl):
            raise Exception('Not desired extension!')
        # BUGFIX: `with` guarantees the handle is closed even if reading
        # raises; the original leaked the file on error.
        with open(fl, "r", encoding="latin-1") as file:
            temp = file.readlines()
        bucket = list()
        for x in temp:
            x = x.strip()
            if len(x) > 0:
                # NOTE(review): splitting on every '-' means a quote body
                # containing a dash loses text beyond the second field —
                # confirm the input format, or switch to x.split('-', 1).
                data = x.split('-')
                new_ = QuoteModel(data[0], data[1])
                bucket.append(new_)
        return bucket
| true |
891967c953d4cc978b550b8b0d66019d15325e63 | Python | Mohamed-MERZOUK/Apprentissage-supervise | /plot_boundaries.py | UTF-8 | 1,284 | 2.953125 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.colors import ListedColormap
palette = sns.color_palette("Set2")
def plot_boundaries(X, y, clf, title, xLabel, yLabel, pathToSave):
    """Plot the 2-D decision regions of the fitted classifier `clf`.

    X is indexed as (n_samples, 2) and y is flattened for coloring, so
    two feature columns are assumed.  The figure is saved to `pathToSave`
    and then shown.
    """
    # One palette color per distinct class present in y.
    nbClasses = pd.DataFrame(y)[0].value_counts().count()
    cmap_bold = palette[0:nbClasses]
    cmap_light = ListedColormap(palette[0:nbClasses])
    h = .02 # step size in the mesh
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure(figsize=(12, 6))
    plt.contourf(xx, yy, Z, cmap=cmap_light)
    # Overlay the training points on the shaded regions.
    # NOTE(review): `df` below is never used — candidate for removal.
    df = pd.DataFrame(data=y, columns=["class"])
    sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=y.flatten().astype(int), palette=cmap_bold, alpha=1.0, edgecolor="black")
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.title(title)
    plt.xlabel(xLabel)
    plt.ylabel(yLabel)
    plt.savefig(pathToSave)
plt.show() | true |
f06b49d4665c9883409c306a05cb0d83b9a726b3 | Python | LisaPei/Webscrapers | /beamer.py | UTF-8 | 2,006 | 2.703125 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
from datetime import datetime
requests.packages.urllib3.disable_warnings()
def scrape(url):
    """Scrape `url` and report whether it serves a Beamer changelog feed.

    Returns a 7-item list:
    [url, powered-by-beamer, has-changelog, free-user, watermark,
     most-recent-post date string, average days between posts]
    with '' placeholders for fields that do not apply.  On any request
    failure the second field is the string 'Error'.
    """
    print('u', end='')  # progress marker, one char per URL scraped
    # Strip a trailing '/*' (wildcard entries from the input list).
    if url.endswith('/*'):
        url = url[:-2]
    # Fetch the page; certificate verification is deliberately disabled.
    try:
        response = requests.get('http://' + url, timeout=5, verify=False)
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit /
        # KeyboardInterrupt; Exception keeps the same handling of request
        # failures without trapping interpreter exits.
        return [url, 'Error', '', '', '', '', '']
    html = response.text
    # html.parser works on windows; lxml was the alternative used on mac.
    soup = BeautifulSoup(html, features='html.parser')
    powered_by_beamer = 'powered by beamer' in html.lower()
    if powered_by_beamer:
        # A catItemName span marks a rendered changelog entry list.
        change_log = soup.find('span', attrs={'class': 'catItemName'}) is not None
        if change_log:
            free_user = 'app.getbeamer.com' in url
            watermark = 'feed by' in html.lower()
            most_recent_post = soup.find('div', attrs={'class': 'featureDate'}).findChildren()[-1].text
            list_posts = soup.find('div', attrs={'id': 'firstResults'}).findChildren()
            list_posts = [x for x in list_posts if x.get('role') == 'listitem']
            # The first and last visible posts bound the date range.
            post1 = list_posts[0].findChild().findChildren()[-1].text  # newest post
            post2 = list_posts[-1].findChild().findChildren()[-1].text  # oldest post
            date1 = datetime.strptime(post1, '%B %d, %Y')
            date2 = datetime.strptime(post2, '%B %d, %Y')
            average_days_between_posts = (date1 - date2).days / (len(list_posts) - 1)
            return [url, True, True, free_user, watermark, most_recent_post, average_days_between_posts]
        else:
            return [url, True, False, '', '', '', '']
    else:
        return [url, False, '', '', '', '', '']
if __name__ == '__main__':
    # Manual smoke test against a known Beamer-powered updates page.
    print(scrape('updates.convertflow.com'))
| true |
df182b8bc969e750ae908ab70d3fadd9a3cb4aad | Python | MarkoJereb/SchoolProjects | /multiThreadSort.py | UTF-8 | 4,752 | 3.46875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3.8
# Implementation of merge sort as single process, multithreaded process and multiprocess and how they compare to python built-in
# sorted() function.
import math
import multiprocessing
import random
import sys
import threading
import time
import queue
def merge(*args):
    """Merge two sorted lists into one sorted list.

    Accepts either merge(left, right) or merge((left, right)); the
    single-tuple form plays nicely with multiprocessing.Pool.map.
    Stable: on ties the element from `left` comes first.
    """
    left, right = args[0] if len(args) == 1 else args
    out = []
    li = ri = 0
    while li < len(left) and ri < len(right):
        if left[li] <= right[ri]:
            out.append(left[li])
            li += 1
        else:
            out.append(right[ri])
            ri += 1
    # At most one of the two tails is non-empty; extending both is safe.
    out.extend(left[li:])
    out.extend(right[ri:])
    return out
def merge_sort(data):
    """Top-down recursive merge sort; returns a sorted list.

    Lists of length <= 1 are returned as-is (not copied).
    """
    if len(data) <= 1:
        return data
    mid = len(data) // 2  # integer division
    return merge(merge_sort(data[:mid]), merge_sort(data[mid:]))
def split_data(data, split):
    """Partition `data` into exactly `split` consecutive chunks.

    Chunk size is ceil(len(data) / split); the last chunk may be shorter,
    and when len(data) < split some trailing chunks are empty.  Callers
    rely on exactly `split` chunks coming back.
    """
    # BUGFIX (readability): the local previously shadowed the function's
    # own name, which would break any recursive/self-referencing use.
    chunk = int(math.ceil(float(len(data)) / split))
    return [data[i * chunk:(i + 1) * chunk] for i in range(split)]
def merge_sort_parallel(data):
    """Sort `data` using a two-process worker pool.

    The input is split into one partition per worker, each partition is
    merge-sorted in parallel, then pairs of sorted partitions are merged
    (again on the pool) until a single sorted list remains.
    """
    processes = 2
    # BUGFIX: use the pool as a context manager so the worker processes
    # are terminated and joined; the original never closed the pool.
    with multiprocessing.Pool(processes=processes) as pool:
        data = split_data(data, processes)
        data = pool.map(merge_sort, data)
        # Reduce the sorted partitions pairwise until one remains.
        while len(data) > 1:
            # With an odd count, hold the last partition out for one round
            # and append it back after merging the pairs.
            extra = data.pop() if len(data) % 2 == 1 else None
            data = [(data[i], data[i + 1]) for i in range(0, len(data), 2)]
            # BUGFIX: test `is not None` — an empty-list partition is falsy
            # and would previously have been silently dropped.
            data = pool.map(merge, data) + ([extra] if extra is not None else [])
    return data[0]
def merge_sort_threads(data):
    """Sort `data` with two worker threads.

    Each thread merge-sorts one half and puts its result on a queue; the
    calling thread joins the workers, drains the queue and merges the two
    sorted halves.  (Queue arrival order doesn't matter: merging two
    sorted lists is order-insensitive.)
    """
    thread_count = 2
    chunks = split_data(data, thread_count)
    results = queue.Queue()
    workers = []
    for part in chunks:
        worker = threading.Thread(
            target=lambda q, chunk: q.put(merge_sort(chunk)),
            args=(results, part))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    halves = []
    while not results.empty():
        halves.append(results.get())
    # merge() accepts a single two-item sequence argument.
    return merge(halves)
if __name__ == "__main__":
for size in [10**3, 10**4, 10**5, 10**6, 10**7]:
data_unsorted = [random.randint(0, size) for _ in range(size)]
for sort in merge_sort, merge_sort_threads, merge_sort_parallel, sorted:
start = time.time()
data_sorted = sort(data_unsorted)
deltatime = time.time() - start
print("For size = {3}, function {0} took {1:.6f} seconds and data is sorted = {2}.".format(sort.__name__,
deltatime,
sorted(data_unsorted) == data_sorted,
size))
print('-' * 25)
| true |
3f8943dbd765f2f69e8f55c5d9860062461785fe | Python | Chrisrdouglas/LeapLight | /LightController.py | UTF-8 | 1,029 | 3.03125 | 3 | [] | no_license | from lifxlan import LifxLAN, Light, BLUE, CYAN, GREEN, ORANGE, PINK, PURPLE, RED, YELLOW, WHITE
class LightController:
    """Controls a single LIFX bulb: color cycling and power toggling."""

    def __init__(self, mac=None, ip=None):
        """Connect directly when both `mac` and `ip` are given; otherwise
        discover the LAN and use the first bulb found."""
        if mac is not None and ip is not None:
            # TODO(review): wrap in try/except and fall back to discovery
            # when the direct connection fails (noted in the original).
            self.bulb = Light(mac, ip)
        else:
            # BUGFIX (dead code): the original had a third, unreachable
            # branch duplicating this discovery path; it is folded in here.
            self.bulb = LifxLAN(1).get_lights()[0]  # just get whatever light you find
        self.color = 0
        self.colors = [BLUE, GREEN, RED, WHITE]

    def shiftColor(self):
        """Advance to the next color in the cycle and apply it to the bulb."""
        self.color = (1 + self.color) % len(self.colors)
        self.bulb.set_color(self.colors[self.color])

    def togglePower(self):
        """Toggle bulb power; lifxlan reports full power as 65535."""
        if self.bulb.get_power() == 65535:
            self.bulb.set_power(0)
        else:
            self.bulb.set_power(65535)
| true |
8a5f69039eb1e2f0acfa43ea91713ffc114408c2 | Python | cmargerum/GeneticNeuralNet | /human.py | UTF-8 | 502 | 3.484375 | 3 | [] | no_license | """
if keys[pygame.K_UP]:
car.increase_speed()
if keys[pygame.K_DOWN]:
car.decrease_speed()
if keys[pygame.K_LEFT]:
if car.speed > 0:
car.turn_angle = (5 * car.speed / 15) + 3
car.angle = (car.angle + car.turn_angle) % 360
car.rotate(car.angle)
if keys[pygame.K_RIGHT]:
if car.speed > 0:
car.turn_angle = (5 * car.speed / 15) + 3
car.angle = car.angle - car.turn_angle if car.angle - car.turn_angle > 0 else 360 - (car.turn_angle - car.angle)
car.rotate(car.angle)
""" | true |
9d49ba9cf17dcd2e0b90ef878e2f9f0ba729605f | Python | jaldd/python | /jichu/liebiao.py | UTF-8 | 791 | 3.21875 | 3 | [] | no_license | # -*- coding:utf-8 -*-
# Author:Alex Li
# Scratch pad demonstrating list indexing, slicing, and shallow vs deep copying.
names=["a","b","c","e","v"]
names2=["1","2","3",["4","5"]]  # the nested list makes copy semantics visible below
print(names2[:-1:2])
# print(names[1])
# print(names[1:3])
# print(names[-3:-1])
# print(names[-3:])
# names.append("d");
# names.insert(1,"f")
# names[1]=3
# names.remove(3)
# names.remove(names[2])
# names.pop()
# names.pop(0)
# print(names)
# print(names.index("b"))
# names.append(names[names.index("b")])
# print(names.count("b"))
# names.reverse()
# print(names)
# names.sort()
# print(names)
# names.clear()
# print(names)
# names.extend(names2)
# del names2
print(names,names2)
# copy() is SHALLOW: top-level elements are copied, nested lists stay shared.
names3=names2.copy()
print(names3)
names2[0]=111  # top-level change: does NOT show up in names3
print(names3)
names2[3][0]="0"  # nested change: DOES show up in names3 (shared sublist)
print(names2)
print(names3)
import copy
# deepcopy() recursively copies nested structures as well.
names4=copy.deepcopy(names2)
names2[3][0]="9"  # no longer visible through names4
print(names2)
print(names4)
| true |
dc7f113905fdadf78b31a468c95713a818b31225 | Python | 1214101059/UNIDAD2 | /DVRS_evaluacion.py | UTF-8 | 334 | 3.921875 | 4 | [] | no_license | """Funcion que calcule el potencial gravitatorio, por Diana Resendiz"""
def potencialgravitatorio(G: float, M: float, r: float) -> float:
    """Return the gravitational potential -G*M/r of mass M at distance r."""
    return -G * (M / r)
"""Funcion que convierta grados Celsius a Farenheit"""
def FarenheitCelsius(C: float) -> float:
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    return (C * 1.8) + 32
| true |
1c7ab98c85723d566a50440956d01019971b435a | Python | 92RogerCao/Encoder-Decoder-Models | /encoder-decoder_2.py | UTF-8 | 4,367 | 2.84375 | 3 | [] | no_license | # Python version: 3.7.7
# Tensorflow-gpu version: 1.14.0
# Keras version: 2.2.4-tf
# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------
"""
Encoder-Decoder 2
"""
# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------
# =============================================================================
# =============================================================================
# Teacher forcing
# Encoder-decoder models using tfa.seq2seq addon
# =============================================================================
# =============================================================================
import numpy as np
from random import randint
from numpy import array
from numpy import argmax
from keras.utils import to_categorical
# Data description:
# Input is a sequence of n_in numbers. Target is first n_out elements
# of the input sequence in the reversed order
# generate a sequence of random integers
def gen_sequence(length, n_unique):
    """Return `length` random integers drawn from 1..n_unique-1 inclusive
    (0 is kept free for use as the start-of-sequence token)."""
    return [randint(1, n_unique - 1) for _ in range(length)]
# decode one hot encoded string
def one_hot_decode(encoded_seq):
    """Map each one-hot (or probability) vector to the index of its largest entry."""
    return [argmax(vec) for vec in encoded_seq]
def gen_dataset(n_in, n_out, cardinality, n_samples):
    """Build `n_samples` (encoder_in, decoder_in, target) training triples.

    Each source is a random sequence of n_in token ids in 1..cardinality-1.
    The target is the first n_out source tokens reversed; the decoder input
    is the target shifted right with 0 as the start-of-sequence token
    (teacher forcing).  Returns three numpy arrays.
    """
    encoder_in, decoder_in, targets = [], [], []
    for _ in range(n_samples):
        source = gen_sequence(n_in, cardinality)
        # Target = first n_out source values, reversed.
        target = list(reversed(source[:n_out]))
        # Decoder input = target shifted right, 0 in the first time step.
        shifted = [0] + target[:-1]
        encoder_in.append(source)
        decoder_in.append(shifted)
        targets.append(target)
    return array(encoder_in), array(decoder_in), array(targets)
# --- Dataset ---
k_features = 40  # token-id cardinality (vocabulary size)
n_steps_in = 7 # time steps in
n_steps_out = 3 # time steps out
X1, X2, y = gen_dataset(n_steps_in, n_steps_out, k_features, 10000)
print(X1.shape, X2.shape, y.shape)
# --- Model: shared embedding -> LSTM encoder -> tfa BasicDecoder ---
# pip install tensorflow_addons
import tensorflow_addons as tfa # requires TensorFlow version >= 2.1.0
import tensorflow as tf
tf.random.set_seed(42)
vocab_size = k_features
embed_size = 10
n_units=512
# Token-id inputs; sequence_lengths exists to support variable lengths.
encoder_inputs = tf.keras.layers.Input(shape=[None], dtype=np.int32)
decoder_inputs = tf.keras.layers.Input(shape=[None], dtype=np.int32)
sequence_lengths = tf.keras.layers.Input(shape=[], dtype=np.int32) # for different lengths
# One embedding table shared between encoder and decoder.
embeddings = tf.keras.layers.Embedding(vocab_size, embed_size)
encoder_embeddings = embeddings(encoder_inputs)
decoder_embeddings = embeddings(decoder_inputs)
encoder = tf.keras.layers.LSTM(n_units, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_embeddings)
# The encoder's final (h, c) state seeds the decoder.
encoder_state = [state_h, state_c]
# TrainingSampler feeds the ground-truth previous token at each decode
# step, i.e. teacher forcing.
sampler = tfa.seq2seq.sampler.TrainingSampler()
decoder_cell = tf.keras.layers.LSTMCell(n_units)
output_layer = tf.keras.layers.Dense(vocab_size)
decoder = tfa.seq2seq.basic_decoder.BasicDecoder(decoder_cell, sampler,
                                                 output_layer=output_layer)
seq_length_out = np.full([10000], n_steps_out) # set the length of output (it must be a vector)
final_outputs, final_state, final_sequence_lengths = decoder(
    decoder_embeddings, initial_state=encoder_state,
    sequence_length=seq_length_out) # one length entry per sample
Y_proba = tf.nn.softmax(final_outputs.rnn_output)
model = tf.keras.models.Model(
    inputs=[encoder_inputs, decoder_inputs, sequence_lengths],
    outputs=[Y_proba])
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam",metrics=['accuracy'])
seq_length_in = np.full([10000], n_steps_in)
history = model.fit([X1, X2, seq_length_in], y, epochs=5) | true |
d6a4c0f45b1070e9445d1e26d8dccc78e7a3ac4b | Python | kiliakis/cpp-benchmark | /kickNdrift/python/vectorMath1.py | UTF-8 | 928 | 3.015625 | 3 | [] | no_license | import time
# Micro-benchmark (Python 2 syntax): time the NumPy vectorised `a += b`.
begin = time.time()
start = time.time()
import numpy as np
print "imports: ", time.time() - start
N = 1000000  # elements per vector
ITERS = 1    # number of a += b passes
# if os.getenv('N_ELEMS'):
#     N = int(os.getenv('N_ELEMS'))
# if os.getenv('N_ITERS'):
#     ITERS = int(os.getenv('N_ITERS'))
print "Number of turns: %d" % ITERS
print "Number of points: %d" % N
print "\n"
start = time.time()
a = np.random.rand(N)
b = np.random.rand(N)
end = time.time()
print "initialization: ", end - start
# NOTE(review): `sum` and `iter` below shadow Python builtins.
sum = 0.0
start = time.time()
for iter in range(ITERS):
    a += b  # the in-place vectorised add being timed
end = time.time()
elapsed = end - start
print "run time: ", end - start
start = time.time()
sum = np.sum(a)  # reduction forces the result to be materialised
end = time.time()
print "finalization: ", end - start
# NOTE(review): this is millions of element-updates per second, though the
# label below says MB/sec — confirm the intended units.
throughput = 1. * N * ITERS / elapsed / 1e6
print "a += b bench"
print "Elapsed Time : %.4f sec" % elapsed
print "Throughput : %.3f MB/sec" % throughput
print "Sum : %.5e" % sum
print "\n"
print "Total time: ", time.time() - begin | true |
8191974299ea47c02fb7ed86e687427e263101d1 | Python | Mia416/PythonPratices | /chentestPython1/JSONModule/__init__.py | UTF-8 | 3,281 | 2.609375 | 3 | [] | no_license | import json
import urllib.request
from urllib.request import urlopen
import xml.etree.ElementTree as ET
import XMLModule
#https://docs.python.org/3/library/json.html
#load() loads JSON from a file or file-like object
#loads() loads JSON from a given string or unicode object
#Encode: converting a Python object to JSON is done with dumps/dump. The only difference: dump writes the JSON to a file object (fp) stream, while dumps returns a string:
#python_to_json = json.dumps(jsonstring)
#python_to_json2 = json.dumps(jsonstring,sort_keys=True,indent =4,separators=(',', ': '),ensure_ascii=True )
#json_to_python = json.loads(python_to_json2)
#dumps takes an object and produces a string:json dumps -> returns a string representing a json object from an object.
#load would take a file-like object, read the data from that object, and use that string to create an object: returns an object from a string representing a json object.
def load_fromURL():
    """Fetch the artifact listing for release 17.2.1 from the idoru service
    and print the download URI of every artifact.

    json.load() decodes straight from the HTTP response stream.
    NOTE(review): requires network access to the internal oraclecorp host.
    """
    url = 'http://idoru.oraclecorp.com:8080/v1/services/_/versions/_/artifacts?release=17.2.1&previous_release=17.1.6&qualifiers=tasbp'
    response = urlopen(url)
    json_to_python = json.load(response)
    for node in json_to_python["artifacts"]:
        print (node["uri"])
def load_fromString():
    """Round-trip an embedded artifact dict through json.dumps/json.loads,
    then download every 'uri' entry into the current directory (filename =
    last URL path segment) and print it.

    NOTE(review): the dumps->loads round trip only normalises the dict;
    the urlretrieve calls hit internal oracle.com hosts.
    """
jsonstring ={
'artifacts':
[
{
'qualifier':'tasbp',
'service':{
'release':'17.2.1',
'display_name':'Application Container Cloud',
'artifact_id':'apaas',
'version':'17.2.1-531',
'target_maturity':'production',
'service_id':'c7928dd7-dca5-4225-9486-f2286e417e45'
},
'uri':'http://almrepo.us.oracle.com/artifactory/opc-woodhouse-release/com/oracle/opc/definition/tasbp-apaas/17.2.1-1703131042/tasbp-apaas-17.2.1-1703131042.zip'
},
{
'qualifier':'tasbp',
'service':{
'release':'17.2.1',
'display_name':'psm',
'artifact_id':'psm',
'version':'17.2.1-548',
'tags':[
'17.2.1.2'
],
'target_maturity':'production',
'service_id':'8720ac6d-c99b-4bbe-9958-094ee35bc99c'
},
'uri':'http://almrepo.us.oracle.com/artifactory/opc-woodhouse-release/com/oracle/opc/definition/tasbp-psm-jaas/17.1.5-543/tasbp-psm-jaas-17.1.5-543.zip'
},
]
}
python_to_json2 = json.dumps(jsonstring,sort_keys=True,indent =4,separators=(',', ': '),ensure_ascii=True )
json_to_python = json.loads(python_to_json2)
for node in json_to_python["artifacts"]:
urladdress = node["uri"]
filename = urladdress.split('/')[-1]
req = urllib.request.urlretrieve(urladdress, filename)
print (node["uri"])
def load_xml_node():
    """Print the first <name> element (tag and text) of the blueprint XML,
    once via findall() with a relative XPath and once via iter().

    Expects 'tasbp-psm-JaaSTASBlueprint.xml' in the working directory; the
    URL in braces is the document's XML namespace.
    """
    xmltree = ET.parse('tasbp-psm-JaaSTASBlueprint.xml')
    # findall('.//{ns}name') matches descendants anywhere in the tree.
    for node in xmltree.findall('.//{http://xmlns.schemas.oracle.com/tasBlueprint}name'):
        print (node.tag, node.text)
        break
    # iter() walks the whole tree; break keeps only the first match.
    for node in xmltree.iter('{http://xmlns.schemas.oracle.com/tasBlueprint}name'):
        print (node.tag, node.text)
        break
# Script entry point: run the string-based demo (downloads the listed artifacts).
load_fromString()
| true |
dd80d7e701aa07b553e79022f654b55f76f7a7e3 | Python | StepanAnisenko/lesson1 | /answers.py | UTF-8 | 239 | 3.125 | 3 | [] | no_license | def get_answer(question,answer):
return answer[question]
quest=input()
ans={"привет": "И тебе привет!", "как дела": "Лучше всех", "пока": "Увидимся"}
result=get_answer(quest,ans)
print(result)
| true |
09178505d2d84c04f2bc2a55eb4fede552770b1a | Python | chaitanyamean/python-algo-problems | /simpleProblems/distanceBtwTwoPoints.py | UTF-8 | 329 | 3.859375 | 4 | [] | no_license |
## find if the point is inside circle or not
import math
(Cx, Cy, r) = input().split()
(Px, Py) = input().split()
Cx = float(Cx)
Cy = float(Cy)
r = float(r)
Px = float(Px)
Py = float(Py)
d = math.sqrt((Cx - Px) ** 2 + (Cy - Py) ** 2)
if d < r:
print('Point is inside Circle')
else:
print('Point is outside circle') | true |
6e9ea47efcf5342ded7378a3e5fa5f01c82a6eb2 | Python | Erick-Faster/gerbot-api | /resources/neural.py | UTF-8 | 599 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | from flask_restful import Resource, reqparse
from flask import json
from models.bot import ChatBotModel
import random
bot = ChatBotModel()
class Neural(Resource):
    """REST resource that runs the module-level chatbot over the posted
    'antwort' text and returns its reply as JSON."""
    # Request parser: declares and validates the expected POST payload.
    parser = reqparse.RequestParser() # input validation rules
    parser.add_argument('antwort',
        type=str,
        required=True,
        help="Este campo não pode ficar em branco"
    )
    def get(self):
        # GET is intentionally not implemented for this resource.
        pass
    def post(self):
        data = self.parser.parse_args() # validate the declared input fields
        # Delegate to the shared ChatBotModel instance created at import time.
        response = {"response": bot.chatbot_response(data['antwort'])}
        return response
8861e3a120eedc02fb058f7356d52bf159843831 | Python | kangere/spring19_programming_languages | /lang/type.py | UTF-8 | 1,513 | 3.46875 | 3 | [] | no_license |
class Type:
    """Common base class for every type in the language's type system."""
    pass
class BoolType(Type):
    """The Boolean primitive type."""

    def __str__(self):
        return "Bool"
class IntType(Type):
    """The integer primitive type."""

    def __str__(self):
        return "Int"
class ArrowType(Type):
    """
    Arrow (single-parameter function) type:
    T1 -> T2
    """
    def __init__(self, param, ret):
        self.param = param  # parameter type (T1)
        self.ret = ret      # return type (T2)
    def __str__(self):
        # Bug fix: this previously read self.t1 / self.t2, which __init__
        # never sets, so printing an ArrowType raised AttributeError.
        return f"{self.param} -> {self.ret}"
class FuncType(Type):
    """
    Function type over several parameters:
    (T1, T2, T3, ..., Tn) -> Tr
    """
    def __init__(self, params, ret):
        self.params = params  # list of parameter types
        self.ret = ret        # return type
    def __str__(self):
        rendered = map(str, self.params)
        return f"{' '.join(rendered)} -> {self.ret}"
class TupleType(Type):
    """
    Tuple type: {T1, T2, ..., Tn}. Components are added one at a time.
    """
    def __init__(self):
        self.types = []    # component types, in insertion order
        self.numTypes = 0  # cached length, kept in sync by add()
    def add(self, t):
        """Append a component type; only Type instances are accepted."""
        assert isinstance(t, Type), "Type required"
        self.types.append(t)
        self.numTypes += 1
    def get(self, index):
        """Return the component at *index*, raising on out-of-range access.

        Bug fix: the original guard was `index >= self.numTypes and
        index < 0`, which can never be true, so the bounds check never
        fired (negative indices silently wrapped around and large indices
        leaked a raw IndexError).
        """
        if index >= self.numTypes or index < 0:
            raise Exception("Index out of bounds")
        return self.types[index]
    def size(self):
        """Number of component types."""
        return self.numTypes
    def __str__(self):
        return f"{' '.join(map(str,self.types))}"
class IdType(Type):
    """A named type identifier (type variable)."""

    def __init__(self, name):
        self.name = name  # the identifier this type refers to
class AbsType(Type):
    """Type abstraction: binds *var* inside the type expression *expr*."""

    def __init__(self, var, expr):
        self.var = var
        self.expr = expr
class AppType(Type):
    """Type application: applies type t1 to type t2."""

    def __init__(self, t1, t2):
        # Both operands must themselves be types.
        assert isinstance(t1, Type)
        assert isinstance(t2, Type)
        self.t1 = t1
        self.t2 = t2
# Shared singleton instances used by the rest of the type checker.
intType = IntType()
boolType = BoolType()
5c8f3a444ebd4ff34fb94446dc9514fe6e4672c2 | Python | mt2962/lab_model_fitting | /fitter.py | UTF-8 | 2,164 | 3.421875 | 3 | [] | no_license | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
# ------------------------------------------------------
# PLOTDATA: plots a line and saves
# it as a png image ...
#
def plotData(x,y,name):
    """Scatter-plot the (x, y) data as black dots and save the figure
    to the file *name* (clears any previous figure first)."""
    plt.clf()
    plt.plot(x,y,'k.')
    plt.title('Raw Data')
    plt.savefig(name)
# ------------------------------------------------------
# PLOTFIT: plots line AND data and saves it as a png
#
#
def plotFit(x,y,z,name):
    """Plot the data (x, y) as points plus the model curve (x, z) as a
    line, then save the combined figure to the file *name*."""
    plt.clf()
    plt.plot(x,y,'k.')
    plt.plot(x,z,'k-')
    plt.title('Fit to Data')
    plt.savefig(name)
# ------------------------------------------------------
# GRABDATA
# Pulls data from file and stores it in x,y
#
def grabData():
    """Load 'linFit.txt' and return its two columns as an (x, y) pair."""
    dat = np.genfromtxt('linFit.txt')
    return (dat[:, 0], dat[:, 1])
# ------------------------------------------------------
# Step 3: Given list of x return list of h0*x
def hubble(x, h0):
    """Return the model prediction h0 * x for every element of x."""
    return [h0 * xi for xi in x]
# Given data lists x and y, this function
# finds how well a line, hubble(x) = h0*x fits
# by calculating the sum of the square of
# the residuals
#
#Step 5:
def SSR(x, y, h0):
    """Sum of squared residuals between hubble(x, h0) and the data y."""
    predictions = hubble(x, h0)
    total = 0
    for idx in range(len(x)):  # residual for each data point
        total = total + (predictions[idx] - y[idx]) ** 2
    return total
# NOTE: this script uses Python 2 print statements.
# Step 2: Get data, plot it
#
x, y=grabData()
plotData(x,y,'rawData.png')
# Step 4: Plot an example (deliberately poor) model with h0 = 10
#
x,y=grabData()
z= hubble(x,10)
plotFit(x,y,z,'badFit.png')
#
# Step 6: Find SSR for poor fit from example function
#
x,y=grabData()
m = SSR(x,y,10)
print m
#
# Candidate slopes to scan: 0.0 to 100.0 in steps of 0.1.
#
h0s=np.arange(0.,100.,0.1)
#
# Step 7: Grid-search the slope that minimizes the sum of
# squared residuals.
#
best_h0 = 0.
min_ssr = np.inf
for h0 in h0s:
    k = SSR(x,y,h0)
    if k<min_ssr:
        best_h0=h0
        min_ssr=k
print best_h0
print min_ssr
#
# Step 8: Plot the best fit.
#
#
x,y=grabData()
z=hubble(x,best_h0)
plotFit(x,y,z,'bestFit.png')
#
# Step 9: Plot the residuals (model minus data).
#
#
res =[z[i]-y[i] for i in range(len(y))]
plotData(x,res,'resids.png')
| true |
94f14bc52ca84e627a4b30821fd947dc02ace501 | Python | Python-study-f/Algorithm-study_2H | /October_/210905/15683_Surveillance/15683_211001_hyeonsook95.py | UTF-8 | 4,008 | 2.921875 | 3 | [] | no_license | from copy import deepcopy
from itertools import product
# pypy 1852ms
# python 4492ms
def solution(N, M):
    """Brute-force CCTV coverage: reads an N x M grid from stdin and
    returns the minimum number of cells left unwatched over every
    combination of camera rotations."""
    # Number of *distinct* rotations worth trying per CCTV type
    # (type 2 repeats after 180 degrees, type 5 after 90 degrees).
    rotations = {
        1: list(range(4)),
        2: list(range(2)),
        3: list(range(4)),
        4: list(range(4)),
        5: list(range(1)),
    }
    # Initial viewing directions per CCTV type, as (row, col) unit steps.
    directions = {
        1: [(0, 1)],
        2: [(0, 1), (0, -1)],
        3: [(-1, 0), (0, 1)],
        4: [(0, -1), (0, 1), (-1, 0)],
        5: [(0, 1), (0, -1), (1, 0), (-1, 0)],
    }
    amt = N * M
    maps = []
    cases, cctvs = [], []
    for r in range(N):
        maps.append(list(map(int, input().split())))
        for c in range(M):
            if maps[r][c] != 0:
                amt -= 1
            if 0 < maps[r][c] < 6:
                cctvs.append((maps[r][c], r, c))
                # Record this CCTV's meaningful rotation counts as one list
                # so itertools.product can enumerate every combination.
                cases.append(rotations[maps[r][c]])
    # Return the direction list rotated by 90 degrees, `loop` times.
    def rotate(loop, direction):
        if loop == 0:
            return direction
        for _ in range(loop):
            tmp = []
            for r, c in direction:
                if r != 0:
                    r *= -1
                r, c = c, r
                tmp.append((r, c))
            direction = tmp[::]
        return direction
    # Mark cells watched along the given directions and return how many
    # previously-unwatched cells were covered; walls (6) block the ray.
    def watch(tmp, r, c, direction):
        cnt = 0
        for mr, mc in direction:
            vr, vc = r + mr, c + mc
            while -1 < vr < N and -1 < vc < M and tmp[vr][vc] != 6:
                if tmp[vr][vc] == 0:
                    cnt += 1
                    tmp[vr][vc] = -1
                vr, vc = vr + mr, vc + mc
        return cnt
    ans = 100
    # Try every combination of rotations across all CCTVs.
    for case in product(*cases):
        cnt = 0 # cells newly covered by this rotation combination
        tmp = deepcopy(maps)
        for cctv, loop in zip(cctvs, case):
            typ, r, c = cctv # CCTV type and its fixed grid position
            # Rotate this CCTV's base directions `loop` times.
            direction = rotate(loop, directions[typ])
            cnt += watch(tmp, r, c, direction)
        ans = min(ans, amt - cnt)
    return ans
# python 196ms
# https://www.acmicpc.net/source/33822999
def solution(N, M):
    """Faster variant: precompute, per CCTV and rotation, the SET of empty
    cells it covers, then maximise the union size over all combinations.
    (Redefines solution(); this is the version the main guard runs.)"""
    UP, DOWN, LEFT, RIGHT = [-1, 0], [1, 0], [0, -1], [0, 1]
    # All rotation variants per CCTV type; each variant is a direction list.
    DIRECTION = {
        1: [[UP], [DOWN], [LEFT], [RIGHT]],
        2: [[UP, DOWN], [LEFT, RIGHT]],
        3: [[RIGHT, UP], [RIGHT, DOWN], [LEFT, DOWN], [LEFT, UP]],
        4: [
            [UP, RIGHT, DOWN],
            [RIGHT, DOWN, LEFT],
            [DOWN, LEFT, UP],
            [UP, LEFT, RIGHT],
        ],
        5: [[UP, DOWN, LEFT, RIGHT]],
    }
    total = 0
    cases = []
    cctvs, maps = [], []
    for r in range(N):
        maps.append(list(map(int, input().split())))
        for c in range(M):
            if maps[r][c] == 0:
                total += 1
            elif 0 < maps[r][c] < 6:
                cases.append([])
                cctvs.append([maps[r][c], r, c])
    # For the CCTV at (r, c): return, per rotation variant, the set of
    # empty cells its rays cover (walls, value 6, block a ray).
    def detect(r, c, directions):
        cctv_case = []
        for direction in directions:
            case = set()
            for mr, mc in direction:
                vr, vc = r + mr, c + mc
                while -1 < vr < N and -1 < vc < M and maps[vr][vc] != 6:
                    if maps[vr][vc] == 0:
                        case.add((vr, vc))
                    vr, vc = vr + mr, vc + mc
            cctv_case.append(case)
        return cctv_case
    for idx, cctv in enumerate(cctvs):
        typ, r, c = cctv
        cases[idx] = detect(r, c, DIRECTION[typ])
    ans = 0
    # Pick one rotation per CCTV; maximise the number of covered cells.
    for case in product(*cases):
        sum = set() # NOTE: shadows the builtin sum() inside this loop
        for s in case:
            sum |= s
        ans = max(ans, len(list(sum)))
    return total - ans
if __name__ == "__main__":
    # First stdin line: grid dimensions N (rows) and M (columns).
    N, M = map(int, input().split())
    print(solution(N, M))
| true |
a7bf8545b5d8d20ebcf796d02fd7e57b469016e5 | Python | balampbv/sentence_labelling | /corpus.py | UTF-8 | 811 | 3.0625 | 3 | [] | no_license | datafile = './data/LabelledData.txt'
def load_corpus():
    """Read the labelled dataset, group sentences by label, and split each
    class 80/20 into train/test.

    Returns (label_set, train_set, test_set); the two sets are lists of
    (sentence, label) tuples. Dataset lines look like "<sentence> ,,, <label>".
    (Python 2 print statements.)
    """
    labels = {}
    print "Reading complete dataset."
    with open(datafile) as dfile:
        for line in dfile.read().splitlines():
            line = line.strip()
            if line:
                sp = line.split(',,,')
                l = sp[1].strip()
                if l not in labels:
                    labels[l] = []
                labels[l].append(sp[0].strip())
    #print labels
    label_set = set(labels.keys())
    print "Splitting into train (80%) and test (20%)."
    train_set = []
    test_set = []
    for k, v in labels.items():
        print "Class ==> {}, #samples ==> {}".format(k, len(v))
        # 80% of each class goes to training, the remaining 20% to testing:
        # train_idx = len(v) - len(v)/5. (The previous comment had this backwards.)
        train_idx = int(len(v) - len(v) / 5.0)
        train_set += [(sent, k) for sent in v[:train_idx]]
        test_set += [(sent, k) for sent in v[train_idx:]]
    return label_set, train_set, test_set
c7272c3107c7be1b386959405096df05974dfbfc | Python | dimaggiofrancesco/DATA_VISUALISATION-Graph-Interaction | /Graph interaction - Code.py | UTF-8 | 8,451 | 3.78125 | 4 | [] | no_license |
# coding: utf-8
# # Assignment 3 - Building a Custom Visualization
#
# ---
#
# In this assignment you must choose one of the options presented below and submit a visual as well as your source code
# for peer grading. The details of how you solve the assignment are up to you, although your assignment must use matplotlib
# so that your peers can evaluate your work. The options differ in challenge level, but there are no grades associated with
# the challenge level you chose. However, your peers will be asked to ensure you at least met a minimum quality for a given
# technique in order to pass. Implement the technique fully (or exceed it!) and you should be able to earn full grades for the assignment.
#
#
# Ferreira, N., Fisher, D., & Konig, A. C. (2014, April). [Sample-oriented task-driven
# visualizations: allowing users to make better, more confident decisions.]
# (https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/Ferreira_Fisher_Sample_Oriented_Tasks.pdf)
# In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems
# (pp. 571-580). ACM. ([video](https://www.youtube.com/watch?v=BI7GAs-va-Q))
#
#
# In this [paper](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/Ferreira_Fisher_Sample_Oriented_
# Tasks.pdf) the authors describe the challenges users face when trying to make judgements about probabilistic data
# generated through samples. As an example, they look at a bar chart of four years of data (replicated below in Figure 1).
# Each year has a y-axis value, which is derived from a sample of a larger dataset. For instance, the first value might
# be the number votes in a given district or riding for 1992, with the average being around 33,000. On top of this is
# plotted the 95% confidence interval for the mean (see the boxplot lectures for more information, and the yerr parameter of barcharts).
#
# <br>
# <img src="readonly/Assignment3Fig1.png" alt="Figure 1" style="width: 400px;"/>
# <h4 style="text-align: center;" markdown="1"> Figure 1 from (Ferreira et al, 2014).</h4>
#
# <br>
#
# A challenge that users face is that, for a given y-axis value (e.g. 42,000), it is difficult to know which x-axis
# values are most likely to be representative, because the confidence levels overlap and their distributions are different
# (the lengths of the confidence interval bars are unequal). One of the solutions the authors propose for this problem (Figure 2c)
# is to allow users to indicate the y-axis value of interest (e.g. 42,000) and then draw a horizontal line and color bars based on this value.
# So bars might be colored red if they are definitely above this value (given the confidence interval), blue if they are definitely
# below this value, or white if they contain this value.
#
#
# <br>
# <img src="readonly/Assignment3Fig2c.png" alt="Figure 1" style="width: 400px;"/>
# <h4 style="text-align: center;" markdown="1"> Figure 2c from (Ferreira et al. 2014). Note that the colorbar legend at the bottom
# as well as the arrows are not required in the assignment descriptions below.</h4>
#
# <br>
# <br>
#
# **Easiest option:** Implement the bar coloring as described above - a color scale with only three colors, (e.g. blue, white, and red).
# Assume the user provides the y axis value of interest as a parameter or variable.
#
#
# **Harder option:** Implement the bar coloring as described in the paper, where the color of the bar is actually based on the amount
# of data covered (e.g. a gradient ranging from dark blue for the distribution being certainly below this y-axis, to white if the value
# is certainly contained, to dark red if the value is certainly not contained as the distribution is above the axis).
#
# **Even Harder option:** Add interactivity to the above, which allows the user to click on the y axis to set the value of interest.
# The bar colors should change with respect to what value the user has selected.
#
# **Hardest option:** Allow the user to interactively set a range of y values they are interested in, and recolor based on this
# (e.g. a y-axis band, see the paper for more details).
#
# ---
#
# *Note: The data given for this assignment is not the same as the data used in the article and as a
# result the visualizations may look a little different.*
# In[34]:
#get_ipython().magic('matplotlib notebook')
import scipy.stats as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
np.random.seed(12345) # fixed seed so the sampled data is reproducible
# Four years of samples (3650 draws each) with different means and spreads.
df = pd.DataFrame([np.random.normal(32000, 200000, 3650),
                   np.random.normal(43000, 100000, 3650),
                   np.random.normal(43500, 140000, 3650),
                   np.random.normal(48000, 70000, 3650)],
                  index=[1992, 1993, 1994, 1995])
dft = df.transpose() # Transpose df: one column per year
dftg = dft.describe() # Summary statistics (mean/std/...) per year
# Gradient colour scale Blue->Red (dark blue, royal blue, deep sky blue,
# light blue, white, pink, coral, red, firebrick, dark red) as RGB tuples.
gmap = [(0, 0, 0.545),
        (0, 0, 1),
        (0.254, 0.412, 0.882),
        (0, 0.749, 1),
        (0.678, 0.847, 0.902),
        (1, 1, 1),
        (1, 0.752, 0.8),
        (1, 0.498, 0.314),
        (1, 0, 0),
        (0.698, 0.133, 0.133),
        (0.545, 0, 0)]
# Draw the initial (all-white) bar chart before any click happens.
print ('Please click with the mouse on the graph to select the y-axis value')
yerr = (1.96 * (dftg.loc['std'] / (math.sqrt(3650)))) # 95% confidence interval of the mean
plt.bar(df.index, dftg.loc['mean'], width=1, color=('w', 'w', 'w', 'w'), alpha=1, yerr=yerr, capsize=7,edgecolor='k') # Creates the plot
plt.xticks(df.index, ('1992', '1993', '1994', '1995')) # Sets a new x-axis label
plt.xlim(1990.8, 1996.2) # Sets the x-axis range
plt.axes().xaxis.set_ticks_position('none') # Removes ticks from x-axis
plt.xlabel('Year')
plt.ylim(0, 60000)
def onclick(event):
    """Click handler: redraw the bar chart, draw a horizontal line at the
    clicked y-value, and recolour each bar by how its distribution compares
    to that value (gradient from dark blue = below, through white, to
    dark red = above)."""
    print ('Please click with the mouse on the graph to select the y-axis value')
    var = event.ydata #Assigns to var the y-value in the graph where the mouse was clicked
    plt.gcf().clear() #Clear the previous graph
    yerr = (1.96 * (dftg.loc['std'] / (math.sqrt(3650)))) #95% confidence interval of the mean
    plt.bar(df.index, dftg.loc['mean'], width=1, color=('w', 'w', 'w', 'w'), alpha=1, yerr=yerr, capsize=7,edgecolor='k') # Creates the plot
    plt.xticks(df.index, ('1992', '1993', '1994', '1995')) # Sets a new x-axis label
    plt.xlim(1990.8, 1996.2) # Sets the x-axis range
    plt.axes().xaxis.set_ticks_position('none') # Removes ticks from x-axis
    plt.xlabel('Year')
    plt.ylim(0, 60000)
    plt.axhline(y=event.ydata, zorder=0) # Horizontal line at the chosen y value
    plt.annotate(str(int(event.ydata)),xy=(1990.9,event.ydata+1000)) # Label the line with the chosen y value
    # Distance of the chosen value from each year's mean, normalised by the
    # 95% confidence half-width (a z-score-like quantity).
    zscore = ((var - dftg.loc['mean']) / (1.96 * (dftg.loc['std'] / (math.sqrt(3650)))))
    # Map each year's score onto the 11-colour gradient: dark red when the
    # distribution sits above the line (score << 0), white when the line is
    # well inside the interval, dark blue when the distribution is below it.
    c = []
    for i in range(1992, 1996):
        if zscore.loc[i] < -0.9:
            c.append(gmap[10])
        elif -0.9 <= zscore.loc[i] < -0.7:
            c.append(gmap[9])
        elif -0.7 <= zscore.loc[i] < -0.5:
            c.append(gmap[8])
        elif -0.5 <= zscore.loc[i] < -0.3:
            c.append(gmap[7])
        elif -0.3 <= zscore.loc[i] < -0.1:
            c.append(gmap[6])
        elif -0.1 <= zscore.loc[i] < +0.1:
            c.append(gmap[5])
        elif +0.1 <= zscore.loc[i] < +0.3:
            c.append(gmap[4])
        elif +0.3 <= zscore.loc[i] < +0.5:
            c.append(gmap[3])
        elif +0.5 <= zscore.loc[i] < +0.7:
            c.append(gmap[2])
        elif +0.7 <= zscore.loc[i] < +1.0:
            c.append(gmap[1])
        else:
            c.append(gmap[0])
    plt.bar(df.index, dftg.loc['mean'], width=1, color=[c[0], c[1], c[2], c[3]], alpha=1, yerr=yerr, capsize=7,edgecolor='k') # Redraw with the computed colours
    plt.show()
# Wire the click handler to the current figure and display it.
cid = plt.gcf().canvas.mpl_connect('button_press_event', onclick)
plt.show()
| true |
4885c66b35f8e986f386d49837468ab30dc8176d | Python | mattclapham/NUbots | /module/support/logging/LegLoadsLogger/validate.py | UTF-8 | 4,053 | 3.46875 | 3 | [] | no_license | #!/usr/bin/python
import numpy
from matplotlib import pyplot
# Hysteresis thresholds for the left/right state machines: a foot is
# declared "loaded" above *_certainty and "unloaded" below *_uncertainty.
left_certainty = 0.5
left_uncertainty = 0.3
right_certainty = 0.5
right_uncertainty = 0.3
# Load our ground truth validation data: left/right samples alternate
# lines, and the first space-separated field is the 0/1 truth state.
with open('validation_ground_truth', 'r') as f:
    data = f.readlines()
left_truth = [float(d.strip().split(' ')[0]) for d in data[0::2]]
right_truth = [float(d.strip().split(' ')[0]) for d in data[1::2]]
# Load our predicted output data; the third field is the raw probability
# (the [1::2]/[2::2] slicing skips the first line -- presumably a header).
with open('validation_prediction', 'r') as f:
    data = f.readlines()
left_probability = [float(d.strip().split(' ')[2]) for d in data[1::2]]
right_probability = [float(d.strip().split(' ')[2]) for d in data[2::2]]
# Apply our bayesian filter to the left foot: np is the assumed noise,
# s the running estimate, n the accumulated weight (k is the gain).
np = .000001
s = 0.5
n = 0.5
state = 1
left_predict = []
left_state = []
for v in left_probability:
    k = n / (n + np) # (disabled alternative noise term: 2 * v)
    s = s + k * (v - s)
    n = (1 - k) * n + 1
    # Store our raw probability prediction
    left_predict.append(s)
    # Apply our hysteresis
    if s < left_uncertainty:
        state = 0
    elif s > left_certainty:
        state = 1
    # Store our state prediction (unchanged when s is between thresholds)
    left_state.append(state)
# Same filter, re-initialised, for the right foot.
np = .000001
s = 0.5
n = 0.5
state = 1
right_predict = []
right_state = []
for v in right_probability:
    k = n / (n + np) # (disabled alternative noise term: 2 * v)
    s = s + k * (v - s)
    n = (1 - k) * n+1
    right_predict.append(s)
    # Apply our hysteresis
    if s < right_uncertainty:
        state = 0
    elif s > right_certainty:
        state = 1
    # Store our state prediction
    right_state.append(state)
# Confusion-matrix counts for the left foot (truth vs. predicted state).
left_fp = 0
left_fn = 0
left_tp = 0
left_tn = 0
for v in zip(left_truth, left_state):
    if v[0] == 0 and v[1] == 0:
        left_tn += 1
    elif v[0] == 0 and v[1] == 1:
        left_fp += 1
    elif v[0] == 1 and v[1] == 0:
        left_fn += 1
    elif v[0] == 1 and v[1] == 1:
        left_tp += 1
# Confusion-matrix counts for the right foot.
right_fp = 0
right_fn = 0
right_tp = 0
right_tn = 0
for v in zip(right_truth, right_state):
    if v[0] == 0 and v[1] == 0:
        right_tn += 1
    elif v[0] == 0 and v[1] == 1:
        right_fp += 1
    elif v[0] == 1 and v[1] == 0:
        right_fn += 1
    elif v[0] == 1 and v[1] == 1:
        right_tp += 1
# Report all rates as percentages (Python 2 print statements).
print 'Left False Positive {:5.2f}%'.format(100.0 * float(left_fp) / float(left_fp + left_fn + left_tp + left_tn))
print 'Left False Negative {:5.2f}%'.format(100.0 * float(left_fn) / float(left_fp + left_fn + left_tp + left_tn))
print 'Left True Positive {:5.2f}%'.format(100.0 * float(left_tp) / float(left_fp + left_fn + left_tp + left_tn))
print 'Left True Negative {:5.2f}%'.format(100.0 * float(left_tn) / float(left_fp + left_fn + left_tp + left_tn))
print 'Left Accuracy {:5.2f}%'.format(100.0 * float(left_tp + left_tn) / float(left_fp + left_fn + left_tp + left_tn))
print
print 'Right False Positive {:5.2f}%'.format(100.0 * float(right_fp) / float(right_fp + right_fn + right_tp + right_tn))
print 'Right False Negative {:5.2f}%'.format(100.0 * float(right_fn) / float(right_fp + right_fn + right_tp + right_tn))
print 'Right True Positive {:5.2f}%'.format(100.0 * float(right_tp) / float(right_fp + right_fn + right_tp + right_tn))
print 'Right True Negative {:5.2f}%'.format(100.0 * float(right_tn) / float(right_fp + right_fn + right_tp + right_tn))
print 'Right Accuracy {:5.2f}%'.format(100.0 * float(right_tp + right_tn) / float(right_fp + right_fn + right_tp + right_tn))
print
print 'Total Accuracy {:5.2f}%'.format(100.0 * float(right_tp + right_tn + left_tp + left_tn) / float(left_fp + left_fn + left_tp + left_tn + right_fp + right_fn + right_tp + right_tn))
# Plot our predicted state with some offset to make it easy to distinguish from the ground truth
pyplot.plot([s * 0.8 + 0.1 for s in right_state], marker='^')
# Plot our raw probabilities
#pyplot.plot(right_probability, marker='.')
# Plot our bayesian prediction values
pyplot.plot(right_predict, marker='x')
# Plot our ground truth
pyplot.plot(right_truth, marker='o')
# Extend a little above 0,1
pyplot.ylim((-0.05,1.05))
# Show our graph
pyplot.show()
| true |
d06f1846326a0643f621b2b16d9df57de627d2c4 | Python | fireking77/aws-rds-mssql | /cmd_line_parser/set_config.py | UTF-8 | 1,413 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
import argparse
import global_config
def set_config():
    """Parse the command-line arguments and publish them on global_config.

    Returns nothing; the side effects are:
        global_config.rds_action        -- 'backup' or 'restore'
        global_config.config_file_path  -- path to the configuration file
        global_config.sql_bak_file_path -- path to the MSSQL .bak file
    """
    parser = argparse.ArgumentParser(
        prog='aws-rds-mssql',
        description='''AWS RDS / MSSQL backup and restore utility''',
        epilog='''
             Made by Darvi | System Architect - SRE / DevOps
             https://www.linkedin.com/in/istvandarvas/
             ''')
    parser.add_argument('rds_action',
                        choices=['backup', 'restore'],
                        help="Action to take")
    parser.add_argument("-c", "--config-file",
                        dest="config_file_path",
                        type=str,
                        required=True,
                        help="Configuration file")
    parser.add_argument("-bak", "--sql-bak-file",
                        dest="sql_bak_file_path",
                        type=str,
                        required=True,
                        help="Path to the MSSQL \"bak\" file")
    args = parser.parse_args()
    # Publish the parsed values as module-level globals for the rest of the app.
    global_config.rds_action = args.rds_action
    global_config.config_file_path = args.config_file_path
    global_config.sql_bak_file_path = args.sql_bak_file_path
| true |
82e5ea5df0999c0ee02ddd4d9a35fef2734b355e | Python | vino160898/SET- | /comprehension&membership_set.py | UTF-8 | 125 | 2.625 | 3 | [] | no_license | #comprehension
# Set comprehension demo (note: this result is immediately overwritten below).
s={no for no in range(1,6)}
# Membership operators on a set built from a string's characters
s=set('vino')
print(s)
print('o' in s)
print('o' not in s)
| true |
2f99d6fbc17266ff2b50f9e7537441722cb6a052 | Python | cpcdoy/non-max-suppression | /non_max_supression_tester.py | UTF-8 | 5,118 | 2.9375 | 3 | [] | no_license | import numpy as np
import sys
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import time
from non_max_supression import nms
class nms_tester:
    """Test harness for the nms implementation: generates noisy (jittered)
    bounding boxes, records which boxes a correct NMS should keep, and
    plots input vs. reference vs. actual output for visual comparison."""
    def __init__(self):
        pass
    def jitter(self, bb, curr_idx, jitter_amount=3, iou_threshold=0.5):
        """Jitter a box to simulate noisy bounding boxes output by a detector.

        Using a uniform distribution, we take a given bb and generate new,
        noisy bounding boxes with a fairly high IoU against the original.

        Parameters:
        -bb (array): bounding box to jitter
        -curr_idx (int): current index in the final generated bb array, used
         to emit the reference data
        -jitter_amount (int, default=3): amount of jittering; higher means
         less chance of a high IoU and more variance in the coordinates
        -iou_threshold (float, default=0.5): IoU below which a jittered box
         counts as a separate detection in the reference data

        Returns:
        -jittered_bbs (np.array): the final jittered bounding boxes
        -idxs_y (np.array): indices of boxes NMS should keep (reference data)
        """
        # This array will contain the initial bb plus all its jittered copies
        jittered_bbs = [bb]
        # Randomly decide how many times we want to jitter the bb
        nb_jitter = np.random.randint(0, 10)
        # Array used to store the reference data
        idxs_y = [curr_idx]
        for i in range(nb_jitter):
            # Generate one jittered bb: shift the 4 coordinates, new score
            tmp = np.concatenate((bb[:4] + np.random.randint(0, jitter_amount, (4)), [np.random.uniform(0.0, 0.9)]))
            # If IoU < iou_threshold, NMS won't suppress this box later,
            # so its index belongs in the reference list
            if nms.get_iou(bb, tmp) < iou_threshold:
                idxs_y.append(i + curr_idx + 1)
            jittered_bbs.append(tmp)
        # Return the final jittered bbs and the reference data
        return np.array(jittered_bbs), np.array(idxs_y)
    def gen_data(self, nb_bb, max_coord=100, max_size=40):
        """Generate nb_bb jittered bounding boxes simulating detector noise.

        Parameters:
        -nb_bb (int): number of base boxes to generate
        -max_coord (int, default=100): maximum (x, y) a bb can have
        -max_size (int, default=40): maximum (w, h) a bb can have

        Returns:
        -final_res (np.array): the final jittered bounding boxes
        -final_res[idxs_y] (np.array): the reference data after nms
        """
        # Generate all the sample bounding box coordinates, sizes and scores
        coords = np.array([np.random.randint(0, max_coord, (nb_bb)) for i in range(2)])
        size = np.array([np.random.randint(0, max_size, (nb_bb)) for i in range(2)])
        pc = np.ones((nb_bb))
        # Fill an array in the row format:
        # [top_left_x, top_left_y, bottom_right_x, bottom_right_y, score]
        res = np.zeros((nb_bb, 5))
        res[:,0:2] = coords.T
        res[:,2:4] = coords.T + size.T
        res[:,4] = pc
        # Jitter and store the bounding boxes
        final_res, idxs_y = self.jitter(res[0], 0)
        for b in res[1:]:
            tmp_res, tmp_idx_y = self.jitter(b, len(final_res))
            final_res = np.concatenate((final_res, tmp_res))
            idxs_y = np.concatenate((idxs_y, tmp_idx_y))
        # Return the final jittered arrays and the reference data
        return final_res, final_res[idxs_y]
    def disp_results(self, data, data_final, data_y):
        """Display three stacked plots -- input data, reference output, and
        the implementation's output -- for visual comparison.

        Parameters:
        -data (array): input original data
        -data_final (array): the implementation's NMS result (plotted last)
        -data_y (array): reference data (plotted in the middle)
        """
        # Compute the figure size
        max_x = np.amax(data[:, 2] + data[:, 0])
        max_y = np.amax(data[:, 3] + data[:, 1])
        a = np.zeros((int(max_x) + 5, int(max_y) + 5))
        # Create 3 plots
        fig, ax = plt.subplots(3, figsize=(max_x, max_y))
        # Draw bounding boxes for the original data
        for i in range(data.shape[0]):
            rect = patches.Rectangle((data[i][0], data[i][1]), data[i][2] - data[i][0], data[i][3] - data[i][1], linewidth=1, edgecolor='g', facecolor='none')
            # Add the patch to the Axes
            ax[0].add_patch(rect)
        # Draw bounding boxes for the ref data
        for i in range(data_y.shape[0]):
            rect2 = patches.Rectangle((data_y[i][0], data_y[i][1]), data_y[i][2] - data_y[i][0], data_y[i][3] - data_y[i][1], linewidth=1, edgecolor='g', facecolor='none')
            ax[1].add_patch(rect2)
        # Draw bounding boxes for my implementation's result
        for i in range(data_final.shape[0]):
            rect3 = patches.Rectangle((data_final[i][0], data_final[i][1]), data_final[i][2], data_final[i][3], linewidth=1, edgecolor='g', facecolor='none')
            ax[2].add_patch(rect3)
        # Display the image
        ax[0].set_title("Input data")
        ax[0].imshow(a)
        ax[1].set_title("Helper/ref output (not 100% accurate)")
        ax[1].imshow(a)
        ax[2].set_title("My implementation")
        ax[2].imshow(a)
        plt.show()
if __name__ == "__main__":
    # Require the number of boxes to generate on the command line.
    if len(sys.argv) < 2:
        sys.exit("Usage: python non_max_supression_tester.py number_of_bounding_boxes_to_generate")
    # Build the helper objects, generate jittered data, run NMS and time it.
    nms = nms()
    nms_tester = nms_tester()
    data, data_y = nms_tester.gen_data(int(sys.argv[1]))
    nms.data = data
    #nms.get_data(sys.argv[1])
    start = time.perf_counter()
    nms.compute_nms()
    end = time.perf_counter()
    print("Generated and jittered", len(data), "boxes.. processing took", end - start, "s")
    nms_tester.disp_results(nms.data, nms.final_res, data_y)
1d0a07b2bb5d1a31960244b3f22d266a9c95f0cf | Python | sangdon1984/python_chap04 | /python04-01.py | UTF-8 | 6,801 | 4.53125 | 5 | [] | no_license | #-*-coding:utf-8
print("# 함수 사용하기")
# Functions exist so source code can be reused.
# The keyword 'def' introduces a function.
# The function header declares no return type,
# and parameter declarations carry no type annotations either.
# How to use functions:
# Function shape in Java:
# public void methodName(int param){
#     body
#     return value
# }
# Function shape in Python:
# def name(params):  # (difference) no return type  # no parameter types
#     body
#     return value
# e.g.:  def sum(a,b):
#            return a+b
def sum(a, b):
    """Tutorial example: return a + b (note: shadows the builtin sum)."""
    return a + b
# Call sum() both through variables and with literal arguments.
a = 3
b = 4
c = sum(a,b)
print(c)
print(sum(8, 9))
def func1():
    """A function with neither parameters nor a return value."""
    message = "매개변수와 반환값이 없는 함수"
    print(message)
def func2(a, b):
    """Takes two parameters, prints them, returns nothing."""
    message = "반환 값이 없고, 매개변수가 {0}, {1} 인 함수".format(a, b)
    print(message)
def func3():
    """No parameters, but returns a string."""
    print("매개변수가 없고 반환값만 있는 함수")
    result = "함수 3번"
    return result
def func4(a, b):
    """Two parameters and a return value."""
    message = "매개 변수가 {0}, {1} 이고, 반환값이 있는 함수".format(a, b)
    print(message)
    return "함수 4번"
print()
func1()
func2(10, 20)
x = func3()
print(x)
y = func4(10, 20)
print(y)
print()
print("# 문제 1) 매개 변수 2개를 입력 받고 계산된 값을 반환하는 총 4개의 함수를 생성하여 계산기 프로그램을 작성하세요")
# Exercise 1: calculator functions named plus, minus, multi, divide.
def plus(a, b):
    """Return the string '<a> + <b> = <sum>'."""
    total = a + b
    return "{0} + {1} = {2}".format(a, b, total)
def minus(a, b):
    """Return the string '<a> - <b> = <difference>'."""
    difference = a - b
    return "{0} - {1} = {2}".format(a, b, difference)
def multi(a, b):
    """Return the string '<a> * <b> = <product>'."""
    product = a * b
    return "{0} * {1} = {2}".format(a, b, product)
def divide(a, b):
    """Return the string '<a> / <b> = <quotient>' (true division; b must be nonzero)."""
    quotient = a / b
    return "{0} / {1} = {2}".format(a, b, quotient)
print(plus(10, 20))
print(minus(10, 20))
print(multi(10, 20))
print(divide(10, 20))
print(divide(100, 20))
print()
# Declaring a function when the number of arguments is unknown:
# prefix the parameter name with * to collect them into a tuple.
# Python has no function overloading; instead, *args lets a single
# function inspect the packed tuple's length/types to emulate overloads.
# Think of the parameters simply as arriving packed in a tuple.
print("# 여러개의 입력값을 받는 함수")
def sum_many(*args):
    """Add up any number of positional arguments and report the result."""
    total = 0
    for value in args: # walk every argument in the packed tuple
        total += value
    return "sum : {0} = {1}".format(args, total)
# Exercise sum_many with a varying number of arguments.
print(sum_many(1, 2, 3, 4, 5))
result = sum_many(1,2,3,4,5,6,7,8,9,10)
print(result)
print("매개 변수가 1개인 sum_many 의 합: {0}".format(sum_many(1)))
print("매개 변수가 2개인 sum_many 의 합: {0}".format(sum_many(1,2)))
print("매개 변수가 3개인 sum_many 의 합: {0}".format(sum_many(1,2,3)))
print("매개 변수가 4개인 sum_many 의 합: {0}".format(sum_many(1,2,3,4)))
print("매개 변수가 5개인 sum_many 의 합: {0}".format(sum_many(1,2,3,4,5)))
print()
print("# sum_mul")
def sum_mul(choice, *args):
    """Sum or multiply *args depending on *choice*.

    Parameters:
        choice: "sum" to add the arguments, "mul" to multiply them.
        *args: the numbers to combine.

    Returns:
        The formatted string "<choice> : <args> = <result>".

    Raises:
        ValueError: for any other choice (the original code crashed with
        UnboundLocalError because `result` was never assigned).
    """
    if choice == "sum":
        result = 0
        for i in args:
            result += i
    elif choice == "mul":
        result = 1
        for i in args:
            result *= i
    else:
        # Fail fast with a clear message instead of UnboundLocalError.
        raise ValueError("choice must be 'sum' or 'mul', got {0!r}".format(choice))
    return ("{0} : {1} = {2}".format(choice, args, result))
# Exercise the choice parameter with both supported modes.
result = sum_mul("sum", 1,2,3,4,5)
print(result)
result = sum_mul("mul", 1,2,3,4,5)
print(result)
print()
# In older languages a function can return only one value.
# To return two or more values there, you pass back an array/list:
# public int sum_and_mul(a,b){
#   int[] result = [a=b, a*b]
#   return result
# }
# In Python a function can appear to return several values by packing
# them into a tuple (really it is still a single return value).
print("# 반환값은 언제나 하나이다")
def sum_and_mul(a, b):
    """Return both a+b and a*b, packed into a single tuple."""
    total, product = a + b, a * b
    return total, product
result = sum_and_mul(3,4)
# Equivalent to unpacking the tuple: result = (a+b, a*b)
print(result)
print()
# The form below cannot return twice:
# 'return' does two things --
# it hands the result back to the caller, and
# it immediately terminates the function when reached.
print("# return 2개는 불가능")
def sum_and_mul1(a,b):
    return a+b
    return a*b # unreachable: the return above already ended the function,
               # so this second return can never execute (kept as a demo)
print()
# Default parameter values: instead of requiring the caller to pass every
# argument, a parameter can carry an initial value used when it is omitted.
# Parameters with defaults must come last in the parameter list;
# if a defaulted parameter sat in the middle and were omitted, Python
# could not tell which parameter the following argument belongs to,
# so such a definition raises an error.
print("# 매개변수 초기값 지정하기")
def say_myself(name, old=25, man=True):
    """Print a short self-introduction; old defaults to 25, man to True."""
    print("나의 이름은 {0}입니다.".format(name))
    print("나의 나이는 {0}살입니다.".format(old))
    print("남자입니다" if man else "여자입니다")
# Omitted arguments fall back to their declared defaults.
say_myself("박응용", 27)
say_myself("박응용", 27, True)
say_myself("박응용")
print()
say_myself("박응선", 27, False)
print()
def say_myself1(name, old=25, man=True):
    """Same self-introduction demo with both defaults spelled out."""
    print("나의 이름은 {0}입니다.".format(name))
    print("나의 나이는 {0}살입니다.".format(old))
    print("남자입니다" if man else "여자입니다")
# NOTE(review): True is passed positionally as *old* here, so it prints
# "나이는 True살" -- the author probably meant say_myself1("최수열", man=True).
say_myself1("최수열", True)
print()
# Variable scope: a variable lives only inside the function where it is
# assigned. When inner and outer variables share a name, the function sees
# only its own local one; use the 'global' keyword to rebind the outer one.
print("# 변수의 사용 범위")
a = 1 # variable defined outside the function
def vartest(a):
    """Increment a *local* copy of the argument and print it; the caller's
    variable is untouched."""
    incremented = a + 1
    print("함수 내부에서 선언된 변수 a : {0}".format(incremented))
vartest(a)
print("함수 외부에서 선언된 변수 a : {0}".format(a)) # the outer a is unchanged
print()
b = 10
def vartest2():
    # 'global' makes assignments inside the function rebind module-level b.
    global b
    b = b + 1
    print("global 키워드를 사용한 변수 b : {0}".format(b))
vartest2()
print("함수 외부의 변수 b : {0}".format(b)) # b was incremented globally
47b8bf7e18a8f532c779d9680306d8377b917985 | Python | jjoooyi/python | /basic1/prac23_pass.py | UTF-8 | 456 | 3.40625 | 3 | [] | no_license | # pass : 함수 정의할 때 아무것도 안하고 넘어갈 때 사용..?
# Building. NOTE(review): the base class Unit is not defined in this file --
# presumably carried over from a previous lesson; confirm before running.
class BuildingUnit(Unit):
    def __init__(self, name, hp, location):
        pass # demonstrates 'pass': deliberately do nothing in the body
# Supply depot: a building; 1 building = 8 units of supply.
supply_depot = BuildingUnit("서플라이 디폿", 500, "7시")
def game_start():
    """Announce the start of a new game."""
    notice = "[알림] 새로운 게임을 시작합니다."
    print(notice)
def game_over():
    # Placeholder: end-of-game behaviour not implemented yet.
    pass
# Run the demo: 'pass' lets game_over() exist with an empty body.
game_start()
game_over()
1fd72bc4c6b44e5312d53e7e7ef520ee007b4cee | Python | Dossar/batchjson | /csvtojson.py | UTF-8 | 2,701 | 2.921875 | 3 | [] | no_license | #!/usr/bin/python3
import re
import os
import sys
import pprint
import json
# MAIN
# Example input: C:\dev\batch_engine\Batch\September2014\2014.5.1\Sep_2014_Set1.csv
if __name__ == "__main__":

    # Prompt the user for the .csv file produced by batch.py.
    print(">>> csvtojson.py is used to generate a json from the .csv file generated in batch.py")
    print(">>> It is assumed here you already have run batch.py to make this .csv file.")
    inputCSV = (input(">>> Input Full Path to .csv File generated from batch.py: ")).strip()

    # Assumed CSV column layout (from batch.py):
    #   0 - Folder Name (used as a fallback title)
    #   1 - Song Artist
    #   2 - Stepper
    #   3 - Song Title
    fullPath = inputCSV
    fileDir = os.path.abspath(os.path.join(os.path.dirname(fullPath), '.'))
    csvFile = os.path.basename(os.path.normpath(fullPath))
    batchName = str(csvFile.split(".csv")[0])
    outputFile = batchName + ".json"

    # Output layout: {batchName: [{title, artist, stepper, status, latest, batch}, ...]}
    batchJson = {}
    batchJson[batchName] = []

    # Parse the file.
    os.chdir(fileDir)  # Change to csv file directory context
    with open(csvFile) as fileCSV:
        for line in fileCSV:
            if line.startswith('[FOLDER]'):
                continue  # skip the header row

            # NOTE(review): naive comma split — fields containing commas
            # would break; the csv module would be safer if that can occur.
            lineValues = line.split(",")
            songTitle = lineValues[3].strip()
            songArtist = lineValues[1].strip()
            stepArtist = lineValues[2].strip()
            if songTitle == "":
                songTitle = lineValues[0].strip()  # First CSV column is ALWAYS folder name
            if songArtist == "":
                songArtist = "UNKNOWN"  # marks files where artist names weren't parsed

            # Assemble the record for this song.
            songToAdd = {}
            songToAdd['title'] = songTitle
            songToAdd['artist'] = songArtist
            songToAdd['stepper'] = stepArtist
            songToAdd['status'] = "unjudged"
            songToAdd['latest'] = "none"
            songToAdd['batch'] = batchName
            batchJson[batchName].append(songToAdd)
    # (explicit .close() calls removed — `with` already closes the files)

    # Write out the JSON.
    print(">>> Writing JSON file for " + batchName)
    with open(outputFile, 'w') as outFile:
        json.dump(batchJson, outFile, indent=4)
    print(">>> Successfully wrote JSON file.")
| true |
5d18bb6097ca6df002967fb9db57441f82f17a48 | Python | Lightfire228/Wood_block_puzzle | /src/board/board.py | UTF-8 | 2,668 | 2.90625 | 3 | [] | no_license | from constants import *
from board.pieces import *
from classes.point import Point
from classes.piece import Piece
from classes.position import Position
import rotation
import utilities
def start():
    """Build an empty board, place the fixed starting piece, and run the solver."""
    board = utilities.generate_3d_matrix(False, BOARD_SIZE)
    insert_piece(board, PIECE_00, POSITION_0)
    return solve(board, PIECES[:], POSITIONS[:])
def solve(board_matrix, available_pieces, available_positions):
    """Depth-first backtracking search.

    Pops the next open position, tries every remaining piece (and its
    inversion) there, and recurses on a cloned board so failed branches
    never corrupt the caller's state.  Returns True once every position
    has been filled, False if no assignment works.
    """
    if len(available_positions) == 0:
        return True  # every position filled: solved

    position = available_positions.pop()

    used_pieces = []
    while len(available_pieces) > 0:
        piece = available_pieces.pop()
        clone = deep_clone_board(board_matrix)  # recurse on a copy, keep ours pristine
        if not check_collides(clone, piece, position):
            insert_piece(clone, piece, position)

            if solve(clone, [*used_pieces, *available_pieces], available_positions[:]):
                return True
            else:
                remove_piece(piece, position)  # undo the Position bookkeeping on failure

        # run code against inverted piece as well, while still treating them as one physical piece
        if not piece.is_inverted:
            available_pieces.append(piece.inversion)  # queue the flip side for trial
        else:
            used_pieces.append(piece.inversion)  # both orientations tried at this position

    return False
def remove_piece(piece, position):
    """Detach whatever piece occupies *position* (the *piece* argument is unused)."""
    position.set_piece(None)
def insert_piece(board_matrix, piece, position):
    """Register *piece* at *position* and stamp its rotated cells onto the board."""
    position.set_piece(piece)
    rotated = position.apply_matrix_rotations(piece)
    for pt in walk_position_indices(position):
        board_matrix[pt.Z][pt.Y][pt.X] = rotated[pt.Z][pt.Y][pt.X]
def check_collides(board_matrix, piece, position):
    """Return True if any occupied cell of the rotated piece overlaps the board."""
    rotated = position.apply_matrix_rotations(piece)
    return any(
        board_matrix[pt.Z][pt.Y][pt.X] and rotated[pt.Z][pt.Y][pt.X]
        for pt in walk_position_indices(position)
    )
def walk_position_indices(position):
    """Yield every board Point covered by *position*, in z/y/x nesting order."""
    origin = position.origin
    dim = position.dimensions
    for z in _get_range(origin.Z, dim.Z):
        for y in _get_range(origin.Y, dim.Y):
            for x in _get_range(origin.X, dim.X):
                yield Point(z, y, x)
def deep_clone_board(board_matrix):
    """Return an independent copy of the 3-D board (fresh lists at every level).

    Cell values themselves are shared, which is fine because the board
    stores immutable booleans.
    """
    return [[list(row) for row in layer] for layer in board_matrix]
def _get_range(origin, dim):
return range(origin, (origin + dim), -1 if dim < 0 else 1)
| true |
a61adb5c7ae767df3ed6522cf464182fdf7cf4e6 | Python | rajeevdodda/Python-Practice | /PythonBasics/scopes and namespaces/first class objects.py | UTF-8 | 380 | 3.625 | 4 | [] | no_license | # first-class objects are instances of a type that can be assigned to an identifier, passed as a parameter,
# ...or returned by a function (first-class objects demo, continued).
scream = print # bind the new name 'scream' to the built-in function `print`
scream("hello world") # call the function through its new name
from math import pi, sqrt
print(vars())  # module namespace now also contains pi and sqrt
# pseudo random number generation
import random
print(random.random())
| true |
bd381057d13b9e0bd7b22cdeea66b37fc9d90672 | Python | force881/Castle-of-ideas | /Text_quest_20(DOG).py | UTF-8 | 29,925 | 4 | 4 | [] | no_license | food = 5
water = 5
variable = True
print("Правила: Следите за значениями еды и воды."
"\nЗначения еды и воды не должны быть меньше 0 или превышать 5!"
"\n\nВы красивый, сильный, молодой и юный ПЁС! Вы немного загуляли."
"\nВам предстоит добраться от микрорайона Уручье до микрорайона Лошица,"
"\nчтобы вернуться к своему хозяину! ")
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Будете добираться через МКАД или через город?")
step = input("Буду добираться через: ")
if step.lower() == "мкад":
food -= 1
water -= 1
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Легкой трусцой ты побежал по летнему зеленому уручью в сторону МКАДа.")
print("По пути ты увидел остановку. Тебе надо принять решение: 1. Идти дальше пешком, 2. Проехать на автобусе. "
"\nВведите 1 / 2")
while variable:
stop = input("Ваш вариант: ")
if stop != "1" and stop != "2":
print('Вы должны ввести "1" или "2"')
if stop == "1":
food -= 1
water -= 1
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты добрался до МКАДа и в это время пошел дождь")
break
elif stop == "2":
food -= 1
water -= 1
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты доехал на автобусе до МКАДа и в это время пошел дождь")
break
print("Выбери вариант: 1. Ты хочешь пойти под дождем, 2. Ты хочешь переждать дождь.")
while variable:
rain = input("Ваш вариант: ")
if rain != "1" and rain != "2":
print('Вы должны ввести "1" или "2"')
if (rain) == "1":
food -= 1
water += 1
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print('Ты пошел под дождем. Шёл сильный ливень и по дороге ты утолил подкравшуюся жажду,'
'\nпопив дождевой воды из лужи.'
'\nТы сильно намок, через некоторое время выглянуло солнце, '
'\nпод которым ты решил погреться.')
print('Укажите время которое ты будешь греться на солнце: 1. 5 минут, 2. 20 минут, 3. 1 час.')
time_bask = 0
while variable:
while time_bask < 20:
if time_bask != 0:
print('Укажите время которое ты будешь греться на солнце: 1. 5 минут, 2. '+ str(20 - time_bask) +' минут, 3. 1 час.')
time = input("Введи время: ")
if time != "1" and time != "2" and time != "3":
print('Вы должны ввести "1" или "2" или "3"')
if (time == "1"):
time_bask += 5
if time_bask < 20:
print(str(time_bask) + ' минут не хватило чтобы высохнуть.')
if time_bask == 20:
food -= 1
water -= 1
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print('20 минут хватило чтобы высохнуть.'
'\nМожно идти дальше в путь.'
'\nТы дошел по мкаду до чижовки и свернул в город, направившись'
'\n в сторону лошицкого парка.')
elif (time == "2"):
food -= 1
water -= 1
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print('20 минут хватило чтобы высохнуть.'
'\nМожно идти дальше в путь.'
'\nТы дошел по мкаду до чижовки и свернул в город, направившись'
'\n в сторону лошицкого парка.')
break
elif (time == "3"):
food -= 1
water -= 1
print('1 час')
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print('Вы грелись слишком долго и заснули.'
'\nТебя забрала служба по отлову бездомных животных. Квест окончен за решеткой!'
'\nGAME OVER')
variable = False
break
break
break
elif (rain) == "2":
food -= 1
water += 1
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print('Шёл приличный ливень и пока ты пережидал его под козырьком остановки,'
'\nты утолил подкравшуюся жажду, попив дождевой воды из лужи.'
'\nТы переждал дождь.'
'\nМожно идти дальше в путь.'
'\nТы дошел по мкаду до чижовки и свернул в город, направившись'
'\n в сторону лошицкого парка.')
break
elif step.lower() == "город":
food -= 1
water -= 1
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Облегчившись на ногу, задумывшегося и говорящего по телефону "
"\nслучайного прохожего около ТЦ Спектр,ты начал движение сквозь"
"\nзнойный и жаркий центр города! Пробежав приличное число км. и "
"\nдобежав до парка Челюскинцев ты почувствовал голод и жажду!"
"\nХочешь утолить жажду? Y / N")
while variable:
step = input("Утолить жажду? ")
if step.lower() != "y" and step.lower() != "n":
print('Вы должны ввести "y" или "n"')
if step.lower() == "y":
food -= 1
water += 1
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты забежал в сам парк и попил из фантана!")
break
elif step.lower() == "n":
food -= 1
water -= 1
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Тебя мучает жажда, но ты решил потерпеть!")
break
print("Пришло время пожрать! Ты видишь продовца хот-догов "
"\nс передвижной тележкой.Хочешь проявить инициативу? Y / N")
while variable:
step = input("Проявить инициативу?")
if step.lower() != "y" and step.lower() != "n":
print('Вы должны ввести "y" или "n"')
if step.lower() == "y":
food -= 1
water -= 1
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты очень умный, хитрый и решительный ПЁС. "
"\nТы быстро подбегаешь к продавцу хот-догов "
"\nсзади, нежно кусаешь его за попку и происходит "
"\nто что тебе нужно..."
"\nПродавец вскрикивает, случайным движением руки "
"\nопракидывает тележку и на земле оказываются 10-ть "
"\nхот-догов. Видя это, ты понимаешь, что это твой шанс "
"\nутолить голод. Сколько ты хочешь съесть хотдогов (введи число от 1 до 10)."
"\nБудьте аккуратным, 1 хот-дог равен одной единице пищевого запаса!")
while variable:
step1 = 0
step = input("Число съеденных хот-догов: ")
if step == "1" or step == "2" or step == "3" or step == "4" or step == "5" or step == "6" or step == "7" or step == "8" or step == "9" or step == "10":
step = int(step)
food = food + step * 1
step1 = step
else:
print('Вы должны ввести число от 1 до 10')
if food > 5 and 0 < step1 < 11:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты съел много хот-догов. Ты так обожрался, "
"\nчто не смог двигаться. Тебя нагнал разгневанный "
"\nпродавец и пустил тебя на хот-дог."
"\nGAME OVER")
variable = False
elif food <= 5 and 0 < step1 < 11:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Скушав несколько хот-догов(" + str(step) + "), "
"\nты почувствовал прилив сил и решил побежать дальше.")
break
break
elif step.lower() == "n":
food -= 1
water -= 1
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты очень умный, хитрый и решительный ПЁС. Но также ты очень хороший. "
"\nТы решил потерпеть и покушать позже. Отправляешься дальше в путь.")
break
else:
print("Ты долго думал! Тебя забрала служба по отлову бездомных животных. Квест окончен за решеткой!"
"\n----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------"
"\nGAME OVER")
variable = False
while variable:
print("Вот ты уже вбегаешь в Лошицкий, пробежав приличное расстояние, ты почти у цели.")
print("Ты видишь в траве лежит два кусочка курочки. Сколько кусочков курочки ты хочешь съесть?"
"\n(введи число 0, 1 или 2)."
"\nБудьте аккуратным, 1 кусочек курочки равен одной единице пищевого запаса!")
while variable:
step1 = -1
step = input("Сколько ты хочешь съесть кусочков курочки?")
if step == "0" or step == "1" or step == "2":
step = int(step)
food = food + step * 1
step1 = step
else:
print('Вы должны ввести число число 0, 1 или 2')
if food <= 5 and step1 == 1:
water -= 1
print("Ты подкрепился и у тебя появились силы двигаться дальше.")
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
break
elif food <= 5 and step1 == 2:
water -= 1
print("Ты подкрепился и у тебя появились силы двигаться дальше.")
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
break
elif food > 5 and (step1 == 2 or step1 == 1):
water -= 1
print("Ты обожрался. Не можешь двигаться. Как раз в это время по парку"
"\nпроходила служба отлова бездомных животных. Они тебя поймали."
"\nКвест закончен за решёткой")
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------"
"\nGAME OVER")
variable = False
elif food <= 5 and step1 == 0:
food -= 1
water -= 1
if food == 0:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты был слишком голодным и сдох от голода. Надо было кушать!"
"\nGAME OVER!")
variable = False
break
else:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты решил не есть кусок курочки и начал движение дальше.")
break
break
while variable:
print("Летняя дикая жара на улице заставляет тебя задуматься о принятии жидкости."
"\nХочешь попить? Y / N")
while variable:
step = input("Хочешь попить?")
if step.lower() != "y" and step.lower() != "n":
print('Вы должны ввести "y" или "n"')
if step.lower() == "y":
food -= 1
water += 1
if water != 0 and food == 0:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты сдох от голода. Надо было кушать!"
"\nGAME OVER!")
variable = False
break
elif water != 0 and food != 0:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Воды много не бывает в такую жару. Освежившись, ты дальше бежишь по зелёному парку.")
break
elif step.lower() == "n":
food -= 1
water -= 1
if water != 0 and food == 0:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты сдох от голода. Надо было кушать!"
"\nGAME OVER!")
variable = False
break
elif water !=0 and food != 0:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты отказался от воды в такую жару."
"\nВсё же ты дальше бежишь по зелёному парку.")
break
elif water ==0 and food != 0:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты сдох от обезвоживания. Надо было пить!"
"\nGAME OVER!")
variable = False
break
elif water == 0 and food == 0:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты сдох от обезвоживания и голода. Надо было пить и кушать!"
"\nGAME OVER!")
variable = False
break
break
while variable:
print("В парке ты видишь несколько резвящихся на поляне бродячих псов?"
"\nХочешь с ними познакомиться? 1. Да, 2. Нет")
break
while variable:
step = input("Познакомиться? ")
if step != "1" and step != "2":
print('Вы должны ввести "1" или "2"')
if step.lower() == "1":
food -= 1
water -= 1
if water != 0 and food != 0:
print(
"Ты подбегаешь к бездомным псам, начинаешь с ними резвиться и понимаешь, что тебе с ними хорошо. "
"\nТы вступаешь в их ряды и становишься членом банды 'Шерстяные скотинки'."
"\nЭта жизнь тожа будет хороша.")
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("FIN!")
elif water == 0 and food == 0:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты сдох от обезвоживания и голода. Надо было пить и кушать!"
"\nGAME OVER!")
elif water == 0 and food != 0:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты сдох от обезвоживания. Надо было пить!"
"\nGAME OVER!")
elif water != 0 and food == 0:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты сдох от голода. Надо было кушать!"
"\nGAME OVER!")
break
elif step.lower() == "2":
food -= 1
water -= 1
if water != 0 and food != 0:
print("Ты решаешь искать дальше своего хозяина. Ты пересекаешь весь парк и забегаешь "
"\nв сам микрорайон Лошица. Добираешься до двери своей квартиры и начинаешь громко лаять. "
"\nДверь открывается и тебя встречает радостный хозяин. Ты дома. Ура.")
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("FIN!")
elif water == 0 and food == 0:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты сдох от обезвоживания и голода. Надо было пить и кушать!"
"\nGAME OVER!")
elif water == 0 and food != 0:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты сдох от обезвоживания. Надо было пить!"
"\nGAME OVER!")
elif water != 0 and food == 0:
print("----------------"
"\n| = |"
"\n| | ==== |"
"\n| --======= |"
"\n| | | |"
"\n FOOD: " + "*" * food + ""
"\n WATER: " + "*" * water + ""
"\n----------------")
print("Ты сдох от голода. Надо было кушать!"
"\nGAME OVER!")
break | true |
41a33cd536b49ee7d8b2026e5393428d92494fdd | Python | techbala/sparklearning | /dataframe/aggregation/aggregate.py | UTF-8 | 1,710 | 2.75 | 3 | [] | no_license | from pyspark.sql import SparkSession
from pyspark.sql.functions import count, countDistinct, approx_count_distinct, first, last, expr, max, col, rank, dense_rank
from pyspark.sql.window import Window
# Shared local SparkSession used by all the demo functions below.
spark = SparkSession.builder.master('local').appName('aggregationBasic').getOrCreate()

def countTest():
    # Row counts, distinct counts, and first/last on the airlines dataset.
    countDf = spark.read.format('csv').option("header", "true").option("inferSchema", "true").load("../../data/airlines.csv")
    print( countDf.count() )
    countDf.select(count('Code')).show()
    countDf.select( countDistinct('Code') ).show()
    countDf.select( first('Code'), last('Code')).show()

def coalesceTest():
    # Show the partition count before/after coalescing to 5 partitions.
    countDf = spark.read.format('csv').option("header", "true").option("inferSchema", "true").load("../../data/airlines.csv")
    print( countDf.rdd.getNumPartitions() )
    print(countDf.coalesce(5).rdd.getNumPartitions())

def groupByTest():
    # groupBy with count() and the equivalent agg()/expr() spellings.
    df = spark.read.format('csv').option("header", "true").option("inferSchema", "true").load("../../data/flights.csv")
    df.groupBy("flight_number", "origin").count().show()
    df.groupBy("flight_number").agg( count("origin"), expr( "count(origin)") ).show()

def maxDepartureDelay():
    # rank()/dense_rank() over a (date, origin) window, largest delay first.
    windowSpec = Window.partitionBy( "date", "origin").orderBy(col("departure_delay").desc() ).rowsBetween( Window.unboundedPreceding, Window.currentRow )
    rankOver = rank().over( windowSpec )
    denseOver = dense_rank().over( windowSpec )
    df = spark.read.format('csv').option("header", "true").option("inferSchema", "true").load("../../data/flights.csv")
    df.select( "date", "origin", "departure_delay", rankOver.alias('rankOver'), denseOver.alias("denseOver") ).show()

#countTest()
#coalesceTest()
#groupByTest()
maxDepartureDelay()  # NOTE: runs on import; the relative data path must exist
0b0d3cfc11796363436984d758bc9c75f09f5523 | Python | darshanthaker/bnn | /neural_network.py | UTF-8 | 1,558 | 2.734375 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.nn.functional as F
from pdb import set_trace
# TODO(dbthaker): Change this to use torch.functional instead of torch.nn
class NeuralNetwork(nn.Module):
    """Simple fully connected regressor with a selectable architecture.

    Args:
        input_size: number of input features.
        model: 'model1' -> two hidden ReLU layers of width 50;
               'model2' -> a single linear layer.

    Raises:
        ValueError: if *model* names an unknown architecture (previously
            this fell through silently and surfaced later as an
            AttributeError in forward()).
    """

    def __init__(self, input_size, model='model1'):
        super(NeuralNetwork, self).__init__()
        if model == 'model1':
            self.network = nn.Sequential( \
                    nn.Linear(input_size, 50), \
                    nn.ReLU(), \
                    nn.Linear(50, 50), \
                    nn.ReLU(), \
                    nn.Linear(50, 1))
        elif model == 'model2':
            self.network = nn.Sequential( \
                    nn.Linear(input_size, 1))
        else:
            # Fail fast with a clear message instead of leaving self.network unset.
            raise ValueError(f"Unknown model architecture: {model!r}")

    def forward(self, x):
        """Map a (batch, input_size) tensor to (batch, 1) predictions."""
        return self.network(x)
class FNeuralNetwork(nn.Module):
    """Functional MLP: the weights are passed to forward() instead of stored."""

    def __init__(self, use_biases=True):
        super(FNeuralNetwork, self).__init__()
        # When True, `params` is a flat [w0, b0, w1, b1, ...] sequence.
        self.use_biases = use_biases

    def forward(self, x, params):
        """Apply the layers described by *params* to *x*.

        With biases, ReLU follows every layer except the last; without
        biases the weights are applied as plain linear maps (the original
        assumed a single weight in that mode, hence no ReLU).
        """
        out = x
        if self.use_biases:
            assert len(params) % 2 == 0  # expect (weight, bias) pairs
            layers = list(zip(params[0::2], params[1::2]))
            last = len(layers) - 1
            for idx, (weight, bias) in enumerate(layers):
                out = F.linear(out, weight, bias)
                if idx != last:
                    out = F.relu(out)
        else:
            for weight in params:
                out = F.linear(out, weight)
        return out
| true |
114914673521a3527c2a8e660aa855417d2e28c9 | Python | NCATS-Gamma/robokop-messenger | /messenger/shared/qgraph_compiler.py | UTF-8 | 4,251 | 3.109375 | 3 | [
"MIT"
] | permissive | """Tools for compiling QGraph into Cypher query."""
def cypher_prop_string(value):
    """Render a Python value as a Cypher property literal.

    Booleans are checked before numbers because bool subclasses int.

    Args:
        value: bool, str, int, or float property value.

    Returns:
        The Cypher literal as a string.

    Raises:
        ValueError: for unsupported property types.
    """
    if isinstance(value, bool):
        return str(value).lower()
    if isinstance(value, str):
        return f"'{value}'"
    if isinstance(value, (int, float)):
        # Generalization: numeric properties serialize as bare literals.
        return str(value)
    raise ValueError(f'Unsupported property type: {type(value).__name__}.')
class NodeReference():
    """Compiles a question-graph node into Cypher MATCH fragments.

    The first str() of an instance emits the full pattern (name, labels,
    inline properties); every later str() emits only the variable name.
    The `filters` and `extras` properties follow the same first-use rule.
    """

    def __init__(self, node, anonymous=False):
        """Create a node reference from a qgraph node dict."""
        spec = dict(node)  # work on a copy; never mutate the caller's dict
        node_id = spec.pop("id")
        ref_name = '' if anonymous else f'{node_id}'

        labels = spec.pop('type', 'named_thing')
        if not isinstance(labels, list):
            labels = [labels]

        props = {}
        curie = spec.pop("curie", None)
        filters = ''
        if isinstance(curie, str):
            # A single curie is matched as an inline property.
            props['id'] = curie
        elif isinstance(curie, list):
            # Multiple curies become OR-ed per-curie WHERE conditions.
            filters = ' OR '.join(f"{ref_name}.id = '{ci}'" for ci in curie)
        elif curie is not None:
            raise TypeError("Curie should be a string or list of strings.")

        # Drop presentation-only keys; everything left is a property.
        spec.pop('name', None)
        spec.pop('set', False)
        props.update(spec)

        prop_pairs = (f"`{key}`: {cypher_prop_string(props[key])}" for key in props)
        self.name = ref_name
        self.labels = labels
        self.prop_string = ' {' + ', '.join(prop_pairs) + '}'
        self._filters = filters
        # Curie-pinned nodes get an index hint on the id property.
        self._extras = f' USING INDEX {ref_name}:{labels[0]}(id)' if curie else ''
        self._num = 0

    def __str__(self):
        """First call: full node pattern; later calls: bare variable name."""
        self._num += 1
        if self._num > 1:
            return self.name
        label_str = ''.join(f':`{label}`' for label in self.labels)
        return f'{self.name}{label_str}{self.prop_string}'

    @property
    def filters(self):
        """WHERE-clause conditions; non-empty only right after the first str()."""
        return self._filters if self._num == 1 else ''

    @property
    def extras(self):
        """MATCH-clause suffix (index hint); non-empty only right after the first str()."""
        return self._extras if self._num == 1 else ''
class EdgeReference():
    """Compiles a question-graph edge into Cypher MATCH fragments.

    The first str() emits the full relationship pattern (name and type);
    later calls emit only the variable name.  `filters` follows the same
    first-use rule.
    """

    def __init__(self, edge, anonymous=False):
        """Create an edge reference.

        Args:
            edge: qgraph edge dict with 'id' and optional 'type'/'directed'.
            anonymous: if True, omit the Cypher variable name.
        """
        name = f'{edge["id"]}' if not anonymous else ''
        label = edge['type'] if 'type' in edge else None
        # Bug fix: `filters` was previously left unbound when 'type' was
        # absent or None, raising NameError at the assignment below.
        filters = ''
        if 'type' in edge and edge['type'] is not None:
            if isinstance(edge['type'], str):
                label = edge['type']
            elif isinstance(edge['type'], list):
                # Multiple predicates become OR-ed type() conditions.
                filters = ' OR '.join(
                    f'type({name}) = "{predicate}"'
                    for predicate in edge['type']
                )
                label = None
            else:
                label = None
        self.name = name
        self.label = label
        self._num = 0
        self._filters = filters
        has_type = 'type' in edge and edge['type']
        # Typed edges default to directed matching unless overridden.
        self.directed = edge.get('directed', has_type)

    def __str__(self):
        """First call: full relationship pattern; later calls: bare variable name."""
        self._num += 1
        if self._num == 1:
            innards = f'{self.name}{":" + self.label if self.label else ""}'
        else:
            innards = self.name
        if self.directed:
            return f'-[{innards}]->'
        return f'-[{innards}]-'

    @property
    def filters(self):
        """Type conditions for WHERE; non-empty only right after the first str()."""
        if self._num == 1:
            return self._filters
        return ''
| true |
49a00f32625eeb0c6633c2975e67b08cced27893 | Python | pkundu25/CaptchaDL | /models/metrics.py | UTF-8 | 6,826 | 3.140625 | 3 | [] | no_license |
'''
This script creates different ways to compute the score of a model given its
label predictions and the truth labels
'''
import numpy as np
from functools import partial, update_wrapper
from keras.callbacks import BaseLogger
from json import JSONEncoder
import keras.backend as K
import pandas as pd
'''
The next functions will have always the same signature. They take the truth and
predicted labels and compare them. Both must be a 2D tensor of int64 values with
the same size (nxm).
n is interpreted as the number of samples classified
m is the number of labels on each sample
Value -1 in the predicted labels will represent a 'blank' space character (null character)
'''
def metric(f):
    '''
    Decorator shared by every metric function below.

    Casts both label tensors to int64 and, when either arrives one-hot
    encoded (rank 3), collapses it to label indices with argmax before
    delegating to the wrapped metric.
    '''
    def wrapper(y_true, y_pred, *args, **kwargs):
        y_true, y_pred = K.cast(y_true, np.int64), K.cast(y_pred, np.int64)
        if len(y_true.get_shape().as_list()) == 3:
            y_true = K.argmax(y_true, axis=2)
        if len(y_pred.get_shape().as_list()) == 3:
            y_pred = K.argmax(y_pred, axis=2)
        return f(y_true, y_pred, *args, **kwargs)
    update_wrapper(wrapper, f)  # preserve the wrapped function's name/docstring
    return wrapper
@metric
def char_accuracy(y_true, y_pred):
    '''
    This metric returns the fraction of individual characters predicted
    correctly, over all samples and all label positions.
    '''
    return K.mean(K.cast(K.flatten(K.equal(y_true, y_pred)), np.float32))
@metric
def matchk_accuracy(y_true, y_pred, k=2):
    '''
    This metric returns the fraction of samples whose prediction matches
    at least k labels correctly.
    k must be a number in the range [1, m] where m is the number of labels
    on each sample.
    '''
    return K.mean(K.cast(K.greater_equal(K.sum(K.cast(K.equal(y_true, y_pred), np.int64), axis=1), k), np.float32))
@metric
def fullmatch_accuracy(y_true, y_pred):
    '''
    This metric returns the fraction of samples whose prediction matches
    ALL labels correctly (product over positions is 1 only on full match).
    '''
    return K.mean(K.prod(K.cast(K.equal(y_true, y_pred), np.float32), axis=1))
'''
Aliases for different values of k in matchk_accuracy
'''
def match1_accuracy(y_true, y_pred):
    """matchk_accuracy with k=1 (named so it can be tracked by name)."""
    return matchk_accuracy(y_true, y_pred, k=1)
def match2_accuracy(y_true, y_pred):
    """matchk_accuracy with k=2."""
    return matchk_accuracy(y_true, y_pred, k=2)
def match3_accuracy(y_true, y_pred):
    """matchk_accuracy with k=3."""
    return matchk_accuracy(y_true, y_pred, k=3)
def match4_accuracy(y_true, y_pred):
    """matchk_accuracy with k=4."""
    return matchk_accuracy(y_true, y_pred, k=4)
def summary(y_true, y_pred):
    '''
    Prints on stdout a one-row table with every accuracy metric computed
    on the given truth and predicted label tensors.
    '''
    metrics = {
        'char_acc': char_accuracy(y_true, y_pred),
        'fullmatch_acc': fullmatch_accuracy(y_true, y_pred)
    }
    # One matchk column per feasible k (1 .. m-1).
    for k in range(1, y_true.shape[1]):
        metrics['match{}_acc'.format(k)] = matchk_accuracy(y_true, y_pred, k=k)

    # K.get_value pulls the scalar out of each backend tensor.
    df = pd.DataFrame.from_dict(
        dict([(metric, [round(K.get_value(value), 5)]) for metric, value in metrics.items()] + [('-', 'values')])
    )
    df.set_index(['-'], inplace=True)
    print('Number of samples: {}, Number of characters per sample: {}'.format(*y_true.shape))
    print(df)
class FloydhubKerasCallback(BaseLogger):
    '''
    Keras callback that prints training metrics as JSON lines on stdout so
    Floydhub can plot them live under the 'Training metrics' panel.

    Pass an instance inside the `callbacks` argument of model.fit().
    '''
    def __init__(self, mode='epoch', metrics=None, stateful_metrics=None):
        """Create the callback.

        :param mode: 'epoch' to report once per epoch, 'batch' once per batch.
        :param metrics: optional list of metric names to report (all if None).
        :param stateful_metrics: forwarded to BaseLogger.
        """
        super().__init__(stateful_metrics)
        if mode not in ('epoch', 'batch'):
            raise ValueError('Mode parameter should be "epoch" or "batch"')
        if metrics is not None and not isinstance(metrics, (list, tuple)):
            raise ValueError('Metrics parameter should be a list of training metric names to track')
        # Bug fix: this check previously re-validated `metrics` instead of
        # `stateful_metrics` (copy-paste error).
        if stateful_metrics is not None and not isinstance(stateful_metrics, (list, tuple)):
            raise ValueError('Stateful metrics parameter should be a list of training metric names to track')
        self.mode = mode
        self.metrics = frozenset(metrics) if metrics is not None else None
        self.encoder = JSONEncoder()

    def report(self, metric, value, **kwargs):
        """Emit one {'metric': ..., 'value': ..., ...} JSON line on stdout."""
        info = {'metric': metric, 'value': value}
        info.update(kwargs)
        print(self.encoder.encode(info))

    def on_batch_end(self, batch, logs):
        """Report per-batch metrics (only in 'batch' mode)."""
        if not self.mode == 'batch':
            return
        metrics = frozenset(logs.keys()) - frozenset(['batch', 'size'])
        if self.metrics:
            metrics &= self.metrics
        for metric in metrics:
            self.report(metric, round(logs[metric].item(), 5), step=batch)

    def on_epoch_end(self, epoch, logs):
        """Report per-epoch metrics (only in 'epoch' mode)."""
        if not self.mode == 'epoch':
            return
        metrics = frozenset(logs.keys())
        if self.metrics:
            metrics &= self.metrics
        for metric in metrics:
            self.report(metric, round(logs[metric].item(), 5), step=epoch)
if __name__ == '__main__':
    '''
    Module unit test
    '''
    import numpy as np
    import unittest
    from unittest import TestCase

    class MetricsUnitCase(TestCase):
        def test_char_accuracy(self):
            '''
            Test char_accuracy metric
            '''
            y_true = np.array([
                [0, 0, 1, 0],
                [1, 0, 1, 1]],
                dtype=np.int64)
            y_pred = np.array([
                [0, 1, 1, 0],
                [-1, 1, 1, 1]],
                dtype=np.int64)
            # 5 of the 8 positions agree.
            self.assertEqual(K.get_value(char_accuracy(y_true, y_pred)), 5/8)

        def test_fullmatch_accuracy(self):
            '''
            Test fullmatch accuracy metric
            '''
            y_true = np.array([
                [0, 1, 1, 0],
                [0, 0, 1, 0]],
                dtype=np.int64)
            y_pred = np.array([
                [0, 1, 1, 0],
                [0, 0, 0, 1]],
                dtype=np.int64)
            # Only the first sample matches on every position.
            self.assertEqual(K.get_value(fullmatch_accuracy(y_true, y_pred)), 0.5)

        def test_matchk_accuracy(self):
            '''
            Test matchk accuracy metric
            '''
            y_true = np.array([
                [0, 1, 1, 0],
                [1, 0, 1, 0],
                [0, 0, 1, 1]],
                dtype=np.int64)
            y_pred = np.array([
                [0, 1, 1, 1],
                [1, 1, 1, 1],
                [1, 0, 0, 0]],
                dtype=np.int64)
            # Rows match 3, 2, and 1 positions respectively.
            self.assertEqual(K.get_value(matchk_accuracy(y_true, y_pred, k=1)), 1)
            self.assertAlmostEqual(K.get_value(matchk_accuracy(y_true, y_pred, k=2)), 2/3, delta=0.01)
            self.assertAlmostEqual(K.get_value(matchk_accuracy(y_true, y_pred, k=3)), 1/3, delta=0.01)

    unittest.main()
| true |
4a1b716df998a07d4f07a5772726cccd06f5c290 | Python | ruairibrady/Compton-Scattering | /4. Electron NR RME vs Kinetic Energy/eNRelRME_kinenergy.py | UTF-8 | 1,343 | 3.25 | 3 | [] | no_license | #author: Ruairí Brady (ruairi.brady@ucdconnect.ie)
#importing packages
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
# NOTE: the original file contained the IPython magic "%matplotlib inline"
# here.  That is only valid inside a Jupyter/IPython notebook and is a
# SyntaxError in a plain .py script, so it has been removed.

# Load data: column 0 = gamma energy, column 1 = electron kinetic energy at
# the Compton edge, column 2 = error on the kinetic energy (all in keV).
data = np.loadtxt("compton_edge_data.txt")
Egam = data[:,0]
T = data[:,1]
T_err = data[:,2]
Egam_err = 2*T_err

# Non-relativistic rest energy of the electron from the experimental values:
# m_nr c^2 = (2*Egam - T)^2 / (2*T), errors propagated in quadrature from
# the partial derivatives below.
mnr_c2 = ((2*Egam-T)**2)/(2*T)
dTmnr_c2 = 0.5+(2*Egam**2)/(T**2)   # |d(m_nr c^2)/dT|
dEmnr_c2 = (4*Egam/T)-2             # d(m_nr c^2)/dEgam
mnr_c2_err = np.sqrt((Egam_err)**2*(dEmnr_c2)**2 + (T_err)**2*(dTmnr_c2**2))

# Figure: m_nr c^2 as a function of T, with a straight-line best fit.
def func2(x, m, c):
    """Straight line used by curve_fit: y = m*x + c."""
    return m*x+c

popt2, pcov2 = curve_fit(func2, T, mnr_c2)
best2 = func2(T, popt2[0], popt2[1])
plt.plot(T,mnr_c2,'bo', markersize=5)
plt.plot(T,best2, 'r-', linewidth=1)
plt.errorbar(T,mnr_c2,xerr=T_err,yerr=mnr_c2_err,fmt='.')
plt.xlabel('T (keV)')
plt.ylabel('$m_{nr}c^2$ (keV)')
plt.title("The Non-Relativistic Rest Mass Energy of the Electron\nas a Function of its Kinetic Energy")
plt.grid(True)
plt.axis([0,2500, 500, 1800])
plt.savefig('kineticenergy_vs_NRrestmass.png')

# 1-sigma fit uncertainties from the diagonal of the covariance matrix.
uncert_slope = np.sqrt(pcov2[0,0])
uncert_intercept = np.sqrt(pcov2[1,1])
print("The slope: {0:.4}".format(popt2[0]),"± {0:.2}".format(uncert_slope))
print("The y-intercept: {0:.4}".format(popt2[1]),"± {0:.3}".format(uncert_intercept), "keV\n")
| true |
d6dacadc9283c35852eabca193c9f86213545c15 | Python | Jason101616/LeetCode_Solution | /Design/170. Two Sum III - Data structure design.py | UTF-8 | 2,074 | 4.1875 | 4 | [] | no_license | # Design and implement a TwoSum class. It should support the following operations: add and find.
#
# add - Add the number to an internal data structure.
# find - Find if there exists any pair of numbers which sum is equal to the value.
#
# Example 1:
#
# add(1); add(3); add(5);
# find(4) -> true
# find(7) -> false
# Example 2:
#
# add(3); add(1); add(2);
# find(3) -> true
# find(6) -> false
# version 1: add O(1), find O(n)
class TwoSum(object):
    """Two-sum structure with O(1) add and O(n) find.

    Stores a multiplicity count per distinct number so duplicate values
    (e.g. find(6) after two add(3) calls) are handled correctly.
    """
    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.nums = {}
    def add(self, number):
        """
        Add the number to an internal data structure..
        :type number: int
        :rtype: void
        """
        self.nums[number] = self.nums.get(number, 0) + 1
    def find(self, value):
        """
        Find if there exists any pair of numbers which sum is equal to the value.
        :type value: int
        :rtype: bool
        """
        for num, count in self.nums.items():
            complement = value - num
            if complement == num:
                # Pairing a number with itself needs two copies of it.
                if count >= 2:
                    return True
            elif complement in self.nums:
                return True
        return False
# version 2: add O(n) find O(1). TLE in OJ.
class TwoSum(object):
    """Two-sum structure trading O(n) add for O(1) find.

    Every achievable pair sum is precomputed on insertion.  (Correct, but
    TLEs on the LeetCode judge, as noted in the original.)
    """
    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.nums = set()
        self.sums = set()
    def add(self, number):
        """
        Add the number to an internal data structure..
        :type number: int
        :rtype: void
        """
        if number in self.nums:
            # A duplicate only unlocks one new sum: the number paired with itself.
            self.sums.add(number + number)
            return
        self.sums.update(number + existing for existing in self.nums)
        self.nums.add(number)
    def find(self, value):
        """
        Find if there exists any pair of numbers which sum is equal to the value.
        :type value: int
        :rtype: bool
        """
        return value in self.sums
| true |
b6abc5d9c16bf4cd03440a7e26d07881faf38e64 | Python | ThomasDerZweifler/pyPro | /thomas/excel/Main.py | UTF-8 | 199 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | import pandas as pd
# Requires an Excel reader engine; install with: sudo apt-get install python3-xlrd
# NOTE: index_col is NOT passed here, so pandas builds a default 0..n-1
# integer index and treats every spreadsheet column (including column 0) as data.
dfs = pd.read_excel('test.xlsx', sheet_name='Tabelle1')
print(dfs.head()) | true |
0713df28845b501cd186785daa39819b0b62a048 | Python | scragly/everstone | /everstone/sql/comparisons.py | UTF-8 | 4,704 | 3.390625 | 3 | [
"MIT"
] | permissive | from __future__ import annotations
import abc
import typing as t
class Condition:
    """A composable fragment of an SQL boolean expression.

    Wraps a raw expression string (or another Condition) and combines with
    ``&``/``|`` or ``and_``/``or_`` into parenthesised AND/OR groups.
    Conditions compare equal when their rendered SQL text is equal.
    """
    def __init__(self, expression: t.Union[str, Condition]):
        self.expression = expression
    def __str__(self):
        return str(self.expression)
    def __repr__(self):
        return f'<Condition "{self.expression}">'
    def __eq__(self, other):
        # Compare by rendered SQL text so Condition("x") == "x" holds.
        return str(self) == str(other)
    def __and__(self, other):
        return Condition(f"({self} AND {other})")
    def __or__(self, other):
        return Condition(f"({self} OR {other})")
    # Bug fix: the original also defined @classmethod versions of and_/or_
    # here, immediately shadowed by the instance methods below (the later
    # definition of the same name wins in a class body).  They were
    # unreachable dead code (hence their "pragma: no cover") and have been
    # removed; Condition.and_(a, b) still behaves identically via the
    # unbound instance method.
    def and_(self, *conditions):
        """Join this condition with others into one parenthesised AND group."""
        joined = " AND ".join(str(c) for c in [self, *conditions])
        return Condition(f"({joined})")
    def or_(self, *conditions):
        """Join this condition with others into one parenthesised OR group."""
        joined = " OR ".join(str(c) for c in [self, *conditions])
        return Condition(f"({joined})")
class Comparable(metaclass=abc.ABCMeta):
    """Mixin that equips an SQL object with SQL comparison operations.

    Every operation renders ``<self> <OP> <literal>`` (``self`` is rendered
    via ``str()``) and wraps the resulting text in a ``Condition``.
    """

    @staticmethod
    def _sql_value(value: t.Any) -> str:
        """Render a Python value as an SQL literal."""
        if value is None:
            return "NULL"
        if isinstance(value, str):
            return f"'{value}'"
        if isinstance(value, bool):
            return "TRUE" if value else "FALSE"
        return f"{value}"

    def _binary(self, operator: str, value: t.Any) -> Condition:
        """Build ``<self> <operator> <literal>`` as a Condition."""
        return Condition(f"{self} {operator} {self._sql_value(value)}")

    def _range(self, keyword: str, minvalue: t.Any, maxvalue: t.Any) -> Condition:
        """Build ``<self> <keyword> <min> AND <max>`` as a Condition."""
        low = self._sql_value(minvalue)
        high = self._sql_value(maxvalue)
        return Condition(f"{self} {keyword} {low} AND {high}")

    def __hash__(self):
        return hash(str(self))

    def __lt__(self, value: t.Any) -> Condition:
        """Evaluate if less than a value."""
        return self._binary("<", value)

    def __le__(self, value: t.Any) -> Condition:
        """Evaluate if less than or equal to a value."""
        return self._binary("<=", value)

    def __eq__(self, value: t.Any) -> Condition:
        """Evaluate if equal to a value."""
        return self._binary("=", value)

    def __ne__(self, value: t.Any) -> Condition:
        """Evaluate if not equal to a value."""
        return self._binary("<>", value)

    def __gt__(self, value: t.Any) -> Condition:
        """Evaluate if greater than a value."""
        return self._binary(">", value)

    def __ge__(self, value: t.Any) -> Condition:
        """Evaluate if greater than or equal to a value."""
        return self._binary(">=", value)

    def like(self, value: t.Any) -> Condition:
        """Evaluate if like a value."""
        return self._binary("LIKE", value)

    def not_like(self, value: t.Any) -> Condition:
        """Evaluate if not like a value."""
        return self._binary("NOT LIKE", value)

    def ilike(self, value: t.Any) -> Condition:
        """Evaluate if like a value, ignoring case."""
        return self._binary("ILIKE", value)

    def not_ilike(self, value: t.Any) -> Condition:
        """Evaluate if not like a value, ignoring case."""
        return self._binary("NOT ILIKE", value)

    def between(self, minvalue: t.Any, maxvalue: t.Any) -> Condition:
        """Evaluate if between two values."""
        return self._range("BETWEEN", minvalue, maxvalue)

    def not_between(self, minvalue: t.Any, maxvalue: t.Any) -> Condition:
        """Evaluate if not between two values."""
        return self._range("NOT BETWEEN", minvalue, maxvalue)

    def is_(self, value: t.Any) -> Condition:
        """Evaluate if is a value."""
        return self._binary("IS", value)

    def is_not(self, value: t.Any) -> Condition:
        """Evaluate if is not a value."""
        return self._binary("IS NOT", value)

    def in_(self, value: t.Any) -> Condition:
        """Evaluate if in a value."""
        return self._binary("IN", value)
| true |
b1fa1d7fbfd086280a68db5816e6c889cad6269b | Python | ailakki/Python-basic | /python from master project/plot.py | UTF-8 | 637 | 2.65625 | 3 | [] | no_license | # if seaborn install works
# Python 2 script (uses raw_input and relies on integer division below).
import seaborn as sns
import matplotlib.pyplot as plt
import pylab
import sys
sns.set_style("whitegrid")
# Ask for the data file: '#'-prefixed lines name a dataset, the following
# line holds its whitespace-separated numeric values.
fil = raw_input("File name of genome to plot :")
f = open(fil,"r")
name = "None"
# 3x2 grid: one violin plot per data line, filled row by row.
fig, ax =plt.subplots(3,2)
#axes = axes.flatten()
n=0
name= 'a'
for line in f:
    dat=[]
    if line.startswith('#'):
        # Header line: remember the name for the next data line's title.
        name = line[1:]
    else:
        #print line
        l = line.strip()
        d = l.split()
        for x in d:
            dat.append(float(x))
        # n/2 is integer division under Python 2, giving the grid row.
        ax_curr = ax[abs(n/2),n%2]
        ax_curr.set_title(name)
        sns.violinplot(data=[dat],orient="h", color='b', ax=ax_curr)
        plt.xlim(-100,100)
        n = n+1
#fig.subplots_adjust(hspace=0.3)
plt.show()
| true |
0f41421fa16329c05c50714621f43fdb0dc5e402 | Python | nahlaerrakik/tweets-flask-final | /run.py | UTF-8 | 5,893 | 2.59375 | 3 | [] | no_license | __author__ = 'nahla.errakik'
import json
from flask import Flask, render_template, request, redirect, flash
from flask_login import current_user, login_user, logout_user, login_required
from flask_bcrypt import Bcrypt
from models import User, Search, Tweet, login, db
from pyTwitter import Twitter
app = Flask(__name__)
# Pick the configuration class based on the FLASK_ENV environment setting.
if app.config['ENV'] == 'production':
    app.config.from_object('config.ProdConfig')
elif app.config['ENV'] == 'testing':
    app.config.from_object('config.TestConfig')
else:
    # Development is the default when ENV is anything else.
    app.config.from_object('config.DevConfig')
# Bind the extensions (bcrypt hashing, SQLAlchemy, flask-login) to the app.
bcrypt = Bcrypt(app)
db.init_app(app)
login.init_app(app)
@app.before_first_request
def create_all():
    # Create any missing database tables before serving the first request.
    db.create_all()
@app.route("/")
def index():
return render_template('index.html')
@app.route("/login", methods=['POST', 'GET'])
def login():
try:
if current_user.is_authenticated:
print("ICH BIN IN LOGIN")
return redirect('/')
if request.method == 'POST':
email = request.form['email']
password = request.form['password']
user = User().get_user(email)
if user is None:
flash('User {} not found.'.format(email), 'warning')
return render_template('login.html')
elif not user.check_password(password):
flash('Password is not correct.', 'warning')
return render_template('login.html')
else:
login_user(user)
return redirect('/')
else:
return render_template('login.html')
except:
flash(app.config['ERROR_MSG'].format('Could not login'), 'warning')
return redirect('/login')
@app.route("/logout")
def logout():
logout_user()
return redirect('/')
@app.route("/register", methods=['POST', 'GET'])
def register():
try:
if request.method == 'POST':
email = request.form['email']
username = request.form['username']
password = request.form['password']
repeat_password = request.form['repeat_password']
user = User().get_user(email)
if user:
flash('User already exist !', 'danger')
return render_template('register.html')
elif password != repeat_password:
flash('Passwords do not match !', 'danger')
return render_template('register.html')
else:
hash_password = bcrypt.generate_password_hash(password)
User().add_user(username, hash_password, email)
flash('Congrats! you have successfully registered. You can login now !', 'success')
return render_template('login.html')
return render_template('register.html')
except:
flash(app.config['ERROR_MSG'].format('Could not load page'), 'danger')
return render_template('register.html')
@app.route("/search", methods=['GET'])
def search():
try:
myTwitter = Twitter({'key': app.config['TWITTER_API_CLIENT_KEY'], 'secret': app.config['TWITTER_API_CLIENT_SECRET']})
keyword = request.args.get('keyword')
tweets = Search().search(keyword)
if len(tweets) > 0:
if Search.less_than_5minutes(tweets[0].creation_time):
tweets = [{'text': x.text} for x in tweets]
else:
print("insert new tweets in db")
search_result = myTwitter.search_tweets(keyword)
tweets = search_result['statuses']
for item in tweets:
tweet = Search(keyword=keyword, text=item['text'])
Search().add_search(tweet)
# Keyword isn t in DB
else:
print("!!!!!!!!!!!NOT FOUND IN DATABASE")
search_result = myTwitter.search_tweets(keyword)
tweets = search_result['statuses']
for item in tweets:
tweet = Search(keyword=keyword, text=item['text'])
Search().add_search(tweet)
if len(tweets) <= 0:
flash('No results were found.', 'warning')
else:
flash('{} results were found.'.format(len(tweets)), 'success')
return render_template("index.html", tweets=tweets, keyword=keyword, tweetsy=json.dumps(tweets))
except:
flash(app.config['ERROR_MSG'].format('Could not get search results'), 'danger')
return render_template("index.html", keyword=request.args.get('keyword'))
@app.route("/store", methods=['GET', 'POST'])
@login_required
def store():
try:
keyword = request.form['keyword']
tweets = json.loads(request.form['tweetsy'])
for tweet in tweets:
text = tweet['text']
Tweet.add_fav_tweet(keyword, text, current_user.id)
flash('Your search for the Keyword # {} # was successfully stored.'.format(keyword), 'success')
return render_template("index.html", tweets=tweets, keyword=keyword, tweetsy=json.dumps(tweets))
except:
flash(app.config['ERROR_MSG'].format('Could not store tweets!'), 'danger')
return redirect("/search")
@app.route("/profile")
@login_required
def profile():
user_fav_tweets = Tweet.get_fav_tweets(user=current_user.id)
return render_template('profile.html', stored_tweets=user_fav_tweets)
@app.route("/delete_tweet", methods=['POST'])
@login_required
def delete_tweet():
try:
tweet_id = request.form['tweet_id']
Tweet.delete_tweet(tweet_id)
flash('Your Tweet was successfully deleted', 'success')
return redirect('/profile')
except:
flash(app.config['ERROR_MSG'].format('Could not delete tweet!'), 'danger')
return redirect('/profile')
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page.

    Bug fix: return the 404 status code with the body — the original
    returned only the rendered template, which Flask serves as 200 OK.
    """
    return render_template('not_found_404.html'), 404
if __name__ == "__main__":
app.run()
| true |
16b4fd1741dee5001b6cb33e52787825b4054fe0 | Python | rvcarrera/freecodecamp | /arithmetic-formatter/actual_solution.py | UTF-8 | 1,651 | 3.515625 | 4 | [] | no_license | import re
problems = ["11 + 4", "3801 - 2999", "1 + 2", "123 + 49", "1 - 9380"]
solution = False
arranged_problems = ''
if len(problems) > 5:
arranged_problems = 'Error: Too many problems.'
data = []
for problem in problems:
datum = [re.findall('[0-9]+', problem), re.findall('[+-]', problem)]
if not datum[1]:
arranged_problems = 'Error: Operator must be \'+\' or \'-\'.'
if len(datum[0]) != 2:
arranged_problems = 'Error: Numbers must only contain digits.'
if max(len(datum[0][0]), len(datum[0][1])) > 4:
arranged_problems = 'Error: Numbers cannot be more than four digits.'
if datum[1][0] == '+':
datum.append(str(int(datum[0][0])+int(datum[0][1])))
else:
datum.append(str(int(datum[0][0])-int(datum[0][1])))
datum.append(max(len(datum[0][0]), len(datum[0][1])) + 2)
data.append(datum)
line_one = (' '*(data[0][3] - len(data[0][0][0]))) + data[0][0][0]
line_two = data[0][1][0] + (' '*(data[0][3] - len(data[0][0][1]) - 1)) + data[0][0][1]
line_three = '-'*data[0][3]
line_four = (' '*(data[0][3] - len(data[0][2]))) + data[0][2]
for i in range(1, len(data)):
line_one += ' ' + (' '*(data[i][3] - len(data[i][0][0]))) + data[i][0][0]
line_two += ' ' + data[i][1][0] + (' '*(data[i][3] - len(data[i][0][1]) - 1)) + data[i][0][1]
line_three += ' ' + '-'*data[i][3]
line_four += ' ' + (' '*(data[i][3] - len(data[i][2]))) + data[i][2]
if solution:
arranged_problems = line_one + '\n' + line_two + '\n' + line_three + '\n' + line_four
else:
arranged_problems = line_one + '\n' + line_two + '\n' + line_three
print(arranged_problems) | true |
1b179badae4c3b81ab8472a442728996133dec52 | Python | serre-lab/tripletcyclegan | /data.py | UTF-8 | 26,787 | 2.6875 | 3 | [] | no_license | import numpy as np
import tensorflow as tf
import tf2lib as tl
from PIL import Image
import imlib as im
def _smallest_size_at_least(height, width, smallest_side):
    """Computes new shape with the smallest side equal to `smallest_side`.
    Computes new shape with the smallest side equal to `smallest_side` while
    preserving the original aspect ratio.
    Args:
        height: an int32 scalar tensor indicating the current height.
        width: an int32 scalar tensor indicating the current width.
        smallest_side: A python integer or scalar `Tensor` indicating the size of
        the smallest side after resize.
    Returns:
        new_height: an int32 scalar tensor indicating the new height.
        new_width: and int32 scalar tensor indicating the new width.
    """
    # NOTE: tf.compat.v1.to_float / tf.compat.v1.cond are legacy TF1 APIs.
    smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
    # Work in float so the scale factor is not truncated to an integer.
    height = tf.compat.v1.to_float(height)
    width = tf.compat.v1.to_float(width)
    smallest_side = tf.compat.v1.to_float(smallest_side)
    # Scale by whichever ratio makes the *smaller* side equal smallest_side.
    scale = tf.compat.v1.cond(tf.compat.v1.greater(height, width),
                              lambda: smallest_side / width,
                              lambda: smallest_side / height)
    new_height = tf.compat.v1.to_int32(height * scale)
    new_width = tf.compat.v1.to_int32(width * scale)
    return new_height, new_width
def _aspect_preserving_resize(image, smallest_side):
    """Resize images preserving the original aspect ratio.
    Args:
        image: A 3-D image `Tensor`.
        smallest_side: A python integer or scalar `Tensor` indicating the size of
        the smallest side after resize.
    Returns:
        resized_image: A 3-D tensor containing the resized image.
    """
    smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
    shape = tf.shape(image)
    height = shape[0]
    width = shape[1]
    new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
    # resize_bilinear expects a 4-D batch; add and later remove the batch dim.
    image = tf.expand_dims(image, 0)
    resized_image = tf.compat.v1.image.resize_bilinear(image, [new_height, new_width],
                                             align_corners=False)
    resized_image = tf.compat.v1.squeeze(resized_image)
    # Declare the channel count statically; assumes 3-channel input.
    resized_image.set_shape([None, None, 3])
    return resized_image
def make_dataset(img_paths, batch_size, load_size, crop_size, training, drop_remainder=True, grayscale=False, shuffle=False, repeat=1):
    """Build a batched image dataset from a list of file paths.

    Training images are optionally grayscaled (then expanded back to 3
    channels), randomly flipped, tiled along their short side when the
    aspect ratio exceeds 1.2 (so near-square input reaches the resize
    without distortion), resized to ``crop_size`` and rescaled to [-1, 1].
    Evaluation images skip the flip and use a looser 1.3 threshold.

    This revision only removes the large blocks of commented-out dead code
    from the original; all active statements are unchanged.

    NOTE(review): ``load_size`` is accepted but unused by both branches —
    images are resized straight to ``crop_size``; confirm this is intended.
    NOTE(review): ``int(repeating)`` on a Tensor inside a @tf.function is
    only valid when the map runs eagerly — verify against tl.disk_image_batch_dataset.
    """
    if training:
        @tf.function
        def _map_fn(img):  # preprocessing
            if grayscale:
                img = tf.image.rgb_to_grayscale(img)
                img = tf.image.grayscale_to_rgb(img)
            img = tf.image.random_flip_left_right(img)
            maxside = tf.math.maximum(tf.shape(img)[0], tf.shape(img)[1])
            minside = tf.math.minimum(tf.shape(img)[0], tf.shape(img)[1])
            new_img = img
            if tf.math.divide(maxside, minside) > 1.2:
                # Tile the image along its short side until roughly square.
                repeating = tf.math.floor(tf.math.divide(maxside, minside))
                new_img = img
                if tf.math.equal(tf.shape(img)[1], minside):
                    for i in range(int(repeating)):
                        new_img = tf.concat((new_img, img), axis=1)
                if tf.math.equal(tf.shape(img)[0], minside):
                    for i in range(int(repeating)):
                        new_img = tf.concat((new_img, img), axis=0)
                    # Rotate so the tiling direction matches the wide case.
                    new_img = tf.image.rot90(new_img)
            else:
                new_img = img
            img = tf.image.resize(new_img, [crop_size, crop_size])
            # Normalise to [-1, 1].
            img = tf.clip_by_value(img, 0, 255) / 255.0
            img = img * 2 - 1
            return img
    else:
        @tf.function
        def _map_fn(img):  # preprocessing
            maxside = tf.math.maximum(tf.shape(img)[0], tf.shape(img)[1])
            minside = tf.math.minimum(tf.shape(img)[0], tf.shape(img)[1])
            new_img = img
            if tf.math.divide(maxside, minside) > 1.3:
                repeating = tf.math.floor(tf.math.divide(maxside, minside))
                new_img = img
                if tf.math.equal(tf.shape(img)[1], minside):
                    for i in range(int(repeating)):
                        new_img = tf.concat((new_img, img), axis=1)
                if tf.math.equal(tf.shape(img)[0], minside):
                    for i in range(int(repeating)):
                        new_img = tf.concat((new_img, img), axis=0)
                    new_img = tf.image.rot90(new_img)
            else:
                new_img = img
            img = tf.image.resize(new_img, [crop_size, crop_size])
            # Grayscale conversion happens after the resize in eval mode.
            if grayscale:
                img = tf.image.rgb_to_grayscale(img)
                img = tf.image.grayscale_to_rgb(img)
            img = tf.clip_by_value(img, 0, 255) / 255.0
            img = img * 2 - 1
            return img
    return tl.disk_image_batch_dataset(img_paths,
                                       batch_size,
                                       drop_remainder=drop_remainder,
                                       map_fn=_map_fn,
                                       shuffle=shuffle,
                                       repeat=repeat)
def make_zip_dataset(A_img_paths, B_img_paths, batch_size, load_size, crop_size, training, shuffle=True, grayscale=True, repeat=False):
    """Pair two image datasets, cycling the shorter one to match the longer.

    With ``repeat`` true both sides cycle forever; otherwise the longer
    side runs once while the shorter one is cycled.  Returns the zipped
    dataset and the number of batches per epoch.
    """
    if repeat:
        # Cycle both domains forever.
        A_repeat = B_repeat = None
    else:
        A_is_longer = len(A_img_paths) >= len(B_img_paths)
        A_repeat = 1 if A_is_longer else None
        B_repeat = None if A_is_longer else 1
    A_dataset = make_dataset(A_img_paths, batch_size, load_size, crop_size, training,
                             drop_remainder=True, shuffle=shuffle, grayscale=grayscale, repeat=A_repeat)
    B_dataset = make_dataset(B_img_paths, batch_size, load_size, crop_size, training,
                             drop_remainder=True, shuffle=shuffle, grayscale=grayscale, repeat=B_repeat)
    zipped = tf.data.Dataset.zip((A_dataset, B_dataset))
    n_batches = max(len(A_img_paths), len(B_img_paths)) // batch_size
    return zipped, n_batches
class ItemPool:
    """History buffer of previously generated items.

    Once the pool is full, each queried item is swapped with a random
    stored one with probability 0.5 — the standard image-pool trick used
    to stabilise GAN discriminator training.
    """

    def __init__(self, pool_size=50):
        self.pool_size = pool_size
        self.items = []

    def __call__(self, in_items):
        """Exchange rows of a batch tensor with pooled history items."""
        if self.pool_size == 0:
            # Pooling disabled: pass the batch straight through.
            return in_items
        out_items = []
        for item in in_items:
            if len(self.items) < self.pool_size:
                # Still filling the pool: store and pass through unchanged.
                self.items.append(item)
                out_items.append(item)
                continue
            if np.random.rand() > 0.5:
                # Swap with a random pooled item.
                idx = np.random.randint(0, len(self.items))
                swapped_out = self.items[idx]
                self.items[idx] = item
                out_items.append(swapped_out)
            else:
                out_items.append(item)
        return tf.stack(out_items, axis=0)
def make_dataset2(img_paths, labels, batch_size, load_size, crop_size, training, drop_remainder=True,grayscale=False, shuffle=False, repeat=1):
    """Build a batched (image, label) dataset.

    Training: optional grayscale round-trip, random flip, symmetric
    padding until the height/width difference squared is <= 100, resize
    and random crop to ``crop_size``, then rescale to [-1, 1].
    Evaluation: aspect-preserving resize to ``load_size + 4`` followed by
    a random crop.
    """
    if training:
        @tf.function
        def _map_fn(img,label): # preprocessing
            #toss = np.random.uniform(0,1)
            if grayscale:
                img = tf.image.rgb_to_grayscale(img)
                img = tf.image.grayscale_to_rgb(img)
            img = tf.image.random_flip_left_right(img)
            maxside = tf.math.maximum(tf.shape(img)[0],tf.shape(img)[1])
            # Pad the short sides symmetrically until the image is nearly square.
            while tf.math.square(tf.shape(img)[0]-tf.shape(img)[1])>100:
                padx = tf.math.minimum(maxside - tf.shape(img)[0],tf.math.minimum(tf.shape(img)[0],tf.shape(img)[1]))
                pady = tf.math.minimum(maxside - tf.shape(img)[1],tf.math.minimum(tf.shape(img)[0],tf.shape(img)[1]))
                paddings = [[padx/2,padx/2],[pady/2,pady/2],[0, 0]]
                img = tf.pad(img,paddings,'SYMMETRIC')#tf.image.resize_with_pad(img, load_size, load_size, antialias = True)
            # NOTE(review): 'load_size*+10' is load_size * (+10), i.e. ten
            # times load_size — likely intended as 'load_size+10'; confirm.
            img = tf.image.resize(img, [load_size*+10,load_size+10],preserve_aspect_ratio=True)
            img = tf.image.random_crop(img, [crop_size, crop_size, tf.shape(img)[-1]])
            img = tf.clip_by_value(img, 0, 255) / 255.0 # or img = tl.minmax_norm(img)
            img = img * 2 - 1
            return [img,label]
    else:
        @tf.function
        def _map_fn(img,label): # preprocessing
            img =_aspect_preserving_resize(img,load_size+4)# tf.image.resize(img, [load_size,load_size])
            #padx = load_size - tf.shape(img)[0]
            #pady = load_size -tf.shape(img)[1]
            #paddings = [[padx/2,padx/2],[pady/2,pady/2],[0, 0]]
            #img = tf.pad(img,paddings,'SYMMETRIC')
            #img = tf.image.resize_with_pad(img,crop_size, crop_size, antialias = True) # or img = tf.image.resize(img, [load_size, load_size]); img = tl.center_crop(img, crop_size)
            if grayscale:
                img = tf.image.rgb_to_grayscale(img)
                img = tf.image.grayscale_to_rgb(img)
            # NOTE(review): eval mode uses a *random* crop too — confirm intended.
            img = tf.image.random_crop(img, [crop_size, crop_size, tf.shape(img)[-1]])
            img = tf.clip_by_value(img, 0, 255) / 255.0 # or img = tl.minmax_norm(img)
            img = img * 2 - 1
            return [img,label]
    return tl.disk_image_batch_dataset(img_paths,
                                       batch_size,
                                       labels=labels,
                                       drop_remainder=drop_remainder,
                                       map_fn=_map_fn,
                                       shuffle=shuffle,
                                       repeat=repeat)
def make_zip_dataset2(A_img_paths, A_labels, B_img_paths, B_labels, batch_size, load_size, crop_size, training, shuffle=True, grayscale=True, repeat=False):
    """Zip two labelled image datasets aligned on the longer path list.

    The shorter side is cycled so every element of the longer one gets a
    partner; with ``repeat`` true both sides cycle forever.
    """
    if repeat:
        A_repeat = B_repeat = None  # cycle both forever
    else:
        A_is_longer = len(A_img_paths) >= len(B_img_paths)
        A_repeat = 1 if A_is_longer else None
        B_repeat = None if A_is_longer else 1
    A_dataset = make_dataset2(A_img_paths, A_labels, batch_size, load_size, crop_size, training,
                              drop_remainder=True, shuffle=shuffle, grayscale=grayscale, repeat=A_repeat)
    B_dataset = make_dataset2(B_img_paths, B_labels, batch_size, load_size, crop_size, training,
                              drop_remainder=True, shuffle=shuffle, grayscale=grayscale, repeat=B_repeat)
    zipped = tf.data.Dataset.zip((A_dataset, B_dataset))
    n_batches = max(len(A_img_paths), len(B_img_paths)) // batch_size
    return zipped, n_batches
def make_dataset_triplet(img_paths, labels, batch_size, load_size, crop_size, training,Triplet_K=4, num_classes=18,drop_remainder=True,grayscale=False, shuffle=False, repeat=1):
    """Build a triplet-sampling dataset of (image, one-hot label) pairs.

    ``Triplet_K`` is forwarded to tl.disk_image_batch_dataset_triplet (K
    samples per identity per batch); labels are one-hot over ``num_classes``.

    NOTE(review): the local ``_map_fn`` closures defined below are never
    passed to disk_image_batch_dataset_triplet (no map_fn argument in the
    call), so this preprocessing appears unused here — verify against
    tl.disk_image_batch_dataset_triplet.
    """
    if training:
        @tf.function
        def _map_fn(img,label): # preprocessing
            #toss = np.random.uniform(0,1)
            if grayscale:
                img = tf.image.rgb_to_grayscale(img)
                img = tf.image.grayscale_to_rgb(img)
            #img = tf.image.random_flip_left_right(img)
            maxside = tf.math.maximum(tf.shape(img)[0],tf.shape(img)[1])
            # Pad symmetrically until the height/width difference squared <= 100.
            while tf.math.square(tf.shape(img)[0]-tf.shape(img)[1])>100:
                padx = tf.math.minimum(maxside - tf.shape(img)[0],tf.math.minimum(tf.shape(img)[0],tf.shape(img)[1]))
                pady = tf.math.minimum(maxside - tf.shape(img)[1],tf.math.minimum(tf.shape(img)[0],tf.shape(img)[1]))
                paddings = [[padx/2,padx/2],[pady/2,pady/2],[0, 0]]
                img = tf.pad(img,paddings,'SYMMETRIC')#tf.image.resize_with_pad(img, load_size, load_size, antialias = True)
            # NOTE(review): 'load_size*+10' multiplies by +10 — likely meant 'load_size+10'.
            img = tf.image.resize(img, [load_size*+10,load_size+10],preserve_aspect_ratio=True)
            img = tf.image.random_crop(img, [crop_size, crop_size, tf.shape(img)[-1]])
            img = tf.clip_by_value(img, 0, 255) / 255.0 # or img = tl.minmax_norm(img)
            img = img * 2 - 1
            return img, tf.one_hot(label, num_classes,dtype=tf.int32)
    else:
        @tf.function
        def _map_fn(img,label): # preprocessing
            img = _aspect_preserving_resize(img,load_size+4) #tf.image.resize(img, crop_size, crop_size, antialias = True) # or img = tf.image.resize(img, [load_size, load_size]); img = tl.center_crop(img, crop_size)
            #img = tf.image.resize(img, [load_size*+10,load_size+10],preserve_aspect_ratio=True)
            #padx = load_size - tf.shape(img)[0]
            #pady = load_size -tf.shape(img)[1]
            #paddings = [[padx/2,padx/2],[pady/2,pady/2],[0, 0]]
            #img = tf.pad(img,paddings,'SYMMETRIC')#tf.image.resize_with_pad(img, load_size, load_size, antialias = True)
            # Debug leftover: prints the image shape on every call.
            tf.print(tf.shape(img))
            if grayscale:
                img = tf.image.rgb_to_grayscale(img)
                img = tf.image.grayscale_to_rgb(img)
            img = tf.image.random_crop(img, [crop_size, crop_size, tf.shape(img)[-1]])
            img = tf.clip_by_value(img, 0, 255) / 255.0 # or img = tl.minmax_norm(img)
            img = img * 2 - 1
            return img, tf.one_hot(label, num_classes,dtype=tf.int32)
    return tl.disk_image_batch_dataset_triplet(img_paths,
                                               batch_size,
                                               crop_size,
                                               labels=labels,
                                               Triplet_K=Triplet_K,
                                               drop_remainder=drop_remainder,
                                               shuffle=shuffle,
                                               repeat=repeat)
def make_zip_dataset_triplet(A_img_paths, A_labels, B_img_paths, B_labels, batch_size, load_size, crop_size, training, Triplet_K=4, shuffle=True, grayscale=True, repeat=False):
    """Zip two triplet-sampled datasets, cycling the shorter side.

    ``Triplet_K`` is forwarded to make_dataset_triplet for both domains.
    """
    if repeat:
        A_repeat = B_repeat = None  # both cycle forever
    else:
        A_is_longer = len(A_img_paths) >= len(B_img_paths)
        A_repeat = 1 if A_is_longer else None
        B_repeat = None if A_is_longer else 1
    A_dataset = make_dataset_triplet(A_img_paths, A_labels, batch_size, load_size, crop_size, training,
                                     Triplet_K=Triplet_K, drop_remainder=True, shuffle=shuffle,
                                     grayscale=grayscale, repeat=A_repeat)
    B_dataset = make_dataset_triplet(B_img_paths, B_labels, batch_size, load_size, crop_size, training,
                                     Triplet_K=Triplet_K, drop_remainder=True, shuffle=shuffle,
                                     grayscale=grayscale, repeat=B_repeat)
    zipped = tf.data.Dataset.zip((A_dataset, B_dataset))
    n_batches = max(len(A_img_paths), len(B_img_paths)) // batch_size
    return zipped, n_batches
def make_dataset3(img_paths, labels, batch_size, load_size, crop_size, training, drop_remainder=True,grayscale=False, shuffle=False, repeat=1):
    """Build a batched (image, label) dataset, resizing to ``load_size``.

    Like make_dataset but labelled: oblong images (aspect ratio > 1.2 in
    training, > 1.3 in evaluation) are tiled along their short side before
    the resize; pixel values are rescaled to [-1, 1].
    """
    if training:
        @tf.function
        def _map_fn(img,label): # preprocessing
            #toss = np.random.uniform(0,1)
            if grayscale:
                img = tf.image.rgb_to_grayscale(img)
                img = tf.image.grayscale_to_rgb(img)
            img = tf.image.random_flip_left_right(img)
            maxside = tf.math.maximum(tf.shape(img)[0],tf.shape(img)[1])
            minside = tf.math.minimum(tf.shape(img)[0],tf.shape(img)[1])
            new_img = img
            if tf.math.divide(maxside,minside) > 1.2:
                # Tile along the short side until roughly square.
                repeating = tf.math.floor(tf.math.divide(maxside,minside))
                new_img = img
                if tf.math.equal(tf.shape(img)[1],minside):
                    for i in range(int(repeating)):
                        new_img = tf.concat((new_img, img), axis=1)
                if tf.math.equal(tf.shape(img)[0],minside):
                    for i in range(int(repeating)):
                        new_img = tf.concat((new_img, img), axis=0)
                    new_img = tf.image.rot90(new_img)
            else:
                new_img = img
            img = tf.image.resize(new_img, [load_size,load_size])
            #im.imwrite(img.numpy(),'test.jpg')
            #img = tf.image.central_crop(img, [crop_size, crop_size, tf.shape(img)[-1]])
            img = tf.clip_by_value(img, 0, 255) / 255.0 # or img = tl.minmax_norm(img)
            img = img * 2 - 1
            return [img,label]
    else:
        @tf.function
        def _map_fn(img,label): # preprocessing
            maxside = tf.math.maximum(tf.shape(img)[0],tf.shape(img)[1])
            minside = tf.math.minimum(tf.shape(img)[0],tf.shape(img)[1])
            new_img = img
            if tf.math.divide(maxside,minside) > 1.3:
                repeating = tf.math.floor(tf.math.divide(maxside,minside))
                new_img = img
                if tf.math.equal(tf.shape(img)[1],minside):
                    for i in range(int(repeating)):
                        new_img = tf.concat((new_img, img), axis=1)
                if tf.math.equal(tf.shape(img)[0],minside):
                    for i in range(int(repeating)):
                        new_img = tf.concat((new_img, img), axis=0)
                    new_img = tf.image.rot90(new_img)
            else:
                new_img = img
            img = tf.image.resize(new_img, [load_size,load_size])
            #padx = load_size - tf.shape(img)[0]
            #pady = load_size -tf.shape(img)[1]
            #paddings = [[padx/2,padx/2],[pady/2,pady/2],[0, 0]]
            #img = tf.pad(img,paddings,'SYMMETRIC')
            #img = tf.image.resize_with_pad(img,crop_size, crop_size, antialias = True) # or img = tf.image.resize(img, [load_size, load_size]); img = tl.center_crop(img, crop_size)
            if grayscale:
                img = tf.image.rgb_to_grayscale(img)
                img = tf.image.grayscale_to_rgb(img)
            #img = tf.image.random_crop(img, [crop_size, crop_size, tf.shape(img)[-1]])
            img = tf.clip_by_value(img, 0, 255) / 255.0 # or img = tl.minmax_norm(img)
            img = img * 2 - 1
            return [img,label]
    return tl.disk_image_batch_dataset(img_paths,
                                       batch_size,
                                       labels=labels,
                                       drop_remainder=drop_remainder,
                                       map_fn=_map_fn,
                                       shuffle=shuffle,
                                       repeat=repeat)
def make_zip_dataset3(A_img_paths,A_labels, B_img_paths,B_labels, batch_size, load_size, crop_size, training, shuffle=True, grayscale=True,repeat=False):
    """Build the A and B labelled datasets and zip them, cycling the shorter
    side so it stays aligned with the longer one (both cycle when `repeat`)."""
    if repeat:
        # Both sides cycle forever.
        A_repeat, B_repeat = None, None
    elif len(A_img_paths) >= len(B_img_paths):
        # B is the shorter side: iterate A once, cycle B.
        A_repeat, B_repeat = 1, None
    else:
        # A is the shorter side: iterate B once, cycle A.
        A_repeat, B_repeat = None, 1
    A_dataset = make_dataset3(A_img_paths, A_labels, batch_size, load_size, crop_size, training, drop_remainder=True, shuffle=shuffle, grayscale=grayscale, repeat=A_repeat)
    B_dataset = make_dataset3(B_img_paths, B_labels, batch_size, load_size, crop_size, training, drop_remainder=True, shuffle=shuffle, grayscale=grayscale, repeat=B_repeat)
    zipped = tf.data.Dataset.zip((A_dataset, B_dataset))
    # One "epoch" spans the longer path list, measured in whole batches.
    batches_per_epoch = max(len(A_img_paths), len(B_img_paths)) // batch_size
    return zipped, batches_per_epoch
def make_dataset_triplet2(img_paths, labels, batch_size, load_size, crop_size, training,Triplet_K=4, num_classes=18,drop_remainder=True,grayscale=False, shuffle=False, repeat=1):
    """Build a triplet-sampling image dataset from paths and labels.

    NOTE(review): both `_map_fn` closures below are defined but never handed to
    `tl.disk_image_batch_dataset_triplet` (the call passes no map_fn), so this
    preprocessing appears to be dead code — confirm against the helper's
    signature.
    """
    if training:
        @tf.function
        def _map_fn(img,label): # preprocessing (training: tile, resize, random crop, normalize)
            #toss = np.random.uniform(0,1)
            if grayscale:
                img = tf.image.rgb_to_grayscale(img)
                img = tf.image.grayscale_to_rgb(img)
            #img = tf.image.random_flip_left_right(img)
            maxside = tf.math.maximum(tf.shape(img)[0],tf.shape(img)[1])
            minside = tf.math.minimum(tf.shape(img)[0],tf.shape(img)[1])
            new_img = img
            # NOTE(review): shadows the outer `load_size` argument with a fixed 300.
            load_size = 300
            # Very elongated image: tile it along the short side before resizing.
            if tf.math.divide(maxside,minside) > 1.3:
                x_offset = 0
                # `repeat` here shadows the function's `repeat` parameter (local only).
                repeat = tf.math.floor(tf.math.divide(maxside,minside))
                new_img = img
                if tf.math.equal(tf.shape(img)[1],minside):
                    for i in range(int(repeat)):
                        new_img = tf.concat((new_img, img), axis=1)
                if tf.math.equal(tf.shape(img)[0],minside):
                    for i in range(int(repeat)):
                        new_img = tf.concat((new_img, img), axis=0)
                    new_img = tf.image.rot90(new_img)
            else:
                new_img = img
            img = tf.image.resize(new_img, [load_size+5,load_size+5])
            img = tf.image.random_crop(img, [crop_size, crop_size, tf.shape(img)[-1]])
            img = tf.clip_by_value(img, 0, 255) / 255.0 # or img = tl.minmax_norm(img)
            img = img * 2 - 1
            return img, tf.one_hot(label, num_classes,dtype=tf.int32)
    else:
        @tf.function
        def _map_fn(img,label): # preprocessing (evaluation: deterministic resize, normalize)
            maxside = tf.math.maximum(tf.shape(img)[0],tf.shape(img)[1])
            minside = tf.math.minimum(tf.shape(img)[0],tf.shape(img)[1])
            new_img = img
            if tf.math.divide(maxside,minside) > 1.3:
                repeat = tf.math.floor(tf.math.divide(maxside,minside))
                new_img = img
                if tf.math.equal(tf.shape(img)[1],minside):
                    for i in range(int(repeat)):
                        new_img = tf.concat((new_img, img), axis=1)
                if tf.math.equal(tf.shape(img)[0],minside):
                    for i in range(int(repeat)):
                        new_img = tf.concat((new_img, img), axis=0)
                    new_img = tf.image.rot90(new_img)
            else:
                new_img = img
            img = tf.image.resize(new_img, [load_size,load_size])#ize(img, [load_size*+10,load_size+10],preserve_aspect_ratio=True)
            #padx = load_size - tf.shape(img)[0]
            #pady = load_size -tf.shape(img)[1]
            #paddings = [[padx/2,padx/2],[pady/2,pady/2],[0, 0]]
            #img = tf.pad(img,paddings,'SYMMETRIC')#tf.image.resize_with_pad(img, load_size, load_size, antialias = True)
            tf.print(tf.shape(img))
            if grayscale:
                img = tf.image.rgb_to_grayscale(img)
                img = tf.image.grayscale_to_rgb(img)
            #img = tf.image.random_crop(img, [crop_size, crop_size, tf.shape(img)[-1]])
            img = tf.clip_by_value(img, 0, 255) / 255.0 # or img = tl.minmax_norm(img)
            img = img * 2 - 1
            return img, tf.one_hot(label, num_classes,dtype=tf.int32)
    return tl.disk_image_batch_dataset_triplet(img_paths,
                                       batch_size,
                                       crop_size,
                                       labels=labels,
                                       Triplet_K=Triplet_K,
                                       drop_remainder=drop_remainder,
                                       shuffle=shuffle,
                                       repeat=repeat)
def make_zip_dataset_triplet2(A_img_paths,A_labels, B_img_paths,B_labels, batch_size, load_size, crop_size, training,Triplet_K=4, shuffle=True, grayscale=True,repeat=False):
    """Zip the A and B triplet datasets; the shorter side is cycled so the pair
    stays aligned with the longer side (both cycle when `repeat` is truthy)."""
    if repeat:
        A_repeat, B_repeat = None, None  # cycle both sides forever
    elif len(A_img_paths) >= len(B_img_paths):
        A_repeat, B_repeat = 1, None     # B is shorter -> cycle B
    else:
        A_repeat, B_repeat = None, 1     # A is shorter -> cycle A
    A_dataset = make_dataset_triplet2(A_img_paths, A_labels, batch_size, load_size, crop_size, training, Triplet_K=Triplet_K, drop_remainder=True, shuffle=shuffle, grayscale=grayscale, repeat=A_repeat)
    B_dataset = make_dataset_triplet2(B_img_paths, B_labels, batch_size, load_size, crop_size, training, Triplet_K=Triplet_K, drop_remainder=True, shuffle=shuffle, grayscale=grayscale, repeat=B_repeat)
    zipped = tf.data.Dataset.zip((A_dataset, B_dataset))
    # Whole batches spanned by the longer of the two path lists.
    batches_per_epoch = max(len(A_img_paths), len(B_img_paths)) // batch_size
    return zipped, batches_per_epoch
e9beacd2f6c0c4513a90e88e7f2b329df9631eeb | Python | sajid90/Pythonbasics | /calculate_total_price.py | UTF-8 | 574 | 3.984375 | 4 | [] | no_license | items = []
total_bill_amount = 0  # running total of all valid prices entered so far
# Read prices until the user types a quit command; non-integer input is rejected.
while True:
    price = input("\nEnter price OR type q to exit: ")
    if price in ('q', 'quit'):
        # Quit requested: report the outcome and leave the loop.
        if total_bill_amount == 0:
            print("\nYou did not enter any price. Thanks for using our calculator")
        else:
            print(f'\nTotal amount: {total_bill_amount}. Thanks for using our calculator')
        break
    if not price.isdigit():
        # Only non-negative integer literals are accepted.
        print(f'\nPlease enter integers only. Passed value is "{price}"')
        continue
    total_bill_amount += int(price)
| true |
0ceda36c5cb9affabfbd6825cff32dc9d4888ef6 | Python | evelinrkalil13/Ejercicios-python | /EjerciciosJueves/estructuras/Ejercicio6.py | UTF-8 | 601 | 3.546875 | 4 | [] | no_license | abecedario = {1:"a", 2:"b", 3:"c", 4:"d", 5:"e", 6:"f", 7:"g", 8:"h", 9:"i", 10:"j", 11:"k",
12:"l", 13:"m", 14:"n", 15:"ñ", 16:"o", 17:"p", 18:"q", 19:"r", 20:"s", 21:"t",
22:"u", 23:"v", 24:"w", 25:"x", 26:"y", 26:"z"}
palabra = input("Ingrese una palabra ")
palabram = palabra
letranumero = " "
palabran = " "
for letra in palabra:
for llave, valor in abecedario.items():
if letra.lower() == valor:
letranumero += str(llave)
palabran += "" + str(valor) + '('+ str(llave)+')'
print("frase: ", palabram)
print("Salida: ", palabran) | true |
4a92a489d149e2eec0b738605dad892c67c19414 | Python | HANYIIK/Learning-OpenCV-Python-Tutorial | /Part 4/learning_1.py | UTF-8 | 2,518 | 3.171875 | 3 | [] | no_license | # 特征提取
# Chapter 1 Harris 角点检测算法
"""
函数: cv2.cornerHarris(), cv2.cornerSubPix()
"""
import cv2
import numpy as np
# 显示图像函数
def ShowImage(name_of_image, image, rate):
    """Display `image` scaled by `rate`; ESC discards, 's' saves '<name>.jpg'.

    BUG FIX: the original called cv2.waitKey(0) twice (once in the `if` and
    again in the `elif`), so saving required two separate key presses. The key
    code is now read exactly once.
    """
    img_min = cv2.resize(image, None, fx=rate, fy=rate, interpolation=cv2.INTER_CUBIC)
    cv2.namedWindow(name_of_image, cv2.WINDOW_NORMAL)
    cv2.imshow(name_of_image, img_min)
    key = cv2.waitKey(0)  # block until one key press; returns its key code
    if key == 27:  # ESC: exit without saving
        print('Not saved!')
        cv2.destroyAllWindows()
    elif key == ord('s'):  # 's': save the full-resolution image and exit
        cv2.imwrite(name_of_image + '.jpg', image)  # save
        print('Saved successfully!')
        cv2.destroyAllWindows()
# 1.1 Harris 角点检测(粗略版)
'''
cv2.cornerHarris(
① 数据类型为 float32 的输入图像 - img,
② 角点检测中要考虑的领域大小 - blockSize,
③ Sobel 求导中使用的窗口大小 - ksize,
④ Harris 角点检测方程中的自由参数,取值参数为 [0,04, 0.06] - k
)
'''
img = cv2.imread('chessboard.png')
# 1、Harris 角点检测基于【灰度】图像
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# type of img/img_gray is 【<class 'numpy.ndarray'>】(np.uint8)
# 2、Harris 角点检测
dst = cv2.cornerHarris(img_gray, 2, 3, 0.04)
# 3、腐蚀一下,便于标记
dst = cv2.dilate(dst, None)
# 4、角点标记为红色
# img[dst > 0.01 * dst.max()] = [0, 0, 255]
# ShowImage('test', img, 10)
# 1.2 Harris 角点检测(精准版)
# 亚像素级精确度的角点(小角点)
'''
cv2.cornerSubPix(
① 灰度图 - img
② 角点 - corners
③ winSize
④ zeroZone
⑤ 标准 - criteria
)
'''
ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)
dst = np.uint8(dst)
# 找到形心 centroids
ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)
# 定义一个提取角点的标准
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners = cv2.cornerSubPix(img_gray, np.float32(centroids), (5, 5), (-1, -1), criteria)
print('criteria(标准) = \n', criteria)
print('centroids(形心) = \n', centroids)
print('corners(角点) = \n', corners)
res = np.hstack((centroids, corners))
print('before int0, res(形心, 角点) 小数版 = \n', res)
# 将 res 的所有元素取整(非四舍五入)
res = np.int0(res)
print('after int0, res(形心, 角点) 整数版 = \n', res)
img[res[:, 1], res[:, 0]] = [0, 0, 255]
img[res[:, 3], res[:, 2]] = [0, 255, 0]
# ShowImage('test', img, 2)
| true |
5ce47ac118338c26b9388db9552034ac4709b7e8 | Python | stanleyjacob/algos_again | /word_break.py | UTF-8 | 684 | 3.328125 | 3 | [] | no_license | import collections
class Solution:
    """Word Break: decide whether a string can be segmented into dictionary words."""

    def wordBreak(self, s: str, wordDict: 'List[str]') -> bool:
        """Return True if `s` can be split into a sequence of words from `wordDict`.

        FIXES: the `List` annotation is now a string forward reference, so the
        class no longer raises NameError at import time when `typing.List` is
        not imported; the misused `collections.defaultdict(bool)` (membership
        is always checked before access) is replaced by a plain dict.
        """
        cache = {}  # maps a suffix of `s` -> whether it can be segmented
        return self.wordBreakHelper(s, wordDict, cache)

    def wordBreakHelper(self, s, wordDict, cache):
        """Recursive helper: can the suffix `s` be segmented? Memoized in `cache`."""
        if len(s) == 0:
            return True
        if s in cache:
            return cache[s]
        boolVal = False
        for curr_word in wordDict:
            if s.startswith(curr_word):
                boolVal = self.wordBreakHelper(s[len(curr_word):], wordDict, cache)
                if boolVal:
                    break  # one successful split is enough; stop recursing
        cache[s] = boolVal
        return boolVal
| true |
dbd1284d1aceb527b2ceed02d89e79a1c02af661 | Python | Pedro-Bernardo/mercedes-benz.io-challenge | /core/commands/DoPoll.py | UTF-8 | 1,864 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python3
# ===============================
# AUTHOR: Pedro Bernardo
# CREATE DATE: 23 Feb 2019
# PURPOSE: Poll command
# ===============================
from core.commands.Command import Command
from core.StatusChecker import StatusChecker
from core.Saver import Saver
import json
class DoPoll(Command):
    """Poll command: retrieves, persists and prints the status of the
    configured services.

    Service selection honours two mutually exclusive CLI options:
    ``--only a,b``    poll only the listed service IDs
    ``--exclude a,b`` poll everything except the listed service IDs
    """
    ID = 'poll'
    HELP = 'Retrieves the status from of all configured services'

    def __init__(self, service_list, args):
        self._services = service_list
        self._args = args
        self._saver = Saver()

    def __str__(self):
        return self.__class__.__name__

    def _validate_ids(self, requested_ids):
        """Exit with an error message if any requested ID is not a known service.

        (Deduplicates the copy-pasted validation loops of the original and
        builds the known-ID set once instead of rebuilding a list per name.)
        """
        known_ids = {service.ID for service in self._services}
        for requested in requested_ids:
            if requested not in known_ids:
                print("Invalid service: %s\nExiting" % requested)
                exit(-1)

    def _select_services(self):
        """Return the services to poll after applying --only / --exclude."""
        if self._args.only is not None:
            wanted = self._args.only[0].split(',')
            self._validate_ids(wanted)
            return [service for service in self._services if service.ID in wanted]
        if self._args.exclude is not None:
            unwanted = self._args.exclude[0].split(',')
            self._validate_ids(unwanted)
            return [service for service in self._services if service.ID not in unwanted]
        return self._services

    def execute(self):
        """Poll each selected service, save the results as JSON, print them."""
        checker = StatusChecker()
        status_list = [service.accept(checker) for service in self._select_services()]
        self._saver.json(status_list)
        for status in status_list:
            print(status)
| true |
757d002e34e718e9090ebfb94fbfbc1589519271 | Python | IlPakoZ/Uniroma1-Informatica | /Fondamenti di Programmazione 1° semestre/Programmi Python/HW6obb/program01.py | UTF-8 | 15,406 | 3.46875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''
In un immagine a sfondo nero e' disegnata una griglia
dove alcuni segmenti che ne connettono i nodi in orizzontale
o in verticale sono stati cancellati (i nodi della griglia sono in
verde mentre i segmenti sono in rosso).
La dimensione del lato dei quadrati della griglia non è data.
Si veda ad esempio la figura foto_1.png.
Progettare la funzione es1(fimm, k) che prende in input l'indirizzo
dell'immagine contenente la griglia ed un intero k e restituisce un intero.
L'intero restituito e' il numero di
quadrati rossi (con pixel verdi) di lato k (steps della griglia) che sono presenti nell'immagine.
Ad esempio es1(foto_1.png,2) deve restituire 2 (i due quadrati rossi presenti nella
sottogriglia hanno il vertice in alto a sinistra con coordinate (3,0) e
(4,2) nelle coordinate della griglia, rispettivamente)
Per caricare e salvare file PNG si possono usare load e save della libreria immagini allegata.
NOTA: il timeout previsto per questo esercizio è di 1 secondo per ciascun test
ATTENZIONE: quando caricate il file assicuratevi che sia nella codifica UTF8
(ad esempio editatelo dentro Spyder)
'''
import immagini
def es1(fimm,k):
    """Count the all-red squares of side `k` (measured in grid steps) present in
    the grid image stored at path `fimm`. Nodes are green, edges are red."""
    img = immagini.load(fimm) # the loaded image (list of rows of RGB tuples)
    width = len(img[0]) # image width in pixels
    height = len(img) # image height in pixels
    gap = 0 # pixel distance between two adjacent grid nodes
    right_boundary_index = None # x position of the right-most grid pixel
    bottom_boundary_index = None # y position of the bottom-most grid pixel
    x_starting_index = None # x position of the first grid pixel
    y_starting_index = None # y position of the first grid pixel
    y_index, x_index, x_starting_index, y_starting_index = first_grid_node(img,height,width)
    if x_starting_index == None: # no grid pixel found at all...
        return 0 # ...the grid is empty, return 0
    right_boundary_index, gap = get_gap(img, x_index, y_index, x_starting_index, y_starting_index, width, height)
    if not gap: # no gap could be measured (single-node grid)
        if k: # a positive side length cannot fit
            return 0
        else:
            return 1 # side 0: the lone node counts as one degenerate square
    if not right_boundary_index == x_starting_index: # the grid is NOT one column wide...
        right_boundary_index = get_right_boundary(img, x_starting_index, y_starting_index, width, gap) # locate the right-most grid pixel
    bottom_boundary_index = get_bottom_boundary(img, x_starting_index, y_starting_index, height, gap) # locate the bottom-most grid pixel
    ins = get_possible_x_segments(img, x_starting_index, y_starting_index, right_boundary_index, bottom_boundary_index, gap, k) # candidate squares (paired horizontal segments)
    if not len(ins): # no candidates...
        return 0 # ...no squares
    if len(ins) > ((right_boundary_index-x_starting_index)//gap-k) * ((bottom_boundary_index-y_starting_index)//gap) // 8: # many candidates: intersecting with vertical segments is faster here
        ins_y = get_possible_y_segments(img, x_starting_index, y_starting_index, right_boundary_index, bottom_boundary_index, gap, k) # set of vertical segments
        return count_intersection(ins,ins_y,k) # number of squares
    return count_squares(img, ins,x_starting_index, y_starting_index, gap,k) # number of squares
def count_intersection(ins_x, ins_y, dim):
    """Count vertical segments in `ins_y` whose matching horizontal counterpart
    (same grid cell, reinterpreted as a row segment of length `dim`) is in `ins_x`."""
    return sum(1 for seg in ins_y if (seg[1], seg[0], seg[0] + dim) in ins_x)
def count_squares(img, ins, x_starting_index, y_starting_index, gap, dim):
    """Count the candidate squares in `ins` whose vertical sides are confirmed
    red by get_square_by_segments (which returns True when an edge is missing)."""
    return sum(
        1 for el in ins
        if not get_square_by_segments(img, el[1], el[0], x_starting_index, y_starting_index, dim, gap)
    )
def get_square_by_segments(img, x, y, x_starting_index, y_starting_index, dim, gap):
    """Inspect the left and right vertical sides of a candidate square.

    Returns True as soon as one expected red edge pixel (the pixel just below
    each sampled node) is missing, i.e. the candidate is NOT a closed square;
    returns False when every checked pixel is red.
    """
    left = x * gap + x_starting_index
    right = (x + dim) * gap + x_starting_index
    top = y * gap + y_starting_index
    bottom = (y + dim) * gap + y_starting_index
    return any(
        img[row + 1][col] != (255, 0, 0)
        for col in range(left, right + 1, dim * gap)
        for row in range(top, bottom, gap)
    )
def get_possible_x_segments(img, x_starting_index, y_starting_index, right_boundary_index, bottom_boundary_index, gap, dim):
    """Collect horizontal red segments of length `dim` grid steps that could be
    the top edge of a square, keeping only those with a matching segment `dim`
    rows below. Elements are (row, start_col, end_col) in grid coordinates."""
    ins = set() # set of candidate-segment tuples
    count = 0 # length of the current run of consecutive red edges
    for y in range(y_starting_index, bottom_boundary_index+1, gap): # walk every grid row
        starting_x = x_starting_index # column where the current run started
        for x in range(x_starting_index, right_boundary_index+1, gap): # walk every node on the row
            if is_red(img,x+1,y): # pixel right of the node is red: edge present
                count+=1
            else:
                count = 0 # run broken: reset the counter
                starting_x = x+gap
            if count == dim: # dim consecutive nodes are connected
                ins.add(((y-y_starting_index)//gap,(starting_x-x_starting_index)//gap,(x-x_starting_index)//gap + 1)) # (row, start col, end col) of the segment
                count -= 1 # overlapping segments: resume counting from here
                starting_x = starting_x+gap # slide the window start one grid step right
        count = 0 # reset between rows
    return {x for x in ins if (x[0]+dim, x[1], x[2]) in ins} # keep a segment only if the same one exists dim rows below
def is_red(img, x, y):
    """True when pixel (x, y) is pure red; False otherwise, including when the
    coordinates fall outside the image (IndexError is treated as 'not red')."""
    try:
        return img[y][x] == (255, 0, 0)
    except IndexError:
        return False
def get_possible_y_segments(img, x_starting_index, y_starting_index, right_boundary_index, bottom_boundary_index, gap, dim):
    """Vertical counterpart of get_possible_x_segments: collect vertical red
    segments of length `dim` grid steps, keeping only those with a matching
    segment `dim` columns to the right. Elements are (col, start_row, end_row)."""
    ins = set() # set of candidate-segment tuples
    count = 0 # length of the current run of consecutive red edges
    for x in range(x_starting_index, right_boundary_index+1, gap): # walk every grid column
        starting_y = y_starting_index # row where the current run started
        for y in range(y_starting_index, bottom_boundary_index+1, gap): # walk every node in the column
            if is_red(img,x,y+1): # pixel below the node is red...
                count+=1 # ...the segment grows by one edge
            else:
                count = 0 # run broken: reset the counter
                starting_y = y+gap # the next node starts a new run
            if count == dim: # dim consecutive nodes are connected
                ins.add(((x-x_starting_index)//gap,(starting_y-y_starting_index)//gap,(y-y_starting_index)//gap + 1)) # (col, start row, end row) of the segment
                count -= 1 # overlapping segments: resume counting from here
                starting_y = starting_y+gap # slide the window start one grid step down
        count = 0 # reset between columns
    return {x for x in ins if (x[0]+dim, x[1], x[2]) in ins} # keep a segment only if the same one exists dim columns right
def get_right_boundary(img, x_starting_index, y_starting_index, width, gap):
    """Return the x index of the last green grid node on the starting row."""
    first_row = img[y_starting_index]
    for col in range(x_starting_index, width, gap):
        if first_row[col] != (0, 255, 0):
            # First non-green sample: the previously sampled node was the last.
            return col - gap
    # Green all the way: the grid reaches the image's last column.
    return width - 1
def get_bottom_boundary(img, x_starting_index, y_starting_index, height, gap):
    """Return the y index of the last green grid node in the starting column."""
    non_green = (row for row in range(y_starting_index, height, gap)
                 if img[row][x_starting_index] != (0, 255, 0))
    first_miss = next(non_green, None)
    # If every sampled pixel down the column is green, the grid reaches the
    # bottom edge of the image; otherwise step back one grid cell.
    return height - 1 if first_miss is None else first_miss - gap
def get_gap(img, x_index, y_index, x_starting_index, y_starting_index, width, height):
    """Measure the grid step. Returns (right_boundary_or_None, gap): when the
    step is found on the first row, the right boundary is left for the caller
    to compute (None); when the grid is a single column, the starting column
    itself is the right boundary and the step is measured vertically."""
    horizontal_gap = get_gap_horizontally(img, x_index, x_starting_index, y_starting_index, width)
    if horizontal_gap:
        return None, horizontal_gap
    # No second node on the first row: the grid is one column wide.
    vertical_gap = get_gap_vertically(img, y_index, x_starting_index, y_starting_index, height)
    return x_starting_index, vertical_gap
def get_gap_horizontally(img, x_index, x_starting_index, y_starting_index, width):
    """Pixel distance from the first node to the next green pixel on the same
    row, or 0 when no further green pixel exists."""
    first_row = img[y_starting_index]
    for col in range(x_index, width):
        if first_row[col] == (0, 255, 0):
            return col - x_starting_index
    return 0
def get_gap_vertically(img, y_index, x_starting_index, y_starting_index, height):
    """Pixel distance from the first node to the next green pixel in the same
    column, or 0 when no further green pixel exists."""
    green_rows = (row for row in range(y_index, height)
                  if img[row][x_starting_index] == (0, 255, 0))
    next_green = next(green_rows, None)
    return 0 if next_green is None else next_green - y_starting_index
def first_grid_node(img, height, width):
    """Scan the image row-major for the first green pixel.

    Returns (row, col + 1, col, row) — the node position plus the column right
    after it, which is where the caller starts measuring the grid gap — or
    four Nones when the image contains no green pixel at all."""
    for row_idx in range(height):
        current_row = img[row_idx]
        for col_idx in range(width):
            if current_row[col_idx] == (0, 255, 0):
                return row_idx, col_idx + 1, col_idx, row_idx
    return None, None, None, None
if __name__ == '__main__':
    pass
    # insert your tests here
| true |
9729a9fd54bf0ed6b5f043537b91825662936eee | Python | Rajitha2148/programs | /sum of all values.pro12.py | UTF-8 | 164 | 2.703125 | 3 | [] | no_license | mh,rh=list(map(int,input().split()))
values = list(map(int, input().split()))  # the array the queries index into
for _query in range(rh):
    # Each query is a 1-based inclusive range [lo, hi]; print its sum.
    lo, hi = map(int, input().split())
    print(sum(values[lo - 1:hi]))
| true |
8112f98991e31a4957972b6469a48b1adcf5fff5 | Python | org-kpf/auto | /config/version/saneryiwu/SNAPSHOT/snapshot.py | UTF-8 | 7,282 | 2.53125 | 3 | [] | no_license | import re
import time
from Testbed import testbed
import random
import paramiko
class snapshot():
    """A block-volume snapshot on a storage cluster, driven remotely through
    xms-cli over SSH (paramiko). Cluster addresses and credentials come from
    the `testbed` configuration object."""
    def __init__(self,id,name,snap_name,size,allocated_size,status,volume_id,volume_name,cluster):
        self.id = id
        self.name = name
        self.snap_name = snap_name
        # size and allocated_size are both expressed in bytes
        self.size = size
        self.allocated_size = allocated_size
        self.status = status
        self.volume_id = volume_id
        self.volume_name = volume_name
        self.cluster = cluster
    def get_available_node(self):
        """Pick a random SSH-reachable node of this snapshot's cluster.

        Returns [ip, user, password, admin_user, admin_password].
        Raises when the cluster name is unknown or when no node accepts a login.
        """
        class no_available_ip(Exception):
            pass
        # normal_node = paramiko.SSHClient()
        # normal_node.set_missing_host_key_policy(paramiko.AutoAddPolicy)
        # print('received pool_cluster is', self.cluster)
        if self.cluster == 'cluster1':
            ip_list = testbed.cluster1[1]
            test_user = testbed.cluster1[4]
            test_password = testbed.cluster1[5]
            test_admin_user = testbed.cluster1[6]
            test_admin_password = testbed.cluster1[7]
        elif self.cluster == 'cluster2':
            ip_list = testbed.cluster2[1]
            test_user = testbed.cluster2[4]
            test_password = testbed.cluster2[5]
            test_admin_user = testbed.cluster2[6]
            test_admin_password = testbed.cluster2[7]
        else:
            raise no_available_ip('输入集群名称错误')
        # NOTE(review): AutoAddPolicy is passed as a class here (no parentheses),
        # unlike the instantiated form used in delete()/set() — verify paramiko
        # accepts both in this version.
        normal_node = paramiko.SSHClient()
        normal_node.set_missing_host_key_policy(paramiko.AutoAddPolicy)
        #print('pool_cluster is', self.cluster)
        # Probe every node; a bare except marks any failure (auth, timeout,
        # network) as an unusable node.
        bad_ip_list = []
        for a in ip_list:
            try:
                normal_node.connect(hostname=a, username=test_user, password=test_password)
            except:
                bad_ip_list.append(a)
        # NOTE(review): this mutates the shared testbed ip list in place.
        for b in bad_ip_list:
            ip_list.remove(b)
        time.sleep(1)
        class empty_ip_list(Exception):
            pass
        if ip_list == []:
            raise empty_ip_list('集群没有可登录的节点')
        else:
            return [random.choice(ip_list),test_user,test_password,test_admin_user,test_admin_password]
    def create_snapshot(self,name,block_volume,description):
        '''
        Called by a volume object. Given the new snapshot's name, the source
        volume ID and an optional description, return the tail of the
        xms-cli create command.
        :param name: name of the new snapshot
        :param block_volume: ID of the source volume
        :param description: description of the new snapshot ('' for none)
        :return: command string
        '''
        if description != '':
            return 'block-snapshot create --block-volume=%d --description=%s %s' %(block_volume,description,name)
        if description == '':
            return 'block-snapshot create --block-volume=%d %s' %(block_volume,name)
    def delete(self,check_times=5,check_interval=5):
        '''
        Only a snapshot object may call delete to remove itself.
        :param check_times: how many times to poll whether the deletion succeeded (default 5)
        :param check_interval: seconds between two polls (default 5)
        :return: the command's error echo when the delete fails up front; nothing on success
        '''
        class delete_snapshot_failed(Exception):
            pass
        temporary_node = self.get_available_node()
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(hostname=temporary_node[0], username=temporary_node[1], password=temporary_node[2])
        cmd1 = 'xms-cli --user %s --password %s ' % (temporary_node[3], temporary_node[4])
        cmd2 = 'block-snapshot delete %d' % self.id
        cmd = cmd1 + cmd2
        print('下发删除pool的命令\n', cmd)
        stdin, stdout, stderr = ssh.exec_command(cmd)
        result_out = stdout.read().decode()[:-1]
        result_err = stderr.read().decode()[:-1]
        if result_err != '':
            return result_err + result_out
        # Poll until the snapshot disappears from the listing.
        for i in range(0, check_times):
            cmd = '''xms-cli -f '{{range .}}{{println .status}}{{end}}' --user %s --password %s block-snapshot list -q "id:%d"''' \
                  % (temporary_node[3], temporary_node[4], self.id)
            stdin, stdout, stderr = ssh.exec_command(cmd)
            result_out = stdout.read().decode()[:-1]
            result_err = stderr.read().decode()[:-1]
            if result_out == '' and result_err == '':
                # The snapshot is gone: deletion succeeded; drop the local reference.
                print('第%d次检查,删除%s号快照成功' % (i+1,self.id))
                del self
                break
            if result_out != '':
                time.sleep(check_interval)
                print('第%d次检查,删除%s号快照失败,快照状态是%s' % ((i+1), self.id,result_out))
                i += 1
                # NOTE(review): incrementing the loop variable has no effect on the
                # iteration, and `i` never equals check_times inside the loop (its
                # last value is check_times - 1), so the elif below is unreachable
                # and delete_snapshot_failed is never raised.
            elif i == check_times:
                raise delete_snapshot_failed('循环检查结束,删除%d号快照失败' % self.id)
    def set(self,check_times=0,check_interval=0,**kwargs):
        '''
        Modify attributes of this snapshot via xms-cli and verify the change.
        :param name: new name of the snapshot (passed through **kwargs)
        :param description: new description of the snapshot (passed through **kwargs)
        :param check_times: how many times to poll for the change (default 0: no check)
        :param check_interval: seconds between two polls
        :return: the command's error echo on failure; otherwise the object's
                 attributes are updated in place
        '''
        class set_snapshot_failed(Exception):
            pass
        temporary_node = self.get_available_node()
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(hostname=temporary_node[0], username=temporary_node[1], password=temporary_node[2])
        cmd1 = 'xms-cli --user %s --password %s block-snapshot set' % (temporary_node[3], temporary_node[4])
        cmd2 = ''
        cmd3 = ' %d' % self.id
        # Turn every keyword argument into a --key=value option.
        for i in kwargs:
            cmd2 = cmd2 + ' --' + i + '=%s' % kwargs.get(i)
        cmd = cmd1 + cmd2 + cmd3
        print('下发修改%d号快照的命令\n%s' %(self.id,cmd))
        stdin,stdout,stderr = ssh.exec_command(cmd)
        out = stdout.read().decode()[:-1]
        err = stderr.read().decode()[:-1]
        print(err + out)
        if err == '':
            pass
        elif out == '' or err != '' or 'Incorrect Usage' in out:
            return (err + out)
        # Poll each modified attribute until it matches the requested value.
        for j in kwargs:
            for i in range(0, check_times):
                stdin, stdout, stderr = ssh.exec_command('''xms-cli -f '{{range .}}{{println .%s}}{{end}}' --user %s --password %s block-snapshot list -q "id:%d"''' % (j, temporary_node[3], temporary_node[4], self.id))
                result = stdout.read().decode()[:-1]
                if result == kwargs.get(j):
                    print('第%d次检查修改快照结果,快照的%s为%s,传入值为%s,相同' %(i,j,result,kwargs.get(j)))
                    self.__dict__[j] = kwargs.get(j)
                    break
                if result != kwargs.get(j):
                    print('第%d次检查修改快照结果,快照的%s为%s,传入值为%s,不同' %(i,j,result,kwargs.get(j)))
                    i += 1
                    time.sleep(check_interval)
                # NOTE(review): this branch is unreachable — it is only evaluated
                # when result == kwargs.get(j), in which case the first `if`
                # already broke out of the loop; and the loop variable never
                # reaches check_times anyway (range stops at check_times - 1).
                elif i == check_times:
                    raise set_snapshot_failed('循环检查结束,修改%d号快照后的%s为%s,而传入为%s不一致' %(self.id,j,result,kwargs.get(j)))
| true |
4c06179e92ab52e1dcc0e66dba1c15e5be6439f7 | Python | colinetzel/diabetes-reinforcement-learning | /PID-IFB.py | UTF-8 | 11,391 | 2.5625 | 3 | [] | no_license | # Author Colin Etzel
#/usr/bin/python3
import math
import random
import json
import requests
import argparse
# Command-line interface: PID gains, feature toggles and simulation settings.
parser = argparse.ArgumentParser()
parser.add_argument("-p", type=float, default=0.00465, help="Proportional PID component coefficent")
parser.add_argument("-i", type=float, default=0.0, help="Integral PID component coefficient")
parser.add_argument("-d", type=float, default=0.26156, help="Derivative component PID coefficient")
parser.add_argument("--ifb", action='store_true', help="Toggle for adding insulin feedback.")
parser.add_argument("--meals", action='store_true', help="Toggle for including carbs from simulated meals.")
parser.add_argument("--floor", type=float, default=0.0, help="Minimum amount of glucose produced (by liver in times of fasting)")
parser.add_argument("--target", type=int, default=120, help="Desired glucose value for algorithm to achieve")
parser.add_argument("--numDays", type=int, default=1, help="Number of days to run algorithm over.")
"""
The following constants are taken from:
Effect of Insulin Feedback on Closed-Loop Glucose Control: A Crossover Study by Ruiz et al.
https://www.ncbi.nlm.nih.gov/pubmed/23063039
"""
# Discrete-time insulin pharmacokinetic model constants (see citation above).
alpha11 = 0.9802 #subcutaneous insulin pharmacokinetic constant 1
alpha21 = 0.014043 #subcutaneous insulin pharmacokinetic constant 2
alpha31 = 0.000127 #subcutaneous insulin pharmacokinetic constant 3
#pharmacokinetic constant 1 is not present or used in the literature
alpha22 = 0.98582 #plasma insulin pharmacokinetic constant 2
alpha32 = 0.017889 #plasma insulin pharmacokinetic constant 3
alpha33 = 0.98198 #interstitial insulin pharmacokinetic constant 3
beta1 = 1.1881 #insulin delivery coefficient 1
beta2 = 0.0084741 #insulin delivery coefficient 2
beta3 = 0.00005 #insulin delivery coefficient 3
gamma1 = 0.64935 #IFB parameter for subcutaneous insulin
gamma2 = 0.34128 #IFB parameter for plasma insulin
gamma3 = 0.0093667 #IFB parameter for effective insulin
def main():
totalError = 0
print("totalError assigned")
# These are the arrays to track what is simulated
States = []
Actions = []
Rewards = []
# Write headers to output file
myFile = open("insulinResults.txt", "w")
myFile.write("StateGlucose, StateTime, StateInsulin, ActionBasal, Reward, Step, Episode\n")
glucoURL = "http://localhost:3000/dose"
errors = []
#Previous PID values for use by PID algorithm
P = []
I = []
D = []
FB = []
totalInsulin = []
initInsulin = 0
args = parser.parse_args()
Kp = args.p
Ki = args.i
Kd = args.d
mealsPresent = args.meals
useIFB = args.ifb
targetGlucose = args.target
basalFloor = args.floor
numDays = args.numDays
index = 0
#The JSON object that glucosym accepts
postdata = { "dose": 0.0, "dt": 5, "index": 0, "time": 1440, "events": { "basal": [{ "amt": 0.0, "start": 0, "length": 0 }], "carb": [{ "amt": 0.0, "start": 0, "length": 0 }] } }
{ "dose": 0.0, "dt": 5, "index": 0, "time": 1440, "events": { "basal": [{ "amt": 0.0, "start": 0, "length": 0 }], "carb": [{ "amt": 0.0, "start": 0, "length": 0 }] } };
for ep in range(numDays):
# Initial post to get glucose at start of day
response = requests.post(glucoURL, json = postdata)
obj = json.loads(response.text)
Idosage = [0] #Insulin dosage
Isubcutaneous = [0] #subcutaneous insulin estimates
Iplasma = [0] #plasma insulin estimates
Ieffective = [0] #effective/interstital insulin estimates
# Set current and last glucose same initially
if obj["bg"] != None:
glucose = obj["bg"]
lastGlucose = glucose
timeSinceLastMeal = 720
#Randomly pick meal times from range of normal meal times
breakfastTime = randomIntFromInterval(480, 540)
lunchTime = randomIntFromInterval(720, 840)
dinnerTime = randomIntFromInterval(1020, 1200)
breakfast = False
lunch = False
dinner = False
# Inner loop simulates time throughout single day/episode
t = 5
#t increments by 5 at the end of the loop
while t <= 1440:
print(glucose)
# Current index in action, state, reward log
curIndex = t / 5
#calculate subcutaneous insulin
Isubcutaneous.append(Isc(Isubcutaneous[-1], Idosage[-1]))
Iplasma.append(Ip(Isubcutaneous[-1], Iplasma[-1], Idosage[-1]))
Ieffective.append(Ieff(Isubcutaneous[-1], Iplasma[-1],Ieffective[-1], Idosage[-1]))
# Measured in International Units
insulinBasal = 0
if(useIFB):
insulinBasal = max(basalFloor, PIDIDFAlgorithm(index, totalError, targetGlucose, lastGlucose, glucose, errors, t, Kp, Ki, Kd, P, I, D, Isubcutaneous[-1], Iplasma[-1], Ieffective[-1], FB))
else:
insulinBasal = max(basalFloor, PIDAlgorithm(index, totalError, targetGlucose, lastGlucose, glucose, errors, t, Kp, Ki, Kd, P, I, D))
Idosage.append(insulinBasal)
carbs = 0
totalInsulin.append(Idosage[-1] + Isubcutaneous[-1] + Iplasma[-1] + Ieffective[-1])
# Simulate meals via carbohydrate injections at typical meal times
if (mealsPresent):
if (breakfastTime == t) or (t > breakfastTime and not breakfast):
#Measured in grams
carbs = randomIntFromInterval(20, 60)
breakfast = True
if (lunchTime == t) or (t > lunchTime and not lunch):
# Measured in grams
carbs = randomIntFromInterval(20, 60)
lunch = True
if (dinnerTime == t) or (t > dinnerTime and not dinner):
# Measured in grams
carbs = randomIntFromInterval(20, 60)
dinner = True
# Log all of this timestep's RL info
# The JSON object that stores state info
stateInfo = { "bloodGlucose": 0, "lastMealSeen": 0, "totalInsulin": 0 }
# The JSON object that stores action info
actionInfo = { "basalInject": 0 }
stateInfo["bloodGlucose"] = math.floor(glucose)
stateInfo["lastMealSeen"] = timeSinceLastMeal
stateInfo["totalInsulin"] = totalInsulin[-1]
actionInfo["basalInject"] = insulinBasal
States.append(stateInfo)
Actions.append(actionInfo)
# Determine reward for this state
if (glucose > 70) and (glucose < 100):
Rewards.append(math.floor(math.log(glucose - 70) - 4))
if (glucose <= 70):
Rewards.append(-1000)
if (glucose > 180):
Rewards.append(0)
if (glucose >= 100) and (glucose <= 180):
Rewards.append(1)
# Prepare to post this timestep's data to the simulator
postdata = { "dose": insulinBasal, "dt": 5, "index": curIndex, "time": 1440, "events": { "basal": [{ "amt": insulinBasal, "start": t, "length": 5 }], "carb": [{ "amt": carbs, "start": 0, "length": 90 }] } }
#Post this timestep and get result for next timestep
response = requests.post(glucoURL, json = postdata)
lastGlucose = glucose
obj = json.loads(response.text)
# Set current and last glucose same initially
if obj["bg"] != None:
glucose = obj["bg"]
# 5 minutes since last observation, thus 5 minutes added to last meal observation
timeSinceLastMeal += 5;
#Increment loop variable by 5
t = t + 5
#debug statement
if(useIFB):
msg = "P: " + str(P[index]) + " I: " + str(I[index]) + " D: " + str(D[index]) + " IFB: " + str(FB[index]) + " Net: " + str(P[index] + I[index] + D[index] + FB[index])
else:
msg = "P: " + str(P[index]) + " I: " + str(I[index]) + " D: " + str(D[index]) + " Net: " + str(P[index] + I[index] + D[index])
print(msg)
#increment index
index = index + 1
#Write this episode to file
for i in range(len(States)):
myFile.write(str(States[i]["bloodGlucose"]) + ", " + str(States[i]["lastMealSeen"]) + ", " + str(States[i]["totalInsulin"]) + ", " + str(Actions[i]["basalInject"]) + ", " + str(Rewards[i]) + ", " + str(i) + ", " + str(ep) + "\n")
# Last post to end this simulation
response = requests.post('http://localhost:3000/')
#empty lists for next day's simulation
States = []
Actions = []
Rewards = []
P = []
I = []
D = []
totalInsulin = []
FB = []
def errorSum(errorSum, previousError, currentError, dt):
    """Accumulate the running error integral over one time step.

    Adds the trapezoidal-rule area of the error between the previous
    and current step (assuming the error varies linearly in between)
    onto the running total and returns the new total. Note the first
    parameter shadows the function's own name inside this scope.
    """
    rise = currentError - previousError
    triangle_area = dt * rise / 2
    rectangle_area = previousError * dt
    return errorSum + triangle_area + rectangle_area
def proportionalError(currentError, Kp):
    """Proportional (P) term: the current error scaled by gain Kp."""
    scaled = currentError * Kp
    return scaled
def integralError(errorSum, dt, Ki):
    """Integral (I) term: the accumulated error scaled by gain Ki.

    NOTE(review): the dt parameter is accepted but unused; it is kept
    for signature compatibility with the other term helpers -- confirm
    this is intentional.
    """
    accumulated = errorSum
    return accumulated * Ki
def derivativeError(slope, dt, Kd):
    """Derivative (D) term: the glucose slope over the step, scaled by Kd.

    Multiplies the per-minute slope back by dt (recovering the raw
    change over the interval) before applying the derivative gain.
    """
    delta = slope * dt
    return delta * Kd
def PIDAlgorithm(stepIndex, totalError, targetGlucose, previousGlucose, currentGlucose, errors, dt, Kp, Ki, Kd, P, I, D):
    """Run one step of the PID basal-insulin controller.

    Computes the current error against targetGlucose, appends it to
    `errors`, appends this step's proportional/integral/derivative terms
    to the caller-owned P/I/D lists (side effects), and returns their
    sum as the dose correction.

    Fix: the correction is now built from the terms just appended
    (P[-1] + I[-1] + D[-1]). The previous code indexed with stepIndex,
    which the visible caller feeds its *episode* counter: within an
    episode it returned stale first-step terms, and once the lists were
    reset between episodes while stepIndex kept growing it raised
    IndexError. `stepIndex` is retained (unused) so the signature stays
    backward compatible.

    NOTE(review): the updated integral computed here is NOT returned,
    so the caller's totalError accumulator never advances and the I
    term only ever covers the most recent interval -- confirm whether
    the integral was meant to accumulate across steps.
    """
    error = currentGlucose - targetGlucose
    # First call: no previous error has been recorded yet; treat it as 0.
    previousError = errors[-1] if errors else 0
    totalError = errorSum(totalError, previousError, error, dt)
    errors.append(error)
    P.append(proportionalError(error, Kp))
    I.append(integralError(totalError, dt, Ki))
    slope = (currentGlucose - previousGlucose) / dt
    D.append(derivativeError(slope, dt, Kd))
    # Use the freshly appended terms for THIS step.
    return P[-1] + I[-1] + D[-1]
def PIDIDFAlgorithm(stepIndex, totalError, targetGlucose, previousGlucose, currentGlucose, errors, dt, Kp, Ki, Kd, P, I, D, Isc, Ip, Ieff, FB):
    """PID controller step with an insulin-feedback (IFB) correction.

    Runs the plain PID step first (which updates the errors/P/I/D
    history lists), then subtracts the insulin-feedback term computed
    from the three insulin-compartment estimates (which is appended to
    FB). Note the Isc/Ip/Ieff parameters shadow the module-level
    functions of the same names inside this scope.
    """
    pid_correction = PIDAlgorithm(stepIndex, totalError, targetGlucose, previousGlucose, currentGlucose, errors, dt, Kp, Ki, Kd, P, I, D)
    feedback = insulinFeedback(Isc, Ip, Ieff, FB)
    return pid_correction - feedback
def randomIntFromInterval(min, max):
    """Draw a pseudo-random integer in [min, max] with a bell-shaped spread.

    Averages six uniform draws on [min, max + 1) -- a rough normal
    approximation via the central limit theorem -- and floors the
    result. The min/max parameter names shadow the builtins; they are
    kept unchanged for interface compatibility.
    """
    total = 0
    for _ in range(6):
        # One uniform draw on [min, max + 1).
        total = total + random.random() * (max - min + 1) + min
    return math.floor(total / 6.0)
def insulinFeedback(Isc, Ip, Ieff, FB):
    """Compute the insulin-feedback correction term and log it.

    Weighted sum of the three insulin-compartment estimates using the
    module-level gains gamma1/gamma2/gamma3. The value is appended to
    the caller-owned FB history list (side effect) and echoed to
    stdout. Note the Isc/Ip/Ieff parameters shadow the module-level
    functions of the same names inside this scope.
    """
    subcut_term = gamma1 * Isc
    plasma_term = gamma2 * Ip
    interstitial_term = gamma3 * Ieff
    feedback = subcut_term + plasma_term + interstitial_term
    FB.append(feedback)
    print("\n FB " + str(FB[-1]))
    return feedback
def Isc(Isc_previous, Id_previous):
    """Advance the subcutaneous-insulin state estimate by one step.

    Linear state update: decay of the previous estimate plus the
    contribution of the previous dose. Coefficients alpha11/beta1 are
    module globals.
    """
    decay_term = alpha11 * Isc_previous
    dose_term = beta1 * Id_previous
    return decay_term + dose_term
def Ip(Isc_previous, Ip_previous, Id_previous):
    """Advance the plasma-insulin state estimate by one step.

    Linear state update: transfer from the subcutaneous compartment,
    decay of the previous plasma estimate, and the contribution of the
    previous dose. Coefficients alpha21/alpha22/beta2 are module globals.
    """
    transfer_term = alpha21 * Isc_previous
    decay_term = alpha22 * Ip_previous
    dose_term = beta2 * Id_previous
    return transfer_term + decay_term + dose_term
def Ieff(Isc_previous, Ip_previous, Ieff_previous, Id_previous):
    """Advance the effective (interstitial) insulin estimate by one step.

    Linear state update combining the subcutaneous and plasma estimates,
    the decayed previous effective estimate, and the previous dose.
    Coefficients alpha31/alpha32/alpha33/beta3 are module globals.
    """
    subcut_term = alpha31 * Isc_previous
    plasma_term = alpha32 * Ip_previous
    decay_term = alpha33 * Ieff_previous
    dose_term = beta3 * Id_previous
    return subcut_term + plasma_term + decay_term + dose_term
# Entry point: run the simulation only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()
9c28ca62837b80ae229fbcd7ff58f128bce43e6c | Python | sugitanishi/competitive-programming | /atcoder/abc186/c.py | UTF-8 | 91 | 2.59375 | 3 | [
"MIT"
] | permissive | print(len([i for i in range(1,int(input())+1) if '7' not in str(i) and '7' not in oct(i)])) | true |
8c2c190075cd8db3c30295e3c65da6b1e5803524 | Python | Ritesh007/tutorial | /python/while_loop.py | UTF-8 | 165 | 3.0625 | 3 | [] | no_license | #!/usr/bin/python
#########################
# python script 8
########################
# variable declaration
i = 1
#while loop
while i < 6:
print(i)
i += 1
| true |
73b44d1534d4dd2f05defafe94454e1d86abd886 | Python | kmair/Graduate-Research | /PYOMO_exercises_w_soln/exercises/PyomoFundamentals/exercises-1/knapsack_pandas_excel_soln.py | UTF-8 | 924 | 2.53125 | 3 | [] | no_license | import pandas as pd
from pyomo.environ import *
df_items = pd.read_excel('knapsack_data.xlsx', sheet_name='data', header=0, index_col=0)
W_max = 14
A = df_items.index.tolist()
b = df_items['Benefit'].to_dict()
w = df_items['Weight'].to_dict()
model = ConcreteModel()
model.x = Var( A, within=Binary )
model.obj = Objective(
expr = sum( b[i]*model.x[i] for i in A ),
sense = maximize )
model.weight_con = Constraint(
expr = sum( w[i]*model.x[i] for i in A ) <= W_max )
opt = SolverFactory('glpk')
opt_success = opt.solve(model)
total_weight = sum( w[i]*value(model.x[i]) for i in A )
print('Total Weight:', total_weight)
print('Total Benefit:', value(model.obj))
print('%12s %12s' % ('Item', 'Selected'))
print('=========================')
for i in A:
acquired = 'No'
if value(model.x[i]) >= 0.5:
acquired = 'Yes'
print('%12s %12s' % (i, acquired))
print('-------------------------')
| true |