blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a47924ef3c2235743fb98c53e080bec11927eb94 | Python | poojakancherla/Problem-Solving | /AlgoExpert_DailyCoding/#4.py | UTF-8 | 222 | 3.515625 | 4 | [] | no_license | # Maximum subarray problem
# Algorithm: Kadane's Algorithm
# Single pass: track the best subarray sum ending at the current element
# (currSum) and the best sum seen anywhere so far (maxSum).
arr = [-6,-5,-4,-3,-2,-1]
currSum = maxSum = arr[0]
for idx in range(1, len(arr)):
    value = arr[idx]
    # Either extend the running subarray or restart it at this element.
    if currSum + value > value:
        currSum = currSum + value
    else:
        currSum = value
    if currSum > maxSum:
        maxSum = currSum
print(maxSum)
| true |
acd086377ad75c44c27ff614714c4bb0b38b5da4 | Python | OldJohn86/Python_CPP | /TendCode/spider_test/jandan/download_img.py | UTF-8 | 3,846 | 2.515625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import hashlib
import base64
import requests
from bs4 import BeautifulSoup
import re
import threading
import multiprocessing
import os
def _md5(value):
'''md5加密'''
m = hashlib.md5()
m.update(value.encode('utf-8'))
return m.hexdigest()
def _base64_decode(data):
'''bash64解码,要注意原字符串长度报错问题'''
missing_padding = 4 - len(data) % 4
if missing_padding:
data += '=' * missing_padding
return base64.b64decode(data)
def get_imgurl(m, r='', d=0):
    """Decode an obfuscated image hash into a protocol-relative URL.

    Args:
        m: the hash scraped from the page; the first 4 characters are a
           legacy key fragment and are skipped, the rest is base64.
        r: legacy decryption key string, kept only for interface
           compatibility -- it is no longer used.
        d: unused legacy flag, kept for interface compatibility.

    Returns:
        str: the image URL starting with '//w' (no scheme).
    """
    # The original RC4-style decryption was dead code (all of its
    # intermediate values were discarded): the site now only
    # base64-encodes the URL tail, so decode it directly.
    payload = _base64_decode(m[4:])
    url = payload.decode('utf-8', errors='ignore')
    return '//w' + url
def get_r(js_url):
    """Fetch the site's JS bundle and extract the decryption key string."""
    js_source = requests.get(js_url).text
    # The JS function name changed over time (an "f_" prefix was dropped),
    # so match any identifier before the (e, "...") call.
    matches = re.findall('c=[\w\d]+\(e,"(.*?)"\)', js_source)
    return matches[0]
def load_img(imgurl, file):
    """Download a single image URL into the given folder.

    The saved filename is the last path component of the URL.
    """
    filename = imgurl.split('/')[-1]
    target = "{}/{}".format(file, filename)
    content = requests.get(imgurl).content
    with open(target, 'wb') as out:
        out.write(content)
def load_imgs(url,file):
    '''Download every image on one listing page, one thread per image.

    url: the listing-page URL; file: destination folder for the images.
    '''
    threads = []
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:49.0) Gecko/20100101 Firefox/49.0',
        'Host': 'jandan.net'
    }
    html = requests.get(url, headers=headers).text
    soup = BeautifulSoup(html, 'lxml')
    # Must use [-1] to pick the JS url: some pages contain two script
    # addresses and the first one is commented out / unused.
    js_url = re.findall('<script src="(//cdn.jandan.net/static/min/[\w\d]+\.\d+\.js)"></script>', html)[-1]
    _r = get_r('http:{}'.format(js_url))
    tags = soup.select('.img-hash')
    for each in tags:
        hash = each.text
        img_url = 'http:' + get_imgurl(hash, _r)
        t = threading.Thread(target=load_img,args=(img_url,file))
        threads.append(t)
    # Start every download, then wait for all of them to finish.
    for i in threads:
        i.start()
    for i in threads:
        i.join()
    print(url,'is ok')
def get_dir():
    """Ensure the download folder exists and return its (relative) name.

    Returns:
        str: the folder name "pics", created if it did not already exist.
    """
    filename = "pics"
    # exist_ok avoids the separate isdir check and the race between
    # checking and creating the directory.
    os.makedirs(filename, exist_ok=True)
    return filename
def main(start,offset,file):
    '''Download several pages of images with a 4-worker process pool.

    start: first page number; offset: how many pages beyond `start`
    to fetch (the range start..start+offset is inclusive);
    file: destination folder for the images.
    '''
    end = start + offset
    pool = multiprocessing.Pool(processes=4)
    base_url = 'http://jandan.net/ooxx/page-{}'
    for i in range(start,end+1):
        url = base_url.format(i)
        # Each page is handled by load_imgs, which itself fans out one
        # thread per image on the page.
        pool.apply_async(func=load_imgs,args=(url,file))
    pool.close()
    pool.join()
if __name__ == '__main__':
    import time
    # Time the whole crawl for the final report below.
    t = time.time()
    get_dir()
    # NOTE(review): 50689384 looks like a post/page id rather than a small
    # page number -- confirm against the site's current URL scheme.
    main(50689384,100,r'./pics')
    # time.sleep(60)
    # main(30,35,r'./download2')
    # time.sleep(60)
    # main(40,45,r'./download3')
    # time.sleep(60)
    # main(50,55,r'./download4')
    # time.sleep(60)
    print(time.time()-t)
| true |
ef9febcd2b3778af17b704525290755f04ca473d | Python | rheehot/code_test | /programmers/weekly_1.py | UTF-8 | 262 | 3.125 | 3 | [] | no_license | # source : https://programmers.co.kr/learn/courses/30/lessons/82612
def solution(price, money, count):
    """Return the shortfall after paying fares price*1 .. price*count.

    Returns 0 when the budget covers every ride (the i-th ride costs
    price * i), otherwise the positive amount still needed.
    """
    for ride in range(1, count + 1):
        money -= price * ride
    # A non-positive balance means we ran short by exactly -money.
    return 0 if money > 0 else -1 * money
solution(3, 20, 4) | true |
4ec60b178ea1d1896034dfc4a7442b2c437a579e | Python | moozer/skemapack | /bin/ExportHtml | UTF-8 | 2,198 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
Created on 10 Feb 2012
@author: moz
'''
import sys, codecs
from Configuration.SkemaPackConfig import SkemaPackConfig
from Import.ImportFile import ImportFile
from Output.HtmlTableOutput import HtmlTableOutput
# Static HTML skeleton written before the generated table. The embedded
# CSS rotates td.WeekHeader cells 270 degrees for vertical week labels.
Header = '''<html>
<header>
<title>TF</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<style TYPE="text/css">
<!--
table { border: solid 1px black; }
tr { background: #ddd }
td.WeekHeader { -webkit-transform: rotate(270deg);
-moz-transform: rotate(270deg);
-o-transform: rotate(270deg);
writing-mode: lr-tb; }
-->
</style>
</header>
<body>
'''
# Closing tags written after the table.
Footer = ''' </body>
</html>'''
def ExportHtml( Weeksums, config, ConfigSet = "ExportHtml"):
    """Render the week sums as an HTML table and write a complete page.

    Weeksums: the imported schedule entries; config: configuration object
    (SkemaPackConfig-like); ConfigSet: the config section providing
    OutFile, GroupBy, RowSums and ColumnSums.
    """
    # open output file (codecs.open keeps this Python 2 friendly).
    Outfile = config.get( ConfigSet, 'OutFile' )
    f = codecs.open(Outfile, 'w', 'utf-8')
    f.write( Header )
    # get flags controlling grouping and the sum row/column.
    Headers = config.get( ConfigSet, 'GroupBy' )
    IncludeRowSums = config.getboolean( ConfigSet, 'RowSums' )
    IncludeColumnSums = config.getboolean( ConfigSet, 'ColumnSums' )
    # Drop empty entries from the comma-separated GroupBy list.
    HeadersList = filter(lambda x: x.strip(), Headers.split(','))
    # output all - filter elsewhere
    Html = HtmlTableOutput( Weeksums,
                            RowSums = IncludeRowSums, ColSums = IncludeColumnSums,
                            Headers = HeadersList )
    # save to html
    f.write( "<h2>Schedule showing all entries</h2><br />")
    f.write( Html )
    f.write( "<br />")
    # and close
    f.write( Footer )
    return
if __name__ == '__main__':
    # allow cfg file from cmd line
    if len(sys.argv) > 1:
        cfgfile = open( sys.argv[1] )
        config = SkemaPackConfig( cfgfile )
    else:
        # NOTE(review): config=None is presumably handled inside
        # ImportFile (it returns a config as well) -- confirm.
        config = None
    # # 1) read config/parameter
    ConfigSet = "ExportHtml"
    # 3) import from file (which might be stdin)
    Events, config = ImportFile( config, ConfigSet )
    # Python 2 print statement -- this script predates Python 3.
    print config
    # 4) output all events to html
    ExportHtml( Events, config )
| true |
897e9c86ff79a63f8f97760d5b22bd860248581f | Python | mdryden/110yards | /yards_py/domain/enums/position_type.py | UTF-8 | 5,175 | 2.59375 | 3 | [
"MIT"
] | permissive | from __future__ import annotations
from enum import Enum
from yards_py.core.logging import Logger
class PositionType(str, Enum):
    """Roster/player position identifiers.

    Subclasses ``str`` so members compare and serialize as their raw
    string value (e.g. PositionType.qb == "qb"). Covers real player
    positions (qb, rb, ...), roster slots (flex variants, bench) and
    reserve slots (ir, bye).
    """
    qb = "qb"
    rb = "rb"
    wr = "wr"
    k = "k"
    lb = "lb"
    dl = "dl"
    db = "db"
    ol = "ol"
    o_flex = "o-flex"
    d_flex = "d-flex"
    flex = "flex"
    ir = "ir"
    bye = "bye"
    bench = "bench"
    other = "other"

    @staticmethod
    def all():
        '''Returns a list of all PositionType items which are used in the system (excludes other and OL)'''
        return [e.name for e in PositionType if e not in [PositionType.other, PositionType.ol]]

    def display_name(self):
        """Human-readable label for the position.

        Falls back to the capitalized raw value for slot types without an
        explicit label (str.capitalize works because the enum subclasses
        str).
        """
        if self == PositionType.qb:
            return "Quarterback"
        if self == PositionType.rb:
            return "Running Back"
        if self == PositionType.wr:
            return "Receiver"
        if self == PositionType.k:
            return "Kicker"
        if self == PositionType.lb:
            return "Linebacker"
        if self == PositionType.dl:
            return "Defensive Lineman"
        if self == PositionType.db:
            return "Defensive Back"
        if self == PositionType.ol:
            return "Offensive Lineman"
        return self.capitalize()

    def is_active_position_type(self):
        """True unless the slot is a reserve slot (IR or bye)."""
        return self not in [PositionType.ir, PositionType.bye]

    def is_starting_position_type(self):
        """True for slots that count toward the starting lineup."""
        return self not in [PositionType.ir, PositionType.bye, PositionType.bench]

    def is_reserve_type(self):
        """True only for the two reserve slots (IR and bye)."""
        return self in [PositionType.ir, PositionType.bye]

    def is_eligible_for(self, position_type: PositionType):
        """Whether a roster slot of this type may hold a player whose
        position is ``position_type``.

        Reserve slots (IR/bye) and the bench accept anyone; the flex
        slots accept their respective position groups.
        """
        if self == PositionType.ir or self == PositionType.bye:
            return True
        if self == position_type:
            return True
        if self == PositionType.bench:
            return True
        if self == PositionType.o_flex:
            return position_type in [PositionType.rb, PositionType.wr, PositionType.k]
        if self == PositionType.d_flex:
            return position_type in [PositionType.lb, PositionType.dl, PositionType.db]
        if self == PositionType.flex:
            return position_type in [PositionType.k, PositionType.rb, PositionType.wr, PositionType.lb, PositionType.dl, PositionType.db]
        # Equality was already checked above, so this evaluates to False
        # for the remaining (concrete-position) slot types.
        return self == position_type

    @staticmethod
    def from_cfl_roster(abbreviation: str):
        """Map a CFL roster abbreviation onto one of our position types,
        folding specialist positions into their broader group; unknown
        abbreviations are logged and mapped to PositionType.other."""
        if abbreviation in ["DE", "DT"]:
            abbreviation = "DL"
        if abbreviation in ["OL", "LS", "G", "T", "OT"]:
            abbreviation = "ol"
        if abbreviation in ["P"]:
            abbreviation = "K"
        if abbreviation in ["FB"]:
            abbreviation = "RB"
        if abbreviation in ["SB", "TE"]:
            abbreviation = "WR"
        if abbreviation in ["S", "CB"]:
            abbreviation = "DB"
        try:
            return PositionType(abbreviation.lower())
        except Exception:
            Logger.warn(f"Encountered unknown position '{abbreviation}'")
            return PositionType.other
def get_position_type_config():
    """Static UI/config metadata for every roster slot type.

    Each entry carries: id (the enum value/name as a string), display
    name, whether it is a real player position, sort order, whether it is
    a reserve slot, a short label, and optionally max count, description
    and api_id.
    NOTE(review): the flex entries use ``.name`` ("o_flex"/"d_flex") for
    id while carrying the hyphenated enum value in ``api_id`` -- confirm
    consumers expect that distinction.
    """
    return [
        {"id": str(PositionType.qb.value), "display": "Quarterback", "is_player_position": True, "order": 0, "reserve": False, "short": "QB", "max": 1},
        {"id": str(PositionType.rb.value), "display": "Running Back", "is_player_position": True, "order": 10, "reserve": False, "short": "RB", "max": 1},
        {"id": str(PositionType.wr.value), "display": "Receiver", "is_player_position": True, "order": 20, "reserve": False, "short": "WR"},
        {"id": str(PositionType.o_flex.name), "display": "Offensive Flex", "is_player_position": False, "order": 25, "reserve": False, "short": "OFF",
         "description": "Accepts running backs, kickers and receivers", "api_id": "o-flex"},
        {"id": str(PositionType.k.value), "display": "Kicker", "is_player_position": True, "order": 30, "reserve": False, "short": "K", "max": 1},
        {"id": str(PositionType.dl.value), "display": "Defensive Line", "is_player_position": True, "order": 40, "reserve": False, "short": "DL"},
        {"id": str(PositionType.lb.value), "display": "Linebacker", "is_player_position": True, "order": 50, "reserve": False, "short": "LB"},
        {"id": str(PositionType.db.value), "display": "Defensive Back", "is_player_position": True, "order": 60, "reserve": False, "short": "DB"},
        {"id": str(PositionType.d_flex.name), "display": "Defensive Flex", "is_player_position": False, "order": 80, "reserve": False, "short": "DEF",
         "description": "Accepts linebacker, defensive line or defensive back", "api_id": "d-flex"},
        {"id": str(PositionType.flex.value), "display": "Flex", "is_player_position": False, "order": 90, "reserve": False, "short": "FX"},
        {"id": str(PositionType.bench.value), "display": "Bench", "is_player_position": False, "order": 100, "reserve": False, "short": "BN"},
        {"id": str(PositionType.bye.value), "display": "Bye", "is_player_position": False, "order": 110, "reserve": True, "short": "BYE"},
        {"id": str(PositionType.ir.value), "display": "Injury Reserve", "is_player_position": False, "order": 120, "reserve": True, "short": "IR"},
    ]
| true |
a7bdc5f443e2283d8a9f74483406c23c18a4c329 | Python | okingniko/AnomalyLogAnalyzer | /syslog_analyzer.py | UTF-8 | 2,745 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
''' This is a demo file for the Invariants Mining model.
API usage:
dataloader.load_syslog(): load syslog dataset
feature_extractor.fit_transform(): fit and transform features
feature_extractor.transform(): feature transform after fitting
model.fit(): fit the model
model.predict(): predict anomalies on given data
model.evaluate(): evaluate model accuracy with labeled data
'''
from detector import *
import time
import os
# struct_log = 'log_result/perf_50w.log_structured.csv' # The structured log file
# struct_log = 'log_result/auth.log_structured.csv' # The structured log file
# No label file is available for this unlabeled syslog dataset.
label_file = ''
epsilon = 0.5 # threshold for estimating invariant space
# Structured log files (log-parser output) to analyze.
struct_log_list = ['log_result/auth_mix.log_structured.csv']
if __name__ == '__main__':
    print("current pid", os.getpid())
    # time.sleep(20)
    for struct_log in struct_log_list:
        # `begin` is only used by the commented-out timing prints below.
        begin = time.time()
        print("begin parse file {}, time: {}".format(struct_log, time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(time.time()))))
        # Load structured log without label info
        save_file = struct_log.split("/")[1]
        x_train, x_test = load_syslog(struct_log,
                                      window='session',
                                      train_ratio=1.0,
                                      save_csv=True,
                                      save_file=save_file)
        # Feature extraction
        feature_extractor = preprocessing.FeatureExtractor()
        x_train, events = feature_extractor.fit_transform(x_train)
        # Model initialization and training (currently disabled).
        # model = InvariantsMiner(epsilon=epsilon)
        # model.fit(x_train, events)
        # print("Spent {} seconds".format(time.time() - begin))
        # print("finish parse file {}, time: {}".format(struct_log, time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(time.time()))))
        #
        # Predict anomalies on the training set offline, and manually check for correctness
        # print(y_train)
        #
        # Predict anomalies on the test set to simulate the online mode
        # x_test may be loaded from another log file
        # beginOnline = time.time()
        # print("Online: begin parse file {}, time: {}".format(struct_log, time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(time.time()))))
        # x_test = feature_extractor.transform(x_test)
        # y_test = model.predict(x_test)
        # print("Spend {} seconds".format(time.time() - beginOnline))
        # print("Online: finish parse file {}, time: {}".format(struct_log, time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(time.time()))))
        # print(y_test)
        #
cc4e08e49aa5e6b5d7b5afb19032f695683cfdd6 | Python | csz-git/python_repo | /project/scrapy/qingTingFM/qtController.py | UTF-8 | 2,774 | 2.9375 | 3 | [] | no_license | #coding=utf-8
from qtModel import *
from qtView import *
class QtController:
    """Controller of the QingTing FM downloader: drives the text-menu
    loop, delegating data access to QtModel and display to QtView."""

    # Initialize with the directory downloads are saved to.
    # downloadPath: download destination directory.
    def __init__(self, downloadPath):
        self.downloadPath = downloadPath
        self._qtView = QtView()
        self._qtModel = QtModel()

    # Input validation: parse the user's menu choice as an integer.
    # Returns the int on success; shows an error message and implicitly
    # returns None on failure.
    def input_check(self, input):
        try:
            newNumber = int(input)
        except ValueError:
            self._qtView.show_msg("Incorrect index '{}'".format(input))
        else:
            return newNumber

    # Page-number validation: True when pageIndex lies within
    # [1, pageCount] for the selected category.
    def page_index_check(self, typeIndex, pageIndex):
        if pageIndex > self._qtModel.type_list()[typeIndex]['pageCount'] or pageIndex <= 0:
            return False
        else:
            return True

    # Main interactive loop: category list -> story list -> catalog.
    def run(self):
        while True:
            # Show the category list.
            self._qtView.show_type_list(self._qtModel.type_list())
            # Category list - read the user's action ('q' quits).
            inputOne = self._qtView.choice_operate()
            if inputOne == 'q' or inputOne == 'Q': break
            # Story list of the chosen category, starting at page 1.
            pageIndex = 1
            inputOneCheck = self.input_check(inputOne)
            while True:
                self._qtView.show_story_list(self._qtModel.story_list(inputOneCheck, pageIndex))
                # Story list - read the user's action.
                inputTwo = self._qtView.choice_operate()
                if inputTwo == 'q' or inputTwo == 'Q': break # back to the parent menu
                if inputTwo == 'n' or inputTwo == 'N': # next page
                    pageIndex += 1
                    if self.page_index_check(inputOneCheck, pageIndex): continue
                    # User-facing Chinese message: "page number too large!!!"
                    self._qtView.show_msg('页码太大!!!')
                    pageIndex -= 1
                elif inputTwo == 'b' or inputTwo == 'B': # previous page
                    pageIndex -= 1
                    if self.page_index_check(inputOneCheck, pageIndex): continue
                    pageIndex += 1
                else: # show the selected story's catalog
                    inputTwoCheck = self.input_check(inputTwo)
                    while True:
                        self._qtView.show_story_catalog(self._qtModel.story_catalog(inputTwoCheck))
                        # Catalog list - read the user's action.
                        inputThree = self._qtView.choice_operate()
                        if inputThree == 'q' or inputThree == 'Q': break # back to the parent menu
                        # Any other key: download the whole story.
                        self._qtModel.download_story(self._qtModel.story_catalog(inputTwoCheck), self.downloadPath)
                        break
if __name__ == '__main__':
    # Save downloads to the desktop and start the interactive menu.
    qtContorller = QtController('/Users/apple/Desktop/')
    qtContorller.run()
| true |
dad48d93b26dca068ffe309070f763ef85da397b | Python | shrishyla/shrishyla | /labs111.py | UTF-8 | 239 | 2.953125 | 3 | [] | no_license | import speech_recognition as sr
r = sr.Recognizer()
with sr.Microphone() as source:
    # Calibrate the energy threshold against 5 seconds of background noise.
    r.adjust_for_ambient_noise(source, duration=5)
    print("say something")
    # Loop forever: capture one phrase and echo Google's transcription.
    # NOTE(review): recognize_google raises on unintelligible audio or
    # network failure -- unhandled here, so such an error ends the loop;
    # confirm that is intended.
    while True:
        audio=r.listen(source)
        print("you said"+r.recognize_google(audio))
7d26f0dd80e94258cc80da0d696019001868b01e | Python | ekkiii/gitpracticeEKKI | /gitpracticeEKKI.py | UTF-8 | 682 | 3.71875 | 4 | [] | no_license | # Partner 1 Name: Ekki Lu
# Partner 2 Name: Clyde Beuter
###############################
# Assignment Name: GitHub Practice - 2/26/20 - 20 pts
import random as rand
def getNRandom(n):
    '''takes in an integer and returns a list of n random integers between 1 and 10, inclusive'''
    # One randint draw per requested element.
    return [rand.randint(1, 10) for _ in range(n)]
def multiplyRandom(numbers):
    '''takes in a list of n numbers and returns the product of the numbers'''
    result = 1
    # Coerce each element to int before multiplying, as the original did.
    for value in numbers:
        result *= int(value)
    return result
def main():
    """Print the product of 10 random integers drawn from [1, 10]."""
    print(multiplyRandom(getNRandom(10)))

if __name__ == "__main__":
    main()
| true |
1717502f47c5f2e207784c36852c695887abb5d8 | Python | boukeversteegh/bitcoinbalance | /timecache.py | UTF-8 | 1,386 | 2.953125 | 3 | [] | no_license | import time
from cache import Cache, CacheException
class TimeCache(Cache):
    """A Cache whose entries expire after a fixed number of seconds.

    Values are stored together with the wall-clock time at which they
    were cached; reads raise CacheException once ``maxage`` seconds
    have elapsed.
    """

    def __init__(self, maxage):
        # Use super() consistently with the other methods (the original
        # mixed a direct Cache.__init__ call with super() elsewhere).
        super(TimeCache, self).__init__()
        self.maxage = maxage

    def getTSCache(self, *args):
        """Return (value, timestamp) for *args; raise CacheException("Expired")
        once the entry is older than maxage."""
        value, timestamp = super(TimeCache, self).getCache(*args)
        if time.time() > timestamp + self.maxage:
            raise CacheException("Expired")
        return value, timestamp

    def getCache(self, *args):
        """Return only the cached value, raising if it has expired."""
        return self.getTSCache(*args)[0]

    def setCache(self, value, *args):
        """Store value for *args, stamped with the current time."""
        timestamp = time.time()
        super(TimeCache, self).setCache((value, timestamp), *args)

    def getWait(self, *args):
        """Block until the current cached entry expires, then fetch fresh.

        If there is no valid cached entry the fetch happens immediately.
        Always returns a freshly computed value via getFresh().
        NOTE(review): getFresh is assumed to be provided by the Cache
        base class -- it is not visible in this file.
        """
        try:
            value, timestamp = self.getTSCache(*args)
            # Sleep out the remaining lifetime of the current entry.
            waittime = (timestamp + self.maxage) - time.time()
            if waittime > 0:
                time.sleep(waittime)
        except CacheException:
            # No valid entry -- nothing to wait for; fall through to an
            # immediate fresh fetch.
            pass
        return self.getFresh(*args)
if __name__ == "__main__":
    # Smoke-test demo (Python 2 print statements): entries expire after
    # 2 seconds.
    cache = TimeCache(2)
    import time, datetime
    print '--'
    # NOTE(review): cache.get / getFresh are assumed to come from the
    # Cache base class -- not visible in this file.
    print 'now:'.ljust(30), cache.get(time.time)
    #.sleep(3)
    print '1 second later, cached:'.ljust(30), cache.get(time.time)
    #print '1 second later, force wait:'.ljust(30), cache.getWait(time.time)
    #print 'datetime, force wait:'.ljust(30), cache.getWait(datetime.datetime.today)
    print 'datetime, force wait:'.ljust(30), cache.getWait(datetime.datetime.today)
| true |
0df7673230f46adecec42d0d383f8fd4a1a47a98 | Python | merveozgul/EDA-google-play-store-apps | /data-exploration.py | UTF-8 | 6,552 | 3.53125 | 4 | [] | no_license | import pandas as pd # data science essentials
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# --- Load the Google Play Store dataset and take a first look ---
file ='googleplaystore.csv'
apps = pd.read_csv(file)
#viewing the head of the data
with pd.option_context('display.max_rows', 50, 'display.max_columns', 50):
    print(apps.head())
print(apps.describe())
#which columns are numeric and which columns are not
apps.info()
# NOTE: the bare expressions below (shape, columns, unique, ...) only
# display output in an interactive session / notebook.
apps.shape
#displaying the all column names
apps.columns
#exploring the columns and unique values
apps['App'].unique()
#Comment: App refers to app name
apps['Category'].unique()
apps['Type'].unique()
#Comment: we can categorize the types of the apps with 0, 1 and 2
#Number of unique values in the genres
apps['Genres'].nunique()
#since there are 120 categories, it is better to leave the column as a string
#there are 1378 unique categories for the Last Updated column.
apps['Last Updated'].nunique()
apps['Current Ver'].nunique()
apps['Android Ver'].unique()
#we only have 1 column that is numeric. But we can convert Size, Installs and Price to
#numeric
#Changing the column Size to numeric
# NOTE(review): replacing 'M' with '000000' only scales whole numbers
# correctly -- "1.5M" becomes "1.5000000" (parsed as 1.5, not 1.5e6);
# confirm whether fractional sizes occur in the data.
apps['Size'] = apps['Size'].apply(lambda x: x.replace('M','000000'))
apps['Size'] = apps['Size'].apply(lambda x: x.replace('k','000'))
# Sentinel -4 marks "Varies with device".
apps['Size'] = apps['Size'].apply(lambda x: x.replace('Varies with device','-4'))
apps['Size'] = apps['Size'].apply(lambda x: x.replace('1,000+','1000'))
apps['Size'] = apps['Size'].astype(float)
#Changing Installs Column to numeric
apps['Installs'].unique()
#replacing + sign
apps['Installs'] = apps['Installs'].apply(lambda x: x.replace('+',''))
#replacing commas
apps['Installs'] = apps['Installs'].apply(lambda x: x.replace(',',''))
#replacing the value of Free to -1 (sentinel for a malformed row)
apps['Installs'] = apps['Installs'].apply(lambda x: x.replace('Free','-1'))
#changing type to float
apps['Installs'] = apps['Installs'].astype(float)
#alternative way to convert values to numeric
#apps['Rating'] = goog_play['Rating'].apply(pd.to_numeric, errors='coerce')
#apps['Reviews'] = goog_play['Reviews'].apply(pd.to_numeric, errors='coerce')
#Changing Price column to numeric
apps['Price'].unique()
apps['Price'] = apps['Price'].apply(lambda x: x.replace('$', ''))
#Replacing Everyone with -2 (sentinel for a malformed row)
apps['Price'] = apps['Price'].apply(lambda x: x.replace('Everyone', '-2'))
#Changing to numeric
apps['Price'] = apps['Price'].astype(float)
#Now we converted everything necessary to floats and integers. We can look at the
#distributions
#Checking for missing values
#Flagging missing values
print(
    apps.isnull()
    .any()
)
#We have missing values in the rating, type content rating, current vers and android version
#columns
apps.columns[apps.isnull().any()]
#Printing number of missing values for the columns that have at least one missing value
for col in apps.columns:
    if apps[col].isnull().any() :
        print(f"""{apps[col].name} : {apps[col].isnull().sum()}""")
# As a percentage of total observations (take note of the parenthesis)
print(
    round((apps.isnull().sum())
          /
          len(apps), 2)
)
apps.columns
#We can create some histograms
#apps.columns
# NOTE(review): np.object is deprecated in modern NumPy -- use `object`.
with pd.option_context('display.max_rows', 50, 'display.max_columns', 50):
    print(apps.describe(include=[np.object]))
#Dropping the missing values and looking at the distribution
apps_dropped = apps.dropna()
apps_dropped.columns
#Plot for Rating ('fd' = Freedman-Diaconis bin rule)
plt.hist(apps_dropped['Rating'], bins='fd', color='green')
plt.title("Rating")
plt.xlabel("Value")
plt.ylabel("Frequency")
#Plot for Price
plt.hist(apps_dropped['Price'], color='blue')
plt.title("Price")
plt.xlabel("Value")
plt.ylabel("Frequency")
#Plot for Size
plt.hist(apps_dropped['Size'], bins='fd')
plt.title("Size")
plt.xlabel("Value")
plt.ylabel("Frequency")
#Plot for Installs
plt.hist(apps_dropped['Installs'])
plt.title("Installs")
plt.xlabel("Value")
plt.ylabel("Frequency")
#Plotting with Seaborn
sns.distplot(apps_dropped['Installs'])
sns.distplot(apps_dropped['Size'])
sns.distplot(apps_dropped['Price'])
sns.distplot(apps_dropped['Rating'])
#Looking for the categorical variables distribution
#Type
type_plot = sns.countplot(x="Type", data=apps_dropped)
#Count plot for Category
category_plot = sns.countplot(x="Category", data=apps_dropped)
sns.set_style("ticks", {"xtick.major.size":5, "ytick.major.size":7})
#sns.set_context("notebook", font_scale=0.5, rc={"lines.linewidth":0.3})
#improving the figure size
sns.set(rc={'figure.figsize':(11.7,8.27)})
#rotating the xtick labels
for item in category_plot.get_xticklabels():
    item.set_rotation(90)
plt.savefig("category.jpeg")
sns.countplot(x="Genres", data=apps_dropped)
#genres; it doesnt make sense to plot a countplot for genre because it has
#too many different genres, in fact 115 unique genres, when we run the code below
apps_dropped["Genres"].nunique()
#We can subset the Genres by setting some tresholds
#First we can look at the genres that have the most number of apps
genres = apps_dropped.Genres.value_counts().sort_values(ascending=False)
#Now creating a df, order the number of apps we can subset
# NOTE(review): this is an alias, not a copy -- mutations would affect
# apps_dropped as well.
apps_dropped_copy = apps_dropped
gdf = apps_dropped_copy.groupby("Genres").count().sort_values(by = "App", ascending=False)
#changing index to a column
gdf.reset_index(level=0, inplace=True)
gdf.columns
#looking at the distribution of number of apps per genre
gda_p = sns.distplot(gdf["App"])
gda_p.set_title("Number of Apps per Genre")
gda_p.set_xlabel("Number of Apps")
#Genres that have more than 50 apps
gdf_hi = gdf[gdf["App"] > 50]
#now we can plot with a barplot: which genres have the most number of apps
genre_plot = sns.barplot(x="Genres", y= "App", data=gdf_hi)
sns.set_style("ticks", {"xtick.major.size":5, "ytick.major.size":7})
#sns.set_context("notebook", font_scale=0.5, rc={"lines.linewidth":0.3})
#improving the figure size
sns.set(rc={'figure.figsize':(11.7,8.27)})
#rotating the xtick labels
for item in genre_plot.get_xticklabels():
    item.set_rotation(90)
#genres that has low number of apps
gdf_low = gdf[gdf["App"] < 5]
genre_plot = sns.barplot(x="Genres", y= "App", data=gdf_low)
sns.set_style("ticks", {"xtick.major.size":5, "ytick.major.size":7})
#sns.set_context("notebook", font_scale=0.5, rc={"lines.linewidth":0.3})
#improving the figure size
sns.set(rc={'figure.figsize':(11.7,8.27)})
#rotating the xtick labels
for item in genre_plot.get_xticklabels():
    item.set_rotation(90)
#Looking at the relationships
plot_1=sns.stripplot(x="Installs", y="Price", data=apps_dropped, hue="Type")
for item in plot_1.get_xticklabels():
    item.set_rotation(90)
404ddd05f8fb6d6dffcec06c5c621fbcf67c9795 | Python | aminnj/makers | /disMaker/db.py | UTF-8 | 6,624 | 2.71875 | 3 | [] | no_license | import sqlite3
import pickle
class DBInterface():
    """Small sqlite3-backed store for dataset 'sample' records.

    NOTE(review): this class is Python 2 only -- it references the
    ``unicode`` builtin and uses tuple-unpacking lambda parameters.
    NOTE(review): insert/update/fetch build SQL by interpolating quoted
    values into the statement, so they are not safe against untrusted
    input; only is_already_in_table uses bound '?' parameters.
    """
    def __init__(self, fname="main.db"):
        # Open (or create) the database file and keep one shared cursor.
        self.connection = sqlite3.connect(fname)
        self.cursor = self.connection.cursor()
        # Schema of the 'sample' table: (column name, SQL type).
        self.key_types = [
                ("sample_id", "INTEGER PRIMARY KEY"),
                ("timestamp", "INTEGER"),
                ("sample_type", "VARCHAR(30)"),
                ("twiki_name", "VARCHAR(60)"),
                ("dataset_name", "VARCHAR(250)"),
                ("location", "VARCHAR(300)"),
                ("filter_type", "VARCHAR(20)"),
                ("nevents_in", "INTEGER"),
                ("nevents_out", "INTEGER"),
                ("filter_eff", "FLOAT"),
                ("xsec", "FLOAT"),
                ("kfactor", "FLOAT"),
                ("gtag", "VARCHAR(40)"),
                ("cms3tag", "VARCHAR(40)"),
                ("baby_tag", "VARCHAR(40)"),
                ("analysis", "VARCHAR(30)"),
                ("assigned_to", "VARCHAR(30)"),
                ("comments", "VARCHAR(600)"),
                ]

    def drop_table(self):
        # Remove the 'sample' table entirely (no-op if it does not exist).
        self.cursor.execute("drop table if exists sample")

    def make_table(self):
        # Create the 'sample' table from the schema in self.key_types.
        sql_cmd = "CREATE TABLE sample (%s)" % ",".join(["%s %s" % (key, typ) for (key, typ) in self.key_types])
        self.cursor.execute(sql_cmd)
        # import time
        # print time.strftime('%Y-%m-%d %H:%M:%S')

    def make_val_str(self, vals):
        # Quote strings, stringify everything else, for use in raw SQL.
        return map(lambda x: '"%s"' % x if type(x) in [str,unicode] else str(x), vals)

    def do_insert_dict(self, d):
        # provide a dict to insert into the table
        keys, vals = zip(*d.items())
        key_str = ",".join(keys)
        val_str = ",".join(self.make_val_str(vals))
        sql_cmd = "insert into sample (%s) values (%s);" % (key_str, val_str)
        self.cursor.execute(sql_cmd)

    def do_update_dict(self, d, idx):
        # provide a dict and index (sample_id) to update
        keys, vals = zip(*d.items())
        val_strs = self.make_val_str(vals)
        set_str = ",".join(map(lambda (x,y): "%s=%s" % (x,y), zip(keys, val_strs)))
        sql_cmd = "update sample set %s where sample_id=%i" % (set_str, idx)
        self.cursor.execute(sql_cmd)

    def do_delete_dict(self, d, idx):
        # delete the row with the given sample_id (the dict is unused).
        sql_cmd = "delete from sample where sample_id=%i" % (idx)
        self.cursor.execute(sql_cmd)

    def is_already_in_table(self, d):
        # provide a dict and this will use appropriate keys to see if it's already in the database
        # this returns an ID (non-zero int) corresponding to the row matching the dict
        dataset_name, sample_type, cms3tag = d.get("dataset_name",""), d.get("sample_type",""), d.get("cms3tag","")
        baby_tag, analysis = d.get("baby_tag",""), d.get("analysis","")
        # Match on the extended key when baby/analysis info is present.
        if baby_tag or analysis:
            sql_cmd = "select sample_id from sample where dataset_name=? and sample_type=? and cms3tag=? and baby_tag=? and analysis=? limit 1"
            self.cursor.execute(sql_cmd, (dataset_name, sample_type, cms3tag, baby_tag, analysis))
        else:
            sql_cmd = "select sample_id from sample where dataset_name=? and sample_type=? and cms3tag=? limit 1"
            self.cursor.execute(sql_cmd, (dataset_name, sample_type, cms3tag))
        return self.cursor.fetchone()

    def read_to_dict_list(self, query):
        # return list of sample dictionaries (one dict per row, keyed by
        # column name).
        # NOTE(review): the query is executed twice; the second execute
        # looks redundant since cursor.description is already populated.
        self.cursor.execute(query)
        col_names = [e[0] for e in self.cursor.description]
        self.cursor.execute(query)
        toreturn = []
        for r in self.cursor.fetchall():
            toreturn.append( dict(zip(col_names, r)) )
        return toreturn

    def update_sample(self, d):
        # provide dictionary, and this will update sample if it already exists, or insert it
        if not d: return False
        if self.unknown_keys(d): return False
        # totally ignore the sample_id (it is assigned by the database)
        if "sample_id" in d: del d["sample_id"]
        already_in = self.is_already_in_table(d)
        if already_in: self.do_update_dict(d, already_in[0])
        else: self.do_insert_dict(d)
        self.connection.commit()
        return True

    def delete_sample(self, d):
        # provide dictionary; deletes the matching sample if it exists and
        # returns True, otherwise returns False
        if not d: return False
        if self.unknown_keys(d): return False
        # totally ignore the sample_id
        if "sample_id" in d: del d["sample_id"]
        already_in = self.is_already_in_table(d)
        if already_in:
            self.do_delete_dict(d, already_in[0])
            self.connection.commit()
            return True
        return False

    def fetch_samples_matching(self, d):
        # provide dictionary and this will find samples with matching key-value pairs
        if not d: return []
        if self.unknown_keys(d): return []
        # sanitize wildcards: '*' from the caller becomes SQL LIKE's '%'
        for k in d:
            if type(d[k]) in [str,unicode] and "*" in d[k]:
                d[k] = d[k].replace("*","%")
        keys, vals = zip(*d.items())
        val_strs = self.make_val_str(vals)
        # Use LIKE instead of '=' whenever the value carries a wildcard.
        def need_wildcard(y):
            return ("%" in y) or ("[" in y) or ("]" in y)
        set_str = " and ".join(map(lambda (x,y): "%s %s %s" % (x,'like' if need_wildcard(y) else '=', y), zip(keys, val_strs)))
        sql_cmd = "select * from sample where %s" % (set_str)
        return self.read_to_dict_list(sql_cmd)

    def unknown_keys(self, d):
        # returns True if there are keys not present in the table schema
        unknown_keys = list(set(d.keys()) - set([kt[0] for kt in self.key_types]))
        if len(unknown_keys) > 0:
            # print "I don't recognize the keys: %s" % ", ".join(unknown_keys)
            return True
        else: return False

    def close(self):
        # Close the underlying sqlite connection.
        self.connection.close()
if __name__=='__main__':
    pass
    # Run the companion test module as a quick self-check (Python 2
    # print statement below).
    import db_tester
    if db_tester.do_test():
        print "Calculations correct"
    # db = DBInterface(fname="allsamples.db")
    # print db.is_already_in_table({
    #         "dataset_name": "/VBF_HToZZTo4L_M125_14TeV_powheg2_JHUgenV702_pythia8/PhaseIITDRSpring17MiniAOD-noPU_91X_upgrade2023_realistic_v3-v1/MINIAODSIM",
    #         "sample_type": "CMS3",
    #         "cms3tag": "CMS4_V00-00-03",
    #         })
    # tchi = db.fetch_samples_matching({"dataset_name":"/TChiNeu_mChi-300_mLSP-290_step1/namin-TChiNeu_mChi-300_mLSP-290_step2_miniAOD-eb69b0448a13fda070ca35fd76ab4e24/USER"})
    # tchi = db.fetch_samples_matching({"dataset_name":"/TChi%/namin-TChi%/USER"})
    # tchi = db.fetch_samples_matching({"dataset_name":"/GJets_HT-4*/*/*"})
    # print tchi
    # db.close()
| true |
c55bcf800bda436793297f614f94192ba0a8d404 | Python | shuq3/CNN | /read_image.py | UTF-8 | 6,139 | 2.640625 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
import os
import tensorflow as tf
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
class DataGenerator:
    def __init__(self, filepath, mode, batch_size, num_classes):
        # Convert the raw images under `filepath` to TFRecord shards, then
        # build the input queue yielding (images, labels) batches.
        # mode: 'train' or 'test' -- selects the shard filename prefix.
        self.write_to_tfrecord(filepath, mode)
        self.read_from_tfrecord(batch_size, num_classes, mode)
    def write_to_tfrecord(self, filepath, mode):
        """Serialize every image under filepath/<camera>/ into TFRecord
        shards of at most 1000 examples, and record the total count in
        self.data_size."""
        with tf.name_scope("write_image"):
            # Camera-model class names.
            # NOTE(review): the dict's 0-4 values are never used as labels
            # below -- the label is enumerate()'s index over the dict keys
            # (insertion order on Python 3.7+); confirm they agree.
            classes={'Kodak_M1063':0,
                     'Casio_EX-Z150':1,
                     'Nikon_CoolPixS710':2,
                     'Olympus_mju_1050SW':3,
                     'Pentax_OptioA40':4}
            # Maximum number of images stored per shard file.
            bestnum = 1000
            # Image counter within the current shard.
            num = 0
            # Index of the current TFRecord shard file.
            recordfilenum = 0
            # Output directory for the TFRecord shards.
            tf_filepath = 'H:\\shuqian\\resize\\code\\5_resize_tfrecord\\'
            if mode == 'train':
                ftrecordfilename = ('train_image.tfrecords_%.2d' % recordfilenum)
            else:
                ftrecordfilename = ('test_image.tfrecords_%.2d' % recordfilenum)
            writer= tf.python_io.TFRecordWriter(tf_filepath+ftrecordfilename)
            for index, name in enumerate(classes):
                class_path = filepath + name + '\\'
                for img_name in os.listdir(class_path):
                    num = num + 1
                    img_path=class_path+img_name # full path of this image
                    # Roll over to the next shard file once full.
                    if num > bestnum:
                        num = 1
                        recordfilenum = recordfilenum + 1
                        # New shard filename.
                        if mode == 'train':
                            ftrecordfilename = ('train_image.tfrecords_%.2d' % recordfilenum)
                        else:
                            ftrecordfilename = ('test_image.tfrecords_%.2d' % recordfilenum)
                        writer= tf.python_io.TFRecordWriter(tf_filepath+ftrecordfilename)
                    # Load the image file.
                    img=Image.open(img_path)
                    img_raw=img.tobytes() # convert the image to raw bytes
                    example = tf.train.Example(features=tf.train.Features(feature={
                            # value=[index] determines the image's class label.
                            'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[index])),
                            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
                        })) # the Example wraps the label and image data together
                    writer.write(example.SerializeToString()) # serialize to string
            writer.close()
            # Total number of examples written across all shards.
            self.data_size = recordfilenum*bestnum + num
            print (mode, self.data_size)
def read_from_tfrecord(self, batch_size, num_classes, mode):
with tf.name_scope("read_image"):
if mode == 'train':
files = tf.train.match_filenames_once('H:\\shuqian\\resize\\code\\5_resize_tfrecord\\train_image.tfrecords*')
else:
files = tf.train.match_filenames_once('H:\\shuqian\\resize\\code\\5_resize_tfrecord\\test_image.tfrecords*')
filename_queue = tf.train.string_input_producer(files, shuffle=True) #读入流中
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue) #返回文件名和文件
features = tf.parse_single_example(serialized_example,
features={
'label': tf.FixedLenFeature([], tf.int64),
'img_raw' : tf.FixedLenFeature([], tf.string),
}) #取出包含image和label的feature对象
#tf.decode_raw可以将字符串解析成图像对应的像素数组
image = tf.decode_raw(features['img_raw'], tf.uint8)
image = tf.reshape(image, [64,64,3])
image = tf.cast(image, tf.float32)
image = tf.image.per_image_standardization(image)
label = tf.cast(features['label'], tf.int32)
if mode == 'train':
example_queue = tf.RandomShuffleQueue(
# 队列容量
capacity = 150 * batch_size,
# 队列数据的最小容许量
min_after_dequeue = 120* batch_size,
dtypes = [tf.float32, tf.int32],
# 图片数据尺寸,标签尺寸
shapes = [[64, 64, 3], ()])
# 读线程的数量
num_threads = 10
else:
example_queue = tf.RandomShuffleQueue(
# 队列容量
capacity = 100 * batch_size,
# 队列数据的最小容许量
min_after_dequeue = 90 * batch_size,
dtypes=[tf.float32, tf.int32],
shapes=[[64, 64, 3], ()])
# 读线程的数量
num_threads = 1
# 数据入队操作
example_enqueue_op = example_queue.enqueue([image, label])
# 队列执行器
tf.train.add_queue_runner(tf.train.queue_runner.QueueRunner(
example_queue, [example_enqueue_op] * num_threads))
# 数据出队操作,从队列读取Batch数据
images, labels = example_queue.dequeue_many(batch_size)
# 将标签数据由稀疏格式转换成稠密格式
# [ 2, [[0,1,0,0,0]
# 4, [0,0,0,1,0]
# 3, --> [0,0,1,0,0]
# 5, [0,0,0,0,1]
# 1 ] [1,0,0,0,0]]
labels = tf.reshape(labels, [batch_size, 1])
indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
labels = tf.sparse_to_dense(
tf.concat(values=[indices, labels], axis=1),
[batch_size, num_classes], 1.0, 0.0)
#检测数据维度
assert len(images.get_shape()) == 4
assert images.get_shape()[0] == batch_size
assert images.get_shape()[-1] == 3
assert len(labels.get_shape()) == 2
assert labels.get_shape()[0] == batch_size
assert labels.get_shape()[1] == num_classes
self.images = images
self.labels = labels
| true |
b53547088bd1df9661b7f1923aaea6bd796e91f4 | Python | L-Ramos/MrClean_Poor | /plots_visualization.py | UTF-8 | 2,623 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 10 11:50:51 2019
@author: laramos
"""
#Creating nice plots
import seaborn as sns
import matplotlib.pyplot as plt
# Attach the outcome column (mRS score) to the feature frame.
# NOTE(review): `frame` and `Y_mrs` are defined elsewhere in this script/session.
frame['mrs'] = Y_mrs

def plot_box(var):
    """Box-plot a feature split by outcome (mRS >= 5 = poor vs 0-4 = good).

    NOTE(review): the `var` argument is immediately overwritten with
    'rr_syst' below, so the parameter is effectively ignored — confirm intent.
    """
    sum_poor = list()   # feature values for poor-outcome patients (mRS >= 5)
    sum_good = list()   # feature values for good-outcome patients (mRS 0-4)
    sum_nan = list()    # feature values for patients with missing outcome
    var = 'rr_syst'
    for i in range(0, frame.shape[0]):
        if np.isnan(frame['mrs'].iloc[i]):
            sum_nan.append(frame[var].iloc[i])
        else:
            if frame['mrs'].iloc[i] >= 5:
                sum_poor.append(frame[var].iloc[i])
            else:
                sum_good.append(frame[var].iloc[i])
    # Side-by-side box plot of the two outcome groups.
    df_poor = pd.DataFrame(sum_poor, columns=['mRS 5-6'])
    df_good = pd.DataFrame(sum_good, columns=['mRS 0-4'])
    df_plot = pd.concat([df_poor, df_good], axis=1)
    sns.set(font_scale=2)
    plt.figure(figsize=(15, 2))
    ax = sns.boxplot(data=(df_plot), orient="h", color='b').set_ylabel('time to groin', fontsize=20)
    # Separate plots per group.
    # NOTE(review): y-labels say 'AGE' although the plotted variable is rr_syst.
    sns.set(font_scale=2)
    plt.figure(figsize=(15, 2))
    ax = sns.boxplot(data=(sum_poor), orient="h", color='b').set_ylabel('AGE 5-6', fontsize=20)
    sns.set(font_scale=2)
    plt.figure(figsize=(15, 2))
    ax = sns.boxplot(data=(sum_good), orient="h", color='b').set_ylabel('AGE 0-4', fontsize=20)
# --- Whole-cohort box plots for individual baseline variables ---
sns.set(font_scale=2)
plt.figure(figsize=(15, 2))
ax = sns.boxplot(data=(frame['age']), orient="h", color='b').set_ylabel('Age', fontsize=20)
plt.figure(figsize=(15, 2))
ax = sns.boxplot(data=(frame['ASPECTS_BL']), orient="h", color='r').set_ylabel('ASPECTS', fontsize=20)
plt.figure(figsize=(15, 2))
ax = sns.boxplot(data=(frame['NIHSS_BL']), orient="h", color='g').set_ylabel('NIHSS', fontsize=20)
plt.figure(figsize=(15, 2))
ax = sns.boxplot(data=(frame['togroin']), orient="h", color='k').set_ylabel('Time to Groin', fontsize=20)
plt.figure(figsize=(15, 2))
ax = sns.boxplot(data=(frame['rr_syst']), orient="h", color='c').set_ylabel('Systolic Blood Pressure', fontsize=20)

import numpy as np
import matplotlib.pyplot as plt

# --- Bar chart of specificity values with asymmetric confidence intervals ---
barWidth = 0.3
# Point estimates and their (lower, upper) confidence bounds.
spec = np.array([0.94, 0.93, 0.96, 0.96, 0.96])
ci = np.array([[0.93, 0.96], [0.89, 0.96], [0.95, 0.97], [0.94, 0.97], [0.95, 0.98]])
# Error bars as distances below/above each point estimate.
yerr = np.c_[spec - ci[:, 0], ci[:, 1] - spec].T
plt.bar(range(len(spec)), spec, yerr=yerr)
plt.xticks(range(len(spec)))
plt.show()
# NOTE(review): this variant uses (estimate - upper bound) as a symmetric
# error, which is negative — likely experimental/leftover code.
y_r = [spec[i] - ci[i][1] for i in range(len(ci))]
plt.bar(range(len(spec)), spec, yerr=y_r, alpha=0.2, align='center')
plt.xticks(range(len(spec)), [str(year) for year in range(1992, 1996)])
plt.show()
# Grouped-bar positioning (second group offset by barWidth).
r1 = np.arange(len(spec))
r2 = [x + barWidth for x in r1]
plt.bar(r1, spec, width=barWidth, color='blue', edgecolor='black', yerr=yerr, capsize=7, label='poacee')
fde35ac5ceafec6719de6e4e064e823a294d6637 | Python | jmackraz/baker-house | /src/skill/lambda/custom/house_lambda.py | UTF-8 | 15,783 | 2.53125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""
Based on Skills SDK example
The Intent Schema, Custom Slots, and Sample Utterances for this skill, as well
as testing instructions are located at http://amzn.to/1LzFrj6
For additional samples, visit the Alexa Skills Kit Getting Started guide at
http://amzn.to/1LGWsLG
"""
from __future__ import print_function
import logging
import boto3
import json
from os import environ
from sys import stdout
# Module-level logger wired to stdout so output shows up in CloudWatch.
# NOTE(review): the first formatter is immediately replaced by the second
# (levelname-only) formatter; the first assignment is dead.
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(levelname)s - %(message)s')
handler = logging.StreamHandler(stdout)
handler.setFormatter(formatter)
#logging.basicConfig(format=formatter)
log = logging.getLogger(__name__)
log.addHandler(handler)
log.setLevel(logging.DEBUG)

# IoT thing name needs to come from the environment (set up in handler main)
# Placeholder until lambda_handler reads BAKERHOUSE_IOT_THING.
thing_name = 'NOT_SET'
# --------------- Helpers that build all of the responses ----------------------
def build_response(session_attributes, title, output, reprompt_text, should_end_session):
    """Assemble a complete Alexa response envelope.

    Wraps `output` in a PlainText speechlet plus a simple card, optionally
    adds a reprompt (only when `reprompt_text` is not None), and returns the
    full versioned response dict with the given session attributes.
    """
    if should_end_session:
        log.debug("ENDING SESSION")
    else:
        log.debug("KEEPING SESSION OPEN")

    speechlet = {
        'outputSpeech': {'type': 'PlainText', 'text': output},
        'card': {
            'type': 'Simple',
            'title': "SessionSpeechlet - " + title,
            'content': "SessionSpeechlet - " + output,
        },
        'shouldEndSession': should_end_session,
    }
    if reprompt_text is not None:
        speechlet['reprompt'] = {
            'outputSpeech': {'type': 'PlainText', 'text': reprompt_text},
        }

    return {
        'version': '1.0',
        'sessionAttributes': session_attributes,
        'response': speechlet,
    }
def numeric_slot_value(slot):
    """Return the slot's value parsed as an int.

    Returns None when the slot is absent or carries the unknown marker "?".
    """
    if slot is None:
        return None
    raw = slot['value']
    if raw == "?":
        return None
    return int(raw)
def string_slot_value(slot):
    """Return the slot's raw string value, or None when the slot is absent."""
    #log.debug("string_slot_value - slot: %s", slot)
    if slot is None:
        log.debug("string_slot_value - slot is None")
        return None
    log.debug("string_slot_value - slot: %s value: %s", slot['name'], slot['value'])
    return slot['value']
def validated_slot_value(slot):
    """Return the first entity-resolution match name for a validated slot.

    Scans the slot's resolutionsPerAuthority entries and returns the name of
    the first resolved value found; None when the slot is absent or nothing
    resolved.
    """
    if slot is None:
        return None
    authorities = slot['resolutions']['resolutionsPerAuthority']
    matched = next((auth for auth in authorities if 'values' in auth), None)
    if matched is None:
        return None
    return matched['values'][0]['value']['name']
def intent_slot(intent, slot_name):
    """Return the named slot dict when it exists and carries a value, else None."""
    slot = intent['slots'].get(slot_name)
    if slot is not None and 'value' in slot:
        return slot
    return None
# --------------- INTENTS ------------------
# Reusable (re)prompt texts shared across the intent handlers below.
general_prompt = "You can select and input source or change the volume."
input_select_prompt = "You can select an input source by saying, select input sonos"
volume_level_prompt = "You can select the volume level by saying, volume 40"
def welcome_intent(session_attributes):
    """Handle a bare launch (or help request): greet the user and keep the
    session open by marking it in the session attributes."""
    session_attributes['baker_is_open'] = True
    return build_response(
        session_attributes,
        "Welcome",
        "Hi.",
        general_prompt,
        False,  # keep the session open for a follow-up command
    )
def cancel_intent(session_attributes):
    """Handle cancel/stop: say goodbye and end the session with cleared attributes."""
    return build_response({}, "Session Ended", "Bye.", None, True)
def keep_baker_open(session_attributes):
    """True when an interactive session was opened via the welcome intent."""
    if 'baker_is_open' in session_attributes:
        return session_attributes['baker_is_open']
    return False
def select_input(intent, session_attributes):
    """Select the receiver's input source.

    Reads the validated 'input_selection' slot and, when resolved, writes the
    desired input into the IoT thing shadow so the device-side agent applies it.
    """
    card_title = intent['name']
    # End the session unless the user explicitly opened an interactive session.
    should_end_session = not keep_baker_open(session_attributes)
    input_selection = None
    slot = intent_slot(intent, 'input_selection')
    input_selection = validated_slot_value(slot)
    if input_selection is not None:
        log.debug("inputs selection value: %s", input_selection)
        speech_output = "Setting input source to {}".format(input_selection)
        #reprompt_text = volume_level_prompt
        reprompt_text = ""
        # update the IoT device shadow
        payload = json.dumps({'state': {'desired': {'input': input_selection}}})
        client = boto3.client('iot-data')
        client.update_thing_shadow(thingName=thing_name, payload=payload)
    else:
        # Slot missing or failed entity resolution.
        speech_output = "I didn't understand your selection. Please try again."
        reprompt_text = ""
    return build_response(session_attributes, card_title, speech_output, reprompt_text, should_end_session)
def power_control(intent, session_attributes):
    """change desired power state to on or off

    NOTE(review): DEAD CODE — this definition is shadowed by the second
    `power_control` defined immediately below, and its body is a copy-paste
    of `select_input` (it handles the 'input_selection' slot, not power).
    Candidate for deletion.
    """
    card_title = intent['name']
    should_end_session = not keep_baker_open(session_attributes)
    input_selection = None
    slot = intent_slot(intent, 'input_selection')
    input_selection = validated_slot_value(slot)
    if input_selection is not None:
        log.debug("inputs selection value: %s", input_selection)
        speech_output = "Setting input source to {}".format(input_selection)
        #reprompt_text = volume_level_prompt
        reprompt_text = ""
        # update the IoT device shadow
        payload = json.dumps({'state': {'desired': {'input': input_selection}}})
        client = boto3.client('iot-data')
        client.update_thing_shadow(thingName=thing_name, payload=payload)
    else:
        speech_output = "I didn't understand your selection. Please try again."
        reprompt_text = ""
    return build_response(session_attributes, card_title, speech_output, reprompt_text, should_end_session)
def power_control(intent, session_attributes):
    """Change the receiver's desired power state to on or off.

    Reads the validated 'power_state' slot, writes the desired power state
    into the IoT thing shadow, and ends the session when powering off.
    (This definition shadows the earlier, mislabelled `power_control` above.)
    """
    card_title = intent['name']
    should_end_session = not keep_baker_open(session_attributes)
    power_state = None
    slot = intent_slot(intent, 'power_state')
    power_state = validated_slot_value(slot)
    if power_state is not None:
        log.debug("power state: %s", power_state)
        speech_output = "Turning power {}".format(power_state)
        reprompt_text = ""
        # update the IoT device shadow
        payload = json.dumps({'state': {'desired': {'power': power_state}}})
        client = boto3.client('iot-data')
        client.update_thing_shadow(thingName=thing_name, payload=payload)
        # Turning the device off also ends the voice session.
        if power_state == 'off':
            should_end_session = True
    else:
        speech_output = "I didn't understand your selection. Please try again."
        reprompt_text = ""
    return build_response(session_attributes, card_title, speech_output, reprompt_text, should_end_session)

# NOTE(review): stray leftover statement; has no effect.
pass
def _get_volume_level():
    """Read the currently *reported* volume level from the IoT thing shadow.

    NOTE(review): an identical duplicate of this function appears further
    down in this file; the later one shadows this one (same behavior).
    """
    client = boto3.client('iot-data')
    response = client.get_thing_shadow(thingName=thing_name)
    streamingBody = response["payload"]
    shadow_state = json.loads(streamingBody.read())
    log.debug("shadow_state: %s", shadow_state)
    return int(shadow_state['state']['reported']['volume'])
def query_volume(intent, session_attributes):
    """query current volume from the thing shadow, and set an adjusted level

    NOTE(review): DEAD CODE — shadowed by the later `query_volume` below.
    This body is a corrupted merge: it reads the volume level, then falls
    into a copy of the `power_control` body and discards the volume message.
    Candidate for deletion.
    """
    card_title = intent['name']
    should_end_session = not keep_baker_open(session_attributes)
    current_volume_level = _get_volume_level()
    speech_output = "The volume level is {}".format(current_volume_level)
    # NOTE(review): everything from here down duplicates power_control.
    should_end_session = not keep_baker_open(session_attributes)
    power_state = None
    slot = intent_slot(intent, 'power_state')
    power_state = validated_slot_value(slot)
    if power_state is not None:
        log.debug("power state: %s", power_state)
        speech_output = "Turning power {}".format(power_state)
        reprompt_text = ""
        # update the IoT device shadow
        payload = json.dumps({'state': {'desired': {'power': power_state}}})
        client = boto3.client('iot-data')
        client.update_thing_shadow(thingName=thing_name, payload=payload)
        if power_state == 'off':
            should_end_session = True
    else:
        speech_output = "I didn't understand your selection. Please try again."
        reprompt_text = ""
    return build_response(session_attributes, card_title, speech_output, reprompt_text, should_end_session)

# NOTE(review): stray leftover statement; has no effect.
pass
def _get_volume_level():
    """Read the currently *reported* volume level from the IoT thing shadow.

    NOTE(review): byte-identical duplicate of the earlier `_get_volume_level`;
    this later definition is the one in effect. One copy should be removed.
    """
    client = boto3.client('iot-data')
    response = client.get_thing_shadow(thingName=thing_name)
    streamingBody = response["payload"]
    shadow_state = json.loads(streamingBody.read())
    log.debug("shadow_state: %s", shadow_state)
    return int(shadow_state['state']['reported']['volume'])
def query_volume(intent, session_attributes):
    """Speak the receiver's current volume level, read from the thing shadow."""
    end_session = not keep_baker_open(session_attributes)
    level = _get_volume_level()
    return build_response(
        session_attributes,
        intent['name'],
        "The volume level is {}".format(level),
        "",
        end_session,
    )
def relative_volume(intent, session_attributes):
    """Raise or lower the volume relative to the current shadow-reported level.

    Uses the optional 'volume_level_change' slot (default step 10) and the
    'raise_lower' slot for direction, then writes the new desired volume to
    the thing shadow.
    """
    card_title = intent['name']
    should_end_session = not keep_baker_open(session_attributes)
    current_volume_level = _get_volume_level()
    # change by how much?
    volume_change_slot = intent_slot(intent, 'volume_level_change')
    if volume_change_slot is None:
        volume_change = 10  # TODO: allow sticky override of hardcoded default
    else:
        volume_change = numeric_slot_value(volume_change_slot)
    # raise or lower volume?
    raise_lower_slot = intent_slot(intent, 'raise_lower')
    if raise_lower_slot is None:
        # Direction is required; bail out with a non-committal reply.
        return build_response(session_attributes, card_title, "Hm.", None, should_end_session)
    rl_val = string_slot_value(raise_lower_slot)
    log.debug("rl_val: %s", rl_val)
    if rl_val == 'lower':
        volume_change = -volume_change
    # set volume level
    volume_level = current_volume_level + volume_change
    payload = json.dumps({'state': {'desired': {'volume': volume_level}}})
    client = boto3.client('iot-data')
    client.update_thing_shadow(thingName=thing_name, payload=payload)
    speech_output = "Changing volume level by {}, to {}".format(volume_change, volume_level)
    reprompt_text = ""
    return build_response(session_attributes, card_title, speech_output, reprompt_text, should_end_session)
def set_volume(intent, session_attributes):
    """ set receiver volume (may be capped). """
    card_title = intent['name']
    should_end_session = not keep_baker_open(session_attributes)
    # the value is "?" if it's given bogus input
    slot = intent_slot(intent, 'volume_level')
    volume_level = numeric_slot_value(slot)
    if volume_level is not None:
        log.debug("volume level slot value: %s", volume_level)
        # update the IoT device shadow
        payload = json.dumps({'state': {'desired': {'volume': volume_level}}})
        client = boto3.client('iot-data')
        client.update_thing_shadow(thingName=thing_name, payload=payload)
        speech_output = "Volume level set to {}".format(volume_level)
        #reprompt_text = input_select_prompt
        reprompt_text = ""
    else:
        # Slot missing or unparseable ("?").
        speech_output = "I didn't understand your selection. Please try again." + volume_level_prompt
        #reprompt_text = "I didn't understand your selection." + volume_level_prompt
        reprompt_text = ""
    return build_response(session_attributes, card_title, speech_output, reprompt_text, should_end_session)
# --------------- EVENTS ------------------
def on_session_started(session_started_request, session):
    """Log the start of a new Alexa session."""
    request_id = session_started_request['requestId']
    session_id = session['sessionId']
    log.debug("on_session_started requestId=%s sessionId=%s", request_id, session_id)
def on_launch(launch_request, session_attributes):
    """Handle a bare skill launch by delegating to the welcome intent."""
    request_id = launch_request['requestId']
    log.debug("on_launch requestId= %s", request_id)
    return welcome_intent(session_attributes)
def on_intent(intent_request, session_attributes):
    """Dispatch an IntentRequest to the matching intent handler.

    Raises ValueError for intent names with no registered handler.
    """
    log.info("on_intent requestId=%s", intent_request['requestId'])
    intent = intent_request['intent']
    intent_name = intent['name']
    log.info("intent name: %s", intent_name)

    # Table-driven dispatch; lambdas adapt the differing handler signatures.
    handlers = {
        "set_volume": lambda: set_volume(intent, session_attributes),
        "select_input": lambda: select_input(intent, session_attributes),
        "query_volume": lambda: query_volume(intent, session_attributes),
        "relative_volume": lambda: relative_volume(intent, session_attributes),
        "power_control": lambda: power_control(intent, session_attributes),
        "AMAZON.HelpIntent": lambda: welcome_intent(session_attributes),
        "AMAZON.CancelIntent": lambda: cancel_intent(session_attributes),
        "AMAZON.StopIntent": lambda: cancel_intent(session_attributes),
    }
    handler = handlers.get(intent_name)
    if handler is None:
        log.error("UNKNOWN INTENT: %s", intent_name)
        raise ValueError("Invalid intent")
    return handler()
def on_session_ended(session_ended_request, session):
    """Log the end of a session.

    Not invoked when the skill itself ends the session via should_end_session.
    """
    message = ("on_session_ended requestId=" + session_ended_request['requestId']
               + ", sessionId=" + session['sessionId'])
    log.info(message)
# --------------- Main handler ------------------
def lambda_handler(event, context):
    """AWS Lambda entry point: route an Alexa request to the event handlers.

    Reads the target IoT thing name from the BAKERHOUSE_IOT_THING environment
    variable into the module-level `thing_name` global used by the intent
    handlers; aborts (returns None) when it is unset.
    """
    log.debug("LAMBDA_HANDLER: event: %s", event)
    log.debug("lambda_handler: event.session.application.applicationId=%s", event['session']['application']['applicationId'])
    session_attributes = event['session'].get('attributes', {})
    log.debug("keep_baker_open %s", keep_baker_open(session_attributes))

    global thing_name
    thing_name = environ.get('BAKERHOUSE_IOT_THING')
    if thing_name is None:
        log.error("lambda_handler: required environment variable 'BAKERHOUSE_IOT_THING' is not set")
        return

    # NOTE(review): this bare string expression is a no-op left as in-code
    # documentation for the app-id check below.
    """
    Uncomment this block and populate with your skill's application ID to
    prevent someone else from configuring a skill that sends requests to this
    function.
    """
    log.info("invoking applicationId: %s", event['session']['application']['applicationId'])
    # NOTE(review): skill_app_id is assigned but never used; the check below
    # compares against a hard-coded placeholder instead.
    skill_app_id = "amzn1.echo-sdk-ams.app.[unique-value-here]"
    # ZZZ: will need to configure this from the environment config
    perform_app_id_check = False
    if perform_app_id_check:
        if (event['session']['application']['applicationId'] !=
                "amzn1.echo-sdk-ams.app.[unique-value-here]"):
            raise ValueError("Invalid Application ID")

    if event['session']['new']:
        on_session_started({'requestId': event['request']['requestId']}, event['session'])

    # Route by request type; unknown types fall through and return None.
    log.debug("TYPE: %s", event['request']['type'])
    if event['request']['type'] == "LaunchRequest":
        return on_launch(event['request'], session_attributes)
    elif event['request']['type'] == "IntentRequest":
        return on_intent(event['request'], session_attributes)
    elif event['request']['type'] == "SessionEndedRequest":
        return on_session_ended(event['request'], event['session'])
| true |
710aff70a993e6f0e606953ea0cbbd419a383dd2 | Python | kgvconsulting/PythonDEV | /convertMBtoGB.py | UTF-8 | 262 | 3.4375 | 3 | [
"MIT"
] | permissive | # Created by Krasimir Vatchinsky - KGV Consulting Corp - info@kgvconsultingcorp.com
# This program helps convert megabytes to gigabytes
# convert megabytes to gigabytes
# Prompt for a size in megabytes and report the equivalent in gigabytes
# (binary convention: 1 GB = 1024 MB).
mb = input("enter a number of megabytes: ")  # fixed typo: was "entera number"
mb = float(mb)
gb = mb / 1024
print(mb, "megabytes is equal to", gb, "gigabytes")  # fixed grammar: was "is = to"
| true |
d86a2cab23d7491f5ad71f1b282b4ed09dbe6dfc | Python | samiraabnar/brain-lang | /read_dataset/readHarryPotterData.py | UTF-8 | 9,402 | 3.03125 | 3 | [] | no_license | import numpy as np
import scipy.io
from .scan import ScanEvent
# This method reads the Harry Potter data that was published by Wehbe et al. 2014
# Paper: http://aclweb.org/anthology/D/D14/D14-1030.pdf
# Data: http://www.cs.cmu.edu/afs/cs/project/theo-73/www/plosone/
# It consists of fMRI data from 8 subjects who read chapter 9 of the first book of Harry Potter.
# They see one word every 0.5 seconds.
# A scan is taken every two seconds.
# The chapter was presented in four blocks of app. 12 minutes.
# Voxel size: 3 x 3 x 3
def read_all(data_dir):
    """Read every scan event (subjects 1-8, blocks 1-4) and attach each
    subject's voxel-to-region mapping to all of that subject's events."""
    events = []
    for subject_id in range(1, 9):
        for block_id in range(1, 5):
            for event in read_block(data_dir, subject_id, block_id):
                events.append(event)

    # The mapping is per subject, so compute it once per subject and share it.
    subjects = {event.subject_id for event in events}
    for subject in subjects:
        mapping = get_voxel_to_region_mapping(data_dir, subject)
        for event in events:
            if event.subject_id == subject:
                event.voxel_to_region_mapping = mapping
    return events
def read_block(data_dir, subject_id, block_id):
    """Read one presentation block for one subject and align words to scans.

    Returns a list of ScanEvent objects, one per fMRI scan in the block,
    each carrying the words presented since the previous scan, the sentences
    completed so far, and the (noisy) voxel activations.
    """
    # Data is in matlab format
    datafile = scipy.io.loadmat(data_dir + "subject_" + str(subject_id) + ".mat")
    # Data structure is a dictionary with keys data, time, words, meta
    # Shapes for subject 1, block 1: data (1351,37913), time (1351,2) words (1, 5176)
    noisy_scans = datafile["data"]
    timedata = datafile["time"]
    presented_words = datafile["words"]

    # --- PROCESS FMRI SCANS --- #
    # We have one scan every 2 seconds
    scan_times = timedata[:, 0]
    # We have four blocks. One block includes approx. 12 minutes
    blocks = timedata[:, 1]
    # find first and last scan time of current block
    block_starts = np.min(scan_times[np.where(blocks == block_id)])
    block_ends = np.max(scan_times[np.where(blocks == block_id)]) + 2

    # --- PROCESS TEXT STIMULI -- #
    # Here we extract the presented words and align them with their timestamps.
    # The original data consists of weirdly nested arrays.
    timed_words = []
    for i in np.arange(presented_words.shape[1]):
        token = presented_words[0][i][0][0][0][0]
        timestamp = presented_words[0][i][1][0][0]
        # Keep only words presented within (or after the start of) this block.
        if timestamp >= block_starts:
            timed_words.append([timestamp, token])

    # Initialize variables
    # stimulus = words presented between current and previous scan
    # noisy_scan = voxel activations (with the standard preprocessing: motion correction, slice timing correction etc, but not yet cleaned).
    # Details about the preprocessing can be found in the Appendix of Wehbe et al.
    # We save everything in arrays because the data is already ordered.
    word_index = 0
    word_time = timed_words[word_index][0]
    word = timed_words[word_index][1]
    events = []
    sentences = []   # completed sentences seen so far in this block
    seen_text = ""   # the sentence currently being accumulated
    start = np.where(scan_times == block_starts)[0][0]
    for j in range(start, len(scan_times)):
        event = ScanEvent()
        scan_time = scan_times[j]
        word_sequence = ""
        if scan_time > block_ends:
            # End of block, add last sentence to sentences
            events[-1].sentences.append(seen_text.strip())
            return events
        # Collect the words that have been represented during the previous and the current scan.
        while (word_time < scan_time) and (word_index + 1 < len(timed_words)):
            # Words are currently stored exactly as presented, preprocessing can be done later
            if len(word) > 0:
                # Keep track of sentence boundaries and store sentences seen so far.
                if is_beginning_of_new_sentence(seen_text.strip(), word):
                    sentences.append(seen_text)
                    seen_text = word.strip() + " "
                # Simply add word to the current sentence
                else:
                    # NOTE(review): this inner length check is redundant
                    # (already guaranteed by the enclosing if).
                    if len(word) > 0:
                        seen_text = seen_text + word.strip() + " "
                word_sequence += word + " "
            # Get next word
            word_index += 1
            word_time, word = timed_words[word_index]
        # TODO: We have not yet decided how to deal with the end of paragraph symbol ("+").
        # Leila did not answer when I asked whether participants had actually seen the +.
        # reached next scan, add collected data to events
        event.subject_id = str(subject_id)
        event.block = block_id
        event.timestamp = scan_time
        event.scan = noisy_scans[j]
        event.stimulus = word_sequence.strip()
        event.sentences = list(sentences)
        event.current_sentence = seen_text
        events.append(event)
    # Add the last sentence to the sentences of the last event.
    events[-1].sentences.append(seen_text.strip())
    return events
# This is a quite naive sentence boundary detection that only works for this dataset.
def is_beginning_of_new_sentence(seentext, newword):
    """Naive sentence-boundary detector tailored to this dataset.

    Returns True when `seentext` ends with sentence-final punctuation and
    `newword` looks like a sentence opener (capitalized and not itself a
    lone period). Relies on capitalization, so it fails on lowercased text.
    """
    sentence_punctuation = (".", "?", "!", ".\"", "!\"", "?\"", "+")
    # Abbreviations that end with a period but do not end a sentence.
    # I am ignoring the following exceptions, because they are unlikely to occur in fiction text: "etc.", "e.g.", "cf.", "c.f.", "eg.", "al.
    exceptions = ("Mr.", "Mrs.")
    if seentext.endswith(exceptions):
        return False
    # Bug fix: the original used `newword is not (".")`, which compares object
    # identity rather than string equality and is unreliable; use `!=`.
    if seentext.endswith(sentence_punctuation) and not newword.islower() and newword != ".":
        return True
    else:
        return False
# The metadata provides very rich information.
# Double-check description.txt in the original data.
# Important: Each voxel in the scan has different coordinates depending on the subject!
# Voxel 5 has the same coordinates in all scans for subject 1.
# Voxel 5 has the same coordinates in all scans for subject 2, but they differ from the coordinates for subject 1.
# Same with regions: Each region spans a different set of voxels depending on the subject!
def get_voxel_to_region_mapping(data_dir, subject_id):
    """Build a {voxel index -> ROI name} dict for one subject.

    Important: voxel coordinates and region extents differ per subject
    (see the notes above), so this mapping is only valid for `subject_id`.
    """
    metadata = scipy.io.loadmat(data_dir + "subject_" + str(subject_id) + ".mat")["meta"]
    # Per description.txt: meta field 8 = ROI number of each voxel,
    # field 10 = ROI names (see the commented index list below this function).
    roi_of_nth_voxel = metadata[0][0][8][0]
    roi_names = metadata[0][0][10][0]
    voxel_to_region = {}
    for voxel in range(0, roi_of_nth_voxel.shape[0]):
        roi = roi_of_nth_voxel[voxel]
        voxel_to_region[voxel] = roi_names[roi][0]
    # for name in roi_names:
    #     print(name[0])
    return voxel_to_region
# --------
# These are some lines for processing the metadata which are not needed here, but I leave them in for reference.
# Setting indices according to description.txt in the original data folder
# sub_id_index = 0
# number_of_scans_index = 1
# number_of_voxels = 2
# x_dim_index = 3
# y_dim_index = 4
# z_dim_index = 5
# colToCoord_index = 6
# coordToCol_index = 7
# ROInumToName_index = 8
# ROInumsToName_3d_index = 9
# ROINames_index = 10
# voxel_size_index = 11
# matrix_index = 12
# Extract metadata
# Number of scans is constant over all subjects: 1351
# Voxel size is also constant 3x3x3
# Number of voxels varies across subjects.
# the_subject_id = metadata[0][0][sub_id_index][0][0]
# number_of_scans = metadata[0][0][number_of_scans_index][0][0]
# number_of_voxels = metadata[0][0][number_of_voxels][0][0]
# voxel_size = metadata[0][0][voxel_size_index]
# Example: get coordinates of 5th voxel for this subject
# coordinates_of_nth_voxel = metadata[0][0][6]
# coordinates_of_nth_voxel[5]
# which_voxel_for_coordinates = metadata[0][0][7]
# get voxel number for set of coordinates
# which_voxel_for_coordinates[36,7,19]
# These coordinates differ slightly across subjects
# x_dim = metadata[0][0][x_dim_index][0][0]
# y_dim = metadata[0][0][y_dim_index][0][0]
# z_dim = metadata[0][0][z_dim_index][0][0]
# This index provides the geometric coordinates (x,y,z) of the n-th voxel
# coords = metadata[0][0][colToCoord_index]
# I am not really sure how voxels are mapped into Regions of Interest
# column_map = metadata[0][0][coordToCol_index]
# nmi_matrix = metadata[0][0][matrix_index]
# named_areas = metadata[0][0][ROInumToName_index]
# area_names_list = metadata[0][0][ROINames_index][0]
# The Appendix.pdf gives information about the fMRI preprocessing (slice timing correction etc)
# Poldrack et al 2014: The goal of spatial normalization is to transform the brain images from each individual in order to reduce the variability between individuals and allow
# meaningful group analyses to be successfully performed.
# TODO ask Samira: Wehbe et al used a Gaussian kernel smoother and voxel selection, you too?
# Signal Drift: : a global signal decrease with subsequently acquired images in the scan (technological artifact)
# Detrending? Tanabe & Meyer 2002:
# Because of the inherently low signal to noise ratio (SNR) of fMRI data, removal of low frequency signal
# intensity drift is an important preprocessing step, particularly in those brain regions that weakly activate.
# Two known sources of drift are noise from the MR scanner and aliasing of physiological pulsations. However,
# the amount and direction of drift is difficult to predict, even between neighboring voxels.
# from nilearn documentation:
# Standardize signal: set to unit variance
# low_pass, high_pass: Respectively low and high cutoff frequencies, in Hertz.
# Low-pass filtering improves specificity.
# High-pass filtering should be kept small, to keep some sensitivity.
| true |
f1228f35697e8f7c157d97d9d1deaf39ef9a0130 | Python | srideepkar/Driver-Drowsiness-Detection-using-MQ6-gas-sensor-and-vision-sensor | /py7seg/Display108.py | UTF-8 | 243 | 2.796875 | 3 | [] | no_license | # Display101.py
# showText()
from py7seg import Py7Seg
import time
ps = Py7Seg()
ps.showText('HELO')
for i in range(4):
time.sleep(0.5)
ps.setBrightness(7)
time.sleep(0.5)
ps.setBrightness(1)
time.sleep(1)
ps.showText("8YE")
| true |
13d2088df88120c6086ab8e1a1f6570cbec18f0f | Python | dabrunhosa/PhD_Program | /Plotting/NetXNeuroPlot.py | UTF-8 | 8,661 | 2.859375 | 3 | [] | no_license | ## -*- coding: utf-8 -*-
#'''
#Created on September 6, 2017
#@author: dabrunhosa
#'''
#from Plotting.IPlot import IPlot
#import networkx as nx
#from Utilities.Utils import Set
#import operator
#import math
#from Queue import Queue
#import matplotlib.pyplot as plt
#class NetX_NeuroPlot(IPlot):
# ########################################
# ### Private Functions ###
# ########################################
# def __insert_node_level(self,level_sequence,level,node):
# if not level_sequence.contains_key(level):
# # add the level and a unique list of nodes
# level_sequence.add(level, set([node]))
# else:
# # Find level and add node the Sequence
# # will ignore non-unique nodes
# level_sequence.get(level).add(node)
# def __find_number_levels(self,mesh_origin,origin,flow_direction_name):
# number_levels = 0
# level_nodes = []
# begin = False
# for level in mesh_origin.values():
# if flow_direction_name == 'predecessors':
# if origin in level:
# level_nodes.append([origin])
# break
# else:
# level_nodes.append(level)
# number_levels += 1
# elif flow_direction_name == 'successors':
# if origin in level:
# level_nodes.append([origin])
# begin = True
# elif begin:
# level_nodes.append(level)
# number_levels += 1
# return [number_levels,level_nodes]
# def __find_origins(self):
# # Create a Set for the New Order
# # of the nodes
# new_order = Set('New_Order')
# geometric_mesh = self.__data
# # Get all the Graph's paths
# all_paths = nx.shortest_path_length(geometric_mesh)
# has_path = True
# # Define a default longest path and level
# current_longest_path = -1
# current_level = 0
# while has_path:
# # Find the longest path using the iteration object (key,value) and
# # the Operator class to get the second position of the tuple (value).
# longest_path = max(all_paths.iteritems(),key=operator.itemgetter(1))
# longest_path_length = max(longest_path[1].iteritems(),key=operator.itemgetter(1))[1]
# if longest_path_length < current_longest_path:
# current_level += 1
# # Every time the node of longest path is added
# # to the appropriated level, the longest path
# # is setted and the current path is
# # deleted from the options
# self.__insert_node_level(new_order, 'level_'+str(current_level), longest_path[0])
# current_longest_path = longest_path_length
# del all_paths[longest_path[0]]
# if all_paths == {}:
# has_path = False
# return new_order
# def __find_central_segment(self):
# geometric_mesh = self.__mesh
# # Calculate the betweenness of the entire graph
# betweenness_centrality = nx.betweenness_centrality(geometric_mesh)
# # The node with the higgest score is the central node
# central_node = max(betweenness_centrality,key=betweenness_centrality.get)
# biggest_path = -1
# central_segment = ()
# predecessors = geometric_mesh.predecessors(central_node)
# sucessors = geometric_mesh.successors(central_node)
# all_neighbors = predecessors + sucessors
# for node in all_neighbors:
# largest_path = max(nx.shortest_path_length(geometric_mesh, source=node).values())
# if largest_path > biggest_path or biggest_path == -1:
# biggest_path = largest_path
# if node in predecessors:
# edge = (node,central_node)
# else:
# edge = (central_node,node)
# central_segment = edge
# return central_segment
# def __organize_vertically(self,mesh_origin,origin,flow_direction):
# vertical_list = Queue()
# neuro_layout = {}
# vertical_list.put_nowait(origin)
# base_level_distance = 0.2
# level_distance = base_level_distance
# [number_of_levels,level_nodes] = self.__find_number_levels(mesh_origin, origin,flow_direction.__name__)
# brothers_distance = lambda distance,angle: (math.sin((math.pi*angle)/180)*distance)*2
# for _ in xrange(0,number_of_levels-1):
# level_distance *= 2
# level = number_of_levels + 1
# angle = 120
# if flow_direction.__name__ == 'predecessors':
# x = 0
# y = 0
# position = -1
# elif flow_direction.__name__ == 'successors':
# x = level_distance
# y = 0
# position = 0
# neuro_layout[origin] = (x,y)
# while not vertical_list.empty():
# node = vertical_list.get(block = False)
# first = True
# if level_nodes != []:
# if node not in level_nodes[position]:
# level -= 1
# level_nodes.pop(position)
# level_distance -= 0.1*level_distance
# angle -= 0.7*angle
# if flow_direction.__name__ == 'predecessors':
# x_dislocation = -level_distance
# elif flow_direction.__name__ == 'successors':
# x_dislocation = level_distance
# for item in flow_direction(node):
# x = neuro_layout[node][0] + x_dislocation
# if first:
# y = neuro_layout[node][1] - (brothers_distance(level_distance,angle)/2)
# first = False
# else:
# y = neuro_layout[node][1] + (brothers_distance(level_distance,angle)/2)
# neuro_layout[item] = (x,y)
# vertical_list.put_nowait(item)
# return neuro_layout
# def __neuroscience_layout(self):
# mesh_origins = self.__find_origins()
# neuroscience_mesh = self.__mesh
# central_segment = self.__find_central_segment()
# neuro_layout = self.__organize_vertically(mesh_origins,central_segment[0], neuroscience_mesh.predecessors)
# neuro_layout.update(self.__organize_vertically(mesh_origins,central_segment[1], neuroscience_mesh.successors))
# return neuro_layout
# ########################################
# ### Public Functions ###
# ########################################
# def show(self):
# scale_factor = 1.2
# if len(self.__data.nodes()) is not 0:
# # positions for all nodes
# pos = self.__neuroscience_layout()
# # nodes
# nx.draw_networkx_nodes(self.__data,pos,node_size=scale_factor*300)
# # edges
# nx.draw_networkx_edges(self.__data,pos,width=scale_factor*0.5)
# # labels for the nodes
# nx.draw_networkx_labels(self.__data,pos
# ,font_size=scale_factor*13,
# font_family='sans-serif')
# # labels for the edges
# edge_labels = {}
# for edge in self.__data.edges():
# node = self.__data.get_edge_data(*edge)['segment']
# edge_labels[edge] = node.name
# nx.draw_networkx_edge_labels(self.__data,
# pos,edge_labels=edge_labels,\
# font_size=scale_factor*12,
# font_family='sans-serif')
# plt.show()
# else:
# print "The Mesh does not have any elements."
# def save(self,path_location=None):
# raise NotImplementedError
| true |
1b3af9b0f4cb02955cbefc3de3fcdfce16ba6b4b | Python | starzc-galaxy/Dynamic-desktop | /main.py | UTF-8 | 781 | 2.671875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""一个设置视频成动态壁纸的工具
"""
__author__ = "zc"
import sys
from PyQt5.QtWidgets import QApplication
from PyQt5.QtNetwork import QLocalSocket,QLocalServer
from wallpaper import Wallpaper
if __name__ == '__main__':
    app = QApplication(sys.argv)
    serverName = 'wallpaper'
    # Single-instance guard via a named local socket/server pair.
    socket = QLocalSocket()
    socket.connectToServer(serverName)
    # If the connection succeeds, a server already exists, i.e. another
    # instance is already running - quit instead of starting a second one.
    if socket.waitForConnected(500):
        app.quit()
    else:
        localServer = QLocalServer() # no instance running yet: become the server
        localServer.listen(serverName)
        # Closing all windows should not quit the application.
        mes = Wallpaper()
        mes.show()
        sys.exit(app.exec_())
| true |
0a3bdd12583b530a086ab6d1cb89c7948b8a555a | Python | Kenpatner/Python210_Fall2019 | /students/Ken Patner/lesson02/print_grid.py | UTF-8 | 330 | 3.5 | 4 | [] | no_license | def gridprinter(n):
    # Glyphs used to draw the grid: corners, horizontal and vertical edges.
    plus = "+"
    minus = "-"
    line = "|"
    # Top border of a grid two cells wide, each cell n characters across.
    print (plus + minus *n + plus+ minus *n + plus)
    for i in (range(n)):
        print (line+ " "*n + line + " "*n+line)
    # Middle border, then the second row of cells.
    print (plus + minus *n + plus+ minus *n + plus)
    for i in (range(n)):
        print (line+ " "*n + line + " "*n+line)
    # NOTE(review): the closing bottom border line is never printed, so the
    # last row of cells is left open - this looks like an off-by-one bug.
gridprinter(7) | true |
476aaef8632f785ad26dd14878c608e0f03eafa1 | Python | Sonia-96/Coding4Interviews | /剑指offer/python/1-二维数组中的查找/1-search_in_2D_array.py | UTF-8 | 1,266 | 3.609375 | 4 | [] | no_license |
class Solution:
    """Search `target` in a 2D matrix whose rows and columns are both sorted
    ascending ("Sword to Offer" problem 1).

    All methods return the strings 'true' / 'false' (the judge's expected
    output format) rather than booleans, to keep the original interface.
    """

    # Brute force: scan every row with the `in` operator. O(rows * cols).
    def Find1(self, target, array):
        n = len(array)
        for i in range(n):
            if target in array[i]:
                return 'true'
        return 'false'

    # Staircase search from the top-right corner: every comparison discards
    # one row or one column, so the scan is O(rows + cols).
    def Find2(self, target, array):
        # Guard: an empty matrix or empty rows used to crash on array[0].
        if not array or not array[0]:
            return 'false'
        row = len(array)
        col = len(array[0])
        i = 0
        j = col - 1
        while 0 <= i < row and 0 <= j < col:
            if array[i][j] < target:
                i += 1
            elif array[i][j] > target:
                j -= 1
            else:
                return 'true'
        return 'false'

    # Binary search inside each row. O(rows * log(cols)).
    def Find3(self, target, array):
        # Same empty-input guard as Find2.
        if not array or not array[0]:
            return 'false'
        row = len(array)
        col = len(array[0])
        for i in range(row):
            low = 0
            high = col - 1
            while low <= high:
                mid = (low + high) // 2
                if array[i][mid] < target:
                    low = mid + 1
                elif array[i][mid] > target:
                    high = mid - 1
                else:
                    return 'true'
        return 'false'
# Judge-style driver: read "(target, matrix)" inputs from stdin until EOF.
while True:
    try:
        S = Solution()
        # WARNING: eval() on raw input is unsafe outside an online-judge
        # context; each line is presumably "target, [[...], ...]".
        L = list(eval(input()))
        target, array = L[0], L[1]
        print(S.Find2(target, array))
    except:
        # Bare except is deliberate: any parse failure or EOF ends the loop.
        break
| true |
31319e5ad7063dd535d237647c0692ff92aa4e17 | Python | ladyy27/comparacion-planes-NLP | /NLPcode_Lady/proyNLP/detectIdioma.py | UTF-8 | 3,300 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
########Import textblob
from textblob import TextBlob
from detect_es import *
from detect_en import *
import codecs
#######
""""
stopwordslist = []
with codecs.open('spanish', encoding='utf-8') as f:
for line in f.readlines():
stop = line
stop2 = stop.replace("\n","")
stopwordslist.append(stop2)
#stop1 = stop.encode('utf8')
for i in stopwordslist:
print i
"""
def stopwordsList(filename):
    """Load a stopword list from a UTF-8 text file, one word per line.

    Returns the words as a list of strings with the trailing newline
    stripped but any other whitespace preserved (same behaviour as the
    original implementation).
    """
    stopwordslist = []
    # Use a context manager so the handle is always closed, even on error;
    # the original left the file open (resource leak).
    with codecs.open(filename, "r", encoding="UTF-8") as stopfile:
        for line in stopfile:
            stopwordslist.append(line.replace("\n", ""))
    return stopwordslist
# Strip accents (tildes) from the Spanish sample phrase.
#cad_es= "Explicá las diferentés fases de procesamiento de un tejido para su observación al microscopio óptico y menciona las técnicas histológicas para identificación de tejidos epiteliales."
cad_es = "anaconda, serpiente"
cad_es = cad_es.replace("á", "a").replace("é", "e").replace("í", "i").replace("ó", "o").replace("ú", "u").replace("Á", "A").replace("É", "E").replace("Í", "I").replace("Ó", "O").replace("Ú", "U")
# Disabled earlier attempt at the accent stripping (kept verbatim):
"""if "á" in cad_es:
    cad_es= cad_es.replace("á", "a")
elif "é" in cad_es:
    cad_es= cad_es.replace("é", "e")
    print "entrando"
elif "í" in cad_es:
    cad_es= cad_es.replace("í", "i")
elif "ó" in cad_es:
    cad_es= cad_es.replace("ó", "o")
elif "ú" in cad_es:
    cad_es= cad_es.replace("ú", "u")
elif "Á" in cad_es:
    cad_es= cad_es.replace("Á", "A")
elif "É" in cad_es:
    cad_es= cad_es.replace("É", "e")
elif "Í" in cad_es:
    cad_es= cad_es.replace("Í", "I")
elif "Ó" in cad_es:
    cad_es= cad_es.replace("Ó", "O")
elif "Ú" in cad_es:
    cad_es= cad_es.replace("Ú", "U")
"""
print "-------- CADENA EN ESPANIOL SIN TILDES-----------------------"
print cad_es
print "-------- CADENA EN ESPANIOL SIN TILDES-----------------------"
lista = []
# Build the sample phrases as TextBlob objects (used for language detection).
cad_es_r= TextBlob(cad_es)
lista.append(cad_es_r)
cad_fr= TextBlob("Je m'appelle Mercy. Au revoir")
lista.append(cad_fr)
cad_en= TextBlob("Aerospace and electronic systems")
lista.append(cad_en)
# One stopword file per language, plus punctuation tokens to filter out.
es_stopwords= stopwordsList("spanish")
fr_stopwords= stopwordsList("french")
en_stopwords= stopwordsList("english")
punt_stopwords= stopwordsList("puntuacion")
# For each phrase: detect the language, run the matching parser and print
# each (token, lemma) pair whose lemma is neither a stopword nor punctuation.
# NOTE(review): tok[5] is assumed to be the lemma column of the parser
# output - confirm against detect_es/detect_en.
for i in lista:
    if i.detect_language() == 'es':
        print "ES:"
        a = str(i)
        print a
        texto = es_parsing(a)
        for sen in texto:
            for tok in sen:
                #lema = tok[5]
                if tok[5] not in es_stopwords:
                    if tok[5] not in punt_stopwords:
                        print tok[0] + " --- "+ tok[5]
        print texto
        print "----"
    elif i.detect_language() == 'en':
        print "EN:"
        a = str(i)
        print a
        texto = en_parsing(a)
        print texto
        for sen in texto:
            for tok in sen:
                #lema = tok[5]
                if tok[5] not in en_stopwords:
                    if tok[5] not in punt_stopwords:
                        print tok[0] + " --- "+ tok[5]
        print "----"
    elif i.detect_language() == 'fr':
        print "FR:"
        a = str(i)
        print a
        texto = parse(a, tokenize=True, tags=True, chunks=True, relations=True, lemmata=True).split()
        print texto
        for sen in texto:
            for tok in sen:
                #lema = tok[5]
                if tok[5] not in fr_stopwords:
                    if tok[5] not in punt_stopwords:
                        print tok[0] + " --- "+ tok[5]
        print "----"
| true |
9b86756b326b8e9ef7776a03233a23626c858498 | Python | buzoherbert/6.867-Machine-Learinng-in-transportation-safety-perception | /write_confusions.py | UTF-8 | 1,949 | 2.828125 | 3 | [] | no_license |
import csv
import numpy as np
# One bucket per section of all_confusions.txt (presumably one per model).
matrices_acc = []
matrices_f1 = []
matrices_reg = []
matrices_gp = []
with open('all_confusions.txt') as file:
    i = 0        # index of the section currently being read (1..4)
    rows = []    # rows of the matrix being accumulated
    for line in file:
        line = line.strip()
        if len(line) < 1:
            continue
        # A line ending in ":" starts the next section.
        if line[-1] == ":":
            i += 1
            continue
        # Drop the numpy-style brackets; keep the numbers (as strings).
        line = line.replace("[", "").replace("]", "")
        numbers = line.split()
        rows.append(numbers)
        # Every 5 rows form one complete confusion matrix.
        if len(rows) == 5:
            if i == 1:
                matrices_acc.append(rows)
            elif i == 2:
                matrices_f1.append(rows)
            elif i == 3:
                matrices_reg.append(rows)
            elif i == 4:
                matrices_gp.append(rows)
            rows = []
def transpose(mat):
    """Return the transpose of a matrix given as a list of row lists.

    The original indexed columns with range(len(mat)), which is only correct
    for square matrices; zip(*mat) generalizes this to any rectangular
    matrix while giving identical results for square input (the only shape
    used in this script: 5x5 confusion matrices).
    """
    return [list(column) for column in zip(*mat)]
# The four reports below share the same CSV layout; emit them with a single
# loop instead of four copy-pasted blocks.  File names, writer options and
# row order are identical to the original code.
for out_name, matrices in (
    ('confusions_nnclass_acc.csv', matrices_acc),
    ('confusions_nnclass_f1.csv', matrices_f1),
    ('confusions_nnreg.csv', matrices_reg),
    ('confusions_gp.csv', matrices_gp),
):
    with open(out_name, 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for mat in matrices:
            new_mat = transpose(mat)
            for row in new_mat:
                writer.writerow(row)
| true |
adf06cb5e0f52f96dbb3ad75e6db96a8212dbff5 | Python | AhmedAbdElfatah999/AI-Project-Bounded-and-Unbounded-Knapsack- | /knapsack (PSO).py | UTF-8 | 3,839 | 3.703125 | 4 | [] | no_license | #Define Item class
class Item:
    """A single knapsack item, described by its weight and its value."""

    def __init__(self, weight, value):
        # Both attributes are read directly by the knapsack heuristics below.
        self.weight, self.value = weight, value
def Bounded_Knapsack(items, capacity):
    """Greedy 0/1-knapsack heuristic: fill the sack under `capacity`, then try
    to swap kept items for better remaining ones.

    NOTE: consumes (mutates) `items`; returns
    (chosen_items, total_weight, total_value).
    """
    knapsack = [] #knapsack container
    knapsack_weight = [] #weights of the items currently kept
    knapsack_value = [] #values of the items currently kept
    #initialization part,put into knapsack items ,constrained by knapsack weight
    while len(items) > 0:
        item = items.pop(0)
        if item.weight +sum(knapsack_weight )<= capacity:
            #if adding new item,doesn't make total weight greater than the knapsack weight
            #then add it and update total current weight,value
            knapsack.append(item)#item complete object
            knapsack_weight.append( knapsack[knapsack.index(item)].weight)
            knapsack_value.append( knapsack[knapsack.index(item)].value)
        else:
            #fitness evaluations
            #if the other item has value greater than item's value in the knapsack and it's weight
            #less than the weight of the item's weight in the knapsack then remove old item from
            #knapsack and append the new item with the new value and weight
            # NOTE(review): `knapsack` is removed from / appended to *while being
            # iterated*, and items.pop(0) drops an item unrelated to `t`.  The
            # heuristic's result depends on these quirks, so they are documented
            # here rather than "fixed".
            for t in items:
                for kt in knapsack:
                    if(t.value>kt.value and t.weight<=min(knapsack_weight)and t.weight<kt.weight):
                        knapsack_value.remove(kt.value)
                        knapsack_weight.remove(kt.weight)
                        knapsack.remove(kt)
                        knapsack.append(t)
                        knapsack_value.append(t.value)
                        knapsack_weight.append(t.weight)
                        items.pop(0)
                    else:
                        continue
    return knapsack,sum(knapsack_weight),sum(knapsack_value)
# Experiment 1 (active).  Experiments 2 and 3 below are kept disabled inside
# triple-quoted strings - paste one over `items` to try it.
items = [
    Item(5,800)
    ,Item(5,700)
    ,Item(60,100)
    ,Item(2,300)
    ,Item(10,400)
    ,Item(100,200)
    ,Item(20,500)
    ,Item(15,400)
    ,Item(5,620)
    ]
#experiment 2
'''
items=[
    Item(3,50)
    ,Item(5,10)
    ,Item(4,40)
    ,Item(6,30)
    ,
    ]'''
#experiment 3
'''
items=[
    Item(20,30)
    ,Item(10,55)
    ,Item(35,20)
    ,Item(45,30)
    ,Item(30,35)
    ,Item(20,15)
    ,Item(15,15)
    ]
'''
capacity=50
# Run the bounded heuristic and report the chosen items and the totals.
knapsack,weight,value=Bounded_Knapsack(items,capacity)
print("Bounded knapsack")
for item in knapsack:
    print("weight:",item.weight,"value:",item.value)
print("Total Allocated Weight:",weight,"\n","Max Value",value)
#--------------------------------------------------------------------------------------
#the same concept but we can put unlimited number of the same item in the knapsack
def Unbounded_Knapsack(items, capacity):
    """Same greedy heuristic as Bounded_Knapsack, intended (per the comment
    above) to allow repeating items.  Consumes `items` (popping from the END
    here) and returns (chosen_items, total_weight, total_value).
    """
    knapsack = []
    knapsack_weight = []
    knapsack_value = []
    while len(items) > 0:
        item = items.pop()
        if item.weight +sum(knapsack_weight )<= capacity:
            knapsack.append(item)
            knapsack_weight.append( knapsack[knapsack.index(item)].weight)
            knapsack_value.append( knapsack[knapsack.index(item)].value)
        else:
            # NOTE(review): as in Bounded_Knapsack, `knapsack` is mutated while
            # being iterated; the extra `or sum(knapsack_weight)<capacity`
            # clause makes the swap fire whenever spare capacity remains.
            for t in items:
                for kt in knapsack:
                    if((t.value>kt.value and t.weight<kt.weight ) or sum(knapsack_weight)<capacity):
                        knapsack_value.remove(kt.value)
                        knapsack_weight.remove(kt.weight)
                        knapsack.remove(kt)
                        knapsack.append(t)
                        knapsack_value.append(t.value)
                        knapsack_weight.append(t.weight)
                    else:
                        continue
    return knapsack,sum(knapsack_weight),sum(knapsack_value)
# Experiment 1 for the unbounded variant (experiments 2-3 disabled below).
items = [
    Item(10,800)
    ,Item(5,700)
    ,Item(60,100)
    ,Item(2,300)
    ,Item(5,620)
    ,Item(100,200)
    ,Item(20,500)
    ,Item(15,400)
    ,Item(10,400)
    ]
#experiment 2
'''
items=[
    Item(3,50)
    ,Item(5,10)
    ,Item(4,40)
    ,Item(6,30)
    ,
    ]'''
#experiment 3
'''
items=[
    Item(20,30)
    ,Item(10,55)
    ,Item(35,20)
    ,Item(45,30)
    ,Item(30,35)
    ,Item(20,15)
    ,Item(15,15)
    ]
'''
capacity=50
# NOTE(review): the totals are unpacked into `i` (weight) and `j` (value).
knapsack,i,j=Unbounded_Knapsack(items,capacity)
print("Unbounded knapsack")
for item in knapsack:
    print("weight:",item.weight,"value:",item.value)
print("Total Allocated Weight:",i,"\n","Max Value",j)
| true |
9d8930430efd7c3cc4e430c555615b4eca204e3a | Python | MatheusFeijoo/lyriclook | /bot.py | UTF-8 | 2,862 | 2.765625 | 3 | [] | no_license | import telebot
from telebot import types
import time
from search import pega
# SECURITY: a live bot token is committed here in plain text; it should be
# revoked and loaded from an environment variable or config file instead.
bot_token = "795674646:AAHY7s8Xetv-XZK8HKtTQGnzdG2_cL6NDII"
bot = telebot.TeleBot(token=bot_token)
# Maps a chat id to its User conversation state.
user_dict = {}
class User:
    """Per-chat conversation state: the artist name and, later, the song."""

    def __init__(self, name):
        self.name = name    # artist entered by the user
        self.music = None   # song title, filled in by the second step
# /start: greet the user and point at the available commands.
@bot.message_handler(commands=['start'])
def send_welcome(message):
    msg = bot.reply_to(message, """\
Hi if you want me to search for a music lyric type /music
For more informations type /help
This bot was developed by @matheusfeijoo
Feel free to send your feedback :)
You can see the code of this bot in https://github.com/MatheusFeijoo/lyriclook
""")
# /help: explain the lookup quirks of the backing lyrics site.
@bot.message_handler(commands=['help'])
def send_help(message):
    message = bot.reply_to(message, """\
Hi if you want me to search for a music lyric type /music
------- OMG YOU CAN'T FIND THE LYRICS -------
I know I know, I'm not perfect, yet!
I use a brazilian website to search the lyrics, and sometimes they don't use the same name of the music.
A example is with the artist Passenger, they saved as The Passenger Reino Unido, wich is a bit strange.
Another example is with feat.
You need to write the feat in the music. For exemple: Princess of China feat. Rihanna
---------------------------------------------
This bot was developed by @matheusfeijoo
Feel free to send your feedback :)
You can see the code of this bot in https://github.com/MatheusFeijoo/lyriclook
""")
# /music: start the two-step conversation (artist first, then song title).
@bot.message_handler(commands=['music'])
def send_music(message):
    msg = bot.reply_to(message, """\
From which artist?
""")
    bot.register_next_step_handler(msg, process_name_step)
def process_name_step(message):
    """Step 1: store the artist name keyed by chat id, then ask for the song."""
    try:
        chat_id = message.chat.id
        name = message.text
        user = User(name)
        user_dict[chat_id] = user
        msg = bot.reply_to(message, 'Which music?')
        bot.register_next_step_handler(msg, process_age_step)
    except Exception as e:
        # Any failure restarts the flow; `e` is intentionally not surfaced.
        bot.reply_to(message, 'Something went wrong! Need to start again with /music')
def process_age_step(message):
    """Step 2: fetch the lyrics with search.pega and send them back.

    (Name kept for compatibility; it processes the *music* title, not an age.)
    """
    try:
        chat_id = message.chat.id
        music = message.text
        user = user_dict[chat_id]
        user.music = music
        # NOTE(review): chat_id is re-assigned the same value here - redundant.
        chat_id = message.chat.id
        lyrics = pega(user.name, user.music)
        bot.send_message(chat_id, lyrics)
    except Exception as e:
        print(e)
        bot.reply_to(message, 'Sorry I am not perfect yet. \n You can Try again with /music')
# Enable saving next step handlers to file "./.handlers-saves/step.save".
# Delay=2 means that after any change in next step handlers (e.g. calling register_next_step_handler())
# saving will happen after a delay of 2 seconds.
bot.enable_save_next_step_handlers(delay=2)
# Load next_step_handlers from save file (default "./.handlers-saves/step.save")
# WARNING It will work only if enable_save_next_step_handlers was called!
bot.load_next_step_handlers()
bot.polling() | true |
706008a7db63bcadbbcddde09a1612c1ee320045 | Python | DaHuO/Supergraph | /codes/CodeJamCrawler/16_0_2/wojiefu/B.pancage.py | UTF-8 | 415 | 3.65625 | 4 | [] | no_license | def flip_count(s):
    # Count the boundaries between '+' runs and '-' runs: each boundary
    # costs one flip of the prefix up to it.
    prev = s[0]
    item = s[0]
    n = 0
    for item in s[1:]:
        if item != prev:
            prev = item
            n += 1
    # One extra flip if the string still ends in '-' after merging runs.
    if item == '-':
        n += 1
    return n
def main():
    # Python 2 driver: first line is the number of cases, then one pancake
    # string per case; output uses the Code Jam "Case #i: answer" format.
    t = int(raw_input())
    for i in xrange(1, t+1):
        cakes = str(raw_input())
        print "Case #{}: {}".format(i, flip_count(cakes))
if __name__ == '__main__':
main() | true |
067f1274140a6ff88f1537f1b1cce9b3bb22a6f2 | Python | liquor1014/python_study | /guess_word.py | UTF-8 | 2,193 | 3.015625 | 3 | [] | no_license | import jieba
from wordcloud import WordCloud
from scipy.misc import imread
# Read the novel ("Romance of the Three Kingdoms") as UTF-8 text.
with open('D:/Python/Text1/wenjian/threekingdom.txt', 'r', encoding='utf-8') as f:
    text = f.read()
# Segment the Chinese text into words with jieba.
word_list = jieba.lcut(text)
# print(word_list)
# # 将列表转化成字符串
# words = ' '.join(word_list)
# # 绘制词云
# wc = WordCloud(
#     background_color='white',
#     height=600,
#     width=800,
#     font_path='msyh.ttc'
# ).generate(words)
# wc.to_file('三国小说词云.png')
img = imread('china.jpg')
# Stop-list: frequent non-name words and titles excluded from the ranking.
excludes = {"将军", "却说", "丞相", "二人", "不可", "荆州", "不能", "如此", "商议",
            "如何", "主公", "军士", "军马", "左右", "次日", "引兵", "大喜", "天下",
            "东吴", "于是", "今日", "不敢", "魏兵", "陛下", "都督", "人马", "不知"}
with open('D:/Python/Text1/wenjian/threekingdom.txt', 'r', encoding='utf-8') as f:
    txt = f.read()
words = jieba.lcut(text)
counts = {}
for word in words:
    if len(word) == 1:
        continue  # drop single-character tokens
    else:
        counts[word] = counts.get(word,0) +1
# Merge aliases of the same character into one key (e.g. 孔明 + 孔明曰).
counts['孔明'] = counts.get('孔明') + counts.get('孔明曰')
counts['玄德'] = counts.get('玄德') + counts.get('玄德曰')
counts['玄德'] = counts.get('玄德') + counts.get('刘备')
counts['云长'] = counts.get('云长') + counts.get('关公')
# print(counts)
for word in excludes:
    del counts[word]
# Sort the (word, count) pairs by frequency, descending.
items = list(counts.items())
# [('正文', 1), ('第一回', 1), ('桃园', 19), ('豪杰', 22)...
items.sort(key=lambda x: x[1], reverse=True)
# print(items)
# [('曹操', 910), ('孔明', 818), ('将军', 739), ('却说', 642), ('玄德', 515),
li = []
# Repeat each of the 10 most frequent names `count` times so the word cloud
# weights them by frequency.
for i in range(10):
    word, count = items[i]
    print(word, count)
    for _ in range(count):
        li.append(word)
cloud_text =','.join(li)
# print(cloud_text)
# collocations : bool, default=True // whether to include two-word collocations
wc = WordCloud(background_color='white',width=800, height=600
               ,font_path='msyh.ttc', collocations=False, mask=img).generate(cloud_text)
wc.to_file('三国演义人物词频统计.png')
| true |
07dd6f4cc33405427524220447fb2cf471c6a6f6 | Python | sydgarnett/PokemonChooser | /Testing/buttontest2.py | UTF-8 | 1,189 | 3.4375 | 3 | [] | no_license | #!/usr/bin/env python3
from tkinter import *
class Application(Frame):
    """A small password-prompt GUI: a label, an entry field, a Submit
    button and a text box that shows the result."""
    def __init__(self,master):
        Frame.__init__(self,master)
        self.grid()
        self.createWidgets()
    def createWidgets(self):
        # Prompt label, password entry, submit button and output text area,
        # laid out on a 4-row grid.
        self.instruction= Label(self,text= "enter the password")
        self.instruction.grid(row=0,column=0,columnspan=2,sticky=W)
        self.password=Entry(self)
        self.password.grid(row=1,column=1,sticky=W)
        self.submitButton=Button(self,text="Submit",command=self.reveal)
        self.submitButton.grid(row=2,column=0,sticky=W)
        self.text=Text(self,width=35,height=5,wrap=WORD)
        self.text.grid(row=3,column=0,columnspan=2,sticky=W)
    def reveal(self):
        """display message based on the password typed in"""
        content=self.password.get()
        if content=="password":
            message="You have access to something special."
        else:
            message="Access Denied."
        # NOTE(review): Text indices are normally strings like "1.0"; the
        # float 0.0 happens to work but "1.0" would be the canonical form.
        self.text.delete(0.0,END)
        self.text.insert(0.0,message)
# Build the window and hand control to Tk's event loop.
root=Tk()
root.title("Password")
root.geometry("250x150")
app=Application(root)
root.mainloop()
| true |
930d7eb0a8e27f32f6bffacb12af019ba74eb398 | Python | Int-TRUE/2021knupython | /3. recursion+condition/while_recursion.py | UTF-8 | 502 | 3.984375 | 4 | [] | no_license | # for와 while의 차이
# a for loop runs a fixed, predetermined number of times
# a while loop runs until a goal is met -> i.e. while its condition is true
# Basic while loop
it = 0
while it <5:
    it+=1
    print(it)
# Structure of a while loop:
# while condition:
#     statement 1 to repeat
#     statement 2 to repeat
# Infinite while loop
# overflow
# it=0
# while True:
#     it+=1
#     print(it)
# Escape with Ctrl + c
# Infinite while loop + break
it = 0
while True:
    it+=1
    print(it)
    if(it>500):
break | true |
aa0529a05b43b3152d392e79040ac84a8cbeecf7 | Python | kuzminArtur/foodgram-project | /recipes/templatetags/user_filters.py | UTF-8 | 576 | 2.671875 | 3 | [] | no_license | from django import template
register = template.Library()
@register.filter
def addclass(field, css):
    """Render a form field's widget with the given CSS class added."""
    return field.as_widget(attrs={"class": css})
@register.filter
def get_num_ending(num, ending):
    """Make correct declination: pick a plural word form for `num`.

    `ending` is a comma-separated triple of forms; index 0 is used for a
    last digit of 1, index 1 for 2-4, index 2 for 11-19 and everything else.
    """
    ending = ending.split(',')
    remainder = num % 100
    # 11..19 always take the third form, regardless of the last digit.
    if 11 <= remainder <= 19:
        return f'{num} {ending[2]}'
    remainder = remainder % 10
    if remainder == 1:
        return f'{num} {ending[0]}'
    if 1 < remainder <= 4:
        return f'{num} {ending[1]}'
    return f'{num} {ending[2]}'
| true |
0d6b62be336e6c47a650512872405d7d4366f1ff | Python | sy1wi4/ASD-2020 | /sorting/radix_sort.py | UTF-8 | 1,207 | 3.671875 | 4 | [] | no_license | # sortujemy kolejno "kolumnami" od najmniej znaczacych cyfr, czyli zaczynajac od ostatniej pozycji az do pierszej
# kazda kolumne sortujemy stabilnym counting sortem
from random import randint
def countingSort(arr, pos):
    """Stable counting sort of `arr` (in place) by the decimal digit selected
    by `pos` (pos is 1, 10, 100, ... for units, tens, hundreds, ...).
    """
    n = len(arr)
    count = 10 * [0]  # one bucket per digit 0..9
    output = n * [0]
    for i in range(n):
        # arr[i] // pos drops the digits below the one we sort by;
        # ... % 10 then extracts that digit.
        idx = arr[i] // pos
        count[idx % 10] += 1
    for i in range(1, 10):
        count[i] += count[i - 1]  # cumulative sum -> final positions
    # Walk backwards so equal keys keep their relative order (the stability
    # that radix sort requires).
    for i in range(n - 1, -1, -1):
        idx = arr[i] // pos
        output[count[idx % 10] - 1] = arr[i]
        count[idx % 10] -= 1
    # Copy back instead of returning, so the caller's list is sorted in place.
    for i in range(n):
        arr[i] = output[i]
def radixSort(arr):
    """LSD radix sort of a list of non-negative integers, in place.

    Bug fix: the original loop shrank Max with `Max = Max//pos` while `pos`
    also grew tenfold each pass, so for values with five or more digits the
    most significant digits were never sorted.  Testing `Max // pos`
    against the *current* pos runs exactly one pass per digit of the
    largest value.  Also tolerates an empty list (max([]) used to raise).
    """
    if not arr:
        return
    Max = max(arr)
    pos = 1
    while Max // pos > 0:
        countingSort(arr, pos)
        pos *= 10
| true |
d3faca8f594f5932e94cfaf32eb96388c930d4b7 | Python | benjaminhuanghuang/py-selenium-job-apply | /login.py | UTF-8 | 1,529 | 2.640625 | 3 | [] | no_license | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, ElementClickInterceptedException, NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
import time
import re
import json
class EasyApplyLinkedin:
    def __init__(self, data):
        """Parameter initialization: credentials, search terms and the
        ChromeDriver path, all taken from the `data` dict
        (keys: email, password, keywords, location, driver_path)."""
        self.email = data['email']
        self.password = data['password']
        self.keywords = data['keywords']
        self.location = data['location']
        # Starts a Chrome session immediately on construction.
        self.driver = webdriver.Chrome(data['driver_path'])
    def login_linkedin(self):
        """This function logs into your personal LinkedIn profile"""
        # go to the LinkedIn login url
        self.driver.get("https://www.linkedin.com/login")
        # introduce email and password and hit enter
        # NOTE(review): find_element_by_name was removed in Selenium 4;
        # this code requires Selenium 3.
        login_email = self.driver.find_element_by_name('session_key')
        login_email.clear()
        login_email.send_keys(self.email)
        login_pass = self.driver.find_element_by_name('session_password')
        login_pass.clear()
        login_pass.send_keys(self.password)
        login_pass.send_keys(Keys.RETURN)
if __name__ == '__main__':
    # Read credentials/settings from config.json and perform the login.
    with open('config.json') as config_file:
        data = json.load(config_file)
    bot = EasyApplyLinkedin(data)
    bot.login_linkedin()
| true |
8938a8415ba96730e576c530dcf8fe8faded0276 | Python | bvsbrk/Algos | /src/CodeChef/snackdown_1a/cardmgk.py | UTF-8 | 777 | 2.671875 | 3 | [] | no_license | from bisect import bisect_right as bs
from collections import Counter
if __name__ == '__main__':
    # For each test case: print YES if the array equals its sorted order, or
    # equals a rotation of the sorted order aligned on the first element's run.
    for _ in range(int(input().strip())):
        n = int(input().strip())
        arr = [int(__) for __ in input().strip().split()]
        srtd = sorted(arr)
        co = Counter(arr)
        # NOTE(review): `n` only consumes the length line; `co` is computed
        # but never used afterwards.
        if arr == srtd:
            print("YES")
        else:
            # Position where arr[0] would end in sorted order...
            idx = bs(srtd, arr[0])
            con_count = 0
            ch = arr[0]
            i = 0
            # ...minus the length of the leading run of equal elements.
            while arr[i] == ch:
                con_count += 1
                i += 1
            # print(srtd[idx:] + srtd[:idx])
            idx -= con_count
            # print(srtd[idx:] + srtd[:idx])
            if arr == srtd[idx:] + srtd[:idx]:
                print("YES")
            else:
                print("NO")
| true |
6730578fa7ddc48e1614f7cdece0495b32fe0384 | Python | danielct/Honours | /Numerics/Pumps.py | UTF-8 | 1,318 | 3.4375 | 3 | [] | no_license | import numpy as np
class SpatialFunction(object):
    """
    Abstract base class; not to be instantiated directly.

    A spatial function (such as the pump or the potential) must provide:
      - ``function``: a callable taking an x grid and a y grid and
        returning the spatial density (e.g. pump power density), and
      - ``charSize``: a characteristic size so the grid extent can be
        scaled to the feature.
    """
    # TODO: Write the above more clearly
    def __init__(self):
        # Subclasses must override __init__ and set function/charSize.
        raise NotImplementedError()
class gaussianPump(SpatialFunction):
    """
    A Gaussian pump
    """
    # TODO: Allow for no scaling.
    # TODO: Allow for ellipsoidal spot shape
    def __init__(self, sigma, P0, Pth):
        """
        Make a Gaussian pump with maximum power P0 * Pth, where Pth is the
        threshold value of P in the normalised GPE.

        Parameters:
            sigma: value of sigma for the Gaussian. Should be in SI units
            P0: Maximum pump power. In units of Pth.
            Pth: The value of the threshold power in the scaled GPE.
        """
        # Bug fix: numpy has no `ln` attribute - the natural logarithm is
        # np.log.  charSize is the FWHM of the spot: 2*sqrt(2*ln 2)*sigma.
        self.charSize = 2.0 * np.sqrt(2 * np.log(2)) * sigma
        self.function = lambda x, y: (P0 * Pth *
                                      np.exp(-0.5 * (x**2 + y**2) / sigma**2))
| true |
2d2510756b24a90f2a7fb145f37c5a5110b32ff4 | Python | gregorgabrovsek/ProjectEuler | /Problem058.py | UTF-8 | 841 | 3.640625 | 4 | [] | no_license | # Setting the diagonal direction functions:
# Project Euler 58: find the side length of the square spiral at which the
# ratio of primes along both diagonals first falls below 10%.
# Corner-value formulas for each diagonal, indexed by ring number x:
u_r = lambda x: 4 * (x ** 2) - 10 * x + 7 # OEIS: A054554
u_l = lambda x: 4 * ((x - 1) ** 2) + 1 # OEIS: A053755
d_l = lambda x: 4 * (x ** 2) - 6 * x + 3 # OEIS: A054569
d_r = lambda x: (2 * (x - 1) + 1) ** 2 # OEIS: A016754
# Trial division over odd divisors only.
# NOTE(review): is_prime(1) -> True and is_prime(2) -> False; both are
# harmless here because the loop below only ever tests corner values >= 3.
is_prime = lambda y: y % 2 == 1 and len(list(filter(lambda x: y % x == 0, range(3, int(y ** 0.5) + 1, 2)))) == 0
def get_more_diagonals_and_check_if_they_are_prime(n: int) -> int:
    # we needn't check the down-right diagonal - it's always a square number
    return sum([is_prime(diagonal(n)) for diagonal in [u_r, u_l, d_l]])
# counter = (primes on the diagonals, total diagonal values); the total is
# seeded at 1 for the centre cell.  `current` is the ring number.
counter = (0, 1)
current = 2
while True:
    counter = (counter[0] + get_more_diagonals_and_check_if_they_are_prime(current), counter[1] + 4)
    if counter[0] / counter[1] < 0.1:
        break
    current += 1
print(current * 2 - 1)
| true |
e75290144f8e5da84c1698d3eb8e08a0922b669a | Python | San-Holo/Adversarial-generation | /utils/build_network_utils_2D.py | UTF-8 | 7,144 | 3.0625 | 3 | [] | no_license | import numpy as np
import pandas as pd
import torch
import torch.nn as nn
def conv_block(in_filter, output_filter, nb_conv, kernel_size, stride, padding, final_nbchannels, normalize, wasserstein, layer_norm, spectral_norm, dropout, activation_function=nn.LeakyReLU(0.2, inplace=True)):
    """To simplify the creation of convolutional sequences for discriminator

    Parameters
    ----------
    in_filter : int
        Number of filters that we want in entry
    output_filter : int
        Number of filters that we want in output
    nb_conv : int
        Number of convolution layers
    kernel_size, stride, padding : int
        We assume that the kernel is a square
    final_nbchannels : int
        Where to stop the classic pattern Conv, Norm, Act
    activation_function : nn Function
        Activation function after each convolution.  NOTE: the default is
        evaluated once at import time, so the same module instance is shared
        by every call and appended after each conv (harmless here because
        LeakyReLU holds no parameters).
    normalize : boolean
        Add normalization or not
    wasserstein : boolean
        If True, we must remove sigmoid at the end
    layer_norm : boolean
        If True, we use LayerNorm instead of batch norm -> As it's done in WGAN-GP
    spectral_norm : boolean
        If true, we use SpectralNorm instead of others -> Seems to be the real state of the art
    dropout : (Boolean, float) tuple
        Whether we use dropout or not, and its corresponding probability. It was used in wassertein-GAN-GP-CT paper.

    Returns
    ---------
    sequential : list of nn.Module
        The convolutional sequence that we were seeking

    Raises
    ------
    ValueError
        If both layer_norm and spectral_norm are requested (no published
        setup combines them), and nb_conv >= 1.
    """
    nbchannel = in_filter
    nbfilter = output_filter
    sequential = []
    for _ in range(nb_conv):
        # Same mutually-exclusive check as before, but with an explicit
        # message instead of a bare `raise ValueError`.
        if layer_norm and spectral_norm:
            raise ValueError("layer_norm and spectral_norm are mutually exclusive")
        conv = nn.Conv2d(nbchannel, nbfilter, kernel_size, stride, padding, bias=False)
        if spectral_norm:
            conv = nn.utils.spectral_norm(conv)
        sequential.append(conv)
        nbchannel = nbfilter
        if nbchannel != final_nbchannels:
            # Classic Conv -> Norm -> Activation (-> Dropout) pattern.
            if normalize:
                if layer_norm:
                    # GroupNorm with a single group normalizes over C,H,W,
                    # i.e. the LayerNorm variant used by WGAN-GP.
                    sequential.append(nn.GroupNorm(1, nbfilter))
                else:
                    sequential.append(nn.BatchNorm2d(nbfilter))
            sequential.append(activation_function)
            if dropout[0]:
                sequential.append(nn.Dropout(p=dropout[1]))
        else:
            # Final stage: a Wasserstein critic outputs raw scores, so only
            # append a Sigmoid for the vanilla-GAN discriminator.
            if not wasserstein:
                sequential.append(nn.Sigmoid())
    return sequential
def deconv_block(latent_vector_size, output_channels, nb_deconv, kernel_size, stride, padding, final_nbchannels, normalize, layer_norm, spectral_norm, batch_norm_proj, activation_function=nn.ReLU()):
"""To simplify the creation of fractionned strided convolutional sequences for the generator
Parameters
----------
latent_vector_size : int
Dimensionnality of the sampled vector
output_channels : int
Number of filters ~ channels that we want in output
nb_deconv : int
Number of deconvolution layers (Deconvolution is not a correct term there though)
kernel_size, stride, padding : int
We assume that the kernel is a square
final_nbchannels : int
Where to stop the classic pattern Conv, Norm, Act
activation_function : nn Function
Activation function after each fractionned strided convolution
normalize : boolean
Add normalization or not
layer_norm : boolean
If True, we must use LayerNorm instead of Batch norm -> As it's done in WGAN-GP
spectral_norm : boolean
If true, we use SpectralNorm instead of others -> Seems to be the real state of the art
batch_norm_proj : boolean
If True, add Batch norm right after spectral norm layer -> As described in Self-Attention GAN.
Actually, it's quite different from SAGAN but we take it as a source of inspiration
Returns
---------
sequential : Sequential torch Object
The convolutional sequence that we were seeking
"""
nbchannel = latent_vector_size
nbfilter = output_channels
sequential = []
for i in range(nb_deconv):
# Had to change the code here, instead of using my own implementation
if layer_norm and spectral_norm: # No one ever used both of them at the same time -> Logical
raise ValueError
else:
if spectral_norm:
tmp_deconv = nn.utils.spectral_norm(nn.ConvTranspose2d(nbchannel, nbfilter, kernel_size, stride, padding, bias=False))
else:
tmp_deconv = nn.ConvTranspose2d(nbchannel, nbfilter, kernel_size, stride, padding, bias=False)
sequential.append(tmp_deconv)
nbchannel = nbfilter
if nbchannel != final_nbchannels:
if normalize:
if layer_norm:
sequential.append(nn.GroupNorm(1, nbfilter))
elif spectral_norm and batch_norm_proj:
sequential.append(nn.BatchNorm2d(nbfilter))
else:
sequential.append(nn.BatchNorm2d(nbfilter))
sequential.append(activation_function)
else:
sequential.append(nn.Tanh())
return sequential
def network_from_shape(net_structure, activation=nn.ReLU()):
"""To simplify the creation of fully connected layers sequences
Parameters
----------
net structure: int list
Describe each layer size -> one entry of the list is a layer conv_size
activation_function : nn Function
Activation function after each layer of the net
Returns
---------
temp : Torch object list
The fully connected sequence with the last activation function "tanh"
"""
temp = []
for prev, next in zip(net_structure[:-1], net_structure[1:]):
temp.append(nn.Linear(prev, next))
temp.append(activation)
temp = temp[:-1] # Remove last activation
return temp
def weights_init(module, mean=0.0, std=0.02):
"""To init weights in a layer according to the vast majority of papers
Parameters
----------
module : torch nn module
Each module = A layer usually
mean : float
Mean of the normal distribution used to init a layer
std : float
Standard deviation of the normal distribution used to init a layer
"""
if isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d):
nn.init.normal_(module.weight.data, mean, std)
elif isinstance(module, nn.BatchNorm2d):
nn.init.normal_(module.weight.data, 1.0, std)
nn.init.constant_(module.bias.data, 0)
| true |
5ec286b6bc07d645aa2789d0976e5b81083b06d3 | Python | samar2326/Python-Programs | /copy.py | UTF-8 | 542 | 3.84375 | 4 | [] | no_license |
""" Wap to copy from 1 file to another"""
from shutil import copyfile
print("Enter x for exit")
source_file = input("Enter source file name:")
if(source_file == "x"):
exit()
else:
destination_file = input("Enter destination file name:")
copyfile(source_file,destination_file)
print("File copied successfully...")
check = input("Want to display the content(y/n):")
if(check == "n"):
exit()
else:
temp = open(destination_file,"r")
print(temp.read())
temp.close()
| true |
2b15a2385ba8f9eaea875b346596a02f9e0be4f7 | Python | AndreyPankov89/python-glo | /lesson11/task1.py | UTF-8 | 291 | 3.984375 | 4 | [] | no_license | n = int(input('Введите количество фраз '))
phrases = []
for i in range(n):
phrases.append(input())
search_phrase = input('Введите фразу для поиска ')
for phrase in phrases:
if(search_phrase.lower() in phrase.lower()):
print(phrase) | true |
5e694d37864ca1a89e1cf35e30807945e6fc5faf | Python | michaelSmithUCC/bored_games | /db_functionality/setup_db.py | UTF-8 | 493 | 2.609375 | 3 | [] | no_license | def words_connect():
import pymysql as db
failed=0
server="----"
database="----"
username="----"
password="----"
try:
connection = db.connect(server, username, password, database)
if connection:
cursor =connection.cursor(db.cursors.DictCursor)
if cursor:
return cursor
return failed
except:
return failed
def words_close(connection, cursor):
connection.close()
cursor.close()
| true |
c9fbe4cdacfb4d1f46a55e827ae9776be85194ef | Python | witness97/computationalphysics_N2015301020062 | /6 in one.py | UTF-8 | 441 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 1 14:38:00 2018
@author: wangshiru
"""
from pylab import *
from random import choice
numwalk = 6
length = 200
data = zeros((numwalk, length), int)
for n in range(numwalk):
for x in range(1, length):
step = choice([-1, 1])
data[n,x] = data[n,x-1] + step
plot(range(length), data[n,:])
xlabel('t')
axis ((0,200, -20, 20))
savefig('Random_Walk_example.svg')
show()
| true |
5182bf4b4bad3f873ece298b59c193ad51980540 | Python | pawandeepthind/dev-multivm | /server/library/download.py | UTF-8 | 1,353 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Author: Pawandeep Singh - @rohit01 <pawandeep.singh@expicient.com>
#
# Ansible module to download file from ftp.
#
#---- Documentation Start ----------------------------------------------------#
DOCUMENTATION = '''
---
version_added: "2.0.1"
module: download
short_description: download
description:
- This module downloads a file from ftp to a local
options:
url:
description:
Ftp url to download the file
required: true
dest:
description:
Path to the destination.
required: true
requirements: []
author: Pawandeep Singh
'''
EXAMPLES = '''
- name: "Download the file"
download: url="Url to download" dest_path="/path/to/destination"
'''
import urllib
#---- Logic Start ------------------------------------------------------------#
def main():
# Note: 'AnsibleModule' is an Ansible utility imported below
module = AnsibleModule(
argument_spec=dict(
url=dict(required=True),
dest=dict(required=True),
),
supports_check_mode=True
)
url = module.params['url']
dest = module.params['dest']
urllib.urlretrieve (url, dest)
module.exit_json(text="File (%s) successfully downloaded at (%s)" % (url, dest))
#---- Import Ansible Utilities (Ansible Framework) ---------------------------#
from ansible.module_utils.basic import *
main() | true |
fa412be4974180b52b753b7ef87854eb92068c7f | Python | bcwan/PythonRepo | /Horse/Inheritance/Chef.py | UTF-8 | 199 | 2.625 | 3 | [] | no_license | class Chef:
def make_chicken(self):
print("Cook the chicken!")
def make_salad(self):
print("Make the salad.")
def make_special_dish(self):
print("Make a special dish tonight!") | true |
f5a52d9c640519e18a85c830a2a2ed4cc4a06f5a | Python | astrofrog/old-astropy-versions | /v0.4.2/api/astropy-convolution-Box1DKernel-1.py | UTF-8 | 221 | 2.765625 | 3 | [
"BSD-3-Clause"
] | permissive | import matplotlib.pyplot as plt
from astropy.convolution import Box1DKernel
box_1D_kernel = Box1DKernel(9)
plt.plot(box_1D_kernel, drawstyle='steps')
plt.xlim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show() | true |
1c6d6af25fe9aac936e8d91371d4ff8f11b4ff51 | Python | sreejithev/thinkpythonsolutions | /c5/condition.py | UTF-8 | 157 | 3.640625 | 4 | [] | no_license | x = input(int)
if x > 0:
print ' x is positive'
if x < 0:
pass # need to handle negative values!
if x%2 == 0:
print 'x is even'
else:
print 'x is odd'
| true |
595be2e074283aa43763c3fd9188e480ba0c5de1 | Python | abdallawi/PythonBasic | /Exercices/ExaminationSchedule.py | UTF-8 | 209 | 3.46875 | 3 | [] | no_license |
exam_st_date = (12, 10, 2019)
print(f'The examination will start from :', exam_st_date[0], '/', exam_st_date[1], '/', exam_st_date[2])
print("The examination will start from : %i / %i / %i" % exam_st_date)
| true |
6e5ceb89e3a6cee5802469f2a70c88761b8f1fdf | Python | brickgao/leetcode | /src/algorithms/python/Surrounded_Regions.py | UTF-8 | 2,015 | 3.359375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from Queue import Queue
class Solution:
def bfs(self, x, y):
q = Queue()
q.put((x, y))
self.vis[x][y] = True
self.mat[x][y] = True
while not q.empty():
top_x, top_y = q.get()
for mv in self.mvs:
nx, ny = top_x + mv[0], top_y + mv[1]
if nx < 0 or nx >= self.m:
continue
if ny < 0 or ny >= self.n:
continue
if not self.vis[nx][ny] and self.board[nx][ny] == 'O':
q.put((nx, ny))
self.vis[nx][ny] = True
self.mat[nx][ny] = True
# @param {character[][]} board
# @return {void} Do not return anything, modify board in-place instead.
def solve(self, board):
if board == []:
return
self.mvs = [(0, 1), (0, -1), (1, 0), (-1, 0)]
self.board = board
self.m, self.n = len(board), len(board[0])
m, n = self.m, self.n
self.mat = [[False for i in range(n)] for j in range(m)]
self.vis = [[False for i in range(n)] for j in range(m)]
for y in range(self.n):
if self.board[0][y] == 'O' and not self.vis[0][y]:
self.bfs(0, y)
if self.board[m - 1][y] == 'O' and not self.vis[m - 1][y]:
self.bfs(m - 1, y)
for x in range(m):
if self.board[x][0] == 'O' and not self.vis[x][0]:
self.bfs(x, 0)
if self.board[x][n - 1] == 'O' and not self.vis[x][n - 1]:
self.bfs(x, n - 1)
for x in range(m):
for y in range(n):
if not self.mat[x][y]:
board[x][y] = 'X'
return board
if __name__ == "__main__":
solution = Solution()
print solution.solve(
[
['X', 'X', 'X', 'X'],
['X', 'O', 'O', 'X'],
['X', 'X', 'O', 'X'],
['X', 'O', 'X', 'X']
]
)
| true |
d117d2a686eee4d9cfbfac9004b4b28498bb8dca | Python | yestherlee/samplefiles | /Homework 3.py | UTF-8 | 3,149 | 3.71875 | 4 | [] | no_license | #Homework 3 by Ye Eun (Esther) Lee
#Establish Monopoly property group data
psize = {'purple':2, 'light blue':3,'maroon':3, 'orange':3, 'red':3, 'yellow':3, 'green':3, 'dark blue':2}
pcost = {'purple':50, 'light blue':50,'maroon':100, 'orange':100, 'red':150, 'yellow':150, 'green':200, 'dark blue':200}
#Input color block user is building on
color = input('Which color block will you be building on? ' )
#Prompt again if invalid entry for color
while color not in ('purple', 'light blue', 'maroon', 'orange', 'red', 'yellow', 'green', 'dark blue'):
if color == 'blue':
color = input('Light blue or dark blue? ')
else:
print('Color not valid. Please try again.')
color = input('Which color block will you be building on? ' )
#Input money user has to spend
money = input('How much money do you have to spend? ' )
money = int(money)
#Retrieve cost of houses on property from dictionary
cost = pcost[color]
#Calculate number of houses that can be built
houses = money // cost
#Retrieve size of property (number of properties in group) from dictionary
size = psize[color]
#Calculate evenly distributed number of houses on each property, and remainder to be distributed
num_equal_houses = houses//size
remainder = houses%size
#Identify how many properties will receive extra houses
extra_houses = remainder
#Identify how many properties will receive equal distribution of houses
equal_houses = size - extra_houses
#Determine how many houses those properties with more will receive
num_extra_houses = num_equal_houses + 1
#Output result
if houses == 0:
print('You cannot afford even one house.')
elif num_equal_houses >= 5 and extra_houses == 0:
num_equal_houses = 'a hotel'
print('There are',size, color,'properties and each house costs', cost)
print('You can build',houses,'houses --', equal_houses, 'will have', num_equal_houses)
elif num_equal_houses >= 5 and num_extra_houses >= 5:
print('There are',size, color,'properties and each house costs', cost)
print('You can build',houses,'houses --', equal_houses+extra_houses, 'will have a hotel')
elif num_equal_houses >= 5:
num_equal_houses = 'a hotel'
print('There are',size, color,'properties and each house costs', cost)
print('You can build',houses,'houses --', equal_houses, 'will have', num_equal_houses, 'and', extra_houses, 'will have', num_extra_houses)
elif num_extra_houses >= 5:
num_extra_houses = 'a hotel'
print('There are',size, color,'properties and each house costs', cost)
print('You can build',houses,'houses --', equal_houses, 'will have', num_equal_houses, 'and', extra_houses, 'will have', num_extra_houses)
elif extra_houses == 0:
print('There are',size, color,'properties and each house costs', cost)
print('You can build',houses,'houses --', equal_houses, 'will have', num_equal_houses)
else:
print('There are',size, color,'properties and each house costs', cost)
print('You can build',houses,'houses --', equal_houses, 'will have', num_equal_houses, 'and', extra_houses, 'will have', num_extra_houses)
| true |
8060df2db16a83e42a407e355154a6119928cdee | Python | hrtoomer/BIOL5153 | /assn07.py | UTF-8 | 1,981 | 3.34375 | 3 | [] | no_license | #! /usr/bin/env python3
# assn07
from Bio import SeqIO
import argparse
fasta_file='watermelon.fsa'
gff_file ='watermelon.gff'
def get_args():
# create an argument parser object
parser = argparse.ArgumentParser(description = 'This script returns the Fibonacci number at a specified position in the Fibonacci sequence')
# add positional argument for the input position in the Fibonacci sequence
parser.add_argument(fasta_file, help="The FASTA file you want to input", type=str)
parser.add_argument(gff_file, help="The GFF file you want to input", type=str)
# parse the arguments
return parser.parse_args()
def read_fasta(fasta_file):
# read in the FASTA file
genome = SeqIO.read(fasta_file, 'fasta') # genome.seq is the pure genome sequence
# print (genome.seq)
return(genome)
def read_gff(gff_file):
gff=open(gff_file)
return(gff)
def calculation(gff, genome):
for line in gff:
line = line.rstrip('\n')
fields = line.split('\t') #list the categories of the gff file by the splitting where the tab character is
start = int(fields[3]) # start
end = int(fields[4]) # stop
exon=genome.seq[start:end] #create an individual substring
# calculate GC content from substring
lengthexon=len(exon)
g_count=exon.count('G')
c_count=exon.count('C')
gc_content = g_count + c_count / lengthexon # calculate GC content for each substring
return("GC content is " + str(gc_content))
gff.close()
# reverse complement for each '-' strand
strand=(str(fields[6]))
if strand=='-':
print(exon.reverse_complement())
def main():
fasta=read_fasta(fasta_file)
gff=read_gff(gff_file)
calculation()
# get arguments before calling main
args = get_args()
# execute by calling main
if __name__=="__main__":
main()
| true |
824af8fc65ff787d3da4a303c0f1e0745dd00947 | Python | kumgleb/SemanticSegmentation | /utils/train_utils.py | UTF-8 | 995 | 2.90625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
def train_monitor(losses_train, losses_train_mean, losses_val):
fig, ax = plt.subplots(1, 2, figsize=(16, 8))
iters = np.arange(len(losses_train))
n_vals = len(losses_val)
step = int(len(losses_train) / n_vals)
val_steps = np.linspace(step, step*n_vals, n_vals)
for i in range(2):
ax[i].plot(iters, losses_train, linewidth=1.5, alpha=0.6,
c='tab:blue', label='train loss')
ax[i].plot(iters, losses_train_mean, linewidth=2, alpha=1,
c='tab:blue', label='avg10 train loss')
ax[i].plot(val_steps, losses_val, linewidth=2, alpha=1,
c='tab:red', label='val loss')
ax[i].set_ylabel('CrossEntropy loss')
ax[i].set_xlabel('Iteration')
ax[i].legend()
ax[i].grid()
if i == 1:
ax[i].set_yscale('log')
plt.show()
| true |
a3aae4b00c8b4aa2e64b65b6ab9d1c78255182bc | Python | MarceloBCS/Exercicios_Curso_em_video | /aula_020.py | UTF-8 | 752 | 4.125 | 4 | [] | no_license | def mensagem(txt):
print('-='*10)
print(txt)
print('-='*10)
def soma(a, b):
print(a+b)
def som_pac(*tam):
s = 0
for c in tam:
s += c
print(f'somando os {tam} é {s}')
def contador(*num):
for c in num:
print(num, end='')
print(c, end=' | ')
print()
def dobravalor(*lis):
alista = []
for k, v in enumerate(lis):
#print(f'lis{k} = {2*v}', end=' | ')
alista.append(2*lis[k])
#print()
print(alista)
mensagem('Python is not too easy to learn, but I will')
soma(4, 5)
soma(b=3, a=9)
contador(4, 4, 10, 1, 0)
contador(1, 3)
print()
valores = [7, 2, 5, 0, 4]
dobravalor(valores)
dobravalor(7, 2, 5, 0, 4)
print()
som_pac(4, 7, 1, 1, 9, 2, 6)
som_pac(1, 2, 5)
| true |
62e2b74a80e24805b9db3d6eaa00886dfee6a998 | Python | Clem28L/test | /Chapitre11/SearchString.py | UTF-8 | 309 | 3.734375 | 4 | [] | no_license | SearchMe = "La pomme est rouge et la luzerne est verte !"
print(SearchMe.find("est"))
print(SearchMe.rfind("est"))
print(SearchMe.count("est"))
print(SearchMe.startswith("La"))
print(SearchMe.endswith("La"))
print(SearchMe.replace("pomme", "voiture")
.replace("luzerne", "camionnette"))
| true |
824f66b3ce854d993ac3dd2a0ecd3091a8b13bcc | Python | manosai/tweepy | /assignment_4/majority_vote_template.py | UTF-8 | 2,996 | 3.46875 | 3 | [
"MIT"
] | permissive | #!/bin/python
import csv
import operator
from label_map import mturk_labels
class MajorityVoteGrader():
"""
Implements majority vote quality estimation.
estimate_data_labels returns the most popular label for each tweet
estimate_worker_qualities returns, for each worker, the proportion of labels which matched the majority label
"""
#Initialize grader with path to graded HITs csv
def __init__(self, csv_path):
self.csv_path = csv_path
#Compile dictionary {tweet : {number of votes for each label}}
def get_tweet_data_from_csv(self):
tweet_data = dict()
for hit in csv.DictReader(open(self.csv_path, 'rU')):
for i in range(0,10):
tweetId = '%s-%d'%(hit['HITId'],i)
if tweetId not in tweet_data : tweet_data[tweetId] = {'positive':0,'negative':0,'neutral':0}
label = mturk_labels[hit['Answer.item%d'%i]]
if not(label == 'NA') : tweet_data[tweetId][label] += 1
return tweet_data
#Compile dictionary of {worker : {tweet : worker's label}}
def get_worker_data_from_csv(self):
worker_data = dict()
for hit in csv.DictReader(open(self.csv_path, 'rU')):
worker = hit['WorkerId']
if worker not in worker_data : worker_data[worker] = {}
for i in range(0,10):
tweetId = '%s-%d'%(hit['HITId'],i)
label = mturk_labels[hit['Answer.item%d'%i]]
if not(label == 'NA') : worker_data[worker][tweetId] = label
return worker_data
#Return a dictionary of {tweet : most popular label}
def estimate_data_labels(self):
tweet_data = self.get_tweet_data_from_csv()
label_estimates = list()
for tweet in tweet_data:
#get the most popular label for this tweet and store its value in the variable 'estimate'
#See the method get_tweet_data_from_csv() to see what the variable 'tweet' contains
positive_count = tweet_data[tweet]['positive']
negative_count = tweet_data[tweet]['negative']
neutral_count = tweet_data[tweet]['neutral']
counts = [positive_count, negative_count, neutral_count]
estimate = max(counts)
label_estimates.append({'objectName':tweet, 'categoryName':estimate})
return {w['objectName'] : w['categoryName'] for w in label_estimates}
#Return a dictionary of {worker : average worker accuracy}
def estimate_worker_qualities(self):
majority_labels = self.estimate_data_labels()
worker_data = self.get_worker_data_from_csv()
worker_estimates = list()
for worker in worker_data:
#TODO compute the proportion of this worker's labels which match the majority label
#and store it in the variable 'accuracy'
#You should look at the methods estimate_data_labels() and get_worker_data_from_csv()
worker_tweets = worker_data[worker]
correct = 0
# keep track of all the correctly labeled tweets
for tweet in worker_tweets:
if worker_tweets[tweet] == majority_labels[tweet] : correct += 1
accuracy = correct / len(worker_tweets)
worker_estimates.append({'workerName':worker, 'value':accuracy})
return {w['workerName'] : w['value'] for w in worker_estimates}
| true |
f0ed8ecfbc426a6ef738430e07c438a4e4b75e4b | Python | Mertkmrc/video-feedback-system | /windowing.py | UTF-8 | 2,756 | 2.625 | 3 | [] | no_license | from sklearn.metrics.pairwise import cosine_similarity
from transformers import AutoTokenizer, AutoModel
import torch
def wndw(input, win_len):
out = []
idx = []
step_size = int(win_len / 2)
le = len(input)
base_idx = 0
end_idx = win_len
# print(le)
while (end_idx < le):
# print(base_idx, end_idx)
tmp = input[base_idx:end_idx]
idx.append(base_idx)
out.append(" ".join(tmp))
base_idx += step_size
end_idx += step_size
# print(out)
return out, idx
#Cosine similarty funtion via pytorch
def cos_sim_calc(input_sequence, chap_num, vid_num, win_len):
path = "by_video/ch{}_{}.text"
list_of_lists = []
try:
with open(path.format(chap_num, vid_num)) as f:
for line in f:
list_of_lists.append(line)
except:
return " Video not found", 0,0
with open(path.format(chap_num, vid_num)) as f:
for line in f:
list_of_lists.append(line)
list_of_lists, idx = wndw(list_of_lists, win_len)
list_of_lists.insert(0, input_sequence)
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens')
model = AutoModel.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens')
sentences = list_of_lists
tokens = {'input_ids': [], 'attention_mask': []}
for sentence in sentences:
# encode each sentence and append to dictionary
new_tokens = tokenizer.encode_plus(sentence, max_length=128,
truncation=True, padding='max_length',
return_tensors='pt')
tokens['input_ids'].append(new_tokens['input_ids'][0])
tokens['attention_mask'].append(new_tokens['attention_mask'][0])
#Collecting the tensors in one tensor
tokens['input_ids'] = torch.stack(tokens['input_ids'])
tokens['attention_mask'] = torch.stack(tokens['attention_mask'])
outputs = model(**tokens)
embeddings = outputs.last_hidden_state
attention_mask = tokens['attention_mask']
mask = attention_mask.unsqueeze(-1).expand(embeddings.size()).float()
masked_embeddings = embeddings * mask
summed = torch.sum(masked_embeddings, 1)
summed_mask = torch.clamp(mask.sum(1), min=1e-9)
mean_pooled = summed / summed_mask
mean_pooled = mean_pooled.detach().numpy()
cos_dis = cosine_similarity([mean_pooled[0]], mean_pooled[1:])
matched_text = sentences[cos_dis.argmax() + 1]
start_id_match = idx[cos_dis.argmax()] / 3 - win_len + 1
similarity_val = cos_dis.max()
return matched_text ,similarity_val , start_id_match
| true |
b78eef7bd2443a5120f129c112462b64aa4d1f6c | Python | Hyper10n/LearningPython | /find_from_txt_file.py | UTF-8 | 323 | 2.90625 | 3 | [
"MIT"
] | permissive | def find_from_txt_file(source):
email_list = []
try:
fhand = open(source)
except:
print('Could not open file')
for line in fhand:
for word in line.split():
if word == 'From':
email_list.append(line.split()[1])
fhand.close()
return email_list
| true |
0aba617ab855c93d848836723091caa4289d50d0 | Python | MichalMaM/ella | /ella/core/templatetags/authors.py | UTF-8 | 2,517 | 3.0625 | 3 | [
"BSD-3-Clause"
] | permissive | from django import template
register = template.Library()
class AuthorListingNode(template.Node):
def __init__(self, obj_var, count, var_name, omit_var=None):
self.obj_var = obj_var
self.count = int(count)
self.var_name = var_name
self.omit_var = omit_var
def render(self, context):
try:
author = template.Variable(self.obj_var).resolve(context)
except template.VariableDoesNotExist:
return ''
if not author:
return ''
if self.omit_var is not None:
try:
omit = template.Variable(self.omit_var).resolve(context)
except template.VariableDoesNotExist:
return ''
else:
omit = None
if omit is not None:
published = author.recently_published(exclude=omit)
else:
published = author.recently_published()
context[self.var_name] = published[:self.count]
return ''
@register.tag('author_listing')
def do_author_listing(parser, token):
"""
Get N listing objects that were published by given author recently and optionally
omit a publishable object in results.
**Usage**::
{% author_listing <author> <limit> as <result> [omit <obj>] %}
**Parameters**::
================================== ================================================
Option Description
================================== ================================================
``author`` Author to load objects for.
``limit`` Maximum number of objects to store,
``result`` Store the resulting list in context under given
name.
================================== ================================================
**Examples**::
{% author_listing object.authors.all.0 10 as article_listing %}
"""
contents = token.split_contents()
if len(contents) not in [5, 7]:
raise template.TemplateSyntaxError('%r tag requires 4 or 6 arguments.' % contents[0])
elif len(contents) == 5:
tag, obj_var, count, fill, var_name = contents
return AuthorListingNode(obj_var, count, var_name)
else:
tag, obj_var, count, fill, var_name, filll, omit_var = contents
return AuthorListingNode(obj_var, count, var_name, omit_var)
| true |
88492d2ddd14c32ea3abfe6e148eb2f8cd1195ed | Python | Susama91/Project | /W3Source/List/list8.py | UTF-8 | 145 | 4.03125 | 4 | [] | no_license | #Write a Python program to check a list is empty or not
l=[10,20]
if not l:
print("empty list")
else:
print("list contains element: ",l)
| true |
e218b7f5777a8f95b9ecbeac280b7b0b72144ac1 | Python | 18720936539/CANTEMIST | /cantemist/cantemist-evaluation-library-master/src/main.py | UTF-8 | 1,835 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 8 15:22:29 2020
@author: tonifuc3m
"""
import argparse
import warnings
import cantemist_coding
import cantemist_ner_norm
def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
return '%s:%s: %s: %s\n' % (filename, lineno, category.__name__, message)
warnings.formatwarning = warning_on_one_line
def parse_arguments():
'''
DESCRIPTION: Parse command line arguments
'''
parser = argparse.ArgumentParser(description='process user given parameters')
parser.add_argument("-g", "--gs_path", required = True, dest = "gs_path",
help = "path to GS file")
parser.add_argument("-p", "--pred_path", required = True, dest = "pred_path",
help = "path to predictions file")
parser.add_argument("-c", "--valid_codes_path", required = False,
default = '../valid-codes.tsv',
dest = "codes_path", help = "path to valid codes TSV")
parser.add_argument('-s', '--subtask', required = True, dest = 'subtask',
choices=['ner', 'norm', 'coding'],
help = 'Subtask name')
args = parser.parse_args()
gs_path = args.gs_path
pred_path = args.pred_path
codes_path = args.codes_path
subtask = args.subtask
return gs_path, pred_path, codes_path, subtask
if __name__ == '__main__':
gs_path, pred_path, codes_path, subtask = parse_arguments()
if subtask == 'coding':
cantemist_coding.main(gs_path, pred_path, codes_path)
elif subtask == 'ner':
cantemist_ner_norm.main(gs_path, pred_path, subtask='ner')
elif subtask == 'norm':
cantemist_ner_norm.main(gs_path, pred_path, subtask='norm')
| true |
cbdbacccdf996cc5d3081796af309aa3716090bf | Python | melikesenol/PythonBeginnerExercise | /Decorators/decorator.py | UTF-8 | 384 | 3.96875 | 4 | [] | no_license | # High order function -> Excepts another function inside
# Decorators Pattern
def my_decorator(func):
def wrap_func(*args, **kwargs):
print('****')
func(*args, **kwargs)
print('******')
return wrap_func
@my_decorator
def hello(greeting, emoji = ':('):
print(greeting, emoji)
# @my_decorator does = a = my_decorator(hello)
hello("hi") | true |
9d6065c2d8a539821ac0a1d57f60b6a8b2076080 | Python | ZiyaoGeng/LeetCode | /Code/199.py | UTF-8 | 553 | 2.859375 | 3 | [] | no_license | from typing import List
import sys
sys.path.append('../functions/')
from tree import TreeNode
class Solution:
def rightSideView(self, root: TreeNode) -> List[int]:
if root == None:
return None
que, l = [], []
count, length = 0, 1
que.append(root)
while len(que) != 0:
p = que.pop(0)
count += 1
if p.left != None:
que.append(p.left)
if p.right != None:
que.append(p.right)
if count == length:
l.append(p.val)
length = len(que)
count = 0
return l
| true |
cec776fd9bbd3e094f9d3de8e63e0b5f1ccba5eb | Python | KazukiOhta/tsglive | /workingDirectoy/main.py | UTF-8 | 17,552 | 3.015625 | 3 | [] | no_license | from math import exp
"""
Matrix class (substitution for numpy)
"""
class matrix():
    """Minimal 2-D matrix (list of row lists) — a lightweight numpy substitute.

    Attributes:
        matrix: the underlying list of row lists.
        rows, cols: dimensions.
        shape: the (rows, cols) tuple.
    """
    def __init__(self, lst2d=None, filename=None):
        """Build a matrix from a 2-D list, or from `filename` (CSV of floats) if given.

        Raises AssertionError if the rows are not all the same length, and
        IndexError when constructed empty (same as the original behavior).
        """
        if filename is None:
            # Fix: the original signature used a mutable default argument
            # (lst2d=[]); since the list is stored by reference, all
            # default-constructed instances would have shared one object.
            self.matrix = [] if lst2d is None else lst2d
        else:
            with open(filename) as f:
                self.matrix = [list(map(float, line.split(","))) for line in f]
        self.rows = len(self.matrix)
        self.cols = len(self.matrix[0])
        # Rectangularity check: every row must match the first row's length.
        for row in range(self.rows):
            assert len(self.matrix[row]) == self.cols, "inconsistent cols"
        self.shape = (self.rows, self.cols)
    def dot(self, matrix2):
        """Return the matrix product self @ matrix2 as a new matrix.

        Raises AssertionError when the inner dimensions disagree.
        """
        assert self.cols == matrix2.rows, "M1.rows does not match M2.cols"
        dotproduct = [
            [sum(self.matrix[r][i]*matrix2.matrix[i][c] for i in range(self.cols))
             for c in range(matrix2.cols)]
            for r in range(self.rows)
        ]
        return matrix(dotproduct)
    def broadcast(self, f):
        """Apply `f` element-wise and return the result as a new matrix."""
        return matrix([[f(v) for v in row] for row in self.matrix])
    def __str__(self):
        # Delegates to the nested-list repr, e.g. "[[1.0, 2.0], [3.0, 4.0]]".
        return str(self.matrix)
"""
vanillaAI class
"""
class vanillaAI:
    """Neural-network player for the March board game.

    A pre-trained two-layer fully connected network (weights W1, W2 loaded
    from CSV files) scores positions; moves are chosen by a one-ply
    lookahead over every legal move.
    """
    def __init__(self, filename, hidden_size = 50):
        # `filename` is the weight-file prefix: loads data/<filename>W1.csv
        # and data/<filename>W2.csv.
        self.hidden_size = hidden_size
        self.W1 = matrix(filename="data/"+filename+"W1.csv")
        self.W2 = matrix(filename="data/"+filename+"W2.csv")
        # Records of encoded boards and their chosen evaluations,
        # presumably collected for later training — TODO confirm.
        self.record_x = []
        self.record_y = []
    def move(self, march, recording=True, showeval = False, epsilon=0.001):
        """Pick the best move for the side to play on `march`.

        Returns a (frm, to) pair of single-bit masks, or None if no legal
        move exists.  `epsilon` squeezes network scores into
        [epsilon, 1-epsilon] so that the exact 0/1 values stay reserved for
        positions decided by the rules.
        """
        bestmove = None
        besteval = -float("inf") # Negamax style: converted below to my own win rate (1 - opponent eval).
        # Scan bit positions 9..54 — presumably the squares inside the wall
        # ring — TODO confirm.
        for i in range(9,55):
            frm = 1<<i
            if frm & march.b != 0:
                for to in march.tos(frm):
                    # Simulate on a copy; March.move() flips the board to the
                    # opponent's perspective afterwards.
                    child = March(march.b, march.r, march.bk, march.rk)
                    child.move(frm^to)
                    j = child.richJudge()
                    if j == 1:
                        # Side to move in `child` (the opponent) wins -> worst for us.
                        thiseval = 0
                    elif j == -1:
                        thiseval = 1
                    else:
                        # Network evaluates `child` from the opponent's view,
                        # so our score is 1 - eval, clamped by epsilon.
                        thiseval = (1-epsilon)-self.evaluate(child)[0][0]*(1-2*epsilon)
                    if thiseval == besteval and showeval:
                        # "衝突" = "collision": two candidate moves share a score.
                        print("衝突")
                        print("best:", bestmove, besteval)
                        print("this:", (frm, to), thiseval)
                    if thiseval >= besteval:
                        # >= means the later of two equally scored moves wins the tie.
                        besteval = thiseval
                        bestmove = (frm, to)
        if recording:
            self.record_x.append(self.boardToOnehotLabel(march))
            self.record_y.append(besteval)
        if showeval:
            # "私の勝率は...%" = "my win rate is ...%".
            print("私の勝率は{0:.1f}%".format((besteval)*100))
        return bestmove
    def boardToOnehotLabel(self, march):
        """Encode the four board bitmasks as one flat feature vector.

        bitToVec is defined elsewhere in this module; presumably it expands a
        64-bit board mask into a 64-entry vector — TODO confirm.
        """
        b = bitToVec(march.b)
        r = bitToVec(march.r)
        bk = bitToVec(march.bk)
        rk = bitToVec(march.rk)
        x = b+r+bk+rk
        return x
    def evaluate(self, march):
        # Returns the estimated win probability for blue (0..1), as the
        # nested-list contents of the output layer (indexed [0][0] by move()).
        x = matrix(self.boardToOnehotLabel(march))
        # NOTE(review): exp(-x) overflows for x < ~-709; pre-activations are
        # assumed to stay in a safe range — TODO confirm.
        sigmoid = lambda x: 1/(1+exp(-x))
        u1 = self.W1.dot(x)
        z1 = u1.broadcast(sigmoid)
        u2 = self.W2.dot(z1)
        y = u2.broadcast(sigmoid)
        return y.matrix
"""
March Rule
"""
class March:
    """Bitboard implementation of the "March" board game on an 8x8 grid.

    Square (x, y) is bit 1 << (8*y + x); the outer ring of squares is a
    wall.  Attributes:
        b, r   -- bitmasks of all blue / red pieces (kings included)
        bk, rk -- bitmasks of the blue / red king
        wall   -- bitmask of the border squares
    The board is always stored from the perspective of the side to move:
    move() flips it with reverseBoard() after every move, so "blue" below
    always means the player about to move.
    """
    def __init__(self,b=None,r=None,bk=None,rk=None):
        # Default setup: two rows of pieces per side.  The commented-out
        # lines are an earlier single-row (6-piece) variant.
        if b == None:
            #self.b = sum([1 << i for i in range(49, 55)])
            self.b = sum([1 << i for i in range(41, 47)]) + sum([1 << i for i in range(49, 55)])
        else:
            self.b = b
        if r == None:
            #self.r = sum([1 << i for i in range(9, 15)])
            self.r = sum([1 << i for i in range(9, 15)]) + sum([1 << i for i in range(17, 23)])
        else:
            self.r = r
        if bk == None:
            self.bk = 1 << 52
        else:
            self.bk = bk
        if rk == None:
            self.rk = 1 << 11
        else:
            self.rk = rk
        # Kings are always also part of their side's piece mask.
        self.b = (self.b | self.bk)
        self.r = (self.r | self.rk)
        # Wall = top row | bottom row | left column | right column.
        self.wall = sum([1 << i for i in range(0,8)]) | sum([1 << i for i in range(56,64)]) | sum([1 << i for i in range(0,64,8)]) | sum([1 << i for i in range(7,64,8)])
        self.turn = 0
        self.lastmove = 0
    def __str__(self):
        """Render the board: O/X = blue/red king, o/x = blue/red piece, # = wall, . = empty."""
        s = ""
        for y in range(8):
            for x in range(8):
                address = 8*y+x
                bit = 1<<address
                if self.bk & bit != 0 :
                    s += "O"
                elif self.rk & bit != 0:
                    s += "X"
                elif self.b & bit != 0:
                    s += "o"
                elif self.r & bit != 0:
                    s += "x"
                elif self.wall & bit != 0:
                    s += "#"
                else:
                    s += "."
                s += " "
            s += "\n"
        return s
    def reverseBoard(self):
        """Rotate the board 180 degrees and swap colors (switch perspective).

        reverse64bit is defined elsewhere in this module; presumably it
        reverses the bit order of a 64-bit integer — TODO confirm.
        """
        self.bk, self.rk = reverse64bit(self.rk), reverse64bit(self.bk)
        self.b, self.r = reverse64bit(self.r), reverse64bit(self.b)
        self.wall = reverse64bit(self.wall)
    def judge(self):
        """Return +1 if blue (side to move) has won, -1 if lost, 0 if undecided.

        Win conditions: capture the opposing king or reach the goal row;
        having no legal move loses.
        """
        if self.bk == 0:
            return -1
        if self.rk == 0:
            return 1
        blue_goal = (1<<15)-(1<<9) # sum([1 << i for i in range(9, 15)])
        red_goal = (1<<55)-(1<<49) # sum([1 << i for i in range(49, 55)])
        if self.bk & blue_goal != 0:
            return 1
        if self.rk & red_goal != 0:
            return -1
        if not self.existsChildren():
            return -1
        return 0
    def richJudge(self):
        """Like judge(), but also scores positions decided one move ahead."""
        # If the game is already decided by the basic rules, return that result.
        j = self.judge()
        if j != 0:
            return j
        # "Check": a blue piece is one step from capturing the red king.
        for i in range(7, 10):
            if (self.rk<<i)&self.b != 0:
                return 1
        # The blue king can certainly reach the goal on the next move
        # (it already stands on the row just before the goal row).
        blue_sub_goal = (1<<23)-(1<<17) # sum([1 << i for i in range(17, 23)])
        if self.bk & blue_sub_goal != 0:
            return 1
        return 0
    def existsChildren(self):
        """Return True if the side to move has at least one legal move."""
        # Pieces move by right-shifting 7..9 bits; a move exists when such a
        # shift lands on a square that is neither a wall nor an own piece.
        for i in range(7, 10):
            if (self.b>>i) & ~(self.b|self.wall) != 0:
                return True
        return False
    def tos(self, frm):
        """List destination bits reachable from the single-bit mask `frm`.

        The three forward moves are bit shifts by 7, 8 and 9; squares
        occupied by a wall or an own piece are excluded.
        """
        assert self.b & frm != 0
        tos = []
        for i in range(7,10):
            to = frm>>i
            if (to & self.wall == 0) and (to & self.b == 0):
                tos.append(to)
        return tos
    def move(self, move):
        """Apply `move` (frm XOR to as a bitmask) for the side to move.

        Captures any opposing piece on the target square, then flips the
        board to the opponent's perspective.  `lastmove` is stored in the
        flipped frame.
        """
        self.b = self.b ^ move
        if self.bk & move != 0:
            self.bk = self.bk ^ move
        # Remove a captured red piece/king from the target square.
        self.r = self.r & ~move
        self.rk = self.rk & ~move
        self.reverseBoard()
        self.turn += 1
        self.lastmove = reverse64bit(move)
    def movable(self, frm, to):
        """Return True if moving from bit `frm` to bit `to` is legal."""
        if self.b & frm == 0:
            return False
        if to & self.wall != 0:
            return False
        if to & self.b != 0:
            return False
        for i in range(7,10):
            if to == frm>>i:
                return True
        return False
    def children(self):
        """Return all positions reachable in one move (each already flipped
        to the opponent's perspective by move())."""
        children = []
        for i in range(64):
            frm = 1<<i
            if frm & self.b != 0:
                for to in self.tos(frm):
                    child = March(self.b, self.r, self.bk, self.rk)
                    child.move(frm^to)
                    children.append(child)
        return children
"""
AImove
"""
def AImove(AI, march):
    """Ask the callable `AI` for a move on `march` and apply it in place.

    `AI(march)` must return a `(frm, to)` pair of bitmasks; the pair is
    passed to `march.move` as the XOR mask `frm ^ to`.
    """
    frm, to = AI(march)#AI.move(march)
    march.move(frm^to)
    if march.judge() != 0:
        # Game over: the board is intentionally left untouched here.
        #march = March()
        pass
"""
Bit Board Manager
"""
def bitprint(bit,name=" ",num=None):
    """Debug helper: print a label, an optional index, and the bit pattern.

    The pattern is `bin(bit)` left-padded to 66 characters (the '0b'
    prefix counts towards the width).
    """
    # `is not None` instead of `!= None`: identity test is idiomatic and
    # cannot be fooled by operator overloading.
    print(name, (num if num is not None else " "), bin(bit).zfill(66))
def bitlist(bit):
    """Return the indices (0..63) of the set bits of a 64-bit board.

    Bits above position 63 are deliberately ignored, matching the 64-bit
    board representation.
    """
    # Comprehension instead of the manual append loop.
    return [i for i in range(64) if bit & (1 << i)]
def reverse64bit(bit):
    """Reverse the order of all 64 bits of `bit` (bit 0 <-> bit 63).

    Classic butterfly swap: exchange adjacent groups of 1, 2, 4, ..., 32
    bits; six rounds fully reverse a 64-bit word.
    """
    all_ones = (1 << 64) - 1
    for step in range(6):  # 2**6 == 64
        width = 1 << step
        # Mask selecting the low half of every 2*width-bit group.
        group_mask = all_ones // ((1 << width) + 1)
        low = bit & group_mask
        high = bit & ~group_mask
        bit = (low << width) | (high >> width)
    return bit
def bitToVec(bit):
    """Expand `bit` into a 64-entry list of 1-element lists ([0] or [1]),
    least-significant bit first (one-hot column vector per square)."""
    padded = bin(bit)[2:].zfill(64)
    return [[int(ch)] for ch in reversed(padded)]
"""
URL access
"""
import urllib.request
def url2text(url):
    """Fetch `url` and return the response body decoded as text.

    Uses a `with` block so the HTTP connection is always closed; the
    original version leaked the response object.
    """
    with urllib.request.urlopen(url) as response:
        return response.read().decode()
"""
Graphical User Interface
"""
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.uix.textinput import TextInput
from kivy.graphics import Color
Window.size = (450, 800)
color_dict = {"b":(0.725,0.825,0.925,1),
"r":(1 ,0.75 ,0.85 ,1),
"bk":(0, 0, 1, 1),
"rk":(1, 0, 0, 1),
"space":(1, 1, 1, 1),
"outside":(0.95 ,0.95 ,0.95 ,1)}
class URLTextInput1(TextInput):
    """Single-line text input: fetch W1 weights from a URL and install them."""
    multiline = False
    def on_text_validate(self):
        # Triggered on Enter: download the CSV text for the first-layer
        # weights, load it into the current AI and cache a copy on disk.
        print("W1:", self.text)
        text = url2text(self.text)
        # Each CSV row becomes a list of floats; the trailing empty line
        # produced by split("\n") is dropped with [:-1].
        self.parent.bw.AI.W1.matrix = list(map(lambda x:list(map(float, x.split(","))), text.split("\n")[:-1]))
        with open("data/AIW1.csv", mode="w") as f:
            f.write(text)
class URLTextInput2(TextInput):
    """Single-line text input: fetch W2 weights from a URL and install them."""
    multiline = False
    def on_text_validate(self):
        # Same flow as URLTextInput1, but for the second-layer weights.
        print("W2:", self.text)
        text = url2text(self.text)
        self.parent.bw.AI.W2.matrix = list(map(lambda x:list(map(float, x.split(","))), text.split("\n")[:-1]))
        with open("data/AIW2.csv", mode="w") as f:
            f.write(text)
class GridButton(Button):
    """One board cell; `value` holds the cell's bitmask (set by BoardGrid)."""
    def on_press(self):
        # First click selects one of the player's pieces; second click
        # tries to move the selected piece to this cell.
        if self.parent.frm == None:
            if self.parent.march.b & self.value != 0:
                self.parent.frm = self.value
        else:
            if self.parent.march.movable(self.parent.frm, self.value):
                self.parent.march.move(self.parent.frm ^ self.value)
                if self.parent.march.judge() == 0:
                    # Game still running: let the red AI reply immediately.
                    AImove(AI = self.parent.parent.AI, march = self.parent.march)
                else:
                    # Game over: flip back so the final position is shown
                    # from the human player's perspective.
                    self.parent.march.reverseBoard()
            self.parent.frm = None
            if self.parent.march.judge() != 0:
                #self.parent.march = March()
                pass
        self.parent.updateColor()
class BoardGrid(GridLayout):
    """6x6 grid of GridButtons mirroring the playable area of a March board."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.rows = 6
        self.cols = 6
        self.buttons = []
        self.march = March()
        # Bitmask of the currently selected origin square (None = nothing).
        self.frm = None
        for row in range(self.rows):
            sub_buttons = []
            for col in range(self.cols):
                btn = GridButton()
                btn.background_normal = "white.png"
                btn.font_size = 100
                # +1 offsets skip the wall border of the 8x8 bitboard.
                btn.value = 1<<(8*(row+1)+(col+1))
                sub_buttons.append(btn)
                self.add_widget(btn)
            self.buttons.append(sub_buttons)
        self.updateColor()
        self.background_normal = "white.png"
    def updateColor(self):
        """Repaint every cell from the current march state and selection."""
        for row in range(self.rows):
            for col in range(self.cols):
                address = 1<<(8*(row+1)+(col+1))
                # Kings take priority over plain pieces when colouring.
                if self.march.bk & address != 0:
                    color = color_dict["bk"]
                elif self.march.rk & address != 0:
                    color = color_dict["rk"]
                elif self.march.b & address != 0:
                    color = color_dict["b"]
                elif self.march.r & address != 0:
                    color = color_dict["r"]
                else:
                    color = color_dict["space"]
                self.buttons[row][col].background_color = color
                # A dot marks the squares of the opponent's last move.
                if self.march.lastmove & address != 0:
                    self.buttons[row][col].text = "•"
                else:
                    self.buttons[row][col].text = ""
                self.buttons[row][col].color = color_dict["space"]
                if self.frm == None:
                    pass
                else:
                    # Highlight the selected piece and its legal targets.
                    if self.frm & address != 0:
                        self.buttons[row][col].text = "•"
                    if any([to & address != 0 for to in self.march.tos(self.frm)]):
                        self.buttons[row][col].color = color_dict["b"]
                        self.buttons[row][col].text = "•"
class RedPlayerButton(Button):
    """Button cycling through the available red (opponent) AIs."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Index into RedAIDictFunc() / RedAINames.
        self.value = 0
        self.text = RedAINames[self.value]
        self.font_size = 25
    def on_press(self):
        # Advance to the next AI and restart the game with a fresh board.
        RedAIDict = RedAIDictFunc()
        self.value = (self.value + 1)%len(RedAIDict)
        print(self.value)
        self.parent.AI = RedAIDict[self.value]
        self.parent.bw.march = March()
        self.parent.bw.updateColor()
        self.text = RedAINames[self.value]
class BluePlayerButton(Button):
    """Toggle for the blue side's mode; currently unused (see BattleBox)."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.value = 0
        self.font_size = 75
    def on_press(self):
        # Toggle between human ("") and "Q-network" play, resetting the board.
        self.value = (self.value + 1) %2
        self.text = ["", "Q-network"][self.value]
        self.parent.bw.march = March()
        self.parent.bw.updateColor()
class BattleBox(BoxLayout):
    """Vertical layout: AI-selector button, the board, and a bottom label."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.orientation = "vertical"
        self.redbtn = RedPlayerButton()
        self.redbtn.background_color = color_dict["outside"]
        self.bw = BoardGrid()
        # A plain Label placeholder; BluePlayerButton is intentionally disabled.
        self.bluebtn = Label()#BluePlayerButton()
        self.add_widget(self.redbtn)
        self.add_widget(self.bw)
        self.add_widget(self.bluebtn)
        self.AI = RedAIDictFunc()[self.redbtn.value]
    def on_size(self, *args):
        # Keep the board square: its height follows the window width.
        self.bw.size_hint_y = None
        self.bw.height = self.width
class RootBox(BoxLayout):
    """Application root; currently only hosts the battle view."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.battleView()
    def battleView(self):
        # Replace whatever is shown with a fresh BattleBox.
        self.clear_widgets()
        self.add_widget(BattleBox())
class MarchApp(App):
    """Kivy application entry point for the March game."""
    def build(self):
        return RootBox()
"""
LIVE AI
"""
import numpy as np
def RedAIDictFunc():
    """Return the list of selectable red-AI move functions.

    Despite the name it returns a list, indexed by RedPlayerButton.value;
    the order must match RedAINames.
    """
    #RedAIDict = [greedyAI, doubleCalculationAI, singleCalculationAI, randomAI] #vanillaAI(filename="AI").move
    #RedAIDict = [greedyAI, doubleCalculationAI]#, singleCalculationAI, randomAI] #vanillaAI(filename="AI").move
    RedAIDict = [vanillaAI(filename="AI").move]
    return RedAIDict
# Display names shown on the opponent-selection button; the order must
# match the list returned by RedAIDictFunc().
RedAINames = ["vanillaAI"]#["greedyAI", "doubleCalculationAI"]#, "singleCalculationAI", "randomAI"]
# Random AI: plays a uniformly random legal move.
def randomAI(march):
    """Return a random legal blue move as a (frm, to) pair of bitmasks.

    Keeps sampling a random square and one of the three forward shifts
    (7, 8 or 9) until `march.movable` accepts the pair.
    """
    while True:
        origin = 1 << np.random.randint(64)
        dest = origin >> (9 - np.random.randint(3))
        if march.movable(origin, dest):
            return origin, dest
# If an immediately winning capture exists, play it. (Work in Progress)
def singleCalculationAI(march):
    """One-ply search: play any move that wins outright, else move randomly."""
    for idx in range(9, 55):
        origin = 1 << idx
        if origin & march.b == 0:
            continue
        for dest in march.tos(origin):
            trial = March(march.b, march.r, march.bk, march.rk)
            trial.move(origin ^ dest)
            # After move() the board is flipped, so -1 means "we just won".
            if trial.judge() == -1:
                return (origin, dest)
    return randomAI(march)
# Avoid moves that lose on the spot.
def doubleCalculationAI(march):
    """Prefer an immediately winning move; otherwise remember some move
    that does not lose outright; finally fall back to a random move.

    Move candidates are visited in shuffled order so ties vary.
    """
    retVal = (0, 0)
    randlist = list(range(9, 55))
    np.random.shuffle(randlist)
    for i in randlist:
        frm = 1 << i
        if frm & march.b != 0:
            for to in march.tos(frm):
                child = March(march.b, march.r, march.bk, march.rk)
                child.move(frm ^ to)
                # richJudge() re-scans the whole board; evaluate it once
                # instead of twice (it is a pure function of the state).
                verdict = child.richJudge()
                if verdict == -1:
                    return (frm, to)
                if verdict == 0:
                    retVal = (frm, to)
    if retVal == (0, 0):
        return randomAI(march)
    else:
        return retVal
# Always capture a piece when one can be taken!
def greedyEval(march):
    """Material evaluation from blue's point of view.

    Returns +/-10000 for decided positions, else the piece-count
    difference (blue minus red).
    """
    # richJudge() fully re-scans the board; call it once, not twice.
    verdict = march.richJudge()
    if verdict == 1:
        return 10000
    elif verdict == -1:
        return -10000
    # Popcount via bin().count("1"): only the binary digits can be '1',
    # so the '0b' prefix is harmless.  Replaces the numpy char-count.
    blueEval = bin(march.b).count("1")
    redEval = bin(march.r).count("1")
    return blueEval - redEval #+np.random.randn()
def greedyAI(march):
    """One-ply greedy search maximising the negated material evaluation
    of the (flipped) child position; ties broken by shuffled order."""
    bestmove = (0, 0)
    bestEval = -100000
    order = list(range(9, 55))
    np.random.shuffle(order)
    for idx in order:
        origin = 1 << idx
        if origin & march.b == 0:
            continue
        for dest in march.tos(origin):
            trial = March(march.b, march.r, march.bk, march.rk)
            trial.move(origin ^ dest)
            # The child board is flipped, hence the negation.
            score = -greedyEval(trial)
            if score >= bestEval:
                bestmove = (origin, dest)
                bestEval = score
    print(bestEval)
    if bestmove == (0, 0):
        return randomAI(march)
    return bestmove
#def greedyAI2(march):
# bestmove = (0, 0)
# bestEval = -100000
# for i in range(9, 55):
# frm = 1<<i
# if frm & march.b != 0:
# for to in march.tos(frm):
# child = March(march.b, march.r, march.bk, march.rk)
# child.move(frm^to)
# for j in range(9, 55):
# frm2 = 1<<j
# if frm2 & march.b != 0:
# for to in march.tos(frm):
# Start the Kivy event loop (blocks until the window is closed).
MarchApp().run()
| true |
71587e214e407cb551fef282843da83e98bd3dd4 | Python | SebastianRehfeldt/dash-slideshow | /src/elements/plot.py | UTF-8 | 585 | 2.859375 | 3 | [] | no_license | """Module for creating plots"""
import pandas as pd
import dash_core_components as dcc
import plotly.graph_objects as go
def create_histogram(df: pd.DataFrame, column: str) -> dcc.Graph:
    """Build a Dash Graph showing a histogram of `df[column]`.

    The graph id is derived from the column name; axis titles use the
    title-cased column name.
    """
    title = column.title()
    layout = {
        "title": title,
        "xaxis": {"title": title},
        "yaxis": {"title": "Frequency"},
    }
    return dcc.Graph(
        id="graph-{:s}".format(column),
        figure={"data": [go.Histogram(x=df[column])], "layout": layout},
    )
| true |
a02c3d6da597fe43a4cbd9a74481f767516c78f0 | Python | USC-NSL/ALPS_code | /test_plot_fig/data_for_fig/plot_cdf.py | UTF-8 | 1,026 | 2.65625 | 3 | [] | no_license | import numpy as np
import os,sys
import matplotlib.pyplot as plt
# Plot styling and axis-limit constants (Python 2 script).
X_LIM = 30
LINE_WIDTH = 3
FONT_SIZE = 17
X_LABLE = 'error(m)'
Y_LABLE = 'CDF'
TITLE = 'Distribution of errors (MTV)'
# One colour / legend entry per input file, in argv order.
color_list = ['g', 'r']
legend_list = ['ALPS','Google']
# Each command-line argument names a file of error samples; plot its
# empirical CDF.
for i in range(1,len(sys.argv)):
    data = np.loadtxt(sys.argv[i])
    sorted_data = np.sort(data)
    yvals=np.arange(len(sorted_data))/float(len(sorted_data))
    print yvals
    print '------------'
    # If every sample fits inside the x-range, force the curve to reach 1.0.
    if sorted_data[-1] < X_LIM:
        yvals[-1] = 1.0
    plt.plot(sorted_data,yvals,lw=LINE_WIDTH,color=color_list[i-1])
    if sorted_data[-1] < X_LIM:
        plt.plot([sorted_data[-1], X_LIM], [0.995, 0.995], color=color_list[i-1], lw=LINE_WIDTH)
plt.legend(legend_list, loc=4)
ltext = plt.gca().get_legend().get_texts()
# Colour each legend label to match its curve.
for i in range(1,len(sys.argv)):
    plt.setp(ltext[i-1], color=color_list[i-1])
plt.title(TITLE)
plt.xlim([0, X_LIM])
plt.ylim([0,1.0])
plt.xlabel(X_LABLE, fontsize=FONT_SIZE)
plt.ylabel(Y_LABLE, fontsize=FONT_SIZE)
plt.xticks(fontsize=FONT_SIZE)
plt.yticks(fontsize=FONT_SIZE)
plt.show()
| true |
ab5234274e23320a2d3088b8209bfb23d4ed8d4f | Python | ManishBhat/Project-Euler-solutions-in-Python | /P345_matrix_sum/P345.py | UTF-8 | 1,031 | 3.28125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 10:00:41 2020
@author: Manish
"""
def f(a):
    """Maximum assignment sum of an n x n matrix (Project Euler 345).

    Dynamic programming over frozensets of already-used rows, one column
    at a time: after processing column c, `rowchosen[S]` is the best sum
    using exactly the rows in S for columns 0..c.  Prints the answer and
    also returns it (backward compatible: callers ignoring the return
    value are unaffected).
    """
    n = len(a)
    rowchosen = {}
    c = 0
    for r in range(n):
        rowchosen[frozenset([r])] = a[r][c]
    for c in range(1, n):
        newrow = dict()
        for chosen, total in rowchosen.items():
            # Rows not yet assigned to an earlier column.
            for r in frozenset(range(n)) - chosen:
                # `chosen | {r}` replaces the roundabout frozenset().union(*[...]).
                s = chosen | frozenset([r])
                val = total + a[r][c]
                if s not in newrow or newrow[s] < val:
                    newrow[s] = val
        rowchosen = newrow
    # Only the full row set remains after the last column.
    ans = list(rowchosen.values())[0]
    print("The answer is:", ans)
    return ans
def Q345():
    """Read the integer matrix from matrix2.txt and solve it with f().

    Uses `with open(...)` so the file handle is closed; the original
    version leaked it.
    """
    with open("matrix2.txt", "r") as fhand:
        arr = [[int(x) for x in line.split()] for line in fhand]
    f(arr)
if __name__ == '__main__':
    import time
    start_time = time.time()
    Q345()
    # Wall-clock runtime of the whole solve.
    print("Program run time(in s): ", (time.time() - start_time))
| true |
252ed9bbb7bda3768719325b81dd3f5fc0ad5324 | Python | SundeepChand/Ride-the-Road | /play.py | UTF-8 | 5,696 | 3.546875 | 4 | [
"MIT"
] | permissive | import pygame
import random
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GRAY = (159, 163, 168)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
CAR_COLOR = (181, 230, 29)
TEXT_COLOR = (250, 105, 10)
pygame.init()
class Car:
    """A drawable game entity (player or enemy car) with position, size
    and per-frame velocity."""
    def __init__(self, x=0, y=0, dx=4, dy=0, width=30, height=30, color=RED):
        self.image = ""  # replaced by a pygame Surface in load_image()
        self.x = x
        self.y = y
        self.dx = dx
        self.dy = dy
        self.width = width
        self.height = height
        self.color = color
    def load_image(self, img):
        """Load the sprite bitmap; black pixels become transparent."""
        self.image = pygame.image.load(img).convert()
        self.image.set_colorkey(BLACK)
    def draw_image(self):
        """Blit the loaded bitmap at the current position."""
        screen.blit(self.image, [self.x, self.y])
    def move_x(self):
        self.x += self.dx
    def move_y(self):
        self.y += self.dy
    def draw_rect(self):
        """Draw the car as a filled rectangle (used for enemy cars)."""
        pygame.draw.rect(screen, self.color, [self.x, self.y, self.width, self.height], 0)
    def check_out_of_screen(self):
        """Undo the last horizontal step if it left the 400px-wide road."""
        if self.x < 0 or self.x + self.width > 400:
            self.x -= self.dx
def check_collision(player_x, player_y, player_width, player_height, car_x, car_y, car_width, car_height):
    """Axis-aligned bounding-box overlap test between player and car.

    Touching edges do not count as a collision (strict inequalities).
    """
    overlap_x = player_x + player_width > car_x and player_x < car_x + car_width
    overlap_y = player_y + player_height > car_y and player_y < car_y + car_height
    return overlap_x and overlap_y
# Set the width and height of the screen [width, height]
size = (400, 700)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Ride the Road")
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# Create a player car object
player = Car(175, 475, 0, 0, 70, 131, RED)
player.load_image("player.png")
# True while the menu is showing; gameplay starts on a mouse click.
collision = True
# Store the score
score = 0
# Load the fonts
font_40 = pygame.font.SysFont("Arial", 40, True, False)
font_30 = pygame.font.SysFont("Arial", 30, True, False)
text_title = font_40.render("Ride the Road", True, TEXT_COLOR)
text_ins = font_30.render("Click to Play!", True, TEXT_COLOR)
def draw_main_menu():
    """Draw the title, the last score and the play prompt, then flip."""
    screen.blit(text_title, [size[0] / 2 - 106, size[1] / 2 - 100])
    score_text = font_40.render("Score: " + str(score), True, TEXT_COLOR)
    screen.blit(score_text, [size[0] / 2 - 70, size[1] / 2 - 30])
    screen.blit(text_ins, [size[0] / 2 - 85, size[1] / 2 + 40])
    pygame.display.flip()
# Setup the enemy cars
cars = []
car_count = 2
for i in range(car_count):
    x = random.randrange(0, 340)
    # Enemies start above the visible area with a random downward speed.
    car = Car(x, random.randrange(-150, -50), 0, random.randint(5, 10), 60, 60, CAR_COLOR)
    cars.append(car)
# Setup the stripes.
stripes = []
stripe_count = 20
stripe_x = 185
stripe_y = -10
stripe_width = 20
stripe_height = 80
space = 20
for i in range(stripe_count):
    stripes.append([190, stripe_y])
    stripe_y += stripe_height + space
# -------- Main Program Loop -----------
while not done:
    # --- Main event loop
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        # Reset everything when the user starts the game.
        if collision and event.type == pygame.MOUSEBUTTONDOWN:
            collision = False
            for i in range(car_count):
                cars[i].y = random.randrange(-150, -50)
                cars[i].x = random.randrange(0, 350)
            player.x = 175
            player.dx = 0
            score = 0
            pygame.mouse.set_visible(False)
        # Steer with the arrow keys while a game is running.
        if not collision:
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_RIGHT:
                    player.dx = 4
                elif event.key == pygame.K_LEFT:
                    player.dx = -4
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT:
                    player.dx = 0
                elif event.key == pygame.K_RIGHT:
                    player.dx = 0
    # --- Game logic should go here
    # --- Screen-clearing code goes here
    screen.fill(GRAY)
    # --- Drawing code should go here
    if not collision:
        # Draw the stripes
        for i in range(stripe_count):
            pygame.draw.rect(screen, WHITE, [stripes[i][0], stripes[i][1], stripe_width, stripe_height])
        # Move the stripes
        for i in range(stripe_count):
            stripes[i][1] += 3
            if stripes[i][1] > size[1]:
                stripes[i][1] = -40 - stripe_height
        player.draw_image()
        player.move_x()
        player.check_out_of_screen()
        # Check if the enemy cars move out of the screen.
        for i in range(car_count):
            cars[i].draw_rect()
            cars[i].y += cars[i].dy
            if cars[i].y > size[1]:
                # Passed a car: award points and respawn it above the screen.
                score += 10
                cars[i].y = random.randrange(-150, -50)
                cars[i].x = random.randrange(0, 340)
                cars[i].dy = random.randint(4, 9)
        # Check the collision of the player with the car
        for i in range(car_count):
            if check_collision(player.x, player.y, player.width, player.height, cars[i].x, cars[i].y, cars[i].width, cars[i].height):
                collision = True
                pygame.mouse.set_visible(True)
                break
        # Draw the score.
        txt_score = font_30.render("Score: "+str(score), True, WHITE)
        screen.blit(txt_score, [15, 15])
        pygame.display.flip()
    else:
        draw_main_menu()
    # --- Limit to 60 frames per second
    clock.tick(60)
# Close the window and quit.
pygame.quit()
| true |
17f9ea65e769670503d6692d25e5d264762786e2 | Python | g4m3rm1k3/data-struct-algo-s | /recursive_fib.py | UTF-8 | 563 | 4.40625 | 4 | [] | no_license | def fib_recur(n):
    if n == 0:
        return 0
    elif n == 1:
        return 1
    # Naive double recursion: F(n) = F(n-1) + F(n-2), exponential time.
    return fib_recur(n-1) + fib_recur(n-2)
def long_fib(n):
    """Iterative Fibonacci: prints each addition step, returns F(n).

    `nxt` instead of `next` so the builtin `next()` is not shadowed;
    the printed trace is byte-identical to the original.
    """
    if n == 0:
        return 0
    elif n == 1:
        return 1
    prev, nxt = 0, 1
    for _ in range(n - 1):
        print(f"{prev} + {nxt} = {prev + nxt}")
        prev, nxt = nxt, prev + nxt
    return nxt
# print(long_fib(10))
# Demo call: F(1) prints no trace steps; the value 1 is printed.
print(long_fib(1))
# print(long_fib(2))
def fib_runner(z):
    """Print the z-th Fibonacci number using the recursive helper."""
    print(f"The {z} number in the fibonacci sequence is {fib_recur(z)}")
# Demo: print F(0), F(1) and F(10).
z = 0
fib_runner(z)
z = 1
fib_runner(z)
z = 10
fib_runner(z)
4dc4039ffd8825848210b85f0fc1dd3c6d6936f9 | Python | buiquangmanhhp1999/Age-Gender-Classification-Based-On-ShuffleNet | /ex.py | UTF-8 | 836 | 2.640625 | 3 | [] | no_license | from PIL import Image
import cv2
# Load the two source images (paths relative to the working directory).
im1 = Image.open('./chaubui.png')
im2 = Image.open('./hoailinh_result.png')
def get_concat_h_resize(im1, im2, resample=Image.BICUBIC, resize_big_image=True):
    """Concatenate two images side by side, rescaling one to equal heights.

    `resize_big_image` chooses whether the taller (True) or the shorter
    (False) image gets resized; aspect ratio is preserved.
    """
    if im1.height == im2.height:
        left, right = im1, im2
    elif (((im1.height > im2.height) and resize_big_image) or
          ((im1.height < im2.height) and not resize_big_image)):
        new_width = int(im1.width * im2.height / im1.height)
        left = im1.resize((new_width, im2.height), resample=resample)
        right = im2
    else:
        new_width = int(im2.width * im1.height / im2.height)
        left = im1
        right = im2.resize((new_width, im1.height), resample=resample)
    dst = Image.new('RGB', (left.width + right.width, left.height))
    dst.paste(left, (0, 0))
    dst.paste(right, (left.width, 0))
    return dst
# Concatenate the two images and write the result to disk.
get_concat_h_resize(im1, im2).save('1.png')
| true |
4155badf43d2a0acec64ca8f128b4f8928caa309 | Python | MLAlg/EGC-Dataset-Analysis | /analysis.py | UTF-8 | 2,254 | 2.625 | 3 | [] | no_license | # Prepare Environment
import sys
colab = 'google.colab' in sys.modules
# Download the dataset from my drive(fixed format issue)
if colab:
!wget 'https://drive.google.com/uc?authuser=0&id=1rseU8HjF16lq87CjVtVCLbhrUCqt_lzi&export=download' -O "EGC_dataset.csv"
#imports
import pandas as pd
import numpy as np
import string
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import FrenchStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.decomposition import NMF, LatentDirichletAllocation
from math import floor
import pickle
from sklearn import preprocessing
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
import keras
from sklearn.decomposition import NMF, LatentDirichletAllocation
nltk.download('stopwords')
nltk.download('punkt')
stopword = set(stopwords.words('french'))
porter = PorterStemmer()
snowball_stemmer = FrenchStemmer()
print("french stop words: ", stopword)
# Read Data
path = "/content/EGC_dataset.csv"
df = pd.read_csv(path)
df.head()
# Find Top Authors
df.authors
df.authors[0]
# prepare authors field
# Split the comma-separated author strings, then strip whitespace.
authors = df.authors.str.split(',')
result = [list(map(str.strip, sublist)) for sublist in authors]
flattened_authors = [item for sublist in result for item in sublist]
# Lower-case in place; 1269 is presumably the row count — TODO use len(df).
for i in range(1269):
    for j in range(len(authors[i])):
        authors[i][j] = authors[i][j].lower()
authors
# mapper
# Count how many papers each (lower-cased) author appears on.
dict_aut = {}
for aut in flattened_authors:
    aut = aut.lower()
    dict_aut[aut] = dict_aut.get(aut,0) + 1
len(dict_aut) # number of authors: 2007
dict_aut # dictionary of authors and their contributions
# reducer
import operator
sorted_aut = sorted(dict_aut.items(), key=operator.itemgetter(1), reverse=True)
sorted_aut[0:11] # top authors
map_aut = []
for aut in flattened_authors:
    map_aut.append((aut, 1))
#map_aut
# Titles of articles for every author
# Build author -> comma-separated list of titles, merging new titles
# with any previously recorded ones.
dict_art = {}
for i in range(1269):
    temp = dict.fromkeys(authors[i], df.title[i])
    x = temp.keys()
    for a in x:
        if not dict_art.get(a):
            temp[a] = temp[a]
        else:
            s = dict_art.get(a)
            temp[a] += ", " + s
    dict_art.update(temp)
dict_art
| true |
cb27de811eabb5e0fae2559d1d3367ab1643f068 | Python | Parwej0007/FASTAPI-crud-Authentication-Token-ForgetPasswordByEmail-Login | /main.py | UTF-8 | 1,492 | 2.71875 | 3 | [] | no_license | from fastapi import FastAPI
# from pydantic_v import TestPostValidate
from pydantic import BaseModel
from typing import Optional, List
# for debug import uvicorn
import uvicorn
# make FastAPI instance with name app
app = FastAPI()
# DO CRUD WITHOUT DATABASE
# Run - uvicorn module_name:app --reload
# start first api with get operation
@app.get('/', tags=['FastAPI Basic']) # know as path operation decorator (path('/'), operation(get()), decorator @)
def home(): # know as path operator function
    # Simple landing endpoint returning a static JSON payload.
    return {'hello': 'Hello FastAPI'}
# post request (send data from client)
# we can validate sended data from client in function parameter
# limit 10 will show 10 data at a time
@app.post('/home/', tags=['FastAPI Basic'])
def create_book(name: str, age: int, location: str, limit: int=10):
    # NOTE(review): `age` and `limit` are validated query parameters but
    # are currently unused in the response.
    return {"result": f" post request from user name-{name} and location-{location}"}
########################################################################################
# same
# or validating data sended from client(browser)
# using pydantic -- BaseModel
# use to request body use pydantic
class PostSchema(BaseModel):
    """Pydantic request-body schema for the /home/post/ endpoint."""
    id: int
    name: str
    location: Optional[str]=None  # optional field, defaults to None
    price: float
    book_rent: List[str]=[]  # pydantic copies the default per instance
@app.post('/home/post/', tags=['FastAPI Use Schema'])
def create_book_py(book_item_request : PostSchema):
    # Echo the validated request body back to the client.
    print(book_item_request)
    return book_item_request
if __name__ == '__main__':
    # Run the dev server directly: python main.py
    uvicorn.run(app=app, host="127.0.0.1", port=9000 )
| true |
9a41f2c7af072d98686996c6ce0c7a60ff1e142e | Python | saidaaisha/enron_CollocateNetworks | /collocation_experiments/code/score_calc_swl.py | UTF-8 | 5,133 | 2.515625 | 3 | [] | no_license | #!/usr/bin/python2.7
from __future__ import division
from multiprocessing import Process, Queue
from nltk.tokenize import sent_tokenize
from nltk import word_tokenize
from collections import Counter
from math import floor, sqrt, log
from time import time
from sys import argv
import Queue as que
import re
import os
import sys
# Shared module-level state, populated by init() and load_values().
sw_list = []
ind_list = Counter()
cnt = Counter()
cnt_counter = 0
ind_counter = 0
N=0
#input file paths
collocation_file = ""
ind_frequency_file = ""
stop_word_file = ""
t_score_output_file = ''
mi_score_output_file = ''
# 1 => apply the corresponding cutoff below when filtering scores.
ts_cutoff_indicator = 0
mi_cutoff_indicator = 0
ts_cutoff = 0.0
mi_cutoff = 0.0
def isNumber(s):
    """True if `s` parses as a base-10 integer (ValueError is swallowed)."""
    try:
        int(s)
    except ValueError:
        return False
    return True
def load_values():
    """Populate the module-level frequency tables from the input files."""
    global cnt_counter, ind_counter, sw_list,ind_list,cnt,N
    print 'loading frequencies and stopword list'
    # Collocation counts: one "left,right,count" record per line.
    with open(collocation_file,'r') as f:
        for line in f.readlines():
            tokens = line.split(',')
            cnt[tokens[0]+'-'+tokens[1]]+=int(tokens[2])
            cnt_counter+=1
    print 'reading collocations complete. Total count: '+str(cnt_counter)
    # Individual word frequencies: "word,count"; N accumulates corpus size.
    with open(ind_frequency_file,'r') as f:
        for line in f.readlines():
            tokens = line.split(',')
            val = int(tokens[1])
            ind_list[tokens[0]]+=val
            N+=val
            ind_counter+=1
    print 'reading individual frequencies complete.. Total count: '+str(ind_counter)+' value of N is '+str(N)
    # Stopword list: one lower-cased word per line.
    with open(stop_word_file,'r') as f:
        for line in f.readlines():
            sw_list.append(line.strip().lower())
    print 'reading stopword list complete. Total count: '+str(len(sw_list))
def scoreCal():
    """Compute t-scores and MI scores for every collocation; write two CSVs."""
    # ---- t-score pass ----
    t_score = Counter()
    count = 0
    new_count = 0
    sys.stdout.write("Computing t_score: %.2f%% complete\r"%float(count*100.0/cnt_counter))
    sys.stdout.flush()
    cnt_items = cnt.items()
    for key, value in cnt_items:
        count+=1
        sys.stdout.write("Computing t_score: %.2f%% complete\r"%float(count*100.0/cnt_counter))
        sys.stdout.flush()
        words = key.split('-')
        # Skip stopwords and purely numeric tokens.
        if words[0].strip().lower() in sw_list or words[1].strip().lower() in sw_list:
            continue
        elif isNumber(words[0]) or isNumber(words[1]):
            continue
        fl = ind_list[words[0]]
        fr = ind_list[words[1]]
        den = sqrt(value)
        # t = (observed - expected) / sqrt(observed).
        inter = float((fl*fr)/N)
        num = float(value - inter)
        res = float(num/den)
        if ts_cutoff_indicator == 1 and res < ts_cutoff:
            continue
        t_score[key] = res
        new_count +=1
    print ''
    count =0
    sys.stdout.write("Writing to t_score.csv: %.2f%% complete\r"%float(count*100.0/new_count))
    sys.stdout.flush()
    with open(t_score_output_file,'w') as f:
        f.write('source,target,t_score\n')
        # most_common() emits rows sorted by descending score; entries are
        # deleted as they are written to keep memory usage down.
        for key, value in t_score.most_common():
            words = key.split('-')
            f.write(words[0]+','+words[1]+','+str(value)+'\n')
            count +=1
            sys.stdout.write("Writing to t_score.csv: %.2f%% complete\r"%float(count*100.0/new_count))
            sys.stdout.flush()
            del t_score[key]
    print ''
    # ---- MI-score pass ----
    mi = Counter()
    count = 0
    new_count = 0
    sys.stdout.write("Computing MI score: %.2f%% complete\r"%float(count*100.0/cnt_counter))
    sys.stdout.flush()
    for key, value in cnt_items:
        count+=1
        sys.stdout.write("Computing MI score: %.2f%% complete\r"%float(count*100.0/cnt_counter))
        sys.stdout.flush()
        words = key.split('-')
        if words[0].strip().lower() in sw_list or words[1].strip().lower() in sw_list:
            continue
        elif isNumber(words[0]) or isNumber(words[1]):
            continue
        fl = ind_list[words[0]]
        fr = ind_list[words[1]]
        # NOTE(review): `den` is computed but unused in the MI pass.
        den = sqrt(value)
        inter = float((fl*fr)/N)
        # MI = log2(observed / expected).
        res = float(log(float(value/inter), 2))
        if mi_cutoff_indicator == 1 and res < mi_cutoff:
            continue
        mi[key] = res
        new_count+=1
    print ''
    count =0
    sys.stdout.write("Writing to mi.csv: %.2f%% complete\r"%float(count*100.0/new_count))
    sys.stdout.flush()
    with open(mi_score_output_file,'w') as f:
        f.write('source,target,mi_score\n')
        for key, value in mi.most_common():
            words = key.split('-')
            f.write(words[0]+','+words[1]+','+str(value)+'\n')
            count +=1
            sys.stdout.write("Writing to mi.csv: %.2f%% complete\r"%float(count*100.0/new_count))
            sys.stdout.flush()
            del mi[key]
    print ''
def init():
    """Prompt for all file paths and optional cutoffs, then run the pipeline."""
    global collocation_file, ind_frequency_file, stop_word_file, ts_cutoff_indicator, mi_cutoff_indicator, ts_cutoff, mi_cutoff, t_score_output_file, mi_score_output_file
    collocation_file = raw_input('Enter collocations file path:\n')
    ind_frequency_file = raw_input('Enter Individual Frequencies file path:\n')
    stop_word_file = raw_input('Enter stop word list file path:\n')
    temp1 = raw_input('Do you need a cutoff for t_score value (y/n):\n')
    if temp1 == 'y':
        ts_cutoff_indicator = 1
        ts_cutoff = float(raw_input('Enter t_score cutoff:\n'))
    temp1 = raw_input('Do you need a cutoff for mi_score value (y/n):\n')
    if temp1 == 'y':
        mi_cutoff_indicator = 1
        mi_cutoff = float(raw_input('Enter mi_score cutoff:\n'))
    t_score_output_file = raw_input('Enter file path for t_score output:\n')
    mi_score_output_file = raw_input('Enter file path for mi_score output:\n')
    start_ts = time()
    load_values()
    scoreCal()
    # Elapsed wall-clock time as (minutes, seconds).
    time_taken = divmod((time()-start_ts),60)
    print("Overall time taken for T-score and MI-score Calculation: %d minutes and %d seconds" %(time_taken[0],time_taken[1]))
# Entry point: prompt for paths/cutoffs, then compute both score files.
if __name__ == '__main__':
    init()
| true |
1a9a383292c88aa80b16eb0733d45511d970db2c | Python | yingchuanfu/Python | /com/python5/Pass.py | UTF-8 | 428 | 3.78125 | 4 | [] | no_license | # -*- coding: UTF-8 -*-
# The `pass` statement is a no-op placeholder: it fills a slot where
# Python syntactically requires a statement, keeping the structure complete.
# In the example below the `else` branch could be omitted entirely;
# writing `pass` just makes the (empty) branch explicit.
num_set = [98, 94, 82, 67, 58, 90, 86]
for i in range(len(num_set)):
    if num_set[i] < 60:
        print("SomeOne failed!!!")
    else:
pass | true |
9eb42e37ebfcc4d30af298c4248c7e56595bd307 | Python | Leahxuliu/Data-Structure-And-Algorithm | /Python/LeetCode2.0/DP/322.Coin Change.py | UTF-8 | 1,088 | 3.5625 | 4 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time : 2020/05/11
'''
Method - DP
DP[i]: minimum number of coins when amount is i
Steps:
1.build a dp list, the list size is amount + 1; 0,1,2,....amount
2.scan list from 1 to amount
dp[i] = min(choose the coin, don’t)
= min(dp[i], dp[i - coin] + 1, coin < i)
base case:
dp[0] = 0
initial value:inf
3.if dp[amount] == inf, return -1,else return dp[amount]
Time: O(MN), M is amount, N is the number of coins
Space: O(M)
'''
class Solution:
    def coinChange(self, coins: List[int], amount: int) -> int:
        """Fewest coins summing to `amount`; -1 if impossible.

        Bottom-up DP: dp[i] = minimum coins to make amount i.
        Time O(amount * len(coins)), space O(amount).
        """
        if amount == 0:
            return 0
        if coins == []:
            return -1
        # BUG FIX: the original used `float(inf)`, which raises NameError
        # (`inf` is undefined); the sentinel must be float('inf').
        INF = float('inf')
        dp = [INF] * (amount + 1)
        dp[0] = 0
        for i in range(1, amount + 1):
            for coin in coins:
                if coin <= i:
                    dp[i] = min(dp[i], dp[i - coin] + 1)
        return -1 if dp[amount] == INF else dp[amount]
| true |
37364ab81582328059e676cc1252afb9faf7f54d | Python | lspgl/csat | /sectorImage/core/toolkit/intersection.py | UTF-8 | 526 | 3.28125 | 3 | [] | no_license |
def Intersection(ln1, ln2):
    """True if the two line segments intersect.

    Each argument exposes x1, y1, x2, y2 endpoint attributes.  Vertical
    segments raise ZeroDivisionError (unchanged from the original); the
    quick-reject check is one-sided, also as in the original.
    """
    ax1, ay1, ax2, ay2 = ln1.x1, ln1.y1, ln1.x2, ln1.y2
    bx1, by1, bx2, by2 = ln2.x1, ln2.y1, ln2.x2, ln2.y2
    # Quick reject: ln1 lies entirely to the left of ln2.
    if max(ax1, ax2) < min(bx1, bx2):
        return False
    slope_a = (ay1 - ay2) / (ax1 - ax2)
    slope_b = (by1 - by2) / (bx1 - bx2)
    icept_a = ay1 - slope_a * ax1
    icept_b = by1 - slope_b * bx1
    if slope_a == slope_b:
        return False
    x_cross = (icept_b - icept_a) / (slope_a - slope_b)
    lo = max(min(ax1, ax2), min(bx1, bx2))
    hi = min(max(ax1, ax2), max(bx1, bx2))
    return lo <= x_cross <= hi
| true |
4b1859778942830b062f420d4be192c060506874 | Python | lumeng689/gist | /py/skr/mf_case_5.py | UTF-8 | 952 | 2.5625 | 3 | [] | no_license | import sklearn
from sklearn.datasets import load_digits
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import learning_curve
from sklearn.svm import SVC
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): the variable is named `iris` but the data is the digits set.
iris = load_digits()
X = iris.data
y = iris.target
train_sizes, train_loss, test_loss = learning_curve(SVC(gamma=0.001), X, y, cv=10, scoring="neg_mean_squared_error",
train_sizes=[0.1, 0.25, 0.5, 0.75, 1])
# Negate the neg_mean_squared_error scores back to positive losses,
# averaged over the 10 CV folds at each training-set size.
train_loss_mean = -np.mean(train_loss, axis=1)
test_loss_mean = -np.mean(test_loss, axis=1)
# Learning curves: training loss vs cross-validation loss.
plt.plot(train_sizes, train_loss_mean, 'o-', color='r', label='Training')
plt.plot(train_sizes, test_loss_mean, 'o-', color='g', label='Cross-Validation')
plt.xlabel('Training examples')
plt.ylabel('Loss')
plt.legend(loc='best')
plt.show()
# end of file
| true |
b271b2d0431d9921b5e77d3b9af747d06e752638 | Python | mstroehle/pydent | /pydent/marshaller/exceptions.py | UTF-8 | 2,922 | 3.0625 | 3 | [
"MIT"
] | permissive | """Marshalling exceptions."""
class MarshallerBaseException(Exception):
    # Root of the marshaller exception hierarchy.
    pass
class SchemaRegistryError(MarshallerBaseException):
    """Generic schema-registry exception."""
class SchemaException(MarshallerBaseException):
    """Generic schema exception."""
class SchemaModelException(MarshallerBaseException):
    """Generic schema-model exception."""
"""
Field validation exceptions
"""
class FieldValidationError(MarshallerBaseException):
    """Generic field-validation error."""
class AllowNoneFieldValidationError(FieldValidationError):
    """Field-validation error for getting or setting None values."""
class CallbackValidationError(MarshallerBaseException):
    """Generic callback-validation error."""
class RunTimeCallbackAttributeError(AttributeError):
    """Error raised while executing a field callback."""
"""
Model validation exceptions
"""
class ModelRegistryError(MarshallerBaseException):
    """Raised when a model is not found in the registry."""
class ModelValidationError(MarshallerBaseException):
    """A model-validation error."""
class ExceptionCollection(MarshallerBaseException):
    """Context dependent exception for capturing multiple exceptions.

    Call `r` to gather exceptions, upon exiting, a single
    ExceptionCollection will be raised with a summary of all the
    internal exceptions.
    """

    def __init__(self, *args, header=""):
        self.args = args
        self.header = header
        # Populated with a fresh list only on __enter__.
        self.errors = None

    def r(self, exception):
        # Record an exception; only valid inside the `with` block
        # (self.errors is None before __enter__ runs).
        self.errors.append(exception)

    def raise_exception_class(self, exception_class):
        """Raise an exception class, if it was collected."""
        errors = self.group_errors().get(exception_class.__name__, [])
        if errors:
            raise exception_class(errors)

    def group_errors(self):
        # Bucket the collected exceptions by their class name.
        grouped = {}
        for e in self.errors:
            grouped.setdefault(e.__class__.__name__, []).append(e)
        return grouped

    def __enter__(self):
        self.errors = []
        return self

    def __exit__(self, *args):
        # On exit, if anything was collected, build a grouped summary
        # message and raise this collection itself.
        if self.errors:
            # raise MultipleValidation(self.errors)
            try:
                msg = "{}: {}".format(self.__class__.__name__, self.header)
                group_by_exception = self.group_errors()
                for g, errors in group_by_exception.items():
                    msg += "\n {}(s):".format(g)
                    for i, e in enumerate(errors):
                        msg += "\n ({}): {}".format(i, e)
                self.args = (msg,)
            except Exception as e:
                # If message formatting itself fails, surface that error
                # together with the raw collected exceptions.
                raise e.__class__(
                    "{}\nThere was an error raising exceptions {}\n".format(
                        self.errors, e
                    )
                )
            raise self
# Combines the model-validation hierarchy with the error-collecting
# context manager above.
class MultipleValidationError(ModelValidationError, ExceptionCollection):
    """Model validation exception."""
| true |
b86950ec7eafb7b662209e7a7907f69fd3086176 | Python | vaibhavpandey11/daily_coding_problem | /Problem 031.py | UTF-8 | 780 | 4.09375 | 4 | [] | no_license | '''
This problem was asked by Google.
The edit distance between two strings refers to the minimum number of character insertions, deletions, and substitutions required to change one string to the other.
For example, the edit distance between "kitten" and "sitting" is three: substitute the "k" for "s", substitute the "e" for "i", and append a "g".
Given two strings, compute the edit distance between them.
'''
#________________________________________________________________
def edit_dist(string1, string2):
    """Return the Levenshtein edit distance between *string1* and *string2*.

    The distance is the minimum number of single-character insertions,
    deletions and substitutions needed to turn one string into the other.
    Classic two-row dynamic programming: O(len1*len2) time, O(min) space.

    The previous implementation only counted the length difference plus
    positional mismatches, which overestimates (e.g. it returned 3 for
    "abc" vs "bc" where the true distance is 1).
    """
    # Keep string2 as the shorter one so the DP row stays small.
    if len(string1) < len(string2):
        string1, string2 = string2, string1
    # previous[j] = distance between the first i-1 chars of string1 and
    # the first j chars of string2.
    previous = list(range(len(string2) + 1))
    for i, ch1 in enumerate(string1, 1):
        current = [i]  # transforming i chars into the empty string
        for j, ch2 in enumerate(string2, 1):
            cost = 0 if ch1 == ch2 else 1
            current.append(min(
                previous[j] + 1,        # deletion
                current[j - 1] + 1,     # insertion
                previous[j - 1] + cost  # substitution (or match)
            ))
        previous = current
    return previous[-1]
print(edit_dist(input(), input()))
| true |
aedfa1d39eaddb0748586e7e0b9f4cec23b7e304 | Python | abnsl0014/-Machine-Learning-to-Detect-Fake-News | /DATA+SET+2+ACCURACY+PREDICTIONS.py | UTF-8 | 16,769 | 3.15625 | 3 | [] | no_license |
# coding: utf-8
# ## Importng the packages and modules required in the project
# In[328]:
import pandas as pd
import numpy as np
import csv
from sklearn import naive_bayes
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
from sklearn.svm import SVC
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold, cross_val_score, train_test_split
from sklearn.learning_curve import learning_curve
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from textblob import TextBlob
from textblob import TextBlob, Word, Blobber
from textblob.classifiers import NaiveBayesClassifier
from textblob.taggers import NLTKTagger
# ## reading the data
# In[329]:
adv=pd.read_csv('fakerealnews.csv')
# In[330]:
adv
# ## value counts- Returns object containing counts of unique values.
# In[331]:
adv.news.value_counts()
# In[332]:
adv.label.value_counts()
# ## Aggregate statistics
# In[333]:
adv.describe()
# In[334]:
adv.groupby('label').describe()
# ## Removing Null values- Cleaning the data
# In[335]:
adv[adv.news.notnull()]
adv[adv.label.notnull()]
# In[336]:
adv=adv[pd.notnull(adv['news'])]
adv=adv[pd.notnull(adv['label'])]
# In[337]:
adv.isnull()
# ## Calculating the length of news
# In[338]:
adv['length']=adv['news'].map(lambda text: len(text))
adv.head(30)
# ## Plotting the graph according
# In[339]:
adv.length.plot( bins=20, kind='hist')
# ## Plotting the histogram according to the length of both the labels
# In[340]:
adv.hist(column='length', by='label', bins=50)
# ## Data Preprocessing
# In[341]:
def tokenize(news):
    """Split a news article into its word tokens via TextBlob.

    Returns a TextBlob WordList of the tokens in *news*.  The previous
    revision also built an unused "news -" prefix string (dead code,
    removed here).
    """
    return TextBlob(news).words
# In[342]:
adv.news.head().apply(tokenize)
# In[343]:
def lemmatize(news):
    """Return the lemma ("base form") of every word in *news*.

    Dead code removed: the previous revision built an unused lower-cased
    "news -" prefix string and discarded it.
    """
    words = TextBlob(news).words
    # for each word, take its "base form" = lemma
    return [word.lemma for word in words]
adv.news.head().apply(lemmatize)
# In[344]:
TextBlob("Strong Solar Storm, Tech Risks Today").tags
# In[345]:
TextBlob("What's in that Iran bill that Obama doesn't like?").tags
# ## Data to Vectors- fitting and transforming using Count Vectorizer
# In[346]:
bow_transformer=CountVectorizer(analyzer=lemmatize).fit(adv['news'])
len(bow_transformer.vocabulary_)
# In[347]:
news4=adv['news'][160]
news4
# In[348]:
bow4 = bow_transformer.transform([news4])
bow4
# In[349]:
bow4.shape
# #### //getting feature names
# In[350]:
bow_transformer.get_feature_names()[665]
# In[351]:
news_bow = bow_transformer.transform(adv['news'])
'sparsity: %.2f%%' % (100.0 * news_bow.nnz / (news_bow.shape[0] * news_bow.shape[1]))
# In[352]:
'sparse matrix shape:', news_bow.shape
# In[353]:
'number of non-zeros:', news_bow.nnz
# ## Data to Vectors- fitting and transforming TFIDF- term frequency- inverse doc frequency and getting sparse matrix
# In[354]:
tfidf_transformer = TfidfTransformer().fit(news_bow)
tfidf4 = tfidf_transformer.transform(bow4)
tfidf4
# In[355]:
tfidf_transformer.idf_[bow_transformer.vocabulary_['u']]
# In[356]:
news_tfidf = tfidf_transformer.transform(news_bow)
news_tfidf.shape
# ## Applying Multinomial on the whole training set and predicting accuracy
# In[357]:
get_ipython().magic("time spam_detector = MultinomialNB().fit(news_tfidf, adv['label'])")
# In[358]:
spam_detector=MultinomialNB().fit(news_tfidf, adv['label'])
spam_detector
# In[359]:
'predicted:', spam_detector.predict(tfidf4)[0]
# In[360]:
'expected:', adv.label[55]
# In[361]:
all_predictions = spam_detector.predict(news_tfidf)
all_predictions
# In[362]:
'accuracy', accuracy_score(adv['label'], all_predictions)
# In[363]:
'confusion matrix\n', confusion_matrix(adv['label'], all_predictions)
# In[364]:
'(row=expected, col=predicted)'
# In[365]:
plt.matshow(confusion_matrix(adv['label'], all_predictions), cmap=plt.cm.binary, interpolation='nearest')
plt.title('confusion matrix')
plt.colorbar()
plt.ylabel('expected label')
plt.xlabel('predicted label')
# In[366]:
print (classification_report(adv['label'], all_predictions))
# ## For Logistic Regression
# In[367]:
get_ipython().magic("time spam_detector = LogisticRegression().fit(news_tfidf, adv['label'])")
# In[368]:
spam_detector=LogisticRegression().fit(news_tfidf, adv['label'])
spam_detector
# In[369]:
print('predicted:', spam_detector.predict(tfidf4)[0])
print('expected:', adv.label[6])
# In[370]:
all_predictions = spam_detector.predict(news_tfidf)
all_predictions
# In[371]:
print('accuracy', accuracy_score(adv['label'], all_predictions))
print('confusion matrix\n', confusion_matrix(adv['label'], all_predictions))
print('(row=expected, col=predicted)')
# In[372]:
plt.matshow(confusion_matrix(adv['label'], all_predictions), cmap=plt.cm.binary, interpolation='nearest')
plt.title('confusion matrix')
plt.colorbar()
plt.ylabel('expected label')
plt.xlabel('predicted label')
# In[373]:
print (classification_report(adv['label'], all_predictions))
# ## Calculating how much data we are training and testing
# In[374]:
msg_train, msg_test, label_train, label_test = train_test_split(adv['news'], adv['label'], test_size=0.2)
len(msg_train), len(msg_test), len(msg_train) + len(msg_test)
# #### Resulted in 5% of testing data and rest is the training data
# ## PIPELINE- to combine techniques
#
# In[375]:
pipeline = Pipeline([
('bow', CountVectorizer(analyzer='char')), # strings to token integer counts
('tfidf', TfidfTransformer()), # integer counts to weighted TF-IDF scores
('classifier', LogisticRegression()),# train on TF-IDF vectors w/ Naive Bayes classifie
])
# In[376]:
import _pickle as cPickle
# ## Cross Validation Scores for Logistic Regression
# In[377]:
scores = cross_val_score(pipeline, # convert news into models
msg_train, # training data
label_train, # training labels
cv=10, # split data randomly into 10 parts: 9 for training, 1 for scoring
scoring='accuracy', # which scoring metric?
n_jobs=-1, # -1 = use all cores = faster
)
# In[378]:
scores
# In[379]:
scores.mean(), scores.std()
# ## Cross Validation scores for Naive Bayes
# In[380]:
pipeline = Pipeline([
('bow', CountVectorizer(analyzer='char')), # strings to token integer counts
('tfidf', TfidfTransformer()), # integer counts to weighted TF-IDF scores
('classifier', MultinomialNB()),# train on TF-IDF vectors w/ Naive Bayes classifie
])
# In[381]:
scores = cross_val_score(pipeline, # convert news into models
msg_train, # training data
label_train, # training labels
cv=10, # split data randomly into 10 parts: 9 for training, 1 for scoring
scoring='accuracy', # which scoring metric?
n_jobs=-1, # -1 = use all cores = faster
)
print(scores)
# In[382]:
scores.mean(), scores.std()
# In[383]:
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)):
    """Plot mean training and cross-validation scores vs. training set size.

    estimator : scikit-learn estimator handed to learning_curve.
    title : str, figure title.
    X, y : feature matrix and labels.
    ylim : optional (ymin, ymax) tuple for the y axis.
    cv : cross-validation splitting strategy (passed through).
    n_jobs : parallelism for learning_curve (-1 = all cores).
    train_sizes : relative training-set sizes to evaluate.
        NOTE(review): the numpy-array default is shared across calls;
        harmless here because it is never mutated.

    Returns the matplotlib.pyplot module so callers can chain .show().
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Data")
    plt.ylabel("Score")
    # learning_curve refits the estimator once per (training size, CV fold).
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    # Aggregate over folds (axis 1).
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # Shaded bands show +/- one standard deviation around each mean curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    return plt
# In[384]:
get_ipython().magic('time plot_learning_curve(pipeline, "accuracy vs. training set size", msg_train, label_train, cv=5)')
# In[385]:
from sklearn.grid_search import GridSearchCV
# ## GridSearch for SVM
# In[386]:
pipeline_svm = Pipeline([
('bow', CountVectorizer(analyzer='char')),
('tfidf', TfidfTransformer()),
('classifier', SVC()), # <== change here
])
# pipeline parameters to automatically explore and tune
param_svm = [
{'classifier__C': [1], 'classifier__kernel': ['linear']},
{'classifier__C': [1], 'classifier__gamma': [0.001, 0.0001], 'classifier__kernel': ['rbf']},
]
grid_svm = GridSearchCV(
pipeline_svm, # pipeline from above
param_grid=param_svm, # parameters to tune via cross validation
refit=True, # fit using all data, on the best detected classifier
n_jobs=-1, # number of cores to use for parallelization; -1 for "all cores"
scoring='accuracy', # what score are we optimizing?
cv=StratifiedKFold(label_train, n_folds=5), # what type of cross validation to use
)
# ## SVM time ad Scores
# In[387]:
get_ipython().magic('time svm_detector = grid_svm.fit(msg_train, label_train)')
svm_detector.grid_scores_
# In[388]:
print(confusion_matrix(label_test, svm_detector.predict(msg_test)))
print(classification_report(label_test, svm_detector.predict(msg_test)))
# In[389]:
svm_detector.predict(["Donald Trump just trolled Rosie O'Donnell. Not good"])[0]
# In[390]:
svm_detector.predict(["Kushner family won't attend China investor pitch after criticism."])[0]
# In[391]:
svm_detector.predict(["US prosecuter told to push for more harsher punishments"])[0]
# In[392]:
clf=svm.SVC(kernel='linear', C=1.0,gamma=1)
# In[393]:
clf.fit(X_test_dtm,y_test)
# In[394]:
clf.score(X_test_dtm,y_test)
# In[395]:
predicted=clf.predict(X_test_dtm)
# In[396]:
predicted
# ## Count Vectorizer and TRAINING AND TESTING DATA
# In[397]:
vect=CountVectorizer()
# In[398]:
new_df1=adv[['news']]
new_df2=adv[['label']]
# In[399]:
train_data=new_df1.iloc[1:500,:]
test_data=new_df2.iloc[500:1,:]
train_label=new_df1.iloc[1:500,:]
test_label=new_df2.iloc[500:1,:]
train_vectors=cv.fit_transform(train_data)
test_vectors=cv.fit_transform(test_data)
# In[400]:
cv.get_feature_names()
# In[401]:
train_vectors.toarray()
# In[402]:
test_vectors.toarray()
# In[403]:
X=adv.news
y=adv.label
# In[404]:
print(X.shape)
print(y.shape)
# In[405]:
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train,y_test=train_test_split(X,y,random_state=4)
# In[406]:
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
# In[407]:
vect.fit(X_train)
X_train_dtm = vect.transform(X_train)
# In[408]:
X_train_dtm=vect.fit_transform(X_train)
# In[409]:
X_train_dtm
# In[410]:
X_test_dtm=vect.transform(X_test)
X_test_dtm
# ## Applying MACHINE LEARNING ALGORITHM ON TRAINING AND TESTING DATA
# # 1. KNN
# In[411]:
knn= KNeighborsClassifier(n_neighbors=8)
# In[412]:
knn.fit(X_train_dtm, y_train)
# In[413]:
y_pred_class=knn.predict(X_test_dtm)
# In[414]:
knn.score(X_test_dtm, y_test)
# In[415]:
get_ipython().magic('time knn.fit(X_train_dtm, y_train)')
# In[416]:
from sklearn import metrics
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix
from sklearn.metrics import accuracy_score
import sys
import scipy
# In[417]:
metrics.accuracy_score(y_test,y_pred_class)
# In[418]:
metrics.confusion_matrix(y_test, y_pred_class)
# In[419]:
print(metrics.classification_report(y_test, y_pred_class))
# In[420]:
scores = cross_val_score(KNeighborsClassifier(n_neighbors=15), # steps to convert raw emails into models
X_train_dtm, # training data
y_train, # training labels
cv=10, # split data randomly into 10 parts: 9 for training, 1 for scoring
scoring='accuracy', # which scoring metric?
n_jobs=-1, # -1 = use all cores = faster
)
# In[421]:
scores
# In[422]:
scores.mean()
# In[423]:
scores.std()
# # 2.NAIVE BAYES
# In[424]:
nb=MultinomialNB()
# In[425]:
get_ipython().magic('time nb.fit(X_train_dtm, y_train)')
# In[426]:
nb.fit(X_train_dtm, y_train)
# In[427]:
y_pred_class=nb.predict(X_test_dtm)
# In[428]:
nb.score(X_train_dtm, y_train)
# In[429]:
metrics.confusion_matrix(y_test, y_pred_class)
# In[430]:
y_pred_prob = nb.predict_proba(X_test_dtm)[:,1]
y_pred_prob
# In[431]:
metrics.accuracy_score(y_test, y_pred_class)
# In[432]:
print(metrics.classification_report(y_pred_class, y_test))
# In[433]:
scores = cross_val_score(MultinomialNB(), # steps to convert raw emails into models
X_train_dtm, # training data
y_train, # training labels
cv=10, # split data randomly into 10 parts: 9 for training, 1 for scoring
scoring='accuracy', # which scoring metric?
n_jobs=-1, # -1 = use all cores = faster
)
# In[434]:
scores
# In[435]:
scores.mean()
# In[436]:
scores.std()
# # Logsitic Regression
# In[437]:
logreg=LogisticRegression()
# In[438]:
logreg.fit(X_train_dtm, y_train)
# In[439]:
y_pred_class=logreg.predict(X_test_dtm)
# In[440]:
logreg.score(X_test_dtm, y_test)
# In[441]:
get_ipython().magic('time logreg.fit(X_train_dtm, y_train)')
# In[442]:
metrics.accuracy_score(y_test,y_pred_class)
# In[443]:
metrics.confusion_matrix(y_test, y_pred_class)
# In[444]:
print(metrics.classification_report(y_test, y_pred_class))
# In[445]:
scores = cross_val_score(KNeighborsClassifier(n_neighbors=15), # steps to convert raw emails into models
X_train_dtm, # training data
y_train, # training labels
cv=10, # split data randomly into 10 parts: 9 for training, 1 for scoring
scoring='accuracy', # which scoring metric?
n_jobs=-1, # -1 = use all cores = faster
)
# In[446]:
scores
# In[447]:
scores.mean()
# In[448]:
scores.std()
# In[449]:
names=['label','news','length']
# In[450]:
seed=7
# In[451]:
models = []
models.append(('LR', LogisticRegression()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('NB', MultinomialNB()))
models.append(('SVM', SVC()))
# In[452]:
results=[]
# In[453]:
names=[]
# In[454]:
scoring='accuracy'
# In[455]:
for name, model in models:
kfold = model_selection.KFold(n_splits=10, random_state=seed)
scores = model_selection.cross_val_score(model, X_test_dtm, y_pred_class, cv=kfold, scoring=scoring)
results.append(scores)
names.append(name)
msg = "%s: %f (%f)" % (name, scores.mean(), scores.std())
print(msg)
# In[456]:
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
# In[457]:
import matplotlib.pyplot as plt
# Data to plot
labels = 'Nave Bayes', 'SVM', 'K-NN', 'LG'
sizes = [80.14, 54.50, 59.77, 80.64]
colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue']
explode = (0.1, 0, 0, 0) # explode 1st slice
# Plot
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
plt.show()
# In[460]:
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
objects = ('Nave Bayes', 'SVM', 'K-NN', 'LG')
y_pos = np.arange(len(objects))
performance = [7.02,6.13,4.01,19.1]
plt.bar(y_pos, performance, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel('Time')
plt.title('Spam Detector Time')
plt.show()
# In[ ]:
# In[ ]:
| true |
a999f14bc3d6730cc2ba315a6ea7f1736c1373e5 | Python | kenoskynci/mad_topic_model | /visualization/examples/flarify.py | UTF-8 | 788 | 3.03125 | 3 | [] | no_license | import sys
import json
from features import analyzer, meter
text_key = "name"
child_key = "children"
ngram_parsers = {
'pos': analyzer.pos_ngrams,
'etymology': analyzer.etymology_ngrams,
'word_count': analyzer.word_count_ngrams,
'syllable': analyzer.syllable_ngrams,
'syllable_count': analyzer.syllable_count_ngrams,
'meter': lambda x, y: meter.meter_ngrams(x)
}
def get_ngrams(text, n):
    """Build the flare-style dict of n-gram features for *text*.

    Returns {"name": text, "children": [one sub-dict per parser in
    ngram_parsers]}.  BUG FIX: the dict was previously built but never
    returned, so every caller (including the __main__ block, which
    json-dumps the result) received None.
    """
    data = {text_key: text, child_key: []}
    for ngram_type in ngram_parsers:
        parse = ngram_parsers[ngram_type]
        items, ngrams = parse(text, n, BODY=True)
        sub_data = {text_key: items, child_key: ngrams}
        data[child_key].append(sub_data)
    return data
if __name__ == "__main__":
    # Read the whole document from stdin as a list of lines.
    text = sys.stdin.readlines()
    # Parenthesized print: the original Python 2 print statement is a
    # SyntaxError under Python 3; this form works on both 2 and 3.
    print(json.dumps(get_ngrams(text, 2)))
| true |
ec89f170a223a06d1743ba7d8a201176928a2545 | Python | Leedk3/pytorch_study | /neural_network_tutorial.py | UTF-8 | 3,243 | 3.328125 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.nn.functional as F
# torch.cuda.is_available is a *function* and must be called.  The original
# tested the function object itself, which is always truthy, so 'cuda' was
# selected even on CPU-only machines (crashing later .to(device) calls).
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class Net(nn.Module):
    """LeNet-style CNN for 32x32 single-channel inputs with 10 output classes.

    Architecture: two conv -> ReLU -> 2x2 max-pool stages, then three fully
    connected layers (the classic PyTorch "neural networks" tutorial model).
    """

    def __init__(self):
        super(Net, self).__init__()
        # 1 input channel -> 6 feature maps, 3x3 kernel.
        self.conv1 = nn.Conv2d(1, 6, 3)
        # 6 feature maps -> 16 feature maps, 3x3 kernel.
        self.conv2 = nn.Conv2d(6, 16, 3)
        # Two conv/pool stages reduce a 32x32 image to 16 maps of 6x6.
        self.fc1 = nn.Linear(16 * 6 * 6, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Run a forward pass; *x* is expected as (batch, 1, 32, 32)."""
        # Conv -> ReLU -> 2x2 max pool, twice.
        pooled = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        pooled = F.max_pool2d(F.relu(self.conv2(pooled)), 2)
        # Flatten each sample into a single feature vector.
        flat = pooled.view(-1, self.num_flat_features(pooled))
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)

    def num_flat_features(self, x):
        """Features per sample when *x* is flattened (batch dim excluded)."""
        total = 1
        for dim in x.size()[1:]:
            total *= dim
        return total
net = Net().to(device = device)
print(net)
# You just have to define forward function, and the backward
# function is automatically defined for you using autograd.
# you can use any of the Tensor operations in the forward function.
params = list(net.parameters())
print(len(params))
#See the parameters in the each layers.
for i in range(len(params)):
print(f"{i},", params[i].size())
#RANDOM TEST
input = torch.randn(1, 1, 32, 32).to(device)
out = net(input)
print(out)
print("out size:", out.size())
#Zero the gradient buffers of all parameters
#backprops with random gradients.
net.zero_grad()
out.backward(torch.randn(1, 10).to(device))
## NOTE
# torch.nn only supports mini-batches
# The entire torch.nn package only supports inputs that are
# a mini-batch of samples and not a single sample.
# If you have a single sample, just use input.unsqueeze(0)
# to add a fake batch dimension.
# Loss Function.
output = net(input)
target = torch.randn(10).to(device)
target = target.view(1, -1)
loss = nn.MSELoss()(output, target)
print(loss)
#loss.grad_fn function represents from backward:
print(loss.grad_fn) # MSELoss
print(loss.grad_fn.next_functions[0][0]) # Linear
print(loss.grad_fn.next_functions[0][0].next_functions[0][0]) # ReLU
#Back propagation
# To backpropagate the error all we have to do is to loss.backward()
# You need to clear the existing gradients though, else
# gradients will be accumulated to existing gradients.
net.zero_grad()
print('conv1. bias grad before backward')
print(net.conv1.bias.grad)
loss.backward()
print('conv1. bias grad after backward')
print(net.conv1.bias.grad)
#Optimization
import torch.optim as optim
#create your optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01)
#in our training loop:
optimizer.zero_grad()
output = net(input)
loss = nn.MSELoss()(output, target)
loss.backward()
optimizer.step() | true |
02ac224b90a817169df695b1a10e2e8a5b2d0447 | Python | InsightSoftwareConsortium/ITK | /Utilities/Doxygen/mcdoc.py | UTF-8 | 6,799 | 2.78125 | 3 | [
"IJG",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"SMLNJ",
"BSD-3-Clause",
"BSD-4.3TAHOE",
"LicenseRef-scancode-free-unknown",
"Spencer-86",
"LicenseRef-scancode-llnl",
"FSFUL",
"Libpng",
"libtiff",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-permissive",
... | permissive | #!/usr/bin/env python
import sys, os, re, glob
try:
import io
except ImportError:
import cStringIO as io
def usage():
    """Print the command-line help for all mdoc.py sub-commands to stdout."""
    sys.stdout.write(
        """usage: mdoc.py set group file [files...]
Add the tag "\\ingroup group" to all the doxygen comment with a \\class
tag in it.
usage: mdoc.py check group file [files...]
Check that the tag "\\ingroup group" is in all the doxygen comment with a \\class
tag in it. If the tag is not there, a warning is displayed with the file name, the
line number and the class name. The return value is 0 when all the doxygen comments
have the tag, and 1 when at least one doxygen comment don't have it.
usage: mdoc.py massive-set [ITK-source]
Add the tag "\\ingroup module" to all the headers in ITK, where 'module' is the
module name of the header.
usage: mdoc.py massive-check [ITK-source]
Check that all the headers in ITK have their module name in their \\ingroup tag.
As for 'check', a warning is displayed if the tag is missing and 1 is returned.
\n"""
    )
def setGroup(fname, group):
# sys.stderr.write("Processing "+ fname +"\n")
f = open(fname, "r", encoding="utf-8")
out = io.StringIO()
# load everything in memory
fcontent = f.read()
f.close()
# now parse all the doxygen fields
last = 0
for m in re.finditer(r"/\*\*(.*?)\*/", fcontent, re.DOTALL):
# write what is before the doxygen field to the output
out.write(fcontent[last : m.start(1)])
last = m.end(1)
dcontent = m.group(1)
# we don't care about doxygen fields not about a class
if r"\class" in dcontent and dcontent != r" \class classname ":
# do we have a line with the expected content?
if re.search(r"\ingroup .*" + group + r"(\s|$)", dcontent, re.MULTILINE):
# yes - just keep the content unchanged
out.write(dcontent)
else:
# add the expected group
if "\n" in dcontent:
# this is a multiline content. Find the indent
indent = re.search(r"( *)(\*|$)", dcontent).group(1)
lastLine = dcontent.splitlines()[-1]
if re.match(r"^ *$", lastLine):
out.write(dcontent + "* \\ingroup " + group + "\n" + indent)
else:
out.write(
dcontent.rstrip()
+ "\n"
+ indent
+ "* \\ingroup "
+ group
+ "\n"
+ indent
)
else:
out.write(dcontent + " \\ingroup " + group + " ")
else:
out.write(dcontent)
out.write(fcontent[last:])
# we can save the content to the original file
f = open(fname, "w", encoding="utf-8")
f.write(out.getvalue())
f.close()
def checkGroup(fname, group):
    """Return 0 when every doxygen \\class comment in *fname* carries the
    "\\ingroup <group>" tag; otherwise print one warning per missing tag on
    stderr and return 1.

    Improvement over the previous revision: the file is opened with `with`
    so the handle is closed even when reading raises.
    """
    with open(fname, "r", encoding="utf-8") as f:
        # load everything in memory
        fcontent = f.read()
    # now parse all the doxygen fields
    ret = 0
    for m in re.finditer(r"/\*\*(.*?)\*/", fcontent, re.DOTALL):
        dcontent = m.group(1)
        # we don't care about doxygen fields not about a class
        if r"\class" in dcontent and dcontent != r" \class classname ":
            # do we have a line with the expected content?
            if not re.search(
                r"\\ingroup .*" + group + r"(\s|$)", dcontent, re.MULTILINE
            ):
                # get class name and the line for debug output
                cname = re.search(r"\\class +([^ ]*)", dcontent).group(1).strip()
                line = len(fcontent[: m.start(1)].splitlines())
                sys.stderr.write(
                    r'%s:%s: error: "\ingroup %s" not set in class %s.'
                    % (fname, line, group, cname)
                    + "\n"
                )
                ret = 1
    return ret
def main():
    """Command-line driver: dispatch on sys.argv[1], return an exit code.

    BUG FIX: the massive-set / massive-check branches called the Python 2
    builtin ``file(...)`` (gone in Python 3, and it never accepted an
    ``encoding`` keyword anyway); replaced with ``open`` used via ``with``.
    """
    # first arg is the command
    command = sys.argv[1]
    if command == "set":
        if len(sys.argv) < 4:
            usage()
            return 1
        # second arg is the module name, and the rest are the files to process
        module = sys.argv[2]
        files = sys.argv[3:]
        for fname in files:
            setGroup(fname, module)
        return 0
    elif command == "massive-set":
        if len(sys.argv) < 2:
            usage()
            return 1
        if len(sys.argv) >= 3:
            d = sys.argv[2]
        else:
            d = sys.path[0] + "/../.."
        cmm = os.path.abspath(d + "/*/*/*/itk-module.cmake")
        for fname in glob.glob(cmm):
            with open(fname, "r", encoding="utf-8") as f:
                mcontent = f.read()
            # module name is the first argument of the itk_module() call
            module = re.search(r"itk_module\(([^ )]+)", mcontent).group(1)
            dname = os.path.dirname(fname)
            for fname2 in glob.glob(dname + "/include/*.h"):
                setGroup(fname2, module)
        return 0
    elif command == "check":
        if len(sys.argv) < 4:
            usage()
            return 1
        # second arg is the module name, and the rest are the files to process
        module = sys.argv[2]
        files = sys.argv[3:]
        ret = 0
        count = 0
        for fname in files:
            if os.path.isdir(fname):
                for fname2 in glob.glob(fname + "/*.h"):
                    count += 1
                    ret = max(ret, checkGroup(fname2, module))
            else:
                count += 1
                ret = max(ret, checkGroup(fname, module))
        sys.stderr.write(str(count) + " headers checked." + "\n")
        return ret
    elif command == "massive-check":
        if len(sys.argv) < 2:
            usage()
            return 1
        if len(sys.argv) >= 3:
            d = sys.argv[2]
        else:
            d = sys.path[0] + "/../.."
        cmm = os.path.abspath(d + "/*/*/*/itk-module.cmake")
        ret = 0
        count = 0
        for fname in glob.glob(cmm):
            with open(fname, "r", encoding="utf-8") as f:
                mcontent = f.read()
            module = re.search(r"itk_module\(([^ )]+)", mcontent).group(1)
            dname = os.path.dirname(fname)
            for fname2 in glob.glob(dname + "/include/*.h"):
                count += 1
                ret = max(ret, checkGroup(fname2, module))
        sys.stderr.write(str(count) + " headers checked." + "\n")
        return ret
    else:
        sys.stderr.write("Unknown command" + command + "\n")
        usage()
        return 1
if __name__ == "__main__":
    # Run the CLI and propagate its status code to the shell.
    ret = main()
    sys.exit(ret)
| true |
7db9c9a70159ef0c9b625e986855b37f855a0ab9 | Python | moontasirabtahee/Problem-Solving | /Leetcode/20 Valid Parentheses.py | UTF-8 | 826 | 3.546875 | 4 | [] | no_license | from collections import deque
# Used deque instead of List as deque is faster than List by performance
class Solution:
    """LeetCode 20 — Valid Parentheses.

    A string is valid when every bracket is closed by the matching bracket
    type in the correct order.  A deque serves as the stack for its O(1)
    appends and pops at the right end.
    """

    def isValid(self, s: str) -> bool:
        """Return True iff *s* consists solely of correctly nested brackets."""
        # Map each closing bracket to the opening bracket it must match.
        match_for = {")": "(", "}": "{", "]": "["}
        pending = deque()
        for ch in s:
            if ch in "([{":
                pending.append(ch)
            elif ch in match_for and pending:
                if pending.pop() != match_for[ch]:
                    return False
            else:
                # A closing bracket with nothing open, or any character
                # outside the six bracket symbols, invalidates the string.
                return False
        return not pending
# Quick smoke test: "()" is balanced, so this prints True.
solution = Solution()
print(solution.isValid("()"))
# TimeComplexity -> O(n)
| true |
b1fc69ca7dae24ff590cee8a258c482cd3db87bf | Python | JoseCordobaEAN/refuerzo_programacion_2018_1 | /sesion_2/es_par.py | UTF-8 | 231 | 4 | 4 | [
"MIT"
] | permissive | # Solicitamos el número al usuario
numero = int(input("Ingrese su número\n"))
# Validamos que el dividendo sea par
if numero % 2 == 0:
print("El dividendo",numero,"es par")
else:
print("El dividendo ",numero,"es impar")
| true |
6199846d1501688de64a7a10099afb83ccf16ce2 | Python | HallidayJ/comp61542-2014-lab | /src/comp61542/fastgraph.py | UTF-8 | 2,606 | 3.375 | 3 | [] | no_license | # module fastgraph
# created by Gribouillis for the python forum at www.daniweb.com
# November 9, 2010
# Licence: public domain
# This module defines 3 functions (node, edge and graph) to help
# create a graph (a pygraphviz.AGraph instance) linking arbitrary
# python objects.
# This graph can be saved in various image formats, and in dot format.
import pygraphviz
def node(x):
    """Tag *x* as a graph node item: a 1-tuple distinguishes it from edges."""
    return tuple([x])
def edge(x, y):
    """Tag the pair as a graph edge item: a 2-tuple of (source, target)."""
    endpoints = (x, y)
    return endpoints
def graph(items, tolabel, **kwd):
    """Build a pygraphviz.AGraph from a stream of node()/edge() items.

    Each distinct object gets a generated id ("n0", "n1", ...) and a
    box-shaped node labelled with tolabel(obj); 2-tuples additionally add
    an edge between their two endpoints.
    """
    # @ items - a sequence of node(obj), or edge(obj, obj) items (obj can be any python object)
    # @ tolabel - a function tolabel(obj) -> str which converts an object to string
    # @ **kwd - additional keyword arguments for AGraph.__init__
    names = dict()
    the_graph = pygraphviz.AGraph(**kwd)
    for uple in (tuple(t) for t in items):
        for obj in uple:
            if not obj in names:
                # First sighting of this object: allocate an id and a node.
                names[obj] = "n%d" % len(names)
                the_graph.add_node(names[obj], label=tolabel(obj), shape="box")
        if len(uple) == 2:
            # An edge item connects its (already registered) endpoints.
            the_graph.add_edge(*(names[obj] for obj in uple))
    return the_graph
#
#if __name__ == "__main__":
#
# def my_items():
# # Example generator of graph items. Our graph contains string here
# for x in ['AuthorA', 'Author B', 'Author C']:
# yield node(x)
#
# for s in [['AuthorA', 'Author B'], ['Author B', 'Author C'], ['Author C', 'AuthorA']]:
# yield edge(s[0], s[1])
# #x, y = iter(s)
# #yield edge(x, y)
#
# def my_label(x):
# "Create a label for graph objects"
# return x.upper()
#
# g = graph(my_items(), my_label)
# g.draw('mygraph.png', prog='circo') # prog can be neato|dot|twopi|circo|fdp|nop
def buildNodes(authorpath):
    """Yield node items for every author in *authorpath*, then edge items
    linking each consecutive pair; paths shorter than two authors yield
    nothing at all."""
    if len(authorpath) <= 1:
        return
    for authorname in authorpath:
        yield node(authorname)
    for left, right in zip(authorpath, authorpath[1:]):
        yield edge(left, right)
def authorlabel(authorname):
    """Render an author name as an upper-case node label."""
    label = authorname.upper()
    return label
class graph_network:
    """Renders a chain of co-authors as a network image via pygraphviz."""

    def buildnetworkgraph(self, authorpath, filename='hunt_network.png', prog='circo'):
        """Draw *authorpath* (a sequence of author names) as a graph image.

        Generalization: the output file name and the graphviz layout program
        are now parameters whose defaults match the previously hard-coded
        values, so existing callers are unaffected.  Paths with fewer than
        two authors contain no edges and are skipped (returns None);
        otherwise the drawn AGraph is returned for further use.
        """
        if len(authorpath) > 1:
            networkgraph = graph(buildNodes(authorpath), authorlabel)
            networkgraph.draw(filename, prog=prog)
            return networkgraph
        return None
| true |
7df2210b6377905d51b920f6b054ba8d9ee0278f | Python | rodrigopscampos/python-lp | /ifs/ex4.py | UTF-8 | 210 | 4.3125 | 4 | [] | no_license | #Leia um número, se < 10, criança, se < 18 adolescente, se não, adulto
a = int(input('Informe uma idade: '))
if a < 10:
print('Criança')
elif a < 18:
print('Adolescente')
else:
print('Adulto') | true |
ba3d8a064d0ac0e7add3596e91cd699278ae975c | Python | Liyubov/bikeshare-simulation | /data_prep/data_prep.py | UTF-8 | 1,451 | 3.125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 18:13:32 2020
@author: freddy
Create a JSON file, containing date, time and corresponding weight matrix.
"""
import pandas as pd
import os
if __name__ == "__main__":
    data = pd.read_csv("../data/biketrip_data.csv")
    # Explicit .copy(): adding the "n" column to a column-slice view of
    # `data` would otherwise trigger pandas' SettingWithCopyWarning.
    data_agg = data[["start_station_id", "end_station_id", "hour_from", "date_from"]].copy()
    data_agg["n"] = 1  # one row per trip, so summing "n" counts trips
    data_agg_detail = data_agg.groupby(
        ["start_station_id", "end_station_id", "date_from", "hour_from"]
    ).sum()
    data_agg_detail = data_agg_detail.reset_index()
    for hour in data_agg_detail["hour_from"].unique():  # one directory per hour
        directory = "../data/" + str(hour)
        # exist_ok replaces the racy exists()/makedirs() pair.
        os.makedirs(directory, exist_ok=True)
        filtered_data = data_agg_detail[
            data_agg_detail["hour_from"] == hour
        ]  # trips starting in this hour
        # NOTE(review): iterating ALL dates (as the original did) means dates
        # with no trips in this hour still produce an (empty) matrix file.
        for date in data_agg_detail["date_from"].unique():  # one matrix per day
            filtered_data_time = filtered_data[
                filtered_data["date_from"] == date
            ]  # trips for this hour on this date
            # Origin/destination trip-count matrix for (hour, date).
            matrix = pd.crosstab(
                index=filtered_data_time["start_station_id"],
                columns=filtered_data_time["end_station_id"],
                values=filtered_data_time["n"],
                aggfunc="sum",
            )
            filename = directory + "/" + str(date) + ".csv"
            matrix.to_csv(filename)
| true |
6a4e293b0eed78f50912ae3b540f992ce7d1c62d | Python | ProgramSalamander/AlgorithmLesson | /管道网络.py | UTF-8 | 2,316 | 3.859375 | 4 | [] | no_license | # 管道网络
# 描述
#
# Every house in the colony has at most one pipe going into it and at most one pipe going out of it.
# Tanks and taps are to be installed in a manner such that every house with one outgoing pipe but no incoming pipe gets a tank installed on its roof and every house with only an incoming pipe and no outgoing pipe gets a tap.
# Find the efficient way for the construction of the network of pipes.
#
#
# 输入
#
# The first line contains an integer T denoting the number of test cases.
# For each test case, the first line contains two integer n & p denoting the number of houses and number of pipes respectively.
# Next, p lines contain 3 integer inputs a, b & d, d denoting the diameter of the pipe from the house a to house b.
# Constraints:
# 1<=T<=50,1<=n<=20,1<=p<=50,1<=a, b<=20,1<=d<=100
#
#
# 输出
#
# For each test case, the output is the number of pairs of tanks and taps installed i.e n followed by n lines that contain three integers: house number of tank, house number of tap and the minimum diameter of pipe between them.
#
#
# 输入样例 1
#
# 1
# 9 6
# 7 4 98
# 5 9 72
# 4 6 10
# 2 8 22
# 9 7 17
# 3 1 66
# 输出样例 1
#
# 3
# 2 8 22
# 3 1 66
# 5 6 10
import sys
if __name__ == '__main__':
    # For each test case: read the pipe network, identify tank houses
    # (outgoing pipe only) and tap houses (incoming pipe only), then follow
    # each chain from tank to tap tracking the minimum pipe diameter.
    case_num = int(input())
    for _ in range(case_num):
        houses_num, pipes_num = [int(_) for _ in input().split(' ')]
        tanks = []       # houses with an outgoing pipe but no incoming pipe
        taps = []        # houses with an incoming pipe but no outgoing pipe
        froms = []       # source house of each pipe
        tos = []         # destination house of each pipe
        diameters = []   # diameter of each pipe
        for _ in range(pipes_num):
            from_house, to_house, diameter = [int(_) for _ in input().split(' ')]
            froms.append(from_house)
            tos.append(to_house)
            diameters.append(diameter)
        for i in range(1, houses_num + 1):
            if i in froms and i in tos:
                pass     # intermediate house: neither tank nor tap
            elif i in froms:
                tanks.append(i)
            elif i in tos:
                taps.append(i)
        print(len(tanks))
        for tank in tanks:
            # Walk the chain from this tank until a house with no outgoing
            # pipe (the tap) is reached, recording the narrowest diameter.
            cur = tank
            min_diameter = sys.maxsize
            while cur in froms:
                idx = froms.index(cur)
                min_diameter = min(diameters[idx], min_diameter)
                cur = tos[idx]   # reuse idx; the original re-scanned froms here
            print('%d %d %d'%(tank, cur, min_diameter))
dc77b7b7d811e55b7cbe4a3848f5b6bdf3bd775a | Python | jdiazram/DL4CV_starterBundle | /321_import_image.py | UTF-8 | 264 | 2.9375 | 3 | [] | no_license | #pip install opencv-contrib-python
import cv2
image = cv2.imread("images/example.png") # load the image from disk (BGR; None if the path is missing)
print(image.shape) # (height, width, channels)
cv2.imshow("Image", image) # show the image in a new window
cv2.waitKey(0) # block until a key press so the window stays open
a2c804b5f7eeb954ae587a3fa5cfa0462c6eaa70 | Python | github-cve-social-graph/cve | /code/get_cves.py | UTF-8 | 1,475 | 2.515625 | 3 | [] | no_license | import pymongo
import requests
import time
from datetime import datetime
from pymongo import MongoClient
# SECURITY(review): database credentials are hard-coded in the connection
# URI; they should come from environment variables or a secrets store.
client = MongoClient('mongodb+srv://erinszeto:Fall2020CKIDS!@erincluster.mvldp.mongodb.net/test')
db = client.ckids
collections = db.collection_names()  # NOTE(review): deprecated in pymongo 3.7+ (use list_collection_names)
if "cve" in collections: # drop any previous run's collection so we start fresh
    db.cve.drop() # drop/delete collection
cve = db.cve # (re)created lazily on first insert
## Retrieve first 2000 CVEs with "github" keyword
url = "https://services.nvd.nist.gov/rest/json/cves/1.0?startIndex=0&keyword=github&resultsPerPage=2000"
response = requests.get(url).json()
total_results = response["totalResults"]  # total matches across all pages
cves = response["result"]["CVE_Items"] #list of CVE dictionaries/JSON
# Get metadata (current time, api URL)
def get_metadata(cves, url):
    """Attach access metadata (UTC timestamp + API URL) to each CVE dict.

    Mutates the dicts in *cves* in place (all entries share one metadata
    dict) and returns the same list for chaining.
    """
    stamp = datetime.utcnow().strftime("%a %b %d %H:%M:%S UTC %Y")
    meta = {"date_accessed": stamp, "api_url": url}
    for entry in cves:
        entry["metadata"] = meta
    return cves
cves = get_metadata(cves, url)
cves_id = cve.insert_many(cves).inserted_ids
index = 2000
# Page through the remaining results 2000 at a time, sleeping between
# requests to stay within the NVD API rate limits.
while (index < total_results):
    time.sleep(10)
    url = "https://services.nvd.nist.gov/rest/json/cves/1.0?startIndex=%s&keyword=github&resultsPerPage=2000" % str(index)
    response = requests.get(url).json()
    cves = response["result"]["CVE_Items"] # list of CVE dictionaries/JSON
    cves = get_metadata(cves, url) # attach access metadata before insert
    cves_id = cve.insert_many(cves).inserted_ids
    index += 2000
85f625d51e9fcee7cfc1dc31390b38591c162f8a | Python | droundy/sad-monte-carlo | /plotting/number-movie.py | UTF-8 | 3,955 | 2.65625 | 3 | [] | no_license | #!/usr/bin/python3
import yaml, sys
import numpy as np
import matplotlib.pyplot as plt
def latex_float(x):
    """Format a positive number for LaTeX axis/title text.

    Values whose decimal exponent is within [-2, 2] are printed plainly
    ('%g'); larger/smaller values become scientific notation, collapsing a
    mantissa that rounds to 1 into a bare power of ten.
    """
    exponent = int(np.log10(x * 1.0))
    if abs(exponent) <= 2:
        return '%g' % x
    mantissa = x / 10.0 ** exponent
    if ('%.1g' % mantissa) == '1':
        return r'10^{%.0f}' % exponent
    return r'%.1g\times10^{%.0f}' % (mantissa, exponent)
# Pool of distinct plot colors; one is popped per input file.
allcolors = list(reversed(['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple',
                           'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan',
                           'xkcd:lightblue', 'xkcd:puke']))
# Per-file simulation data, keyed by filename.
my_histogram = {}
current_histogram = {}
my_free_energy = {}
my_volume = {}
current_free_energy = {}
current_total_energy = {}
my_temperature = {}
my_time = {}
my_color = {}
max_iter = 0  # longest movie across all files; drives the animation loop below
my_gamma = {}
my_gamma_t = {}
fnames = sys.argv[1:]  # YAML output files passed on the command line
for fname in fnames:
    print(fname)
    with open(fname) as f:
        yaml_data = f.read()
    data = yaml.load(yaml_data)  # NOTE(review): no Loader= given; unsafe for untrusted files
    current_histogram[fname] = np.array(data['bins']['histogram'])
    my_temperature[fname] = data['T']
    current_free_energy[fname] = np.array(data['bins']['lnw'])
    my_volume[fname] = float(data['system']['cell']['box_diagonal']['x'])**3  # cubic cell volume
    current_total_energy[fname] = np.array(data['bins']['total_energy'])
    my_volume[fname] = float(data['system']['cell']['box_diagonal']['x'])**3  # (duplicate of the assignment above)
    my_color[fname] = allcolors.pop()
    my_time[fname] = np.array(data['movies']['time'])
    if len(my_time[fname]) > max_iter:
        max_iter = len(my_time[fname])
    my_temperature[fname] = data['T']  # (assigned again; harmless)
    my_free_energy[fname] = np.array(data['movies']['lnw'])
    my_histogram[fname] = np.array(data['movies']['histogram'])
    my_gamma[fname] = np.array(data['movies']['gamma'], dtype=float)
    my_gamma_t[fname] = np.array(data['movies']['gamma_time'])
    if 'Sad' in data['method']:
        minT = data['method']['Sad']['min_T']  # SAD minimum temperature (not used below)
# Animate the movie frames in an endless loop (interrupt to stop); one pass
# of the inner loop redraws all three figures for frame index ii.
plt.ion()
all_figures = set()
keep_going = True
while keep_going:
    #keep_going = False
    for ii in range(max_iter):
        for fig in all_figures:
            fig.clf()
        for fname in fnames:
            # Files with shorter movies keep showing their final frame (j = -1).
            if ii < len(my_time[fname]):
                t = my_time[fname][ii]
                j = ii
            else:
                j = -1
            # NOTE(review): when j == -1, `t` retains the previous file's
            # value for the title -- presumably acceptable; verify.
            all_figures.add(plt.figure('Excess free energy'))
            # Free energy relative to the zero-particle bin.
            plt.plot(my_free_energy[fname][j,:] - my_free_energy[fname][j,0],
                     my_color[fname],
                     label=fname)
            plt.title('$t=%s/%s$' % (latex_float(t),
                                     latex_float(my_time[fname][-1])))
            plt.ylabel('$F_{exc}/kT$')
            plt.legend(loc='best')
            #plt.ylim(Smin, 0)
            all_figures.add(plt.figure('Histogram'))
            plt.title('$t=%s/%s$' % (latex_float(t),
                                     latex_float(my_time[fname][-1])))
            plt.plot(my_histogram[fname][j,:], my_color[fname],
                     label=fname)
            #plt.ylim(0)
            #plt.legend(loc='best')
            all_figures.add(plt.figure('Pressure'))
            plt.title('$t=%s/%s$' % (latex_float(t),
                                     latex_float(my_time[fname][-1])))
            plt.ylabel('Pressure')
            # Pressure from finite differences of the free energy vs N.
            V = my_volume[fname]
            T = my_temperature[fname]
            F = -my_free_energy[fname][j,:]*T
            N = len(F)
            p = np.zeros(N-1)
            p_exc = np.zeros(N-1)
            for i in range(0,N-1):
                u = F[i+1]-F[i] # chemical potential estimate, dN = 1
                p_exc[i] = (-F[i]+u*(i+.5))/V
                p[i] = (-F[i]+u*(i+.5))/V+(i+.5)*T/V  # excess + ideal-gas term
            UN = np.arange(0.5, N-1, 1)
            print(len(UN), len(p))
            plt.plot(UN, p, my_color[fname],
                     label=fname)
            plt.legend(loc='best')
        plt.figure('Histogram')
        plt.ylabel('histogram')
        plt.ylim(0)
        plt.legend(loc='best')
        plt.pause(0.1)
plt.ioff()
plt.show()
17b86f798f73ea7555104d9d24f1fe763cb45358 | Python | SilkyAnt/rupeng_python | /python_workspaces/Seq_02_SelfWebServer/flaskLearning/04dynRoute.py | UTF-8 | 553 | 2.796875 | 3 | [] | no_license | # 动态路由
# 导入Flask模块
from flask import Flask
from flask import send_file
# Create the Flask application instance.
app = Flask(__name__)
app.debug = True  # auto-reload + interactive debugger; disable in production
# Register the root route.
@app.route("/")
def index(): # view function
    # Serves the static page directly, without rendering it through Jinja2.
    return send_file("../templates/03Hello.html")
# Dynamic route: <name> is captured as a string argument.
@app.route("/user/<name>")
def user(name):
    return "hello,%s" % name
# Dynamic route with an int converter: the id segment must be numeric.
@app.route("/userid/<int:id>")
def userId(id):
    return "hello,%d" % id
if __name__ == "__main__":
    app.run(port=8080)
| true |
fe0b7b17ff164b38f0ca0f6e66f2d0e571fffd3a | Python | robertvandeneynde/parascolaire-students | /antoine collon/test4.py | ISO-8859-2 | 1,408 | 2.796875 | 3 | [] | no_license | from __future__ import print_function, division
import pygame
pygame.init()
taille = [700, 700]
ecran = pygame.display.set_mode(taille)
# Basic RGB colors.
NOIR = [0, 0, 0]
BLANC = [255, 255, 255]
ROUGE = [255, 0, 0]
VERT = [0, 255, 0]
BLEU = [0, 0, 255]
# START
ma_position=100  # shared coordinate: animated each frame, nudged by arrow keys
sens=1           # direction of the automatic movement (+1 / -1)
clock = pygame.time.Clock()
# Arrow-key keycodes.
# NOTE(review): in pygame 1.x, 275 is K_RIGHT and 276 is K_LEFT, so GAUCHE/
# DROITE look swapped relative to their French names -- confirm intent.
HAUT = 273
BAS = 274
GAUCHE = 275
DROITE = 276
fini = 0
while fini == 0:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            fini = 1
        elif event.type == pygame.KEYDOWN:
            # BUG FIX: the original compared event.type (always KEYDOWN here)
            # to the keycodes, so only DROITE ever fired, and the GAUCHE
            # branch assigned to a misspelled variable (ma_postion).
            if event.key == DROITE:
                ma_position=ma_position - 100
            elif event.key == GAUCHE:
                ma_position=ma_position + 100
            elif event.key == HAUT:
                ma_position=ma_position - 100
            elif event.key == BAS:
                ma_position=ma_position + 100
    pressed = pygame.key.get_pressed()
    # TICK: bounce ma_position between 0 and 700.
    if sens == +1 :
        ma_position=ma_position+5
    if sens == -1 :
        ma_position -= 5
    if ma_position>700:
        sens = -1
    if ma_position < 0:
        sens = +1
    # DRAW
    ecran.fill(BLANC)
    pygame.draw.rect(ecran, ROUGE, [100,ma_position, 60,40])
    pygame.draw.circle(ecran, BLEU, [ma_position,200], 20)
    pygame.draw.circle(ecran, VERT, [150, ma_position], 10)
    pygame.display.flip()
    clock.tick(60)
pygame.quit()
206a08ba18411fc1f479c798eef72213bb7f507a | Python | rainwoodman/vmad | /vmad/core/tape.py | UTF-8 | 1,582 | 2.65625 | 3 | [
"BSD-2-Clause"
] | permissive | from . import get_autodiff
class Record(object):
    """One entry on the tape.

    Couples a node with the resolved keyword arguments that were passed to
    its implementation.
    """
    def __init__(self, node, impl_kwargs):
        self.node = node
        self.impl_kwargs = impl_kwargs

    def __repr__(self):
        return '{} / {}'.format(self.node, self.impl_kwargs)
class Tape(list):
    """Ordered record of operations from evaluating a model.

    A Tape is a list of Record objects plus the model and its initial
    variable bindings. Once `finalize` is called no more records may be
    appended, and gradient (vjp/jvp) models can be derived from it.
    """
    def __init__(self, model, init):
        self.model = model
        self.init = init
        self._completed = False  # flipped to True by finalize()

    def finalize(self, out):
        """Close the tape, storing a copy of the computed outputs.

        Parameters
        ----------
        out : dict / OrderedDict
        """
        assert isinstance(out, dict)
        self.out = out.copy()
        self._completed = True

    def append(self, node, impl_kwargs):
        # Recording is only legal while the tape is still open.
        assert not self._completed
        list.append(self, Record(node, impl_kwargs))

    def get_vjp_vout(self):
        """Output variable names of the vector-Jacobian-product model."""
        return ['_' + name for name in self.init.keys()]

    def get_jvp_vout(self):
        """Output variable names of the Jacobian-vector-product model."""
        return [name + '_' for name in self.out.keys()]

    def get_vjp(self):
        assert self._completed
        return get_autodiff().vjpmodel(self)

    def get_jvp(self):
        assert self._completed
        return get_autodiff().jvpmodel(self)

    def compute_jvjp(self, vout, aout, init):
        """Run a JVP (forward) pass, then seed a VJP (reverse) pass with it."""
        forward = self.get_jvp()
        tangents = forward.compute([name + '_' for name in aout], init)
        seed = {'_' + name: tangent for name, tangent in zip(aout, tangents)}
        return self.get_vjp().compute(vout, init=seed)
| true |
5bc37f35ecfb8761a8551ee3be1ea6b247c2fb79 | Python | Err0rdmg/python-programs | /right_triangle.py | UTF-8 | 412 | 3.4375 | 3 | [] | no_license | line = int(input("Enter numbers of lines you want:"))
# astriks = int(input("Enter numbers of astriks per line you want:"))
# Print a hollow right triangle of `line` rows (read from input above):
# the first and last rows are solid, other rows only show the left edge.
for i in range(line, 0, -1):
    if i == 1 or i == line:
        # Solid row of i asterisks.
        for j in range(1, i+1):
            print("*", end="")
    else:
        # Hollow row: a single asterisk then spaces.
        for j in range(1, i+1):
            if j == 1:
                print("*", end="")
            else:
                print(" ", end="")
    print()
| true |
cc74d0b018a8299983916320ed8210013df904fb | Python | jskway/data-structures-algorithms | /data_structures/binary_search_tree/binary_search_tree.py | UTF-8 | 2,799 | 4.09375 | 4 | [
"MIT"
] | permissive | import sys
sys.path.append('../stack')
from stack import Stack
from collections import deque
class BSTNode:
    """Node of a binary search tree; duplicates land in the right subtree."""

    def __init__(self, value):
        self.value = value
        self.left = None   # subtree of strictly smaller values
        self.right = None  # subtree of greater-or-equal values

    def insert(self, value):
        """Insert value into the subtree rooted at this node."""
        node = self
        while True:
            if value < node.value:
                if node.left is None:
                    node.left = BSTNode(value)
                    return
                node = node.left
            else:
                if node.right is None:
                    node.right = BSTNode(value)
                    return
                node = node.right

    def contains(self, target):
        """Return True if target occurs in this subtree, else False."""
        node = self
        while node is not None:
            if target == node.value:
                return True
            node = node.left if target < node.value else node.right
        return False

    def get_max(self):
        """Return the largest value in this subtree (rightmost node)."""
        node = self
        while node.right is not None:
            node = node.right
        return node.value

    def for_each(self, fn):
        """Apply fn to every value, visiting nodes in pre-order."""
        pending = [self]
        while pending:
            node = pending.pop()
            fn(node.value)
            # Push right first so the left subtree is visited first.
            if node.right is not None:
                pending.append(node.right)
            if node.left is not None:
                pending.append(node.left)

    def in_order_print(self, node):
        """Print every value in the subtree rooted at node, ascending."""
        if node is None:
            return
        self.in_order_print(node.left)
        print(node.value)
        self.in_order_print(node.right)

    def bft_print(self, node):
        """Iterative breadth-first print (level order, left to right)."""
        queue = deque([node])
        while queue:
            current = queue.pop()
            print(current.value)
            if current.left:
                queue.appendleft(current.left)
            if current.right:
                queue.appendleft(current.right)

    def dft_print(self, node):
        """Iterative depth-first print; the right subtree is printed before
        the left because children are pushed left-then-right."""
        stack = Stack()
        stack.push(node)
        while len(stack) > 0:
            current = stack.pop()
            print(current.value)
            if current.left:
                stack.push(current.left)
            if current.right:
                stack.push(current.right)
| true |
a7d81523c5350441e43482861ea69803268c9bc2 | Python | jaean123/SplineInterpolation | /cubic_interpolation.py | UTF-8 | 3,123 | 3.3125 | 3 | [] | no_license | # Cubic Spline Interpolation
import matplotlib.pyplot as plt
def cubic_interpolation(x, y):
    """Compute natural cubic spline coefficients for knots (x, y).

    Builds the tridiagonal system for the interior second derivatives,
    solves it, pads the end second derivatives with zeros (natural spline),
    and returns one [c1, c2, c3, c4] coefficient row per interval.
    NOTE(review): requires at least 4 points (n >= 3); smaller inputs index
    out of range when building A -- confirm callers guarantee this.
    """
    n = len(x) - 1
    h = [0 for i in range(n)]
    b = h[:]
    v = h[:]
    u = h[:]
    # SOME PRE-CALCULATIONS
    h[0] = x[1] - x[0]
    b[0] = (y[1] - y[0]) / h[0]
    # h[i]: interval widths; b[i]: secant slopes per interval.
    for i in range(0, n):
        h[i] = x[i + 1] - x[i]
        b[i] = (y[i + 1] - y[i]) / h[i]
    # v/u: diagonal and right-hand side of the tridiagonal system.
    for i in range(1, n):
        v[i] = 2*(h[i-1] + h[i])
        u[i] = 6*(b[i] - b[i-1])
    u = u[1:]
    # MAKE THE MATRICES
    A = [[0 for col in range(n-1)] for row in range(n-1)]
    # Fill in the first row of A
    A[0][0] = v[1]
    A[0][1] = h[1]
    # Fill in the other rows of A
    # NOTE(review): middle rows are shorter than n-1 entries; the solver
    # only touches the tridiagonal band, so this works -- but it is fragile.
    for i in range(1, n-2):
        row = []
        row.extend(0 for row in range(i-1))
        row.extend([h[i], v[i+1], h[i+1]])
        A[i] = row
    # Fill in the last row of A
    A[n-2][n-3] = h[n-2]
    A[n-2][n-2] = v[n-1]
    z = solve_matrix_tridiagonal(A, u)
    # Natural spline: zero second derivative at both boundary knots.
    temp = [0]
    temp.extend(z)
    temp.append(0)
    z = temp
    return generate_spline_coefficients(h, z, y)
def solve_matrix_tridiagonal(A, u):
    """Solve A z = u for a tridiagonal matrix A by Gaussian elimination.

    Forward-eliminates the sub-diagonal, then back-substitutes.
    Note: both A and u are modified in place.
    """
    size = len(A)
    # ELIMINATION: zero the sub-diagonal, updating diagonal and RHS.
    for row in range(1, size):
        factor = A[row][row - 1] / A[row - 1][row - 1]
        A[row][row - 1] = 0
        A[row][row] -= factor * A[row - 1][row]
        u[row] -= factor * u[row - 1]
    # BACK SUBSTITUTION from the last row up.
    z = [0] * size
    z[size - 1] = u[size - 1] / A[size - 1][size - 1]
    for row in range(size - 2, -1, -1):
        z[row] = (u[row] - A[row][row + 1] * z[row + 1]) / A[row][row]
    return z
def generate_spline_coefficients(h, z, y):
    """Build per-interval cubic coefficients from widths h, second
    derivatives z at the knots, and knot values y.

    Returns a list of [c1, c2, c3, c4] rows, one per interval, matching the
    basis used by spline_eq.
    """
    intervals = len(z) - 1
    S = []
    for i in range(intervals):
        hi = h[i]
        S.append([
            z[i + 1] / (6 * hi),
            z[i] / (6 * hi),
            y[i + 1] / hi - z[i + 1] * hi / 6,
            y[i] / hi - hi * z[i] / 6,
        ])
    return S
def get_points(xi, xip1, step, coefficient):
    """Sample one spline piece every `step` from xi toward xip1.

    Works in either direction; xip1 itself is excluded. Returns a list of
    [x, y] pairs.
    """
    samples = []
    xval = xi
    direction = 1 if xval < xip1 else -1
    while (direction > 0 and xval < xip1) or (direction < 0 and xval > xip1):
        samples.append([xval, spline_eq(xval, xi, xip1, coefficient)])
        xval += direction * step
    return samples
def spline_eq(xval, xi, xip1, coefficient):
    """Evaluate the cubic piece for interval [xi, xip1] at xval, using the
    [c1, c2, c3, c4] basis from generate_spline_coefficients."""
    c1, c2, c3, c4 = coefficient
    left = xval - xi
    right = xip1 - xval
    return c1 * left ** 3 + c2 * right ** 3 + c3 * left + c4 * right
def plot_splines(x, y, C, step):
    """Scatter-plot the interpolated spline samples and the original knots.

    x, y: knot coordinates; C: per-interval coefficients from
    cubic_interpolation; step: sampling distance within each interval.
    """
    n = len(x) - 1
    pts = []
    for i in range(n):
        ipts = get_points(x[i], x[i+1], step, C[i])
        pts.extend(ipts)
    # Split [x, y] pairs into parallel coordinate lists for plotting.
    xinterpolated = [pts[i][0] for i in range(len(pts))]
    yinterpolated = [pts[i][1] for i in range(len(pts))]
    plt.scatter(xinterpolated, yinterpolated)
    plt.scatter(x, y)
    plt.show()
# Test Data Points
xv = [0.9, 1.3, 1.9, 2.1]
yv = [1.3, 1.5, 1.85, 2.1]
# xv = [1, 2, 3, 9, 1]
# yv = [2, 4, 7, 8, 12]
# xv = [1, 2, 3, 9, 5, 2]
# yv = [2, 9, 2, 2, 4, 1]
# Fit and display the spline through the sample points above.
C = cubic_interpolation(xv, yv)
plot_splines(xv, yv, C, 0.01)
# A = [[1, 2, 0, 0], [2, 3, 1, 0], [0, 1, 2, 3], [0, 0, 4, 1]]
# b = [4, 9, 2, 1]
# solve_matrix_tridiagonal(A, b)
156d4a1a82341ae7e12e8c52dfb5a407c71c0630 | Python | Hansung-Lee/SSAFY | /hphk/hphk_006/papago.py | UTF-8 | 1,790 | 3.234375 | 3 | [] | no_license |
# 네이버(파파고)야 내가 단어하나 전달할테니, 번역해줘
# 0. 사용자에게 단어를 입력받는다. (추가기능)
# 1. papago API 요청 주소에 요청을 보낸다.
# 2. 응답을 받아 번역된 단어를 출력한다.
import requests
import os
from pprint import pprint as pp
# 함수를 import하는 방법
# import pprint => pprint.pprint()
# from pprint import pprint => pprint()
# from pprint import pprint as pp => pp()
# Single-word translation via Naver's papago API.
url = "https://openapi.naver.com/v1/papago/n2mt"
# API credentials come from the environment, never hard-coded.
naver_id = os.getenv("NAVER_ID")
naver_secret = os.getenv("NAVER_SECRET")
# naver_secret = os.environ.get('NAVER_SECRET')  # equivalent alternative
sel = ''
sel = input("1. 한글->영어 번역 2. 영어->한글 번역\n")
if (sel=='1'):
    # Korean -> English
    input_text = input("번역할 한글 단어를 입력하세요.\n")
    headers = {
        'X-Naver-Client-Id': naver_id,
        'X-Naver-Client-Secret': naver_secret
    }
    data = {
        'source': 'ko',
        'target': 'en',
        'text': input_text
    }
elif (sel=='2'):
    # English -> Korean
    input_text = input("번역할 영어 단어를 입력하세요.\n")
    headers = {
        'X-Naver-Client-Id': naver_id,
        'X-Naver-Client-Secret': naver_secret
    }
    data = {
        'source': 'en',
        'target': 'ko',
        'text': input_text
    }
else :
    print('잘못된 입력입니다. 다시 시도해주세요.')
try :
    # NOTE(review): on invalid menu input, headers/data are unbound and the
    # resulting NameError is silently swallowed below -- fragile by design.
    res = requests.post(url, headers=headers, data=data)
    pp(res.json().get('message').get('result').get('translatedText')) # prints None if the value is missing
    # pp(res.json()['message']['result']['translatedText']) # raises on missing keys (NoneType error)
    # print(res.text.split('"')[27])
except NameError:
    print('', end = '')
except AttributeError:
    print('API 연결 오류')
| true |
abaf927b542299613b3be41b5a1653b484ce9c99 | Python | rkhous/Clemont | /bot.py | UTF-8 | 3,774 | 2.75 | 3 | [
"MIT"
] | permissive | import MySQLdb
from config import *
from requirements import *
import traceback
import sys
# Connection parameters (host, username, password, db) come from config.py.
database = MySQLdb.connect(host, username, password, db)
database.ping(True)  # presumably enables auto-reconnect on dropped connections -- verify driver semantics
cursor = database.cursor()  # shared module-level cursor used by the classes below
def find_pokemon_id(name):
    """Resolve a pokemon display name to its pokedex id.

    Names containing a meaningful hyphen are mapped directly; otherwise the
    part before the first hyphen is looked up in pokejson (id -> name).
    Returns 0 when no match is found.
    """
    hyphenated = {
        'Nidoran-F': 29,
        'Nidoran-M': 32,
        'Mr-Mime': 122,
        'Ho-oh': 250,
        'Mime-Jr': 439,
    }
    if name in hyphenated:
        return hyphenated[name]
    base = name.split('-')[0]
    for key, value in pokejson.items():
        if value == base:
            return int(key)
    return 0
class Message:
    """Wraps one incoming embed payload describing a pokemon sighting."""

    def __init__(self, poke_dict):
        # poke_dict: list of embed dicts; only the first entry is consulted.
        self.poke_dict = poke_dict

    def process_message(self):
        """Parse the first embed into a flat dict of name, id and coordinates.

        The embed 'url' is expected to end with '?q=<lat>,<lon>' and the
        first field value to start with the pokemon name, e.g. 'Pikachu (...)'.
        """
        embed = self.poke_dict[0]
        url = embed['url']
        pokemon_name = embed['fields'][0]['value'].split(' (')[0]
        pokemon_id = find_pokemon_id(pokemon_name.capitalize())
        coords = url.split('?q=')[1].split(',')
        return {
            'pokemon_name': pokemon_name,
            'poke_id': int(pokemon_id),
            'lat': float(coords[0]),
            'lon': float(coords[1]),
            'url': url,
        }
class Notification:
    """Looks up which users subscribed to notifications for a pokemon."""

    def __init__(self, data):
        # data: dict with at least 'poke_id' (as built by Message.process_message).
        self.data = data

    def get_user_info(self):
        """Return all notification rows matching this pokemon id.

        Returns a list of DB rows, or None if the query failed (the error is
        printed, matching the original best-effort behavior).
        """
        try:
            cursor.execute('SELECT * FROM notifications WHERE poke_id = %s;', [str(self.data['poke_id'])])
            grab_data = cursor.fetchall()
            possible_users = [n for n in grab_data]
            return possible_users
        except Exception:
            # Was a bare `except:` that also swallowed SystemExit/KeyboardInterrupt;
            # it also printed the None return value of print_exc().
            traceback.print_exc(file=sys.stdout)
            print('An error has occurred while searching thru the database for notifications to send.')
class Database:
    """Adds/removes a single user's pokemon notification subscription."""

    def __init__(self, user_id, poke_name, location, distance):
        self.user_id = user_id
        self.poke_name = poke_name
        self.location = location    # "lat,lon" string or None
        self.distance = distance    # max distance in miles

    def add_to_notifications(self):
        """Insert a notification row; returns a user-facing status message."""
        try:
            poke_id = find_pokemon_id(str(self.poke_name).capitalize())
            if self.location is not None:
                lat = str(self.location).split(',')[0]
                lon = str(self.location).split(',')[1]
            else:
                # No location given: store 0,0 as a "match anywhere" marker.
                lat = 0
                lon = 0
            cursor.execute("INSERT INTO notifications("
                           "user_id, poke_id, lat, lon, distance)"
                           "VALUES "
                           "(%s, %s, %s, %s, %s);",
                           (str(self.user_id), int(poke_id), str(lat), str(lon), int(self.distance)))
            database.commit()
            print('[{}] Adding user to database.'.format(str(self.user_id)))
            return 'Successfully added your notification to the database.\n' \
                   '**Pokémon:** `{}`, **Location:** `{}`, **Max distance from you:** `{} miles`'.format(
                str(self.poke_name).capitalize(), str(self.location), str(self.distance)
            )
        except Exception:
            # Was a bare `except:`; narrow it so Ctrl-C/SystemExit still propagate.
            return 'An error occurred while trying to add your notification to the database.'

    def remove_from_notifications(self):
        """Delete this user's notification row; returns a status message."""
        try:
            poke_id = find_pokemon_id(str(self.poke_name).capitalize())
            cursor.execute("DELETE FROM notifications WHERE user_id = %s and poke_id = %s;",
                           (str(self.user_id), int(poke_id)))
            database.commit()
            print('[{}] Removing user from database.'.format(str(self.user_id)))
            # Message typo fixed: "remove" -> "removed".
            return 'Successfully removed your notification from the database.\n' \
                   '**Pokémon:** `{}`'.format(self.poke_name)
        except Exception:
            return 'An error occurred while trying to remove your notification from the database.'
d03198ee41fef426179868efd89dd6b7b6f806a1 | Python | lucernae/timesheets-converter | /scripts/report.py | UTF-8 | 5,463 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python
# coding=utf-8
from __future__ import print_function
from builtins import next
import argparse
from datetime import timedelta, datetime
from timesheets.timesheet import TimeSheets
from timesheets.format.harvest import HarvestTimeRecord
from timesheets.format.sageone import SageOneTimeRecord
from timesheets.format.toggl import TogglTimeRecord
from timesheets.format.toggl import TogglTagsTimeRecord
# Command-line interface: input timesheet format, report window, output
# format, an optional reference date, and the CSV file to read.
parser = argparse.ArgumentParser(
    description='Get reported timesheets format and show it on screen.'
)
parser.add_argument(
    '-t',
    '--input_type',
    metavar='types',
    type=str,
    choices=['sageone', 'harvest', 'toggl', 'toggl-tags'],
    help="Input types: ['sageone', 'harvest', 'toggl', 'toggl-tags']",
    default='sageone',  # NOTE(review): moot, since the option is required
    required=True
)
parser.add_argument(
    '-f',
    '--report_format',
    metavar='format_types',
    type=str,
    choices=['daily', 'weekly', 'standup'],
    help="Report format: ['daily', 'weekly', 'standup']",
    default='daily',
    required=False
)
parser.add_argument(
    '-o',
    '--output_format',
    metavar='output_types',
    type=str,
    choices=['markdown', 'slack'],
    help="Output format: ['markdown', 'slack']",
    default='slack',
    required=False
)
# Reference date in YYYY-MM-DD form; defaults to today when omitted.
parser.add_argument(
    '-d',
    '--date',
    metavar='report_date',
    required=False
)
parser.add_argument(
    'csv_input',
    metavar='input_path',
    # type=argparse.FileType('r'),
    # type=basestring,
    help='CSV file as input for timesheets'
)
def get_record_type(type_name):
    """Map a CLI input-type string to its time-record class.

    Returns None for an unrecognized name (argparse choices prevent that in
    practice).
    """
    mapping = {
        'harvest': HarvestTimeRecord,
        'toggl': TogglTimeRecord,
        'toggl-tags': TogglTagsTimeRecord,
        'sageone': SageOneTimeRecord,
    }
    return mapping.get(type_name)
def report_aggregate(timesheet):
    """
    Aggregate time records into a week -> days -> projects -> notes tree.

    Structure:
    {
        "week": "10 Apr 2018 - 14 Apr 2018",
        "days": [
            {
                "date": <datetime>,
                "day": "Monday",
                "records": [
                    {
                        "project": "Project",
                        "notes": [
                            "task A",
                            "task B"
                        ]
                    },
                ]
            },
        ]
    }

    :param timesheet: Timesheet object
    :type timesheet: TimeSheets
    """
    # Week label spans the earliest to the latest record date.
    dates = [r._date for r in timesheet.records]
    start_date = min(dates)
    end_date = max(dates)
    week_range = '{start:%d %b %Y} - {end:%d %b %Y}'.format(
        start=start_date,
        end=end_date)
    reports = {
        'week': week_range,
        'days': []
    }
    days = reports['days']
    # NOTE(review): the sort key reads `x.date` while grouping reads
    # `x._date`; presumably both exist on the record types -- verify.
    sorted_records = sorted(
        timesheet.records, key=lambda x: (x.date, x.project))
    for r in sorted_records:
        date = r._date
        day = '{date:%A}'.format(date=date)
        # Find (or lazily create) the bucket for this calendar day.
        try:
            existing_day = next(
                d for d in days if d['date'] == date)
        except StopIteration:
            existing_day = {
                'date': date,
                'day': day,
                'records': []
            }
            days.append(existing_day)
        # Find (or lazily create) the project bucket within the day.
        try:
            existing_project = next(
                p for p in existing_day['records'] if p['project'] == r.project)
        except StopIteration:
            existing_project = {
                'project': r.project,
                'notes': []
            }
            existing_day['records'].append(existing_project)
        existing_project['notes'].append(r.notes)
    return reports
def format_output(report, report_type):
    """Print the aggregated report to stdout.

    report_type selects the markup: 'slack' (mrkdwn) or 'markdown'; any
    other value prints nothing, matching the original behavior.
    """
    # (week, day, project) templates per output style; notes share '- {}'.
    styles = {
        'slack': ('*{}*', '*{}*', '_{}_'),
        'markdown': ('# {}', '## {}', '### {}'),
    }
    if report_type not in styles:
        return
    week_fmt, day_fmt, project_fmt = styles[report_type]
    print(week_fmt.format(report['week']))
    for day in report['days']:
        print(day_fmt.format(day['day']))
        for entry in day['records']:
            print(project_fmt.format(entry['project']))
            for note in entry['notes']:
                print('- {}'.format(note))
args = parser.parse_args()
ts = TimeSheets()
ts.load_csv(args.csv_input, target_type=get_record_type(args.input_type))
# Reference date: either the user-supplied YYYY-MM-DD, or today truncated
# to midnight (the strftime/strptime round-trip drops the time of day).
if args.date:
    today = datetime.strptime(args.date, '%Y-%m-%d')
else:
    today = datetime.strptime(datetime.now().strftime('%Y-%m-%d'), '%Y-%m-%d')
if args.report_format == 'daily':
    # Keep only today's records.
    ts.records = [r for r in ts.records if r._date == today]
    report = report_aggregate(ts)
elif args.report_format == 'weekly':
    # Keep the whole Monday-to-Sunday week containing the reference date.
    start_week = today
    while start_week.strftime('%A') != 'Monday':
        start_week -= timedelta(days=1)
    end_week = start_week + timedelta(days=6)
    ts.records = [
        r for r in ts.records
        if r._date >= start_week and r._date <= end_week]
    report = report_aggregate(ts)
elif args.report_format == 'standup':
    # Keep today's and yesterday's records.
    yesterday = today - timedelta(days=1)
    ts.records = [
        r for r in ts.records
        if r._date == yesterday or r._date == today]
    report = report_aggregate(ts)
format_output(report, args.output_format)
| true |
d76c8e780a72cb91b6bbbf1ffe9142e1717d9249 | Python | abhishek2x/TKinterGUIPy | /GetReady19.py | UTF-8 | 277 | 2.96875 | 3 | [] | no_license | from tkinter import *
# Article window: a Text widget hooked to a vertical scrollbar.
root = Tk()
root.title("Article")
root.geometry("654x567")  # initial window size (width x height)
scrollbar = Scrollbar(root)
scrollbar.pack(side=RIGHT, fill=Y)
txt = Text(root, yscrollcommand=scrollbar.set)  # scrolling the text moves the bar
txt.pack(fill=BOTH)
scrollbar.config(command=txt.yview)  # dragging the bar scrolls the text
root.mainloop()  # blocks until the window is closed
9fc69de43a5bc539068489bce8f5892d84fc0047 | Python | simsimplay/raspverry_exe | /HC_SR04.py | UTF-8 | 838 | 2.96875 | 3 | [] | no_license | #-*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import time
# HC-SR04 ultrasonic distance sensor loop (Raspberry Pi GPIO).
GPIO.setmode(GPIO.BCM)  # Broadcom pin numbering
GPIO.setwarnings(False)
TRIG = 23  # trigger output pin (BCM)
ECHO = 24  # echo input pin (BCM)
print('Distance measurement in progress')
# Configure the Trig and Echo pins as output/input.
GPIO.setup(TRIG, GPIO.OUT)
GPIO.setup(ECHO, GPIO.IN)
GPIO.output(TRIG, False)
print('Waiting for sensor to settle')
time.sleep(2)
try:
    while True:
        # 10 microsecond trigger pulse starts one measurement.
        GPIO.output(TRIG, True)
        time.sleep(0.00001)
        GPIO.output(TRIG, False)
        while GPIO.input(ECHO) == 0:
            start = time.time()
        while GPIO.input(ECHO) == 1:
            stop = time.time()
        # NOTE(review): if an echo edge is missed, start/stop may be unbound
        # or stale here -- a timeout would be safer; left as-is.
        check_time = stop - start
        distance = check_time * 34300 / 2  # sound at ~343 m/s, halved for the round trip
        print("Distance : %.1f cm" % distance)
        time.sleep(0.4)
except KeyboardInterrupt:
    print("Measurement stopped by User")
    GPIO.cleanup()