blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
739f1318cfd04dd60b3caf6bed0bc5d2ba1165ca | Python | ameidar/bittrex | /Mywallet.py | UTF-8 | 2,783 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python
# This program buys some Dogecoins and sells them for a bigger price
#from bittrex import bittrex
import smtplib
from bittrex.bittrex import *
from email.mime.text import MIMEText
#server = smtplib.SMTP('internal-mail-router.oraclecorp.com',25) #port 465 or 587
def getbalanceof(currency):
    # Dollar value of the available balance held in `currency`, priced via
    # the USDT market's last trade.
    # NOTE(review): relies on a module-level `api` client that is not created
    # in this file -- confirm it is initialised before this runs.
    balance_result = api.get_balance(currency)["result"]
    available = balance_result['Available']
    summary = api.get_marketsummary('{0}-{1}'.format('USDT', currency))
    last_price = summary["result"][0]['Last']
    return available * last_price
#r = requests.get("https://bittrex.com/api/v1.1/public/getticker?market=BTC-ADA")
#data = r.json()
#res = data["result"]
#print(res)
# Market to trade at
trade = 'USDT'
currency = 'ETH'
market = '{0}-{1}'.format(trade, currency)
# Amount of coins to buy
amount = 100
# How big of a profit you want to make
multiplier = 1.1
# Getting the BTC price for DOGE
#adasummary = api.get_marketsummary(market)
#res = adasummary["result"]
#dogeprice = res[0]['Last']
#print 'The price for {0} is {1:.8f} {2}.'.format(currency, dogeprice, trade)
#currency = 'ETH'
#Gets my ADA balance
#adabalance = api.get_balance(currency)
#res1 = adabalance["result"]
#print res1
# Fetch all wallet balances from the exchange.
# NOTE(review): `api` is never constructed in this file -- presumably created
# elsewhere or lost in this dump; confirm before running.
allb= api.get_balances()
fsdaf = allb["result"]
#resw = fsdaf[0]['Available']
#print (resw)
total = 0
# Sum the USD value of every non-zero holding, skipping USDT itself and XLM.
# (Python 2 script: statement-form `print` below.)
for i in range(0,fsdaf.__len__()):
    f = fsdaf[i]['Available']
    if f != 0.0 and fsdaf[i]['Currency'] != 'USDT' and fsdaf[i]['Currency'] != 'XLM':
        t = getbalanceof(fsdaf[i]['Currency'])
        total += t
        print '{0} = {1}$ '.format(fsdaf[i]['Currency'] , t )
print 'total in dollar = {0}'.format(total)
strTotal = 'total in dollar = {0}'.format(total)
# Dormant e-mail alert for out-of-range totals (SMTP server commented out above).
#if total > 3500 or total < 3000:
# server.sendmail("ami.meidar@oracle.com", "ami.meidar@oracle.com", strTotal)
# server.quit()
#getbalanceof(api ,currency)
#x = float(adab)
#y = float(dogeprice)
#TotalinDollar= float(res) * float(dogeprice)
#print (TotalinDollar)
#api.buy_limit(market, amount, dogeprice)
#Buying 100 DOGE for BTC
###print 'Buying {0} {1} for {2:.8f} {3}.'.format(amount, currency, dogeprice, trade)
###api.buylimit(market, amount, dogeprice)
# Multiplying the price by the multiplier
###dogeprice = round(dogeprice*multiplier, 8)
# Selling 100 DOGE for the new price
###print 'Selling {0} {1} for {2:.8f} {3}.'.format(amount, currency, dogeprice, trade)
###api.selllimit(market, amount, dogeprice)
# Gets the DOGE balance
###dogebalance = api.getbalance(currency)
###print "Your balance is {0} {1}.".format(dogebalance['Available'], currency)
# For a full list of functions, check out bittrex.py or https://bittrex.com/Home/Api
| true |
c9117404dabe22aba518c2b81eeb8c0119e1f65d | Python | SGenheden/advent_of_code | /aoc_2017/solutions2017/day24/part2.py | UTF-8 | 679 | 2.828125 | 3 | [] | no_license | from solutions2017.day24.utils import find_bridges, make_components
def solve(components_spec):
    """Return the strength of the longest bridge (AoC 2017 day 24, part 2).

    Bug fix: the original updated ``max_strength`` while ``max_length`` was
    still growing, so a strong-but-short bridge seen early could win over the
    actual longest bridge. Determine the final maximum length first, then take
    the best strength among bridges of exactly that length.
    """
    components = make_components(components_spec)
    bridges = []
    # Bridges are seeded with the dummy (-1, 0) port, hence bridge[1:] below.
    find_bridges([(-1, 0)], components, bridges)
    max_length = max((len(bridge) for bridge in bridges), default=0)
    max_strength = 0
    for bridge in bridges:
        if len(bridge) == max_length:
            strength = sum(sum(component) for component in bridge[1:])
            max_strength = max(max_strength, strength)
    return max_strength
if __name__ == "__main__":
    import fileinput
    # Read the component specs from stdin / the files named on the command line.
    spec = [raw_line.strip() for raw_line in fileinput.input()]
    print(f"The strength of the longest bridges is {solve(spec)}")
| true |
4ad90374047a5bc6d375bdc6b338e0275e0dcd3d | Python | hamologist/Code-Eval | /moderate/mth_to_last_element/mth_to_last_element.py | UTF-8 | 219 | 3.125 | 3 | [] | no_license | import sys
# For each input line "v1 v2 ... vk m", print the m-th value from the end
# (if the line has at least m values).
with open(sys.argv[1]) as handle:
    lines = handle.read().rstrip().splitlines()
for line in lines:
    tokens = line.split(' ')
    index = int(tokens.pop())
    if len(tokens) >= index:
        print(tokens[-index])
| true |
56dd5bd7e6335013426fc970408198acc1badc47 | Python | luomeng007/LintCode | /末尾几个0判断.py | UTF-8 | 2,198 | 4.03125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 2 09:02:57 2020
@author: 15025
虽然可以得到结果,但是当阶乘数字较大时,可能会溢出,时间复杂度太复杂,
可以进一步优化算法,尝试
"""
class Solution:
    """
    @param: n: An integer
    @return: An integer, denote the number of trailing zeros in n!
    """
    def trailingZeros(self, n):
        # Trailing zeros of n! come from factor pairs (2, 5); factors of 5 are
        # the scarce ones, so the answer is the number of multiples of 5 plus
        # multiples of 25, 125, ... up to n. This avoids computing the (huge)
        # factorial, which the original did in O(n) big-int multiplications.
        count = 0
        power = 5
        while power <= n:
            count += n // power
            power *= 5
        return count
# Smoke test: 105! ends in 25 zeros, so this should print 25.
main = Solution()
num = main.trailingZeros(105)
print(num)
# =============================================================================
# 科学计数法对10取余结果是否为0
# result = 1.23423523525e+63
# if result % 10 == 0:
# print("余数为0")
# 用科学计数法显示的时候,末尾不视为以0结尾
# =============================================================================
#
# result = 1.23423523525e+63
# print("%d" % result)
# 也就是说在以科学计数法表示后,其他位上的数字已经出现了偏差,虽然很小,尽量避免
# =============================================================================
# 看是否可以关闭科学计数法
# import numpy as np
# np.set_printoptions(suppress=True) # 感觉不是很好用
# result = np.array([12342352352500000000000000000000000000000000000000000000000])
# # print(result)
# =============================================================================
# result = 12342352352500000000000000000000000000000000000000000000000
# count = 0
# while result % 10 == 0:
# count += 1
# result //= 10 # 这里一定不要使用result /= 10,结果会不正确,选择整出代替
# print(count)
# print(result)
# print(count)
# 1000的时候输出为3 代码没毛病 | true |
2bf4825fd81f559250c4155a83daeb8bff520f16 | Python | frdizon/CSC611M_Project | /SentimentAnalysis_Parallel1.py | UTF-8 | 3,430 | 3.3125 | 3 | [] | no_license | from textblob import TextBlob
import pandas as pd
import numpy as np
import re
import time
from multiprocessing import Process, Lock, Value
# HELPERS: -------------------------------------------------------------
# Create a function to get the polarity
def getPolarity(text):
    # TextBlob sentiment polarity in [-1.0, 1.0]; negative values mean
    # negative sentiment.
    blob = TextBlob(text)
    return blob.sentiment.polarity
# Create a function to compute negative(-1), neutral(0), positive(+1) analysis
def getAnalysis(score):
    # Map a polarity score onto a label; exactly zero counts as neutral.
    if score > 0:
        return 'Positive'
    return 'Neutral' if score == 0 else 'Negative'
# -----------------------------------------------------------------------
# Process: -------------------------------------------------------------
class EvaluateTweetsProcess(Process):
    """Worker process: scores a slice of tweets and tallies shared counters.

    The three counters are multiprocessing.Value objects shared with the
    parent; the lock serialises updates to them.
    """

    def __init__(self, lock, tweets, positiveCount, neutralCount, negativeCount):
        Process.__init__(self)
        self.tweets = tweets
        self.lock = lock
        self.positiveCount = positiveCount
        self.neutralCount = neutralCount
        self.negativeCount = negativeCount

    def run(self):
        for tweet_text in self.tweets['text']:
            score = getPolarity(tweet_text)
            # Lock supports the context-manager protocol: acquire/release.
            with self.lock:
                if -0.05 <= score <= 0.05:
                    self.neutralCount.value += 1
                elif score < -0.05:
                    self.negativeCount.value += 1
                else:
                    self.positiveCount.value += 1
# Critical Section End
# -----------------------------------------------------------------------
if __name__ == '__main__':
    # PARAMS SET:
    fileName = 'SamsungDataFinalX4.csv'
    processCount = 8
    timeStart1 = time.time()
    # Read csv, put it in dataframe
    df = pd.read_csv(fileName)
    # NLP Process Start -----------------------------------------------------
    lock = Lock()
    processList = []
    # Initialize Polarity Categories count (shared integer counters)
    positiveCount = Value('i', 0) # n > 0.05
    neutralCount = Value('i', 0) # -0.05 <= n <= 0.05
    negativeCount = Value('i', 0) # n < -0.05
    # Stripe the dataframe across the workers: worker k gets rows k, k+P, k+2P, ...
    for processI in range(processCount):
        evalTweetsProcess = EvaluateTweetsProcess(lock, df[processI::processCount], positiveCount, neutralCount, negativeCount)
        evalTweetsProcess.start()
        processList.append(evalTweetsProcess)
    for evalProcess in processList:
        evalProcess.join()
    # Print of results (all critical section should be done first before this)
    mostPolarity = 'Positive'
    mostPolarityValue = positiveCount.value
    if mostPolarityValue < neutralCount.value:
        mostPolarityValue = neutralCount.value
        mostPolarity = 'Neutral'
    if mostPolarityValue < negativeCount.value:
        mostPolarityValue = negativeCount.value
        mostPolarity = 'Negative'
    totalCount = positiveCount.value + neutralCount.value + negativeCount.value
    print('APPROACH 1 Results:') # TO DO: Show Percentage
    # NOTE(review): the ', Negative:' literal below is joined to the previous
    # line by implicit string-literal concatenation (a '+' is missing) -- it
    # works, but only by accident of adjacency.
    print(mostPolarity + '( Positive: ' + str(positiveCount.value) + '(' + str((positiveCount.value/totalCount) * 100) + '%)'
        + ', Neutral: ' + str(neutralCount.value) + '(' + str((neutralCount.value/totalCount) * 100) + '%)'
        ', Negative: ' + str(negativeCount.value) + '(' + str((negativeCount.value/totalCount) * 100) + '%)' + ')')
    print('Time Taken: ' + str(time.time() - timeStart1 ) + 's')
| true |
def deposit(name, money):
    # Add `money` to the module-level balance belonging to `name`
    # (둘리 / 또치 / 도우너); unknown names are silently ignored.
    global balancedooly, balanceddochi, balancedouner
    if name == "둘리":
        balancedooly += money
    elif name == "또치":
        balanceddochi += money
    elif name == "도우너":
        balancedouner += money
def inquire(name):
    # Print the balance for `name`, formatted with thousands separators;
    # unknown names print nothing (same as the original elif chain).
    if name == "둘리":
        balance = balancedooly
    elif name == "또치":
        balance = balanceddochi
    elif name == "도우너":
        balance = balancedouner
    else:
        return
    print("%s의 잔액은 %s원입니다." % (name, format(balance, ',')))
# Account owners and their starting balances (8,000 won each).
dooly = "둘리"
ddochi = "또치"
douner = "도우너"
balancedooly = 8000
balanceddochi = 8000
balancedouner = 8000
# Deposit into each account and print the resulting balance.
deposit(dooly, 1000)
inquire(dooly)
deposit(ddochi, 2000)
inquire(ddochi)
deposit(douner, 3000)
inquire(douner)
class Solution(object):
    def myAtoi(self, str):
        """
        :type str: str
        :rtype: int

        Classic atoi: skip leading spaces, read an optional sign, consume
        ASCII digits, and clamp the result to 32-bit signed range.
        """
        INT_MIN = -2**31
        INT_MAX = 2**31 - 1
        str = self.removeLeadingWhitespace(str)
        if str == "":
            return 0
        sign = 1
        if str[0] in ('+', '-'):
            if str[0] == '-':
                sign = -1
            str = str[1:]
        if str == "":
            return 0
        result = 0
        # Only ASCII '0'..'9' count as digits, matching the original lookup list.
        while str and '0' <= str[0] <= '9':
            result = result * 10 + (ord(str[0]) - ord('0'))
            str = str[1:]
        result *= sign
        # Saturate to the 32-bit signed integer range.
        return max(INT_MIN, min(INT_MAX, result))

    def removeLeadingWhitespace(self, str):
        # Strip leading space characters only (not tabs/newlines).
        i = 0
        while i < len(str) and str[i] == ' ':
            i += 1
        return str[i:]
# Smoke test (Python 2 print statement): expect 42 on stdout.
print Solution().myAtoi("42")
2b0eea7c1cdb8352216f6947e2e7287575368185 | Python | M-Riku/.leetcode | /45.jump-game-ii.py | UTF-8 | 472 | 2.859375 | 3 | [] | no_license | #
# @lc app=leetcode id=45 lang=python3
#
# [45] Jump Game II
#
# @lc code=start
class Solution:
    def jump(self, nums: List[int]) -> int:
        # Greedy level-by-level sweep: `current_end` marks the far edge of the
        # range reachable with the jumps taken so far; `farthest` tracks how
        # far the next jump could extend it. Each time the scan reaches
        # `current_end`, one more jump is committed.
        if len(nums) == 1:
            return 0
        jumps = 0
        current_end = 0
        farthest = 0
        for index in range(len(nums) - 1):
            farthest = max(farthest, index + nums[index])
            if index == current_end:
                jumps += 1
                current_end = farthest
        return jumps
# @lc code=end
| true |
f1e197cd81546dd39a088367527c922b473a5daa | Python | larhauga/diskstats | /diskstats.py | UTF-8 | 3,572 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
import os, re, csv
import argparse
from time import sleep
# Tool for getting /proc/diskstats
# Kernel file parsed for per-device I/O statistics.
DISKSTATS_PATH = '/proc/diskstats'
# CSV column names: a timestamp we add, followed by the /proc/diskstats fields.
HEADERS = ['datetime', 'major_number', 'minor_number', 'device_name', 'read_completed_successfully',
           'reads_merged', 'sectors_read', 'time_spent_reading(ms)', 'writes_completed',
           'writes_merged', 'sectors_written', 'time_spent_writing_(ms)', 'IO_currently_in_progress',
           'time_spent_doing_IO_(ms)', 'weighted_time_spent_doing_IO']
# Set by read_diskstats() each time a snapshot is taken; used for CSV rows.
lastread_timestamp = None
def read_diskstats():
    # Parse /proc/diskstats into a list of dicts keyed by HEADERS[1:], and
    # record when the snapshot was taken in the module-level timestamp.
    global lastread_timestamp
    devices = []
    with open(DISKSTATS_PATH, 'r') as stats_file:
        for raw_line in stats_file.readlines():
            fields = re.split('\s+', raw_line.strip())
            devices.append(dict(zip(HEADERS[1:], fields)))
    lastread_timestamp = datetime.now()
    return devices
def find_device(data, device):
    """Return the first entry whose device_name equals `device`.

    Fix: the original used the Python-2-only generator method ``.next()``;
    the ``next()`` builtin (Python 2.6+ and 3.x) is equivalent. Still raises
    StopIteration when no entry matches, unchanged from the original.
    """
    return next(item for item in data if item['device_name'] == device)
def write_diskstats(data, device=None, path=""):
    # Append one CSV row per selected device to a stats file under `path`.
    # device=None  -> all devices into "<path>mon_diskstats"
    # one device   -> "<path>mon_<name>"
    # many devices -> "<path>mon_diskstats_<n1>_<n2>..."
    # A header row is written only when the target file does not exist yet.
    global lastread_timestamp
    csv_data = []
    if device:
        if len(device) > 1:
            filename = "{}mon_diskstats_{}".format(path, "_".join(device))
            if not os.path.isfile(filename):
                csv_data = [HEADERS]
            d = []
            for dev in device:
                d.append(find_device(data, dev))
            # NOTE(review): find_device raises StopIteration on a miss, so this
            # empty check can only trigger for an empty `device` list -- which
            # the outer `if device:` already excludes.
            if len(d) < 1:
                print "No devices found by filter {}".format(" ".join(device))
                return False
            else:
                data = d
            for dev in data:
                tmp = [dev[h] for h in HEADERS[1:]]
                # Prepend the snapshot timestamp taken by read_diskstats().
                tmp.insert(0, lastread_timestamp.isoformat())
                csv_data.append(tmp)
        else:
            # Single device: data becomes one dict rather than a list.
            data = find_device(data, device[0])
            filename = "{}mon_{}".format(path, device[0].strip())
            if not os.path.isfile(filename):
                csv_data = [HEADERS]
            tmp = [data[h] for h in HEADERS[1:]]
            tmp.insert(0, lastread_timestamp.isoformat())
            csv_data.append(tmp)
    else:
        filename = "{}mon_diskstats".format(path)
        if not os.path.isfile(filename):
            csv_data = [HEADERS]
        for dev in data:
            tmp = [dev[h] for h in HEADERS[1:]]
            tmp.insert(0, lastread_timestamp.isoformat())
            csv_data.append(tmp)
    # Append rows; DictWriter's underlying writer is used for plain lists.
    with open(filename, 'a') as f:
        w = csv.DictWriter(f, HEADERS)
        w.writer.writerows(csv_data)
def gather_and_write(devices):
    # Take one /proc/diskstats snapshot, append it to the CSV for `devices`,
    # and hand the parsed snapshot back to the caller.
    snapshot = read_diskstats()
    write_diskstats(snapshot, devices)
    return snapshot
def main():
    # Command-line entry point (Python 2: statement-form prints below).
    parser = argparse.ArgumentParser(description='Parser tool for /proc/diskstats')
    parser.add_argument('-l', '--loop', type=int, metavar='sleep',
                        help='Runs the program in a loop. Takes the looptime as option.')
    parser.add_argument('-d', '--device', metavar='name', nargs='+', default=None, help='Names, like sda, sda1...')
    parser.add_argument('-o', '--outdir', metavar='outdir', default=None, help='Path to store files in. End with /')
    args = parser.parse_args()
    # NOTE(review): args.outdir is parsed but never passed to write_diskstats.
    try:
        if args.loop:
            # Sample repeatedly every `loop` seconds until Ctrl-C.
            while True:
                print "Gathering data for {}".format(args.device)
                data = gather_and_write(args.device)
                sleep(args.loop)
        else:
            data = gather_and_write(args.device)
            # Debug print of a hard-coded device.
            print find_device(data, 'vda')
    except KeyboardInterrupt:
        print "Aborting..."


if __name__ == '__main__':
    main()
| true |
6b4fce34358ce73b8cdd1b1f29feca7218b06ce1 | Python | chriscassidy561/coinScript | /CoinDaemon.py | UTF-8 | 1,230 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
from Constants import Constants as cnt
class CoinDaemon:
    """Thin wrapper to start/stop/inspect a coin daemon process by name.

    Python 2 file (note the octal-style literal below, a SyntaxError on
    Python 3).
    """
    # Class-level defaults, overwritten per instance in __init__.
    coinName = ""
    coinPort = 00000
    COMMAND = ""
    coinDaemon = None
    coinDaemonStop = None

    def __init__(self, a_str, b_int, c_str):
        # a_str: daemon binary name; b_int: port.
        # NOTE(review): c_str is accepted but never used -- confirm callers.
        self.coinName = a_str
        self.coinPort = b_int
        self.COMMAND = cnt.SCRIPT_DIR + "bin/" + self.coinName + " -daemon "

    def start_daemon(self):
        """Responsible for starting the coin daemon process"""
        import subprocess, shlex, os
        # Launched via the shell; subprocess alternatives left commented out.
        os.system(self.COMMAND)
        #~ self.coinDaemon = subprocess.Popen(self.COMMAND.split())
        #~ proc.wait()
        #~ subprocess.call(self.COMMAND.split(), shell=True)

    def stop_daemon(self):
        """Responsible for stopping the coin daemon process"""
        import subprocess, time, os, signal
        # NOTE(review): overwrites self.COMMAND, so start_daemon() would run
        # pkill if called after stop_daemon() on the same instance.
        self.COMMAND = "/usr/bin/pkill -9 " + self.coinName
        os.system(self.COMMAND)

    def is_running(self):
        # Scan `ps axw` output for the daemon name (for/else sets the flag
        # when no line matched).
        import re, subprocess
        s = subprocess.Popen(["ps", "axw"],stdout=subprocess.PIPE)
        for x in s.stdout:
            if re.search(self.coinName, x):
                ding = True
                break
        else:
            ding = False
        return ding
| true |
18e7e68a987e25915e0bdb4c6f7d62c8329af10a | Python | kompics/kompicsbenches | /visualisation/custom_plotters/bench/atomic_broadcast/plot_ts_latency.py | UTF-8 | 4,955 | 2.578125 | 3 | [] | no_license | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sys
import argparse
import os
import plotly.graph_objects as go
# Command-line interface (Python 2 script: note sys.maxint).
parser = argparse.ArgumentParser()
parser.add_argument('-s', nargs='+', help='timestamp and latency directory')
parser.add_argument('-n', nargs='?', default=-1, help='Number of lines to read per file')
parser.add_argument('-t', nargs='?', help='Output directory')
parser.add_argument('-sample', nargs='?', default=1, help='Sample every X datapoint')
parser.add_argument('-keep', nargs='+', default=[0, sys.maxint], type=int, help='Interval of datapoints that must be kept')

# Trace colour per algorithm/legend label.
colors = {
    "paxos": "royalblue",
    "raft": "red",
    "raft-replace-leader": "forestgreen",
    "raft-replace-follower": "red"
}

all_leader_changes = []
all_timestamps = {} # legend -> timestamp
all_latencies = {} # legend -> latency

args = parser.parse_args()
print("Plotting with args:",args)
n = int(args.n)
sample = int(args.sample)
(keep_min, keep_max) = tuple(args.keep)
(ts_dir, latency_dir) = tuple(args.s)
# read timestamp files
ts_files = [f for f in os.listdir(ts_dir) if f.endswith('.data')]
for filename in ts_files :
count = 0
timestamps = []
leader_changes = []
f = open(ts_dir + "/" + filename, 'r')
print("Reading timestamp file", filename, "...")
# read leader_changes
first_line = f.readline();
pid_ts_str = first_line.split(" ")
for csv_str in pid_ts_str:
pid_ts = csv_str.split(",")
try:
pid = int(pid_ts[0])
ts = float(pid_ts[1])/1000 # ms
leader_changes.append((pid, ts))
except:
pass
# read timestamps
for line in f:
count += 1
#print(line)
if count % sample == 0 or (count >= keep_min and count <= keep_max):
try:
timestamps.append(float(line)/1000) # ms
except:
pass
if count == n:
break
if "paxos" in filename:
legend = "paxos"
else:
if "leader" in filename:
legend = "raft-replace-leader"
elif "follower" in filename:
legend = "raft-replace-follower"
else:
legend = "raft"
all_timestamps[legend] = timestamps
all_leader_changes.append((legend, leader_changes))
# read latency files
latency_files = [f for f in os.listdir(latency_dir) if f.endswith('.data')]
for filename in latency_files :
count = 0
latencies = []
f = open(latency_dir + "/" + filename, 'r')
print("Reading latency file", filename, "...")
# read timestamps
for line in f:
count += 1
#print(line)
if count % sample == 0 or (count >= keep_min and count <= keep_max):
try:
latencies.append(float(line)/1000) # ms
except:
pass
if count == n:
break
if "paxos" in filename:
legend = "paxos"
else:
if "leader" in filename:
legend = "raft-replace-leader"
elif "follower" in filename:
legend = "raft-replace-follower"
else:
legend = "raft"
all_latencies[legend] = latencies
# Build the scatter figure: vertical dashed lines mark leader changes
# (ts == 0 entries are the initial leaders, collected into the annotation).
initial_leaders = []
fig = go.Figure()
for (algorithm, leader_changes) in all_leader_changes:
    for (pid, ts) in leader_changes:
        if ts == 0:
            initial_leaders.append((algorithm, pid))
        else:
            fig.add_vline(x=ts, line_dash="dash", line_color=colors[algorithm], opacity=0.7, annotation_text="{} changed leader: {}".format(algorithm, pid), annotation_position="top left", annotation_textangle=90)
fig.add_vline(x=0, line_dash="dash", line_color="black", opacity=0.25, annotation_text="Initial leaders: {}".format(initial_leaders), annotation_position="top left", annotation_textangle=90)
# One scatter trace per algorithm (Python 2: dict.iteritems).
for (legend, timestamps) in all_timestamps.iteritems():
    data = go.Scattergl(x = timestamps, y = all_latencies[legend], mode = 'markers', name = legend, hovertemplate ='(%{x:.d}, %{y:.d})', marker = dict(color = colors[legend]))
    fig.add_trace(data)
# Directory names like "<nodes>-<concurrent_proposals>" enrich the legend.
dir_name = os.path.basename(os.path.normpath(ts_dir))
if "-" in dir_name:
    (nodes, cp) = tuple(dir_name.split("-"))
    fig.update_layout(
        title="Timestamp and Latency of every {}th response since start of benchmark, fully-sampled interval: [{}, {}]".format(sample, keep_min, keep_max),
        xaxis_title="Timestamp (ms)",
        yaxis_title="Latency (ms)",
        legend_title="nodes: {}, concurrent_proposals: {}".format(nodes, cp),
    )
else:
    fig.update_layout(
        title="Timestamp of every response since start of benchmark",
        xaxis_title="Timestamp (ms)",
        yaxis_title="Latency (ms)",
        #legend_title="3 nodes"
    )
# Write the interactive plot next to the input (or to -t when given).
if args.t is not None:
    target_dir = args.t
else:
    target_dir = ts_dir
if not os.path.exists(target_dir):
    os.makedirs(target_dir)
fig.write_html(target_dir + '/ts_latency.html', auto_open=False)
| true |
addf9ba63702633f94b37d4b5149e4fe3bfc48c3 | Python | tactycHQ/knowledge-graph | /components/facts_extractor.py | UTF-8 | 3,219 | 2.6875 | 3 | [] | no_license |
from spacy.tokens import Token, Span
from spacy.matcher import Matcher
from components import fact_matcher_rules
class FactsExtractor(object):
    """spaCy pipeline component that marks sentences containing a "fact".

    A fact is a span matching one of the token-level patterns in
    fact_matcher_rules.rules; matching sentences get the custom
    ``has_fact`` / ``fact`` span extensions set.

    Bug fix: the recursive calls inside token_matches' AND/OR branches were
    written as bare ``token_matches(...)`` (a NameError at runtime); they now
    go through ``self``.
    """

    name = 'facts'

    def token_matches(self, token, identifier):
        """True when `token` satisfies every spec in the `identifier` dict.

        Supported keys: 'DEP_', 'POS_', 'is_graph_entity', plus the
        combinators 'AND'/'OR' (lists of nested identifiers) and the
        wildcard-reject '*'.
        """
        matching = True
        for spec_key, spec_val in identifier.items():
            if spec_key == 'DEP_':
                matching = matching and token.dep_ == spec_val
            if spec_key == 'POS_':
                matching = matching and token.pos_ == spec_val
            if spec_key == 'is_graph_entity':
                matching = matching and token._.is_graph_entity == spec_val
            if spec_key == 'AND':
                # Fixed: recurse via self (was a bare function call).
                bools_arr = [self.token_matches(token, next_id) for next_id in spec_val]
                matching = matching and all(bools_arr)
            if spec_key == 'OR':
                # Fixed: recurse via self (was a bare function call).
                bools_arr = [self.token_matches(token, next_id) for next_id in spec_val]
                matching = matching and any(bools_arr)
            if spec_key == '*':
                matching = False
        return matching

    def sentence_matches(self, sentence, pattern):
        """Return the first span of `sentence` matching `pattern`, else None.

        Scans tokens left to right, advancing through `pattern` on each match.
        NOTE(review): once the full pattern has matched, pattern_index is
        decremented so trailing tokens that keep matching the last pattern
        element extend the span -- confirm this greedy tail is intended.
        """
        pattern_index = 0
        match_start = -1
        match_end = -1
        pattern_len = len(pattern)
        done = False
        for i, token in enumerate(sentence):
            if self.token_matches(token, pattern[pattern_index]):
                if pattern_index == 0:
                    match_start = i
                pattern_index += 1
                if pattern_index == pattern_len:
                    match_end = i+1
                    done = True
                    pattern_index -= 1
            elif done:
                match_end = i
                break
        if match_end != -1:
            return sentence[match_start:match_end]
        else:
            return None

    # nsubj:NER nsubj:NER verb,ROOT dobj pobj:NER
    # layer on top of matcher to find if matcher matched a "fact" by more complex rules
    def check_if_has_fact(self, sent):
        """Set the has_fact/fact extensions on `sent` if any rule matches."""
        patterns = fact_matcher_rules.rules
        for pattern in patterns:
            fact_span = self.sentence_matches(sent, pattern)
            if fact_span is not None:
                sent._.set('has_fact', True)
                sent._.set('fact', fact_span)
                return True
        return False

    def __init__(self, nlp):
        """Register extensions and a (currently unused) Matcher on `nlp`."""
        self.nlp = nlp
        if self.nlp.has_pipe('facts'):
            self.nlp.remove_pipe('facts')
        self.matcher = Matcher(nlp.vocab)
        # MAIN POINT OF IMPROVEMENT: get the "reg exp" better
        self.matcher.add('FACT', None, [{'DEP': 'nsubj'}], [{'DEP': 'ROOT'}], [{'DEP': 'dobj'}])
        Span.set_extension('has_fact', default=False)
        Span.set_extension('fact', default="") # string describing fact

    def __call__(self, doc):
        """Pipeline hook: annotate every sentence of `doc`, return `doc`."""
        for sent in doc.sents:
            # Called for its side effects on the sentence span extensions.
            self.check_if_has_fact(sent)
        return doc
| true |
7fd8ce83a626b7050a2170724b933026c9602946 | Python | ericbgarnick/AOC | /y2018/day05/day05.py | UTF-8 | 1,386 | 3.515625 | 4 | [] | no_license | import re
from sys import argv
def polymer_length(polymer: str, part_num: int):
    """Print the answer for the requested AoC day 5 part (1 or 2)."""
    if part_num == 1:
        print("Part 1")
        print("Reduced length:", reduce_polymer(polymer))
        return
    if part_num == 2:
        print("Part 2")
        print("Shortest polymer length:", shortest_polymer(polymer))
def reduce_polymer(polymer: str) -> int:
    """Return the length of the fully reacted polymer.

    Equivalent to the original fixpoint of repeated pairwise sweeps, but done
    in a single stack pass: a unit annihilates the unit below it on the stack
    when they are the same letter in opposite cases. O(n) instead of O(n^2).
    """
    stack = []
    for unit in polymer:
        # Same test as opposites(): different characters, same letter.
        if stack and unit != stack[-1] and unit.lower() == stack[-1].lower():
            stack.pop()
        else:
            stack.append(unit)
    return len(stack)
def opposites(a: str, b: str) -> bool:
    """True when a and b are the same letter in opposite cases."""
    same_letter = a.lower() == b.lower()
    return same_letter and a != b
def shortest_polymer(polymer: str) -> int:
    """Shortest reacted length achievable by removing one unit type entirely."""
    reduced_lengths = []
    for code in range(65, 91):  # 'A'..'Z'
        letter = chr(code)
        stripped = re.sub(letter, '', polymer, flags=re.I)
        reduced_lengths.append(reduce_polymer(stripped))
    return min(reduced_lengths)
if __name__ == '__main__':
    # Read the polymer from the file named in argv[1]; argv[2] picks part 1/2.
    # Fix: the original left the file handle unclosed -- use a context manager.
    with open(argv[1], 'r') as source:
        data_line = source.read().strip()
    polymer_length(data_line, int(argv[2]))
| true |
ff36cbfb5dbb93aee61daa9a6efd7e79916d1e79 | Python | mrcdb/dare-sec-topo | /cybertop/plugins/FilterQueryDigits.py | UTF-8 | 1,869 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2017 Politecnico di Torino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
"""
Input packets filter plug-in.
@author: Daniele Canavese
"""
from cybertop.plugins import FilterPlugin
class FilterQueryDigits(FilterPlugin):
    """
    Filters an attack event based on the query digits.
    """

    def filter(self, value, attackEvent):
        """
        Filters an attack event.
        @param value: The optional value for the filter, e.g. ">=3".
        @param attackEvent: The attack event to analyze.
        @return: True if the event must be accepted, False if the event must be discarded.
        """
        digit_count = sum(ch.isdigit() for ch in attackEvent.fields["query"])
        relationship, threshold_text = re.findall("(==|!=|<|<=|>|>=)(\d+)", value)[0]
        threshold = int(threshold_text)
        if relationship == "==":
            return digit_count == threshold
        if relationship == "!=":
            return digit_count != threshold
        if relationship == "<":
            return digit_count < threshold
        if relationship == "<=":
            return digit_count <= threshold
        if relationship == ">":
            return digit_count > threshold
        if relationship == ">=":
            return digit_count >= threshold
        return False
| true |
def find_largest(x):
    """Return the largest element of the non-empty sequence x."""
    best = x[0]
    for candidate in x[1:]:
        if candidate > best:
            best = candidate
    return best
48a856e790519eecb81abe37c42f0e503d860d30 | Python | nurruden/training | /NonTraining/mock/test.py | UTF-8 | 555 | 3.140625 | 3 | [] | no_license | #-*-coding:utf-8-*-
#/usr/bin/env python
__author__ = "Allan"
import unittest
from function import add_and_multiply
import mock
class MyTestCase(unittest.TestCase):
    """Exercise add_and_multiply with the multiply dependency mocked out."""

    @mock.patch('function.multiply')
    def test_add_and_multiply(self, mock_multiply):
        # Force the mocked multiply to a known value, then check both the
        # real addition and the mocked product flow through unchanged.
        mock_multiply.return_value = 15
        addition, multiple = add_and_multiply(3, 5)
        mock_multiply.assert_called_once_with(3, 5)
        self.assertEqual(8, addition)
        self.assertEqual(15, multiple)
# Run the test case when executed directly.
if __name__ == "__main__":
    unittest.main()
91210e7682fb954eedf1876bb326be2d20ac3749 | Python | neelkapadia/WolfPal | /keyword-mapping/sklearn2.py | UTF-8 | 4,284 | 3.109375 | 3 | [
"MIT"
] | permissive | from sklearn.decomposition import NMF, LatentDirichletAllocation, TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer
doc1 = "Fundamental issues related to the design of operating systems. Process scheduling and coordination, deadlock, memory management and elements of distributed systems."
doc2 = "Algorithm design techniques: use of data structures, divide and conquer, dynamic programming, greedy techniques, local and global search. Complexity and analysis of algorithms: asymptotic analysis, worst case and average case, recurrences, lower bounds, NP-completeness. Algorithms for classical problems including sorting, searching and graph problems [connectivity, shortest paths, minimum spanning trees]."
doc3 = "Introduces students to the discipline of designing, developing, and testing secure and dependable software-based systems. Students will learn about risks and vulnerabilities, and effective software security techniques. Topics include common vulnerabilities, access control, information leakage, logging, usability, risk analysis, testing, design principles, security policies, and privacy. Project required."
doc4 = "Introduction to and overview of artificial intelligence. Study of AI programming language such as LISP or PROLOG. Elements of AI problem-solving technique. State spaces and search techniques. Logic, theorem proving and associative databases. Introduction to knowledge representation, expert systems and selected topics including natural language processing, vision and robotics."
doc5 = "Basic theory and concepts of human-computer interaction. Human and computational aspects. Cognitive engineering. Practical HCI skills. Significant historical case studies. Current technology and future directions in user interface development."
doc6 = "The conception and creation of effective visual interfaces for mobile devices, including ideation and prototyping for useful mobile applications, the industry and architecture of mobile devices, mobile usage context, computer graphics and interfaces for mobiles, and mobile programming."
doc7 = "Topics related to design and management of campus enterprise networks, including VLAN design; virtualization and automation methodologies for management; laboratory use of open space source and commercial tools for managing such networks."
doc8 = "Algorithm behavior and applicability. Effect of roundoff errors, systems of linear equations and direct methods, least squares via Givens and Householder transformations, stationary and Krylov iterative methods, the conjugate gradient and GMRES methods, convergence of method."
# compile documents
documents = [doc1, doc2, doc3, doc4, doc5, doc6, doc7, doc8]
#test_doc = [doc7, doc8]
NUM_TOPICS = 1
vectorizer = CountVectorizer(min_df=1, max_df=6,
stop_words='english', lowercase=True,
token_pattern='[a-zA-Z\-][a-zA-Z\-]{2,}')
data_vectorized = vectorizer.fit_transform(documents)
# Build a Latent Dirichlet Allocation Model
lda_model = LatentDirichletAllocation(n_topics=NUM_TOPICS, max_iter=10, learning_method='online')
lda_Z = lda_model.fit_transform(data_vectorized)
print(lda_Z.shape) # (NO_DOCUMENTS, NO_TOPICS)
# Build a Non-Negative Matrix Factorization Model
nmf_model = NMF(n_components=NUM_TOPICS)
nmf_Z = nmf_model.fit_transform(data_vectorized)
print(nmf_Z.shape) # (NO_DOCUMENTS, NO_TOPICS)
# Build a Latent Semantic Indexing Model
lsi_model = TruncatedSVD(n_components=NUM_TOPICS)
lsi_Z = lsi_model.fit_transform(data_vectorized)
print(lsi_Z.shape) # (NO_DOCUMENTS, NO_TOPICS)
# Let's see how the first document in the corpus looks like in different topic spaces
print("LDA")
print(lda_Z[0])
print()
print("NMF")
print(nmf_Z[0])
print()
print("LSI")
print(lsi_Z[0])
def print_topics(model, vectorizer, top_n=10):
    """Print the top_n highest-weighted terms for every topic in `model`."""
    for topic_idx, weights in enumerate(model.components_):
        print("Topic %d:" % (topic_idx))
        terms = vectorizer.get_feature_names()
        ranked_indices = weights.argsort()[:-top_n - 1:-1]
        print([(terms[i], weights[i]) for i in ranked_indices])
# Dump the top terms per topic for each of the three fitted models.
print("LDA Model:")
print_topics(lda_model, vectorizer)
print("=" * 20)
print("NMF Model:")
print_topics(nmf_model, vectorizer)
print("=" * 20)
print("LSI Model:")
print_topics(lsi_model, vectorizer)
print("=" * 20)
7fd9780bf8a1186822c20c0556fc29616e0153a3 | Python | shadrul/dsa-levelup | /array&vectors/mindiff.py | UTF-8 | 609 | 3.359375 | 3 | [] | no_license | #min Difference
import sys
def minDiff(a, b):
    """Return the pair (x, y), x from `a` and y from `b`, with minimal |x - y|.

    Two-pointer sweep over the sorted sequences, as in the original, with two
    fixes: the inputs are no longer sorted in place (callers' lists are not
    mutated any more) and the duplicated, unreachable trailing return is gone.
    An exact match still short-circuits immediately.
    """
    sorted_a = sorted(a)
    sorted_b = sorted(b)
    best = sys.maxsize
    x = y = None
    i = j = 0
    while i < len(sorted_a) and j < len(sorted_b):
        diff = abs(sorted_a[i] - sorted_b[j])
        if diff < best:
            best = diff
            x = sorted_a[i]
            y = sorted_b[j]
        if diff == 0:
            # Cannot do better than an exact match.
            break
        if sorted_a[i] < sorted_b[j]:
            i += 1
        else:
            j += 1
    return (x, y)
if __name__ == "__main__":
    # Two lines of whitespace-separated integers from stdin.
    first = list(map(int, input().split()))
    second = list(map(int, input().split()))
    print(minDiff(first, second))
f81dfa9f5677c582ccec889bc75c8c5e90308613 | Python | ccxt/ccxt | /python/ccxt/async_support/base/ws/order_book_side.py | UTF-8 | 6,072 | 2.765625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import sys
import bisect
"""Author: Carlo Revelli"""
"""Fast bisect bindings"""
"""https://github.com/python/cpython/blob/master/Modules/_bisectmodule.c"""
"""Performs a binary search when inserting keys in sorted order"""
class OrderBookSide(list):
side = None # set to True for bids and False for asks
def __init__(self, deltas=[], depth=None):
super(OrderBookSide, self).__init__()
self._depth = depth or sys.maxsize
self._n = sys.maxsize
# parallel to self
self._index = []
for delta in deltas:
self.storeArray(list(delta))
def storeArray(self, delta):
price = delta[0]
size = delta[1]
index_price = -price if self.side else price
index = bisect.bisect_left(self._index, index_price)
if size:
if index < len(self._index) and self._index[index] == index_price:
self[index][1] = size
else:
self._index.insert(index, index_price)
self.insert(index, delta)
elif index < len(self._index) and self._index[index] == index_price:
del self._index[index]
del self[index]
def store(self, price, size):
self.storeArray([price, size])
def limit(self):
difference = len(self) - self._depth
for _ in range(difference):
self.remove_index(self.pop())
self._index.pop()
def remove_index(self, order):
pass
def __len__(self):
length = super(OrderBookSide, self).__len__()
return min(length, self._n)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = item.indices(len(self))
return [self[i] for i in range(start, stop, step)]
else:
return super(OrderBookSide, self).__getitem__(item)
def __eq__(self, other):
if isinstance(other, list):
return list(self) == other
return super(OrderBookSide, self).__eq__(other)
def __repr__(self):
return str(list(self))
# -----------------------------------------------------------------------------
# overwrites absolute volumes at price levels
# or deletes price levels based on order counts (3rd value in a bidask delta)
# this class stores vector arrays of values indexed by price
class CountedOrderBookSide(OrderBookSide):
def __init__(self, deltas=[], depth=None):
super(CountedOrderBookSide, self).__init__(deltas, depth)
def storeArray(self, delta):
price = delta[0]
size = delta[1]
count = delta[2]
index_price = -price if self.side else price
index = bisect.bisect_left(self._index, index_price)
if size and count:
if index < len(self._index) and self._index[index] == index_price:
self[index][1] = size
self[index][2] = count
else:
self._index.insert(index, index_price)
self.insert(index, delta)
elif index < len(self._index) and self._index[index] == index_price:
del self._index[index]
del self[index]
def store(self, price, size, count):
self.storeArray([price, size, count])
# -----------------------------------------------------------------------------
# indexed by order ids (3rd value in a bidask delta)
class IndexedOrderBookSide(OrderBookSide):
def __init__(self, deltas=[], depth=None):
self._hashmap = {}
super(IndexedOrderBookSide, self).__init__(deltas, depth)
def storeArray(self, delta):
price = delta[0]
if price is not None:
index_price = -price if self.side else price
else:
index_price = None
size = delta[1]
order_id = delta[2]
if size:
if order_id in self._hashmap:
old_price = self._hashmap[order_id]
index_price = index_price or old_price
# in case the price is not defined
delta[0] = abs(index_price)
# matches if price is not defined or if price matches
if index_price == old_price:
# just overwrite the old index
index = bisect.bisect_left(self._index, index_price)
self._index[index] = index_price
self[index] = delta
return
else:
# remove old price level
old_index = bisect.bisect_left(self._index, old_price)
del self._index[old_index]
del self[old_index]
# insert new price level
self._hashmap[order_id] = index_price
index = bisect.bisect_left(self._index, index_price)
self._index.insert(index, index_price)
self.insert(index, delta)
elif order_id in self._hashmap:
old_price = self._hashmap[order_id]
index = bisect.bisect_left(self._index, old_price)
del self._index[index]
del self[index]
del self._hashmap[order_id]
def remove_index(self, order):
order_id = order[2]
if order_id in self._hashmap:
del self._hashmap[order_id]
def store(self, price, size, order_id):
self.storeArray([price, size, order_id])
# -----------------------------------------------------------------------------
# a more elegant syntax is possible here, but native inheritance is portable
class Asks(OrderBookSide): side = False # noqa
class Bids(OrderBookSide): side = True # noqa
class CountedAsks(CountedOrderBookSide): side = False # noqa
class CountedBids(CountedOrderBookSide): side = True # noqa
class IndexedAsks(IndexedOrderBookSide): side = False # noqa
class IndexedBids(IndexedOrderBookSide): side = True # noqa
| true |
8de117f8192ba535049227cff7663b0942652141 | Python | oscarburgo/AgendaNadela | /sections/delete_contact.py | UTF-8 | 317 | 2.640625 | 3 | [] | no_license | import db.core as db
from sections.view_contacts import run as view_contacts
def run():
contactos = db.read()
view_contacts()
index_contacto = int(input("[+] Selecciona un contacto: ")) - 1
contactos.remove(contactos[index_contacto])
db.save(contactos)
print("Contacto eliminado!")
| true |
f897662d2fea5b955c761d8c005ae8690a90c79b | Python | Silviu777/Blockchain | /main.py | UTF-8 | 2,023 | 3.0625 | 3 | [] | no_license | import os
from hashlib import sha256
from random import randint
from datetime import datetime
import json
class BlockChainGenerator():
def __init__(self, fileName, newChain=True, difficultyLevel=2):
self.fileName = fileName
self.newChain = newChain
self.difficultyLevel = difficultyLevel
self.BlockChain = []
self.HandleJson()
self.AddBlock("GENESIS BLOCK") if self.BlockChain == [] else None
# Creating the .json file
def HandleJson(self):
if not self.newChain:
try:
with open(self.fileName) as f:
self.BlockChain = json.load(f)
except Exception as e:
print(f"Error: {e}")
# Creating the block
def CreateBlock(self, data):
block = {}
block["data"] = data
block["index"] = str(len(self.BlockChain))
block["timeStamp"] = str(datetime.utcnow())
block["previousHash"] = self.BlockChain[0]["currentHash"] if self.BlockChain != [] else "x"
block["currentHash"], block["nounce"] = self.miner(
block["data"] + block["index"] + block["timeStamp"] + block["previousHash"]
)
return block
def miner(self, dataString):
while True:
nounce = str(randint(0, 1E10))
hash = sha256(str(dataString + nounce).encode()).hexdigest()
if hash[:self.difficultyLevel] == "0" * self.difficultyLevel:
return hash, nounce
# Adding a block to BlockChain
def AddBlock(self, data):
self.BlockChain = [self.CreateBlock(data)] + self.BlockChain
with open(self.fileName, "w") as f:
json.dump(self.BlockChain, f)
# -------------------------------------------------------------------
BlockChainFileName = "BlockChain.json"
Transaction = BlockChainGenerator(BlockChainFileName, newChain=False, difficultyLevel=3)
while True:
Transaction.AddBlock(input("Enter Transaction ::: "))
os.system(BlockChainFileName)
| true |
46dd5858b63cd6c563f382d06ed275234d6e80be | Python | pythonTedo/Pygame | /knightGame/main.py | UTF-8 | 12,912 | 2.84375 | 3 | [] | no_license | import pygame
import random
import button
pygame.init()
pygame.font.init()
bottom_panel = 150
WIDTH, HEIGHT = 800, 400 + bottom_panel
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Battle")
#define fonts
font = pygame.font.SysFont("Times New Roman", 26)
red = (255, 0, 0)
green = (0, 255, 0)
#define game variables
current_fighter = 1
total_fighters = 3
action_cooldown = 0
action_wait_time = 90
attack = False
potion = False
potion_effect = 15
clicked = False
game_over = 0 # 1:win game -1: lost
# Load IMGs
# background
BG_img = pygame.image.load('img/Background/background.png').convert_alpha()
PANEL_img = pygame.image.load('img/Icons/panel.png').convert_alpha()
SWORD_img = pygame.image.load('img/Icons/sword.png').convert_alpha()
potion_img = pygame.image.load("img/Icons/potion.png").convert_alpha()
restart_img = pygame.image.load("img/Icons/restart.png").convert_alpha()
#load victory and defeat
VICTORY_img = pygame.image.load('img/Icons/victory.png').convert_alpha()
DEFEAT_img = pygame.image.load("img/Icons/defeat.png").convert_alpha()
# keep the window running
FPS = 60
run = True
clock = pygame.time.Clock()
class Fighter():
def __init__(self, x, y, name, max_hp, strength, potions):
self.x = x
self.y = y
self.name = name
self.max_hp = max_hp
self.strength = strength
self.potions = potions
self.hp = max_hp
self.start_potion = potions
self.alive = True
self.animation_list = []
self.frame_index = 0 #to control the animation
self.action = 0 #0:idle, 1:attack, 2:hurt, 3:dead
self.update_time = pygame.time.get_ticks() # keep in track with the time since the instance is created
# load idle images
temp_list = []
for i in range(8):
image = pygame.image.load("img/%s/Idle/%s.png" % (self.name, i))
image = pygame.transform.scale(image, (image.get_width() * 3, image.get_height() * 3))
temp_list.append(image)
self.animation_list.append(temp_list) #index 0 list is my idle images...
#load attack images
temp_list = []
for i in range(8):
image = pygame.image.load("img/%s/Attack/%s.png" % (self.name, i))
image = pygame.transform.scale(image, (image.get_width() * 3, image.get_height() * 3))
temp_list.append(image)
self.animation_list.append(temp_list) #index 1 list is my attack images...
# load hurt animation
temp_list = []
for i in range(3):
image = pygame.image.load("img/%s/%s/%s.png" % (self.name, "Hurt", i))
image = pygame.transform.scale(image, (image.get_width() * 3, image.get_height() * 3))
temp_list.append(image)
self.animation_list.append(temp_list)
#load dead animation
temp_list = []
for i in range(10):
image = pygame.image.load("img/%s/%s/%s.png" % (self.name, "Death", i))
image = pygame.transform.scale(image, (image.get_width() * 3, image.get_height() * 3))
temp_list.append(image)
self.animation_list.append(temp_list)
self.image = self.animation_list[self.action][self.frame_index]
self.rect = self.image.get_rect() #take widths and heights of the pict
self.rect.center = (x, y)
def update(self): # animation update
animation_cooldown = 100 ## ms time measure
# handle animation
self.image = self.animation_list[self.action][self.frame_index]
# take current time - take the time when instance is created > time measure update index
if pygame.time.get_ticks() - self.update_time > animation_cooldown:
self.update_time = pygame.time.get_ticks()
self.frame_index += 1
#if the animation has run out of images, redo them
if self.frame_index >= len(self.animation_list[self.action]):
if self.action == 3:
self.frame_index = len(self.animation_list[self.action]) - 1
else:
self.idle()
#resets animation to idle station after action
def idle(self):
self.action = 0
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def attack(self, target):
#deal dmg to enemy
rand = random.randint(-5, 5)
damage = self.strength + rand
target.hp -= damage
#run enemy hurt animation
target.hurt()
#check if target is dead
if target.hp < 1:
target.hp = 0
target.alive = False
target.dead()
damage_text = DamageText(target.rect.centerx, target.rect.y, str(damage), red)
damage_text_group.add(damage_text)
#set var for attack animation
self.action = 1
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def hurt(self):
self.action = 2
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def dead(self):
self.action = 3
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def reset(self):
self.alive = True
self.potions = self.start_potion
self.hp = self.max_hp
self.frame_index = 0
self.action = 0
self.update_time = pygame.time.get_ticks()
def draw(self):
screen.blit(self.image, self.rect)
class HealthBar:
def __init__(self, x, y, hp, max_hp):
self.x = x
self.y = y
self.hp = hp
self.max_hp = max_hp
def draw(self, hp):
self.hp = hp
# calculate health ration
ratio = self.hp/self.max_hp
pygame.draw.rect(screen, red, (self.x, self.y, 150, 20))
pygame.draw.rect(screen, green, (self.x, self.y, 150 * ratio, 20))
class DamageText(pygame.sprite.Sprite):
def __init__(self, x, y, damage, colour):
pygame.sprite.Sprite.__init__(self)
self.image = font.render(damage, True, colour)
self.rect = self.image.get_rect()
self.rect.center = (x, y)
self.counter = 0
def update(self):
#move damage text up
self.rect.y -= 1
#delete the text after few sec
self.counter += 1
if self.counter > 30:
self.kill()
damage_text_group = pygame.sprite.Group()
knight = Fighter(200, 260, "Knight", 5, 12, 3)
bandit1 = Fighter(550, 270, "Bandit", 30, 10, 1)
bandit2 = Fighter(700, 270, "Bandit", 30, 10, 1)
bandit_list = []
bandit_list.append(bandit1)
bandit_list.append(bandit2)
knight_health_bar = HealthBar(100, HEIGHT - bottom_panel + 40, knight.hp, knight.max_hp)
bandit1_health_bar = HealthBar(550, HEIGHT - bottom_panel + 40, bandit1.hp, bandit1.max_hp)
bandit2_health_bar = HealthBar(550, HEIGHT - bottom_panel + 100, bandit2.hp, bandit2.max_hp)
#create buttons
potion_button = button.Button(screen,100, HEIGHT - bottom_panel + 70, potion_img, 64, 64)
restart_button = button.Button(screen,330, 120, restart_img, 120, 30)
# draw text
def draw_text(text, font, text_color, x, y):
img = font.render(text, True, text_color)
screen.blit(img, (x, y))
def draw_panel():
screen.blit(PANEL_img, (0, HEIGHT - bottom_panel))
#show knight stats
draw_text("%s HP: %d" % (knight.name, knight.hp), font, red, 100, HEIGHT - bottom_panel + 10)
#show bandit stats
for count, bandit in enumerate(bandit_list):
draw_text("%s HP: %d" % (bandit.name, bandit.hp), font, red, 550, (HEIGHT - bottom_panel + 10) + count * 60)
#beacuse i have multiple bandits y axis is going to move down for each bandit
def draw():
screen.blit(BG_img, (0, 0))
draw_panel()
knight.update() # first update then draw
knight.draw()
knight_health_bar.draw(knight.hp)
bandit1_health_bar.draw(bandit1.hp)
bandit2_health_bar.draw(bandit2.hp)
for bandit in bandit_list:
bandit.update()
bandit.draw()
while run:
clock.tick(FPS)
draw()
#draw damage text
damage_text_group.update() #inherited methods
damage_text_group.draw(screen)
# SECTION 1 LOOKING FOR ACTIONS
#player actions control
#reset action vars
attack = False
potion = False
target = None
#make sure mouse is visible
pygame.mouse.set_visible(True)
pos = pygame.mouse.get_pos()
for count, bandit in enumerate(bandit_list):
if bandit.rect.collidepoint(pos):
#hide mouse
pygame.mouse.set_visible(False)
#show sword in place of mouse cursor
screen.blit(SWORD_img, pos)
if clicked == True and bandit.alive == True: # can click only on alive bandit
attack = True
target = bandit_list[count]
if potion_button.draw():
potion = True
#show nums of potions
draw_text(str(knight.potions), font, red, 150, HEIGHT - bottom_panel + 70)
if game_over == 0: # SECTION 2 EXECUTE ACTIONS
# player action
if knight.alive:
if current_fighter == 1:
action_cooldown += 1
if action_cooldown >= action_wait_time:
#look for player action
#attack
if attack == True and target != None:
knight.attack(target)
current_fighter += 1
action_cooldown = 0
#potion
if potion == True:
if knight.potions > 0:
#check if hp is enought for a potion
if knight.max_hp - knight.hp > potion_effect:
heal_amount = potion_effect
else:
heal_amount = knight.max_hp - knight.hp
knight.hp += heal_amount
knight.potions -= 1
damage_text = DamageText(knight.rect.centerx, knight.rect.y, str(heal_amount), green)
damage_text_group.add(damage_text)
#count as an action
current_fighter += 1
action_cooldown = 0
else:
game_over = -1
# enemy action
for count, bandit in enumerate(bandit_list):
if current_fighter == 2 + count:
if bandit.alive:
action_cooldown += 1
if action_cooldown >= action_wait_time:
# look for player action
#pcheck if needs to heal
if (bandit.hp / bandit.max_hp) < 0.5 and bandit.potions > 0:
# check if hp is enought for a potion
if bandit.max_hp - bandit.hp > potion_effect:
heal_amount = potion_effect
else:
heal_amount = bandit.max_hp - bandit.hp
bandit.hp += heal_amount
bandit.potions -= 1
damage_text = DamageText(bandit.rect.centerx, bandit.rect.y, str(heal_amount), green)
damage_text_group.add(damage_text)
# count as an action
current_fighter += 1
action_cooldown = 0
# attack
else:
bandit.attack(knight)
current_fighter += 1
action_cooldown = 0
else:
current_fighter += 1
#if all the fighters had a turn then reset
if current_fighter > total_fighters:
current_fighter = 1
#check if all bandits are dead
alive_bandits = 0
for bandit in bandit_list:
if bandit.alive == True:
alive_bandits += 1
if alive_bandits == 0:
game_over = 1
#check if game is over
if game_over != 0:
if game_over == 1:
screen.blit(VICTORY_img, (255, 50))
if game_over == -1:
screen.blit(DEFEAT_img, (290, 50))
if restart_button.draw():
knight.reset()
for bandit in bandit_list:
bandit.reset()
current_fighter = 1
action_cooldown
game_over = 0
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.MOUSEBUTTONDOWN:
clicked = True
else:
clicked = False
pygame.display.update() ## when drawing updates the screen
pygame.quit() | true |
6500d85ca8fce5bf0996a7479f3ec9e7d64c14d5 | Python | JonPython/LearningCommunicate | /LearningCommunicate/question.py | UTF-8 | 13,262 | 2.8125 | 3 | [] | no_license | # coding="utf-8"
'''问题数据库>>>:
1,使用库名Ifquestion
2.1,建立问题总表userask,字段包含:
1,uid(int主键,自增长),
2,用户名user(非空),
3,问题类型type(enum,包含:"Python","数据库","网络编程","WEB",
"GUI","模块相关","项目相关","其他","心情墙")
4,问题标题title(限制40个字)
5,问题详情question(varchar(5000))
6,图片字段img1 mediumblob
7,'同问'计数使用number(int,默认为1)
8,同问用户记录numusers(varchar(5000),默认为'')
2.2,建立问题答案表solution,字段包含:
答案id:aid(int主键自增长)
问题唯一标识:sid(userask表uid)
回答者:answers(char(20))
问题答案:solutions(varchar(5000))
图片字段:img2 mediumblob
2.3,建立用户表users包含字段:
用户名name(唯一性,非空,20)
邮箱mail
生日brithday
性别sex
密码password(非空,20)
头像img3 blob
以下为用户类功能:
1,处理申请分类applyfor
2,提供验证用户名唯一性调用函数isexist
3,提供用户名,密码调用函数checking
4,提供注册成功插入函数userinsert
5,个人信息展示show
6,对密码进行加密处理
以下是问题类功能:
1,提供函数type10
分别调用9个类型的问题,并按'同问'字段从大到小排序,返回uid,title,number
2,提供函数sameask
'同问'按钮点击后数值+1的功能,同一个id的问题同一用户只生效一次
3,提供热门问题函数hot
所有问题根据'同问'字段从大到小排序,返回uid,title,number
4,提供用户回答过的问题调用函数answer,返回问题标题
5,提供提问问题提交函数ask
包含用户名,问题类型,问题标题以及问题详情,插入到总表.
6,提供回答问题调用函数reply,插入答案
7,提供用户提问过的问题调用函数question,返回uid,title,number
8,提供单个显示问题调用函数single,
包含用户名,问题详情,所有回答者和他们的答案
'''
import pymysql
db = pymysql.connect('localhost', 'root', '123456', charset="utf8")
cs = db.cursor()
# 一般情况下,库会另外提前单独创建,本函数是为在不同主机上调试使用
def makesql():
# 创建Ifquestion库
cs.execute('drop database Ifquestion;') # 测试用
cs.execute('''create database if not exists
Ifquestion default charset="utf8";''')
cs.execute('''use Ifquestion;''')
# 创建userask表,
# uid,user,type,title,question,number,numbers
# 图片字段img1 mediumblob
cs.execute('''create table if not exists
userask(uid int primary key auto_increment,
user char(20) not null,type enum(
"Python","数据库","网络编程","WEB",
"GUI","模块相关","项目相关","其他","心情墙"),
title char(40),question varchar(5000),
img1 mediumblob,
number int default 1,
numusers varchar(5000) default "",
index(number)
) default charset="utf8";''')
# 建立问题答案表solution
# 问题id:sid(int)
# 答案id:aid(int)
# 所有回答者:answers(char(20)
# 问题答案:solutions(varchar(5000))
# 图片字段img2
cs.execute('''create table if not exists
solution(aid int primary key auto_increment,
sid int,answers char(20),
solutions varchar(5000),
img2 mediumblob,
index(sid)) default charset="utf8";
''')
# 建立用户表users包含字段:
# 用户名name(唯一性,非空)
# 邮箱mail
# 生日brithday
# 性别sex
# 密码password(非空,最少6位)
# 头像img3 blob
cs.execute('''create table if not exists
users(name char(20) primary key,
mail char(30),brithday char(20),
sex enum('男','女','保密'),
password char(20) not null,
img3 blob);''')
# 建立申请分类表applyfor,字段包含:
# 用户名user,申请标题title,申请内容content(500字)
cs.execute('''create table if not exists
applyfor(user char(20),title char(40),
content varchar(500));''')
# 创建留言表message,字段包含:
# 用户名user,内容content,留言者name
cs.execute('''create table if not exists
message(user char(20),content varchar(1000),
name char(20));''')
# 创建留言记录表msghistory,字段包含:
# 用户名user,内容content,留言者name
cs.execute('''create table if not exists
msghistory(user char(20),content varchar(1000),
name char(20));''')
insert1() # 测试用
db.commit()
#测试用数据插入函数
def insert1():
f = open('./wenjian.txt', 'r')
f = f.read()
cs.execute('''use Ifquestion;''')
l = f.split('----------')
try:
for i in l:
s = i.split('#.#')
sql = '''insert into userask(user,type,title,question)
values('%s','%s','%s','%s');''' % (
s[0][1:], s[1].strip(), s[2].strip(), s[3])
cs.execute(sql)
f.close()
cs.execute('''insert into users(name,mail,brithday,sex,password)
values('未知','666666@qq,com','01010101','保密','123456');''')
except IndexError as e:
pass
#-----------------用户类功能-------------------------
# 处理申请分类
def applyfor(user, title, content):
try:
cs.execute('''insert into applyfor values(
'%s','%s','%s');''' % (user, title, content))
return True
except:
return False
# 1.1,提供验证用户名唯一性调用函数isexist
def isexist(name):
cs.execute('''select name from users
where name='%s';''' % name)
n = cs.fetchone()
if n == None: # 如果不存在,说明用户名可用
return True
else:
return False
# 1.2,提供用户名,密码验证函数checking
def checking(name, passwd):
password = encryption(passwd)
cs.execute('''select name,password from users
where name='%s' and password='%s';
''' % (name, password))
n = cs.fetchone()
if n == None: # 如果不存在,说明用户名或密码不正确
return False
else:
return True
# 1.3,提供注册成功插入函数userinsert
def userinsert(name, mail, brithday, sex, passwd):
password = encryption(passwd)
cs.execute('''insert into users(name,
mail,brithday,sex,password) values(
'%s','%s','%s','%s','%s');
''' % (name, mail, brithday, sex, password))
# 1.4,个人信息展示
def show(sid):
cs.execute('''select user from userask where uid='%s';''' % sid)
name = cs.fetchone()[0]
cs.execute('''select mail,brithday,sex from users
where name='%s';''' % name)
l = cs.fetchone()
try:
mail, bri, sex = l[0], l[1], l[2]
return (name, mail, bri, sex)
except TypeError:
return ('未知','666666@qq,com','01010101','保密')
# 1.5,存储留言信息
def messageto(sid,mes,name):
cs.execute('''select user from userask where uid='%s';''' % sid)
user = cs.fetchone()[0]
try:
cs.execute('''insert into message(user,content,name)
values('%s','%s','%s');''' % (user,mes,name))
cs.execute('''insert into msghistory(user,content,name)
values('%s','%s','%s');''' % (user,mes,name))
return True
except:
return False
# 1.6,存储留言信息
def messageto2(user,mes,name):
try:
cs.execute('''insert into message(user,content,name)
values('%s','%s','%s');''' % (user,mes,name))
return True
except:
return False
# 1.7,查询留言信息
def message(user):
cs.execute('''select content,name from message where
user='%s';''' % user)
l=cs.fetchall()
if len(l)<1:
return 'nothing'
else:
s=''
for i in l:
s+=(i[1]+'*.*留言: '+i[0]+'#.#')
cs.execute('''delete from message where user='%s';
''' % user)
return s
# 简单加密
def encryption(msg):
s = msg[3]
l = msg.split(s)
s1 = chr(ord(s)+1)
msg1 = s1.join(l)
return msg1
# -----------------问题类功能-------------------------
# 1提供函数分别调用9个类型的问题,并按'同问'字段从大到小排序
def type10(type1, m):
# 传参type1(python或web...)和m,分别返回 10个类型问题的
# 标题(title) 同问(number)
if m == 0:
# 所有问题根据'同问'字段从大到小排序,取前十个记录的标题和同问数
sql = '''select uid,title,number from userask where type='%s'
order by number desc limit 20;''' % type1
elif m == 1:
# 所有问题根据'同问'字段从大到小排序,取所有个记录的标题和同问数
sql = '''select uid,title,number from userask where type='%s'
order by number desc ;''' % type1
cs.execute(sql)
n = cs.fetchall()
return n
# 2实现同问功能函数
# 提供'同问'按钮点击后数值+1的功能,同一个id的问题同一用户只生效一次
def sameask(uid, name):
try:
cs.execute('''select numusers from userask where uid=%d
''' % uid)
s = cs.fetchone()[0]
cs.execute('''select number from userask where uid=%d
''' % uid)
n = cs.fetchone()[0]
# 判断是否已经点击过按钮
if (name in s) or ((len(s)+len(name)) > 5000):
return n
else:
# 增加同问用户
cs.execute('''update userask set numusers='%s' where
uid=%d''' % (s+name, uid))
# 更改同问人数数量
cs.execute('''update userask set number=%d where
uid=%d''' % (n+1, uid))
return n+1
except TypeError:
return 1
# 3提供热门问题调用函数,所有问题根据'同问'字段从大到小排序
def hot(m):
if m == 0:
# 所有问题根据'同问'字段从大到小排序,取前十个记录的标题
sql = '''select uid,title,number from userask
order by number desc limit 20;'''
elif m == 1:
# 所有问题根据'同问'字段从大到小排序,取所有的记录的标题
sql = '''select uid,title,number from userask
order by number desc ;'''
cs.execute(sql)
n = cs.fetchall()
return n
# 4提供用户回答过的问题调用函数,包含,问题标题
def answer(name):
# name为提供用户名,取此用户名下的 问题类型,问提标题,同问计数 问题
sql = '''select sid from solution
where answers='%s';''' % name
cs.execute(sql)
n = cs.fetchall()
l = []
for i in n:
uid = i[0]
cs.execute('''select uid,title,number from userask
where uid='%d';''' % uid)
uid, title, number = cs.fetchone()
l1 = str(uid)+'*.*'+title+'*.*'+str(number)
l.append(l1)
return l
# 5提供提问问题提交插入函数,包含用户名,问题类型,问题标题
# 以及问题详情,插入到总表.
def ask(name, type1, title, question):
try:
# 插入语句
title = title.strip()
question = question.strip()
sql = '''insert into userask(user,type,title,question)
values('%s','%s','%s','%s');''' % (name, type1, title, question)
cs.execute(sql)
db.commit()
return True
except:
return False
# 6提供回答问题调用函数,插入答案表
def reply(sid, name, solution):
try:
sql = '''insert into solution(sid,answers,solutions)
values('%d','%s','%s');''' % (sid, name, solution)
cs.execute(sql)
db.commit()
return True
except:
return False
# 7提供用户提问过的问题调用函数,包含问题标题,
def question(name):
# name为提供用户名,
sql = '''select title from userask
where user='%s';''' % name
cs.execute(sql)
n = cs.fetchall()
l = []
for i in n:
title = i[0]
cs.execute('''select uid,number from userask
where user='%s' and title='%s';''' % (name, title))
uid, number = cs.fetchone()
l1 = str(uid)+'*.*'+title+'*.*'+str(number)
l.append(l1)
return l
# 8提供单个显示问题调用函数,包含用户名,问题详情,
# 所有答案(包含回答者用户名)
def single(uid):
sql = '''select user,question from
userask where uid=%d;''' % uid
cs.execute(sql)
a = cs.fetchone() # 问题的用户名,问题详情
sql = '''select answers,solutions from solution
where sid='%d' limit 10;''' % uid
cs.execute(sql)
b = cs.fetchall() # 问题的所有答案
return (a, b)
# 9答案回复互动函数
# def revert(aid,rev):
# try:
# cs.execute('''select solutions from
# solution where aid=%d;''' % aid)
# a=cs.fetchone()
# ss=a[0]+'#.#'+rev
# cs.execute('''update solution set
# solutions='%s';''' % ss)
# return True
# except:
# return False
| true |
f3ec95729520fd62d6756f3bb58ce980696d89ef | Python | HannahYH/My-Projects | /CV_Segmentation/SVM.py | UTF-8 | 11,325 | 2.53125 | 3 | [] | no_license | # Writted by Hanxin Chen, Xueyan Lu and Han Yang
# This is the code for TwoStep (SVM + Post-processing) Method
# https://github.com/dgriffiths3/ml_segmentation
# The link above is the reference for our code
import os
import cv2
import time
import math
import random
import numpy as np
import pickle as pkl
import mahotas as mt
from glob import glob
from matplotlib import pyplot as plt
from numpy.lib import stride_tricks
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.model_selection import train_test_split
from skimage import measure
from skimage.morphology import reconstruction, dilation
from scipy import ndimage as ndimage
# ============ Utils ==========================================
def read_data(image_dir, label_dir):
    """Load training images and their label images from two directories.

    Returns four lists of grayscale (single-channel) arrays:
    (train_images, train_labels, test_images, test_labels).
    The first 30 files returned by glob are used as a fixed test set,
    and 30 randomly shuffled files are used as the training set.

    NOTE(review): glob() does not guarantee a sorted order, so "first 30"
    depends on the filesystem; verify ordering assumptions.
    NOTE(review): the shuffled training subset may overlap with the fixed
    test subset (both are drawn from the same trainlist) — possible data
    leakage; confirm this is intended.
    """
    print('[INFO] Loading image data.')
    # extract all pathes of images and labels, contains augmented images and labels
    trainlist = glob(os.path.join(image_dir, '*.jpg'))
    labellist = glob(os.path.join(label_dir, '*.jpg'))
    image_list = []
    label_list = []
    image_list_test = []
    label_list_test = []
    # read all first 30 raw images and labels, use it as test dataset for cross validation
    # (cv2.imread(..., 0) loads each file as a grayscale image)
    for file in trainlist[:30]:
        image_list_test.append(cv2.imread(file, 0))
    for file in labellist[:30]:
        label_list_test.append(cv2.imread(file, 0))
    # shuffle all images, use it as whole dataset for cross validation
    random.shuffle(trainlist)
    for i in range(len(trainlist[:30])):
        # the numeric id is the text between 'volume' and the extension,
        # e.g. '.../train-volume12.jpg' -> '12'
        num = trainlist[i].split('volume')[1].split('.')[0]
        # extract the corresponding label file name
        # (rebuilds '<prefix>labels/train-labels<num>.jpg' from the first label path;
        #  assumes that directory layout — TODO confirm)
        name = labellist[0].split('label')[0] + 'labels/train-labels' + num + '.jpg'
        image_list.append(cv2.imread(trainlist[i], 0))
        label_list.append(cv2.imread(name, 0))
    return image_list, label_list, image_list_test, label_list_test
def subsample_idx(low, high, sample_size):
    """Return ``sample_size`` random integer indices drawn (with
    replacement) from the half-open interval [low, high)."""
    drawn = np.random.randint(low, high, size=sample_size)
    return drawn
# ============ Utils ==========================================
# ========== Extract Features Part =============================
def create_dark_blob(img):
    """Build a binary mask suppressing large dark blobs in ``img``.

    The image is opened morphologically (erode then dilate) to remove
    speckle, connected components of the non-background pixels are
    labelled, and every component larger than 5000 pixels is collected
    into ``mask``.  The inverted mask (255 - mask) is returned, so the
    large blobs come out black and everything else white.
    """
    print('[INFO] Detecting dark blobs.')
    # morphological opening (erode then dilate) to clean up speckle noise
    erosion = cv2.erode(img, None, iterations=1)
    dilation = cv2.dilate(erosion, None, iterations=1)
    # label 8-connected components; intensity 255 is treated as background
    # NOTE: the `neighbors` kwarg is deprecated in newer scikit-image
    # (use connectivity=2 instead) — keep as-is for the pinned version.
    labels = measure.label(dilation, neighbors=8, background=255)
    mask = np.zeros(dilation.shape, dtype="uint8")
    # loop over the unique components
    for label in np.unique(labels):
        # label 0 is the background component, ignore it
        if label == 0:
            continue
        component = labels == label
        numPixels = np.count_nonzero(component)
        # if the component is sufficiently large, add it to the
        # mask of "large blobs" (mask built only when needed)
        if numPixels > 5000:
            labelMask = np.zeros(dilation.shape, dtype="uint8")
            labelMask[component] = 255
            mask = cv2.add(mask, labelMask)
    return 255 - mask
def create_LCHF_features(img):
    """Compute the LCHF feature map for a grayscale image.

    Pipeline: 3x3 median blur -> CLAHE local contrast equalisation ->
    global binary threshold at 100 -> morphological hole filling via
    erosion-based reconstruction -> 5x5 median smoothing.
    """
    print('[INFO] Computing LCHF features.')
    denoised = cv2.medianBlur(img, 3)
    equalizer = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(15, 15))
    equalized = equalizer.apply(denoised)
    _, binary = cv2.threshold(equalized, 100, 255, cv2.THRESH_BINARY)
    # seed the reconstruction with the image maximum everywhere except
    # the border, so regions enclosed by foreground get filled in
    marker = binary.copy()
    marker[1:-1, 1:-1] = binary.max()
    filled = reconstruction(marker, binary, method='erosion').astype(int)
    filled[filled > 255] = 255
    smoothed = cv2.medianBlur(filled.astype(np.uint8), 5)
    return smoothed
def create_features(img, label, train=True, num_examples=1000):
    """Build a per-pixel feature matrix for ``img``.

    Parameters
    ----------
    img : 2-D grayscale image (uint8).
    label : 2-D ground-truth image with the same shape as ``img``, or
        None when ``train`` is False.  Pixels > 128 are treated as
        non-membrane, the rest as membrane.
    train : when True, subsample ``2 * num_examples`` class-balanced
        pixels and return their labels; when False, return features for
        every pixel and ``labels=None``.
    num_examples : pixels sampled per class (default 1000, matching the
        original hard-coded value; keyword added backward-compatibly).

    Returns
    -------
    (features, labels) where features has one row per (sampled) pixel
    and six columns: raw intensity, Laplacian, LCHF, Sobel,
    bilateral+threshold, attribute-opening+threshold.
    """
    # first feature column: the raw pixel intensity
    img_reshape = img.reshape(img.shape[0] * img.shape[1], 1)
    features = np.zeros((img.shape[0] * img.shape[1], 1))
    features[:, :1] = img_reshape
    ss_idx = []
    if train:
        labels = label.reshape(label.shape[0] * label.shape[1], 1)
        # split pixel indices by class (vectorised version of the
        # original per-pixel loop)
        non = np.where(labels[:, 0] > 128)[0]
        mem = np.where(labels[:, 0] <= 128)[0]
        # randomly select num_examples positions per class (with replacement)
        idx_non = subsample_idx(0, len(non), num_examples)
        idx_mem = subsample_idx(0, len(mem), num_examples)
        # BUG FIX: the original used the positions *within* the class
        # lists directly as pixel indices; map them back to actual pixels
        # so the sample really is half membrane / half non-membrane.
        ss_idx = [*non[idx_non], *mem[idx_mem]]
        features = features[ss_idx]
        labels = labels[ss_idx]
    else:
        labels = None
    # ---------- start creating features ----------
    # Laplace
    Laplace_feature = cv2.Laplacian(img, cv2.CV_64F)
    # LCHF
    LCHF_feature = create_LCHF_features(img)
    # Sobel
    sobel_feature = cv2.Sobel(img, cv2.CV_8U, 1, 1, ksize=7)
    # Bilateral filter followed by a global threshold
    bil = cv2.bilateralFilter(img, 9, 75, 75)
    ret, bg_feature = cv2.threshold(bil, 116, 255, cv2.THRESH_BINARY)
    # Grey-scale attribute opening, then the same bilateral + threshold
    # (np.int was removed in NumPy 1.24; plain int is equivalent here)
    ao = ndimage.grey_opening(img, structure=np.ones((3, 3))).astype(int)
    ao = np.array(ao, np.uint8)
    ao = cv2.bilateralFilter(ao, 9, 75, 75)
    ret, ao_feature = cv2.threshold(ao, 116, 255, cv2.THRESH_BINARY)
    # ---------- integrate features (column order must stay stable) ----------
    for fmap in (Laplace_feature, LCHF_feature, sobel_feature,
                 bg_feature, ao_feature):
        column = fmap.reshape(fmap.shape[0] * fmap.shape[1], 1)
        if train:
            column = column[ss_idx]
        features = np.hstack((features, column))
    return features, labels
def create_dataset(image_list, label_list):
    """Build the (samples, features) matrix X and flat label vector y.

    Runs create_features() on every image/label pair, stacks the per-image
    results, then collapses the leading (image, pixel) axes into one
    sample axis.
    """
    print('[INFO] Creating dataset on %d image(s).' % len(image_list))
    per_image = [create_features(img, label_list[pos]) for pos, img in enumerate(image_list)]
    X = np.array([feats for feats, _ in per_image])
    y = np.array([labs for _, labs in per_image])
    print('[INFO] Features shape in ', X.shape)
    # (n_images, n_pixels, n_features) -> (n_images * n_pixels, n_features)
    X = X.reshape(X.shape[0] * X.shape[1], X.shape[2])
    print('[INFO] Labels shape in ', y.shape)
    y = y.reshape(y.shape[0] * y.shape[1], y.shape[2]).ravel()
    return X, y
# ========== Extract Features Part ======================
# ========== Training Part ==============================
def train_model(X, y):
    """Fit an RBF-kernel SVM classifier on (X, y) and report training accuracy.

    Returns the fitted sklearn SVC instance.
    """
    print('[INFO] Training Support Vector Machine model.')
    classifier = SVC(gamma='scale', verbose=1)
    classifier.fit(X, y)
    print('[INFO] Model training complete.')
    print('[INFO] Training Accuracy: %.2f' % classifier.score(X, y))
    return classifier
# ========== Training Part =============================
# ========== Evaluation Part ===========================
def test_model(X, y, model):
    """Print accuracy/precision/recall/F1 of `model` on (X, y); return accuracy.

    Precision/recall/F1 are weighted averages restricted to the labels that
    actually occur in the predictions (avoids warnings on unseen classes).
    """
    print('[INFO] Evaluating Support Vector Machine model.')
    pred = model.predict(X)
    seen = np.unique(pred)
    results = [
        ('Accuracy', metrics.accuracy_score(y, pred)),
        ('Precision', metrics.precision_score(y, pred, average='weighted', labels=seen)),
        ('Recall', metrics.recall_score(y, pred, average='weighted', labels=seen)),
        ('F1', metrics.f1_score(y, pred, average='weighted', labels=seen)),
    ]
    print('--------------------------------')
    for metric_name, score in results:
        print('[RESULTS] %s: %.2f' % (metric_name, score))
    print('--------------------------------')
    return results[0][1]
# ========== Evaluation Part =============================
# ========== Prediction Part =============================
def create_features_for_test(img):
    """Extract per-pixel features for an unlabeled inference image.

    Thin wrapper over create_features with label=None and train=False, so
    no pixel subsampling is applied and every pixel is returned.
    """
    features, _ = create_features(img, label=None, train=False)
    return features
def compute_prediction(img, model):
    """Classify every pixel of `img` with `model` and return a 2-D mask.

    The flat prediction vector is reshaped to a square image of side
    sqrt(n_pixels) — assumes the input image is square (TODO confirm).
    """
    feats = create_features_for_test(img)
    print('[INFO] Predicting:')
    flat_pred = model.predict(feats.reshape(-1, feats.shape[1]))
    print('[INFO] Finish Prediction:')
    side = int(math.sqrt(feats.shape[0]))
    return flat_pred.reshape(side, side)
# ========== Prediction Part =============================
def main(image_dir, label_dir, output_model, test_dir, output_dir):
    """Train SVM pixel classifiers with 5-fold CV, then run inference.

    image_dir / label_dir: training images and label masks (via read_data).
    output_model: directory receiving one pickled model per fold.
    test_dir: directory of *.jpg images to segment.
    output_dir: directory where predicted masks are written.
    """
    start = time.time()
    print('[INFO] Start:')
    # training
    image_list, label_list, image_list_test, label_list_test = read_data(image_dir, label_dir)
    print('[INFO] Creating the whole dataset:')
    X, y = create_dataset(image_list, label_list)
    print('[INFO] Creating the Testing dataset:')
    X_test, y_test = create_dataset(image_list_test, label_list_test)
    # cross-validation
    fold = 5
    fold_len = int(len(X)/fold)
    idx = [i for i in range(len(X))]
    for i in range(fold):
        # for each fold, the size of train dataset is 120
        # Train on every sample except fold i. NOTE(review): the held-out
        # fold itself is never evaluated — every fold is scored on the
        # same fixed X_test/y_test instead.
        X_train = X[[*idx[:i*fold_len], *idx[(i+1)*fold_len:fold*fold_len]]]
        y_train = y[[*idx[:i*fold_len], *idx[(i+1)*fold_len:fold*fold_len]]]
        print('[INFO] Fold ' + str(i+1) + ', Training dataset shape in ', X_train.shape, y_train.shape)
        print('[INFO] Fold ' + str(i+1) + ', Testing dataset shape in ', X_test.shape, y_test.shape)
        model = train_model(X_train, y_train)
        # NOTE(review): the file object handed to pkl.dump is never closed.
        pkl.dump(model, open(output_model+'/aug_data_0810_'+str(i)+'.pkl', "wb"))
        # evaluation
        test_model(X_test, y_test, model)
    # predicting
    # Inference always loads the fold-0 model, not the best-scoring fold.
    print('[INFO] Using Support Vector Machine model to do prediction.')
    loaded_model = pkl.load(open(output_model+'/aug_data_0810_'+str(0)+'.pkl', "rb"))
    filelist = glob(os.path.join(test_dir,'*.jpg'))
    print('[INFO] Running prediction on %s test images' %len(filelist))
    for file in filelist:
        print('[INFO] Processing images:', os.path.basename(file))
        # cv2.imread(..., 0) loads the image as single-channel grayscale
        inference_img = compute_prediction(cv2.imread(file, 0), loaded_model)
        # post-processing: remove dark blobs
        print('[INFO] Post-Processing images:')
        mask = create_dark_blob(inference_img)
        cv2.imwrite(os.path.join(output_dir, 'predict_' + os.path.basename(file)), mask)
    print('[INFO] Processing time:',time.time()-start)
if __name__ == "__main__":
    # Google-Drive (Colab) paths; adjust these when running locally.
    image_dir = "/content/drive/My Drive/COMP9517/data/images"
    label_dir = "/content/drive/My Drive/COMP9517/data/labels"
    output_model = "/content/drive/My Drive/COMP9517/data"
    predict_dir = "/content/drive/My Drive/COMP9517/data/tests"
    output_dir = "/content/drive/My Drive/COMP9517/data/results"
    main(image_dir, label_dir, output_model, predict_dir, output_dir)
| true |
d9856d1782e484b89593d854eb1fe8a0a7a12041 | Python | lilunjiaax/JvlunlTest | /python实现排序算法.py | UTF-8 | 7,452 | 3.90625 | 4 | [] | no_license | """
简单选择排序:依次遍历未排序的列表,选出其中的最小值,插在对应的位置
第一次遍历[0:n]得到最小值,排在第一个位置,
第二次遍历[1:n]得到最小值,排在第二个位置,
。。。。
"""
def selectionSort(a_list):
    """In-place selection sort, ascending; returns the same list object.

    For each position, find the smallest remaining element and swap it in.
    """
    length = len(a_list)
    for pos in range(length):
        smallest = pos
        for candidate in range(pos + 1, length):
            if a_list[candidate] < a_list[smallest]:
                smallest = candidate
        a_list[pos], a_list[smallest] = a_list[smallest], a_list[pos]
    return a_list
def bubbleSort(a_list):
    """In-place bubble sort, ascending; returns the same list object.

    After pass k the last k elements are in their final position, so each
    inner pass shrinks by one.
    """
    length = len(a_list)
    for done in range(length):
        for j in range(length - 1 - done):
            if a_list[j] > a_list[j + 1]:
                a_list[j], a_list[j + 1] = a_list[j + 1], a_list[j]
    return a_list
def insertSortSearch(a_list):
    """In-place insertion sort, ascending; returns the same list object.

    Shifts larger elements of the already-sorted prefix to the right and
    drops the current value into the gap.
    """
    for i in range(1, len(a_list)):
        current = a_list[i]
        j = i - 1
        while j >= 0 and a_list[j] > current:
            a_list[j + 1] = a_list[j]
            j -= 1
        a_list[j + 1] = current
    return a_list
################################################################################
def shellSort(a_list):
    """Shell sort with the fixed gap sequence 4, 3, 2, 1 (in place, ascending).

    For each gap, performs a gapped insertion sort; the final gap of 1 is a
    plain insertion sort, so the list ends fully sorted. Returns the same
    list object.
    """
    for gap in range(4, 0, -1):
        for i in range(gap, len(a_list)):
            current = a_list[i]
            j = i
            while j >= gap and a_list[j - gap] > current:
                a_list[j] = a_list[j - gap]
                j -= gap
            a_list[j] = current
    return a_list
#################################################################################
def quickSort(a_list):
    """Functional quicksort: returns a new ascending list, input untouched.

    Uses the first element as pivot and three-way partitioning, so
    duplicates of the pivot are handled in one pass.
    """
    if not a_list:
        return []
    pivot = a_list[0]
    below = [v for v in a_list if v < pivot]
    equal = [v for v in a_list if v == pivot]
    above = [v for v in a_list if v > pivot]
    return quickSort(below) + equal + quickSort(above)
#################################################################################
def mergeSortRecursive(a_list):
    """Top-down merge sort: returns a new ascending list for len > 1.

    Lists of length 0 or 1 are returned as-is (same object), matching the
    original's behavior; on ties the left element wins, keeping the sort
    stable.
    """
    if len(a_list) <= 1:
        return a_list
    mid = len(a_list) // 2
    left = mergeSortRecursive(a_list[:mid])
    right = mergeSortRecursive(a_list[mid:])
    merged = []
    li = ri = 0
    while li < len(left) and ri < len(right):
        if left[li] < right[ri]:
            merged.append(left[li])
            li += 1
        else:
            merged.append(right[ri])
            ri += 1
    merged.extend(left[li:])
    merged.extend(right[ri:])
    return merged
##################################################################################
def move_item(a_list, i):
    """Sift the element at index `i` down a max-heap stored in `a_list`.

    Repeatedly swaps a_list[i] with its larger child (the left child on a
    tie) until the heap property holds at `i` or `i` has no children.

    Fixes the original's typo `i = 2+i + 1` in the left-child-only branch
    (should be `2*i + 1`); the old expression jumped to an unrelated index.
    """
    n = len(a_list)
    while i < n:
        left = 2 * i + 1
        if left >= n:
            break  # leaf: nothing below to compare with
        right = 2 * i + 2
        # larger child; prefer the left child when the two are equal
        child = left
        if right < n and a_list[right] > a_list[left]:
            child = right
        if a_list[i] < a_list[child]:
            a_list[i], a_list[child] = a_list[child], a_list[i]
            i = child
        else:
            break  # heap property already satisfied here
def heapSort(a_list):
    """Heap sort: builds a max-heap in place, then drains it.

    Repeatedly swaps the heap maximum to the end, pops it off, and
    restores the heap with move_item. NOTE: empties the input list and
    returns a new ascending list.
    """
    # heapify: sift down every internal node, bottom-up
    for node in range((len(a_list) - 1) // 2, -1, -1):
        move_item(a_list, node)
    drained = []
    while a_list:
        a_list[0], a_list[-1] = a_list[-1], a_list[0]
        drained.append(a_list.pop())
        move_item(a_list, 0)
    # drained holds the values largest-first; reverse for ascending order
    return drained[::-1]
###########################################################################
def radixSort(a_list, radix=10):
    """LSD radix sort for non-negative integers, in place; returns a_list.

    One bucket pass per radix digit of the largest value; each pass
    redistributes the (stable) bucket contents back into a_list.

    Fixes the original maximum scan, which started from a_list[1]: it
    crashed (IndexError) on single-element input and mis-sorted whenever
    the maximum sat at index 0 (too few digit passes).
    """
    if not a_list:
        return a_list
    a_max = max(a_list)
    # K = number of radix digits in the largest value
    K = 0
    while a_max:
        a_max //= radix
        K += 1
    for digit in range(1, K + 1):
        bucket = [[] for _ in range(radix)]
        for val in a_list:
            # value of the `digit`-th digit (1 = least significant)
            bucket[val % (radix ** digit) // (radix ** (digit - 1))].append(val)
        del a_list[:]
        for item in bucket:
            a_list.extend(item)
    return a_list
##################################################################
def BucketSort(a_list, bucket_num=5):
    """Bucket sort, in place; returns the same list object.

    Splits the value range [min, max+1) into bucket_num equal float-width
    ranges (the +1 keeps the maximum inside the last bucket), sorts each
    bucket, then concatenates the buckets back into a_list. Raises on an
    empty list (min/max of an empty sequence), like the original.
    """
    lower = min(a_list)
    step = ((max(a_list) + 1) - lower) / bucket_num
    buckets = [[] for _ in range(bucket_num)]
    for b in range(bucket_num):
        upper = lower + step
        for value in a_list:
            if lower <= value < upper:
                buckets[b].append(value)
        lower = upper  # same float accumulation as the original boundary walk
    del a_list[:]
    for bucket in buckets:
        a_list.extend(sorted(bucket))
    return a_list
#
#
# if __name__ == "__main__":
#     a_list = [3, 4, 1, 10, 8, 5, 9, 11, 18, 13, 101]
#     # print(selectionSort(a_list))
#     # print(bubbleSort(a_list))
#     # print(insertSortSearch(a_list))
#     # print(quickSort(a_list))
#     # print(mergeSortRecursive(a_list))
#     # print(heapSort(a_list))
#     # print(radixSort(a_list))
#     # print(BucketSort(a_list))
#     print(shellSort(a_list))
# Demo: sort the sample list with BucketSort (runs on import/execution).
a_list = [3, 4, 1, 10, 8, 5, 9, 11, 18, 13, 101]
print(BucketSort(a_list))
| true |
848c6986d6c57f55b6d4f3ffcc3345ea4ccbd55a | Python | Wenzurk-Ma/Python-Crash-Course | /Chapter 05/ages.py | UTF-8 | 359 | 3.671875 | 4 | [
"Apache-2.0"
] | permissive | # Title : TODO
# Objective : TODO
# Created by: Wenzurk
# Created on: 2018/2/6

age = 21

# (upper bound, message) pairs, checked in order; the first match wins,
# mirroring the original if/elif ladder. The for/else prints the fallback
# message when no bound matched.
for bound, message in (
    (2, "She is a baby."),
    (4, "She is a child."),
    (13, "She is a little girl."),
    (20, "She is a beautiful girl."),
    (65, "She is a woman."),
):
    if age < bound:
        print(message)
        break
else:
    print("She is an older.")
77e02731f4b1b36bfd023f432c30208d64b8eb74 | Python | grecoe/pythonthreading | /pageparser/persist.py | UTF-8 | 1,311 | 3.171875 | 3 | [
"MIT"
] | permissive | from datetime import datetime
import os
import json
def normalizeBase(base):
    """Return `base` as an existing directory path with a trailing separator.

    Falls back to the current directory when `base` is falsy, and creates
    the directory (including parents) when it does not exist.

    Uses os.sep instead of the original hard-coded Windows '\\\\' so the
    helper also behaves on POSIX systems, and os.makedirs instead of
    os.mkdir so nested paths do not fail on a missing parent.
    """
    if not base:
        base = '.' + os.sep
    elif not os.path.isdir(base):
        os.makedirs(base)
    if not base.endswith(os.sep):
        base += os.sep
    return base
def createPath(base, name, time_stamp=None):
    '''
    Builds up a directory structure based on
    NAME/Year/Month/Day and returns it with a trailing separator.

    `time_stamp` defaults to the current time. The original declared
    `time_stamp=datetime.now()` as the default, which is evaluated once at
    import time and froze the date for every later call; passing None and
    resolving it per call fixes that. Portable os.sep replaces the
    hard-coded Windows '\\\\'.
    '''
    if time_stamp is None:
        time_stamp = datetime.now()
    base = normalizeBase(base)
    path = [name, str(time_stamp.year), str(time_stamp.month), str(time_stamp.day)]
    for part in path:
        base += part + os.sep
        if not os.path.isdir(base):
            os.makedirs(base)
    return base
def writeContent(directory, name, content):
    '''
    Serialize `content` as pretty-printed (indent=4) JSON into
    directory/name, overwriting any existing file.
    '''
    target = os.path.join(directory, name)
    with open(target, 'w') as handle:
        handle.write(json.dumps(content, indent=4))
def loadContent(directory, name=None):
    """Return the text of a file, or None when it does not exist.

    `directory` may already be the full file path; when `name` is given it
    is joined onto `directory`.

    The original read via readlines() and re-joined with '\\n', which
    doubled every newline (readlines keeps the trailing '\\n' on each
    line); read() returns the content verbatim.
    """
    file_data = None
    # Might have the file on it already
    file_path = os.path.join(directory, name) if name else directory
    if os.path.isfile(file_path):
        with open(file_path, 'r') as file_read:
            file_data = file_read.read()
    return file_data
708889792ce6ed99bdc1635bc28c1407ec7a4245 | Python | xiang-daode/Python3_codes | /compile.py | UTF-8 | 234 | 3.046875 | 3 | [] | no_license | # 在这里写上你的代码 :-)
# 单一名句用exel:
x = compile('print(12345679*18)', 'test', 'eval')
exec(x)
# 多行语句用exec:
x = compile('''
v=3*3+4*4
u=5*5
w=65536**(1/16)
print(v,u,w)
''',
'myCode', 'exec')
exec(x)
| true |
f9bf53d436d0e188489c960253b1dc4a1c7340cc | Python | luczakmarta/mod8ex2 | /main.py | UTF-8 | 681 | 2.515625 | 3 | [] | no_license | from flask import request, redirect
from flask import render_template
from flask import Flask
# Single Flask application object shared by every route decorator below.
app = Flask(__name__)
@app.route('/mypage/me', methods=['GET'])
def mypage():
    """Serve the static main page (GET only)."""
    print("We received GET")
    return render_template("main.html")
# http://127.0.0.1:5000/mypage/me
@app.route ('/mypage/contact', methods=['GET', 'POST'])
def mypage2():
    """Render the contact page; on POST also log the submitted form data."""
    method = request.method
    if method == 'POST':
        print("We received POST")
        print(request.form)
        return render_template("contact.html")
    if method == 'GET':
        print("We received GET")
        return render_template("contact.html")
#http://127.0.0.1:5000/mypage/contact
if __name__ == "__main__":
    # Start Flask's development server (default 127.0.0.1:5000).
    app.run()
| true |
19861d470de1341c4aa278d8260cb4e0c6d6c393 | Python | Renl1001/DeepLearning | /demo/Text/data/prepare.py | UTF-8 | 2,328 | 3 | 3 | [] | no_license | import os
from shutil import copyfile
def copy_file(max_num):
    """Copy at most `max_num` documents per class from THUCNews/ to mini_News/.

    Each subdirectory of THUCNews is treated as one class; its first
    `max_num` files (listdir order) are copied into a same-named
    subdirectory of mini_News.

    Arguments:
        max_num {int} -- maximum number of files kept per class

    The original created an (empty) mirror directory even for stray
    regular files inside THUCNews; non-directories are now skipped first.
    """
    src_root = 'THUCNews'
    dst_root = 'mini_News'
    if not os.path.isdir(dst_root):
        os.mkdir(dst_root)
    for item in os.listdir(src_root):
        class_path = os.path.join(src_root, item)
        if not os.path.isdir(class_path):
            continue  # skip stray files instead of mirroring them
        dst_class_path = os.path.join(dst_root, item)
        if not os.path.isdir(dst_class_path):
            os.mkdir(dst_class_path)
        for num, file in enumerate(os.listdir(class_path), start=1):
            if num > max_num:
                break
            copyfile(os.path.join(class_path, file),
                     os.path.join(dst_class_path, file))
def _read_file(filename):
"""读取一个文件并转换为一行"""
with open(filename, 'r', encoding='utf-8') as f:
return f.read().replace('\n', '').replace('\t',
'').replace('\u3000', '')
def save_file(dirname):
    """Merge per-class documents under `dirname` into three TSV files.

    Per class directory, the first 800 documents (listdir order) go to
    cnews/cnews.train.txt, the next 100 to cnews.test.txt, the remainder
    to cnews.val.txt. Each output line is '<category>\\t<content>\\n'.
    The cnews/ directory must already exist.
    """
    f_train = open('cnews/cnews.train.txt', 'w', encoding='utf-8')
    f_test = open('cnews/cnews.test.txt', 'w', encoding='utf-8')
    f_val = open('cnews/cnews.val.txt', 'w', encoding='utf-8')
    for category in os.listdir(dirname):
        cat_dir = os.path.join(dirname, category)
        if not os.path.isdir(cat_dir):
            continue
        for count, cur_file in enumerate(os.listdir(cat_dir)):
            content = _read_file(os.path.join(cat_dir, cur_file))
            if count < 800:
                sink = f_train
            elif count < 900:
                sink = f_test
            else:
                sink = f_val
            sink.write(category + '\t' + content + '\n')
        print('Finished:', category)
    f_train.close()
    f_test.close()
    f_val.close()
# Script entry: sample up to 1000 docs per class into mini_News/, then build
# the train/test/val files (expects THUCNews/ and cnews/ to exist in cwd).
copy_file(1000)
save_file('mini_News')
| true |
bd7ea4423cabfec9edf4d3a0088e5cf7bedfc9e2 | Python | rchughye/tripping-octo-wight | /PEuler009.py | UTF-8 | 609 | 3.734375 | 4 | [] | no_license | # Euler Problem #9: Special Pythagorean triplet
# http://projecteuler.net/problem=9
# Q: There exists exactly one Pythagorean triplet for which a + b + c = 1000.
# Find the product abc.
# A: 31875000
# Iterate through all pairs of c and (a+b) which sum to 1000. Print answer when a^2 + b^2 = c^2
# Initialize
answer = 0
for c in range(1,1000):
for a in range(1,1001-c):
b = 1000 - a - c
a2 = a*a
b2 = b*b
c2 = c*c
if c2 == a2+b2:
answer = a*b*c
print answer
#print a
#print b
#print c
break
| true |
36e22ee6b37fc7f689080ceb620a73c7cd28bf58 | Python | crisp-k/python_crash_course | /chapter4/exercise13.py | UTF-8 | 269 | 3.640625 | 4 | [] | no_license | buffet_food = ("chicken", "steak", "oranges", "watermelon", "noods")
# List the initial menu (buffet_food tuple defined above).
for item in buffet_food:
    print(item)

# Tuples are immutable: index assignment raises TypeError.
#buffet_food[0] = pork

print("\n")

# "Changing" the menu means rebuilding the whole tuple.
buffet_food = ("pork", "steak", "apples", "watermelon", "noods")
for item in buffet_food:
    print(item)
1ed2cbc36aa1bd5a86097cf275140efd2bf19615 | Python | politecnicomodelopoo2018/farappa_guia1-2.0 | /Prueba_1_Persona.py | UTF-8 | 1,158 | 2.5625 | 3 | [] | no_license | from Prueba_1_Medidas import Medisonga
import datetime
class Persona(object):
    """A person with a name, birth date and a history of body measurements.

    Measurements are Medisonga objects exposing Peso (weight), Altura
    (height) and Fecha (a date with a .year attribute).
    """
    nombre = None
    apellido = None
    fecha_nac = None

    def __init__(self, nombre, apellido, fecha):
        # measurement history, in insertion order
        self.lista_medidas = []
        self.nombre = nombre
        self.apellido = apellido
        self.fecha_nac = fecha

    def agregarMedidas(self, Peso, Altura, Fecha):
        """Record a new weight/height measurement taken on `Fecha`."""
        medida = Medisonga()
        medida.Ingresardatos(Peso, Altura, Fecha)
        self.lista_medidas.append(medida)

    def VerSegunFedeFer(self, fecha):
        """Return (Peso, Altura) of the first measurement on `fecha`, or None."""
        for medida in self.lista_medidas:
            if fecha == medida.Fecha:
                return medida.Peso, medida.Altura

    def Prom(self, año):
        """Return (average weight, average height) over year `año`.

        Returns None when the year has no measurements. The original
        initialized its accumulators to None, so the first `+=` raised
        TypeError.
        """
        cont = 0
        sumP = 0
        sumA = 0
        for medida in self.lista_medidas:
            if año == medida.Fecha.year:
                sumP += medida.Peso
                sumA += medida.Altura
                cont += 1
        if cont == 0:
            return None
        return sumP / cont, sumA / cont

    def Crecimiento(self, año1, año2):
        """Return absolute growth (Δweight, Δheight) between two years' averages.

        Both years must have at least one measurement. The original called
        the bare name Prom (NameError) and tried to subtract tuples;
        compute the difference component-wise instead.
        """
        peso1, altura1 = self.Prom(año1)
        peso2, altura2 = self.Prom(año2)
        return abs(peso1 - peso2), abs(altura1 - altura2)

    def caca(self):
        """Return the weight of the first recorded measurement (None if empty)."""
        for medida in self.lista_medidas:
            return medida.Peso
| true |
ac17c35017eb57d5d610f3ffb77d5bd2ff7880ef | Python | jun1116/aib_section3_project | /func/company_answer.py | UTF-8 | 486 | 2.65625 | 3 | [] | no_license | #회사의 답변에 대한 기능을 하는 func
from func.func_gangnamBike import gangnamBike
from kokoa.models.user_model import Company
#회사들의 답변을 처음 여기서 나눠서 진행합니다.
def company_answer(company_id, text=None):
    """Dispatch a user's message to the answer handler for a company.

    Company id 1 (Gangnam Bike) has a dedicated handler; any other id gets
    a placeholder reply built from the company's stored name.
    """
    if company_id == 1:
        return gangnamBike(text)
    cname = Company.query.get(company_id)
    return f"죄송합니다. 현재 '{cname.companyname}' 에 대한 회사의 답변리스트 작성중입니다. "
| true |
4328560ba17c07ea392bf6bb4e5b06ed14be38f4 | Python | sknutsen/INFO132v2019 | /Oblig8/skn003_Oblig8.py | UTF-8 | 2,289 | 3.78125 | 4 | [] | no_license | import re
# Temainnlevering 8
# Sondre Knutsen (skn003)
# Oppgave 1
# Book catalogue: key = (author, title), value = (subject field, publication
# year); every component is a string.
Bøker = {('Blackburn', 'Modal logic'): ('logikk', '2002'),
         ('Brook', 'Knowledge and Mind'): ('filosofi', '2000'),
         ('Dowek', 'Computation, proof, machine'): ('matematikk', '2015'),
         ('Dowek', 'Proofs and algorithms'): ('logikk', '2011'),
         ('Hein', 'Discrete mathematics'): ('matematikk', '2003'),
         ('Horstmann', 'Python for everyone'): ('programmering', '2016'),
         ('Lowe', 'A survey of metaphysics'): ('filosofi', '2002'),
         ('Severance', 'Java for somebody'): ('programmering', '1999'),
         ('Severance', 'Python for everybody'): ('programmering', '2016')}
# Oppgave 1a)
def skrivBøker():
    """Print every book in Bøker as 'author, title, field, year'."""
    for (author, title), (field, year) in Bøker.items():
        print(author, title, field, year, sep=', ')
# Demo for task 1a: list every book.
print('1a)')
skrivBøker()
# Oppgave 1b)
def leggTilBok():
    """Prompt for a book's details and add it to Bøker.

    Leaving all four fields empty cancels without adding anything.
    """
    forfatter = input('Forfatter: ')
    tittel = input('Tittel: ')
    fag = input('Fagfelt: ')
    utgitt = input('Utgivelsesår: ')
    if not (forfatter or tittel or fag or utgitt):
        return
    Bøker[(forfatter, tittel)] = (fag, utgitt)
# Demo for task 1b: add a book interactively, then list everything again.
print('1b)')
leggTilBok()
skrivBøker()
# Oppgave 1c)
def finnForfatter():
    """Prompt for an author and print title/year of each of their books.

    Matching is case-insensitive; empty input cancels.
    """
    sought = input('Forfatter: ')
    if not sought:
        return
    for (author, title), (_field, year) in Bøker.items():
        if author.lower() == sought.lower():
            print(title, year, sep=', ')
# Demo for task 1c: interactive author lookup.
print('1c)')
finnForfatter()
# Oppgave 1d)
def finnFagområde():
    """Prompt for a subject field and print author/title/year of each match.

    Matching is case-insensitive; empty input cancels.
    """
    sought = input('Fagområde: ')
    if not sought:
        return
    for (author, title), (field, year) in Bøker.items():
        if field.lower() == sought.lower():
            print(author, title, year, sep=', ')
# Demo for task 1d: interactive subject lookup.
print('1d)')
finnFagområde()
# Oppgave 2
# Sample text for task 2: a Norwegian weather report containing the decimal
# numbers the regex exercises below search for.
temp = \
'''Mandag var middeltemperaturen 9.87 grader
og tirsdag var den 11.0.
Neste dag var middeltemperaturen 7.987
mens torsdag var den 8.88. Fredag steg
temperaturen til 9.7 grader og i helgen
fikk vi 9.9 og 7.7 grader.'''
# Oppgave 2a)
def tMinst4(text):
    """Print '2a)' followed by every word of `text` that starts with 't'
    and is at least 4 characters long."""
    print('2a)')
    for word in text.split():
        if len(word) >= 4 and re.search('^t', word):
            print(word)
# Oppgave 2b)
def ordFørTall(text):
    """Print '2b)' and the list of lowercase words that immediately
    precede a number in `text`."""
    print('2b)')
    print(re.findall('([a-z]+)\s[0-9]+', text))
# Run both task-2 demos on the sample text.
tMinst4(temp)
ordFørTall(temp)
| true |
124f1fd010909c5cdd1fcb2ca3e1033fbbb800ea | Python | evanreyes/DojoAssignments | /python/week1/5_friday/01-multiplication_table.py | UTF-8 | 170 | 2.984375 | 3 | [] | no_license | print "x",
for a in range(1,13):
print a,
print ""
for i in range(0,13):
if i > 0:
print i, i*1, i*2, i*3, i*4, i*5, i*6, i*7, i*8, i*9, i*10, i*11, i*12
| true |
a1c6b6de5669b15a29bb839c8d71077cc6713f92 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_118/1504.py | UTF-8 | 1,103 | 3.25 | 3 | [] | no_license | #!/usr/bin/python
import readline
# Precomputed roots for Code Jam "Fair and Square": palindromic numbers whose
# squares are also palindromic, in ascending order.
root = [1, 2, 3, 11, 22, 101, 111, 121, 202, 212, 1001, 1111, 2002, 10001, 10101, 10201, 11011, 11111, 11211, 20002, 20102, 100001, 101101, 110011, 111111, 200002, 1000001, 1001001, 1002001, 1010101, 1011101, 1012101, 1100011, 1101011, 1102011, 1110111, 1111111, 2000002, 2001002]
# Their squares (ascending); getIndex/getIndexExclude search this table.
square = [value * value for value in root]
def getIndex(n):
    """Index of the largest entry of `square` that is <= n (0 when none is).

    `square` is sorted ascending, so this is equivalent to the original
    bitwise binary search capped at 256 entries.
    """
    best = 0
    for idx in range(len(square)):
        if square[idx] <= n:
            best = idx
    return best
def getIndexExclude(n):
    """Index of the largest entry of `square` strictly below n.

    Returns -1 when n <= 1 (no square is below the smallest entry), 0 when
    no entry qualifies otherwise — matching the original bitwise search.
    """
    if n <= 1:
        return -1
    best = 0
    for idx in range(len(square)):
        if square[idx] < n:
            best = idx
    return best
cases = int(raw_input())
for case in xrange(1, cases+1):
a, b = (int(x) for x in raw_input().split())
print "Case #%d: %d" % (case, getIndex(b) - getIndexExclude(a))
| true |
84f8c18f13763489d3ee7268caeb7acade32e44d | Python | Johnson-Lab-BYU/End-Bias | /CoverageRetriever.py | UTF-8 | 6,250 | 3.015625 | 3 | [] | no_license | import sys
import threading
#This program is designed to output average coverage ratio data at a single bp resolution at a
#user-defined distance from known fragment ends. The input for this program is output from binMassager.py.
#The output of this program is a single value, necessitating multiple program calls with parameter changes.
# Command-line interface:
#   argv[1] = cutsite (BED-like) file, argv[2] = bamCompare bedGraph,
#   argv[3] = distance from the cutsite in bp, argv[4] = output file name.
z = int(sys.argv[3]) #distance from cutsite
output = str(sys.argv[4]) #name of output
f = open(output, 'w')
# Per-chromosome accumulators, shared as module globals with the thread
# functions defined below (one thread per chromosome).
sites1 = [] #empty lists to be filled with cutsites by chromosome, argument 1
sites2 = []
sites3 = []
sites4 = []
sites5 = []
sitesX = []
coverage1 = {} #empty dictionaries to be filled with bamCompare data, argument 2
coverage2 = {}
coverage3 = {}
coverage4 = {}
coverage5 = {}
coverageX = {}
prime1 = [] #locations upstream or downstream of cutsite file using variable z, argument 3
prime2 = []
prime3 = []
prime4 = []
prime5 = []
primeX = []
data1 = [] #coverage values based on distance from cutsite
data2 = []
data3 = []
data4 = []
data5 = []
dataX = []
#separate cutsite file into lists of each chromosome
with open(sys.argv[1], 'rt') as x:
    # Bin cutsite lines by chromosome. The chromosome field is compared
    # exactly: the original used line.startswith(), so roman-numeral names
    # were misfiled (e.g. 'chrII', 'chrIII' and 'chrIV' all start with
    # 'chrI' and landed in sites1).
    for line in x:
        fields = line.split()
        if not fields:
            continue  # skip blank lines
        chrom = fields[0]
        if chrom in ('chr1', 'chrI'):
            sites1.append(line)
        elif chrom in ('chr2', 'chrII'):
            sites2.append(line)
        elif chrom in ('chr3', 'chrIII'):
            sites3.append(line)
        elif chrom in ('chr4', 'chrIV'):
            sites4.append(line)
        elif chrom in ('chr5', 'chrV'):
            sites5.append(line)
        elif chrom == 'chrX':
            sitesX.append(line)
#separate bamCompare file into dictionaries of each chromosome
with open(sys.argv[2], 'rt') as y:
    # Build per-chromosome {position: coverage} maps from the bamCompare
    # bedGraph (columns: chrom, start, end, value); the end coordinate is
    # used as the key, matching the thread functions' lookups. Chromosome
    # names are compared exactly instead of the original startswith(),
    # which misfiled roman-numeral names ('chrII' matched the 'chrI'
    # prefix).
    for line in y:
        fields = line.split()
        if len(fields) < 4:
            continue  # skip blank/short lines
        chrom = fields[0]
        if chrom in ('chr1', 'chrI'):
            coverage1[fields[2]] = fields[3]
        elif chrom in ('chr2', 'chrII'):
            coverage2[fields[2]] = fields[3]
        elif chrom in ('chr3', 'chrIII'):
            coverage3[fields[2]] = fields[3]
        elif chrom in ('chr4', 'chrIV'):
            coverage4[fields[2]] = fields[3]
        elif chrom in ('chr5', 'chrV'):
            coverage5[fields[2]] = fields[3]
        elif chrom == 'chrX':
            coverageX[fields[2]] = fields[3]
#create function that uses location in cutsite list to use as a key to call the corresponding location in the bamCompare dictionary and return the associated value
#each thread represents one chromosome
def thread1():
    """Collect chr1/chrI coverage values at z bp inside each fragment end.

    Operates on module globals: reads sites1 (whitespace-separated cutsite
    lines; fields [1]/[2] are start/end) and coverage1 (position string ->
    coverage string); appends offset positions to prime1 and matching
    coverage values (as floats) to data1.
    """
    for entry in sites1:
        fields = entry.split()
        # z bp downstream of the 5' end, then z bp upstream of the 3' end
        prime1.append(int(fields[1]) + int(z))
        prime1.append(int(fields[2]) - int(z))
    for pos in prime1:
        # Direct dict lookup replaces the original linear scan over every
        # coverage entry (O(sites * coverage) -> O(sites)); same results.
        value = coverage1.get(str(pos))
        if value is not None:
            data1.append(float(value))
def thread2():
    """Collect chr2/chrII coverage values at z bp inside each fragment end.

    Same contract as thread1, using sites2/coverage2/prime2/data2.
    """
    for entry in sites2:
        fields = entry.split()
        prime2.append(int(fields[1]) + int(z))
        prime2.append(int(fields[2]) - int(z))
    for pos in prime2:
        # dict lookup instead of the original linear scan (same results)
        value = coverage2.get(str(pos))
        if value is not None:
            data2.append(float(value))
def thread3():
    """Collect chr3/chrIII coverage values at z bp inside each fragment end.

    Same contract as thread1, using sites3/coverage3/prime3/data3.
    """
    for entry in sites3:
        fields = entry.split()
        prime3.append(int(fields[1]) + int(z))
        prime3.append(int(fields[2]) - int(z))
    for pos in prime3:
        # dict lookup instead of the original linear scan (same results)
        value = coverage3.get(str(pos))
        if value is not None:
            data3.append(float(value))
def thread4():
    """Collect chr4/chrIV coverage values at z bp inside each fragment end.

    Same contract as thread1, using sites4/coverage4/prime4/data4.
    """
    for entry in sites4:
        fields = entry.split()
        prime4.append(int(fields[1]) + int(z))
        prime4.append(int(fields[2]) - int(z))
    for pos in prime4:
        # dict lookup instead of the original linear scan (same results)
        value = coverage4.get(str(pos))
        if value is not None:
            data4.append(float(value))
def thread5():
    """Collect chr5/chrV coverage values at z bp inside each fragment end.

    Same contract as thread1, using sites5/coverage5/prime5/data5.
    """
    for entry in sites5:
        fields = entry.split()
        prime5.append(int(fields[1]) + int(z))
        prime5.append(int(fields[2]) - int(z))
    for pos in prime5:
        # dict lookup instead of the original linear scan (same results)
        value = coverage5.get(str(pos))
        if value is not None:
            data5.append(float(value))
def thread6():
    """Collect chrX coverage values at z bp inside each fragment end.

    Same contract as thread1, using sitesX/coverageX/primeX/dataX.
    """
    for entry in sitesX:
        fields = entry.split()
        primeX.append(int(fields[1]) + int(z))
        primeX.append(int(fields[2]) - int(z))
    for pos in primeX:
        # dict lookup instead of the original linear scan (same results)
        value = coverageX.get(str(pos))
        if value is not None:
            dataX.append(float(value))
#call each thread and then make sure each thread is done before continuing
# NOTE(review): `if 0 == 0` is always true; it exists only to indent the
# thread setup block.
if 0 == 0:
    t1=threading.Thread(target=thread1)
    t2=threading.Thread(target=thread2)
    t3=threading.Thread(target=thread3)
    t4=threading.Thread(target=thread4)
    t5=threading.Thread(target=thread5)
    t6=threading.Thread(target=thread6)
    t1.start()
    t2.start()
    t3.start()
    t4.start()
    t5.start()
    t6.start()
    # Block until every per-chromosome worker has filled its data list.
    t1.join()
    t2.join()
    t3.join()
    t4.join()
    t5.join()
    t6.join()
#massage format for final output to be easy to read
# Average coverage over all matched positions, written as a single value.
# NOTE(review): raises ZeroDivisionError when no positions matched.
final = data1 + data2 + data3 + data4 + data5 + dataX
ave = sum(final)/len(final)
f.write(str(ave))
f.close()
| true |
119d101020b01570ceef54fb98465dd0b10418ba | Python | qzq2514/DNNCode | /Classification/DenseNet/nets/DenseNetForDigit.py | UTF-8 | 7,157 | 2.578125 | 3 | [] | no_license | import collections
import tensorflow as tf
import tensorflow.contrib.slim as slim
class DenseNetForDigit(object):
def __init__(self, is_training, num_classes,growth_rate,net_depth):
self.num_classes = num_classes
self._is_training = is_training
self.growth_rate=growth_rate #在DenseNet Block内的卷积层的宽度(通道数)
self.conv_num=int((net_depth-4)/3) #共3个DenseNet Block,每个DenseNet Block内的卷积次数
#(去掉开始的一次卷积和结尾的bn、全局池化和全连接)
def preprocess(self, inputs):
# ResNet暂不需要做输入预处理
preprocessed_inputs = tf.to_float(inputs)
preprocessed_inputs = tf.subtract(preprocessed_inputs, 128.0)
preprocessed_inputs = tf.div(preprocessed_inputs, 128.0)
return preprocessed_inputs
# 每经过一个卷积操作,均把卷积结果和未卷积之前的集合进行合(论文的核心)
# 以供后面的卷积能够直接连接到之前的卷积结果,起到Densely connection的效果
# 其中原文中在每个Dense Unit的3x3卷积之前还有一个Bottleneck层,
# 会将目前为止该Dense block的合并后的特征图固定到4xgrowth_rate(此时网络称为DenseNet-B)再送入3x3卷积
# 这样保证不至于依次将之前的feature map合并后产生的特征图通道数过多
def add_layer(self,layer_collection,name):
#根据论文第三章的Composite function的一节,在本层依次进行bn,relu和3x3conv
#将bn和relu放在conv之前也符合ResNetV2中前置激活的想法,保证在合并(tf.concat)之后
#不再进行bn和激活
with tf.variable_scope(name) as sc:
cur_bn=slim.batch_norm(layer_collection,scope="bn")
cur_conv=slim.convolution2d(cur_bn,num_outputs=self.growth_rate,
kernel_size=3,stride=1)
layer_collection=tf.concat([cur_conv,layer_collection],axis=-1)
return layer_collection
# 每个DenseNet Block之间的转换层,仅仅用于下采样,且最后不会添加到layer_collection中
def add_transition(self,layer_collection,name):
layer_collection_channel=layer_collection.get_shape().as_list()[-1]
with tf.variable_scope(name) as sc:
cur_bn=slim.batch_norm(layer_collection,scope="bn")
# 在原文中transition层中的1x1卷积还有一个参数,即压缩参数,表示输出通道数变为输入特征图通道数的倍数a(0-1之间)
# 即这里变为:num_outputs=a*layer_collection_channel(此时称为DenseNet-C)
# 这里我们transition层中的1x1卷积不会改变通道数,注意在conv和pool之间要加一个relu,
# 因为在参数空间内,convolution2d是被设置为不跟激活函数的
# 4xgrowth_rate的过渡层和带压缩参数的过渡层都有的叫做DenseNet-BC
# 这里复现仅仅复现了DenseNet的核心:密集连接,去掉了Bottleneck层压缩层,这样在复现的难度上大大减少:无论在哪个Dense Block
# 的哪一层,只需要将之间所有Dense Block的所有层全部添加进去卷积就行
cur_conv=slim.convolution2d(cur_bn,num_outputs=layer_collection_channel,activation_fn=tf.nn.relu,
kernel_size=1,stride=1,scope="conv")
cuv_pool=slim.avg_pool2d(cur_conv,kernel_size=2,stride=2,scope="pool")
return cuv_pool
#inputs:[batch_size,28,28,3]
def inference(self, inputs):
print("Using DenseNet L=40,K=12.....")
with slim.arg_scope(self.DenseNet_arg_scope(is_training=self._is_training)):
layer_collection = slim.convolution2d(inputs,num_outputs=16,kernel_size=3,stride=1,padding="SAME")
with tf.variable_scope("block1") as sc:
for conv_id in range(self.conv_num):
layer_collection=self.add_layer(layer_collection, "layer" + str(conv_id))
layer_collection=self.add_transition(layer_collection,"transition")
print("layer_collection:", layer_collection)
with tf.variable_scope("block2") as sc:
for conv_id in range(self.conv_num):
layer_collection=self.add_layer(layer_collection, "layer" + str(conv_id))
layer_collection=self.add_transition(layer_collection,"transition")
print("layer_collection:", layer_collection)
#最后一个block后面不用添加变换层
with tf.variable_scope("block3") as sc:
for conv_id in range(self.conv_num):
layer_collection=self.add_layer(layer_collection, "layer" + str(conv_id))
# layer_collection = self.add_transition(layer_collection, "transition")
layer_collection=slim.convolution2d(layer_collection,num_outputs=self.growth_rate*2,kernel_size=3,
stride=2,activation_fn=tf.nn.relu,scope="transition_conv")
print("layer_collection:",layer_collection)
layer_collection=slim.batch_norm(layer_collection,scope="bn_last")
net_global_pool = tf.reduce_mean(layer_collection, [1, 2],name="global_pool",keep_dims=True)
net = slim.convolution2d(net_global_pool, num_outputs=self.num_classes,
kernel_size=1, activation_fn=None, normalizer_fn=None, scope="full_conv")
logits = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
print("logits:",logits)
return logits
def postprocess(self,logits):
softmax=tf.nn.softmax(logits)
classes=tf.cast(tf.argmax(softmax,axis=1),tf.int32)
return softmax,classes
def loss(self,logits,labels):
softmax_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits+1e-8,labels=labels),name="softmax_loss")
tf.add_to_collection("Loss",softmax_loss)
loss_all=tf.add_n(tf.get_collection("Loss"),name="total_loss")
return loss_all
def DenseNet_arg_scope(self,is_training,weight_decay=0.0001,batch_norm_decay=0.90,
batch_norm_epsilon=1e-5,batch_norm_scale=True):
batch_norm_params={
'is_training':is_training,
'decay':batch_norm_decay,
'epsilon':batch_norm_epsilon,
'scale':batch_norm_scale,
"activation_fn":tf.nn.relu
# 'updates_collections:':tf.GraphKeys.UPDATE_OPS
}
#DenseNet借鉴resNetV2,采用前置激活,不在卷积后进行bn和relu
with slim.arg_scope(
[slim.convolution2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=None):
with slim.arg_scope([slim.batch_norm],**batch_norm_params) :
with slim.arg_scope([slim.avg_pool2d],padding="SAME") as arg_sc:
return arg_sc | true |
6a1af4d58a47ae8d21c0dad7638805202bfdae7e | Python | aaronbolyard-school/kvlc | /main.py | UTF-8 | 1,567 | 3.640625 | 4 | [] | no_license | import kvlc.model.calculator as calculator
def get_integer(prompt):
while True:
print(prompt + " ", end="")
value = input()
try:
return int(value)
except:
print("Please enter an integer.")
OPERATION_ACTION_REPEAT = 1
OPERATION_ACTION_MAIN_MENU = 2
def should_repeat():
while True:
print("1. Repeat")
print("2. Main Menu")
action = get_integer("Enter a number:")
if action == OPERATION_ACTION_REPEAT:
return True
elif action == OPERATION_ACTION_MAIN_MENU:
return False
else:
print("Please select a valid option.")
def perform_operation(operation, name):
while True:
print()
print(name)
lhs = get_integer("Enter a number:")
rhs = get_integer("Enter a number:")
result = operation(lhs, rhs)
if result != False:
print(lhs, "+", rhs, "=", int(result))
else:
print("Bad input. Did you divide by zero?")
if not should_repeat():
break
ACTIONS = {
1: calculator.add,
2: calculator.sub,
3: calculator.div,
4: calculator.mul,
5: False
}
ACTION_NAMES = {
1: "Add",
2: "Subtract",
3: "Divide",
4: "Multiply"
}
def main_menu():
    """Run the top-level menu loop until the user chooses Exit."""
    action = None
    # ACTIONS maps the Exit choice to False; every other valid choice maps
    # to a callable, so the loop ends only when Exit is selected.
    while action is not False:
        print('Welcome to the calculator program.')
        print('1. Add')
        print('2. Subtract')
        print('3. Divide')
        print('4. Multiply')
        print('5. Exit')
        index = get_integer("Enter a number:")
        action = ACTIONS.get(index, None)
        # Identity checks (`is None` / `is not False`) instead of equality:
        # clearer intent and immune to operand overloading.
        if action is None:
            print("Please select a valid option.")
        elif action is not False:
            perform_operation(action, ACTION_NAMES[index])
    print("Goodbye.")
# Start the interactive menu only when run as a script, not on import.
if __name__ == "__main__":
    main_menu()
| true |
d85827df53d6c382f45a9628b06d5a314a744963 | Python | cffppa/appium_autotest | /mooc_project/pages/classify_page.py | UTF-8 | 959 | 2.703125 | 3 | [] | no_license | from pages.base_page import BasePage
import os
class ClassifyPage(BasePage):
    """Page object for the course-classification screen of the MOOC app."""
    def __init__(self,driver):
        # Delegate driver storage/setup to BasePage.
        super(ClassifyPage,self).__init__(driver)
    def lists(self):
        """Return the RadioButton elements inside the classify radio group."""
        # 'cn.com.open.mooc:id/rgClassify' is the RadioGroup container id.
        classify_list=self.by_id('cn.com.open.mooc:id/rgClassify')
        lists=classify_list.find_elements_by_class_name('android.widget.RadioButton')
        print(lists)  # debug output; NOTE(review): consider a logger instead
        return lists
    '''
    #这段函数功能是把对应的每个分类的名称写到文件里去。运行通过了,但是目标文件没有数据写入。
    def get_classify_data(self):
        path='F:\\code_new\\appium_autotest\\mooc_project\\data'
        dirs=os.listdir(path)
        data=open('classify_data.py','a')
        print(data.name)
        num1=len(self.lists())
        print(num1)
        for i in range(0,num1):
            self.lists()[i].click()
            titles=self.driver.find_elements_by_id('cn.com.open.mooc:id/tvTitle')
            num2=len(titles)
            for j in range(0,num2):
                title=titles[j].get_attribute('text')
                data.write(title+" ")
            data.write('\n')
    '''
| true |
60b223816690bf27b9eed3493b49e09886ef1698 | Python | hritesh-sonawane/pY1h0n | /Linear_DS/Linked_List/two_ptr_linked_list.py | UTF-8 | 1,825 | 4 | 4 | [] | no_license | from linked_list import LinkedList
def nth_last_node(linked_list, n):
    """Two-pointer lookup of a node counted from the tail.

    With this implementation's counting, n=0 yields the last node, n=1 the
    second-to-last, and so on. Returns None when the list has fewer than
    n + 1 nodes. O(n) time, O(1) extra space.
    """
    trailing = None
    lead = linked_list.head_node
    steps = 1
    while lead:
        lead = lead.get_next_node()
        steps += 1
        # Once the lead pointer is n + 1 nodes ahead, start (or keep)
        # moving the trailing pointer in lockstep.
        if steps >= n + 2:
            if trailing is None:
                trailing = linked_list.head_node
            else:
                trailing = trailing.get_next_node()
    return trailing
def generate_test_linked_list():
    """Build a linked list holding 1..20 in ascending order from the head."""
    linked = LinkedList()
    # Inserting 20 down to 1 at the head leaves 1 at the front.
    for value in reversed(range(1, 21)):
        linked.insert_beginning(value)
    return linked
# Demo: print the list 1..20, then the node four positions before the tail
# (value 16 under this implementation's counting).
test_list = generate_test_linked_list()
print(test_list.stringify_list())
nth_last = nth_last_node(test_list, 4)
print(nth_last.value)
# egs: obtain the 2nd to last node of any linked list.
# T -> tail pointer
# N -> nth_last pointer
# Starting State
# count = 1
# T
# 1 2 3 4 5
# First Tick
# count = 2
# T
# 1 2 3 4 5
# Second Tick
# count = 3
# T
# N
# 1 2 3 4 5
# Third Tick
# count = 4
# T
# N
# 1 2 3 4 5
# Fourth Tick
# count = 5
# T
# N
# 1 2 3 4 5
# Final Tick
# count = 6
# T
# N
# 1 2 3 4 5 None
# Time Complexity: O(n) : We must iterate through the entire list once
# Space Complexity: O(1) : We always use only three variables no matter what size the linked list is: two pointers and a counter
# Pointers at different speeds
# egs: Find middle element of linked list
# 2 pointers: fast and slow. For every 2 steps fast takes, slow takes 1
# def find_middle(linked_list):
# count = 0
# fast = linked_list.head_node
# slow = linked_list.head_node
# while fast:
# fast = fast.get_next_node()
# if count % 2 != 0:
# slow = slow.get_next_node()
# count += 1
# return slow | true |
71d2db478e2976678a2bc9497a28bd37583cc9e8 | Python | dada00321/NTUST_SIS_LineBot | /module/epidemic_info/ntu_system_epidemic_info_assistant.py | UTF-8 | 17,667 | 2.828125 | 3 | [
"MIT"
] | permissive | """
ntu_system_epidemic_info_assistant
臺灣大學系統-三校防疫資訊小助手
"""
from selenium import webdriver
#from config_reader import get_config
#from modules.basic_scraping_module import get_response
from module.epidemic_info.config_reader import get_config
from module.epidemic_info.modules.basic_scraping_module import get_response
from os.path import exists
from os import mkdir, listdir
from datetime import datetime
import re
class Epidemic_info_crawler():
    """Scrapes the COVID "epidemic news" bulletins of the three NTU-system schools."""
    def crawl_epidemic_news(self, epidemic_news_links, school_abbrs, school_abbr):
        # Public entry point (callable from outside).
        # epidemic_news_links: dict mapping school abbreviation -> bulletin URL.
        # school_abbrs: the valid abbreviations ("NTU", "NTNU", "NTUST").
        # school_abbr: which school to crawl.
        # Returns the scraped content dict, or None on failure.
        self.is_avaiable = False  # (sic) availability flag read by the selenium crawler
        self.epidemic_news_link = None
        if school_abbr in school_abbrs:
            self.school_abbr = school_abbr
            tmp = epidemic_news_links[school_abbr]
            if tmp is not None and tmp.strip() != "":
                self.epidemic_news_link = tmp
                self.is_avaiable = True
            else:
                print("epidemic_news_links 連結無效")
            #--------------------
            content = self.selenium_crawl_epidemic_news()
            #content = self.requests_crawl_epidemic_news()
            if content is None:
                print("[WARNING] Epidemic_info_crawler 未抓取到任何內容")
            else:
                print("[INFO] 已抓取【防疫快訊】相關內容")
            return content
        else:
            print("[WARNING] Epidemic_info_crawler 輸入資訊錯誤\n"
                  "crawl_epidemic_news() 參數 `school_abbr` 不在 參數 `school_abbrs` 之中")
            return None
    # --------------------
    def selenium_crawl_epidemic_news(self):
        # Crawl the NTU-system (three schools) epidemic news flashes, i.e. the
        # more important real-time epidemic messages on each school's site
        # (not the assorted messages of the dedicated epidemic sections).
        # Selenium was chosen over requests for faster development and more
        # reusable xpaths.
        if self.is_avaiable:
            driver = self.get_web_driver(self.epidemic_news_link)
            content = None
            # Dispatch to the school-specific scraper.
            if self.school_abbr == "NTU":
                content = self.selenium_crawl_epidemic_news_NTU(driver)
            elif self.school_abbr == "NTNU":
                content = self.selenium_crawl_epidemic_news_NTNU(driver)
            elif self.school_abbr == "NTUST":
                content = self.selenium_crawl_epidemic_news_NTUST(driver)
            driver.quit()
            return content
    def get_web_driver(self, url):
        # Launch Chrome from a hard-coded local driver path and open *url*.
        driver = webdriver.Chrome("D:/geckodriver/chromedriver.exe")
        driver.implicitly_wait(10)
        driver.get(url)
        return driver
    def selenium_crawl_epidemic_news_NTU(self, driver):
        content = dict()
        # odd table rows:
        odd_xpath = "//tr[@style='background-color:#EFF3FB;']"
        # even table rows:
        even_xpath = "//tr[@style='background-color:White;']"
        # 1) collect the content of every [date] cell
        # //tr[@style='background-color:#EFF3FB;']//td[contains(text(),'2021')]
        xpaths = [f"{date_xpath}//td[contains(text(),'{datetime.now().year}')]"
                  for date_xpath in (odd_xpath, even_xpath)]
        '''
        date_list = [tag.text.strip() 
                     for xpath in xpaths
                     for tag in driver.find_elements_by_xpath(xpath)]
        '''
        # Interleave odd/even rows back into document order.
        tmp_list = [[],[]]
        count = 1
        for xpath in xpaths:
            for tag in driver.find_elements_by_xpath(xpath):
                tmp = tag.text.strip()
                if count % 2 == 1:
                    count += 1
                    tmp_list[0].append(tmp)
                else:
                    tmp_list[1].append(tmp)
        date_list = list()
        len_odd, len_even = len(tmp_list[0]), len(tmp_list[1])
        for i in range(max(len_odd, len_even)):
            if i < len_odd:
                date_list.append(tmp_list[0][i])
            if i < len_even:
                date_list.append(tmp_list[1][i])
        print(date_list)
        content.setdefault("日期", date_list)
        # 2) collect the content of every [title] cell
        #articleLink_xpath = "//td[contains(text(),'2021/5/20')]/../td[1]"
        #header_xpath = "//td[contains(text(),'2021/5/20')]/../td[2]"
        header_list = list()
        articleLink_list = list()
        header_xpath_pattern = "//td[contains(text(),'{}')]/../td[2]"
        articleLink_xpath_pattern = "//td[contains(text(),'{}')]/../td[1]"
        test = date_list[:1]  # only the first article is visited here
        for excution_count0, date in enumerate(test):
            header_xpath = header_xpath_pattern.format(date)
            articleLink_xpath = articleLink_xpath_pattern.format(date)
            # (2)
            tmp_header = driver.find_element_by_xpath(header_xpath).text
            if tmp_header is not None and str(tmp_header).strip()!='':
                # (deprecated) the raw [title] mixes Chinese and English
                # (harder to keep in sync with the other two schools)
                #header_list.append(tmp_header)
                # keep only the Chinese part of the [title]
                if re.compile(r'[a-zA-Z]').search(tmp_header) is not None:
                    header_list.append(tmp_header[:tmp_header.index(re.compile(r'[a-zA-Z]').search(tmp_header).group())].strip())
                else:
                    header_list.append(tmp_header)
            # (3)
            '''tag_articleLink = driver.find_element_by_xpath(articleLink_xpath)
            tag_articleLink.click()'''
            driver.find_element_by_xpath(articleLink_xpath).click()
            print(f"[INFO] 正在抓取第 {1 + excution_count0} 篇防疫快訊")
            #tag.click()
            col_1_xpaths = "//td[@class='auto-style2']"
            col_2_xpaths = "//td[@class='auto-style1']"
            col1 = [tag.text.strip()
                    for tag in driver.find_elements_by_xpath(col_1_xpaths)]
            col2 = [tag.text.strip()
                    for tag in driver.find_elements_by_xpath(col_2_xpaths)]
            content_msg = '\n'.join([f"{k}: {v}"
                                     for k, v in zip(col1, col2)
                                     if v is not None and v.strip()!=''])
            articleLink_list.append(content_msg)
            # go back to the previous page
            driver.find_element_by_xpath("//input[@type='submit']").click()
        print("header_list:", header_list)
        print("articleLink_list:", articleLink_list)
        # 3) collect the content of every [article link] cell
        #articleLink_xpath_pattern = "//td[contains(text(),'{}')]/../td[1]"
        """
        # 2) 抓取所有 [標題] 欄位的 content
        #//tr[@style='background-color:#EFF3FB;']//td[contains(text(),'2021')]/../td[2]
        xpaths = [f"{date_xpath}//td[contains(text(),'{datetime.now().year}')]/../td[2]"
                  for date_xpath in (odd_xpath, even_xpath)]
        header_list = [tag.text
                       for xpath in xpaths
                       for tag in driver.find_elements_by_xpath(xpath)]
        """
        '''
        header_list = [s[:s.index(re.compile(r'[a-zA-Z]').search(s).group())].strip()
                       if re.compile(r'[a-zA-Z]').search(s) is not None
                       else s
                       for s in header_list]
        '''
        #print(header_list)
        # 3) collect the content of every [article link] cell
        """xpaths = [f"{date_xpath}//td[contains(text(),'{datetime.now().year}')]/../td[1]/a"
                  for date_xpath in date_xpaths]
        xpaths = [f"{date_xpath}//td[contains(text(),'{datetime.now().year}')]/../td[1]/a"
                  for date_xpath in (odd_xpath, even_xpath)]
        excution_times = 0
        articleLink_tags = [tag
                            for xpath in xpaths
                            for tag in driver.find_elements_by_xpath(xpath)]
        """
        """
        while True:
        #for tag in articleLink_tags:
            excution_limit = len(articleLink_tags)
            if excution_times == excution_limit:
                print(f"[INFO] 防疫快訊抓取完畢!共執行 {excution_limit} 次")
                break
            else:
                articleLink_tags[excution_times].click()
                excution_times += 1
                print(f"[INFO] 正在抓取第 {excution_times} 篇防疫快訊")
            #tag.click()
            col_1_xpaths = "//td[@class='auto-style2']"
            col_2_xpaths = "//td[@class='auto-style1']"
            col1 = [tag.text.strip()
                    for tag in driver.find_elements_by_xpath(col_1_xpaths)]
            col2 = [tag.text.strip()
                    for tag in driver.find_elements_by_xpath(col_2_xpaths)]
            content_msg = '\n'.join([f"{k}: {v}"
                                     for k, v in zip(col1, col2)
                                     if v is not None and v.strip()!=''])
            print(content_msg)
            print("okk")
            tag = driver.find_element_by_xpath("//input[@type='submit']")
            tag.click() # 回上一頁
        """
        '''
        if len(articleLink_list) == 0:
            print("[WARNING] selenium_crawl_epidemic_news_NTUST()"+\
                  " 無法抓取 [防疫快訊-文章連結] 故不往下執行")
            return None
        if len(date_list) == len(date_list) == len(date_list):
            content = {"日期": date_list,
                       "標題": header_list,
                       "文章連結": articleLink_list}
            return content
        else:
            print("[WARNING] selenium_crawl_epidemic_news_NTUST()"+\
                  " [防疫快訊] 底下資訊皆可抓取,但數量不相符,故不回傳資訊")
            return None
        '''
        # NOTE(review): placeholder return — the lists assembled above are
        # discarded; confirm whether the commented-out result dict was intended.
        return {"test":"1234"}
    def selenium_crawl_epidemic_news_NTNU(self, driver):
        content = dict()
        # 1) collect the content of every [date] cell
        # 2) collect the content of every [title] cell
        # 3) collect the content of every [info link] cell  (was: image link)
        # 4) download and store all [image] content (currently disabled below)
        # //a[@class='thumbnail']
        xpath = "//a[@class='thumbnail']"
        tags = driver.find_elements_by_xpath(xpath)
        date_list = list()
        header_list = list()
        imagePath_list = list()
        # Only the three newest thumbnails are scraped.
        for tag in tags[:3]:
            tmp = tag.get_attribute("data-title").split(' ')
            date_list.append(tmp[0])
            header_list.append(tmp[-1])
            src = tag.get_attribute("data-image")
            # Rebuild an absolute URL from the scheme + host of the bulletin link.
            image_link = self.epidemic_news_link[:8] + self.epidemic_news_link[8:].split('/')[0] + src
            imagePath_list.append(image_link)
            """
            r = get_response(image_link)
            if r is not None:
                # Line Bot
                base_image_dir = "module/epidemic_info/media/"
                # exe:
                # ...
                # test:
                #base_image_dir = "./media/"
                if not exists(base_image_dir):
                    mkdir(base_image_dir)
                current_image_ID = len(listdir(base_image_dir)) + 1
                current_image_extension = ".jpg"
                current_image_fullpath = f"{base_image_dir}{current_image_ID}{current_image_extension}"
                imagePath_list.append(current_image_fullpath)
                try:
                    with open(current_image_fullpath, "wb") as fp:
                        fp.write(r.content)
                except:
                    print("[WARNING] 無法下載、儲存 [圖片] content")
            else:
                print("[WARNING] selenium_crawl_epidemic_news_NTNU()"+\
                      "無法抓取 [防疫快訊] 內容")
            """
        # NOTE(review): this comparison always holds (date_list compared with
        # itself three times) — presumably header/image lists were meant.
        if len(date_list) == len(date_list) == len(date_list):
            '''
            content = {"日期": date_list,
                       "標題": header_list,
                       "圖片路徑": imagePath_list}
            '''
            content = {"日期": date_list,
                       "標題": header_list,
                       "資訊連結": imagePath_list} # info link (was: image path)
            return content
        else:
            print("[WARNING] selenium_crawl_epidemic_news_NTUST()"+\
                  " [防疫快訊] 底下資訊皆可抓取,但數量不相符,故不回傳資訊")
            return None
    def selenium_crawl_epidemic_news_NTUST(self, driver):
        content = dict()
        # //h2[contains(text(),'本校最新訊息')]
        # //h2[contains(text(),'本校最新訊息')]/../../section
        # //h2[contains(text(),'本校最新訊息')]/../../section/table
        ''' 抓取 [有關防疫快訊的 table] 裡面的 [日期], [標題], [文章連結] '''
        # 1) collect the content of every [date] cell
        # //h2[contains(text(),'本校最新訊息')]/../../section/table//td[@data-th='日期']/div
        xpath = "//h2[contains(text(),'本校最新訊息')]/../../section/table//td[@data-th='日期']/div"
        date_list = [tag.text.strip()
                     for tag in driver.find_elements_by_xpath(xpath)]
        #print(*(e for e in date_list))
        if len(date_list) == 0:
            print("[WARNING] selenium_crawl_epidemic_news_NTUST()"+\
                  " 無法抓取 [防疫快訊-日期] 故不往下執行")
            return None
        # 2) collect the content of every [title] cell
        # //h2[contains(text(),'本校最新訊息')]/../../section/table//td[@data-th='標題']//a
        #import time
        #time.sleep(5)
        xpath = "//h2[contains(text(),'本校最新訊息')]/../../section/table//td[@data-th='標題']//a"
        headers = driver.find_elements_by_xpath(xpath)
        header_list = [tag.text.strip()
                       for tag in headers]
        #print(header_list)
        if len(header_list) == 0:
            print("[WARNING] selenium_crawl_epidemic_news_NTUST()"+\
                  " 無法抓取 [防疫快訊-標題] 故不往下執行")
            return None
        # 3) collect the content of every [article link] cell
        # reuse `headers`: [article link] and [title] live on the same element
        articleLink_list = [tag.get_attribute("href")
                            for tag in headers]
        #print(articleLink_list)
        if len(articleLink_list) == 0:
            print("[WARNING] selenium_crawl_epidemic_news_NTUST()"+\
                  " 無法抓取 [防疫快訊-文章連結] 故不往下執行")
            return None
        # NOTE(review): compares date_list with itself — presumably meant to
        # compare the three lists' lengths.
        if len(date_list) == len(date_list) == len(date_list):
            content = {"日期": date_list,
                       "標題": header_list,
                       "文章連結": articleLink_list}
            return content
        else:
            print("[WARNING] selenium_crawl_epidemic_news_NTUST()"+\
                  " [防疫快訊] 底下資訊皆可抓取,但數量不相符,故不回傳資訊")
            return None
    # --------------------
    def requests_crawl_epidemic_news(self):
        # requests-based variant to be written later; pass for now
        pass
class Epidemic_info_assistant():
    """Facade that loads the bulletin URLs and formats crawled news as text."""
    def __init__(self):
        self.epidemic_news_links = None
        # --------------------
        self.school_abbrs = ("NTU", "NTNU", "NTUST")
        self.read_config()
    def read_config(self):
        # Load the three bulletin URLs from the config file, keyed by school.
        tmp_info = get_config()
        if tmp_info is not None:
            #NTU_SYSTEM_SCHOOLS_NUM = 3
            #print(*(f"{tmp_info[i]}" for i in range(0, NTU_SYSTEM_SCHOOLS_NUM)), sep='\n'*2)
            epidemic_news_links = {self.school_abbrs[0]: tmp_info[0],
                                   self.school_abbrs[1]: tmp_info[1],
                                   self.school_abbrs[2]: tmp_info[2]}
            #print(epidemic_news_links["NTUST"])
            self.epidemic_news_links = epidemic_news_links
    def crawl_ntu_system_news(self, input_text):
        # Use Epidemic_info_crawler to fetch the NTU-system (three schools)
        # epidemic news for the school named by *input_text*.
        crawler = Epidemic_info_crawler()
        # obtain the crawled content
        content = crawler.crawl_epidemic_news(self.epidemic_news_links, self.school_abbrs, input_text)
        # NOTE(review): `fields` is unbound when input_text is none of the three
        # abbreviations, and `content` may be None — both raise below; confirm
        # that callers pre-validate input_text.
        if input_text in ('NTU', 'NTUST'):
            fields = ["日期", "標題", "文章連結"]
            #print(*(f'content["{field}"]: {content[field]}' for field in fields), sep='\n'*2)
            #print(*(f"|{k} --- {v}|" for k, v in content.items()), sep='\n')
        elif input_text == 'NTNU':
            #fields = ["日期", "標題", "圖片路徑"] # (暫緩)
            fields = ["日期", "標題", "資訊連結"] # info link (was: image path)
        # Render one "field: value" line per field, one blank line per article.
        msg = ""
        article_num = len(content[fields[0]])
        for i in range(article_num):
            for field in fields:
                msg += f"{field}: {content[field][i]}\n"
            msg += '\n'
        return msg
'''
if __name__ == "__main__":
#input_text = "NTU" # 使用者輸入資訊
#input_text = "NTNU" # 使用者輸入資訊
input_text = "NTUST" # 使用者輸入資訊
assistant = Epidemic_info_assistant()
content = assistant.crawl_ntu_system_news(input_text) # 抓取防疫快訊
print(content)
print(type(content))
''' | true |
b0e136cb1e03067a57db2d3f317bf2251d829720 | Python | khaledfouda/Porto-Seguro-Kaggle-competetion- | /py/log.py | UTF-8 | 1,122 | 2.5625 | 3 | [] | no_license | #! /usr/bin/env python
import logging
log = None
handler = None
LOG_PATH = '../data/log/'
#---------------------------------------
def init(filename):
    """Initialize the module-level logger writing to LOG_PATH/<filename>.

    Returns an error string (leaving state untouched) when the logger is
    already initialized or *filename* is not a non-empty string; returns
    None on success, matching msg()/close().
    """
    global log, handler
    if log and handler : return "Error: log is already initialized. Try log.close() first."
    # isinstance instead of `type(...) != str`: idiomatic and accepts str subclasses.
    if not isinstance(filename, str) or filename == '' : return "Error: wrong filename"
    log = logging.getLogger()
    log.setLevel(logging.INFO)
    filename = LOG_PATH + str(filename)
    handler = logging.FileHandler(filename)
    handler.setLevel(logging.INFO)
    formatter = logging.Formatter(
        fmt='%(asctime)s %(levelname)s: %(message)s',
        datefmt='%m-%d %H:%M'
    )
    handler.setFormatter(formatter)
    log.addHandler(handler)
    return
#------------------------------------
def msg(message):
    """Log *message* at INFO level; return an error string on misuse."""
    if not message:
        return "Error: empty message"
    if not log or not handler:
        return "Error: log is not initialized"
    log.info(message)
#------------------------------------
def close():
    """Detach the file handler and reset module state for a future init()."""
    global log, handler
    if not log or not handler:
        return "Error: log is already closed."
    log.removeHandler(handler)
    log = None
    handler = None
#----------------------------------------
| true |
fff4ebb2e009ac24db2d1a7f65fa74ecf300b31b | Python | andrewtarzia/stk | /src/stk/_internal/ea/fitness_normalizers/multiply.py | UTF-8 | 5,375 | 3.453125 | 3 | [
"MIT"
] | permissive | import typing
from collections.abc import Callable, Iterable
from typing import Any
import numpy as np
from .fitness_normalizer import FitnessNormalizer
T = typing.TypeVar("T")
class Multiply(FitnessNormalizer[T]):
    """
    Multiply each fitness value by a fixed coefficient.

    The coefficient may be a single number, applied to scalar fitness
    values, or an iterable of numbers, applied element-wise to tuple-like
    fitness values. The element-wise form is useful for weighting the
    individual properties that make up a composite fitness value, for
    example before summing them with :class:`.Sum`.

    Records for which *filter* returns ``False`` keep their original
    fitness value untouched, which lets records without a valid fitness
    (e.g. failed fitness calculations, whose value may be ``None``) pass
    through the normalizer unchanged.
    """

    def __init__(
        self,
        coefficient: float | Iterable[float],
        filter: Callable[
            [dict[T, Any], T], bool
        ] = lambda fitness_values, record: True,
    ) -> None:
        """
        Parameters:
            coefficient (float | list[float]):
                The coefficient(s) each fitness value is multiplied by.
                Either a single number or one number per fitness-value
                component, depending on the form of the fitness value.
            filter:
                Predicate deciding which records get normalized. It is
                called with the full fitness mapping and one record at a
                time; by default every record is normalized.
        """
        # Materialize iterables into a tuple so the coefficient can be
        # reused safely across normalize() calls.
        if isinstance(coefficient, int | float):
            self._coefficient = coefficient
        else:
            self._coefficient = tuple(coefficient)
        self._filter = filter

    def normalize(self, fitness_values: dict[T, Any]) -> dict[T, Any]:
        normalized = {}
        for record, fitness in fitness_values.items():
            if self._filter(fitness_values, record):
                normalized[record] = np.multiply(self._coefficient, fitness)
            else:
                # Excluded records keep their original fitness value.
                normalized[record] = fitness
        return normalized
| true |
203de36c568bb08e8db78d92ff016baaa6391aa9 | Python | kbhat1234/Python-Project | /python/time1.py | UTF-8 | 822 | 3.140625 | 3 | [] | no_license | import time
def time_func():
    # Demonstrate the time module's query functions (Python 2 print syntax).
    localtime = time.localtime(time.time())
    print time.time()      # seconds since the epoch, as a float
    print localtime        # struct_time for local time
    print time.clock()     # CPU/wall time; removed in Python 3.8
    print time.ctime()
    print time.altzone     # DST timezone offset, seconds west of UTC
    localtime = time.asctime( time.localtime(time.time()) )
    print localtime
    # NOTE: time.sleep() returns None, so "None" is printed after the pause.
    print "sleep for 10 seconds", time.sleep( 10 )
    localtime = time.asctime( time.localtime(time.time()) )
    print localtime
    print time.daylight
    print time.gmtime()
    print time.timezone
    print time.tzname
    print "Sleep for 10 seconds", time.sleep(10)
    print time.strptime("26 Sep 17", "%d %b %y")
def main():
    # Run the time-module demo.
    time_func()
# Execute the demo only when run as a script.
if(__name__ == "__main__"):
    main()
| true |
eda806ccaa93cee3197932909311a5c40559d3b7 | Python | josedom24/plataforma_pledin | /cursos/_python3/python3/curso/u39/ejemplo6.py | UTF-8 | 154 | 3.734375 | 4 | [] | no_license |
def nivel(numero):
    """Return *numero* when it is non-negative; otherwise raise ValueError."""
    if numero >= 0:
        return numero
    raise ValueError("El número debe ser positivo:" + str(numero))
print(nivel(5))
print(nivel(-1)) | true |
fba9c1eaa2a5dd6e8c9f5654bc9585004aa6c6d3 | Python | kevhahn97/Twitch-chat-insight | /chat_nlp.py | UTF-8 | 943 | 2.921875 | 3 | [] | no_license | import json
import re
from konlpy.tag import Okt
from collections import Counter
def main():
    """Read <name>.json chat dump, extract Korean nouns with Okt, and write
    the top-50 noun frequencies to <name>_nouns.txt (echoing them to stdout).
    """
    filename = input("Input: ")
    with open(filename+'.json', 'r', encoding='utf-8-sig') as data_file:
        data = json.load(data_file)
    nouns_file = filename+"_nouns.txt"
    nouns = []
    nlpy = Okt()
    for contents in data:
        # extend() instead of `nouns = nouns + ...`, which copied the whole
        # list on every message (quadratic in the number of messages).
        nouns.extend(nlpy.nouns(contents["contents"]))
    count = Counter(nouns)
    tags_count = []
    tags = []
    for n, c in count.most_common(50):
        dics = {'tag': n, 'count': c}
        tags_count.append(dics)
        tags.append(dics['tag'])
    # `with` guarantees the output file is closed even if a write fails
    # (same args as before: line-buffered, UTF-8).
    with open(nouns_file, 'w', 1, "utf-8") as output_file:
        for tag in tags_count:
            print(' {:<14}\t{}\n'.format(tag['tag'], tag['count']))
            output_file.write(' {:<14}\t{}\n'.format(tag['tag'], tag['count']))
if __name__ == "__main__":
main() | true |
addbd44ab5cf686380b4f1ce7697d757506995cc | Python | IAmTomaton/KGG | /task4.py | UTF-8 | 2,010 | 3.390625 | 3 | [] | no_license | import math
from App import App
def coord_x(x, y, z):
    """Screen x-coordinate of the isometric projection of (x, y, z).

    Only x and y contribute here; z is accepted for a uniform signature
    with coord_y.
    """
    return math.sqrt(3.0) * (y - x) / 2
def coord_y(x, y, z):
    """Screen y-coordinate of the isometric projection of (x, y, z)."""
    half_sum = 0.5 * (x + y)
    return half_sum - z
def get_pixels(width, height, n, m, f):
    """Yield ((px, py), flag) screen pixels for the surface z = f(x, y).

    The surface is sampled over [-3, 3] x [-3, 3]. A first pass (on a
    max(n, m)-sized grid) finds the projected bounding box; a second pass
    scales each projected sample into width x height screen space. A pixel
    is yielded with True when it extends the running lower silhouette of
    its column and with False when it extends the upper one (hidden-line
    style visibility); both can fire for the same pixel.
    """
    x_start, x_end = 3, -3
    y_start, y_end = 3, -3
    top = [height] * (width + 1)
    bottom = [0] * (width + 1)
    min_x = math.inf
    max_x = -math.inf
    min_y = math.inf
    max_y = -math.inf
    samples = max(n, m)
    # Pass 1: projected bounding box of the sampled surface.
    for i in range(samples):
        x = x_start + i * (x_end - x_start) / samples
        for j in range(samples):
            y = y_start + j * (y_end - y_start) / samples
            z = f(x, y)
            sx = coord_x(x, y, z)
            sy = coord_y(x, y, z)
            min_x = min(min_x, sx)
            max_x = max(max_x, sx)
            min_y = min(min_y, sy)
            max_y = max(max_y, sy)
    # Pass 2: map samples to pixels and emit silhouette updates.
    for i in range(n + 1):
        x = x_start + i * (x_end - x_start) / n
        for j in range(m + 1):
            y = y_start + j * (y_end - y_start) / m
            z = f(x, y)
            sx = int((coord_x(x, y, z) - min_x) * width / (max_x - min_x))
            sy = int((coord_y(x, y, z) - min_y) * height / (max_y - min_y))
            if sy > bottom[sx]:
                yield (sx, sy), True
                bottom[sx] = sy
            if sy < top[sx]:
                yield (sx, sy), False
                top[sx] = sy
def main():
    # Surface to plot: z = cos(x * y).
    def f(x, y):
        return math.cos(x * y)
    width, height = 800, 600
    app = App((width, height))
    # Two passes with swapped sampling densities: dense in y, then dense in x,
    # so both silhouette directions get drawn.
    tuples = get_pixels(width, height, 50, width * 2, f)
    for pix, is_visible in tuples:
        app.draw_dot(pix, (255, 0, 0) if is_visible else (0, 0, 255))
    tuples = get_pixels(width, height, width * 2, 50, f)
    for pix, is_visible in tuples:
        app.draw_dot(pix, (255, 0, 0) if is_visible else (0, 0, 255))
    app.update_image()
    app.save('img.png')
    app.mainloop()
# Render only when executed directly.
if __name__ == '__main__':
    main()
| true |
51e158da252294fd4a75b193fb86c26839f7d540 | Python | datemitumasa/Robocup | /body_module_wrist_wrench_manager/src/wrist_wrench_manager.py~ | UTF-8 | 3,042 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
import rospy
from geometry_msgs.msg import WrenchStamped
from body_module_wrist_wrench_manager.msg import WristWrenchForceAction, WristWrenchForceResult, WristWrenchForceFeedback
import numpy as np
import time
import actionlib
class WristWrench(object):
    """Action server that reports when the wrist force-torque sensor sees a
    force change above a requested threshold within a requested window."""
    def __init__(self):
        # Cache the latest compensated and raw wrench readings via callbacks.
        rospy.Subscriber("/hsrb/wrist_wrench/compensated", WrenchStamped, self.cb_wrench_compensated,queue_size=10)
        rospy.Subscriber("/hsrb/wrist_wrench/raw", WrenchStamped, self.cb_wrench_raw, queue_size=10)
        self.action_server = actionlib.SimpleActionServer("wrist_wrench_manager_action",
                               WristWrenchForceAction, execute_cb=self.force_check_compensated,
                               auto_start=False)
        self._wrench_raw = WrenchStamped()
        self._wrench_com = WrenchStamped()
        self.rate = rospy.Rate(100)  # monitoring loop frequency, Hz
        self.action_server.start()
    def cb_wrench_compensated(self, data):
        # Latest gravity-compensated wrench sample.
        self._wrench_com = data
    def cb_wrench_raw(self, data):
        # Latest raw wrench sample (currently unused by the action callback).
        self._wrench_raw = data
    def force_check_compensated(self, data):
        """
        Return whether a force above the threshold was applied to the wrist
        force-torque sensor within the requested time window.
        The check uses the displacement from the value captured at start
        time, so mind when the goal is triggered.
        Args:
            ntime int32: monitoring duration
            nforce float64: force threshold
        Result:
            success bool:
        Feedback:
            force float64: force currently being applied
        """
        rospy.logwarn(data)
        if data.force < 0.2:
            rospy.logwarn("request force is too small")
        # Baseline reading: deltas below are measured against this sample.
        base_wrench = self._wrench_com
        wrench_feedback = WristWrenchForceFeedback()
        flag = 0
        # time == 0 means monitor indefinitely (flag keeps the loop alive).
        if data.time == 0:
            flag = 1
        st_time = time.time()
        success =WristWrenchForceResult()
        success.success = False
        print(base_wrench)
        # NOTE(review): `and` binds tighter than `or`, so when the window has
        # not expired the rospy.is_shutdown() check is bypassed — confirm the
        # intended grouping of this condition.
        while (time.time() - st_time) < data.time or flag and not rospy.is_shutdown():
            now_wrench = self._wrench_com
            force_x = abs(now_wrench.wrench.force.x - base_wrench.wrench.force.x)
            force_y = abs(now_wrench.wrench.force.y - base_wrench.wrench.force.y)
            force_z = abs(now_wrench.wrench.force.z - base_wrench.wrench.force.z)
            # Magnitude of the force displacement since the baseline.
            force = np.sqrt( force_x ** 2 + force_y ** 2 + force_z ** 2)
            wrench_feedback.now_force = force
            print(force_x, force_y, force_z)
            self.action_server.publish_feedback(wrench_feedback)
            if force > data.force:
                print(force)
                print success
                success.success = True
                break
            self.rate.sleep()
        if not success.success:
            rospy.logwarn("no wrench force")
        self.action_server.set_succeeded(success)
# Start the ROS node and serve wrist-wrench force checks until shutdown.
if __name__ == "__main__":
    rospy.init_node("wrist_wrench_manager")
    wrist_wrench = WristWrench()
    rospy.spin()
| true |
96f6db1596e78e5f51548079fca922ad671c3c4c | Python | sHongJung/Data_Analytics | /06-API/3/Activities/01-Stu_Wrapper_Recap/Solved/WeatherForecast-Bonus.py | UTF-8 | 1,009 | 3.390625 | 3 | [] | no_license | # Dependencies
import requests
import json
import datetime
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter

# Weather
# NOTE(review): hard-coded API key — move to an environment variable before sharing.
api_key = "c7f9f57b4779391ea1f5ae067591c971"

# Endpoint URL for five day forecast Phoenix, AZ
target_url = "http://api.openweathermap.org/data/2.5/forecast" \
    "?q=Phoenix,us&units=IMPERIAL&mode=json&APPID=" + api_key

# Print URL
print(target_url)

# Request Data
city_weather = requests.get(target_url).json()

# List for holding temperatures
temps = []
times = []

# Display the weather with dates (one entry per 3-hour forecast slot)
for temp in city_weather["list"]:
    print("%s | %s F" % (temp["dt_txt"], temp["main"]["temp"]))
    temps.append(temp["main"]["temp"])
    # Parse the forecast timestamp so matplotlib can format the x axis.
    weather_date = datetime.datetime.strptime(
        temp["dt_txt"], "%Y-%m-%d %H:%M:%S")
    times.append(weather_date)

# Plot the temperatures over time
plt.plot(times, temps)
formatter = DateFormatter('%Y-%m-%d %H:%M:%S')
plt.gcf().axes[0].xaxis.set_major_formatter(formatter)
plt.gcf().autofmt_xdate()
plt.show()
| true |
f2a806f38153d91c9506af966ccd10ef958ba4be | Python | antonmeskildsen/Thesis-Code | /thesis/optim/status.py | UTF-8 | 684 | 2.71875 | 3 | [] | no_license | from abc import ABC, abstractmethod
from tqdm import tqdm
from streamlit import progress
class ProgressBar(ABC):
    """Abstract progress bar: construct with a total, update incrementally, close."""
    @abstractmethod
    def __init__(self, total):
        ...
    @abstractmethod
    def update(self, i):
        ...
    @abstractmethod
    def close(self):
        ...
class TQDMBar(ProgressBar):
    """ProgressBar backed by a tqdm terminal bar."""
    def __init__(self, total):
        self.pbar = tqdm(total=total)
    def update(self, i):
        # tqdm's update() takes an increment, not an absolute position.
        self.pbar.update(i)
    def close(self):
        self.pbar.close()
class StreamLitBar(ProgressBar):
    """ProgressBar backed by a Streamlit progress widget."""
    def __init__(self, total):
        # NOTE(review): streamlit's progress() takes an initial value rather
        # than a total — confirm the intended semantics here.
        self.bpar = progress(total)
    def update(self, i):
        # Presumably *i* is the absolute progress value expected by the
        # widget (unlike TQDMBar's increment) — verify against callers.
        self.bpar.progress(i)
def close(self):
pass | true |
73c1835cd6bb071d9cb0093098cb08d8b2b30071 | Python | itsolutionscorp/AutoStyle-Clustering | /all_data/exercism_data/python/word-count/87b7de218dc14cdbaf023efd572de56b.py | UTF-8 | 292 | 3.203125 | 3 | [] | no_license | from collections import defaultdict
from string import punctuation
def word_count(sentence):
    """Count each whitespace-separated word, ignoring punctuation and case.

    Returns a defaultdict(int) mapping lowercased word -> occurrence count.
    """
    counts = defaultdict(int)
    # Strip punctuation with a generator + join: the original py2-style
    # `filter(...).lower()` fails on Python 3, where filter() returns an
    # iterator with no .lower() method.
    cleaned = "".join(c for c in sentence if c not in punctuation).lower()
    for word in cleaned.split():
        counts[word] += 1
    return counts
| true |
1b7e2ebd3ab87011697598a9fb7a0a6b3d9efcec | Python | chongminggao/pseudo_dyna_q | /utils/zlog.py | UTF-8 | 929 | 2.9375 | 3 | [] | no_license | #!/usr/bin/python
# encoding: utf-8
from datetime import datetime
def get_now_time():
    """Current local time formatted as 'YYYY-MM-DD_HH:MM:SS'."""
    return f"{datetime.now():%Y-%m-%d_%H:%M:%S}"
def generating_log(*info):
    """Append one timestamped, tab-separated line to log.log_path and echo it."""
    line = get_now_time() + " " + info[0]
    for field in info[1:]:
        line += "\t" + str(field)
    line += '\n'
    with open(log.log_path, 'a') as out:
        out.write(line)
    print(line.strip('\n'))
class log():
    # Default log file: ./logs/<timestamp at import time>.log
    log_path = "./logs/"+str(get_now_time())+".log"
    @classmethod
    def set_log_path(cls,path):
        # Point all subsequent logging at *path*.
        cls.log_path = path
    @classmethod
    def redirect_log_path(cls,path):
        # Alias of set_log_path(); kept for backward compatibility.
        cls.log_path = path
    @classmethod
    def structure_info(cls,title="",info = []):
        # Log a banner-framed section: title rule, one line per info tuple,
        # then a closing rule. NOTE: the mutable default `info=[]` is benign
        # here because it is only read, never mutated.
        temp = "#"*25+" "+str(title)+" "+"#"*25
        generating_log(temp)
        for item in info:
            generating_log(*item)
        generating_log(len(temp)*"#")
    @classmethod
    def info(cls,*info):
        # Convenience passthrough to generating_log().
        generating_log(*info)
| true |
f489fcf26e7266f13d3b1ebdfdb7f34aa5e0921d | Python | inkyu0103/BOJ | /Daliy/8.23/2346.py | UTF-8 | 414 | 3.21875 | 3 | [] | no_license | # 2346 풍선터뜨리기
# BOJ 2346: balloon popping.
import sys
from collections import deque
input = sys.stdin.readline
answer = []
stack = []  # unused; kept as in the original
arr = deque([])
N = int(input())
# Store each balloon as [1-based position, jump value].
for idx,ele in enumerate(map(int,input().split())):
    arr.append([idx+1,ele])
# first balloon
while arr:
    idx,ele = arr.popleft()
    answer.append(idx)
    # Rotate so the balloon `ele` steps away comes to the front; popping
    # already consumed one forward step, hence ele-1 for positive jumps.
    if ele > 0:
        arr.rotate(-(ele-1))
    else:
        arr.rotate(-ele)
print(*answer)
| true |
3b162963bed7bbbc4c00cee3f6ca9d73e8a25fe7 | Python | CleberSilva93/Study-Exercicios-Python | /06 - Two_Operation_on_format.py | UTF-8 | 126 | 3.875 | 4 | [] | no_license | r = int(input('Digite valor do raio de um círculo:\n'))
print('A área do circulo com raio {}, é {}'.format(r,(r**2)*3.14))
| true |
8f1d14e041e7f5305abc6b7528729d20e7cc1e1d | Python | bse524/test1 | /py005.py | UTF-8 | 237 | 3.40625 | 3 | [] | no_license | n1 = 21
# Classify n1 (defined above) by divisibility by 3 and by 7.
if n1 % 3 ==0 and n1 % 7 ==0:
    print('3과 7의 배수입니다.')  # multiple of both 3 and 7
elif n1 % 3 ==0:
    print('3의 배수입니다.')  # multiple of 3 only
elif n1 % 7 ==0:
    print('7의 배수입니다.')  # multiple of 7 only
else:
    print('3과 7의 배수가 아닙니다.')  # multiple of neither
| true |
5d841b1dbba5c38d7d97827eefcfd7a1f163c8ae | Python | ljyspark/AES-ECB-of-Python | /GUI.py | UTF-8 | 3,749 | 2.875 | 3 | [] | no_license | #-*- coding: utf-8 -*-
import wx
import sys, os
import AES
APP_TITLE = u'AES进制转换器'
APP_ICON = 'Key.ico' # replace with your own icon
# NOTE(review): mainFrame.__init__ loads 'key.ico' directly; APP_ICON appears unused.
class mainFrame(wx.Frame):
    '''Main application window class, derived from wx.Frame.'''
    def __init__(self):
        '''Constructor: build the widgets and lay them out.'''
        wx.Frame.__init__(self, None, -1, APP_TITLE, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
        # The default frame style is the combination of: wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.RESIZE_BORDER | wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.CLIP_CHILDREN
        self.panel = wx.Panel(self)
        self.SetBackgroundColour(wx.Colour(224, 224, 224))
        self.SetSize((800, 600))
        self.ico = wx.Icon('key.ico', wx.BITMAP_TYPE_ICO)
        self.SetIcon(self.ico)
        self.Center()
        # The various widgets are created below.
        self.data = wx.TextCtrl(self.panel,wx.ID_ANY, style=wx.TE_MULTILINE)
        self.locked_data = wx.TextCtrl(self.panel,wx.ID_ANY, style=wx.TE_MULTILINE)
        self.lock_button = wx.Button(self.panel, label=u"AES加密")
        self.lock_button.Bind(wx.EVT_BUTTON,self.lock_fun)
        self.unlock_button = wx.Button(self.panel, label=u"AES解密")
        self.unlock_button.Bind(wx.EVT_BUTTON, self.unlock_fun)
        self.KEY = wx.StaticText(self.panel, wx.ID_ANY, label="请输入密钥:")
        self.KEYS = wx.TextCtrl(self.panel, wx.ID_ANY)
        # Horizontal row: encrypt button, key label, key field, decrypt button.
        self.box = wx.BoxSizer()
        self.box.Add(self.lock_button, proportion=2, flag=wx.EXPAND | wx.ALL, border=5)
        self.box.Add(self.KEY, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)
        self.box.Add(self.KEYS, proportion=3, flag=wx.EXPAND | wx.ALL, border=5)
        self.box.Add(self.unlock_button, proportion=2, flag=wx.EXPAND | wx.ALL, border=5)
        # Vertical layout: input text, button row, output text.
        self.vbox = wx.BoxSizer(wx.VERTICAL)
        self.vbox.Add(self.data, proportion=2, flag=wx.EXPAND | wx.ALL, border=5)
        self.vbox.Add(self.box, proportion=0, flag=wx.EXPAND | wx.ALL, border=5)
        self.vbox.Add(self.locked_data, proportion=2, flag=wx.EXPAND | wx.ALL, border=5)
        self.SetSizer(self.vbox)
    def lock_fun(self, event):
        '''Validate the key, AES-encrypt the input text, and show the result.'''
        try:
            data = self.data.GetValue()
            KEYS = self.KEYS.GetValue()
            # Key must be a multiple of 16 characters and purely alphabetic.
            if len(KEYS)%16 != 0:
                self.locked_data.SetValue("密钥长度应是16的倍数!")
                return
            if not KEYS.isalpha():
                self.locked_data.SetValue("密钥只能是纯英文字符!")
                return
            locking = AES.PrpCrypt(self.KEYS.GetValue())
            self.locked_data.SetValue(locking.encrypt(self.data.GetValue()))
        except BaseException as f:
            # Surface any failure message in the output field.
            self.locked_data.SetValue(str(f))
    def unlock_fun(self, event):
        '''Validate the key, AES-decrypt the input text, and show the result.'''
        try:
            locked_data = self.data.GetValue()
            KEYS = self.KEYS.GetValue()
            if len(KEYS)%16 != 0:
                self.locked_data.SetValue("密钥长度应是16的倍数!")
                return
            if not KEYS.isalpha():
                self.locked_data.SetValue("密钥只能是纯英文字符!")
                return
            locking = AES.PrpCrypt(self.KEYS.GetValue())
            self.locked_data.SetValue(locking.decrypt(self.data.GetValue()))
        except ValueError as f:
            # Typically raised when the input is not valid ciphertext.
            self.locked_data.SetValue("加密以后的字符串不带英文的!\n" + str(f))
        except BaseException as f:
            self.locked_data.SetValue(str(f))
class mainApp(wx.App):
    """wx application bootstrap: names the app and shows the main frame."""

    def OnInit(self):
        """Called by wxPython at startup; create and display the main window."""
        self.SetAppName(APP_TITLE)
        frame = mainFrame()
        self.Frame = frame
        frame.Show()
        return True
app = mainApp(redirect=False)  # redirect=False keeps stdout/stderr on the console
app.MainLoop()  # hand control to the wx event loop
| true |
59b98448c571ed851ae118e9a7ae38e6cab6039d | Python | KangSuzy/algorithms | /baekjoon/1152.py | UTF-8 | 1,021 | 3.75 | 4 | [] | no_license | """
단어의 개수 성공
시간 제한 메모리 제한 제출 정답 맞은 사람 정답 비율
2 초 128 MB 74931 17055 12247 23.053%
문제
영어 대소문자와 띄어쓰기만으로 이루어진 문자열이 주어진다. 이 문자열에는 몇 개의 단어가 있을까? 이를 구하는 프로그램을 작성하시오. 단, 한 단어가 여러 번 등장하면 등장한 횟수만큼 모두 세어야 한다.
입력
첫 줄에 영어 대소문자와 띄어쓰기로 이루어진 문자열이 주어진다. 이 문자열의 길이는 1,000,000을 넘지 않는다. 단어는 띄어쓰기 한 개로 구분되며, 공백이 연속해서 나오는 경우는 없다. 또한 문자열의 앞과 뒤에는 공백이 있을 수도 있다.
출력
첫째 줄에 단어의 개수를 출력한다.
예제 입력 1
The Curious Case of Benjamin Button
예제 출력 1
6
예제 입력 2
Mazatneunde Wae Teullyeoyo
예제 출력 2
3
예제 입력 3
Teullinika Teullyeotzi
예제 출력 3
2
"""
# Read one line and report how many whitespace-separated words it contains.
# str.split() with no arguments also ignores leading/trailing whitespace,
# which the problem statement allows.  The original had a no-op `list(a)`
# whose result was discarded; input().split() already returns a list.
words = input().split()
print(len(words))
| true |
1c017dc1a8c3e9eb2d6b586166ced9f8a8b6204b | Python | whaleygeek/sa_piwars | /microbit_code/tilt_test.py | UTF-8 | 165 | 2.953125 | 3 | [
"MIT"
] | permissive | from microbit import *
while True:
    # Poll the accelerometer roughly every 100 ms.
    sleep(100)
    x = accelerometer.get_x()
    # Report tilt over serial once the x reading leaves the +/-200 dead zone:
    # "L" for a left tilt, "R" for a right tilt.
    if x < -200:
        print("L")
    if x > 200:
        print("R")
| true |
7bf11122c85eb1125e085c665acdd51b47d4768c | Python | Aasthaengg/IBMdataset | /Python_codes/p03970/s893308714.py | UTF-8 | 255 | 3.265625 | 3 | [] | no_license | # Problem A - Signboard
# Read the painted signboard and count how many characters differ from the
# target string "CODEFESTIVAL2016" at the same position.
S = input()

target = "CODEFESTIVAL2016"

# Compare position by position; indexing `target` (rather than zipping)
# preserves the original behaviour of raising IndexError if S is longer.
swap_count = 0
for i, ch in enumerate(S):
    if ch != target[i]:
        swap_count += 1

print(swap_count)
| true |
da426abe7ceee0ab38d34620ec38fb16d38c88db | Python | NTUT-109AB8011/crawler | /exercise/learn_python_dm2039/ch30/ch30_16.py | UTF-8 | 400 | 3.5 | 4 | [] | no_license | # ch30_16.py
import threading
import time
def worker():
    """Announce the current thread's start, sleep 3 s, then announce exit.

    Uses threading.current_thread()/.name instead of the deprecated
    currentThread()/getName() aliases (removed in Python 3.13); the
    printed output is unchanged.
    """
    print(threading.current_thread().name, 'Starting')
    time.sleep(3)
    print(threading.current_thread().name, 'Exiting')
# Start the worker in its own thread and demonstrate join() with a timeout.
w = threading.Thread(name='worker',target=worker)
w.start()
print('start join')
w.join(1.5) # wait up to 1.5 s for the worker thread, then continue even if it is still running
print('end join')
| true |
a37cefc168f1db345d7966bfbef3f88cdd037410 | Python | chanyadeshani/student-api | /model.py | UTF-8 | 303 | 3.671875 | 4 | [] | no_license | import datetime
class Student:
    """A student record with an id, a name, and a shared default birthday."""

    # Class-level default shared by every instance.
    birthday = datetime.datetime(1988, 1, 1)

    def __init__(self, id, name):
        self.id, self.name = id, name

    def print_student(self):
        """Print the record as 'Id : .., Name : .., Birthday : YYYY-MM-DD'."""
        details = "Id : " + self.id + ", Name : " + self.name + ", Birthday : " + self.birthday.strftime('%Y-%m-%d')
        print(details)
| true |
3b1f2f114f13a0c6a89a0ad715c0cbfab63ab6ae | Python | xsarinix/mission-to-mars | /app.py | UTF-8 | 854 | 2.625 | 3 | [] | no_license | # import libraries
from flask import Flask, render_template, redirect
import pymongo
# Module-level Mongo connection shared by the routes below.
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
db = client.mars_db
col = db.scrapes  # NOTE(review): never used; the routes read/write db.col instead -- confirm which collection is intended
# create instance of Flask app
app = Flask(__name__)
# create route that renders index.html template
@app.route("/scrape")
def scrape_route():
    """Run the Mars scraper, store the result in Mongo, then redirect home."""
    import scrape_mars
    # Re-open the connection locally, shadowing the module-level handles.
    conn = 'mongodb://localhost:27017'
    client = pymongo.MongoClient(conn)
    db = client.mars_db
    col = db.scrapes  # NOTE(review): unused -- the insert below goes to db.col (a collection literally named "col")
    post = scrape_mars.scrape()
    print(post)
    db.col.insert_one(post)
    return redirect("http://127.0.0.1:5000/")
@app.route("/")
def echo():
scrapes = list(db.col.find().sort('scrape_time', pymongo.DESCENDING))
print("Scrapes retrieved.")
return render_template('index.html', scrapes = scrapes)
if __name__ == "__main__":
app.run(debug=True)
| true |
62eb4249aea7d2a997901789d0b433ec519ba7fd | Python | daniel-reich/ubiquitous-fiesta | /sZkMrkgnRN3z4CxxB_3.py | UTF-8 | 348 | 3.4375 | 3 | [] | no_license |
class Rectangle:
    """Axis-aligned rectangle: corner at (x, y), width w, height h."""

    def __init__(self, x, y, w, h):
        # Store the corner and the dimensions verbatim.
        self.x, self.y = x, y
        self.w, self.h = w, h
def intersecting(r1, r2):
return ((r1.x <= r2.x <= r1.x + r1.w and r1.y <= r2.y <= r1.y + r1.h) or
(r1.x <= r2.x + r2.w <= r1.x + r1.w and r1.y <= r2.y + r2.w <=
r1.y + r1.h))
| true |
faa7b67c61d13271ff8d7dfa3f8347780b66bb67 | Python | YukunQu/pyfiber | /test/vis_surfer_conjuction_map.py | UTF-8 | 2,623 | 2.65625 | 3 | [] | no_license | import os.path as op
import numpy as np
import nibabel as nib
from surfer import Brain
print(__doc__)
"""
Initialize the visualization.
"""
brain = Brain("fsaverage_sym", "lh", "inflated", background="white")
"""
Read both of the activation maps in using
surfer's io functions.
"""
sig_v1v = nib.load("/nfs/s2/userhome/quyukun/workingdir/fiberdata/subjects/101410/fsaverage_sym/ep_v1v_101410.nii.gz").get_data().reshape(163842,1)
sig_v1d = nib.load("/nfs/s2/userhome/quyukun/workingdir/fiberdata/subjects/101410/fsaverage_sym/ep_v1d_101410.nii.gz").get_data().reshape(163842,1)
sig_v2v = nib.load("/nfs/s2/userhome/quyukun/workingdir/fiberdata/subjects/101410/fsaverage_sym/ep_v2v_101410.nii.gz").get_data().reshape(163842,1)
sig_v2d= nib.load("/nfs/s2/userhome/quyukun/workingdir/fiberdata/subjects/101410/fsaverage_sym/ep_v2d_101410.nii.gz").get_data().reshape(163842,1)
sig_v3v = nib.load("/nfs/s2/userhome/quyukun/workingdir/fiberdata/subjects/101410/fsaverage_sym/ep_v3v_101410.nii.gz").get_data().reshape(163842,1)
sig_v3d = nib.load("/nfs/s2/userhome/quyukun/workingdir/fiberdata/subjects/101410/fsaverage_sym/ep_v3d_101410.nii.gz").get_data().reshape(163842,1)
sig_other = nib.load("/nfs/s2/userhome/quyukun/workingdir/fiberdata/subjects/101410/fsaverage_sym/ep_other_101410.nii.gz").get_data().reshape(163842,1)
"""
Zero out the vertices that do not meet a threshold.
"""
"""
A conjunction is defined as the minimum significance
value between the two maps at each vertex.
"""
conjunct = np.min(np.vstack((sig_v1v, sig_v1d,sig_v2v,sig_v2d,sig_v3v,sig_v3d)), axis=0)
"""
Now load the numpy array as an overlay.
Use a high saturation point so that the
blob will largely be colored with lower
values from the lookup table.
"""
brain.add_overlay(sig_v1v,min= 0, name="sig_v1v")
"""
A pointer to the overlay's color manager
gets stored in the overlays dictionary.
Change the lookup table to "Reds" and turn the
color bar itself off, as otherwise the bars
for the three maps will get confusingly stacked.
"""
brain.overlays["sig_v1v"].pos_bar.lut_mode = "Reds"
brain.overlays["sig_v1v"].pos_bar.visible = False
"""
Now load the other two maps and again change
the lookup table and turn off the color bar itself.
"""
brain.add_overlay(sig_v1d,min=2, name="sig_v1d")
brain.overlays["sig_v1d"].pos_bar.lut_mode = "Blues"
brain.overlays["sig_v1d"].pos_bar.visible = False
"""
Display the overlap as purple, which is what you
get when you mix red and blue.
"""
brain.add_overlay(conjunct,min=0, name="conjunct")
brain.overlays["conjunct"].pos_bar.lut_mode = "Purples"
brain.overlays["conjunct"].pos_bar.visible = False | true |
79914e68529581182faf31bee61a90693a2ea8c1 | Python | 3ri4nG0ld/Arch-Installer | /TEST.py | UTF-8 | 41 | 3.140625 | 3 | [] | no_license | text="HOLA"
text=text.lower()  # normalize the greeting to lowercase before printing
print(text) | true |
5c98a0198dd94a34c1f2e8b485fce19df4a6110b | Python | eselyavka/python | /leetcode/solution_314.py | UTF-8 | 1,303 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env python
import unittest
from collections import defaultdict, deque
class TreeNode:
    """Binary-tree node holding a value and optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution:
    """LeetCode 314: binary tree vertical order traversal."""

    def verticalOrder(self, root):
        """Group node values by horizontal column, left to right.

        Within a column, values appear in BFS order (top to bottom, and
        left to right within a row).

        :type root: TreeNode
        :rtype: List[List[int]]
        """
        columns = defaultdict(list)
        pending = deque()
        pending.append((root, 0))
        while pending:
            node, offset = pending.popleft()
            if node is None:
                continue
            columns[offset].append(node.val)
            pending.append((node.left, offset - 1))
            pending.append((node.right, offset + 1))
        return [columns[offset] for offset in sorted(columns)]
class TestSolution(unittest.TestCase):
    """Regression test for Solution.verticalOrder."""

    def test_verticalOrder(self):
        # Tree under test:
        #         3
        #       /   \
        #      9     8
        #     / \   / \
        #    4   0 1   7
        #       /   \
        #      5     2
        root = TreeNode(3)
        root.left = TreeNode(9, TreeNode(4), TreeNode(0, TreeNode(5)))
        root.right = TreeNode(8, TreeNode(1, None, TreeNode(2)), TreeNode(7))
        expected = [[4], [9, 5], [3, 0, 1], [8, 2], [7]]
        self.assertListEqual(Solution().verticalOrder(root), expected)
if __name__ == '__main__':
    unittest.main()  # run the test suite when executed as a script
| true |
1e89df26f5cc7a021693449424ac9d5263117560 | Python | mbharanya/Advent-of-code-2020 | /day8/day8_1.py | UTF-8 | 977 | 3.203125 | 3 | [] | no_license |
filename = "/home/xmbomb/dev/aoc2020/day8/input.txt"
example_instructions = """nop +0
acc +1
jmp +4
acc +3
jmp -3
acc -99
acc +1
jmp -4
acc +6
"""
def part1(lines):
    """Run the hand-held console program until an instruction repeats.

    `lines` is the raw instruction list ("acc/jmp/nop +/-N").  Prints the
    accumulator value and terminates the process (exit(0)) as soon as an
    instruction is about to execute a second time.
    """
    def run(i, acc, already_ran):
        # NOTE(review): this indexes before the loop check, so running past
        # the last instruction raises IndexError instead of reaching the
        # "Code is done" print below.
        line = lines[i]
        if i in already_ran:
            print(f'Infinite loop, acc is {acc}')
            exit(0)
        else:
            already_ran.append(i)
        (instruction, value) = tuple(line.split(" "))
        value = int(value)
        # Recurse one step per instruction; the call stack mirrors the
        # program trace (deep inputs may hit the recursion limit).
        if(instruction == "acc"):
            run(i + 1, acc + value, already_ran)
        elif(instruction == "jmp"):
            run(i + value, acc, already_ran)
        elif(instruction == "nop"):
            run(i + 1, acc, already_ran)
        print(f'Code is done, acc is {acc}')
    run(0,0,[])
# part1(lines=example_instructions.strip().split('\n'))
# Read the real puzzle input and run part 1 on it.
with open(filename, 'r') as file:
    lines = file.read().strip().split('\n')
    part1(lines)
# lines = example_instructions.strip().split('\n')
| true |
97e253cabe4c33d993855128a5da9d78ff334b7e | Python | ChangxingJiang/Python-DM-Homework-W1-W3 | /Day-06/Exercise-05-彭高杲.py | UTF-8 | 656 | 4.21875 | 4 | [] | no_license | #1. 将Day02中计算有效互动比的方法写为函数
def interactive_rate():
    """Prompt for fan and interaction counts and print their ratio (3 dp)."""
    fans = float(input('Please write the number of fans there: '))
    interactions = float(input('The number of interaction: '))
    rate = str(round(interactions / fans, 3))
    print('Efficient interactive rate is ' + rate)
interactive_rate()  # run the prompt immediately when the script executes
#2. Compute the Fibonacci sequence iteratively
if __name__ == "__main__":
def fib(i):
a,b = 0,1
for i in range(i+1):
a,b = b, a+b
return a
num = int(input('Please input the integer there: '))
for q in range(num):
print(fib(q),end=',') | true |
4c3ba5cefe462df1fe28644084fb5330d97b283c | Python | n0thing233/n0thing233.github.io | /noodlewhale/Facebook/面试前过的题/65. Valid Number.py | UTF-8 | 1,528 | 3.109375 | 3 | [] | no_license | #edge cases 有限状态机太恶心了
# Note: None and len(x) == 0 are not the same thing.
from collections import Counter
class Solution:
    """LeetCode 65: decide whether a string spells a valid number."""

    def isNumber(self, s: str) -> bool:
        """Return True if `s` is a decimal or integer, optionally with one
        'e'/'E' exponent whose exponent part is an integer."""

        def digits_only(t):
            # True also for the empty string, matching the original helper.
            for ch in t:
                if not ('0' <= ch <= '9'):
                    return False
            return True

        def strip_sign(t):
            # Drop a single leading '+' or '-'.
            return t[1:] if t[:1] in ('+', '-') else t

        def is_decimal(t):
            t = strip_sign(t)
            if not t:
                return False
            # Exactly one decimal point, and not ".": at least one side set.
            if t.count('.') != 1:
                return False
            whole, frac = t.split('.')
            if not whole and not frac:
                return False
            return digits_only(whole) and digits_only(frac)

        def is_integer(t):
            t = strip_sign(t)
            return bool(t) and digits_only(t)

        # Exactly one exponent marker (either case) splits the string into a
        # decimal-or-integer mantissa and an integer exponent.
        if s.count('e') + s.count('E') == 1:
            mantissa, _, exponent = s.partition('e' if 'e' in s else 'E')
            return (is_decimal(mantissa) or is_integer(mantissa)) and is_integer(exponent)
        return is_decimal(s) or is_integer(s)
| true |
d174218a840987215e58c3be548455c2bfdab948 | Python | chenzheng1996/monitoring-ecosystem-resilience | /pyveg/src/image_utils.py | UTF-8 | 18,817 | 3.015625 | 3 | [
"MIT"
] | permissive | """
Modify, and slice up tif and png images using Python Image Library
Needs a relatively recent version of pillow (fork of PIL):
```
pip install --upgrade pillow
```
"""
import os
import sys
import json
import pandas as pd
import numpy as np
import cv2 as cv
from PIL import Image
import imageio
import matplotlib
matplotlib.use("PS")
import matplotlib.pyplot as plt
from .coordinate_utils import get_sub_image_coords
from .file_utils import save_image
def image_from_array(input_array, output_size=None, sel_val=200):
    """
    Convert a 2D numpy array of values into
    an image where each pixel has r,g,b set to
    the corresponding value in the array.
    Pixels exactly equal to sel_val are highlighted as (0, v, v) (cyan).
    If an output size is specified, rescale to this size.
    """
    size_x, size_y = input_array.shape
    new_img = Image.new("RGB", (size_x, size_y))
    # Paint every pixel: highlight the selected value, grey-scale the rest.
    for ix in range(size_x):
        for iy in range(size_y):
            val = int(input_array[ix, iy])
            if val == sel_val:
                new_img.putpixel((ix, iy), (0, val, val))
            else:
                new_img.putpixel((ix, iy), (val, val, val))
    if output_size:
        # Image.ANTIALIAS was a deprecated alias of LANCZOS and was removed
        # in Pillow 10; LANCZOS performs the identical resampling.
        new_img = new_img.resize((output_size, output_size), Image.LANCZOS)
    return new_img
def image_file_to_array(input_filename):
    """
    Read an image file and convert to a 2D numpy array, with values
    0 for background pixels and 255 for signal.
    Assume that the input image has only two colours, and take
    the one with higher sum(r,g,b) to be "signal".
    """
    return pillow_to_numpy(Image.open(input_filename))
def invert_binary_image(image):
    """
    Swap (255,255,255) with (0,0,0) for all pixels: any pixel whose channel
    sum is zero becomes white, every other pixel becomes black.
    """
    inverted = Image.new("RGB", image.size)
    pixels = image.load()
    width, height = image.size
    for ix in range(width):
        for iy in range(height):
            colour = (255, 255, 255) if sum(pixels[ix, iy]) == 0 else (0, 0, 0)
            inverted.putpixel((ix, iy), colour)
    return inverted
# def combine_tif(input_files, bands=["B4", "B3", "B2"]):
def combine_tif(band_dict):
    """
    Read tif files - one per specified band, and rescale and combine
    pixel values to r,g,b values between 0 and 255 in a combined output image.

    Note: mutates band_dict in place, adding 'min_val', 'max_val' and
    'pix_vals' entries to each band's sub-dict.

    Parameters
    ==========
    band_dict: dict, format {'<r|g|b>': {'band': <band_name>, 'filename': <filename>}}

    Returns
    =======
    new_img: PIL Image, 8-bit rgb image.
    """
    for v in band_dict.values():
        v["min_val"] = sys.maxsize
        v["max_val"] = -1 * sys.maxsize
        v["pix_vals"] = []
    for col in band_dict.keys():
        pix = cv.imread(band_dict[col]["filename"], cv.IMREAD_ANYDEPTH).transpose()
        # find the minimum and maximum pixel values in the original scale
        # NOTE(review): because of the elif, a pixel that raises max_val is
        # never considered for min_val (a constant image leaves min_val at
        # sys.maxsize); harmless here since only max_val is used below.
        for ix in range(pix.shape[0]):
            for iy in range(pix.shape[1]):
                if pix[ix, iy] > band_dict[col]["max_val"]:
                    band_dict[col]["max_val"] = pix[ix, iy]
                elif pix[ix, iy] < band_dict[col]["min_val"]:
                    band_dict[col]["min_val"] = pix[ix, iy]
        band_dict[col]["pix_vals"] = pix
    # Take the overall max of the three bands to be the value to scale down with.
    overall_max = max((band_dict[col]["max_val"] for col in ["r", "g", "b"]))
    # create a new image where we will fill RGB pixel values from 0 to 255
    def get_pix_val(ix, iy, col):
        return max(
            0,
            int(
                band_dict[col]["pix_vals"][ix, iy]
                * 255
                / (overall_max + 1) # band_dict[col]["max_val"]
            ),
        )
    new_img = Image.new("RGB", pix.shape)
    for ix in range(new_img.size[0]):
        for iy in range(new_img.size[1]):
            new_img.putpixel(
                (ix, iy), tuple(get_pix_val(ix, iy, col) for col in ["r", "g", "b"])
            )
    return new_img
def scale_tif(input_filename):
    """
    Given only a single band, scale to range 0,255 and apply this
    value to all of r,g,b

    Parameters
    ==========
    input_filename: str, location of input image

    Returns
    =======
    new_img: pillow Image.
    """
    # NOTE(review): max_val/min_val are computed below but never used; the
    # actual scaling is the fixed [-1, 1] -> [0, 255] transform (issue #224).
    max_val = -1 * sys.maxsize
    min_val = sys.maxsize
    # load the single band file and extract pixel data
    pix = cv.imread(input_filename, cv.IMREAD_ANYDEPTH).transpose()
    # find the minimum and maximum pixel values in the original scale
    # print("Found image of size {}".format(im.size))
    for ix in range(pix.shape[0]):
        for iy in range(pix.shape[1]):
            if pix[ix, iy] > max_val:
                max_val = pix[ix, iy]
            elif pix[ix, iy] < min_val:
                min_val = pix[ix, iy]
    # create a new image where we will fill RGB pixel values from 0 to 255
    # global linear transform from [-1, 1] -> [0, 255]
    # tested in issue #224
    def get_pix_val(ix, iy):
        return int((pix[ix, iy] + 1) / 2 * 255)
    new_img = Image.new("RGB", pix.shape)
    for iy in range(new_img.size[1]):
        for ix in range(new_img.size[0]):
            new_img.putpixel(
                (ix, iy), tuple(get_pix_val(ix, iy) for col in ["r", "g", "b"])
            )
    return new_img
def convert_to_rgb(band_dict):
    """
    If we are given three or more bands, interpret the first as red,
    the second as green, the third as blue, and scale them to be between
    0 and 255 using the combine_tif function.
    If we are only given one band, use the scale_tif function to scale the
    range of input values to between 0 and 255 then apply this to all of r,g,b

    Parameters
    ==========
    band_dict: dict, format {'<r|g|b|rgb>': {'band': <band_name>, 'filename': <filename>}}

    Raises
    ======
    RuntimeError: if the number of bands is neither 1 nor >= 3.
    """
    if len(band_dict.keys()) >= 3:
        new_img = combine_tif(band_dict)
    elif len(band_dict.keys()) == 1:
        # Bug fix: dict.values is a method, so the original
        # `band_dict.values[0]` raised TypeError.  Take the single entry.
        new_img = scale_tif(next(iter(band_dict.values()))["filename"])
    else:
        raise RuntimeError(
            "Can't convert to RGB with {} bands".format(band_dict.keys())
        )
    return new_img
def plot_band_values(input_filebase, bands=["B4", "B3", "B2"]):
    """
    Plot histograms of the values in the chosen bands of the input image.

    input_filebase: path prefix; '<base>.<band>.tif' is opened per band.
    bands: band names to plot (the mutable default list is never mutated
    here, so sharing it between calls is harmless).
    """
    num_subplots = len(bands)
    for i, band in enumerate(bands):
        im = Image.open(input_filebase + "." + band + ".tif")
        pix = im.load()
        vals = []
        for ix in range(im.size[0]):
            for iy in range(im.size[1]):
                vals.append(pix[ix, iy])
        plt.subplot(1, num_subplots, i + 1)
        plt.hist(vals)
    plt.show()
def crop_image_npix(input_image, n_pix_x, n_pix_y=None, region_size=None, coords=None):
    """
    Divide an image into smaller sub-images with fixed pixel size.
    If region_size and coordinates are provided, we want to return the
    coordinates of the sub-images along with the sub-images themselves.
    """
    # if n_pix_y not specified, assume we want equal x,y
    if not n_pix_y:
        n_pix_y = n_pix_x
    xsize, ysize = input_image.size
    x_parts = int(xsize // n_pix_x)
    y_parts = int(ysize // n_pix_y)
    # if we are given coords, calculate coords for all sub-regions
    sub_image_coords = get_sub_image_coords(coords, region_size, x_parts, y_parts)
    # now do the actual cropping
    sub_images = []
    for ix in range(x_parts):
        for iy in range(y_parts):
            box = (ix * n_pix_x, iy * n_pix_y, (ix + 1) * n_pix_x, (iy + 1) * n_pix_y)
            region = input_image.crop(box)
            # depending on whether we have been given coordinates,
            # return a list of images, or a list of (image,coords) tuples.
            # NOTE(review): the flat index is ix * x_parts + iy; for a
            # non-square grid (x_parts != y_parts) this looks like it should
            # be ix * y_parts + iy -- confirm against get_sub_image_coords.
            if sub_image_coords:
                sub_images.append((region, sub_image_coords[ix * x_parts + iy]))
            else:
                sub_images.append(region)
    return sub_images
def crop_image_nparts(input_image, n_parts_x, n_parts_y=None):
    """
    Divide an image into n_parts_x * n_parts_y equal smaller sub-images.

    If n_parts_y is not given (or zero), the same number of parts is used
    in both directions.
    """
    if not n_parts_y:
        n_parts_y = n_parts_x
    xsize, ysize = input_image.size
    # Integer size of each sub-image along each axis.
    x_sub = int(xsize / n_parts_x)
    y_sub = int(ysize / n_parts_y)
    return [
        input_image.crop(
            (ix * x_sub, iy * y_sub, (ix + 1) * x_sub, (iy + 1) * y_sub)
        )
        for ix in range(n_parts_x)
        for iy in range(n_parts_y)
    ]
def convert_to_bw(input_image, threshold, invert=False):
    """
    Given an RGB input, apply a threshold to each pixel.
    If sum(r,g,b) < threshold the pixel becomes white (255,255,255),
    otherwise black (0,0,0); invert=True flips the comparison.
    """
    pix = input_image.load()
    new_img = Image.new("RGB", input_image.size)
    for ix in range(input_image.size[0]):
        for iy in range(input_image.size[1]):
            p = pix[ix, iy]
            # Single-channel images yield a scalar rather than an RGB tuple;
            # catch only that case (the original used a bare `except:` which
            # would also swallow KeyboardInterrupt and real bugs).
            try:
                total = 0
                for col in p:
                    total += col
            except TypeError:
                total = p
            if (invert and (total > threshold)) or (
                (not invert) and (total < threshold)
            ):
                new_img.putpixel((ix, iy), (255, 255, 255))
            else:
                new_img.putpixel((ix, iy), (0, 0, 0))
    return new_img
def crop_and_convert_to_bw(
    input_filename, output_dir, threshold=470, num_x=50, num_y=50
):
    """
    Open an image file, convert to monochrome, and crop into sub-images.

    threshold: summed-RGB cutoff passed to convert_to_bw.
    num_x / num_y: pixel size of each cropped sub-image.
    """
    orig_image = Image.open(input_filename)
    bw_image = convert_to_bw(orig_image, threshold)
    sub_images = crop_image_npix(bw_image, num_x, num_y)
    # strip the file extension from the input_filename
    # NOTE(review): this also drops any interior dots from the base name;
    # os.path.splitext would preserve them -- confirm intended behaviour.
    filename_elements = os.path.basename(input_filename).split(".")
    file_ext = filename_elements[-1]
    new_filename_base = ""
    for el in filename_elements[:-1]:
        new_filename_base += el
    for i, sub_image in enumerate(sub_images):
        new_filename = "{}_{}.{}".format(new_filename_base, i, file_ext)
        save_image(sub_image, output_dir, new_filename)
def create_gif_from_images(directory_path, output_name, string_in_filename=""):
    """
    Loop through a directory and convert all images in it into a gif chronologically

    :param directory_path: directory where all the files are.
    :param output_name: name to be given to the output gif
    :param string_in_filename: select only files that contain a particular string,
    default is "" which implies all files in the directory are selected
    :return: full path of the gif file that was written
    """
    file_names = [
        f
        for f in os.listdir(directory_path)
        if (os.path.isfile(os.path.join(directory_path, f)) and f.endswith(".png"))
    ]
    images = []
    date = []
    for filename in file_names:
        # only use images with certain name (optional)
        if string_in_filename in filename:
            images.append(imageio.imread(os.path.join(directory_path, filename)))
            # the name of each file should end with the date of the image
            # (this is true in the gee images)
            date.append(filename[-14:-4])
    if len(images) == 0:
        raise RuntimeError("No images found")
    else:
        image_dates_df = pd.DataFrame()
        image_dates_df["date"] = date
        image_dates_df["images"] = images
        # sort the frames chronologically by the date parsed from the filename
        image_dates_df.sort_values(by=["date"], inplace=True, ascending=True)
        imageio.mimsave(
            os.path.join(directory_path, output_name + ".gif"),
            image_dates_df["images"],
            duration=0.5,
        )
        print(
            "Saved gif file containing '{}' images in directory '{}'".format(
                image_dates_df.shape[0], directory_path
            )
        )
        return os.path.join(directory_path, output_name + ".gif")
def crop_and_convert_all(input_dir, output_dir, threshold=470, num_x=50, num_y=50):
    """
    Loop through a whole directory and crop and convert to black+white all
    files within it (only 'tif' and 'png' files are processed).
    """
    for filename in os.listdir(input_dir):
        # endswith accepts a tuple, replacing the original `or` of two calls.
        if not filename.endswith(("tif", "png")):
            continue
        print("Processing {}".format(filename))
        input_filename = os.path.join(input_dir, filename)
        crop_and_convert_to_bw(input_filename, output_dir, threshold, num_x, num_y)
def image_file_all_same_colour(image_filename, colour=(255, 255, 255), threshold=0.99):
    """
    Wrapper for image_all_same_colour that opens and closes the image file.

    The context manager guarantees the file handle is released even if the
    comparison raises (the original leaked the handle on error).
    """
    with Image.open(image_filename) as image:
        return image_all_same_colour(image, colour, threshold)
def image_all_same_colour(image, colour=(255, 255, 255), threshold=0.99):
    """
    Return True if all (or nearly all -- at least `threshold` fraction of)
    pixels have the given colour.
    """
    width, height = image.size
    num_total = width * height
    pix = image.load()
    num_different = 0
    for ix in range(width):
        for iy in range(height):
            if pix[ix, iy] == colour:
                continue
            num_different += 1
            # Bail out as soon as the matching fraction drops below the
            # threshold; it can only decrease from here.
            if 1.0 - float(num_different / num_total) < threshold:
                return False
    return True
def compare_binary_image_files(filename1, filename2):
    """
    Wrapper for compare_binary_images that opens and closes the image files.

    Context managers ensure both files are closed even if the comparison
    raises (the original leaked the handles on error).
    """
    with Image.open(filename1) as img1, Image.open(filename2) as img2:
        return compare_binary_images(img1, img2)
def compare_binary_images(image1, image2):
    """
    Return the fraction of pixel positions at which the two images agree.

    Images with different dimensions compare as 0.0.
    """
    if image1.size != image2.size:
        return 0.0
    width, height = image1.size
    pix1, pix2 = image1.load(), image2.load()
    num_same = sum(
        1
        for ix in range(width)
        for iy in range(height)
        if pix1[ix, iy] == pix2[ix, iy]
    )
    return float(num_same / (width * height))
# ---------------------------------------------------------------------
# Image processing functionality
# ---------------------------------------------------------------------
def pillow_to_numpy(pil_image):
    """
    Convert a PIL Image object to a numpy array (used by openCV).
    Greyscale-looking RGB images (all three channels equal and not all
    zero) are collapsed down to a single 2D channel.
    @param pil_image PIL Image object to convert
    @return 2D or 3D numpy array (depending on input image)
    @raises TypeError if the input is not a PIL Image
    """
    # Bug fix: the original tested issubclass(type(x), type(Image.Image)),
    # which is never true for Image *instances*, so the guard never fired.
    if not isinstance(pil_image, Image.Image):
        raise TypeError("Input should be a PIL Image object")
    numpy_image = np.array(pil_image)
    # if the array is already 2D, return it
    if numpy_image.ndim == 2:
        return numpy_image
    # check whether all three colour channels carry the same values
    r, g, b = numpy_image[:, :, 0], numpy_image[:, :, 1], numpy_image[:, :, 2]
    if (b == g).all() and (b == r).all() and not (b == 0).all():
        return numpy_image[:, :, 0] # return with 3rd index removed
    else:
        return numpy_image # return colour image
def numpy_to_pillow(numpy_image):
    """
    Convert a 2D (grayscale) numpy array to a PIL Image object.
    @param numpy_image 2D numpy array to convert
    @return PIL Image object
    @raises TypeError if the input is not a numpy array
    @raises ValueError if the array is not two-dimensional
    """
    if isinstance(numpy_image, np.ndarray):
        if numpy_image.ndim == 2:
            return Image.fromarray(numpy_image)
        raise ValueError("Input should be a grayscale image")
    raise TypeError("Input should be a NumPy array")
def hist_eq(img, clip_limit=2):
    """
    Perform contrast limited local histogram equalisation (CLAHE) on an
    input image.
    @param img 2D numpy array representing a grayscale image
    @param clip_limit controls the strength of the equalisation
    @return 2D numpy array representing the equalised image
    @raises ValueError if the input is not a 2D array
    """
    if img.ndim != 2:
        # Error-message typo fixed ("repersenting" -> "representing").
        raise ValueError(
            "The input image should be a 2D numpy array \
            representing a grayscale image"
        )
    clahe = cv.createCLAHE(clipLimit=clip_limit, tileGridSize=(11, 11))
    return clahe.apply(img)
def median_filter(img, r=3):
    """
    Convolve a median filter over the image.
    @param img 2D numpy array representing a grayscale image
    @param r the size of the grid to convolve
    @return 2D numpy array representing the smoothed image
    @raises ValueError if the input is not a 2D array
    """
    if img.ndim != 2:
        # Error-message typo fixed ("repersenting" -> "representing").
        raise ValueError(
            "The input image should be a 2D numpy array \
            representing a grayscale image"
        )
    return cv.medianBlur(img, r)
def adaptive_threshold(img):
    """
    Threshold a grayscale image using the mean pixel value of a local area
    to set the threshold at each pixel location. At the moment set above
    average brightness pixels to the max (255) and vice versa for below
    average brightness pixels.
    @param img 2D numpy array representing a grayscale image
    @return thresholded image
    @raises ValueError if the input is not a 2D array
    """
    if img.ndim != 2:
        # Error-message typo fixed ("repersenting" -> "representing").
        raise ValueError(
            "The input image should be a 2D numpy array \
            representing a grayscale image"
        )
    local_area_size = 51 # must be odd
    offset = -5 # threshold = mean + offset
    img_thresh = cv.adaptiveThreshold(
        img,
        255, # max value
        cv.ADAPTIVE_THRESH_MEAN_C,
        cv.THRESH_BINARY_INV, # can perform inverted thresholding here
        local_area_size,
        offset,
    )
    return img_thresh
def process_and_threshold(img, r=3):
    """
    Perform histogram equalisation, adaptive thresholding, and median
    filtering on an input PIL Image. Return the result converted
    back to a PIL Image.
    @param img input PIL Image object
    @param r median-filter kernel size
    @return processed PIL Image
    """
    pixels = pillow_to_numpy(img)
    pixels = adaptive_threshold(hist_eq(pixels))
    pixels = median_filter(pixels, r)
    return numpy_to_pillow(pixels)
# ---------------------------------------------------------------------
def check_image_ok(rgb_image, black_pix_threshold=0.05):
    """
    Check the quality of an RGB image. Currently checking if we have
    > X% pixels being masked. This indicates problems with cloud masking
    in previous steps.

    Parameters
    ----------
    rgb_image : Pillow.Image
        Input image to check the quality of
    black_pix_threshold : float, optional
        Maximum allowed fraction of fully-black (masked) pixels.

    Returns
    ----------
    bool
        `True` if image passes quality requirements,
        else `False`.
    """
    img_array = pillow_to_numpy(rgb_image)
    # A 2D result means there are no colour channels to check; this also
    # catches arrays whose elements are plain zeros rather than [r, g, b]
    # triplets.  (The original performed this identical check twice.)
    if len(img_array.shape) < 3:
        return False
    black = [0, 0, 0]
    n_black_pix = np.count_nonzero(np.all(img_array == black, axis=2))
    # Reject the image when the masked fraction reaches the threshold.
    return n_black_pix / (img_array.shape[0] * img_array.shape[1]) < black_pix_threshold
| true |
8110bde386ce20b444556a5d0cd1932c4e54eb23 | Python | abinj/distributed_ml_pyspark | /ml_pipeline.py | UTF-8 | 1,644 | 2.90625 | 3 | [] | no_license | from pyspark.sql import SparkSession
import matplotlib.pyplot as plt
# Instantiate a spark session
spark = SparkSession.builder\
    .master("local[*]")\
    .appName("flights_delay")\
    .config("spark.driver.memory", "8g")\
    .getOrCreate()
# Load raw data
df = spark.read.csv("/home/abin/my_works/datasets/flights.csv", header=True, inferSchema=True, nullValue=' ')
# Clean dataset: keep only the columns used downstream, then drop null rows
to_keep = ['MONTH', 'DAY', 'DAY_OF_WEEK', 'AIRLINE', 'FLIGHT_NUMBER', 'ORIGIN_AIRPORT', 'DESTINATION_AIRPORT'
    , 'TAXI_OUT', 'SCHEDULED_DEPARTURE', 'DEPARTURE_DELAY', 'DISTANCE', 'SCHEDULED_ARRIVAL', 'ARRIVAL_DELAY']
df = df.select(to_keep)
df = df.dropna()
# df.show(20)
# Create Label: 1 when arrival delay exceeds 15 minutes, else 0
df = df.withColumn('label', (df.ARRIVAL_DELAY > 15).cast('integer'))
# df.show(50)
# Balance dataset
# Get number of delayed flights
on_time_flights_count = df.filter(df.label == 0).count()
# Calculate the sub-sampling ratio for the on-time flights
# NOTE(review): this assumes delayed flights are the minority class so the
# ratio stays <= 1 as sampleBy requires -- confirm for other datasets.
ratio = (df.count() - on_time_flights_count) / on_time_flights_count
# under sample the redundant class
# Since roughly 20% of the total flights are delayed, taking 20% of 0s and 100% of 1s into dataset
# We are doing this to balance the dataset
df = df.sampleBy('label', {0: ratio, 1: 1})
#df.show(20)
# Average arrival delay per airline, for the bar chart below
sub_df = df.groupBy('AIRLINE').agg({"ARRIVAL_DELAY": "avg"}).withColumnRenamed('avg(ARRIVAL_DELAY)', 'AVG_ARRIVAL_DELAY')
df_pandas = sub_df.toPandas()
df_pandas.plot(x='AIRLINE', y= 'AVG_ARRIVAL_DELAY', kind='bar', figsize=(10, 8))
plt.grid(which='major', linestyle='-', linewidth=0.5, color='green')
plt.grid(which='minor', linestyle=':', linewidth=0.5, color='black')
plt.show()
| true |
bec64435614e5fad3c2ea6178d3d7aac7babbd2f | Python | sharmaabhijith/Soft_Adversarial_Training | /model/network/LeNet.py | UTF-8 | 2,337 | 2.703125 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import copy
from torchvision.utils import make_grid
from matplotlib.pyplot import MultipleLocator
class LeNet(nn.Module):
    """LeNet-style CNN for 1x28x28 inputs producing log-probabilities over 10 classes."""

    def __init__(self):
        super(LeNet, self).__init__()
        # Feature extractor: two 5x5 convolutions (1 -> 6 -> 16 channels).
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Classifier head: 16*4*4 flattened features -> 120 -> 84 -> 10.
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return per-class log-softmax scores of shape (batch, 10)."""
        # Conv stage: conv -> ReLU -> 2x2 max-pool, applied twice.
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        # Flatten all feature maps per sample.
        x = x.view(x.size(0), -1)
        # Fully connected stage with ReLU activations.
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return F.log_softmax(self.fc3(x), dim=1)
def show_graph(x, string):
    """Display each feature map of the first sample in batch `x`.

    `string` labels the network stage for the plot title.  Assumes x is a
    4D activation tensor (batch, channel, H, W) -- TODO confirm.
    """
    # y = copy.deepcopy(x[0][0])
    # make_grid(y)
    # plt.imshow(y.cpu().numpy(), cmap='gray')
    # plt.grid()
    # plt.show()
    y = copy.deepcopy(x[0])
    print(y[0])
    # undo the (mean 0.1307, std 0.3081) input normalisation for display
    y = y * 0.3081 + 0.1307
    y = y.cpu().numpy()
    print(len(y[0]))
    print(y[0])
    ax = plt.gca()
    # use coarser axis ticks for larger feature maps
    if len(y[0]) < 10:
        x_major_locator=MultipleLocator(1)
        y_major_locator=MultipleLocator(1)
    else:
        x_major_locator=MultipleLocator(5)
        y_major_locator=MultipleLocator(5)
    ax.xaxis.set_major_locator(x_major_locator)
    ax.yaxis.set_major_locator(y_major_locator)
    for i in range(len(y)):
        # NOTE(review): plots channel 0 on every iteration while the title
        # counts i+1 -- probably meant y[i]; confirm before changing.
        plt.imshow(y[0], cmap='gray')
        #plt.grid(b=True, which="major", axis="both", ls="--")
        #plt.xlim(0, len(y[0]))
        #plt.ylim(len(y[0]), 0)
        plt.title("LeNet_{}_{}".format(string, i + 1))
        #plt.savefig("E:/WorkSpace/Pytorch/mnist/model/lenet_feature_map/{}_{}".format(string, i + 1))
        plt.show()
| true |
edad2201b152df03ac370c826d1135ea54f87ff1 | Python | PonderLY/ACTOR | /code/crossorder.py | UTF-8 | 12,606 | 2.609375 | 3 | [] | no_license | """
Add User
20180923
Author: Liu Yang
"""
import numpy as np
import time
import ast
from copy import deepcopy
from collections import defaultdict
import itertools
import pickle
import random
import math
import os, sys
import pdb
from paras import load_params
from sklearn.preprocessing import normalize
from crossdata import CrossData
from evaluator import QuantitativeEvaluator, QualitativeEvaluator
from subprocess import call, check_call
class HighOrder(object):
def __init__(self, pd, graph_train, graph_test):
self.pd = pd
self.g = graph_train
self.g_test = graph_test
self.nt2nodes = self.construct_nt2nodes()
self.et2net = self.construct_et2net()
# self.construct_2nd_order_edges()
# self.et2net = self.construct_et2net_from_adjacency()
self.nt2vecs = None # center vectors
self.nt2cvecs = None # context vectors
def construct_nt2nodes(self):
"""
Construct self.nt2nodes from self.g.node_type and self.g.node_dict
self.g.node_type is a dictionary whose key is the node type and
value is a global node_id list of this type
self.g.node_dict is a dictionary whose key is global node id and
value is a list [node_type, intype_id, value]
self.nt2nodes stores local id for type 't' and 'l' and value for 'w' and 'u'
"""
nt2nodes = {nt:set() for nt in self.pd['nt_list']}
for nt in ['t','l']:
for n_id in self.g.node_type[nt]:
nt2nodes[nt].add(int(self.g.node_dict[n_id][1]))
print('The num of node type {} is {}'.format(nt, len(self.g.node_type[nt])))
for nt in ['w','u']:
for n_id in self.g.node_type[nt]:
nt2nodes[nt].add(self.g.node_dict[n_id][2])
print('The num of node type {} is {}'.format(nt, len(self.g.node_type[nt])))
print('There are {} nodes in total!'.format(self.g.node_num))
return nt2nodes
def construct_et2net(self):
"""
Construct self.et2net from self.g.et2net
self.g.et2net is a dictionary
the first key is the edge type
the second key is a 2-tuple (start_node, end_node) global id
value is the weight
self.et2net is a dictionary
the first key is the edge type
the second key is the start_node local id or value for word
the third key is the end_node local id or value for word
value is the weight
"""
node_dict = self.g.node_dict
et2net = defaultdict(lambda : defaultdict(lambda : defaultdict(float)))
for key_et in self.g.et2net.keys():
for key_s, key_t in self.g.et2net[key_et].keys():
if key_et[0]=='w':
s = node_dict[key_s][2]
elif key_et[0]=='u':
s = node_dict[key_s][2]
else:
s = int(node_dict[key_s][1])
if key_et[1]=='w':
t = node_dict[key_t][2]
elif key_et[1]=='u':
t = node_dict[key_t][2]
else:
t = int(node_dict[key_t][1])
et2net[key_et][s][t] = self.g.et2net[key_et][(key_s,key_t)]
print('There are {} edges in total!'.format(self.g.edge_num))
return et2net
def construct_2nd_order_edges(self):
second_type = ['tt', 'll']
multiply_type = ['tw', 'wt', 'lw', 'wl']
edge_num = self.g.edge_num
weight = {}
for key_et in multiply_type:
weight[key_et] = np.mat(self.construct_adjacency_matrix(key_et))
weight['tt'] = weight['tw']*weight['wt']
weight['ll'] = weight['lw']*weight['wl']
for key_et in second_type:
W = weight[key_et]
row, col = W.shape
for s in xrange(row):
for t in xrange(col):
if W[s,t] >1e-2:
self.et2net[key_et][s][t] = W[s,t]
edge_num += 1
print('There are {} edges in total!'.format(edge_num))
print("Activity Graph Edge Types are {}".format(self.et2net.keys()))
def construct_et2net_from_adjacency(self):
node_dict = self.g.node_dict
edge_type = self.g.edge_type
et2net = defaultdict(lambda : defaultdict(lambda : defaultdict(float)))
weight = {}
for key_et in edge_type:
weight[key_et] = np.mat(self.construct_adjacency_matrix(key_et))
weight['tt'] = weight['tw']*weight['wt']
weight['ll'] = weight['lw']*weight['wl']
edge_type.append('tt')
edge_type.append('ll')
for key_et in edge_type:
W = weight[key_et]
row, col = W.shape
for s in xrange(row):
for t in xrange(col):
if W[s,t] >1e-2:
if key_et[0]=='w':
key_id = self.g.node_id2id['w'][s]
key_s = node_dict[key_id][2]
elif key_et[0]=='u':
key_id = self.g.node_id2id['u'][s]
key_s = node_dict[key_id][2]
else:
key_s = s
if key_et[1]=='w':
key_id = self.g.node_id2id['w'][t]
key_t = node_dict[key_id][2]
elif key_et[1]=='u':
key_id = self.g.node_id2id['u'][t]
key_t = node_dict[key_id][2]
else:
key_t = t
et2net[key_et][key_s][key_t] = W[s,t]
print('There are {} edges in total!'.format(self.g.edge_num))
print("Activity Graph Edge Types are {}".format(et2net.keys()))
return et2net
def construct_adjacency_matrix(self, et):
"""
Construct adjacency matrix of edge type et from self.g.et2net[et]
adj_matrix[s, t] stores the weight of edge (s, t), s and t are local ids
"""
node_dict = self.g.node_dict
start_num = len(self.g.node_type[et[0]])
end_num = len(self.g.node_type[et[1]])
adj_matrix = np.zeros(shape=(start_num, end_num), dtype=np.float32)
# out_of_range = 0
for key_s,key_t in self.g.et2net[et].keys():
s = int(node_dict[key_s][1])
t = int(node_dict[key_t][1])
adj_matrix[s, t] = self.g.et2net[et][(key_s, key_t)]
# try:
# adj_matrix[s, t] = self.g.et2net[et][(key_s, key_t)]
# except:
# out_of_range += 1
# row normalization
# return normalize(adj_matrix, norm='l1')
# if out_of_range > 0:
# print('edge type {} out of range num is {}'.format(et, out_of_range))
return adj_matrix
def fit(self):
self.embed_algo = GraphEmbed(self.pd)
self.nt2vecs, self.nt2cvecs = self.embed_algo.fit(self.nt2nodes, self.et2net, self.pd['epoch']*self.pd['train_size'])
# self.nt2vecs, self.nt2cvecs = self.embed_algo.fit(self.nt2nodes, self.et2net, 100000000)
def mr_predict(self):
test_data = pickle.load(open(self.pd['test_data_path'], 'r'))
predictor = pickle.load(open(self.pd['model_pickled_path'], 'r'))
predictor.update_vec_cvec(self.nt2vecs, self.nt2cvecs)
start_time = time.time()
for t in self.pd['predict_type']:
evaluator = QuantitativeEvaluator(predict_type=t)
if self.pd['new_test_method']:
evaluator.get_ranks_from_test_graph(test_data, predictor, self.g_test)
mrr, mr = evaluator.compute_mrr()
print('Type:{} mr: {}, mrr: {} '.format(evaluator.predict_type, mr, mrr))
mrr, mr = evaluator.compute_highest_mrr()
print('Type:{} hmr: {}, hmrr: {} '.format(evaluator.predict_type, mr, mrr))
else:
evaluator.get_ranks(test_data, predictor)
# evaluator.get_ranks_with_output(test_data, predictor, config.result_pre+str(epoch)+t+'.txt')
mrr, mr = evaluator.compute_mrr()
print('Type:{} mr: {}, mrr: {} '.format(evaluator.predict_type, mr, mrr))
print("Prediction done, elapsed time {}s".format(time.time()-start_time))
if pd['perform_case_study']:
self.run_case_study(predictor, self.pd)
def run_case_study(self, model, pd):
start_time = time.time()
evaluator = QualitativeEvaluator(model, pd['case_dir'])
for word in ['food', 'restaurant', 'beach', 'weather', 'clothes', 'nba']:
evaluator.getNbs1(word)
for location in [[34.043021,-118.2690243], [33.9424, -118.4137], [34.008, -118.4961], [34.0711, -118.4434]]:
evaluator.getNbs1(location)
evaluator.getNbs2('outdoor', 'weekend')
print('Case study done. Elapsed time:{} '.format(round(time.time()-start_time)))
class GraphEmbed(object):
def __init__(self, pd):
self.pd = pd
self.nt2vecs = dict()
self.nt2cvecs = dict()
self.path_prefix = 'GraphEmbed/'
self.path_suffix = '-'+str(os.getpid())+'.txt'
def fit(self, nt2nodes, et2net, sample_size):
self.write_line_input(nt2nodes, et2net)
self.execute_line(sample_size)
self.read_line_output()
return self.nt2vecs, self.nt2cvecs
def write_line_input(self, nt2nodes, et2net):
if 'c' not in nt2nodes: # add 'c' nodes (with no connected edges) to comply to Line's interface
nt2nodes['c'] = self.pd['category_list']
for nt, nodes in nt2nodes.items():
# print nt, len(nodes)
node_file = open(self.path_prefix+'node-'+nt+self.path_suffix, 'w')
for node in nodes:
node_file.write(str(node)+'\n')
all_et = [nt1+nt2 for nt1, nt2 in itertools.product(nt2nodes.keys(), repeat=2)]
for et in all_et:
edge_file = open(self.path_prefix+'edge-'+et+self.path_suffix, 'w')
if et in et2net:
for u, u_nb in et2net[et].items():
for v, weight in u_nb.items():
edge_file.write('\t'.join([str(u), str(v), str(weight), 'e'])+'\n')
def execute_line(self, sample_size):
command = ['./hin2vec']
command += ['-size', str(self.pd['dim'])]
command += ['-negative', str(self.pd['negative'])]
command += ['-alpha', str(self.pd['alpha'])]
sample_num_in_million = max(1, sample_size/1000000)
command += ['-samples', str(sample_num_in_million)]
command += ['-threads', str(10)]
command += ['-second_order', str(self.pd['second_order'])]
command += ['-activity_mode', str(self.pd['activity_mode'])]
command += ['-schedule_report', str(self.pd['schedule_report'])]
command += ['-job_id', str(os.getpid())]
# call(command, cwd=self.path_prefix, stdout=open('stdout.txt','wb'))
call(command, cwd=self.path_prefix)
def read_line_output(self):
for nt in self.pd['nt_list']:
for nt2vecs,vec_type in [(self.nt2vecs,'output-'), (self.nt2cvecs,'context-')]:
vecs_path = self.path_prefix+vec_type+nt+self.path_suffix
vecs_file = open(vecs_path, 'r')
vecs = dict()
for line in vecs_file:
node, vec_str = line.strip().split('\t')
try:
node = ast.literal_eval(node)
except: # when nt is 'w', the type of node is string
pass
vecs[node] = np.array([float(i) for i in vec_str.split(' ')])
nt2vecs[nt] = vecs
for f in os.listdir(self.path_prefix): # clean up the tmp files created by this execution
if f.endswith(self.path_suffix):
os.remove(self.path_prefix+f)
if __name__ == "__main__":
para_file = sys.argv[1]
pd = load_params(para_file) # load parameters as a dict
g_train = CrossData(pd['node_dict'], pd['graph_edges'])
g_test = CrossData(pd['node_dict'], pd['test_edges'])
model = HighOrder(pd, g_train, g_test)
if pd['iter_num']:
iter_num = pd['iter_num']
else:
iter_num = 1
for k in range(iter_num):
print("Start the {}th training!".format(k))
start_time = time.time()
model.fit()
print("Model training done, elapsed time {}s".format(time.time()-start_time))
model.mr_predict() | true |
cc699c7e6a3c9fc1329f767190c2f54e439c3287 | Python | qiankl/spark-learning | /train_with_generator.py | UTF-8 | 7,522 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# # Food Classification with Deep Learning in Keras / Tensorflow
# ## *Computer, what am I eating anyway?*
# ## Experiment
# ### Loading and Preprocessing Dataset
# Let's import all of the packages needed for the rest of the notebook:
# In[1]:
import matplotlib.pyplot as plt
import matplotlib.image as img
import numpy as np
from scipy.misc import imresize
get_ipython().run_line_magic('matplotlib', 'inline')
import os
from os import listdir
from os.path import isfile, join
import shutil
import stat
import collections
from collections import defaultdict
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
import h5py
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
from keras.applications.inception_v3 import preprocess_input
from keras.models import load_model
# In[2]:
sc.stop()
# In[3]:
from elephas.spark_model import SparkModel
from elephas.utils.rdd_utils import to_simple_rdd
from pyspark import SparkContext, SparkConf
# Create Spark context
conf = SparkConf().setAppName('Spark_MLP').setMaster('yarn')
sc = SparkContext(conf=conf)
# Download the dataset and extract it within the notebook folder. It may be easier to do this in a separate terminal window.
# A `multiprocessing.Pool` will be used to accelerate image augmentation during training.
# In[ ]:
# import multiprocessing as mp
# num_processes = 6
# pool = mp.Pool(processes=num_processes)
# We need maps from class to index and vice versa, for proper label encoding and pretty printing.
# In[4]:
class_to_ix = {}
ix_to_class = {}
with open('../food-101/meta/classes.txt', 'r') as txt:
classes = [l.strip() for l in txt.readlines()]
class_to_ix = dict(zip(classes, range(len(classes))))
ix_to_class = dict(zip(range(len(classes)), classes))
class_to_ix = {v: k for k, v in ix_to_class.items()}
sorted_class_to_ix = collections.OrderedDict(sorted(class_to_ix.items()))
# The Food-101 dataset has a provided train/test split. We want to use this in order to compare our classifcation performance with other implementations.
# In[5]:
# Only split files if haven't already
if not os.path.isdir('../food-101/test') and not os.path.isdir('../food-101/train'):
def copytree(src, dst, symlinks = False, ignore = None):
if not os.path.exists(dst):
os.makedirs(dst)
shutil.copystat(src, dst)
lst = os.listdir(src)
if ignore:
excl = ignore(src, lst)
lst = [x for x in lst if x not in excl]
for item in lst:
s = os.path.join(src, item)
d = os.path.join(dst, item)
if symlinks and os.path.islink(s):
if os.path.lexists(d):
os.remove(d)
os.symlink(os.readlink(s), d)
try:
st = os.lstat(s)
mode = stat.S_IMODE(st.st_mode)
os.lchmod(d, mode)
except:
pass # lchmod not available
elif os.path.isdir(s):
copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def generate_dir_file_map(path):
dir_files = defaultdict(list)
with open(path, 'r') as txt:
files = [l.strip() for l in txt.readlines()]
for f in files:
dir_name, id = f.split('/')
dir_files[dir_name].append(id + '.jpg')
return dir_files
train_dir_files = generate_dir_file_map('food-101/meta/train.txt')
test_dir_files = generate_dir_file_map('food-101/meta/test.txt')
def ignore_train(d, filenames):
print(d)
subdir = d.split('/')[-1]
to_ignore = train_dir_files[subdir]
return to_ignore
def ignore_test(d, filenames):
print(d)
subdir = d.split('/')[-1]
to_ignore = test_dir_files[subdir]
return to_ignore
copytree('../food-101/images', '../food-101/test', ignore=ignore_train)
copytree('../food-101/images', '../food-101/train', ignore=ignore_test)
else:
print('Train/Test files already copied into separate folders.')
# We are now ready to load the training and testing images into memory. After everything is loaded, about 80 GB of memory will be allocated.
#
# Any images that have a width or length smaller than `min_size` will be resized. This is so that we can take proper-sized crops during image augmentation.
# In[6]:
from PIL import Image
x=np.array(Image.open('/home/hduser/food/food-101/train/apple_pie/208041.jpg').resize((150,150)))
x.shape
# In[7]:
from PIL import Image
def load_images(root,num=20):
all_imgs = []
all_classes = []
resize_count = 0
invalid_count = 0
for i, subdir in enumerate(listdir(root)):
imgs = listdir(join(root, subdir))
class_ix = class_to_ix[subdir]
print(i, class_ix, subdir)
n=0
for img_name in imgs:
if n < num:
img_arr = np.array(Image.open(join(root, subdir, img_name)).resize((200,200)))
img_arr_rs = img_arr
try:
if img_arr_rs.shape!=(200,200,3):
continue
all_imgs.append(img_arr_rs)
all_classes.append(class_ix)
n+=1
except:
invalid_count += 1
print(len(all_imgs), 'images loaded')
print(resize_count, 'images resized')
print(invalid_count, 'images skipped')
return np.array(all_imgs), np.array(all_classes)
# In[8]:
X_test, y_test = load_images('../food-101/test',50)
X_train, y_train = load_images('../food-101/train',20)
# In[9]:
print('X_train shape', X_train.shape)
print('y_train shape', y_train.shape)
print('X_test shape', X_test.shape)
print('y_test shape', y_test.shape)
# In[10]:
from keras.utils.np_utils import to_categorical
n_classes = 101
y_train_cat = to_categorical(y_train, num_classes=n_classes,dtype='int')
y_test_cat = to_categorical(y_test, num_classes=n_classes,dtype='int')
# In[11]:
# Build RDD from numpy features and labels
rdd = to_simple_rdd(sc, X_train, y_train_cat)
# In[17]:
from keras import layers
from keras import models
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',padding='SAME',
input_shape=(200, 200, 3))) #we need to specify the size of images, 150 x 150 in our case
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu',padding='SAME'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu',padding='SAME'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu',padding='SAME'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(101, activation='softmax'))
# In[18]:
model.summary()
# In[19]:
from keras import optimizers
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['acc'])
# In[20]:
spark_model = SparkModel(model, mode='synchronous')
# In[21]:
spark_model.fit(rdd, epochs=2, batch_size=2, verbose=1, validation_split=0.1)
# In[22]:
score = spark_model.master_network.evaluate(X_test, y_test_cat, verbose=1)
# In[23]:
print(score)
| true |
cd920ec6e5d475f6e1fb56e09f6d5d748291c23f | Python | akimi-yano/algorithm-practice | /lc/review_129.SumRootToLeafNumbers.py | UTF-8 | 3,429 | 4.03125 | 4 | [] | no_license | # 129. Sum Root to Leaf Numbers
# Medium
# 3194
# 67
# Add to List
# Share
# You are given the root of a binary tree containing digits from 0 to 9 only.
# Each root-to-leaf path in the tree represents a number.
# For example, the root-to-leaf path 1 -> 2 -> 3 represents the number 123.
# Return the total sum of all root-to-leaf numbers. Test cases are generated so that the answer will fit in a 32-bit integer.
# A leaf node is a node with no children.
# Example 1:
# Input: root = [1,2,3]
# Output: 25
# Explanation:
# The root-to-leaf path 1->2 represents the number 12.
# The root-to-leaf path 1->3 represents the number 13.
# Therefore, sum = 12 + 13 = 25.
# Example 2:
# Input: root = [4,9,0,5,1]
# Output: 1026
# Explanation:
# The root-to-leaf path 4->9->5 represents the number 495.
# The root-to-leaf path 4->9->1 represents the number 491.
# The root-to-leaf path 4->0 represents the number 40.
# Therefore, sum = 495 + 491 + 40 = 1026.
# Constraints:
# The number of nodes in the tree is in the range [1, 1000].
# 0 <= Node.val <= 9
# The depth of the tree will not exceed 10.
# This solution works:
'''
The idea is traverse our tree, using any tree traversal algorighm. I choose dfs, and also I directly change the values of our tree.
If we reach non-existing node (None), we just return back.
If we reached leaf, that is it do not have any children, return value of this node.
Update values for left and right children if they exist.
Finally, call function recursively for left and right children and return sum of results for left and right.
image
We start traverse from root, and we replace its children 9 and 0 with 49 and 40.
Then for 49 we replace its children 5 and 1 with 495 and 491.
Finally, we evaluate sum of all leafs: 40 + 495 + 491.
Complexity: time complexity can be potentially O(nh), where n is number of nodes and h is number of levels, because at each step our numbers become bigger and bigger. If we assume that number we met will always be in 32int range, then can say that complexity is O(n). Space complexity is O(h) to keep the stack of recursion.
'''
class Solution:
def sumNumbers(self, root):
if not root: return 0
if not root.left and not root.right:
return int(root.val)
if root.left: root.left.val = 10*root.val + root.left.val
if root.right: root.right.val = 10*root.val + root.right.val
return self.sumNumbers(root.left) + self.sumNumbers(root.right)
# This solution works:
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def sumNumbers(self, root):
def helper(cur, nums):
nonlocal ans
if not cur.left and not cur.right:
new_nums = list(nums)
new_nums.append(cur.val)
i = 0
total = 0
while new_nums:
val = new_nums.pop()
total += val*10**i
i += 1
ans += total
return
if cur.left:
helper(cur.left, nums + [cur.val])
if cur.right:
helper(cur.right, nums + [cur.val])
ans = 0
helper(root, [])
return ans | true |
389b2422a6b4b9273d39b77abbcd8fad6de2fd63 | Python | laura-yuan/KaggleSpeechRecognition | /system_config.py | UTF-8 | 301 | 2.875 | 3 | [] | no_license | def get_system_config():
import csv
# first, get into the folder of system config....
system_config = {}
with open('systemConfig.csv') as f:
content = csv.reader(f, delimiter = ';')
for row in content:
system_config[row[0]] = row[1]
return system_config | true |
e162776f770b5bf38bbb5285eb5cfcf7ddb69bfa | Python | hareton0807/CMPUT_206 | /lab3/part1.py | UTF-8 | 2,390 | 2.765625 | 3 | [] | no_license | import cv2
import numpy as np
import scipy
from scipy import ndimage, misc
import matplotlib.pyplot as plt
import math
def main():
# Read an grayscale image
img = cv2.imread("frame180.jpg",0)
## cv2.imshow("Gradient magnitude image",mag)
## cv2.waitKey(0)
## cv2.destroyAllWindows()
## # Display the image
## cv2.imshow("image",img)
## cv2.waitKey(0)
## cv2.destroyAllWindows()
#horizontal edges
kernel = np.zeros((3,3),int)
kernel[0][0] = 1
kernel[1][0] = 2
kernel[2][0] = 1
kernel[0][2] = -1
kernel[1][2] = -2
kernel[2][2] = -1
#print(kernel)
dst = cv2.filter2D(img,-1,kernel)
#vertical edges
kernel2 = np.zeros((3,3),int)
kernel2[0][0] = 1
kernel2[0][1] = 2
kernel2[0][2] = 1
kernel2[2][0] = -1
kernel2[2][1] = -2
kernel2[2][2] = -1
dst2 = cv2.filter2D(img,-1,kernel2)
# depth
kernel3 = np.zeros((3,3,3),int)
f0 = kernel3[0]
f0[0][0] = f0[0][2] = f0[2][0] = f0[2][2] = 1
f0[0][1] = f0[1][0] = f0[1][2] = f0[2][1] = 2
f0[1][1] = 4
f2 = kernel3[2]
f2[0][0] = f2[0][2] = f2[2][0] = f2[2][2] = -1
f2[0][1] = f2[1][0] = f2[1][2] = f2[2][1] = -2
f2[1][1] = -4
print("k3: ",kernel3)
# c processing
## dx = ndimage.sobel(img, 0) # x derivative
## dy = ndimage.sobel(img, 1) # y derivative
## #dz = ndimage.sobel(your3Dmatrix, 2) # z derivative
## result = abs(dx) + abs(dy)
##
## result = np.array(result,dtype=np.uint8)
## cv2.imshow("gradient",result)
## cv2.waitKey(0)
## cv2.destroyAllWindows()
##
##
## r2 = ndimage.sobel(img)
## r2 = np.array(r2,dtype=np.uint8)
## cv2.imshow("gradient",r2)
## cv2.waitKey(0)
## cv2.destroyAllWindows()
##
## compare = result - r2
## print(compare[0:10,0:10])
#Display those two images
cv2.imshow("vertical edges",dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imshow("horizontal edges",dst2)
cv2.waitKey(0)
cv2.destroyAllWindows()
#Compute magnitude
for i in range(img.shape[0]):
for j in range(img.shape[1]):
img[i][j] = abs(dst[i][j]) + abs(dst2[i][j])
#Display the gradient magnitude image
cv2.imshow("Gradient magnitude image",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
main()
| true |
7b4e51aff59383b23b7eff91c0836d2229ec2c63 | Python | mmmaaaggg/RefUtils | /src/fh_tools/language_test/Test/forfor.py | UTF-8 | 489 | 3.078125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on 2017/7/29
@author: MG
"""
import pandas as pd
file_path = r'd:\Downloads\Account600030.xls'
data_df = pd.read_excel(file_path)
import xlrd
# 获取一个Book对象
book = xlrd.open_workbook(file_path)
# 获取一个sheet对象的列表
sheets = book.sheets()
# 遍历每一个sheet,输出这个sheet的名字(如果是新建的一个xls表,可能是sheet1、sheet2、sheet3)
for sheet in sheets:
print(sheet.name)
| true |
5028753dadc2153ddf79d7b1018629d4b7e615de | Python | verbal-noun/tf-digit-recogniser- | /model.py | UTF-8 | 6,985 | 2.5625 | 3 | [] | no_license | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
from numpy import array
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import ShuffleSplit
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, StandardScaler
LABELS = 10 # Number of labls(1-10)
IMAGE_WIDTH = 28 # Width/height if the image
COLOR_CHANNELS = 1 # Number of color channels
VALID_SIZE = 1000 # Size of the Validation data
EPOCHS = 20000 # Number of epochs to run
BATCH_SIZE = 32 # SGD Batch size
FILTER_SIZE = 5 # Filter size for kernel
DEPTH = 32 # Number of filters/templates
FC_NEURONS = 1024 # Number of neurons in the fully
# connected later
LR = 0.001 # Learning rate Alpha for SGD
FLAGS = None
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
def deepnn(x):
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
x = tf.reshape(x, [-1, IMAGE_WIDTH, IMAGE_WIDTH, COLOR_CHANNELS])
# First convolution layer - maps one grayscale image to 8 feature maps.
w1 = weight_variable([FILTER_SIZE, FILTER_SIZE, COLOR_CHANNELS, DEPTH])
b1 = bias_variable([DEPTH])
layer_conv1 = tf.nn.relu(conv_2d(x, w1) + b1)
# Pooling layer - downsamples by 2X.
layer_pool1 = max_pool_2x2(layer_conv1)
# Second convolutional layer -- maps 32 feature maps to 64.
w2 = weight_variable([FILTER_SIZE, FILTER_SIZE, DEPTH, DEPTH * 2])
b2 = bias_variable([DEPTH * 2])
layer_conv2 = tf.nn.relu(conv_2d(layer_pool1, w2) + b2)
# Second pooling layer.
layer_pool2 = max_pool_2x2(layer_conv2)
# Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
# is down to 7x7x64 feature maps -- maps this to 100 features.
wfc1 = weight_variable([IMAGE_WIDTH // 4 * IMAGE_WIDTH // 4 * 2 * DEPTH, FC_NEURONS])
bfc1 = bias_variable([FC_NEURONS])
flatten_pool2 = tf.reshape(layer_pool2, [-1, IMAGE_WIDTH // 4 * IMAGE_WIDTH // 4 * 2 * DEPTH])
layer_fc1 = tf.nn.relu(tf.matmul(flatten_pool2, wfc1) + bfc1)
# Dropout - controls the complexity of the model, prevents co-adaptation of
# features.
# keep_prob = tf.placeholder(tf.float32)
# layer_fc1_drop = tf.nn.dropout(layer_fc1,keep_prob)
# Map the 100 features to 10 classes, one for each digit
wfc2 = weight_variable([FC_NEURONS, LABELS])
bfc2 = bias_variable([LABELS])
y_conv = tf.matmul(layer_fc1, wfc2) + bfc2
return y_conv
# return y_conv,keep_prob
def conv_2d(x, W):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
"""max_pool_2x2 down samples a feature map by 2X."""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def main(_):
mnist = pd.read_csv('train.csv')
labels = np.array(mnist.pop('label'))
labels = LabelEncoder().fit_transform(labels)[:, None]
labels = OneHotEncoder().fit_transform(labels).todense()
mnist = StandardScaler().fit_transform(np.float32(mnist.values))
mnist = mnist.reshape(-1, IMAGE_WIDTH, IMAGE_WIDTH, COLOR_CHANNELS)
train_data, valid_data = mnist[:-VALID_SIZE], mnist[-VALID_SIZE:]
train_labels, valid_labels = labels[:-VALID_SIZE], labels[-VALID_SIZE:]
x = tf.placeholder(tf.float32, [None, IMAGE_WIDTH, IMAGE_WIDTH, COLOR_CHANNELS])
y_ = tf.placeholder(tf.float32, [None, 10])
y_conv = deepnn(x)
tf_pred = tf.nn.softmax(deepnn(x))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv)
cross_entropy = tf.reduce_mean(cross_entropy)
train_step = tf.train.GradientDescentOptimizer(LR).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
correct_prediction = tf.cast(correct_prediction, tf.float32)
accuracy = 100 * tf.reduce_mean(correct_prediction)
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
ss = ShuffleSplit(n_splits=EPOCHS, train_size=BATCH_SIZE)
ss.get_n_splits(train_data, train_labels)
history = [(0, np.nan, 10)] # Initial Error Measures
for step, (idx, _) in enumerate(ss.split(train_data, train_labels), start=1):
# fd = {x:train_data[idx], y_:train_labels[idx], keep_prob: 0.5}
fd = {x: train_data[idx], y_: train_labels[idx]}
sess.run(train_step, feed_dict=fd)
if step % 500 == 0:
# fd = {x:valid_data, y_:valid_labels, keep_prob: 0.5}
fd = {x: valid_data, y_: valid_labels}
valid_loss, valid_accuracy = sess.run([cross_entropy, accuracy], feed_dict=fd)
history.append((step, valid_loss, valid_accuracy))
print('Step %i \t Valid. Acc. = %f \n' % (step, valid_accuracy))
test = pd.read_csv('test.csv')
test_data = StandardScaler().fit_transform(np.float32(test.values)) # Convert the dataframe to a numpy array
test_data = test_data.reshape(-1, IMAGE_WIDTH, IMAGE_WIDTH,
COLOR_CHANNELS) # Reshape the data into 42000 2d images
# fd = {x:test_data, keep_prob: 1.0}
ss = ShuffleSplit(n_splits=28000, train_size=BATCH)
ss.get_n_splits(test_data)
test_labels = []
for (idx, _) in enumerate(ss.split(test_data), start=0):
temp_test_data = array(test_data[idx]).reshape(1, 28, 28, 1)
fd = {x: temp_test_data}
# image = array(img).reshape(1, 28,28,1)
test_pred = sess.run(tf_pred, feed_dict=fd)
temp = np.argmax(test_pred, axis=1)
test_labels.append(temp)
for i in range(len(test_labels)):
test_labels[i] = int(test_labels[i])
submission = pd.DataFrame(data={'ImageId': (np.arange(test_labels.shape[0]) + 1), 'Label': test_labels})
submission.index += 1
submission.to_csv('submission.csv', index=False)
#submission.tail()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str,
default='/tmp/tensorflow/mnist/input_data',
help='Directory for storing input data')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) | true |
66db3ea02ceb7437f2941a166fd1be5ac8f6b3e3 | Python | alainlou/leetcode | /p1685.py | UTF-8 | 521 | 3.015625 | 3 | [] | no_license | from typing import List
class Solution:
def getSumAbsoluteDifferences(self, nums: List[int]) -> List[int]:
n = len(nums)
left = [0]*n
for i in range(1,n):
left[i] += (nums[i]-nums[i-1])*(i)
left[i] += left[i-1]
right = [0]*n
for i in range(n-2, -1, -1):
right[i] += (nums[i+1]-nums[i])*(n-1-i)
right[i] += right[i+1]
ans = [0]*n
for i in range(n):
ans[i] = left[i] + right[i]
return ans
| true |
0b8c597ec73877fe6c1aef739ecc672cbb3e0aaa | Python | thanhpham3598/PhamHongThanh-Fundamental-C4E-14 | /session4/hw4/se2_a.py | UTF-8 | 136 | 3.890625 | 4 | [] | no_license | nums = [1, 6, 8, 1, 2, 1, 5, 6]
x = int(input('Enter a number:'))
print('{0} appears {1} times in my list'.format(x, nums.count(x)))
| true |
0e878016fadab1f137f3dd61c37197ecafa4511e | Python | viniciuskurt/LetsCode-PracticalProjects | /2-AdvancedStructures/Listas_com_While.py | UTF-8 | 438 | 4.25 | 4 | [] | no_license | # Uma forma inteligente de trabalhar é combinar Listas com Whiles
numeros = [1, 2, 3, 4, 5] #Criando e atribuindo valores a lista
indice = 0 #definindo contador no índice 0
while indice < 5: #Definindo repetição do laço enquanto menor que 5
print(numeros[indice]) #Exibe posição de determinado índice da lista
indice = indice + 1 #Incrementa mais um a cada volta no laço | true |
cdcd072002e891a68f107f5dcb218c2a50940df6 | Python | jesieOldenburg/python_book_1 | /exercises/jakes_flowers/arrangements/mothers_day_arrangement.py | UTF-8 | 1,429 | 3.4375 | 3 | [] | no_license | from . import Arrangement
from interfaces.not_refrigerated import INotRefrigerated
class Mothers_day_arrangement(Arrangement, INotRefrigerated):
"""
A class used to represent a arrangement of flowers
...
Attributes
----------
name : str
the name of the arrangement
stem_length : int
the length of each flowers stem in the arrangement
@inherited
flower_list : list
a list obj that contains the flowers within the arrangement
Methods
-------
display_flowers(self)
prints the flowers in the arrangement
@inherited
add_flower(self, flower)
takes a flower and appends it to the flower_list attribute
@inherited
print_flowers(self)
iterates through the flowers list and prints each flower
"""
def __init__(self):
super().__init__()
INotRefrigerated.__init__(self)
self.name = "Mother's Day Bouquet"
self.stem_length = 7
def add_flower(self, flower):
try:
if flower.refrigerated == False:
self.flower_list.append(flower)
print(f'You added a {flower.name} to the {self.name}')
except AttributeError:
raise AttributeError(f'You CANNOT add a refrigerated flower to the {self.name}') | true |
2c2e3550ee62acc9e775343ba99393673dfd1457 | Python | pkulwj1994/Contrastive_Divergence | /data.py | UTF-8 | 511 | 2.78125 | 3 | [] | no_license | import math
import torch
def sample_data(n_samples):
'''taken from https://github.com/kamenbliznashki/normalizing_flows/blob/master/bnaf.py'''
z = torch.randn(n_samples, 2)
scale = 4
sq2 = 1 / math.sqrt(2)
centers = [(1, 0), (-1, 0), (0, 1), (0, -1), (sq2, sq2),
(-sq2, sq2), (sq2, -sq2), (-sq2, -sq2)]
centers = torch.tensor([(scale * x, scale * y) for x, y in centers])
return sq2 * \
(0.5 * z + centers[torch.randint(len(centers), size=(n_samples,))])
| true |
5abc00493eaacd58685cd870c7a13e1430f14af0 | Python | samford100/nba-sentiment | /analysis.py | UTF-8 | 7,447 | 2.765625 | 3 | [] | no_license | import numpy as np
# import sklearn
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import tensorflow as tf
'''
select the autoregression j = 5 (go back 5 games)
win(0) = regress([
sen_com(-5), win(-5),
sen_com(-4), win(-4),
sen_com(-3), win(-3),
sen_com(-2), win(-2),
sen_com(-1), win(-1),
sen_com(0), team_name
])
'''
"""
reads the csv to a map of team_name to date ordered data
"""
def generate_timeseries_df(df):
    """Build a supervised-learning frame of sliding 3-game windows per team.

    Each output row carries the `sen_com`, `par_com` and `won` values of
    three consecutive games for one team, plus the team name and a
    tanking flag (see gen_vecs).
    NOTE(review): `sen_com`/`par_com` look like sentiment scores from the
    upstream CSV -- confirm against the data source.
    """
    rows = []
    for team, data in df.groupby(['team']):
        # extend() instead of repeated `rows = rows + vecs` (which copied
        # the accumulator on every team, i.e. quadratic work).
        rows.extend(gen_vecs(team, data, w=3))
    return pd.DataFrame(rows, columns=["sen_com-2", "par_com-2", "won-2",
                                       "sen_com-1", "par_com-1", "won-1",
                                       "sen_com-0", "par_com-0", "won",
                                       "team", "is_tanking"])
'''
generate vectors for each team with parameter j
'''
def gen_vecs(team, df, w):
    """Flatten sliding windows of `w` games into feature vectors.

    Each vector is the row-major flattening of the window's
    ['sen_com', 'par_com', 'won'] columns, followed by the team name and
    whether that team is on the hard-coded tanking list.
    """
    tanking_teams = ['Atlanta', 'Memphis', 'Sacramento', 'Dallas', 'Phoenix', 'Charlotte',
                     'LA Clippers', 'New York', 'Brooklyn', 'Chicago', 'LA Lakers', 'Orlando']
    vectors = []
    for start in range(len(df) - w):
        window = df[start:start + w][['sen_com', 'par_com', 'won']]
        flat = window.values.ravel()
        flat = np.append(flat, [team])
        flat = np.append(flat, [team in tanking_teams])
        vectors.append(flat)
    return vectors
def read_csv(file):
    """Load the sentiment CSV, build windowed features, train and score.

    Splits chronologically (first 70% train, remainder test), fits the
    classifier from train(), and prints the fraction of correct test
    predictions.
    """
    df = pd.read_csv(file)
    df = generate_timeseries_df(df)
    y = df[['won']]
    X = df.drop(['won'], axis=1)
    Z = onehot(X)
    # Chronological split: first 70% for training, the rest for testing.
    splitter = round(len(Z) * .7)
    print(splitter)
    # BUG FIX: the test slices previously started at splitter+1, which
    # silently dropped the row at index `splitter` from both sets.
    X_train, X_test = Z[:splitter], Z[splitter:]
    y_train, y_test = y[:splitter], y[splitter:]
    print(len(X_train))
    print(len(X_test))
    print(len(Z))
    y_hat = train(X_train, y_train.values.ravel(), X_test)
    right = 0
    for i in range(len(y_hat)):
        if y_hat[i] == y_test.values[i]:
            right += 1
    # NOTE(review): this prints a 0..1 fraction followed by a '%' sign,
    # not a true percentage -- kept as-is to preserve the output format.
    print('' + str(right / len(y_hat)) + "%")
def onehot(X):
    """One-hot encode the 'team' column of X.

    Returns a copy of X with 'team' replaced by one indicator column per
    team (columns named 0..k-1 by pandas).
    """
    label_encoder = LabelEncoder()
    onehot_encoder = OneHotEncoder(sparse=False)
    integer_encoded = label_encoder.fit_transform(X['team'])
    integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
    onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
    # (removed a dead `inverted = label_encoder.inverse_transform(...)`
    # round-trip whose result was never used)
    encoded_df = pd.DataFrame(onehot_encoded)
    # pd.concat aligns on index; X comes from a freshly built frame with a
    # default RangeIndex, so rows line up positionally here.
    Z = pd.concat([X, encoded_df], axis=1)
    return Z.drop(['team'], axis=1)
def multilayer_perceptron(x, weights, biases):
    """Build a two-hidden-layer ReLU network graph with a linear output.

    `weights`/`biases` are dicts with keys 'h1', 'h2', 'out'.
    """
    hidden1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
    hidden2 = tf.nn.relu(tf.add(tf.matmul(hidden1, weights['h2']), biases['b2']))
    # Output layer stays linear: the softmax is applied by the loss op.
    return tf.matmul(hidden2, weights['out']) + biases['out']
def train_tf(X_train, y_train, X_test, y_test):
    """Train the two-hidden-layer network with TF1 and print test accuracy.

    Expects one-hot labels with `num_classes` columns. Stores the argmax
    predictions for X_test in the module-level global `result`.
    """
    num_inputs = 11
    print(num_inputs)
    num_hidden_1, num_hidden_2 = 10, 10
    num_classes = 2
    learning_rate = .001
    training_epochs = 100
    weights = {
        'h1': tf.Variable(tf.random_normal([num_inputs, num_hidden_1])),
        'h2': tf.Variable(tf.random_normal([num_hidden_1, num_hidden_2])),
        'out': tf.Variable(tf.random_normal([num_hidden_2, num_classes]))
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([num_hidden_1])),
        'b2': tf.Variable(tf.random_normal([num_hidden_2])),
        'out': tf.Variable(tf.random_normal([num_classes]))
    }
    # construct model with placeholders
    x_ = tf.placeholder("float", [None, num_inputs])
    y_ = tf.placeholder("float", [None, num_classes])
    model = multilayer_perceptron(x_, weights, biases)
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=y_))
    optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(cost)
    init = tf.global_variables_initializer()
    # train
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(training_epochs):
            avg_cost = 0.0
            total_batch = int(len(X_train)/100)
            X_batches = np.array_split(X_train, total_batch)
            y_batches = np.array_split(y_train, total_batch)
            # Loop over all batches
            for i in range(total_batch):
                batch_x, batch_y = X_batches[i], y_batches[i]
                # Run optimization op (backprop) and cost op (to get loss value)
                _, c = sess.run([optimizer, cost], feed_dict={x_: batch_x,
                                                              y_: batch_y})
                # Compute average loss
                avg_cost += c / total_batch
            # Display logs per epoch step
            if epoch % 1 == 0:
                print("Epoch:", '%04d' % (epoch+1),
                      "cost=", "{:.9f}".format(avg_cost))
        print("Optimization Finished!")
        # BUG FIX: the evaluation below previously (a) compared against the
        # y_train array instead of the label placeholder, (b) fed the
        # undefined names `x`/`y` instead of the placeholders x_/y_, and
        # (c) sat outside the `with` block, so no default session existed.
        correct_prediction = tf.equal(tf.argmax(model, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print("Accuracy:", accuracy.eval({x_: X_test, y_: y_test}))
        global result
        result = tf.argmax(model, 1).eval({x_: X_test, y_: y_test})
def train(X_train, y_train, X_test):
    """Fit a logistic-regression classifier and return its predictions
    for X_test.

    The unused imports and `hidden_layer_sizes` are leftovers from
    earlier SVC / random-forest / MLP experiments; kept so the module's
    import side effects and printed output are unchanged.
    """
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.neural_network import MLPClassifier
    from sklearn.svm import SVC
    from sklearn.linear_model import LogisticRegression
    hidden_layer_sizes = (20, 20)
    model = LogisticRegression()
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    print(hidden_layer_sizes)
    return predictions
if __name__ == "__main__":
    # Entry point guard so importing this module no longer runs the
    # whole pipeline as a side effect.
    read_csv('./nba_sentiment.csv')
| true |
90fc2780c54f91d5f5141c68fd0c4f718e080192 | Python | chamarthyl/Learn-Python | /GlobalVariable.py | UTF-8 | 116 | 3.796875 | 4 | [] | no_license | x = "I want to learn "
y = "Python!"
# Join the two halves of the sentence before printing.
message = x + y
print(message)
# The literal 3 passed through the three basic scalar constructors.
a = str(3)
b = int(3)
c = float(3)
for value in (a, b, c):
    print(value)
| true |
428c917deeda6ddf27c08dd76c335b626790fb94 | Python | 6igsm0ke/Introduction-to-Programming-Using-Python-Liang-1st-edtion | /CH05/EX5.24.py | UTF-8 | 1,385 | 4.09375 | 4 | [] | no_license | # 5.24 (Financial application: loan amortization schedule) The monthly payment for a
# given loan pays the principal and the interest. The monthly interest is computed by
# multiplying the monthly interest rate and the balance (the remaining principal).
# The principal paid for the month is therefore the monthly payment minus the
# monthly interest. Write a program that lets the user enter the loan amount, number
# of years, and interest rate, and then displays the amortization schedule for the loan.
# NOTE(review): eval() on raw input executes arbitrary expressions --
# acceptable for this classroom exercise, unsafe for untrusted input.
loanAmount = eval(input("Loan amount: "))
numOfYears = eval(input("Number of years: "))
annualIR = eval(input("Annual Interest Rate: "))
print()
monthlyIR = annualIR / 1200
months = numOfYears * 12
# Standard annuity formula for the fixed monthly payment.
monthlyPayment = loanAmount * monthlyIR / (1 - (1 / (1 + monthlyIR)) ** months)
balance = loanAmount
# Amounts are truncated (not rounded) to two decimals via int().
print("Monthly Payment:", int(monthlyPayment * 100) / 100.0)
print("Total Payment:", int(monthlyPayment * 12 * numOfYears * 100) / 100.0)
print()
print(format("Payment#", "<15s"), format("Interest", "<15s"), format("Principal", "<15s"), format("Balance", "<15s"))
for month in range(1, months + 1):
    interest = int(monthlyIR * balance * 100) / 100.0
    principal = int((monthlyPayment - interest) * 100) / 100.0
    balance = int((balance - principal) * 100) / 100.0
    print(format(month, "<15d"), format(interest, "<15.2f"), format(principal, "<15.2f"), format(balance, "<15.2f"))
694ea72f384b9f3428408d9386de7e02cdad0a20 | Python | Divisekara/Python-Codes-First-sem | /PA2/PA2 2013/PA2-11/Asitha/pa2-11-2013.py | UTF-8 | 780 | 3.171875 | 3 | [] | no_license | n=0
words=[]  # filled by getText(): every whitespace token after the first, each as a list of characters
def getText():
try:
FileOpen=open("FileIn.txt","r")
L=map(list,FileOpen.read().split())
FileOpen.close()
except IOError:
print "File Not Found"
pass
else:
global n
global words
n=map(int,L[0])[0]
print n
words=L[1:]
def convert():
    """Transpose the global `words`: row i is the i-th character of every
    word that is long enough, concatenated in word order."""
    pieces = []
    for row in range(n):
        for word in words:
            # Skip words shorter than the current row (the original used
            # try/except IndexError for the same effect).
            if row < len(word):
                pieces.append(word[row])
    return "".join(pieces)
def show(s):
    # Print the converted text to stdout (Python 2 print statement).
    print s
def saveFile(s):
    # Write the string `s` to result.txt; print a message on I/O failure.
    try:
        FileCreate=open("result.txt","w")
        FileCreate.write(s)
        FileCreate.close()
    except IOError:
        print "File error."
        pass
if __name__ == "__main__":
    # Entry-point guard; also compute convert() once (it previously ran
    # twice: once for show() and again for saveFile()).
    getText()
    converted = convert()
    show(converted)
    saveFile(converted)
| true |
9ef387ac61d017d46c46dbd2bcb267bd10d72e10 | Python | BEEmod/BEE2.4 | /src/config/last_sel.py | UTF-8 | 2,400 | 2.578125 | 3 | [] | no_license | from typing import Dict, Union
import attrs
from srctools import Property
from srctools.dmx import Element
import config
@config.APP.register
@attrs.frozen(slots=False)
class LastSelected(config.Data, conf_name='LastSelected', uses_id=True):
    """Used for several general items, specifies the last selected one for restoration."""
    id: Union[str, None] = None

    @classmethod
    def parse_legacy(cls, conf: Property) -> Dict[str, 'LastSelected']:
        """Parse legacy config data."""
        legacy = conf.find_key('LastSelected', or_blank=True)
        # Legacy save ID -> modern save ID.
        id_map = [
            ('Game', 'game'),
            ('Style', 'styles'),
            ('Skybox', 'skyboxes'),
            ('Voice', 'voicelines'),
            ('Elevator', 'elevators'),
            ('Music_Base', 'music_base'),
            ('Music_Tbeam', 'music_tbeam'),
            ('Music_Bounce', 'music_bounce'),
            ('Music_Speed', 'music_speed'),
        ]
        result: Dict[str, 'LastSelected'] = {}
        for old_key, new_key in id_map:
            try:
                raw = legacy[old_key]
            except LookupError:
                continue
            result[new_key] = cls(None) if raw.casefold() == '<none>' else cls(raw)
        return result

    @classmethod
    def parse_kv1(cls, data: Property, version: int) -> 'LastSelected':
        """Parse Keyvalues data."""
        assert version == 1, version
        if data.has_children():
            raise ValueError(f'LastSelected cannot be a block: {data!r}')
        value = data.value
        if value.casefold() == '<none>':
            return cls(None)
        return cls(value)

    def export_kv1(self) -> Property:
        """Export to a property block."""
        if self.id is None:
            return Property('', '<NONE>')
        return Property('', self.id)

    @classmethod
    def parse_dmx(cls, data: Element, version: int) -> 'LastSelected':
        """Parse DMX elements."""
        assert version == 1, version
        # The explicit "none selected" flag wins over any stored value.
        if 'selected_none' in data and data['selected_none'].val_bool:
            return cls(None)
        return cls(data['selected'].val_str)

    def export_dmx(self) -> Element:
        """Export to a DMX element."""
        elem = Element('LastSelected', 'DMElement')
        if self.id is None:
            elem['selected_none'] = True
        else:
            elem['selected'] = self.id
        return elem
| true |
b62ca8914863320d2742d639eda357fe00ec329e | Python | priyanshik18/codechef | /march_cookoff/Box_of_chocolates.py | UTF-8 | 460 | 2.609375 | 3 | [] | no_license | #TLE error
try:
    # For each test case, rotate the list right one step at a time and
    # count how often the maximum falls outside the first half.
    # (Still O(n^2) per case -- the known cause of the TLE noted above.)
    cases = int(input())
    for _ in range(cases):
        n = int(input())
        weights = list(map(int, input().split()))
        half = int(n / 2)
        heaviest = max(weights)
        answer = 0
        if heaviest not in weights[:half]:
            answer += 1
        for _ in range(half):
            # Rotate right by one: last element moves to the front.
            weights = weights[-1:] + weights[:-1]
            if heaviest not in weights[:half]:
                answer += 1
        print(answer)
except:
    pass
| true |
bd9d989e4673479c7c52b507a55335789ae12469 | Python | zhujiang73/pytorch_mingw | /torch/_lowrank.py | UTF-8 | 10,419 | 3.234375 | 3 | [
"BSD-2-Clause"
] | permissive | """Implement various linear algebra algorithms for low rank matrices.
"""
__all__ = ['svd_lowrank', 'pca_lowrank']
from typing import Tuple, Optional
import torch
from torch import Tensor
from . import _linalg_utils as _utils
from ._overrides import has_torch_function, handle_torch_function
def get_approximate_basis(A,  # type: Tensor
                          q,  # type: int
                          niter=2,  # type: Optional[int]
                          M=None  # type: Optional[Tensor]
                          ):
    # type: (...) -> Tensor
    """Return a tensor :math:`Q` with :math:`q` orthonormal columns such
    that :math:`Q Q^H A` approximates :math:`A` (or :math:`A - M` when
    ``M`` is given).

    Implements Algorithm 4.4 of Halko et al., 2009 ("Finding structure
    with randomness", arXiv:0909.4061): random range finding followed by
    ``niter`` subspace (power) iterations, re-orthonormalising with a QR
    factorisation after every product. Reset the pseudorandom seed for
    repeatable results.

    Arguments:
        A (Tensor): input tensor of size :math:`(*, m, n)`.
        q (int): number of columns of :math:`Q`.
        niter (int, optional): number of subspace iterations; must be a
            nonnegative integer (default: 2).
        M (Tensor, optional): the input tensor's mean of size
            :math:`(*, 1, n)`.
    """
    if niter is None:
        niter = 2
    m, n = A.shape[-2:]
    dtype = _utils.get_floating_dtype(A)
    matmul = _utils.matmul

    # Random test matrix spanning the initial subspace estimate.
    R = torch.randn(n, q, dtype=dtype, device=A.device)

    A_H = _utils.transjugate(A)
    if M is None:
        Q = matmul(A, R).qr()[0]
        for _ in range(niter):
            Q = matmul(A_H, Q).qr()[0]
            Q = matmul(A, Q).qr()[0]
    else:
        # Work with A - M without ever materialising the difference.
        M_H = _utils.transjugate(M)
        Q = (matmul(A, R) - matmul(M, R)).qr()[0]
        for _ in range(niter):
            Q = (matmul(A_H, Q) - matmul(M_H, Q)).qr()[0]
            Q = (matmul(A, Q) - matmul(M, Q)).qr()[0]

    return Q
def svd_lowrank(A, q=6, niter=2, M=None):
    # type: (Tensor, Optional[int], Optional[int], Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor]
    r"""Return the singular value decomposition ``(U, S, V)`` of a matrix,
    batches of matrices, or a sparse matrix :math:`A` such that
    :math:`A \approx U diag(S) V^T` (of :math:`A - M` when ``M`` is given).

    Based on Algorithm 5.1 of Halko et al., 2009 (arXiv:0909.4061). The
    input is assumed to be low-rank; for dense matrices prefer the
    full-rank ``torch.svd``. Reset the pseudorandom seed for repeatable
    results.

    Arguments:
        A (Tensor): input tensor of size :math:`(*, m, n)`.
        q (int, optional): slightly overestimated rank of A.
        niter (int, optional): number of subspace iterations; must be a
            nonnegative integer (default: 2).
        M (Tensor, optional): the input tensor's mean of size
            :math:`(*, 1, n)`.
    """
    # __torch_function__ dispatch for tensor-like subclasses (skipped
    # under TorchScript, which cannot trace the protocol machinery).
    if not torch.jit.is_scripting():
        relevant = (A, M)
        all_plain = set(map(type, relevant)).issubset((torch.Tensor, type(None)))
        if not all_plain and has_torch_function(relevant):
            return handle_torch_function(svd_lowrank, relevant, A, q=q, niter=niter, M=M)
    return _svd_lowrank(A, q=q, niter=niter, M=M)
def _svd_lowrank(A, q=6, niter=2, M=None):
    # type: (Tensor, Optional[int], Optional[int], Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor]
    """Implementation of svd_lowrank: approximate SVD of A (or A - M)."""
    q = 6 if q is None else q
    m, n = A.shape[-2:]
    matmul = _utils.matmul
    if M is None:
        M_t = None
    else:
        M_t = _utils.transpose(M)
    A_t = _utils.transpose(A)

    # Algorithm 5.1 in Halko et al 2009, slightly modified to reduce
    # the number conjugate and transpose operations
    if m < n:
        # computing the SVD approximation of a transpose in order to
        # keep B shape minimal
        Q = get_approximate_basis(A_t, q, niter=niter, M=M_t)
        Q_c = _utils.conjugate(Q)
        if M is None:
            B_t = matmul(A, Q_c)
        else:
            B_t = matmul(A, Q_c) - matmul(M, Q_c)
        # B_t has at most q columns, so this dense SVD is cheap.
        U, S, V = torch.svd(B_t)
        # Map the right singular vectors back through the basis Q.
        V = Q.matmul(V)
    else:
        Q = get_approximate_basis(A, q, niter=niter, M=M)
        Q_c = _utils.conjugate(Q)
        if M is None:
            B = matmul(A_t, Q_c)
        else:
            B = matmul(A_t, Q_c) - matmul(M_t, Q_c)
        U, S, V = torch.svd(_utils.transpose(B))
        # Map the left singular vectors back through the basis Q.
        U = Q.matmul(U)

    return U, S, V
def pca_lowrank(A, q=None, center=True, niter=2):
    # type: (Tensor, Optional[int], bool, int) -> Tuple[Tensor, Tensor, Tensor]
    r"""Performs linear Principal Component Analysis (PCA) on a low-rank
    matrix, batches of such matrices, or sparse matrix.

    This function returns a namedtuple ``(U, S, V)`` which is the
    nearly optimal approximation of a singular value decomposition of
    a centered matrix :math:`A` such that :math:`A = U diag(S) V^T`.

    .. note:: The relation of ``(U, S, V)`` to PCA is as follows:

                - :math:`A` is a data matrix with ``m`` samples and
                  ``n`` features

                - the :math:`V` columns represent the principal directions

                - :math:`S ** 2 / (m - 1)` contains the eigenvalues of
                  :math:`A^T A / (m - 1)` which is the covariance of
                  ``A`` when ``center=True`` is provided.

                - ``matmul(A, V[:, :k])`` projects data to the first k
                  principal components

    .. note:: Different from the standard SVD, the size of returned
              matrices depend on the specified rank and q
              values as follows:

                - :math:`U` is m x q matrix

                - :math:`S` is q-vector

                - :math:`V` is n x q matrix

    .. note:: To obtain repeatable results, reset the seed for the
              pseudorandom number generator

    Arguments:

        A (Tensor): the input tensor of size :math:`(*, m, n)`

        q (int, optional): a slightly overestimated rank of
                           :math:`A`. By default, ``q = min(6, m,
                           n)``.

        center (bool, optional): if True, center the input tensor,
                                 otherwise, assume that the input is
                                 centered.

        niter (int, optional): the number of subspace iterations to
                               conduct; niter must be a nonnegative
                               integer, and defaults to 2.

    References::

        - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
          structure with randomness: probabilistic algorithms for
          constructing approximate matrix decompositions,
          arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
          `arXiv <http://arxiv.org/abs/0909.4061>`_).

    """
    # __torch_function__ dispatch for tensor-like subclasses.
    if not torch.jit.is_scripting():
        if type(A) is not torch.Tensor and has_torch_function((A,)):
            return handle_torch_function(pca_lowrank, (A,), A, q=q, center=center, niter=niter)

    (m, n) = A.shape[-2:]

    # Validate/derive the overestimated rank q.
    if q is None:
        q = min(6, m, n)
    elif not (q >= 0 and q <= min(m, n)):
        raise ValueError('q(={}) must be non-negative integer'
                         ' and not greater than min(m, n)={}'
                         .format(q, min(m, n)))
    if not (niter >= 0):
        raise ValueError('niter(={}) must be non-negative integer'
                         .format(niter))

    dtype = _utils.get_floating_dtype(A)

    if not center:
        return _svd_lowrank(A, q, niter=niter, M=None)

    if _utils.is_sparse(A):
        if len(A.shape) != 2:
            raise ValueError('pca_lowrank input is expected to be 2-dimensional tensor')
        # Column means of the sparse matrix (1 x n, still sparse).
        c = torch.sparse.sum(A, dim=(-2,)) / m
        # reshape c
        column_indices = c.indices()[0]
        indices = torch.zeros(2, len(column_indices),
                              dtype=column_indices.dtype,
                              device=column_indices.device)
        indices[0] = column_indices
        C_t = torch.sparse_coo_tensor(
            indices, c.values(), (n, 1), dtype=dtype, device=A.device)

        # Broadcast the mean row over all m rows via an outer product,
        # yielding the dense mean matrix M = ones(m, 1) @ c.
        ones_m1_t = torch.ones(A.shape[:-2] + (1, m), dtype=dtype, device=A.device)
        M = _utils.transpose(torch.sparse.mm(C_t, ones_m1_t))
        return _svd_lowrank(A, q, niter=niter, M=M)
    else:
        # Dense case: build M directly as ones(m, 1) @ C.
        c = A.sum(dim=(-2,)) / m
        C = c.reshape(A.shape[:-2] + (1, n))
        ones_m1 = torch.ones(A.shape[:-1] + (1, ), dtype=dtype, device=A.device)
        M = ones_m1.matmul(C)
        return _svd_lowrank(A - M, q, niter=niter, M=None)
| true |
7e46e0107bc7c83ae0ec4a231f4aaca3da5e8c7c | Python | itepifanio/maps-graphs | /map.py | UTF-8 | 1,102 | 2.625 | 3 | [
"MIT"
] | permissive | import folium; f_map = folium.Map; f_circle = folium.CircleMarker
import osmapi; osm_nodesget = osmapi.OsmApi().NodesGet; osm_nodeways = osmapi.OsmApi().NodeWays
import pickle # deserialize list from file
import json
import tqdm; tqdm = tqdm.tqdm # progress bar
# NOTE(review): Python-2-only code -- `.values() + .values()` below needs
# list-returning dict views, and `len(...)/2` must be an int for slicing.
tileset = r'https://api.mapbox.com/styles/v1/mapbox/streets-v9/tiles/256/{z}/{x}/{y}?access_token=pk.eyJ1IjoiamRpZWdvZ29uemFsZXMiLCJhIjoiY2l3aHV5MmhvMDAwNzJ0cGVrdmt5MDBxNSJ9.-4RsCL1bSGLP2A_x9XFNbQ'
# NOTE(review): `map` shadows the builtin of the same name from here on.
map = f_map(location=[-12.0786,-77.0551], tiles=tileset, attr="Grafiteros", zoom_start=15) # coordinates from openstreetmaps.org
# Load the previously pickled list of OSM node ids.
with open("in/nodesIds.p", "rb") as bfile:
    nodesIds = pickle.load(bfile)
# Fetch the nodes in two halves (presumably to stay under an API limit --
# confirm against the osmapi request-size restriction).
_half = len(nodesIds)/2
nodesIds1 = nodesIds[:_half]
nodesIds2 = nodesIds[_half:]
_raw_nodes = osm_nodesget(nodesIds1).values() + osm_nodesget(nodesIds2).values()
# Keep only nodes that carry a position, draw each as a circle marker.
intersection_nodes = []
for node in tqdm(_raw_nodes):
    if "lat" in node.keys():
        cd = [node["lat"], node["lon"]]
        n = {'id': node["id"], 'coordinates': cd}
        f_circle(cd, radius=10).add_to(map)
        intersection_nodes.append(n)
map.save("map.html")
# Persist the filtered nodes for later use.
with open("in/nodes.p", "wb") as bfile:
    pickle.dump(intersection_nodes, bfile)
| true |
3d48dc1b7d646912a395ad819048fed09f4c046c | Python | aloctavodia/density_estimation | /simulation/simulation.py | UTF-8 | 3,790 | 2.703125 | 3 | [] | no_license | import numpy as np
import pandas as pd
from sim_utils import print_status, simulate
# Target densities for the simulation study. Each entry gives the
# distribution parameters, the generator key used by sim_utils, and --
# for bounded-support densities -- a boundary-correction flag `bc` plus
# the support limits `lims`.
pdf_kwargs = {
    "gaussian_1": {"params": {"mean" : [0], "sd": [1]}, "func_key": "gaussian"},
    "gaussian_2": {"params": {"mean" : [0], "sd": [2]}, "func_key": "gaussian"},
    "gmixture_1": {"params": {"mean" : [-12, 12], "sd": [0.5, 0.5]}, "func_key": "gaussian"},
    "gmixture_2": {"params": {"mean" : [0, 5], "sd": [0.1, 1]}, "func_key": "gaussian"},
    "gmixture_3": {"params": {"mean" : [0, 0], "sd": [1, 0.1], "wt": [0.667, 0.333]}, "func_key": "gaussian"},
    "gmixture_4": {"params": {"mean" : [0, 1.5], "sd": [1, 0.33], "wt": [0.75, 0.25]}, "func_key": "gaussian"},
    "gmixture_5": {"params": {"mean" : [3.5, 9], "sd": [0.5, 1.5], "wt": [0.6, 0.4]}, "func_key": "gaussian"},
    "gamma_1": {"params": {"shape" : 1}, "func_key": "gamma", "bc": True, "lims": [0, 5]},
    "gamma_2": {"params": {"shape" : 2}, "func_key": "gamma", "bc": True, "lims": [0, 9.5]},
    "beta_1": {"params": {"a" : 2.5, "b" : 1.5}, "func_key": "beta", "bc": True, "lims": [0, 1]},
    "logn_1": {"params": {"scale" : 1}, "func_key": "logn", "bc": True, "lims": [0, 11]}
}

# Sheather-Jones gets smaller sizes and fewer iterations (it is the most
# expensive bandwidth selector in this study).
sizes = [200, 500, 1000, 5000, 10000]
sizes_sj = [200, 500, 1000]
niter = 500
niter_sj = 120

# Fixed seed for reproducibility of the whole study.
np.random.seed(1995)

print(f"Sizes: {sizes} and {sizes_sj} for Sheather-Jones")
print(f"Number of iterations: {niter} and {niter_sj} for Sheather-Jones")
print("-------------------------------------------")
# Fixed gaussian ------------------------------------------------------------
# One run per bandwidth selector; results are concatenated and written
# to output/fixed_gaussian.csv.
estimator_name = "fixed_gaussian"
mixture=False
print(f"Simulation with estimator {estimator_name}")

# Silverman
bw_name = "silverman"
print_status(bw_name)
df1 = simulate(estimator_name, bw_name, sizes, pdf_kwargs, mixture, niter)

# Scott
bw_name = "scott"
print_status(bw_name)
df2 = simulate(estimator_name, bw_name, sizes, pdf_kwargs, mixture, niter)

# SJ (reduced sizes/iterations -- see the constants above)
bw_name = "sj"
print_status(bw_name)
df3 = simulate(estimator_name, bw_name, sizes_sj, pdf_kwargs, mixture, niter_sj)

# ISJ
bw_name = "isj"
print_status(bw_name)
df4 = simulate(estimator_name, bw_name, sizes, pdf_kwargs, mixture, niter)

# Experimental
bw_name = "experimental"
print_status(bw_name)
df5 = simulate(estimator_name, bw_name, sizes, pdf_kwargs, mixture, niter)

df_fixed_gaussian = pd.concat([df1, df2, df3, df4, df5])
df_fixed_gaussian.to_csv("output/fixed_gaussian.csv", index=False)
# Adaptive gaussian ----------------------------------------------------------
# Same bandwidth selectors as above, with the adaptive estimator
# (`mixture` stays False from the previous section).
estimator_name = "adaptive_gaussian"
print("-------------------------------------------")
print(f"Simulation with estimator {estimator_name}")

# Silverman
bw_name = "silverman"
print_status(bw_name)
df1 = simulate(estimator_name, bw_name, sizes, pdf_kwargs, mixture, niter)

# Scott
bw_name = "scott"
print_status(bw_name)
df2 = simulate(estimator_name, bw_name, sizes, pdf_kwargs, mixture, niter)

# SJ
bw_name = "sj"
print_status(bw_name)
df3 = simulate(estimator_name, bw_name, sizes_sj, pdf_kwargs, mixture, niter_sj)

# ISJ
bw_name = "isj"
print_status(bw_name)
df4 = simulate(estimator_name, bw_name, sizes, pdf_kwargs, mixture, niter)

# Experimental
bw_name = "experimental"
print_status(bw_name)
df5 = simulate(estimator_name, bw_name, sizes, pdf_kwargs, mixture, niter)

df_adaptive_gaussian = pd.concat([df1, df2, df3, df4, df5])
df_adaptive_gaussian.to_csv("output/adaptive_gaussian.csv", index=False)
# Mixture estimator ----------------------------------------------------------
# Single run: the mixture estimator has no separate bandwidth selectors.
estimator_name = "mixture"
bw_name = "mixture"
mixture=True
print("-------------------------------------------")
print(f"Simulation with estimator {estimator_name}")
print_status(bw_name)
df_mixture = simulate(estimator_name, bw_name, sizes, pdf_kwargs, mixture, niter)
df_mixture.to_csv("output/mixture.csv", index=False)
| true |
2ba79fa3ca9526e142bd515ef7f32389d517fa4d | Python | tanzimzaki/Python_EH | /Port_Scanner.py | UTF-8 | 755 | 3.71875 | 4 | [] | no_license | #!/usr/bin/python
#Tanzim_Zaki_Saklayen
#Student_ID:10520140
import socket #retrieve socket library to perform the port scanning activity
# Simple sequential TCP connect scanner.
ip_address = input("Enter IP address: ")
first = int(input("Enter first port: "))
last = int(input("Enter last port: "))

# BUG FIX: range() excludes its stop value, so the "last" port the user
# asked for was never scanned; last + 1 makes the range inclusive.
for port in range(first, last + 1):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(1.0)  # don't hang indefinitely on filtered/unreachable ports
    if s.connect_ex((ip_address, port)) == 0:  # 0 means the connect succeeded
        print("Port", port, "is Open")
    s.close()
| true |
5855720cdb195d034e310d57be14d3abc423b98b | Python | Biewer/Doping-Tests-for-Cyber-Physical-Systems-Tool | /examples/nissan/DynamometerTrace.py | UTF-8 | 942 | 2.796875 | 3 | [
"MIT"
] | permissive | import os, sys
sys.path.insert(0, os.path.abspath("../"))
from tool.doping_monitor import RecordedTrace
from tool.doping_test import Input, Output
class DynamometerTrace(RecordedTrace):
    """A recorded trace on dynamometer and PEMS"""

    def __init__(self, file_name):
        super().__init__(file_name)

    # Each line encodes one symbol: 'i' + float for an input,
    # 'o' + float for an output.
    def get_symbol_from_string(self, strng):
        kind, payload = strng[0], strng[1:]
        if kind == 'i':
            return Input(float(payload))
        if kind == 'o':
            return Output(float(payload))
        raise Exception('Unknown file format! (' + strng + ')')
if __name__ == '__main__':
    # Smoke test: parse the bundled NEDC trace and print it.
    m = DynamometerTrace('examples/nissan/NEDC.txt')
    print(m)
| true |
71fe5c9ab2a489e24fef0e8f4ea73c3e1774c2f0 | Python | hpp3/wec2016 | /parse.py | UTF-8 | 4,043 | 2.53125 | 3 | [] | no_license | import json
from astar import astar
# Load the road network GeoJSON.
# NOTE(review): the file handle is never closed.
roads_file = open('roads.json', 'r')
all_data = json.load(roads_file)

# graph: adjacency map endpoint-coord -> list of reachable endpoint-coords
# dist: edge length in metres keyed by (from_coord, to_coord)
# coord_to_seg / seg_to_coord: mappings between segment ids and coordinates
graph = {}
dist = {}
coord_to_seg = {}
seg_to_coord = {}

# Segment ids treated as closed; they are excluded from the graph below.
black_list = set([26896, 1517, 7608, 1526, 7947, 7494, 7469, 9064, 9062, 7572, 7514, 259, 1621, 29101, 30205, 7289, 274, 348, 80267])

# Update coord-seg mapping and vice versa
for road in all_data['features']:
    from_coord = tuple(road['geometry']['coordinates'][0])
    to_coord = tuple(road['geometry']['coordinates'][-1])
    if road['geometry']['type'] == 'LineString':
        # Both directions map to the same segment id.
        coord_to_seg[(from_coord, to_coord)] = road['properties']['SEGMENT_ID']
        coord_to_seg[(to_coord, from_coord)] = road['properties']['SEGMENT_ID']
        seg_to_coord[road['properties']['SEGMENT_ID']] = road['geometry']['coordinates']

# Build the directed adjacency/distance maps, honouring the flow
# direction of each (non-blacklisted) segment.
for road in all_data['features']:
    if road['properties']['SEGMENT_ID'] in black_list:
        continue
    from_coord = tuple(road['geometry']['coordinates'][0])
    to_coord = tuple(road['geometry']['coordinates'][-1])
    if road['geometry']['type'] == 'LineString':
        if road['properties']['FLOW_DIR'] == 'TwoWay':
            # Two-way: edges and distances in both directions.
            if from_coord not in graph:
                graph[from_coord] = []
            graph[from_coord].append(to_coord)
            dist[(from_coord, to_coord)] = road['properties']['LENGTH_M']
            dist[(to_coord, from_coord)] = road['properties']['LENGTH_M']
            if to_coord not in graph:
                graph[to_coord] = []
            graph[to_coord].append(from_coord)
        elif road['properties']['FLOW_DIR'] == 'ToFrom':
            # One-way, traversed from `to` towards `from`.
            if to_coord not in graph:
                graph[to_coord] = []
            graph[to_coord].append(from_coord)
            dist[(to_coord, from_coord)] = road['properties']['LENGTH_M']
        elif road['properties']['FLOW_DIR'] == 'FromTo':
            # One-way, traversed from `from` towards `to`.
            if from_coord not in graph:
                graph[from_coord] = []
            graph[from_coord].append(to_coord)
            dist[(from_coord, to_coord)] = road['properties']['LENGTH_M']
def getCoords(input_list=(7294, 274, 389)):
    """Return flipped coordinate pairs for every vertex of the segments.

    The source data stores pairs as (x, y) -- presumably GeoJSON
    (lon, lat) -- and this returns them as [y, x] lists for display.
    Note: the default argument was a mutable list; replaced with an
    equivalent tuple to avoid the shared-mutable-default pitfall.
    """
    coords = []
    for seg_id in input_list:
        coords.extend(seg_to_coord[seg_id])
    return [[y, x] for x, y in coords]
def getClosures(input_list=(7294, 274, 389)):
    """Return the flipped coordinate polylines of the closed segments.

    Only segments from `input_list` that appear in the module-level
    `black_list` are returned, each as a list of [y, x] pairs.
    Note: the default argument was a mutable list; replaced with an
    equivalent tuple to avoid the shared-mutable-default pitfall.
    """
    closed = []
    for seg_id in input_list:
        if seg_id in black_list:
            closed.append(seg_to_coord[seg_id])
    return [[[y, x] for x, y in line] for line in closed]
def getPaths(input_list=[7294, 274, 389]):
    # Return up to three alternative routes (as [lat?, lon?]-flipped
    # coordinate lists) between the endpoints of the given segment chain.
    # NOTE(review): mutable default argument -- benign here since it is
    # never mutated, but worth replacing with a tuple.
    # get start and end coords
    if len(input_list) == 1:
        start = seg_to_coord[input_list[0]][0]
        end = seg_to_coord[input_list[-1]][-1]
    else:
        # Pick the endpoint of the first/last segment that is NOT shared
        # with its neighbour, so start/end sit at the chain's outer ends.
        if seg_to_coord[input_list[0]][0] in seg_to_coord[input_list[1]]:
            start = seg_to_coord[input_list[0]][-1]
        else:
            start = seg_to_coord[input_list[0]][0]
        if seg_to_coord[input_list[-1]][0] in seg_to_coord[input_list[-2]]:
            end = seg_to_coord[input_list[-1]][-1]
        else:
            end = seg_to_coord[input_list[-1]][0]
    start = tuple(start)
    end = tuple(end)
    # calculate best paths
    bestPaths = []
    segblacklist = set()
    bestPaths.append(astar(start, end, graph, dist, set()))
    # Generate alternatives by blacklisting the longest edges of the
    # latest best path and re-running A*; keep a path only if it is new.
    for z in range(2):
        seg = set()
        for i in range(len(bestPaths[-1])-1):
            # Edges of the latest path (stored reversed: (next, prev)).
            seg.add((bestPaths[-1][i+1], bestPaths[-1][i]))
        for i in range(2-z):
            if len(seg) < 5:
                break
            top = max(seg, key=lambda s: dist[s])
            segblacklist.add(top)
            seg.remove(top)
        new = astar(start, end, graph, dist, segblacklist)
        if new not in bestPaths:
            bestPaths.append(new)
    # Expand each node path into full segment polylines, flipping each
    # (x, y) pair to (y, x) for display.
    pathCoords = []
    for path in bestPaths:
        seg = set()
        p = list(reversed(path))
        coord = []
        for i in range(1, len(p)):
            seg = coord_to_seg[(p[i-1], p[i])]
            c = seg_to_coord[seg]
            c = [(b,a) for a, b in c]
            coord.extend(c)
        pathCoords.append(coord)
    return pathCoords
| true |
170c670a9d2410d89eed45771b0ce97dee9fa39a | Python | xiuqingyao/azkaban_assistant | /schedule/util/alarm.py | UTF-8 | 384 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
# coding=utf-8
import os
def send_html(mailto,title,content):
    """Stub mail hook: prints a placeholder notice instead of sending mail.

    The printed Chinese text says: "pretending to send mail, please plug in
    your own implementation (schedule/util/alarm)".
    NOTE(review): `content` is accepted but never used here.
    """
    print '假装在发送邮件,请自行接入(schedule/util/alarm)'
    print mailto
    print title
def send_msg(msgto,content):
    """Stub SMS hook: prints a placeholder notice instead of sending SMS.

    The printed Chinese text says: "pretending to send SMS, please plug in
    your own implementation (schedule/util/alarm)".
    """
    print '假装在发送短信,请自行接入(schedule/util/alarm)'
    print msgto
    print content
if __name__=='__main__':
    # Manual smoke test for the SMS stub.
    send_msg(11111,'aa')
| true |
05785f2f56f4aa108e8012241f0679de7fe4aa46 | Python | andrewyoung1991/supriya | /supriya/tools/pendingugentools/SyncSaw.py | UTF-8 | 3,285 | 2.8125 | 3 | [
"MIT"
] | permissive | # -*- encoding: utf-8 -*-
from supriya.tools.ugentools.PureUGen import PureUGen
class SyncSaw(PureUGen):
    r'''A UGen with ``sync_frequency`` and ``saw_frequency`` inputs,
    constructable at audio rate (``ar``) or control rate (``kr``).

    ::

        >>> sync_saw = ugentools.SyncSaw.ar(
        ...     saw_frequency=440,
        ...     sync_frequency=440,
        ...     )
        >>> sync_saw
        SyncSaw.ar()

    '''

    ### CLASS VARIABLES ###

    __documentation_section__ = None

    __slots__ = ()

    # Positional order of the UGen's inputs; the property getters below
    # look their values up by index into this tuple.
    _ordered_input_names = (
        'sync_frequency',
        'saw_frequency',
        )

    _valid_calculation_rates = None

    ### INITIALIZER ###

    def __init__(
        self,
        calculation_rate=None,
        saw_frequency=440,
        sync_frequency=440,
        ):
        PureUGen.__init__(
            self,
            calculation_rate=calculation_rate,
            saw_frequency=saw_frequency,
            sync_frequency=sync_frequency,
            )

    ### PUBLIC METHODS ###

    @classmethod
    def ar(
        cls,
        saw_frequency=440,
        sync_frequency=440,
        ):
        r'''Constructs an audio-rate SyncSaw.

        ::

            >>> sync_saw = ugentools.SyncSaw.ar(
            ...     saw_frequency=440,
            ...     sync_frequency=440,
            ...     )
            >>> sync_saw
            SyncSaw.ar()

        Returns ugen graph.
        '''
        from supriya.tools import synthdeftools
        calculation_rate = synthdeftools.CalculationRate.AUDIO
        ugen = cls._new_expanded(
            calculation_rate=calculation_rate,
            saw_frequency=saw_frequency,
            sync_frequency=sync_frequency,
            )
        return ugen

    @classmethod
    def kr(
        cls,
        saw_frequency=440,
        sync_frequency=440,
        ):
        r'''Constructs a control-rate SyncSaw.

        ::

            >>> sync_saw = ugentools.SyncSaw.kr(
            ...     saw_frequency=440,
            ...     sync_frequency=440,
            ...     )
            >>> sync_saw
            SyncSaw.kr()

        Returns ugen graph.
        '''
        from supriya.tools import synthdeftools
        calculation_rate = synthdeftools.CalculationRate.CONTROL
        ugen = cls._new_expanded(
            calculation_rate=calculation_rate,
            saw_frequency=saw_frequency,
            sync_frequency=sync_frequency,
            )
        return ugen

    ### PUBLIC PROPERTIES ###

    @property
    def saw_frequency(self):
        r'''Gets `saw_frequency` input of SyncSaw.

        ::

            >>> sync_saw = ugentools.SyncSaw.ar(
            ...     saw_frequency=440,
            ...     sync_frequency=440,
            ...     )
            >>> sync_saw.saw_frequency
            440.0

        Returns ugen input.
        '''
        index = self._ordered_input_names.index('saw_frequency')
        return self._inputs[index]

    @property
    def sync_frequency(self):
        r'''Gets `sync_frequency` input of SyncSaw.

        ::

            >>> sync_saw = ugentools.SyncSaw.ar(
            ...     saw_frequency=440,
            ...     sync_frequency=440,
            ...     )
            >>> sync_saw.sync_frequency
            440.0

        Returns ugen input.
        '''
        index = self._ordered_input_names.index('sync_frequency')
        return self._inputs[index]