blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e69172b7b1718cc0041ee79f3b23f5c683dcec27 | Python | APS-XSD-OPT-Group/wavepytools | /wavepytools/diag/coherence/load_2_pickles_results.py | UTF-8 | 1,359 | 2.65625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*- #
"""
Created on %(date)s
@author: %(username)s
"""
# %%% imports cell
import numpy as np
import matplotlib.pyplot as plt
import wavepy.utils as wpu
# %%
import pickle
def _load_data_from_pickle(fname):
    """Load a pickled matplotlib figure, display it, and return its curves.

    Each curve is the (x, y) data of one line from the figure's first axes,
    as a numpy array.

    Fix: the original leaked the file handle via ``pickle.load(open(...))``;
    the file is now closed deterministically with a ``with`` block.
    """
    with open(fname, 'rb') as handle:
        fig = pickle.load(handle)
    fig.set_size_inches((12, 9), forward=True)
    plt.show(block=True)  # keeps the script alive so the plot can be inspected
    return [np.asarray(line.get_data()) for line in fig.axes[0].lines]
# %%
# Two measurement runs of the same configuration, saved as pickled
# matplotlib figures.
fname1 = 'CBhalfpi_3p4um_23p7keV_st8mm_step2mm_100ms_5images_01.pickle'
fname2 = 'CBhalfpi_3p4um_23p7keV_st8mm_step2mm_100ms_5images_02.pickle'
results1 = _load_data_from_pickle(fname1)
results2 = _load_data_from_pickle(fname2)
# %%
# Rescale each run: per the variable names, curve 0 holds the vertical
# contrast and curve 1 the horizontal one; each contrast is normalised to
# its own maximum.
# NOTE(review): the 1e-3 / 1e-2 factors presumably convert units (mm and
# percent) -- confirm against the units stored in the pickled figures.
zvec1 = results1[0][0]*1e-3
contrastV1 = results1[0][1]*1e-2
contrastV1 /= np.max(contrastV1)
contrastH1 = results1[1][1]*1e-2
contrastH1 /= np.max(contrastH1)
zvec2 = results2[0][0]*1e-3
contrastV2 = results2[0][1]*1e-2
contrastV2 /= np.max(contrastV2)
contrastH2 = results2[1][1]*1e-2
contrastH2 /= np.max(contrastH2)
# %%
# Side-by-side plots: vertical contrast (left) and horizontal (right);
# run 1 in blue, run 2 in green.
plt.figure(figsize=(10,6))
plt.subplot(121)
plt.plot(zvec1*1e3, contrastV1, '-b.')
plt.plot(zvec2*1e3, contrastV2, '-g.')
plt.subplot(122)
plt.plot(zvec1*1e3, contrastH1, '-b.')
plt.plot(zvec2*1e3, contrastH2, '-g.')
plt.show()
| true |
968888e10f5c992546f539957d92723eb89a464b | Python | textbook/aoc-2020 | /day04/impl.py | UTF-8 | 4,852 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env python3
from os.path import dirname
import re
from textwrap import dedent
import unittest
VALID_EYE_COLOURS = ("amb", "blu", "brn", "gry", "grn", "hzl", "oth")
def valid_year(min_, max_):
    """Build a validator accepting 4-digit year strings within [min_, max_]."""
    def valid(s):
        if re.match(r"^\d{4}$", s) is None:
            return False
        return min_ <= int(s) <= max_
    return valid
def valid_height(s):
    """Validate a height string: 150-193 cm or 59-76 in."""
    if re.match(r"^\d{3}cm$", s) is not None:
        value = int(s[:-2])
        return 150 <= value <= 193
    if re.match(r"^\d{2}in$", s) is not None:
        value = int(s[:-2])
        return 59 <= value <= 76
    return False
# Validation rules for each required passport field (AoC 2020 day 4 part 2).
# Each value is a callable taking the field's string and returning a truthy
# value when the field is valid; a regex `.match` returns a Match or None.
RULES = dict(
    byr=valid_year(1920, 2002),  # birth year
    ecl=lambda s: s in VALID_EYE_COLOURS,  # eye colour
    eyr=valid_year(2020, 2030),  # expiration year
    hcl=re.compile(r"^#[0-9a-f]{6}$").match,  # hair colour: '#' + 6 hex digits
    hgt=valid_height,  # height in cm or in
    iyr=valid_year(2010, 2020),  # issue year
    pid=re.compile(r"^\d{9}$").match,  # passport id: exactly 9 digits
)
class PuzzleTest(unittest.TestCase):
example = dedent("""
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
""".strip())
def test_example(self):
self.assertEqual(puzzle(self.example, RULES), 4)
def test_byr_valid(self):
for byr in range(1920, 2003):
with self.subTest(byr=byr):
self.assertTrue(RULES["byr"](str(byr)))
def test_byr_invalid(self):
for byr in (1918, 1919, 2003, 2004, "foo"):
with self.subTest(byr=byr):
self.assertFalse(RULES["byr"](str(byr)))
def test_ecl_valid(self):
for ecl in VALID_EYE_COLOURS:
with self.subTest(ecl=ecl):
self.assertTrue(RULES["ecl"](ecl))
def test_ecl_invalid(self):
self.assertFalse(RULES["ecl"]("foo"))
def test_eyr_valid(self):
for eyr in range(2020, 2031):
with self.subTest(eyr=eyr):
self.assertTrue(RULES["eyr"](str(eyr)))
def test_eyr_invalid(self):
for eyr in (2018, 2019, 2031, 2032, "foo"):
with self.subTest(eyr=eyr):
self.assertFalse(RULES["eyr"](str(eyr)))
def test_hcl_valid(self):
for hcl in ("#abc123", "#000000"):
with self.subTest(hcl=hcl):
self.assertTrue(RULES["hcl"](hcl))
def test_hcl_invalid(self):
for hcl in ("#abg123", "foo"):
with self.subTest(hcl=hcl):
self.assertFalse(RULES["hcl"](hcl))
def test_hgt_valid(self):
for hgt in range(150, 194):
with self.subTest(hgt=f"{hgt}cm"):
self.assertTrue(RULES["hgt"](f"{hgt}cm"))
for hgt in range(59, 77):
with self.subTest(hgt=f"{hgt}in"):
self.assertTrue(RULES["hgt"](f"{hgt}in"))
def test_hgt_invalid(self):
for hgt in (148, 149, 194, 195):
with self.subTest(hgt=f"{hgt}cm"):
self.assertFalse(RULES["hgt"](f"{hgt}cm"))
for hgt in (57, 58, 77, 78):
with self.subTest(hgt=f"{hgt}in"):
self.assertFalse(RULES["hgt"](f"{hgt}in"))
self.assertFalse(RULES["hgt"]("foo"))
def test_iyr_valid(self):
for iyr in range(2010, 2021):
with self.subTest(iyr=iyr):
self.assertTrue(RULES["iyr"](str(iyr)))
def test_iyr_invalid(self):
for iyr in (2008, 2009, 2021, 2022, "foo"):
with self.subTest(iyr=iyr):
self.assertFalse(RULES["iyr"](str(iyr)))
def test_pid_valid(self):
for pid in ("896056539", "000000001"):
with self.subTest(pid=pid):
self.assertTrue(RULES["pid"](pid))
def test_pid_invalid(self):
for pid in ("8960565390", "foo"):
with self.subTest(pid=pid):
self.assertFalse(RULES["pid"](pid))
def parse(data):
    """Parse whitespace-separated 'key:value' tokens into a dict."""
    pairs = (token.split(":") for token in data.split())
    return dict(pairs)
def is_valid(passport, rules):
    """Return True iff every rule's field is present and its validator accepts it."""
    for field, valid in rules.items():
        if field not in passport or not valid(passport[field]):
            return False
    return True
def puzzle(data, rules):
    """Count the passports in *data* (blank-line separated) that satisfy *rules*."""
    passports = data.split("\n\n")
    return sum(1 for block in passports if is_valid(parse(block), rules))
if __name__ == "__main__":
    # Solve part 2 against the adjacent puzzle input using the strict RULES.
    with open(f"{dirname(__file__)}/input.txt") as f:
        print(puzzle(f.read().strip(), RULES))
| true |
97cdb186e97561a54929f112cb3ea1e4d4ab43e3 | Python | charliestrawn/thundersnow | /thundersnow/api/weeks.py | UTF-8 | 867 | 2.546875 | 3 | [] | no_license | import datetime
from flask import Blueprint, jsonify, request
from thundersnow import db
from thundersnow.utils import login_required
from thundersnow.models import Week
weeks_blueprint = Blueprint('weeks', __name__)
@weeks_blueprint.route('/weeks', methods=['GET', 'POST'])
@login_required
def api_weeks():
    """Create a week (POST) or list the weeks of a year (GET)."""
    if request.method != 'POST':
        # GET: default to the current year unless a usable ?year= is given.
        year = datetime.datetime.now().year
        requested = request.args.get('year')
        if requested and requested != 'undefined':
            year = requested
        weeks = Week.query.filter_by(year=year).all()
        return jsonify([str(w) for w in weeks])
    # POST: the payload carries a '-'-separated week string
    # (presumably year-month-day -- confirm against the Week model).
    parts = request.json['week'].split('-')
    week = Week(parts[0], parts[1], parts[2])
    db.session.add(week)
    db.session.commit()
    return jsonify(str(week))
| true |
3764472f80ffa878f7fe3c8e576d79fb88ad4a6f | Python | EduardDek/ML-lvl-0-Homeworks | /week 5/ICe Cream Parlor.py | UTF-8 | 507 | 3.515625 | 4 | [] | no_license | t = int(input("how many times?"))
for i in range(0,t):
found = False
money = int(input("money="))
CostSize = int(input("Cost size="))
cost = []
for j in range (0,CostSize):
cost.append(int(input("Cost")))
for e in range (0,len(cost)):
for c in range(e+1,len(cost)):
if cost[e]+cost[c]==money:
print(e+1,c+1)
found = True
break
if found == True:
break
| true |
30978f6039713cabdc91d71fd13a4789219a1939 | Python | BrianBock/ENPM809T | /HW9/openmotors.py | UTF-8 | 353 | 2.5625 | 3 | [] | no_license |
import RPi.GPIO as gpio
def init():
    """Configure the five motor-control pins as outputs (BOARD numbering)."""
    gpio.setmode(gpio.BOARD)
    for pin in (31, 33, 35, 37, 36):
        gpio.setup(pin, gpio.OUT)
def gameover():
    """Drive every motor-control pin low, stopping all motors."""
    for pin in (31, 33, 35, 37, 36):
        gpio.output(pin, False)
# Configure the pins, immediately drive everything low, then release them.
init()
gameover()
gpio.cleanup()
| true |
917f20d35009e13cb3a3fcd6853508c3707a77e2 | Python | hkdeman/FunScripts | /FacebookRepeatedMessages.py | UTF-8 | 591 | 2.6875 | 3 | [] | no_license | from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
# Credentials and configuration are intentionally blank; fill in before use.
username = ""
password = ""
directory = ""  # path passed to webdriver.Chrome -- presumably the chromedriver executable; confirm
link = ""  # URL to open after login
driver = webdriver.Chrome(directory)
driver.get(link)
# Log in via the classic Facebook form fields.
emailElement = driver.find_element_by_id('email')
emailElement.send_keys(username)
passElement = driver.find_element_by_id('pass')
passElement.send_keys(password)
driver.find_element_by_xpath('//*[@id="loginbutton"]').click()
# Queue and send the same chat message 200 times.
# NOTE(review): the ActionChains queue is never reset, so each perform()
# replays all previously queued keystrokes as well -- confirm this
# repetition is intended.
actions = ActionChains(driver)
for i in range(200):
    actions.send_keys("Isn't that super cool?\n")
    actions.perform()
| true |
8e6fbf88f556db362f6edb37a9bf406a4561ce02 | Python | TalhaAsmal/machine-trading | /statistical_analysis.py | UTF-8 | 2,255 | 2.96875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import pandas as pd
from os import path
import datetime
import numpy as np
from common import data_dir, get_bollinger_bands, plot_data, get_daily_returns
def bollinger_bands(df):
    """Plot SPY prices with a 20-day rolling mean and Bollinger bands."""
    axes = df.plot(title="SPY rolling mean", label='SPY')
    window = df['SPY'].rolling(center=False, window=20)
    rolling_mean = window.mean()
    rolling_std = window.std()
    upper, lower = get_bollinger_bands(rolling_mean, rolling_std)
    rolling_mean.plot(label='Rolling Mean', ax=axes)
    upper.plot(label="Upper band", ax=axes)
    lower.plot(label="Lower band", ax=axes)
    axes.set_xlabel('Date')
    axes.set_ylabel('Price')
    axes.legend(loc='upper left')
    plt.show()
def stat_analysis(start_date, end_date, tracking_etf, stocks=None):
    """Scatter/regression analysis of XOM and GLD daily returns against SPY.

    Args:
        start_date, end_date: bounds for slicing the date index.
        tracking_etf: currently unused; kept for interface compatibility
            (the call at module level passes a symbol list here).
        stocks: optional list of columns to restrict the analysis to.

    Returns:
        dict mapping symbol -> (beta, alpha) from the linear fit vs SPY.
        (The original computed these and discarded them; returning them is
        backward compatible since the previous return value was None.)
    """
    df = pd.read_csv(path.join(data_dir, "combined.csv"), index_col='Date', parse_dates=True, na_values=['nan'])
    df = df.ix[start_date:end_date]  # .ix kept to match the pandas version in use
    if stocks is not None:
        df = df.ix[:, stocks]
    daily_returns = get_daily_returns(df)
    beta_XOM, alpha_XOM = create_scatter_plot(daily_returns, "SPY", "XOM")
    beta_GLD, alpha_GLD = create_scatter_plot(daily_returns, "SPY", "GLD")
    return {"XOM": (beta_XOM, alpha_XOM), "GLD": (beta_GLD, alpha_GLD)}
def create_histograms(df, bins):
    """Histogram SPY daily returns, mark mean/std lines, and print stats."""
    daily_returns = get_daily_returns(df)
    daily_returns.hist(bins=bins)
    # Compute and report mean and standard deviation of SPY returns.
    mean = daily_returns.SPY.mean()
    std_dev = daily_returns.SPY.std()
    print("Mean: {}\nStd. Dev: {}".format(mean, std_dev))
    # Dashed markers: mean in white, +/- one std deviation in red.
    for position, colour in ((mean, 'w'), (std_dev, 'r'), (-std_dev, 'r')):
        plt.axvline(position, color=colour, linestyle='dashed', linewidth=2)
    # Report kurtosis of all columns.
    print("Kurtosis: {}".format(daily_returns.kurtosis()))
    plt.show()
def create_scatter_plot(daily_returns, x_axis, y_axis):
    """Scatter y_axis vs x_axis returns with a fitted line; return (beta, alpha)."""
    daily_returns.plot(kind="scatter", x=x_axis, y=y_axis)
    xs = daily_returns[x_axis]
    beta, alpha = np.polyfit(xs, daily_returns[y_axis], 1)
    plt.plot(xs, beta * xs + alpha, '-', color='r')
    plt.show()
    return beta, alpha
# Analyse data from 2009 through today.
# NOTE(review): the symbol list is passed as tracking_etf (unused inside
# stat_analysis) rather than as stocks -- confirm that this is intended.
start = datetime.date(2009, 1, 1)
end = datetime.date.today()
stat_analysis(start, end, ["SPY", "XOM", "GLD"])
| true |
a3fb715cb80ced8ca6a584c967a15b26d3ec690f | Python | lulu03/TestCourses | /unittest_hw0604/run.py | UTF-8 | 1,343 | 3.359375 | 3 | [] | no_license | '''
新建一个run.py入口
'''
import unittest
from utils.HTMLTestRunner import HTMLTestRunner
# 1. Discover all test files named testcase*.py under ./testcases
#    ("./testcases" is the start dir relative to this run.py entry point).
testcases = unittest.defaultTestLoader.discover("./testcases", "testcase*.py")
# 2. Load every discovered test into one test suite.
testsuites = unittest.TestSuite()
# Bug fix: the original called testcases.addTest(testcases), which added the
# discovered suite to *itself* (infinite recursion when run) and left
# `testsuites` empty; the discovered tests belong in `testsuites`.
testsuites.addTest(testcases)
print(testcases.__dict__)
# 3. Run the test suite.
# Option 1: run with unittest's built-in text runner:
# runner = unittest.TextTestResult()
# runner.run(testsuites)
# Option 2: run with HTMLTestRunner to produce an HTML report.
title = "测试报告"
descr = "这是猫宁商城的测试报告"
file_path = "./reports/unittest_report.html"
# Create (or overwrite) the report file; `with` closes it automatically.
with open(file_path, "wb") as f:
    # Write the test results into the HTML report.
    runner = HTMLTestRunner(stream=f, title=title, description=descr)
    # Run the test suite.
    runner.run(testsuites)
| true |
43624fc958980bb689122edea073d91307775d64 | Python | yruss972/opencontrol-linter | /vendor/schemas/transformation-scripts/utils.py | UTF-8 | 485 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | """ These are shared functions between v2_to_v1 and v1_to_v2 """
def add_if_exists(new_data, old_data, field):
    """Copy *field* from old_data into new_data when old_data contains it."""
    if field in old_data:
        new_data[field] = old_data[field]
def transport_usable_data(new_data, old_data):
    """Copy every field of old_data into new_data.

    (add_if_exists is inlined here: each iterated field necessarily exists
    in old_data, so the copy is unconditional.)
    """
    for field in old_data:
        new_data[field] = old_data[field]
| true |
3ac4d9947703d532cf321fb2ec6682415ef83a9e | Python | hckmd/week7-lecture-demos | /parameters/app.py | UTF-8 | 1,502 | 3.21875 | 3 | [] | no_license | from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# Code to setup the connection to a database (more details on this later in the lecture)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///animals.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# This would normally go in a separate file called models.py, added here as a brief example
class Animal(db.Model):
    """SQLAlchemy model for one animal row (stored in animals.db)."""
    # Auto-incrementing primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Display name, e.g. 'Meerkat'.
    name = db.Column(db.Text)
    # Rating value; seeded as 10 by the init-db CLI command below.
    rating = db.Column(db.Integer)
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/print_id/<int:id>')
def print_id(id: int):
    """Demonstrate the int URL converter: log the id and echo it to a template."""
    print(f'The id in the URL is: {id}')
    return render_template('printing.html', id = id)
@app.route('/animal/<int:id>')
def get_animal(id: int):
    """Show the animal with the given primary key, or 404 if absent."""
    print(f'The id for the animal is {id}')
    # Retrieve the animal for the given id from the database
    animal = Animal.query.get_or_404(id)
    return render_template('animal.html', animal = animal)
@app.cli.command('init-db')
def init_db():
    """Flask CLI command `init-db`: rebuild the schema and seed sample rows."""
    # Drop the database if it already exists and create it again, to start from scratch
    db.drop_all()
    db.create_all()
    # Create two animal records for example queries
    meerkat = Animal(name='Meerkat',rating=10)
    db.session.add(meerkat)
    elephant = Animal(name='Elephant',rating=10)
    db.session.add(elephant)
    # Save the changes to the animals database file
    db.session.commit()
110175361a94dcfb1be3ac80ca8b67c707ea72ec | Python | Tsgzj/CS6200 | /HW3/src/page.py | UTF-8 | 4,224 | 2.734375 | 3 | [
"MIT"
] | permissive | from lxml import etree
from urlparse import urlparse, urljoin
from readability.readability import Document
import re
import validators
import json
from sets import Set
# import urllib2 #used for test
class Page:
    """A fetched (or to-be-fetched) web page: URL, headers, raw HTML body,
    and link bookkeeping for a crawler.

    NOTE(review): this class is Python 2 code (it uses the `urlparse`
    module and print statements below).
    """
    def __init__(self, url = None, header = None, body = None, inlinks = None, fetched = False):
        self.__url = url
        self.__inlinks = inlinks  # iterable of in-linking URLs; NOTE(review): inlinks() iterates this, so the None default crashes later -- confirm callers always pass a collection
        # self.__body = self.clean(body)
        self.__header = header
        self.__body = body
        self.__fetched = fetched
        self.__sqz = re.compile(r'\/\/+')  # collapses runs of '/' in URL paths
def inlinks(self):
inlinks = []
for item in self.__inlinks:
inlinks.append(item)
return inlinks
def links(self):
links = Set()
try:
tree = etree.HTML(self.__body)
except:
return links
else:
try:
l = tree.xpath('//a/@href')
except:
return links
else:
for item in tree.xpath('//a/@href'):
realurl = self.absolute_link(item)
if realurl and self.isutf8(realurl):
if validators.url(realurl):
links.add(realurl)
return links
def canonicalize(self, url):
if url:
try:
surl = urlparse(url)
except:
print "Illegal url"
return None
else:
if surl.hostname and surl.path:
url = surl.scheme.lower() + '://' + \
surl.hostname.lower() + self.__sqz.sub('/', surl.path) + surl.params + surl.query
elif surl.path:
url = surl.scheme.lower() + '://' + \
self.__sqz.sub('/', surl.path) + surl.params + surl.query
elif surl.hostname:
url = surl.scheme.lower() + '://' + \
surl.hostname.lower() + surl.params + surl.query
else:
url = url
if url.endswith('/'):
return url[:-1]
else:
return url
else:
return None
def isutf8(self, url):
try:
url.decode('utf-8')
except:
return False
else:
return True
    def absolute(self, url):
        """Resolve *url* against the page's own URL.

        Already-absolute URLs (those with a scheme) are returned unchanged;
        relative ones are joined onto self.__url.  On a parse failure this
        prints a message and implicitly returns None -- NOTE(review):
        callers must tolerate None (canonicalize() does).
        """
        try:
            surl = urlparse(url)
        except:
            print "Ilegal url"  # [sic] -- message spelling kept as-is
        else:
            if surl.scheme:
                return url
            else:
                return urljoin(self.__url, url)
def absolute_link(self, url):
return self.canonicalize(self.absolute(url))
    def dump(self, ofile):
        """Serialise this page as one JSON line and append it to *ofile*.

        Skips the write (with a console message) when the record cannot be
        JSON-encoded.
        """
        res = {}
        res["url"] = self.__url
        res["raw"] = self.__body
        res["header"] = self.__header
        res["inlinks"] = list(self.__inlinks)
        res["outlinks"] = list(self.links())
        try:
            strres = json.dumps(res) + '\n'
        except:
            print "Enconding error, will not dump"  # [sic] message kept as-is
        else:
            ofile.write(strres)
@staticmethod
def domain(url):
if url:
purl = urlparse(url)
if purl.scheme and purl.hostname:
return purl.scheme + '://' + purl.hostname
else:
return None
else:
return None
@staticmethod
def canonical(url):
surl = urlparse(url)
url = surl.scheme.lower() + '://' + \
surl.hostname.lower() + re.compile(r'\/\/+').sub('/', surl.path)
if url.endswith('/'):
return url[:-1]
else:
return url
    def fetched(self):
        """Return whether this page has been fetched (flag set at construction)."""
        return self.__fetched
if __name__ == "__main__":
url = "http://www.harvard.edu"
req = urllib2.Request(url, headers={'User-Agent' : \
"Mozilla/5.0 (Macintosh; \
Intel Mac OS X 10.11; rv:47.0) \
Gecko/20100101 Firefox/47.0"})
con = urllib2.urlopen( req )
a = Page(url, con.read())
print a.links()
print a.host()
print len(a.links())
| true |
d8da2fbc13cc3f1920c678cb9316f93b1958a95d | Python | GustavoVargasHakim/Naive-Bayes-Book-Classification | /Python trials/CommonWordsBusiness | UTF-8 | 3,611 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 27 19:11:05 2019
@author: Clemente + Gustavo
"""
import nltk
import string
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
import fileinput
import glob
import csv
#Funcion para unir listas
def join(lista):
    """Return the union of a list of lists, as a list.

    Element order follows set iteration order (unordered, as before).
    Fix: the original re-unioned lista[0] with itself on the first pass and
    rebuilt a list/set pair on every iteration; this version accumulates a
    single set.  Still raises IndexError on an empty input, as before.
    """
    union = set(lista[0])
    for sublist in lista[1:]:
        union |= set(sublist)
    return list(union)
# Toma cualquier nombre del libro con comienzo "Busi_" y terminación ".txt"
archivos = glob.glob("../Management books/training/Busi_*.txt")
archivos.sort()
training = []
words_list = []
for i in range(20) :
training.append(archivos[i])
for linea in fileinput.input(training, openhook=fileinput.hook_encoded("utf-8")):
if fileinput.isfirstline():
# Files name
book = fileinput.filename()
Busi_1 = open(book, encoding="utf-8").read()
Busi1 = nltk.word_tokenize(Busi_1)
Busi1=[w.lower() for w in Busi1 if w.isalpha()]
stop_words = set(stopwords.words('english'))
filtered_book = [w for w in Busi1 if not w in stop_words]
single_character = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'eg',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'y', 'z', 'σi', 'σn',
'α', 'β', 'βn', 'xn', 'αv', 'ν', 'ϕ', 'ba', 'ip', 'fi', 'kr', 'fr', 'ij',
'bd', 'nj', 'ac', 'bd', 'hk', 'gc', 'xg', 'dn', 'bi', 'mn', 'αu', 'hg',
'zn', 'nth', 'mmc','gcd', 'cd', 'ub', 'di', 'ad', 'ab','gh', 'στ', 'σ', 'ai',
'cis', 'abab', 'aabb', 'id', 'sn', 'ax', 'bx', 'αn','px', 'acr', 'bcs', 'hn',
'kx', 'ζ', 'η', 'θ', 'κ', 'λ', 'μ', 'ξ', 'ρ', 'τ', 'φ', 'χ', 'ψ',
'ω', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Ω', 'Ψ', 'Σ', 'Π',
'Ξ', 'Λ', 'Θ', 'Δ', 'Γ', 'aβ', 'aβj', 'βj', 'gf', 'pn', 'bp', 'zp',
'bch', 'http://', 'http', 'xm','µx', 'also', 'url','ª', 'solu', 'equa', 'see',
'may', 'two', 'one', 'https')
filtered_book = [w for w in filtered_book if not w in single_character]
filtered_book_dist = nltk.FreqDist(w.lower() for w in filtered_book)
most_common_words = filtered_book_dist.most_common(10)
words_list.append(most_common_words)
#print(filtered_book, '\n')
#print("\n\nLibro:", book, "Frecuentes:", most_common_words)
# Separate the most frequent words, dropping their frequency counts.
# common_words_lists holds 20 lists of words (one per book); each inner
# list keeps the desired number of most-common words for that book.
common_words_lists = []
for i in range(20) :
    words = []
    for j in range(10) :
        words.append(words_list[i][j][0])
    common_words_lists.append(words)
# Union of the per-book most-frequent-word lists across all books.
palabras = join(common_words_lists)
# Keep only the first 20 words as the feature set.
features = []
for i in range(20) :
    features.append(palabras[i])
# Save the feature list to a comma-separated-values (csv) file.
with open('business_features.csv', 'wt') as f:
    csv_writer = csv.writer(f)
    csv_writer.writerow(features)
6f0ba16cff9f6c346303191729be4bbd1c0e5889 | Python | koren-v/SocialMediaClassification | /Neural Nets/utils_for_dl.py | UTF-8 | 7,675 | 2.625 | 3 | [] | no_license | import torch
from torchtext import data
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchtext
from torch.autograd import Variable
from torch.optim import lr_scheduler
import time
import copy
import numpy as np
def test(model, criterion, optimizer, test_iterator, batch_size, test_size):
    """Run *model* over *test_iterator* and return class-1 probabilities.

    Batches whose Text second dimension differs from *batch_size* are
    skipped; if that leaves fewer than *test_size* predictions, the result
    is zero-padded so its length is exactly *test_size*.

    Args:
        model: torch module mapping a Text tensor to 2-class logits.
        criterion, optimizer: unused here; kept for a uniform call signature.
        test_iterator: iterable of batches exposing a .Text tensor.
        batch_size: expected size of Text's second dimension.
        test_size: required length of the returned prediction vector.

    Returns:
        1-D numpy array of length test_size with softmax P(class == 1).
    """
    model.eval()  # set model to evaluate mode
    preds = np.array([])
    for batch in test_iterator:
        text = batch.Text
        if torch.cuda.is_available():
            text = text.cuda()
        # Bug fix: the original compared sizes with `is not` (identity),
        # which is only accidentally correct for small interned ints;
        # use != (equality) instead.
        if batch.Text.size()[1] != batch_size:
            continue
        outputs = model(text)
        outputs = F.softmax(outputs, dim=-1)
        pred = outputs[:, 1]
        pred = pred.cpu().detach().numpy()
        preds = np.append(preds, pred)
    if len(preds) != test_size:
        num_zeros = test_size - len(preds)
        preds = np.append(preds, np.zeros((num_zeros,)))
    return preds
def evaluate(model, criterion, optimizer, test_iterator, batch_size, test_size):
    """Evaluate *model* on labelled batches and return class-1 probabilities.

    Mirrors test() but also runs *criterion* on each batch (the loss value
    is computed and discarded, preserving the original behaviour), with
    gradient tracking disabled throughout.  Mismatched batches are skipped
    and the result is zero-padded to *test_size*.
    """
    model.eval()  # set model to evaluate mode
    phase = 'val'
    preds = np.array([])
    for batch in test_iterator:
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward; history is only tracked in the 'train' phase (never here)
        with torch.set_grad_enabled(phase == 'train'):
            text = batch.Text
            label = batch.Label
            label = torch.autograd.Variable(label).long()
            if torch.cuda.is_available():
                text = text.cuda()
                label = label.cuda()
            # Bug fix: size comparison used `is not` (identity on ints);
            # replaced with != so it is correct for any batch_size.
            if batch.Text.size()[1] != batch_size:
                continue
            outputs = model(text)
            outputs = F.softmax(outputs, dim=-1)
            loss = criterion(outputs, label)  # computed for parity; value unused
            pred = outputs[:, 1]
            pred = pred.cpu().numpy()
            preds = np.append(preds, pred)
    if len(preds) != test_size:
        num_zeros = test_size - len(preds)
        preds = np.append(preds, np.zeros((num_zeros,)))
    return preds
def train(model, criterion, optimizer, scheduler, train_iterator, batch_size, num_epochs):
for epoch in range(num_epochs):
#scheduler.step()
model.train() # Set model to training mode
phase = 'train'
# Iterate over data.
for batch in train_iterator:
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
text = batch.Text
label = batch.Label
label = torch.autograd.Variable(label).long()
if torch.cuda.is_available():
text = text.cuda()
label = label.cuda()
if (batch.Text.size()[1] is not batch_size):
continue
outputs = model(text)
outputs = F.softmax(outputs,dim=-1)
loss = criterion(outputs, label)
# backward + optimize only if in training phase
loss.backward()
optimizer.step()
def train_and_eval(model, criterion, optimizer, dataiter_dict, dataset_sizes, batch_size, scheduler, num_epochs=25):
since = time.time()
print('starting')
best_model_wts = copy.deepcopy(model.state_dict())
best_loss = 200
val_loss = []
train_loss = []
val_acc = []
train_acc = []
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch+1, num_epochs))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
#scheduler.step()
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
sentiment_corrects = 0
tp = 0.0 # true positive
tn = 0.0 # true negative
fp = 0.0 # false positive
fn = 0.0 # false negative
# Iterate over data.
for batch in dataiter_dict[phase]:
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
text = batch.Text
label = batch.Label
label = torch.autograd.Variable(label).long()
if torch.cuda.is_available():
text = text.cuda()
label = label.cuda()
if (batch.Text.size()[1] is not batch_size):
continue
outputs = model(text)
outputs = F.softmax(outputs,dim=-1)
loss = criterion(outputs, label)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * text.size(0)
sentiment_corrects += torch.sum(torch.max(outputs, 1)[1] == label)
tp += torch.sum(torch.max(outputs, 1)[1] & label)
tn += torch.sum(1-torch.max(outputs, 1)[1] & 1-label)
fp += torch.sum(torch.max(outputs, 1)[1] & 1-label)
fn += torch.sum(1-torch.max(outputs, 1)[1] & label)
epoch_loss = running_loss / dataset_sizes[phase]
sentiment_acc = float(sentiment_corrects) / dataset_sizes[phase]
if phase == 'train':
train_acc.append(sentiment_acc)
train_loss.append(epoch_loss)
elif phase == 'val':
val_acc.append(sentiment_acc)
val_loss.append(epoch_loss)
print('{} total loss: {:.4f} '.format(phase,epoch_loss ))
print('{} sentiment_acc: {:.4f}'.format(
phase, sentiment_acc))
if phase == 'val' and epoch_loss < best_loss:
print('saving with loss of {}'.format(epoch_loss),
'improved over previous {}'.format(best_loss))
best_loss = epoch_loss
best_model_wts = copy.deepcopy(model.state_dict())
name = str(type(model))
torch.save(model.state_dict(), 'model_test.pth')
if phase == 'val' and epoch == num_epochs - 1:
recall = tp / (tp + fn)
print('recall {:.4f}'.format(recall))
print()
confusion_matrix = [[int(tp), int(fp)],[int(fn), int(tn)]]
precision = tp / (tp + fp)
f1 = 2*(precision*recall)/(precision+recall)
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val loss: {:4f}'.format(float(best_loss)))
results = {'time': time_elapsed,
'recall': recall,
'precision': precision,
'f1': f1,
'conf_matr': confusion_matrix,
'val_loss': val_loss,
'train_loss': train_loss,
'val_acc': val_acc,
'train_acc': train_acc}
# load best model weights
model.load_state_dict(best_model_wts)
return model, results | true |
435ff50bc55ff7e6a6c1f99c6e34a6d9b5da0700 | Python | Qiguanyi/Coursera-Data-Structures-and-Algorithms-Specialization | /Part II - Data Structures/week2_priority_queues_and_disjoint_sets/3_merging_tables/merging_tables.py | UTF-8 | 2,093 | 2.84375 | 3 | [] | no_license | # python3
import sys
#class Database:
# def __init__(self, row_counts):
# self.row_counts = row_counts
# self.max_row_count = max(row_counts)
# n_tables = len(row_counts)
# self.ranks = [1] * n_tables
# self.parents = list(range(n_tables))
#
# def merge(self, src, dst):
# src_parent = self.get_parent(src)
# dst_parent = self.get_parent(dst)
#
# if src_parent == dst_parent:
# return False
#
# # merge two components
# # use union by rank heuristic
# # update max_row_count with the new maximum table size
# return True
#
# def get_parent(self, table):
# # find parent and compress path
# return self.parents[table]
# Problem input: n tables with row counts, m merge queries.
n, m = map(int, sys.stdin.readline().split())
lines = list(map(int, sys.stdin.readline().split()))  # rows currently in each table
rank = [1] * n  # union-by-rank heights
parent = list(range(0, n))  # disjoint-set parent pointers
ans = [max(lines)]  # running maximum table size, boxed so merge() can mutate it
act = {}  # NOTE(review): appears unused in this file -- confirm before removing
def getParent(table):
    """Find the root of *table*'s set, compressing the path along the way.

    Reads and rewrites the module-level `parent` array (disjoint-set forest).
    """
    if table != parent[table]:
        parent[table] = getParent(parent[table])
    return parent[table]
def merge(destination, source):
    """Union the sets containing *destination* and *source* (union by rank).

    Moves all rows of the absorbed root into the surviving root (updating
    the module-level `lines`), zeroes the absorbed root's count, and keeps
    the running maximum table size in ans[0].  Returns False when the two
    tables were already in the same set, True otherwise.
    """
    realDestination, realSource = getParent(destination), getParent(source)
    lineRoot = 0
    if realDestination == realSource:
        return False
    # Attach the shorter tree under the taller one; on a tie the
    # destination root wins and its rank grows by one.
    if rank[realDestination] > rank[realSource]:
        parent[realSource] = realDestination
        lines[realDestination] += lines[realSource]
        lineRoot = lines[realDestination]
        lines[realSource] = 0
    elif rank[realDestination] == rank[realSource]:
        parent[realSource] = realDestination
        lines[realDestination] += lines[realSource]
        lineRoot = lines[realDestination]
        lines[realSource] = 0
        rank[realDestination] += 1
    else:
        parent[realDestination] = realSource
        lines[realSource] += lines[realDestination]
        lineRoot = lines[realSource]
        lines[realDestination] = 0
    # Track the largest table produced so far.
    if lineRoot > ans[0]:
        ans[0] = lineRoot
    return True
# Apply each merge query (1-based indices in the input) and report the
# largest table size seen after all merges.
for i in range(m):
    destination, source = map(int, sys.stdin.readline().split())
    merge(destination - 1, source - 1)
print(ans[0])
| true |
9fb15012951ab0b5e7707019e27e70f8ebc6b4d0 | Python | harriscw/advent_of_code_2020 | /day17/part1.py | UTF-8 | 2,283 | 3.5625 | 4 | [] | no_license | import sys
import numpy as np
from collections import Counter
###
# Data wrangling
###
#Read data
text_file = open("input.txt", "r")
lines = text_file.readlines()
mylist=[]
for i in range(len(lines)):
mylist.append(list(lines[i].strip("\n")))
print(np.array(mylist)) #print as array for formatting
#get coords into a dict
hashes=[]
for i in range(len(mylist)):
for j in range(len(mylist[i])):
if mylist[i][j]=="#":
hashes.append((0,i,j))
#initialize dictionary of coordinates with count 0
mycoords=dict()
for i in hashes:
mycoords[i]=0
###
# Define necessary functions
###
#define a function to get all 26 [(3^3)-1] 3D neighbors
def getneighbors(pt):
    """Return the 26 lattice points adjacent to *pt* in 3-D, excluding pt."""
    x, y, z = pt
    offsets = (-1, 0, 1)
    return [
        (x + dx, y + dy, z + dz)
        for dx in offsets
        for dy in offsets
        for dz in offsets
        if (dx, dy, dz) != (0, 0, 0)
    ]
def hashstayhash(coords):
    """Decide which currently-active cells survive this cycle.

    Mutates *coords* in place by adding each cell's active-neighbour count
    to its value (matching the original side effect), then returns a new
    dict (values reset to 0) of the cells whose final count is 2 or 3.
    """
    for cell in coords:
        active_neighbours = sum(1 for nb in getneighbors(cell) if nb in coords)
        coords[cell] += active_neighbours
    return {cell: 0 for cell in coords if coords[cell] in (2, 3)}
def blankturnhash(coords):
    """Decide which currently-inactive cells become active this cycle.

    Tallies, over every active cell, its inactive neighbours; a cell that
    is the neighbour of exactly three active cells activates.  Returns a
    dict mapping each newly-active cell to 0.
    """
    inactive_neighbours = [
        nb
        for cell in coords
        for nb in getneighbors(cell)
        if nb not in coords
    ]
    tally = Counter(inactive_neighbours)
    return {cell: 0 for cell, count in tally.items() if count == 3}
###
# Run it
###
# Main loop: six cycles; each cycle the surviving active cells
# (hashstayhash) plus the newly-activated cells (blankturnhash) form the
# next generation.  Prints the active-cell count after every cycle.
cnt=1
while cnt<=6:##Run everything for 6 cycles
    mycoords={**hashstayhash(mycoords),**blankturnhash(mycoords)} #create a single dictionary of only hash marks
    print(len(mycoords.keys()))
    cnt+=1
| true |
45184f8a65bac1ac4166d1153b212682722bd591 | Python | hewg2008/DeepBindRG | /python.py | UTF-8 | 295 | 2.5625 | 3 | [
"MIT"
] | permissive | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import pandas as pd
# the random data
#f=open('out.csv', 'r')
#arr=f.readlines()
# Load two space-separated columns from out.csv (no header row); the
# commented-out lines above show the manual file reading this replaced.
df = pd.read_csv('out.csv', sep=' ', header=None)
x = df.iloc[:, 0].values  # first column
y = df.iloc[:, 1].values  # second column
# Fix: the original used bare `print x` / `print y`, which is Python-2-only
# syntax; the parenthesised form works under both Python 2 and Python 3.
print(x)
print(y)
| true |
206893a35a7a782d9cde7457c5d02ef297bb0a8f | Python | sarvex/composer | /composer/algorithms/colout/colout.py | UTF-8 | 13,822 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core ColOut classes and functions."""
from __future__ import annotations
import logging
import textwrap
import weakref
from typing import Any, Callable, Tuple, TypeVar, Union
import torch
import torch.utils.data
from PIL.Image import Image as PillowImage
from torch import Tensor
from torchvision.datasets import VisionDataset
from composer.algorithms.utils.augmentation_common import image_as_type
from composer.core import Algorithm, Event, State
from composer.datasets.utils import add_vision_dataset_transform
from composer.loggers import Logger
from composer.utils import ensure_tuple
log = logging.getLogger(__name__)
ImgT = TypeVar('ImgT', torch.Tensor, PillowImage)
__all__ = ['ColOut', 'ColOutTransform', 'colout_batch']
def colout_batch(sample: Union[ImgT, Tuple[ImgT, ImgT]],
                 p_row: float = 0.15,
                 p_col: float = 0.15,
                 resize_target: Union[bool, str] = 'auto') -> Union[ImgT, Tuple[ImgT, ImgT]]:
    """Apply ColOut to an image (or image/target pair), dropping the same
    randomly chosen rows and columns from every element of ``sample``.

    See the :doc:`Method Card </method_cards/colout>` for more details.

    Args:
        sample (torch.Tensor | PIL.Image | Tuple[torch.Tensor, torch.Tensor] | Tuple[PIL.Image, PIL.Image]):
            A single image, or a 2-tuple of images. Tensors must be ``CHW`` or ``NCHW``.
        p_row (float, optional): Fraction of rows to drop (along H). Default: ``0.15``.
        p_col (float, optional): Fraction of columns to drop (along W). Default: ``0.15``.
        resize_target (bool | str, optional): Whether to crop the second element too.
            ``'auto'`` crops it only when its spatial size matches the first. Default: ``'auto'``.

    Returns:
        torch.Tensor | PIL.Image | Tuple[...]: smaller image(s) with rows/columns dropped.
    """
    sample = ensure_tuple(sample)
    if len(sample) > 2:
        raise ValueError('sample must either be single object or a tuple with a max length of 2')

    first = sample[0]
    first_tensor = image_as_type(first, torch.Tensor)

    # Choose the kept indices once so every image in the sample receives the
    # identical crop pattern.  Indices must be sorted for tensor slicing.
    n_rows, n_cols = first_tensor.shape[-2], first_tensor.shape[-1]
    rows_kept = sorted(torch.randperm(n_rows)[:int((1 - p_row) * n_rows)].numpy())
    cols_kept = sorted(torch.randperm(n_cols)[:int((1 - p_col) * n_cols)].numpy())

    def _drop(original, tensor):
        """Crop ``tensor`` and convert it back to ``original``'s type."""
        cropped = tensor[..., rows_kept, :][..., :, cols_kept]
        # Strip any batch dim added during conversion; the spatial size changed,
        # so reshape to the trailing three dims rather than the input shape.
        if not isinstance(original, torch.Tensor) or original.ndim < cropped.ndim:
            cropped = cropped.reshape(cropped.shape[-3:])
        return image_as_type(cropped, type(original))

    first_out = _drop(first, first_tensor)

    if not _should_resize_target(sample, resize_target):
        return first_out

    second = sample[1]
    second_out = _drop(second, image_as_type(second, torch.Tensor))
    return first_out, second_out
class ColOutTransform:
    """Torchvision-style transform applying ColOut: random rows and columns
    are dropped from one image, or identically from a pair of images.

    See the :doc:`Method Card </method_cards/colout>` for more details.

    Args:
        p_row (float, optional): Fraction of rows to drop (along H). Default: ``0.15``.
        p_col (float, optional): Fraction of columns to drop (along W). Default: ``0.15``.
        resize_target (bool | str, optional): Whether to crop the target too;
            ``'auto'`` decides based on matching spatial size. Default: ``'auto'``.
    """

    def __init__(self, p_row: float = 0.15, p_col: float = 0.15, resize_target: Union[bool, str] = 'auto'):
        self.p_row = p_row
        self.p_col = p_col
        self.resize_target = resize_target

    def __call__(self, sample: Union[ImgT, Tuple[ImgT, ImgT]]) -> Union[ImgT, Tuple[ImgT, ImgT]]:
        """Drop random rows and columns from up to two images.

        Args:
            sample: a single image or a 2-tuple of images (tensors or PIL images).

        Returns:
            The cropped image(s).
        """
        sample = ensure_tuple(sample)
        if len(sample) > 2:
            raise ValueError(f'Colout transform does not support sample tuple of length {len(sample)} > 2')
        return colout_batch(sample, p_row=self.p_row, p_col=self.p_col, resize_target=self.resize_target)
class ColOut(Algorithm):
    """Drops a random fraction of the rows and columns of input images (and,
    optionally, of target images), shrinking them while keeping their content.

    If ``batch`` is True (the default), the algorithm runs on
    :attr:`.Event.AFTER_DATALOADER` and crops the whole batch identically.
    Otherwise it runs on :attr:`.Event.FIT_START` and inserts a per-sample
    dataset transform; it is a no-op for datasets already transformed.

    See the :doc:`Method Card </method_cards/colout>` for more details.

    Example:
        .. testcode::

            from composer.algorithms import ColOut
            from composer.trainer import Trainer

            colout_algorithm = ColOut(p_row=0.15, p_col=0.15, batch=True)
            trainer = Trainer(
                model=model,
                train_dataloader=train_dataloader,
                eval_dataloader=eval_dataloader,
                max_duration="1ep",
                algorithms=[colout_algorithm],
                optimizers=[optimizer]
            )

    Args:
        p_row (float, optional): Fraction of rows to drop (drop along H). Default: ``0.15``.
        p_col (float, optional): Fraction of columns to drop (drop along W). Default: ``0.15``.
        batch (bool, optional): Run ColOut at the batch level. Default: ``True``.
        resize_target (bool | str, optional): Whether to resize the target in addition to the input.
            ``'auto'`` resizes the target only when it shares the input's spatial size. Default: ``'auto'``.
        input_key (str | int | Tuple[Callable, Callable] | Any, optional): Key (or get/set function
            pair, getter first) indexing the input in the batch. Default: ``0``.
        target_key (str | int | Tuple[Callable, Callable] | Any, optional): Key (or get/set function
            pair, getter first) indexing the target in the batch. Default: ``1``.
    """

    def __init__(
        self,
        p_row: float = 0.15,
        p_col: float = 0.15,
        batch: bool = True,
        resize_target: Union[bool, str] = 'auto',
        input_key: Union[str, int, Tuple[Callable, Callable], Any] = 0,
        target_key: Union[str, int, Tuple[Callable, Callable], Any] = 1,
    ):
        if not (0 <= p_col <= 1):
            raise ValueError('p_col must be between 0 and 1')
        if not (0 <= p_row <= 1):
            raise ValueError('p_row must be between 0 and 1')
        # Fixed: the original condition only rejected *strings* other than
        # 'auto', so any other non-bool value (e.g. an int) slipped through.
        if not isinstance(resize_target, bool) and resize_target != 'auto':
            raise ValueError(f'resize_target must be a boolean or ``auto``. Received: {resize_target}')
        if resize_target is True and not batch:
            # Fixed typo in the error message ('support' -> 'supported').
            raise NotImplementedError('Resizing targets is not currently supported with batch=``False``')

        self.p_row = p_row
        self.p_col = p_col
        self.batch = batch
        self.resize_target = resize_target
        # Datasets already wrapped with the transform (sample mode only);
        # weak references so this set does not keep datasets alive.
        self._transformed_datasets = weakref.WeakSet()
        self.input_key, self.target_key = input_key, target_key

    def match(self, event: Event, state: State) -> bool:
        """Batch mode fires on AFTER_DATALOADER; sample mode on FIT_START, once per dataset."""
        if self.batch:
            return event == Event.AFTER_DATALOADER
        if event != Event.FIT_START:
            return False
        assert state.dataloader is not None, 'dataloader should be defined on fit start'
        if not isinstance(state.dataloader, torch.utils.data.DataLoader):
            raise TypeError(f'{type(self).__name__} requires a PyTorch dataloader.')
        return state.dataloader.dataset not in self._transformed_datasets

    def _apply_sample(self, state: State) -> None:
        """Add the ColOut dataset transform to the dataloader."""
        assert isinstance(state.dataloader, torch.utils.data.DataLoader), 'dataloader type checked on match()'
        dataset = state.dataloader.dataset
        transform = ColOutTransform(p_row=self.p_row, p_col=self.p_col, resize_target=self.resize_target)
        if not isinstance(dataset, VisionDataset):
            raise TypeError(
                textwrap.dedent(f"""\
                    To use {type(self).__name__}, the dataset must be a
                    {VisionDataset.__qualname__}, not {type(dataset).__name__}"""))
        add_vision_dataset_transform(dataset, transform, is_tensor_transform=False)
        # Remember the dataset so match() never wraps it a second time.
        self._transformed_datasets.add(dataset)

    def _apply_batch(self, state: State) -> None:
        """Transform a batch of images using the ColOut augmentation."""
        inputs, target = state.batch_get_item(key=self.input_key), state.batch_get_item(key=self.target_key)
        assert isinstance(inputs, Tensor) and isinstance(target, Tensor), \
            'Inputs and target must be of type torch.Tensor for batch-wise ColOut'

        sample = (inputs, target)
        resize_target = _should_resize_target(sample, resize_target=self.resize_target)
        colout_result = colout_batch(sample, p_row=self.p_row, p_col=self.p_col, resize_target=resize_target)

        # colout_batch returns a tuple only when the target was resized too.
        if resize_target:
            new_input, new_target = colout_result
            state.batch_set_item(self.input_key, new_input)
            state.batch_set_item(self.target_key, new_target)
        else:
            new_input = colout_result
            state.batch_set_item(self.input_key, new_input)

    def apply(self, event: Event, state: State, logger: Logger) -> None:
        """Dispatch to batch-level or sample-level application."""
        if self.batch:
            self._apply_batch(state)
        else:
            self._apply_sample(state)
def _should_resize_target(sample: Union[ImgT, Tuple[ImgT, ImgT]], resize_target: Union[bool, str]) -> bool:
    """Decide whether both elements of ``sample`` should be cropped.

    Explicit booleans win; ``'auto'`` crops the second element only when its
    spatial size equals the first element's.
    """
    parts = ensure_tuple(sample)
    if len(parts) > 2:
        raise ValueError('sample must either be single object or a tuple with a max length of 2')
    if isinstance(resize_target, bool):
        return resize_target
    if len(parts) == 1:
        return False
    if isinstance(resize_target, str) and resize_target.lower() == 'auto':
        first = parts[0]
        # PIL's .size is (W, H); reverse it to compare against tensor (H, W).
        spatial = first.shape[-2:] if isinstance(first, torch.Tensor) else first.size[::-1]
        second = parts[1]
        if isinstance(second, PillowImage):
            return second.size[::-1] == spatial
        return second.ndim > 2 and second.shape[-2:] == spatial
    raise ValueError("resize_target must either be a boolean or 'auto'")
| true |
2e977066495d3861258edb19acc7a0290f1f814a | Python | fanliu1991/LeetCodeProblems | /93_Restore_IP_Addresses.py | UTF-8 | 2,298 | 3.578125 | 4 | [] | no_license | '''
Given a string containing only digits,
restore it by returning all possible valid IP address combinations.
Example:
Input: "25525511135"
Output: ["255.255.11.135", "255.255.111.35"]
'''
import sys, optparse, os
class Solution(object):
    def restoreIpAddresses(self, s):
        """Return every valid dotted-quad IP address that can be formed by
        inserting three dots into the digit string ``s``.

        :type s: str
        :rtype: List[str]
        """
        results = []

        def build(remaining, parts):
            # Four octets chosen: valid only if the whole input was consumed.
            if len(parts) == 4:
                if not remaining:
                    results.append('.'.join(parts))
                return
            for size in (1, 2, 3):
                if size > len(remaining):
                    break
                chunk = remaining[:size]
                # Multi-digit octets may not have a leading zero and must be <= 255.
                if size > 1 and (chunk[0] == '0' or int(chunk) > 255):
                    continue
                build(remaining[size:], parts + [chunk])

        build(s, [])
        return results
# Demo run.  (Fixed: `print result` is Python-2 syntax; use the function form.)
s = "25525511135"
solution = Solution()
result = solution.restoreIpAddresses(s)
print(result)
'''
Complexity Analysis
Time complexity : O(3^n).
DFS algorithm, at every node there are 3 possible sub paths.
Space complexity : O(3^n).
Extra space is used to store split nodes.
'''
| true |
f3d3639d3bd8352cf160b99b7feaeb7356b7488f | Python | CaueVieira/curso | /Cap 4/Exercício 4.5.py | UTF-8 | 472 | 4.03125 | 4 | [] | no_license | """Escreva um programa que pergunte a distância que um passageiro deseja percorrer em km. Calcule o preço da passagem, cobrando R$ 0,50 por km
para viagens de até 200 km e R$ 0,45 para vigens mais longas"""
# Ticket price calculator: R$0.50/km for trips up to 200 km,
# R$0.45/km for longer trips.
print ("Calculadora de preços com base em Kms percorridos em viagem")
kms = float(input("Informe distância em Kms:"))
preco = 0
if kms <= 200:
    preco = kms * 0.5   # short-trip rate
else:
    preco = kms * 0.45  # discounted long-trip rate
print ("O preço da passagem é de R$%5.2f" % (preco))
| true |
ec81b2316981f9dccbe5db2a4857c8901e530d39 | Python | kellystroh/regression-tools | /regression_tools/dftransformers.py | UTF-8 | 4,072 | 3.140625 | 3 | [] | no_license | import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler as SS
from sklearn.base import BaseEstimator, TransformerMixin
class ColumnSelector(BaseEstimator, TransformerMixin):
    """Transformer that selects a column in a numpy array or DataFrame
    by index or name.

    Parameters
    ----------
    idxs: list of int, optional
        Positional indices of the columns to select.
    name: str, optional
        Column name to select (DataFrame input only).
    """
    def __init__(self, idxs=None, name=None):
        # Fixed: the original assigned `np.asarray(idxs)` and then immediately
        # overwrote it with the raw value — dead code.  Keep idxs as given;
        # the truthiness check in transform() relies on list/None semantics.
        self.idxs = idxs
        self.name = name

    def fit(self, *args, **kwargs):
        # Stateless: nothing to learn.
        return self

    def transform(self, X, **transform_params):
        # Need to treat pandas data frames and numpy arrays slightly differently.
        if isinstance(X, pd.DataFrame) and self.idxs:
            return X.iloc[:, self.idxs]
        if isinstance(X, pd.DataFrame) and self.name:
            return X[self.name]
        return X[:, self.idxs]
class Identity(TransformerMixin):
    """Transformer that does nothing, simply passes data through unchanged."""
    def fit(self, X, *args, **kwargs):
        # Stateless: nothing to learn.
        return self
    def transform(self, X, *args, **kwargs):
        # Return the input untouched.
        return X
class FeatureUnion(TransformerMixin):
    """Just like sklearn.FeatureUnion, but also works for pandas.DataFrame
    objects.

    Parameters
    ----------
    transformer_list: list of (name, Transformer) pairs.
    """

    def __init__(self, transformer_list):
        self.transformer_list = transformer_list

    def fit(self, X, y=None):
        for _, transformer in self.transformer_list:
            transformer.fit(X, y)
        return self

    def transform(self, X, *args, **kwargs):
        transformed = [transformer.transform(X) for _, transformer in self.transformer_list]
        # pandas inputs concatenate column-wise with index alignment;
        # everything else is stacked as plain arrays.
        if isinstance(X, (pd.DataFrame, pd.Series)):
            return pd.concat(transformed, axis=1)
        return np.hstack(transformed)
class MapFeature(TransformerMixin):
    """Map a function across a single feature.

    Parameters
    ----------
    f: function
        The function to map across the array or series.
    name: string
        Name given to the transformed series.
    """

    def __init__(self, f, name):
        self.f = f
        self.name = name

    def fit(self, *args, **kwargs):
        return self

    def transform(self, X, *args, **kwargs):
        if isinstance(X, pd.DataFrame):
            raise ValueError("You must select a single column of a DataFrame before using MapFeature")
        mapped = self.f(X).astype(float)
        if isinstance(X, pd.Series):
            return pd.Series(mapped, index=X.index, name=self.name)
        return mapped
class StandardScaler(TransformerMixin):
    """Standardize all the columns of a np.array, pd.DataFrame or pd.Series."""

    def __init__(self):
        self._scaler = SS()

    def fit(self, X, *args, **kwargs):
        if isinstance(X, pd.DataFrame):
            values = X.values
        elif isinstance(X, pd.Series):
            # sklearn's scaler requires 2-d input; a Series is 1-d.
            values = X.values.reshape(-1, 1)
        else:
            values = X
        self._scaler.fit(values)
        return self

    def transform(self, X, *args, **kwargs):
        if isinstance(X, pd.DataFrame):
            return pd.DataFrame(
                self._scaler.transform(X.values),
                columns=X.columns,
                index=X.index)
        if isinstance(X, pd.Series):
            # Scale as a column vector, then flatten back to 1-d for a Series.
            scaled = self._scaler.transform(X.values.reshape(-1, 1)).reshape(-1)
            return pd.Series(scaled, name=X.name, index=X.index)
        return self._scaler.transform(X)
class Intercept(TransformerMixin):
    """Produce a constant-1.0 intercept column shaped like the input."""

    def fit(self, *args, **kwargs):
        return self

    def transform(self, X, *args, **kwargs):
        ones = np.ones(X.shape[0])
        if isinstance(X, (pd.DataFrame, pd.Series)):
            return pd.Series(ones, index=X.index, name="intercept")
        return ones
| true |
983837142b953c52fb9ca39ee10da18cb6014ff1 | Python | jackh423/python | /CIS41B/SqLite3/Delete.py | UTF-8 | 1,001 | 3.75 | 4 | [
"Apache-2.0"
] | permissive | '''
To convert this delete code to be a member function of a class:
1. The sqliteConnection should be a data member of the class and already connected
2. The function should take a string parameter for the delete query
3. It should return 'true' if deleted, otherwise 'false'
'''
import sqlite3
def deleteRecord():
try:
sqliteConnection = sqlite3.connect('SQLite_Python.db')
cursor = sqliteConnection.cursor()
print("Connected to SQLite")
# Deleting single record now
sql_delete_query = """DELETE from Database where id = 2"""
cursor.execute(sql_delete_query)
sqliteConnection.commit()
print("Record deleted successfully ")
cursor.close()
except sqlite3.Error as error:
print("Failed to delete record from sqlite table", error)
finally:
if (sqliteConnection):
sqliteConnection.close()
print("the sqlite connection is closed")
deleteRecord() | true |
8b8163d7268ba5130500a986c55f43d7e2199b1b | Python | saarraz/fakebook | /model.py | UTF-8 | 11,512 | 2.578125 | 3 | [] | no_license | import abc
import random
import time
from typing import Optional, List, Union
import os
from PIL import Image as PILImage
import datetime
next_id = 0
IMAGE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'images')
class Node(object):
    """Base class for all model objects: assigns unique ids and keeps a
    per-subclass registry of every created instance."""
    __metaclass__ = abc.ABCMeta  # NOTE: Python-2 style; has no effect on Python 3

    classes = []    # every concrete Node subclass that has been instantiated
    _next_id = 0    # id counter shared by all nodes (replaces the module global)

    @staticmethod
    def generate_id():
        """Return a fresh unique id (monotonically increasing)."""
        # Fixed: use a class attribute instead of the loose `next_id` global.
        Node._next_id += 1
        return Node._next_id

    @staticmethod
    def new_node_class(new_class):
        """Register a new subclass and give it its own id -> instance map."""
        Node.classes.append(new_class)
        new_class._all = {}

    @classmethod
    def all(cls):
        """Return all known instances of this class (a dict view)."""
        if not hasattr(cls, '_all'):
            return []
        return cls._all.values()

    @classmethod
    def from_id(cls, id: int):
        """Look up an instance by id; searches every subclass when called on Node.

        Raises:
            KeyError: if no object with that id exists.
        """
        if cls is Node:
            for clazz in cls.classes:
                # Fixed: membership must be tested against the id keys; the
                # original tested `id in clazz.all()`, comparing the int to
                # instances via Node.__eq__ (which reads `other.id`) and so
                # never matched / raised AttributeError.
                if id in clazz._all:
                    return clazz._all[id]
        elif hasattr(cls, '_all') and id in cls._all:
            return cls._all[id]
        raise KeyError('No such object')

    def __init__(self):
        # Lazily register the concrete subclass on first instantiation.
        if self.__class__ not in self.classes:
            self.new_node_class(self.__class__)
        self.id = self.generate_id()
        self.__class__._all[self.id] = self

    def __eq__(self, other):
        return self.id == other.id

    def __hash__(self):
        return hash(self.id)

    @abc.abstractmethod
    def to_json(self):
        raise NotImplementedError
class Reaction(object):
    """A single user's reaction (like/love/...) to some target."""

    LIKE = 0
    SAD = 1
    LOVE = 2
    HAHA = 3
    DISLIKE = 4
    SHIT = 5
    THROW_UP = 6

    NEGATIVE_TYPES = [THROW_UP, SHIT, DISLIKE, SAD]
    TYPES = [LIKE, SAD, LOVE, HAHA, DISLIKE, SHIT, THROW_UP]

    def __init__(self, user, target, type, time):
        self.user = user
        self.target = target
        self.type = type
        self.time = time

    def to_json(self):
        """Serialize as a plain dict with a POSIX timestamp and the user's id."""
        timestamp = time.mktime(self.time.timetuple())
        return {'type': self.type, 'time': timestamp, 'user': self.user.id}
class User(Node):
    """A person on the network."""

    MAX_BIRTHDAY_UPSETNESS = 0
    GENDER_MALE = 0
    GENDER_FEMALE = 1

    def __init__(self, full_name, profile_picture, gender):
        super(User, self).__init__()
        self.gender = gender
        self.full_name = full_name
        self.profile_picture = profile_picture
        self.birthday = None
        self.birthday_upsetness = 0

    def to_json(self):
        return {
            'id': self.id,
            'full_name': self.full_name,
            'profile_picture': self.profile_picture.id,
            'gender': self.gender,
        }

    def is_male(self):
        return self.gender == self.GENDER_MALE

    # The single "logged in" user; set once via set_main_user().
    _main_user = None

    @classmethod
    def main_user(cls):
        assert cls._main_user is not None, 'Main user not set'
        return cls._main_user

    @classmethod
    def set_main_user(cls, user):
        cls._main_user = user
class Image(Node):
    """An image stored on disk, referenced by its file path."""
    # NOTE(review): to_json() is not overridden here even though Node declares
    # it abstract — confirm whether Image is ever serialized.

    def __init__(self, path):
        super(Image, self).__init__()
        self.path = path

    @classmethod
    def from_file(cls, path):
        """Alternate constructor kept for call-site readability."""
        return Image(path)
class Page(Node):
    """A fan/business page with a display name and a profile picture."""

    def __init__(self, name: str, profile_picture: Image):
        super(Page, self).__init__()
        self.name = name
        self.profile_picture = profile_picture

    def to_json(self):
        return {
            'id': self.id,
            'name': self.name,
            'profile_picture': self.profile_picture.id,
        }
class Reactable(Node):
    """Base for anything users can react to or comment on (posts, comments)."""
    def __init__(self):
        super(Reactable, self).__init__()
        self.reactions = {}  # presumably keyed by user -> Reaction; confirm against writers
        self.comments = []   # top-level Comment objects attached to this object
    @abc.abstractmethod
    def target_string(self):
        # Human-readable description used in notification text.
        raise NotImplementedError
class Post(Reactable):
    """A status update or photo posted by a user or a page, optionally on
    someone else's timeline."""
    def __init__(self, _time: datetime.datetime, _text: str, _img: Optional[Image], poster: Union[User, Page],
                 timeline: Optional[Union[User, Page]]):
        super(Post, self).__init__()
        self.image = _img
        self.text = _text
        self.time = _time
        self.poster = poster
        self.timeline = timeline
        self.views = None  # view count; apparently filled in elsewhere
        # self.sentiment =
    def to_json(self):
        """Serialize the post, flattening reactions and comments recursively."""
        return {
            'id': self.id,
            # Exactly one of user_id / page_id is set, depending on who posted.
            'user_id': self.poster.id if isinstance(self.poster, User) else None,
            'page_id': self.poster.id if isinstance(self.poster, Page) else None,
            'time': time.mktime(self.time.timetuple()),
            'text': self.text,
            'image': self.image.id if self.image is not None else None,
            'reactions': [reaction.to_json() for reaction in self.reactions.values()],
            'comments': [comment.to_json() for comment in self.comments],
            'views': self.views
        }
    def target_string(self):
        # Builds phrases such as "your photo", "$Alice$'s post on $Bob$'s timeline";
        # "$...$" delimiters mark user/page names for downstream rendering.
        return '{whose} {post}{where}'.format(whose='your' if self.poster == User.main_user()
                                              else '${}$\'s'.format(self.poster.full_name),
                                              post='post' if self.image is None else 'photo',
                                              where='' if self.timeline is None
                                              else 'on ${}$\'s timeline'.format(self.timeline.full_name
                                                                                if isinstance(self.timeline, User)
                                                                                else self.timeline.name))
class Comment(Reactable):
    """A comment (or reply) by a user on a Reactable target."""
    def __init__(self, _text, _user, _parent : Reactable):
        super(Comment, self).__init__()
        self.text = _text
        self.user = _user
        self.target = _parent
    def vote(self, user, type):
        # FIXME(review): self.reactions is a dict (see Reactable.__init__),
        # which has no .add(); calling this raises AttributeError.
        self.reactions.add(user, type)
    def remove_vote(self, user, type):
        # FIXME(review): same problem — dict has no .remove().
        self.reactions.remove(user, type)
    def to_json(self):
        # NOTE(review): this iterates self.reactions (the dict *keys*) and calls
        # .to_json() on them, unlike Post.to_json which uses .values() — confirm
        # which is intended.
        return {
            'text': self.text,
            'reactions': [reaction.to_json() for reaction in self.reactions],
            'user': self.user.id,
            'replies': [reply.to_json() for reply in self.comments]
        }
    def target_string(self):
        # e.g. "your comment on $Alice$'s post" / "$Bob$'s reply on ...".
        return '{whose} {comment} on {target}'.format(whose='your' if self.user == User.main_user()
                                                      else '${}$\'s'.format(self.user.full_name),
                                                      comment='comment' if isinstance(self.target, Post) else 'reply',
                                                      target=self.target.target_string())
class Notification(Node):
    """Base class for notifications shown to the main user."""
    def __init__(self, time : datetime.datetime):
        super(Notification, self).__init__()
        self.read = False   # has the user seen this notification yet
        self._time = time
    def time(self):
        # Subclasses may override (e.g. ActivityNotification derives it).
        return self._time
    def kind(self):
        # KIND is declared on each concrete subclass, not here.
        return self.__class__.KIND
    @abc.abstractmethod
    def format(self):
        # Human-readable notification text.
        raise NotImplementedError
    @abc.abstractmethod
    def image(self):
        # Image (usually a profile picture) to show next to the text.
        raise NotImplementedError
    def to_json(self):
        # `time` here is the module; self.time() is the accessor above.
        return {
            'id': self.id,
            'time': time.mktime(self.time().timetuple()),
            'kind': self.__class__.KIND,
            'text': self.format(),
            'image': self.image().id,
            'read': self.read
        }
def users_string(users):
    """Render users as '$a$', '$a$ and $b$', or '$a$ and N others'."""
    count = len(users)
    if count == 1:
        return '${}$'.format(users[0])
    if count == 2:
        return '${}$ and ${}$'.format(users[0], users[1])
    # Three or more: name the first and count the rest.
    return '${}$ and {} others'.format(users[0], count - 1)
class BirthdayNotification(Notification):
    """Notification that one or more friends have a birthday today."""
    KIND = 0
    # Snarky call-to-action templates; {them}/{they} are filled by gender/count.
    ACTION_CALLS = [
        'Try to ignore this elegantly.',
        'Say happy birthday then continue ignoring {them}.',
        'Copy your birthday wishes from last year.',
        'Just so you know, {they} didn\'t wish you anything on your birthday.'
    ]
    def __init__(self, users: List[User], time: datetime.datetime):
        super(BirthdayNotification, self).__init__(time)
        self.date = time.date()
        self.users = users
        self.action_call = random.choice(self.ACTION_CALLS)  # picked once, fixed per notification
    def format_action_call(self):
        # Substitute pronouns: plural for several users, gendered for one.
        return self.action_call.format(them='them' if len(self.users) > 1
                                       else {User.GENDER_FEMALE: 'her',
                                             User.GENDER_MALE: 'him'}[self.users[0].gender],
                                       they='they' if len(self.users) > 1
                                       else {User.GENDER_FEMALE: 'she',
                                             User.GENDER_MALE: 'he'}[self.users[0].gender])
    def format(self):
        if len(self.users) == 1:
            return 'It\'s ${}$\'s birthday today. {}'.format(self.users[0].full_name, self.format_action_call())
        elif len(self.users) == 2:
            return '${}$ and ${}$ have birthdays today. {}'.format(self.users[0].full_name, self.users[1].full_name,
                                                                   self.format_action_call())
        else:
            # NOTE(review): this branch formats the User object itself rather
            # than .full_name as the other branches do — looks like a bug.
            return '${}$ and {} others have birthdays today. {}'.format(self.users[0], len(self.users) - 1,
                                                                        self.format_action_call())
    def image(self):
        # Show the first birthday person's picture.
        return self.users[0].profile_picture
class PostNotification(Notification):
    """Notification that someone published a post or photo."""
    KIND = 1
    def __init__(self, post : Post):
        super(PostNotification, self).__init__(post.time)
        self.post = post
    def format(self):
        if self.post.image is not None:
            # NOTE(review): the `their=` argument is unused by this template,
            # and its condition tests the poster's truthiness, not gender.
            return '${user}$ uploaded a photo.'.format(user=self.post.poster.full_name,
                                                       their='his' if self.post.poster else 'her')
        return '${user}$ updated {their} status.'.format(user=self.post.poster.full_name,
                                                         their={User.GENDER_MALE: 'his', User.GENDER_FEMALE: 'her'}
                                                         [self.post.poster.gender])
    def image(self):
        return self.post.poster.profile_picture
# An "activity" is anything that can be aggregated into one notification.
Activity = Union[Comment, Reaction]
class ActivityNotification(Notification):
    """Aggregated notification for several reactions or comments that all
    share one target (e.g. '$a$ and 2 others liked your post')."""
    def __init__(self, activities: List[Union[Activity]]):
        # Base time is None; time() below derives it from the activities.
        super(ActivityNotification, self).__init__(None)
        # All activities must share one target and be of one kind.
        assert len(set(activity.target for activity in activities)) == 1
        assert len(set(type(activity) for activity in activities)) == 1
        self.activities = activities
    def kind(self):
        # NOTE(review): no KIND class attribute is defined, so the inherited
        # to_json() (which reads self.__class__.KIND) would fail — confirm.
        if isinstance(self.activities[0], Reaction):
            return 2
        else:
            return 3
    @property
    def target(self):
        return self.activities[0].target
    def users(self):
        # NOTE(review): always empty and apparently unused — confirm intent.
        return set()
    def time(self):
        # Most recent activity determines the notification time.
        return max(activity.time for activity in self.activities)
    def format(self):
        users = set(activity.user for activity in self.activities)
        if isinstance(self.activities[0], Reaction):
            if all(reaction.type == Reaction.LIKE for reaction in self.activities):
                what = 'liked'
            else:
                what = 'reacted to'
        else:
            assert isinstance(self.activities[0], Comment)
            if isinstance(self.target, Comment):
                what = 'replied to'
            else:
                what = 'commented on'
        # NOTE(review): users_string indexes its argument; passing a set here
        # would raise TypeError — confirm expected input type.
        return '{users} {what} {target}'.format(users=users_string(users), what=what,
                                                target=self.target.target_string())
    def image(self):
        return self.activities[0].user.profile_picture
# Module-level collections, populated elsewhere by the demo/bootstrap code.
random_people = []
friends = []
user_feed = []
notifications = []
3fa4b66a67446dd6e2e21c7beb7f5b161e8db7db | Python | gabriellaec/desoft-analise-exercicios | /backup/user_213/ch47_2019_04_02_23_09_30_567932.py | UTF-8 | 145 | 2.96875 | 3 | [] | no_license | mes=int(input('qual o n do mes? ')
meses=['jan','fev','mar','abr','maio','jun','jul','ago','set','out','nov','dez']
print (meses[mes-1])
| true |
21064a1765d577574e2d836bd97507fa05c20ecd | Python | tdworowy/PythonPlayground | /Playground/other_staff/prime_number_staff.py | UTF-8 | 311 | 3.1875 | 3 | [] | no_license | from math import sqrt
from itertools import islice, count
def is_prime(n):
    """Trial-division primality test over candidate divisors 2..floor(sqrt(n))."""
    if n <= 1:
        return False
    for divisor in islice(count(2), int(sqrt(n) - 1)):
        if n % divisor == 0:
            return False
    return True
if __name__ == "__main__":
    # Euler's polynomial x^2 + x + 41 yields primes for small x;
    # find and print the first x where it fails.
    for x in range(1, 1000000):
        candidate = x * x + x + 41
        if not is_prime(candidate):
            print(x)
            break
| true |
e037b0a5538c965fdc0697f57d0f5dfe172272b9 | Python | alexandraback/datacollection | /solutions_5630113748090880_0/Python/Kenchy/prob2.py | UTF-8 | 668 | 3.078125 | 3 | [] | no_license | __author__ = 'ligenjian'
if __name__ == '__main__':
    # Python 2 script.  For each of t test cases: read 2n-1 rows of numbers,
    # and report (sorted) the values that occur an odd number of times.
    input = open('input.txt', 'r')
    output = open('output.txt', 'w')
    t = int(input.readline())
    for i in range(t):
        total_number = {}  # number -> True when seen an odd number of times so far
        n = int(input.readline())
        for line in range(2 * n - 1):
            numbers = map(int, input.readline().strip().split(' '))
            for number in numbers:
                total_number.setdefault(number, False)
                # Toggle parity on every occurrence.
                total_number[number] = not total_number[number]
        # Keep only odd-count numbers (Python-2 tuple-unpacking lambda).
        result = sorted(map(lambda x: x[0], filter(lambda (k,v): v,total_number.items())))
        print>>output, 'Case #%d: %s' % ((i + 1), ' '.join(map(str, result)))
| true |
5caf567f6737e12fe80246366a42396d1c79a338 | Python | mcmoralesr/Learning.Python | /Code.Forces/P0334A_Candy_Bags.py | UTF-8 | 377 | 2.8125 | 3 | [] | no_license | __copyright__ = ''
__author__ = 'Son-Huy TRAN'
__email__ = "sonhuytran@gmail.com"
__doc__ = 'http://codeforces.com/problemset/problem/334/A'
__version__ = '1.0'
# Codeforces 334A: distribute bags 1..n*n (bag k holds k candies) among n
# brothers so every brother gets the same total: pair bag j with bag
# n*n + 1 - j, and give each brother n/2 such pairs.
n = int(input())
pair_sum = n * n + 1
half = n // 2
for brother in range(1, n + 1):
    low_bags = list(range(half * (brother - 1) + 1, half * brother + 1))
    high_bags = [pair_sum - bag for bag in low_bags]
    print(' '.join(map(str, low_bags + high_bags)))
ed659d5c9581ec4a813ad6fe455b0e6f50ea4e18 | Python | Halldor-Hrafn/PythonShenanigans | /Forrit41.py | UTF-8 | 283 | 3.46875 | 3 | [] | no_license | file = open('nofn2.txt')
# Scan the opened names file, collecting very long names and names with at
# least four parts (three or more spaces).
longName = []
fourName = []
for line in file:
    word = line.strip()
    if len(word) >= 30:
        longName.append(word)     # unusually long full names
    if word.count(' ') >= 3:
        fourName.append(word)     # names with four or more words
file.close()  # fixed: the file handle was never released
print(longName)
print('******************************')
print(fourName)
| true |
19a62541a9286f1b7cd4f34978c5ac3a40465589 | Python | a-valado/python | /Práctica 5/Práctica 5.7.py | UTF-8 | 244 | 3.796875 | 4 | [] | no_license | #Albert Valado Pujol
# Práctica 5 - Ejercicio 7
# Read a triangle height and draw it as rows of '*', each row one star
# shorter than the previous one.
altura = int(input("Introduce la altura del triángulo.\n"))
for fila in range(altura, 0, -1):
    print("*" * fila)
input(" ")  # pause so the console window stays open
| true |
c7ce636459e8f3f64aa5011efb05623d6a4c4b35 | Python | EvanNingduoZhao/learning_while_recording | /plotly_study/plotly_experiment.py | UTF-8 | 808 | 2.640625 | 3 | [] | no_license | import plotly as py
import plotly.graph_objs as go
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the sales workbook and plot the Sales column with matplotlib.
orders = pd.read_excel('/Users/apple/PycharmProjects/qtm385/plotly_study/sales.xls')
# Temporarily widen pandas' display limits while printing the column.
with pd.option_context('display.max_rows', 10, 'display.max_columns', 10):  # more options can be specified also
    print(orders.Sales)
plt.plot(orders.Sales)
plt.show()
# x = np.linspace(0,np.pi,1000)
#
# print(x)
#
# layout = go.Layout(
# title='example',
# yaxis=dict(
# title='volts'
# ),
# xaxis=dict(
# title='nanoseconds'
# )
# )
#
# trace1 = go.Scatter(
# x=x,
# y=np.sin(x),
# mode='lines',
# name='sin(x)',
# line = dict(
# shape='spline'
# )
# )
#
# fig = go.Figure(data=[trace1],layout=layout)
# py.offline.plot(fig)
| true |
5974d0a73444a83c7221a083835cc637c3bf95f1 | Python | offero/algs | /dcp629_kpartitions.py | UTF-8 | 1,155 | 4.0625 | 4 | [] | no_license | '''
# Definition
Given an array of numbers N and an integer k, your task is to split N into k partitions such that
the maximum sum of any partition is minimized. Return this sum.
For example, given N = [5, 1, 2, 7, 3, 4] and k = 3, you should return 8, since the optimal
partition is [5, 1, 2], [7], [3, 4].
# Solution strategy
Break the problem into sub-problems, solve sub-problems, combine.
max sum of parts = max(sum of first part, max sum of rest of parts)
f(arr, k) = max(sum(arr_up_to_i, f(arr_past_i, k-1))) for every i in arr
NOTE: we could make this more efficient by not copying the array with slices and just passing in the
index to the function.
'''
def kpartsum(arr, k):
    """Split *arr* into up to *k* contiguous partitions so that the maximum
    partition sum is minimized; return that minimized maximum.

    Same recurrence as the naive version, but memoized on (start, k) and
    using prefix sums, so no sub-lists are copied (the inefficiency the
    original noted in its own docstring).

    Args:
        arr: sequence of numbers (may be empty -> returns 0).
        k: number of partitions.
    """
    from functools import lru_cache

    if not arr:
        return 0

    # prefix[i] == sum(arr[:i]); range sum arr[a:b] == prefix[b] - prefix[a].
    prefix = [0]
    for value in arr:
        prefix.append(prefix[-1] + value)
    n = len(arr)

    @lru_cache(maxsize=None)
    def best(start, parts):
        if start == n:
            return 0
        if parts == 1:
            # One partition left: it must take everything that remains.
            return prefix[n] - prefix[start]
        result = None
        # First partition is arr[start:split]; recurse on the remainder.
        for split in range(start + 1, n + 1):
            head = prefix[split] - prefix[start]
            candidate = max(head, best(split, parts - 1))
            if result is None or candidate < result:
                result = candidate
        return result

    return best(0, k)

if __name__ == "__main__":
    print(kpartsum([5, 1, 2, 7, 3, 4], 3))
d410f9eca0d23ab27405aa84259534c240e5a5a3 | Python | chewlite/selenium-project | /python-mod/exercise8.py | UTF-8 | 549 | 2.515625 | 3 | [] | no_license | import pytest
from selenium import webdriver
@pytest.fixture
def driver(request):
    """Provide a Chrome WebDriver that quits itself when the test finishes."""
    browser = webdriver.Chrome()
    request.addfinalizer(browser.quit)
    return browser
def test_sticker_on_product(driver):
    """Every product tile on the litecart front page carries exactly one sticker."""
    driver.get("http://localhost/litecart/")
    for box in driver.find_elements_by_class_name('box'):
        for product in box.find_elements_by_class_name('product'):
            stickers = product.find_elements_by_class_name('sticker')
            assert len(stickers) == 1
| true |
3e3903ee9c3fb016be6574c994a7ee6528b96e4e | Python | achiyae/repository_mining | /paper/graphics/ttest/ttest.py | UTF-8 | 1,808 | 2.671875 | 3 | [] | no_license | import os
from itertools import product
import pandas as pd
from scipy.stats import ttest_ind
from config import Config
# Load the per-run scores produced by the scoring experiment.
path = Config.get_work_dir_path(os.path.join("paper", "graphics", "scores", "data.csv"))
data = pd.read_csv(path)

# NOTE(review): this header list is never passed to the DataFrame below, and
# its 't-statistic' entry does not match the 't-statistics' key actually used
# when building the rows -- confirm which spelling downstream consumers expect.
columns = [
    'Feature Selection',
    'Score',
    'Dataset 1',
    'Dataset 2',
    't-statistic',
    'p-value'
]
# Dataset / feature-selection / score-column names as they appear in data.csv.
datasets = [
    "Designite",
    "Designite + Fowler",
    "Fowler",
    "Traditional",
    "Traditional + Designite",
    "Traditional + Designite + Fowler",
    "Traditional + Fowler"]
feature_selection = [
    "all",
    "chi2_20p",
    "chi2_50p",
    "f_classif_20",
    "f_classif_50",
    "mutual_info_classif_20p",
    "mutual_info_classif_50p",
    "recursive_elimination"
]
score = [
    "precision_mean",
    "precision_max",
    "recall_mean",
    "recall_max",
    "f1_measure_mean",
    "f1_measure_max",
    "auc_roc_mean",
    "auc_roc_max",
    "brier_score_mean",
    "brier_score_max"
]
# Run an independent two-sample t-test for every (feature-selection, dataset
# pair, score) combination.  NOTE(review): the cartesian product visits each
# unordered dataset pair twice ((A, B) and (B, A)); ttest_ind is symmetric up
# to the sign of the t-statistic, so half of the output rows are mirrors.
ttests_dicts = []
for row in product(feature_selection, datasets, datasets, score):
    fs, ds_1, ds_2, s = row
    cond_fs = data['feature_selection'] == fs
    if ds_1 == ds_2:
        continue
    cond_ds_1 = data['dataset'] == ds_1
    cond_ds_2 = data['dataset'] == ds_2
    # Select the score column restricted to this feature selection + dataset.
    data_1 = data.loc[cond_fs & cond_ds_1][s].values
    data_2 = data.loc[cond_fs & cond_ds_2][s].values
    t_statistics, p_value = ttest_ind(data_1, data_2)
    ttests_dict = {
        'Feature Selection': fs,
        'Score': s,
        'Dataset 1': ds_1,
        'Dataset 2': ds_2,
        't-statistics': t_statistics,
        'p-value': p_value
    }
    ttests_dicts.append(ttests_dict)
# Persist all test results as a semicolon-separated CSV.
df = pd.DataFrame(ttests_dicts)
path = Config.get_work_dir_path(os.path.join("paper", "graphics", "ttest", "ttest.csv"))
df.to_csv(path, index=False, sep=';')
| true |
04cc192a41d4bd977540730956b0c0850d0edaa0 | Python | louiselessel/Shaders-on-raspberry-pi4 | /Ex_Pixelize/run_shader_Pixelize.py | UTF-8 | 4,261 | 2.6875 | 3 | [] | no_license | import time
import demo
import pi3d
# Render a shadertoy-style GLSL shader with pi3d at a reduced internal
# resolution, then upscale it with a "pixelize" post-processing shader.
#(W, H) = (None, None) # Fullscreen - None should fill the screen (there are unresolved edge issues)
(W, H) = (400, 400) # Windowed
# For scale, make sure the numbers are divisible to the resolution with no remainders (use even numbers between 0 and 1). 1.0 is full non-scaled resolution.
SCALE = .20 # downscale the shadertoy shader resolution
timeScalar = 1.0 # for scaling the speed of time
fps = 30 # framerate
BACKGROUND_COLOR = (0.0, 0.0, 0.0, 0.0)
display = pi3d.Display.create(w=W, h=H, frames_per_second=fps,
                              background=BACKGROUND_COLOR,
                              display_config=pi3d.DISPLAY_CONFIG_HIDE_CURSOR | pi3d.DISPLAY_CONFIG_MAXIMIZED,
                              use_glx=True)
print(display.opengl.gl_id) # the type of glsl your pi is running
if W is None or H is None:
    (W, H) = (display.width, display.height)
    print('setting display size to ' + str(W) + ' ' + str(H))
## shadertoy shader stuff ##
# A single oversized triangle that covers the whole viewport.
sprite = pi3d.Triangle(corners=((-1.0, -1.0),(-1.0, 3.0),(3.0, -1.0)))
shader = pi3d.Shader('cloud') # cloud shader
sprite.set_shader(shader)
## offscreen texture stuff ##
cam = pi3d.Camera(is_3d=False)
postsh = pi3d.Shader('post_pixelize')
post = pi3d.PostProcess(camera=cam, shader=postsh, scale=SCALE)
## interactive inputs ##
kbd = pi3d.Keyboard()
mouse = pi3d.Mouse() # pi3d.Mouse(restrict = True) # changes input coordinates
mouse.start()
MX, MY = mouse.position()
MXC, MYC = mouse.position()
MC = mouse.button_status() # 8 = hover, 9 = right Click down, 10 = left C, 12 = middle C
MouseClicked = False
## set up time ##
iTIME = 0
iTIMEDELTA = 0
iFRAME = 0
## pass shadertoy uniforms into our base shader from shadertoy ##
sprite.unif[0:2] = [W, H] # iResolution
sprite.unif[2] = iTIME # iTime - shader playback time
sprite.unif[3] = iTIMEDELTA # iTimeDelta - render time (in seconds)
sprite.unif[4] = SCALE # iScale - scale for downscaling the resolution of shader
sprite.unif[5] = iFRAME # iFrame - shader playback frame
sprite.unif[6:8] = [MX, MY] # iMouse - xpos, ypos (set while button held down)
sprite.unif[9:11] = [MXC, MYC] # iMouse - xposClicked, yposClicked (set on click)
## pass uniforms into postprocessing postsh ##
post.draw({0:W, 1:H, 2:iTIME, 3:iTIMEDELTA, 4:SCALE, 5:iFRAME})
# time at start
tm0 = time.time()
last_time = 0
while display.loop_running():
    # drawing
    post.start_capture()
    sprite.draw()
    post.end_capture()
    post.draw()
    ## inputs - mouse ##
    MX, MY = mouse.position()
    MVX, MVY = mouse.velocity()
    MC = mouse.button_status()
    #print('(' + str(MX) + ', ' + str(MY) + ')')
    # if mouse click on this frame (any button)
    # NOTE(review): `and` binds tighter than `or`, so this reads as
    # (MC == 9) or (MC == 10) or (MC == 12 and MouseClicked == False);
    # for buttons 9/10 the branch re-fires every frame while held -- confirm
    # whether that is intended.
    if MC == 9 or MC == 10 or MC == 12 and MouseClicked == False:
        (MXC, MYC) = (MX, MY)
        sprite.unif[9:11] = [MXC, MYC] # update iMouse - xposClicked, yposClicked
        post.draw({9:MXC, 10:MYC})
        #print('(' + str(MXC) + ', ' + str(MYC) + ')')
        MouseClicked = True
    # while mouse is clicked (button held down)
    if MouseClicked == True:
        sprite.unif[6:8] = [MX, MY] # update iMouse - xpos, ypos
        post.draw({6:MX, 7:MY})
    # mouse button released
    if MC == 8 and MouseClicked == True:
        MouseClicked = False
    # keyboard control
    k = kbd.read()
    if k == 27:
        kbd.close()
        display.stop()
        break
    ## setting non-interactive uniforms ##
    iTIME = (time.time() - tm0) * timeScalar # change the timeScalar to slow time
    iTIMEDELTA = display.time - last_time # display.time is set at start of each frame
    last_time = display.time
    ## pass only the changed shadertoy uniforms into our base shader from shadertoy ##
    sprite.unif[2] = iTIME # iTime - shader playback time
    sprite.unif[3] = iTIMEDELTA # iTimeDelta - render time (in seconds)
    sprite.unif[5] = iFRAME # iFrame - shader playback frame
    ## pass only the changed uniforms into postprocessing postsh ##
    post.draw({2:iTIME, 3:iTIMEDELTA, 5:iFRAME})
    ## updating variables ##
    iFRAME += 1
    #print(int(FRAME/fps)) # calculate seconds based on framerate, not time.time
| true |
1faaa9939a70a2c663cb276c1d86096152571d88 | Python | dwiberg4/num_meth | /mullers.py | UTF-8 | 1,962 | 3.90625 | 4 | [] | no_license | # Root Finding Method
# Open Method
# Muller's Method
import numpy as np
# Define the main Muller's Method Function
def mullers(x0, x1, x2, es, imax, func=None):
    """Find a root via Muller's method (parabolic interpolation).

    x0, x1, x2 : three initial estimates near the root
    es         : approximate-percent-error stopping threshold
    imax       : maximum number of iterations
    func       : function whose root is sought; defaults to the module-level
                 f() for backwards compatibility

    Returns the final root estimate.  Also fixes an UnboundLocalError in the
    original: if imax <= 0 the loop never ran and `xr` was undefined; the
    estimate now defaults to x2.
    """
    if func is None:
        func = f  # original behavior: solve the module-level demo function
    itera = 0
    ea = es  # seed the error so the loop body executes at least once
    xr = x2
    while (ea >= es) and (itera < imax):
        itera += 1
        # Fit a parabola through (x0, x1, x2) via divided differences.
        h0 = x1 - x0
        h1 = x2 - x1
        d0 = (func(x1) - func(x0)) / h0
        d1 = (func(x2) - func(x1)) / h1
        a = (d1 - d0) / (h1 + h0)
        b = (a * h1) + d1
        c = func(x2)
        rad = np.sqrt((b ** 2) - (4 * a * c))  # NaN when the roots are complex
        # Use the larger-magnitude denominator for numerical stability.
        if abs(b + rad) > abs(b - rad):
            den = b + rad
        else:
            den = b - rad
        xr = x2 + ((-2 * c) / den)
        if xr != 0:
            # Approximate relative percent error.
            ea = abs((xr - x2) / xr) * 100
        # Shift the three estimates forward.
        x0 = x1
        x1 = x2
        x2 = xr
    return xr
# The function of the equation being examined
def f(x):
    """Evaluate the demo polynomial 0.5*x^2 + 13*x - 43, echoing each value."""
    result = 0.5*(x**2) + 13*x - 43
    print("y equals: ", result)
    return result
# Optional Graphing Method
def grapher():
    """Interactively plot f(x) over a user-supplied interval and resolution."""
    import matplotlib.pyplot as plt
    # SECURITY NOTE: eval() on raw user input executes arbitrary code; only
    # acceptable here because this is an interactive local tool.
    l = eval(input("Graph Left bound: "))
    r = eval(input("Graph Right bound: "))
    res = eval(input("Desired Resolution: "))
    step = (r-l)/res
    (x,y,flat) = [],[],[]
    for i in range(res):
        x.append(l+(step*i))
        y.append(f(l+(step*i)))
        flat.append(0)
    # Orange curve = f(x); purple line = y = 0 axis (root locations).
    plt.plot(x,y,color= 'orange')
    plt.plot(x,flat,color= 'purple')
    plt.show()
# Main interface function
def main():
    """Prompt for initial estimates/thresholds, then run Muller's method."""
    graph = str(input("Do you wish to graph the function first? "))
    while graph != 'n':
        grapher()
        graph = str(input("Do you wish to graph again? "))
    # SECURITY NOTE: eval() on raw input executes arbitrary code; fine only
    # for this interactive local tool.
    x0 = eval(input("Initial estimate 1: "))
    x1 = eval(input("Initial estimate 2: "))
    x2 = eval(input("Initial estimate 3: "))
    es = eval(input("Prescribed Error threshold: "))
    imax = eval(input("Max iteration threshold: "))
    x = mullers(x0,x1,x2,es,imax)
    print("The root has been located at: ",x)
# Run immediately on import/execution (no __main__ guard in the original).
main()
def swap_char(sentance):
    """Return `sentance` with the case of every cased character swapped.

    Bug fix: the original ord() +/- 32 arithmetic only works for ASCII A-Z /
    a-z; any other alphabetic character (accented letters, non-Latin scripts)
    was shifted by 32 into an unrelated code point.  Using the str case
    predicates keeps ASCII behavior identical and handles the rest correctly.
    """
    return ''.join(
        ch.lower() if ch.isupper() else ch.upper() if ch.islower() else ch
        for ch in sentance
    )
print swap_char("HeLlo")
| true |
f93177bfc864210176f46d3d1e9bb5566169526c | Python | moeyashi/practice-python-eel | /hello.py | UTF-8 | 563 | 2.640625 | 3 | [] | no_license | from __future__ import print_function # For Py2/3 compatibility
import eel
import random
# Set web files folder
eel.init('web')
# Each @eel.expose function below becomes callable from the page's JavaScript.
@eel.expose
def py_random():
    # Return a single float in [0, 1).
    return random.random()
@eel.expose
def py_list():
    # Mixed int/str list to demonstrate eel's JSON serialization.
    return [1, 2, "3", "4"]
@eel.expose
def py_dict():
    # Plain dict; eel serializes it to a JS object.
    return {
        "1": "hoge",
        "a": "fuga",
    }
class Hoge:
    """Demo class returned to JavaScript by py_class() to see how eel serializes objects."""
    def __init__(self):
        # Two plain string attributes.
        self.a = "aaaa"
        self.b = "bbbb"
    def getA(self):
        # Simple accessor for attribute `a`.
        return self.a
@eel.expose
def py_class():
    # Expose a custom-class instance to see how eel serializes it.
    return Hoge()
# Open web/hello.html in a 400x300 window and block until it closes.
eel.start('hello.html', size=(400, 300)) # Start
| true |
da47d88f3a0c95e3b988d2cc675b4a394dfd2379 | Python | HaojieSHI98/HouseExpo | /pseudoslam/envs/simulator/util.py | UTF-8 | 4,076 | 3.1875 | 3 | [
"MIT"
] | permissive | import numpy as np
def transform_coord(y_coordMat, x_coordMat, rotationCenter, transformVect):
    """ Transform x-y coordinate (y_mat & x_mat) by transformVect | round to int | return rotated y & x coord as vector"""
    """ y_mat and x_mat are the coord to be rotated | rotationCenter [y;x] or [y;x;phi] are the centre of rotation by theta
        transformVect [y;x;theta]: y & x are relative to rotationCenter if center [y;x], or relative to world ref frame if center [y;x;phi],
        theta is the angle in rad which the coord to be rotated """
    y_rc = rotationCenter[0]
    x_rc = rotationCenter[1]
    y_translate = transformVect[0]
    x_translate = transformVect[1]
    # change transform to be relative to rotationCenter frame if in form of [y;x;phi]
    if rotationCenter.shape[0] > 2:
        phi = rotationCenter[2]
        # BUG FIX: both rotated components must be computed from the ORIGINAL
        # translation.  The old code overwrote y_translate first and then used
        # the new value when computing x_translate.
        y_orig, x_orig = y_translate, x_translate
        y_translate = y_orig * np.cos(phi) + x_orig * np.sin(phi)
        x_translate = x_orig * np.cos(phi) - y_orig * np.sin(phi)
    theta = transformVect[2]
    sthe = np.sin(theta)
    cthe = np.cos(theta)
    # Rotate about (y_rc, x_rc) by theta, then translate.
    y_rot = sthe*x_coordMat + cthe*y_coordMat + (1-cthe)*y_rc - sthe*x_rc + y_translate
    x_rot = cthe*x_coordMat - sthe*y_coordMat + (1-cthe)*x_rc + sthe*y_rc + x_translate
    # Round to integer grid indices and flatten to column vectors.
    y_ind = np.round(y_rot).astype(int).reshape(y_rot.size, 1)
    x_ind = np.round(x_rot).astype(int).reshape(x_rot.size, 1)
    return y_ind, x_ind
def rad2deg(rad):
    """Convert an angle from radians to degrees."""
    return rad * (180.0 / np.pi)
def deg2rad(deg):
    """Convert an angle from degrees to radians."""
    return deg * (np.pi / 180)
def angle_within_360(theta):
    """Normalize an angle in degrees into the range [0, 360)."""
    # np.mod already returns a value in [0, 360) for any finite input, so the
    # original `if theta > 360: theta -= 360` correction was unreachable dead
    # code and has been removed.  Behavior is unchanged.
    return np.mod(theta, 360)
def angel_within_pi(theta):
    """Normalize an angle in radians into the range [0, 2*pi).

    (Function name kept as-is -- including the 'angel' typo -- so existing
    callers keep working.)
    """
    # np.mod already returns a value in [0, 2*pi), so the original
    # `if theta > 2*np.pi` correction was unreachable dead code.
    return np.mod(theta, 2 * np.pi)
def meter2pixel(x_in_m, m2p_ratio):
    """Convert a length in world meters to whole pixels (nearest-int rounding)."""
    scaled = x_in_m * m2p_ratio
    return np.round(scaled).astype(int)
def pixel2meter(x_in_pixel, m2p_ratio):
    """Convert a length in pixels back to world meters."""
    return x_in_pixel / (m2p_ratio * 1.0)
def world2mapCoord(p_world, worldOrigin, m2p_ratio=1):
    """Convert a world coordinate into a map (image) coordinate.

    World frame: origin at `worldOrigin` (given in image pixels), y-axis up,
    units in meters.  Map frame: origin at the top-left corner, y-axis down,
    units in pixels.  p_world is [y, x] in meters; returns [y, x] in pixels.
    """
    offset_y = p_world[0] * m2p_ratio
    offset_x = p_world[1] * m2p_ratio
    return np.array([worldOrigin[0] - offset_y, worldOrigin[1] + offset_x])
def map2worldCoord(p_map, worldOrigin, m2p_ratio=1):
    """Convert a map (image) coordinate into a world coordinate.

    Inverse of world2mapCoord: p_map is [y, x] in pixels (origin top-left,
    y down); returns [y, x] in meters relative to `worldOrigin` with y up.
    """
    dy = (worldOrigin[0] - p_map[0]) * 1.0
    dx = (p_map[1] - worldOrigin[1]) * 1.0
    return np.array([dy / m2p_ratio, dx / m2p_ratio])
def within_bound(p, shape, r=0):
    """Check that point(s) p = [y; x] with radius r lie inside a world of the
    given (h, w) shape.  Elementwise (&) so it works for scalars as well as
    coordinate arrays, returning a bool or a bool array accordingly."""
    y_ok = (p[0] >= r) & (p[0] < shape[0] - r)
    x_ok = (p[1] >= r) & (p[1] < shape[1] - r)
    return y_ok & x_ok
def make_circle(r, pixelValue):
    """Return (patch, r): a (2r+1)x(2r+1) array with a filled disc of radius r
    rasterized at value `pixelValue`, plus the radius itself."""
    patch = np.zeros([2 * r + 1, 2 * r + 1])
    # Sweep 361 angles (degrees) x 30 radii to cover the disc interior.
    theta = np.arange(361).reshape(361, 1) * np.pi / 180
    rho = np.linspace(0, r, num=30).reshape(1, 30)
    yy = r + np.matmul(np.sin(theta), rho)
    xx = r + np.matmul(np.cos(theta), rho)
    rows = np.round(yy).astype(int).reshape(yy.size, 1)
    cols = np.round(xx).astype(int).reshape(xx.size, 1)
    patch[rows, cols] = pixelValue
    return patch, r
def gauss_noise(mu=0, sigma=0.1):
    """Draw a single sample from a Gaussian with mean mu and std-dev sigma."""
    return np.random.normal(loc=mu, scale=sigma)
| true |
4dc278cafd5a5fb84a0dd43b2c19501f1afcc399 | Python | menonf/PythonProjects | /MachineLearning/LinearAlgorithms/LinearRegression/LeastSquares.py | UTF-8 | 1,175 | 3.546875 | 4 | [] | no_license | from csv import reader
def load_csv(filename):
    """Read a CSV file into a list of rows (lists of strings), skipping blank lines."""
    with open(filename, 'r') as handle:
        return [row for row in reader(handle) if row]
# Convert string column to float
def str_column_to_float(dataset, column):
    """In place, strip whitespace and convert the given column of every row to float."""
    for record in dataset:
        record[column] = float(record[column].strip())
# Calculate the mean value of a list of numbers
def mean(values):
    """Arithmetic mean of a list of numbers."""
    n = float(len(values))
    return sum(values) / n
# Calculate the variance of a list of numbers
def variance(values, mean):
    """Sum of squared deviations of `values` from the supplied mean (not divided by n)."""
    return sum((v - mean) ** 2 for v in values)
# Calculate covariance between x and y
def covariance(x, mean_x, y, mean_y):
    """Sum of co-deviations of x and y about their respective means (not divided by n)."""
    return sum((x[i] - mean_x) * (y[i] - mean_y) for i in range(len(x)))
# Calculate coefficients
def coefficients_leastSquares(dataset):
    """Fit simple linear regression y = b0 + b1*x to [[x, y], ...] rows.

    Returns [b0, b1] (intercept, slope) via the closed-form least-squares
    solution b1 = cov(x, y) / var(x), b0 = mean(y) - b1 * mean(x).
    """
    xs = [row[0] for row in dataset]
    ys = [row[1] for row in dataset]
    mx, my = mean(xs), mean(ys)
    slope = covariance(xs, mx, ys, my) / variance(xs, mx)
    intercept = my - slope * mx
    return [intercept, slope]
740c4a60d8773b862dcb4e1ddc563a908d4003c6 | Python | hasnatnayeem/hackerrank_solutions | /python/06_itertools/combinations-with-replacement.py | UTF-8 | 296 | 3.515625 | 4 | [] | no_license | # https://www.hackerrank.com/challenges/itertools-combinations-with-replacement
from itertools import combinations_with_replacement
# Input: a string and a combination size on one line, e.g. "HACK 2".
text, length = input().split()
length = int(length)
# Sort each combination's characters, then sort the whole list so the output
# is lexicographic regardless of the input string's ordering.
ans = [''.join(sorted(x)) for x in combinations_with_replacement(text, length)]
print('\n'.join(sorted(ans)))
| true |
be67f242d26db44eb6a29456455ab1b785dbbd4c | Python | chasingegg/Data_Science | /Python/stock_sold/data.py | UTF-8 | 3,293 | 3.140625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# Author: Chao Gao
# manipulating data
import xlrd
import xlsxwriter
data = xlrd.open_workbook(u"test.xlsx")
#得到两张表
table_sold = data.sheet_by_name(u'销售结算单')
table_stock = data.sheet_by_name(u'进货结算单')
#获取行数和列数
sold_rows = table_sold.nrows
sold_cols = table_sold.ncols
#其实有多少列肯定是一样的,在这里好像有多少行也是一样的。。但还是分开写一下
stock_rows = table_stock.nrows
stock_cols = table_stock.ncols
#第一行是所有列属性的集合
colname = table_sold.row_values(0)
#这三个列属性是有用的
ID_index = colname.index(u'货品ID')
amount_index = colname.index(u'数量')
money_index = colname.index(u'金额')
#除去第一行都是有效数据,每一行作为一个元素存储到列表中,并进行排序其实如果本来就是有序且相同ID的货品都挨在一起的话排序可以省略
store_sold = [table_sold.row_values(i) for i in range(1, sold_rows)]
store_sold.sort(key = lambda x: x[ID_index])
store_stock = [table_stock.row_values(i) for i in range(1, stock_rows)]
store_stock.sort(key = lambda x: x[ID_index])
# Merge rows that share the same item ID by accumulating their quantity and
# amount columns (requires `store` to be sorted by ID; mutates it in place).
def getOutput(store, rows):
    # `rows` is the sheet's total row count, so `store` holds rows-1 entries.
    out = []
    for i in range(1, rows-1):
        if store[i][ID_index] == store[i-1][ID_index]:
            # Same ID as the previous row: fold the running totals forward.
            store[i][amount_index] += store[i-1][amount_index]
            store[i][money_index] += store[i-1][money_index]
        else:
            # ID changed: the previous row now holds the full totals for its ID.
            out.append(store[i-1])
    out.append(store[rows-2])  # the last row always closes the final ID group
    return out
out_sold = getOutput(store_sold, sold_rows)
out_stock = getOutput(store_stock, stock_rows)
# If, after merging, the two tables have different row counts the source data
# is inconsistent; no exception handling is done for that case.
#if len(out_sold) != len(out_stock):
#    print("行数不相等")
# Write the two merged tables to an Excel file.  This is only for debugging /
# inspection and can be commented out if the intermediate result is not needed.
file_out = xlsxwriter.Workbook(u'中间结果.xlsx')
sold = file_out.add_worksheet(u'销售处理后的结果')
stock = file_out.add_worksheet(u'进货处理后的结果')
for i in range(sold_cols):
    sold.write(0, i, colname[i]) # write the header row
for i in range(len(out_sold)):
    for j in range(sold_cols):
        sold.write(i+1, j, out_sold[i][j]) # write the data rows
for i in range(stock_cols):
    stock.write(0, i, colname[i])
for i in range(len(out_stock)):
    for j in range(stock_cols):
        stock.write(i+1, j, out_stock[i][j])
# Compute the differences between the two merged tables and write the final
# result to a new Excel file.
result = xlsxwriter.Workbook(u'最终结果.xlsx')
out = result.add_worksheet(u'差异结果')
for i in range(sold_cols):
    out.write(0, i, colname[i])
index = 1
tmp = 0
for i in range(len(out_sold)):
    # Linear search for the matching item ID in the purchase table.
    tmp = -1
    for j in range(len(out_stock)):
        if out_sold[i][ID_index] == out_stock[j][ID_index]:
            tmp = j
            break
    if tmp == -1:
        # ID only present in sales: copy the row through unchanged.
        for k in range(sold_cols):
            out.write(index, k, out_sold[i][k])
        index += 1
    else:
        # ID present in both: emit a row only when quantity or amount differ,
        # writing the sales-minus-purchase difference for those two columns.
        if not(out_sold[i][amount_index] == out_stock[tmp][amount_index] and out_sold[i][money_index] == out_stock[tmp][money_index]):
            for k in range(sold_cols):
                if k == amount_index or k == money_index:
                    out.write(index, k, out_sold[i][k] - out_stock[tmp][k])
                else:
                    out.write(index, k, out_sold[i][k])
            index += 1
file_out.close()
result.close()
| true |
c50ab308262a4f7bb32e7350f69f223b673a7a22 | Python | faberikaneko/Intern | /ImageSuggestion/ImageSuggestion/ScoringClass.py | UTF-8 | 5,975 | 2.78125 | 3 | [] | no_license | # -*- coding:utf-8 -*-
import sys
import codecs
import csv
import sqlite3
#External import package to check Unicode parameter
import regex as re
#External import package to check encoding of file
import chardet
from chardet.universaldetector import UniversalDetector
#External import package to Morphological Analysis
import MeCab
class ScoringClass:
    """Score sentences against clue words and key sentence expressions (Python 2).

    Clue words / expressions are loaded once into class-level dicts (shared by
    all instances), either from CSV files or from a SQLite database that is
    seeded from the CSVs on first use.
    """
    # Class-level caches: word -> importance, regex pattern -> importance.
    clueword = None
    keysentence = None
    def openClueWord(self,filename="ClueWord_List.csv"):
        ''' <- filename : filename to read default = ClueWord_List.csv
        ->No return
        read ClueWord.csv into dict(clueword)'''
        if ScoringClass.clueword == None:
            ScoringClass.clueword = {}
            # read database(ClueWord)->data
            with codecs.open(filename,"r",encoding="utf-8-sig") as file:
                reader = csv.reader(file)
                #readout header
                next(reader)
                #make data word:importance dict
                for row in reader:
                    ScoringClass.clueword[row[0].decode("utf-8-sig")] = int(row[2].decode("utf-8-sig"))
        return
    def openClueWordDB(self,dbName=u"WordDB.sqlite3",tableName=u"clueword"):
        ''' <- dbname : filename to read default = WordDB.sqlite3
        <- tableName : tablename to read/write default = clueword
        ->No return
        read clueword table in Database into dict(clueword)
        if no table or dbfile, read csvfile and save it'''
        if ScoringClass.clueword == None:
            try:
                conn = sqlite3.connect(dbName)
                with conn:
                    cr = conn.cursor()
                    # Table missing: seed it from the CSV file; otherwise load it.
                    if cr.execute(u"select count(*) from sqlite_master where type=\"table\" and name=?;",(tableName,)).fetchone()[0] == 0:
                        cr.execute(u"create table clueword (word ntext,importance real);")
                        self.openClueWord()
                        message = u"insert into "+tableName+" values (:key,:value)"
                        cr.executemany(message,self.clueword.iteritems())
                    else :
                        ScoringClass.clueword = {}
                        message = u"select * from "+tableName
                        for row in cr.execute(message):
                            ScoringClass.clueword[row[0]] = row[1]
            except sqlite3.Error as e:
                print e.message
            except Exception as e:
                print e.message
    def openSentenceExpression(self,filename="SentenceExpression_List.csv"):
        ''' <- filename : filename to read default = SentenceExpression_List.csv
        ->No return
        read SentenceExpression_List.csv into dict(keysentence)'''
        if ScoringClass.keysentence == None:
            ScoringClass.keysentence = {}
            #read database(SentenceExpression)->dataC
            with codecs.open(filename,"rt",encoding="utf-8-sig") as file:
                reader = csv.reader(file)
                #readout header
                next(reader)
                #make data exp:importance dict
                for row in reader:
                    # "~" in the CSV is a wildcard; turn it into the regex ".*".
                    sentence = row[0].replace("~",".*").decode("utf-8")
                    # NOTE(review): this line assigns `sentence` to itself in
                    # both branches and therefore has no effect -- confirm what
                    # the startswith(".*") check was meant to do.
                    sentence = sentence if sentence.startswith(r".*") else sentence
                    ScoringClass.keysentence[sentence] = int(row[2].decode("utf-8"))
            if __name__=="__main__":
                for key in ScoringClass.keysentence.keys():
                    print "%s,%d"%(key,ScoringClass.keysentence[key])
        return
    # NOTE(review): default dbName u"Wordb.sqlite3" differs from the
    # u"WordDB.sqlite3" used everywhere else -- likely a typo; __init__ always
    # passes the name explicitly, so the default is never exercised there.
    def openSentenceExpressionDB(self,dbName=u"Wordb.sqlite3",tableName=u"SenExp"):
        if ScoringClass.keysentence == None:
            try:
                conn = sqlite3.connect(dbName)
                with conn:
                    cr = conn.cursor()
                    # Table missing: seed it from the CSV file; otherwise load it.
                    if cr.execute(u"select count(*) from sqlite_master where type=\"table\" and name=?;",(tableName,)).fetchone()[0] == 0:
                        cr.execute(u"create table "+tableName+" (word ntext,importance real);")
                        self.openSentenceExpression()
                        message = u"insert into "+tableName+" values (:key,:value)"
                        cr.executemany(message,self.keysentence.iteritems())
                    else :
                        ScoringClass.keysentence = {}
                        message = u"select * from "+tableName
                        for row in cr.execute(message):
                            ScoringClass.keysentence[row[0]] = row[1]
            except sqlite3.Error as e:
                print e.message
            except Exception as e:
                print e.message
            finally:
                conn.close()
    #read text
    def scoreSentenceByWord(self,text):
        """ in > text (one sentence)
        out> matching word list[]"""
        matching = []
        # Tokenize the sentence with MeCab (morphological analyzer).
        m = MeCab.Tagger("-Owakati")
        m.parse('')
        encodeText = text.encode("utf-8")
        node = m.parseToNode(text.encode("utf-8"))
        ans = ""
        node = node.next
        nodeList = []
        while node.next:
            nodeList.append((node.surface,node.feature))
            node = node.next
        for node in nodeList:
            try:
                surface = node[0].decode("utf-8-sig")
                feature = node[1].decode("utf-8-sig")
            except UnicodeDecodeError:
                surface = ""
                exit("error! unicode decode error!")
            #searching word
            if surface in ScoringClass.clueword.keys():
                matching.append(surface)
        return matching
    def scoreSentenceByExp(self,text):
        """ in > text (one sentence)
        out> matching word list[]"""
        matching = []
        # Collect every key-sentence regex that matches the sentence's start.
        for sentence in ScoringClass.keysentence.keys():
            if re.match(sentence,text):
                matching.append(sentence)
        return matching
    def scoreSentenceList(self,textList):
        # For each sentence, combine the word matches and expression matches.
        matchList = []
        for text in textList:
            matchWordList = self.scoreSentenceByWord(text)
            matchExpList = self.scoreSentenceByExp(text)
            matchList.append(matchWordList+matchExpList)
        return matchList
    def __init__(self):
        # Python 2 trick to default all implicit conversions to UTF-8.
        reload(sys)
        sys.setdefaultencoding('utf-8')
        #self.openClueWord()
        self.openClueWordDB(u"WordDB.sqlite3")
        #self.openSentenceExpression()
        self.openSentenceExpressionDB(u"WordDB.sqlite3")
# Test program (manual run): score input_main.txt and dump matches per line.
if __name__ == "__main__":
    print "Start ScorinClass"
    this = ScoringClass()
    textList = []
    filename = "input_main.txt"
    textList = list()
    # Read the input file, stripping CR/LF from every line.
    with codecs.open(filename,mode="r",buffering=-1,encoding="utf-8-sig") as file:
        for line in file.readlines():
            textList.append(re.sub(ur"[\n\r]",u"",line))
    scores = this.scoreSentenceList(textList)
    filename = "output_scorig.txt"
    # Write each sentence followed by its match count and tab-indented matches.
    with codecs.open(filename,"w",encoding="utf-8") as file:
        for tap in zip(textList,scores):
            file.write(tap[0])
            score = tap[1]
            file.write(u"\nmatch:" + unicode(len(score)) + u"\n")
            for match in score:
                # A match is either a clue word or a key-sentence pattern.
                file.write(u"\t" + match + ":" + str(ScoringClass.clueword[match] if match in ScoringClass.clueword else ScoringClass.keysentence[match]))
            file.write(u"\n")
4b3e852d1e555b0b7f19971ac1ab7573506703db | Python | AmaniAlshami/100DaysOfCodePython | /Day17-Tuples2.py | UTF-8 | 385 | 3.640625 | 4 | [] | no_license | color = tuple(("Red","Green","Yellow","Red"))
fruits = ('Apple','Banana','Orange')
Number= (1,2,3)
Number = Number + (4,5,6)
def check(fruits):
fruit = input("Enter fruti name : ")
if fruit in fruits :
print("Yes")
else:
print("No")
print(color[:1])
print(color.count("Red"))
check(fruits)
print(color.index('Green'))
print(Number)
print(len(Number))
| true |
e8e9d4c6a1b1705196095a49943251833731cf9a | Python | henrik-leisdon/Bachelor_Thesis_stochastic_computing | /04_image_processing/04_entropy_frame/entropy.py | UTF-8 | 4,991 | 2.90625 | 3 | [] | no_license | import math
import random
import numpy as np
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
from skimage.filters.rank import entropy
from skimage.morphology import disk
def save_img(img_mat, img_name, img_num):
    """Save a numeric matrix as an 8-bit greyscale PNG named <img_name><img_num>.png."""
    grey = Image.fromarray(img_mat).convert("L")
    grey.save(str(img_name) + str(img_num) + ".png")
def calc_entropy(img):
    """Shannon entropy (bits) of an image array with intensities in 0..255.

    Vectorized with np.bincount instead of the original per-pixel Python
    loops, and the dead `normalize` counter (computed but never applied) is
    removed.  Empty bins contribute 0, matching the original's skip.
    """
    arr = np.asarray(img)
    # Histogram over the 256 possible intensity values.
    counts = np.bincount(arr.astype(int).ravel(), minlength=256)
    probs = counts[counts > 0] / float(arr.size)
    # H = -sum p * log2(p) over the occupied bins.
    return float(-np.sum(probs * np.log2(probs)))
def calc_GLCM_entropy(img):
    """Entropy (bits) of the vertical-neighbour grey-level co-occurrence matrix.

    Bug fix: the normalizing pair count was derived from the GLCM dimensions
    ((256-1) * 256), which only equals the true number of vertical pixel pairs
    for a 256x256 image.  It is now computed from the image itself:
    (rows - 1) * cols.  Prints and returns H like the original.
    """
    glcm = np.zeros((256, 256))
    # Count co-occurrences of vertically adjacent pixel intensities.
    for i in range(0, len(img) - 1):
        for j in range(0, len(img[0])):
            glcm[img[i, j], img[i + 1, j]] += 1
    num_pairs = (len(img) - 1) * len(img[0])
    H = 0
    for i in range(0, len(glcm)):
        for j in range(0, len(glcm[0])):
            if glcm[i, j] != 0:
                p = glcm[i, j] / num_pairs
                H += p * math.log(p, 2)
    H = -H
    print(H)
    return H
def blocks():
    """Compute Shannon entropy over 16x16 tiles of the (inverted) source image.

    Saves a greyscale visualization where each tile's brightness is its
    min-max-normalized entropy scaled to 0..255, and returns that matrix.
    """
    image = Image.open('cm_sp_original.jpg').convert('L')
    image = ImageOps.invert(image)
    img = np.array(image)
    # imgplot2 = plt.imshow(img)
    # plt.show()
    e_list = []
    entr = np.zeros((len(img), len(img[0])))
    # First pass: one entropy value per 16x16 tile (edge remainder is skipped).
    for i in range(0, len(img)-16, 16):
        for j in range(0, len(img[i])-16, 16):
            submat = img[i:i+16, j:j+16]
            entropy_e = calc_entropy(submat)
            entrpy = entropy_e
            print(entrpy)
            e_list.append(entrpy)
    e_list = nomalize(e_list)
    it = 0
    # Second pass: paint each tile with its normalized entropy (0..255).
    for i in range(0, len(img) - 16, 16):
        for j in range(0, len(img[i]) - 16, 16):
            # print(entropy)
            for x in range(0, 16):
                for y in range(0, 16):
                    # print('i{}, j{}, x{}, y{} '.format(i, j, x, y))
                    entr[i+x, j+y] = e_list[it]*255
            it += 1
    save_img(entr, 'entropy_sh_', 3)
    # imgplot = plt.imshow(entr)
    # plt.show()
    return entr
def nomalize(e_list):
    """Min-max normalize a list of numbers into [0, 1].

    (Function name typo kept for compatibility with existing callers.)
    Robustness fix: when all values are equal the original raised
    ZeroDivisionError; it now returns all zeros.
    """
    max_val = max(e_list)
    min_val = min(e_list)
    span = max_val - min_val
    if span == 0:
        return [0.0] * len(e_list)
    return [(element - min_val) / span for element in e_list]
def entropy_library():
    """Local-entropy visualization using scikit-image's rank.entropy filter
    (disc neighbourhood of radius 10); author notes this gives the best results."""
    image = Image.open('cm_sp_original.jpg').convert('L')
    image = ImageOps.invert(image)
    img = np.array(image)
    entr_img = entropy(img, disk(10))
    print(entr_img)
    imgplot2 = plt.imshow(entr_img, cmap='viridis')
    plt.show()
def entropy_copy():
    """Per-pixel local entropy over a sliding window (adapted sample code)."""
    # code from: https://www.hdm-stuttgart.de/~maucher/Python/MMCodecs/html/basicFunctions.html
    colorIm = Image.open('cm_sp_original.jpg')
    greyIm = colorIm.convert('L')
    colorIm = np.array(colorIm)
    greyIm = np.array(greyIm)
    N = 5
    S = greyIm.shape
    E = np.array(greyIm)
    # Entropy of the (up to) 2N x 2N neighbourhood around every pixel,
    # clamped at the image borders.
    for row in range(S[0]):
        for col in range(S[1]):
            Lx = np.max([0, col - N])
            Ux = np.min([S[1], col + N])
            Ly = np.max([0, row - N])
            Uy = np.min([S[0], row + N])
            region = greyIm[Ly:Uy, Lx:Ux].flatten()
            E[row, col] = entropy_c(region)
    # Show original, greyscale and entropy images side by side.
    plt.subplot(1, 3, 1)
    plt.imshow(colorIm)
    plt.subplot(1, 3, 2)
    plt.imshow(greyIm, cmap=plt.cm.gray)
    plt.subplot(1, 3, 3)
    plt.imshow(E, cmap=plt.cm.jet)
    plt.xlabel('Entropy in 10x10 neighbourhood')
    plt.colorbar()
    plt.show()
def entropy_c(signal):
    '''
    Shannon entropy (bits) of a 1-D numpy array.

    Vectorized with np.unique (O(n log n)) instead of the original pattern of
    re-scanning the whole signal once per distinct symbol (O(n * k)).
    '''
    _, counts = np.unique(signal, return_counts=True)
    probs = counts / (1.0 * signal.size)
    return np.sum(probs * np.log2(1.0 / probs))
def main():
    # Run the tile-based entropy visualization; the alternative
    # implementations are kept commented out for experimentation.
    blocks()
    # entropy_library()
    # entropy_copy()
if __name__ == '__main__':
    main()
| true |
5b5772dfdf572d9f0124d880054588515760d41d | Python | usersubsetscan/autoencoder_anomaly_subset | /visualization/detectionpower.py | UTF-8 | 1,718 | 2.609375 | 3 | [] | no_license | """ Detection power visualization """
import os
import argparse
from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt
from util.resultparser import ResultParser, ResultSelector
def plot(cleanscores, anomscores):
""" plot and calculate the detection power """
resultselector = ResultSelector(score=True)
cleanres = ResultParser.get_results(cleanscores, resultselector)
anomres = ResultParser.get_results(anomscores, resultselector)
clean_scores = np.array(cleanres['scores'])
anom_scores = np.array(anomres['scores'])
plt.hist(clean_scores, histtype = 'step')
plt.hist(anom_scores, histtype = 'step')
plt.show()
y_true = np.append([np.ones(len(anom_scores))], [np.zeros(len(clean_scores))])
all_scores = np.append([anom_scores], [clean_scores])
fpr, tpr, _ = metrics.roc_curve(y_true, all_scores)
roc_auc = metrics.auc(fpr,tpr)
plt.plot(fpr,tpr)
plt.show()
print(roc_auc)
if __name__ == "__main__":
PARSER = argparse.ArgumentParser()
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
PARENT_DIR_PATH = os.path.abspath(DIR_PATH + "/../")
PARSER.add_argument('--cleanscores', type=str, default=PARENT_DIR_PATH+ '/results/clean_individ_act_19.out',
help='clean scores file path')
PARSER.add_argument('--anomscores', type=str, default=PARENT_DIR_PATH+ '/results/bim_02_targ0_individ_act_19.out',
help='anomalous scores file path')
PARSER_ARGS = PARSER.parse_args()
assert os.path.exists(PARSER_ARGS.cleanscores) == 1
assert os.path.exists(PARSER_ARGS.anomscores) == 1
plot(PARSER_ARGS.cleanscores, PARSER_ARGS.anomscores)
| true |
2329815fb4f750790bdb115055eda9f78ac9dc0b | Python | arevalolance/advent-of-code | /python/2015/day7/solve.py | UTF-8 | 548 | 3.203125 | 3 | [] | no_license | import fileinput
import math
print(123 & 456)
print(123 | 456)
print('left',123 << 2)
print('right',456 >> 2)
print(~123)
print(~456)
gates = dict()
def solve(q):
if 'AND' in q:
return q[0] and q[2]
elif 'OR' in q:
return q[0] and q[2]
elif 'LSHIFT' in q:
return q[0] << q[2]
elif 'RSHIFT' in q:
return q[0] >> q[2]
elif 'NOT' in q:
return ~q[0]
for line in fileinput.input():
line = line.strip()
q = line.split('->')
print(q)
act = q[0].split()
print(solve(act))
| true |
cf75ea3972664f9e333be8e48a97d5c7b5c03da9 | Python | m0baxter/hephe-article | /images/plotOneP.py | UTF-8 | 2,770 | 2.609375 | 3 | [] | no_license |
import matplotlib
matplotlib.use('PS')
import matplotlib.pyplot as plt
#plot parameter things:
# Shared plot styling: label/legend font sizes, marker size and line width.
lbl_size = 24
lgd_size = 20
mrks = 9
lw = 3
fontsize = 24
# Use LaTeX text rendering and the xpdf distiller for the PS backend.
matplotlib.rcParams.update({'font.size': fontsize, 'text.usetex': True, "ps.usedistiller" : "xpdf"})
def readData(path):
    """Parse a two-column whitespace-separated file into ([x...], [y...]) floats."""
    pairs = []
    with open(path, 'r') as handle:
        for record in handle:
            a, b = record.split()
            pairs.append((float(a), float(b)))
    xs = [p[0] for p in pairs]
    ys = [p[1] for p in pairs]
    return (xs, ys)
def plotProbs(E):
    """Plot probabilities vs. impact parameter b for collision energy E.

    Reads six precomputed (b, p) curves from ./images/data/ and writes the
    figure to ./images/hephe-pb-<E>.eps.
    """
    # Ionisation curves ("...ToI"): He(up_1), He(down_1), He+(up_2) -> I.
    Qup1 = readData( "./images/data/HepUpToI-E" + str(E) + ".txt" )
    Qup2 = readData( "./images/data/HeUpToI-E" + str(E) + ".txt" )
    Qdn1 = readData( "./images/data/HeDnToI-E" + str(E) + ".txt" )
    # Transfer curves ("...ToO"): transitions to He+ / He.
    Cup1 = readData( "./images/data/HepUpToO-E" + str(E) + ".txt" )
    Cup2 = readData( "./images/data/HeUpToO-E" + str(E) + ".txt" )
    Cdn1 = readData( "./images/data/HeDnToO-E" + str(E) + ".txt" )
    fig = plt.figure(1, figsize = (12,10))
    plt.figure(1)
    # Ionisation curves in blue at half line width.
    plt.plot( Qup2[0], Qup2[1], "--", color = "#0000FF", linewidth = 0.5 * lw,
              label = r"$\mathrm{He}(\uparrow_1)$ $\rightarrow$ $I$" )
    plt.plot( Qdn1[0], Qdn1[1], ":", color = "#0000FF", linewidth = 0.5 * lw,
              label = r"$\mathrm{He}(\downarrow_1)$ $\rightarrow$ $I$" )
    plt.plot( Qup1[0], Qup1[1], "-", color = "#0000FF", linewidth = 0.5 * lw,
              label = r"$\mathrm{He}^{+}(\uparrow_2)$ $\rightarrow$ $I$" )
    # Transfer curves in green at 1.1x line width.
    plt.plot( Cup2[0], Cup2[1], "--", color = "#008000", linewidth = 1.1 * lw,
              label = r"$\mathrm{He}(\uparrow_1)$ $\rightarrow$ $\mathrm{He}^{+}$" )
    plt.plot( Cdn1[0], Cdn1[1], ":", color = "#008000", linewidth = 1.1 * lw,
              label = r"$\mathrm{He}(\downarrow_1)$ $\rightarrow$ $\mathrm{He}^{+}$" )
    plt.plot( Cup1[0], Cup1[1], "-", color = "#008000", linewidth = 1.1 * lw,
              label = r"$\mathrm{He}^{+}(\uparrow_2)$ $\rightarrow$ $\mathrm{He}$" )
    plt.xlabel("$\mathrm{b}$ $[\mathrm{a.u.}]$")
    plt.ylabel("$p(b)$")
    plt.xlim([0,5])
    plt.ylim( ymin = 0 )
    plt.legend( loc="best", fancybox=True, labelspacing = .2 )
    # Shrink the legend text to lgd_size.
    leg = plt.gca().get_legend()
    ltext = leg.get_texts()
    plt.setp(ltext, fontsize = lgd_size)
    # Thicker, longer tick marks on both axes.
    ax = plt.gca()
    ax.xaxis.set_tick_params(which='both', width=2)
    ax.yaxis.set_tick_params(which='both', width=2)
    ax.xaxis.set_tick_params(which='major', length=8)
    ax.yaxis.set_tick_params(which='major', length=8)
    ax.xaxis.set_tick_params(which='minor', length=5)
    ax.yaxis.set_tick_params(which='minor', length=5)
    fig.savefig('./images/hephe-pb-{0}.eps'.format(E),
                format = 'eps', dpi = 20000, bbox_inches='tight')
    fig.clear()
    return
if __name__ == "__main__":
    # Generate the probability figure for the E = 40 dataset.
    plotProbs( 40 )
| true |
121f58c90ab2a0d2bf005af4da83612042381d78 | Python | prayer1/Python_from_entry_to_practice | /ch7/counting.py | UTF-8 | 189 | 3.4375 | 3 | [] | no_license | current_number = 1
# Print 1 through 5 (current_number is initialised just above).
while current_number <= 5:
    print(current_number)
    current_number += 1
# Echo user input until the user types 'quit'; note the sentinel value is
# still echoed once by the print that follows the loop body.
message = ""
while message != 'quit':
    message = input("enter a str:")
print(message) | true |
fa47b3ccbaa70bc386e0c9e09d47e3b83c216415 | Python | liqiwa/python_work | /5/5.3/aline_color.py | UTF-8 | 989 | 3.921875 | 4 | [
"Apache-2.0"
] | permissive | alien_color = 'black'
# Independent substring checks against the alien's colour; for
# alien_color = 'black' only the second one fires.
if 'green' in alien_color:
    print("you are right is green,and you get 5 point!")
if 'black' in alien_color:
    print("you are right is black,and you get 5 point !")
# Exclusive scoring ladder: at most one branch fires.
if alien_color == 'green':
    print('you get 5 point')
elif alien_color =='black':
    print("you get 10 point")
elif alien_color =='red':
    print('you get 15 point')
# Classify a person's life stage from their age.
age = 33
if age <2:
    print('he is baby')
elif age<4 and age>=2:       # the age>=2 guard is redundant after the first branch
    print('he is walk')
elif age >= 4 and age< 13:
    print('he is children')
elif age >=13 and age< 20:
    print('he is youth')
elif age >=20 and age<65:
    print('he is adult ')
elif age >= 65:
    print('he is old people')
# Report which of five candidate fruits appear in the favourites list;
# the last two checks intentionally miss.
favorite_fruits = ['apple','banana','peach']
if 'apple' in favorite_fruits:
    print('You really like apple!')
if 'banana' in favorite_fruits:
    print('You really like banana')
if 'peach' in favorite_fruits:
    print('You really like peach')
if 'orange' in favorite_fruits:
    print('You really like orange')
if 'pear' in favorite_fruits:
    print('You really like pear')
| true |
6bec313d23a5a910b7f7e252921121c3bc1385f0 | Python | MissNeerajSharma/Text-Classification-using-TextBlob | /analysis2.py | UTF-8 | 3,545 | 2.78125 | 3 | [] | no_license |
# coding: utf-8
# In[1]:
# get_ipython().run_line_magic('matplotlib', 'inline')
# import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# In[2]:
# Load the tab-separated fruit dataset and take a first look at it.
fruits = pd.read_table(r'C:\Users\akasriva2\Music\Analysis\fruit_data.txt')
fruits.head()
# In[3]:
print(fruits['fruit_name'].unique())
# In[4]:
print(fruits.shape)
# In[5]:
print(fruits.groupby('fruit_name').size())
# In[6]:
# Class-balance bar chart of the fruit labels.
import seaborn as sns
sns.countplot(fruits['fruit_name'],label="Count")
plt.show()
# In[7]:
# Features are the four numeric measurements; the target is the label id.
feature_names = ['mass', 'width', 'height', 'color_score']
X = fruits[feature_names]
y = fruits['fruit_label']
# In[8]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# In[9]:
# Scale to [0, 1]; the scaler is fit on the training split only and then
# applied to the test split (avoids test-set leakage).
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# In[10]:
# Fit a handful of classifiers and compare train/test accuracy.
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
print('Accuracy of Logistic regression classifier on training set: {:.2f}'
      .format(logreg.score(X_train, y_train)))
print('Accuracy of Logistic regression classifier on test set: {:.2f}'
      .format(logreg.score(X_test, y_test)))
# In[11]:
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier().fit(X_train, y_train)
print('Accuracy of Decision Tree classifier on training set: {:.2f}'
      .format(clf.score(X_train, y_train)))
print('Accuracy of Decision Tree classifier on test set: {:.2f}'
      .format(clf.score(X_test, y_test)))
# In[12]:
# Depth-limited tree to reduce overfitting relative to clf above.
clf2 = DecisionTreeClassifier(max_depth=3).fit(X_train, y_train)
print('Accuracy of Decision Tree classifier on training set: {:.2f}'
      .format(clf2.score(X_train, y_train)))
print('Accuracy of Decision Tree classifier on test set: {:.2f}'
      .format(clf2.score(X_test, y_test)))
# In[13]:
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
print('Accuracy of K-NN classifier on training set: {:.2f}'
      .format(knn.score(X_train, y_train)))
print('Accuracy of K-NN classifier on test set: {:.2f}'
      .format(knn.score(X_test, y_test)))
# In[14]:
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
lda = LinearDiscriminantAnalysis()
lda.fit(X_train, y_train)
print('Accuracy of LDA classifier on training set: {:.2f}'
      .format(lda.score(X_train, y_train)))
print('Accuracy of LDA classifier on test set: {:.2f}'
      .format(lda.score(X_test, y_test)))
# In[15]:
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train, y_train)
print('Accuracy of GNB classifier on training set: {:.2f}'
      .format(gnb.score(X_train, y_train)))
print('Accuracy of GNB classifier on test set: {:.2f}'
      .format(gnb.score(X_test, y_test)))
# In[16]:
from sklearn.svm import SVC
svm = SVC()
svm.fit(X_train, y_train)
print('Accuracy of SVM classifier on training set: {:.2f}'
      .format(svm.score(X_train, y_train)))
print('Accuracy of SVM classifier on test set: {:.2f}'
      .format(svm.score(X_test, y_test)))
# In[17]:
# Detailed per-class report for the k-NN model.
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
pred = knn.predict(X_test)
print(confusion_matrix(y_test, pred))
print(classification_report(y_test, pred))
| true |
46c5be1f70d2a24bfc6d7b113a5842a2daffcde8 | Python | alexwestside/parser_bitcoins | /parser_bitcoins.py | UTF-8 | 3,560 | 3.25 | 3 | [] | no_license | import requests
import json
import re
import csv
from decimal import Decimal
top = 20 # Count of top coins that need to collect in dataset
         # NOTE(review): range(1, top) below keeps ranks 1..19 only — confirm intent.
coins_list = []      # filled by get_coins_list() with top coin names
api_coins = 'https://www.cryptocompare.com/api/data/coinlist/' # Example API request returning all coins, which we rank by SortOrder
currency_list = {}   # coin name -> list of quote-currency symbols (get_currency_list)
api_currency = 'https://min-api.cryptocompare.com/data/top/pairs?fsym=XMR&limit=1000' # Example API request for a coin's currency pairs
data_coins = []      # NOTE(review): unused at module level; shadowed by a local in get_coins_list
api_data = 'https://www.cryptocompare.com/api/data/coinsnapshot/?fsym=BTC&tsym=USD' # Example API request for coin-market-currency data
csv_colums = ['Coin name', 'Market', 'Currency', 'Price', 'Open 24H', 'Range 24H'] # Struct of CSV file
# Func making a list of a top20 coins
def get_coins_list():
    """Populate the global coins_list with the names of the top-ranked coins.

    Fetches the full coin table from the CryptoCompare API and appends to
    coins_list the Name of every coin whose SortOrder lies in 1..top-1.
    """
    response = requests.get(api_coins)
    coin_table = json.loads(response.content).get('Data')
    wanted_ranks = range(1, top)
    for symbol in coin_table:
        info = coin_table[symbol]
        if int(info.get('SortOrder')) in wanted_ranks:
            coins_list.append(info.get('Name'))
    return
# Func making a lists all curencys in connect by each coin
def get_currency_list():
    """Fill the global currency_list: coin name -> list of quote currencies.

    For each coin collected by get_coins_list(), queries the CryptoCompare
    top-pairs endpoint and records every pair's 'toSymbol'.
    """
    # The placeholder symbol embedded in the URL template never changes, so
    # extract it once instead of re-running the regex on every iteration.
    re_find = re.findall(r'(?<=\=)\w+(?=\&)', api_currency)
    for coin in coins_list:
        get_api_currency = api_currency.replace(re_find[0], str(coin))
        request = requests.get(get_api_currency)
        data_currency = json.loads(request.content).get('Data')
        for tok in data_currency:
            # setdefault collapses the original's duplicated if/else arms,
            # which both appended exactly the same value.
            currency_list.setdefault(coin, []).append(str(tok.get('toSymbol')))
    return
# Func making a dataset fo all coin in coins_list and write it in file
def get_data_coins():
    """Write datacoins.csv: one row per (coin, quote currency, exchange).

    Each row holds symbol, market, quote currency, price, 24h open and the
    24h high-low range, all numeric values truncated to 10 characters.
    """
    # The fsym placeholder in the URL template is fixed; locate it once
    # outside the loops (loop-invariant).
    re_find_coin = re.findall(r'(?<=\=)\w+(?=\&)', api_data)
    # Context manager guarantees the CSV file is closed even if a request or
    # JSON decode raises mid-loop; the original leaked the handle then.
    # NOTE(review): 'wb' + csv.writer is Python 2 usage; on Python 3 this
    # would need open('datacoins.csv', 'w', newline='').
    with open('datacoins.csv', 'wb') as fp:
        csvwriter = csv.writer(fp, delimiter=',')
        csvwriter.writerow(csv_colums)
        for coin in currency_list:
            for currecy in currency_list.get(coin):
                # Substitute the coin symbol, then the (second) currency symbol.
                get_api_currency = api_data.replace(re_find_coin[0], str(coin))
                re_find_currency = re.findall(r'(?<=\=)\w+', get_api_currency)
                get_api_currency = get_api_currency.replace(re_find_currency[1], str(currecy))
                request = requests.get(get_api_currency)
                data_coin = json.loads(request.content).get('Data')
                if len(data_coin) != 0:
                    for data in data_coin.get('Exchanges'):
                        datacoins = [
                            data.get('FROMSYMBOL'),
                            data.get('MARKET'),
                            data.get('TOSYMBOL'),
                            str(Decimal(float(data.get('PRICE'))))[:10],
                            str(Decimal(float(data.get('OPEN24HOUR'))))[:10],
                            str(Decimal(float(data.get('HIGH24HOUR')) - float(data.get('LOW24HOUR'))))[:10],
                        ]
                        csvwriter.writerow(datacoins)
    return
# Main func produce dataset named - "datacoins.csv"
def main():
    """Run the full pipeline and produce the datacoins.csv dataset."""
    get_coins_list()      # 1. collect the top coin names
    get_currency_list()   # 2. map each coin to its quote currencies
    get_data_coins()      # 3. fetch per-pair market data and write the CSV
if __name__ == "__main__":
main() | true |
a7e36de0a0f90363d0a15fe7838002d2a919e6c0 | Python | aduxhi/learnpython | /Class/class_learn.py | UTF-8 | 2,309 | 4.1875 | 4 | [] | no_license | '''
类(class):用来描述具有相同属性和方法的集合
方法:类中定义的函数
类变量:类变量在整个实例化的对象中是公用的。类变量定义在类中且在函数体之外。类变量通常不作为实例变量使用。
实例变量:在类的声名中,属性是用变量来表示的。这种变量就称为实例变量。是在类声明的内部但是在类的其他成员方法之外声明。
数据成员:类变量或者实例变量用于处理类及其实例对象的相关的数据。
实例化:创建一个类的实例,类的具体对象。
对象:通过类定义的数据结构实例。对象包括两个数据成员(类变量和实例变量)和方法。
'''
'''
class MyClass:
i = 12345
def f(self):
return "hello world"
x = MyClass()
print("MyClass lei de shu xing i wei :", x.i)
print("MyClass lei de fang fa wei :", x.f())
'''
'''
__init__() 实例化操作后会自动调用__init__()方法
'''
'''
class Complex:
def __init__(self, realpart, imagpart):
self.r = realpart
self.i = imagpart
x = Complex(3.0, -4.5)
print(x.r,x.i)
'''
class people:
    """A basic person with a name, an age and a private weight."""

    # Class-level defaults for the public attributes.
    name = ""
    age = 0
    # Double leading underscore: name-mangled to _people__weight, so it is
    # not reachable as .__weight from outside the class.
    __weight = 0

    def __init__(self,n,a,w):
        # Bind the constructor arguments onto the instance in one step.
        self.name, self.age, self.__weight = n, a, w

    def speak(self):
        # Print a short self-introduction (message text kept verbatim).
        print("%s 说:我 %d 岁。"%(self.name,self.age))
'''
p = people("runoob", 10, 30)
p.speak()
'''
# Single inheritance: student extends people.
class student(people):
    """A student: a person with an additional grade attribute."""
    grade = ""  # class-level default for the grade

    def __init__(self,n,a,w,g):
        # Call the parent class constructor for name/age/weight.
        people.__init__(self,n,a,w)
        self.grade = g

    # Override the parent class speak() with a student-specific message.
    def speak(self):
        print("%s shuo: wo %d sui le, zai du %d nian ji."%(self.name,self.age,self.grade))
'''
s =student("Allen Du", 25, 66, 13)
s.speak()
'''
class speaker:
    """A public speaker with a name and a talk topic."""

    # Class-level defaults.
    topic = ""
    name = ""

    def __init__(self,n,t):
        # Record the speaker's name and talk topic on the instance.
        self.name = n
        self.topic = t

    def speak(self):
        # Introduce the speaker and the topic (message text kept verbatim).
        print("我叫 %s,我是一个演说家,我演讲的主题是 %s"%(self.name,self.topic))
# Multiple inheritance: sample mixes speaker and student.
class sample (speaker,student):
    """Combines speaker and student; MRO is (sample, speaker, student, people)."""
    a = ""

    def __init__(self,n,a,w,g,t):
        # Initialise both parents explicitly: student sets name/age/weight/
        # grade, then speaker (re)sets name and sets topic.
        student.__init__(self,n,a,w,g)
        speaker.__init__(self,n,t)
# With the same method name on both bases, the base listed first in the
# class statement (speaker) wins the MRO lookup, so speak() resolves to
# speaker.speak here.
test = sample("Allen",25,66,6,"Python")
test.speak()
| true |
cc53d25ec9012395e834b0cd50b181e46210b7ce | Python | Chirag-v09/Python | /OpenCV/template bounding box.py | UTF-8 | 3,071 | 2.921875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 3 16:32:23 2020
@author: Chirag
"""
import cv2
# Single best-match template matching using normalised squared difference
# (lower = better match).
method = cv2.TM_SQDIFF_NORMED
# Read the images from the file
# NOTE(review): the variable names and filenames look swapped here —
# 'mahindra.jpg' loads into small_image and 'mahindra small.jpg' into
# large_image; confirm which file is actually the template.
small_image = cv2.imread('mahindra.jpg')
large_image = cv2.imread('mahindra small.jpg')
result = cv2.matchTemplate(large_image, small_image, method)
# We want the minimum squared difference
mn,_,mnLoc,_ = cv2.minMaxLoc(result)
# Draw the rectangle:
# Extract the coordinates of our best match
MPx,MPy = mnLoc
# Step 2: Get the size of the template. This is the same size as the match.
trows,tcols = small_image.shape[:2]
# Step 3: Draw the rectangle on large_image
cv2.rectangle(large_image, (MPx,MPy),(MPx+tcols,MPy+trows),(0,0,255),2)
# Display the original image with the rectangle around the match.
cv2.imshow('output',large_image)
# The image is only displayed if we call this
cv2.waitKey(0)
import cv2
import numpy as np

# Multi-match variant: draw a box at every location scoring >= the threshold.
img_rgb = cv2.imread('mahindra.jpg')
template = cv2.imread('mahindra small.jpg')
# OpenCV image shape is (rows, cols, channels) = (height, width, channels).
# The original `w, h = template.shape[:-1]` assigned height to w and width
# to h, so the drawn boxes had swapped dimensions for non-square templates.
h, w = template.shape[:2]
res = cv2.matchTemplate(img_rgb, template, cv2.TM_CCOEFF_NORMED)
threshold = .8
loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):  # Switch collumns and rows -> (x, y) points
    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
cv2.imwrite('result.png', img_rgb)
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Compare template-matching methods on grayscale images, drawing the single
# best match for each method and showing the score map beside it.
img = cv2.imread('mahindra.jpg',0)
img2 = img
template = cv2.imread('mahindra small.jpg',0)
w, h = template.shape[::-1]   # 2-D grayscale shape (rows, cols) reversed -> (w, h)
# All the 6 methods for comparison in a list
methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
           'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
methods = ['cv2.TM_CCOEFF']   # overrides the full list: only TM_CCOEFF runs
for meth in methods:
    img = img2
    # The names are fixed literals defined above; getattr(cv2, ...) would
    # still be preferable to eval.
    method = eval(meth)
    # Apply template Matching
    res = cv2.matchTemplate(img,template,method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(img,top_left, bottom_right, 255, 2)
    # Left panel: raw match scores; right panel: image with the match box.
    plt.subplot(121),plt.imshow(res,cmap = 'gray')
    plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
    plt.subplot(122),plt.imshow(img,cmap = 'gray')
    plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
    plt.suptitle(meth)
    plt.show()
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Grayscale multi-match variant: match on the gray image, draw boxes on the
# colour original at every location scoring >= 0.8.
img_rgb = cv2.imread('mahindra.jpg')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread('mahindra small.jpg',0)
w, h = template.shape[::-1]   # (rows, cols) reversed -> (w, h)
res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
threshold = 0.8
loc = np.where( res >= threshold)
for pt in zip(*loc[::-1]):    # row/col index arrays reversed -> (x, y) points
    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
cv2.imwrite('res.png',img_rgb) | true |
b1d837c50e146e285485f9ce34170bbe83ccc4d3 | Python | django/django | /tests/db_functions/text/test_reverse.py | UTF-8 | 2,372 | 2.5625 | 3 | [
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] | permissive | from django.db import connection
from django.db.models import CharField, Value
from django.db.models.functions import Length, Reverse, Trim
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import Author
class ReverseTests(TestCase):
    """Tests for the Reverse() database function and its registered lookup."""

    @classmethod
    def setUpTestData(cls):
        # Shared fixtures: two aliased authors plus one alias-less,
        # non-ASCII-named author.
        cls.john = Author.objects.create(name="John Smith", alias="smithj")
        cls.elena = Author.objects.create(name="Élena Jordan", alias="elena")
        cls.python = Author.objects.create(name="パイソン")

    def test_null(self):
        # Reversing a NULL alias stays NULL, except on backends that store
        # empty strings as NULL (there it comes back as '').
        author = Author.objects.annotate(backward=Reverse("alias")).get(
            pk=self.python.pk
        )
        self.assertEqual(
            author.backward,
            "" if connection.features.interprets_empty_strings_as_nulls else None,
        )

    def test_basic(self):
        # Reverse works on both column references and constant expressions.
        authors = Author.objects.annotate(
            backward=Reverse("name"),
            constant=Reverse(Value("static string")),
        )
        self.assertQuerySetEqual(
            authors,
            [
                ("John Smith", "htimS nhoJ", "gnirts citats"),
                ("Élena Jordan", "nadroJ anelÉ", "gnirts citats"),
                ("パイソン", "ンソイパ", "gnirts citats"),
            ],
            lambda a: (a.name, a.backward, a.constant),
            ordered=False,
        )

    def test_transform(self):
        # Registering Reverse as a CharField lookup enables name__reverse=...
        with register_lookup(CharField, Reverse):
            authors = Author.objects.all()
            self.assertCountEqual(
                authors.filter(name__reverse=self.john.name[::-1]), [self.john]
            )
            self.assertCountEqual(
                authors.exclude(name__reverse=self.john.name[::-1]),
                [self.elena, self.python],
            )

    def test_expressions(self):
        # Reverse composes with other functions and chained transforms.
        author = Author.objects.annotate(backward=Reverse(Trim("name"))).get(
            pk=self.john.pk
        )
        self.assertEqual(author.backward, self.john.name[::-1])
        with register_lookup(CharField, Reverse), register_lookup(CharField, Length):
            authors = Author.objects.all()
            self.assertCountEqual(
                authors.filter(name__reverse__length__gt=7), [self.john, self.elena]
            )
            self.assertCountEqual(
                authors.exclude(name__reverse__length__gt=7), [self.python]
            )
| true |
773c5919bd394d59e7b86dabb33a8c956c04d418 | Python | EmilHernvall/projecteuler | /12.py | UTF-8 | 428 | 3.265625 | 3 | [] | no_license | import math
# Project Euler 12 (Python 2): find the first triangular number with at
# least 500 divisors.
t = 0          # current triangular number (0 + 1 + ... + j)
j = 0          # index added each iteration
maxCount = 0   # best divisor count seen so far (progress reporting only)
while True:
    t += j
    i = 2
    n = t
    factorCount = 0
    max = n    # shrinking co-divisor bound (shadows the builtin max)
    # Count divisors in pairs (i, n/i) starting from i = 2; the (1, n)
    # pair is never counted by this loop.
    while i <= max:
        if n % i == 0:
            max = n / i
            factorCount += 2
        i = i + 1
    # Perfect squares get a +1 correction.  NOTE(review): on small cases the
    # shrinking-bound loop double-counts one large divisor, which offsets
    # the skipped (1, n) pair for non-squares; worth re-verifying, as is
    # '>= 500' here vs the puzzle's strict 'over 500'.
    if n > 0 and math.floor(math.sqrt(n))**2 == n:
        factorCount += 1
    if factorCount >= 500:
        print "Found: " + str(t)
        break
    # Report progress whenever a new divisor-count record is reached.
    if factorCount > maxCount:
        maxCount = factorCount
        print str(j) + ": " + str(t) + ": " + str(factorCount)
    j += 1
| true |
52106d6fbed41b27b31f07d89a6fb2f2c39e2e32 | Python | yubowenok/vision-zero | /hourly_segments/aggregate.py | UTF-8 | 8,654 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python
# Process the generated speed files and aggregate the speeds based on
# street, hour, time of day, etc.
import sys, datetime, re
import road_network, sign_installation, speed_limit
import argparse
# Bin attributes the user may combine (via --bin) to group the aggregation.
supported_bin_attrs = [
    'segment',
    'day_of_month',
    'time_of_day',
    'day_of_week',
    'is_weekday',
    'hour',
    'speed_limit',
    'sign',
    'season'
]
parser = argparse.ArgumentParser(
    description='Aggregate the estimated speed/volume of TLC trip records for yellow cabs.')
parser.add_argument('--data_list', dest='data_list', type=str, required=True,
                    help='list of data file paths')
parser.add_argument('--bin', dest='bin', type=str, required=True,
                    help='bin attributes as a comma separated string of the following:' +
                    ','.join(supported_bin_attrs))
parser.add_argument('--output', dest='output', type=str, required=True,
                    help='output path')
parser.add_argument('--without_sign', dest='with_sign', action='store_false',
                    help='include only segments without signs')
parser.add_argument('--total', dest='total', action='store_true',
                    help='compute total rather than average')
parser.add_argument('--data_type', dest='data_type', default='speed', type=str,
                    help='aggregated data type: speed, volume, count (only affect csv header)')
# with_sign: None = keep all segments, False = only segments without signs.
# No CLI flag sets it to True (the with-sign-only filter is unreachable).
parser.set_defaults(with_sign=None)
args = parser.parse_args()
bin_attrs = args.bin.split(',')
# Reject unknown bin attributes up front.
for attr in bin_attrs:
    if attr not in supported_bin_attrs:
        print >> sys.stderr, 'unsupported bin attribute "%s"' % attr
        sys.exit(1)
with_sign = args.with_sign
compute_total = args.total
# Parse the processed road network.
# Read the simple network from previous speed estimation work.
#network_simple = road_network.read_simple_network('network/manhattan_with_distances_clean.txt')
# Read the full LION network.
#network_full = road_network.read_lion('network/Node.csv', 'network/Edge.csv')
# Generate a pruned network for nodes in Manhattan only from the full LION network.
#network_pruned = road_network.prune_network(network_full, network_simple)
# Write the pruned network to node/edge files.
#road_network.write_lion_csv(network_pruned, 'network/lion_nodes.csv', 'network/lion_edges.csv')
# Write the pruned network to a clean network file used by speed estimation (without attributes irrelevant to speed estimation).
#road_network.write_clean_network(network_pruned, 'network/lion_network_pruned.txt')
# Read the lion network.
network_lion = road_network.read_lion('network/lion_nodes.csv', 'network/lion_edges.csv')
# Set network
network = network_lion
# Parse the sign installation. We do not have complete information and the precise installation dates
# for now. The following lines generate and read the (incomplete) sign installation information.
#sign_installation.process('corridors_sign_installation.csv', 'network/sign_installation.csv', network)
#sign_installation.read('network/sign_installation.csv', network)
# If speed limit information is needed, then place the speed_limit.csv file
# within the running directory and uncomment the line that generates/reads it.
# Generate speed limit
#speed_limit.process('Speed_limit_manhattan_verified.csv', 'speed_limit.csv', network)
# Read speed limit
# (Presumably annotates network edges with .speed_limit, read by the
# aggregation loop below — verify against the speed_limit module.)
speed_limit.read('network/speed_limit.csv', network)
# Plot speed limit (for visualization only)
#speed_limit.plot_sign('sign_locations_lion_maxsl.csv', network)
# Time of day definition.
# NOTE: datetime.time(06, ...) uses leading-zero integer literals, which is
# Python 2-only syntax.
times_of_day = {
    'morning-peak': [datetime.time(06, 00, 00), datetime.time(9, 59, 59)],
    'mid-day': [datetime.time(10, 00, 00), datetime.time(15, 59, 59)],
    'afternoon-peak': [datetime.time(16, 00, 00), datetime.time(19, 59, 59)],
    # Left is larger than right. This is left for the 'else' case.
    #'off-peak': [datetime.time(20, 00, 00), datetime.time(05, 59, 59)],
}
times_of_day_rank = {
    'morning-peak': 0,
    'mid-day': 1,
    'afternoon-peak': 2,
    'off-peak': 3,
}
# Day of week definition.
# Coarse buckets: Tue/Wed/Thu share one label.
days_of_week_names = {
    0: 'Mon',
    1: 'Tue-Thu',
    2: 'Tue-Thu',
    3: 'Tue-Thu',
    4: 'Fri',
    5: 'Sat',
    6: 'Sun',
}
days_of_week_rank = {
    'Mon': 0,
    'Tue-Thu': 1,
    'Fri': 2,
    'Sat': 3,
    'Sun': 4,
}
# Fine-grained weekday names/ranks (used for the 'day_of_week' bin attribute).
day_of_week_names = {
    0: 'Mon',
    1: 'Tue',
    2: 'Wed',
    3: 'Thu',
    4: 'Fri',
    5: 'Sat',
    6: 'Sun'
}
day_of_week_rank = {
    'Mon': 0,
    'Tue': 1,
    'Wed': 2,
    'Thu': 3,
    'Fri': 4,
    'Sat': 5,
    'Sun': 6
}
# Months belonging to each season.
seasons = {
    'Spring': [3, 4, 5],
    'Summer': [6, 7, 8],
    'Fall': [9, 10, 11],
    'Winter': [12, 1, 2]
}
# Stores the bin sum and count.
# Bin id is a concatenation of attributes, such as '<segment_id>,<year>,<month>,<day of week>'
bins = {}
# Slow-zone announcement date (kept for the commented-out before/after binning).
announcement_date = datetime.datetime(2014, 11, 7)
# Process the speed files: accumulate per-bin [speed sum, hour count,
# trip count] over every (hour, edge) sample.
f_speeds = open(args.data_list, 'r')
for speed_file in f_speeds.readlines():
    if speed_file.strip() == '':
        continue
    print >> sys.stderr, 'processing %s' % speed_file.strip()
    # get count file: the per-hour trip counts live in a parallel file whose
    # path differs only by 'speeds' -> 'lion_counts'.
    count_file = re.sub('speeds', 'lion_counts', speed_file)
    f = open(speed_file.strip(), 'r')
    c = open(count_file.strip(), 'r')
    # The two files are line-aligned: one line per hour, first token is the
    # timestamp, remaining tokens are per-edge values.
    for line in f.readlines():
        count_tokens = c.readline().rstrip().split()
        line_tokens = line.split()
        # Timestamp token format: YYYY-MM-DD_HH-MM-SS (split on '-' and '_').
        dt_tokens = [int(x) for x in re.split('-|_', line_tokens[0])]
        year, month, day, hour, minute, second = dt_tokens
        dt = datetime.datetime(year, month, day, hour, minute, second)
        time_of_day = 'off-peak' # If not in other time_of_day bins, then it's off peak.
        for t_of_day, t_range in times_of_day.iteritems():
            if t_range[0] <= dt.time() and dt.time() <= t_range[1]:
                time_of_day = t_of_day
                break
        season = ''
        for season_name, season_months in seasons.iteritems():
            if month in season_months:
                season = season_name
        day_of_week = day_of_week_names[dt.weekday()]
        is_weekday = True if dt.weekday() <= 4 else False
        speeds = [float(x) for x in line_tokens[1:]]
        counts = [int(x) for x in count_tokens[1:]]
        for edge_index, speed in enumerate(speeds):
            if speed == -1:
                continue # Skip roads without computed speeds.
            sign = network.edges[edge_index].sign
            count = counts[edge_index]
            # Honor the --without_sign filter (with_sign None keeps everything).
            if (with_sign == True and sign != 'yes') or (with_sign == False and sign != 'no'):
                continue
            speed_limit = network.edges[edge_index].speed_limit
            #bin_id = ','.join([sign, 'before' if dt < announcement_date else 'after'])
            #bin_arr = [year, month]
            bin_arr = [] # used for non year/month computation
            # Build the bin key from the user-selected attributes, in order.
            for attr in bin_attrs:
                if attr == 'segment':
                    bin_arr.append(edge_index)
                elif attr == 'day_of_month':
                    bin_arr.append(day)
                elif attr == 'time_of_day':
                    bin_arr.append(time_of_day)
                elif attr == 'day_of_week':
                    bin_arr.append(day_of_week)
                elif attr == 'hour':
                    bin_arr.append(hour)
                elif attr == 'speed_limit':
                    bin_arr.append(speed_limit)
                elif attr == 'is_weekday':
                    bin_arr.append(is_weekday)
                elif attr == 'season':
                    bin_arr.append(season)
                elif attr == 'sign':
                    # 'sign' is in supported_bin_attrs and passes the CLI
                    # validation, but the original dispatch had no branch for
                    # it, silently dropping it from the bin key.
                    bin_arr.append(sign)
            bin_id = tuple(bin_arr)
            if not bin_id in bins:
                bins[bin_id] = [0, 0, 0] # [sum of speed, count, trip count]
            bins[bin_id][0] += speed
            bins[bin_id][1] += 1
            bins[bin_id][2] += count
# Collapse the bins into output rows.
results = []
for bin_id, val in bins.iteritems():
    if not compute_total:
        value = -1 if val[1] == 0 else (val[0] / val[1]) # avg = sum / count
        results.append([bin_id, value, val[1], val[2]])
    else:
        # Keep the hour/trip counts in total mode too: the writer below
        # unconditionally emits '%d,%d' for res[2]/res[3] (and the header
        # always contains hour_count/trip_count), so the original's
        # 2-element row raised IndexError whenever --total was used.
        results.append([bin_id, val[0], val[1], val[2]])
def results_sorter(x):
    """Sort key: order result rows by their bin-id (the first element)."""
    bin_id = x[0]
    return bin_id
def norm_date(y, m):
    """Format a year/month pair as 'YYYY/MM', zero-padding months below 10."""
    month = str(m)
    if m < 10:
        month = '0' + month
    return str(y) + '/' + month
def bin_id_formatter(x):
    """Render a bin-id tuple as a comma-separated string of its elements.

    Used for the non year/month binning mode, where every element is
    simply stringified in order.
    """
    return ','.join(str(element) for element in x)
# Sort rows by bin id and write the CSV header.
sorted_results = sorted(results, key=results_sorter)
f_output = open(args.output, 'w')
# CSV header line.
#header_line = 'year_month'
header_line = [] # used for non year/month computation
for attr in bin_attrs:
    header_line.append(str(attr))
header_line.append(args.data_type)
header_line += ['hour_count', 'trip_count']
f_output.write(','.join(header_line) + '\n')
# Emit one CSV row per bin: id fields, value, hour count, trip count.
for res in sorted_results:
    f_output.write('%s,' % bin_id_formatter(res[0]))
    if res[1] < 0:
        f_output.write('-1')   # bin had no samples
    else:
        if compute_total:
            f_output.write('%d' % res[1])
        else: # average
            f_output.write('%.6f' % res[1])
    f_output.write(',%d,%d\n' % (res[2], res[3]))
| true |
b3aa402f821ebe0778bc5fd92da032bb37bc703d | Python | wangcho2k/confidence_intervals | /get_scores.py | UTF-8 | 7,973 | 2.578125 | 3 | [] | no_license | import argparse
import logging
import sys
from collections import OrderedDict
from my_pycocoevalcap.bleu.bleu import Bleu
from my_pycocoevalcap.rouge.rouge import Rouge
from my_pycocoevalcap.cider.cider import Cider
from my_pycocoevalcap.meteor.meteor import Meteor
from my_pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
import os, cPickle
# Root logging config (timestamped debug output) plus the module logger.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
class COCOScorer(object):
    """Wraps the COCO caption metrics (BLEU-1..4, METEOR, ROUGE_L, CIDEr)."""

    def score(self, GT, RES, IDs):
        """Score hypotheses RES against references GT for the given IDs.

        GT/RES map id -> list of caption dicts (with 'caption' keys, as
        expected by PTBTokenizer).  Prints one whitespace-separated line of
        per-item metric values per id to stdout.
        """
        self.eval = {}
        self.imgToEval = {}
        # Restrict both sides to the requested ids.
        gts = {}
        res = {}
        for ID in IDs:
            gts[ID] = GT[ID]
            res[ID] = RES[ID]
        tokenizer = PTBTokenizer()
        gts = tokenizer.tokenize(gts)
        res = tokenizer.tokenize(res)
        # =================================================
        # Set up scorers
        # =================================================
        scorers = [
            (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
            (Meteor(),"METEOR"),
            (Rouge(), "ROUGE_L"),
            (Cider(), "CIDEr")
        ]
        eval = {}
        # =================================================
        # Compute scores
        # =================================================
        for scorer, method in scorers:
            sys.stderr.write('Computing %s metric...\n'%str(method))
            score, scores = scorer.compute_score(gts, res, verbose=0)
            # Bleu yields per-item score lists for four n-gram orders; the
            # other metrics yield a single per-item list.
            if type(method) == list:
                for j in range(len(scores)): # j : 1 .. 4
                    eval[method[j]] = []
                    for i in range(len(scores[0])): # i: 1 .. 670
                        eval[method[j]].append(scores[j][i])
            else:
                eval[method] = scores
        # Assemble one line of '%0.4f'-formatted metric values per item.
        scores_list = ''
        for i in range(len(eval[scorers[0][1][0]])):
            for _, method in scorers:
                if type(method) == list:
                    for m in method:
                        scores_list += '%0.4f'%float(eval[m][i]) + " "
                else:
                    scores_list += '%0.4f'%float(eval[method][i]) + " "
            scores_list += '\n'
        print scores_list
        # NOTE(review): self.eval is initialised to {} and never filled here
        # (the per-item values live in the local `eval`); confirm callers
        # expect an empty dict back.
        return self.eval

    def setEval(self, score, method):
        self.eval[method] = score

    def setImgToEvalImgs(self, scores, imgIds, method):
        # Record per-image metric values keyed by image id.
        for imgId, score in zip(imgIds, scores):
            if not imgId in self.imgToEval:
                self.imgToEval[imgId] = {}
            self.imgToEval[imgId]["image_id"] = imgId
            self.imgToEval[imgId][method] = score
def load_pkl(path):
    """Unpickle and return the object stored in the file at *path*."""
    with open(path, 'rb') as f:
        return cPickle.load(f)
def score(ref, sample):
    """Compute corpus-level BLEU-1..4, METEOR, ROUGE_L and CIDEr.

    ref and sample are both dicts mapping id -> list of captions.
    Returns {metric_name: corpus_score}.
    """
    # ref and sample are both dict
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(),"METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    final_scores = {}
    for scorer, method in scorers:
        print 'computing %s score with COCO-EVAL...'%(scorer.method())
        score, scores = scorer.compute_score(ref, sample)
        # Bleu returns a list of four corpus scores; the others one float.
        if type(score) == list:
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    print final_scores
    return final_scores
def test_cocoscorer():
    '''gts = {
    184321:[
    {u'image_id': 184321, u'id': 352188, u'caption': u'A train traveling down-tracks next to lights.'},
    {u'image_id': 184321, u'id': 356043, u'caption': u"A blue and silver train next to train's station and trees."},
    {u'image_id': 184321, u'id': 356382, u'caption': u'A blue train is next to a sidewalk on the rails.'},
    {u'image_id': 184321, u'id': 361110, u'caption': u'A passenger train pulls into a train station.'},
    {u'image_id': 184321, u'id': 362544, u'caption': u'A train coming down the tracks arriving at a station.'}],
    81922: [
    {u'image_id': 81922, u'id': 86779, u'caption': u'A large jetliner flying over a traffic filled street.'},
    {u'image_id': 81922, u'id': 90172, u'caption': u'An airplane flies low in the sky over a city street. '},
    {u'image_id': 81922, u'id': 91615, u'caption': u'An airplane flies over a street with many cars.'},
    {u'image_id': 81922, u'id': 92689, u'caption': u'An airplane comes in to land over a road full of cars'},
    {u'image_id': 81922, u'id': 823814, u'caption': u'The plane is flying over top of the cars'}]
    }
    samples = {
    184321: [{u'image_id': 184321, 'id': 111, u'caption': u'train traveling down a track in front of a road'}],
    81922: [{u'image_id': 81922, 'id': 219, u'caption': u'plane is flying through the sky'}],
    }
    '''
    # Smoke test: two ids, one hypothesis each, two tokenized references.
    gts = {
        '184321':[
        {u'image_id': '184321', u'cap_id': 0, u'caption': u'A train traveling down tracks next to lights.',
         'tokenized': 'a train traveling down tracks next to lights'},
        {u'image_id': '184321', u'cap_id': 1, u'caption': u'A train coming down the tracks arriving at a station.',
         'tokenized': 'a train coming down the tracks arriving at a station'}],
        '81922': [
        {u'image_id': '81922', u'cap_id': 0, u'caption': u'A large jetliner flying over a traffic filled street.',
         'tokenized': 'a large jetliner flying over a traffic filled street'},
        {u'image_id': '81922', u'cap_id': 1, u'caption': u'The plane is flying over top of the cars',
         'tokenized': 'the plan is flying over top of the cars'},]
    }
    samples = {
        '184321': [{u'image_id': '184321', u'caption': u'train traveling down a track in front of a road'}],
        '81922': [{u'image_id': '81922', u'caption': u'plane is flying through the sky'}],
    }
    IDs = ['184321', '81922']
    scorer = COCOScorer()
    scorer.score(gts, samples, IDs)
def build_sample_pairs(samples, vid_ids):
    """Pair each generated caption with its video id.

    Returns an OrderedDict mapping vid_id -> [{'image_id': vid_id,
    'caption': caption}], preserving the input order (COCOScorer looks
    items up by these ids).
    """
    return OrderedDict(
        (vid_id, [{'image_id': vid_id, 'caption': caption}])
        for caption, vid_id in zip(samples, vid_ids)
    )
def load_txt_file(path):
    """Return the list of (newline-terminated) lines of the text file at *path*."""
    with open(path,'r') as f:
        return f.readlines()
def main(text, task='youtube', dataset='test', pkl_name='./youtube.CAP.pkl', verbose=False):
    """Score the hypothesis captions in *text* against pickled references."""
    if verbose:
        logger.debug("Configuration:")
        logger.debug("\t text: %s" % text)
        logger.debug("\t task: %s" % task)
        logger.debug("\t dataset: %s" % dataset)
        logger.debug("\t pkl_name: %s" % pkl_name)
    # Reference captions: vid id -> list of ground-truth caption dicts.
    # NOTE(review): text-mode 'r' works for Python 2 pickles only; Python 3
    # would need 'rb'.
    with open(pkl_name, 'r') as f:
        caps = cPickle.load(f)
    samples = load_txt_file(text)
    samples = [sample.strip() for sample in samples]
    if task == 'youtube':
        # YouTube2Text split: validation = vid1201-1300, test = vid1301-1970.
        if dataset == 'valid':
            ids = ['vid%s' % i for i in range(1201, 1301)]
        else:
            ids = ['vid%s' % i for i in range(1301, 1971)]
    # NOTE(review): only the 'youtube' branch defines `ids`; any other task
    # value raises NameError below — confirm intended.
    samples = build_sample_pairs(samples, ids)
    scorer = COCOScorer()
    gts = OrderedDict()
    for vidID in ids:
        gts[vidID] = caps[vidID]
    score = scorer.score(gts, samples, ids)
if __name__ == "__main__":
    # CLI wrapper: parse arguments and hand them to main() (invoked below).
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--task', type=str, default='youtube', help="Task we are computing the metrics (youtube, "
                        "flickr30k, flickr8k)")
    parser.add_argument('-d', '--dataset', type=str, default='test', help="which dataset to use (dev or test)")
    parser.add_argument('-c', '--caps', type=str, default='./youtube.CAP.pkl',
                        help=".pkl file containing the captions info")
    parser.add_argument('-v', '--verbose', type=str, help="Be verbose")
    parser.add_argument('text', type=str, help="Hypotheses file")
    args = parser.parse_args()
main(args.text, task=args.task, dataset=args.dataset, pkl_name=args.caps, verbose=args.verbose) | true |
9e0bd84bef9b6795af293c27c91fc9dd80f5a6c2 | Python | SudeepS97/Daily-Market-Report | /market_report.py | UTF-8 | 2,486 | 2.625 | 3 | [] | no_license | import os
import argparse
import pandas as pd
import datetime as dt
from config.inputs import stocks
from utils.market_data_puller import get_stock_data, calc_market_stats
from utils.plotter import get_plot_price_movement, save_plot_as_image
from utils.reporter import Reporter
import dataframe_image as dfi
import warnings
warnings.filterwarnings("ignore")
# ---- Command-line interface -------------------------------------------------
# SMTP credentials and report options; defaults target Gmail on port 587.
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--img_path', type=str, default='images/')
parser.add_argument('-s', '--sender', type=str)
parser.add_argument('-p', '--password', type=str)
parser.add_argument('-r', '--receiver', type=str)
parser.add_argument('-e', '--host', type=str, default="smtp.gmail.com")
parser.add_argument('-n', '--port', type=int, default=587)
parser.add_argument('-t', '--subject', type=str, default='Daily Market Report (' + str(dt.datetime.now().date()) + ')')
args = parser.parse_args()

# Unpack parsed options into module-level names used by the __main__ block.
img_path = args.img_path
sender = args.sender        # SMTP account the report is sent from
password = args.password    # NOTE(review): plaintext password on the CLI — consider an env var
receiver = args.receiver
host = args.host
port = args.port
subject = args.subject
def build_stats_and_plots(stocks, img_path='images/'):
    """Pull market data per ticker, save a price chart per ticker, and export a
    styled summary-statistics table as an image.

    Args:
        stocks: iterable of ticker symbols (upper-cased before lookup).
        img_path: directory (with trailing slash) where PNGs are written.

    Side effects:
        Writes '<TICKER>.png' for each ticker and 'stats_table.png' to img_path.
    """
    frames = []
    for stock in stocks:
        stock = stock.upper()
        data = get_stock_data(stock)
        frames.append(calc_market_stats(data, stock))
        # One price-movement chart per ticker, named <TICKER>.png.
        fig = get_plot_price_movement(data, stock)
        save_plot_as_image(fig, img_path, f'{stock}.png')

    stats = pd.DataFrame(
        columns=['Open', 'High', 'Low', 'Close', 'Change', '%_Change', 'Total Volume (K)', 'Turnover (M)'])
    if frames:
        # DataFrame.append was deprecated and removed in pandas 2.0;
        # concat is the supported (and non-quadratic) accumulation path.
        stats = pd.concat([stats] + frames)

    # Per-column display format for the styled table image.
    format_dict = {}
    for col in stats.columns.tolist():
        if '%' in col:
            format_dict[col] = '{:,.2f}%'
        elif 'Volume' in col:
            format_dict[col] = '{:,.0f}'
        elif 'Turnover' in col:
            format_dict[col] = '${:,.0f}'
        else:
            format_dict[col] = '${:,.2f}'

    dfi.export(stats.style. \
               bar(align='mid', color=['salmon', 'darkseagreen'], subset=['%_Change']). \
               bar(align='mid', color=['darkseagreen'], subset=['Total Volume (K)', 'Turnover (M)']). \
               format(format_dict), f"{img_path}stats_table.png")
if __name__ == "__main__":
    # Generate all chart/table images, then e-mail the charts as a grid report.
    build_stats_and_plots(stocks, img_path)
    email = Reporter(sender, password, receiver, host, port, subject)
    # Attach only the per-ticker charts (filenames like 'AAPL.png' whose stem
    # is in the configured `stocks` list) — skips stats_table.png.
    images = [f"{img_path}{img}" for img in os.listdir(img_path) if img.split('.')[0] in stocks]
    email.build_message()
    email.build_image_grid(image_list=images, img_path=img_path, cols=3, offset=1)
    email.send_message()
47f0bd6cfe4a9e447b6706c717885db496d4d618 | Python | leehoawki/HyperShells | /timestamp.py | UTF-8 | 609 | 2.953125 | 3 | [] | no_license | #!/usr/bin/python3
import argparse
import time
from _datetime import datetime
if __name__ == '__main__':
    # CLI: convert a millisecond timestamp (-a) or a YYYYmmddHHMMSS string (-b);
    # with no flags, print the current time in milliseconds.
    cli = argparse.ArgumentParser(description='timestamp command')
    cli.add_argument('-a', help="e.g. timestamp -a 1420000000000")
    cli.add_argument('-b', help="e.g. timestamp -b 20141231122640")
    args = cli.parse_args()
    if args.a:
        # ms -> seconds, then render as a local datetime.
        print(datetime.fromtimestamp(float(args.a) / 1000))
    elif args.b:
        # local time string -> epoch seconds -> milliseconds.
        print(int(time.mktime(time.strptime(args.b, "%Y%m%d%H%M%S")) * 1000))
    else:
        print(int(datetime.now().timestamp()) * 1000)
| true |
dbe0655d11a83ddfd6cbe3943cac42378df738b6 | Python | binchen15/leet-python | /interview/prob150.py | UTF-8 | 605 | 3.078125 | 3 | [] | no_license | class Solution:
def evalRPN(self, tokens: List[str]) -> int:
def eval(a, b, op):
if op == "+":
return a + b
elif op == "-":
return a - b
elif op == "*":
return a * b
else:
return int(a / b)
stack = []
for t in tokens:
if t in "+-*/":
b, a = stack.pop(), stack.pop()
stack.append(eval(a, b, t))
else:
stack.append(int(t))
return stack[0]
| true |
70049e959a107a9ca2ad3358723e8eb2ea3ad1f8 | Python | PaddlePaddle/PARL | /benchmark/torch/coma/sc2_model.py | UTF-8 | 3,727 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import parl
class ComaModel(parl.Model):
    """Container bundling the shared COMA actor and the centralised critic."""

    def __init__(self, config):
        super(ComaModel, self).__init__()
        self.n_actions = config['n_actions']
        self.n_agents = config['n_agents']
        self.state_shape = config['state_shape']
        self.obs_shape = config['obs_shape']

        self.actor_model = ActorModel(self._get_actor_input_dim(),
                                      self.n_actions)
        self.critic_model = CriticModel(self._get_critic_input_dim(),
                                        self.n_actions)

    def policy(self, obs, hidden_state):
        # Delegates to the recurrent actor: returns (logits, next_hidden).
        return self.actor_model(obs, hidden_state)

    def value(self, inputs):
        return self.critic_model(inputs)

    def get_actor_params(self):
        return self.actor_model.parameters()

    def get_critic_params(self):
        return self.critic_model.parameters()

    def _get_actor_input_dim(self):
        # obs + one-hot last action + one-hot agent id
        # (30 + 9 + 3 = 42 on the 3m map)
        return self.obs_shape + self.n_actions + self.n_agents

    def _get_critic_input_dim(self):
        # state + obs + one-hot agent id + all agents' current & last actions
        # (one-hot); 48 + 30 + 3 + 54 = 135 on the 3m map
        return (self.state_shape + self.obs_shape + self.n_agents
                + self.n_actions * self.n_agents * 2)
# all agents share one actor network
# A single actor network is shared by every agent.
class ActorModel(parl.Model):
    """GRU-based actor mapping (obs, hidden) -> (action logits, next hidden).

    Input: obs concatenated with the agent's one-hot last action and one-hot
    id, shape (batch, obs_shape + n_actions + n_agents).
    """

    def __init__(self, input_shape, act_dim):
        super(ActorModel, self).__init__()
        self.hid_size = 64
        self.fc1 = nn.Linear(input_shape, self.hid_size)
        self.rnn = nn.GRUCell(self.hid_size, self.hid_size)
        self.fc2 = nn.Linear(self.hid_size, act_dim)

    def init_hidden(self):
        # Fresh zero hidden state, on the same device/dtype as fc1's weights.
        return self.fc1.weight.new(1, self.hid_size).zero_()

    def forward(self, obs, h0):
        feat = F.relu(self.fc1(obs))
        prev_hidden = h0.reshape(-1, self.hid_size)
        next_hidden = self.rnn(feat, prev_hidden)
        return self.fc2(next_hidden), next_hidden
class CriticModel(parl.Model):
    """Centralised critic producing per-action Q-values.

    Input : [ s(t), o(t)_a, u(t)_a, agent_a, u(t-1) ], shape (batch, input_shape)
    Output: Q, shape (batch, n_actions); batch = ep_num * n_agents.
    """

    def __init__(self, input_shape, act_dim):
        super(CriticModel, self).__init__()
        hid_size = 128
        self.fc1 = nn.Linear(input_shape, hid_size)
        self.fc2 = nn.Linear(hid_size, hid_size)
        self.fc3 = nn.Linear(hid_size, act_dim)

    def forward(self, inputs):
        out = F.relu(self.fc1(inputs))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
| true |
d36934cc623a396f13e0e1a5724e87ad6a59d6fd | Python | psc040922/presika | /A.IOP.py | UTF-8 | 142,016 | 2.640625 | 3 | [] | no_license | import discord
import openpyxl
import asyncio
client = discord.Client()
@client.event
async def on_ready():
print('*DB Online*')
print("Client Name= " + "'" + client.user.name + "'")
print("Client ID= " + "'" + client.user.id + "'")
print('---LOG---')
await client.change_presence(game=discord.Game(name='사용법 : /인형도감?', type=1))
@client.event
async def on_message(message):
if message.content.startswith('/오류'):
clear = message.content.split(" ")
print(clear)
if message.content.startswith("/인형도감사용법") or message.content.startswith("/인형도감?"):
channel = message.channel
embed = discord.Embed(
title = '명령어목록',
description = '"전 도움말을 다 읽는 파인데"',
colour = discord.Colour.red()
)
embed.set_footer(text = '대소문자 무시와 띄어쓰기는 안된다구욧!')
embed.add_field(name='/[인형이름]', value='해당 인형의 프로필를 알려줍니다' + '\n' + '/No.[숫자]. 또는 /[인형별명]으로도 검색가능합니다.', inline=False)
embed.add_field(name='/[인형이름] 드랍', value='해당 인형의 드랍 지역을 알려줍니다.', inline=False)
embed.add_field(name='/[제조시간]', value='해당 제조시간에서 획득할 수 있는 인형 또는 장비, 요정 등을 알려줍니다. ' + '\n' + '00:00 또는 0000의 양식으로 검색해주세요.', inline=False)
embed.add_field(name='/자원소비량', value='인형별 자원 소비량을 알려줍니다.', inline=False)
embed.add_field(name='/장비최소식', value='장비 제조의 필요한 최소 조건을 알려줍니다.', inline=False)
embed.add_field(name='/요정등장조건', value='요정별 등장 조건을 알려줍니다.', inline=False)
embed.add_field(name='팁', value='[괄호]는 예시일 뿐, 명령어에 포함되어있지않습니다. 괄호를 뺴야 정상작동합니다.', inline=False)
embed.add_field(name='현재 구현 인형', value='No.1~94', inline=False)
await client.send_message(channel,embed=embed)
if message.content.startswith("/제조식?"):
channel = message.channel
embed = discord.Embed(
title = '제조식 명령어 목록',
description = '나는 더 이상 자원 관리를 그만두겠다..!',
colour = discord.Colour.red()
)
embed.set_footer(text = ' ')
embed.add_field(name = '범용식', value = '/범용식 입력',inline = False)
embed.add_field(name = 'HG식', value = '/HG식 또는 /권총식 입력',inline = False)
embed.add_field(name='SMG식', value='/SMG식 또는 /슴지식 입력', inline=False)
embed.add_field(name='AR식', value='/AR식 또는 /에알식 입력', inline=False)
embed.add_field(name='RF식', value='/RF식 또는 /라플식 입력', inline=False)
embed.add_field(name='MG식', value='/MG식 또는 /망가식 입력', inline=False)
embed.add_field(name='SG식', value='/SG식 또는 /샷건식 입력', inline=False)
embed.add_field(name='중형범용식', value='/중범용식 또는 /중형범용식 입력', inline=False)
if message.content.startswith("/범용식"):
channel = message.channel
embed = discord.Embed(
title = '범용식',
colour = discord.Colour.blue()
)
embed.set_footer(text = 'AR, SMG, RF 획득 가능')
embed.add_field(name = '인력 탄약 식량 부품', value='430 430 430 230', inline=False)
await client.send_message(channel,embed=embed)
if message.content.startswith("/권총식") or message.content.startswith('/HG식'):
channel = message.channel
embed = discord.Embed(
title = 'HG식',
colour = discord.Colour.blue()
)
embed.set_footer(text = 'HG 획득 가능')
embed.add_field(name = '인력 탄약 식량 부품', value='120 120 120 120', inline=False)
embed.add_field(name = '저격식', value='170 160 160 30', inline=False)
await client.send_message(channel,embed=embed)
if message.content.startswith("/슴지식") or message.content.startswith('/SMG식'):
channel = message.channel
embed = discord.Embed(
title = 'SMG식',
colour = discord.Colour.blue()
)
embed.set_footer(text = 'SMG 획득 가능')
embed.add_field(name = '인력 탄약 식량 부품', value='430 430 130 230', inline=False)
await client.send_message(channel,embed=embed)
if message.content.startswith("/에알식") or message.content.startswith('/AR식'):
channel = message.channel
embed = discord.Embed(
title = 'AR식',
colour = discord.Colour.blue()
)
embed.set_footer(text = 'AR 획득 가능')
embed.add_field(name = '인력 탄약 식량 부품', value='130 430 430 230', inline=False)
embed.add_field(name = '저격식', value='95 400 400 95', inline=False)
await client.send_message(channel,embed=embed)
if message.content.startswith("/라플식") or message.content.startswith('/RF식'):
channel = message.channel
embed = discord.Embed(
title = 'RF식',
colour = discord.Colour.blue()
)
embed.set_footer(text = 'RF 획득 가능')
embed.add_field(name = '인력 탄약 식량 부품', value='430 130 430 230', inline=False)
embed.add_field(name = '저격식', value='404 131 404 233', inline=False)
await client.send_message(channel,embed=embed)
if message.content.startswith("/망가식") or message.content.startswith('/MG식'):
channel = message.channel
embed = discord.Embed(
title = 'MG식',
colour = discord.Colour.blue()
)
embed.set_footer(text = 'MG 획득 가능')
embed.add_field(name = '인력 탄약 식량 부품', value='600 600 100 400', inline=False)
embed.add_field(name = '저격식', value='730 630 130 430', inline=False)
await client.send_message(channel,embed=embed)
if message.content.startswith("/샷건식") or message.content.startswith('/SG식'):
channel = message.channel
embed = discord.Embed(
title = 'SG식',
colour = discord.Colour.blue()
)
embed.set_footer(text = 'SG 획득 가능')
embed.add_field(name = '인력 탄약 식량 부품', value='6000 2000 6000 4000', inline=False)
await client.send_message(channel,embed=embed)
if message.content.startswith("/중범용식") or message.content.startswith('/중형범용식'):
channel = message.channel
embed = discord.Embed(
title = '중형 범용식',
colour = discord.Colour.blue()
)
embed.set_footer(text = 'SMG AR RF MG SG 획득 가능')
embed.add_field(name = '인력 탄약 식량 부품', value='6000 2000 6000 4000', inline=False)
embed.add_field(name = '부가설명', value='SG식의 비해 SG가 나올 확률보다 HG제외 다른 모든 총기군의 5성 출현률이 높다', inline=False)
await client.send_message(channel,embed=embed)
if message.content.startswith("/DB Online?"):
await client.send_message(message.channel, embed=discord.Embed(title = 'DB Online', descrption = ''))
if message.content.startswith("/자원소비량"):
channel = message.channel
embed = discord.Embed(
title = '자원소비량',
description = '"그치만.. 이렇게라도 하지않으면 지휘관의 자원은 넘쳐나는걸?"',
colour = discord.Colour.red()
)
embed.set_footer(text = '(숫자) = 증가폭')
embed.add_field(name='HG', value='(편제 인원당)' + '\n' +'탄약' + '\n' + 'ㄴ 10 / 15 / 20 / 25 / 30 / (+5)' + '\n' + '식량' + '\n' + 'ㄴ 10 / 15 / 20 / 25 / 30 / (+5)', inline=True)
embed.add_field(name='SMG', value='(편제 인원당)' + '\n' +'탄약' + '\n' + 'ㄴ 25 / 40 / 55 / 70 / 85 / (+15)' + '\n' + '식량' + '\n' + 'ㄴ 20 / 30 / 40 / 50 / 60 / (+10)', inline=True)
embed.add_field(name='AR', value='(편제 인원당)' + '\n' +'탄약' + '\n' + 'ㄴ 20 / 30 / 40 / 50 / 60 / (+10)' + '\n' + '식량' + '\n' + 'ㄴ 20 / 30 / 40 / 50 / 60 / (+10)', inline=True)
embed.add_field(name='RF', value='(편제 인원당)' + '\n' +'탄약' + '\n' + 'ㄴ 15 / 25 / 35 / 45 / 55 / (+10)' + '\n' + '식량' + '\n' + 'ㄴ 30 / 45 / 60 / 75 / 90 / (+15)', inline=True)
embed.add_field(name='MG', value='(편제 인원당)' + '\n' +'탄약' + '\n' + 'ㄴ 40 / 65 / 80 / 115 / 140 / (+25)' + '\n' + '식량' + '\n' + 'ㄴ 30 / 45 / 60 / 75 / 90 / (+15)', inline=True)
embed.add_field(name='SG', value='(편제 인원당)' + '\n' +'탄약' + '\n' + 'ㄴ 30 / 45 / 60 / 75 / 90 / (+15)' + '\n' + '식량' + '\n' + 'ㄴ 40 / 65 / 80 / 115 / 140 / (+25)', inline=True)
embed.add_field(name='헬리포트 인력', value='(총기 종류에 무관하게 편제 수 합산 × 2)' + '\n' +'ex) 3/4/5/1/2 링크면 (3 + 4 + 5 + 1 + 2) × 2 = 30 소모', inline=True)
await client.send_message(channel,embed=embed)
if message.content.startswith("/장비최소식"):
channel = message.channel
embed = discord.Embed(
title = '장비최소식',
description = '"우중 센세 제발.."',
colour = discord.Colour.red()
)
embed.add_field(name='사이트류', value='탄약' + '\n' + 'ㄴ 150 이하' + '\n' + '부품' + '\n' + 'ㄴ 150 이하', inline=True)
embed.add_field(name='야시장비', value='조건없음', inline=True)
embed.add_field(name='소음기', value='조건없음', inline=True)
embed.add_field(name='탄약류', value='탄약' + '\n' + 'ㄴ 탄약 100 이상', inline=True)
embed.add_field(name='외골격', value='조건없음', inline=True)
embed.add_field(name='방탄판', value='조건없음', inline=True)
embed.add_field(name='탄약통', value='부품' + '\n' + 'ㄴ 부품 150 이상', inline=True)
embed.add_field(name='슈트', value='조건없음', inline=True)
await client.send_message(channel,embed=embed)
if message.content.startswith("/요정등장조건"):
channel = message.channel
embed = discord.Embed(
title = '요정 등장 조건',
description = '"공수레 공수거 공수좀"',
colour = discord.Colour.red()
)
embed.set_footer(text = '(숫자) = 증가폭')
embed.add_field(name='인력 탄약 식량 부품', value='------', inline=True)
embed.add_field(name='500 500 500 500', value='용사 요정, 격노 요정, 방패 요정, 수호 요정, 도발 요정, 저격 요정, 포격 요정, 공습 요정, 지휘 요정, 수색 요정, 조명 요정 (최소식요정들)', inline=True)
embed.add_field(name='2000 500 2000 1000', value='최소식 요정, 방어 요정, 증원 요정, 공수 요정', inline=True)
embed.add_field(name='500 2000 2000 1000', value='최소식 요정, 매설 요정, 로켓 요정, 공사 요정', inline=True)
embed.add_field(name='2000 2000 2000 1000', value='이벤트 요정을 제외한 모든 요정이 등장할 수 있는 범용식', inline=True)
await client.send_message(channel,embed=embed)
if message.content.startswith("/SkillPoint") or message.content.startswith("/스포") or message.content.startswith("/제작자"):
await client.send_file(message.channel, 'ACCESS DENIED.jpg')
channel = message.channel
embed = discord.Embed(
title = '스킬포인트 | 제작자',
description = '카리나, 안젤리아, 페르시카, 꼬마 봇의 제작자',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='♥', inline=True)
embed.add_field(name='종류', value='NoYe', inline=True)
embed.add_field(name='제조시간', value='Unknow', inline=True)
embed.add_field(name='별명', value='스포', inline=True)
print('SkillPoint')
await client.send_message(channel,embed=embed)
if message.content.startswith("/콜트리볼버") or message.content.startswith("/콜라") or message.content.startswith("/No.1."):
await client.send_file(message.channel, 'No.1_콜트_리볼버.png')
channel = message.channel
embed = discord.Embed(
title = '콜트 리볼버 | No.1.',
description = '"지휘관, 날…불렀어? 콜라 있어? 저기, 콜라 있는거야?"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:50', inline=True)
embed.add_field(name='스킬', value='일제사격' + '\n' + '지속시간 동안 아군 전원 화력 증가' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 12초' + '\n' + '화력 상승치 : 22%' + '\n' + '지속시간 : 8초', inline=True)
embed.add_field(name='버프', value='1편제-화력 12%, 명중 25% 상승' + '\n' + '□■□ 2편제-화력 15%, 명중 31% 상승' + '\n' + '■◎■ 3편제-화력 18%, 명중 37% 상승' + '\n' + '□■□ 4편제-화력 21%, 명중 43% 상승' + '\n' + '5편제-화력 24%, 명중 50% 상승', inline=True)
embed.add_field(name='별명', value='콜라', inline=True)
print('콜트 리볼버')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M1911") or message.content.startswith("/운명이") or message.content.startswith("/No.2."):
await client.send_file(message.channel, 'No.2_M1911.png')
channel = message.channel
embed = discord.Embed(
title = 'M1911 | No.2',
description = '"운명적인 만남이네요~! 또 이렇게 지휘관님이랑 만나게 되다니~"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:20', inline=True)
embed.add_field(name='스킬', value='연막탄' + '\n' + '폭발한 위치의 2.5 반경에 적의 사속/이속을 감소시키는 연막이 발생' + '\n' + '초반 쿨타임 : 1초' + '\n' + '쿨타임 : 12초' + '\n' + '사속/이속 감속치 : 36/45%' + '\n' + '지속시간 : 4초', inline=True)
embed.add_field(name='버프', value='1편제 - 사속 10%, 명중 25% 상승' + '\n' + '□■□ 2편제 - 사속 12%, 명중 31% 상승' + '\n' + '■◎■ 3편제 - 사속 15%, 명중 37% 상승' + '\n' + '□■□ 4편제 - 사속 17%, 명중 43% 상승' + '\n' + '5편제 - 사속 20%, 명중 50% 상승', inline=True)
embed.add_field(name='별명', value='운명이', inline=True)
print('M1911')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M9") or message.content.startswith("/엠구나노") or message.content.startswith("/No.3."):
await client.send_file(message.channel, 'No.3_M9.png')
channel = message.channel
embed = discord.Embed(
title = 'M9 | No.3',
description = '"베레타 M9인거야! 인기인인거야!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:40', inline=True)
embed.add_field(name='스킬', value='섬광탄' + '\n' + '폭발한 위치의 2.5 반경에 적에게 기절을 건다.' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 12초' + '\n' + '지속시간 : 3.2초', inline=True)
embed.add_field(name='버프', value='1편제 - 화력 10%, 회피 10% 상승' + '\n' + '□■■ 2편제 - 화력 12%, 회피 12% 상승' + '\n' + '□◎□ 3편제 - 화력 15%, 회피 15% 상승' + '\n' + '□■■ 4편제 - 화력 17%, 회피 17% 상승' + '\n' + '5편제 - 화력 20%, 회피 20% 상승', inline=True)
embed.add_field(name='별명', value='엠구나노', inline=True)
print('M19')
await client.send_message(channel,embed=embed)
if message.content.startswith("/콜트파이슨") or message.content.startswith("/No.4."):
await client.send_file(message.channel, 'No.4_콜트_파이슨.png')
channel = message.channel
embed = discord.Embed(
title = '콜트파이슨 | No.4',
description = '"당신이 새로운 "사냥감"인가, 지휘관? 후훗, 걱정 마, 불경한 뜻은 전혀 없으니까."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='제조불가', inline=True)
embed.add_field(name='스킬', value='겁없는 녀석들' + '\n' + '패시브: 자신이 화력 / 사속 / 회피 / 명중 / 치명타율 버프를 받을 때(요정특성 포함).' + '\n' + '버프 진형의 아군에게 해당 스탯 버프 부여(3초 지속, 최대 3중첩)' + '\n' + '액티브: 발동 후 6회의 공격은 일정 확률로 지속시간 동안 자신의 화력 상승(최대 중첩 6회)' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 15초' + '\n' + '패시브 상승치(%) : 6 / 6 / 30 / 30 / 12' + '\n' + '화력 상승치 : 30%' + '\n' + '액티브 화력 상승 지속시간 : 3.2초', inline=True)
embed.add_field(name='버프', value='1편제 - 화력 15%, 치명타율 10% 상승' + '\n' + '□■■ 2편제 - 화력 18%, 치명타율 12% 상승' + '\n' + '■◎□ 3편제 - 화력 22%, 치명타율 15% 상승' + '\n' + '■■□ 4편제 - 화력 26%, 치명타율 17% 상승' + '\n' + '5편제 - 화력 30%, 치명타율 20% 상승', inline=True)
print('콜트파이슨')
await client.send_message(channel,embed=embed)
if message.content.startswith("/나강할매") or message.content.startswith("/나강리볼버") or message.content.startswith("/No.5."):
await client.send_file(message.channel, 'No.5_나강_리볼버.png')
channel = message.channel
embed = discord.Embed(
title = '나강리볼버 | No.5 ',
description = '"이런 늙은이가 취향이라니, 자네도 참 별나구먼."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:20', inline=True)
embed.add_field(name='스킬', value='기선제압N' + '\n' + '지속시간 동안 적 전체 화력 감소/뒤쪽의 수치는 주간작전에 사용시' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 12초' + '\n' + '지속시간 : 8/5초' + '\n' + '화력감소치 : 35/20%', inline=True)
embed.add_field(name='버프', value='1편제 - 화력 16%, 치명률 8% 상승' + '\n' + '□■□ 2편제 - 화력 20%, 치명률 10% 상승' + '\n' + '■◎□ 3편제 - 화력 24%, 치명률 12% 상승' + '\n' + '□■□ 4편제 - 화력 28%, 치명률 14% 상승' + '\n' + '5편제 - 화력 32%, 치명률 16% 상승', inline=True)
embed.add_field(name='별명', value='나강할매', inline=True)
print('나강리볼버')
await client.send_message(channel,embed=embed)
if message.content.startswith("/토카레프") or message.content.startswith("/No.6."):
await client.send_file(message.channel, 'No.6_토카레프.png')
channel = message.channel
embed = discord.Embed(
title = '토카레프 | No.6',
description = '"아, 지휘관, 잘 부탁드립니다."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:45', inline=True)
embed.add_field(name='스킬', value='엄호개시' + '\n' + '지속시간 동안 아군 전체 회피 증가' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 12초' + '\n' + '지속시간 : 8초' + '\n' + '회피 증가치 : 55%', inline=True)
embed.add_field(name='버프', value='1편제 - 사속 10%, 명중 25% 상승' + '\n' + '□■■ 2편제 - 사속 12%, 명중 31% 상승' + '\n' + '□◎□ 3편제 - 사속 15%, 명중 37% 상승' + '\n' + '□■■ 4편제 - 사속 17%, 명중 43% 상승' + '\n' + '5편제 - 사속 20%, 명중 50% 상승', inline=True)
print('토카레프')
await client.send_message(channel,embed=embed)
if message.content.startswith("/스테츠킨") or message.content.startswith("/스테") or message.content.startswith("/No.7."):
await client.send_file(message.channel, 'No.7_스테츠킨.png')
channel = message.channel
embed = discord.Embed(
title = '스테츠킨 | No.7',
description = '"자동 권총, 스테츠킨 APS! 등장!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:55', inline=True)
embed.add_field(name='스킬', value='진압신호' + '\n' + '지속시간 동안 아군 전원 사속 상승' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 12초' + '\n' + '지속시간 : 8초' + '\n' + '사속 상승치 : 22%', inline=True)
embed.add_field(name='버프', value='1편제 - 화력 6%, 사속 12% 상승' + '\n' + '□■■ 2편제 - 화력 7%, 사속 15% 상승' + '\n' + '□◎□ 3편제 - 화력 9%, 사속 18% 상승' + '\n' + '□■■ 4편제 - 화력 10%, 사속 21% 상승' + '\n' + '5편제 - 화력 12%, 사속 24% 상승', inline=True)
embed.add_field(name='별명', value='스테, 번개머리, 수타치킨', inline=True)
print('스테츠킨')
await client.send_message(channel,embed=embed)
if message.content.startswith("/마카로프") or message.content.startswith("/No.8."):
await client.send_file(message.channel, 'No.8_마카로프.png')
channel = message.channel
embed = discord.Embed(
title = '마카로프 | No.8',
description = '"나는 있지, 지휘관님이 명령하기보다는 서로 맞춰나가는 쪽이 더 좋아."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:40', inline=True)
embed.add_field(name='스킬', value='시야봉쇄' + '\n' + '지속시간 동안 적 전체 명중 감소' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 12초' + '\n' + '지속시간 : 8초' + '\n' + '명중 감소치 : 36%', inline=True)
embed.add_field(name='버프', value='1편제 - 화력 10%, 사속 6% 상승' + '\n' + '■□□ 2편제 - 화력 12%, 사속 7% 상승' + '\n' + '■◎■ 3편제 - 화력 15%, 사속 9% 상승' + '\n' + '■□□ 4편제 - 화력 17%, 사속 10% 상승' + '\n' + '5편제 - 화력 20%, 사속 12% 상승', inline=True)
print('마카로프')
await client.send_message(channel,embed=embed)
if message.content.startswith("/P38") or message.content.startswith("/상하이조") or message.content.startswith("/No.9."):
await client.send_file(message.channel, 'No.9_P38.png')
channel = message.channel
embed = discord.Embed(
title = 'P38 | No.9',
description = '"이것은 운명의 만남이에요!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:20', inline=True)
embed.add_field(name='스킬', value='조명탄' + '\n' + '지속시간 동안 아군 전체 명중 상승(야간작전 전용)' + '\n' + '초반 쿨타임 : 3초' + '\n' + '쿨타임 : 16초' + '\n' + '지속시간 : 15초' + '\n' + '명중 감소치 : 90%', inline=True)
embed.add_field(name='버프', value='1편제 - 사속 7%, 명중 28% 상승' + '\n' + '□■■ 2편제 - 사속 8%, 명중 35% 상승' + '\n' + '□◎□ 3편제 - 사속 10%, 명중 42% 상승' + '\n' + '□■■ 4편제 - 사속 12%, 명중 49% 상승' + '\n' + '5편제 - 사속 14%, 명중 56% 상승', inline=True)
embed.add_field(name='별명', value='상하이조', inline=True)
print('P38')
await client.send_message(channel,embed=embed)
if message.content.startswith("/PPK") or message.content.startswith("/발터") or message.content.startswith("/No.10."):
await client.send_file(message.channel, 'No.10_PPK.png')
channel = message.channel
embed = discord.Embed(
title = 'PPK | No.10',
description = '"우후훗, 발터 PPK야. 지휘관, 만나게 돼서...기쁘네요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:22', inline=True)
embed.add_field(name='스킬', value='사냥신호' + '\n' + '지속시간 동안 아군 전원 화력, 치명률 증가' + '\n' + '초반 쿨타임 : 3초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 15초' + '\n' + '화력, 치명률 증가치 : 10%, 35%', inline=True)
embed.add_field(name='버프', value='1편제 - 사속 16%, 치명률 8% 상승' + '\n' + '■□□ 2편제 - 사속 20%, 치명률 10% 상승' + '\n' + '■◎□ 3편제 - 사속 24%, 치명률 12% 상승' + '\n' + '■□□ 4편제 - 사속 28%, 치명률 14% 상승' + '\n' + '5편제 - 사속 32%, 치명률 16% 상승', inline=True)
embed.add_field(name='별명', value='신살자, 유신, 발터, 피피케이', inline=True)
print('PPK')
await client.send_message(channel,embed=embed)
if message.content.startswith("/P08") or message.content.startswith("/No.11."):
await client.send_file(message.channel, 'No.11_P08.png')
channel = message.channel
embed = discord.Embed(
title = 'P08 | No.11',
description = '"루거 P08식 자동권총입니다. 모자라지만 잘 부탁드립니다."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:30', inline=True)
embed.add_field(name='스킬', value='엄호개시N' + '\n' + '지속시간 동안 아군 전체 회피 증가/ 뒤쪽의 수치는 주간작전에 사용시' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 8 / 5초' + '\n' + '회피 증가치 : 85 / 35%', inline=True)
embed.add_field(name='버프', value='1편제 - 화력 7%, 명중 35% 상승' + '\n' + '□■□ 2편제 - 화력 8%, 명중 43% 상승' + '\n' + '□◎■ 3편제 - 화력 10%, 명중 52% 상승' + '\n' + '□■□ 4편제 - 화력 12%, 명중 61% 상승' + '\n' + '5편제 - 화력 14%, 명중 70% 상승', inline=True)
print('P08')
await client.send_message(channel,embed=embed)
if message.content.startswith("/C96") or message.content.startswith("/No.12."):
await client.send_file(message.channel, 'No.12_C96.png')
channel = message.channel
embed = discord.Embed(
title = 'C96 | No.12',
description = '"당신이 제 지휘관인 것이군요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:30', inline=True)
embed.add_field(name='스킬', value='조명탄' + '\n' + '지속시간 동안 아군 전체 명중 증가(야간작전 전용)' + '\n' + '초반 쿨타임 : 3초' + '\n' + '쿨타임 : 16초' + '\n' + '지속시간 : 15초' + '\n' + '명중 증가치 : 100%', inline=True)
embed.add_field(name='버프', value='1편제 - 명중 32%, 회피 15% 상승' + '\n' + '■□□ 2편제 - 명중 40%, 회피 18% 상승' + '\n' + '□◎■ 3편제 - 명중 48%, 회피 22% 상승' + '\n' + '■□□ 4편제 - 명중 56%, 회피 26% 상승' + '\n' + '5편제 - 명중 64%, 회피 30% 상승', inline=True)
print('C96')
await client.send_message(channel,embed=embed)
if message.content.startswith("/92식") or message.content.startswith("/No.13."):
await client.send_file(message.channel, 'No.13_92식.png')
channel = message.channel
embed = discord.Embed(
title = '92식 | No.13',
description = '"바로 저, 92식 권총이 착임했습니다. 배속되는 소대는 어디인가요?"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:35', inline=True)
embed.add_field(name='스킬', value='돌격개시' + '\n' + '지속시간 동안 아군 전원 화력, 사속 증가' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 12초' + '\n' + '지속시간 : 8초' + '\n' + '화력, 사속 증가치 : 10%, 10%', inline=True)
embed.add_field(name='버프', value='1편제 - 명중 25%, 회피 20% 상승' + '\n' + '■■■ 2편제 - 명중 31%, 회피 25% 상승' + '\n' + '■◎■ 3편제 - 명중 37%, 회피 30% 상승' + '\n' + '■■■ 4편제 - 명중 43%, 회피 35% 상승' + '\n' + '5편제 - 명중 50%, 회피 40% 상승', inline=True)
print('92식')
await client.send_message(channel,embed=embed)
if message.content.startswith("/아스트라리볼버") or message.content.startswith("/No.14."):
await client.send_file(message.channel, 'No.14_아스트라_리볼버.png')
channel = message.channel
embed = discord.Embed(
title = '아스트라 리볼버 | No.14',
description = '"잘 부탁드릴게요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:40', inline=True)
embed.add_field(name='스킬', value='진압신호' + '\n' + '지속시간 동안 아군 전원 사속 증가' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 12초' + '\n' + '지속시간 : 8초' + '\n' + '사속 증가치 : 20%', inline=True)
embed.add_field(name='버프', value='1편제 - 사속 10%, 회피 10% 상승' + '\n' + '■□■ 2편제 - 사속 12%, 회피 12% 상승' + '\n' + '□◎□ 3편제 - 사속 15%, 회피 15% 상승' + '\n' + '■□■ 4편제 - 사속 17%, 회피 17% 상승' + '\n' + '5편제 - 사속 20%, 회피 20% 상승', inline=True)
print('아스트라 리볼버')
await client.send_message(channel,embed=embed)
if message.content.startswith("/글록17") or message.content.startswith("/No.15."):
await client.send_file(message.channel, 'No.15_글록_17.png')
channel = message.channel
embed = discord.Embed(
title = '글록17 | No.15',
description = '"글록 17, 도착! 지금, 웃고계신건가요?"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='불가능', inline=True)
embed.add_field(name='스킬', value='기선제압' + '\n' + '지속시간 동안 적 전체 화력 감소' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 12초' + '\n' + '지속시간 : 8초' + '\n' + '화력 감소치 : 25%', inline=True)
embed.add_field(name='버프', value='1편제 - 명중 32%, 회피 15% 상승' + '\n' + '■□■ 2편제 - 명중 40%, 회피 18% 상승' + '\n' + '□◎■ 3편제 - 명중 48%, 회피 22% 상승' + '\n' + '■□■ 4편제 - 명중 56%, 회피 26% 상승' + '\n' + '5편제 - 명중 64%, 회피 30% 상승', inline=True)
print('글록17')
await client.send_message(channel,embed=embed)
if message.content.startswith("/톰슨") or message.content.startswith("/님총톰") or message.content.startswith("/시카고타자기") or message.content.startswith("/No.15."):
await client.send_file(message.channel, 'No.16_톰슨.png')
channel = message.channel
embed = discord.Embed(
title = '톰슨 | No.16',
description = '"당신이 새로운 보스인가... 시카고 타자기야. 잘 부탁해."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='02:39', inline=True)
embed.add_field(name='스킬', value='포스실드' + '\n' + '자신의 피해를 막는 왜곡방벽을 9999점 생성한다' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 16초' + '\n' + '지속시간 : 4초', inline=True)
embed.add_field(name='버프(AR 한정)', value='화력 12%, 회피 15% 상승' + '\n' + '■□□' + '\n' + '□◎□' + '\n' + '■□□', inline=True)
embed.add_field(name='별명', value='님통촘, 시카고타자기', inline=True)
print('톰슨')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M3") or message.content.startswith("/No.17."):
await client.send_file(message.channel, 'No.17_M3.png')
channel = message.channel
embed = discord.Embed(
title = 'M3 | No.17',
description = '"아, 안녕하세요! M3라고 합니다. 자, 잘부탁드립니다!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='01:30', inline=True)
embed.add_field(name='스킬', value='수류탄' + '\n' + '폭발한 위치의 2.5반경 내의 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 3초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 5.5배', inline=True)
embed.add_field(name='버프(AR 한정)', value='명중 40%, 회피 30% 상승' + '\n' + '□□□' + '\n' + '■◎□' + '\n' + '□□□', inline=True)
print('M3')
await client.send_message(channel,embed=embed)
if message.content.startswith("/MAC10") or message.content.startswith("/MAC-10") or message.content.startswith("/No.18."):
await client.send_file(message.channel, 'No.18_MAC-10.png')
channel = message.channel
embed = discord.Embed(
title = 'MAC-10 | No.18',
description = '"지휘관의 지시라면…잉그램 M10은…기쁘게 받아들이죠."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='02:00', inline=True)
embed.add_field(name='스킬', value='연막탄' + '\n' + '폭발 위치의 2.5 반경에 적의 사속/이속을 감소시키는 연막이 발생' + '\n' + '초반 쿨타임 : 1초' + '\n' + '쿨타임 : 16초' + '\n' + '지속시간 : 4초' + '\n' + '사속 / 이속 감소치 : 36 / 50%', inline=True)
embed.add_field(name='버프(AR 한정)', value='화력 12% 상승' + '\n' + '■□□' + '\n' + '■◎□' + '\n' + '■□□', inline=True)
print('MAC-10')
await client.send_message(channel,embed=embed)
if message.content.startswith("/FMG9") or message.content.startswith("/FMG-9") or message.content.startswith("/No.19."):
await client.send_file(message.channel, 'No.19_FMG-9.png')
channel = message.channel
embed = discord.Embed(
title = 'FMG-9 | No.19',
description = '"FMG-9이 보스의 지휘 하에 들어왔습니다. 아, 걱정 마세요. 지금은 변장하지 않았으니까요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='불가능', inline=True)
embed.add_field(name='스킬', value='연막탄' + '\n' + '지속시간 동안 자신의 회피 증가' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 5초' + '\n' + '회피 증가치 : 120%', inline=True)
embed.add_field(name='버프(AR 한정)', value='화력 10%, 회피 12% 상승' + '\n' + '■□□' + '\n' + '□◎□' + '\n' + '■□□', inline=True)
print('FMG-9')
await client.send_message(channel,embed=embed)
if message.content.startswith("/Vector") or message.content.startswith("/벡터") or message.content.startswith("/No.20."):
await client.send_file(message.channel, 'No.20_Vector.png')
channel = message.channel
embed = discord.Embed(
title = 'Vector | No.20',
description = '"응? 새로운 지휘관? 그래, 사이좋게 지내자."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='02:35', inline=True)
embed.add_field(name='스킬', value='소이탄' + '\n' + '소이탄을 던져 1.5 반경에 피해를 주고 매 0.33초마다 화상 대미지를 입히는 구간을 생성' + '\n' + '초반 쿨타임 : 3초' + '\n' + '쿨타임 : 16초' + '\n' + '폭발/지속 피해량 : 7 / 1배' + '\n' + '지속 시간 : 5초', inline=True)
embed.add_field(name='버프(AR 한정)', value='사속 25% 상승승' + '\n' + '□□□' + '\n' + '■◎□' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='벡터, 벡린탄', inline=True)
print('Vector')
await client.send_message(channel,embed=embed)
if message.content.startswith("/PPSh-41") or message.content.startswith("/PPSh41") or message.content.startswith("/파파샤") or message.content.startswith("/No.21."):
await client.send_file(message.channel, 'No.21_PPSh-41.png')
channel = message.channel
embed = discord.Embed(
title = 'PPSh-41 | No.21',
description = '"처음 뵙겠습니다. 지휘관, 저…전혀 무겁지 않아요!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='01:50', inline=True)
embed.add_field(name='스킬', value='수류탄' + '\n' + '폭발한 위치의 2.5반경 내의 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 3초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 5.5배', inline=True)
embed.add_field(name='버프(AR 한정)', value='화력 10%, 사속 5% 상승' + '\n' + '□■□' + '\n' + '□◎□' + '\n' + '□■□', inline=True)
embed.add_field(name='별명', value='파파샤', inline=True)
print('PPSh-41')
await client.send_message(channel,embed=embed)
if message.content.startswith("/PPS-43") or message.content.startswith("/PPS43") or message.content.startswith("/핑파샤") or message.content.startswith("/No.22."):
await client.send_file(message.channel, 'No.22_PPS-43.png')
channel = message.channel
embed = discord.Embed(
title = 'PPS-43 | No.22',
description = '"동지여, 만나서 영광입니다. 저는 가벼운 게 장점입니다."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='02:10', inline=True)
embed.add_field(name='스킬', value='수류탄' + '\n' + '폭발한 위치의 2.5반경 내의 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 3초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 6배', inline=True)
embed.add_field(name='버프(AR 한정)', value='화력 12% 상승' + '\n' + '■□□' + '\n' + '■◎□' + '\n' + '■□□', inline=True)
embed.add_field(name='별명', value='핑파샤', inline=True)
print('PPS-43')
await client.send_message(channel,embed=embed)
if message.content.startswith("/PP-90") or message.content.startswith("/PP90") or message.content.startswith("/란코") or message.content.startswith("/No.23."):
await client.send_file(message.channel, 'No.23_PP-90.png')
channel = message.channel
embed = discord.Embed(
title = 'PP-90 | No.23',
description = '"PP-90이야, 잘 부탁해. 지휘관의 첫 지시, 기쁜 마음으로 기다리고 있을게!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='02:20', inline=True)
embed.add_field(name='스킬', value='회피기동T' + '\n' + '지속시간 동안 자신의 회피 증가' + '\n' + '초반 쿨타임 : 4초' + '\n' + '쿨타임 : 16초' + '\n' + '지속시간 : 15초' + '\n' + '회피 증가치 : 45%', inline=True)
embed.add_field(name='버프(AR 한정)', value='화력 8%, 회피 20% 상승' + '\n' + '■□□' + '\n' + '□◎□' + '\n' + '■□□', inline=True)
embed.add_field(name='별명', value='란코', inline=True)
print('PP-90')
await client.send_message(channel,embed=embed)
if message.content.startswith("/PP-2000") or message.content.startswith("/PP2000") or message.content.startswith("/PPAP") or message.content.startswith("/피피이천") or message.content.startswith("/No.24."):
await client.send_file(message.channel, 'No.24_PP-2000.png')
channel = message.channel
embed = discord.Embed(
title = 'PP-2000 | No.24',
description = '"PP-2000입니다. 계속 당신의 곁에 있을 수 있겠네요. 후훗."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='01:10', inline=True)
embed.add_field(name='스킬', value='수류탄' + '\n' + '폭발한 위치의 2.5반경 내의 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 3초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 5.5배', inline=True)
embed.add_field(name='버프(AR 한정)', value='화력 10%, 명중 25% 상승' + '\n' + '■□□' + '\n' + '□◎□' + '\n' + '■□□', inline=True)
embed.add_field(name='별명', value='PPAP, 피피이천', inline=True)
print('PP-2000')
await client.send_message(channel,embed=embed)
if message.content.startswith("/MP40") or message.content.startswith("/승만이") or message.content.startswith("/엠피") or message.content.startswith("/No.25."):
await client.send_file(message.channel, 'No.25_MP40.png')
channel = message.channel
embed = discord.Embed(
title = 'MP40 | No.25',
description = '"지휘관님. 저, 있는 힘껏 노력할게요!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='01:30', inline=True)
embed.add_field(name='스킬', value='소이탄' + '\n' + '소이탄을 던져 1.5 반경에 피해를 주고 매 0.33초마다 화상 데미지를 입히는 구간을 생성' + '\n' + '초반 쿨타임 : 3초' + '\n' + '쿨타임 : 16초' + '\n' + '폭발/지속 피해량 : 5.5 / 1배' + '\n' + '지속시간 : 5초', inline=True)
embed.add_field(name='버프(AR 한정)', value='명중 25%, 회피 20% 상승' + '\n' + '■□□' + '\n' + '□◎□' + '\n' + '■□□', inline=True)
embed.add_field(name='별명', value='엠피, 승만이', inline=True)
print('MP40')
await client.send_message(channel,embed=embed)
if message.content.startswith("/MP5") or message.content.startswith("/우유") or message.content.startswith("/No.26."):
await client.send_file(message.channel, 'No.26_MP5.png')
channel = message.channel
embed = discord.Embed(
title = 'MP5 | No.26',
description = '"MP5, 지금 막 도착했습니다! 키, 키가 작다고 해서 얕잡아 보지 말아주셔요!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='02:20', inline=True)
embed.add_field(name='스킬', value='포스실드' + '\n' + '자신의 피해를 막는 왜곡방벽을 9999점 생성한다' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 16초' + '\n' + '지속시간 : 3초', inline=True)
embed.add_field(name='버프(AR 한정)', value='명중 40%, 치명률 20% 상승' + '\n' + '■□□' + '\n' + '□◎□' + '\n' + '■□□', inline=True)
embed.add_field(name='별명', value='우유', inline=True)
print('MP5')
await client.send_message(channel,embed=embed)
if message.content.startswith("/스콜피온") or message.content.startswith("/사소리") or message.content.startswith("/No.27."):
await client.send_file(message.channel, 'No.27_스콜피온.png')
channel = message.channel
embed = discord.Embed(
title = '스콜피온 | No.27',
description = '"Vz.61 스콜피온이야. 잘 부탁해~ 전갈이긴 하지만 독은 없다구~"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='02:00', inline=True)
embed.add_field(name='스킬', value='소이탄' + '\n' + '소이탄을 던져 1.5 반경에 피해를 주고 매 0.33초마다 화상 데미지를 입히는 구간을 생성' + '\n' + '초반 쿨타임 : 3초' + '\n' + '쿨타임 : 16초' + '\n' + '폭발/지속 피해량 : 6 / 1배' + '\n' + '지속시간 : 5초', inline=True)
embed.add_field(name='버프(AR 한정)', value='사속 15%, 명중 50% 상승' + '\n' + '□□□' + '\n' + '■◎□' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='사소리', inline=True)
print('스콜피온')
await client.send_message(channel,embed=embed)
if message.content.startswith("/MP7") or message.content.startswith("/엠삐칠") or message.content.startswith("/No.28."):
await client.send_file(message.channel, 'No.28_MP7.png')
channel = message.channel
embed = discord.Embed(
title = 'MP7 | No.28',
description = '""사육사 씨", 드디어 마중 나온 거야? 수고했어."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='02:18(중형제조)', inline=True)
embed.add_field(name='스킬', value='현월무희' + '\n' + '지속시간 동안 자신의 사속, 명중이 감소하는 대신 기동력과 회피 증가' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 8초' + '\n' + '사속, 명중 감소량 : 20%' + '\n' + '기동력, 회피 증가량 : 50% / 180%' + '\n' + '지속시간 : 5초', inline=True)
embed.add_field(name='버프(AR 한정)', value='사속 15%, 명중 25% 상승' + '\n' + '■□□' + '\n' + '■◎□' + '\n' + '■□□', inline=True)
embed.add_field(name='별명', value='엠삐칠', inline=True)
print('MP7')
await client.send_message(channel,embed=embed)
if message.content.startswith("/스텐MkII") or message.content.startswith("/스댕") or message.content.startswith("/비빗쟈") or message.content.startswith("/No.29."):
await client.send_file(message.channel, 'No.29_스텐_Mkll.png')
channel = message.channel
embed = discord.Embed(
title = '스텐 MkII | No.29',
description = '"소문의 지휘관님이신가요? 처음 뵙겠습니다! 가자구요~"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='01:40', inline=True)
embed.add_field(name='스킬', value='수류탄' + '\n' + '폭발한 위치의 2.5반경 내의 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 3초' + '\n' + '쿨타임 : 16초' + '\n' +'피해량 : 6배', inline=True)
embed.add_field(name='버프(AR 한정)', value='명중 10%, 회피 30% 상승' + '\n' + '■□□' + '\n' + '■◎□' + '\n' + '■□□', inline=True)
embed.add_field(name='별명', value='스댕, 비빗쟈', inline=True)
print('스텐 MkII')
await client.send_message(channel,embed=embed)
if message.content.startswith("/No.30."):
await client.send_file(message.channel, 'ACCESS DENIED.jpg')
channel = message.channel
embed = discord.Embed(
title = 'ACCESS DENIED | No.30',
description = '해당 번호는 결번입니다.',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='ACCESS DENIED', inline=True)
embed.add_field(name='종류', value='ACCESS DENIED', inline=True)
embed.add_field(name='제조시간', value='ACCESS DENIED', inline=True)
embed.add_field(name='스킬', value='ACCESS DENIED' + '\n' + 'ACCESS DENIED' + '\n' + '초반 쿨타임 : ACCESS DENIED초' + '\n' + '쿨타임 : ACCESS DENIED초' + '\n' + '지속시간 : ACCESS DENIED초' + '\n' + 'ACCESS DENIED : 0', inline=True)
embed.add_field(name='버프', value='ACCESS DENIED' + '\n' + '□□□' + '\n' + '□◎□' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='ACCESS DENIED', inline=True)
print('ACCESS DENIED 30')
await client.send_message(channel,embed=embed)
if message.content.startswith("/베레타38형") or message.content.startswith("/No.31."):
await client.send_file(message.channel, 'No.31_베레타_38형.png')
channel = message.channel
embed = discord.Embed(
title = '베레타 38형 | No.31',
description = '"베레타 M38입니다. 잘부탁드립니다!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='01:30', inline=True)
embed.add_field(name='스킬', value='섬광탄' + '\n' + '폭발한 위치의 2.5반경 내의 적에게 기절을 건다.' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 16초' + '\n' +'기절 지속시간 : 3.2초', inline=True)
embed.add_field(name='버프(AR 한정)', value='화력 5%, 사속 10% 상승' + '\n' + '■□□' + '\n' + '□◎□' + '\n' + '■□□', inline=True)
print('베레타 38형')
await client.send_message(channel,embed=embed)
if message.content.startswith("/마이크로우지") or message.content.startswith("/우지") or message.content.startswith("/No.32."):
await client.send_file(message.channel, 'No.32_마이크로_우지.png')
channel = message.channel
embed = discord.Embed(
title = '마이크로 우지 | No.32',
description = '"뭘 그렇게 보고 있는 거야? 부, 부끄러우니까 그만두라고..."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='01:40', inline=True)
embed.add_field(name='스킬', value='소이탄' + '\n' + '소이탄을 던져 1.5 반경에 피해를 주고 매 0.33초마다 화상 데미지를 입히는 구간을 생성' + '\n' + '초반 쿨타임 : 3초' + '\n' + '쿨타임 : 16초' + '\n' +'폭발/지속 피해량 : 6 / 1배' + '\n' +'지속시간 : 5초', inline=True)
embed.add_field(name='버프(AR 한정)', value='화력 18% 상승' + '\n' + '□■□' + '\n' + '□◎□' + '\n' + '□■□', inline=True)
embed.add_field(name='별명', value='우지', inline=True)
print('마이크로 우지')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M45") or message.content.startswith("/시나몬롤") or message.content.startswith("/No.33."):
await client.send_file(message.channel, 'No.33_M45.png')
channel = message.channel
embed = discord.Embed(
title = 'M45 | No.33',
description = '"지휘관! 맡아주셔서 감사드려요! 맛있는 빵을 구울수 있도록 힘낼게요!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='01:20', inline=True)
embed.add_field(name='스킬', value='연막탄' + '\n' + '폭발 위치의 2.5 반경에 적의 사속/이속을 감소시키는 연막이 발생' + '\n' + '초반 쿨타임 : 1초' + '\n' + '쿨타임 : 12초' + '\n' +'사속/이속 감소치 : 36 / 45%' + '\n' +'지속시간 : 4초', inline=True)
embed.add_field(name='버프(AR 한정)', value='사속 10%, 회피 10% 상승' + '\n' + '■□□' + '\n' + '□◎□' + '\n' + '■□□', inline=True)
embed.add_field(name='별명', value='시나몬롤', inline=True)
print('M45')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M1개런드") or message.content.startswith("/가란드") or message.content.startswith("/No.34."):
await client.send_file(message.channel, 'No.34_M1_개런드.png')
channel = message.channel
embed = discord.Embed(
title = 'M1 개런드 | No.34',
description = '"M1개런드 입니다. 앞으로 쭉 지휘관과 함께 싸우겠습니다."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='04:00', inline=True)
embed.add_field(name='스킬', value='정밀저격' + '\n' + '1.5초간 조준한 후에 공격하던 적에게 피해를 준다' + '\n' + '초반 쿨타임 : 10초' + '\n' + '쿨타임 : 16초' + '\n' +'피해량 : 5.5배' , inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 12% 감소' + '\n' + '□□□' + '\n' + '□◎■' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='가란드', inline=True)
print('M1개런드')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M1A1") or message.content.startswith("/책가방") or message.content.startswith("/No.35."):
await client.send_file(message.channel, 'No.35_M1A1.png')
channel = message.channel
embed = discord.Embed(
title = 'M1A1 | No.35',
description = '"M1A1 들어가겠습니다. 함께 전쟁을 극복해나가요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='불가능', inline=True)
embed.add_field(name='스킬', value='고속사격T' + '\n' + '지속시간 동안 자신의 사속 증가' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 16초' + '\n' + '사속 증가치 : 40%' + '\n' +'지속시간 : 15초' , inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 12% 감소' + '\n' + '□■□' + '\n' + '□◎□' + '\n' + '□■□', inline=True)
embed.add_field(name='별명', value='책가방', inline=True)
print('M1A1')
await client.send_message(channel,embed=embed)
if message.content.startswith("/스프링필드") or message.content.startswith("/춘전이") or message.content.startswith("/No.36."):
await client.send_file(message.channel, 'No.36_스프링필드_J.png')
channel = message.channel
embed = discord.Embed(
title = '스프링필드 | No.36',
description = '"지휘관, 제가 할 수 있는 일이 있다면, 부디 명령을…"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='04:25', inline=True)
embed.add_field(name='스킬', value='저격개시' + '\n' + '1.5초간 조준한 후에 가장 멀리있는 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 10초' + '\n' + '피해량 : 6배', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 15% 감소' + '\n' + '□□■' + '\n' + '□◎□' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='춘전이', inline=True)
print('스프링필드')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M14") or message.content.startswith("/엠씹새") or message.content.startswith("/씹새") or message.content.startswith("/No.37."):
await client.send_file(message.channel, 'No.37_M14.png')
channel = message.channel
embed = discord.Embed(
title = 'M14 | No.37',
description = '"지휘관! 당신의 기대에 반드시 보답할게요!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='03:40', inline=True)
embed.add_field(name='스킬', value='화력전개' + '\n' + '지속시간 동안 자신의 화력 증가' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 8초' + '\n' + '화력 증가치 : 60%', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 12% 감소' + '\n' + '□□■' + '\n' + '□◎□' + '\n' + '□□■', inline=True)
embed.add_field(name='별명', value='엠씹새, 씹새', inline=True)
print('M14')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M21") or message.content.startswith("/No.38."):
await client.send_file(message.channel, 'No.38_M21.png')
channel = message.channel
embed = discord.Embed(
title = 'M21 | No.38',
description = '"헬로~ M21이야. 저격무기라고 해서 모두 어둡지만은 않다구~"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='불가능', inline=True)
embed.add_field(name='스킬', value='목표제거' + '\n' + '1.5초간 조준한 후에 특정 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 10초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 5.5배', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 12% 감소' + '\n' + '□■□' + '\n' + '□◎□' + '\n' + '□■□', inline=True)
print('M21')
await client.send_message(channel,embed=embed)
if message.content.startswith("/모신나강") or message.content.startswith("/하라쇼") or message.content.startswith("/No.39.") :
await client.send_file(message.channel, 'No.39_모신나강.png')
channel = message.channel
embed = discord.Embed(
title = '모신나강 | No.39',
description = '"동지, 훌륭해~"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='04:10', inline=True)
embed.add_field(name='스킬', value='저격개시' + '\n' + '1.5초간 조준한 후에 가장 멀리 있는 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 10초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 6배', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 15% 감소' + '\n' + '□□□' + '\n' + '□◎□' + '\n' + '□■□', inline=True)
print('모신나강')
await client.send_message(channel,embed=embed)
if message.content.startswith("/SVT-38") or message.content.startswith("/SVT38") or message.content.startswith("/No.40.") :
await client.send_file(message.channel, 'No.40_SVT-38.png')
channel = message.channel
embed = discord.Embed(
title = 'SVT-38 | No.40',
description = '"토카레프 M1940 등장. 지휘관, 지시를"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='03:30', inline=True)
embed.add_field(name='스킬', value='목표제거' + '\n' + '1.5초간 조준한 후에 특정 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 10초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 5배', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 10% 감소' + '\n' + '□□□' + '\n' + '□◎■' + '\n' + '□□□', inline=True)
print('SVT-38')
await client.send_message(channel,embed=embed)
if message.content.startswith("/시모노프") or message.content.startswith("/SKS") or message.content.startswith("/No.41.") :
await client.send_file(message.channel, 'No.41_시모노프.png')
channel = message.channel
embed = discord.Embed(
title = '시모노프 | No.41',
description = '"안녕하세요, 지휘관. 에이스인 내가 있으면 일당백이라고. 잘 부탁해."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='03:30', inline=True)
embed.add_field(name='스킬', value='고속사격' + '\n' + '지속시간 동안 자신의 사속 증가' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 8초' + '\n' + '사속 증가치 : 55%' + '지속시간 : 5초', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 10% 감소' + '\n' + '□■□' + '\n' + '□◎□' + '\n' + '□■□', inline=True)
print('시모노프')
await client.send_message(channel,embed=embed)
if message.content.startswith("/PTRD") or message.content.startswith("/No.42."):
await client.send_file(message.channel, 'No.42_PTRD.png')
channel = message.channel
embed = discord.Embed(
title = 'PTRD | No.42',
description = '"괜찮아, 지휘관. 누구라 해도 나의 탄환에서는 도망칠 수 없어."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='04:30', inline=True)
embed.add_field(name='스킬', value='확인사살' + '\n' + '2초간 조준한 후에 최전방의 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 15초' + '\n' + '쿨타임 : 16.9초' + '\n' + '피해량 : 7배', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 15% 감소' + '\n' + '□■□' + '\n' + '□◎□' + '\n' + '□□□', inline=True)
print('PTRD')
await client.send_message(channel,embed=embed)
if message.content.startswith("/SVD") or message.content.startswith("/스브드") or message.content.startswith("/No.43."):
await client.send_file(message.channel, 'No.43_SVD.png')
channel = message.channel
embed = discord.Embed(
title = 'SVD | No.43',
description = '"스나이퍼 SVD야. 어디보자, 어느 행운아가 나를 맞이한 거야?"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='04:15', inline=True)
embed.add_field(name='스킬', value='고속사격' + '\n' + '지속시간 동안 자신의 사속 증가.' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 8초' + '\n' + '사속증가치 : 65%' + '지속시간 : 5초', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 15% 감소' + '\n' + '□□■' + '\n' + '□◎□' + '\n' + '□□■', inline=True)
embed.add_field(name='별명', value='스브드', inline=True)
print('SVD')
await client.send_message(channel,embed=embed)
if message.content.startswith("/SV-98") or message.content.startswith("/SV98") or message.content.startswith("/큐하치") or message.content.startswith("/스브") or message.content.startswith("/No.44."):
await client.send_file(message.channel, 'No.44_SV-98.png')
channel = message.channel
embed = discord.Embed(
title = 'SV-98 | No.44',
description = '"SV-98 보고합니다. 명령을 내려주십시오."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='03:40', inline=True)
embed.add_field(name='스킬', value='확인사살' + '\n' + '1.5초간 조준한 후에 최전방의 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 10초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 5.5배', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 12% 감소' + '\n' + '□□□' + '\n' + '□◎□' + '\n' + '□□■', inline=True)
embed.add_field(name='별명', value='큐하치, 스브', inline=True)
print('SV-98')
await client.send_message(channel,embed=embed)
if message.content.startswith("/No.45."):
await client.send_file(message.channel, 'ACCESS DENIED.jpg')
channel = message.channel
embed = discord.Embed(
title = 'ACCESS DENIED | No.45',
description = '해당 번호는 결번입니다.',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='ACCESS DENIED', inline=True)
embed.add_field(name='종류', value='ACCESS DENIED', inline=True)
embed.add_field(name='제조시간', value='ACCESS DENIED', inline=True)
embed.add_field(name='스킬', value='ACCESS DENIED' + '\n' + 'ACCESS DENIED' + '\n' + '초반 쿨타임 : ACCESS DENIED초' + '\n' + '쿨타임 : ACCESS DENIED초' + '\n' + '지속시간 : ACCESS DENIED초' + '\n' + 'ACCESS DENIED : 0', inline=True)
embed.add_field(name='버프', value='ACCESS DENIED' + '\n' + '□□□' + '\n' + '□◎□' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='ACCESS DENIED', inline=True)
print('ACCESS DENIED 45')
await client.send_message(channel,embed=embed)
if message.content.startswith("/Kar.98k") or message.content.startswith("/부츠") or message.content.startswith("/카구팔") or message.content.startswith("/No.46."):
await client.send_file(message.channel, 'No.46_Kar98k.png')
channel = message.channel
embed = discord.Embed(
title = 'Kar98k | No.46',
description = '"지휘관, 마우저 카라비너 98 Kurz가 당신을 위해 있는 힘을 다하겠습니다. 당신에게 방해되는 것은 한 번에 처리해버릴게요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='04:40', inline=True)
embed.add_field(name='스킬', value='이중저격' + '\n' + '1초씩 두번 조준 사격하며 각각 현재 타깃에게 대미지를 입힌다.' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 3.5배', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 18% 감소' + '\n' + '□□■' + '\n' + '□◎□' + '\n' + '□□■', inline=True)
embed.add_field(name='별명', value='카구팔, 부츠', inline=True)
print('Kar98k')
await client.send_message(channel,embed=embed)
if message.content.startswith("/G43") or message.content.startswith("/구텐탁") or message.content.startswith("/No.47."):
await client.send_file(message.channel, 'No.47_G43.png')
channel = message.channel
embed = discord.Embed(
title = 'G43 | No.47',
description = '"Guten Tag! 저는 발터 게베어 43, 오늘도 우아한 싸움을 보여드리겠어요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='03:10', inline=True)
embed.add_field(name='스킬', value='고속사격N' + '\n' + '지속시간 동안 자신의 사속 증가 / 뒤쪽의 수치는 주간작전에 사용시' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 5초' + '\n' + '사속 증가치 : 85 / 28%', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 10% 감소' + '\n' + '□□■' + '\n' + '□◎□' + '\n' + '□□■', inline=True)
embed.add_field(name='별명', value='구텐탁', inline=True)
print('G43')
await client.send_message(channel,embed=embed)
if message.content.startswith("/WA2000") or message.content.startswith("/와짱") or message.content.startswith("/와짱") or message.content.startswith("/No.48."):
await client.send_file(message.channel, 'No.48_WA2000.png')
channel = message.channel
embed = discord.Embed(
title = 'WA2000 | No.48',
description = '"나의 이름은 발터 WA2000. 지휘관, 나의 발목을 잡는다면 가만두지 않을 거야!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='04:50', inline=True)
embed.add_field(name='스킬', value='고속사격' + '\n' + '지속시간 동안 자신의 사속 증가' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 5초' + '\n' + '사속 증가치 : 75%', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 18% 감소' + '\n' + '□□□' + '\n' + '□◎■' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='와짱(쨩)', inline=True)
print('WA2000')
await client.send_message(channel,embed=embed)
if message.content.startswith("/56식반") or message.content.startswith("/No.49."):
await client.send_file(message.channel, 'No.49_56식_반.png')
channel = message.channel
embed = discord.Embed(
title = '56식 반 | No.49',
description = '"56식 반, 정식으로 배치를 명 받았습니다. 지휘관, 그리고 전우여러분! 잘부탁드려요!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='불가능', inline=True)
embed.add_field(name='스킬', value='화력전개' + '\n' + '지속시간 동안 자신의 화력 증가' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 5초' + '\n' + '화력 증가치 : 60', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 12% 감소' + '\n' + '□■□' + '\n' + '□◎□' + '\n' + '□■□', inline=True)
print('56식 반')
await client.send_message(channel,embed=embed)
if message.content.startswith("/리엔필드") or message.content.startswith("/리줌마") or message.content.startswith("/No.50."):
await client.send_file(message.channel, 'No.50_리엔필드.png')
channel = message.channel
embed = discord.Embed(
title = '리엔필드 | No.50',
description = '"오늘부로 배속된 리-엔필드 No.4 Mk I입니다."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='05:00', inline=True)
embed.add_field(name='스킬', value='화력전개' + '\n' + '지속시간 동안 자신의 화력 증가' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 5초' + '\n' + '화력 증가치 : 75%', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 18% 감소' + '\n' + '□■□' + '\n' + '□◎□' + '\n' + '□■□', inline=True)
embed.add_field(name='별명', value='리줌마', inline=True)
print('리엔필드')
await client.send_message(channel,embed=embed)
if message.content.startswith("/FN-49") or message.content.startswith("/FN49") or message.content.startswith("/요요요") or message.content.startswith("/No.51."):
await client.send_file(message.channel, 'No.51_FN-49.png')
channel = message.channel
embed = discord.Embed(
title = 'FN-49 | No.51',
description = '"자, 자자자자, 잘 부탁 드립니다!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='03:10', inline=True)
embed.add_field(name='스킬', value='화력전개' + '\n' + '지속시간 동안 자신의 화력 증가' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 5초' + '\n' + '화력 증가치 : 55%', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 10% 감소' + '\n' + '□□■' + '\n' + '□◎□' + '\n' + '□□■', inline=True)
print('FN-49')
await client.send_message(channel,embed=embed)
if message.content.startswith("/BM59") or message.content.startswith("/No.52."):
await client.send_file(message.channel, 'No.52_BM59.png')
channel = message.channel
embed = discord.Embed(
title = 'BM59 | No.52',
description = '"베레타 BM59입니다. 갖가지 개조를 거친 저라면, 지휘관을 실망시킬 일은 없겠죠."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='03:20', inline=True)
embed.add_field(name='스킬', value='고속사격 ' + '\n' + '지속시간 동안 자신의 사속 증가' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 5초' + '\n' + '사속 증가치 : 55%', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 10% 감소' + '\n' + '□□□' + '\n' + '□◎■' + '\n' + '□□□', inline=True)
print('BM59')
await client.send_message(channel,embed=embed)
if message.content.startswith("/NTW-20") or message.content.startswith("/NTW20") or message.content.startswith("/노태우") or message.content.startswith("/No.53."):
await client.send_file(message.channel, 'No.53_NTW-20.png')
channel = message.channel
embed = discord.Embed(
title = 'NTW-20 | No.53',
description = '"지휘관, 대물 저격총인 Denel NTW-20다. 강철의 벽이라 해도, 내가 뚫을 수 있다는 걸 보여주지."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='04:45', inline=True)
embed.add_field(name='스킬', value='확인사살' + '\n' + '2초간 조준한 후에 최전방의 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 15초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 8배', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 18% 감소' + '\n' + '□□□' + '\n' + '□◎■' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='노태우', inline=True)
print('NTW-20')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M16") or message.content.startswith("/M16A1") or message.content.startswith("/우리형") or message.content.startswith("/No.54."):
await client.send_file(message.channel, 'No.54_M16A1.png')
channel = message.channel
embed = discord.Embed(
title = 'M16A1 | No.54',
description = '"여어! M16이다. 임무라면 나한테 맡겨두도록."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='불가능', inline=True)
embed.add_field(name='스킬', value='섬광탄' + '\n' + '폭발한 위치의 2.5반경 내의 적에게 기절을 건다.' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 16초' + '\n' + '기절 지속시간 : 4초', inline=True)
embed.add_field(name='버프(SMG 한정)', value='화력 10%, 회피 12% 상승' + '\n' + '□■■' + '\n' + '□◎□' + '\n' + '□■■', inline=True)
embed.add_field(name='별명', value='우리형', inline=True)
print('M16A1')
await client.send_message(channel,embed=embed)
if message.content.startswith("/느그형") or message.content.startswith("/철혈M16"):
await client.send_file(message.channel, 'No.54_M16A1S.E.jpg')
channel = message.channel
embed = discord.Embed(
title = 'M16A1 | No.54',
description = '"여어! M16이다. 임무라면 나한테 맡겨두도록."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='불가능', inline=True)
embed.add_field(name='스킬', value='섬광탄' + '\n' + '폭발한 위치의 2.5반경 내의 적에게 기절을 건다.' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 16초' + '\n' + '기절 지속시간 : 4초', inline=True)
embed.add_field(name='버프(SMG 한정)', value='화력 10%, 회피 12% 상승' + '\n' + '□■■' + '\n' + '□◎□' + '\n' + '□■■', inline=True)
embed.add_field(name='별명', value='느그형', inline=True)
print('M16A1')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M4") or message.content.startswith("/M4A1") or message.content.startswith("/혐포") or message.content.startswith("/엠포") or message.content.startswith("/No.55."):
await client.send_file(message.channel, 'No.55_M4A1.png')
channel = message.channel
embed = discord.Embed(
title = 'M4A1 | No.55',
description = '"지휘관, 잘… 부탁드리겠습니다."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='불가능', inline=True)
embed.add_field(name='스킬', value='화력전개T' + '\n' + '지속시간 동안 자신의 화력 증가' + '\n' + '초반 쿨타임 : 4초' + '\n' + '쿨타임 : 16초' + '\n' + '지속시간 : 10초' + '\n' + '화력 증가치 : 70%', inline=True)
embed.add_field(name='버프(AR 한정)', value='화력 18%, 치명률 30% 상승' + '\n' + '□■■' + '\n' + '□◎■' + '\n' + '□■■', inline=True)
embed.add_field(name='별명', value='엠포, 혐포', inline=True)
print('M4A1')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M4SOPMODII") or message.content.startswith("/솦모챠") or message.content.startswith("/솦모") or message.content.startswith("/비누") or message.content.startswith("/No.56."):
await client.send_file(message.channel, 'No.56_M4_SOPMOD_II.jpg')
channel = message.channel
embed = discord.Embed(
title = 'M4 SOP MODII | No.56',
description = '"지휘관, 잘… 부탁드리겠습니다."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='불가능', inline=True)
embed.add_field(name='스킬', value='살상류탄' + '\n' + '폭발한 위치의 1.5반경 내의 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 12배', inline=True)
embed.add_field(name='버프(SMG 한정)', value='명중 50%, 회피 12% 상승' + '\n' + '□□■' + '\n' + '□◎■' + '\n' + '□□■', inline=True)
embed.add_field(name='별명', value='솦모챠, 비누', inline=True)
print('M4 SOP MODII')
await client.send_message(channel,embed=embed)
if message.content.startswith("/STAR-15") or message.content.startswith("/AR15") or message.content.startswith("/스타") or message.content.startswith("/No.57."):
await client.send_file(message.channel, 'No.57_ST_AR-15.png')
channel = message.channel
embed = discord.Embed(
title = 'ST AR-15 | No.57',
description = '"콜트 AR-15야. 정식으로 귀하의 부대에 합류하겠습니다. 제 활약을 확실히 눈에 새겨주세요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='불가능', inline=True)
embed.add_field(name='스킬', value='고속사격T' + '\n' + '지속시간 동안 자신의 사속 증가' + '\n' + '초반 쿨타임 : 4초' + '\n' + '쿨타임 : 16초' + '\n' + '지속시간 : 15초' + '\n' + '사속증가치 : 45%', inline=True)
embed.add_field(name='버프(SMG 한정)', value='사속 10%, 회피 12% 상승' + '\n' + '□□■' + '\n' + '□◎■' + '\n' + '□□■', inline=True)
embed.add_field(name='별명', value='스타', inline=True)
print('ST AR-15')
await client.send_message(channel,embed=embed)
if message.content.startswith("/AK-47") or message.content.startswith("/AK47") or message.content.startswith("/에케") or message.content.startswith("/No.58."):
await client.send_file(message.channel, 'No.58_AK-47.png')
channel = message.channel
embed = discord.Embed(
title = 'AK-47 | No.58',
description = '"아하핫, 드디어 나의 차례구나, 지구를 뒤흔들 성능을 보여줄게!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='03:20', inline=True)
embed.add_field(name='스킬', value='기습공격' + '\n' + '지속시간 동안 자신의 화력, 명중 증가' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 5초' + '\n' + '화력, 명중 증가치 : 35, 100%', inline=True)
embed.add_field(name='버프(SMG 한정)', value='회피 18% 상승' + '\n' + '□□□' + '\n' + '□◎□' + '\n' + '□■□', inline=True)
embed.add_field(name='별명', value='에케', inline=True)
print('AK-47')
await client.send_message(channel,embed=embed)
if message.content.startswith("/AK-74U") or message.content.startswith("/AK74U") or message.content.startswith("/No.59."):
await client.send_file(message.channel, 'No.59_AK-74U.png')
channel = message.channel
embed = discord.Embed(
title = 'AK-74U | No.59',
description = '"아, 네가 보스야? AK-74U, 이게 내 이름이니까, 장사하고 싶으면, 날 어떻게 모실지 잘 생각해 봐."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='불가능', inline=True)
embed.add_field(name='스킬', value='거부반응' + '\n' + '지속시간 동안 자신이 공격한 적은 일정 시간 동안 화력, 명중 감소 (엘리트 적은 효과 반감)' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 16초' + '\n' + '지속시간 : 5초' + '\n' + '디버프 지속시간 : 5초' + '\n' + '화력, 명중 감소치 : 50%', inline=True)
embed.add_field(name='버프(AR 한정)', value='화력 15%, 명중 25% 상승' + '\n' + '■□□' + '\n' + '■◎□' + '\n' + '■□□', inline=True)
print('AK-74U')
await client.send_message(channel,embed=embed)
if message.content.startswith("/ASVAL") or message.content.startswith("/아스발") or message.content.startswith("/No.60."):
await client.send_file(message.channel, 'No.60_AS_Val.png')
channel = message.channel
embed = discord.Embed(
title = 'AS VAL | No.60',
description = '"안녕하세요오...저...아앗...아무것도 아니에요..."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='03:30', inline=True)
embed.add_field(name='스킬', value='화력전개N' + '\n' + '지속시간 동안 자신의 화력 증가 / 뒤쪽의 수치는 주간작전에 사용시' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 16초' + '\n' + '지속시간 : 6초' + '\n' + '화력 증가치 : 180 / 60%', inline=True)
embed.add_field(name='버프(AR 한정)', value='화력 25%, 사속 10% 상승' + '\n' + '□■□' + '\n' + '□◎□' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='아스발', inline=True)
print('AS VAL')
await client.send_message(channel,embed=embed)
if message.content.startswith("/StG44") or message.content.startswith("/서태지") or message.content.startswith("/No.61."):
await client.send_file(message.channel, 'No.61_StG44.png')
channel = message.channel
embed = discord.Embed(
title = 'StG44 | No.61',
description = '"안녕하세요, 아, 악수는 거절하겠어요!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='03:00', inline=True)
embed.add_field(name='스킬', value='파열류탄' + '\n' + '유탄을 발사하여 폭발한 위치의 1/2.5/4 반경 내의 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 4.5/1.8/1배', inline=True)
embed.add_field(name='버프(SMG 한정)', value='회피 20%, 명중 60% 상승' + '\n' + '□□□' + '\n' + '□◎■' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='서태지', inline=True)
print('StG44')
await client.send_message(channel,embed=embed)
if message.content.startswith("/G3") or message.content.startswith("/No.63."):
await client.send_file(message.channel, 'No.63_G3.png')
channel = message.channel
embed = discord.Embed(
title = 'G3 | No.62',
description = '"안녕하세요, 지휘관씨, G3라고 불러주세요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='02:50', inline=True)
embed.add_field(name='스킬', value='살상류탄' + '\n' + '폭발한 위치의 1.5반경 내의 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 10배', inline=True)
embed.add_field(name='버프(SMG 한정)', value='회피 20%, 명중 60% 상승' + '\n' + '□■□' + '\n' + '□◎□' + '\n' + '□□□', inline=True)
print('G3')
await client.send_message(channel,embed=embed)
if message.content.startswith("/G36") or message.content.startswith("/지상렬") or message.content.startswith("/상렬이") or message.content.startswith("/No.64."):
await client.send_file(message.channel, 'No.64_G36.png')
channel = message.channel
embed = discord.Embed(
title = 'G36 | No.65',
description = '"구텐 탁. 오늘부터 주인님의 전속 메이드가 되어 봉사하겠습니다."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='03:40', inline=True)
embed.add_field(name='스킬', value='화력전개T' + '\n' + '지속시간 동안 자신의 화력 증가' + '\n' + '초반 쿨타임 : 4초' + '\n' + '쿨타임 : 16초' + '\n' + '지속시간 : 10초' + '\n' + '화력 증가치 : 70%', inline=True)
embed.add_field(name='버프(SMG 한정)', value='화력 30%, 사속 10% 상' + '\n' + '□□□' + '\n' + '□◎■' + '\n' + '□□■', inline=True)
embed.add_field(name='별명', value='지상렬, 상렬이', inline=True)
print('G36')
await client.send_message(channel,embed=embed)
if message.content.startswith("/HK416") or message.content.startswith("/흥국이") or message.content.startswith("/No.65."):
await client.send_file(message.channel, 'No.65_HK416.png')
channel = message.channel
embed = discord.Embed(
title = 'HK416 | No.65',
description = '"HK416. 지휘관, 제대로 기억해주세요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='03:55', inline=True)
embed.add_field(name='스킬', value='살상류탄' + '\n' + '폭발한 위치의 1.5반경 내의 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 15배', inline=True)
embed.add_field(name='버프(SMG 한정)', value='화력 40% 상승' + '\n' + '□□□' + '\n' + '□◎■' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='흥국이', inline=True)
print('HK416')
await client.send_message(channel,embed=embed)
if message.content.startswith("/56-1식") or message.content.startswith("/No.66."):
await client.send_file(message.channel, 'No.66_56-1식.png')
channel = message.channel
embed = discord.Embed(
title = '56-1식 | No.66',
description = '"니 하오, 지휘관. 56식 자동보총 1형이야. 모든 적을 섬멸해줄께."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='03:25', inline=True)
embed.add_field(name='스킬', value='파열류탄 체인 블라스트(일)' + '\n' + '폭발한 위치의 1/2.5/4반경 내의 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 5/2/1배', inline=True)
embed.add_field(name='버프(SMG 한정)', value='회피 15%, 치명률 10% 상승' + '\n' + '□□□' + '\n' + '□◎■' + '\n' + '□□□', inline=True)
print('56-1식')
await client.send_message(channel,embed=embed)
if message.content.startswith("/No.67."):
await client.send_file(message.channel, 'ACCESS DENIED.jpg')
channel = message.channel
embed = discord.Embed(
title = 'ACCESS DENIED | No.67',
description = '해당 번호는 결번입니다.',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='ACCESS DENIED', inline=True)
embed.add_field(name='종류', value='ACCESS DENIED', inline=True)
embed.add_field(name='제조시간', value='ACCESS DENIED', inline=True)
embed.add_field(name='스킬', value='ACCESS DENIED' + '\n' + 'ACCESS DENIED' + '\n' + '초반 쿨타임 : ACCESS DENIED초' + '\n' + '쿨타임 : ACCESS DENIED초' + '\n' + '지속시간 : ACCESS DENIED초' + '\n' + 'ACCESS DENIED : 0', inline=True)
embed.add_field(name='버프', value='ACCESS DENIED' + '\n' + '□□□' + '\n' + '□◎□' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='ACCESS DENIED', inline=True)
print('ACCESS DENIED 67')
await client.send_message(channel,embed=embed)
if message.content.startswith("/L85A1") or message.content.startswith("/장미") or message.content.startswith("/하지메마시떼") or message.content.startswith("/No.68."):
await client.send_file(message.channel, 'No.68_L85A1.png')
channel = message.channel
embed = discord.Embed(
title = 'L85A1 | No.68',
description = '"M4 SOPMOD-II, 지휘관, 드디어 만났네요!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='02:50', inline=True)
embed.add_field(name='스킬', value='강행돌파' + '\n' + '지속시간 동안 자신의 화력, 사속 증가' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 5초' + '\n' + '화력, 사속 증가치 : 35, 15%', inline=True)
embed.add_field(name='버프(SMG 한정)', value='화력 20%, 명중 50% 상승' + '\n' + '□■□' + '\n' + '□◎□' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='장미', inline=True)
print('L85A1')
await client.send_message(channel,embed=embed)
if message.content.startswith("/FAMAS") or message.content.startswith("/파마스") or message.content.startswith("/No.69."):
await client.send_file(message.channel, 'No.69_FAMAS.png')
channel = message.channel
embed = discord.Embed(
title = 'FAMAS | No.69',
description = '"지휘관님, 제가 당신의 제대에 가세한다면 일당백이나 다름없습니다."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='03:30', inline=True)
embed.add_field(name='스킬', value='파열류탄' + '\n' + '폭발한 위치의 1/2.5/4반경 내의 적에게 피해를 준다.' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 16초' + '\n' + '피해량 : 5/2/1배', inline=True)
embed.add_field(name='버프(SMG 한정)', value='화력 25%, 명중 60% 증가' + '\n' + '□□□' + '\n' + '□◎□' + '\n' + '□□■', inline=True)
embed.add_field(name='별명', value='파마스', inline=True)
print('FAMAS')
await client.send_message(channel,embed=embed)
if message.content.startswith("/FNC") or message.content.startswith("/초코") or message.content.startswith("/No.70."):
await client.send_file(message.channel, 'No.70_FNC.png')
channel = message.channel
embed = discord.Embed(
title = 'FNC | No.70',
description = '"처음 뵙겠습니다, 지휘관님. 초콜렛 드실래요?"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='03:20', inline=True)
embed.add_field(name='스킬', value='화력전개' + '\n' + '지속시간 동안 자신의 화력 증가' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 5초' + '\n' + '화력 증가치 : 60%', inline=True)
embed.add_field(name='버프(SMG 한정)', value='명중 50%, 회피 12% 상승' + '\n' + '□□■' + '\n' + '□◎□' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='초코', inline=True)
print('FNC')
await client.send_message(channel,embed=embed)
if message.content.startswith("/갈릴") or message.content.startswith("/No.71."):
await client.send_file(message.channel, 'No.71_갈릴.png')
channel = message.channel
embed = discord.Embed(
title = '갈릴 | No.71',
description = '"여어, 잘 부탁해, My 지휘관!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='02:40', inline=True)
embed.add_field(name='스킬', value='호흡조절' + '\n' + '지속시간 동안 자신의 명중 증가' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 16초' + '\n' + '지속시간 : 15초' + '\n' + '명중 증가치 : 500%', inline=True)
embed.add_field(name='버프(SMG 한정)', value='명중 50%, 회피 10% 상승' + '\n' + '□□□' + '\n' + '□◎■' + '\n' + '□□□', inline=True)
print('갈릴')
await client.send_message(channel,embed=embed)
if message.content.startswith("/TAR-21") or message.content.startswith("/TAR21") or message.content.startswith("/타보르") or message.content.startswith("/타줌마") or message.content.startswith("/No.72."):
await client.send_file(message.channel, 'No.72_TAR-21.png')
channel = message.channel
embed = discord.Embed(
title = 'TAR-21 | No.72',
description = '"TAR-21, 지금부터 따르겠습니다, 부디 저에 대해 많이 신경써 주세요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='03:30', inline=True)
embed.add_field(name='스킬', value='강행돌파' + '\n' + '지속시간 동안 자신의 명중 증가' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 16초' + '\n' + '지속시간 : 5초' + '\n' + '화력, 사속 증가치 : 75, 25%', inline=True)
embed.add_field(name='버프(SMG 한정)', value='명중 50%, 회피 10% 상승' + '\n' + '□□□' + '\n' + '□◎■' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='타보르, 타줌마', inline=True)
print('TAR-21')
await client.send_message(channel,embed=embed)
if message.content.startswith("/AUG") or message.content.startswith("/어그") or message.content.startswith("/No.73."):
await client.send_file(message.channel, 'No.73_AUG.png')
channel = message.channel
embed = discord.Embed(
title = 'AUG | No.73',
description = '"지휘관님, 만약 적에게 장례식 화환을 보내고 싶으시다면······ 제가 당신의 "최고의 선택" 일 거에요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='불가능', inline=True)
embed.add_field(name='스킬', value='장례식의 비' + '\n' + '지속시간 동안 자신의 명중이 감소하지만 사속이 150이 되고 난사한다.' + '\n' + '초반 쿨타임 : 4초' + '\n' + '쿨타임 : 16초' + '\n' + '지속시간 : 7초' + '\n' + '명중 감소치 : 0%', inline=True)
embed.add_field(name='버프', value='화력 12%, 명중 20% 상승' + '\n' + '□■■' + '\n' + '□◎■' + '\n' + '□■■', inline=True)
embed.add_field(name='별명', value='어그', inline=True)
print('AUG')
await client.send_message(channel,embed=embed)
if message.content.startswith("/SIG-510") or message.content.startswith("/SIG510") or message.content.startswith("/시그") or message.content.startswith("/No.74."):
await client.send_file(message.channel, 'No.74_SIG-510.png')
channel = message.channel
embed = discord.Embed(
title = 'SIG-510 | No.74',
description = '"SIG-510, 지금부터 따르겠습니다, 부디 저에 대해 많이 신경써 주세요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='AR', inline=True)
embed.add_field(name='제조시간', value='02:40', inline=True)
embed.add_field(name='스킬', value='화력전개' + '\n' + '지속시간 동안 자신의 화력 증가' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 5초' + '\n' + '화력 증가치 : 55%', inline=True)
embed.add_field(name='버프(SMG 한정)', value='화력 20%, 사속 10% 상승' + '\n' + '□□■' + '\n' + '□◎□' + '\n' + '□□■', inline=True)
embed.add_field(name='별명', value='시그', inline=True)
print('SIG-510')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M1918") or message.content.startswith("/바쨩") or message.content.startswith("/바짱") or message.content.startswith("/No.75."):
await client.send_file(message.channel, 'No.75_M1918.png')
channel = message.channel
embed = discord.Embed(
title = 'M1918 | No.75',
description = '"브라우닝 M1918이야. 왓! 지휘관! 여기에 계셨던거예요? 놀래키지 마셔요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='MG', inline=True)
embed.add_field(name='제조시간', value='06:25', inline=True)
embed.add_field(name='스킬', value='화력전개MG' + '\n' + '지속시간 동안 자신의 화력 증가' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 18초' + '\n' + '지속시간 : 6초' + '\n' + '화력 증가치 : 70%', inline=True)
embed.add_field(name='버프(SG 한정)', value='화력 15%, 장갑 10% 상승' + '\n' + '□□□' + '\n' + '□◎■' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='바짱(쨩)', inline=True)
print('M1918')
await client.send_message(channel,embed=embed)
if message.content.startswith("/No.76."):
await client.send_file(message.channel, 'ACCESS DENIED.jpg')
channel = message.channel
embed = discord.Embed(
title = 'ACCESS DENIED | No.76',
description = '해당 번호는 결번입니다.',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='ACCESS DENIED', inline=True)
embed.add_field(name='종류', value='ACCESS DENIED', inline=True)
embed.add_field(name='제조시간', value='ACCESS DENIED', inline=True)
embed.add_field(name='스킬', value='ACCESS DENIED' + '\n' + 'ACCESS DENIED' + '\n' + '초반 쿨타임 : ACCESS DENIED초' + '\n' + '쿨타임 : ACCESS DENIED초' + '\n' + '지속시간 : ACCESS DENIED초' + '\n' + 'ACCESS DENIED : 0', inline=True)
embed.add_field(name='버프', value='ACCESS DENIED' + '\n' + '□□□' + '\n' + '□◎□' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='ACCESS DENIED', inline=True)
print('ACCESS DENIED 76')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M2HB") or message.content.startswith("/쵸로이") or message.content.startswith("/연필") or message.content.startswith("/HB연필") or message.content.startswith("/No.77."):
await client.send_file(message.channel, 'No.77_M2HB.png')
channel = message.channel
embed = discord.Embed(
title = 'M2HB | No.77',
description = '"저기~ 지휘관! 어서 적에게 총탄의 비를 퍼붓고 싶어! 더는 기다릴 수 없어!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='MG', inline=True)
embed.add_field(name='제조시간', value='06:10', inline=True)
embed.add_field(name='스킬', value='사중극점' + '\n' + '3회 일반 공격 후 4회째 공격을 강화' + '\n' + 'Passive Skill' + '\n' + '공격력 배율 : 2.4배', inline=True)
embed.add_field(name='버프(SG 한정)', value='화력 22% 상승' + '\n' + '□□□' + '\n' + '◎□■' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='쵸로이, HB, HB연필', inline=True)
print('M2HB')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M60") or message.content.startswith("/No.78"):
await client.send_file(message.channel, 'No.78_M60.png')
channel = message.channel
embed = discord.Embed(
title = 'M60 | No.78',
description = '"M60이야! 자, 지시를 내려줘!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='MG', inline=True)
embed.add_field(name='제조시간', value='06:10', inline=True)
embed.add_field(name='스킬', value='화력전개N-MG' + '\n' + '지속시간 동안 자신의 화력 증가 / 뒤쪽의 수치는 주간작전에 사용시' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 18초' + '\n' + '지속시간 : 6초' + '\n' + '화력 증가치 : 105 / 35%', inline=True)
embed.add_field(name='버프(SG 한정)', value='화력 10%, 사속 8% 상승' + '\n' + '◎□■' + '\n' + '□□□' + '\n' + '□□■', inline=True)
print('M60')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M249") or message.content.startswith("/풍선껌") or message.content.startswith("/No.79."):
await client.send_file(message.channel, 'No.79_M249_SAW.png')
channel = message.channel
embed = discord.Embed(
title = 'M249 | No.79',
description = '"지휘관, 너무 기대하지는 마."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='MG', inline=True)
embed.add_field(name='제조시간', value='불가능', inline=True)
embed.add_field(name='스킬', value='준비만전N' + '\n' + '야간작전에서 지속시간 동안 자신의 화력 증가, 발사중인 탄띠에 탄 추가 괄호 안의 수치는 주간작전에 사용시' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 18초' + '\n' + '지속시간 : 8초' + '\n' + '화력, 발사수 증가치 : 45%(10%), 4발', inline=True)
embed.add_field(name='버프(SG 한정)', value='사속 12%, 명중 10% 상승' + '\n' + '□□□' + '\n' + '◎□■' + '\n' + '□□■', inline=True)
embed.add_field(name='별명', value='풍선껌', inline=True)
print('M249')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M1919A4") or message.content.startswith("/이치큐") or message.content.startswith("/No.80."):
await client.send_file(message.channel, 'No.80_M1919A4.png')
channel = message.channel
embed = discord.Embed(
title = 'M249 | No.80',
description = '"저는 브라우닝 M1919! 적을 분쇄하기 위해 찾아왔습니다."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='MG', inline=True)
embed.add_field(name='제조시간', value='05:40', inline=True)
embed.add_field(name='스킬', value='사냥충동' + '\n' + '지속시간 동안 자신의 명중 상승, 모든 공격이 치명타가 된다.' + '\n' + '초반 쿨타임 : 3초' + '\n' + '쿨타임 : 18초' + '\n' + '지속시간 : 6초' + '\n' + '명중 증가치 : 65%', inline=True)
embed.add_field(name='버프(SG 한정)', value='명중 25%, 장갑 10% 상승' + '\n' + '□□■' + '\n' + '□□□' + '\n' + '◎□□', inline=True)
embed.add_field(name='별명', value='이치큐', inline=True)
print('M1919A4')
await client.send_message(channel,embed=embed)
if message.content.startswith("/LWMMG") or message.content.startswith("/람지") or message.content.startswith("/No.81."):
await client.send_file(message.channel, 'No.81_LWMMG.png')
channel = message.channel
embed = discord.Embed(
title = 'LWMMG | No.81',
description = '"처음 뵙겠습니다. 지휘관. 아니...다른 녀석들을 소개할 필요는 없어. 나 혼자서도 충분하니까."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='MG', inline=True)
embed.add_field(name='제조시간', value='05:10', inline=True)
embed.add_field(name='스킬', value='사냥충동' + '\n' + '지속시간 동안 자신의 명중 상승, 모든 공격이 치명타가 된다.' + '\n' + '초반 쿨타임 : 3초' + '\n' + '쿨타임 : 18초' + '\n' + '지속시간 : 6초' + '\n' + '명중 증가치 : 60%', inline=True)
embed.add_field(name='버프(SG 한정)', value='화력 10%, 사속 10% 상승' + '\n' + '□□□' + '\n' + '◎□■' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='람지', inline=True)
print('LWMMG')
await client.send_message(channel,embed=embed)
if message.content.startswith("/DP-28") or message.content.startswith("/DP28") or message.content.startswith("/디피") or message.content.startswith("/No.82."):
await client.send_file(message.channel, 'No.82_DP-28.png')
channel = message.channel
embed = discord.Embed(
title = 'DP-28 | No.82',
description = '"꼬마야, 잘부탁해. 뭔가 곤란한 거라도 있니?"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='MG', inline=True)
embed.add_field(name='제조시간', value='05:00', inline=True)
embed.add_field(name='스킬', value='준비만전' + '\n' + '지속시간 동안 자신의 화력 증가 발사중인 탄띠에 탄 추가' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 18초' + '\n' + '지속시간 : 8초' + '\n' + '화력, 발사수 증가치 : 28%, 3발', inline=True)
embed.add_field(name='버프(SG 한정)', value='사속 15% 상승' + '\n' + '□□■' + '\n' + '□□□' + '\n' + '◎□■', inline=True)
embed.add_field(name='별명', value='디피', inline=True)
print('DP-28')
await client.send_message(channel,embed=embed)
if message.content.startswith("/No.83."):
await client.send_file(message.channel, 'ACCESS DENIED.jpg')
channel = message.channel
embed = discord.Embed(
title = 'ACCESS DENIED | No.83',
description = '해당 번호는 결번입니다.',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='ACCESS DENIED', inline=True)
embed.add_field(name='종류', value='ACCESS DENIED', inline=True)
embed.add_field(name='제조시간', value='ACCESS DENIED', inline=True)
embed.add_field(name='스킬', value='ACCESS DENIED' + '\n' + 'ACCESS DENIED' + '\n' + '초반 쿨타임 : ACCESS DENIED초' + '\n' + '쿨타임 : ACCESS DENIED초' + '\n' + '지속시간 : ACCESS DENIED초' + '\n' + 'ACCESS DENIED : 0', inline=True)
embed.add_field(name='버프', value='ACCESS DENIED' + '\n' + '□□□' + '\n' + '□◎□' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='ACCESS DENIED', inline=True)
print('ACCESS DENIED 83')
await client.send_message(channel,embed=embed)
if message.content.startswith("/RPD") or message.content.startswith("/No.84."):
await client.send_file(message.channel, 'No.84_RPD.png')
channel = message.channel
embed = discord.Embed(
title = 'RPD | No.84',
description = '"지휘관, RPD가 왔습니다. 함께 싸울 수 있어서 영광입니다."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='MG', inline=True)
embed.add_field(name='제조시간', value='불가능', inline=True)
embed.add_field(name='스킬', value='화력전개MG' + '\n' + '지속시간 동안 자신의 화력 증가' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 18초' + '\n' + '지속시간 : 8초' + '\n' + '화력 증가치 : 65%', inline=True)
embed.add_field(name='버프(SG 한정)', value='사속 16% 상승' + '\n' + '□□■' + '\n' + '◎□□' + '\n' + '□□■', inline=True)
print('RPD')
await client.send_message(channel,embed=embed)
if message.content.startswith("/PK") or message.content.startswith("/피카") or message.content.startswith("/No.85."):
await client.send_file(message.channel, 'No.85_PK.png')
channel = message.channel
embed = discord.Embed(
title = 'PK | No.85',
description = '"지휘관, 적은 제대로 섬멸할 테니까, 가까이 오지 말아줄래?"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='MG', inline=True)
embed.add_field(name='제조시간', value='06:30', inline=True)
embed.add_field(name='스킬', value='사중극점' + '\n' + '3회 일반 공격 후 4회째 공격을 강화' + '\n' + '쿨타임 : Passive Skill' + '\n' + '공격력 배율 : 2.6배', inline=True)
embed.add_field(name='버프(SG 한정)', value='화력 18% 상승' + '\n' + '□□■' + '\n' + '◎□■' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='피카', inline=True)
print('PK')
await client.send_message(channel,embed=embed)
if message.content.startswith("/MG42") or message.content.startswith("/망가42") or message.content.startswith("/No.86."):
await client.send_file(message.channel, 'No.86_MG42.png')
channel = message.channel
embed = discord.Embed(
title = 'MG42 | No.86',
description = '"처음뵙겠습니다, 지휘관님. 옷을 찢는 것 같은 소리를 들어보지 않겠어요?"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='MG', inline=True)
embed.add_field(name='제조시간', value='05:50', inline=True)
embed.add_field(name='스킬', value='화력전개MG' + '\n' + '지속시간 동안 자신의 화력 증가' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 18초' + '\n' + '지속시간 : 6초' + '\n' + '화력 증가치 : 65%', inline=True)
embed.add_field(name='버프(SG 한정)', value='화력 22% 상승' + '\n' + '◎□■' + '\n' + '□□□' + '\n' + '□□■', inline=True)
embed.add_field(name='별명', value='망가42', inline=True)
print('MG42')
await client.send_message(channel,embed=embed)
if message.content.startswith("/MG34") or message.content.startswith("/망가34") or message.content.startswith("/No.87."):
await client.send_file(message.channel, 'No.87_MG34.png')
channel = message.channel
embed = discord.Embed(
title = 'MG34 | No.87',
description = '"당신이 지휘관이네요, MG42의 언니, MG34에요! 앞으로 잘 지내보도록 해요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='MG', inline=True)
embed.add_field(name='제조시간', value='05:00', inline=True)
embed.add_field(name='스킬', value='화력전개MG' + '\n' + '지속시간 동안 자신의 화력 증가' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 18초' + '\n' + '지속시간 : 4초' + '\n' + '화력 증가치 : 60%', inline=True)
embed.add_field(name='버프(SG 한정)', value='화력 20% 상승' + '\n' + '□□■' + '\n' + '□□□' + '\n' + '◎□□', inline=True)
embed.add_field(name='별명', value='망가34', inline=True)
print('MG34')
await client.send_message(channel,embed=embed)
if message.content.startswith("/MG34") or message.content.startswith("/망가34") or message.content.startswith("/No.87."):
await client.send_file(message.channel, 'No.87_MG34.png')
channel = message.channel
embed = discord.Embed(
title = 'MG34 | No.87',
description = '"당신이 지휘관이네요, MG42의 언니, MG34에요! 앞으로 잘 지내보도록 해요."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='MG', inline=True)
embed.add_field(name='제조시간', value='05:00', inline=True)
embed.add_field(name='스킬', value='화력전개MG' + '\n' + '지속시간 동안 자신의 화력 증가' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 18초' + '\n' + '지속시간 : 4초' + '\n' + '화력 증가치 : 60%', inline=True)
embed.add_field(name='버프(SG 한정)', value='화력 20% 상승' + '\n' + '□□■' + '\n' + '□□□' + '\n' + '◎□□', inline=True)
embed.add_field(name='별명', value='망가34', inline=True)
print('MG34')
await client.send_message(channel,embed=embed)
if message.content.startswith("/MG3") or message.content.startswith("/망가3") or message.content.startswith("/No.88."):
await client.send_file(message.channel, 'No.88_MG3.png')
channel = message.channel
embed = discord.Embed(
title = 'MG3 | No.88',
description = '"나는 새로 들어온 MG3야! 폭풍과도 같은 화력을 맛보게 해줄게!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★', inline=True)
embed.add_field(name='종류', value='MG', inline=True)
embed.add_field(name='제조시간', value='06:30', inline=True)
embed.add_field(name='스킬', value='준비만전' + '\n' + '지속시간 동안 자신의 화력 증가 발사중인 탄띠에 탄 추가' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 18초' + '\n' + '지속시간 : 8초' + '\n' + '화력, 발사수 증가치 : 30%, 4발', inline=True)
embed.add_field(name='버프(SG 한정)', value='화력 10%, 명중 15% 상승' + '\n' + '□□■' + '\n' + '◎□□' + '\n' + '□□■', inline=True)
embed.add_field(name='별명', value='망가3', inline=True)
print('MG3')
await client.send_message(channel,embed=embed)
if message.content.startswith("/브렌") or message.content.startswith("/No.89."):
await client.send_file(message.channel, 'No.89_브렌.png')
channel = message.channel
embed = discord.Embed(
title = '브렌 | No.89',
description = '"나는 브렌 경기관총이다. 가혹한 임무라면 나에게 맡겨라."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='MG', inline=True)
embed.add_field(name='제조시간', value='05:20', inline=True)
embed.add_field(name='스킬', value='준비만전' + '\n' + '지속시간 동안 자신의 화력 증가 발사중인 탄띠에 탄 추가' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 18초' + '\n' + '지속시간 : 8초' + '\n' + '화력, 발사수 증가치 : 30%, 3발', inline=True)
embed.add_field(name='버프(SG 한정)', value='사속 10%, 명중 12% 상승' + '\n' + '◎□■' + '\n' + '□□□' + '\n' + '□□■', inline=True)
print('브렌')
await client.send_message(channel,embed=embed)
if message.content.startswith("/FNP-9") or message.content.startswith("/FNP9") or message.content.startswith("/No.90."):
await client.send_file(message.channel, 'No.90_FNP-9.png')
channel = message.channel
embed = discord.Embed(
title = 'FNP-9 | No.90',
description = '"FNP-9 화려하게 등장! 지휘관, 너의 제대에 넣어줘!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:25', inline=True)
embed.add_field(name='스킬', value='퇴로차단' + '\n' + '지속시간 동안 적 전체 회피 감소' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 12초' + '\n' + '지속시간 : 8초' + '\n' + '회피 감소치 : 40%', inline=True)
embed.add_field(name='버프', value='1편제 - 사속 10%, 명중 20% 상승' + '\n' + '□■■ 2편제 - 사속 12%, 명중 25% 상승' + '\n' + '□◎■ 3편제 - 사속 15%, 명중 30% 상승' + '\n' + '□■■ 4편제 - 사속 17%, 명중 35% 상승' + '\n' + '5편제 - 사속 20%, 명중 40% 상승', inline=True)
print('FNP-9')
await client.send_message(channel,embed=embed)
if message.content.startswith("/MP-446") or message.content.startswith("/MP446") or message.content.startswith("/바이킹") or message.content.startswith("/No.91."):
await client.send_file(message.channel, 'No.91_MP-446.png')
channel = message.channel
embed = discord.Embed(
title = 'MP-446 | No.91',
description = '"겨우 찾아내 줬네, 지휘관! MP446이야, 바이킹이라고 불러줘!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:25', inline=True)
embed.add_field(name='스킬', value='격발차단' + '\n' + '지속시간 동안 적 전체 사속 감소' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 12초' + '\n' + '지속시간 : 8초' + '\n' + '사속 감소치 : 22%', inline=True)
embed.add_field(name='버프', value='1편제 - 화력 14% 상승' + '\n' + '■■□ 2편제 - 2편제 - 화력 17% 상승' + '\n' + '■◎□ 3편제 - 화력 21% 상승' + '\n' + '■■□ 4편제 - 화력 24% 상승' + '\n' + '5편제 - 화력 28% 상승', inline=True)
embed.add_field(name='별명', value='바이킹', inline=True)
print('MP-446')
await client.send_message(channel,embed=embed)
if message.content.startswith("/MP-446") or message.content.startswith("/MP446") or message.content.startswith("/바이킹") or message.content.startswith("/No.91."):
await client.send_file(message.channel, 'No.91_MP-446.png')
channel = message.channel
embed = discord.Embed(
title = 'MP-446 | No.91',
description = '"겨우 찾아내 줬네, 지휘관! MP446이야, 바이킹이라고 불러줘!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='00:25', inline=True)
embed.add_field(name='스킬', value='격발차단' + '\n' + '지속시간 동안 적 전체 사속 감소' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 12초' + '\n' + '지속시간 : 8초' + '\n' + '사속 감소치 : 22%', inline=True)
embed.add_field(name='버프', value='1편제 - 화력 14% 상승' + '\n' + '■■□ 2편제 - 2편제 - 화력 17% 상승' + '\n' + '■◎□ 3편제 - 화력 21% 상승' + '\n' + '■■□ 4편제 - 화력 24% 상승' + '\n' + '5편제 - 화력 28% 상승', inline=True)
print('MP-446')
await client.send_message(channel,embed=embed)\
if message.content.startswith("/SpectreM4") or message.content.startswith("/스펙트라") or message.content.startswith("/No.92."):
await client.send_file(message.channel, 'No.92_Spectre_M4.png')
channel = message.channel
embed = discord.Embed(
title = 'Spectre M4 | No.92',
description = '"스펙터 M4! 정식으로 입대합니다. 지휘관? 환영회 같은 건 없는 거야?"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='01:20', inline=True)
embed.add_field(name='스킬', value='회피기동' + '\n' + '지속시간 동안 자신의 회피 증가' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 5초' + '\n' + '회피 증가치 : 110%', inline=True)
embed.add_field(name='버프(AR 한정)', value='화력 20% 상승' + '\n' + '□□□' + '\n' + '■◎□' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='스펙트라', inline=True)
print('Spectre M4')
await client.send_message(channel,embed=embed)
if message.content.startswith("/IDW") or message.content.startswith("/고양이") or message.content.startswith("/아디따") or message.content.startswith("/No.93."):
await client.send_file(message.channel, 'No.93_IDW.png')
channel = message.channel
embed = discord.Embed(
title = 'IDW | No.93',
description = '"IDW다냥! 거둬주는 거냥? 지휘관...와앗! 다행이다냥~!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='01:10', inline=True)
embed.add_field(name='스킬', value='회피기동' + '\n' + '지속시간 동안 자신의 회피 증가' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 5초' + '\n' + '회피 증가치 : 110%', inline=True)
embed.add_field(name='버프(AR 한정)', value='회피 20% 상승' + '\n' + '■□□' + '\n' + '■◎□' + '\n' + '■□□', inline=True)
embed.add_field(name='별명', value='고양이, 아디따', inline=True)
print('IDW')
await client.send_message(channel,embed=embed)
if message.content.startswith("/64식") or message.content.startswith("/No.94."):
await client.send_file(message.channel, 'No.94_64식.png')
channel = message.channel
embed = discord.Embed(
title = '64식 | No.94',
description = '"저는 64식 소음 기관단총입니다. 지휘관의 곁에서 공부할 수 있어서, 영광입니다."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★', inline=True)
embed.add_field(name='종류', value='SMG', inline=True)
embed.add_field(name='제조시간', value='01:25', inline=True)
embed.add_field(name='스킬', value='회피기동' + '\n' + '폭발한 위치의 2.5반경 내의 적에게 기절을 건다.' + '\n' + '초반 쿨타임 : 5초' + '\n' + '쿨타임 : 16초' + '\n' + '기절 지속시간 : 3.2초', inline=True)
embed.add_field(name='버프(AR 한정)', value='사속 20% 상승' + '\n' + '□□□' + '\n' + '■◎□' + '\n' + '□□□', inline=True)
print('64식')
await client.send_message(channel,embed=embed)
if message.content.startswith("/한양조88식") or message.content.startswith("/한조") or message.content.startswith("/No.95."):
await client.send_file(message.channel, 'No.95_한양조_88식.png')
channel = message.channel
embed = discord.Embed(
title = '한양조 88식 | No.95',
description = '"어서 오세요! 저는 한양조 88식이에요. 주인님을 위해 봉사하겠습니다!"',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★', inline=True)
embed.add_field(name='종류', value='RF', inline=True)
embed.add_field(name='제조시간', value='03:50', inline=True)
embed.add_field(name='스킬', value='화력전개N' + '\n' + '지속시간 동안 자신의 화력 증가 / 뒤쪽의 수치는 주간작전에 사용 시' + '\n' + '초반 쿨타임 : 8초' + '\n' + '쿨타임 : 8초' + '\n' + '지속시간 : 6초' + '\n' + '화력 증가치 : 90 / 30%', inline=True)
embed.add_field(name='버프(HG 한정)', value='스킬 쿨타임 12% 감소' + '\n' + '□□□' + '\n' + '□◎■' + '\n' + '□□□', inline=True)
embed.add_field(name='별명', value='한조', inline=True)
print('한양조 88식')
await client.send_message(channel,embed=embed)
if message.content.startswith("/그리즐리MkV") or message.content.startswith("/그리즐리") or message.content.startswith("/곰누나") or message.content.startswith("/웅녀") or message.content.startswith("/No.96."):
await client.send_file(message.channel, 'No.96_그릴즐리_MkV.png')
channel = message.channel
embed = discord.Embed(
title = '그릴즐리 MkV | No.96 ',
description = '"어머, 지휘관님. 그리즐리 매그넘, 오늘부터 당신을 따라가겠습니다."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='01:10', inline=True)
embed.add_field(name='스킬', value='일제사격' + '\n' + '지속시간 동안 아군 전원 화력 증가' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 12초' + '\n' + '지속시간 : 8초' + '\n' + '화력 증가치 : 25%', inline=True)
embed.add_field(name='버프', value='1편제 - 화력 15%, 회피 10% 상승' + '\n' + '■■□ 2편제 - 화력 18%, 회피 12% 상승' + '\n' + '□◎■ 3편제 - 화력 22%, 회피 15% 상승' + '\n' + '■■□ 4편제 - 화력 26%, 회피 17% 상승' + '\n' + '5편제 - 화력 30%, 회피 20% 상승', inline=True)
embed.add_field(name='별명', value='곰누나, 웅녀, 그리즐리', inline=True)
print('그릴즐리 MkV')
await client.send_message(channel,embed=embed)
if message.content.startswith("/M950A") or message.content.startswith("/미역") or message.content.startswith("/켈리코") or message.content.startswith("/No.97."):
await client.send_file(message.channel, 'No.97_M950A.png')
channel = message.channel
embed = discord.Embed(
title = 'M950A | No.97 ',
description = '"M950A. 지휘관, 오늘부터 당신을 따르겠습니다."',
colour = discord.Colour.blue()
)
embed.add_field(name='등급', value='★★★★★', inline=True)
embed.add_field(name='종류', value='HG', inline=True)
embed.add_field(name='제조시간', value='01:05', inline=True)
embed.add_field(name='스킬', value='진압신호' + '\n' + '지속시간 동안 아군 전원 화력 증가' + '\n' + '초반 쿨타임 : 6초' + '\n' + '쿨타임 : 12초' + '\n' + '지속시간 : 8초' + '\n' + '화력 증가치 : 25%', inline=True)
embed.add_field(name='버프', value='1편제 - 사속 15%, 명중 25% 증가' + '\n' + '■□■ 2편제 - 사속 18%, 명중 31% 증가' + '\n' + '□◎□ 3편제 - 사속 22%, 명중 37% 증가' + '\n' + '■□■ 4편제 - 사속 26%, 명중 43% 증가' + '\n' + '5편제 - 사속 30%, 명중 50% 증가', inline=True)
embed.add_field(name='별명', value='켈리코, 미역', inline=True)
print('M950A')
await client.send_message(channel,embed=embed)
if message.content.startswith("/00:20") or message.content.startswith("/0020"):
channel = message.channel
embed = discord.Embed(
title = '00:20',
description = '검색결과',
colour = discord.Colour.blue()
)
embed.set_footer(text = '/[인형이름]을 통해 바로 해당 인형의 정보를 검색 가능합니다.')
embed.add_field(name='인형', value='M1911' + '\n' + '나강 리볼버' + '\n' + 'P38', inline=True)
embed.add_field(name='장비', value='X', inline=True)
embed.add_field(name='요정', value='X', inline=True)
print('00:20')
await client.send_message(channel,embed=embed)
# SECURITY: the bot token below is hard-coded (and leaked with this source).
# It should be revoked and loaded from an environment variable instead.
client.run('NTQ4NzIzNTAxNzkyNjI0NjQ5.D1Jesg.qG6cx2bOVrc4S_gXpB8WhauLyPU')
| true |
ef9c8ce7e52f7ccbac0a11111f7d8472e55144bb | Python | Thomas-Rice/mindmap | /Integration_Tests/Int_Add_Leaf.py | UTF-8 | 1,730 | 2.546875 | 3 | [] | no_license | import unittest
from app import *
class IntegrationAddLeaf(unittest.TestCase):
    """Integration tests for the add-leaf endpoint (POST /api/maps/<map_name>)."""

    # Error message the API returns whenever the path/text payload is incomplete.
    MISSING_DATA_MSG = b"This Request needs path and text data \n path: 'Test' \n text: 'Test'"

    def setup_test(self):
        """Point the app at a throw-away SQLite database and return a fresh test client."""
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.sqlite3'
        app.config['TESTING'] = True
        test_client = app.test_client()
        db.drop_all()
        db.create_all()
        return test_client

    def _post_leaf(self, payload):
        """Reset the database, then POST *payload* to the add-leaf endpoint."""
        test_client = self.setup_test()
        return test_client.post('/api/maps/add_leaf_test_1', json=payload)

    def test_add_leaf_with_path_and_text(self):
        response = self._post_leaf({'path': 'int-test', 'text': 'random text'})
        assert b'Created Leaf in MindMap: add_leaf_test_1 with path: int-test and text: random text' in response.data
        assert response.status == "200 OK"

    def test_add_leaf_without_path_and_with_text(self):
        response = self._post_leaf({'text': 'random text'})
        assert self.MISSING_DATA_MSG in response.data

    def test_add_leaf_without_text_and_with_path(self):
        response = self._post_leaf({'path': 'int-test'})
        assert self.MISSING_DATA_MSG in response.data

    def test_add_leaf_without_path_or_text(self):
        response = self._post_leaf({})
        assert self.MISSING_DATA_MSG in response.data
| true |
8bb879045c6b00d4d7370a87e2e9a5c9479ba41f | Python | pymee/studygroup | /1st/code/ono/omikuji_6_for.py | UTF-8 | 902 | 3.828125 | 4 | [] | no_license | # coding: utf-8
import random
# Fortune table: 運勢_1 = overall luck, 運勢_2 = work luck.
fortune = [{'運勢_1': '運勢は大吉! すべてよし。 ', '運勢_2': '仕事運は、全て上手くいく'},
           {'運勢_1': '運勢は中吉! まぁまぁよし。 ', '運勢_2': '仕事運は、努力すれば実る'},
           {'運勢_1': '運勢は吉! よし。 ', '運勢_2': '仕事運は、なかなか実らず'},
           {'運勢_1': '運勢は凶! わるし。 ', '運勢_2': '仕事運は、全てが上手くいかず'}]
# Draw the two categories independently (two separate random picks, as before;
# each pick chooses uniformly among the same four entries).
unsei_1 = random.choice(fortune)['運勢_1']
unsei_2 = random.choice(fortune)['運勢_2']
print('あなたの名前を入力してください')
# Read the user's name.
name = input('>>')
# Print the combined result.
print(f'{name} さんの運勢は、{unsei_1}\n{unsei_2}となります!')
2e3489e4d121fd6b799eb91949842970875fe6ee | Python | amajal/Aoc2017 | /App20.py | UTF-8 | 2,431 | 3.4375 | 3 | [] | no_license | import functools
def convert_particle_string_to_coordinates(particle_string):
    """Parse the "<x,y,z" tail of a property token into a list of ints."""
    raw_values = particle_string.split('<')[-1]
    return [int(value) for value in raw_values.split(',')]
def increment_properties():
    """Advance every particle one tick: velocity += acceleration, then position += velocity."""
    for particle in particles:
        velocity, acceleration, position = particle['v'], particle['a'], particle['p']
        for axis in range(3):
            velocity[axis] += acceleration[axis]
        for axis in range(3):
            position[axis] += velocity[axis]
def is_location_match(position1, position2):
    """Return True when two 3D positions coincide on every axis.

    Bug fix: the original compared index 0 twice and never compared index 1,
    so particles differing only in their y-coordinate were treated as colliding.
    """
    return (position1[0] == position2[0]
            and position1[1] == position2[1]
            and position1[2] == position2[2])
def get_matching_particles(particle_to_match):
    """Return every particle (including the argument itself) at the same position."""
    target_position = particle_to_match['p']
    return [candidate for candidate in particles
            if is_location_match(target_position, candidate['p'])]
def resolve_collisions():
    """Remove every group of particles that occupy the same position.

    Bug fix: the original removed items from ``particles`` while iterating over
    that same list, which makes the ``for`` loop skip elements. Iterating over a
    snapshot (and skipping entries already removed) avoids that.
    """
    for particle in list(particles):
        if particle not in particles:
            continue  # already removed as part of an earlier collision group
        matching_particles = get_matching_particles(particle)
        if len(matching_particles) > 1:
            print("Found particles to remove", len(matching_particles))
            for mp in matching_particles:
                print("removing", mp['p'], mp['i'])
                particles.remove(mp)
def compute_particle_distance(position):
    """Return the Manhattan distance of *position* from the origin.

    Uses ``sum`` instead of ``functools.reduce`` + lambda; as a side benefit an
    empty sequence now yields 0 instead of raising ``TypeError``.
    """
    return sum(abs(coordinate) for coordinate in position)
def find_particle_with_shortest_difference():
    """Return ``(distance, (index, particle))`` for the particle whose combined
    |p| + |v| + |a| Manhattan distance is smallest.

    Improvements: replaces the magic sentinel ``1000000000000000`` with
    ``float('inf')`` (the old value could be beaten by a legitimate distance),
    and renames the loop variable so it no longer shadows the ``property``
    builtin. When ``particles`` is empty this returns ``(inf, -1)``, matching
    the original's "nothing found" sentinel of ``-1``.
    """
    min_distance = float('inf')
    min_distance_particle = -1
    for index, particle in enumerate(particles):
        distance = 0
        for prop in ('p', 'v', 'a'):
            distance += compute_particle_distance(particle[prop])
        if distance < min_distance:
            min_distance = distance
            min_distance_particle = (index, particle)
    return min_distance, min_distance_particle
# --- Entry point: parse the particle list, then simulate indefinitely. ---
with open('Input.txt', 'r') as f:
    initial_state = f.readlines()
particles = []
# Each input line looks like "p=<x,y,z>, v=<x,y,z>, a=<x,y,z>"; splitting on
# '>' yields one token per property in a fixed order.
for state in initial_state:
    tokens = state.split('>')
    particle = {}
    # "0:p" encodes (token index, property key) in a two-character string.
    for index_identifier in ("0:p", "1:v", "2:a"):
        index = int(index_identifier[0])
        identifier = index_identifier[2]
        particle[identifier] = convert_particle_string_to_coordinates(tokens[index])
    particle["i"] = len(particles)  # remember the particle's original index
    particles.append(particle)
counter = 0  # NOTE(review): unused leftover variable
print(particles)
# Runs forever, printing the surviving particle count each tick; stop it
# manually once the count stabilises.
while True:
    resolve_collisions()
    increment_properties()
    #print(find_particle_with_shortest_difference())
    print(len(particles))
3e2a27511bab0b4cf4737b1925cb457d65b21584 | Python | CommanderPho/pyPhoPlaceCellAnalysis | /src/pyphoplacecellanalysis/GUI/Qt/Unused/CustomGridLayout.py | UTF-8 | 1,864 | 3.09375 | 3 | [
"MIT"
] | permissive | from qtpy import QtWidgets, QtCore
class CustomGridLayout(QtWidgets.QVBoxLayout):
    """A QGridLayout replacement that supports inserting/deleting rows at runtime.

    Rows are modelled as QHBoxLayouts stacked inside this QVBoxLayout.
    Based on https://stackoverflow.com/a/42147532 (credit to K. Muller).

    Note: ``addWidget(widget, row, col)`` intentionally shadows
    ``QVBoxLayout.addWidget`` with a grid-style signature.

    Bug fix: both ``setAlignment`` calls for new rows had an unbalanced
    parenthesis (``setAlignment((QtCore.Qt.AlignLeft)``), which made the
    original file a SyntaxError.
    """

    def __init__(self):
        super(CustomGridLayout, self).__init__()
        self.setAlignment(QtCore.Qt.AlignTop)  # keep rows pinned to the top
        self.setSpacing(20)

    def addWidget(self, widget, row, col):
        """Place *widget* at (row, col), creating missing rows on demand."""
        # Grow the layout until the requested row exists.
        while row >= self.count():
            lyt = QtWidgets.QHBoxLayout()
            lyt.setAlignment(QtCore.Qt.AlignLeft)
            self.addLayout(lyt)
        # Insert the widget at the specified column of that row.
        self.itemAt(row).insertWidget(col, widget)

    def insertRow(self, row):
        """Insert a new empty row before index *row*."""
        lyt = QtWidgets.QHBoxLayout()
        lyt.setAlignment(QtCore.Qt.AlignLeft)
        self.insertLayout(row, lyt)

    def deleteRow(self, row):
        """Remove row *row*, detaching every widget it contains."""
        for j in reversed(range(self.itemAt(row).count())):
            self.itemAt(row).itemAt(j).widget().setParent(None)
        self.itemAt(row).setParent(None)

    def clear(self):
        """Detach every widget, then remove every row."""
        for i in reversed(range(self.count())):
            for j in reversed(range(self.itemAt(i).count())):
                self.itemAt(i).itemAt(j).widget().setParent(None)
        for i in reversed(range(self.count())):
            self.itemAt(i).setParent(None)
| true |
16eb99033431fe22037d89e1d9c67de01216851d | Python | Moejay10/FYS4150 | /Project4/Codes/plotter.py | UTF-8 | 9,631 | 2.78125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import os
# Interactive driver: ask which project task to plot, then resolve the data
# (Tables) and output (Results/Plots) directories relative to this file.
print("Which Project Task do you want to run")
print("Task C - Equilibrium State: Write c")
print("Task D - Probability Histogram: Write d")
print("Task E & F - Phase Transitions & Critical Temperature: Write e")
Task = input("Write here: ")
tablesdir = os.path.join(os.path.dirname(__file__), '..', 'Tables')
plotsdir = os.path.join(os.path.dirname(__file__), '..', 'Results/Plots')
"""
-------------
Equilibrium
-------------
"""
if Task == "c":
    # Equilibrium study: compare ordered vs unordered initial spin states at
    # T = 1.0 ("...1" files) and T = 2.4 as a function of Monte Carlo cycles.
    filenames = ["Ordered1","Ordered"]
    filenames2 = ["Unordered1", "Unordered"]
    MCcycles = []
    energyO = []
    energyO2 = []
    energyU = []
    energyU2 = []
    magO = []
    magO2 = []
    magU = []
    magU2 = []
    NconfigsU = []
    NconfigsU2 = []
    # Columns in the data tables: [MC cycles, <E>, <|M|>, (accepted configs)].
    for i in (filenames):
        with open(os.path.join(tablesdir,i)) as file:
            lines = file.readlines()
            #Skip the first two lines
            for j in range(2,len(lines)):
                line = lines[j]
                pieces = line.split()
                if i == "Ordered1":
                    MCcycles.append(float(pieces[0]))
                    energyO.append(float(pieces[1]))
                    magO.append(float(pieces[2]))
                else:
                    energyO2.append(float(pieces[1]))
                    magO2.append(float(pieces[2]))
    for i in (filenames2):
        with open(os.path.join(tablesdir,i)) as file:
            lines = file.readlines()
            #Skip the first two lines
            for j in range(2,len(lines)):
                line = lines[j]
                pieces = line.split()
                if i == "Unordered1":
                    energyU.append(float(pieces[1]))
                    magU.append(float(pieces[2]))
                    NconfigsU.append(float(pieces[3]))
                else:
                    energyU2.append(float(pieces[1]))
                    magU2.append(float(pieces[2]))
                    NconfigsU2.append(float(pieces[3]))
    # Energy expectation value, ordered start.
    plt.figure()
    plt.title("Ordered")
    plt.plot(MCcycles, energyO)
    plt.plot(MCcycles,energyO2)
    plt.legend(["T = 1.0","T = 2.4"])
    plt.xlabel("# of Monte Carlo cycles")
    plt.ylabel("Energy expectation value $\langle$E$\\rangle$ [J]")
    plt.savefig(os.path.join(plotsdir,"Energy_exp_ordered.png"))
    # Energy expectation value, unordered start.
    plt.figure()
    plt.title("Unordered")
    plt.plot(MCcycles, energyU)
    plt.plot(MCcycles,energyU2)
    plt.legend(["T = 1.0","T = 2.4"])
    plt.xlabel("# of Monte Carlo cycles")
    plt.ylabel("Energy expectation value $\langle$E$\\rangle$ [J]")
    plt.savefig(os.path.join(plotsdir,"Energy_exp_unordered.png"))
    # Magnetization expectation value, ordered start.
    plt.figure()
    plt.title("Ordered")
    plt.plot(MCcycles, magO, "")
    plt.plot(MCcycles, magO2, "")
    plt.legend(["T = 1.0","T = 2.4"])
    plt.xlabel("# of Monte Carlo cycles")
    plt.ylabel("Magnetization expectation value $\langle$|M|$\\rangle$ [1]")
    plt.savefig(os.path.join(plotsdir,"Magn_exp_ordered.png"))
    # Magnetization expectation value, unordered start.
    plt.figure()
    plt.title("Unordered")
    plt.plot(MCcycles, magU, "")
    plt.plot(MCcycles, magU2, "")
    plt.legend(["T = 1.0","T = 2.4"])
    plt.xlabel("# of Monte Carlo cycles")
    plt.ylabel("Magnetization expectation value $\langle$|M|$\\rangle$ [1]")
    plt.savefig(os.path.join(plotsdir,"Magn_exp_unordered.png"))
    # Accepted configurations vs Monte Carlo cycles (unordered only).
    plt.figure()
    plt.title("Unordered")
    plt.plot(MCcycles, NconfigsU, "")
    plt.plot(MCcycles, NconfigsU2, "")
    plt.legend(["T = 1.0","T = 2.4"])
    plt.xlabel("# of Monte Carlo cycles")
    plt.ylabel("Accepted configurations (normalized)")
    plt.savefig(os.path.join(plotsdir,"Accepted_configs_unordered.png"))
    # Accepted configurations as a function of temperature.
    Temp = []
    configs = []
    with open(os.path.join(tablesdir,"Nconfig_vs_Temp")) as file:
        lines = file.readlines()
        for i in range(2,len(lines)):
            pieces = lines[i].split()
            Temp.append(float(pieces[0]))
            configs.append(float(pieces[1]))
    plt.figure()
    plt.plot(Temp,configs)
    plt.xlabel("Temperature [kT/J]")
    plt.ylabel("Accepted number of configurations (normalized)")
    plt.title("Accepted number of configurations (normalized) as a function of T")
    plt.savefig(os.path.join(plotsdir,"Accepted_configs_temperature.png"))
    plt.show()
"""
-------------
Probabilities
-------------
"""
if Task == "d":
    # Energy histograms for T = 1.0 and T = 2.4; each table row is
    # [energy, count]. The first line of each file is a header.
    filenames = ["Probability_1","Probability_24"]
    for i in filenames:
        with open(os.path.join(tablesdir,i)) as file:
            lines = file.readlines()
            Energies = []
            counts = []
            max_count = 0
            most_probable_energy = 0
            for j in range(1,len(lines)):
                line = lines[j]
                pieces = line.split()
                energy = float(pieces[0])
                count = float(pieces[1])
                Energies.append((energy))
                counts.append((count))
                # Track the mode of the distribution for the annotation box.
                if count > max_count:
                    max_count = count
                    most_probable_energy = energy
            # NOTE(review): no plt.figure() per file — the second histogram
            # reuses pyplot's current figure state after show(); confirm this
            # renders the two files on separate figures as intended.
            plt.bar(Energies,counts,width = 4 if i == "Probability_1" else 3)
            plt.xlim(-805,-770) if i == "Probability_1" else plt.xlim(-705,-305)
            plt.xlabel("Energy [J]")
            plt.ylabel("Energy counts")
            plt.tight_layout()
            plt.subplots_adjust(top=0.88)
            if i == "Probability_1":
                plt.title("T = 1.0")
            else:
                plt.title("T = 2.4")
            # Annotate the most probable energy in the upper-left corner.
            props = dict(boxstyle='round', facecolor='wheat', alpha=1)
            plt.text(0.05*(plt.xlim()[1]-plt.xlim()[0])+plt.xlim()[0] ,plt.ylim()[1]*0.85, "Most probable energy:\n" + str(most_probable_energy), bbox = props)
            plt.savefig(os.path.join(plotsdir,i+".png"))
            plt.show()
if Task == "e":
    # Phase-transition study: the table concatenates one temperature sweep per
    # lattice size L; columns are [T, <E>, <|M|>, Cv, chi].
    with open(os.path.join(tablesdir,"Temperature_100")) as file:
        lines = file.readlines()
    temps = []
    energylist = []
    maglist = []
    Cvlist = []
    Suscplist = []
    indeks = 0  # NOTE(review): unused leftover variable
    for i in range(1, len(lines)):
        pieces = lines[i].split()
        temps.append(float(pieces[0]))
        energylist.append(float(pieces[1]))
        maglist.append(float(pieces[2]))
        Cvlist.append(float(pieces[3]))
        Suscplist.append(float(pieces[4]))
    # The temperature grid repeats once per lattice size; keep only the first
    # sweep (up to where the first temperature reappears).
    firstTemp = temps[0]
    for i in range(1,len(temps)):
        if temps[i] == firstTemp:
            temps = temps[0:i]
            break
    # Estimate Tc(L) for each lattice size as the temperature where Cv
    # (respectively chi) peaks within that size's sub-list.
    TCCv = []
    TCX = []
    for i in range(int(len(energylist)/len(temps))):
        max_temp = 0  # NOTE(review): unused leftover variable
        sublistCv = Cvlist[i*len(temps):len(temps)*(i+1)]
        sublistSuscp = Suscplist[i*len(temps):len(temps)*(i+1)]
        maxCv = max(sublistCv)
        maxSuscp = max(sublistSuscp)
        TCCv.append(temps[sublistCv.index(maxCv)])
        TCX.append(temps[sublistSuscp.index(maxSuscp)])
        print("Tc for Cv =",temps[sublistCv.index(maxCv)])
        print("Tc for X =",temps[sublistSuscp.index(maxSuscp)])
    # One curve per lattice size for each observable.
    plt.figure()
    plt.title("Mean Energy")
    plt.xlabel("T [kT/J]")
    plt.ylabel("Energy expectation value $\langle$E$\\rangle$ [J]")
    for i in range(int(len(energylist)/len(temps))):
        plt.plot(temps,energylist[i*len(temps):len(temps)*(i+1)],"")
    plt.legend(["L = 40","L = 60","L = 80","L = 100"])
    plt.savefig(os.path.join(plotsdir,"Phase_trans_energy.png"))
    plt.figure()
    plt.title("Absolute mean Magnetization")
    plt.xlabel("T [kT/J]")
    plt.ylabel("Magnetization expectation value $\langle$|M|$\\rangle$ [1]")
    for i in range(int(len(energylist)/len(temps))):
        plt.plot(temps,maglist[i*len(temps):len(temps)*(i+1)],"")
    plt.legend(["L = 40","L = 60","L = 80","L = 100"])
    plt.savefig(os.path.join(plotsdir,"Phase_trans_mag.png"))
    plt.figure()
    plt.title("Specific heat")
    plt.xlabel("T [kT/J]")
    plt.ylabel("Specific heat $\langle$$C_v$$\\rangle$ [$J^2/kT^2$]")
    for i in range(int(len(energylist)/len(temps))):
        plt.plot(temps,Cvlist[i*len(temps):len(temps)*(i+1)],"")
    plt.legend(["L = 40","L = 60","L = 80","L = 100"])
    plt.savefig(os.path.join(plotsdir,"Phase_trans_Cv.png"))
    plt.figure()
    plt.title("Susceptibility")
    plt.xlabel("T [kT/J]")
    plt.ylabel("Susceptibility $\langle$$\chi$$\\rangle$ [1/kT]")
    for i in range(int(len(energylist)/len(temps))):
        plt.plot(temps,Suscplist[i*len(temps):len(temps)*(i+1)],"")
    plt.legend(["L = 40","L = 60","L = 80","L = 100"])
    plt.savefig(os.path.join(plotsdir,"Phase_trans_suscp.png"))
    plt.show()
    """
    Task f)
    """
    #Performing a linear regression to find critical temp in thermodyn. limit
    # Tc(L) = a/L + Tc(inf): the intercept of Tc vs 1/L estimates the critical
    # temperature in the thermodynamic limit.
    TCCv = np.array(TCCv)
    TCX = np.array(TCX)
    Llist = np.array([40,60,80,100])
    Llist = 1.0/Llist
    linreg1 = np.polyfit(Llist,TCCv,1)
    linreg2 = np.polyfit(Llist,TCX,1)
    plt.figure()
    plt.title("Specific heat $C_V$")
    plt.xlabel("$\\frac{1}{L}$")
    plt.ylabel("$T_C$ [kT/J]")
    plt.plot(Llist,TCCv,"o")
    plt.plot(Llist,np.polyval(linreg1,Llist))
    plt.legend(["$T_C$(L) from simulations","$T_C(L)$ = a$\\cdot$ $\\frac{1}{L}$ + $T_C(L = \infty)$ $\\to$ %g$\\cdot$x + %g" % (linreg1[0],linreg1[1])])
    plt.savefig(os.path.join(plotsdir,"linregCv.png"))
    plt.figure()
    plt.title("Susceptibility $\chi$")
    plt.xlabel("$\\frac{1}{L}$")
    plt.ylabel("$T_C$ [kT/J]")
    plt.plot(Llist,TCX,"o")
    plt.plot(Llist,np.polyval(linreg2,Llist))
    plt.legend(["$T_C$(L) from simulations","$T_C(L)$ = a$\\cdot$ $\\frac{1}{L}$ + $T_C(L = \infty)$ $\\to$ %g$\\cdot$x + %g" % (linreg2[0],linreg2[1])])
    plt.savefig(os.path.join(plotsdir,"linregX.png"))
    print("\n")
    # Final estimate: average of the two intercepts.
    print("The estimated Critical Temperature from our simulations is Tc = %g " % (0.5*(linreg1[1]+linreg2[1])))
    plt.show()
| true |
a29be6d8169147b125c6e42b5b6a5d4f12e7251a | Python | xyb/gmail-backup | /dobackup.py | UTF-8 | 3,188 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import email
import getpass
import imaplib
import os
import re
import time
from fix import fix_large_duplication, get_message_ctime, update_file_mtime, \
write_hash_data
LAST_ID_FILE = 'last_fetched_id.dat'  # persists the UID of the last message saved
# Matches IMAP fetch responses of the form "<seq> (UID <uid>)".
UID_RE = re.compile(r"\d+\s+\(UID (\d+)\)$")
# Bug fix: the dot was unescaped (r"(\d+).eml$"), so e.g. "123Xeml" also
# matched; "\." anchors a literal ".eml" extension.
FILE_RE = re.compile(r"(\d+)\.eml$")
GMAIL_FOLDER_NAME = "[Gmail]/All Mail"
def getUIDForMessage(svr, n):
    """Fetch message number *n* over IMAP and return its UID as a string."""
    resp, data = svr.fetch(n, 'UID')
    match = UID_RE.match(data[0])
    if match:
        return match.group(1)
    raise Exception(
        "Internal error parsing UID response: %s %s. Please try again" % (resp, data))
def get_filename_by_date(uid, ctime):
localtime = time.localtime(ctime)
year = localtime.tm_year
month = localtime.tm_mon
dir = '%s-%02d' % (year, month)
fname = '%s/%s.eml' % (dir, uid)
return fname
def downloadMessage(svr, n, uid):
    """Download message *n*, store it under its YYYY-MM folder, then post-process it."""
    resp, data = svr.fetch(n, '(RFC822)')
    if resp != 'OK':
        raise Exception("Bad response: %s %s" % (resp, data))
    content = data[0][1]
    message = email.message_from_string(content)
    ctime = get_message_ctime(message)
    fname = get_filename_by_date(uid, ctime)
    target_dir = os.path.dirname(fname)  # renamed: 'dir' shadowed the builtin
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    with open(fname, 'w') as out:
        out.write(content)
    fix_large_duplication(fname, message)
    update_file_mtime(fname, ctime)
def UIDFromFilename(fname):
    """Extract the numeric UID from an "<uid>.eml" filename, or None when it doesn't match."""
    match = FILE_RE.match(fname)
    return int(match.group(1)) if match else None
def get_credentials():
    """Return (user, password), reading account.conf or prompting interactively.

    Whatever the user types is cached in plain text in account.conf.
    NOTE(review): storing the password unencrypted on disk is a security risk.

    Bug fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit
    and real programming errors; it now only catches a missing/unreadable or
    malformed config file. (This script is Python 2, hence ``raw_input``.)
    """
    try:
        user, pwd = open('account.conf').read().strip().split()
    except (IOError, OSError, ValueError):
        user = raw_input("Gmail address: ")
        pwd = getpass.getpass("Gmail password: ")
        with open('account.conf', 'w') as f:
            f.write('%s %s' % (user, pwd))
    return user, pwd
def write_last_id(uid):
    """Persist *uid* as the most recently downloaded message UID."""
    with open(LAST_ID_FILE, 'w') as state_file:
        state_file.write(str(uid))
def read_last_id():
    """Return the UID saved by write_last_id, or 0 when no usable state exists.

    Bug fix: narrowed the bare ``except:`` (which also hid KeyboardInterrupt
    and genuine bugs) to the cases that actually mean "no usable state file":
    a missing/unreadable file or non-integer contents.
    """
    try:
        return int(open(LAST_ID_FILE).read().strip())
    except (IOError, OSError, ValueError):
        return 0
def do_backup():
    # Incremental Gmail backup: log in over IMAP, binary-search for the first
    # message not yet saved, then download everything newer. (Python 2 code.)
    print 'login...'
    user, pwd = get_credentials()
    svr = imaplib.IMAP4_SSL('imap.gmail.com')
    svr.login(user, pwd)
    resp, [countstr] = svr.select(GMAIL_FOLDER_NAME, readonly=True)
    count = int(countstr)
    lastdownloaded = read_last_id()
    # A simple binary search to see where we left off
    # (invariant: message 'gotten' has UID <= lastdownloaded, message
    # 'ungotten' is new; assumes UIDs increase with sequence number)
    gotten, ungotten = 0, count + 1
    while (ungotten - gotten) > 1:
        attempt = (gotten + ungotten) / 2
        uid = getUIDForMessage(svr, attempt)
        if int(uid) <= lastdownloaded:
            print "Finding starting point: %d/%d (UID: %s) too low" % (
                attempt, count, uid)
            gotten = attempt
        else:
            print "Finding starting point: %d/%d (UID: %s) too high" % (
                attempt, count, uid)
            ungotten = attempt
    # The download loop
    for i in range(ungotten, count + 1):
        uid = getUIDForMessage(svr, i)
        print "Downloading %d/%d (UID: %s)" % (i, count, uid)
        downloadMessage(svr, i, uid)
        # Record progress after every message so an interrupted run resumes.
        write_last_id(uid)
        write_hash_data()
    svr.close()
    svr.logout()
# Script entry point.
if __name__ == "__main__":
    do_backup()
| true |
ab87313d3f964c3a2678bb1f6e90f8ad26f1dcb6 | Python | Ciuel/Python-Grupo12 | /Trabajo_Final/src/Event_Handlers/config.py | UTF-8 | 4,421 | 2.765625 | 3 | [
"MIT"
] | permissive | import PySimpleGUI as sg
import json
import os
from ..Components import menu
from ..Event_Handlers.Theme_browser import choose_theme
from ..Constants.constants import USER_JSON_PATH,BUTTON_SOUND_PATH,vlc_play_sound
def build_initial_config(nick: str) -> dict:
    """Load the stored configuration of the user who just logged in.

    Args:
        nick (str): Nickname of the logged-in user.

    Returns:
        dict: That user's saved configuration from the users JSON file.
    """
    # Bug fix: the file was opened with "r+" (read/write) although it is only
    # read here; plain "r" avoids requesting write access needlessly.
    with open(os.path.join(os.getcwd(), USER_JSON_PATH), "r") as info:
        user_data = json.load(info)
    return user_data[nick]["config"]
def check_radio_boxes(values: dict) -> tuple:
    """Read the radio-button state of the window.

    Args:
        values (dict): Window values containing the radio-button keys.

    Returns:
        tuple: (token type: "Text"/"Images", help wanted: "yes"/"no").
    """
    token_kind = "Text" if values["-CHOOSE TYPE1-"] else "Images"
    wants_help = "yes" if values["-CHOOSE HELP YES-"] else "no"
    return token_kind, wants_help
def color_picker(theme: str) -> str:
    """Open the PySimpleGUI theme browser and return the user's pick.

    Returns:
        str: The chosen theme.
    """
    chosen = choose_theme(theme)
    return chosen
def check_empty_fields(values: dict) -> bool:
    """Verify that no configuration field was left empty.

    Args:
        values (dict): Window values with the text and radio fields to check.

    Returns:
        bool: Truthy when both text fields are filled and one radio button of
        each pair (help yes/no, token type 1/2) is selected.
    """
    texts_filled = values["-VICTORY TEXT-"] != "" and values["-Lose TEXT-"] != ""
    help_chosen = values["-CHOOSE HELP NO-"] or values["-CHOOSE HELP YES-"]
    type_chosen = values["-CHOOSE TYPE1-"] or values["-CHOOSE TYPE2-"]
    return texts_filled and help_chosen and type_chosen
def back_button(window: sg.Window, event: str, nick: str, theme: str, vlc_dict: dict):
    """Close the current window and open the menu when the back button fires.

    Args:
        window (sg.Window): Window the event came from.
        event (str): Event to check against "-BACK BUTTON-".
        nick (str): Nickname of the logged-in user.
        theme (str): Theme for the windows to be drawn.
        vlc_dict (dict): VLC handles used to play the click sound.
    """
    if event != "-BACK BUTTON-":
        return
    vlc_play_sound(vlc_dict, BUTTON_SOUND_PATH)
    window.close()
    menu.start(nick, theme, vlc_dict)
def save_changes(window:sg.Window,event:str,values:dict,theme:str,nick:str):
    """Persist the user's customised configuration into the users JSON file.

    When the save button is pressed and every required field is filled in,
    this overwrites the user's "config" entry in the JSON file with the
    values currently shown in the window, using the json module to treat the
    file as a dict of users.

    Args:
        window (sg.Window): the window where the events are checked
        event (str): the event to compare against -SAVE CHANGES-
        values (dict): window values holding the fields to store
        theme (str): the theme to save for this user
        nick (str): nick of the logged-in user
    """
    if event=='-SAVE CHANGES-':
        if check_empty_fields(values):
            with open(os.path.join(os.getcwd(),USER_JSON_PATH),"r+") as info:
                user_data = json.load(info)
                type_radio,need_help=check_radio_boxes(values)
                user_data[nick]["config"]= {
                    "Coincidences": values["-CHOOSE COINCIDENCES-"],
                    "Help": need_help,
                    "Type of token": type_radio,
                    "Level": values["-CHOOSE LEVEL-"],
                    "Theme": theme,
                    "VictoryText": values["-VICTORY TEXT-"],
                    "LoseText": values["-Lose TEXT-"]
                }
                # rewrite the file in place: rewind, dump, then drop leftover bytes
                info.seek(0)
                json.dump(user_data, info, indent=4)
                info.truncate()
                window["-INFO USER-"].update("Los cambios se han guardado con Exito")
        else:
            window["-INFO USER-"].update("Llene el campo vacio antes de guardar")
def color_button(event:str,theme:str)->str:
    """Handle a click on the colour-chooser button.

    Args:
        event (str): the window event to inspect
        theme (str): the currently active theme

    Returns:
        str: the newly chosen theme, or the current one when the colour
        button was not the event
    """
    if event == "-CHOOSE COLOR-":
        return color_picker(theme)
    return theme
| true |
79c0ba1f8071c42d3797f6440212a45f6c3398c5 | Python | elaeon/dama_ml | /src/dama/data/web.py | UTF-8 | 537 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | import tqdm
class HttpDataset(object):
    """Download a remote HTTP resource into a dataset's file path."""
    def __init__(self, url, sess=None):
        # url: resource to fetch
        # sess: session object providing .get() — presumably requests.Session;
        # TODO confirm against callers
        self.url = url
        self.sess = sess
    def download(self, filepath, chunksize):
        """Fetch self.url and write it to *filepath* in chunks of *chunksize* bytes.

        NOTE(review): the GET is issued without stream=True, so the whole body
        may be buffered in memory before iter_content yields — confirm whether
        true streaming was intended.
        """
        response = self.sess.get(self.url)
        with open(filepath, "wb") as f:
            # tqdm wraps the chunk iterator to show download progress
            for chunk in tqdm.tqdm(response.iter_content(chunksize)):
                if chunk:
                    f.write(chunk)
                    f.flush()
    def from_data(self, dataset, chunksize=258):
        """Download the resource into *dataset*'s filepath (default 258-byte chunks)."""
        self.download(dataset.filepath, chunksize=chunksize)
| true |
c57a513c1f8a0bb8f4c463aed4ab0f7dc945c7be | Python | AldenJurling/jwxml | /jwxml.py | UTF-8 | 33,322 | 2.734375 | 3 | [
"BSD-2-Clause"
] | permissive | """
jwxml: Various Python classes for parsing JWST-related information in XML files
* SUR: a segment update request file (mirror move command from the WAS to the MCS)
* Update: a single mirror update inside of a SUR
* SIAF: a SIAF file (Science Instrument Aperture File, listing the defined apertures for a given instrument)
* Aperture: a single aperture inside a SIAF
"""
import numpy as np
import matplotlib.pyplot as plt
try:
from lxml import etree
HAVE_LXML = True
except ImportError:
import xml.etree.cElementTree as etree
HAVE_LXML = False
import logging
import unittest
import os
_log = logging.getLogger('jwxml')
try:
import webbpsf
_HAS_WEBBPSF=True
except ImportError:
_HAS_WEBBPSF=False
#---------------------------------------------------------------------------------
# Mirror Move related classes
class Segment_Update(object):
""" Class for representing one single mirror update (will be inside of groups in SURs)
"""
def __init__(self, xmlnode):
if xmlnode.attrib['type'] != 'pose': raise NotImplemented("Only Pose updates supported yet")
self.id = int(xmlnode.attrib['id'])
self.type = xmlnode.attrib['type']
self.segment = xmlnode.attrib['seg_id'][0:2]
self.absolute = xmlnode.attrib['absolute'] =='true'
self.coord= xmlnode.attrib['coord'] #local or global
self.stage_type= xmlnode.attrib['stage_type'] # recenter_fine, fine_only, none
self.units = dict()
self.moves = dict()
for move in iterchildren(xmlnode):
#print(move.tag, move.text )
self.moves[move.tag] =float(move.text)
self.units[move.tag] = move.attrib['units']
#X_TRANS, Y_TRANS, PISTON, X_TILT, Y_TILT, CLOCK
#allowable units:
#units="id"
#units="meters"
#units="none"
#units="radians"
#units="sag"
#units="steps"
#
# pose moves will only ever have meters/radians as units
def __str__(self):
return ("Update %d, %s, %s: "% (self.id, 'absolute' if self.absolute else 'relative', self.coord)) + str(self.moves)
def shortstr(self):
outstr = ("Update %d: %s, %s, %s {"% (self.id, self.segment, 'absolute' if self.absolute else 'relative', self.coord))
outstr+= ", ".join([ coordname+"=%.3g" % self.moves[coordname] for coordname in ['PISTON','X_TRANS','Y_TRANS','CLOCK', 'X_TILT','Y_TILT']])
#for coordname in ['PISTON','X_TRANS','Y_TRANS','CLOCK', 'X_TILT','Y_TILT']:
#outstr+=coordname+"=%.3g" % self.moves[coordname]
outstr+="}"
return outstr
@property
def xmltext(self):
""" The XML text representation of a given move """
text= ' <UPDATE id="{0.id}" type="{0.type}" seg_id="{0.segment}" absolute="{absolute}" coord="{0.coord}" stage_type="{0.stage_type}">\n'.format( self, absolute = str(self.absolute).lower())
for key in ['X_TRANS','Y_TRANS','PISTON','X_TILT', 'Y_TILT', 'CLOCK']:
if key in self.moves:
text+=' <{key} units="{unit}">{val:E}</{key}>\n'.format(key=key, unit=self.units[key], val=self.moves[key])
text+= ' </UPDATE>\n'
return text
def toGlobal(self):
""" Return moves cast to global coordinates """
if self.coord =='global':
return self.moves
else:
raise NotImplemented("Error")
def toLocal(self):
""" Return moves cast to local coordinates """
if self.coord =='local':
return self.moves
else:
raise NotImplemented("Error")
# TO implement based on Ball's 'pmglobal_to_seg' in ./wfsc_core_algs/was_core_pmglobal_to_seg.pro
# or the code in ./segment_control/mcs_hexapod_obj__define.pro
class SUR(object):
    """ Class for parsing/manipulating Segment Update Request files
    """
    def __init__(self, filename):
        """ Read a SUR from disk """
        self.filename=filename
        self._tree = etree.parse(filename)
        # copy the root element's attributes onto this instance
        for tag in ['creator','date','time','version', 'operational']:
            self.__dict__[tag] = self._tree.getroot().attrib[tag]
        for element in self._tree.getroot().iter():
            if element.tag =='CONFIGURATION_NAME': self.configuration_name = element.text
            if element.tag =='CORRECTION_ID': self.correction_id = element.text
        # self.groups: list of groups, each a list of Segment_Update objects
        self.groups = []
        for grp in self._tree.getroot().iter('GROUP'):
            myupdates = []
            for update in grp.iter('UPDATE'):
                myupdates.append(Segment_Update(update))
            self.groups.append(myupdates)
    def __str__(self):
        """Human-readable multi-line summary of all groups and updates."""
        outstr = "SUR %s\n" % self.filename #, type=%s, coords=%s\n" % (self.filename, 'absolute' if self.absolute else 'relative', self.coord)
        for igrp, grp in enumerate(self.groups):
            outstr+= "\tGroup %d\n" % (igrp+1)
            for update in grp:
                outstr+= "\t\t"+str(update)+"\n"
        return outstr
    @property
    def xmltext(self):
        """ The XML text representation of a given move """
        text = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<SEGMENT_UPDATE_REQUEST creator="?" date="{date}" time="{time}" version="0.0.1" operational="false" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="../../setup_files/schema/segment_update_request.xsd">
    <CONFIGURATION_NAME>{self.configuration_name}</CONFIGURATION_NAME>
    <CORRECTION_ID>{self.correction_id}</CORRECTION_ID>\n""".format(self=self, date='YYYY-MM-DD', time='HH:MM:SS')
        # FIXME add date and time keywords for real
        for igrp, grp in enumerate(self.groups):
            text+=' <GROUP id="{id}">\n'.format(id=igrp+1)
            for update in grp:
                text+=update.xmltext
            text+=' </GROUP>\n'
        text+= '</SEGMENT_UPDATE_REQUEST>'
        return text
    #@property
    #def name(self): return self._tree.getroot().attrib['name']
#---------------------------------------------------------------------------------
# SIAF related classes
class Aperture(object):
""" An Aperture, as parsed from the XML.
All XML nodes are converted into object attributes.
See JWST-STScI-001550 for the reference on which this implementation was based.
4 Coordinate systems:
* Detector: pixels, in raw detector read out axes orientation ("Det")
* Science: pixels, in conventional DMS axes orientation ("Sci")
* Ideal: arcsecs relative to aperture reference location. ("Idl")
* Telescope: arcsecs V2,V3 ("Tel")
Example
========
ap = some_siaf['desired_aperture_name'] # extract one aperture from a SIAF
ap.Det2Tel(1024, 512) # convert pixel coordinates to sky Tel coords.
# takes pixel coords, returns arcsec
ap.Idl2Sci( 10, 3) # convert Idl coords to Sci pixels
# takes arcsec, returns pixels
# there exist functions for all of the possible {Tel,Idl,Sci,Det}2{Tel,Idl,Sci,Det} combinations.
# you can also specify frames by string:
ap.convert(1024, 512, frame_from='Det', frame_to='Tel') # same as first example above
ap.corners('Tel') # Get detector corners in Tel frame
ap.center('Tel') # Get the reference point defined in the SIAF
# this is typically the center of this region
ap.plot('Idl', annotate=True, title=True) # plot coordinates in Idl frame
ap.plotDetectorChannels() # color in the readout channels
"""
def __init__(self, xmlnode, instrument=None):
self.instrument=instrument
convfactors = {'RADIANS': 1, 'DEGREES': np.pi/180, 'ARCSECS': np.pi/180/60/60}
for node in iterchildren(xmlnode):
tag = node.tag.replace('{http://www.stsci.edu/SIAF}','')
if len(node.getchildren()) ==0:
# if doens't have children,
try:
value = float(node.text) # do we care about ints vs floats?
except (ValueError,TypeError):
value=node.text
self.__dict__[tag] = value
else:
# if does have children:
if '{http://www.stsci.edu/SIAF}units' in [c.tag for c in node.getchildren()]:
# this will be an angle/units pair. units are either in arcsec or degrees. Convert to radians in any case for internal use.
unit = node.find('{http://www.stsci.edu/SIAF}units').text
value =float( node.find('{http://www.stsci.edu/SIAF}value').text) * convfactors[unit]
self.__dict__[tag] = value
elif '{http://www.stsci.edu/SIAF}elt' in [c.tag for c in node.getchildren()]:
# an array of values which should go to an NDarray
elts = [float(c.text) for c in iterchildren(node, '{http://www.stsci.edu/SIAF}elt')]
self.__dict__[tag] = np.asarray(elts)
else:
raise NotImplemented("Not sure how to parse that node.")
# pack things into NDarrays for convenient access
# first the vertices
self.XIdlVert = np.asarray((self.XIdlVert1, self.XIdlVert2,self.XIdlVert3,self.XIdlVert4))
self.YIdlVert = np.asarray((self.YIdlVert1, self.YIdlVert2,self.YIdlVert3,self.YIdlVert4))
# then the transformation coefficients
if self.Sci2IdlDeg is not None:
self.Sci2IdlDeg = int(self.Sci2IdlDeg)
self.Sci2IdlCoeffs_X = np.zeros( (self.Sci2IdlDeg+1, self.Sci2IdlDeg+1))
self.Sci2IdlCoeffs_Y = np.zeros( (self.Sci2IdlDeg+1, self.Sci2IdlDeg+1))
self.Idl2SciCoeffs_X = np.zeros( (self.Sci2IdlDeg+1, self.Sci2IdlDeg+1))
self.Idl2SciCoeffs_Y = np.zeros( (self.Sci2IdlDeg+1, self.Sci2IdlDeg+1))
for i in range(1,self.Sci2IdlDeg+1):
for j in range(0,i+1):
#if self.AperName == 'FGS2_FULL_CNTR':
#print('Sci2IdlX{0:1d}{1:1d}'.format(i,j), self.__dict__['Sci2IdlX{0:1d}{1:1d}'.format(i,j)])
self.Sci2IdlCoeffs_X[i,j] = self.__dict__['Sci2IdlX{0:1d}{1:1d}'.format(i,j)]
self.Sci2IdlCoeffs_Y[i,j] = self.__dict__['Sci2IdlY{0:1d}{1:1d}'.format(i,j)]
self.Idl2SciCoeffs_X[i,j] = self.__dict__['Idl2SciX{0:1d}{1:1d}'.format(i,j)]
self.Idl2SciCoeffs_Y[i,j] = self.__dict__['Idl2SciY{0:1d}{1:1d}'.format(i,j)]
def __repr__(self):
return "<jwxml.Aperture object AperName={0} >".format(self.AperName)
#--- the actual fundamental transformation code follows in these next routines:
def Det2Sci(self, XDet, YDet):
""" Detector to Science, following Section 4.1 of JWST-STScI-001550"""
XDet = np.asarray(XDet, dtype=float)
YDet = np.asarray(YDet, dtype=float)
ang = np.deg2rad(self.DetSciYAngle)
XSci = self.XSciRef + self.DetSciParity* ((XDet - self.XDetRef)* np.cos(ang) + (YDet-self.YDetRef) * np.sin(ang))
YSci = self.YSciRef - (XDet - self.XDetRef)* np.sin(ang) + (YDet-self.YDetRef) * np.cos(ang)
return XSci, YSci
def Sci2Det(self, XSci, YSci):
""" Science to Detector, following Section 4.1 of JWST-STScI-001550"""
XSci = np.asarray(XSci, dtype=float)
YSci = np.asarray(YSci, dtype=float)
ang = np.deg2rad(self.DetSciYAngle)
XDet = self.XDetRef + self.DetSciParity * (XSci - self.XSciRef ) * np.cos(ang) - (YSci - self.YSciRef ) * np.sin(ang)
YDet = self.YDetRef + self.DetSciParity * (XSci - self.XSciRef ) * np.sin(ang) + (YSci - self.YSciRef ) * np.cos(ang)
return XDet, YDet
    def Sci2Idl(self, XSci, YSci):
        """ Convert Sci to Idl
        input in pixel, output in arcsec

        Applies the SIAF forward polynomial distortion expansion about the
        science reference pixel (XSciRef, YSciRef).
        """
        dX = np.asarray(XSci, dtype=float) - self.XSciRef
        dY = np.asarray(YSci, dtype=float) - self.YSciRef
        degree = self.Sci2IdlDeg
        #CX = self.Sci2IdlCoefX
        #CY = self.Sci2IdlCoefY
        #XIdl = CX[0]*dX + CX[1]*dY + CX[2]*dX**2 + CX[3]*dX*dY + CX[4]*dY**2
        #YIdl = CY[0]*dY + CY[1]*dY + CY[2]*dY**2 + CY[3]*dY*dY + CY[4]*dY**2
        XIdl = np.zeros_like(np.asarray(XSci), dtype=float)
        YIdl = np.zeros_like(np.asarray(YSci), dtype=float)
        # accumulate every polynomial term dX**(i-j) * dY**j of total order i
        for i in range(1,degree+1):
            for j in range(0,i+1):
                XIdl += self.Sci2IdlCoeffs_X[i,j] * dX**(i-j) * dY**j
                YIdl += self.Sci2IdlCoeffs_Y[i,j] * dX**(i-j) * dY**j
        return XIdl, YIdl
    def Idl2Sci(self, XIdl, YIdl):
        """ Convert Idl to Sci
        input in arcsec, output in pixels

        Applies the SIAF inverse polynomial expansion; the Idl origin is by
        definition (0, 0), so the inputs serve directly as the deltas.
        """
        XIdl = np.asarray(XIdl, dtype=float)
        YIdl = np.asarray(YIdl, dtype=float)
        degree = self.Sci2IdlDeg
        #dX = XIdl #Idl origin is by definition 0
        #dY = YIdl #Idl origin is by definition 0
        XSci = np.zeros_like(np.asarray(XIdl), dtype=float)
        YSci = np.zeros_like(np.asarray(YIdl), dtype=float)
        # accumulate every polynomial term XIdl**(i-j) * YIdl**j of total order i
        for i in range(1,degree+1):
            for j in range(0,i+1):
                XSci += self.Idl2SciCoeffs_X[i,j] * XIdl**(i-j) * YIdl**j
                YSci += self.Idl2SciCoeffs_Y[i,j] * XIdl**(i-j) * YIdl**j
        #CX = self.Idl2SciCoefX
        #CY = self.Idl2SciCoefY
        #XSci = CX[0]*dX + CX[1]*dY + CX[2]*dX**2 + CX[3]*dX*dY + CX[4]*dY**2
        #YSci = CY[0]*dY + CY[1]*dY + CY[2]*dY**2 + CY[3]*dY*dY + CY[4]*dY**2
        return XSci + self.XSciRef, YSci + self.YSciRef
        #return XSci, YSci
    def Idl2Tel(self, XIdl, YIdl):
        """ Convert Idl to Tel
        input in arcsec, output in arcsec

        A rotation by V3IdlYAngle about (V2Ref, V3Ref), with an optional
        parity flip of the X axis (VIdlParity).

        WARNING
        --------
        This is an implementation of the planar approximation, which is adequate for most
        purposes but may not be for all. Error is about 1.7 mas at 10 arcminutes from the tangent
        point. See JWST-STScI-1550 for more details.
        """
        XIdl = np.asarray(XIdl, dtype=float)
        YIdl = np.asarray(YIdl, dtype=float)
        #print(self.V2Ref, self.V3Ref)
        #rad2arcsec = 1./(np.pi/180/60/60)
        #V2Ref and V3Ref are now in arcseconds in the XML file
        ang = np.deg2rad(self.V3IdlYAngle)
        V2 = self.V2Ref + self.VIdlParity * XIdl * np.cos(ang) + YIdl * np.sin(ang)
        V3 = self.V3Ref - self.VIdlParity * XIdl * np.sin(ang) + YIdl * np.cos(ang)
        return V2, V3
def Tel2Idl(self,V2, V3):
""" Convert Tel to Idl
input in arcsec, output in arcsec
This transformation involves going from global V2,V3 to local angles with respect to some
reference point, and possibly rotating the axes and/or flipping the parity of the X axis.
WARNING
--------
This is an implementation of the planar approximation, which is adequate for most
purposes but may not be for all. Error is about 1.7 mas at 10 arcminutes from the tangent
point. See JWST-STScI-1550 for more details.
"""
#rad2arcsec = 1./(np.pi/180/60/60)
dV2 = np.asarray(V2, dtype=float)-self.V2Ref
dV3 = np.asarray(V3, dtype=float)-self.V3Ref
ang = np.deg2rad(self.V3IdlYAngle)
XIdl = self.VIdlParity * (dV2 * np.cos(ang) - dV2 * np.sin(ang))
YIdl = dV2 * np.sin(ang) + dV3 * np.cos(ang)
return XIdl, YIdl
    #--- and now some compound transformations that are less fundamental. This just nests calls to the above.
    # Each wrapper simply chains two or three of the elementary transforms;
    # input/output units follow the leftmost/rightmost frame names.
    def Det2Idl(self, *args):
        return self.Sci2Idl(*self.Det2Sci(*args))
    def Det2Tel(self, *args):
        return self.Idl2Tel(*self.Sci2Idl(*self.Det2Sci(*args)))
    def Sci2Tel(self, *args):
        return self.Idl2Tel(*self.Sci2Idl(*args))
    def Idl2Det(self, *args):
        return self.Sci2Det(*self.Idl2Sci(*args))
    def Tel2Sci(self, *args):
        return self.Idl2Sci(*self.Tel2Idl(*args))
    def Tel2Det(self, *args):
        return self.Sci2Det(*self.Idl2Sci(*self.Tel2Idl(*args)))
#--- now, functions other than direct coordinate transformations
def convert(self, X, Y, frame_from=None, frame_to=None):
""" Generic conversion routine, that calls one of the
specific conversion routines based on the provided frame names as strings. """
if frame_from is None: raise ValueError("You must specify a frame_from value : Tel, Idl, Sci, Det")
if frame_to is None: raise ValueError("You must specify a frame_to value : Tel, Idl, Sci, Det")
if frame_from == frame_to: return X, Y # null transformation
#frames = ['Det','Sci', 'Idl','Tel']
function = eval('self.%s2%s' % (frame_from, frame_to))
return function(X,Y)
    def corners(self, frame='Idl'):
        " Return coordinates of the aperture outline"
        # the four Idl-frame vertices from the SIAF, converted to the requested frame
        return self.convert(self.XIdlVert, self.YIdlVert, 'Idl', frame)
    def center(self, frame='Tel'):
        """ Return the defining center point of the aperture"""
        # the SIAF reference point (V2Ref, V3Ref), converted from Tel to the requested frame
        return self.convert(self.V2Ref, self.V3Ref, 'Tel', frame)
    def plot(self, frame='Idl', label=True, ax=None, title=True, units='arcsec', annotate=False, color=None):
        """ Plot this one aperture
        Parameters
        -----------
        frame : str
            Which coordinate system to plot in: 'Tel', 'Idl', 'Sci', 'Det'
        label : bool
            Add text label stating aperture name
        units : str
            one of 'arcsec', 'arcmin', 'deg'
        annotate : bool
            Add annotations for detector (0,0) pixels
        title : str
            If set, add a label to the plot indicating which frame was plotted.
        """
        if units is None:
            units='arcsec'
        # should we flip the X axis direction at the end of this function?
        need_to_flip_axis = False # only flip if we created the axis
        if ax is None:
            ax = plt.gca()
            ax.set_aspect('equal')
            if frame=='Idl' or frame=='Tel':
                need_to_flip_axis = True # *and* we're displaying some coordinates in angles relative to V2.
                ax.set_xlabel('V2 [{0}]'.format(units))
                ax.set_ylabel('V3 [{0}]'.format(units))
            elif frame=='Sci' or frame=='Det':
                ax.set_xlabel('X pixels [{0}]'.format(frame))
                ax.set_ylabel('Y pixels [{0}]'.format(frame))
        x, y = self.corners(frame=frame)
        # NOTE: `01.` is a (legal but unusual) float literal for 1.0
        if units.lower() == 'arcsec':
            scale=1
        elif units.lower() =='arcmin':
            scale=01./60
        elif units.lower() =='deg':
            scale=01./60/60
        else:
            raise ValueError("Unknown units: "+units)
        x2 = np.concatenate([x, [x[0]]]) # close the box
        y2 = np.concatenate([y, [y[0]]])
        # convert arcsec to arcmin and plot
        if color is not None:
            ax.plot(x2 * scale, y2 * scale, color=color)
        else:
            ax.plot(x2 * scale, y2 * scale)
        # NOTE(review): the flip branch below is currently a deliberate no-op;
        # the axis inversion is handled by SIAF.plot instead.
        if need_to_flip_axis:
            #print("flipped x axis")
            #ax.set_xlim(ax.get_xlim()[::-1])
            pass
        if label:
            rotation = 30 if self.AperName.startswith('NRC') else 0 # partially mitigate overlapping NIRCam labels
            ax.text(x.mean()*scale, y.mean()*scale, self.AperName,
                verticalalignment='center', horizontalalignment='center', rotation=rotation,
                color=ax.lines[-1].get_color())
        if title:
            ax.set_title("{0} frame".format(frame))
        if annotate:
            self.plotDetectorOrigin(frame=frame)
    def plotDetectorOrigin(self, frame='Idl', which='both'):
        """ Draw red and blue squares to indicate the raw detector
        readout and science frame readout, respectively
        Parameters
        -----------
        which : str
            Which detector origin to plot: 'both', 'Det', 'Sci'
        frame : str
            Which coordinate system to plot in: 'Tel', 'Idl', 'Sci', 'Det'
        """
        # raw detector frame
        if which.lower() == 'det' or which.lower()=='both':
            c1, c2 = self.convert( 0, 0, 'Det', frame)
            plt.plot(c1, c2, color='red', marker='s', markersize=9)
        # science frame
        if which.lower() == 'sci' or which.lower()=='both':
            c1, c2 = self.convert( 0, 0, 'Sci', frame)
            plt.plot(c1, c2, color='blue', marker='s')
    def plotDetectorChannels(self, frame='Idl', color='0.5', alpha=0.3, evenoddratio=0.5, **kwargs):
        """ Mark on the plot the various detector readout channels
        These are depicted as alternating light/dark bars to show the
        regions read out by each of the output amps.
        Parameters
        ----------
        frame : str
            Optional if you have already called plot() to specify a
            coordinate frame.
        """
        import matplotlib
        # MIRI detectors are 1024 px across; the NIR detectors are 2048 px
        if self.instrument == 'MIRI': npixels = 1024
        else: npixels = 2048
        ch = npixels/4
        ax = plt.gca()
        # one channel = a ch-wide, full-height column in the Det frame;
        # the corners are transformed to the requested frame before drawing
        pts = ((0, 0), (ch,0), (ch,npixels), (0, npixels))
        for chan in range(4):
            plotpoints = np.zeros((4,2))
            for i,xy in enumerate(pts):
                plotpoints[i] = self.convert(xy[0]+chan*ch,xy[1],'Det',frame)
            rect = matplotlib.patches.Polygon(plotpoints, closed=True,
                    alpha=(alpha if np.mod(chan,2) else alpha*evenoddratio),
                    facecolor=color, edgecolor='none', lw=0)
            ax.add_patch(rect)
class SIAF(object):
""" Science Instrument Aperture File
This is a class interface to SIAF information stored in an XML file.
It lets you read (only) the SIAF information, retrieve apertures,
plot them, and transform coordinates accordingly.
This class is basically just a container. See the Aperture class for
the detailed implementation of the transformations.
Briefly, this class acts like a dict containing Aperture objects, accessible
using their names defined in the SIAF
Examples
---------
fgs_siaf = SIAF('FGS')
fgs_siaf.apernames # returns a list of aperture names
ap = fgs_siaf['FGS1_FULL_CNTR'] # returns an aperture object
ap.plot(frame='Tel') # plot one aperture
fgs_siaf.plot() # plot all apertures in this file
"""
    def __init__(self, instr='NIRISS', filename=None, basepath=None, **kwargs):
        #basepath="/Users/mperrin/Dropbox/JWST/Optics Documents/SIAF/"
        """ Read a SIAF from disk
        Parameters
        -----------
        instr : string
            one of 'NIRCam', 'NIRSpec', 'NIRISS', 'MIRI', 'FGS'; case sensitive.
        basepath : string
            Directory to look in for SIAF files
        filename : string, optional
            Alternative method to specify a specific SIAF XML file.
        """
        if instr not in ['NIRCam', 'NIRSpec', 'NIRISS', 'MIRI', 'FGS']:
            raise ValueError("Invalid instrument name: {0}. Note that this is case sensitive.".format(instr))
        self.instrument=instr
        if filename is None:
            if basepath is None:
                if _HAS_WEBBPSF:
                    # default to the instrument folder inside the webbpsf data tree
                    from webbpsf.utils import get_webbpsf_data_path
                    basepath = os.path.join(get_webbpsf_data_path(), instr)
                else:
                    basepath='.'
            self.filename=os.path.join(basepath, instr+'_SIAF.xml')
        else:
            self.filename = filename
        self.apertures = {}
        self._tree = etree.parse(self.filename)
        self._last_plot_frame=None
        #for entry in self._tree.getroot().iter('{http://www.stsci.edu/SIAF}SiafEntry'):
        # NOTE(review): iterates un-namespaced 'SiafEntry' tags while Aperture
        # strips the namespaced form — confirm this matches the XML files in use.
        for entry in self._tree.getroot().iter('SiafEntry'):
            aperture = Aperture(entry, instrument=self.instrument)
            self.apertures[aperture.AperName] = aperture
    def __getitem__(self, key):
        # dict-style access: siaf['APERTURE_NAME'] -> Aperture
        return self.apertures[key]
    def __len__(self):
        # number of apertures defined in this SIAF
        return len(self.apertures)
    @property
    def apernames(self):
        """ List of aperture names defined in this SIAF"""
        # NOTE(review): on Python 3 this is a dict keys *view*, not a list
        return self.apertures.keys()
    def _getFullApertures(self):
        """ Return whichever subset of apertures correspond to the entire detectors. This is a
        helper function for the various plotting routines following"""
        # NOTE(review): these aperture names track a specific SIAF release;
        # update them alongside the XML files.
        fullaps = []
        if self.instrument =='NIRCam':
            fullaps.append( self.apertures['NRCA5_FULL'])
            fullaps.append( self.apertures['NRCB5_FULL'])
            #for letter in ['A', 'B']:
                #for number in range(1,6):
                    #fullaps.append(self.apertures['NRC{letter}{number}_FULL_CNTR'.format(letter=letter, number=number)])
        elif self.instrument =='NIRSpec':
            #fullaps.append( self.apertures['NRS1_FULL'])
            #fullaps.append( self.apertures['NRS2_FULL'])
            fullaps.append( self.apertures['NRS_FULL_MSA1'])
            fullaps.append( self.apertures['NRS_FULL_MSA2'])
            fullaps.append( self.apertures['NRS_FULL_MSA3'])
            fullaps.append( self.apertures['NRS_FULL_MSA4'])
        elif self.instrument =='NIRISS':
            fullaps.append( self.apertures['NIS-CEN'])
        elif self.instrument =='MIRI':
            fullaps.append( self.apertures['MIRIM_FULL_CNTR'])
        elif self.instrument =='FGS':
            fullaps.append( self.apertures['FGS1_FULL'])
            fullaps.append( self.apertures['FGS2_FULL'])
        return fullaps
def plot(self, frame='Tel', names=None, label=True, units=None, clear=True, annotate=False,
subarrays=True):
""" Plot all apertures in this SIAF
Parameters
-----------
names : list of strings
A subset of aperture names, if you wish to plot only a subset
subarrays : bool
Plot all the minor subarrays if True, else just plot the "main" apertures
label : bool
Add text labels stating aperture names
units : str
one of 'arcsec', 'arcmin', 'deg'
clear : bool
Clear plot before plotting (set to false to overplot)
annotate : bool
Add annotations for detector (0,0) pixels
frame : str
Which coordinate system to plot in: 'Tel', 'Idl', 'Sci', 'Det'
"""
if clear: plt.clf()
ax = plt.subplot(111)
ax.set_aspect('equal')
# which list of apertures to iterate over?
if subarrays:
iterable = self.apertures.itervalues
else:
iterable = self._getFullApertures
for ap in iterable():
if names is not None:
if ap.AperName not in names: continue
ap.plot(frame=frame, label=label, ax=ax, units=None)
if annotate:
ap.plotDetectorOrigin(frame=frame)
ax.set_xlabel('V2 [arcsec]')
ax.set_ylabel('V3 [arcsec]')
if frame =='Tel' or frame=='Idl':
# enforce V2 increasing toward the left
ax.autoscale_view(True,True,True)
xlim = ax.get_xlim()
if xlim[1] > xlim[0]: ax.set_xlim(xlim[::-1])
ax.set_autoscalex_on(True)
self._last_plot_frame = frame
    def plotDetectorOrigin(self, which='both', frame=None):
        """ Mark on the plot the detector's origin in Det and Sci coordinates
        Parameters
        -----------
        which : str
            Which detector origin to plot: 'both', 'Det', 'Sci'
        frame : str
            Which coordinate system to plot in: 'Tel', 'Idl', 'Sci', 'Det'
            Optional if you have already called plot() to specify a
            coordinate frame.
        """
        # default to whatever frame the last plot() call used
        if frame is None: frame = self._last_plot_frame
        for ap in self._getFullApertures():
            ap.plotDetectorOrigin(frame=frame, which=which)
    def plotDetectorChannels(self, frame=None):
        """ Mark on the plot the various detector readout channels
        These are depicted as alternating light/dark bars to show the
        regions read out by each of the output amps.
        Parameters
        ----------
        frame : str
            Which coordinate system to plot in: 'Tel', 'Idl', 'Sci', 'Det'
            Optional if you have already called plot() to specify a
            coordinate frame.
        """
        # default to whatever frame the last plot() call used
        if frame is None: frame = self._last_plot_frame
        for ap in self._getFullApertures():
            ap.plotDetectorChannels(frame=frame)
def plotAllSIAFs(subarrays = True, showorigin=True, showchannels=True, **kwargs):
    """ Plot All instrument """
    # Overlay every instrument's apertures on the current axes
    # (clear=False keeps earlier instruments visible).
    for instr in ['NIRCam','NIRISS','NIRSpec','FGS','MIRI']:
        aps =SIAF(instr, **kwargs)
        print("{0} has {1} apertures".format(aps.instrument, len(aps)))
        aps.plot(clear=False, subarrays=subarrays, **kwargs)
        if showorigin: aps.plotDetectorOrigin()
        if showchannels: aps.plotDetectorChannels()
def plotMainSIAFs(showorigin=False, showchannels=False, label=False, **kwargs):
    """Plot the principal imaging, coronagraph, and MSA apertures, colour-coded.

    NOTE(review): the showorigin and showchannels parameters are accepted but
    never used in this function — confirm whether they were meant to mirror
    plotAllSIAFs's behavior.
    """
    col_imaging = 'blue'
    col_coron = 'green'
    col_msa = 'magenta'
    nircam = SIAF('NIRCam')
    niriss= SIAF('NIRISS')
    fgs = SIAF('FGS')
    nirspec = SIAF('NIRSpec')
    miri = SIAF('MIRI')
    im_aps = [ nircam['NRCA5_FULL'], nircam['NRCB5_FULL'], niriss['NIS-CEN'], miri['MIRIM_FULL_ILLCNTR'],
        fgs['FGS1_FULL'], fgs['FGS2_FULL']]
    coron_aps = [nircam['NRCA2_MASK210R'], nircam['NRCA4_MASKSWB'],
            nircam['NRCA5_MASK335R'],
            nircam['NRCA5_MASK430R'],
            nircam['NRCA5_MASKLWB'],
            nircam['NRCB3_MASKSWB'],
            nircam['NRCB1_MASK210R'],
            nircam['NRCB5_MASK335R'],
            nircam['NRCB5_MASK430R'],
            nircam['NRCB5_MASKLWB'],
            miri['MIRIM_MASK1065_CNTR'],
            miri['MIRIM_MASK1140_CNTR'],
            miri['MIRIM_MASK1550_CNTR'],
            miri['MIRIM_MASKLYOT_CNTR']]
    msa_aps = [nirspec['NRS_FULL_MSA'+str(n+1)] for n in range(4)]
    for aplist, col in zip( [im_aps, coron_aps, msa_aps], [col_imaging, col_coron, col_msa]):
        for ap in aplist:
            ap.plot(color=col, frame='Tel', label=label, **kwargs)
    # ensure V2 increases to the left
    #ax.set_xlim(
    ax = plt.gca()
    xlim = ax.get_xlim()
    if xlim[0] < xlim[1]:
        ax.set_xlim(xlim[::-1])
class Test_SIAF(unittest.TestCase):
    """Round-trip sanity tests for the Aperture coordinate transforms.

    NOTE(review): _test_up/_test_down are underscore-prefixed, so unittest
    discovery skips them; they reference a 2010-era SIAF file and aperture
    naming ('NIRCAM A') that the current format may not provide.
    """
    def assertAlmostEqualTwo(self, tuple1, tuple2):
        # compare both elements of a coordinate pair to 1 decimal place
        self.assertAlmostEqual(tuple1[0], tuple2[0], places=1)
        self.assertAlmostEqual(tuple1[1], tuple2[1], places=1)
    def _test_up(self):
        siaf = SIAF("JwstSiaf-2010-10-05.xml")
        startx = 1023
        starty = 1024
        nca = siaf['NIRCAM A']
        self.assertAlmostEqualTwo( nca.Det2Sci(startx,starty), (1020.,1020.))
        print("Det2Sci OK")
        self.assertAlmostEqualTwo( nca.Det2Idl(startx,starty), (0.0, 0.0))
        print("Det2Idl OK")
        self.assertAlmostEqualTwo( nca.Det2Tel(startx,starty), (87.50, -497.10))
        print("Det2Tel OK")
    def _test_down(self):
        siaf = SIAF("JwstSiaf-2010-10-05.xml")
        startV2 = 87.50
        startV3 = -497.10
        nca = siaf['NIRCAM A']
        self.assertAlmostEqualTwo( nca.Sci2Det(1020., 1020), (1023.,1024.))
        print("Sci2Det OK")
        self.assertAlmostEqualTwo( nca.Tel2Idl(startV2, startV3), (0.0, 0.0))
        print("Tel2Idl OK")
        self.assertAlmostEqualTwo( nca.Tel2Sci(startV2, startV3), (1020., 1020.))
        print("Tel2Sci OK")
        self.assertAlmostEqualTwo( nca.Tel2Det(startV2, startV3), (1023.,1024.))
        print("Tel2Det OK")
    def test_inverses(self):
        siaf = SIAF("JwstSiaf-2010-10-05.xml")
        nca = siaf['NIRCAM A']
        self.assertAlmostEqualTwo( nca.Det2Sci(*nca.Sci2Det(1020., 1020)), (1020., 1020) )
        self.assertAlmostEqualTwo( nca.Sci2Det(*nca.Det2Sci(1020., 1020)), (1020., 1020) )
        print("Det <-> Sci OK")
        self.assertAlmostEqualTwo( nca.Tel2Idl(*nca.Idl2Tel(10., 10)), (10., 10) )
        self.assertAlmostEqualTwo( nca.Idl2Tel(*nca.Tel2Idl(10., 10)), (10., 10) )
        print("Tel <-> Idl OK")
        self.assertAlmostEqualTwo( nca.Tel2Sci(*nca.Sci2Tel(10., 10)), (10., 10) )
        self.assertAlmostEqualTwo( nca.Sci2Tel(*nca.Tel2Sci(10., 10)), (10., 10) )
        print("Tel <-> Sci OK")
# The ElementTree implementation in xml.etree does not support
# Element.iterchildren, so provide this wrapper instead
# This wrapper does not currently provide full support for all the arguments as
# lxml's iterchildren
def iterchildren(element, tag=None):
    """Iterate over *element*'s direct children, optionally filtered by *tag*.

    Delegates to lxml's Element.iterchildren when available; otherwise falls
    back to plain iteration (with a generator filter when a tag is given).
    """
    if HAVE_LXML:
        return element.iterchildren(tag)
    else:
        if tag is None:
            return iter(element)
        def _iterchildren():
            for child in element:
                if child.tag == tag:
                    yield child
        return _iterchildren()
if __name__== "__main__":
    # quick manual check: enable debug logging, then parse the default (NIRISS) SIAF
    logging.basicConfig(level=logging.DEBUG,format='%(name)-10s: %(levelname)-8s %(message)s')
    s = SIAF()
| true |
533752739b1cbf4d440aa785fa3ceab1a368adc4 | Python | duncanlindsey/GoogleCodeJam2018 | /trouble.py | UTF-8 | 3,193 | 3.109375 | 3 | [] | no_license | import sys
from random import randint
# Hard-coded sample data (Code Jam 2018 "Trouble Sort"): first the number of
# test cases T, then alternating N and the space-separated values V per case.
sample_input = ['2',
                '5',
                '5 6 8 4 3',
                '3',
                '8 9 7']
def solve(V):
    """Predict the first index at which trouble-sorted V would be unsorted.

    Trouble sort independently sorts the values at even and at odd indices.
    Walking the two sorted halves in interleaved order, return the first
    position that breaks ascending order, or the string 'OK' if none does.
    """
    evens = sorted(V[::2])
    odds = sorted(V[1::2])
    for idx, odd_val in enumerate(odds):
        if odd_val < evens[idx]:
            return 2 * idx
        if idx < len(evens) - 1 and odd_val > evens[idx + 1]:
            return 2 * idx + 1
    return 'OK'
def write_output(t, result):
    """Emit one Code Jam result line ('Case #t: result') and flush immediately."""
    line = 'Case #%s: %s' % (t, result)
    print(line, flush=True)
def trouble_sort(L):
    """Run the (deliberately buggy) TroubleSort algorithm and return the result.

    Repeatedly scans for any i with V[i] > V[i+2] and reverses the triple
    V[i:i+3] — which is equivalent to swapping the two endpoints — until no
    such triple remains. The input list is not modified.

    Simplification of the original: the old code rebuilt the entire list on
    every swap and redundantly special-cased the final triple; an in-place
    endpoint swap on a copy produces the identical fixpoint (the values at
    even and odd indices each end up sorted among themselves).
    """
    V = list(L)
    done = False
    while not done:
        done = True
        for i in range(len(V) - 2):
            if V[i] > V[i + 2]:
                done = False
                # reversing [a, b, c] == swapping a and c
                V[i], V[i + 2] = V[i + 2], V[i]
    return V
def find_sort_error(V):
    """Return the first index where V disagrees with sorted(V), or 'OK' if sorted."""
    for idx, (actual, expected) in enumerate(zip(V, sorted(V))):
        if actual != expected:
            return idx
    return 'OK'
def test(num_tests):
    """Randomized self-check: for *num_tests* random lists, compare solve()'s
    prediction against the result of actually running trouble_sort(), and
    report the first mismatch (or success)."""
    failed = False
    fail_num = None
    fail_N = None
    fail_list = []
    fail_model = None
    fail_trouble = None
    fail_check = None
    for t in range(1, num_tests+1):
        N = randint(3,100)
        V = []
        for i in range(N):
            V.append(randint(0,10000000000))
        model = solve(V)
        trouble = trouble_sort(V)
        check = find_sort_error(trouble)
        if model != check:
            # record the full context of the first failing case, then stop
            failed = True
            fail_num = t
            fail_N = N
            fail_list = V
            fail_model = model
            fail_trouble = trouble
            fail_check = check
            break
    if failed:
        print ('Test failed on test case: %s\nInput N: %s\nInput V: %s\nTrouble V: %s\nThe model returned: %s\nThe check on TROUBLE returned: %s' \
            % (fail_num, fail_N, fail_list, fail_trouble, fail_model, fail_check))
    else:
        print ('No failures detected!')
def run():
    """Drive the sample input through solve() and print Code Jam style output.

    The #SWAP-tagged lines toggle between the hard-coded sample_input and
    real stdin. NOTE(review): N is read but never used — V's own length
    serves instead.
    """
    #We collect the first input line consisting of a single integer = T, the total number of test cases
    #T = int(input()) #SWAP
    T = int(sample_input[0]) #SWAP
    #We loop through each test case
    for t in range(1, T+1):
        N = int(sample_input[int(2*t)-1]) #SWAP
        V = [int(v) for v in sample_input[int(2*t)].split(' ')] #SWAP
        #N = int(input()) #SWAP
        #V = [int(v) for v in input().split(' ')] #SWAP
        write_output(t, solve(V))
# NOTE(review): importing or running this module executes 10,000 randomized
# self-tests; the run() entry point below stays commented out.
test(10000)
#V = [0, 0, 2, 1, 2, 1, 3, 2, 3, 2, 6, 2, 7, 4, 7, 5, 8, 5, 8, 6, 9, 6, 9, 6, 10, 8, 10, 9, 10]
#V = [0,0,2,1,2]
#print (find_sort_error(V))
#run() | true |
1ff6d952b6315e8bf04a9ac394a183c9590316ae | Python | kevwill79/Python | /Learning Python/LearnPythonTheHardWay/Exercises/ex25.py | UTF-8 | 1,488 | 4.875 | 5 | [] | no_license | #More function practice
def break_words(stuff):
""""This function will break up words for us."""
words = stuff.split(' ')
return words
#Used str.lower so all words would be lowercase before sorting
def sort_words(words):
"""Sorts the words."""
return sorted(words, key=str.lower)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print(word)
def print_last_word(words):
""""Prints the last word after popping it off."""
word = words.pop(-1)
print(word)
def sort_sentence(sentence):
"""Takes in a full sentence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
sentence = "This is my test sentence!"
#Function calls
words = break_words(sentence)
print(words)
sorted_words = sort_words(words)
print(sorted_words)
print_first_word(words)
print_last_word(words)
sorted_sentence = sort_sentence(sentence)
print(sorted_sentence)
print_first_and_last(sentence)
print_first_and_last_sorted(sentence) | true |
22fa581bed33e20c75074d106eea1d9e2e9fd0ca | Python | salildabholkar/Machine-Learning | /code/knn.py | UTF-8 | 1,017 | 2.875 | 3 | [
"MIT"
] | permissive | from collections import Counter
import numpy as np
from utils.helpers import euclidean_distance
from utils.CommonSetup import CommonSetup
class KNN(CommonSetup):
def __init__(self, k=5):
self.k = k # l[:None] returns the whole list
def get_most_common(self, neighbors_targets):
return Counter(neighbors_targets).most_common(1)[0][0]
def _predict(self, X=None):
predictions = [self.__predict_x(x) for x in X]
return np.array(predictions)
# Predict label for x
def __predict_x(self, x):
# distances between x and all examples
distances = (euclidean_distance(x, example) for example in self.X)
# Sort all examples by their distance to x.
neighbors = sorted(((dist, target)
for (dist, target) in zip(distances, self.y)),
key=lambda x: x[0])
neighbors_targets = [target for (_, target) in neighbors[:self.k]]
return self.get_most_common(neighbors_targets)
| true |
cae6fd3840d58f917df6478edd2e268ea0fc7a2f | Python | shirsho-12/MathVisuals | /mult table.py | UTF-8 | 250 | 3.90625 | 4 | [] | no_license |
x = int(input("Enter number: "))
y = int(input("Enter number of multiples: "))
for i in range(1, y+1):
print('{0} x {1} = {2}'.format(x, i, x*i))
from fractions import Fraction
a, b = map(Fraction, input().split())
print(a+b, a*b, a-b, a/b) | true |
5b310f1c196d746f685dabb7b1ec908cbd682571 | Python | Aasthaengg/IBMdataset | /Python_codes/p03000/s923238339.py | UTF-8 | 141 | 2.84375 | 3 | [] | no_license | n, x = map(int, input().split())
cnt = 1
now = 0
for l in map(int, input().split()):
now += l
if now > x:
break
cnt += 1
print(cnt) | true |
c415ff4b9303309d9fc30cde892cd4aec9c9120e | Python | thiagoborba/py-easy-rest | /py_easy_rest/caches/memory.py | UTF-8 | 961 | 2.625 | 3 | [] | no_license | from datetime import datetime, timedelta
from py_easy_rest.caches import Cache
class MemoryCache(Cache):
def __init__(self, initial_data={}, initial_expire_data={}):
self._data = initial_data
self._when_data_expire = initial_expire_data
async def get(self, key):
value = self._data.get(key)
if value is not None:
when_data_expire = self._when_data_expire.get(key)
if when_data_expire and datetime.now() > when_data_expire:
await self.delete(key)
return None
return value
return None
async def set(self, key, value, ttl=None):
self._data[key] = value
if ttl is not None:
when_data_expire = datetime.now() + timedelta(seconds=ttl)
self._when_data_expire[key] = when_data_expire
async def delete(self, key):
self._data.pop(key, None)
self._when_data_expire.pop(key, None)
| true |
8bf859c41bfbd9f5433f12dca994f6154ec98e28 | Python | yinccc/leetcodeEveryDay | /BubbleSort.py | UTF-8 | 852 | 3.796875 | 4 | [] | no_license | def BubbleSort(nums):
length=len(nums)
if length<=1:
return nums
for i in range(length):
for j in range(length-i-1):
if nums[j]>nums[j + 1]:
nums[j], nums[j + 1]= nums[j + 1], nums[j]
return nums
#Time O(n2)
#Space O(1)
arr=[9,8,7,6,5,4,3,2,1]
print(BubbleSort(arr))
def BubbleSort2(nums):
if len(nums)<=1:
return nums
for i in range(len(nums)):
for j in range(len(nums)-i-1):
if nums[j]>nums[j+1]:
nums[j],nums[j+1]=nums[j+1],nums[j]
return nums
print(BubbleSort2(arr))
def BubbleSort3(nums):
if len(nums)<=1:
return nums
for i in range(len(nums)):
for j in range(len(nums)-i-1):
if nums[j]>nums[j+1]:
nums[j],nums[j+1]=nums[j+1],nums[j]
return nums
print(BubbleSort3(arr))
| true |
05b3efb522fc6ffc6860492acfc689418499d70f | Python | tungct/chatbot-accountant | /src/cosine_test.py | UTF-8 | 992 | 2.640625 | 3 | [] | no_license | import sys
sys.path.insert(0, '../../')
sys.path.insert(0, '../')
sys.path.insert(0, '.')
from src.cs import CS
from src.greeting_utils import Greeting
import random
if __name__ == '__main__':
cs = CS(threshold=0.45)
X, y = cs.data_utils.sent_tokens, cs.data_utils.labels
cs_greeting = CS(threshold=0.45)
greeting = Greeting()
X_greeting, y_greeting, map_greeting = greeting.sentences, greeting.labels, greeting.map_greeting
cs_greeting.map_qa = map_greeting
cs.fit(X, y)
cs_greeting.fit(X_greeting, y_greeting)
sentences = ['có gì vui']
pred = cs.predict(sentences)
cl_id = pred[0]
print(cl_id)
if cl_id == 0:
cl_other_id = cs_greeting.predict(sentences)[0]
print(cl_other_id)
if cl_other_id != 0:
answer = random.choice(map_greeting[cl_other_id].split('|')).strip()
else:
answer = cs.response_answer(-1)
else:
answer = cs.response_answer(cl_id)
# print(answer)
| true |
5642132c53b146bdd0a829e823f6633624a4c77f | Python | anebz/ctci | /01. Arrays and strings/hackerrank/reduce_to_palindrome.py | UTF-8 | 663 | 4.0625 | 4 | [] | no_license | # Algorithms > Strings > The Love-Letter Mystery
# https://www.hackerrank.com/challenges/the-love-letter-mystery
import unittest
def theLoveLetterMystery(s):
if len(s) == 1:
return 0
count = 0
for i in range(len(s)//2):
count += abs(ord(s[i]) - ord(s[-1-i]))
return count
class Test(unittest.TestCase):
data = [('abc', 2),
('abcba', 0),
('abcd', 4),
('cba', 2)]
def test(self):
for test_string, expected in self.data:
res = theLoveLetterMystery(test_string)
self.assertEqual(res, expected)
if __name__ == "__main__":
unittest.main()
| true |
95977b8fe3b66e547a9bf9a32aaef4fbe618b213 | Python | zeroryuki/mysolatcli | /mysolatcli/__init__.py | UTF-8 | 1,784 | 2.5625 | 3 | [] | no_license | """
A wrapper around the api.azanpro.com
"""
import requests,time
import requests_cache
from datetime import datetime,timedelta
def secondsinday():
time_delta = datetime.combine(
datetime.now().date() + timedelta(days=1), datetime.strptime("0000", "%H%M").time()
) - datetime.now()
return time_delta.seconds
requests_cache.install_cache('mysolat_cache',expire_after=secondsinday())
__version__ = '1.1.0'
class SolatAPIError(Exception):
"""Raised when API fails"""
pass
class SolatError(SolatAPIError):
"""Raised when API fails"""
def __init__(self, expression, message=""):
self.expression = expression
self.message = message
class SolatAPI:
BASE_URL = 'http://api.azanpro.com'
def __init__(self, user_agent='Python SolatAPI Client/{}'.format(__version__)):
self.headers = {'User-Agent': user_agent}
@staticmethod
def _validate_response(response):
if response['success'] != '1' and response['success'] != 1:
message = "\n".join([ f"{k}: {v}" for k,v in response.items() ])
raise SolatAPIError("success != 1", message=message)
return response
def get_zones(self) -> dict:
return requests.get(self.BASE_URL + "/zone/zones.json", headers=self.headers).json()
def get_negeri(self, state="") -> dict:
return requests.get(self.BASE_URL + "/zone/grouped.json?state=" + state, headers=self.headers).json()
def get_week(self, zone) -> dict:
return requests.get(self.BASE_URL + "/times/this_week.json?format=24-hour&zone=" + zone, headers=self.headers).json()
def get_today(self, zone) -> dict:
return requests.get(self.BASE_URL + "/times/today.json?format=24-hour&zone=" + zone, headers=self.headers).json()
| true |
5a10fd259854d4f0d2ccd13891fbd02526db65d4 | Python | ELW4156/W4156 | /lecture_code/object_relational_mapping/orm/ormservice.py | UTF-8 | 1,731 | 2.875 | 3 | [] | no_license | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask import jsonify
from sqlalchemy.sql import exists
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://' # memory
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
db.create_all()
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
dob = db.Column(db.String(10), unique=False)
def __init__(self, username, email, dob):
self.username = username
self.email = email
self.dob = dob
def __repr__(self):
return '<User %r>' % self.username
def init_db():
"""Initializes the database."""
db.create_all()
@app.route('/')
def hello_world():
return 'Hello, World!'
@app.route('/createuser/<name>')
def create_user(name=None):
exist = db.session.query(exists().where(User.username == name)).scalar()
if exist:
return jsonify(
success=False,
error={'code':0, 'message': "exists"}
)
else:
admin = User(name, name + '@foomail.com', '15/02/15')
db.session.add(admin)
db.session.commit()
return jsonify(
success=True,
error={}
)
@app.route('/listusers')
def list_users():
"""
{
"success": true/false,
"users": { id: username, id:username}
"error": {
"code": 123,
"message": "An error occurred!"
}
}
"""
l = db.session.query(User).all()
l = [i.username for i in l]
return jsonify(
success=True,
users=l
)
if __name__ == "__main__":
app.run() | true |
d8546dc19899c00b0175ffe7a292c337936a20ce | Python | Python3pkg/PyDataset | /pydataset/support.py | UTF-8 | 1,381 | 3.1875 | 3 | [
"MIT"
] | permissive |
from difflib import SequenceMatcher as SM
from collections import Counter
from .locate_datasets import __items_dict
DATASET_IDS = list(__items_dict().keys())
ERROR = ('Not valid dataset name and no similar found! '
'Try: data() to see available.')
def similarity(w1, w2, threshold=0.5):
"""compare two strings 'words', and
return ratio of smiliarity, be it larger than the threshold,
or 0 otherwise.
NOTE: if the result more like junk, increase the threshold value.
"""
ratio = SM(None, str(w1).lower(), str(w2).lower()).ratio()
return ratio if ratio > threshold else 0
def search_similar(s1, dlist=DATASET_IDS, MAX_SIMILARS=10):
"""Returns the top MAX_SIMILARS [(dataset_id : smilarity_ratio)] to s1"""
similars = {s2: similarity(s1, s2)
for s2 in dlist
if similarity(s1, s2)}
# a list of tuples [(similar_word, ratio) .. ]
top_match = Counter(similars).most_common(MAX_SIMILARS+1)
return top_match
def find_similar(query):
result = search_similar(query)
if result:
top_words, ratios = list(zip(*result))
print('Did you mean:')
print((', '.join(t for t in top_words)))
# print(', '.join('{:.1f}'.format(r*100) for r in ratios))
else:
raise Exception(ERROR)
if __name__ == '__main__':
s = 'ansc'
find_similar(s)
| true |
951ca941b1789bec15bc3c78abc47ffe9695d842 | Python | Sobeit-Tim/BigDataProject | /postproc.py | UTF-8 | 576 | 3.140625 | 3 | [] | no_license |
file = open("result.txt", "r")
res = file.read()
file.close()
print("pagerank result, (value, vertex)")
print(res)
res = res.split('\n')
keywords = []
for i in res:
if not i:
continue
temp = i.replace(')', ',').split(',')[1]
keywords.append(temp)
vocab = {}
file = open("vocab.txt", "r")
res2 = file.read()
file.close()
res2 = res2.split('\n')
cnt = 0
for i in res2:
if not i:
continue
vocab[cnt] = i
cnt += 1
word_keyword = []
for i in keywords:
word_keyword.append(vocab[int(i)])
print("top 3 keywords")
print(word_keyword)
| true |
e9c4c195fa72f876a8bedb77c7c958c79f405384 | Python | xk97/repo | /test.py | UTF-8 | 25,047 | 3.09375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 21 20:06:32 2018
@author: ccai
"""
import numpy as np
hour = ["%02d:00" % i for i in range(0, 24, 3)]
day = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
features = day + hour
"{Mon}, {Tue}".format(**{_: i+1 for i, _ in enumerate(day)})
x = list(range(10))
print(x)
y = [x, x]
x = np.power(x, 2)
x = []
#%%
#Machine Learning Algorithm (MLA) Selection and Initialization
MLA = [
#Ensemble Methods
ensemble.AdaBoostClassifier(),
ensemble.BaggingClassifier(),
ensemble.ExtraTreesClassifier(),
ensemble.GradientBoostingClassifier(),
ensemble.RandomForestClassifier(),
#Gaussian Processes
gaussian_process.GaussianProcessClassifier(),
#GLM
linear_model.LogisticRegressionCV(),
linear_model.PassiveAggressiveClassifier(),
linear_model.RidgeClassifierCV(),
linear_model.SGDClassifier(),
linear_model.Perceptron(),
#Navies Bayes
naive_bayes.BernoulliNB(),
naive_bayes.GaussianNB(),
#Nearest Neighbor
neighbors.KNeighborsClassifier(),
#SVM
svm.SVC(probability=True),
svm.NuSVC(probability=True),
svm.LinearSVC(),
#Trees
tree.DecisionTreeClassifier(),
tree.ExtraTreeClassifier(),
#Discriminant Analysis
discriminant_analysis.LinearDiscriminantAnalysis(),
discriminant_analysis.QuadraticDiscriminantAnalysis(),
#xgboost: http://xgboost.readthedocs.io/en/latest/model.html
XGBClassifier()
]
#split dataset in cross-validation with this splitter class: http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.ShuffleSplit.html#sklearn.model_selection.ShuffleSplit
#note: this is an alternative to train_test_split
cv_split = model_selection.ShuffleSplit(n_splits = 10, test_size = .3, train_size = .6, random_state = 0 ) # run model 10x with 60/30 split intentionally leaving out 10%
#create table to compare MLA metrics
MLA_columns = ['MLA Name', 'MLA Parameters','MLA Train Accuracy Mean', 'MLA Test Accuracy Mean', 'MLA Test Accuracy 3*STD' ,'MLA Time']
MLA_compare = pd.DataFrame(columns = MLA_columns)
#create table to compare MLA predictions
MLA_predict = data1[Target]
#index through MLA and save performance to table
row_index = 0
for alg in MLA:
#set name and parameters
MLA_name = alg.__class__.__name__
MLA_compare.loc[row_index, 'MLA Name'] = MLA_name
MLA_compare.loc[row_index, 'MLA Parameters'] = str(alg.get_params())
#score model with cross validation: http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html#sklearn.model_selection.cross_validate
cv_results = model_selection.cross_validate(alg, data1[data1_x_bin], data1[Target], cv = cv_split)
MLA_compare.loc[row_index, 'MLA Time'] = cv_results['fit_time'].mean()
MLA_compare.loc[row_index, 'MLA Train Accuracy Mean'] = cv_results['train_score'].mean()
MLA_compare.loc[row_index, 'MLA Test Accuracy Mean'] = cv_results['test_score'].mean()
#if this is a non-bias random sample, then +/-3 standard deviations (std) from the mean, should statistically capture 99.7% of the subsets
MLA_compare.loc[row_index, 'MLA Test Accuracy 3*STD'] = cv_results['test_score'].std()*3 #let's know the worst that can happen!
#save MLA predictions - see section 6 for usage
alg.fit(data1[data1_x_bin], data1[Target])
MLA_predict[MLA_name] = alg.predict(data1[data1_x_bin])
row_index+=1
#print and sort table: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.sort_values.html
MLA_compare.sort_values(by = ['MLA Test Accuracy Mean'], ascending = False, inplace = True)
MLA_compare
#MLA_predict
#%%
import matplotlib.pyplot as plt
from numpy.random import random
colors = ['b', 'c', 'y', 'm', 'r']
lo = plt.scatter(random(10), random(10), marker='x', color=colors[0])
ll = plt.scatter(random(10), random(10), marker='o', color=colors[0])
l = plt.scatter(random(10), random(10), marker='o', color=colors[1])
a = plt.scatter(random(10), random(10), marker='o', color=colors[2])
h = plt.scatter(random(10), random(10), marker='o', color=colors[3])
hh = plt.scatter(random(10), random(10), marker='o', color=colors[4])
ho = plt.scatter(random(10), random(10), marker='x', color=colors[4])
plt.legend((lo, ll, l, a, h, hh, ho),
('Low Outlier', 'LoLo', 'Lo', 'Average', 'Hi', 'HiHi', 'High Outlier'),
scatterpoints=1,
loc='lower left',
ncol=3,
fontsize=8)
plt.show()
#%%
import numpy
def smooth(x,window_len=11,window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
"""
# if x.ndim != 1:
# raise ValueError, "smooth only accepts 1 dimension arrays."
#
# if x.size < window_len:
# raise ValueError, "Input vector needs to be bigger than window size."
#
#
# if window_len<3:
# return x
# if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
# raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
s=numpy.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
#print(len(s))
if window == 'flat': #moving average
w=numpy.ones(window_len,'d')
else:
w=eval('numpy.'+window+'(window_len)')
y=numpy.convolve(w/w.sum(),s,mode='valid')
return y
from numpy import *
from pylab import *
def smooth_demo():
t=linspace(-4,4,100)
x=sin(t)
xn=x+randn(len(t))*0.1
y=smooth(x)
ws=31
subplot(211)
plot(ones(ws))
windows=['flat', 'hanning', 'hamming', 'bartlett', 'blackman']
hold(True)
for w in windows[1:]:
eval('plot('+w+'(ws) )')
axis([0,30,0,1.1])
legend(windows)
title("The smoothing windows")
subplot(212)
plot(x)
plot(xn)
for w in windows:
plot(smooth(xn,10,w))
l=['original signal', 'signal with noise']
l.extend(windows)
legend(l)
title("Smoothing a noisy signal")
show()
if __name__=='__main__':
smooth_demo()
#%%
x = np.linspace(0,2*np.pi,100)
y = np.sin(x) + np.random.random(100) * 0.8
def smooth(y, box_pts):
box = np.ones(box_pts)/box_pts
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
plot(x, y,'o')
plot(x, smooth(y,3), 'r-', lw=2)
plot(x, smooth(y,19), 'g-', lw=2)
#%%
from scipy import signal
sig = np.repeat([0., 1., 0.], 100)
win = signal.hann(50)
filtered = signal.convolve(sig, win, mode='same') / sum(win)
plt.plot(win)
plt.plot(sig)
plt.plot(filtered)
#%%
from pyspark.sql import SparkSession
ss = SparkSession.builder.appName('abc').getOrCreate()
from pyspark.conf import SparkConf
SparkSession.builder.config(conf=SparkConf())
#%% https://github.com/vishwajeet97/Cocktail-Party-Problem
from scipy.io import wavfile
rate1, data1 = wavfile.read('../data/X_rsm2.wav')
plt.plot(range(data1.shape[0]), data1[:, 0])
plt.plot(range(data1.shape[0]), data1[:, 1])
plt.title((rate1, data1.shape))
x1 = pd.DataFrame(data1[:200]).melt()
plt.scatter(x1.index, x1.value, c=x1.variable, cmap=plt.cm.jet)
print(data1[-5:])
#%%
from pandas import DataFrame
from pandas import Series
from pandas import concat
from pandas import read_csv
from pandas import datetime
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from math import sqrt
from matplotlib import pyplot
import numpy
# date-time parsing function for loading the dataset
def parser(x):
return datetime.strptime('190'+x, '%Y-%m')
# frame a sequence as a supervised learning problem
def timeseries_to_supervised(data, lag=1):
df = DataFrame(data)
columns = [df.shift(i) for i in range(1, lag+1)]
columns.append(df)
df = concat(columns, axis=1)
df.fillna(0, inplace=True)
return df
# create a differenced series
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return Series(diff)
# invert differenced value
def inverse_difference(history, yhat, interval=1):
return yhat + history[-interval]
# scale train and test data to [-1, 1]
def scale(train, test):
# fit scaler
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(train)
# transform train
train = train.reshape(train.shape[0], train.shape[1])
train_scaled = scaler.transform(train)
# transform test
test = test.reshape(test.shape[0], test.shape[1])
test_scaled = scaler.transform(test)
return scaler, train_scaled, test_scaled
# inverse scaling for a forecasted value
def invert_scale(scaler, X, value):
new_row = [x for x in X] + [value]
array = numpy.array(new_row)
array = array.reshape(1, len(array))
inverted = scaler.inverse_transform(array)
return inverted[0, -1]
# fit an LSTM network to training data
def fit_lstm(train, batch_size, nb_epoch, neurons):
X, y = train[:, 0:-1], train[:, -1]
X = X.reshape(X.shape[0], 1, X.shape[1])
model = Sequential()
model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
for i in range(nb_epoch):
model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
model.reset_states()
return model
# make a one-step forecast
def forecast_lstm(model, batch_size, X):
X = X.reshape(1, 1, len(X))
yhat = model.predict(X, batch_size=batch_size)
return yhat[0,0]
# load dataset
fpath = r'C:\Users\cyret\Documents\Python Scripts\data'
series = read_csv(os.path.join(fpath, 'sales-of-shampoo-over-a-three-ye.csv'),
header=0, parse_dates=[0], index_col=0, squeeze=True,
date_parser=None).dropna()
# transform data to be stationary
raw_values = series.values
diff_values = difference(raw_values, 1)
# transform data to be supervised learning
supervised = timeseries_to_supervised(diff_values, 1)
supervised_values = supervised.values
# split data into train and test-sets
train, test = supervised_values[0:-12], supervised_values[-12:]
# transform the scale of the data
scaler, train_scaled, test_scaled = scale(train, test)
# repeat experiment
repeats = 30
error_scores = list()
for r in range(repeats):
# fit the model
lstm_model = fit_lstm(train_scaled, 1, 3000, 4)
# forecast the entire training dataset to build up state for forecasting
train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
lstm_model.predict(train_reshaped, batch_size=1)
# walk-forward validation on the test data
predictions = list()
for i in range(len(test_scaled)):
# make one-step forecast
X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
yhat = forecast_lstm(lstm_model, 1, X)
# invert scaling
yhat = invert_scale(scaler, X, yhat)
# invert differencing
yhat = inverse_difference(raw_values, yhat, len(test_scaled)+1-i)
# store forecast
predictions.append(yhat)
# report performance
rmse = sqrt(mean_squared_error(raw_values[-12:], predictions))
print('%d) Test RMSE: %.3f' % (r+1, rmse))
error_scores.append(rmse)
# summarize results
results = DataFrame()
results['rmse'] = error_scores
print(results.describe())
results.boxplot()
pyplot.show()
#%%
import unittest
class Testing(unittest.TestCase):
def test_string(self):
a = 'some'
b = 'some '
self.assertEqual(a, b)
def test_boolean(self):
a = True
b = True
self.assertEqual(a, b)
if __name__ == '__main__':
unittest.main()
#%% Linear programming A*x <= b, A is matrix coef, b is number of bound
# https://www.jianshu.com/p/9be417cbfebb
import numpy as np
z = np.array([2, 3, 1])
a = np.array([[1, 4, 2], [3, 2, 0]])
b = np.array([8, 6])
x1_bound = x2_bound = x3_bound =(0, None)
from scipy import optimize
res = optimize.linprog(z, A_ub=-a, b_ub=-b,bounds=(x1_bound, x2_bound, x3_bound))
# a_ub, b_ub -> bound, a_eq, b_eq -> equal
print(res)
#%%
import unittest
# This is the class we want to test. So, we need to import it
import Person as PersonClass
class Test(unittest.TestCase):
"""
The basic class that inherits unittest.TestCase
"""
person = PersonClass.Person() # instantiate the Person Class
user_id = [] # variable that stores obtained user_id
user_name = [] # variable that stores person name
# test case function to check the Person.set_name function
def test_0_set_name(self):
print("Start set_name test\n")
"""
Any method which starts with ``test_`` will considered as a test case.
"""
for i in range(4):
# initialize a name
name = 'name' + str(i)
# store the name into the list variable
self.user_name.append(name)
# get the user id obtained from the function
user_id = self.person.set_name(name)
# check if the obtained user id is null or not
self.assertIsNotNone(user_id) # null user id will fail the test
# store the user id to the list
self.user_id.append(user_id)
print("user_id length = ", len(self.user_id))
print(self.user_id)
print("user_name length = ", len(self.user_name))
print(self.user_name)
print("\nFinish set_name test\n")
# test case function to check the Person.get_name function
def test_1_get_name(self):
print("\nStart get_name test\n")
"""
Any method that starts with ``test_`` will be considered as a test case.
"""
length = len(self.user_id) # total number of stored user information
print("user_id length = ", length)
print("user_name length = ", len(self.user_name))
for i in range(6):
# if i not exceed total length then verify the returned name
if i < length:
# if the two name not matches it will fail the test case
self.assertEqual(self.user_name[i], self.person.get_name(self.user_id[i]))
else:
print("Testing for get_name no user test")
# if length exceeds then check the 'no such user' type message
self.assertEqual('There is no such user', self.person.get_name(i))
print("\nFinish get_name test\n")
if __name__ == '__main__':
# begin the unittest.main()
unittest.main()
#%%
import random
import nltk
from nltk.corpus import movie_reviews
print(nltk.pos_tag(nltk.word_tokenize('Albert Einstein was born in Ulm, Germany in 1879.')))
print(movie_reviews.categories(), len(movie_reviews.fileids()), len(movie_reviews.words()))
documents = [(list(movie_reviews.words(fileid)), category)
for category in movie_reviews.categories()
for fileid in movie_reviews.fileids(category)]
random.shuffle(documents)
all_words = nltk.FreqDist(w.lower() for w in movie_reviews.words())
word_features = list(all_words)[:2000]
def document_features(document):
document_words = set(document)
features = {}
for word in word_features:
features['contains({})'.format(word)] = (word in document_words)
return features
print(document_features(movie_reviews.words('pos/cv957_8737.txt')))
#{'contains(waste)': False, 'contains(lot)': False, ...}
featuresets = [(document_features(d), c) for (d,c) in documents]
train_set, test_set = featuresets[100:], featuresets[:100]
classifier = nltk.NaiveBayesClassifier.train(train_set)
print(nltk.classify.accuracy(classifier, test_set))
classifier.show_most_informative_features(5)
#%%
import pyspark
spark = pyspark.sql.SparkSession.builder.appName('test').getOrCreate()
print(spark.range(10).collect())
from pyspark import SQLContext
sqlContext = SQLContext(spark)
dataset = sqlContext.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
dataset.show()
#%%
from pyspark import SparkConf, SparkContext
conf = SparkConf().setMaster("local").setAppName("My App")
sc = SparkContext(conf = conf)
import random
NUM_SAMPLES = 10000
def inside(p):
x, y = random.random(), random.random()
return x*x + y*y < 1
count = sc.parallelize(list(range(0, NUM_SAMPLES)), 1).filter(inside).count()
pi = 4 * count / NUM_SAMPLES
print('Pi is roughly', pi)
sc.close()
#%%
import pyspark
import findspark
findspark.init()
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('data_processing').getOrCreate()
fpath = os.getcwd()
df=spark.read.csv('C:\\Users\\cyret\\Documents\\Python Scripts\\data/sales-of-shampoo-over-a-three-ye.csv',inferSchema=True, header=True)
df.columns
df.printSchema()
df.describe().show()
df.withColumn('total', df[df.columns[-1]]).show(3)
df = df.withColumn('time', df[df.columns[0]]).withColumn('total', df[df.columns[-1]])
df.filter(df['total'] > 200).select(['time', 'TIME', 'total']).show(5)
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType,DoubleType
split_col = pyspark.sql.functions.split(df['time'], '-')
df = df.withColumn('year', split_col.getItem(0))
df = df.withColumn('month', split_col.getItem(1))
df.withColumn('year_double',df['year'].cast(DoubleType())).show(10,False)
df.select('month').show(5)
df.select('year').distinct().show(5)
df.select(['year', 'total']).groupBy('year').mean().show(5,False)
df.show(5)
df_new = df.drop('Sales of shampoo over a three year period').dropna()
df_new = df_new.withColumn('year_double', df_new['year'].cast(DoubleType()))#.show(5)
df_new = df_new.withColumn('month_double', df_new['month'].cast(DoubleType()))#.show(5)
print('do pandas udf\n')
from pyspark.sql.functions import pandas_udf
def prod(month, year):
return 12 * (year - 1.0) + month
prod_udf = pandas_udf(prod, DoubleType())
df_new.withColumn('prod', prod_udf(df_new['month_double'], df_new['year_double'])).show(5)
print(pwd)
df.coalesce(1).write.format('csv').option('header', 'true').save('../data/sample_csv')
df_new.dropna().write.format('parquet').save('../data/parquet_uri')
df_new.show(5)
from pyspark.ml.linalg import Vector
from pyspark.ml.feature import VectorAssembler
vec_assembler = VectorAssembler(inputCols=['month_double', 'year_double'], outputCol='features')
df_new.printSchema()
print(df_new.count(), len(df_new.columns))
df_feature = vec_assembler.transform(df_new)
df_feature.show(5)
df_train, df_test = df_feature.randomSplit([0.7,0.3], seed=42)
from pyspark.ml.regression import LinearRegression
lin_reg = LinearRegression(labelCol='total')
lr_model = lin_reg.fit(df_train)
print(lr_model.coefficients, '\n', lr_model.intercept)
train_prediction = lr_model.evaluate(df_train)
print(train_prediction.r2, train_prediction.meanAbsoluteError)
test_prediction = lr_model.evaluate(df_test)
print(test_prediction.r2, test_prediction.meanAbsoluteError)
test_prediction.predictions.show(3)
from pyspark.ml.regression import RandomForestRegressor
rf_model = RandomForestRegressor(featuresCol='features',
labelCol='total', numTrees=100).fit(df_train)
predictions = rf_model.transform(df_test)
predictions.show()
rf_model.featureImportances
from pyspark.mllib.evaluation import RegressionMetrics
from pyspark.ml.evaluation import RegressionEvaluator
# Select (prediction, true label) and compute test error
evaluator = RegressionEvaluator(
labelCol="total", predictionCol="prediction", metricName="rmse")
rmse = evaluator.evaluate(predictions)
print("Root Mean Squared Error (RMSE) on test data = %g" % rmse)
rf_model.stages[1]
print(rf_model) # summary only
from pyspark.ml.feature import StandardScaler
scaler = StandardScaler(inputCol="features", outputCol="scaledFeatures", withStd=True, withMean=False)
scaler.fit(df_train).transform(df_train).show()
spark.stop()
#%%
class Solution:
def twoSum1(self, nums: 'List[int]', target: 'int') -> 'List[int]':
if len(nums) == 0: return []
for i, valuei in enumerate(nums[:-1]):
value = target - valuei
for j, valuej in enumerate(nums[i+1:]):
if valuej == value:
return [i, i + 1 + j]
else:
pass
def twoSum(self, nums: 'List[int]', target: 'int') -> 'List[int]':
if len(nums) < 2: return []
dic = {}
for i, v in enumerate(nums):
print(i, v, target - v )
if target - v in dic:
print(dic)
return [dic[target - v], i]
else:
print(v, dic)
dic[v] = i
Solution().twoSum(nums = [2, 7, 11, 15], target = 9)
#%%
class Solution:
    """LeetCode "3Sum": all unique triplets in nums that sum to zero."""
    def threeSum(self, nums: 'List[int]') -> 'List[List[int]]':
        """Sort, then for each anchor run a two-pointer scan. O(n^2).

        Returns triplets as lists, in ascending order, without duplicates.
        """
        if len(nums) < 3: return []
        s = sorted(nums)
        ans = []
        for i in range(len(s) - 2):
            # Skip duplicate anchors so each triplet is reported once.
            if i > 0 and s[i] == s[i-1]: continue
            left, right = i + 1, len(s) - 1
            while left < right:
                su = s[i] + s[left] + s[right]
                if su < 0:
                    left += 1
                elif su > 0:
                    right -= 1
                else:
                    ans.append([s[i], s[left], s[right]])
                    # Step both pointers past any duplicates of the matched values.
                    while (left < right) and (s[left] == s[left+1]): left += 1
                    while (left < right) and (s[right] == s[right-1]): right -= 1
                    right -= 1
                    left += 1
        return ans
    def threeSum1(self, nums):
        """Same algorithm as threeSum, but returns triplets as tuples.

        NOTE: sorts the caller's list in place (original behavior kept).
        """
        res = []
        nums.sort()
        for i in range(len(nums)-2):
            if i > 0 and nums[i] == nums[i-1]:
                continue
            l, r = i+1, len(nums)-1
            while l < r:
                s = nums[i] + nums[l] + nums[r]
                if s < 0:
                    l += 1
                elif s > 0:
                    r -= 1
                else:
                    res.append((nums[i], nums[l], nums[r]))
                    while l < r and nums[l] == nums[l+1]:
                        l += 1
                    while l < r and nums[r] == nums[r-1]:
                        r -= 1
                    l += 1; r -= 1
        return res
#Solution().threeSum([-4, -1, -1, 0, 1, 2])
Solution().threeSum([-4,-2,-2,-2,0,1,2,2,2,3,3,4,4,6,6])
#%%
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from numpy import array
from keras.models import load_model
# return training data
def get_train():
    """Return the toy training set: X of shape (5, 1, 1) and targets y of shape (5,)."""
    pairs = array([[0.0, 0.1], [0.1, 0.2], [0.2, 0.3], [0.3, 0.4], [0.4, 0.5]])
    inputs, targets = pairs[:, 0], pairs[:, 1]
    # Keras LSTMs expect (samples, timesteps, features); here 1 timestep, 1 feature.
    return inputs.reshape((len(inputs), 1, 1)), targets
# Build a minimal LSTM regressor: one recurrent layer feeding a linear output.
model = Sequential()
model.add(LSTM(10, input_shape=(1,1)))  # 10 units; each sample is 1 timestep x 1 feature
model.add(Dense(1, activation='linear'))
# Mean-squared-error regression trained with Adam.
model.compile(loss='mse', optimizer='adam')
# Fit on the tiny toy sequence; shuffle=False keeps sample order fixed
# (presumably to mimic a time series -- TODO confirm intent).
X,y = get_train()
model.fit(X, y, epochs=300, shuffle=False, verbose=0)
# Persist the whole model (architecture + weights + optimizer state) to one HDF5 file.
model.save('lstm_model.h5')
# snip...
# later, perhaps run from another script
# Round-trip check: reload the saved model from the single file...
model = load_model('lstm_model.h5')
# ...and predict on the training inputs to confirm it still works.
yhat = model.predict(X, verbose=0)
print(yhat)
#%%
import pandas as pd
from sklearn.datasets import load_boston
import ggplot
from ggplot import * # aes, geom_density, scale_color_brewer, facet_wrap
# Load the Boston housing data as a Bunch and wrap its feature matrix in a DataFrame.
data = load_boston(return_X_y=False)
df = pd.DataFrame(data.data, columns=data.feature_names)
(ggplot(df, aes(x='CRIM', y='AGE')) + \
geom_point() +\
facet_wrap('RAD')
+ ggtitle("Area vs Population"))
(ggplot(df, aes(x='CRIM', y='AGE'))
+ geom_point()
+ geom_step(method = 'loess')
+ ggtitle("Area vs Population"))
(ggplot(aes(x='ZN'), data=df)
+ geom_bar()
+ ggtitle("Area vs Population"))
| true |
457a57727f0f1c8077232ffc4543ffc26d1fde8b | Python | arunmastermind/AWS-examples-using-BOTO3 | /translate/TranslateText.py | UTF-8 | 399 | 2.671875 | 3 | [] | no_license | import boto3
# Amazon Translate client (credentials/region come from the default boto3 config).
translate = boto3.client('translate')
# Translate a fixed English string into German.
result = translate.translate_text(Text="Hello, World",
                                  SourceLanguageCode="en",
                                  TargetLanguageCode="de")
# Echo the translation and the language codes the service reports back.
print(f'TranslatedText: {result["TranslatedText"]}')
print(f'SourceLanguageCode: {result["SourceLanguageCode"]}')
print(f'TargetLanguageCode: {result["TargetLanguageCode"]}')
95afff888030cea1bf855c5d34bcd1392b7d6064 | Python | EvanGrandfield1/BeerMe | /beer_advocate_scraper_v2.py | UTF-8 | 2,205 | 2.890625 | 3 | [] | no_license | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup as soup
import pandas as pd
import itertools
url = "https://www.beeradvocate.com/beer/styles/"
# Deal with empty cells
def replaceBlank(x):
    """Map the empty string to 0; pass every other value through unchanged."""
    return 0 if x == '' else x
# Create a new Chrome session and open the beer-style index page.
driver = webdriver.Chrome('/Users/evangrandfield/Desktop/BeerMe/chromedriver')
driver.implicitly_wait(30)
driver.get(url)
# Collect every beer style name and the link to that style's listing page.
html_list = driver.find_element_by_id("ba-content")
items = html_list.find_elements_by_tag_name("li")
base_links = html_list.find_elements_by_tag_name("a")
urls = [base_link.get_attribute("href") for base_link in base_links]
styles = [item.text for item in items]
# Visit each style's starting page and scrape its paginated ratings table.
# NOTE(review): range(0, len(urls) - 1) skips the last link -- presumably the
# final <a> in the content block is not a style page; confirm against the markup.
for i in range(0, len(urls) - 1):
    driver.get(urls[i])
    style_type = str(styles[i])
    base_url = str(driver.current_url)
    Name = []
    Brewery = []
    ABV = []
    Ratings = []
    Score = []
    Style = []
    d = {'Name': Name, 'Brewery': Brewery, 'ABV': ABV, 'Ratings': Ratings, 'Score': Score, 'Style': Style}
    # Walk the pagination (50 beers per page) until an empty table appears,
    # then dump what was collected for this style and move on.
    for j in range(0, 40000, 50):
        next_url = base_url[0:len(base_url)-1] + '?sort=revsD&start={0}'.format(j)
        driver.get(next_url)
        html = driver.page_source
        page_soup = soup(html, 'lxml')
        table = page_soup.find('table')
        rows = table.find_all('tr')
        iterrows = iter(rows)
        # The first three <tr> rows are header/decoration, not beer data.
        next(iterrows)
        next(iterrows)
        next(iterrows)
        if len(rows) > 4:
            for row in iterrows:
                columns = row.find_all('td')
                # Five cells (indexes 0-4) are read below; the original ">= 4"
                # guard admitted 4-cell rows and crashed on columns[4].
                if len(columns) >= 5:
                    Name.append(replaceBlank(columns[0].text))
                    Brewery.append(replaceBlank(columns[1].text))
                    ABV.append(replaceBlank(columns[2].text))
                    Ratings.append(replaceBlank(columns[3].text))
                    Score.append(replaceBlank(columns[4].text))
                    Style.append(style_type)
        else:
            # Empty page: persist this style's rows to CSV and stop paginating.
            ratings = pd.DataFrame(d)
            path = '/Users/evangrandfield/Desktop/BeerMe/beers{0}.csv'.format(i)
            ratings.to_csv(path)
            break
| true |
ceb1c5a84446af6bf0f2a285c9dde213633813d0 | Python | AgnesMartinez/Huachilate-tools | /core.py | UTF-8 | 6,100 | 2.9375 | 3 | [] | no_license | import time
import sqlite3
import operator
import random
import string
class HuachiNet():
    """Huachicol as a service -- a toy SQLite-backed "bank".

    Construction takes a username and eagerly loads, via queries against
    boveda.sqlite3:
        - total balance (saldo_total)
        - movement histories: global, deposits ("Deposito"), withdrawals
          ("Retiro"), plus the game-specific note types "Asalto",
          "Huachito", "Premio Huachito", "Atraco" and "Levanton"

    Operations available inside the network:
        - Bono_Bienvenida: welcome bonus for new clients
        - Verificar_Usuario: check whether a client exists
        - Enviar_Bineros: transfer bineros between users

    If the user does not exist in the DB (or a query fails), methods
    print the error and fall through to returning None.
    """
    def __init__(self,usuario):
        # Open (or create) the SQLite vault and cache this user's state up front.
        self.conn = sqlite3.connect("boveda.sqlite3")
        self.cursor = self.conn.cursor()
        self.id = usuario
        self.saldo_total = self.Consultar_Saldo()
        self.historial = self.Historial_Cuenta("Global")
        self.depositos = self.Historial_Cuenta("Deposito")
        self.retiros = self.Historial_Cuenta("Retiro")
        self.asaltos = self.Historial_Cuenta("Asalto")
        self.huachitos = self.Historial_Cuenta("Huachito")
        self.premios_huachito = self.Historial_Cuenta("Premio Huachito")
        self.atracos = self.Historial_Cuenta("Atraco")
        self.levantones = self.Historial_Cuenta("Levanton")
    def Bono_Bienvenida(self,usuario):
        """Grant the 1000-binero welcome bonus to a new client.

        Records two rows: a credit to `usuario` and the matching debit from
        the house account "Bodega". Errors are printed, never raised.
        """
        query = """INSERT INTO transacciones (timestamp,usuario,cantidad,nota,origen_destino) VALUES (?,?,?,?,?)"""
        timestamp = time.time()
        try:
            self.cursor.execute(query,(timestamp,usuario,1000,"Bono Inicial","Bodega"))
            self.cursor.execute(query,(timestamp,"Bodega",-1000,"Retiro",usuario))
            self.conn.commit()
        except Exception as e:
            print(f'----\n{e}')
    def Verificar_Usuario(self,usuario):
        """Return True if the client exists in the DB, False otherwise.

        NOTE(review): both branches return on the first fetched row, and
        None is returned when the query fails or yields no rows -- confirm
        callers expect that three-way result.
        """
        query = """SELECT * FROM transacciones WHERE usuario=?"""
        try:
            self.cursor.execute(query,(usuario,))
            resultado = self.cursor.fetchall()
            if resultado != []:
                for item in resultado:
                    if usuario in item:
                        return True
                    else:
                        return False
        except Exception as e:
            print(f'----\n{e}')
    def Enviar_Bineros(self,usuario,cantidad,nota="Default"):
        """Transfer `cantidad` bineros from this account to `usuario`.

        With the default note the pair is labeled "Deposito"/"Retiro";
        a custom `nota` labels both sides of the transfer with it.
        """
        query = """INSERT INTO transacciones (timestamp,usuario,cantidad,nota,origen_destino) VALUES (?,?,?,?,?)"""
        timestamp = time.time()
        try:
            if nota == "Default":
                self.cursor.execute(query,(timestamp,usuario,cantidad,"Deposito",self.id))
                # negativo is simply -cantidad: the debit on the sender's side.
                negativo = cantidad - (cantidad * 2)
                self.cursor.execute(query,(timestamp,self.id,negativo,"Retiro",usuario))
                self.conn.commit()
            elif nota != "Default":
                self.cursor.execute(query,(timestamp,usuario,cantidad,nota,self.id))
                negativo = cantidad - (cantidad * 2)
                self.cursor.execute(query,(timestamp,self.id,negativo,nota,usuario))
                self.conn.commit()
        except Exception as e:
            print(f'----\n{e}')
    def Consultar_Saldo(self):
        """Return the client's total balance (SUM of all movements), or None on error."""
        query = """SELECT SUM(cantidad) FROM transacciones WHERE usuario=?"""
        try:
            self.cursor.execute(query,(self.id,))
            resultado = self.cursor.fetchall()
            return resultado[0][0]
        except Exception as e:
            print(f'----\n{e}')
    def Historial_Cuenta(self,tipo_movimiento):
        """Return the client's movement history since account creation, newest first.

        "Global" returns every movement (including the note column); any
        other value filters rows by that note type.
        """
        query = """SELECT id,timestamp,cantidad,nota,origen_destino FROM transacciones WHERE usuario=? ORDER BY id DESC"""
        query2 = """SELECT id,timestamp,cantidad,origen_destino FROM transacciones WHERE usuario=? AND nota=? ORDER BY id DESC"""
        try:
            if tipo_movimiento == "Global":
                self.cursor.execute(query,(self.id,))
                resultado = self.cursor.fetchall()
                return resultado
            elif tipo_movimiento != "Global":
                self.cursor.execute(query2,(self.id,tipo_movimiento))
                resultado = self.cursor.fetchall()
                return resultado
        except Exception as e:
            print(f'----\n{e}')
    def Ranking(self):
        """Forbes Mujico: (user, balance) pairs sorted richest first."""
        # Every user that ever received the welcome bonus.
        query = """SELECT usuario FROM transacciones WHERE nota='Bono Inicial'"""
        clientes = [item[0] for item in self.cursor.execute(query).fetchall()]
        # Look up each user's balance and collect the results into a dict.
        rank = {}
        query2 = """SELECT SUM(cantidad) FROM transacciones WHERE usuario = ?"""
        for cliente in clientes:
            if cliente != None:
                cantidad = self.cursor.execute(query2,(cliente,)).fetchall()
                rank[cliente] = cantidad[0][0]
        return sorted(rank.items(), key=operator.itemgetter(1), reverse=True)
    def Huachiclave(self):
        """Return the pending lottery code, creating a new one if none is pending.

        A fresh code is a random 7-char alphanumeric string worth a random
        5000-50000 prize; rows/returns are tuples of
        (timestamp, huachiclave, cantidad, entregado).
        """
        query = """SELECT timestamp,huachiclave,cantidad,entregado FROM huachilate WHERE entregado = '0' ORDER BY timestamp"""
        query2 = """INSERT INTO huachilate (timestamp,huachiclave,cantidad,entregado) VALUES (?,?,?,?)"""
        resultado = self.cursor.execute(query).fetchall()
        if resultado == []:
            timestamp = time.time()
            huachiclave = "".join(random.choices(string.ascii_letters + string.digits,k = 7))
            cantidad = random.randint(5000,50000)
            self.cursor.execute(query2,(timestamp,huachiclave,cantidad,0))
            self.conn.commit()
            return (timestamp,huachiclave,cantidad,0)
        else:
            return resultado[-1]
| true |
29898ae7ff96a7a73d44190e20d5e236222564e4 | Python | bpcrao/my-python-learnings | /IterTools.py | UTF-8 | 142 | 2.984375 | 3 | [] | no_license | from itertools import accumulate, takewhile
# Prefix sums of 0..9: [0, 1, 3, 6, 10, 15, 21, 28, 36, 45].
lista = list(accumulate(range(10)))
print(lista)
# Leading run of sums still below 10: [0, 1, 3, 6].
print(list(takewhile(lambda x: x < 10, lista)))
69aeb66a233800d0e5848f25279c8b61d9a02aca | Python | Aasthaengg/IBMdataset | /Python_codes/p02831/s564333677.py | UTF-8 | 91 | 3.015625 | 3 | [] | no_license | a,b=map(int,input().split())
def g(x,y):
    """Greatest common divisor of x and y via Euclid's algorithm."""
    return g(y, x % y) if y else x
print(a*b//g(a,b)) | true |
190f77c910cea0ed83973dee0f65632168afd685 | Python | mgrose31/cn2_forecast | /create_dataset.py | UTF-8 | 9,686 | 2.59375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 30 17:27:10 2020
@author: mgrose
"""
# %% import libraries
import os
import pandas as pd
import numpy as np
from helpers import read_davis_weather, extract_DELTA_summary_data_single_dir
from helpers import window_average
import matplotlib.pyplot as plt
from datetime import date, datetime, timedelta
win_hms = [0,30,0]
int_hms = [0,1,0]
# %% read in the weather and turbulence data
path_wx = os.path.abspath(r"D:\Documents\EOP Program\Research\cn2_forecast\data\MZA_Wx_data\2020_WxData.txt")
path_cn2 = os.path.abspath(r"D:\Documents\EOP Program\Research\cn2_forecast\data\Fitz_Hall_DELTA_data")
# Weather from the Davis station; Cn2 summaries from the DELTA instrument
# (50/70 are positional arguments to the project helper -- TODO confirm meaning).
df_wx = read_davis_weather(os.path.abspath(path_wx))
df_cn2 = extract_DELTA_summary_data_single_dir(os.path.abspath(path_cn2), 50, 70)
# %% convert air density and pressure to correct values at certain times
air_density = df_wx['air_density']
air_density_indices = air_density <= 0.5 # rows where air density was logged in lb/ft^3
df_wx.loc[air_density_indices, 'air_density'] = \
    df_wx.loc[air_density_indices, 'air_density'] * 16.02 # convert lb/ft^3 to kg/m^3
# Pressure readings before 2020-04-21 17:25 get scaled by 0.9690 and rounded
# to the nearest 10 -- presumably a sensor recalibration cutover; verify.
press_indices = df_wx.loc[:, 'DATE'].dt.to_pydatetime() <= datetime(2020,4,21,17,25,0)
df_wx.loc[press_indices, 'press'] = round(df_wx.loc[press_indices, 'press'] * (0.9690), -1)
# %% plot turbulence (cn2) and weather data
# plt.figure(figsize=(10, 6))
# plt.plot(df_cn2['DATE'], df_cn2['cn2'], 'k.', markersize=4)
# plt.yscale('log')
# plt.ylim(1e-17, 1e-13)
# plt.xlabel('local time (EST)')
# plt.ylabel('$C_{n}^{2} (m^{-2/3})$')
# plt.xticks(rotation=30)
# plt.grid(True)
# plt.tight_layout()
# plt.figure(figsize=(10, 6))
# plt.plot(df_wx['DATE'], df_wx['temp'], 'k.', markersize=4)
# plt.xlabel('local time (EST)')
# plt.ylabel('temperature (K)')
# plt.xticks(rotation=30)
# plt.grid(True)
# plt.tight_layout()
# plt.figure(figsize=(10, 6))
# plt.plot(df_wx['DATE'], df_wx['press'], 'c.', markersize=4)
# plt.xlabel('local time (EST)')
# plt.ylabel('pressure (Pa)')
# plt.xticks(rotation=30)
# plt.grid(True)
# plt.tight_layout()
# plt.figure(figsize=(10, 6))
# plt.plot(df_wx['DATE'], df_wx['rh'], 'm.', markersize=4)
# plt.ylim(0, 100)
# plt.xlabel('local time (EST)')
# plt.ylabel('relative humidity (%)')
# plt.xticks(rotation=30)
# plt.grid(True)
# plt.tight_layout()
# plt.figure(figsize=(10, 6))
# plt.plot(df_wx['DATE'], df_wx['rain_rate'], 'y.', markersize=4)
# plt.xlabel('local time (EST)')
# plt.ylabel('rain rate')
# plt.xticks(rotation=30)
# plt.grid(True)
# plt.tight_layout()
# plt.figure(figsize=(10, 6))
# plt.plot(df_wx['DATE'], df_wx['u_wind'], 'g.', markersize=4)
# plt.ylim(-10, 10)
# plt.xlabel('local time (EST)')
# plt.ylabel('u_wind (m/s)')
# plt.xticks(rotation=30)
# plt.grid(True)
# plt.tight_layout()
# plt.figure(figsize=(10, 6))
# plt.plot(df_wx['DATE'], df_wx['v_wind'], 'b.', markersize=4)
# plt.ylim(-10, 10)
# plt.xlabel('local time (EST)')
# plt.ylabel('v_wind (m/s)')
# plt.xticks(rotation=30)
# plt.grid(True)
# plt.tight_layout()
# plt.figure(figsize=(10, 6))
# plt.plot(df_wx['DATE'], df_wx['solar_irr'], 'r.', markersize=4)
# plt.xlabel('local time (EST)')
# plt.ylabel('solar irradiance ($W/m^{2}$)')
# plt.xticks(rotation=30)
# plt.grid(True)
# plt.tight_layout()
# %% format weather data
start_day = date(2020, 4, 12)
end_day = date(2020, 8, 11)
wx_date_range = pd.date_range(start_day, end_day, freq='30min')
wx_dates = df_wx['DATE']
print("Window averaging temperature...")
df_win_avg = window_average(x=df_wx['temp'],
t=wx_dates,
win_hms=win_hms, int_hms=int_hms)
t = df_win_avg['t_win_avg']
temp = df_win_avg['x_win_avg'].to_numpy()
print("Window averaging pressure...")
df_win_avg = window_average(x=df_wx['press'],
t=wx_dates,
win_hms=win_hms, int_hms=int_hms)
press = df_win_avg['x_win_avg'].to_numpy()
print("Window averaging relative humidity...")
df_win_avg = window_average(x=df_wx['rh'],
t=wx_dates,
win_hms=win_hms, int_hms=int_hms)
rh = df_win_avg['x_win_avg'].to_numpy()
print("Window averaging wind speed...")
df_win_avg = window_average(x=df_wx['wind_speed'],
t=wx_dates,
win_hms=win_hms, int_hms=int_hms)
wind_speed = df_win_avg['x_win_avg'].to_numpy()
print("Window averaging solar irradiance...")
df_win_avg = window_average(x=df_wx['solar_irr'],
t=wx_dates,
win_hms=win_hms, int_hms=int_hms)
solar_irr = df_win_avg['x_win_avg'].to_numpy()
# plt.figure()
# plt.plot(t, temp, 'k.')
# plt.grid(True)
# plt.grid(True, 'minor')
# plt.tight_layout()
# plt.figure()
# plt.plot(t, press, 'c.')
# plt.grid(True)
# plt.grid(True, 'minor')
# plt.tight_layout()
# plt.figure()
# plt.plot(t, rh, 'm.')
# plt.ylim(0, 100)
# plt.grid(True)
# plt.grid(True, 'minor')
# plt.tight_layout()
# plt.figure()
# plt.plot(t, wind_speed, 'b.')
# plt.grid(True)
# plt.grid(True, 'minor')
# plt.tight_layout()
# plt.figure()
# plt.plot(t, solar_irr, 'r.')
# plt.grid(True)
# plt.grid(True, 'minor')
# plt.tight_layout()
temp_interp = np.interp(wx_date_range, t, temp, np.nan, np.nan)
press_interp = np.interp(wx_date_range, t, press, np.nan, np.nan)
rh_interp = np.interp(wx_date_range, t, rh, np.nan, np.nan)
windspd_interp = np.interp(wx_date_range, t, wind_speed, np.nan, np.nan)
solarirr_interp = np.interp(wx_date_range, t, solar_irr, np.nan, np.nan)
dict_wx_sample = {
'DATE': wx_date_range,
'temp': temp_interp,
'press': press_interp,
'rh': rh_interp,
'wind_spd': windspd_interp,
'solar_irr': solarirr_interp}
df_wx_sample = pd.DataFrame(dict_wx_sample)
# get indices of interplations greater than 30 minutes
wx_idx = np.array([])
for this_time in wx_date_range:
wx_idx = np.append(wx_idx, any(abs(wx_dates-this_time)<timedelta(minutes=30)))
print("Removing {} interpolated weather measurements...".format(int(len(wx_idx) - wx_idx.sum())))
df_wx_sample.loc[wx_idx==0] = np.nan
df_wx_sample['DATE'] = wx_date_range
# plot interpolated weather data
plt.figure(figsize=(10, 6))
plt.plot(df_wx_sample['DATE'], df_wx_sample['temp'], 'k.', markersize=4)
plt.title("Sampled Weather: Temperature")
plt.ylabel("Temperature (K)")
plt.grid(True)
plt.xticks(rotation=45)
plt.tight_layout()
plt.figure(figsize=(10, 6))
plt.plot(df_wx_sample['DATE'], df_wx_sample['press'], 'c.', markersize=4)
plt.title("Sampled Weather: Pressure")
plt.ylabel("Pressure (Pa)")
plt.grid(True)
plt.xticks(rotation=45)
plt.tight_layout()
plt.figure(figsize=(10, 6))
plt.plot(df_wx_sample['DATE'], df_wx_sample['rh'], 'm.', markersize=4)
plt.ylim(0, 100)
plt.title("Sampled Weather: Relative Humidity")
plt.ylabel("Relative Humidity (%)")
plt.grid(True)
plt.xticks(rotation=45)
plt.tight_layout()
plt.figure(figsize=(10, 6))
plt.plot(df_wx_sample['DATE'], df_wx_sample['wind_spd'], 'y.', markersize=4)
plt.title("Sampled Weather: wind speed")
plt.ylabel("wind speed (m/s)")
plt.grid(True)
plt.xticks(rotation=45)
plt.tight_layout()
plt.figure(figsize=(10, 6))
plt.plot(df_wx_sample['DATE'], df_wx_sample['solar_irr'], 'r.', markersize=4)
plt.title("Sampled Weather: Solar Irradiance")
plt.ylabel("Solar Irradiance ($W/m^{2}$)")
plt.grid(True)
plt.xticks(rotation=45)
plt.tight_layout()
# %% format turbulence dataset
print("Window averaging...")
# Smooth log10(Cn2) with the same 30-min window / 1-min step used for weather.
df_cn2_win_avg = window_average(x=df_cn2['log10_cn2'],
                                t=df_cn2['DATE'],
                                win_hms=win_hms, int_hms=int_hms)
dates_cn2 = pd.to_datetime(df_cn2_win_avg['t_win_avg'])
# Resample onto the common 30-min grid; out-of-range points become NaN.
log10_cn2_interp = np.interp(wx_date_range, dates_cn2, df_cn2_win_avg['x_win_avg'], np.nan, np.nan)
dict_log10_cn2_sample = {
    'DATE': wx_date_range,
    'log10_cn2': log10_cn2_interp}
df_cn2_sample = pd.DataFrame(dict_log10_cn2_sample)
# Flag grid points with no real measurement within 30 minutes (True/1 = valid).
idx1 = np.array([]);
for this_time in wx_date_range:
    idx1 = np.append(idx1, any(abs(dates_cn2-this_time)<timedelta(minutes=30)))
print("Removing {} interplated turbulence measurements...".format(int(len(idx1) - idx1.sum())))
# Blank the interpolated-only samples so they don't masquerade as data.
log10_cn2_tmp = df_cn2_sample['log10_cn2'].to_numpy()
log10_cn2_tmp[idx1==0] = np.nan
df_cn2_sample['log10_cn2'] = log10_cn2_tmp
# df_cn2_sample['log10_cn2'].iloc[idx1==0] = np.nan
# %% combine weather and turbulence datasets into one
# Re-index both frames by DATE so pd.concat aligns them column-wise.
df_wx_sample.index = df_wx_sample['DATE']
df_wx_sample = df_wx_sample.drop(columns='DATE')
df_cn2_sample.index = df_cn2_sample['DATE']
df_cn2_sample = df_cn2_sample.drop(columns='DATE')
dataset = pd.concat([df_wx_sample, df_cn2_sample], axis=1)
# # %%
# plt.figure(figsize=(10, 6))
# plt.plot(dataset.index, dataset['temp'], 'k.')
# plt.grid(True)
# plt.grid(True, 'minor')
# plt.tight_layout()
# plt.figure(figsize=(10, 6))
# plt.plot(dataset.index, dataset['press'], 'k.')
# plt.grid(True)
# plt.grid(True, 'minor')
# plt.tight_layout()
# plt.figure(figsize=(10, 6))
# plt.plot(dataset.index, dataset['rh'], 'k.')
# plt.grid(True)
# plt.grid(True, 'minor')
# plt.tight_layout()
# plt.figure(figsize=(10, 6))
# plt.plot(dataset.index, dataset['wind_spd'], 'k.')
# plt.grid(True)
# plt.grid(True, 'minor')
# plt.tight_layout()
# plt.figure(figsize=(10, 6))
# plt.plot(dataset.index, dataset['solar_irr'], 'k.')
# plt.grid(True)
# plt.grid(True, 'minor')
# plt.tight_layout()
# %%
# dataset.to_pickle('dataset_30min.pkl') | true |
46e1b18d2a574770896bae6e10f7440a8ee554fb | Python | Schokokex/reverseEngineeringPython | /Classes/Type.py | UTF-8 | 731 | 2.890625 | 3 | [] | no_license | import Wrapper_Descriptor
import Object
class Type:
__init__ = Wrapper_Descriptor()
def __init__(self, object_or_name: str, bases: tuple, dict: dict):
# TypeError: type() takes 1 or 3 arguments
pass
# TypeError: type.__new__() argument 1 must be str, not int
# TypeError: type.__new__() argument 2 must be tuple, not int
# TypeError: type.__new__() argument 3 must be dict, not int
# TypeError: metaclass conflict: the metaclass of a derived class must be a (non-strict) subclass of the metaclasses of all its bases
# TypeError: descriptor '__init__' requires a 'type' object but received a 'dict'
__call__ = Wrapper_Descriptor()
__base__ = Object
# type() is different from type.__init__()
# Wrapper_Descriptor | true |
2508e7af7fc201ad67f0910a11965cd65eea5fce | Python | asad1996172/Obfuscation-Systems | /Style Nueralization PAN16/AuthorObfuscation/AuthorObfuscation/Evaluation/pos_measures.py | UTF-8 | 989 | 2.6875 | 3 | [] | no_license | import text_utils
from Evaluation import POSTagging as pt
def pos_ratio(text):
if text:
word_count = text_utils.word_count(text)
pos_tagged = pt.pos_tag(text)
noun_count = 0
verb_count = 0
adj_count = 0
adv_count = 0
punctuation_count = 0
for tagged_word in pos_tagged:
word = tagged_word[0]
pos = tagged_word[1]
if pos == 'NOUN':
noun_count = noun_count + 1
elif pos == 'VERB':
verb_count = verb_count + 1
elif pos == 'ADJ':
adj_count = adj_count + 1
elif pos == 'ADV':
adv_count = adv_count + 1
elif pos == '.':
punctuation_count = punctuation_count + 1
return {
'NOUN' : noun_count/word_count,
'VERB' : verb_count/word_count,
'ADJ' : adj_count/word_count,
'ADV' : adv_count/word_count,
'PUNCT' : punctuation_count/word_count,
}
else:
return 0
| true |
08ad0c3f0830f527916442b451783228330eeb4a | Python | liangguang/20180910 | /pythoncode/catchImg/catchYoumeiyu.py | UTF-8 | 5,616 | 2.515625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import requests, traceback
import re,threading
import os,time,random
from bs4 import BeautifulSoup
headers = {
'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Mobile Safari/537.36'
}
def getHTMLText(url):
r = requests.get(url,headers=headers,timeout=10)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
def downloadImgs(nameUrl,root='youmeitu'):
if not os.path.exists(root):
os.mkdir(root)
imgs_url = 'http://www.youmeitu.com'+nameUrl
#print(nameUrl+'|'+root)
html=getHTMLText(imgs_url)
soup_p = BeautifulSoup(html,'lxml')
tab = soup_p.find('div',class_='NewPages')
pages = tab.find('li').get_text()
num = pages[pages.find('共') + 1:pages.find('页')]
print(num)
for page in range(1,int(num)):
img_url = 'http://www.youmeitu.com'+nameUrl.replace('.','_'+str(page)+'.')
print(img_url)
html=getHTMLText(img_url)
soup_p = BeautifulSoup(html,'lxml')
tab = soup_p.find('div',id='ArticleId60')
href = tab.find('p')
imgurl = href.a.img.get('src')
title = href.a.img.get('title')
path = os.path.join(root,title)+'.jpg'
try:
print(imgurl + '|'+ path)
downloadOne(imgurl,path)
except:
traceback.print_exc()
print('下载{}的图片{}异常'.format(name,title))
def downloadOne(imgurl,path):
if not os.path.exists(path):
r = requests.get(imgurl,timeout=5)
r.raise_for_status()
#使用with语句可以不用自己手动关闭已经打开的文件流
with open(path,"wb") as f: #开始写文件,wb代表写二进制文件
f.write(r.content)
print('下载'+path+'完成')
else:
print(path + "文件已存在")
def getPageUrls(text,name):
re_pageUrl=r'href="(.+)">\s*<img src="(.+)" alt="'+name
return re.findall(re_pageUrl,text)
def downPictures(text,root,name):
pageUrls=getPageUrls(text,name)
titles=re.findall(r'alt="'+name+r'(.+)" ',text)
for i in range(len(pageUrls)):
pageUrl=pageUrls[i][0]
path = root + titles[i]+ "//"
if not os.path.exists(path):
os.mkdir(path)
if not os.listdir(path):
pageText=getHTMLText(pageUrl)
totalPics=int(re.findall(r'<em>(.+)</em>)',pageText)[0])
downUrl=re.findall(r'href="(.+?)" class="">下载图片',pageText)[0]
cnt=1
while(cnt<=totalPics):
picPath=path+str(cnt)+".jpg"
r=requests.get(downUrl)
with open(picPath,'wb') as f:
f.write(r.content)
f.close()
print('{} - 第{}张下载已完成\n'.format(titles[i],cnt))
cnt+=1
nextPageUrl=re.findall(r'href="(.+?)">下一张',pageText)[0]
pageText=getHTMLText(nextPageUrl)
downUrl=re.findall(r'href="(.+?)" class="">下载图片',pageText)[0]
return
def getMeiNv():
urls = []
for s in range(1,215):
nameUrl = 'http://www.youmeitu.com/meinv/list_'+str(s)+'.html'
urls.append(nameUrl)
#threads = []
for url in urls:
try:
html=getHTMLText(url)
#print(html)
soup_p = BeautifulSoup(html,'lxml')
tab = soup_p.find('div',class_='TypeList')
hrefs = tab.select('li')
for href in hrefs:
nameUrl = href.a.get('href')
try:
# if len(threads) < 9:
# t = threading.Thread(target=downloadImgs,args=(nameUrl,))
# threads.append(t)
# t.start()
# else:
# downloadImgs(nameUrl)
downloadImgs(nameUrl)
except:
traceback.print_exc()
print('下载{}的图片异常'.format(nameUrl))
except Exception:
traceback.print_exc()
print('解析{}异常'.format(url))
#time.sleep(int(format(random.randint(2,5)))) # 设置随机等待时间
#break
def getWallpaper():
urls = []
for s in range(191,210):
for j in range(1,6):
nameUrl = 'http://www.win4000.com/wallpaper_'+str(s)+'_0_0_'+str(j)+'.html'
urls.append(nameUrl)
for s in [2285,2286,2287,2357,2358,2361]:
for j in range(1,6):
nameUrl = 'http://www.win4000.com/wallpaper_'+str(s)+'_0_0_'+str(j)+'.html'
urls.append(nameUrl)
for url in urls:
print(url)
try:
html=getHTMLText(url)
soup_p = BeautifulSoup(html,'lxml')
tab = soup_p.find('div',class_='Left_bar')
hrefs = tab.select('li')
for href in hrefs:
nameUrl = href.a.get('href')
try:
downloadImgs(nameUrl)
except:
traceback.print_exc()
print('下载{}的图片异常'.format(nameUrl))
except Exception:
traceback.print_exc()
print('解析{}异常'.format(url))
if __name__ == '__main__':
getMeiNv() | true |
4a7d2c24889bd94b3f13d42f02a3aa2c748cf2dd | Python | ashomah/ie_pandas | /tests/mixed/df.get_row()/test_input_mixed_in_dict_of_np_get_row.py | UTF-8 | 2,807 | 2.890625 | 3 | [] | no_license | from ie_pandas import DataFrame
import pytest
import numpy as np
def test_input_mixed_in_dict_of_np_get_row_by_index():
obj = {
"age": np.array([30.1, 53.1, 31.1, 47.1, 32.1]),
"albums": np.array([4, 10, 2, 5, 4]),
"C": np.array(["a", "b", "c", "d", "e"]),
"D": np.array([True, False, True, True, False]),
}
df = DataFrame(
obj,
colindex=["AGE", "ALBUMS", "C", "D"],
rowindex=["A", "B", "C", "D", "E"],
)
expected_output = [53.1, 10, "b", False]
actual_output = df.get_row(1)
assert actual_output == expected_output
def test_input_mixed_in_dict_of_np_get_row_by_rowindex():
obj = {
"age": np.array([30.1, 53.1, 31.1, 47.1, 32.1]),
"albums": np.array([4, 10, 2, 5, 4]),
"C": np.array(["a", "b", "c", "d", "e"]),
"D": np.array([True, False, True, True, False]),
}
df = DataFrame(
obj,
colindex=["AGE", "ALBUMS", "C", "D"],
rowindex=["A", "B", "C", "D", "E"],
)
expected_output = [53.1, 10, "b", False]
actual_output = df.get_row("B")
assert actual_output == expected_output
def test_input_mixed_in_dict_of_np_get_row_wrong():
obj = {
"age": np.array([30.1, 53.1, 31.1, 47.1, 32.1]),
"albums": np.array([4, 10, 2, 5, 4]),
"C": np.array(["a", "b", "c", "d", "e"]),
"D": np.array([True, False, True, True, False]),
}
df = DataFrame(
obj,
colindex=["AGE", "ALBUMS", "C", "D"],
rowindex=["A", "B", "C", "D", "E"],
)
with pytest.raises(Exception) as exc_info:
df.get_row(100)
exception_raised = exc_info.value
assert exception_raised
def test_input_mixed_in_dict_of_np_get_row_empty():
obj = {
"age": np.array([30.1, 53.1, 31.1, 47.1, 32.1]),
"albums": np.array([4, 10, 2, 5, 4]),
"C": np.array(["a", "b", "c", "d", "e"]),
"D": np.array([True, False, True, True, False]),
}
df = DataFrame(
obj,
colindex=["AGE", "ALBUMS", "C", "D"],
rowindex=["A", "B", "C", "D", "E"],
)
with pytest.raises(TypeError) as exc_info:
df.get_row()
exception_raised = exc_info.value
assert exception_raised
def test_input_mixed_in_dict_of_np_get_row_imaginary():
obj = {
"age": np.array([30.1, 53.1, 31.1, 47.1, 32.1]),
"albums": np.array([4, 10, 2, 5, 4]),
"C": np.array(["a", "b", "c", "d", "e"]),
"D": np.array([True, False, True, True, False]),
}
df = DataFrame(
obj,
colindex=["AGE", "ALBUMS", "C", "D"],
rowindex=["A", "B", "C", "D", "E"],
)
with pytest.raises(Exception) as exc_info:
df.get_row(1 + 2j)
exception_raised = exc_info.value
assert exception_raised
| true |
968a5fadb3b5521ae4b4c2de6194c5a2930c3b59 | Python | edfan/Project-Euler | /1.py | UTF-8 | 261 | 3.28125 | 3 | [] | no_license | numbers = []
result = []
top = 999
while top > 0:
numbers.append(top)
top -= 1
numbers.sort()
for n in numbers:
if n % 3 == 0:
result.append(n)
elif n % 5 == 0:
result.append(n)
print (sum(result))
| true |
a7efed60faa4c34ddb2b83e9da6635fe4d510c56 | Python | ug-kim/algorithms | /DFS_BFS/3_word_conversion.py | UTF-8 | 746 | 3.140625 | 3 | [] | no_license | from collections import deque
def solution(begin, target, words):
queue = deque()
bfs_dict = dict()
def trans_available_func(a, b): return sum(
(1 if x != y else 0) for x, y in zip(a, b)) == 1
bfs_dict[begin] = set(
filter(lambda x: trans_available_func(begin, x), words))
for word in words:
bfs_dict[word] = set(
filter(lambda x: trans_available_func(word, x), words))
queue.append((begin, 0))
while queue:
current, depth = queue.popleft()
if depth > len(words):
return 0
for w in bfs_dict[current]:
if w == target:
return depth + 1
else:
queue.append((w, depth+1))
return 0
| true |
fce66c737a23c069bf8a504c8e13e5c2621ce296 | Python | kopecmartin/grains-recognition | /main.py | UTF-8 | 2,348 | 2.65625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""
File: main.py
Authors: Martin Kopec <xkopec42@gmail.com>
Maros Kopec <xkopec44@vutbr.cz>
Patrik Segedy <xseged00@vutbr.cz>
Tomas Sykora <xsykor25>
"""
from AABBlib import detection
from AABBlib import threshold
import argparse
import csv
import cv2
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--image-path", required=True,
help="Path to an image to be processed")
parser.add_argument("--csv-path", required=True,
help="Path where csv file will be stored")
parser.add_argument("--resize", required=False, default=100,
help="Percentage to scale picture down")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
img = cv2.imread(args.image_path, 0)
if args.resize != 100:
resize_percentage = int(args.resize) / 100
img = cv2.resize(img,
(int(img.shape[1] * resize_percentage),
int(img.shape[0] * resize_percentage)))
thresh = threshold.Threshold(img)
thresh.blur()
thresh.otsu()
thresh.threshold_img()
detector = detection.Detector(thresh.get_img())
bboxes = detector.get_bounded_boxes()
box_width = []
box_height = []
max_thick = []
max_len = []
max_points = []
edge_list = []
# calculate a coefficient for changig lengths
# based on resize of input picture
k = int((1 / int(args.resize)) * 100)
for bbox in bboxes:
edge_list.append(detector.convex_hull(bbox))
box_height.append(bbox.shape[0] * k)
box_width.append(bbox.shape[1] * k)
for edges in edge_list:
max_l, max_p = detector.max_length(edges)
max_len.append(max_l * k)
max_points.append(max_p)
for edges, bbox, point in zip(edge_list, bboxes, max_points):
max_thick.append(round(detector.max_thickness(point, edges, bbox), 2))
zipped = zip(range(1, len(max_len) + 1),
box_width, box_height, max_len, max_thick)
with open(args.csv_path, 'w') as out_csv:
writer = csv.writer(out_csv)
writer.writerow(['Part #', 'Width', 'Height',
'Max Length', 'Thickness'])
writer.writerows(zipped)
cv2.imwrite('thresh.tif', thresh.get_img()) # dump threshed img
| true |
8b3da76da79180378978f34b806cc944b49818b6 | Python | olimpiadi-informatica/oii | /2018/territoriali-remastered/scommessa/managers/generator.py | UTF-8 | 1,199 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf8 -*-
from limits import MAXN
from sys import argv, exit, stderr
import os
from random import random, randint, choice, sample, shuffle, seed
usage = """Generatore di "oddcycle".
Parametri:
* S - seed
* 0 - il valore zero, molto importanteh!!!!
"""
def run(N, mode):
    """Emit one test case on stdout: N on the first line, then a space-joined
    permutation of 0..N-1 on the second.

    mode 0: uniformly shuffled order.
    mode 1: ascending order.
    mode 2: shuffled even values followed by shuffled odd values
            (note: for even N the last odd value is dropped — this matches
            the original formula; the caller only ever passes odd N).
    Any other mode prints the usage text and exits with status 1.
    """
    print(N)
    if mode == 0:
        labels = [str(v) for v in range(N)]
        shuffle(labels)
    elif mode == 1:
        labels = [str(v) for v in range(N)]
    elif mode == 2:
        evens = [str(2 * v) for v in range((N + 1) // 2)]
        odds = [str(2 * v + 1) for v in range(N - 1 - N // 2)]
        shuffle(evens)
        shuffle(odds)
        labels = evens + odds
    else:
        print("Tipo di generazione non valido")
        print(usage)
        exit(1)
    print(' '.join(labels))
if __name__ == "__main__":
    if len(argv) < 3:
        print(usage)
        exit(1)
    # Unpack exactly two numeric arguments: the RNG seed and the (unused) zero.
    rng_seed, _zero = map(int, argv[1:])
    seed(rng_seed)
    num_cases = 10
    print(num_cases)
    for _ in range(num_cases):
        # Draw an odd N in [3, MAXN]: N = 2*k + 1 with k >= 1.
        N = 2 * randint(1, (MAXN - 1) // 2) + 1
        assert 1 <= N <= MAXN
        assert N % 2 == 1
        run(N, randint(0, 2))
| true |
b245fac5c5fefc13308fa523b5b6f56ee93aa30e | Python | AllStars04/Dawood | /Commands/verifyResults.py | UTF-8 | 3,008 | 2.578125 | 3 | [] | no_license | import Module.getObject
import Module.logger
import Module.Algorithms
import Module.Utility
import Class.Automation
import Module.CleanUp
import Module.Report
import Class.UserDefinedException
import Class.SeleniumBrowser
import re
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
def verifyResults(self, values):
    """Locate the first results row matching every entry of ``values`` and click it.

    Each entry is resolved through the test-data file and the runtime value
    dictionary.  A resolved value containing ":" is treated as a timestamp
    lower bound: the row must carry a ``YYYY-MM-DD HH:MM:SS`` stamp strictly
    later than the value.  Any other value must appear verbatim in the row
    text.  Raises UserDefinedException when no row matches every value, or
    when clicking the matched row fails.
    """
    Module.logger.ERROR("Values are "+str(values))
    Excep = Class.UserDefinedException.UserDefinedException()
    ## Get object of results table. its not in table format so getting based on class
    # NOTE(review): find_elements_by_class_name was removed in Selenium 4 —
    # confirm the pinned selenium version or migrate to
    # find_elements(By.CLASS_NAME, "section_history").
    all_rows = self.driver.find_elements_by_class_name("section_history")
    # Fix: initialise the flags up front so an empty result set (or an empty
    # ``values`` list) cannot raise UnboundLocalError further down.
    success = 0
    targetObj = None
    ## Getting 1st row of the result page
    for rows in all_rows:
        ## Verify if row contains data
        Module.logger.ERROR("Verifying for the row" +rows.text)
        for vall in values:
            val = Module.Utility.readTestData(str(vall))
            Module.logger.ERROR("Value to verify is "+str(val))
            newvalue = str(self.getValueFromDic(val))
            Module.logger.ERROR("Final value to verify is "+newvalue)
            if ":" in newvalue:
                # Timestamp check: the row's stamp must be later than newvalue.
                try:
                    match = re.search(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}', rows.text)
                    date_on_result = match.group()
                    Module.logger.ERROR("GOT DATE is" +date_on_result)
                    if date_on_result > newvalue:
                        Module.logger.ERROR("Date matched ========")
                        success = 1
                    else:
                        Module.logger.ERROR("Date not matched ========")
                        success = 0
                        break
                # Narrowed from a bare except; covers match being None
                # (AttributeError on .group()) without swallowing KeyboardInterrupt.
                except Exception:
                    Module.logger.ERROR("Exception : Date not matched ========")
                    success = 0
                    break
            else:
                if newvalue in rows.text:
                    Module.logger.ERROR("Matched ======== "+newvalue)
                    success = 1
                else:
                    Module.logger.ERROR("Not Matched ======== " + newvalue)
                    success = 0
                    # Fix: stop checking this row on the first mismatch.
                    # Previously a later matching value overwrote success back
                    # to 1, so a row could be selected without containing
                    # every required value.
                    break
        if (success == 1):
            targetObj = rows
            break
    if (success == 1):
        try:
            Module.logger.INFO("Results found , clicking")
            Module.logger.ERROR("Results found , clicking on row "+targetObj.text)
            targetObj.click()
            Module.logger.INFO("Results found , clicked")
            Module.Report.Success(self,"Results found , clicked")
        except Exception:
            Module.logger.ERROR("ERROR in clicking")
            Module.Report.Failure(self,"ERROR in clicking on results")
            Excep.raiseException("ERROR in clicking on results")
    else:
        Module.logger.ERROR("Results not found based on given criteria")
        Module.Report.Failure(self,"Results not found based on given criteria")
        Excep.raiseException("Results not found based on given criteria")
| true |
5af8f43fa973ac836b18df8f9625c541b1ed13c9 | Python | hsjfans/machine_learning | /machine_learning/base.py | UTF-8 | 585 | 2.578125 | 3 | [
"MIT"
class BaseEstimator:
    """Common base class for every estimator in the package."""

    def get_params(self):
        """Return the model's parameters (no-op stub in the base class)."""
        pass

    def set_params(self):
        """Set the model's parameters (no-op stub in the base class)."""
        pass
class ClassifierMixin:
    """Mixin that tags an estimator as a classifier."""

    # Tag used for estimator-type introspection.
    _estimator_type = "classifier"

    def score(self, X, y):
        """Return the classifier's score on (X, y) (no-op stub in the mixin).

        Parameters
        ----------
        X : input data, shape [n_samples, dimensions]
        y : ground-truth labels, shape [n_samples, n_outputs]
        """
        pass
| true |