| blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9b78524c9d93e922f28b4de5fbca7329542eac25
|
1a788bf6e897b7c0df1ba62e3fdf7232c144cf5e
|
/aliyun-python-sdk-cs/aliyunsdkcs/request/v20151215/DescribeClusterNodeInfoWithInstanceRequest.py
|
f2115a60523894d2c171e6c7bd440f8a4c283188
|
[
"Apache-2.0"
] |
permissive
|
GuozhaoWu/aliyun-openapi-python-sdk
|
e4448aa34c544a2bc594ddd22f0e7f0345d2ff04
|
0efff20e540a375f62bdd4ecd5533eff81c154c8
|
refs/heads/master
| 2021-01-21T09:02:51.595319
| 2017-08-30T07:56:19
| 2017-08-30T07:56:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,417
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class DescribeClusterNodeInfoWithInstanceRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'CS', '2015-12-15', 'DescribeClusterNodeInfoWithInstance')
self.set_uri_pattern('/token/[Token]/instance/[InstanceId]/node_info')
self.set_method('GET')
def get_Token(self):
return self.get_path_params().get('Token')
def set_Token(self,Token):
self.add_path_param('Token',Token)
def get_InstanceId(self):
return self.get_path_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_path_param('InstanceId',InstanceId)
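# A minimal dispatch sketch (illustrative, not part of the original file;
# the credentials and region below are placeholders):
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = DescribeClusterNodeInfoWithInstanceRequest()
#   request.set_Token('<token>')
#   request.set_InstanceId('<instance-id>')
#   response = client.do_action_with_exception(request)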
|
[
"haowei.yao@alibaba-inc.com"
] |
haowei.yao@alibaba-inc.com
|
215f2ab820715a286898a34ec88398914b4b833d
|
6fac20b656a950cadae1e64b7b67d607e7940cad
|
/lb_weight.py
|
803fc3753c8eaf77ff6d2eb3253dad1937d99b19
|
[] |
no_license
|
Best1s/tencent_script
|
a499e54fbe1b00206f10b8fbb6a2373eb9814e04
|
b35236098577869b3d46f25cce13f025a3aceb87
|
refs/heads/master
| 2023-03-11T00:52:14.154415
| 2021-02-19T08:35:30
| 2021-02-19T08:35:30
| 331,915,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,706
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#argv[1]:LoadBalancerId
#argv[2]:ListenerId
#argv[3]:Targets.0.InstanceId
#argv[4]:Targets.0.Port
#argv[5]:Weight
import sys
import base64
import hashlib
import hmac
import random
import time
import os
import requests
import socket
# get tencentcloud InstanceId
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.cvm.v20170312 import cvm_client, models
def get_InstanceId(ip, secretId, secret_key):
try:
cred = credential.Credential(secretId, secret_key)
httpProfile = HttpProfile()
httpProfile.endpoint = "cvm.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = cvm_client.CvmClient(cred, "ap-guangzhou", clientProfile)
req = models.DescribeInstancesRequest()
params = '{"Filters":[{"Name":"private-ip-address","Values":["' + ip + '"]}]}'
req.from_json_string(params)
resp = client.DescribeInstances(req)
data = resp.to_json_string()
return data.replace('"',"").split("InstanceId: ")[1].split(",")[0] # InstanceId
except TencentCloudSDKException as err:
print(err)
def get_string_to_sign(method, endpoint):
    # Builds the canonical request string; note it reads the request
    # parameters from the module-level `data` dict defined in __main__.
    s = method + endpoint + "/?"
    query_str = "&".join("%s=%s" % (k, data[k]) for k in sorted(data))
    return s + query_str
def sign_str(key, s, method):
hmac_str = hmac.new(key.encode("utf8"), s.encode("utf8"), method).digest()
return base64.b64encode(hmac_str)
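# Illustrative shape of the string that gets signed (legacy HmacSHA1 scheme;
# values abridged):
#   GETclb.tencentcloudapi.com/?Action=ModifyTargetWeight&ListenerId=...&Weight=20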
if __name__ == '__main__':
endpoint = "clb.tencentcloudapi.com"
secretId = 'xxxxx'
secret_key = 'xxxxx'
ip = socket.gethostbyname(socket.gethostname())
LoadBalancerId = "xxx"
ListenerId = ""
InstanceId = get_InstanceId(ip, secretId, secret_key)
Port = "80"
Weight = 20
data = {
'Action': 'ModifyTargetWeight',
'SecretId': secretId,
'Region': 'ap-guangzhou',
'Timestamp': int(time.time()),
'Nonce': random.randint(0, 1000000),
'Version': '2018-03-17',
'LoadBalancerId': LoadBalancerId,
'ListenerId': ListenerId,
'Targets.0.InstanceId': InstanceId,
'Targets.0.Port': Port,
'Weight': Weight
}
s = get_string_to_sign("GET", endpoint)
data["Signature"] = sign_str(secret_key, s, hashlib.sha1)
print(data["Signature"])
resp = requests.get("https://" + endpoint, params=data)
print(resp.text)
os.system("sleep 2")
|
[
"zhangbin@ppwang.com"
] |
zhangbin@ppwang.com
|
a485dc30e7230d31630cb45f6e656a840d5a9a93
|
1558786ded08e236187a937357959294da7cb6ba
|
/18.EM/18.6.GMM_pdf.py
|
34ffb6685038ed047ca3494c5426667b5f3afa16
|
[] |
no_license
|
highclow/MachineLearning
|
e522ff4fb6b855b6a8152c8afa8beb78da5105c8
|
60482434de05e78332d82e4289abea0dd68e8c17
|
refs/heads/master
| 2023-04-09T06:08:23.032303
| 2020-06-08T03:23:50
| 2020-06-08T03:23:50
| 270,513,016
| 0
| 1
| null | 2021-04-26T20:21:47
| 2020-06-08T03:22:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,709
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
from sklearn.mixture import GaussianMixture
import scipy as sp
import matplotlib as mpl
import matplotlib.colors
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import warnings
def expand(a, b, rate=0.05):
d = (b - a) * rate
return a-d, b+d
if __name__ == '__main__':
warnings.filterwarnings(action='ignore', category=RuntimeWarning)
np.random.seed(0)
cov1 = np.diag((1, 2))
N1 = 500
N2 = 300
N = N1 + N2
x1 = np.random.multivariate_normal(mean=(3, 2), cov=cov1, size=N1)
m = np.array(((1, 1), (1, 3)))
x1 = x1.dot(m)
x2 = np.random.multivariate_normal(mean=(-1, 10), cov=cov1, size=N2)
x = np.vstack((x1, x2))
y = np.array([0]*N1 + [1]*N2)
gmm = GaussianMixture(n_components=2, covariance_type='full', random_state=0)
gmm.fit(x)
centers = gmm.means_
covs = gmm.covariances_
    print('GMM means = \n', centers)
    print('GMM covariances = \n', covs)
y_hat = gmm.predict(x)
colors = '#A0FFA0', '#E080A0',
levels = 10
cm = mpl.colors.ListedColormap(colors)
x1_min, x1_max = x[:, 0].min(), x[:, 0].max()
x2_min, x2_max = x[:, 1].min(), x[:, 1].max()
x1_min, x1_max = expand(x1_min, x1_max)
x2_min, x2_max = expand(x2_min, x2_max)
x1, x2 = np.mgrid[x1_min:x1_max:500j, x2_min:x2_max:500j]
grid_test = np.stack((x1.flat, x2.flat), axis=1)
    print(gmm.score_samples(grid_test))
grid_hat = -gmm.score_samples(grid_test)
grid_hat = grid_hat.reshape(x1.shape)
plt.figure(figsize=(9, 7), facecolor='w')
ax = plt.subplot(111)
cmesh = plt.pcolormesh(x1, x2, grid_hat, cmap=plt.cm.Spectral)
plt.colorbar(cmesh, shrink=0.9)
CS = plt.contour(x1, x2, grid_hat, levels=np.logspace(0, 2, num=levels, base=10), colors='w', linewidths=1)
plt.clabel(CS, fontsize=9, inline=True, fmt='%.1f')
plt.scatter(x[:, 0], x[:, 1], s=30, c=y, cmap=cm, marker='o')
for i, cc in enumerate(zip(centers, covs)):
center, cov = cc
value, vector = sp.linalg.eigh(cov)
width, height = value[0], value[1]
v = vector[0] / sp.linalg.norm(vector[0])
angle = 180* np.arctan(v[1] / v[0]) / np.pi
e = Ellipse(xy=center, width=width, height=height,
angle=angle, color='m', alpha=0.5, clip_box = ax.bbox)
ax.add_artist(e)
plt.xlim((x1_min, x1_max))
plt.ylim((x2_min, x2_max))
mpl.rcParams['font.sans-serif'] = [u'SimHei']
mpl.rcParams['axes.unicode_minus'] = False
    plt.title(u'GMM likelihood', fontsize=20)
plt.grid(True)
plt.show()
|
[
"lu.he@jd.com"
] |
lu.he@jd.com
|
74b5822b17147284bdd1dcaf4544898227ae0830
|
9ba5fc81b5553cf568adc597180cc695c75eadbc
|
/app/views.py
|
3167e364070d945596f273c2057739b7bf9d65fc
|
[] |
no_license
|
mahmudgithub/html-css-pactics
|
85ab411fc7df877f82a8c27441f1469b7bcc0cf4
|
035723db2e8377004d5e223ee2c6a6f49449920a
|
refs/heads/master
| 2023-02-23T14:46:14.171347
| 2021-01-29T05:42:44
| 2021-01-29T05:42:44
| 332,635,153
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 91
|
py
|
from django.shortcuts import render
def one(request):
return render(request,'home.html')
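# Typical URL wiring for this view (illustrative, not part of the original
# file), e.g. in urls.py:
#   from django.urls import path
#   from app import views
#   urlpatterns = [path('', views.one, name='home')]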
|
[
"mahmudhossain838@gmail.com"
] |
mahmudhossain838@gmail.com
|
c118cbaee6f21d1833c8a1cacf802f5e8f5e8db6
|
dbbb144cc93f44e0f82b762af3fefe6fe3beb605
|
/Chatbotforwip/models.py
|
9ed77c7479879a596becd6d32e3a370b0baa7337
|
[] |
no_license
|
srijibrockz95/chatbotfor_zlsur
|
5f4d9a0807818d85b6ba70a27392fd6b1aab7413
|
e97b1522f2124290544450cd3813f592c4ba86c0
|
refs/heads/main
| 2023-04-19T08:54:43.311293
| 2021-05-12T13:06:47
| 2021-05-12T13:06:47
| 366,716,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,860
|
py
|
from . import db
from sqlalchemy.sql import func
class Login(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
userfname = db.Column(db.String(120), nullable=False)
useremail = db.Column(db.String(120), nullable=False, unique=True)
userid = db.Column(db.String(20), nullable=False, unique=True)
userrole = db.Column(db.String(50), nullable=False)
passwordhash = db.Column(db.String(255), nullable=False)
creationdatetime = db.Column(db.DateTime, nullable=False, server_default=func.now())
updationdatetime = db.Column(db.DateTime, nullable=False, server_default=func.now(), onupdate=func.now())
createdby = db.Column(db.String(20))
modifiedby = db.Column(db.String(20))
def __init__(self, userfname, useremail, userid, userrole, passwordhash, createdby):
self.userfname = userfname
self.useremail = useremail
self.userid = userid
self.userrole = userrole
self.passwordhash = passwordhash
self.createdby = createdby
def __repr__(self):
return '<Login %r>' % self.userfname
class QuestionAnswer(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
question_type = db.Column(db.String(80), nullable=False)
question = db.Column(db.String(240), nullable=False)
answer = db.Column(db.String(240))
relatedquesid = db.Column(db.Integer)
orderofdisp = db.Column(db.Integer)
creationdatetime = db.Column(db.DateTime, nullable=False, server_default=func.now())
updationdatetime = db.Column(db.DateTime, nullable=False, server_default=func.now(), onupdate=func.now())
createdby = db.Column(db.String(20))
modifiedby = db.Column(db.String(20))
def __init__(self, question_type, question, answer, relatedquesid, orderofdisp, createdby):
self.question_type = question_type
self.question = question
self.answer = answer
self.relatedquesid = relatedquesid
self.orderofdisp = orderofdisp
self.createdby = createdby
def __repr__(self):
return '<QuestionAnswer %r>' % self.question
class Booking(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
userid = db.Column(db.String(20), nullable=False)
bookingmessage = db.Column(db.String(300), nullable=False)
creationdatetime = db.Column(db.DateTime, nullable=False, server_default=func.now())
updationdatetime = db.Column(db.DateTime, nullable=False, server_default=func.now(), onupdate=func.now())
createdby = db.Column(db.String(20))
modifiedby = db.Column(db.String(20))
def __init__(self, userid, bookingmessage, createdby):
self.userid = userid
self.bookingmessage = bookingmessage
self.createdby = createdby
def __repr__(self):
return '<Booking %r>' % self.userid
|
[
"srijibb5991@gmail.com"
] |
srijibb5991@gmail.com
|
08c28acaad47d0e0a42968f0f313be2783764ae7
|
b793ceedf60fb87902bc005cc18bda0478ead7c7
|
/sunrise.py
|
957688fe6cf090485430e10317ae58235701988e
|
[] |
no_license
|
thermalbarker/octocam-timelapse
|
35f407bf2afa846ba897d9df663518aefe6b36d6
|
448097de97477914954e9f41d2769469e46da35e
|
refs/heads/master
| 2021-01-01T21:58:45.302985
| 2020-03-29T09:53:07
| 2020-03-29T09:53:07
| 239,359,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,698
|
py
|
#!/usr/bin/python
# Credits here: https://michelanders.blogspot.com/2010/12/calulating-sunrise-and-sunset-in-python.html
from math import cos,sin,acos,asin,tan
from math import degrees as deg, radians as rad
from datetime import date,datetime,time,tzinfo,timedelta
# Nicked from https://docs.python.org/2/library/datetime.html#tzinfo-objects
import time as _time
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
SECONDS_PER_DAY = 24 * 60 * 60
STDOFFSET = timedelta(seconds = -_time.timezone)
if _time.daylight:
DSTOFFSET = timedelta(seconds = -_time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
class sun:
"""
Calculate sunrise and sunset based on equations from NOAA
http://www.srrb.noaa.gov/highlights/sunrise/calcdetails.html
typical use, calculating the sunrise at the present day:
import datetime
    from sunrise import sun
    s = sun(lat=49, long=3)
    print('sunrise at ', s.sunrise(when=datetime.datetime.now()))
    """
    def __init__(self,lat=49.0,long=8.4): # default Karlsruhe, Germany
self.lat=lat
self.long=long
def sunrise(self,when=None):
"""
return the time of sunrise as a datetime.time object
when is a datetime.datetime object. If none is given
a local time zone is assumed (including daylight saving
if present)
"""
if when is None : when = datetime.now(tz=LocalTimezone())
self.__preptime(when)
self.__calc()
return sun.__timefromdecimalday(self.sunrise_t)
def sunset(self,when=None):
if when is None : when = datetime.now(tz=LocalTimezone())
self.__preptime(when)
self.__calc()
return sun.__timefromdecimalday(self.sunset_t)
def solarnoon(self,when=None):
if when is None : when = datetime.now(tz=LocalTimezone())
self.__preptime(when)
self.__calc()
return sun.__timefromdecimalday(self.solarnoon_t)
@staticmethod
def __timefromdecimalday(day):
"""
returns a datetime.time object.
day is a decimal day between 0.0 and 1.0, e.g. noon = 0.5
"""
hours = 24.0*day
h = int(hours)
minutes= (hours-h)*60
m = int(minutes)
seconds= (minutes-m)*60
s = int(seconds)
return time(hour=h,minute=m,second=s)
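    # Example (illustrative): __timefromdecimalday(0.75) returns
    # datetime.time(18, 0, 0), since 0.75 of a day is 6 pm.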
def __preptime(self,when):
"""
Extract information in a suitable format from when,
a datetime.datetime object.
"""
        # datetime days are numbered in the Gregorian calendar
        # while the calculations from NOAA are distributed as
        # OpenOffice spreadsheets with days numbered from
        # 1/1/1900. The difference is the constant below,
        # worked out for 18/12/2010.
        self.day = when.toordinal()-(734124-40529)
t=when.time()
self.time= (t.hour + t.minute/60.0 + t.second/3600.0)/24.0
self.timezone=0
offset=when.utcoffset()
        if offset is not None:
self.timezone=offset.seconds/3600.0
def __calc(self):
"""
Perform the actual calculations for sunrise, sunset and
a number of related quantities.
The results are stored in the instance variables
sunrise_t, sunset_t and solarnoon_t
"""
timezone = self.timezone # in hours, east is positive
longitude= self.long # in decimal degrees, east is positive
latitude = self.lat # in decimal degrees, north is positive
time = self.time # percentage past midnight, i.e. noon is 0.5
day = self.day # daynumber 1=1/1/1900
Jday =day+2415018.5+time-timezone/24 # Julian day
Jcent =(Jday-2451545)/36525 # Julian century
Manom = 357.52911+Jcent*(35999.05029-0.0001537*Jcent)
Mlong = 280.46646+Jcent*(36000.76983+Jcent*0.0003032)%360
Eccent = 0.016708634-Jcent*(0.000042037+0.0001537*Jcent)
Mobliq = 23+(26+((21.448-Jcent*(46.815+Jcent*(0.00059-Jcent*0.001813))))/60)/60
obliq = Mobliq+0.00256*cos(rad(125.04-1934.136*Jcent))
vary = tan(rad(obliq/2))*tan(rad(obliq/2))
Seqcent = sin(rad(Manom))*(1.914602-Jcent*(0.004817+0.000014*Jcent))+sin(rad(2*Manom))*(0.019993-0.000101*Jcent)+sin(rad(3*Manom))*0.000289
Struelong= Mlong+Seqcent
Sapplong = Struelong-0.00569-0.00478*sin(rad(125.04-1934.136*Jcent))
declination = deg(asin(sin(rad(obliq))*sin(rad(Sapplong))))
eqtime = 4*deg(vary*sin(2*rad(Mlong))-2*Eccent*sin(rad(Manom))+4*Eccent*vary*sin(rad(Manom))*cos(2*rad(Mlong))-0.5*vary*vary*sin(4*rad(Mlong))-1.25*Eccent*Eccent*sin(2*rad(Manom)))
hourangle= deg(acos(cos(rad(90.833))/(cos(rad(latitude))*cos(rad(declination)))-tan(rad(latitude))*tan(rad(declination))))
self.solarnoon_t=(720-4*longitude-eqtime+timezone*60)/1440
self.sunrise_t =self.solarnoon_t-hourangle*4/1440
self.sunset_t =self.solarnoon_t+hourangle*4/1440
def delta_minutes(difference):
return (difference.days * SECONDS_PER_DAY + difference.seconds) / 60
def delta_minutes_now(t):
    # Relies on the module-level `now` assigned in the __main__ block below.
    return delta_minutes(now - t)
if __name__ == "__main__":
s=sun()
print(datetime.today())
now = datetime.now()
beforesunrise = datetime.combine(date.today(), s.sunrise()) - HOUR
aftersunset = datetime.combine(date.today(), s.sunset()) + HOUR
midday = datetime.combine(date.today(), s.solarnoon())
print(max(0, delta_minutes_now(beforesunrise)))
print(max(0, delta_minutes_now(aftersunset)))
print(max(0, delta_minutes_now(midday)))
|
[
"thermalbarker@gmail.com"
] |
thermalbarker@gmail.com
|
459b3b92cf722eddf20d6aac667403bdbfa38dff
|
89c878102fa84dfb9d02ed6b7f79199b97035257
|
/net/tcp_socket_server.py
|
173d6a33347075d139e7c57413e0aae704d7ddb0
|
[
"Apache-2.0"
] |
permissive
|
chainren/python-learn
|
5d9b4b77c12dfbfc7e9ae805a783576196494309
|
5e48e96c4bb212806b9ae0954fdb368abdcf9ba3
|
refs/heads/master
| 2022-11-30T13:41:25.993871
| 2020-06-10T13:46:50
| 2020-06-10T13:46:50
| 147,663,348
| 1
| 0
|
Apache-2.0
| 2022-11-22T02:58:25
| 2018-09-06T11:26:26
|
Python
|
UTF-8
|
Python
| false
| false
| 826
|
py
|
# -*- coding: utf-8 -*-
import socket
import threading
import time
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the listening port:
s.bind(('127.0.0.1', 8899))
# Start listening for connections
s.listen(5)
print('Waiting for connection...')
def handler(sock, addr):
print('Accept new connection from %s:%s...' % addr)
sock.send(b'Welcome!')
while True:
data = sock.recv(1024)
time.sleep(1)
if not data or data.decode('utf-8') == 'exit':
break
msg = b'Hello, %s' % data.decode('utf-8').encode('utf-8')
sock.send(msg)
sock.close()
print('Connection from %s:%s closed.' % addr)
while True:
    # Accept a new connection:
    sock, addr = s.accept()
    # Spawn a new thread to handle the connection
t = threading.Thread(target=handler, args=(sock, addr))
t.start()
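# A quick manual test client (illustrative, not part of the original file):
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 8899))
#   print(c.recv(1024))   # b'Welcome!'
#   c.send(b'Michael')
#   print(c.recv(1024))   # b'Hello, Michael'
#   c.send(b'exit')
#   c.close()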
|
[
"chenrengui@xiangshang360.com"
] |
chenrengui@xiangshang360.com
|
c8ada1d71f2f47e3580cec80fd31c1e946ed0204
|
6ddf8e93ee13fa92e4bca3d924382d6bd8b55336
|
/코테 with Python/그리디/Q.04_만들 수 없는 금액.py
|
406718c37eb1628b42388192c390098d19a69559
|
[] |
no_license
|
gkcksrbs/Coding_Test_With_Python
|
8b5df49240458bdda3e1c4de6eb302052e0a1546
|
234e85a2b55a6c611618fb63a5c18873dd65c90d
|
refs/heads/master
| 2023-02-26T15:29:25.789368
| 2021-02-03T06:08:18
| 2021-02-03T06:08:18
| 291,658,801
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
# Inefficient brute-force version (commented out, kept for reference)
# from itertools import combinations
#
# N = int(input())
# coin = list(map(int, input().split()))
# lst = []
#
# for i in range(1, N+1):
# for j in combinations(coin, i):
# lst.append(sum(j))
#
# lst = list(set(lst))
# last = lst[len(lst)-1]
# Min = 0
#
# for i in range(1, last+1):
# if i not in lst:
# Min = i
#
# if Min == 0:
# print(last+1)
# else:
# print(Min)
import sys
# Speed up input reading
input = sys.stdin.readline
# Number of coins
n = int(input())
# Denomination of each coin
data = list(map(int, input().split()))
# The greedy step below requires the coins in ascending order
data.sort()
# Smallest amount that cannot be formed so far
target = 1
# Check every coin
for x in data:
if target < x:
break
target += x
# Print the result
print(target)
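# Worked example (illustrative): coins 1 2 3 8 -> after sorting, target grows
# 1 -> 2 -> 4 -> 7; then 8 > 7 breaks the loop, so 7 is printed as the
# smallest amount that cannot be made.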
|
[
"70510732+gkcksrbs@users.noreply.github.com"
] |
70510732+gkcksrbs@users.noreply.github.com
|
ecf2c8f7150e201c0a2133d4dc6498ba28c0b1d9
|
dff43b744e22d8d5c6e0e597f4a313f4648e77bd
|
/lcu_connectorpy/__init__.py
|
367210b786063f1edbc5b0f7fc72c4c40ad614b3
|
[
"MIT"
] |
permissive
|
intendednull/lcu_connectorpy
|
57b2bb87e42871f44206a3d607714c7b2212d9e0
|
deb1050fe3a3fdc513d63d2ab08ab92bc86ac2f9
|
refs/heads/master
| 2021-10-28T13:03:38.215911
| 2019-04-23T20:57:07
| 2019-04-23T20:57:07
| 181,576,028
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 56
|
py
|
from .connect import Connector
__all__ = ['Connector']
|
[
"noah@coronasoftware.net"
] |
noah@coronasoftware.net
|
042003a699472ca85e5f334a84fcf5165eb2ec28
|
beebc5ff44407f3f3a4c1463cd09f0917dbe5391
|
/pytype/tools/arg_parser.py
|
cbfe85e340114e4d98881575cac90b2db29b3cb0
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
mraarif/pytype
|
4f190cb2591896133761295f3d84d80602dffb58
|
546e8b8114c9af54a409985a036398c4f6955677
|
refs/heads/master
| 2023-01-23T09:48:06.239353
| 2020-12-02T06:08:27
| 2020-12-02T06:08:27
| 303,069,915
| 1
| 0
|
NOASSERTION
| 2020-12-02T06:08:28
| 2020-10-11T07:53:55
| null |
UTF-8
|
Python
| false
| false
| 3,677
|
py
|
"""Argument parsing for tools that pass args on to pytype_single."""
import argparse
from pytype import config as pytype_config
from pytype import datatypes
from pytype import utils as pytype_utils
def string_to_bool(s):
return s == "True" if s in ("True", "False") else s
def convert_string(s):
s = s.replace("\n", "")
try:
return int(s)
except ValueError:
return string_to_bool(s)
class Parser:
"""Parser that integrates tool and pytype-single args."""
def __init__(self, parser, pytype_single_args):
"""Initialize a parser.
Args:
parser: An argparse.ArgumentParser or compatible object
pytype_single_args: Iterable of args that will be passed to pytype_single
"""
self.parser = parser
self.pytype_single_args = pytype_single_args
def create_initial_args(self, keys):
"""Creates the initial set of args.
Args:
keys: A list of keys to create args from
Returns:
An argparse.Namespace.
"""
return argparse.Namespace(**{k: None for k in keys})
def parse_args(self, argv):
"""Parses argv.
Args:
argv: sys.argv[1:]
Returns:
An argparse.Namespace.
"""
args = self.create_initial_args(self.pytype_single_args)
self.parser.parse_args(argv, args)
self.postprocess(args)
return args
def postprocess(self, args, from_strings=False):
"""Postprocesses the subset of pytype_single_args that appear in args.
Args:
args: an argparse.Namespace.
from_strings: Whether the args are all strings. If so, we'll do our best
to convert them to the right types.
"""
names = set()
for k in self.pytype_single_args:
if hasattr(args, k):
names.add(k)
if from_strings:
setattr(args, k, convert_string(getattr(args, k)))
pytype_config.Postprocessor(names, args).process()
def get_pytype_kwargs(self, args):
"""Return a set of kwargs to pass to pytype.config.Options.
Args:
args: an argparse.Namespace.
Returns:
A dict of kwargs with pytype_single args as keys.
"""
return {k: getattr(args, k) for k in self.pytype_single_args}
def add_pytype_and_parse(parser, argv):
"""Add basic pytype options and parse args.
Useful to generate a quick CLI for a library.
Args:
parser: An argparse.ArgumentParser
argv: Raw command line args, typically sys.argv[1:]
Returns:
A tuple of (
parsed_args: argparse.Namespace,
pytype_options: pytype.config.Options)
"""
# Add default --debug and input arguments.
parser.add_argument("--debug", action="store_true",
dest="debug", default=None,
help="Display debug output.")
parser.add_argument("inputs", metavar="input", nargs=1,
help="A .py file to index")
# Add options from pytype-single.
wrapper = datatypes.ParserWrapper(parser)
pytype_config.add_basic_options(wrapper)
parser = Parser(parser, wrapper.actions)
# Parse argv
args = parser.parse_args(argv)
cli_args = args.inputs.copy()
# Make sure we have a valid set of CLI options to pytype
## If we are passed an imports map we should look for pickled files as well.
if getattr(args, "imports_info", None):
cli_args += ["--imports_info", args.imports_info,
"--use-pickled-files"]
## We need to set this when creating Options (b/128032570)
if args.python_version:
cli_args += ["-V", pytype_utils.format_version(args.python_version)]
pytype_options = pytype_config.Options(cli_args, command_line=True)
pytype_options.tweak(**parser.get_pytype_kwargs(args))
return (args, pytype_options)
|
[
"rechen@google.com"
] |
rechen@google.com
|
dbed13deeec7d3532a35f6642b2175ad1adafab0
|
414517f9cda31b307f6221c101c90481cdf6f294
|
/testApp/accounts/forms.py
|
dd3c474402bac357e19086062888c0cd3f68e584
|
[] |
no_license
|
jutkarsh079/Django-TestApp
|
c63443b2d56f6eb7a8654cb8768b73a89c63e224
|
3c46943015baf9f8fca142bfd5c4f6642ddaee5c
|
refs/heads/master
| 2022-10-10T04:33:19.770057
| 2020-06-10T05:49:30
| 2020-06-10T05:49:30
| 271,190,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import get_user_model
class UserCreateform(UserCreationForm):
class Meta:
fields = ("username", "email", "password1", "password2")
model = get_user_model()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["username"].label = "Display Name"
self.fields["email"].label = "Email address"
|
[
"jainutkarsh@jains-MacBook-Pro.local"
] |
jainutkarsh@jains-MacBook-Pro.local
|
8d4d1a9e13927dcd6cc75f4d01265bdd8540bb3f
|
ee5474632d5a010fb2308c7c21af8aeb91bdb11b
|
/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py
|
179a022764a7ef306027821175add5d53ee14c24
|
[
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
qiang-zai/azure-sdk-for-python
|
b7ef8b90cee36c2aa92e5acae8152fecda1bab7b
|
e8ccd8bcbbab43c9b0f2f6323775c80a96cefeb0
|
refs/heads/master
| 2021-05-21T01:00:13.377148
| 2020-04-02T00:46:38
| 2020-04-02T00:46:38
| 252,476,851
| 1
| 0
| null | 2020-04-02T14:21:42
| 2020-04-02T14:21:42
| null |
UTF-8
|
Python
| false
| false
| 21,565
|
py
|
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from typing import ( # pylint: disable=unused-import
Union,
Optional,
Any,
List,
Dict,
TYPE_CHECKING
)
from azure.core.tracing.decorator_async import distributed_trace_async
from .._generated.models import TextAnalyticsErrorException
from .._generated.aio._text_analytics_client_async import TextAnalyticsClient as TextAnalytics
from ._base_client_async import AsyncTextAnalyticsClientBase
from .._request_handlers import _validate_batch_input
from .._response_handlers import (
process_batch_error,
entities_result,
linked_entities_result,
key_phrases_result,
sentiment_result,
language_result
)
from .._models import (
DetectLanguageInput,
TextDocumentInput,
DetectLanguageResult,
RecognizeEntitiesResult,
RecognizeLinkedEntitiesResult,
ExtractKeyPhrasesResult,
AnalyzeSentimentResult,
DocumentError,
)
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
from azure.core.credentials import AzureKeyCredential
class TextAnalyticsClient(AsyncTextAnalyticsClientBase):
"""The Text Analytics API is a suite of text analytics web services built with best-in-class
Microsoft machine learning algorithms. The API can be used to analyze unstructured text for
tasks such as sentiment analysis, key phrase extraction, and language detection. No training data
is needed to use this API - just bring your text data. This API uses advanced natural language
processing techniques to deliver best in class predictions.
Further documentation can be found in
https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview
:param str endpoint: Supported Cognitive Services or Text Analytics resource
endpoints (protocol and hostname, for example: https://westus2.api.cognitive.microsoft.com).
:param credential: Credentials needed for the client to connect to Azure.
This can be the an instance of AzureKeyCredential if using a
cognitive services/text analytics API key or a token credential
from :mod:`azure.identity`.
:type credential: :class:`~azure.core.credentials.AzureKeyCredential`
or :class:`~azure.core.credentials_async.AsyncTokenCredential`
:keyword str default_country_hint: Sets the default country_hint to use for all operations.
Defaults to "US". If you don't want to use a country hint, pass the string "none".
:keyword str default_language: Sets the default language to use for all operations.
Defaults to "en".
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_authentication_async.py
:start-after: [START create_ta_client_with_key_async]
:end-before: [END create_ta_client_with_key_async]
:language: python
:dedent: 8
:caption: Creating the TextAnalyticsClient with endpoint and API key.
.. literalinclude:: ../samples/async_samples/sample_authentication_async.py
:start-after: [START create_ta_client_with_aad_async]
:end-before: [END create_ta_client_with_aad_async]
:language: python
:dedent: 8
:caption: Creating the TextAnalyticsClient with endpoint and token credential from Azure Active Directory.
"""
def __init__( # type: ignore
self,
endpoint: str,
credential: Union["AzureKeyCredential", "AsyncTokenCredential"],
**kwargs: Any
) -> None:
super(TextAnalyticsClient, self).__init__(credential=credential, **kwargs)
self._client = TextAnalytics(
endpoint=endpoint, credentials=credential, pipeline=self._pipeline
)
self._default_language = kwargs.pop("default_language", "en")
self._default_country_hint = kwargs.pop("default_country_hint", "US")
@distributed_trace_async
async def detect_language( # type: ignore
self,
documents: Union[List[str], List[DetectLanguageInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[DetectLanguageResult, DocumentError]]:
"""Detects Language for a batch of documents.
Returns the detected language and a numeric score between zero and
one. Scores close to one indicate 100% certainty that the identified
language is true. See https://aka.ms/talangs for the list of enabled languages.
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and country_hint on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.DetectLanguageInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.DetectLanguageInput`, like
`{"id": "1", "country_hint": "us", "text": "hello world"}`.
:type documents:
list[str] or list[~azure.ai.textanalytics.DetectLanguageInput]
:keyword str country_hint: A country hint for the entire batch. Accepts two
letter country codes specified by ISO 3166-1 alpha-2. Per-document
country hints will take precedence over whole batch hints. Defaults to
"US". If you don't want to use a country hint, pass the string "none".
:keyword str model_version: This value indicates which model will
be used for scoring, e.g. "latest", "2019-10-01". If a model-version
is not specified, the API will default to the latest, non-preview version.
:keyword bool show_stats: If set to true, response will contain document
level statistics.
:return: The combined list of :class:`~azure.ai.textanalytics.DetectLanguageResult`
and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
were passed in.
:rtype: list[~azure.ai.textanalytics.DetectLanguageResult,
~azure.ai.textanalytics.DocumentError]
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_detect_language_async.py
:start-after: [START batch_detect_language_async]
:end-before: [END batch_detect_language_async]
:language: python
:dedent: 8
:caption: Detecting language in a batch of documents.
"""
country_hint_arg = kwargs.pop("country_hint", None)
country_hint = country_hint_arg if country_hint_arg is not None else self._default_country_hint
docs = _validate_batch_input(documents, "country_hint", country_hint)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.languages(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=language_result,
**kwargs
)
except TextAnalyticsErrorException as error:
process_batch_error(error)
@distributed_trace_async
async def recognize_entities( # type: ignore
self,
documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[RecognizeEntitiesResult, DocumentError]]:
"""Entity Recognition for a batch of documents.
Identifies and categorizes entities in your text as people, places,
organizations, date/time, quantities, percentages, currencies, and more.
For the list of supported entity types, check: https://aka.ms/taner
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and language on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
`{"id": "1", "language": "en", "text": "hello world"}`.
:type documents:
list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
:keyword str language: The 2 letter ISO 639-1 representation of language for the
entire batch. For example, use "en" for English; "es" for Spanish etc.
If not set, uses "en" for English as default. Per-document language will
take precedence over whole batch language. See https://aka.ms/talangs for
supported languages in Text Analytics API.
:keyword str model_version: This value indicates which model will
be used for scoring, e.g. "latest", "2019-10-01". If a model-version
is not specified, the API will default to the latest, non-preview version.
:keyword bool show_stats: If set to true, response will contain document level statistics.
:return: The combined list of :class:`~azure.ai.textanalytics.RecognizeEntitiesResult` and
:class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
passed in.
:rtype: list[~azure.ai.textanalytics.RecognizeEntitiesResult,
~azure.ai.textanalytics.DocumentError]
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_recognize_entities_async.py
:start-after: [START batch_recognize_entities_async]
:end-before: [END batch_recognize_entities_async]
:language: python
:dedent: 8
:caption: Recognize entities in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.entities_recognition_general(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=entities_result,
**kwargs
)
except TextAnalyticsErrorException as error:
process_batch_error(error)
@distributed_trace_async
async def recognize_linked_entities( # type: ignore
self,
documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[RecognizeLinkedEntitiesResult, DocumentError]]:
"""Recognize linked entities from a well-known knowledge base for a batch of documents.
Identifies and disambiguates the identity of each entity found in text (for example,
determining whether an occurrence of the word Mars refers to the planet, or to the
Roman god of war). Recognized entities are associated with URLs to a well-known
knowledge base, like Wikipedia.
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and language on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
`{"id": "1", "language": "en", "text": "hello world"}`.
:type documents:
list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
:keyword str language: The 2 letter ISO 639-1 representation of language for the
entire batch. For example, use "en" for English; "es" for Spanish etc.
If not set, uses "en" for English as default. Per-document language will
take precedence over whole batch language. See https://aka.ms/talangs for
supported languages in Text Analytics API.
:keyword str model_version: This value indicates which model will
be used for scoring, e.g. "latest", "2019-10-01". If a model-version
is not specified, the API will default to the latest, non-preview version.
:keyword bool show_stats: If set to true, response will contain document level statistics.
:return: The combined list of :class:`~azure.ai.textanalytics.RecognizeLinkedEntitiesResult`
and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
were passed in.
:rtype: list[~azure.ai.textanalytics.RecognizeLinkedEntitiesResult,
~azure.ai.textanalytics.DocumentError]
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_recognize_linked_entities_async.py
:start-after: [START batch_recognize_linked_entities_async]
:end-before: [END batch_recognize_linked_entities_async]
:language: python
:dedent: 8
:caption: Recognize linked entities in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.entities_linking(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=linked_entities_result,
**kwargs
)
except TextAnalyticsErrorException as error:
process_batch_error(error)
@distributed_trace_async
async def extract_key_phrases( # type: ignore
self,
documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[ExtractKeyPhrasesResult, DocumentError]]:
"""Extract Key Phrases from a batch of documents.
Returns a list of strings denoting the key phrases in the input
text. For example, for the input text "The food was delicious and there
were wonderful staff", the API returns the main talking points: "food"
and "wonderful staff"
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and language on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
`{"id": "1", "language": "en", "text": "hello world"}`.
:type documents:
list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
:keyword str language: The 2 letter ISO 639-1 representation of language for the
entire batch. For example, use "en" for English; "es" for Spanish etc.
If not set, uses "en" for English as default. Per-document language will
take precedence over whole batch language. See https://aka.ms/talangs for
supported languages in Text Analytics API.
:keyword str model_version: This value indicates which model will
be used for scoring, e.g. "latest", "2019-10-01". If a model-version
is not specified, the API will default to the latest, non-preview version.
:keyword bool show_stats: If set to true, response will contain document level statistics.
:return: The combined list of :class:`~azure.ai.textanalytics.ExtractKeyPhrasesResult` and
:class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
passed in.
:rtype: list[~azure.ai.textanalytics.ExtractKeyPhrasesResult,
~azure.ai.textanalytics.DocumentError]
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_extract_key_phrases_async.py
:start-after: [START batch_extract_key_phrases_async]
:end-before: [END batch_extract_key_phrases_async]
:language: python
:dedent: 8
:caption: Extract the key phrases in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.key_phrases(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=key_phrases_result,
**kwargs
)
except TextAnalyticsErrorException as error:
process_batch_error(error)
@distributed_trace_async
async def analyze_sentiment( # type: ignore
self,
documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
**kwargs: Any
) -> List[Union[AnalyzeSentimentResult, DocumentError]]:
"""Analyze sentiment for a batch of documents.
Returns a sentiment prediction, as well as sentiment scores for
each sentiment class (Positive, Negative, and Neutral) for the document
and each sentence within it.
See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
for document length limits, maximum batch size, and supported text encoding.
:param documents: The set of documents to process as part of this batch.
If you wish to specify the ID and language on a per-item basis you must
use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
`{"id": "1", "language": "en", "text": "hello world"}`.
:type documents:
list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
:keyword str language: The 2 letter ISO 639-1 representation of language for the
entire batch. For example, use "en" for English; "es" for Spanish etc.
If not set, uses "en" for English as default. Per-document language will
take precedence over whole batch language. See https://aka.ms/talangs for
supported languages in Text Analytics API.
:keyword str model_version: This value indicates which model will
be used for scoring, e.g. "latest", "2019-10-01". If a model-version
is not specified, the API will default to the latest, non-preview version.
:keyword bool show_stats: If set to true, response will contain document level statistics.
:return: The combined list of :class:`~azure.ai.textanalytics.AnalyzeSentimentResult` and
:class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
passed in.
:rtype: list[~azure.ai.textanalytics.AnalyzeSentimentResult,
~azure.ai.textanalytics.DocumentError]
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_analyze_sentiment_async.py
:start-after: [START batch_analyze_sentiment_async]
:end-before: [END batch_analyze_sentiment_async]
:language: python
:dedent: 8
:caption: Analyze sentiment in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
language = language_arg if language_arg is not None else self._default_language
docs = _validate_batch_input(documents, "language", language)
model_version = kwargs.pop("model_version", None)
show_stats = kwargs.pop("show_stats", False)
try:
return await self._client.sentiment(
documents=docs,
model_version=model_version,
show_stats=show_stats,
cls=sentiment_result,
**kwargs
)
except TextAnalyticsErrorException as error:
process_batch_error(error)
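# A minimal usage sketch (illustrative, not part of this file; the endpoint
# and key below are placeholders):
#
#   from azure.core.credentials import AzureKeyCredential
#
#   async def sample():
#       async with TextAnalyticsClient("<endpoint>", AzureKeyCredential("<key>")) as client:
#           results = await client.detect_language(["Hello world"])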
|
[
"noreply@github.com"
] |
qiang-zai.noreply@github.com
|
e236794a2ce83281e678cada00bcf18e030c4fb6
|
300e3e1ceec5940adf54a79f678c2a1a9e66aeb4
|
/cognitive radio/radio.py
|
b84a99259a2077c7d38e76ad599a7e3ee69caace
|
[] |
no_license
|
icopavan/cognitive-radio-3
|
ff136c5361146c4c113b68d5cc73a6030786a24f
|
d3cbb8b60eacab88bdfa05be483d0aee279ec5f4
|
refs/heads/master
| 2021-01-18T14:58:15.809314
| 2014-05-05T20:13:54
| 2014-05-05T20:13:54
| 33,601,393
| 1
| 0
| null | 2015-04-08T10:57:56
| 2015-04-08T10:57:56
| null |
UTF-8
|
Python
| false
| false
| 5,263
|
py
|
from __future__ import division
import numpy as np
from rtl import *
from paudio import *
from FSK import *
from QAM import *
from OFDM import *
from syncronization import *
from receiver import *
from fm_demodulator import Demodulator
from helpers import *
# Used below; imported explicitly in case the star imports above do not
# re-export them (Python 2 era code, hence Queue and the print statements):
import Queue
import threading
import pyaudio
import rtlsdr
# This file tests various parts of my project over an actual RF channel.
#
# 1) I will be using UHF experimental channels. Every broadcast will be
# preceded by morse code with my call sign and a short explanation of
# the transmission.
def genPTT(plen,zlen,fs):
Nz = np.floor(zlen*fs)
Nt = np.floor(plen*fs)
pttsig = np.zeros(Nz)
t=np.r_[0.0:Nt]/fs
pttsig[:Nt] = 0.5*np.sin(2*np.pi*t*2000)
return pttsig
def text2Morse(text,fc,fs,dt):
CODE = {'A': '.-', 'B': '-...', 'C': '-.-.',
'D': '-..', 'E': '.', 'F': '..-.',
'G': '--.', 'H': '....', 'I': '..',
'J': '.---', 'K': '-.-', 'L': '.-..',
'M': '--', 'N': '-.', 'O': '---',
'P': '.--.', 'Q': '--.-', 'R': '.-.',
'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-',
'Y': '-.--', 'Z': '--..',
'0': '-----', '1': '.----', '2': '..---',
'3': '...--', '4': '....-', '5': '.....',
'6': '-....', '7': '--...', '8': '---..',
'9': '----.',
' ': ' ', "'": '.----.', '(': '-.--.-', ')': '-.--.-',
',': '--..--', '-': '-....-', '.': '.-.-.-',
'/': '-..-.', ':': '---...', ';': '-.-.-.',
'?': '..--..', '_': '..--.-'
}
Ndot= 1.0*fs*dt
Ndah = 3*Ndot
sdot = np.sin(2*np.pi*fc*np.r_[0.0:Ndot]/fs)
sdah = np.sin(2*np.pi*fc*np.r_[0.0:Ndah]/fs)
# convert to dit dah
mrs = ""
for char in text:
mrs = mrs + CODE[char.upper()] + "*"
sig = np.zeros(1)
for char in mrs:
if char == " ":
sig = np.concatenate((sig,np.zeros(Ndot*7)))
if char == "*":
sig = np.concatenate((sig,np.zeros(Ndot*3)))
if char == ".":
sig = np.concatenate((sig,sdot,np.zeros(Ndot)))
if char == "-":
sig = np.concatenate((sig,sdah,np.zeros(Ndot)))
return sig
def tx_intro(fs):
    msg = 'KK6KLA: Experimental use.'
tx = genPTT(.5, 1, fs)
tx = np.append(tx, text2Morse(msg, 880, fs, .025))
    tx = np.append(tx, np.zeros(fs*2)) # add two seconds of silence before starting
return tx
def tx_fsk(data, fs, f0, f1, n, symbol_length, sync_width, sync_pulse, start_stop_pulse):
data_size = data.size
symbol_size = fs*symbol_length
sync_size = sync_pulse.size
num_syncs = 1 + data_size // sync_width
f_list = np.linspace(f0, f1, n)
signal = np.zeros(data_size*symbol_size + sync_size*num_syncs)
j = 0
i = 0
while (i < data_size):
signal[i*symbol_size + j*sync_size : i*symbol_size + (j+1)*sync_size] = sync_pulse
j+=1
signal[i*symbol_size + j*sync_size : (i+sync_width)*symbol_size + j*sync_size] = modulateFSK(data[i:i+sync_width], f_list, fs, symbol_length)
i+=sync_width
# the terminating sync pulse
signal[i*symbol_size + j*sync_size : i*symbol_size + (j+1)*sync_size] = sync_pulse
# start-stop pulses
ss_pulse = start_stop_pulse
signal[:ss_pulse.size] = ss_pulse
signal[signal.size-ss_pulse.size:] = ss_pulse
signal = np.append(np.zeros(fs), signal)
signal = np.append(signal, np.zeros(fs))
return signal
def main():
fs_out = 44100.0
fs_in = fs_out #48000.0
channel = 432.079e6
sync_pulse = genSyncPulse2(4000, 1200,t=.02) # make sure t*fs < chunk size (1024)
start_stop_pulse = genSyncPulse2(2400, 1200, t=.02)
radioQ = Queue.Queue()
Qin = Queue.Queue()
Qout = Queue.Queue()
sdr = rtlsdr.RtlSdr()
initRtlSdr(sdr, channel, 2.4e5, 32)
getSamplesAsync(sdr, radioQ)
tx_data = np.random.randint(0, 2, 1000)
transmission = np.append(tx_intro(fs_out), tx_fsk(tx_data, fs_out, 1200, 2400, 2, .01, 200, sync_pulse, start_stop_pulse))
#transmission = tx_fsk(data, fs_out, 1200, 2400, 2, .005, 200, sync_pulse, start_stop_pulse)
p = pyaudio.PyAudio()
din, dout, dusbin, dusbout = audioDevNumbers(p)
tx_t = threading.Thread(target=play_audio, args = (Qout, p, fs_out, dusbout))
tx_t.start()
Qout.put(transmission)
play_t = threading.Thread(target = play_audio, args = (Qin, p, fs_in, dout))
play_t.start()
dem = Demodulator(2.4e5,16000.0, fs_in, 4000.0)
dec = Decoder(1200, 2400, 2, .01, sync_pulse, start_stop_pulse, 200, fs_in)
data = radioQ.get()
audio = dem.fm_demodulate(data)
    Qin.put(audio) # play the incoming signal
while(True):
rx_signal = radioQ.get()
audio = dem.fm_demodulate(rx_signal)
#audio *= 2
        Qin.put(audio) # play the incoming signal
if(dec.process(audio)):
break
rx_data = dec.rec_data
print tx_data[:20]
print rx_data[:20]
print np.sum(np.equal(tx_data, rx_data))/1000
for i in range(400):
if (tx_data[i] != rx_data[i]):
print(i),
p.terminate()
sdr.close()
if __name__ == '__main__':
main()
|
[
"peragwin@gmail.com"
] |
peragwin@gmail.com
|
7094a051df48048e91ca612f6c79782f53113d25
|
903c2f1cb06091f8199bf63d74111fe79dac6f8d
|
/packages&modules/Package2/module_script2.py
|
0686bb4eced75b4c7f63e691fa6ee6557e230770
|
[] |
no_license
|
praneethpeddi/Python-Assignments
|
c8b4d8fa58244b033bb4cdf36f55e42b7e7cf6fc
|
0a224aeae40a4cc55a5a7259ff2360fc61841abd
|
refs/heads/master
| 2023-01-19T07:58:59.144309
| 2020-11-19T01:46:45
| 2020-11-19T01:46:45
| 299,494,119
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
try:
import module2
print("In module 2: import success")
module2.c()
print("function c is executed")
module2.d()
print("function d is executed")
except Exception as var:
print(var)
|
[
"praneethpeddi1995@gmail.com"
] |
praneethpeddi1995@gmail.com
|
22998f536894be455f9fc54583b6c4a9508ff0a0
|
b74aeebd25b39532b51457a1a1e7bf369a9f7ef2
|
/fcdjango/fcdjango_venv/bin/django-admin
|
99e65d705c4233d436acc8ab83178ea13b90bc4e
|
[
"MIT"
] |
permissive
|
djangojeng-e/djangoproejcts
|
c16bffe98cd9b05805ecaa6cf8c2076155235fc9
|
1efc3bc04a4a1bef039c906584cfecf0231c177f
|
refs/heads/master
| 2022-12-22T03:27:58.695116
| 2020-10-16T00:10:57
| 2020-10-16T00:10:57
| 221,873,289
| 0
| 1
|
MIT
| 2022-12-19T03:11:29
| 2019-11-15T07:57:41
|
Python
|
UTF-8
|
Python
| false
| false
| 324
|
#!/home/dkang/documents/dev/djangoproejcts/fcdjango/fcdjango_venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"headfat1218@gmail.com"
] |
headfat1218@gmail.com
|
|
b3cf6c55dcab70f99e1cfda480f07f4b9321806d
|
ec7243cb3d117bf86321707e8bbf55e4389fc66d
|
/source/scrape.py
|
5e818c4c544a3909641173b0a469b71710946b89
|
[] |
no_license
|
datastoreking/number_scraping_lotterysite
|
4a40f82773719d858840ac22f9c2b5144bc75169
|
bc764013bf31c9ee6a57bdcb76b5c3008a2034de
|
refs/heads/main
| 2023-09-02T06:25:44.820165
| 2021-11-01T06:39:20
| 2021-11-01T06:39:20
| 423,362,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,667
|
py
|
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
import csv
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--ignore-ssl-errors')
options.add_argument("--ignore-certificate-errors-spki-list")
driver = webdriver.Chrome("./chromedriver", chrome_options=options)  # create the Chrome driver
driver.set_window_size(960, 640)
total_numbers_group = []
left_numbers_group = []
driver.get("https://www.fdj.fr/jeux-de-tirage/amigo/resultats/")# set url
try:
WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.CLASS_NAME, "swiper-wrapper")))
print('waiting done')
blocks = driver.find_elements(By.CLASS_NAME, 'result-amigo_page')
for block_index in range(len(blocks)):
        row_contents = blocks[block_index].find_elements(By.CLASS_NAME, 'result-amigo_content')
for row_index in range(len(row_contents)):
data_content = row_contents[row_index].find_elements(By.CLASS_NAME, 'result-amigo_item')
left_content = data_content[1].find_elements(By.CLASS_NAME, 'numbers-item')
line_left_number_list = []
total_numbers_list = []
for left_content_index in range(len(left_content)):
                left_numbers = left_content[left_content_index].find_element(By.CLASS_NAME, 'numbers-item_num').get_attribute('innerHTML')
line_left_number_list.append(int(left_numbers))
total_numbers_list.append(int(left_numbers))
right_content = data_content[2].find_elements(By.CLASS_NAME, 'numbers-item')
for right_content_index in range(len(right_content)):
                right_numbers = right_content[right_content_index].find_element(By.CLASS_NAME, 'numbers-item_num').get_attribute('innerHTML')
total_numbers_list.append(int(right_numbers))
total_numbers_list.sort()
line_left_number_list.sort()
print(total_numbers_list)
left_numbers_group.insert(0, line_left_number_list)
total_numbers_group.insert(0, total_numbers_list)
with open('amigo.csv', 'w') as total:
write = csv.writer(total)
write.writerows(total_numbers_group)
with open('amigo_BLUE.csv', 'w') as blue:
write = csv.writer(blue)
write.writerows(left_numbers_group)
except TimeoutException:
print('network error')
driver.close()
|
[
"vladislavcukanov@gmail.com"
] |
vladislavcukanov@gmail.com
|
4a28229ccac1e54b1f8cb5515fe964ea7f8983da
|
f24f9284998e0487de34218e510961d5bbd5547b
|
/setting.py
|
8cdb43afb467d02d767d68a88033db3247c9ec28
|
[] |
no_license
|
HZCKen/plane_game
|
21780d8ae43bcfd6d70b1e43182c6f604ae1e155
|
ee0a0e5cc49602f8ea62a84c7760d6ad6528efd7
|
refs/heads/master
| 2020-03-17T17:26:22.809258
| 2018-07-09T07:53:26
| 2018-07-09T07:53:26
| 133,788,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
class Setting(object):
def __init__(self):
self.screen_width = 900
self.screen_height = 600
self.backgrounColor = (230, 230, 230)
self.ship_speed_factor = 1.5
self.ship_limit = 3
self.bullet_speed_factor = 3
self.bullet_width = 3
self.bullet_height = 15
self.bullet_color = 60, 60, 60
self.bullets_allowed = 3
self.alien_speed_factor = 1
self.flect_drop_speed = 50
        # Direction: 1 moves the fleet right, -1 moves it left
self.fleet_direction = 1
|
[
"13826457912@163.com"
] |
13826457912@163.com
|
744b012e97e4bc5c6f153076f46b9fef6a0756a6
|
b59b44c83c86aac8ebd971c996e2f95486a39cc3
|
/util/arp.py
|
b93c86d36a2df02fc347a8559d863126932e8acd
|
[] |
no_license
|
angelhack-conectus/conectus-user-backend
|
c99d48b94331f250086689518001cd73a3311b51
|
fa56fe826c1283313e6670788db7dfce38d75835
|
refs/heads/master
| 2020-05-30T14:44:27.509078
| 2019-06-04T06:50:16
| 2019-06-04T06:50:16
| 189,798,654
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
import subprocess
import re
import asyncio
# Windows-style MAC address: six hex octets separated by dashes
mac_address = re.compile(
    rb'[0-9a-f]{2}-[0-9a-f]{2}-[0-9a-f]{2}-[0-9a-f]{2}-[0-9a-f]{2}-[0-9a-f]{2}')
async def get_mac_address(ip):
p = await asyncio.create_subprocess_exec(
'arp', '-a', ip,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE, )
stdout, stderr = await p.communicate()
return mac_address.search(stdout).group(0).decode('charmap')
async def get_mac_address_in_thread(ip):
def _arp_request():
p = subprocess.run(['arp', '-a', ip], stdout=subprocess.PIPE)
return p.stdout
loop = asyncio.get_event_loop()
out = await loop.run_in_executor(None, _arp_request)
    try:
        return mac_address.search(out).group(0).decode('charmap')
    except AttributeError:  # search() found no MAC in the arp output
        return '00-00-00-00-00-00'
if __name__ == '__main__':
_loop = asyncio.ProactorEventLoop()
asyncio.set_event_loop(_loop)
# _loop = asyncio.get_event_loop()
print(_loop.run_until_complete(get_mac_address_in_thread('192.168.0.92')))
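    # Added note (not in the original script): the pure-async variant above can
    # be driven the same way, e.g.
    #   print(_loop.run_until_complete(get_mac_address('192.168.0.92')))
    # The regex targets Windows-style `arp -a` output (dash-separated, lowercase).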
|
[
"2_minchul@naver.com"
] |
2_minchul@naver.com
|
56003e100cddd599f5959ab7d12a3d33fd7950b4
|
70d081ab363373aafd60f649d4b216110a5659c9
|
/gpustat/util_test.py
|
cd18d39db2779c7cd2b0f00a77c0b9df74dc51ca
|
[
"MIT"
] |
permissive
|
wookayin/gpustat
|
37f9589f657920402fe37d5baaea93e7241ccd0d
|
e32e3f2cd7d1b08a08fa235efe1f69bc3d769171
|
refs/heads/master
| 2023-08-30T18:55:34.490915
| 2023-08-22T21:51:56
| 2023-08-22T21:52:11
| 56,966,563
| 3,623
| 290
|
MIT
| 2023-09-06T04:23:58
| 2016-04-24T10:46:43
|
Python
|
UTF-8
|
Python
| false
| false
| 771
|
py
|
import sys
import pytest
from gpustat import util
def test_safecall():
def _success():
return 42
def _error():
raise FileNotFoundError("oops")
assert util.safecall(_success, error_value=None) == 42
assert util.safecall(_error, error_value=-1) == -1
with pytest.raises(FileNotFoundError):
        # not caught because exc_types does not match
assert util.safecall(_error, exc_types=ValueError, error_value=-1)
assert util.safecall(_error, error_value=-1,
exc_types=FileNotFoundError) == -1
assert util.safecall(_error, error_value=-1,
exc_types=(FileNotFoundError, OSError)) == -1
if __name__ == '__main__':
sys.exit(pytest.main(["-s", "-v"] + sys.argv))
|
[
"wookayin@gmail.com"
] |
wookayin@gmail.com
|
2e2ad4e2b61111feb6acf532edbbb405d8fe638b
|
a088e9614ac6041fcf30d06e33b53529d3be395f
|
/05函数加强/07-匿名函数-lambda参数之默认参数.py
|
61860ef30af5292af33ef87a551ccc22b5ddcc98
|
[] |
no_license
|
yeshengwei/PythonLearning
|
69dd3f6168b66232de186bd459a7fb6f6d119893
|
97f4bffbe0c58fed708a63d21f73a5967359a78f
|
refs/heads/master
| 2020-08-13T14:38:34.802900
| 2019-11-22T07:24:03
| 2019-11-22T07:24:03
| 214,985,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 64
|
py
|
func1 = lambda x, y, z=100: x + y + z
print(func1(1, 10, 1000))
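# Added illustration (not in the original lesson file): omitting z falls back
# to the default value of 100.
print(func1(1, 10))  # 1 + 10 + 100 -> 111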
|
[
"yeshengwei@live.com"
] |
yeshengwei@live.com
|
85a43c44139b7e923b000451d7e3a74f01635743
|
2c580e0b03d5d87bf09a4a46c160f13233163772
|
/tests/test_backends.py
|
6c546612bed32db104e21da8366634e8fc701dbc
|
[
"MIT"
] |
permissive
|
greglever/django_microsoft_auth
|
cefea785dbc7c53bf57cae164bdc913c49476086
|
25ac0195ac7a052034359211b70c770c905b549f
|
refs/heads/master
| 2020-04-03T20:40:50.217761
| 2018-10-30T10:07:56
| 2018-10-30T10:07:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,350
|
py
|
from unittest.mock import Mock, patch
from django.contrib.auth import authenticate, get_user_model
from django.test import RequestFactory, override_settings
from microsoft_auth.models import MicrosoftAccount, XboxLiveAccount
from microsoft_auth.conf import LOGIN_TYPE_XBL
from . import TestCase
CODE = 'test_code'
TOKEN = {'access_token': 'test_token', 'scope': ['test']}
XBOX_TOKEN = {'Token': 'test_token'}
EMAIL = 'some.email@example.com'
FIRST = 'Test'
LAST = 'User'
MISSING_ID = 'some_missing_id'
GAMERTAG = 'Some Gamertag'
@override_settings(
AUTHENTICATION_BACKENDS=[
'microsoft_auth.backends.MicrosoftAuthenticationBackend',
'django.contrib.auth.backends.ModelBackend'
],
)
class MicrosoftBackendsTests(TestCase):
def setUp(self):
User = get_user_model()
self.factory = RequestFactory()
self.request = self.factory.get('/')
self.linked_account = MicrosoftAccount.objects.create(
microsoft_id='test_id'
)
self.linked_account.user = User.objects.create(username='user1')
self.linked_account.save()
self.unlinked_account = MicrosoftAccount.objects.create(
microsoft_id='missing_id'
)
self.unlinked_user = User.objects.create(
username='user2',
email='test@example.com',
)
def test_authenticate_no_code(self):
user = authenticate(self.request)
self.assertIs(user, None)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_invalid_token(self, mock_client):
mock_auth = Mock()
mock_auth.fetch_token.return_value = {}
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.assertTrue(mock_auth.fetch_token.called)
self.assertIs(user, None)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_invalid_scopes(self, mock_client):
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.valid_scopes.return_value = False
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.assertTrue(mock_auth.fetch_token.called)
self.assertTrue(mock_auth.valid_scopes.called)
self.assertIs(user, None)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_invalid_profile(self, mock_client):
mock_response = Mock()
mock_response.status_code = 400
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.valid_scopes.return_value = True
mock_auth.get.return_value = mock_response
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.assertTrue(mock_auth.fetch_token.called)
self.assertTrue(mock_auth.valid_scopes.called)
self.assertIs(user, None)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_errored_profile(self, mock_client):
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {'error': None}
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.valid_scopes.return_value = True
mock_auth.get.return_value = mock_response
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.assertTrue(mock_auth.fetch_token.called)
self.assertTrue(mock_auth.valid_scopes.called)
self.assertIs(user, None)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_existing_user(self, mock_client):
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
'id': self.linked_account.microsoft_id,
}
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.valid_scopes.return_value = True
mock_auth.get.return_value = mock_response
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.assertIsNot(user, None)
self.assertEqual(user.id, self.linked_account.user.id)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_existing_user_missing_user(self, mock_client):
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
'id': self.unlinked_account.microsoft_id,
'userPrincipalName': EMAIL,
}
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.valid_scopes.return_value = True
mock_auth.get.return_value = mock_response
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.unlinked_account.refresh_from_db()
self.assertIsNot(user, None)
self.assertEqual(user.id, self.unlinked_account.user.id)
self.assertEqual(EMAIL, self.unlinked_account.user.email)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_existing_user_no_user_with_name(self, mock_client):
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
'id': self.unlinked_account.microsoft_id,
'userPrincipalName': EMAIL,
'givenName': FIRST,
'surname': LAST,
}
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.valid_scopes.return_value = True
mock_auth.get.return_value = mock_response
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.unlinked_account.refresh_from_db()
self.assertIsNot(user, None)
self.assertEqual(user.id, self.unlinked_account.user.id)
self.assertEqual(EMAIL, self.unlinked_account.user.email)
self.assertEqual(FIRST, self.unlinked_account.user.first_name)
self.assertEqual(LAST, self.unlinked_account.user.last_name)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_existing_user_unlinked_user(self, mock_client):
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
'id': self.unlinked_account.microsoft_id,
'userPrincipalName': self.unlinked_user.email,
}
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.valid_scopes.return_value = True
mock_auth.get.return_value = mock_response
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.unlinked_account.refresh_from_db()
self.assertIsNot(user, None)
self.assertEqual(user.id, self.unlinked_account.user.id)
self.assertEqual(
self.unlinked_user.id,
self.unlinked_account.user.id)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_existing_user_missing_name(self, mock_client):
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
'id': self.unlinked_account.microsoft_id,
'userPrincipalName': self.unlinked_user.email,
'givenName': FIRST,
'surname': LAST,
}
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.valid_scopes.return_value = True
mock_auth.get.return_value = mock_response
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.unlinked_user.refresh_from_db()
self.assertIsNot(user, None)
self.assertEqual(user.id, self.unlinked_user.id)
self.assertEqual(FIRST, self.unlinked_user.first_name)
self.assertEqual(LAST, self.unlinked_user.last_name)
@override_settings(MICROSOFT_AUTH_AUTO_CREATE=False)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_no_autocreate(self, mock_client):
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
'id': MISSING_ID,
}
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.valid_scopes.return_value = True
mock_auth.get.return_value = mock_response
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.assertIs(user, None)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_autocreate(self, mock_client):
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
'id': MISSING_ID,
'userPrincipalName': EMAIL,
}
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.valid_scopes.return_value = True
mock_auth.get.return_value = mock_response
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.assertIsNot(user, None)
self.assertEqual(EMAIL, user.email)
self.assertEqual(MISSING_ID, user.microsoft_account.microsoft_id)
@override_settings(
AUTHENTICATION_BACKENDS=[
'microsoft_auth.backends.MicrosoftAuthenticationBackend',
'django.contrib.auth.backends.ModelBackend'
],
MICROSOFT_AUTH_LOGIN_TYPE=LOGIN_TYPE_XBL
)
class XboxLiveBackendsTests(TestCase):
def setUp(self):
User = get_user_model()
self.factory = RequestFactory()
self.request = self.factory.get('/')
self.linked_account = XboxLiveAccount.objects.create(
xbox_id='test_id',
gamertag='test_gamertag',
)
self.linked_account.user = User.objects.create(username='user1')
self.linked_account.save()
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_bad_xbox_token(self, mock_client):
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.fetch_xbox_token.return_value = {}
mock_auth.valid_scopes.return_value = True
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.assertFalse(mock_auth.get_xbox_profile.called)
self.assertIs(user, None)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_existing_user(self, mock_client):
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.fetch_xbox_token.return_value = XBOX_TOKEN
mock_auth.valid_scopes.return_value = True
mock_auth.get_xbox_profile.return_value = {
'xid': self.linked_account.xbox_id,
'gtg': self.linked_account.gamertag,
}
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.assertIsNot(user, None)
self.assertEqual(user.id, self.linked_account.user.id)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_existing_user_new_gamertag(self, mock_client):
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.fetch_xbox_token.return_value = XBOX_TOKEN
mock_auth.valid_scopes.return_value = True
mock_auth.get_xbox_profile.return_value = {
'xid': self.linked_account.xbox_id,
'gtg': GAMERTAG,
}
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.linked_account.refresh_from_db()
self.assertIsNot(user, None)
self.assertEqual(user.id, self.linked_account.user.id)
self.assertEqual(GAMERTAG, self.linked_account.gamertag)
@override_settings(MICROSOFT_AUTH_AUTO_CREATE=False)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_no_autocreate(self, mock_client):
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.fetch_xbox_token.return_value = XBOX_TOKEN
mock_auth.valid_scopes.return_value = True
mock_auth.get_xbox_profile.return_value = {
'xid': MISSING_ID,
'gtg': GAMERTAG,
}
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.assertIs(user, None)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_autocreate(self, mock_client):
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.fetch_xbox_token.return_value = XBOX_TOKEN
mock_auth.valid_scopes.return_value = True
mock_auth.get_xbox_profile.return_value = {
'xid': MISSING_ID,
'gtg': GAMERTAG,
}
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.assertIsNot(user, None)
self.assertEqual(GAMERTAG, user.username)
self.assertEqual(MISSING_ID, user.xbox_live_account.xbox_id)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_no_sync_username(self, mock_client):
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.fetch_xbox_token.return_value = XBOX_TOKEN
mock_auth.valid_scopes.return_value = True
mock_auth.get_xbox_profile.return_value = {
'xid': self.linked_account.xbox_id,
'gtg': GAMERTAG,
}
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.linked_account.refresh_from_db()
self.assertIsNot(user, None)
self.assertEqual(user.id, self.linked_account.user.id)
self.assertNotEqual(GAMERTAG, self.linked_account.user.username)
@override_settings(MICROSOFT_AUTH_XBL_SYNC_USERNAME=True)
@patch('microsoft_auth.backends.MicrosoftClient')
def test_authenticate_sync_username(self, mock_client):
mock_auth = Mock()
mock_auth.fetch_token.return_value = TOKEN
mock_auth.fetch_xbox_token.return_value = XBOX_TOKEN
mock_auth.valid_scopes.return_value = True
mock_auth.get_xbox_profile.return_value = {
'xid': self.linked_account.xbox_id,
'gtg': GAMERTAG,
}
mock_client.return_value = mock_auth
user = authenticate(self.request, code=CODE)
self.linked_account.refresh_from_db()
self.linked_account.user.refresh_from_db()
self.assertIsNot(user, None)
self.assertEqual(user.id, self.linked_account.user.id)
self.assertEqual(GAMERTAG, self.linked_account.user.username)
|
[
"cbailey@mort.is"
] |
cbailey@mort.is
|
09da145aa374994d8a34de3e6af14b7df5557cb3
|
a6f96f766ec5d71bf38b281e47dc9c8846017266
|
/twistedbot/plugins/core/chat_follow.py
|
2c59b084e6e7d905719a8cb30cf5382be7e6db57
|
[
"MIT"
] |
permissive
|
jonasrk/TwistedBot
|
7b7a2de0c71256692aef0186cbec07e0bedb4b59
|
71448f81f8075774ab14677510e41e9af1d6e846
|
refs/heads/master
| 2021-01-18T15:03:52.335435
| 2013-07-06T21:36:59
| 2013-07-06T21:36:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
from twistedbot.plugins.base import PluginChatBase
from twistedbot.behavior_tree import FollowPlayer
class Follow(PluginChatBase):
@property
def command_verb(self):
return "follow"
@property
def help(self):
return "bot starts following you"
def command(self, sender, command, args):
self.world.bot.behavior_tree.new_command(FollowPlayer)
plugin = Follow
|
[
"lehner.lukas@gmail.com"
] |
lehner.lukas@gmail.com
|
0892b9b290e2a4186ff79137b2bf96b7e02217d4
|
ed471e0bdda3a8c2b1c31ea4e87942b30cdf19dd
|
/expenses/wsgi.py
|
6126c0706ac9ecb5155a5046fc4abb125161054f
|
[] |
no_license
|
blinduck/expenses-server
|
430dbfc3ed3466d172d3c35db9168014ea984fc8
|
16704a57d9cc5e96f2bb9c218cb399b285240ccb
|
refs/heads/master
| 2023-02-11T17:41:16.887161
| 2020-09-20T13:20:11
| 2020-09-20T13:20:11
| 141,822,324
| 0
| 0
| null | 2023-02-02T02:18:05
| 2018-07-21T14:56:08
|
Python
|
UTF-8
|
Python
| false
| false
| 394
|
py
|
"""
WSGI config for expenses project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "expenses.settings")
application = get_wsgi_application()
|
[
"blinduck@gmail.com"
] |
blinduck@gmail.com
|
3ca963f46a088f2c62e5b44b7f948bfda20e7288
|
7b437e095068fb3f615203e24b3af5c212162c0d
|
/enaml/qt/qt_image_view.py
|
333b3323dd96a76a7210114a6b6a475bdecd2f1d
|
[
"BSD-3-Clause"
] |
permissive
|
ContinuumIO/enaml
|
d8200f97946e5139323d22fba32c05231c2b342a
|
15c20b035a73187e8e66fa20a43c3a4372d008bd
|
refs/heads/master
| 2023-06-26T16:16:56.291781
| 2013-03-26T21:13:52
| 2013-03-26T21:13:52
| 9,047,832
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,105
|
py
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from PyQt4.QtGui import QFrame, QPainter, QPixmap
from atom.api import Typed
from enaml.widgets.image_view import ProxyImageView
from .q_resource_helpers import get_cached_qimage
from .qt_constraints_widget import size_hint_guard
from .qt_control import QtControl
class QImageView(QFrame):
""" A custom QFrame that will paint a QPixmap as an image. The
api is similar to QLabel, but with a few more options to control
how the image scales.
"""
def __init__(self, parent=None):
""" Initialize a QImageView.
Parameters
----------
parent : QWidget or None, optional
The parent widget of this image viewer.
"""
super(QImageView, self).__init__(parent)
self._pixmap = None
self._scaled_contents = False
self._allow_upscaling = False
self._preserve_aspect_ratio = False
#--------------------------------------------------------------------------
# Private API
#--------------------------------------------------------------------------
def paintEvent(self, event):
""" A custom paint event handler which draws the image according
to the current size constraints.
"""
pixmap = self._pixmap
if pixmap is None:
return
pm_size = pixmap.size()
pm_width = pm_size.width()
pm_height = pm_size.height()
if pm_width == 0 or pm_height == 0:
return
# Use the widget rect instead of the event rect so the image
# paints properly in a scroll area where it may be clipped.
evt_rect = self.rect()
evt_x = evt_rect.x()
evt_y = evt_rect.y()
evt_width = evt_rect.width()
evt_height = evt_rect.height()
if not self._scaled_contents:
# If the image isn't scaled, it is centered if possible.
# Otherwise, it's painted at the origin and clipped.
paint_x = max(0, int((evt_width / 2. - pm_width / 2.) + evt_x))
paint_y = max(0, int((evt_height / 2. - pm_height / 2.) + evt_y))
paint_width = pm_width
paint_height = pm_height
else:
            # If the image *is* scaled, its scaled size depends on the
# size of the paint area as well as the other scaling flags.
if self._preserve_aspect_ratio:
pm_ratio = float(pm_width) / pm_height
evt_ratio = float(evt_width) / evt_height
if evt_ratio >= pm_ratio:
if self._allow_upscaling:
paint_height = evt_height
else:
paint_height = min(pm_height, evt_height)
paint_width = int(paint_height * pm_ratio)
else:
if self._allow_upscaling:
paint_width = evt_width
else:
paint_width = min(pm_width, evt_width)
paint_height = int(paint_width / pm_ratio)
else:
if self._allow_upscaling:
paint_height = evt_height
paint_width = evt_width
else:
paint_height = min(pm_height, evt_height)
paint_width = min(pm_width, evt_width)
# In all cases of scaling, we know that the scaled image is
# no larger than the paint area, and can thus be centered.
paint_x = int((evt_width / 2. - paint_width / 2.) + evt_x)
paint_y = int((evt_height / 2. - paint_height / 2.) + evt_y)
# Finally, draw the pixmap into the calculated rect.
painter = QPainter(self)
painter.setRenderHint(QPainter.SmoothPixmapTransform)
painter.drawPixmap(paint_x, paint_y, paint_width, paint_height, pixmap)
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def sizeHint(self):
""" Returns a appropriate size hint for the image based on the
underlying QPixmap.
"""
pixmap = self._pixmap
if pixmap is not None:
return pixmap.size()
return super(QImageView, self).sizeHint()
def pixmap(self):
""" Returns the underlying pixmap for the image view.
"""
return self._pixmap
def setPixmap(self, pixmap):
""" Set the pixmap to use as the image in the widget.
Parameters
----------
        pixmap : QPixmap
The QPixmap to use as the image in the widget.
"""
self._pixmap = pixmap
self.update()
def scaledContents(self):
""" Returns whether or not the contents scale with the widget
size.
"""
return self._scaled_contents
def setScaledContents(self, scaled):
""" Set whether the contents scale with the widget size.
Parameters
----------
scaled : bool
If True, the image will be scaled to fit the widget size,
subject to the other sizing constraints in place. If False,
the image will not scale and will be clipped as required.
"""
self._scaled_contents = scaled
self.update()
def allowUpscaling(self):
""" Returns whether or not the image can be scaled greater than
its natural size.
"""
return self._allow_upscaling
def setAllowUpscaling(self, allow):
""" Set whether or not to allow the image to be scaled beyond
its natural size.
Parameters
----------
allow : bool
            If True, then the image may be scaled larger than its
            natural size if it is scaled to fit. If False, the image will
never be scaled larger than its natural size. In either
case, the image may be scaled smaller.
"""
self._allow_upscaling = allow
self.update()
def preserveAspectRatio(self):
""" Returns whether or not the aspect ratio of the image is
maintained during a resize.
"""
return self._preserve_aspect_ratio
def setPreserveAspectRatio(self, preserve):
""" Set whether or not to preserve the image aspect ratio.
Parameters
----------
preserve : bool
If True then the aspect ratio of the image will be preserved
if it is scaled to fit. Otherwise, the aspect ratio will be
ignored.
"""
self._preserve_aspect_ratio = preserve
self.update()
class QtImageView(QtControl, ProxyImageView):
""" A Qt implementation of an Enaml ProxyImageView.
"""
#: A reference to the widget created by the proxy.
widget = Typed(QImageView)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying QImageView widget.
"""
self.widget = QImageView(self.parent_widget())
def init_widget(self):
""" Initialize the underlying control.
"""
super(QtImageView, self).init_widget()
d = self.declaration
self.set_image(d.image)
self.set_scale_to_fit(d.scale_to_fit)
self.set_allow_upscaling(d.allow_upscaling)
self.set_preserve_aspect_ratio(d.preserve_aspect_ratio)
#--------------------------------------------------------------------------
# Widget Update Methods
#--------------------------------------------------------------------------
def set_image(self, image, sh_guard=True):
""" Set the image on the underlying widget.
"""
qpixmap = None
if image:
qimage = get_cached_qimage(image)
qpixmap = QPixmap.fromImage(qimage)
if sh_guard:
with size_hint_guard(self):
self.widget.setPixmap(qpixmap)
else:
self.widget.setPixmap(qpixmap)
def set_scale_to_fit(self, scale):
""" Sets whether or not the image scales with the underlying
control.
"""
self.widget.setScaledContents(scale)
def set_allow_upscaling(self, allow):
""" Sets whether or not the image will scale beyond its natural
size.
"""
self.widget.setAllowUpscaling(allow)
def set_preserve_aspect_ratio(self, preserve):
""" Sets whether or not to preserve the aspect ratio of the
image when scaling.
"""
self.widget.setPreserveAspectRatio(preserve)
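# --- Added usage sketch (not part of the original module) ---
# A minimal, hypothetical demo of the scaling flags described above; it
# assumes PyQt4 is installed and that an 'image.png' file exists locally.
if __name__ == '__main__':
    import sys
    from PyQt4.QtGui import QApplication
    app = QApplication(sys.argv)
    view = QImageView()
    view.setPixmap(QPixmap('image.png'))
    view.setScaledContents(True)        # scale the image with the widget
    view.setAllowUpscaling(False)       # but never beyond its natural size
    view.setPreserveAspectRatio(True)   # keep width/height proportions
    view.show()
    sys.exit(app.exec_())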
|
[
"sccolbert@gmail.com"
] |
sccolbert@gmail.com
|
b9048c10a2f1cc5c89401780f2865d98ca74a65c
|
40bf222b4515b16e1c3c069d84679c588667aea1
|
/yoyi_dataset_encode.py
|
fbe82e1283850cd71ed7eb1c9bd57d93324f9009
|
[] |
no_license
|
wty9391/GMM
|
1e6795a2a115632a208ae0172ce1b929beafc64e
|
776ddf57aedefa1483dd2653b863a810e83d099f
|
refs/heads/master
| 2022-05-16T16:38:21.104871
| 2022-03-11T04:39:52
| 2022-03-11T04:39:52
| 237,732,893
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,101
|
py
|
import sys
import pickle
import numpy as np
from scipy.sparse import csr_matrix, vstack
import myutil.encoder as encoder
import myutil.truthful_bidder as truthful_bidder
# ../make-yoyi-data/original-data/sample/train.yzx.txt ../make-yoyi-data/original-data/sample/test.yzx.txt ./result/yoyi_sample
if len(sys.argv) < 4:
    print('Usage: .py train_log_path test_log_path result_root_path')
exit(-1)
read_batch_size = 1e6
f_train_log = open(sys.argv[1], 'r', encoding="utf-8")
f_test_log = open(sys.argv[2], 'r', encoding="utf-8")
num_features = 1785106
pay_scale = 1
if sys.argv[3].find("yoyi_sample") > 0:
num_features = 3036119
pay_scale = 1e-3
yoyi = encoder.Encoder_yoyi(num_features, pay_scale)
X_train_raw = []
X_train = csr_matrix((0, num_features), dtype=np.int8)
Y_train = np.zeros((0, 1), dtype=np.int8)
Z_train = np.zeros((0, 1), dtype=np.int16)
X_test_raw = []
X_test = csr_matrix((0, num_features), dtype=np.int8)
Y_test = np.zeros((0, 1), dtype=np.int8)
Z_test = np.zeros((0, 1), dtype=np.int16)
count = 0
f_train_log.seek(0)
for line in f_train_log:
X_train_raw.append(line)
count += 1
if count % read_batch_size == 0:
X_train = vstack((X_train, yoyi.encode(X_train_raw)))
Y_train = np.vstack((Y_train, yoyi.get_col(X_train_raw, "click")))
Z_train = np.vstack((Z_train, yoyi.get_col(X_train_raw, "payprice")))
X_train_raw = []
if X_train_raw:
X_train = vstack((X_train, yoyi.encode(X_train_raw)))
Y_train = np.vstack((Y_train, yoyi.get_col(X_train_raw, "click")))
Z_train = np.vstack((Z_train, yoyi.get_col(X_train_raw, "payprice")))
X_train_raw = []
count = 0
f_test_log.seek(0)
for line in f_test_log:
X_test_raw.append(line)
count += 1
if count % read_batch_size == 0:
X_test = vstack((X_test, yoyi.encode(X_test_raw)))
Y_test = np.vstack((Y_test, yoyi.get_col(X_test_raw, "click")))
Z_test = np.vstack((Z_test, yoyi.get_col(X_test_raw, "payprice")))
X_test_raw = []
if X_test_raw:
X_test = vstack((X_test, yoyi.encode(X_test_raw)))
Y_test = np.vstack((Y_test, yoyi.get_col(X_test_raw, "click")))
Z_test = np.vstack((Z_test, yoyi.get_col(X_test_raw, "payprice")))
X_test_raw = []
# The yoyi dataset has many useless (all-zero) feature columns; remove them to speed up learning
feature_count = X_test.sum(axis=0)
nonzero = np.array((feature_count != 0).tolist()[0])
X_train = (X_train.tocsc()[:, nonzero]).tocsr()
X_test = (X_test.tocsc()[:, nonzero]).tocsr()
bidder = truthful_bidder.Truthful_bidder()
bidder.fit(X_train, Y_train, Z_train)
bidder.evaluate(X_test, Y_test)
pickle.dump(X_train, open(sys.argv[3]+'/x_train', 'wb'))
pickle.dump(Y_train, open(sys.argv[3]+'/y_train', 'wb'))
pickle.dump(Z_train, open(sys.argv[3]+'/z_train', 'wb'))
pickle.dump(X_test, open(sys.argv[3]+'/x_test', 'wb'))
pickle.dump(Y_test, open(sys.argv[3]+'/y_test', 'wb'))
pickle.dump(Z_test, open(sys.argv[3]+'/z_test', 'wb'))
pickle.dump(bidder, open(sys.argv[3]+'/truthful_bidder', 'wb'))
f_train_log.close()
f_test_log.close()
|
[
"272076302@qq.com"
] |
272076302@qq.com
|
0093da243e6cad910e110757cd26a6bd4aba396b
|
20f2ce3642ff52dce0c8eba162ce8b017dd5683a
|
/이것이 코딩테스트다_python/구현/2. 시각.py
|
82a705e07f7d0ac1ac21662ce58992e5d316e942
|
[] |
no_license
|
HyoungSunChoi/Coding-Test
|
90a3b80c878dac8916827ffd2d8936fbb98cc92e
|
5f92966d49201e12046d1b0c23f941e5ebb3d59c
|
refs/heads/master
| 2023-04-22T06:26:02.402969
| 2021-05-07T05:44:21
| 2021-05-07T05:44:21
| 320,179,730
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
'''
Given an integer N, write a program that counts every time from
00:00:00 to N:59:59 in which the digit 3 appears at least once.
'''
n = int(input())
cnt = 0
for i in range(n + 1):
    for j in range(60):
        for k in range(60):
            res = str(i) + str(j) + str(k)
            if '3' in res:
                cnt += 1
print(cnt)
|
[
"noreply@github.com"
] |
HyoungSunChoi.noreply@github.com
|
34bdcf45336ac9fad8e79e864eb308065846a1c8
|
1e331aa934cf8a4d08402c6374f4c8dcfef9fd9d
|
/Cliff/main.py
|
6049f4139254bf6d208aceb2319e6eeca17d5aec
|
[] |
no_license
|
19LukaModric/Python-Test
|
da7383e1dfcb49e6a034c4e72d3cac2af1f31e13
|
4ace577cb9246d54a5c7c343c8173ae36ae8d145
|
refs/heads/master
| 2020-12-30T17:10:45.513854
| 2017-05-19T06:31:47
| 2017-05-19T06:31:47
| 91,060,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 946
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import logging
import sys
from cliff.app import App
from cliff.commandmanager import CommandManager
class DemoApp(App):
log = logging.getLogger(__name__)
def __init__(self):
        super(DemoApp, self).__init__(
            description='cliff demo app',
            version='0.1',
            command_manager=CommandManager('cliff.demo'),
        )
def initialize_app(self, argv):
self.log.debug('initialize_app')
def prepare_to_run_command(self, cmd):
self.log.debug('prepare_to_run_command %s',
cmd.__class__.__name__)
def clean_up(self, cmd, result, err):
self.log.debug('clean_up %s', cmd.__class__.__name__)
if err:
self.log.debug('got an error: %s', err)
def main(argv=sys.argv[1:]):
myapp = DemoApp()
return myapp.run(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
[
"cheben2014@outlook.com"
] |
cheben2014@outlook.com
|
a20d4050a3ca2eb9cf7b0fc5957e562aa85b3bfc
|
814ed222a37ff1bd19e6cc2eaecc6985916bf9e4
|
/getliquor.py
|
8a2ede91caee13c3a2e8fc5fba272cc27ef876a8
|
[] |
no_license
|
parduhne/RecipeRetriever
|
50bd2d5f7f4b40ad364534f36a132c68dfd1c9a3
|
fd54e128d236df40074131f96dea064aa7972c60
|
refs/heads/master
| 2022-12-20T15:49:35.338764
| 2020-05-10T21:53:22
| 2020-05-10T21:53:22
| 231,953,781
| 0
| 0
| null | 2022-12-11T19:24:06
| 2020-01-05T17:42:44
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 2,777
|
py
|
# Import libraries
import requests
import urllib.request
import time
from bs4 import BeautifulSoup
base_url = 'https://www.liquor.com'
buffer = []
def recursiveFoo(url):
# Connect to the URL
response = requests.get(url)
    # Parse HTML and save to a BeautifulSoup object
soup = BeautifulSoup(response.text, "html.parser")
div_tags = soup.findAll('div')
if(soup.findAll('div',attrs={"class":"measure"})):
# print(url)
        getIngredients(url)
return 1
a_tags = soup.findAll('a')
temp = ' '
for i in a_tags:
href_string = i['href']
if(href_string.find('recipes') != -1 and href_string.find('mosaic') == -1 and href_string.find('page') == -1 and href_string.find('liquor') != -1):
if(temp != href_string):
temp = href_string
recursiveFoo(href_string)
elif(href_string.find('page') != -1):
if(int(url.rsplit('/')[-2]) == 47):
print('\nPage '+url.rsplit('/')[-2]+" Done!\n")
# writeBuffer()
return -1
elif(int(url.rsplit('/')[-2]) == (int(href_string.rsplit('/')[-2]) - 1)):
print('Page '+url.rsplit('/')[-2]+" Done!")
if(recursiveFoo(href_string)==-1):
return -1
def getIngredients(url):
response = requests.get(url)
    # Parse HTML and save to a BeautifulSoup object
file = open("recipes.txt",'a+')
file.write("\n" + url.rsplit('/')[-2] + "{\n")
# recipeString = "\n" + url.rsplit('/')[-2] + "{\n"
measures = []
ingred = []
soup = BeautifulSoup(response.text, "html.parser")
for i in soup.findAll('div',attrs={"class":"measure"}):
numb = i.text
numb = numb.replace('\n','')
numb = numb.replace('\t','')
numb = numb.replace('\xa0',' ')
measures.append(numb)
for i in soup.findAll('div',attrs={"class":"x-recipe-ingredient"}):
numb = i.text
numb = numb.replace('\n','')
numb = numb.replace('\t','')
ingred.append(numb)
for i in range(len(ingred)):
file.write(" " + measures[i]+" "+ingred[i]+"\n")
# recipeString = recipeString + " " + measures[i]+" "+ingred[i]+"\n"
# recipeString = recipeString + "}\n"
# buffer.append(recipeString)
file.write("}\n")
file.close()
time.sleep(1)
def writeBuffer():
file = open("recipes.txt",'a+')
for recipe in buffer:
file.write(recipe)
file.close()
# Set the URL you want to webscrape from
start_url = 'https://www.liquor.com/recipes/page/1/?'
recursiveFoo(start_url)
print("All done: check for recipes.txt in the folder you ran this in")
|
[
"noreply@github.com"
] |
parduhne.noreply@github.com
|
55c65f3586112411d03526d841b82394469f0b13
|
bb6a14c452b826a7fc3385273a82fa7f3db49139
|
/Financial Engineering/FE_HW2.py
|
12717d521bc9feec03018856a2253db3392921eb
|
[] |
no_license
|
howbonoh/Some-works-back-in-school
|
a6ef60c04737b8da1f8b5c35c37cb4a5b40f0b94
|
37304b7ad4cba7091edc2f5b5693cabe9a82c027
|
refs/heads/master
| 2020-08-01T17:36:37.016167
| 2019-10-01T06:40:23
| 2019-10-01T06:40:23
| 211,062,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,712
|
py
|
# coding: utf-8
# In[1]:
import pandas as pd
import datetime
from dateutil.relativedelta import relativedelta
# In[2]:
def actual_actual(yieldOfMaturity,couponRate,settlement,maturity):
maturity = pd.to_datetime(maturity, format='%Y-%m-%d')
settlement = pd.to_datetime(settlement, format='%Y-%m-%d')
coupon = 100*couponRate/2
temp = []
temp.append(maturity)
flag = 0
while temp[flag] > settlement:
temp.append(temp[flag] - relativedelta(months=6))
flag += 1
omega = (temp[flag-1] - settlement) / (temp[flag-1] - temp[flag])
accruedInterest = coupon*(1 - omega)
dirty = 100*(1+yieldOfMaturity/2)**((1-flag-omega))
for i in range(flag):
dirty += coupon*(1+yieldOfMaturity/2)**((-omega-i))
print('Dirty Price:' + str(dirty))
print('Clean Price:' + str(dirty - accruedInterest))
# In[3]:
def thirty_360(yieldOfMaturity,couponRate,settlement,maturity):
maturity = pd.to_datetime(maturity, format='%Y-%m-%d')
settlement = pd.to_datetime(settlement, format='%Y-%m-%d')
coupon = 100*couponRate/2
temp = []
temp.append(maturity)
flag = 0
while temp[flag] > settlement:
temp.append(temp[flag] - relativedelta(months=6))
flag += 1
omega = 30*(temp[flag-1].month - settlement.month)/180
accruedInterest = coupon*(1-omega)
dirty = 100*(1+yieldOfMaturity/2)**((1-flag-omega))
for i in range(flag):
dirty += coupon*(1+yieldOfMaturity/2)**((-omega-i))
print('Dirty Price:' + str(dirty))
print('Clean Price:' + str(dirty - accruedInterest))
# In[4]:
actual_actual(0.03,0.1,'1993-7-1','1995-3-1')
# In[5]:
thirty_360(0.03,0.1,'1993-7-1','1995-3-1')
# In[ ]:
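# Note added for clarity: actual_actual derives omega (the unelapsed fraction
# of the current coupon period) from actual day counts between coupon dates,
# while thirty_360 approximates it on a 30-day-month basis (180 days per
# semiannual period); both then discount each semiannual coupon at y/2.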
|
[
"noreply@github.com"
] |
howbonoh.noreply@github.com
|
b20e7ab119cda61531f2177a8822de09c6c8933f
|
198c1b47dc66e6775ecbd9533a109a4896c830b4
|
/flaskr/myapp/Connection/MysqlConn.py
|
7c189f6706c83d51ff2463f1f7bfefdf71f58f10
|
[] |
no_license
|
qianjiangchao1992/webFlask
|
24eefffbd612119731e71c25d3cf719d067ef285
|
ebb7ff6bc33ff9c302bbeb07bc531fc7d37c794e
|
refs/heads/master
| 2020-09-27T09:35:36.062184
| 2019-12-14T10:49:36
| 2019-12-14T10:49:36
| 226,486,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,523
|
py
|
import time
import pymysql
from configparser import ConfigParser
from queue import Queue
import threading
# Decorator that measures a function's execution time
def time_calculate(f):
def wrapper(*args, **kwargs):
start_time = time.time()
res = f(*args, **kwargs)
end_time = time.time()
print("**********{}程序计算完成,所花时间为{}秒***********".format(f.__name__, round(end_time - start_time, 2)))
return res
return wrapper
class MysqlConn:
__v = None
_instance_lock = threading.Lock()
def __init__(self, name='BDY', max_conn=10):
self.cf = ConfigParser()
self.cf.read(r"C:\Users\Administrator\PycharmProjects\webFlask\flaskr\myapp\Connection\MYSQL.ini")
self.host = self.cf.get(name, 'HOST')
self.port = self.cf.getint(name, 'PORT')
self.user = self.cf.get(name, 'USERNAME')
self.password = self.cf.get(name, 'PASSWORD')
self.database = self.cf.get(name, 'DATABASE')
self.charset = self.cf.get(name, 'CHARSET')
self.max_conn = max_conn
self.pool = Queue(max_conn)
for i in range(self.max_conn):
try:
conn = pymysql.connect(host=self.host, port=self.port, user=self.user,
password=self.password, database=self.database, charset=self.charset)
conn.autocommit(True)
self.pool.put(conn)
except Exception as e:
raise IOError(e)
@classmethod
def get_instance(cls, *args, **kwargs):
"""
获取实例
type: object
"""
if cls.__v:
return cls.__v
else:
with cls._instance_lock:
cls.__v = cls(*args, **kwargs)
return cls.__v
@time_calculate
def exec_sql(self, sql, operation=None) -> None or int:
"""
执行 Delete,Update,Insert
type: object
"""
response = None
conn = self.pool.get()
cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
try:
response = cursor.execute(sql, operation) if operation else cursor.execute(sql)
except Exception as e:
print(e)
finally:
cursor.close()
self.pool.put(conn)
return response
@time_calculate
def exec_sql_fetch(self, sql: str, operation=None) -> None or list:
"""
执行Select,返回结果集
type: object
"""
data = None
response = None
conn = self.pool.get()
cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
try:
response = cursor.execute(sql, operation) if operation else cursor.execute(sql)
data = cursor.fetchall()
except Exception as e:
print(e)
finally:
cursor.close()
self.pool.put(conn)
return response, data
@time_calculate
def exec_sql_many(self, sql: str, operation=None) -> None or list:
"""
执行批量查询Select
type: object
"""
response = None
conn = self.pool.get()
cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
try:
response = cursor.executemany(sql, operation) if operation else cursor.executemany(sql)
except Exception as e:
print(e)
finally:
cursor.close()
self.pool.put(conn)
return response
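# --- Added usage sketch (not part of the original module) ---
# Hypothetical example; assumes MYSQL.ini defines a [BDY] section with
# HOST/PORT/USERNAME/PASSWORD/DATABASE/CHARSET, and that a 'demo' table exists.
if __name__ == '__main__':
    db = MysqlConn.get_instance(name='BDY', max_conn=5)
    affected = db.exec_sql("INSERT INTO demo (name) VALUES (%s)", ("alice",))
    count, rows = db.exec_sql_fetch("SELECT * FROM demo WHERE name = %s", ("alice",))
    print(affected, count, rows)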
|
[
"qjchao1992@163.com"
] |
qjchao1992@163.com
|
8f2efd28c60aa3c632a782cbd5dbf69191d7df80
|
4f3a99c6576c7191588ef08f71bc29d72b8ec080
|
/data_handler.py
|
4f569c1340c02a8199aab23bc4312107efd5e372
|
[] |
no_license
|
zlinzju/ManifoldAlignmentUCL
|
33907212fabb4b736692897f8c5fa68ef5142e1c
|
0eac9695e7e2a68e86fa6e9b3e73d311f681db49
|
refs/heads/master
| 2020-08-22T01:14:17.599045
| 2019-09-08T12:31:35
| 2019-09-08T12:31:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,514
|
py
|
import pickle
import numpy as np
from scipy.spatial.distance import cdist,pdist, squareform
import scipy as scip
from scipy.linalg import eig,eigh
from scipy.stats import spearmanr
from math import factorial
import itertools
from sklearn.metrics import mean_squared_error as mse
import tensorflow as tf
from sklearn.manifold import Isomap, SpectralEmbedding
from numpy import genfromtxt
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.neighbors import NearestNeighbors
def get_complex_data(n, ssl=False):
n_batches=2
n_pts_per_cluster=100
m = (n_pts_per_cluster*n)
l = int(m*0.2)
make = lambda x,y,s: np.concatenate([np.random.normal(x,s, (n_pts_per_cluster, 1)), np.random.normal(y,s, (n_pts_per_cluster, 1))], axis=1)
# batch 1
xb1_list = [make(-1.3, 2.2, .1), make(.1, 1.8, .1), make(.8, 2, .1),make(-3.2, -1, .1), make(.5, 2.3, .1), make(.1, -0.9, .1)]
xb2_list = [make(-.9, -2, .1), make(0, -2.3, .1), make(1.5, -1.5, .1),make(4.1, 3.2, .1), make(0.1, -3.1, .1), make(-1.1, -0.8, .1)]
xb1 = np.concatenate(xb1_list[:n], axis=0)
labels1 = np.concatenate([x* np.ones(n_pts_per_cluster) for x in range(n)], axis=0)
xb1_full = np.hstack((xb1, labels1.reshape(m,1)))
# batch 2
xb2 = np.concatenate(xb2_list[:n], axis=0)
labels2 = labels1
xb2_full = np.hstack((xb2, labels2.reshape(m,1)))
xb1_full = np.take(xb1_full,np.random.permutation(xb1_full.shape[0]),axis=0,out=xb1_full)
xb2_full = np.take(xb2_full,np.random.permutation(xb2_full.shape[0]),axis=0,out=xb2_full)
if ssl:
return xb1_full[:l,:-1], xb2_full[:l,:-1], xb1_full[:l,-1], xb2_full[:l,-1], xb1_full[l:,:-1], xb2_full[l:,:-1], xb1_full[l:,-1], xb2_full[l:,-1]
else:
return xb1_full[:,:-1], xb2_full[:,:-1], xb1_full[:,-1], xb2_full[:,-1]
def get_data(dataset, dim=5, mnili=[3 ,7], ssl=False, noise=True):
if dataset == "glove":
fp_intersect_word_image = "assets/intersect_glove.840b-openimage.box.p"
intersect_data = pickle.load(open(fp_intersect_word_image, 'rb'))
z_word = intersect_data['z_0'] # The word embedding.
z_image = intersect_data['z_1'] # The image embedding.
vocab_intersect = intersect_data['vocab_intersect'] # The concept labels.
n_item = len(vocab_intersect)
l = int(n_item*0.2)
embedding = SpectralEmbedding(n_components=dim)
z_word_tr = embedding.fit_transform(z_word)
z_image_tr = embedding.fit_transform(z_image)
lword = np.arange(len(z_word_tr)).reshape((len(z_word_tr),1))
limage = np.arange(len(z_image)).reshape((len(z_image),1))
        zword = np.hstack((z_word_tr, lword))
        zimage = np.hstack((z_image_tr, limage))  # was lword; limage is the image-index column
zword = np.take(zword,np.random.permutation(zword.shape[0]),axis=0,out=zword)
zimage = np.take(zimage,np.random.permutation(zimage.shape[0]),axis=0,out=zimage)
if ssl:
return zword[:l,:-1], zimage[:l,:-1], zword[:l,-1], zimage[:l,-1], zword[l:,:-1], zimage[l:,:-1], zword[l:,-1], zimage[l:,-1]
else:
return zword[:,:-1], zimage[:,:-1], zword[:,-1], zimage[:,-1]
elif dataset == "gauss":
n_batches=2
n_pts_per_cluster=100
l = int((n_pts_per_cluster*3)*0.2)
make = lambda x,y,s: np.concatenate([np.random.normal(x,s, (n_pts_per_cluster, 1)), np.random.normal(y,s, (n_pts_per_cluster, 1))], axis=1)
# batch 1
xb1 = np.concatenate([make(-1.3, 2.2, .1), make(.1, 1.8, .1), make(.8, 2, .1)], axis=0)
labels1 = np.concatenate([0 * np.ones(n_pts_per_cluster), 1 * np.ones(n_pts_per_cluster), 2 * np.ones(n_pts_per_cluster)], axis=0)
xb1_full = np.hstack((xb1, labels1.reshape(300,1)))
# batch 2
xb2 = np.concatenate([make(-.9, -2, .1), make(0, -2.3, .1), make(1.5, -1.5, .1)], axis=0)
labels2 = np.concatenate([0 * np.ones(n_pts_per_cluster), 1 * np.ones(n_pts_per_cluster), 2 * np.ones(n_pts_per_cluster)], axis=0)
xb2_full = np.hstack((xb2, labels2.reshape(300,1)))
xb1_full = np.take(xb1_full,np.random.permutation(xb1_full.shape[0]),axis=0,out=xb1_full)
xb2_full = np.take(xb2_full,np.random.permutation(xb2_full.shape[0]),axis=0,out=xb2_full)
if ssl:
return xb1_full[:l,:-1], xb2_full[:l,:-1], xb1_full[:l,-1], xb2_full[:l,-1], xb1_full[l:,:-1], xb2_full[l:,:-1], xb1_full[l:,-1], xb2_full[l:,-1]
else:
return xb1_full[:,:-1], xb2_full[:,:-1], xb1_full[:,-1], xb2_full[:,-1]
elif dataset == "mnist":
mnist = tf.contrib.learn.datasets.load_dataset("mnist")
images,labels = mnist.train.images, mnist.train.labels
labels = np.asarray([[labels[i],i] for i in range(len(mnili)*200) if (labels[i] in mnili)])
images = images[labels[:,1]]
images = images.reshape((-1, 28, 28))
xb1, labels1 = images, labels
labels2 = labels1
xb2 = np.zeros(xb1.shape)
for x in range(len(xb1)):
xb2[x] = np.rot90(xb1[x])
if noise:
xb1 = xb1+np.random.normal(0,0.1,xb1.shape)
l = len(labels)//2
if ssl:
return xb1[:l], xb2[:l], labels1[:l], labels2[:l], xb1[l:], xb2[l:], labels1[l:], labels2[l:]
else:
return xb1, xb2, labels1, labels2
else:
raise ValueError("Please enter {gauss, glove or mnist}")
|
[
"noreply@github.com"
] |
zlinzju.noreply@github.com
|
a9d7ff557ba46ec9e284b40f094a2d5158929c36
|
eaec3fece2e9106cf402d6e9d03b51dbe10af3d7
|
/cart/contexts.py
|
4c6db6d15897119cd854d5877198fcb976302bc6
|
[] |
no_license
|
kmaaallen/Django-E-Commerce
|
a6948cf51dd8cc51db9570949994a5adce717a62
|
4bdfb40ce2cd4c5a0d0c0c47b439b54ff6b7d36d
|
refs/heads/master
| 2021-11-24T17:21:02.053361
| 2021-11-06T11:38:28
| 2021-11-06T11:38:28
| 219,560,805
| 0
| 0
| null | 2021-11-06T11:38:28
| 2019-11-04T17:43:05
|
Python
|
UTF-8
|
Python
| false
| false
| 647
|
py
|
from django.shortcuts import get_object_or_404
from products.models import Product
def cart_contents(request):
"""
Ensures that the cart contents are available when rendering every page
"""
cart = request.session.get('cart', {})
cart_items = []
total = 0
product_count = 0
for id, quantity in cart.items():
product = get_object_or_404(Product, pk=id)
total += quantity*product.price
product_count += quantity
cart_items.append({'id': id, 'quantity': quantity, 'product': product})
return {'cart_items': cart_items, 'total': total,
'product_count': product_count}
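# --- Added usage note (not part of the original module) ---
# A context processor like this is typically registered in settings.py, e.g.:
#   TEMPLATES[0]['OPTIONS']['context_processors'].append('cart.contexts.cart_contents')
# (dotted path assumed from this file living at cart/contexts.py)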
|
[
"kmaaallen@gmail.com"
] |
kmaaallen@gmail.com
|
9b5c59f64aeca2a5ae4552827bf5df5b6fb8951e
|
529850c2785de1f8b4585a0638596ba86c59b5ac
|
/mmdet/datasets/lvis.py
|
d7a2e919f5fcc30f94e731f47dc4a1554cb07967
|
[
"Apache-2.0"
] |
permissive
|
hhaAndroid/mmdetection
|
b68609d7f78ed1ff795d48ffe68a389d86980cfe
|
5d34b77931c5f6516a694e1d46390166d404169e
|
refs/heads/master
| 2023-08-16T04:26:36.263068
| 2023-06-02T09:07:43
| 2023-06-02T09:07:43
| 319,885,247
| 18
| 19
|
Apache-2.0
| 2023-09-13T12:09:42
| 2020-12-09T08:04:56
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 46,184
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
import logging
import os.path as osp
import tempfile
import warnings
from collections import OrderedDict
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from .builder import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class LVISV05Dataset(CocoDataset):
CLASSES = (
'acorn', 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock',
'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet',
'antenna', 'apple', 'apple_juice', 'applesauce', 'apricot', 'apron',
'aquarium', 'armband', 'armchair', 'armoire', 'armor', 'artichoke',
'trash_can', 'ashtray', 'asparagus', 'atomizer', 'avocado', 'award',
'awning', 'ax', 'baby_buggy', 'basketball_backboard', 'backpack',
'handbag', 'suitcase', 'bagel', 'bagpipe', 'baguet', 'bait', 'ball',
'ballet_skirt', 'balloon', 'bamboo', 'banana', 'Band_Aid', 'bandage',
'bandanna', 'banjo', 'banner', 'barbell', 'barge', 'barrel',
'barrette', 'barrow', 'baseball_base', 'baseball', 'baseball_bat',
'baseball_cap', 'baseball_glove', 'basket', 'basketball_hoop',
'basketball', 'bass_horn', 'bat_(animal)', 'bath_mat', 'bath_towel',
'bathrobe', 'bathtub', 'batter_(food)', 'battery', 'beachball', 'bead',
'beaker', 'bean_curd', 'beanbag', 'beanie', 'bear', 'bed',
'bedspread', 'cow', 'beef_(food)', 'beeper', 'beer_bottle', 'beer_can',
'beetle', 'bell', 'bell_pepper', 'belt', 'belt_buckle', 'bench',
'beret', 'bib', 'Bible', 'bicycle', 'visor', 'binder', 'binoculars',
'bird', 'birdfeeder', 'birdbath', 'birdcage', 'birdhouse',
'birthday_cake', 'birthday_card', 'biscuit_(bread)', 'pirate_flag',
'black_sheep', 'blackboard', 'blanket', 'blazer', 'blender', 'blimp',
'blinker', 'blueberry', 'boar', 'gameboard', 'boat', 'bobbin',
'bobby_pin', 'boiled_egg', 'bolo_tie', 'deadbolt', 'bolt', 'bonnet',
'book', 'book_bag', 'bookcase', 'booklet', 'bookmark',
'boom_microphone', 'boot', 'bottle', 'bottle_opener', 'bouquet',
'bow_(weapon)', 'bow_(decorative_ribbons)', 'bow-tie', 'bowl',
'pipe_bowl', 'bowler_hat', 'bowling_ball', 'bowling_pin',
'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere',
'bread-bin', 'breechcloth', 'bridal_gown', 'briefcase',
'bristle_brush', 'broccoli', 'broach', 'broom', 'brownie',
'brussels_sprouts', 'bubble_gum', 'bucket', 'horse_buggy', 'bull',
'bulldog', 'bulldozer', 'bullet_train', 'bulletin_board',
'bulletproof_vest', 'bullhorn', 'corned_beef', 'bun', 'bunk_bed',
'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butcher_knife',
'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car',
'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf',
'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)',
'can', 'can_opener', 'candelabrum', 'candle', 'candle_holder',
'candy_bar', 'candy_cane', 'walking_cane', 'canister', 'cannon',
'canoe', 'cantaloup', 'canteen', 'cap_(headwear)', 'bottle_cap',
'cape', 'cappuccino', 'car_(automobile)', 'railcar_(part_of_a_train)',
'elevator_car', 'car_battery', 'identity_card', 'card', 'cardigan',
'cargo_ship', 'carnation', 'horse_carriage', 'carrot', 'tote_bag',
'cart', 'carton', 'cash_register', 'casserole', 'cassette', 'cast',
'cat', 'cauliflower', 'caviar', 'cayenne_(spice)', 'CD_player',
'celery', 'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue',
'champagne', 'chandelier', 'chap', 'checkbook', 'checkerboard',
'cherry', 'chessboard', 'chest_of_drawers_(furniture)',
'chicken_(animal)', 'chicken_wire', 'chickpea', 'Chihuahua',
'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)',
'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk',
'chocolate_mousse', 'choker', 'chopping_board', 'chopstick',
'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette',
'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent',
'clementine', 'clip', 'clipboard', 'clock', 'clock_tower',
'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 'coat',
'coat_hanger', 'coatrack', 'cock', 'coconut', 'coffee_filter',
'coffee_maker', 'coffee_table', 'coffeepot', 'coil', 'coin',
'colander', 'coleslaw', 'coloring_material', 'combination_lock',
'pacifier', 'comic_book', 'computer_keyboard', 'concrete_mixer',
'cone', 'control', 'convertible_(automobile)', 'sofa_bed', 'cookie',
'cookie_jar', 'cooking_utensil', 'cooler_(for_food)',
'cork_(bottle_plug)', 'corkboard', 'corkscrew', 'edible_corn',
'cornbread', 'cornet', 'cornice', 'cornmeal', 'corset',
'romaine_lettuce', 'costume', 'cougar', 'coverall', 'cowbell',
'cowboy_hat', 'crab_(animal)', 'cracker', 'crape', 'crate', 'crayon',
'cream_pitcher', 'credit_card', 'crescent_roll', 'crib', 'crock_pot',
'crossbar', 'crouton', 'crow', 'crown', 'crucifix', 'cruise_ship',
'police_cruiser', 'crumb', 'crutch', 'cub_(animal)', 'cube',
'cucumber', 'cufflink', 'cup', 'trophy_cup', 'cupcake', 'hair_curler',
'curling_iron', 'curtain', 'cushion', 'custard', 'cutting_tool',
'cylinder', 'cymbal', 'dachshund', 'dagger', 'dartboard',
'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',
'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux',
'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',
'dishwasher_detergent', 'diskette', 'dispenser', 'Dixie_cup', 'dog',
'dog_collar', 'doll', 'dollar', 'dolphin', 'domestic_ass', 'eye_mask',
'doorbell', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly',
'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit',
'dresser', 'drill', 'drinking_fountain', 'drone', 'dropper',
'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling',
'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan',
'Dutch_oven', 'eagle', 'earphone', 'earplug', 'earring', 'easel',
'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater',
'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk',
'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan',
'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)',
'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm',
'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace',
'fireplug', 'fish', 'fish_(food)', 'fishbowl', 'fishing_boat',
'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flash',
'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)',
'flower_arrangement', 'flute_glass', 'foal', 'folding_chair',
'food_processor', 'football_(American)', 'football_helmet',
'footstool', 'fork', 'forklift', 'freight_car', 'French_toast',
'freshener', 'frisbee', 'frog', 'fruit_juice', 'fruit_salad',
'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage',
'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic',
'gasmask', 'gazelle', 'gelatin', 'gemstone', 'giant_panda',
'gift_wrap', 'ginger', 'giraffe', 'cincture',
'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles',
'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose',
'gorilla', 'gourd', 'surgical_gown', 'grape', 'grasshopper', 'grater',
'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle',
'grillroom', 'grinder_(tool)', 'grits', 'grizzly', 'grocery_bag',
'guacamole', 'guitar', 'gull', 'gun', 'hair_spray', 'hairbrush',
'hairnet', 'hairpin', 'ham', 'hamburger', 'hammer', 'hammock',
'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel',
'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw',
'hardback_book', 'harmonium', 'hat', 'hatbox', 'hatch', 'veil',
'headband', 'headboard', 'headlight', 'headscarf', 'headset',
'headstall_(for_horses)', 'hearing_aid', 'heart', 'heater',
'helicopter', 'helmet', 'heron', 'highchair', 'hinge', 'hippopotamus',
'hockey_stick', 'hog', 'home_plate_(baseball)', 'honey', 'fume_hood',
'hook', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',
'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',
'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',
'ice_tea', 'igniter', 'incense', 'inhaler', 'iPod',
'iron_(for_clothing)', 'ironing_board', 'jacket', 'jam', 'jean',
'jeep', 'jelly_bean', 'jersey', 'jet_plane', 'jewelry', 'joystick',
'jumpsuit', 'kayak', 'keg', 'kennel', 'kettle', 'key', 'keycard',
'kilt', 'kimono', 'kitchen_sink', 'kitchen_table', 'kite', 'kitten',
'kiwi_fruit', 'knee_pad', 'knife', 'knight_(chess_piece)',
'knitting_needle', 'knob', 'knocker_(on_a_door)', 'koala', 'lab_coat',
'ladder', 'ladle', 'ladybug', 'lamb_(animal)', 'lamb-chop', 'lamp',
'lamppost', 'lampshade', 'lantern', 'lanyard', 'laptop_computer',
'lasagna', 'latch', 'lawn_mower', 'leather', 'legging_(clothing)',
'Lego', 'lemon', 'lemonade', 'lettuce', 'license_plate', 'life_buoy',
'life_jacket', 'lightbulb', 'lightning_rod', 'lime', 'limousine',
'linen_paper', 'lion', 'lip_balm', 'lipstick', 'liquor', 'lizard',
'Loafer_(type_of_shoe)', 'log', 'lollipop', 'lotion',
'speaker_(stereo_equipment)', 'loveseat', 'machine_gun', 'magazine',
'magnet', 'mail_slot', 'mailbox_(at_home)', 'mallet', 'mammoth',
'mandarin_orange', 'manger', 'manhole', 'map', 'marker', 'martini',
'mascot', 'mashed_potato', 'masher', 'mask', 'mast',
'mat_(gym_equipment)', 'matchbox', 'mattress', 'measuring_cup',
'measuring_stick', 'meatball', 'medicine', 'melon', 'microphone',
'microscope', 'microwave_oven', 'milestone', 'milk', 'minivan',
'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)', 'money',
'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor',
'motor_scooter', 'motor_vehicle', 'motorboat', 'motorcycle',
'mound_(baseball)', 'mouse_(animal_rodent)',
'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom',
'music_stool', 'musical_instrument', 'nailfile', 'nameplate', 'napkin',
'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newsstand',
'nightshirt', 'nosebag_(for_animals)', 'noseband_(for_animals)',
'notebook', 'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)',
'octopus_(animal)', 'oil_lamp', 'olive_oil', 'omelet', 'onion',
'orange_(fruit)', 'orange_juice', 'oregano', 'ostrich', 'ottoman',
'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad', 'paddle',
'padlock', 'paintbox', 'paintbrush', 'painting', 'pajamas', 'palette',
'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose',
'papaya', 'paperclip', 'paper_plate', 'paper_towel', 'paperback_book',
'paperweight', 'parachute', 'parakeet', 'parasail_(sports)',
'parchment', 'parka', 'parking_meter', 'parrot',
'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',
'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',
'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'pegboard',
'pelican', 'pen', 'pencil', 'pencil_box', 'pencil_sharpener',
'pendulum', 'penguin', 'pennant', 'penny_(coin)', 'pepper',
'pepper_mill', 'perfume', 'persimmon', 'baby', 'pet', 'petfood',
'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',
'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',
'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',
'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',
'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',
'plate', 'platter', 'playing_card', 'playpen', 'pliers',
'plow_(farm_equipment)', 'pocket_watch', 'pocketknife',
'poker_(fire_stirring_tool)', 'pole', 'police_van', 'polo_shirt',
'poncho', 'pony', 'pool_table', 'pop_(soda)', 'portrait',
'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato',
'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'printer',
'projectile_(weapon)', 'projector', 'propeller', 'prune', 'pudding',
'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher', 'puppet',
'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit', 'race_car',
'racket', 'radar', 'radiator', 'radio_receiver', 'radish', 'raft',
'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat',
'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',
'recliner', 'record_player', 'red_cabbage', 'reflector',
'remote_control', 'rhinoceros', 'rib_(food)', 'rifle', 'ring',
'river_boat', 'road_map', 'robe', 'rocking_chair', 'roller_skate',
'Rollerblade', 'rolling_pin', 'root_beer',
'router_(computer_equipment)', 'rubber_band', 'runner_(carpet)',
'plastic_bag', 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag',
'safety_pin', 'sail', 'salad', 'salad_plate', 'salami',
'salmon_(fish)', 'salmon_(food)', 'salsa', 'saltshaker',
'sandal_(type_of_shoe)', 'sandwich', 'satchel', 'saucepan', 'saucer',
'sausage', 'sawhorse', 'saxophone', 'scale_(measuring_instrument)',
'scarecrow', 'scarf', 'school_bus', 'scissors', 'scoreboard',
'scrambled_eggs', 'scraper', 'scratcher', 'screwdriver',
'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',
'seashell', 'seedling', 'serving_dish', 'sewing_machine', 'shaker',
'shampoo', 'shark', 'sharpener', 'Sharpie', 'shaver_(electric)',
'shaving_cream', 'shawl', 'shears', 'sheep', 'shepherd_dog',
'sherbert', 'shield', 'shirt', 'shoe', 'shopping_bag', 'shopping_cart',
'short_pants', 'shot_glass', 'shoulder_bag', 'shovel', 'shower_head',
'shower_curtain', 'shredder_(for_paper)', 'sieve', 'signboard', 'silo',
'sink', 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka',
'ski_pole', 'skirt', 'sled', 'sleeping_bag', 'sling_(bandage)',
'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',
'snowmobile', 'soap', 'soccer_ball', 'sock', 'soda_fountain',
'carbonated_water', 'sofa', 'softball', 'solar_array', 'sombrero',
'soup', 'soup_bowl', 'soupspoon', 'sour_cream', 'soya_milk',
'space_shuttle', 'sparkler_(fireworks)', 'spatula', 'spear',
'spectacles', 'spice_rack', 'spider', 'sponge', 'spoon', 'sportswear',
'spotlight', 'squirrel', 'stapler_(stapling_machine)', 'starfish',
'statue_(sculpture)', 'steak_(food)', 'steak_knife',
'steamer_(kitchen_appliance)', 'steering_wheel', 'stencil',
'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer',
'stirrup', 'stockings_(leg_wear)', 'stool', 'stop_sign', 'brake_light',
'stove', 'strainer', 'strap', 'straw_(for_drinking)', 'strawberry',
'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer',
'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower',
'sunglasses', 'sunhat', 'sunscreen', 'surfboard', 'sushi', 'mop',
'sweat_pants', 'sweatband', 'sweater', 'sweatshirt', 'sweet_potato',
'swimsuit', 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table',
'table', 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag',
'taillight', 'tambourine', 'army_tank', 'tank_(storage_vessel)',
'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',
'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',
'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',
'telephone_pole', 'telephoto_lens', 'television_camera',
'television_set', 'tennis_ball', 'tennis_racket', 'tequila',
'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',
'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil',
'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven',
'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush',
'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel',
'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light',
'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline',
'tray', 'tree_house', 'trench_coat', 'triangle_(musical_instrument)',
'tricycle', 'tripod', 'trousers', 'truck', 'truffle_(chocolate)',
'trunk', 'vat', 'turban', 'turkey_(bird)', 'turkey_(food)', 'turnip',
'turtle', 'turtleneck_(clothing)', 'typewriter', 'umbrella',
'underwear', 'unicycle', 'urinal', 'urn', 'vacuum_cleaner', 'valve',
'vase', 'vending_machine', 'vent', 'videotape', 'vinegar', 'violin',
'vodka', 'volleyball', 'vulture', 'waffle', 'waffle_iron', 'wagon',
'wagon_wheel', 'walking_stick', 'wall_clock', 'wall_socket', 'wallet',
'walrus', 'wardrobe', 'wasabi', 'automatic_washer', 'watch',
'water_bottle', 'water_cooler', 'water_faucet', 'water_filter',
'water_heater', 'water_jug', 'water_gun', 'water_scooter', 'water_ski',
'water_tower', 'watering_can', 'watermelon', 'weathervane', 'webcam',
'wedding_cake', 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair',
'whipped_cream', 'whiskey', 'whistle', 'wick', 'wig', 'wind_chime',
'windmill', 'window_box_(for_plants)', 'windshield_wiper', 'windsock',
'wine_bottle', 'wine_bucket', 'wineglass', 'wing_chair',
'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon', 'wreath',
'wrench', 'wristband', 'wristlet', 'yacht', 'yak', 'yogurt',
'yoke_(animal_equipment)', 'zebra', 'zucchini')
def load_annotations(self, ann_file):
"""Load annotation from lvis style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from LVIS api.
"""
try:
import lvis
if getattr(lvis, '__version__', '0') >= '10.5.3':
warnings.warn(
'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501
UserWarning)
from lvis import LVIS
except ImportError:
raise ImportError(
'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' # noqa: E501
)
self.coco = LVIS(ann_file)
self.cat_ids = self.coco.get_cat_ids()
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
if info['file_name'].startswith('COCO'):
# Convert from the COCO 2014 file naming convention of
# COCO_[train/val/test]2014_000000000000.jpg to the 2017
# naming convention of 000000000000.jpg
# (LVIS v1 will fix this naming issue)
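# e.g. 'COCO_val2014_000000000139.jpg'[-16:] -> '000000000139.jpg'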
info['filename'] = info['file_name'][-16:]
else:
info['filename'] = info['file_name']
data_infos.append(info)
return data_infos
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=np.arange(0.5, 0.96, 0.05)):
"""Evaluation in LVIS protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of output json files,
including the file path and the prefix of the filename. If not
specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluate the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float]): IoU thresholds used for evaluating
recalls. If set to a list, the average recall of all IoUs will
also be computed. Default: np.arange(0.5, 0.96, 0.05).
Returns:
dict[str, float]: LVIS style metrics.
"""
try:
import lvis
if getattr(lvis, '__version__', '0') >= '10.5.3':
warnings.warn(
'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501
UserWarning)
from lvis import LVISResults, LVISEval
except ImportError:
raise ImportError(
'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' # noqa: E501
)
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError('metric {} is not supported'.format(metric))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
eval_results = OrderedDict()
# get original api
lvis_gt = self.coco
for metric in metrics:
msg = 'Evaluating {}...'.format(metric)
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results['AR@{}'.format(num)] = ar[i]
log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i]))
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError('{} is not in results'.format(metric))
try:
lvis_dt = LVISResults(lvis_gt, result_files[metric])
except IndexError:
print_log(
'The testing results of the whole dataset are empty.',
logger=logger,
level=logging.ERROR)
break
iou_type = 'bbox' if metric == 'proposal' else metric
lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type)
lvis_eval.params.imgIds = self.img_ids
if metric == 'proposal':
lvis_eval.params.useCats = 0
lvis_eval.params.maxDets = list(proposal_nums)
lvis_eval.evaluate()
lvis_eval.accumulate()
lvis_eval.summarize()
for k, v in lvis_eval.get_results().items():
if k.startswith('AR'):
val = float('{:.3f}'.format(float(v)))
eval_results[k] = val
else:
lvis_eval.evaluate()
lvis_eval.accumulate()
lvis_eval.summarize()
lvis_results = lvis_eval.get_results()
if classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = lvis_eval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self.coco.load_cats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
for k, v in lvis_results.items():
if k.startswith('AP'):
key = '{}_{}'.format(metric, k)
val = float('{:.3f}'.format(float(v)))
eval_results[key] = val
ap_summary = ' '.join([
'{}:{:.3f}'.format(k, float(v))
for k, v in lvis_results.items() if k.startswith('AP')
])
eval_results['{}_mAP_copypaste'.format(metric)] = ap_summary
lvis_eval.print_results()
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
LVISDataset = LVISV05Dataset
DATASETS.register_module(name='LVISDataset', module=LVISDataset)
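# A minimal usage sketch (hypothetical annotation path and detector outputs;
# not part of the original module):
#   dataset = LVISDataset(ann_file='annotations/lvis_v0.5_val.json', pipeline=[])
#   results = run_detector(dataset)  # hypothetical helper: one result per image
#   metrics = dataset.evaluate(results, metric='bbox', classwise=True)
#   print(metrics['bbox_AP'])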
@DATASETS.register_module()
class LVISV1Dataset(LVISDataset):
CLASSES = (
'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol',
'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna',
'apple', 'applesauce', 'apricot', 'apron', 'aquarium',
'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor',
'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer',
'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy',
'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel',
'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon',
'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo',
'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow',
'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap',
'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)',
'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)',
'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie',
'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper',
'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt',
'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor',
'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath',
'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card',
'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket',
'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry',
'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg',
'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase',
'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle',
'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)',
'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'box',
'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere',
'bread-bin', 'bread', 'breechcloth', 'bridal_gown', 'briefcase',
'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts',
'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer',
'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn',
'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card',
'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car',
'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf',
'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)',
'can', 'can_opener', 'candle', 'candle_holder', 'candy_bar',
'candy_cane', 'walking_cane', 'canister', 'canoe', 'cantaloup',
'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino',
'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car',
'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship',
'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton',
'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower',
'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone',
'chain_mail', 'chair', 'chaise_longue', 'chalice', 'chandelier',
'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard',
'chicken_(animal)', 'chickpea', 'chili_(vegetable)', 'chime',
'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar',
'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker',
'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider',
'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet',
'clasp', 'cleansing_agent', 'cleat_(for_securing_rope)', 'clementine',
'clip', 'clipboard', 'clippers_(for_plants)', 'cloak', 'clock',
'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster',
'coat', 'coat_hanger', 'coatrack', 'cock', 'cockroach',
'cocoa_(beverage)', 'coconut', 'coffee_maker', 'coffee_table',
'coffeepot', 'coil', 'coin', 'colander', 'coleslaw',
'coloring_material', 'combination_lock', 'pacifier', 'comic_book',
'compass', 'computer_keyboard', 'condiment', 'cone', 'control',
'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie',
'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)',
'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet',
'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall',
'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker',
'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib',
'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown',
'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch',
'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup',
'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain',
'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard',
'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',
'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux',
'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',
'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup',
'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin',
'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly',
'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit',
'dresser', 'drill', 'drone', 'dropper', 'drum_(musical_instrument)',
'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell',
'dumpster', 'dustpan', 'eagle', 'earphone', 'earplug', 'earring',
'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater',
'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk',
'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan',
'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)',
'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm',
'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace',
'fireplug', 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl',
'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flap',
'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)',
'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal',
'folding_chair', 'food_processor', 'football_(American)',
'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car',
'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice',
'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage',
'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic',
'gasmask', 'gazelle', 'gelatin', 'gemstone', 'generator',
'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture',
'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles',
'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose',
'gorilla', 'gourd', 'grape', 'grater', 'gravestone', 'gravy_boat',
'green_bean', 'green_onion', 'griddle', 'grill', 'grits', 'grizzly',
'grocery_bag', 'guitar', 'gull', 'gun', 'hairbrush', 'hairnet',
'hairpin', 'halter_top', 'ham', 'hamburger', 'hammer', 'hammock',
'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel',
'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw',
'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil', 'headband',
'headboard', 'headlight', 'headscarf', 'headset',
'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet',
'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog',
'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah',
'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',
'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',
'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',
'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board',
'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey',
'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak',
'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono',
'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit',
'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)',
'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)',
'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard',
'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather',
'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade', 'lettuce',
'license_plate', 'life_buoy', 'life_jacket', 'lightbulb',
'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor',
'lizard', 'log', 'lollipop', 'speaker_(stereo_equipment)', 'loveseat',
'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)',
'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange', 'manger',
'manhole', 'map', 'marker', 'martini', 'mascot', 'mashed_potato',
'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox',
'mattress', 'measuring_cup', 'measuring_stick', 'meatball', 'medicine',
'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone',
'milk', 'milk_can', 'milkshake', 'minivan', 'mint_candy', 'mirror',
'mitten', 'mixer_(kitchen_tool)', 'money',
'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor',
'motor_scooter', 'motor_vehicle', 'motorcycle', 'mound_(baseball)',
'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom',
'music_stool', 'musical_instrument', 'nailfile', 'napkin',
'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newspaper',
'newsstand', 'nightshirt', 'nosebag_(for_animals)',
'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker',
'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil',
'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'ostrich',
'ottoman', 'oven', 'overalls_(clothing)', 'owl', 'packet', 'inkpad',
'pad', 'paddle', 'padlock', 'paintbrush', 'painting', 'pajamas',
'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake',
'pantyhose', 'papaya', 'paper_plate', 'paper_towel', 'paperback_book',
'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parasol',
'parchment', 'parka', 'parking_meter', 'parrot',
'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',
'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',
'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg',
'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box',
'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)',
'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet',
'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',
'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',
'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',
'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',
'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',
'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)',
'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)',
'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)',
'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato',
'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'pretzel',
'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune',
'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher',
'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit',
'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish',
'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat',
'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',
'recliner', 'record_player', 'reflector', 'remote_control',
'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map',
'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade',
'rolling_pin', 'root_beer', 'router_(computer_equipment)',
'rubber_band', 'runner_(carpet)', 'plastic_bag',
'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin',
'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)',
'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)',
'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse',
'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf',
'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver',
'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',
'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark',
'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl',
'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt',
'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass',
'shoulder_bag', 'shovel', 'shower_head', 'shower_cap',
'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink',
'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole',
'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)',
'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',
'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball',
'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon',
'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)',
'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish',
'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)',
'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish',
'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel',
'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer',
'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove', 'strainer',
'strap', 'straw_(for_drinking)', 'strawberry', 'street_sign',
'streetlight', 'string_cheese', 'stylus', 'subwoofer', 'sugar_bowl',
'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', 'sunglasses',
'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband',
'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword',
'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table',
'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight',
'tambourine', 'army_tank', 'tank_(storage_vessel)',
'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',
'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',
'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',
'telephone_pole', 'telephoto_lens', 'television_camera',
'television_set', 'tennis_ball', 'tennis_racket', 'tequila',
'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',
'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil',
'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven',
'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush',
'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel',
'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light',
'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline',
'tray', 'trench_coat', 'triangle_(musical_instrument)', 'tricycle',
'tripod', 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat',
'turban', 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)',
'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn',
'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest',
'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture',
'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick',
'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe',
'washbasin', 'automatic_washer', 'watch', 'water_bottle',
'water_cooler', 'water_faucet', 'water_heater', 'water_jug',
'water_gun', 'water_scooter', 'water_ski', 'water_tower',
'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake',
'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream',
'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)',
'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket',
'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon',
'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt',
'yoke_(animal_equipment)', 'zebra', 'zucchini')
def load_annotations(self, ann_file):
try:
import lvis
if getattr(lvis, '__version__', '0') >= '10.5.3':
warnings.warn(
'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501
UserWarning)
from lvis import LVIS
except ImportError:
raise ImportError(
'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' # noqa: E501
)
self.coco = LVIS(ann_file)
self.cat_ids = self.coco.get_cat_ids()
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
# coco_url is used in LVISv1 instead of file_name
# e.g. http://images.cocodataset.org/train2017/000000391895.jpg
# train/val split is specified in the url
info['filename'] = info['coco_url'].replace(
'http://images.cocodataset.org/', '')
data_infos.append(info)
return data_infos
|
[
"noreply@github.com"
] |
hhaAndroid.noreply@github.com
|
bcc1a31edbc8ec9e5e30ccfdce7356bbc0088ba1
|
2d5a5717015da6f659d85911da5a69a145b4c861
|
/reccomender.py
|
a4798df1aa989a8f6ad237f57b7c4633ec07fe99
|
[] |
no_license
|
williewlchew/url-tagging
|
14e2a026ba16d27c4a0b3b700c89139dbf8b7e63
|
bf1dc4e3e9faf59216599675ab3940beab6c43f1
|
refs/heads/master
| 2023-01-28T04:55:43.443270
| 2020-12-11T03:13:45
| 2020-12-11T03:13:45
| 319,844,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,125
|
py
|
'''
UrlTagging Recommending
Willie Chew
2020
'''
import io, os, sys, pika, redis, hashlib, mysql.connector
import protos.links_pb2 as linksPb
redisHost = os.getenv("REDIS_HOST") or "localhost"
rabbitMQHost = os.getenv("RABBITMQ_HOST") or "localhost"
mysqlHost = os.getenv("MYSQL_HOST") or "localhost"
class RabbitHandler(object):
def __init__(self, consumingExchange, publishingExchange):
# init variable
self.rabbitMQHost = os.getenv("RABBITMQ_HOST") or "localhost"
self.rabbitMQ = pika.BlockingConnection(
pika.ConnectionParameters(host=self.rabbitMQHost))
self.rabbitMQChannel = self.rabbitMQ.channel()
self.publishingExchange = publishingExchange
self.reccRequestHandler = ReccRequestHandler()
# bind and consume
result = self.rabbitMQChannel.queue_declare(queue='', exclusive=True)
queue_name = result.method.queue
self.rabbitMQChannel.queue_bind(exchange=consumingExchange, queue=queue_name, routing_key='a')
self.rabbitMQChannel.basic_consume(
queue=queue_name,
on_message_callback=self.on_response,
auto_ack=True)
self.rabbitMQChannel.start_consuming()
def on_response(self, ch, method, props, body):
# Receive message
recc = self.reccRequestHandler.ProcessMessage(body)
print(recc)
self.log("[reccomender] reccomend success: " + ",".join(recc.inputTags[:]))
# Send results back
message = recc.SerializeToString()
self.rabbitMQChannel.basic_publish(exchange=self.publishingExchange, body=message, routing_key='b', properties=pika.BasicProperties(correlation_id = 'c'))
def log(self, message):
self.rabbitMQChannel.basic_publish(exchange='logging', body=message, routing_key='a')
class ReccRequestHandler(object):
def __init__(self):
self.redisCache = RedisCache()
self.sqlDatabase = SqlDatabase()
def ProcessMessage(self, message):
recc = linksPb.Reccomendation()
recc.ParseFromString(message)
# check cache
hashCheck = self.redisCache.Get(''.join(recc.inputTags))
if hashCheck is not None:
recc.ParseFromString(hashCheck)
return recc
# find relevant articles
results = self.ProcessTags(recc.inputTags)
articles = []
for result in results:
newLink = linksPb.Link()
newLink.url = result
articles.append(newLink)
recc.outputLinks.extend(articles)
# cache in redis
self.redisCache.Set(''.join(recc.inputTags), recc.SerializeToString())
return recc
def ProcessTags(self, tags):
tagSet = "\",\"".join(tags)
query = "SELECT tag_id FROM tags WHERE tag IN (\"" + tagSet + "\")"
relevantTagIds = self.sqlDatabase.Query(query)
relevantTagIds = list(map(lambda x: x[0], relevantTagIds))
relevantTagIds = "\",\"".join(relevantTagIds)
query = "SELECT url_id FROM link_tag WHERE tag_id IN (\"" + relevantTagIds + "\")"
relevantUrlIds = self.sqlDatabase.Query(query)
relevantUrlIds = list(map(lambda x: x[0], relevantUrlIds))
relevantUrlIds = "\",\"".join(relevantUrlIds)
query = "SELECT DISTINCT url FROM links WHERE url_id IN (\"" + relevantUrlIds + "\")"
results = self.sqlDatabase.Query(query)
results = list(map(lambda x: x[0], results))
return results
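# Note: the queries above are built by string concatenation, which is open to
# SQL injection and breaks if the ids come back as ints. A parameterized
# sketch of the first lookup (illustrative only, same schema assumed):
#   placeholders = ', '.join(['%s'] * len(tags))
#   cursor.execute('SELECT tag_id FROM tags WHERE tag IN ({})'.format(placeholders),
#                  tuple(tags))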
### Redis Caching
#################################
class RedisCache(object):
def __init__(self):
self.redisStore = redis.Redis(host=redisHost, db=1)
self.redisStore.flushdb()
def Set(self, key, value):
key = hashlib.sha224(bytes(key, 'utf-8')).hexdigest()
self.redisStore.set(key, value)
def Get(self, key):
key = hashlib.sha224(bytes(key, 'utf-8')).hexdigest()
return self.redisStore.get(key)
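# Usage sketch (assumes a reachable Redis instance; keys are SHA-224 hashed):
#   cache = RedisCache()
#   cache.Set('python,redis', b'serialized-protobuf-bytes')
#   cached = cache.Get('python,redis')  # returns None on a cache miss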
### MySql Database
#################################
class SqlDatabase(object):
def __init__(self):
self.mydb = mysql.connector.connect(
host=mysqlHost,
port="3306",
user="root",
password="password",
database="mydatabase"
)
def Query(self, query):
mycursor = self.mydb.cursor()
mycursor.execute(query)
return(mycursor.fetchall())
class ReccProcessor(object):
def Process(self, tags):
#wip
link1 = linksPb.Link()
link1.url = 'l1'
link2 = linksPb.Link()
link2.url = 'l2'
link3 = linksPb.Link()
link3.url = 'l3'
return [link1, link2, link3]
reccProcessor = ReccProcessor()
rabbitmq = RabbitHandler('reccReq', 'response')
|
[
"williewlchew@gmail.com"
] |
williewlchew@gmail.com
|
5bee624cf6a1512c53de9744c62b2e661ed4ff4e
|
9f242385186a9a9eafc61322e4f92515b42e3325
|
/DQN/DQN_p2_universe.py
|
f7aac3acb6a5e4bd2d53ff5ed0ee56513b853a66
|
[
"MIT"
] |
permissive
|
UofARLGames/reinforcement-learning
|
5c826b378934d99b9198fe5931e811d5207c2156
|
a1dfcf64087a3181b51b469d59fc34e6a494dcbd
|
refs/heads/master
| 2020-05-21T06:05:58.973153
| 2017-03-31T23:20:47
| 2017-03-31T23:20:47
| 84,584,607
| 0
| 0
| null | 2017-03-10T17:29:46
| 2017-03-10T17:29:46
| null |
UTF-8
|
Python
| false
| false
| 19,852
|
py
|
# coding: utf-8
# In[ ]:
#get_ipython().magic('matplotlib inline')
import gym
import itertools
import numpy as np
import os
import random
import sys
import universe
import pdb
if "../" not in sys.path:
sys.path.append("../")
from lib import plotting
from collections import deque, namedtuple
# In[ ]:
#env = gym.envs.make("Breakout-v0")
#env = gym.make('flashgames.DuskDrive-v0')
env = gym.make('Breakout-v0')
import tensorflow as tf
#env.configure(remotes=1) # automatically creates a local docker container
#observation_n = env.reset()
# In[ ]:
# Atari Actions: 0 (noop), 1 (fire), 2 (left) and 3 (right) are valid actions
#VALID_ACTIONS = [0, 1, 2, 3]
VALID_ACTIONS= [('KeyEvent', 'ArrowUp', True), ('KeyEvent', 'ArrowDown', True), ('KeyEvent', 'ArrowRight', True), ('KeyEvent', 'ArrowLeft', True)]
#VALID_ACTIONS= [('KeyEvent', 'ArrowUp', True), ('KeyEvent', 'ArrowUp', True), ('KeyEvent', 'ArrowUp', True), ('KeyEvent', 'ArrowUp', True)]
#VALID_ACTIONS= [('KeyEvent', 'ArrowUp', True)]
# In[ ]:
class StateProcessor():
"""StateProcessor"""
"""
Processes a raw Atari iamges. Resizes it and converts it to grayscale.
"""
S_W= 768
S_H= 1024
C_W= 400
C_H= 400
N_W= 200
N_H= 200
def __init__(self):
# Build the Tensorflow graph
with tf.variable_scope("state_processor"):
self.input_state = tf.placeholder(shape=[StateProcessor.S_W, StateProcessor.S_H, 3], dtype=tf.uint8)
self.output = tf.image.rgb_to_grayscale(self.input_state)
self.output = tf.image.crop_to_bounding_box(self.output, 34, 0, StateProcessor.C_W, StateProcessor.C_H)
self.output = tf.image.resize_images(
self.output, tf.constant(np.array([84, 84],dtype='int32')), tf.image.ResizeMethod.NEAREST_NEIGHBOR)
self.output = tf.squeeze(self.output)
def process(self, sess, state):
"""
Args:
sess: A Tensorflow session object
state: A Universe observation dict whose 'vision' field is an RGB array (or None)
Returns:
A processed [84, 84] state representing grayscale values.
"""
if state is not None:
return sess.run(self.output, { self.input_state: state['vision'] })
else:
return np.zeros((84,84))
# In[ ]:
class Estimator():
"""Q-Value Estimator neural network.
This network is used for both the Q-Network and the Target Network.
"""
def __init__(self, scope="estimator", summaries_dir=None):
self.scope = scope
# Writes Tensorboard summaries to disk
self.summary_writer = None
with tf.variable_scope(scope):
# Build the graph
self._build_model()
if summaries_dir:
summary_dir = os.path.join(summaries_dir, "summaries_{}".format(scope))
if not os.path.exists(summary_dir):
os.makedirs(summary_dir)
self.summary_writer = tf.train.SummaryWriter(summary_dir)
def _build_model(self):
"""
Builds the Tensorflow graph.
"""
# Placeholders for our input
# Our input is 4 grayscale frames of shape 84, 84 each
self.X_pl = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name="X")
# The TD target value
self.y_pl = tf.placeholder(shape=[None], dtype=tf.float32, name="y")
# Integer id of which action was selected
self.actions_pl = tf.placeholder(shape=[None], dtype=tf.int32, name="actions")
X = tf.to_float(self.X_pl) / 255.0
batch_size = tf.shape(self.X_pl)[0]
# Three convolutional layers
conv1 = tf.contrib.layers.conv2d(
X, 32, 8, 4, activation_fn=tf.nn.relu)
conv2 = tf.contrib.layers.conv2d(
conv1, 64, 4, 2, activation_fn=tf.nn.relu)
conv3 = tf.contrib.layers.conv2d(
conv2, 64, 3, 1, activation_fn=tf.nn.relu)
# Fully connected layers
flattened = tf.contrib.layers.flatten(conv3)
fc1 = tf.contrib.layers.fully_connected(flattened, 512)
self.predictions = tf.contrib.layers.fully_connected(fc1, len(VALID_ACTIONS))
# Get the predictions for the chosen actions only
gather_indices = tf.range(batch_size) * tf.shape(self.predictions)[1] + self.actions_pl
self.action_predictions = tf.gather(tf.reshape(self.predictions, [-1]), gather_indices)
# Calculate the loss
self.losses = tf.squared_difference(self.y_pl, self.action_predictions)
self.loss = tf.reduce_mean(self.losses)
# Optimizer Parameters from original paper
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)
self.train_op = self.optimizer.minimize(self.loss, global_step=tf.contrib.framework.get_global_step())
# Summaries for Tensorboard
self.summaries = tf.merge_summary([
tf.scalar_summary("loss", self.loss),
tf.histogram_summary("loss_hist", self.losses),
tf.histogram_summary("q_values_hist", self.predictions),
tf.scalar_summary("max_q_value", tf.reduce_max(self.predictions))
])
def predict(self, sess, s):
"""
Predicts action values.
Args:
sess: Tensorflow session
s: State input of shape [batch_size, 84, 84, 4]
Returns:
Tensor of shape [batch_size, NUM_VALID_ACTIONS] containing the estimated
action values.
"""
return sess.run(self.predictions, { self.X_pl: s })
def update(self, sess, s, a, y):
"""
Updates the estimator towards the given targets.
Args:
sess: Tensorflow session object
s: State input of shape [batch_size, 84, 84, 4]
a: Chosen actions of shape [batch_size]
y: Targets of shape [batch_size]
Returns:
The calculated loss on the batch.
"""
feed_dict = { self.X_pl: s, self.y_pl: y, self.actions_pl: a }
summaries, global_step, _, loss = sess.run(
[self.summaries, tf.contrib.framework.get_global_step(), self.train_op, self.loss],
feed_dict)
if self.summary_writer:
self.summary_writer.add_summary(summaries, global_step)
return loss
def strip_arrays(state, done, reward):
return state[0], done[0], reward[0]
# In[ ]:
# For Testing....
tf.reset_default_graph()
global_step = tf.Variable(0, name="global_step", trainable=False)
e = Estimator(scope="test")
sp = StateProcessor()
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
# Example observation batch
env.configure() # automatically creates a local docker container
observation = env.reset()
observation,_,_= strip_arrays(observation, [0], [0])
observation_p = sp.process(sess, observation)
observation = np.stack([observation_p] * 4, axis=2)
observations = np.array([observation] * 2)
# Test Prediction
print(e.predict(sess, observations))
# Test training step
y = np.array([10.0, 10.0])
a = np.array([1, 3])
print(e.update(sess, observations, a, y))
# In[ ]:
def copy_model_parameters(sess, estimator1, estimator2):
"""
Copies the model parameters of one estimator to another.
Args:
sess: Tensorflow session instance
estimator1: Estimator to copy the paramters from
estimator2: Estimator to copy the parameters to
"""
e1_params = [t for t in tf.trainable_variables() if t.name.startswith(estimator1.scope)]
e1_params = sorted(e1_params, key=lambda v: v.name)
e2_params = [t for t in tf.trainable_variables() if t.name.startswith(estimator2.scope)]
e2_params = sorted(e2_params, key=lambda v: v.name)
update_ops = []
for e1_v, e2_v in zip(e1_params, e2_params):
op = e2_v.assign(e1_v)
update_ops.append(op)
sess.run(update_ops)
# In[ ]:
def make_epsilon_greedy_policy(estimator, nA):
"""
Creates an epsilon-greedy policy based on a given Q-function approximator and epsilon.
Args:
estimator: An estimator that returns q values for a given state
nA: Number of actions in the environment.
Returns:
A function that takes the (sess, observation, epsilon) as an argument and returns
the probabilities for each action in the form of a numpy array of length nA.
"""
def policy_fn(sess, observation, epsilon):
A = np.ones(nA, dtype=float) * epsilon / nA
print('Observation Dimension', observation.shape)
q_values = estimator.predict(sess, np.expand_dims(observation, 0))[0]
best_action = np.argmax(q_values)
A[best_action] += (1.0 - epsilon)
return A
return policy_fn
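# Usage sketch (mirrors how the policy is consumed later in this file):
#   policy = make_epsilon_greedy_policy(q_estimator, len(VALID_ACTIONS))
#   action_probs = policy(sess, observation, epsilon=0.1)
#   action = np.random.choice(np.arange(len(action_probs)), p=action_probs)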
# In[ ]:
def sample_action():
c= np.random.choice(range(len(VALID_ACTIONS)))
return [VALID_ACTIONS[c]]
def wait(state, env):
while True:
action_n= []
for ob in state:
action_n.append(sample_action())
if state != [None]:
break
state, reward_n, done_n, info = env.step(action_n)
env.render()
return state
def deep_q_learning(sess,
env,
q_estimator,
target_estimator,
state_processor,
num_episodes,
experiment_dir,
replay_memory_size=500000,
replay_memory_init_size=50,
update_target_estimator_every=10000,
discount_factor=0.99,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_steps=500000,
batch_size=32,
record_video_every=50):
"""
Q-Learning algorithm for off-policy TD control using Function Approximation.
Finds the optimal greedy policy while following an epsilon-greedy policy.
Args:
sess: Tensorflow Session object
env: OpenAI environment
q_estimator: Estimator object used for the q values
target_estimator: Estimator object used for the targets
state_processor: A StateProcessor object
num_episodes: Number of episodes to run for
experiment_dir: Directory to save Tensorflow summaries in
replay_memory_size: Size of the replay memory
replay_memory_init_size: Number of random experiences to sample when initializing
the replay memory.
update_target_estimator_every: Copy parameters from the Q estimator to the
target estimator every N steps
discount_factor: Gamma discount factor
epsilon_start: Chance to sample a random action when taking an action.
Epsilon is decayed over time and this is the start value
epsilon_end: The final minimum value of epsilon after decaying is done
epsilon_decay_steps: Number of steps to decay epsilon over
batch_size: Size of batches to sample from the replay memory
record_video_every: Record a video every N episodes
Returns:
An EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.
"""
Transition = namedtuple("Transition", ["state", "action", "reward", "next_state", "done"])
# The replay memory
replay_memory = []
# Keeps track of useful statistics
stats = plotting.EpisodeStats(
episode_lengths=np.zeros(num_episodes),
episode_rewards=np.zeros(num_episodes))
# Create directories for checkpoints and summaries
checkpoint_dir = os.path.join(experiment_dir, "checkpoints")
checkpoint_path = os.path.join(checkpoint_dir, "model")
monitor_path = os.path.join(experiment_dir, "monitor")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
if not os.path.exists(monitor_path):
os.makedirs(monitor_path)
saver = tf.train.Saver()
# Load a previous checkpoint if we find one
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
if latest_checkpoint:
print("Loading model checkpoint {}...\n".format(latest_checkpoint))
saver.restore(sess, latest_checkpoint)
# Get the current time step
total_t = sess.run(tf.contrib.framework.get_global_step())
# The epsilon decay schedule
epsilons = np.linspace(epsilon_start, epsilon_end, epsilon_decay_steps)
# The policy we're following
policy = make_epsilon_greedy_policy(
q_estimator,
len(VALID_ACTIONS))
# Populate the replay memory with initial experience
print("Populating replay memory...")
state = env.reset()
state= wait(state, env)
print('Really starting the population of memory', state[0]['vision'].shape)
state, _, _= strip_arrays(state, [0], [0])
state = state_processor.process(sess, state)
state = np.stack([state] * 4, axis=2)
for i in range(replay_memory_init_size):
action_probs = policy(sess, state, epsilons[total_t])
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
next_state, reward, done, _ = env.step([[VALID_ACTIONS[action]]])
next_state, done, reward= strip_arrays(next_state, done, reward)
env.render()
next_state = state_processor.process(sess, next_state)
next_state = np.append(state[:,:,1:], np.expand_dims(next_state, 2), axis=2)
replay_memory.append(Transition(state, action, reward, next_state, done))
if done:
print('Episode finished unexpectedly while populating replay memory')
state = env.reset()
state = state_processor.process(sess, state)
state = np.stack([state] * 4, axis=2)
else:
state = next_state
print("Finished Populating replay memory...")
# Record videos
env.monitor.start(monitor_path,
resume=True,
video_callable=lambda count: count % record_video_every == 0)
state= env.reset()
for i_episode in range(num_episodes):
# Save the current checkpoint
saver.save(tf.get_default_session(), checkpoint_path)
# Reset the environment
state = env.reset()
state= wait(state, env)
state, _, _= strip_arrays(state, [0], [0])
state = state_processor.process(sess, state)
state = np.stack([state] * 4, axis=2)
loss = None
# One step in the environment
for t in itertools.count():
# Epsilon for this time step
epsilon = epsilons[min(total_t, epsilon_decay_steps-1)]
# Add epsilon to Tensorboard
episode_summary = tf.Summary()
episode_summary.value.add(simple_value=epsilon, tag="epsilon")
q_estimator.summary_writer.add_summary(episode_summary, total_t)
# Maybe update the target estimator
if total_t % update_target_estimator_every == 0:
copy_model_parameters(sess, q_estimator, target_estimator)
print("\nCopied model parameters to target network.")
# Print out which step we're on, useful for debugging.
print("\rStep {} ({}) @ Episode {}/{}, loss: {}".format(
t, total_t, i_episode + 1, num_episodes, loss))
sys.stdout.flush()
# Take a step
action_probs = policy(sess, state, epsilon)
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
next_state, reward, done, _ = env.step([[VALID_ACTIONS[action]]])
next_state, done, reward= strip_arrays(next_state, done, reward)
env.render()
next_state = state_processor.process(sess, next_state)
next_state = np.append(state[:,:,1:], np.expand_dims(next_state, 2), axis=2)
# If our replay memory is full, pop the first element
if len(replay_memory) == replay_memory_size:
replay_memory.pop(0)
# Save transition to replay memory
replay_memory.append(Transition(state, action, reward, next_state, done))
# Update statistics
stats.episode_rewards[i_episode] += reward
stats.episode_lengths[i_episode] = t
# Sample a minibatch from the replay memory
samples = random.sample(replay_memory, batch_size)
states_batch, action_batch, reward_batch, next_states_batch, done_batch = map(np.array, zip(*samples))
# Calculate q values and targets
q_values_next = target_estimator.predict(sess, next_states_batch)
targets_batch = reward_batch + np.invert(done_batch).astype(np.float32) * discount_factor * np.amax(q_values_next, axis=1)
# Perform gradient descent update
states_batch = np.array(states_batch)
print('Size: ', states_batch.shape, action_batch.shape, targets_batch.shape)
loss = q_estimator.update(sess, states_batch, action_batch, targets_batch)
if done:
break
state = next_state
total_t += 1
# Add summaries to tensorboard
episode_summary = tf.Summary()
episode_summary.value.add(simple_value=stats.episode_rewards[i_episode], node_name="episode_reward", tag="episode_reward")
episode_summary.value.add(simple_value=stats.episode_lengths[i_episode], node_name="episode_length", tag="episode_length")
q_estimator.summary_writer.add_summary(episode_summary, total_t)
q_estimator.summary_writer.flush()
yield total_t, plotting.EpisodeStats(
episode_lengths=stats.episode_lengths[:i_episode+1],
episode_rewards=stats.episode_rewards[:i_episode+1])
env.monitor.close()
yield stats
return
# In[ ]:
print('Running DQN\n')
tf.reset_default_graph()
# Where we save our checkpoints and graphs
experiment_dir = os.path.abspath("./experiments/{}".format(env.spec.id))
# Create a global step variable
global_step = tf.Variable(0, name='global_step', trainable=False)
# Create estimators
q_estimator = Estimator(scope="q", summaries_dir=experiment_dir)
target_estimator = Estimator(scope="target_q")
# State processor
state_processor = StateProcessor()
# Run it!
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for t, stats in deep_q_learning(sess,
env,
q_estimator=q_estimator,
target_estimator=target_estimator,
state_processor=state_processor,
experiment_dir=experiment_dir,
num_episodes=10000,
replay_memory_size=500000,
replay_memory_init_size=50,
update_target_estimator_every=10000,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_steps=500000,
discount_factor=0.99,
batch_size=32):
print("\nEpisode Reward: {}".format(stats.episode_rewards[-1]))
|
[
"mennatul@ualberta.ca"
] |
mennatul@ualberta.ca
|
1ebd169554ef609726d43aedd887225ea4fb9263
|
887b37befa1eac8f3d6de87fee4ba881fba169ce
|
/mysite/survey/migrations/0005_auto_20200414_2251.py
|
0afcbe7ccdb32ef2708c807c9529fc089b4e1778
|
[] |
no_license
|
judetomas/django_polls
|
16ae2f0206459c8ac8f71f8273f7766ef11a6663
|
d444a469bf4ab33934fdf59f06ed99a1d93964bf
|
refs/heads/master
| 2020-09-11T18:58:41.906228
| 2020-05-16T19:45:17
| 2020-05-16T19:45:17
| 222,159,695
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
# Generated by Django 2.2.5 on 2020-04-15 02:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('survey', '0004_remove_answer_answer_id'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_type', models.CharField(max_length=200)),
],
),
migrations.AddField(
model_name='answer',
name='answer_type',
field=models.CharField(default='E', max_length=200),
preserve_default=False,
),
migrations.CreateModel(
name='Submission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.Answer')),
],
),
]
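# Apply with (assuming a standard Django project layout):
#   python manage.py migrate survey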
|
[
"eduj1samot@gmail.com"
] |
eduj1samot@gmail.com
|
d7fb0e5f89514b585e6801d0934118e1ee780914
|
2a67dc681af4c4b9ef7a8e18c2ff75377dc5b44f
|
/aws.getCanonicalUserId-python/__main__.py
|
40fa0380f91a55b39b839daa26ac25a498186bc7
|
[] |
no_license
|
ehubbard/templates-aws
|
e323b693a18234defe6bd56ffcc64095dc58e3a1
|
2ae2e7a5d05490078017fed6d132dcdde1f21c63
|
refs/heads/master
| 2022-11-17T13:53:14.531872
| 2020-07-10T21:56:27
| 2020-07-10T21:56:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
import pulumi
import pulumi_aws as aws
current = aws.get_canonical_user_id()
pulumi.export("canonicalUserId", current.id)
|
[
"jvp@justinvp.com"
] |
jvp@justinvp.com
|
cac7f6322f286045bff90d19c7c4da732cb5bb58
|
18111665be78761003021f5231e5bb1cdeba842c
|
/_app/forms.py
|
a48284ecb9ac93996d56e9b0772693bd9b7683d0
|
[
"MIT"
] |
permissive
|
Wilfongjt/lb-data
|
0782896420d2bebc8aab7726f393952a112419c7
|
eca16bcec6cae5822146dfce8ea56e5f533c7f87
|
refs/heads/master
| 2022-11-13T04:33:54.272265
| 2020-07-10T16:08:25
| 2020-07-10T16:08:25
| 255,879,629
| 0
| 0
|
MIT
| 2020-07-10T16:08:26
| 2020-04-15T10:14:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,566
|
py
|
from list_forms import FormList
import json
class FormKeyList(list):
def __init__(self, dictionary):
if 'interfaces' in dictionary:
for formkey in dictionary['interfaces']:
self.append(formkey)
elif 'api-form' in dictionary:
self.append(dictionary['api-name'])
class Form(dict):
def default(self, f):
# pick a default value based on the type recorded in f['json']
if any(ele in f['json'] for ele in ['INTEGER']):
rc = 0
elif any(ele in f['json'] for ele in ['JSONB', 'JSON']):
rc = {'na': 'na'}
else:
rc = 'NA'
return rc
class InsertForm(Form):
def __init__(self, dictionary, form_key, constraints=['C','c']):
# dictionary is table dictionary
# form_key as found in forms
self.dictionary = dictionary
#contextDict = ContextDict().read()
for f in FormList(self.dictionary, form_key, constraints):
self[f['name']] = self.default(f)
self['type']=form_key
class UpdateForm(Form):
def __init__(self, dictionary, form_key, constraints=['I','U','u']):
self.dictionary = dictionary
#contextDict = ContextDict().read()
for f in FormList(self.dictionary, form_key, constraints):
self[f['name']] = self.default(f)##ContextDict(ContextKey('data-context',f))
self['type'] = form_key
def main():
import os
from test_func import test_table
from context_dict import ContextDict
os.environ['LB-TESTING'] = '1'
# FormKey
assert FormKeyList(test_table()) == ['app', 'user']
# InsertForm
form = InsertForm(test_table(), 'app')
assert type(form) == InsertForm
assert form == {'type': 'app', 'app-name': 'NA', 'version': 'NA', 'username': 'NA', 'password': 'NA'}
print('insert form', form)
context = ContextDict().read()
#print('context', context)
form = context.goodify(form)
print('context good', form)
form = context.badify(form)
print('context bad', form)
# UpdateForm
form = UpdateForm(test_table(), 'app')
print('update form', form)
assert type(form) == UpdateForm
assert form == {'id': 'NA', 'type': 'app', 'app-name': 'NA', 'username': 'NA', 'password': 'NA'}
form = context.goodify(form)
print('context good', form)
form = context.badify(form)
print('context bad', form)
form = InsertForm(test_table(), 'user')
print('insert form', form)
os.environ['LB-TESTING'] = '0'
if __name__ == "__main__":
main()
|
[
"wilfongjt@gmail.com"
] |
wilfongjt@gmail.com
|
d0e4abc5b35dc5ffc4c54602f2924a7977a22926
|
a5836366b1ddbbf2bb00f8424b4acef51ba0ca36
|
/src/action/exit.py
|
5bd59e7429d5284f377f267f515904d643889e98
|
[] |
no_license
|
d4rckh/bhs
|
ccbfad6c116af43a48fed750ef68a24d4ea90dc1
|
52f3f39970f83e82d61e3d0c4ee482cd0761daac
|
refs/heads/master
| 2022-12-05T23:53:24.177305
| 2020-09-02T15:11:11
| 2020-09-02T15:11:11
| 292,082,066
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
from src.util.run_cmd import open as run_cmd
from src.util.cRes import cRes as res
from src.util.checkPort import checkPort
class Command:
def __init__(self, args, session):
self.args = args
self.ses = session
self.requiredPorts = []
def run(self):
exit(0)
|
[
"m4dd0x.xxx@gmail.com"
] |
m4dd0x.xxx@gmail.com
|
84ed31ee8fbc6386e0418195aeda611fd7acf26b
|
8ded1e1c993051f3d71c55905ea812cd014aab3e
|
/model/model_QSMnet+.py
|
37d299f7391ef0ce9f0535483bcb310d5c9045d2
|
[] |
no_license
|
clydejung/QSMnet
|
49dcb8ceb77994ff9d9454197f7c1343e1857522
|
078016a69afbf7ba44f9c3c4d32285fc18c3d8f3
|
refs/heads/master
| 2020-07-01T00:39:46.488768
| 2019-08-07T07:28:58
| 2019-08-07T07:28:58
| 200,997,611
| 0
| 0
| null | 2019-08-07T07:33:12
| 2019-08-07T07:33:12
| null |
UTF-8
|
Python
| false
| false
| 8,814
|
py
|
import tensorflow as tf
def batch_norm(x, channel, isTrain, decay=0.99, name="bn"):
with tf.variable_scope(name):
beta = tf.get_variable(initializer=tf.constant(0.0, shape=[channel]), name='beta')
gamma = tf.get_variable(initializer=tf.constant(1.0, shape=[channel]), name='gamma')
batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2, 3], name='moments')
mean_sh = tf.get_variable(initializer=tf.zeros([channel]), name="mean_sh", trainable=False)
var_sh = tf.get_variable(initializer=tf.ones([channel]), name="var_sh", trainable=False)
def mean_var_with_update():
mean_assign_op = tf.assign(mean_sh, mean_sh * decay + (1 - decay) * batch_mean)
var_assign_op = tf.assign(var_sh, var_sh * decay + (1 - decay) * batch_var)
with tf.control_dependencies([mean_assign_op, var_assign_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(tf.cast(isTrain, tf.bool), mean_var_with_update, lambda: (mean_sh, var_sh))
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3, name="normed")
return normed
def conv3d(x, w_shape, b_shape, keep_prob_, train, isTrain):
weights = tf.get_variable("conv_weights", w_shape,
initializer=tf.contrib.layers.xavier_initializer(), trainable=train)
conv_3d = tf.nn.conv3d(x, weights, strides=[1, 1, 1, 1, 1], padding='SAME')
biases = tf.get_variable("biases", b_shape,
initializer=tf.random_normal_initializer(), trainable=train)
conv_3d = tf.nn.bias_add(conv_3d, biases)
channel = conv_3d.get_shape().as_list()[-1]
#print(channel)
bn_x = batch_norm(conv_3d, channel, isTrain)
#return tf.nn.relu(bn_x)
return tf.nn.leaky_relu(bn_x, alpha = 0.1)
def conv(x, w_shape, b_shape, train):
weights = tf.get_variable("weights", w_shape,
initializer=tf.contrib.layers.xavier_initializer(), trainable=train)
biases = tf.get_variable("biases", b_shape,
initializer=tf.random_normal_initializer(), trainable=train)
return tf.nn.conv3d(x, weights, strides=[1, 1, 1, 1, 1], padding='SAME') + biases
def deconv3d(x, w_shape, b_shape, stride, train):
x_shape = tf.shape(x)
weights = tf.get_variable("deconv_weights", w_shape,
initializer=tf.contrib.layers.xavier_initializer(), trainable=train)
biases = tf.get_variable("biases", b_shape,
initializer=tf.random_normal_initializer(), trainable=train)
output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] * 2, x_shape[4] // 2])
return tf.nn.conv3d_transpose(x, weights, output_shape, strides=[1, stride, stride, stride, 1],
padding='SAME') + biases
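# Shape sketch (illustrative): deconv3d doubles each spatial dimension and
# halves the channel count, e.g. [N, 8, 8, 8, 512] -> [N, 16, 16, 16, 256].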
def max_pool(x, n):
return tf.nn.max_pool3d(x, ksize=[1, n, n, n, 1], strides=[1, n, n, n, 1], padding='SAME')
def avg_pool(x, n):
return tf.nn.avg_pool3d(x, ksize=[1, n, n, n, 1], strides=[1, n, n, n, 1], padding='SAME')
def crop_and_concat(x2,x1):
x1_shape = tf.shape(x1)
x2_shape = tf.shape(x2)
# offsets for the top left corner of the crop
offsets = [0, abs((x1_shape[1] - x2_shape[1])) // 2, abs(x1_shape[2] - x2_shape[2]) // 2, abs(x1_shape[3] - x2_shape[3]) // 2, 0]
size = [-1, x2_shape[1], x2_shape[2], x2_shape[3], -1]
x1_crop = tf.slice(x1, offsets, size)
return tf.concat([x1_crop, x2], 4)
def qsmnet_deep(x, keep_prob, reuse, isTrain):
with tf.variable_scope("qsmnet", reuse=reuse) as scope:
with tf.variable_scope("conv11", reuse=reuse) as scope:
conv11 = conv3d(x, [5, 5, 5, 1, 32], [32], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("conv12", reuse=reuse) as scope:
conv12 = conv3d(conv11, [5, 5, 5, 32, 32], [32], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("maxpool1", reuse=reuse) as scope:
pool1 = max_pool(conv12, 2)
scope.reuse_variables()
with tf.variable_scope("conv21", reuse=reuse) as scope:
conv21 = conv3d(pool1, [5, 5, 5, 32, 64], [64], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("conv22", reuse=reuse) as scope:
conv22 = conv3d(conv21, [5, 5, 5, 64, 64], [64], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("maxpool2", reuse=reuse) as scope:
pool2 = max_pool(conv22, 2)
scope.reuse_variables()
with tf.variable_scope("conv31", reuse=reuse) as scope:
conv31 = conv3d(pool2, [5, 5, 5, 64, 128], [128], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("conv32", reuse=reuse) as scope:
conv32 = conv3d(conv31, [5, 5, 5, 128, 128], [128], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("maxpool3", reuse=reuse) as scope:
pool3 = max_pool(conv32, 2)
scope.reuse_variables()
with tf.variable_scope("conv41", reuse=reuse) as scope:
conv41 = conv3d(pool3, [5, 5, 5, 128, 256], [256], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("conv42", reuse=reuse) as scope:
conv42 = conv3d(conv41, [5, 5, 5, 256, 256], [256], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("maxpool4", reuse=reuse) as scope:
pool4 = max_pool(conv42, 2)
scope.reuse_variables()
with tf.variable_scope("l_conv1", reuse=reuse) as scope:
l_conv1 = conv3d(pool4, [5, 5, 5, 256, 512], [512], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("l_conv2", reuse=reuse) as scope:
l_conv2 = conv3d(l_conv1, [5, 5, 5, 512, 512], [512], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("deconv4", reuse=reuse) as scope:
deconv4 = deconv3d(l_conv2, [2, 2, 2, 256, 512], [256], 2, True)
deconv_concat4 = tf.concat([conv42, deconv4], axis=4)
scope.reuse_variables()
with tf.variable_scope("conv51", reuse=reuse) as scope:
conv51 = conv3d(deconv_concat4, [5, 5, 5, 512, 256], [256], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("conv52", reuse-reuse) as scope:
conv52 = conv3d(conv51, [5, 5, 5, 256, 256], [256], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("deconv3", reuse=reuse) as scope:
deconv3 = deconv3d(conv52, [2, 2, 2, 128, 256], [128], 2, True)
deconv_concat3 = tf.concat([conv32, deconv3], axis=4)
scope.reuse_variables()
with tf.variable_scope("conv61", reuse=reuse) as scope:
conv61 = conv3d(deconv_concat3, [5, 5, 5, 256, 128], [128], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("conv62", reuse=reuse) as scope:
conv62 = conv3d(conv61, [5, 5, 5, 128, 128], [128], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("deconv2", reuse=reuse) as scope:
deconv2 = deconv3d(conv62, [2, 2, 2, 64, 128], [64], 2, True)
deconv_concat2 = tf.concat([conv22, deconv2], axis=4)
scope.reuse_variables()
with tf.variable_scope("conv71", reuse=reuse) as scope:
conv71 = conv3d(deconv_concat2, [5, 5, 5, 128, 64], [64], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("conv72", reuse=reuse) as scope:
conv72 = conv3d(conv71, [5, 5, 5, 64, 64], [64], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("deconv1", reuse=reuse) as scope:
deconv1 = deconv3d(conv72, [2, 2, 2, 32, 64], [32], 2, True)
deconv_concat1 = tf.concat([conv12, deconv1], axis=4)
scope.reuse_variables()
with tf.variable_scope("conv81", reuse=reuse) as scope:
conv81 = conv3d(deconv_concat1, [5, 5, 5, 64, 32], [32], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("conv82", reuse=reuse) as scope:
conv82 = conv3d(conv81, [5, 5, 5, 32, 32], [32], keep_prob, True, isTrain)
scope.reuse_variables()
with tf.variable_scope("out", reuse=reuse) as scope:
out_image = conv(conv82, [1, 1, 1, 32, 1], [1], True) # output = [10,512,512,9] #segment_data = [10,512,512]
scope.reuse_variables()
return out_image
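# --- Hypothetical usage sketch (not part of the original file) ---
# Assumes TF1-style graph mode and a cubic input patch whose side is divisible
# by 16 (the encoder max-pools four times); the 64^3 shape is illustrative only.
if __name__ == '__main__':
    import numpy as np
    x_in = tf.placeholder(tf.float32, [None, 64, 64, 64, 1], name='local_field')
    kp = tf.placeholder(tf.float32, name='keep_prob')
    out = qsmnet_deep(x_in, kp, reuse=False, isTrain=False)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # feed a zero volume just to check that shapes are preserved end to end
        qsm = sess.run(out, {x_in: np.zeros([1, 64, 64, 64, 1], np.float32), kp: 1.0})
        print(qsm.shape)  # expected: (1, 64, 64, 64, 1)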
|
[
"wjjung93@snu.ac.kr"
] |
wjjung93@snu.ac.kr
|
5b8924f9597b4cc28c2c83320007c9c3565e364d
|
681a708b81cc11a9fa15bf23ffeda9f07dc4846f
|
/LP2/Rafa/Sub_FinaL_AC.LP2/business/models.py
|
e656b714c2c3c809cf23af2a76b799ec826ad9d9
|
[
"Apache-2.0"
] |
permissive
|
RafaelaMiwaTokas/ACS-OPE
|
57ffb0587f02a93bbc1e63311ac33937fc80efad
|
7c37857982b6f621135a5ac0b9945bde02baafa1
|
refs/heads/master
| 2020-03-30T16:49:57.468191
| 2018-10-17T22:30:45
| 2018-10-17T22:30:45
| 151,429,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 796
|
py
|
from business.exceptions import ParametroNegativoException, ResultadoNegativoException, OperacaoMuitoFacilException, ParametroZeroException
class Calculadora():
def soma(self, a, b):
if a < 0 or b < 0:
raise ParametroNegativoException()
return a + b
def subtrai(self, a, b):
if a < 0 or b < 0:
raise ParametroNegativoException()
resultado = a - b
if resultado < 0:
raise ResultadoNegativoException(resultado)
return a - b
def multiplica(self, a, b):
if a == 1 or b == 1:
raise OperacaoMuitoFacilException(a*b)
return a * b
def divide(self, a, b):
try:
return a / b
except ZeroDivisionError:
raise ParametroZeroException()
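# --- Hypothetical usage example (not part of the original file); assumes the
# business.exceptions module resolves as in the imports above. ---
if __name__ == '__main__':
    calc = Calculadora()
    print(calc.soma(2, 3))        # 5
    print(calc.multiplica(4, 5))  # 20
    try:
        calc.divide(1, 0)
    except ParametroZeroException:
        print('division by zero is rejected')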
|
[
"1800706@SALAS.AULAS"
] |
1800706@SALAS.AULAS
|
fa1e88b93bfadafa9a610505a1f1994c32e313f6
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/era5_scripts/02_preprocessing/lag82/600-tideGauge.py
|
8f9c90a0dc2de8f6b058903a57b951b859a2c87d
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465
| 2021-06-25T21:00:44
| 2021-06-25T21:00:44
| 229,080,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,984
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 17:12:23 2020
****************************************************
Load predictors & predictands + predictor importance
****************************************************
@author: Michael Tadesse
"""
#import packages
import os
import pandas as pd
import datetime as dt #used for timedelta
from datetime import datetime
#define directories
dir_in = '/lustre/fs0/home/mtadesse/ereaFiveCombine'
dir_out = '/lustre/fs0/home/mtadesse/eraFiveLag'
def lag():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
x = 600
y = 601
for t in range(x, y):
tg_name = tg_list_name[t]
print(tg_name, '\n')
# #check if the file exists
# os.chdir(dir_out)
# if (os.path.isfile(tg_name)):
# print('file already exists')
# continue
#cd to where the actual file is
os.chdir(dir_in)
pred = pd.read_csv(tg_name)
pred.sort_values(by = 'date', inplace=True)
pred.reset_index(inplace = True)
pred.drop('index', axis = 1, inplace = True)
#create a daily time series - date_range
#get only the ymd of the start and end times
start_time = pred['date'][0].split(' ')[0]
end_time = pred['date'].iloc[-1].split(' ')[0]
print(start_time, ' - ', end_time, '\n')
date_range = pd.date_range(start_time, end_time, freq = 'D')
#defining time changing lambda functions
time_str = lambda x: str(x)
time_converted_str = pd.DataFrame(map(time_str, date_range), columns = ['date'])
time_converted_stamp = pd.DataFrame(date_range, columns = ['timestamp'])
"""
first prepare the six time lagging dataframes
then use the merge function to merge the original
predictor with the lagging dataframes
"""
#prepare lagged time series for time only
#note here that since ERA20C has 3hrly data
#the lag_hrs is increased from 6(eraint) to 11 (era20C)
time_lagged = pd.DataFrame()
lag_hrs = [0, 6, 12, 18, 24, 30]
for lag in lag_hrs:
lag_name = 'lag'+str(lag)
lam_delta = lambda x: str(x - dt.timedelta(hours = lag))
lag_new = pd.DataFrame(map(lam_delta, time_converted_stamp['timestamp']), \
columns = [lag_name])
time_lagged = pd.concat([time_lagged, lag_new], axis = 1)
        #dataframe that contains all lagged time series (just time)
time_all = pd.concat([time_converted_str, time_lagged], axis = 1)
pred_lagged = pd.DataFrame()
for ii in range(1,time_all.shape[1]): #to loop through the lagged time series
print(time_all.columns[ii])
            #extracting the corresponding lagged time series
lag_ts = pd.DataFrame(time_all.iloc[:,ii])
lag_ts.columns = ['date']
            #merge the selected lagged time with the predictor on = "date"
pred_new = pd.merge(pred, lag_ts, on = ['date'], how = 'right')
pred_new.drop('Unnamed: 0', axis = 1, inplace = True)
#sometimes nan values go to the bottom of the dataframe
#sort df by date -> reset the index -> remove old index
pred_new.sort_values(by = 'date', inplace=True)
pred_new.reset_index(inplace=True)
pred_new.drop('index', axis = 1, inplace= True)
#concatenate lagged dataframe
if ii == 1:
pred_lagged = pred_new
else:
pred_lagged = pd.concat([pred_lagged, pred_new.iloc[:,1:]], axis = 1)
#cd to saving directory
os.chdir(dir_out)
pred_lagged.to_csv(tg_name)
os.chdir(dir_in)
#run script
lag()
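# --- Illustrative sketch (not part of the original script) ---
# Shows what one lagged date column looks like: each timestamp shifted back by
# `lag` hours and stringified, exactly as lam_delta does above.
#   d = pd.date_range('2000-01-01', '2000-01-02', freq = 'D')
#   print([str(t - dt.timedelta(hours = 6)) for t in d])
#   # -> ['1999-12-31 18:00:00', '2000-01-01 18:00:00']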
|
[
"michaelg.tadesse@gmail.com"
] |
michaelg.tadesse@gmail.com
|
580529e04722c5f5a425b10e0393ded1433d5421
|
4f75a30a64a682468aa399367a652b97c151c4ef
|
/sdk/geetest_lib.py
|
b6ef2321d487e2bc26b65a7e3af78097b750b160
|
[] |
no_license
|
wdjlover/gt3-server-python-flask-bypass
|
eec010263e5675aaee32660de091469357a79cca
|
11062beeb1ee23aab8e72cb4313419ce85e68217
|
refs/heads/master
| 2023-01-21T02:56:06.414627
| 2020-11-26T09:55:31
| 2020-11-26T09:55:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,940
|
py
|
import string
import random
import json
import requests
import hmac
import hashlib
from .geetest_lib_result import GeetestLibResult
# SDK lib package: core logic.
class GeetestLib:
    IS_DEBUG = True  # debug switch: whether to print debug logs
API_URL = "http://api.geetest.com"
REGISTER_URL = "/register.php"
VALIDATE_URL = "/validate.php"
JSON_FORMAT = "1"
NEW_CAPTCHA = True
    HTTP_TIMEOUT_DEFAULT = 5  # unit: seconds
VERSION = "python-flask:3.1.1"
    GEETEST_CHALLENGE = "geetest_challenge"  # geetest second-step verification form field: challenge
    GEETEST_VALIDATE = "geetest_validate"  # geetest second-step verification form field: validate
    GEETEST_SECCODE = "geetest_seccode"  # geetest second-step verification form field: seccode
def __init__(self, geetest_id, geetest_key):
self.geetest_id = geetest_id
self.geetest_key = geetest_key
self.libResult = GeetestLibResult()
def gtlog(self, message):
if self.IS_DEBUG:
print("gtlog: " + message)
    # verification initialization
    def register(self, digestmod, param_dict):
        self.gtlog("register(): starting verification initialization, digestmod={0}.".format(digestmod))
        origin_challenge = self.request_register(param_dict)
        self.build_register_result(origin_challenge, digestmod)
        self.gtlog("register(): verification initialization done, lib result={0}.".format(self.libResult))
return self.libResult
def request_register(self, param_dict):
param_dict.update({"gt": self.geetest_id, "sdk": self.VERSION, "json_format": self.JSON_FORMAT})
register_url = self.API_URL + self.REGISTER_URL
self.gtlog("requestRegister(): 验证初始化, 向极验发送请求, url={0}, params={1}.".format(register_url, param_dict))
try:
res = requests.get(register_url, params=param_dict, timeout=self.HTTP_TIMEOUT_DEFAULT)
res_body = res.text if res.status_code == requests.codes.ok else ""
self.gtlog("requestRegister(): 验证初始化, 与极验网络交互正常, 返回码={0}, 返回body={1}.".format(res.status_code, res_body))
res_dict = json.loads(res_body)
origin_challenge = res_dict["challenge"]
except Exception as e:
self.gtlog("requestRegister(): 验证初始化, 请求异常,后续流程走宕机模式, " + repr(e))
origin_challenge = ""
return origin_challenge
def local_init(self):
self.build_register_result("", "")
self.gtlog("local_init(): bypass当前状态为fail,后续流程将进入宕机模式, " + self.libResult.data)
return self.libResult
    # build the verification-initialization result
    def build_register_result(self, origin_challenge, digestmod):
        # an empty origin_challenge, or the value "0", means failure
        if not origin_challenge or origin_challenge == "0":
            # generate a random 32-character string locally
            challenge = "".join(random.sample('abcdefghijklmnopqrstuvwxyz0123456789', 32))
            data = json.dumps(
                {"success": 0, "gt": self.geetest_id, "challenge": challenge, "new_captcha": self.NEW_CAPTCHA})
            self.libResult.set_all(0, data, "bypass status is fail, subsequent flow enters failover mode")
else:
if digestmod == "md5":
challenge = self.md5_encode(origin_challenge + self.geetest_key)
elif digestmod == "sha256":
challenge = self.sha256_endode(origin_challenge + self.geetest_key)
elif digestmod == "hmac-sha256":
challenge = self.hmac_sha256_endode(origin_challenge, self.geetest_key)
else:
challenge = self.md5_encode(origin_challenge + self.geetest_key)
data = json.dumps(
{"success": 1, "gt": self.geetest_id, "challenge": challenge, "new_captcha": self.NEW_CAPTCHA})
self.libResult.set_all(1, data, "")
    # second-step verification for the normal flow (i.e. initialization succeeded)
    def successValidate(self, challenge, validate, seccode, param_dict=None):
        if param_dict is None:  # avoid sharing a mutable default dict between calls
            param_dict = {}
        self.gtlog(
            "successValidate(): starting second-step verification, normal mode, "
            "challenge={0}, validate={1}, seccode={2}.".format(challenge, validate, seccode))
        if not self.check_param(challenge, validate, seccode):
            self.libResult.set_all(0, "", "normal mode, local check: challenge, validate and seccode must not be empty")
else:
response_seccode = self.requestValidate(challenge, validate, seccode, param_dict)
if not response_seccode:
self.libResult.set_all(0, "", "请求极验validate接口失败")
elif response_seccode == "false":
self.libResult.set_all(0, "", "极验二次验证不通过")
else:
self.libResult.set_all(1, "", "")
self.gtlog("successValidate(): 二次验证 正常模式, lib包返回信息={0}.".format(self.libResult))
return self.libResult
    # second-step verification for the failure flow (initialization failed: failover mode)
    # Note: in failover mode the intent is to keep the main business flow alive, so only
    # a simple parameter check is done here; you can design your own logic.
    def failValidate(self, challenge, validate, seccode):
        self.gtlog(
            "failValidate(): starting second-step verification, failover mode, "
            "challenge={0}, validate={1}, seccode={2}.".format(challenge, validate, seccode))
        if not self.check_param(challenge, validate, seccode):
            self.libResult.set_all(0, "", "failover mode, local check: challenge, validate and seccode must not be empty.")
        else:
            self.libResult.set_all(1, "", "")
        self.gtlog("failValidate(): second-step verification, failover mode, lib result={0}.".format(self.libResult))
return self.libResult
    # send the second-step verification request to geetest via POST
def requestValidate(self, challenge, validate, seccode, param_dict):
param_dict.update(
{"seccode": seccode, "json_format": self.JSON_FORMAT, "challenge": challenge, "sdk": self.VERSION,
"captchaid": self.geetest_id})
validate_url = self.API_URL + self.VALIDATE_URL
self.gtlog("requestValidate(): 二次验证 正常模式, 向极验发送请求, url={0}, params={1}.".format(validate_url, param_dict))
try:
res = requests.post(validate_url, data=param_dict, timeout=self.HTTP_TIMEOUT_DEFAULT)
res_body = res.text if res.status_code == requests.codes.ok else ""
            self.gtlog(
                "requestValidate(): second-step verification, normal mode, geetest network exchange OK, status={0}, body={1}.".format(res.status_code, res_body))
res_dict = json.loads(res_body)
seccode = res_dict["seccode"]
except Exception as e:
self.gtlog("requestValidate(): 二次验证 正常模式, 请求异常, " + repr(e))
seccode = ""
return seccode
    # check the three second-step verification parameters; returns True if valid, False otherwise
def check_param(self, challenge, validate, seccode):
return not (
challenge is None or challenge.isspace() or validate is None or validate.isspace() or seccode is None or seccode.isspace())
def md5_encode(self, value):
md5 = hashlib.md5()
md5.update(value.encode("utf-8"))
return md5.hexdigest()
def sha256_endode(self, value):
sha256 = hashlib.sha256()
sha256.update(value.encode("utf-8"))
return sha256.hexdigest()
def hmac_sha256_endode(self, value, key):
return hmac.new(key.encode("utf-8"), value.encode("utf-8"), digestmod=hashlib.sha256).hexdigest()
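# --- Hypothetical usage sketch (not part of the original file) ---
# 'your-geetest-id' / 'your-geetest-key' are placeholder credentials, and it is
# assumed here that GeetestLibResult exposes the status/data fields filled via
# set_all(); the flow mirrors the methods above.
#   lib = GeetestLib('your-geetest-id', 'your-geetest-key')
#   init_result = lib.register('md5', {'client_type': 'web', 'ip_address': '127.0.0.1'})
#   # hand init_result.data (JSON) to the frontend; after the captcha is solved:
#   check = lib.successValidate(challenge, validate, seccode)
#   passed = (check.status == 1)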
|
[
"wangbo@geetest.com"
] |
wangbo@geetest.com
|
bba2b711e7ce6925f1e86ef8dbe91b5ebc967e6e
|
1561c3bab9abcfd0b943ef33695dcd1919e19fb2
|
/backend/editors/regex_dict.py
|
6094715e0334c2675b915c716bb89cc72b5b0cfc
|
[] |
no_license
|
Cyki89/RealTimeTrackingApplication
|
6dd7d8aefa9699cb75d3f8af3df0776f16e1d8ae
|
c5bbbf2ec80223f09b3e139fe02731d236dff29b
|
refs/heads/master
| 2022-12-14T08:16:55.097769
| 2020-09-10T17:53:32
| 2020-09-10T17:53:32
| 291,335,316
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
import re
from backend.utils.utils import singleton
class RegexDict(dict):
def __init__(self):
super().__init__()
def __getitem__(self, item):
for k, v in self.items():
if re.search(k, item):
return v
raise KeyError
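# --- Hypothetical usage example (not part of the original file); assumes the
# module's own imports resolve inside the project. Keys are regex patterns and
# lookup returns the value of the first pattern that matches (insertion order).
if __name__ == '__main__':
    handlers = RegexDict()
    handlers[r'^GET /api/'] = 'api-handler'
    handlers[r'^GET /'] = 'static-handler'
    print(handlers['GET /api/users'])    # -> api-handler
    print(handlers['GET /index.html'])   # -> static-handler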
|
[
"jakub.cykowski@gmail.com"
] |
jakub.cykowski@gmail.com
|
8657979c109138a54ea8897da438a5eb4cc0b1f3
|
f1a29aea3a79d5696368e5dcf8ebdd615cb831c4
|
/start.py
|
a4b0c301fc8e25185fee910048f84f6a52546ca6
|
[
"MIT"
] |
permissive
|
ChristopherSung/Stage1stHelper
|
7817e641ac661afd8e1c8e901d94cfdba7db7287
|
d86dbf1058150b59c3cb71f306a9812fab715c91
|
refs/heads/master
| 2023-08-16T05:48:28.081930
| 2021-10-15T06:53:40
| 2021-10-15T06:53:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 861
|
py
|
import json
import logging
import time
import os
from member import Member
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(levelname)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(__name__)
config = json.load(open('./config.json', 'r', encoding='utf-8'))
refresh_time = config['refresh_time']
member_list = []
# keep only the members that logged in successfully
for i in config['members']:
member = Member(i['username'], i['password'])
member.login(on_success=lambda: member_list.append(member),
                 on_failure=lambda: logger.warning(i['username'] + ' login failed'))
# no account logged in successfully
if not member_list:
    logger.error('No account logged in successfully, about to exit')
os.system('pause')
exit(1)
while True:
for i in member_list:
i.action()
time.sleep(refresh_time)
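# --- Expected config.json shape, inferred from the keys read above; the values
# shown are placeholders, not from the original repo: ---
# {
#     "refresh_time": 60,
#     "members": [
#         {"username": "user1", "password": "pass1"},
#         {"username": "user2", "password": "pass2"}
#     ]
# }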
|
[
"unknown"
] |
unknown
|
e78fbe197918f6638345b7ed338a008dbc44175a
|
a8c5397b5f32312095ef075c791502af055c6efb
|
/Processor/script/getkey.py
|
ccc057df0fb077629b4838489067c723d27c869e
|
[
"MIT"
] |
permissive
|
LinkleYping/BlockImage_System
|
77c126bfc55e8e5907178206d7d355cd58bb9dc3
|
66112a911c599eeb87ce0fbb3a10180739f45771
|
refs/heads/master
| 2022-10-25T00:17:16.156961
| 2020-06-15T02:45:37
| 2020-06-15T02:45:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
from random import Random
def random_str(randomlength):
str = ''
chars = 'abcdef0123456789'
length = len(chars) - 1
random = Random()
for i in range(randomlength):
str += chars[random.randint(0, length)]
return str
#
# if __name__ == '__main__':
# print(random_str(6))
|
[
"heyilinge0@gmail.com"
] |
heyilinge0@gmail.com
|
cf63b953d2e55044236cb7a0d49e53c7e3e267a9
|
c6759b857e55991fea3ef0b465dbcee53fa38714
|
/tools/nntool/nntool/quantization/float/quantizers/quantize_float.py
|
7c64d8773104ce0a2e7df691e9c41a6cb1372580
|
[
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Apache-2.0"
] |
permissive
|
GreenWaves-Technologies/gap_sdk
|
1b343bba97b7a5ce62a24162bd72eef5cc67e269
|
3fea306d52ee33f923f2423c5a75d9eb1c07e904
|
refs/heads/master
| 2023-09-01T14:38:34.270427
| 2023-08-10T09:04:44
| 2023-08-10T09:04:44
| 133,324,605
| 145
| 96
|
Apache-2.0
| 2023-08-27T19:03:52
| 2018-05-14T07:50:29
|
C
|
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from copy import deepcopy
from typing import cast
import numpy as np
from bfloat16 import bfloat16
from nntool.graph.types import QuantizeNode
from nntool.quantization.float.float_quantization_handler import \
FloatQuantizionHandler
from nntool.quantization.new_qrec import QRec
from nntool.quantization.qtype import QType
from nntool.quantization.unified_quantization_handler import (out_qs_constraint,
params_type)
@params_type(QuantizeNode)
@out_qs_constraint({'dtype': set([np.float32, np.float16, bfloat16])})
class HandleQuantize(FloatQuantizionHandler):
@classmethod
def _quantize(cls, params, in_qs, stats, **kwargs):
params = cast(QuantizeNode, params)
force_out_qs, dtype = cls.get_float_opts(**kwargs)
if force_out_qs:
o_q = deepcopy(force_out_qs[0])
else:
o_q = QType(
dtype=dtype, min_val=in_qs[0].min_val, max_val=in_qs[0].max_val)
params.from_qtype = in_qs[0]
params.to_qtype = o_q
return QRec.float(in_qs=in_qs, out_qs=[o_q], float_dtype=dtype)
|
[
"yao.zhang@greenwaves-technologies.com"
] |
yao.zhang@greenwaves-technologies.com
|
62fd032eecda11368b44164612bad0d2d25fcd0d
|
f7d7d8e7ddd5ec47ad801bfe07c4fa80389afcbe
|
/lib/assertions.py
|
f3973661b7ca0a9ffd8feb6a7a936c535c92b665
|
[] |
no_license
|
MarySukhorukova/LearnQA_Python_API
|
d151423639412fd000eb99bd52cc9a1bd6b5c4be
|
263eaf489490862c5f241369577307e50e96d8ea
|
refs/heads/master
| 2023-06-27T06:22:41.926552
| 2021-07-19T20:25:20
| 2021-07-19T20:25:20
| 377,433,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,547
|
py
|
from requests import Response
import json
class Assertions:
@staticmethod
def assert_json_value_by_name(response: Response, name, expected_value, error_message):
try:
response_as_dict = response.json()
except json.JSONDecodeError:
assert False, f"response is not in JSON format. Response text is '{response.text}'"
assert name in response_as_dict, f"Response JSON doesn't have key '{name}'"
assert response_as_dict[name] == expected_value, error_message
@staticmethod
def assert_code_status(response: Response, expected_status_code):
assert response.status_code == expected_status_code, \
            f'Unexpected status code! Expected: {expected_status_code}, Actual: {response.status_code}'
@staticmethod
def assert_json_has_key(response: Response, name):
try:
response_as_dict = response.json()
except json.JSONDecodeError:
assert False, f"response is not in JSON format. Response text is '{response.text}'"
assert name in response_as_dict, f"Response JSON doesn't have key '{name}'"
@staticmethod
def assert_json_has_no_key(response: Response, name):
try:
response_as_dict = response.json()
except json.JSONDecodeError:
assert False, f"response is not in JSON format. Response text is '{response.text}'"
assert name not in response_as_dict, f"Response JSON should't have key '{name}'. But it is present"
@staticmethod
def assert_json_has_keys(response: Response, names: list):
try:
response_as_dict = response.json()
except json.JSONDecodeError:
assert False, f"response is not in JSON format. Response text is '{response.text}'"
for name in names:
assert name in response_as_dict, f"Response JSON doesn't have key '{name}'"
@staticmethod
def assert_json_has_no_keys(response: Response, names: list):
try:
response_as_dict = response.json()
except json.JSONDecodeError:
assert False, f"response is not in JSON format. Response text is '{response.text}'"
for name in names:
            assert name not in response_as_dict, f"Response JSON shouldn't have key '{name}'. But it is present"
@staticmethod
def assert_check_str(response: Response, name):
assert response.content.decode("utf-8") == f"The following required params are missed: {name}", \
            f'Wrong error message: {response.content}'
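# --- Hypothetical usage example (not part of the original file) ---
# httpbin.org is used purely as a stand-in endpoint for illustration.
if __name__ == '__main__':
    import requests
    response = requests.get('https://httpbin.org/get')
    Assertions.assert_code_status(response, 200)
    Assertions.assert_json_has_keys(response, ['url', 'headers'])
    Assertions.assert_json_has_no_key(response, 'token')
    print('all assertions passed')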
|
[
"57411226+MarySukhorukova@users.noreply.github.com"
] |
57411226+MarySukhorukova@users.noreply.github.com
|
a5577ef843db47762ad28013db1fc9d919b1243d
|
63b79c404d83e4980891c488f4d9592558ecda35
|
/assets/src/ba_data/python/bastd/ui/coop/level.py
|
8a8d60d6e175af38081aa58b8a129c77b0e3c9b7
|
[
"MIT"
] |
permissive
|
kakekakeka/ballistica
|
56e8879cd5b4b990e5e05da3dfd300d7cbb45446
|
3ffeff8ce401a00128363ff08b406471092adaa9
|
refs/heads/master
| 2022-11-14T08:11:57.160782
| 2020-07-01T05:43:13
| 2020-07-01T05:49:44
| 276,755,445
| 2
| 0
|
MIT
| 2020-07-02T22:18:37
| 2020-07-02T22:18:36
| null |
UTF-8
|
Python
| false
| false
| 3,467
|
py
|
# Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Bits of utility functionality related to co-op levels."""
from __future__ import annotations
import ba
class CoopLevelLockedWindow(ba.Window):
"""Window showing that a level is locked."""
def __init__(self, name: ba.Lstr, dep_name: ba.Lstr):
width = 550.0
height = 250.0
lock_tex = ba.gettexture('lock')
super().__init__(root_widget=ba.containerwidget(
size=(width, height),
transition='in_right',
scale=1.7 if ba.app.small_ui else 1.3 if ba.app.med_ui else 1.0))
ba.textwidget(parent=self._root_widget,
position=(150 - 20, height * 0.63),
size=(0, 0),
h_align='left',
v_align='center',
text=ba.Lstr(resource='levelIsLockedText',
subs=[('${LEVEL}', name)]),
maxwidth=400,
color=(1, 0.8, 0.3, 1),
scale=1.1)
ba.textwidget(parent=self._root_widget,
position=(150 - 20, height * 0.48),
size=(0, 0),
h_align='left',
v_align='center',
text=ba.Lstr(resource='levelMustBeCompletedFirstText',
subs=[('${LEVEL}', dep_name)]),
maxwidth=400,
color=ba.app.infotextcolor,
scale=0.8)
ba.imagewidget(parent=self._root_widget,
position=(56 - 20, height * 0.39),
size=(80, 80),
texture=lock_tex,
opacity=1.0)
btn = ba.buttonwidget(parent=self._root_widget,
position=((width - 140) / 2, 30),
size=(140, 50),
label=ba.Lstr(resource='okText'),
on_activate_call=self._ok)
ba.containerwidget(edit=self._root_widget,
selected_child=btn,
start_button=btn)
ba.playsound(ba.getsound('error'))
def _ok(self) -> None:
ba.containerwidget(edit=self._root_widget, transition='out_left')
|
[
"ericfroemling@gmail.com"
] |
ericfroemling@gmail.com
|
f6019218c0ab19ab8d1de61088e9c3a88f0d22c1
|
1cb5d0275cd81cd49e4cf1bd158e2101a3fe5d79
|
/Prac04/intermediate exercise 1.py
|
45eae55e1263e7645d75d0cba89912ff1ed160d2
|
[] |
no_license
|
kkemei/Programming1-Practicals
|
1479be8bd4931bcdfca15313c78c316ebf4ed71d
|
2a064a27c6d43fc934e22b1505daf61726ffaeb5
|
refs/heads/master
| 2021-01-15T15:31:57.659734
| 2016-10-13T01:28:29
| 2016-10-13T01:28:29
| 68,797,239
| 0
| 0
| null | 2016-10-05T12:29:26
| 2016-09-21T08:38:19
|
Python
|
UTF-8
|
Python
| false
| false
| 449
|
py
|
def main():
numbers = []
for i in range(0, 5):
num_value = int(input("Enter a number? "))
numbers.append(num_value)
print("the first number is {}".format(numbers[0]))
print("the last number is {}".format(numbers[4]))
print("the smallest number is {}".format(min(numbers)))
print("the largest number is {}".format(max(numbers)))
print("the average number is {}".format(sum(numbers)/len(numbers)))
main()
|
[
"kipngetich.kemei@my.jcu.edu.au"
] |
kipngetich.kemei@my.jcu.edu.au
|
541a93cf695cfbde477fe29275eea22e11479520
|
efcf128f6fbd5419fba137dd2db7775b41567b7f
|
/doll_control.py
|
63979b91cb88a248e78f19e3466e9119d6b0777a
|
[] |
no_license
|
dawnho/doll_control
|
e6b4975a158b81551375a6aa97a30c9665e35bb3
|
ee14b05ea447be46c6e5390758cd7a583a6cb61f
|
refs/heads/master
| 2021-05-15T20:15:30.090899
| 2017-10-22T06:20:52
| 2017-10-22T06:20:52
| 107,842,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
from threading import Thread
import socket
import time
import RPi.GPIO as GPIO
VERBOSE = True
IP_PORT = 22000
P_BUTTON = 24
def setup():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(P_BUTTON, GPIO.IN, GPIO.PUD_UP)
def debug(text):
if VERBOSE:
print "Debug:---", text
# ------ class SocketHandler -----
class SocketHandler(Thread):
|
[
"dawn.ho@gmail.com"
] |
dawn.ho@gmail.com
|
f807a94a2d8348d9dc63eb80f02da06acbb76740
|
f7957eff1c606997661e21314dfe99550bc2e3cd
|
/code/server.py
|
f6d4290609e422bb90ff11cda45e03dd20cea7de
|
[] |
no_license
|
halfopen/GraduationThesis
|
3a738a346b8a1bbf1271fe65483a99c19dd048b1
|
a7e763ca9d4995a5afaded61926df7c29ce5a84b
|
refs/heads/master
| 2021-06-14T03:29:15.177322
| 2021-03-18T03:50:14
| 2021-03-18T03:50:14
| 204,376,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
# coding: utf-8
from flask import Flask, request
from handler import Handler
from flask_json import FlaskJSON, JsonError, json_response, as_json
app = Flask(__name__)
json = FlaskJSON(app)
@app.route('/', methods = ['POST', 'GET'])
@as_json
def serve():
if request.method == "GET":
return "OK"
hdl = Handler(request.form)
return hdl.handle()
if __name__ == "__main__":
app.run()
|
[
"halfopen@yeah.net"
] |
halfopen@yeah.net
|
56ac1babdae96b39f0d63482131bf16c3706e4f6
|
7513cd51d989bf0dc43287a70b238a02aee51486
|
/ch-13/ball/settings.py
|
40abf3b897f2543ac6ff69fb019338ce5b0a9f51
|
[] |
no_license
|
aklap/python-crash-course
|
826809649ba4f90fd3e12fcce26808e21e0adfec
|
7854c5242eaa9b5c4e15de03e9fdf859c9124372
|
refs/heads/master
| 2020-04-17T07:25:54.124403
| 2019-01-30T20:38:59
| 2019-01-30T20:38:59
| 166,368,994
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
class Settings():
"""Class for settings of a game."""
def __init__(self):
self.ball_limit = 3
|
[
"aklaporte@gmail.com"
] |
aklaporte@gmail.com
|
2218410e4e8156d005707c870ee0c2c1e2ca3200
|
711cdfa4ee0619fd5223e7f38356b112a7551f6a
|
/bible-chronicle/wsgi.py
|
e59b9374b07f82d825d54a31635b3fe4b31e40e9
|
[] |
no_license
|
ygmpkk/Bible-Chronicle
|
5dc5894b16a62c9b10e9d0771026c4fa65745d6f
|
b922039061ef338ef2ef4e587004df718626d871
|
refs/heads/master
| 2020-12-24T17:18:17.562466
| 2012-06-03T12:22:55
| 2012-06-03T12:22:55
| 4,112,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,146
|
py
|
"""
WSGI config for djproject project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bible-chronicle.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
[
"Timothy@.(none)"
] |
Timothy@.(none)
|
6f5906bfbdeacf315d3770da2d384597f68228fe
|
9a09e0fdd7de7bed272bd2ae88a28834220cd3f3
|
/virtual/bin/pycodestyle
|
6e7659838eb27eb6e35add6595080878e2491c14
|
[
"MIT"
] |
permissive
|
DancanOduor/Minute-Pitches
|
5da9e46b2b4b21847c52ed76e851d872fb1fd22f
|
50d56a609dfd7a5719f19ad3297959c3585dcd1c
|
refs/heads/master
| 2022-10-07T13:33:38.047493
| 2019-11-27T12:29:06
| 2019-11-27T12:29:06
| 224,148,901
| 0
| 0
|
MIT
| 2022-09-16T18:14:07
| 2019-11-26T09:11:00
|
Python
|
UTF-8
|
Python
| false
| false
| 246
|
#!/home/moringa/python/Pitches/virtual/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pycodestyle import _main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(_main())
|
[
"dancanoduor1990@gmail"
] |
dancanoduor1990@gmail
|
|
a57a71644572968524842a8915273d496e97af7b
|
1930fec37ace24d01946845e9566071e4e96fe63
|
/tripletreid/embed.py
|
598574590e2a22c411c302f7163e7ccff5876089
|
[
"MIT"
] |
permissive
|
Qidian213/DeepCC-T
|
9e0599934c09495a3dfba051943892ca911fe730
|
697e792d38f46ecbe643ba756e517c2a893efb65
|
refs/heads/master
| 2021-10-26T08:04:26.753433
| 2019-04-11T09:16:31
| 2019-04-11T09:16:31
| 178,881,452
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,183
|
py
|
#!/usr/bin/env python
from argparse import ArgumentParser
from importlib import import_module
from itertools import count
import os
import h5py
import json
import numpy as np
import tensorflow as tf
from aggregators import AGGREGATORS
import common
parser = ArgumentParser(description='Embed a dataset using a trained network.')
# Required
parser.add_argument(
'--experiment_root', required=True,
help='Location used to store checkpoints and dumped data.')
parser.add_argument(
'--dataset', required=True,
help='Path to the dataset csv file to be embedded.')
# Optional
parser.add_argument(
'--image_root', type=common.readable_directory,
help='Path that will be pre-pended to the filenames in the train_set csv.')
parser.add_argument(
'--checkpoint', default=None,
help='Name of checkpoint file of the trained network within the experiment '
'root. Uses the last checkpoint if not provided.')
parser.add_argument(
'--loading_threads', default=8, type=common.positive_int,
help='Number of threads used for parallel data loading.')
parser.add_argument(
'--batch_size', default=256, type=common.positive_int,
help='Batch size used during evaluation, adapt based on available memory.')
parser.add_argument(
'--filename', default=None,
help='Name of the HDF5 file in which to store the embeddings, relative to'
' the `experiment_root` location. If omitted, appends `_embeddings.h5`'
' to the dataset name.')
parser.add_argument(
'--flip_augment', action='store_true', default=False,
help='When this flag is provided, flip augmentation is performed.')
parser.add_argument(
'--crop_augment', choices=['center', 'avgpool', 'five'], default=None,
help='When this flag is provided, crop augmentation is performed.'
'`avgpool` means the full image at the precrop size is used and '
'the augmentation is performed by the average pooling. `center` means'
'only the center crop is used and `five` means the four corner and '
'center crops are used. When not provided, by default the image is '
'resized to network input size.')
parser.add_argument(
'--aggregator', choices=AGGREGATORS.keys(), default=None,
help='The type of aggregation used to combine the different embeddings '
'after augmentation.')
parser.add_argument(
'--quiet', action='store_true', default=False,
help='Don\'t be so verbose.')
def flip_augment(image, fid, pid):
""" Returns both the original and the horizontal flip of an image. """
images = tf.stack([image, tf.reverse(image, [1])])
return images, tf.stack([fid]*2), tf.stack([pid]*2)
def five_crops(image, crop_size):
""" Returns the central and four corner crops of `crop_size` from `image`. """
image_size = tf.shape(image)[:2]
crop_margin = tf.subtract(image_size, crop_size)
assert_size = tf.assert_non_negative(
crop_margin, message='Crop size must be smaller or equal to the image size.')
with tf.control_dependencies([assert_size]):
top_left = tf.floor_div(crop_margin, 2)
bottom_right = tf.add(top_left, crop_size)
center = image[top_left[0]:bottom_right[0], top_left[1]:bottom_right[1]]
top_left = image[:-crop_margin[0], :-crop_margin[1]]
top_right = image[:-crop_margin[0], crop_margin[1]:]
bottom_left = image[crop_margin[0]:, :-crop_margin[1]]
bottom_right = image[crop_margin[0]:, crop_margin[1]:]
return center, top_left, top_right, bottom_left, bottom_right
def main():
# Verify that parameters are set correctly.
args = parser.parse_args()
print(args)
# Possibly auto-generate the output filename.
if args.filename is None:
basename = os.path.basename(args.dataset)
args.filename = os.path.splitext(basename)[0] + '_embeddings.h5'
args.filename = os.path.join(args.experiment_root, args.filename)
# Load the args from the original experiment.
args_file = os.path.join(args.experiment_root, 'args.json')
if os.path.isfile(args_file):
if not args.quiet:
print('Loading args from {}.'.format(args_file))
with open(args_file, 'r') as f:
args_resumed = json.load(f)
# Add arguments from training.
for key, value in args_resumed.items():
args.__dict__.setdefault(key, value)
# A couple special-cases and sanity checks
if (args_resumed['crop_augment']) == (args.crop_augment is None):
print('WARNING: crop augmentation differs between training and '
'evaluation.')
args.image_root = args.image_root or args_resumed['image_root']
else:
raise IOError('`args.json` could not be found in: {}'.format(args_file))
# Check a proper aggregator is provided if augmentation is used.
if args.flip_augment or args.crop_augment == 'five':
if args.aggregator is None:
            print('ERROR: Test time augmentation is performed but no aggregator '
                  'was specified.')
exit(1)
else:
if args.aggregator is not None:
print('ERROR: No test time augmentation that needs aggregating is '
'performed but an aggregator was specified.')
exit(1)
if not args.quiet:
print('Evaluating using the following parameters:')
for key, value in sorted(vars(args).items()):
print('{}: {}'.format(key, value))
# Load the data from the CSV file.
print(args.dataset)
print(args.image_root)
_, data_fids = common.load_dataset(args.dataset, args.image_root)
net_input_size = (args.net_input_height, args.net_input_width)
pre_crop_size = (args.pre_crop_height, args.pre_crop_width)
# Setup a tf Dataset containing all images.
dataset = tf.data.Dataset.from_tensor_slices(data_fids)
# Convert filenames to actual image tensors.
dataset = dataset.map(
lambda fid: common.fid_to_image(
fid, tf.constant('dummy'), image_root=args.image_root,
image_size=pre_crop_size if args.crop_augment else net_input_size),
num_parallel_calls=args.loading_threads)
# Augment the data if specified by the arguments.
# `modifiers` is a list of strings that keeps track of which augmentations
# have been applied, so that a human can understand it later on.
modifiers = ['original']
if args.flip_augment:
dataset = dataset.map(flip_augment)
dataset = dataset.apply(tf.contrib.data.unbatch())
modifiers = [o + m for m in ['', '_flip'] for o in modifiers]
if args.crop_augment == 'center':
dataset = dataset.map(lambda im, fid, pid:
(five_crops(im, net_input_size)[0], fid, pid))
modifiers = [o + '_center' for o in modifiers]
elif args.crop_augment == 'five':
dataset = dataset.map(lambda im, fid, pid: (
tf.stack(five_crops(im, net_input_size)),
tf.stack([fid]*5),
tf.stack([pid]*5)))
dataset = dataset.apply(tf.contrib.data.unbatch())
modifiers = [o + m for o in modifiers for m in [
'_center', '_top_left', '_top_right', '_bottom_left', '_bottom_right']]
elif args.crop_augment == 'avgpool':
modifiers = [o + '_avgpool' for o in modifiers]
else:
modifiers = [o + '_resize' for o in modifiers]
# Group it back into PK batches.
dataset = dataset.batch(args.batch_size)
# Overlap producing and consuming.
dataset = dataset.prefetch(1)
images, _, _ = dataset.make_one_shot_iterator().get_next()
# Create the model and an embedding head.
model = import_module('nets.' + args.model_name)
head = import_module('heads.' + args.head_name)
endpoints, body_prefix = model.endpoints(images, is_training=False)
with tf.name_scope('head'):
endpoints = head.head(endpoints, args.embedding_dim, is_training=False)
with h5py.File(args.filename, 'w') as f_out, tf.Session() as sess:
# Initialize the network/load the checkpoint.
if args.checkpoint is None:
checkpoint = tf.train.latest_checkpoint(args.experiment_root)
else:
checkpoint = os.path.join(args.experiment_root, args.checkpoint)
if not args.quiet:
print('Restoring from checkpoint: {}'.format(checkpoint))
tf.train.Saver().restore(sess, checkpoint)
# Go ahead and embed the whole dataset, with all augmented versions too.
emb_storage = np.zeros(
(len(data_fids) * len(modifiers), args.embedding_dim), np.float32)
for start_idx in count(step=args.batch_size):
try:
emb = sess.run(endpoints['emb'])
print('\rEmbedded batch {}-{}/{}'.format(
start_idx, start_idx + len(emb), len(emb_storage)),
flush=True, end='')
emb_storage[start_idx:start_idx + len(emb)] = emb
except tf.errors.OutOfRangeError:
break # This just indicates the end of the dataset.
print()
if not args.quiet:
print("Done with embedding, aggregating augmentations...", flush=True)
if len(modifiers) > 1:
# Pull out the augmentations into a separate first dimension.
emb_storage = emb_storage.reshape(len(data_fids), len(modifiers), -1)
emb_storage = emb_storage.transpose((1,0,2)) # (Aug,FID,128D)
# Store the embedding of all individual variants too.
emb_dataset = f_out.create_dataset('emb_aug', data=emb_storage)
# Aggregate according to the specified parameter.
emb_storage = AGGREGATORS[args.aggregator](emb_storage)
# Store the final embeddings.
emb_dataset = f_out.create_dataset('emb', data=emb_storage)
# Store information about the produced augmentation and in case no crop
# augmentation was used, if the images are resized or avg pooled.
f_out.create_dataset('augmentation_types', data=np.asarray(modifiers, dtype='|S'))
if __name__ == '__main__':
main()
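# --- Hypothetical invocation (not part of the original file); the paths, the
# experiment name and the aggregator name are placeholders (the aggregator must
# be one of AGGREGATORS.keys()):
#   python embed.py \
#       --experiment_root ./experiments/my_run \
#       --dataset data/query.csv \
#       --image_root /path/to/images \
#       --flip_augment --crop_augment five --aggregator mean
# The embeddings are written to ./experiments/my_run/query_embeddings.h5
# (dataset 'emb'; per-augmentation embeddings in 'emb_aug').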
|
[
"xhx1247786632@gmail.com"
] |
xhx1247786632@gmail.com
|
490925720e8b660543d0809f52bca3389d2bca62
|
d605b8aaba13de6be8760949548375c8f8714ccc
|
/undistort_img.py
|
ab7b996b6afb9c15c69bb4ad3aab6bc1af3488de
|
[
"MIT"
] |
permissive
|
fwa785/CarND-Advanced_Lane_Lines
|
733ac514679f39e5ef475ea01e4551e7610b59cc
|
b36852e2519128c40bc22a2efed92d8014496b7e
|
refs/heads/master
| 2020-03-17T13:02:35.140426
| 2018-05-26T05:46:20
| 2018-05-26T05:46:20
| 133,614,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
import cv2
import pickle
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
def load_cal_dist():
# load the pickle file
file = open("wide_dist_pickle.p", "rb")
dist_pickle = pickle.load(file)
objpoints = dist_pickle["objpoints"]
imgpoints = dist_pickle["imgpoints"]
return objpoints, imgpoints
# Function to undistorted the image
def cal_undistort(img, objpoints, imgpoints):
# Use cv2.calibrateCamera() and cv2.undistort()
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
undist = cv2.undistort(img, mtx, dist,None,mtx)
return undist
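# --- Hypothetical usage sketch (not part of the original file) ---
# 'test_image.jpg' is a placeholder path; the calibration points come from the
# pickle loader above.
if __name__ == '__main__':
    objpoints, imgpoints = load_cal_dist()
    img = mpimg.imread('test_image.jpg')
    undist = cal_undistort(img, objpoints, imgpoints)
    plt.imshow(undist)
    plt.show()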
|
[
"fwang@cs.stanford.edu"
] |
fwang@cs.stanford.edu
|
4a5682dd8ba47d528ee3f7d9b2205251941f53e1
|
428b2789f055f35a3d7221dfdd35ef2a74262f76
|
/프로그래머스/가장 큰 수.py
|
1833f4ecc4730ecf5ed22d045b2c48397f56b6d9
|
[] |
no_license
|
sinsomi/Coding-Test
|
eb9fcf9c9ef2b427287a8f9ea27320bf6616e49a
|
881974b533dc8d1ba44e8734346e38a3e668fda8
|
refs/heads/master
| 2022-12-10T04:56:50.280532
| 2020-09-14T02:37:55
| 2020-09-14T02:37:55
| 287,198,959
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
def solution(numbers):
ans=''
numbers=list(map(str,numbers))
#문자열을 3번씩 나열한것을 기준으로 정렬
numbers.sort(key=lambda x:x*3,reverse=True)
return ''.join(numbers)
print(solution([3, 30, 34, 5, 9]))
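# Why key=lambda x: x*3 works (illustration, not part of the original solution):
# under this problem's constraints (each element is 0..1000), repeating each
# string three times gives keys long enough that plain lexicographic comparison
# picks the concatenation order yielding the larger number, e.g.
#   '3'*3 = '333' > '303030' = '30'*3, so 3 is placed before 30,
# and solution([3, 30, 34, 5, 9]) returns '9534330'.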
|
[
"cindy960602@naver.com"
] |
cindy960602@naver.com
|
adc19dda16fb09b71ebe749693e137350ac13ad8
|
641b870d2fb27ed52371b92b91c37abdf2e4d44d
|
/8_17_20.py
|
f67eea0d0b9f634b12fb774578592be916f927b8
|
[] |
no_license
|
zoezirlin/COVID-19-Worldwide-Cases-and-Deaths-Analysis
|
c2831414979d113f265e33e07f73b442a68529b4
|
909179e8f24a341b397f81e00a43621578b58c2c
|
refs/heads/master
| 2022-11-30T04:48:37.164514
| 2020-08-17T14:48:36
| 2020-08-17T14:48:36
| 285,699,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,118
|
py
|
# COVID-19 Worldwide Analytics: Data from Our World in Data
## https://github.com/owid/covid-19-data/tree/master/public/data
# RQ1: is there a correlation between pop. 65+ and pop. 70+ in terms of total deaths?
# RQ2: is there a correlation between gdp per capita and total deaths? (chi-square)
# RQ3: diabetes prevelance
# RQ4: stringency index correlation with covid figures
#========================================================================================
### Importing packages
import pandas as pd # pandas for most everything
import numpy as np
import seaborn as sns # data visualization
from matplotlib import pyplot as plt # data visualization
import statsmodels.api as sm # stats procedures
from statsmodels.formula.api import ols # regression procedures
import scipy.stats as stats # stats procedures
from scipy.stats import chi2_contingency # chi2/contingency tables
from statsmodels.stats.outliers_influence import OLSInfluence # regression procedures
#========================================================================================
### Reading in the data
data = pd.read_excel('/Users/zoezirlin/Desktop/COVID DATA/owid-covid-data.xlsx') # excel file
#========================================================================================
# Finding out how many countries are represented in this dataset
freq = pd.value_counts(data['location']) #creating frequency table through pandas value count function
freq #printing the frequency table
#========================================================================================
# Analyzing total new cases by country
pt1 = pd.pivot_table(data, index = ['location'], values = ['new_cases'], aggfunc = np.sum)
pt1 # Here we see that we have too many countries (categories) to visualize all at once
# We will need to segment the data!
#========================================================================================
# Segmenting dataset into six separate dataframes by continent designation of country
pd.value_counts(data['continent']) # checking the counts of each continent to ensure normal distribution
# oceania has significantly fewer instances than europe, which has the most
# europe and asia have the most instances
europe = data[data['continent']=='Europe'] # creating dataframe that has only european countries
europe = europe.sort_values(['total_deaths'], ascending = False) # sorting the df by total deaths from most to least
asia = data[data['continent']=='Asia']
asia = asia.sort_values(['total_deaths'], ascending = False)
africa = data[data['continent']=='Africa']
africa = africa.sort_values(['total_deaths'], ascending = False)
north_america = data[data['continent']=='North America']
north_america = north_america.sort_values(['total_deaths'], ascending = False)
south_america = data[data['continent']=='South America']
south_america = south_america.sort_values(['total_deaths'], ascending = False)
oceania = data[data['continent']=='Oceania']
oceania = oceania.sort_values(['total_deaths'], ascending = False)
#========================================================================================
### Analyzing European Countries
# Europe - Average of total cases per million, shows the rate of death positioned against population of country
pte1 = pd.pivot_table(europe, index = ['location'], values = ['total_cases_per_million'], aggfunc = np.mean)
pte1 = pte1.sort_values(['total_cases_per_million'], ascending=[False])
pte1[:5]
# the Vatican has the most average cases per million, perhaps because it has such a minuscule population
# small countries seem to have the most cases per million
# Europe- Sum of new deaths, shows the total of deaths over the weeks reported
pte2 = pd.pivot_table(europe, index = ['location'], values = ['total_deaths'], aggfunc = np.sum)
pte2 = pte2.sort_values(['total_deaths'], ascending=[False])
pte2[:5]
# the U.K. leads in total deaths, followed by Italy, France, Spain and Belgium (IN EUROPE)
# Europe- Sum of new cases per million, shows the rate of total of cases by country
pte3 = pd.pivot_table(europe, index = ['location'], values = ['new_cases_per_million'], aggfunc = np.sum)
pte2 = pte3.sort_values(['new_cases_per_million'], ascending=[False])
pte3[:5]
# Visualizing Death Counts for European Countries by Total Deaths
plt.figure(figsize=(25,7))
sns.set()
sns.set_context("talk")
chart = sns.barplot(x='location', y='new_deaths', estimator=sum,
data=europe,
palette='Paired'
)
plt.xticks(
rotation=45,
horizontalalignment='right',
fontweight='light',
)
chart.set_title('Death Count by Location in European Countries')
chart.set_xlabel('Location')
chart.set_ylabel('Death Count')
# Visualizing COVID-19 Cases for European Countries by Total Cases
plt.figure(figsize=(25,7))
sns.set()
sns.set_context("talk")
chart = sns.barplot(x='location', y='new_cases', estimator=sum,
data=europe,
palette='Paired'
)
plt.xticks(
rotation=45,
horizontalalignment='right',
fontweight='light',
)
chart.set_title('Case Count by Location in European Countries| Note: Ordered by Most Deaths to Least')
chart.set_xlabel('Location')
chart.set_ylabel('Case Count')
#========================================================================================
### Analyzing Asian Countries
# Asia - Average of total cases per million, shows the rate of death positioned against population of country
pta1 = pd.pivot_table(asia, index = ['location'], values = ['total_cases_per_million'], aggfunc = np.mean)
pta1 = pta1.sort_values(['total_cases_per_million'], ascending=[False])
pta1[:5]
# Qatar has the most average cases per million
# Asia- Sum of new deaths, shows the total of deaths over the weeks reported
pta2 = pd.pivot_table(asia, index = ['location'], values = ['total_deaths'], aggfunc = np.sum)
pta2 = pta2.sort_values(['total_deaths'], ascending=[False])
pta2[:5]
# India has the most cumulative total deaths
# Asia- Sum of new cases per million, shows the rate of total of cases by country
pta3 = pd.pivot_table(asia, index = ['location'], values = ['new_cases_per_million'], aggfunc = np.sum)
pta3 = pta3.sort_values(['new_cases_per_million'], ascending=[False])
pta3[:5]
# Qatar has the most new cases per million, as well as having the most total cases per million
# Visualizing Death Counts for Asian Countries by Total Deaths
plt.figure(figsize=(25,7))
sns.set()
sns.set_context("talk")
chart = sns.barplot(x='location', y='new_deaths', estimator=sum,
data=asia,
palette='Paired'
)
plt.xticks(
rotation=45,
horizontalalignment='right',
fontweight='light',
)
chart.set_title('Death Count by Location in Asian Countries')
chart.set_xlabel('Location')
chart.set_ylabel('Death Count')
# Visualizing COVID-19 Cases for Asian Countries by Total Cases
plt.figure(figsize=(25,7))
sns.set()
sns.set_context("talk")
chart = sns.barplot(x='location', y='new_cases', estimator=sum,
data=asia,
palette='Paired'
)
plt.xticks(
rotation=45,
horizontalalignment='right',
fontweight='light',
)
chart.set_title('Case Count by Location in Asian Countries| Note: Ordered by Most Deaths to Least')
chart.set_xlabel('Location')
chart.set_ylabel('Case Count')
#========================================================================================
# Taking care of the NA variables
print(data.isnull()) # printing true/false is na for variables
print(data.isnull().sum())
# every variable has null counts except for location and date!
# create new dataset for regression procedures without NAs
#========================================================================================
# Statistical procedures
## Correlative relationships
corr = data.corr()
# handwashing facilities x extreme poverty: -.7703
# hospital beds per thousand x median age: .6606
# life expectancy x hand washing facilities: .825
# extreme poverty x life expectancy: -.748
# female smokers x aged 70+: .781
# stringency index x positive rate: .329 (low, suggesting it doesn't have much effect)
# Variables of interest
# continent: categorical (need to recode)
# date: continuous
# total cases: continuous
# new cases: continuous
# total deaths: continuous
# new deaths: continuous
# total cases/mil: continuous
# new cases/mil: continuous
# total deaths/mil: continuous
# total cases/mil: continuous
# stringency index: continuous
# population density: continuous
# gdp per capita: continuous
# life expectancy: continuous
# Regression procedure: predicting total cases
# Scatterplot for total cases by stringency index
plt.figure(figsize=(7,7))
sns.set()
sns.set_context("talk")
chart = sns.scatterplot(data=data, y='total_cases', x= 'stringency_index', palette='Paired')
chart.set_title('Scatterplot for Total Cases by Stringency Index')
chart.set_xlabel('Stringency Index')
chart.set_ylabel('Total Cases')
# Scatterplot for total cases by gdp per capita
plt.figure(figsize=(7,7))
sns.set()
sns.set_context("talk")
chart = sns.scatterplot(data=data, y='total_cases', x= 'gdp_per_capita', palette='Paired')
chart.set_title('Scatterplot for Total Cases by GDP Per Capita')
chart.set_xlabel('GDP Per Capita')
chart.set_ylabel('Total Cases')
# Scatterplot for total cases by life expectancy
plt.figure(figsize=(7,7))
sns.set()
sns.set_context("talk")
chart = sns.scatterplot(data=data, y='total_cases', x= 'life_expectancy', palette='Paired')
chart.set_title('Scatterplot for Total Cases by Life Expectancy')
chart.set_xlabel('Life Expectancy')
chart.set_ylabel('Total Cases')
# Creating new dataset without NAs
data1 = data.copy()
data1 = data1.dropna()
pd.value_counts(data1['location'])
# this whittled-down dataframe has 164 countries
# there are 195 countries in the world
# this represents 84% of countries
# Regression procedure no.1
X = data1[['life_expectancy']]
y = data1['total_cases']
X = sm.add_constant(X)
model_1 = sm.OLS(y,X).fit()
predictions_1 = model_1.predict(X)
model_1.summary()
# Regression procedure no.2
X = data1[['life_expectancy']]
y = data1['total_deaths']
X = sm.add_constant(X)
model_1 = sm.OLS(y,X).fit()
predictions_1 = model_1.predict(X)
model_1.summary()
# Regression procedure no.3
X = data1[['cardiovasc_death_rate']]
y = data1['total_deaths']
X = sm.add_constant(X)
model_1 = sm.OLS(y,X).fit()
predictions_1 = model_1.predict(X)
model_1.summary()
# Regression procedure no.4
## significant model, 78% of the variability explained
X = data1[['handwashing_facilities']]
y = data1['extreme_poverty']
X = sm.add_constant(X)
model_1 = sm.OLS(y,X).fit()
predictions_1 = model_1.predict(X)
model_1.summary()
# Regression procedure no.5
## significant model, 50% of the variability explained
X = data1[['handwashing_facilities']]
y = data1['life_expectancy']
X = sm.add_constant(X)
model_1 = sm.OLS(y,X).fit()
predictions_1 = model_1.predict(X)
model_1.summary()
|
[
"noreply@github.com"
] |
zoezirlin.noreply@github.com
|
5a81a4197fa3eba25a940755bdd2addd0a67a411
|
27ff7fec0ae3f29f58089a2acab0aa3bc4e6e1f7
|
/RIDE-python3/utest/namespace/test_retrievercontextfactory.py
|
ffcf5e68b915b40215bd7108b20d30b733dc6100
|
[
"Apache-2.0"
] |
permissive
|
zhangsong1417/xx
|
01435d6057364991b649c1acc00b36ab13debe5a
|
c40cfdede194daf3bdf91b36c1936150577128b9
|
refs/heads/master
| 2020-04-06T14:06:23.011363
| 2019-07-09T02:38:02
| 2019-07-09T02:38:02
| 157,528,207
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
import unittest
from nose.tools import assert_equal
from robotide.namespace.namespace import _RetrieverContextFactory
from robotide.robotapi import ResourceFile
def datafileWithVariables(vars):
data = ResourceFile()
for var in vars:
data.variable_table.add(var, vars[var])
return data
class RetrieverContextFactoryTest(unittest.TestCase):
def test_created_context_has_variable_table_variables(self):
factory = _RetrieverContextFactory()
ctx = factory.ctx_for_datafile(
datafileWithVariables({'${foo}': 'moi', '${bar}': 'hoi',
'@{zoo}': 'koi'}))
result = ctx.vars.replace_variables('!${foo}!${bar}!@{zoo}!')
print(ctx.vars)
assert_equal(result, "!moi!hoi!['koi']!")
if __name__ == '__main__':
unittest.main()
|
[
"44634576+shuiling21@users.noreply.github.com"
] |
44634576+shuiling21@users.noreply.github.com
|
36fe4d0372f7bd4c02c181de99948209bddf152f
|
3876aa07bd0d9d2a264174537ff382663c995619
|
/backend/server/server/settings.py
|
c47394a1d6f7eade47a3c037251e79b282f5d5c9
|
[] |
no_license
|
ImokAAA/image_enhancer
|
9858970675bb351c84bcd6afe7c46742239f402f
|
65f047e94d45f8d1113fa5df375aa1737a7158d2
|
refs/heads/main
| 2023-03-05T22:55:17.788420
| 2021-01-14T16:47:01
| 2021-01-14T16:47:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,088
|
py
|
"""
Django settings for server project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'us56&cza8v)=1crl(72n1+lliboivd2nnt^51i$$ki#qbq8ml8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'server.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"Imangali@gmail.com"
] |
Imangali@gmail.com
|
78170bad3a84f196ad48194a53eb050217a1ffb5
|
6be1990abf99c85ef886b49dcea1824aabb648d3
|
/weixinofneolocal/weixinofneolocal/zinnia/templates/zinnia/skeletonback.py
|
87d92f772446a25a103c06be241af22031b913f8
|
[] |
no_license
|
neoguojing/cloudServer
|
b53ae205efe52cf0aea28dbb9e6c16c20caf991f
|
7c19101789b0c46474269e4c8fe00e92203e9cd7
|
refs/heads/master
| 2020-12-04T23:02:23.551479
| 2017-09-22T03:08:35
| 2017-09-22T03:08:35
| 67,382,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,349
|
py
|
{% load i18n staticfiles %}
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xml:lang="{{ LANGUAGE_CODE }}" lang="{{ LANGUAGE_CODE }}" version="-//W3C//DTD XHTML 1.1//EN" xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>Zinnia's Weblog - {% block title %}{% endblock title %}{% block title-page %}{% endblock title-page %}</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<meta http-equiv="cache-control" content="public" />
<meta name="robots" content="follow, all" />
<meta name="language" content="{{ LANGUAGE_CODE }}" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta name="description" content="{% block meta-description %}{% trans "Just another Zinnia weblog." %}{% endblock meta-description %}{% block meta-description-page %}{% endblock meta-description-page %}" />
<meta name="keywords" content="{% block meta-keywords %}django, blog, weblog, zinnia{% endblock meta-keywords %}" />
<meta name="author" content="Fantomas42" />
{% block meta %}{% endblock meta %}
<link rel="pingback" href="/xmlrpc/" />
<link rel="shortcut icon" href="{% static "/static/zinnia/img/favicon.ico" %}" />
<link rel="home" href="{% url 'zinnia_entry_archive_index' %}" />
<link rel="stylesheet" type="text/css" media="screen, projection" href="{% static "/static/zinnia/css/screen.css" %}" />
<link rel="stylesheet" type="text/css" media="print" href="{% static "/static/zinnia/css/print.css" %}" />
<!--[if lt IE 8]>
<link rel="stylesheet" type="text/css" media="screen, projection" href="{% static "/static/zinnia/css/ie.css" %}" />
<![endif]-->
{% block link %}{% endblock link %}
{% block script %}{% endblock script %}
</head>
<body class="zinnia {% block theme-class %}default{% endblock theme-class %} {% block color-class %}blue{% endblock color-class %} {% block sidebar-class %}right-sidebar{% endblock sidebar-class %} {% block body-class %}{% endblock body-class %}">
<div class="container">
<div id="header">
<ul class="top-navigation">
<li>
<a href="{% url 'zinnia_sitemap' %}" title="{% trans "Sitemap" %}" class="sitemap">
{% trans "Sitemap" %}
</a>
</li>
<li>
<a href="{% url 'zinnia_entry_latest_feed' %}" title="{% trans "RSS Feed of latest entries" %}" class="feeds">
{% trans "RSS Feed" %}
</a>
</li>
</ul>
<form method="get" action="{% url 'zinnia_entry_search' %}">
<p>
<input type="text" value="{% trans "Keywords..." %}" name="pattern" onfocus="this.value=''" />
<input type="submit" class="submitbutton" value="OK" />
</p>
</form>
<h1>
<a href="{% url 'zinnia_entry_archive_index' %}" title="Zinnia's Weblog" rel="home">
阳城微装修
</a>
</h1>
<blockquote>
<p>{% trans "Just another Zinnia weblog." %}</p>
</blockquote>
{% block breadcrumbs %}{% endblock breadcrumbs %}
</div>
{% block slider %}{% endblock slider %}
<div id="content" class="hfeed">
{% block content %}
<div class="links">
<h3>{% trans "Useful links" %}</h3>
<ul>
<li>
<a href="{% url 'zinnia_entry_archive_index' %}" title="{% trans "Weblog index" %}">
{% trans "Weblog index" %}
</a>
</li>
<li>
<a href="{% url 'zinnia_sitemap' %}" title="{% trans "Sitemap" %}">
{% trans "Sitemap" %}
</a>
</li>
</ul>
</div>
<div class="search">
<h3>{% trans "Search" %}</h3>
{% include "zinnia/tags/search_form.html" %}
</div>
{% endblock content %}
</div>
<div id="sidebar">
{% block sidebar %}
{% endblock sidebar %}
</div>
<div id="footer">
<p>{% blocktrans %}Powered by <a href="http://www.djangoproject.com">Django</a> and <a href="https://github.com/Fantomas42/django-blog-zinnia">Zinnia {{ ZINNIA_VERSION }}</a>.{% endblocktrans %}</p>
</div>
</div>
</body>
</html>
|
[
"guojing_neo@163.com"
] |
guojing_neo@163.com
|
228b7eb72785825bdb3372172019c0d93f9fa8cf
|
9124307911fe2c868d0b52cce274e3e06d050dfc
|
/spider/doubanSpider/doubanSpider/run.py
|
8f019a82e517f5bb666de728e7097c307a7035f8
|
[] |
no_license
|
shenzihu/wechat-library-system
|
2458f953886136e2ddfe742f0a1879056b556f06
|
5a7c9a2bf2e3844c3b0c8ccb9db0ee2439869ed2
|
refs/heads/main
| 2023-03-06T13:18:54.430461
| 2021-02-17T03:43:19
| 2021-02-17T03:43:19
| 339,595,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 827
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from scrapy import cmdline
import time
import subprocess
import sched, os
if __name__ == "__main__":
cmdline.execute('scrapy crawl doubanSpider'.split())
# Initialize the scheduler class of the sched module.
# The first argument is a function that returns a timestamp; the second one
# can block until the scheduled time arrives.
schedule = sched.scheduler(time.time, time.sleep)
# Function triggered by the periodic scheduler
def func():
    print("Spider starting")
    subprocess.Popen("scrapy crawl doubanSpider")
def perform1(inc):
    schedule.enter(inc, 0, perform1, (inc,))
    func()  # the function that needs to run periodically
def mymain():
    schedule.enter(0, 0, perform1, (6*3600,))
# if __name__ == "__main__":
#     mymain()
#     schedule.run()  # run until the scheduled-event queue becomes empty
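# Hedged illustration of the sched API used above (not in the original file;
# safe to run once: it fires a single event after one second and returns):
def _demo_once():
    s = sched.scheduler(time.time, time.sleep)
    s.enter(1, 0, lambda: print("sched demo fired"), ())
    s.run()
# _demo_once()  # uncomment to try; the real periodic loop is mymain() above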
|
[
"shenzihu123.123"
] |
shenzihu123.123
|
7b67496f9c670cfea18d79b91e9e31e13296b6be
|
ed6efcd5ffb150b1cbc4fceb9976685e1edde354
|
/JazzMock/Python/train.py
|
b7d09e77d8edaad8b56f84aa8bedf69c72533e17
|
[] |
no_license
|
zielo-hue/JazzMock
|
71cdc9e07815eec48cc6b3ce4f380e6f6375fb62
|
a27b8f419f63b3e2ae6885eb22f11144097ff0fe
|
refs/heads/master
| 2023-08-25T20:29:59.778534
| 2021-11-07T19:34:35
| 2021-11-07T19:34:35
| 345,299,138
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
import os
import gpt_2_simple as gpt2
import sys
from datetime import datetime
model_name = "355M"
if not os.path.isdir(os.path.join("models", model_name)):
print(f"Downloading {model_name} model...")
gpt2.download_gpt2(model_name=model_name)
file_name = "dataset.csv" # File name of dataset
sess = gpt2.start_tf_sess()
gpt2.finetune(
sess,
dataset=file_name,
model_name=model_name, # Model you have already downloaded
steps=10000, # -1 will do unlimited. Enter number of iterations otherwise
restore_from='latest', # Also allows 'fresh' which will overwrite old training
run_name='test', # The name to pull or create a checkpoint under
    print_every=50, # Print iterations every X number
    sample_every=15000, # Generate a text sample every X number of iter.
save_every=300, # Save a snapshot every X number of iter.
learning_rate=0.0001, # Lower to 0.00001 if you are not getting massive changes in results
batch_size=2, # Keep at 1 or 2, will use up more memory if you raise this
overwrite=True
)
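# Hedged follow-up (not in the original script): once the finetune above has
# written checkpoints under checkpoint/test, gpt-2-simple can sample from the
# tuned model in the same session, e.g.:
# gpt2.generate(sess, run_name='test', length=200, temperature=0.7)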
|
[
"51804240+zielo-hue@users.noreply.github.com"
] |
51804240+zielo-hue@users.noreply.github.com
|
32b1a41ac3b18a60699e71127b3e67b52f1e3621
|
249a31fd330dc179b89101f71f92f5474ce2a2dd
|
/back/fastapi/app/database/models.py
|
3a15a7c84f98644ae58e296613755954bb392cb6
|
[] |
no_license
|
WilliamBamba/thermo_ia
|
f9838d968ee6a24ab8573fcf6b52bed92f7f6a12
|
941aef7e210363c73fcd86176cf89c655e4e406d
|
refs/heads/master
| 2023-03-16T19:36:50.222152
| 2020-11-25T09:38:42
| 2020-11-25T09:38:42
| 343,266,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,168
|
py
|
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
from app.database.config import Base
class Profile(Base):
__tablename__ = "profile"
id = Column(Integer, primary_key=True, index=True)
name = Column(String)
city = Column(String)
wtemp = Column(Integer, default=23)
time_table = Column(String, nullable=True) # "[8-11:45],[12:45-17]"
    url_agenda = Column(String, nullable=True) # valid URL pointing to an .ics calendar
option_id = Column(Integer, ForeignKey('option.id'), nullable=True)
# input: (8-9:45) (10-11:45) (12:45-17)
# output: (8-11:45) (12:45-17)
def parse_agenda(self):
pass
def refresh_agenda(self):
pass
class Option(Base):
__tablename__ = "option"
id = Column(Integer, primary_key=True, index=True)
name = Column(String, nullable=True)
class SensorData(Base):
__tablename__ = "sensor_data"
id = Column(Integer, primary_key=True, index=True)
temperature = Column(Integer, nullable=True)
created_at = Column(DateTime, server_default=func.now())
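# Hedged sketch (not part of the original models): one way Profile.parse_agenda
# could merge adjacent slots as described in the comment above, assuming slots
# separated by at most 15 minutes should be fused
# ("(8-9:45) (10-11:45)" -> "(8-11:45)"):
def _merge_time_slots(slots, max_gap=15):
    """Merge (start, end) time-string pairs such as ("8", "9:45") whenever the
    gap between one slot's end and the next slot's start is <= max_gap minutes."""
    def to_minutes(t):
        hours, _, minutes = t.partition(':')
        return int(hours) * 60 + (int(minutes) if minutes else 0)
    merged = []
    for start, end in slots:
        if merged and to_minutes(start) - to_minutes(merged[-1][1]) <= max_gap:
            merged[-1] = (merged[-1][0], end)  # extend the previous slot
        else:
            merged.append((start, end))
    return merged
# e.g. _merge_time_slots([("8", "9:45"), ("10", "11:45"), ("12:45", "17")])
# returns [("8", "11:45"), ("12:45", "17")]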
|
[
"shajjad.hossain@etu.univ-lyon1.fr"
] |
shajjad.hossain@etu.univ-lyon1.fr
|
0ab4574b857eec1bbf73e814ebe45687b8341100
|
f957e7a7dac20e987b6bd467f6342d2ecd358879
|
/pset9/problem1_3.py
|
7ab5cbfa1b2e4bb72f25e76c8fb64d65dfc26e1e
|
[] |
no_license
|
Wambonaut/Computational-Physics-Homework
|
375407b2ccce9f3b8ffb2a3a0126b8306f28ae2f
|
8389e4c446b6f8e3cbdc6e51b6c228f18bc57935
|
refs/heads/master
| 2020-05-18T03:35:52.342590
| 2019-06-28T09:51:37
| 2019-06-28T09:51:37
| 184,148,341
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import os
import random
from rng import rng
figFolderPath = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'figures'
)
if not os.path.exists(figFolderPath):
os.makedirs(figFolderPath)
a, c, m = 106, 1283, 6075
I0 = 329
r = rng(I0, a, c, m)
N = 10000
n = np.zeros(N)
for i in range(N):
n[i] = np.sum([(r.next_int() % 6) + 1 for i in range(10)])
plt.hist(n, bins=50)
plt.savefig(os.path.join(figFolderPath, 'fig15.pdf'),
bbox_inches='tight', pad_inches=0.6)
plt.savefig(os.path.join(figFolderPath, 'fig15.pgf'),
bbox_inches='tight')
|
[
"jona.a99@gmail.com"
] |
jona.a99@gmail.com
|
5b6f3a167a3e1434a0bf2ef489f91a1678c97a32
|
39d10f7090c881c29c4dab8bfbfbb3937c2fa0d7
|
/game/views.py
|
34478b7c5d631ab7512098bc72730441c23f80ed
|
[] |
no_license
|
garncarz/vietcong1.eu
|
0afe7e2f3c0017cfb543d6945f30d78dc2c83198
|
259e9869d30e31b41ce1e0afca3956fafd31fd40
|
refs/heads/master
| 2021-01-10T11:12:15.166627
| 2020-01-02T00:54:35
| 2020-01-02T00:54:35
| 43,337,611
| 2
| 0
| null | 2020-01-02T00:55:01
| 2015-09-29T01:33:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,782
|
py
|
from django.contrib import messages
from django.shortcuts import redirect
from django.views import generic
from rest_framework import viewsets
from . import forms
from . import models
from . import serializers
class ServerListView(generic.ListView):
model = models.Server
def get_queryset(self):
return (super().get_queryset()
.select_related('map', 'mode')
.filter(online=True)
.all())
class ServerViewSet(viewsets.ModelViewSet):
queryset = models.Server.objects.all().order_by('name')
serializer_class = serializers.ServerSerializer
class ServerDetailView(generic.DetailView):
model = models.Server
def get_object(self, queryset=None):
queryset = queryset or self.get_queryset()
return queryset.get(ip=self.kwargs['ip'], port=self.kwargs['port'])
class PlayerListView(generic.ListView):
model = models.Player
def get_queryset(self):
return (super().get_queryset()
.select_related('server')
.filter(online=True, server__online=True)
.all())
class MapDetailView(generic.DetailView):
model = models.Map
class MapImageUploadView(generic.FormView):
template_name = 'game/mapimage_upload.html'
form_class = forms.MapImageForm
def get_map(self):
return models.Map.objects.get(pk=self.kwargs['pk'])
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['map'] = self.get_map()
return context
def form_valid(self, form):
map_image = form.save(commit=False)
map_image.map = self.get_map()
map_image.save()
messages.success(self.request, 'The picture has been uploaded.')
return redirect(map_image.map)
|
[
"ondrej@garncarz.cz"
] |
ondrej@garncarz.cz
|
062cd6337e303e827f0ceaf260029aef2efbade4
|
1e2624cb526210d1fefe5dfd979555bac55d4d26
|
/third/urls.py
|
0606c8bc28ee5e9a97e937d726b79dcae0bf6873
|
[] |
no_license
|
taemtaem0707/inflearn_python_board
|
1a149792d297580ffc13ef60a79394208c4eb751
|
a4508ba139a8882c3515b37e98fcb13c823a21a8
|
refs/heads/master
| 2022-12-01T19:14:55.532778
| 2020-08-15T12:17:03
| 2020-08-15T12:17:03
| 287,739,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('list/', views.list, name='list'),
path('create/', views.create, name='restaurant-create'),
path('restaurant/<int:id>/update/', views.update, name='restaurant-update'),
path('restaurant/<int:id>/delete/', views.delete, name='restaurant-delete'),
path('restaurant/<int:id>/', views.detail, name='restaurant-detail'),
path('restaurant/<int:restaurant_id>/review/create/', views.review_create, name='review-create'),
path('restaurant/<int:restaurant_id>/review/delete/<int:review_id>', views.review_delete, name='review-delete'),
path('review/list/', views.review_list, name='review-list'),
]
|
[
"taemtaem0707@google.com"
] |
taemtaem0707@google.com
|
3d257eebb84ad0064a3d1098a6ab95ca1d2478ce
|
733ab64585aace7ff3f679ed7a1d61562c0d83af
|
/naive_SVP_by_hand.py
|
15234d44e7f3ddc06f9399ea0a056990cec17060
|
[] |
no_license
|
sunchaopku/Naive_SVP_by_hand
|
877ef7d3e4ca121528376a43658535ce083ff1cc
|
df9961b61083df52fce676007231fce69b8850c2
|
refs/heads/main
| 2023-09-04T10:31:42.554849
| 2021-11-13T15:01:54
| 2021-11-13T15:01:54
| 427,695,423
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,518
|
py
|
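# Naive SVP enumeration (Sage): build a random integer basis B, Gram-Schmidt
# orthogonalize it, and enumerate integer coefficient vectors whose norm can
# stay within a Minkowski-style bound (Fincke-Pohst-like pruning through the
# Gram-Schmidt norms G), keeping the shortest nonzero lattice vector found.
# The BKZ-reduced basis computed at the end serves as a cross-check.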
m = 5
n = 5
B = matrix(ZZ, m, n)
for i in range(m):
for j in range(n):
B[i, j] = ZZ.random_element(1, 10)
n_rows = B.nrows()
n_cols = B.ncols()
x = matrix(ZZ, 1, n_cols, 0)
coef = matrix(ZZ, 1, n_rows, 0 )
Gram, U = B.gram_schmidt()
Gram
U
G = []
for i in range(n_rows):
G.append(norm(Gram[i]))
BBT = B*B.transpose()
volume = RR(sqrt(abs(BBT.determinant())))
bound = sqrt(n_rows) * volume.nth_root(n_rows)
shortest_vector = copy(B[0])
shortest_coef = matrix(ZZ, 1, n_rows, 0 )
def SVP(n):
global bound, B, G, shortest_vector, coef, n_rows,U, shortest_coef
if n == -1:
temp_vec = matrix(ZZ, 1, n_cols, 0)
# print("temp_vec is")
# print(temp_vec)
temp_vec = temp_vec + coef * B
# print("coef is")
# print(coef)
if norm(temp_vec) > 0 and norm(temp_vec) < norm(shortest_vector):
#bound = norm(temp_vec)
shortest_vector = temp_vec
shortest_coef = copy(coef)
return
temp_bound = bound
temp_coef = 0
#print("coef is")
# print(coef)
#print("n is ", n)
for i in range(n_rows - 1, n , - 1):
# print("i is ", i)
temp_coef = temp_coef + U[i,n] * coef[0,i]
#print("temp_coef is ", temp_coef )
temp_val = (bound / G[n])
for i in range(floor(-temp_val - temp_coef) , floor(temp_val - temp_coef) +1, 1):
coef[0, n] = i
SVP(n - 1)
SVP(n_rows - 1)
print(shortest_vector)
print("norm of shortest_vector", RR(norm(shortest_vector)))
short_bkz = B.BKZ()
print("shortest_coef ", shortest_coef)
print(short_bkz[0])
print("BKZ norm",RR(norm(short_bkz[0])))
print(short_bkz * B.inverse())
|
[
"noreply@github.com"
] |
sunchaopku.noreply@github.com
|
3217885c91b5ce893f45b536fad802172adeef77
|
98a05e66a7f8b1c778345ca6e36fd83b84f693f6
|
/colibri.py
|
764f54f0a98f4f6b952dd4ebc82989610065cddd
|
[] |
no_license
|
0206627/Colibri
|
7ff1c951cfb6f1a2be50faaaf893d6c372f0f90a
|
34a1d0f5a83c16bc533ed70773d594dfb5950486
|
refs/heads/master
| 2022-09-05T05:02:56.637791
| 2020-05-20T03:49:18
| 2020-05-20T03:49:18
| 261,040,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,485
|
py
|
from components.avionics_component import CPU, SD
from components.comms_component import Antenna
from components.power_component import Batteries, EPS
from components.thermal_component import TempSensor
from flight_control.con_op_manager import ConOpManager
from flight_control.flight_system import FlightSystem
flight_system = FlightSystem()
antenna = Antenna()
battery = Batteries()
cpu = CPU()
sd = SD()
temp_sensor = TempSensor()
flight_system.add_component(antenna)
flight_system.add_component(battery)
flight_system.add_component(cpu)
flight_system.add_component(sd)
flight_system.add_component(temp_sensor)
conOp = ConOpManager()
while True:
    battery.battery_charge = float(input("Battery charge % [0.0, 1.0]: "))
    antenna.comms_check = input("Antenna communication [True, False]: ").strip() == "True"
    cpu.performance_percentage = float(input("CPU performance % [0.0, 1.0]: "))
    sd.storage_free_percentage = float(input("SD storage free % [0.0, 1.0]: "))
    temp_sensor.temp = float(input("Temperature sensor temp ºC [-10, 150]: "))
current_conop = conOp.get_current_conop()
flight_conditions = flight_system.retrieve_flight_conditions()
new_conop = flight_system.decide_con_op_mode(flight_conditions, current_conop)
print("")
print("Before: ", current_conop)
print("Flight Conditions:")
flight_system.print_flight_conditions()
print("After: ", new_conop)
print("")
print("------------------------------")
print("")
|
[
"0206627@up.edu.mx"
] |
0206627@up.edu.mx
|
bdfa59941b83aa98b2faca5d7e8397a676989276
|
a7425779e8932aa9f499c1fe5c20c18b5b0dcebb
|
/main/migrations/0004_remove_company_logo.py
|
298ca7f71bed6d57164b9be4bf3d783ba018495d
|
[] |
no_license
|
nandesh553/django-test
|
ad94e0664a28b6baa0c0f7642a7da26ee5b2b6c6
|
612f6a11068a67ba7d5abcf7967fdea25002c564
|
refs/heads/master
| 2020-08-06T13:57:00.323319
| 2019-10-05T18:21:01
| 2019-10-05T18:21:01
| 212,999,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
# Generated by Django 2.2.5 on 2019-10-05 12:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0003_auto_20191005_1623'),
]
operations = [
migrations.RemoveField(
model_name='company',
name='logo',
),
]
|
[
"guptanandeshwar553@gmail.com"
] |
guptanandeshwar553@gmail.com
|
406afe1c16cb054bada93da9cd18e427ec91dec9
|
be3c759bd915887a384d1ef437ebf7277c75bd06
|
/DynamicProgramming/HouseRobber.py
|
3f955e0a2cf78dc730de9f68598e24f0ce96fb45
|
[] |
no_license
|
yistar-traitor/LeetCode
|
c24411763d541b6eaf9ccc344c3fd24f9a00e633
|
0dd48b990f8bd0874630b1860361c6b3b2c801f6
|
refs/heads/master
| 2020-09-28T20:46:45.016872
| 2019-12-18T02:25:34
| 2019-12-18T02:25:34
| 226,861,515
| 0
| 0
| null | 2019-12-18T02:25:36
| 2019-12-09T12:04:01
| null |
UTF-8
|
Python
| false
| false
| 1,600
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/8/24 18:01
# @Author : tc
# @File : HouseRobber.py
"""
You are a professional thief planning to rob houses along a street. Each house
hides a certain amount of cash; the only constraint on your stealing is that
adjacent houses share an interconnected alarm system, which goes off
automatically if two adjacent houses are broken into on the same night.
Given a non-negative integer array representing the amount of money stored in
each house, compute the maximum amount you can rob without triggering the alarm.
Input1: [1,2,3,1]
Output1: 4
Explanation: rob house no.1 (amount = 1), then rob house no.3 (amount = 3).
Maximum amount robbed = 1 + 3 = 4.
Input2: [2,7,9,3,1]
Output2: 12
Explanation: rob house no.1 (amount = 2), house no.3 (amount = 9), then house no.5 (amount = 1).
Maximum amount robbed = 2 + 9 + 1 = 12.
For the dynamic-programming transition, start from the special cases (e.g.
len(nums) == 1, 2, 3), write out the recurrence step by step, and then keep
refining it against test cases until the final, complete code emerges.
Official solution: https://leetcode-cn.com/problems/house-robber/solution/da-jia-jie-she-by-leetcode/
"A natural idea is to start with the simplest cases", which matches the
solution steps described above.
"""
def rob(nums):
m = len(nums)
if not m:
return 0
dp = [0] * (m + 1)
dp[1] = nums[0]
for i in range(1,m):
dp[i+1] = max(dp[i], dp[i-1]+nums[i])
print(dp)
return dp[-1]
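# Worked example (Input2 above): for nums = [2, 7, 9, 3, 1] the dp table
# printed by rob() evolves to [0, 2, 7, 11, 11, 12], so the answer is 12.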
if __name__ == '__main__':
nums = [2,7,9,3,1]
print(rob(nums))
|
[
"2448424636@qq.com"
] |
2448424636@qq.com
|
4063445b6de2618ea6da9d05ed34ca6f414dc48a
|
8b22730050501c72421abda7f55db583773aa37c
|
/ecvheo1/Queue & Deque/2164.py
|
6c48e5596959bfc79582f84e7f209a74ffa2ef43
|
[] |
no_license
|
naekang/uos-algorithm
|
5f98b1eae7c3ee1bde4942976ce18d12cf0d5f19
|
a0c03cfc5571c6c6dda5a710bd6e40e7c699e5e0
|
refs/heads/main
| 2023-08-06T18:28:39.447373
| 2021-10-04T05:43:19
| 2021-10-04T05:43:19
| 396,826,675
| 0
| 0
| null | 2021-08-16T14:18:20
| 2021-08-16T14:18:20
| null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
from collections import deque
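# Problem 2164 ("card2"): cards 1..N with card 1 on top; repeatedly discard
# the top card, then move the new top card to the bottom, until one remains.
# The deque below is built so that its right end is the top of the pile.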
n = int(input())
li = []
for i in range(n, 0, -1):
    li.append(i)
cardDeque = deque(li)
while len(cardDeque) != 1:
cardDeque.pop()
cardDeque.appendleft(cardDeque.pop())
print(cardDeque[0])
|
[
"ecvheo@naver.com"
] |
ecvheo@naver.com
|
5854bb09c8e7f904a02592d7ae0b2ae00c3ff580
|
a60e1ac0359986566cc79bc17bbc3f2d99d28486
|
/src/Blog_Project/wsgi.py
|
264ad976d9ff130104479c5d5c9019fd152cc565
|
[] |
no_license
|
zahidul-islam-144/Django-Mini-Blog-Site
|
fa278015ee8753e8e8e9c4322cfc850ed69e1a21
|
f0630ad46d1ffb905361da2752b32c0a8dbfb847
|
refs/heads/main
| 2023-06-09T17:08:51.885515
| 2021-07-03T15:24:30
| 2021-07-03T15:24:30
| 382,640,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for myProject1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myProject1.settings')
application = get_wsgi_application()
|
[
"zahidul.islam1@northsouth.edu"
] |
zahidul.islam1@northsouth.edu
|
794d8c6e6c666f6dd5bf58524acc120035e325b6
|
3a395b378f818ed37fde4f47ba1ab06dc2e4f8bb
|
/venv/bin/ipython
|
e68785c72c9c2799dd7a3a32e119d9c054032438
|
[] |
no_license
|
PoliNa1993/MyLab
|
391e400a248aec3dd69019847c81582688e21534
|
a334830a19669da022ff88762e4825abb1dfd597
|
refs/heads/master
| 2021-03-27T15:56:32.173159
| 2017-10-12T17:22:51
| 2017-10-12T17:22:51
| 105,839,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
#!/Users/polinaaniskina/Code/EasyLab/venv/bin/python3.5
# -*- coding: utf-8 -*-
import re
import sys
from IPython import start_ipython
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(start_ipython())
|
[
"ppoolliinnaa@hotmail.com"
] |
ppoolliinnaa@hotmail.com
|
|
3a32305e83ae2659bc69b1f0bd9521a5a6cc84ac
|
241ba34c76bd55ffd5891a37c637d52cb4fe3ad0
|
/stylize_example.py
|
60d8ac335a2c3afee4d091c4724821bb7a68d85a
|
[] |
no_license
|
tingleshao/neural_style_tf
|
7c2408e4a5e72849003790bdec23aade9fad8a49
|
fb32e58c20b9986457a3ba6629c9443fa5cba192
|
refs/heads/master
| 2020-12-03T09:29:55.863070
| 2017-07-11T17:50:54
| 2017-07-11T17:50:54
| 95,625,544
| 0
| 0
| null | 2017-06-28T03:37:19
| 2017-06-28T03:37:19
| null |
UTF-8
|
Python
| false
| false
| 6,783
|
py
|
"""
This file contains code to run neural_style.py. It shows a few usages of the
script and outputs the results.
"""
from general_util import *
if __name__=='__main__':
# First download the required files.
download_if_not_exist('http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat', 'imagenet-vgg-verydeep-19.mat', 'Pretrained vgg 19')
download_if_not_exist('https://raw.githubusercontent.com/anishathalye/neural-style/master/examples/1-content.jpg',
'stylize_examples/1-content.jpg', 'Example content image')
download_if_not_exist('https://raw.githubusercontent.com/anishathalye/neural-style/master/examples/1-style.jpg',
'stylize_examples/1-style.jpg', 'Example style image')
download_if_not_exist('https://raw.githubusercontent.com/anishathalye/neural-style/master/examples/2-style1.jpg',
'stylize_examples/2-style.jpg', 'Example style image No.2')
download_if_not_exist('https://raw.githubusercontent.com/alexjc/neural-doodle/master/samples/Mia.jpg',
'stylize_examples/5-style.jpg', 'Style image for mrf loss with semantic masks')
download_if_not_exist('https://raw.githubusercontent.com/alexjc/neural-doodle/master/samples/Freddie.jpg',
'stylize_examples/5-content.jpg', 'Content image for mrf loss with semantic masks')
# The first example: Generate an image styled with Van Gogh's Starry Sky and with content as 1-content.jpg
content = 'stylize_examples/1-content.jpg'
styles = ['stylize_examples/1-style.jpg']
learning_rate = 10.0
iterations = 1000
width = 400
height = 533
checkpoint_output_str = 'stylize_examples/output_checkpoint/1_iter_%s.jpg'
output_str = 'stylize_examples/output/1_result.jpg'
os.system('python neural_style.py --content=%s --styles %s --learning-rate=%f '
'--iterations=%d --checkpoint-output=%s --output=%s --width=%d --height=%d'
%(content, ' '.join(styles), learning_rate, iterations, checkpoint_output_str, output_str, width, height))
# The second example: Generate an image textured with Van Gogh's Starry Sky without any content.
styles = ['stylize_examples/1-style.jpg']
learning_rate = 10.0
iterations = 500
width = 256
height = 256
checkpoint_output_str = 'stylize_examples/output_checkpoint/2_iter_%s.jpg'
output_str = 'stylize_examples/output/2_result.jpg'
os.system('python neural_style.py --styles %s --learning-rate=%f '
'--iterations=%d --checkpoint-output=%s --output=%s --width=%d --height=%d'
%(' '.join(styles), learning_rate, iterations, checkpoint_output_str, output_str, width, height))
# The third example is to use multiple styles with the first weighted more than the second one.
content = 'stylize_examples/1-content.jpg'
styles = ['stylize_examples/1-style.jpg', 'stylize_examples/2-style.jpg']
style_blend_weights = [0.7, 0.3]
learning_rate = 10.0
iterations = 1000
width = 400
height = 533
checkpoint_output_str = 'stylize_examples/output_checkpoint/3_iter_%s.jpg'
output_str = 'stylize_examples/output/3_result.jpg'
os.system('python neural_style.py --content=%s --styles %s --learning-rate=%f '
'--iterations=%d --checkpoint-output=%s --output=%s --width=%d --height=%d --style-blend-weights %s'
% (content, ' '.join(styles), learning_rate, iterations, checkpoint_output_str, output_str, width, height, ' '.join(map(str, style_blend_weights))))
# The fourth example: use mrf loss instead of gramian loss. Note that the image can't be too large or it will run
# out of memory.
content = 'stylize_examples/4-content.jpg'
styles = ['stylize_examples/4-style.jpg']
learning_rate = 10.0
iterations = 1000
width = 512
height = 384
style_weight = 2.0
checkpoint_output_str = 'stylize_examples/output_checkpoint/4_iter_%s.jpg'
output_str = 'stylize_examples/output/4_result.jpg'
os.system('python neural_style.py --content=%s --styles %s --learning-rate=%f '
'--iterations=%d --checkpoint-output=%s --output=%s --width=%d --height=%d --style-weight=%f --use_mrf'
%(content, ' '.join(styles), learning_rate, iterations, checkpoint_output_str, output_str, width, height, style_weight))
# The fifth example: use mrf loss with semantic masks.
# The content weight, style weight, and semantic masks weight became a little bit different from the paper due to
# different ways of implementation (they had only three channels while I support any number of channels),
# but it worked...
content = 'stylize_examples/5-content.jpg'
styles = ['stylize_examples/5-style.jpg']
learning_rate = 10.0
iterations = 1000
width = 512
height = 512
content_weight = 10.0
style_weight = 30.0
output_semantic_mask = 'stylize_examples/semantic_masks/Freddie_sem_masks/'
style_semantic_masks = ['stylize_examples/semantic_masks/Mia_sem_masks/']
semantic_masks_num_layers = 10
semantic_masks_weight = 3000.0
checkpoint_output_str = 'stylize_examples/output_checkpoint/5_iter_%s.jpg'
output_str = 'stylize_examples/output/5_result.jpg'
os.system('python neural_style.py --content=%s --styles %s --learning-rate=%f '
'--iterations=%d --checkpoint-output=%s --output=%s --width=%d --height=%d --content-weight=%f --style-weight=%f --use_mrf --use_semantic_masks --output_semantic_mask=%s --style_semantic_masks %s --semantic_masks_num_layers=%d --semantic_masks_weight=%f'
%(content, ' '.join(styles), learning_rate, iterations, checkpoint_output_str, output_str, width, height, content_weight, style_weight, output_semantic_mask, ' '.join(style_semantic_masks), semantic_masks_num_layers, semantic_masks_weight))
# The sixth example: use the 'content_img_style_weight_mask" to control the degree of stylization for each pixel.
content = 'stylize_examples/6-content.jpg'
styles = ['stylize_examples/1-style.jpg']
learning_rate = 10.0
iterations = 1000
width = 712
height = 474
content_img_style_weight_mask = 'stylize_examples/6-mask.jpg'
checkpoint_output_str = 'stylize_examples/output_checkpoint/6_iter_%s.jpg'
output_str = 'stylize_examples/output/6_result.jpg'
os.system('python neural_style.py --content=%s --styles %s --learning-rate=%f '
'--iterations=%d --checkpoint-output=%s --output=%s --width=%d --height=%d --content_img_style_weight_mask=%s'
%(content, ' '.join(styles), learning_rate, iterations, checkpoint_output_str, output_str, width, height, content_img_style_weight_mask))
|
[
"jerrylijiaming@gmail.com"
] |
jerrylijiaming@gmail.com
|
09aa66f1a4fca3aee8b591e74eef0c4a89bf44e4
|
65ea7da5b6673ce453066b2e2d8c3bcb3d3c9b27
|
/config.py
|
2d81b4733df2385399efa73bdb8d68b1413c3a17
|
[] |
no_license
|
pramuditorh/i-man
|
980dc5076862fb3ba99eb28824de5571573539a1
|
1ab40e98f5ed125705257fb7b63a74ad31e4da77
|
refs/heads/master
| 2021-02-09T12:16:55.984807
| 2020-03-04T11:39:40
| 2020-03-04T11:39:40
| 244,281,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
import os
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
SQLALCHEMY_TRACK_MODIFICATIONS = False
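# Hedged usage note (not in this file): this class is typically consumed as
# app.config.from_object(Config), with SECRET_KEY and DATABASE_URL exported
# in the environment beforehand.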
|
[
"pramuditorh@gmail.com"
] |
pramuditorh@gmail.com
|
8f6281ac85780bc3cec088d17f9a584e9ed80bcf
|
33b424627a5b4d7f7cc045043682b6e1955bbfc7
|
/snp2sim_analysis/trajectoryAnalysis/regenerate_structs.py
|
5f354bfaf1b3de4372ea5a8150a6afc74db84582
|
[] |
no_license
|
mccoymd/snp2sim
|
e24b198fa192d1eec5e4ed539d6fc88a66e1e236
|
f5a48b46a829cfc7c8de0d7528d9875cee9c7dd5
|
refs/heads/master
| 2021-08-09T01:02:20.340214
| 2021-06-16T20:43:57
| 2021-06-16T20:43:57
| 137,887,506
| 2
| 4
| null | 2019-09-11T13:57:22
| 2018-06-19T12:19:08
|
Python
|
UTF-8
|
Python
| false
| false
| 703
|
py
|
import os
import sys
variants = []
vmd = "\"/Applications/VMD 1.9.3.app/Contents/Resources/VMD.app/Contents/MacOS/VMD\""
for file in os.listdir(sys.argv[1]):
if file != "analysis" and not file.startswith("."):
variants.append(file)
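# Each entry in variants appears to encode residue ID followed by the variant
# amino acid (e.g. a hypothetical "123A"), so below v[:-1] supplies --varResID
# and v[-1] supplies --varAA.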
for v in variants:
os.system("python /Users/vikram/Documents/georgetown/summer_2019/snp2sim/snp2sim.py \
--runDIR /Users/vikram/Documents/georgetown/summer_2019/run_workflow/snp2simresults \
--protein PDL1 --varAA %s --varResID %s --genStructures --VMDpath /Applications/VMD_1.9.3.app/Contents/Resources/VMD.app/Contents/MacOS/VMD \
--mode varMDsim \
--newStruct /Users/vikram/Documents/georgetown/summer_2019/snp2sim/example/PDL1.Vtype.pdb" %(v[-1], v[:-1]))
|
[
"vshiv@berkeley.edu"
] |
vshiv@berkeley.edu
|
c0ff6baf8808cc36e46893b22eb24a2f76b8e3b5
|
b24302829278afbc8d95fa6e70aa11fd9ff983b6
|
/32/test_inventory.py
|
5c047e5f9ce3141ee6d140a5398639781bdb42fe
|
[] |
no_license
|
boraxpr/bitesofpy
|
1e01351021153dfe8a69b958593e90c6438f84b0
|
ff0176e029ddbc6469ecf79ea9fc1c3ff284c2e5
|
refs/heads/master
| 2023-05-10T22:27:01.093996
| 2023-05-07T11:59:52
| 2023-05-07T11:59:52
| 215,345,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
from inventory import items, duplicate_items
def test_change_copy_only():
items_copy = duplicate_items(items)
assert items == items_copy
# modify the copy
items_copy[0]['name'] = 'macbook'
items_copy[1]['id'] = 4
items_copy[2]['value'] = 30
# only copy should have been updated, check original items values
assert items[0]['name'] == 'laptop'
assert items[1]['id'] == 2
assert items[2]['value'] == 20
test_change_copy_only()
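# Hedged sketch (inventory.py is not shown here): the behaviour verified above
# is consistent with duplicate_items performing a deep copy, e.g.
#   from copy import deepcopy
#   def duplicate_items(items):
#       return deepcopy(items)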
|
[
"naipawat.poo@student.mahidol.ac.th"
] |
naipawat.poo@student.mahidol.ac.th
|
654705c73e1b9d0cf21c5d9f68039be32a512999
|
6b915ee382f9b7e0b301315703f12e07fc1428a7
|
/src/geneviz/tracks/data.py
|
bf3e5d53298c44942926dcc35990cd02213b2413
|
[
"MIT"
] |
permissive
|
asalt/geneviz
|
44d0b714e6a616678905f777180c0f8c8bea9246
|
fe217080721c8a597d4b1fe2d82920105e95ee24
|
refs/heads/master
| 2020-03-29T07:30:49.061053
| 2017-05-10T18:01:41
| 2017-05-10T18:01:41
| 149,667,991
| 0
| 0
|
MIT
| 2018-09-20T20:39:56
| 2018-09-20T20:39:55
| null |
UTF-8
|
Python
| false
| false
| 3,093
|
py
|
# # pylint: disable=W0622,W0614,W0401
# from __future__ import absolute_import, division, print_function
# from builtins import *
# # pylint: enable=W0622,W0614,W0401
# import itertools
# import seaborn as sns
# import toolz
# from geneviz.tracks import Track
# class DataTrack(Track):
# _default_kws = dict(marker='o')
# def __init__(self,
# data,
# y=None,
# hue=None,
# palette=None,
# height=1,
# legend_kws=None,
# plot_kws=None):
# super().__init__(height=height)
# y = y or 'y'
# palette = palette or sns.color_palette()
# self._data = self._preprocess_data(data, y, hue, palette)
# self._legend_kws = legend_kws or {}
# self._plot_kws = toolz.merge(self._default_kws, plot_kws or {})
# @staticmethod
# def _preprocess_data(data, y, hue, palette):
# plot_data = data[['seqname', 'position']].copy()
# plot_data['y'] = data[y]
# if hue is not None:
# if not isinstance(palette, dict):
# palette = dict(
# zip(data[hue].unique(), itertools.cycle(palette)))
# plot_data['hue'] = data[hue]
# plot_data['color'] = data[hue].map(palette)
# return plot_data
# def draw(self, ax, seqname, start, end):
# # Subset data for range.
# data = self._data.query(('seqname == {!r} and '
# '{} <= position <= {}')
# .format(seqname, start, end))
# # Gather plotting kwargs.
# if 'hue' in data and 'color' in data:
# for (hue, color), grp in data.groupby(['hue', 'color']):
# grp = grp.sort_values(by='position')
# ax.plot(
# grp['position'],
# grp['y'],
# '.',
# label=hue,
# color=color,
# **self._plot_kws)
# ax.legend(**self._legend_kws)
# else:
# data = data.sort_values(by='position')
# ax.plot(data['position'], data['y'], **self._plot_kws)
# class BoxplotTrack(Track):
# # TODO: Seaborn styling?
# def __init__(self, data, seqname='seqname', x='x', y='y',
# height=1, **kwargs):
# super().__init__(height=height)
# self._data = data
# self._seqname = seqname
# self._x = x
# self._y = y
# self._kwargs = kwargs
# def draw(self, ax, seqname, start, end):
# # Subset data for range.
# mask = ((self._data[self._seqname] == seqname) &
# (self._data[self._x] >= start) &
# (self._data[self._x] <= end))
# data = self._data[mask]
# grouped = ((pos, grp[self._y].values)
# for pos, grp in data.groupby(self._x))
# positions, values = zip(*grouped)
# ax.boxplot(values, positions=positions, **self._kwargs)
|
[
"julianderuiter@gmail.com"
] |
julianderuiter@gmail.com
|
a6b1c9a03945b18be055d66eae19ce38b7c8e2f4
|
592e4c0c68b699db6d27a5c279efbb86b0c670e8
|
/edittodo_project/urls.py
|
2e96344340838ac9568bc7fa31cd87648c268f8e
|
[] |
no_license
|
LawrenceGao0224/Django_todolist
|
49317e034f3646b9a4aef7cd96613606a2184690
|
3707dce632a0bfa250e633f74c41fa3f125b1549
|
refs/heads/main
| 2023-04-10T04:58:06.050149
| 2021-04-21T04:01:29
| 2021-04-21T04:01:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 983
|
py
|
"""edittodo_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from hello.views import myView
from todo.views import todoview, addTodo, deleteTodo
urlpatterns = [
path('admin/', admin.site.urls),
path('hello/', myView),
path('todo/', todoview),
path('addTodo/', addTodo),
path('deleteTodo/<int:todo_id>/', deleteTodo),
]
|
[
"bightt53523@gmail.com"
] |
bightt53523@gmail.com
|
0b96170da6867728c3cc90e8b465f05174ef45d9
|
d9892e27470b303254c71c526717480c40c4baf0
|
/meiduo_mall/meiduo_mall/utils/models.py
|
ff469b7e0d1972f01d4a45a1623b15c8a9792b67
|
[] |
no_license
|
zhenMoses/meiduo_dj
|
d6f14d1aafaa5ee476f8f5763c2e7338d784a165
|
32b1a721f5ed433b1446054b4a0025849b5b65af
|
refs/heads/master
| 2020-04-17T07:07:48.523432
| 2019-02-18T11:10:23
| 2019-02-18T11:10:23
| 166,355,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
from django.db import models
class BaseModel(models.Model):
"""模型基类"""
create_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)
update_time = models.DateTimeField(verbose_name='更新时间', auto_now=True)
class Meta:
abstract = True # 表示此模型是一个抽象模型,将来迁移建表时,不会对它做迁移建表动作,它只用当其它模型的基类
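# Hedged usage sketch (kept as a comment, since a concrete model would need a
# real Django app): subclasses inherit both timestamp fields automatically,
# e.g.
#   class SKU(BaseModel):
#       name = models.CharField(max_length=20)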
|
[
"root@DESKTOP-6I0PKOQ.localdomain"
] |
root@DESKTOP-6I0PKOQ.localdomain
|
a8d3a0bbf3a7644dcfbf3797cb61e2b1791a10c3
|
56f8b95b75d3ea9e8a316ef43eaa61e5376feaec
|
/vocab.py
|
d91c147f200c8ca2e1ff4acf47a6231703baa808
|
[] |
no_license
|
p3i0t/TreeLSTM
|
447ee8e8bf037275cd51982a7598aa97bc4c1dce
|
1e0962fe46c0f50f2c42e8aba0e139bf826b622d
|
refs/heads/master
| 2022-03-08T01:44:30.238286
| 2017-11-28T08:05:29
| 2017-11-28T08:05:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,498
|
py
|
# vocab object from harvardnlp/opennmt-py
class Vocab(object):
def __init__(self, filename=None, data=None, lower=False):
self.idxToLabel = {}
self.labelToIdx = {}
self.lower = lower
# Special entries will not be pruned.
self.special = []
if data is not None:
self.addSpecials(data)
if filename is not None:
self.loadFile(filename)
def size(self):
return len(self.idxToLabel)
# Load entries from a file.
def loadFile(self, filename):
idx = 0
for line in open(filename):
token = line.rstrip('\n')
self.add(token)
idx += 1
def getIndex(self, key, default=None):
key = key.lower() if self.lower else key
try:
return self.labelToIdx[key]
except KeyError:
return default
def getLabel(self, idx, default=None):
try:
return self.idxToLabel[idx]
except KeyError:
return default
# Mark this `label` and `idx` as special
def addSpecial(self, label, idx=None):
idx = self.add(label)
self.special += [idx]
# Mark all labels in `labels` as specials
def addSpecials(self, labels):
for label in labels:
self.addSpecial(label)
# Add `label` in the dictionary. Use `idx` as its index if given.
def add(self, label):
label = label.lower() if self.lower else label
if label in self.labelToIdx:
idx = self.labelToIdx[label]
else:
idx = len(self.idxToLabel)
self.idxToLabel[idx] = label
self.labelToIdx[label] = idx
return idx
# Convert `labels` to indices. Use `unkWord` if not found.
    # Optionally insert `bosWord` at the beginning and `eosWord` at the end.
def convertToIdx(self, labels, unkWord, bosWord=None, eosWord=None):
vec = []
if bosWord is not None:
vec += [self.getIndex(bosWord)]
unk = self.getIndex(unkWord)
vec += [self.getIndex(label, default=unk) for label in labels]
if eosWord is not None:
vec += [self.getIndex(eosWord)]
return vec
# Convert `idx` to labels. If index `stop` is reached, convert it and return.
def convertToLabels(self, idx, stop):
labels = []
for i in idx:
labels += [self.getLabel(i)]
if i == stop:
break
return labels
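# Hedged usage sketch (hypothetical tokens, not from the original repo):
if __name__ == '__main__':
    v = Vocab(data=['<pad>', '<unk>', '<s>', '</s>'], lower=True)
    v.add('Hello')
    v.add('world')
    # the specials occupy indices 0-3, so 'hello' -> 4, 'world' -> 5, and the
    # unseen token falls back to '<unk>' (index 1):
    print(v.convertToIdx(['hello', 'world', 'xyz'], '<unk>'))  # [4, 5, 1]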
|
[
"1075837750@qq.com"
] |
1075837750@qq.com
|
f95d00e357e663887c6fdebd6528e2754a4d7a64
|
43b59d7a8cea896a84eb0a658cda3601e30e24e5
|
/9021/quiz_finish/quiz_3.py
|
606990d4dfac803ab14516abd5ddd0cbc311f3bc
|
[] |
no_license
|
VigoWong/UNSW_summary
|
71bc5754dd19b965eeffabb4f320b5de9c88d118
|
bafe369f984e6518c8270371c1cc8cd9103c7432
|
refs/heads/master
| 2022-07-07T12:49:56.156607
| 2020-05-10T08:27:18
| 2020-05-10T08:27:18
| 255,821,689
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,288
|
py
|
# COMP9021 19T3 - Rachid Hamadi
# Quiz 3 *** Due Thursday Week 4
# Reading the number written in base 8 from right to left,
# keeping the leading 0's, if any:
# 0: move N 1: move NE 2: move E 3: move SE
# 4: move S 5: move SW 6: move W 7: move NW
#
# We start from a position that is the unique position
# where the switch is on.
#
# Moving to a position switches on to off, off to on there.
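# Worked example: the input "12" reads as 14 in base 8; from right to left
# that is 4 (move S) then 1 (move NE), toggling the switch at each visited
# position.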
from copy import deepcopy
import sys
on = '\u26aa'
off = '\u26ab'
code = input('Enter a non-strictly negative integer: ').strip()
try:
if code[0] == '-':
raise ValueError
int(code)
except ValueError:
print('Incorrect input, giving up.')
sys.exit()
nb_of_leading_zeroes = 0
for i in range(len(code) - 1):
if code[i] == '0':
nb_of_leading_zeroes += 1
else:
break
print("Keeping leading 0's, if any, in base 8,", code, 'reads as',
'0' * nb_of_leading_zeroes + f'{int(code):o}.'
)
print()
# INSERT YOUR CODE HERE
# 1. convert the code to octonary number and stored it with a list
oct_code = list('0' * nb_of_leading_zeroes + f'{int(code):o}')
# 2. build a map by model the move of command
pos = [0, 0]
pos_ls = [[0, 0]]
for i in range(1, len(oct_code) + 1):
if oct_code[-i] == '0':
pos[1] += 1
elif oct_code[-i] == '1':
pos[0] += 1
pos[1] += 1
elif oct_code[-i] == '2':
pos[0] += 1
elif oct_code[-i] == '3':
pos[0] += 1
pos[1] -= 1
elif oct_code[-i] == '4':
pos[1] -= 1
elif oct_code[-i] == '5':
pos[0] -= 1
pos[1] -= 1
elif oct_code[-i] == '6':
pos[0] -= 1
elif oct_code[-i] == '7':
pos[0] -= 1
pos[1] += 1
pos_ls.append(deepcopy(pos))
# 2.1 calculate the width and height of the map
x_ls, y_ls = zip(*pos_ls)
width = max(x_ls) - min(x_ls) + 1
height = max(y_ls) - min(y_ls) + 1
map = [[off for i in range(width)] for k in range(height)]
# 3.confirm the poses that are white or black
# 3.1 move the coordinate to match the natural two-dimension array
if min(x_ls) < 0:
minus = min(x_ls)
x_ls = [x - minus for x in x_ls]
if max(y_ls) > 0:
plus = max(y_ls)
y_ls = [y - plus for y in y_ls]
pos_ls = list(zip(x_ls, y_ls))
#
for pos in pos_ls:
if pos_ls.count(pos) % 2 != 0:
map[-pos[1]][pos[0]] = on
else:
map[-pos[1]][pos[0]] = off
# remove rows that contain only "off"
start = 0
while start <= len(map)-1 and on not in map[start]:
map[start].clear()
start += 1
last = len(map) - 1
while last >= 0 and on not in map[last]:
map[last].clear()
last -= 1
# remove first several columns that contain only "off"
while True:
column = []
for i in range(height):
if map[i]:
column.append(map[i][0])
for i in range(height):
if on not in column and map[i]:
map[i].pop(0)
else:
break
# remove latter several columns that contain only "off"
while True:
column = []
for i in range(height):
if map[i]:
column.append(map[i][-1])
for i in range(height):
if on not in column and map[i]:
map[i].pop(-1)
else:
break
for i in map:
if i:
for l in i:
print(l, end='')
print()
|
[
"z5247672@ad.unsw.edu.au"
] |
z5247672@ad.unsw.edu.au
|
e9dc572135ea4ef8d42a863b163b9987a11db5d3
|
123d2df6d9d1e655ff72e1cd17ffb50a72dc7e3a
|
/Tic Tac Toe.py
|
d71f353f17347b7780de8f9bcc748ae0a0cfc58c
|
[] |
no_license
|
PrabhjitDutta/Tic_Tac_Toe
|
6ba6015283efac9dff138e247d277a8b4a2d50dd
|
43a6f484e54635dbd5cff8689a856d60f508d54e
|
refs/heads/master
| 2020-03-15T12:04:48.202760
| 2018-05-04T12:20:33
| 2018-05-04T12:20:33
| 132,135,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,989
|
py
|
def displayTable(l):
print(" {x} | {y} | {z}".format(x=l[0][0], y=l[0][1], z=l[0][2]))
print(" {x} | {y} | {z}".format(x=l[1][0], y=l[1][1], z=l[1][2]))
print(" {x} | {y} | {z}".format(x=l[2][0], y=l[2][1], z=l[2][2]))
def askinp():
while True:
try:
c = int(input())
except:
print("You made a mistake!\nTRY AGAIN")
continue
else:
return c
def inp():
print("Enter the Name of the first player:")
x = input()
print("Enter the Name of the Second player:")
y = input()
print("Hello {a} and {b},\nThis is your Tic-Tac-Toe Table.\n".format(a=x, b=y))
return x, y
def TableModX(c, l):
i = 0
j = 0
count = 0
while i < 3:
while j < 3:
if l[i][j] == c:
l[i][j] = 'X'
count += 1
break
else:
pass
j += 1
        j = 0
        i += 1
if count == 1:
return l
else:
print("This Position is already taken\nTRY AGAIN")
return -1
def TableModO(c, l):
i = 0
j = 0
count = 0
while i < 3:
while j < 3:
if l[i][j] == c:
l[i][j] = 'O'
count += 1
break
else:
pass
j += 1
        j = 0
        i += 1
if count == 1:
return l
else:
print("This Position is already taken\nTRY AGAIN")
return -1
def TableCheck(l):
i = 0
j = 0
while i < 3:
if l[i][j] == l[i][j+1] and l[i][j] == l[i][j+2]:
return 1
else:
pass
i += 1
i = 0
while i < 3:
if l[j][i] == l[j+1][i] and l[j][i] == l[j+2][i]:
return 1
else:
pass
i += 1
i = 0
if l[i][j] == l[i+1][j+1] and l[i][j] == l[i+2][j+2]:
return 1
elif l[i][j+2] == l[i+1][j+1] and l[i][j+2] == l[i+2][j]:
return 1
else:
return -1
while True:
l = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
tempL = l
x, y = inp()
displayTable(l)
i = 0
while i < 9:
print("\n{a}, please enter the first position of your choice with the help of its corresponding number:".format(a=x))
while True:
c = askinp()
            if c not in range(1, 10):
print("It's not a Valid Number\nTRY AGAIN")
continue
else:
break
l = TableModX(c, l)
if l == -1:
l = tempL
c = askinp()
l = TableModX(c, l)
else:
pass
l = tempL
displayTable(l)
z = TableCheck(l)
if z == 1:
print("\n\n{a} is the winner congrats".format(a=x))
break
i += 1
if i == 9:
print("Its a Draw")
break
print("\n{a}, please enter the first position of your choice with the help of its corresponding number:".format(a=y))
while True:
c = askinp()
            if c not in range(1, 10):
print("It's not a Valid Number\nTRY AGAIN")
continue
else:
break
l = TableModO(c, l)
if l == -1:
l = tempL
c = askinp()
l = TableModO(c, l)
else:
pass
l = tempL
displayTable(l)
z = TableCheck(l)
if z == 1:
print("\n\n{a} is the winner congrats".format(a=y))
break
i += 1
print("\n\nWould You like to play Again\nPress y for Yes and n for No")
h = input()
if h == 'y':
continue
else:
quit()
|
[
"duttaprabhjit6@gmail.com"
] |
duttaprabhjit6@gmail.com
|
1c4504c44308a745d32f5b358d7c5f292421063f
|
e871c676bdea8fa70c5024090ff90dc07068c549
|
/app.py
|
37e0336abca166a506c83b6bdfe724c32cc6fd2d
|
[
"MIT"
] |
permissive
|
vishcomestrue/Restaurant-Review-Classifier-Using-NLP-
|
72907ad8b8d977c9a4020e404109592768c27292
|
92b8df361f23209cfd93b70e4c118bf20364dccb
|
refs/heads/main
| 2023-08-25T20:45:16.185900
| 2021-10-17T05:48:17
| 2021-10-17T05:48:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
from flask import Flask,render_template,url_for,request
import pandas as pd
import pickle
clf = pickle.load(open('restaurant_nlp.pkl', 'rb'))
app = Flask(__name__)
@app.route('/')
def home():
return render_template('home.html')
@app.route('/predict',methods=['POST'])
def predict():
if request.method == 'POST':
message = request.form['message']
data = [message]
#vect = cv.transform(data).toarray()
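        # Hedged note (not in the original): calling clf.predict(data) on the
        # raw string implies the pickled clf is a full sklearn Pipeline with
        # the vectorizer built in; a bare classifier would instead need the
        # transform step commented out above.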
my_prediction = clf.predict(data)
return render_template('result.html',prediction = my_prediction)
if __name__ == '__main__':
app.run(debug=True)
|
[
"noreply@github.com"
] |
vishcomestrue.noreply@github.com
|
26322a2feb6508fe67e3f7a9fea395c1a1a4979d
|
48f68936bacabf35193d6765f91bab49471ffaf5
|
/myapp/migrations/0011_auto_20210211_2222.py
|
e795b10f743ca8b7dcb26145a36b6fb559b26959
|
[] |
no_license
|
Ekkasit1998/django-ecom
|
21a51b3c30e9f8e79fbbf2f5293a0f99aa4681f2
|
6af8ccc4172f260f0e658ba40acac3a474e5bf55
|
refs/heads/main
| 2023-05-15T09:32:08.100240
| 2021-05-31T20:24:53
| 2021-05-31T20:24:53
| 372,594,676
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 596
|
py
|
# Generated by Django 3.0 on 2021-02-11 22:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0010_auto_20210205_1502'),
]
operations = [
migrations.AddField(
model_name='orderpending',
name='silptime',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='orderpending',
name='silp',
field=models.ImageField(blank=True, null=True, upload_to='silp'),
),
]
|
[
"ekkasitpon1998@gmail.com"
] |
ekkasitpon1998@gmail.com
|
71409e1515a5187e496afd7370184a782b494726
|
ed0f8b22c2b37d8066178d79a74623077a4afdce
|
/crslab/data/dataloader/redial.py
|
6cd128902bcdd8e82628d7d49e0d1bf15731ce0f
|
[
"MIT"
] |
permissive
|
VanChengkai/CRSLab
|
5f1c4a0a7f14be34013d4c8296350f0662066f1d
|
8b69dcbb8f8568244bb25f29f89bcd227e80d69c
|
refs/heads/main
| 2023-04-15T04:36:05.120971
| 2021-04-25T16:16:17
| 2021-04-25T16:16:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,299
|
py
|
# @Time : 2020/11/22
# @Author : Chenzhan Shang
# @Email : czshang@outlook.com
# UPDATE:
# @Time : 2020/12/16
# @Author : Xiaolei Wang
# @Email : wxl1999@foxmail.com
import re
from copy import copy
import torch
from tqdm import tqdm
from crslab.data.dataloader.base import BaseDataLoader
from crslab.data.dataloader.utils import padded_tensor, get_onehot, truncate
movie_pattern = re.compile(r'^@\d{5,6}$')
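# matches ReDial movie-mention placeholders: an '@' followed by 5-6 digits,
# e.g. '@12345'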
class ReDialDataLoader(BaseDataLoader):
"""Dataloader for model ReDial.
Notes:
You can set the following parameters in config:
- ``'utterance_truncate'``: the maximum length of a single utterance.
- ``'conversation_truncate'``: the maximum length of the whole conversation.
The following values must be specified in ``vocab``:
- ``'pad'``
- ``'start'``
- ``'end'``
- ``'unk'``
the above values specify the id of needed special token.
- ``'ind2tok'``: map from index to token.
- ``'n_entity'``: number of entities in the entity KG of dataset.
- ``'vocab_size'``: size of vocab.
"""
def __init__(self, opt, dataset, vocab):
"""
Args:
opt (Config or dict): config for dataloader or the whole system.
dataset: data for model.
vocab (dict): all kinds of useful size, idx and map between token and idx.
"""
super().__init__(opt, dataset)
self.ind2tok = vocab['ind2tok']
self.n_entity = vocab['n_entity']
self.pad_token_idx = vocab['pad']
self.start_token_idx = vocab['start']
self.end_token_idx = vocab['end']
self.unk_token_idx = vocab['unk']
self.item_token_idx = vocab['vocab_size']
self.conversation_truncate = self.opt.get('conversation_truncate', None)
self.utterance_truncate = self.opt.get('utterance_truncate', None)
def rec_process_fn(self, *args, **kwargs):
dataset = []
for conversation in self.dataset:
if conversation['role'] == 'Recommender':
for item in conversation['items']:
context_entities = conversation['context_entities']
dataset.append({'context_entities': context_entities, 'item': item})
return dataset
def rec_batchify(self, batch):
batch_context_entities = []
batch_item = []
for conversation in batch:
batch_context_entities.append(conversation['context_entities'])
batch_item.append(conversation['item'])
context_entities = get_onehot(batch_context_entities, self.n_entity)
return {'context_entities': context_entities, 'item': torch.tensor(batch_item, dtype=torch.long)}
def conv_process_fn(self):
dataset = []
for conversation in tqdm(self.dataset):
if conversation['role'] != 'Recommender':
continue
context_tokens = [truncate(utterance, self.utterance_truncate, truncate_tail=True) for utterance in
conversation['context_tokens']]
context_tokens = truncate(context_tokens, self.conversation_truncate, truncate_tail=True)
context_length = len(context_tokens)
utterance_lengths = [len(utterance) for utterance in context_tokens]
request = context_tokens[-1]
response = truncate(conversation['response'], self.utterance_truncate, truncate_tail=True)
dataset.append({'context_tokens': context_tokens, 'context_length': context_length,
'utterance_lengths': utterance_lengths, 'request': request, 'response': response})
return dataset
def conv_batchify(self, batch):
max_utterance_length = max([max(conversation['utterance_lengths']) for conversation in batch])
max_response_length = max([len(conversation['response']) for conversation in batch])
max_utterance_length = max(max_utterance_length, max_response_length)
max_context_length = max([conversation['context_length'] for conversation in batch])
batch_context = []
batch_context_length = []
batch_utterance_lengths = []
batch_request = [] # tensor
batch_request_length = []
batch_response = []
for conversation in batch:
padded_context = padded_tensor(conversation['context_tokens'], pad_idx=self.pad_token_idx,
pad_tail=True, max_len=max_utterance_length)
if len(conversation['context_tokens']) < max_context_length:
pad_tensor = padded_context.new_full(
(max_context_length - len(conversation['context_tokens']), max_utterance_length), self.pad_token_idx
)
padded_context = torch.cat((padded_context, pad_tensor), 0)
batch_context.append(padded_context)
batch_context_length.append(conversation['context_length'])
batch_utterance_lengths.append(conversation['utterance_lengths'] +
[0] * (max_context_length - len(conversation['context_tokens'])))
request = conversation['request']
batch_request_length.append(len(request))
batch_request.append(request)
response = copy(conversation['response'])
# replace '^\d{5,6}$' by '__item__'
for i in range(len(response)):
if movie_pattern.match(self.ind2tok[response[i]]):
response[i] = self.item_token_idx
batch_response.append(response)
context = torch.stack(batch_context, dim=0)
request = padded_tensor(batch_request, self.pad_token_idx, pad_tail=True, max_len=max_utterance_length)
response = padded_tensor(batch_response, self.pad_token_idx, pad_tail=True,
max_len=max_utterance_length) # (bs, utt_len)
return {'context': context, 'context_lengths': torch.tensor(batch_context_length),
'utterance_lengths': torch.tensor(batch_utterance_lengths), 'request': request,
'request_lengths': torch.tensor(batch_request_length), 'response': response}
def policy_batchify(self, batch):
pass
|
[
"wxl1999@foxmail.com"
] |
wxl1999@foxmail.com
|
64d7089ac1740663367e9a615c9d00c9e0f7f813
|
db0b01788ed76e5b3c387361916891c81c36689f
|
/Instruction.py
|
4ec6dff3caecedd9c273512ecc942e2737ee22cd
|
[] |
no_license
|
kushalmehta13/MIPS-CPU-Simulator
|
83ef071252a69aa8577719605348577ad91b0b94
|
31da72febb776d78dda75cb71e294b5ec60095d8
|
refs/heads/master
| 2020-08-15T18:46:47.618575
| 2019-11-16T18:53:50
| 2019-11-16T18:53:50
| 215,390,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 756
|
py
|
class Instruction:
def __init__(self, inst):
self.inst = inst
self.instType = ''
self.op1 = ''
        self.op2 = ''
self.op3 = ''
self.offset = 0
def setOp1(self, op1):
self.op1 = op1
def setOp2(self, op2):
self.op2 = op2
def setOp3(self, op3):
self.op3 = op3
def setOffset(self, offset):
self.offset = offset
def setInstType(self, instType):
self.instType = instType
def getOp1(self):
return self.op1
def getOp2(self):
return self.op2
def getOp3(self):
return self.op3
def getOffset(self):
return self.offset
def getInstType(self):
return self.instType
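# A short usage sketch of the class as fixed above; the operand values are
# illustrative, not taken from the simulator itself.
if __name__ == '__main__':
    inst = Instruction('add $t0, $t1, $t2')
    inst.setInstType('R')
    inst.setOp1('$t0')
    inst.setOp2('$t1')
    inst.setOp3('$t2')
    print(inst.getInstType(), inst.getOp1(), inst.getOp2(), inst.getOp3())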
|
[
"kushalmehta13@gmail.com"
] |
kushalmehta13@gmail.com
|
71c7390bbd5f6de3c2210e3b57a6826df6b81282
|
ff5f3e43acb6dd26b63d386d1e90b4b3795b940e
|
/data/scam_pagerank2.py
|
106a0d2e2b867ced0d27a652e9bd453af52ceea4
|
[] |
no_license
|
manugoyal/wikipedia-pagerank
|
de7eb41042194bbc59fd7e6e335644fa556d0fd5
|
ca82aab92cd3dc9d497d16a6fb7e2b16f416b420
|
refs/heads/master
| 2021-01-10T07:53:55.095560
| 2015-10-22T15:09:01
| 2015-10-22T15:09:01
| 44,555,631
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,612
|
py
|
# Tool that suggests ways to artificially inflate your pagerank, without the
# restriction that you can only edit public pages.
import data_utils
import pagerank_utils
import copy
import numpy as np
def get_clean_data():
"""Returns the true pageranks, backlinks, outlinks_count, etc for wikipedia
as a map. This dict should be passed around unmodified to the tool"""
pageranks = data_utils.get_pageranks()
backlinks = data_utils.get_backlinks()
editable_pages = backlinks.keys() # This is all the pages
return {
# Mapping from id to raw pagerank
'pageranks': pageranks,
# Mapping from id to normalized pagerank
'normalized_pageranks': data_utils.normalized_pageranks(pageranks),
# Mapping from id to list of backlinks
'backlinks': backlinks,
# Mapping from id to number of outlinks
'outlinks_count': data_utils.get_outlinks_count(),
# List of pages we can edit
'editable_pages': editable_pages
}
def get_current_page_rank(data, page_id):
return data['normalized_pageranks'][page_id]
def inflate_page_rank(data, page_id, amount):
"""Try to increase the normalized page rank roughly to the given amount.
Returns the amount we predict the increase to be, and the list of pages to
edit to make it happen."""
inflation_factor = float(amount) / data['normalized_pageranks'][page_id]
raw_target_pagerank = data['pageranks'][page_id] * inflation_factor
# By adding a link from a page with rank r and number of outbound links k,
# we'll get roughly a page rank boost of d*(r/(k+1)), where d is the damping
# factor.
pagerank_boost = lambda id: (pagerank_utils.DAMPING *
data['pageranks'][id] /
(data['outlinks_count'][id] + 1))
raw_pagerank_delta = raw_target_pagerank - data['pageranks'][page_id]
# Sort the entire set of pages by approximately how much page rank they
# could lend to us
editable_pages = sorted(data['editable_pages'],
key=pagerank_boost, reverse=True)
# Go down the list of editable pages, and keep adding pages to the list
# until we hit our target. Note that we can't add pages that already have a
# link to our page, because in general, wikipedia doesn't duplicate links
# between pages
total_boost = 0
pages_to_edit = []
for id in editable_pages:
if total_boost > raw_pagerank_delta:
break
if id in data['backlinks'][page_id]:
continue
total_boost += pagerank_boost(id)
pages_to_edit.append(id)
normalized_boost = (total_boost / data['pageranks'][page_id] *
data['normalized_pageranks'][page_id])
return normalized_boost, pages_to_edit
def evaluate_scam(data, page_id, pages_to_edit):
"""Given a clean data set, a page to inflate the rank of, and a list of
pages to add links to the target page, return the new normalized pagerank of
the page"""
new_backlinks = copy.copy(data['backlinks'])
new_outlinks_count = copy.copy(data['outlinks_count'])
# Link the pages_to_edit to the page_id
for id in pages_to_edit:
new_backlinks[page_id] = np.append(new_backlinks[page_id], id)
new_outlinks_count[id] += 1
# Run pagerank on the new dataset
new_pageranks = pagerank_utils.converge_pageranks(
data['pageranks'], new_backlinks, new_outlinks_count)
# Return the new normalized pagerank
return new_pageranks[page_id] / min(new_pageranks.itervalues())
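# A sketch of how the pieces above might be driven end to end. It assumes the
# data files expected by data_utils are available; the page id and the 10x
# inflation target are illustrative.
if __name__ == '__main__':
    data = get_clean_data()
    page_id = 42  # illustrative target page
    current = get_current_page_rank(data, page_id)
    boost, pages_to_edit = inflate_page_rank(data, page_id, 10 * current)
    achieved = evaluate_scam(data, page_id, pages_to_edit)
    print('current: %s, predicted boost: %s, achieved: %s' % (current, boost, achieved))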
|
[
"manu.goyal2013@gmail.com"
] |
manu.goyal2013@gmail.com
|
87e241ebd07b7b4b41c2e8bc0b2e3e0de0383382
|
340fd9a6345a1fd6ea5c494fd1e25ef1a15cd83d
|
/my_blog/celery_tasks/email/tasks.py
|
e1f12962f31abad01edce72576156599fb1c4f27
|
[] |
no_license
|
ltfred/blog
|
48024992922d570db7042d56855f6c28b7c581f8
|
9e7c9253a4576cbc443edea8fdebed92733d6f9f
|
refs/heads/master
| 2020-07-26T15:24:24.215702
| 2019-09-23T02:18:55
| 2019-09-23T02:18:55
| 208,689,787
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
from django.core.mail import send_mail
from django.conf import settings
from celery_tasks.main import celery_app
@celery_app.task(bind=True, name='send_reset_email', retry_backoff=3)
def send_reset_mail(self, to_email, password):
# Subject (the Chinese literal below reads "password reset email")
subject = '重置密码邮件'
# Body: "Thanks for using Fred's personal site; your password has been reset to {}; please log in and change it"
html_message = '感谢您使用Fred的个人网站,您的密码重置为:{},请登录后修改密码'.format(password)
try:
send_mail(subject, '', settings.EMAIL_FROM, [to_email], html_message=html_message)
except Exception as e:
# On exception, retry automatically up to three times
raise self.retry(exc=e, max_retries=3)
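# For reference, a caller (e.g. a Django view) would typically queue this task
# asynchronously with Celery's standard delay API; the arguments below are
# placeholders, not values from this project.
# send_reset_mail.delay('user@example.com', 'NewPassword123')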
|
[
"ltfred@163.com"
] |
ltfred@163.com
|
5477872da5a91004aec0826004a9730fb5d9934b
|
d0c82d7665c3b676b56097cfa944abe18f709c7e
|
/Python相关/捷联惯导相关/work2/read.py
|
61e46bb888a933564ad4b7225730fc10adcd00e0
|
[] |
no_license
|
LRoel/collection
|
78e7d83e942cfa305105b9584712a9977d0334e9
|
2890ac462c68e3b0f7bf8f30b208bbdc458521ba
|
refs/heads/master
| 2021-05-16T12:29:21.102179
| 2017-05-11T12:56:26
| 2017-05-11T12:56:26
| 105,291,256
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,272
|
py
|
#!/usr/bin/env python
# import rospy
import serial
import sys
import string
import operator
import numpy as np
import math
import matplotlib.pyplot as plt
# from sensor_msgs.msg import NavSatFix
# #print("import Nav")
# from work1.msg import comb
# #print("import comb")
glv_Re = 6378137
glv_f = 1 / 298.25
glv_e = math.sqrt(2 * glv_f - glv_f ** 2)
glv_e2 = glv_e ** 2
glv_Rp = (1 - glv_f) * glv_Re
glv_ep = math.sqrt(glv_Re ** 2 + glv_Rp ** 2) / glv_Rp
glv_ep2 = glv_ep ** 2
glv_wie = 7.2921151467e-5
glv_g0 = 9.7803267714
glv_mg = 1.0e-3 * glv_g0
glv_ug = 1.0e-6 * glv_g0
n = 0
f1 = open('/home/exbot/imu_data.txt','a')
f2 = open('/home/exbot/123.txt','r')
def Qnorm(Q_con):
Q_norm = np.linalg.norm(Q_con)
return Q_con / Q_norm
def text_data():
line = f2.readline()
if line:
words = string.split(line, ",")
GyroX = string.atof(words[0])
GyroY = string.atof(words[1])
GyroZ = string.atof(words[2])
AccX = string.atof(words[3])
AccY = string.atof(words[4])
AccZ = string.atof(words[5])
print (GyroX, GyroY, GyroZ, AccX, AccY, AccZ)
return (GyroX, GyroY, GyroZ, AccX, AccY, AccZ)
else:
return (10000,0,0,0,0,0)
def raw_data():
global n
while 1:
if ser.read() != '$':
continue
line0 = ser.readline()
cs = line0[-4:-2]
cs1 = int(cs, 16)
cs2 = checksum(line0)
# print("cs1,cs2", cs1, cs2)
if cs1 != cs2:
continue
line = line0.replace("GTIMU,", "")
line = line.replace("\r\n", "")
# if "\x00" in line:
# continue
if string.find(line, '*') == -1:  # skip sentences without a checksum delimiter
continue
line = line.replace("*", ",")
words = string.split(line, ",")
# print(words)
GPSWeek = string.atoi(words[0])
GPSTime = string.atof(words[1])
GyroX = string.atof(words[2])
GyroY = string.atof(words[3])
GyroZ = string.atof(words[4])
AccX = string.atof(words[5])
AccY = string.atof(words[6])
AccZ = string.atof(words[7])
Tpr = string.atof(words[8])
s = str(GyroX) + ',' + str(GyroY) + ',' + str(GyroZ) + ',' + str(AccX) + ',' + str(AccY)+ ',' + str(AccZ) + '\n'
f1.write(s)
print (n)
n = n + 1
return (GyroX, GyroY, GyroZ, AccX, AccY, AccZ)
def coarse_align(GyroX,GyroY,GyroZ,AccX,AccY,AccZ,g,wie,L):
T31 = AccX /g
T32 = AccY /g
T33 = AccZ /g
T21 = (GyroX - T31 * math.sin(L)) / (wie * math.cos(L))
T22 = (GyroY - T32 * math.sin(L)) / (wie * math.cos(L))
T23 = (GyroZ - T33 * math.sin(L)) / (wie * math.cos(L))
T11 = T22*T33 - T23*T32
T12 = T23*T31 - T21*T33
T13 = T21*T32 - T22*T31
C_n_b = np.matrix([[T11, T12, T13],[T21, T22, T23],[T31, T32, T33]])
return C_n_b
def m_cross(a,b):
a_tmp = a.reshape(1, 3)
b_tmp = b.reshape(1, 3)
r = np.cross(a_tmp, b_tmp).reshape(3, 1)
return r
def global_vip():
global glv_Re,glv_f,glv_e,glv_e2,glv_Rp,glv_ep,glv_ep2,glv_wie,glv_g0,glv_mg,glv_ug
def earth(pos, vn):
# global_vip()
sl=math.sin(pos[0])
cl=math.cos(pos[0])
tl=sl/cl
sl2=sl*sl
sl4=sl2*sl2
wien = glv_wie*np.array([[cl],[0],[-sl]])
Rx = glv_Re*(1-2*glv_e2+3*glv_e2*sl2)+pos[2]
Ry = glv_Re*(1+glv_e2)*sl2+pos[2]
wenn = np.zeros((3, 1))  # float dtype, so the transport rates below are not truncated to int
wenn[0]=vn[1] / (Ry + pos[2])
wenn[1]=-vn[0] / (Rx + pos[2])
wenn[2]=-vn[1] * tl / (Ry + pos[2])
g = glv_g0*(1+5.27094e-3*sl2+2.32718e-5*sl4)-3.086e-6*pos[2]
gn = np.array([[0],[0],[-g]])
return (wien, wenn, Rx, Ry, gn)
def qmulti(p, q):
q = np.dot(np.array([[p[0], -p[1], -p[2], -p[3]],
[p[1], p[0], -p[3], p[2]],
[p[2], p[3], p[0], -p[1]],
[p[3], -p[2], p[1], p[0]]]),q)
return q
def att2C_b_n(yaw, pitch, roll):
cr = math.cos(roll)
sr = math.sin(roll)
cy = math.cos(yaw)
sy = math.sin(yaw)
cp = math.cos(pitch)
sp = math.sin(pitch)
C_b_n = np.matrix([[cy * cr - sr * sp, -sy * cp, sr * cy + cr * sy * sp],
[cr * sy + sr * cy * sp, cy * cp, sr * sy - cr * cy * sp],
[-sr * cp, sp, cr * cp]])
return C_b_n
def Q2C_b_n(Q):
C_b_n = np.matrix([[Q[0] ** 2 + Q[1] ** 2 - Q[2] ** 2 - Q[3] ** 2, 2 * (Q[1] * Q[2] - Q[0] * Q[3]),
2 * (Q[1] * Q[3] + Q[0] * Q[2])],
[2 * (Q[1] * Q[2] + Q[0] * Q[3]), 1 - 2 * (Q[1] ** 2 + Q[3] ** 2),
2 * (Q[2] * Q[3] - Q[0] * Q[1])],
[2 * (Q[1] * Q[3] - Q[0] * Q[2]), 2 * (Q[2] * Q[3] + Q[0] * Q[1]),
Q[0] ** 2 - Q[1] ** 2 - Q[2] ** 2 + Q[3] ** 2]])
return C_b_n
def C_b_n2Q(C_b_n):
Q_1 = 0.5 * math.sqrt(abs(1 + C_b_n[0, 0] - C_b_n[1, 1] - C_b_n[2, 2]))
Q_2 = 0.5 * math.sqrt(abs(1 - C_b_n[0, 0] + C_b_n[1, 1] - C_b_n[2, 2]))
Q_3 = 0.5 * math.sqrt(abs(1 - C_b_n[0, 0] - C_b_n[1, 1] + C_b_n[2, 2]))
Q_0 = 0.5 * math.sqrt(abs(1 + C_b_n[0, 0] + C_b_n[1, 1] + C_b_n[2, 2]))
Q_1 = Q_1 * np.sign(C_b_n[2, 1] - C_b_n[1, 2])
Q_2 = Q_2 * np.sign(C_b_n[0, 2] - C_b_n[2, 0])
Q_3 = Q_3 * np.sign(C_b_n[1, 0] - C_b_n[0, 1])
Q = np.array([Q_0, Q_1, Q_2, Q_3])
return Q
def r2q(r):
fi = np.linalg.norm(r)
q = np.array(
[math.cos(fi / 2), r[0] / fi * math.sin(fi / 2), r[1] / fi * math.sin(fi / 2), r[2] / fi * math.sin(fi / 2)])
return q
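# NMEA-style checksum: XOR of every character in the sentence body; the slice
# below drops the trailing '*XX\r\n' (checksum digits and line ending) first.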
def checksum(nmea_str0):
nmea_str = nmea_str0[0:-5]
return reduce(operator.xor, map(ord, nmea_str), 0)
# port = '/dev/ttyUSB0'
#
# try:
# ser = serial.Serial(port=port, baudrate=115200, timeout=1)
# except serial.serialutil.SerialException:
# print("IMU not found at port " + port + ". Did you specify the correct port in the launch file?")
# # exit
# sys.exit(0)
rpd = math.pi / 180
e = 1 / 298.3
R_e = 6378254
R_p = 6356803
w_ie = 7.2722e-005
# vec_Position_m1 = [h_X_e0 h_Y_n0 h_Z_u0]'
# H_m1 = h_H0
# L_m1 = h_L0
# J_m1 = h_J0
# vec_V_m1 = [true_v_x_rec(1) true_v_y_rec(1) true_v_z_rec(1)]'
vec_Position_m1 = math.pi/180*np.array([[36],[120],[60]])
H_m1 = 0 # Height
L_m1 = 0 # Latitude
J_m1 = 0 # Longitude
vec_V_m1 = np.array([[1],[0],[0]])
vec_rot_n = np.array([[0], [0], [0]])
# J=h_J
# h_L = 45 * rpd
# H=h_H
vec_V = vec_V_m1
# vec_Position=[h_X_e h_Y_n h_Z_u]'
vec_Position = vec_Position_m1
dRn_m = np.array([0, 0, 0])
Ts = 0.01
vec_w1 = np.array([[0], [0], [0]])
vec_f1 = np.array([[0], [0], [0]])
GyroX_Sum = 0
GyroY_Sum = 0
GyroZ_Sum = 0
AccX_Sum = 0
AccY_Sum = 0
AccZ_Sum = 0
for i in range(100):  # average exactly 100 samples
(GyroX, GyroY, GyroZ, AccX, AccY, AccZ) = text_data()
GyroX_Sum = GyroX_Sum + GyroX
GyroY_Sum = GyroY_Sum + GyroY
GyroZ_Sum = GyroZ_Sum + GyroZ
AccX_Sum = AccX_Sum + AccX
AccY_Sum = AccY_Sum + AccY
AccZ_Sum = AccZ_Sum + AccZ
GyroX = GyroX_Sum / 100
GyroY = GyroY_Sum / 100
GyroZ = GyroZ_Sum / 100
AccX = AccX_Sum / 100
AccY = AccY_Sum / 100
AccZ = AccZ_Sum / 100
(vec_w_n_ie, vec_w_n_en, R_m, R_n, vec_g) = earth(vec_Position, vec_V)
C_n_b = coarse_align(GyroX, GyroY, GyroZ, AccX, AccY, AccZ, -vec_g[2, 0], glv_wie, vec_Position[0, 0])
C_b_n = np.transpose(C_n_b)
Q = C_b_n2Q(C_b_n)
#
pitch222_rec = []
yaw222_rec = []
roll222_rec = []
vx_rec = []
vy_rec = []
vz_rec = []
px_rec = []
py_rec = []
pz_rec = []
# ser.flush()
seq = 0
while 1:
# $GTIMU,0,1148.980,1.3391,-0.6484,-0.7041,0.0040,0.0048,1.0018,48.1*5B#
(GyroX, GyroY, GyroZ, AccX, AccY, AccZ) = text_data()
if GyroX == 10000:
break
(vec_w_n_ie, vec_w_n_en, R_m, R_n, vec_g) = earth(vec_Position, vec_V)
# R_m = R_e * (1 - e * e) / (1 - e * e * math.sin(L_m1) * math.sin(L_m1)) ^ (3 / 2)
# R_n = R_e / (1 - e * e * math.sin(L_m1) * math.sin(L_m1)) ^ (1 / 2)
# g = 9.7803267714 * (
# 1 + 0.00527094 * math.sin(L_m1) * math.sin(L_m1) + 0.0000232718 * math.sin(L_m1) * math.sin(L_m1) * math.sin(
# L_m1) * math.sin(
# L_m1)) - 0.3086 * 0.00001 * H_m1
# vec_g = np.array([[0], [0], [-g]])
w_bx_ib = GyroX
w_by_ib = GyroY
w_bz_ib = GyroZ
f_bx = AccX
f_by = AccY
f_bz = AccZ
vec_w = np.array([[w_bx_ib], [w_by_ib], [w_bz_ib]])
vec_f = np.array([[f_bx], [f_by], [f_bz]])
# vec_w_n_ie = np.array([[0], [w_ie * math.cos(L_m1)], [w_ie * math.sin(L_m1)]])
# vec_w_n_en = np.array([[-vec_V_m1[2] / (R_m + H_m1)], [vec_V_m1[1] / (R_n + H_m1)],
# [vec_V_m1[1] * math.tan(L_m1) / (R_n + H_m1)]])
Q_con = np.array([Q[0], -Q[1], -Q[2], -Q[3]])
C_n_b = Q2C_b_n(Qnorm(Q_con))
vec_w = vec_w - C_n_b * (vec_w_n_ie + vec_w_n_en)
w_a = vec_w1
w_b = (vec_w - vec_w1) / Ts / 2
r = w_a * Ts + w_b * Ts * Ts + 1 / 6 * Ts * Ts * Ts * m_cross(w_a, w_b)
fi = np.linalg.norm(r)
if fi > 0:
q = r2q(r)
Q = qmulti(Q, q)
C_b_n = Q2C_b_n(Qnorm(Q))
pitch = math.asin(C_b_n[2, 1])
roll = math.atan(-C_b_n[2, 0] / C_b_n[2, 2])
yaw = math.atan(-C_b_n[0, 1] / C_b_n[1, 1])
vec_dV_g_corm = (vec_g - m_cross((2 * vec_w_n_ie + vec_w_n_en), vec_V_m1)) * Ts
vec_dV = (vec_f1 + vec_f) * Ts / 2
vec_dgyro_out = (vec_w1 + vec_w) * Ts / 2
vec_dV_rotm = 1 / 2 * m_cross(vec_dgyro_out, vec_dV)
v_A = vec_f1
v_B = (vec_f - vec_f1) / Ts / 2
vec_dV_sculm = Ts * Ts * Ts / 6 * (m_cross(w_a, v_B) + m_cross(v_A, w_b))
vec_dV_sfm = vec_dV + vec_dV_rotm + vec_dV_sculm
vec_V = vec_V_m1 + C_b_n * vec_dV_sfm + vec_dV_g_corm
dR_n = (vec_V_m1 + vec_V) * Ts / 2
vec_Position = vec_Position_m1 + dR_n
H = H_m1 + dR_n[2]
L = L_m1 + dR_n[1] / (R_m + H)
J = J_m1 + dR_n[0] / (R_n + H) / math.cos(L)
vec_Position_m1 = vec_Position
H_m1 = H
L_m1 = L
J_m1 = J
vec_V_m1 = vec_V
vec_w1 = vec_w
vec_f1 = vec_f
pitch222_rec.append(pitch)
yaw222_rec.append(yaw)
roll222_rec.append(roll)
vx_rec.append(vec_V[0])
vy_rec.append(vec_V[1])
vz_rec.append(vec_V[2])
px_rec.append(vec_Position[0,0])
py_rec.append(vec_Position[1,0])
pz_rec.append(vec_Position[2,0])
print ('!!!')
plt.figure(1)
plt.plot(px_rec,py_rec)
plt.show()
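# Note on the update loop above: each iteration integrates the gyro rates into
# a rotation vector r (with a third-order cross-product correction term),
# converts it to a delta quaternion via r2q, and chains it onto Q via qmulti --
# a standard two-sample, coning-corrected attitude update.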
|
[
"lroel@LRMB.local"
] |
lroel@LRMB.local
|
6f44b107077e908b6ebd5b31c3cb038207655492
|
07b40e821b74fccd8e6601581ddd5d7fb89b613c
|
/geekbangpython/python_demo/07/zodiac.py
|
7f9d107c9e09d819a86525954d66c1f22c95cc8f
|
[] |
no_license
|
Natt7/python-learning
|
4c3c6e402a8916bf558e0ae44a0f7d765b296cb0
|
9c7d860863f240b339aaf115757b901bc2952fd0
|
refs/heads/master
| 2020-04-10T22:05:08.509006
| 2019-02-12T13:35:13
| 2019-02-12T13:35:13
| 161,314,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
# Record the 12 zodiac signs; determine the sign based on the date
# zodiac_name = (u'摩羯座', u'水瓶座', u'双鱼座', u'白羊座', u'金牛座', u'双子座',
# u'巨蟹座', u'狮子座', u'处女座', u'天秤座', u'天蝎座', u'射手座')
# zodiac_days = ((1, 20), (2, 19), (3, 21), (4, 21), (5, 21), (6, 22),
# (7, 23), (8, 23), (9, 23), (10, 23), (11, 23), (12, 23))
#
#
# (month, day) = (2, 15)
#
# zodiac_day = filter(lambda x: x<=(month, day), zodiac_days)
# # print(zodiac_day)
#
# zodac_len = len(list(zodiac_day)) % 12
# print(zodiac_name[zodac_len])
a_list = ['abc', 'xyz']
a_list.append('X')
print (a_list)
a_list.remove('xyz')
print(a_list)
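# For reference, a minimal working version of the commented-out sign lookup
# above (sign names translated to English; the date (2, 15) is illustrative):
zodiac_name = ('Capricorn', 'Aquarius', 'Pisces', 'Aries', 'Taurus', 'Gemini',
               'Cancer', 'Leo', 'Virgo', 'Libra', 'Scorpio', 'Sagittarius')
zodiac_days = ((1, 20), (2, 19), (3, 21), (4, 21), (5, 21), (6, 22),
               (7, 23), (8, 23), (9, 23), (10, 23), (11, 23), (12, 23))
(month, day) = (2, 15)
# Count the boundary dates not later than (month, day); that count indexes the sign.
sign_index = len([d for d in zodiac_days if d <= (month, day)]) % 12
print(zodiac_name[sign_index])  # -> Aquarius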
|
[
"natt.ni@qq.com"
] |
natt.ni@qq.com
|
94e655d50d9a0aa7e89be11d2ab8857b6b26f1bb
|
94c70198b34198069fbd3b7aa29c828490bad391
|
/ex090.py
|
356368b5838ebf8e8330bf21b360ac97273dc840
|
[
"MIT"
] |
permissive
|
danilodelucio/Exercicios_Curso_em_Video
|
5254fbf45807ac948fb93eb96c1335070fa22f5b
|
d59e1b4efaf27dd0fc828a608201613c69ac333d
|
refs/heads/master
| 2022-04-17T15:50:16.130487
| 2020-04-22T11:44:46
| 2020-04-22T11:44:46
| 257,882,523
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
ficha = dict()
ficha['Nome'] = str(input('Nome: '))
ficha['Media'] = float(input(f'Média de {ficha["Nome"]}: '))
if ficha['Media'] < 5:
ficha['Situação'] = 'Reprovado'
elif ficha['Media'] >= 8:
ficha['Situação'] = 'Aprovado'
else:
ficha['Situação'] = 'Recuperação'
print('-' * 30)
for k, v in ficha.items():
print(f'{k} é igual a {v}.')
|
[
"danilodelucio@gmail.com"
] |
danilodelucio@gmail.com
|
adc03defaa2493bdf9c1e47722a936d366ad2fa3
|
98032a164e8a09c792879a0ee99aa01ee875b8c9
|
/homework/mt0a/mt0a.py
|
6feeec91c79f5e4699d1754f10241aad7b6b2805
|
[] |
no_license
|
olivia-salaben/astro98sp19
|
0056f143c666ba77f6f67ffa5e59ab293e8677c6
|
57a21661b4e1b736bb4498ce259b882c534461f1
|
refs/heads/master
| 2020-05-14T12:02:55.464087
| 2019-03-12T06:18:31
| 2019-03-12T06:18:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,311
|
py
|
"""ASTRONOMY 98/198 SPRING 2019"""
"""MIDTERM PART 1: REVIEW"""
"""
Hey everyone, Alex here. This week's "homework" (midterm part 1) is going to be a review.
It's come to my attention that the lectures have not been fully in line with the homework,
so this week I'm going to try to write something that is hopefully a lot more clear
with its directions. I will also include descriptions/reviews of concepts that
should have been covered in lecture. Ideally, you will find this homework at once
rewarding and manageable. Unlike other homeworks, there will be a lot more reading
involved in this document.
"""
"""
Firstly, a review of how to do the assignments. Recall that in lecture you learned
what functions were (at least you should have learned).
So suppose I'm in my terminal, in a Python environment. I should see this:
[Below is a Terminal Display - I'm writing this just for clarity, you will
not see these bracketed things in your terminal window.]
>>>
[End of Terminal Display]
The three arrows indicate user input. Once a user writes something, and then presses
the "Enter/Return" key on their keyboard, Python will interpret what was written
and "return" a result. For instance,
[Below is a Terminal Display]
>>> 2
2
>>> 3.14159
3.14159
>>> "Alex"
'Alex'
>>> Alex
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'Alex' is not defined
[End of Terminal Display]
Woah, what happened there? Well, any set of characters that is not a number or not
enclosed in quotes is considered a variable. With the exception of certain built-in
Python functions or key words, such as True, False, and type, the onus is on you
to define these variables. If you do not, you will get the error you see above.
In this case, the error is quite clear; "name 'Alex' is not defined". We haven't
defined a variable named 'Alex' yet! Let's do just that:
[Below is a Terminal Display]
>>> Alex = 'Alex'
>>> Alex
'Alex'
>>> Alex = 2
>>> Alex
2
[End of Terminal Display]
You can redefine your variables, as I did up there. I first defined Alex to be a string,
'Alex', before redefining Alex to be the integer 2. When you call a variable in Python,
Python will attempt to retrieve the most recently defined variable.
Now you've also learned about functions. Functions make it easy to generalize a procedure.
For instance, suppose Nicholas asked you to find the powers of many numbers. He says,
"Find the 5th power of 2, the 8th power of 4, and the 3rd power of 9." You can certainly
input these things manually into Python like so:
[Below is a Terminal Display]
>>> 2 * 2 * 2 * 2 * 2
32
>>> 4 * 4 * 4 * 4 * 4 * 4 * 4 * 4
65536
>>> 9 * 9 * 9
729
[End of Terminal Display]
But it's more convenient to use a function to do the above:
[Below is a Terminal Display]
>>> def exp(num, pow):
... result = num**pow
... return result
...
>>> exp(2, 5)
32
>>> exp(4,8)
65536
>>> exp(9,3)
729
[End of Terminal Display]
Of course, you could have just typed 2**5 to begin with, but when it comes to more
complex operations, functions are the way to go. That said, what happens if I set
variables equal to functions? Well, let's find out! Suppose I implemented the exp
function shown above.
[Below is a Terminal Display]
>>> my_var = exp
>>> my_var #The portion after 'at' will vary from machine to machine
<function exp at 0x1047921e0>
>>> my_var = exp(1, 1)
>>> my_var
1
[End of Terminal Display]
***IMPORTANT:
If you set a variable equal to the function without calling it - without parenthesis () -
then the variable will be equal to the function. If you set a variable equal to the
function call, then the variable will be equal to that function's return value.
^^^THIS IS SUPER IMPORTANT. REREAD IF YOU MUST.^^^
For fun, let's further investigate setting a variable equal to a function without
calling it:
[Below is a Terminal Display]
>>> my_var = exp
>>> my_var(1, 1)
1
[End of Terminal Display]
Neat!
ALL THIS SAID, ALL OF YOUR HOMEWORK PROBLEMS ARE DONE IN THE CONTEXT OF FUNCTIONS JUST
LIKE THE ONE YOU SAW ABOVE. THE DIFFERENCE IS I INCLUDE A DOCTEST. WHAT DOES THAT MEAN?
Well. It means that when I define a function, I don't just implement it, but I also
include a string that documents what it's supposed to do - aptly named a "docstring."
This docstring includes a series of tests to make sure one implements it correctly -
hence the name, "doctest." As an example, the above function would look like this:
"""
#Q00: DOCSTRINGS/DOCTESTS********************************
#Re-implement the function above!
def exp(num, pow):
"""
This function takes in two numbers and returns the first number to the power
of the second number. Notice that this docstring explains what I explained above,
but more concisely. Beneath this description is a doctest that shows how the
function should behave, once it's implemented. For the sake of clarity and ease,
the doctest for this function is exactly the same as the examples I used above.
>>> exp(2, 5)
32
>>> exp(4,8)
65536
>>> exp(9,3)
729
"""
#YOUR CODE HERE
return
"""
Made it this far? Great! Let's review one of the most important structures in Python:
lists. Lists are an incredibly convenient way to store data, and you will have to
work with them throughout your time as a programmer - especially if you decide to
pursue astronomy. So how do lists work? Let's see!
[Below is a Terminal Display]
>>> [1, 2, 3, 4, 5] #This is a list
[1, 2, 3, 4, 5]
>>> [] #This is an empty list
[]
>>> new_list = [] #Let's set a variable equal to an empty list
>>> new_list
[]
>>> type(new_list) #Querying the datatype
<class 'list'>
>>> len(new_list) #Length of the list
0
>>> new_list.append(10) #Appending a value to the list
>>> len(new_list)
1
>>> new_list #Appending modifies the original list
[10]
>>> new_list[1]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
IndexError: list index out of range
>>> new_list[0] #The above error occurs because list-indexing begins at 0
10
>>> new_list.append('Not a Number')
>>> len(new_list)
2
>>> new_list #You are allowed to mix datatypes in a list.
[10, 'Not a Number']
>>> new_list[1]
'Not a Number'
>>> print(new_list[1]) #Printing displays strings without quotes.
Not a Number
[End of Terminal Display]
So those are some of the basics of lists. Let's start with something easy regarding lists.
Finish the function below so that it returns a list of 5 elements.
"""
#Q01: LIST FIVE THINGS
def five_things():
"""
This function takes in no arguments and returns a list of 5 arbitrary things.
>>> test = five_things()
>>> len(test)
5
"""
my_list = []
#YOUR CODE HERE
#YOU CAN APPEND 5 RANDOM THINGS TO my_list OR RETURN ANOTHER LIST OF 5 RANDOM ITEMS
return #SOME LIST
"""
Lists often go hand in hand with loops in Python. Lists can be constructed from loops,
or lists can be parsed through with loops. For instance, suppose I have a set of data
that I want to sum. I could do something like this:
[Below is a Terminal Display]
>>> some_data = [2, 4, 6, 8, 10, 3, 3, 3]
>>> sum = 0 #Why would I set this equal to 0?
>>> for item in some_data:
... sum += item
...
>>> sum
39
[End of Terminal Display]
Now you try!
"""
#Q02: Multiply List Elements
def list_product(some_list):
"""
This function takes in a list of numbers and returns the product of the list elements.
>>> test = [2, 2, 2]
>>> list_product(test)
8
>>> test2 = [10, 20, 30, 0]
>>> list_product(test2)
0
>>> test3 = [1, 1, 1, -1]
>>> list_product(test3)
-1
"""
#YOUR CODE HERE
return
"""
Sometimes, we have to sift through a list and cut out some elements. How can we do this?
Well, we know how to read a list using a for loop - which goes through list elements one
at a time, in order. But you should have learned about something called a "conditional."
The anatomy of a conditional statement is like so:
if <condition>:
<code>
Note that <code> will run only if Python evaluates <condition> to be True. For instance,
if x == 1:
print("Success!")
Here, Python will only display Success! if some variable x is equal to 1. A longer conditional
layout can be as follows:
if <condition>:
<code>
elif <condition>:
<code>
else:
<code>
For if and elif statements, the subsequent indented code will only run if <condition> evaluates
to True. If none of the if statements are True, then the code in the else block will run.
For example,
[Below is a Terminal Display]
>>> def is_positive(num):
... if num > 0:
... print("Positive!")
... else:
... print("Not positive...")
...
>>> is_positive(20)
Positive!
>>> is_positive(-1)
Not positive...
>>> is_positive(0)
Not positive...
[End of Terminal Display]
Now you try!
"""
#Q03: Number Classifier:
def classify_number(number):
"""
This function takes in a number and prints whether it is positive, negative, or neither.
>>> test_numbers = [-100, 0, 100]
>>> classify_number(1)
Positive.
>>> classify_number(-1)
Negative.
>>> classify_number(test_numbers[1])
Zero.
"""
#YOUR CODE HERE
return
"""
Now let's combine what you know about iterating through lists and conditional statements.
"""
#Q04: Greater Than 100?
def over_hundred(some_list):
"""
This function takes in a list and returns whether the sum of the elements of that list
is greater than 100.
>>> test_numbers = [-100, 0, 100]
>>> over_hundred(test_numbers)
False
>>> over_hundred([50, 50, 50])
True
>>> over_hundred([20, 20, 20, 20, 20, 20, -200])
False
>>> nicholas_christmas_lights = [-2000, 9000]
>>> over_hundred(nicholas_christmas_lights)
True
>>> sorry_about_that_problem = [100, -100, 100, -100, 200]
>>> over_hundred(sorry_about_that_problem)
True
>>> i_am_the_best = [100]
>>> over_hundred(i_am_the_best)
False
"""
#YOUR CODE HERE
return
"""
Fantastic work. Several more things. The first is a dictionary. If given the choice, do not
work with dictionaries. They are fricken slow. However, they are more readable and a useful
structure, especially for databases. They look like this:
[Below is a Terminal Display]
>>> some_dict = {}
>>> some_dict
{}
>>> some_dict['key'] = 'value'
>>> some_dict
{'key': 'value'}
[End of Terminal Display]
Instead of using integers as indices (0, 1, 2, 3, ...) like lists, dictionaries use
user-defined keys as indices, as shown above. So unless you set 0 equal to something
in a dictionary, asking for the 0th index - dict[0], for instance - would make no
sense. Like a list, the elements of a dictionary can be anything.
[Below is a Terminal Display]
>>> another_dict = {}
>>> another_dict[0] = 0
>>> another_dict[1] = 'Random'
>>> another_dict[2] = []
>>> another_dict
{0: 0, 2: [], 1: 'Random'}
>>> another_dict[2].append([])
>>> another_dict
{0: 0, 2: [[]], 1: 'Random'}
>>> list(another_dict.keys())
[0, 2, 1]
>>> list(another_dict.values())
[0, [[]], 'Random']
[End of Terminal Display]
Yes, lists can be in lists. Yes, dictionaries can be in dictionaries. No, dictionaries
do not always display in the order that you construct them in. Okay, now I want you
to build a dictionary using what you've learned.
"""
#Q05: NAME AND AGE
def name_age(a_list):
"""
This function takes in a list containing two elements: a string and a number.
This function returns a dictionary with the string as a key and the number as a value.
>>> person_age = ['Captain Underpants', 30]
>>> name_age(person_age)
{'Captain Underpants': 30}
>>> list(name_age(person_age).keys())
['Captain Underpants']
>>> list(name_age(person_age).values())
[30]
"""
#YOUR CODE HERE
return
"""
Great. Hopefully all of the work you've done up until this point has clarified a lot of
what lecture has covered. You will learn more throughout the course, and this covers
some of the fundamentals you will need - variables, lists, iterating, dictionaries, and
functions. You have been exposed to other things already - modules/packages such as
numpy and astropy - but these are the fundamentals you will absolutely need as the course
progresses!
Okay, now consider the following problem:
"""
#Q06: USE EVERYTHING YOU KNOW FROM ABOVE FOR THIS!
def filter_flux(fluxes, allowance):
"""
This function takes in a list of flux readings (basically the amount of light we see) and
an allowance number (a threshold for acceptance or rejection) and returns either an
average value or a notice of rejection. If this method of accepting or rejecting measurements
seems wrong to you, trust that feeling. This is a pretty bad method... But it's a start!
And you should try to implement it before trying for more complicated methods!
>>> light_data = [300, 400, 600, 800, 800, 900, 900, 1000]
>>> light_data2 = [21, 78, 32, 56, 128, 98, 82, 46, 55]
>>> light_data3 = [1, 3, 15, 39, 130, 311, 529, 601, 732]
>>> filter_flux(light_data, 300)
712.5
>>> filter_flux(light_data2, 300)
Reject this light curve.
>>> filter_flux(light_data3, 300)
Reject this light curve.
"""
#YOUR CODE HERE
return
"""
Lastly, modules. You need to know some basics about these. The ones of primary concern are numpy,
astropy, and matplotlib. Importing a module has the following syntax:
import <module>
You may also write:
import <module> as <shorthand>
This way, you can call a module without typing the entire thing. For instance,
[Below is a Terminal Display]
>>> import pandas as pd
>>> type(pd)
<class 'module'>
[End of Terminal Display]
"""
#Q07: IMPORT THREE MODULES
"""Here, I want you to import numpy (using the name np), matplotlib.pyplot (using the name
plt), and Table from astropy.table."""
#import _ as _
#import _ as _
#from _ import _
"""
LAST FEW REVIEW REMARKS.
You can assign multiple variables at once. For instance,
[Below is a Terminal Display]
>>> a, b = 1, 2
>>> a
1
>>> b
2
[End of Terminal Display]
Numpy arrays are also more convenient to work with than lists. For instance, instead of
using for loops, numpy comes with built-in methods that do a lot of what you'd like to
do. For instance,
[Below is a Terminal Display]
>>> np.array([1, 2, 3]).sum()
6
>>> np.array([1, 1]).mean()
1.0
[End of Terminal Display]
Okay. That was a bit quick, but it should suffice for the last problem.
"""
#Q08a:
def last_problem_a(a_list):
"""
This function takes in a list and returns a numpy array of that same list, as well
as the sum and mean of the array. As a hint, you do not have to write loops here!
>>> type(Table)
<class 'type'>
>>> type(plt)
<class 'module'>
>>> data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> array, sum, mean = last_problem_a(data)
>>> type(array)
<class 'numpy.ndarray'>
>>> sum
55
>>> mean
5.5
"""
#YOUR CODE HERE
return
#Q08b:
def last_problem_b(database):
"""
This function takes in a database (STRUCTURED AS A DICTIONARY) and returns a numpy
array of mean values.
For instance, if I have a dictionary like so:
{'a': [100, 200], 'b': [100, 200, 300]},
then my function should return - maybe in a different order:
array([150, 200])
>>> data = {'star_a': [10, 20, 30, 40, 50], 'star_b': [0, 5, 10], 'star_c': [200, 400, 600]}
>>> averages = last_problem_b(data)
>>> np.array(averages).mean()
145.0
"""
#YOUR CODE HERE
return
#NOTE: The test in the last problem is to take the mean value of your resultant list of means.
#This is because the dictionary values are not always in the order you generate them in.
#If this is confusing, don't worry for now.
"""
Review these things! Part 1 of the midterm was intended to be primarily review. It's okay
if you did not get these subjects before, and it's okay if you don't fully understand it now.
Next week, Midterm Part 2, will have you working through an astropy table to analyze some
data using the skills you've learned in this review!
"""
|
[
"nrui@berkeley.edu"
] |
nrui@berkeley.edu
|
3662c862ea6338dc8755352414e07032cbac6664
|
e1175d0d4bd4fb0a19c1c55cf92c5acf2fcf6e38
|
/holocron/models/__init__.py
|
7991fdd3ad617336a208ea82800509448a481442
|
[
"MIT"
] |
permissive
|
waterbearbee/Holocron
|
96db7103a18994dcd14aac260bbc36b70b5d1974
|
2b3c8a4f2ef59b9170546bd1d7907a971d47422e
|
refs/heads/master
| 2022-05-24T21:34:16.348575
| 2020-05-03T21:10:37
| 2020-05-03T21:10:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
from .res2net import *
from .resnets import *
from .darknet import *
from .detection import *
|
[
"noreply@github.com"
] |
waterbearbee.noreply@github.com
|
3e49c2fbb08e1b3681c2720668a763a157e7f454
|
01874b9d88c5d8d16ed73303685e98b007d025e8
|
/tritium/apps/subscriptions/migrations/0005_subscription_abi_methods.py
|
d8e567317e1c5af629ccb0e529243eab91fb0518
|
[] |
no_license
|
oohmygaud/txgun-backend
|
191dce754f6b84855b3625564ed1722033a6ab76
|
0e96798fda4a1666f309dbfc30a9dee1bb3eabe7
|
refs/heads/master
| 2022-05-02T05:01:18.147765
| 2020-03-31T18:24:55
| 2020-03-31T18:24:55
| 251,443,986
| 2
| 0
| null | 2022-04-22T23:15:03
| 2020-03-30T22:36:24
|
Python
|
UTF-8
|
Python
| false
| false
| 425
|
py
|
# Generated by Django 2.0 on 2019-05-07 16:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('subscriptions', '0004_subscription_specific_contract_calls'),
]
operations = [
migrations.AddField(
model_name='subscription',
name='abi_methods',
field=models.TextField(blank=True, null=True),
),
]
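# For context, this migration corresponds to a field roughly like the following
# on the Subscription model (a sketch, not the project's actual models.py):
#
#     class Subscription(models.Model):
#         ...
#         abi_methods = models.TextField(blank=True, null=True)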
|
[
"worshamaj@gmail.com"
] |
worshamaj@gmail.com
|
3b8094b6039760bf30e4c185659fa96eeb16bb8a
|
e812a5f022071c9bec7e9e9e2466887ba7f8ae4c
|
/mysite/mysite/urls.py
|
bfa2fb5151045a6863bc31456d3a3538e2d6ac8b
|
[] |
no_license
|
jvfarrell/my-django-site
|
1fd7993155ae41a9c0307ef1f53e5a7ecd5454cb
|
9d04c99e0b1f6a719b4b37ee97f624ea7a1991f7
|
refs/heads/master
| 2022-11-23T05:46:31.811411
| 2017-06-16T17:47:28
| 2017-06-16T17:47:28
| 91,369,330
| 1
| 1
| null | 2022-11-21T15:15:40
| 2017-05-15T18:10:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,564
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from mysite.views import hello, my_homepage_view, current_datetime, hours_ahead, display_meta, contact, resume, home, nfl_analytics, videos_page
from books import views
import lol.views as lolview
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', home),
url(r'^hello/$', hello),
url(r'^resume/$', resume),
url(r'^home/$', home),
url(r'^nfl/$', nfl_analytics),
url(r'^time/$', current_datetime),
url(r'^time/plus/(\d{1,2})/$', hours_ahead),
url(r'^MetaDisplay/', display_meta),
url(r'^search-form/$', views.search_form),
url(r'^search/$', views.search),
url(r'^lolsearch/$', lolview.search),
url(r'^summoner/(.*)/$', lolview.summoner),
url(r'^summoner/$', lolview.summoner_landing),
url(r'^contact/$', contact),
url(r'^error/$', lolview.error),
url(r'^videos/$', videos_page),
]
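# Note: unnamed capture groups in these patterns, e.g. (\d{1,2}) in time/plus/
# and (.*) in summoner/, are passed to the view as positional arguments
# (for instance, hours_ahead(request, offset)).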
|
[
"jvictorfarrell@gmail.com"
] |
jvictorfarrell@gmail.com
|
d19106c7aa31601b491ac5931a4fe3aecc22e1a3
|
32fd08556f6b00bcdb8266ec00cefa7f715b44b0
|
/src2/ch5/recog/cifar10-mlp.py
|
59a877df72c7b904ec3c24d9a18d091db4d27e27
|
[] |
no_license
|
TakahashiKazuki-0604/book-mlearn-gyomu
|
ae2c16143481b316a6da941a0140cea2fb999e8f
|
bf4248afca645ef4b3799dad70a29977c8179813
|
refs/heads/master
| 2022-09-19T23:01:53.405437
| 2020-05-29T14:16:56
| 2020-05-29T14:16:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,586
|
py
|
import matplotlib.pyplot as plt
import keras
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout
num_classes = 10
im_rows = 32
im_cols = 32
im_size = im_rows * im_cols * 3
# Load the data --- (*1)
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# Convert the data to one-dimensional arrays --- (*2)
X_train = X_train.reshape(-1, im_size).astype('float32') / 255
X_test = X_test.reshape(-1, im_size).astype('float32') / 255
# Convert the label data to one-hot format
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Define the model --- (*3)
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(im_size,)))
model.add(Dense(num_classes, activation='softmax'))
# Compile the model --- (*4)
model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# Run training --- (*5)
hist = model.fit(X_train, y_train,
batch_size=32, epochs=50,
verbose=1,
validation_data=(X_test, y_test))
# Evaluate the model --- (*6)
score = model.evaluate(X_test, y_test, verbose=1)
print('accuracy =', score[1], 'loss =', score[0])
# Plot the training progress --- (*7)
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.title('Accuracy')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Loss')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
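# Note: standalone Keras (as imported above) records accuracy under the 'acc'
# and 'val_acc' history keys; newer tf.keras releases use 'accuracy' and
# 'val_accuracy' instead. A version-agnostic lookup, if needed:
# acc_key = 'acc' if 'acc' in hist.history else 'accuracy'
# plt.plot(hist.history[acc_key])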
|
[
"kujira@kujirahand.com"
] |
kujira@kujirahand.com
|
ead5f64b33ff99ac62a5610baa54d4bb1cf71562
|
4a03ee810730156cebe9e07d0f02984a1e4f98dc
|
/pandas/tests/indexing/multiindex/test_sorted.py
|
b1db930765093c1212aad66e4e229eeced7ea34a
|
[
"BSD-3-Clause"
] |
permissive
|
iRaied/pandas
|
2779fc4bf1b6b4896ef5f673dfdfd86310c1b176
|
b9e46ad1b10bce78464a07454dafd680273af6ff
|
refs/heads/master
| 2020-05-16T09:41:52.571366
| 2019-04-23T04:45:01
| 2019-04-23T04:45:01
| 182,958,410
| 1
| 0
|
BSD-3-Clause
| 2019-04-23T07:25:03
| 2019-04-23T07:25:02
| null |
UTF-8
|
Python
| false
| false
| 3,401
|
py
|
import numpy as np
from numpy.random import randn
from pandas.compat import lzip
from pandas import DataFrame, MultiIndex, Series
from pandas.util import testing as tm
class TestMultiIndexSorted:
def test_getitem_multilevel_index_tuple_not_sorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.loc[query_index, "data"]
xp_idx = MultiIndex.from_tuples([(0, 1, 0)], names=['a', 'b', 'c'])
xp = Series(['x'], index=xp_idx, name='data')
tm.assert_series_equal(rs, xp)
def test_getitem_slice_not_sorted(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.sort_index(level=1).T
# buglet with int typechecking
result = df.iloc[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
tm.assert_frame_equal(result, expected)
def test_frame_getitem_not_sorted2(self):
# 13431
df = DataFrame({'col1': ['b', 'd', 'b', 'a'],
'col2': [3, 1, 1, 2],
'data': ['one', 'two', 'three', 'four']})
df2 = df.set_index(['col1', 'col2'])
df2_original = df2.copy()
df2.index.set_levels(['b', 'd', 'a'], level='col1', inplace=True)
df2.index.set_codes([0, 1, 0, 2], level='col1', inplace=True)
assert not df2.index.is_lexsorted()
assert not df2.index.is_monotonic
assert df2_original.index.equals(df2.index)
expected = df2.sort_index()
assert expected.index.is_lexsorted()
assert expected.index.is_monotonic
result = df2.sort_index(level=0)
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_frame_getitem_not_sorted(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns.values)]
result = df['foo']
result2 = df.loc[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.loc['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index.values)]
result = s['qux']
result2 = s.loc['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
|
[
"jeff@reback.net"
] |
jeff@reback.net
|