| blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 2-616 chars) | content_id (string, 40 chars) | detected_licenses (list, 0-69 items) | license_type (string, 2 classes) | repo_name (string, 5-118 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 4-63 chars) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, 2-10.3M chars) | authors (list, 1 item) | author_id (string, 0-212 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
57c4c091a2ba52d1de4451b90cf6c0b231aeb8e0
|
70eb368ea25ad8767e6713ea88936642154f43ae
|
/workUndone/Suite15/OpenFlight_API/samples/scripts/egswitch1.py
|
23612a022f3d36c1a2340cad12cd1a3882d46f71
|
[] |
no_license
|
CatalinaPrisacaru/Di-Java
|
816cb3428d5026fb63934e14d09c422aa1537b08
|
1c35b28e0b8c8f3c25afbc7b2c0a7fe8cac96c6b
|
refs/heads/develop
| 2021-01-18T02:23:47.759177
| 2016-04-11T19:55:35
| 2016-04-11T19:55:35
| 54,333,823
| 0
| 0
| null | 2016-03-20T18:36:51
| 2016-03-20T18:36:51
| null |
UTF-8
|
Python
| false
| false
| 4,400
|
py
|
##
##
## Sample file: egswitch1.py
##
## Objectives:
## Manipulate switch node masks
##
## Program functions:
## Add Switch Node Masks
## Get and Set bits in the masks
##
## API functions used:
## mgInitSwitchMask(), mgAddSwitchMask(),
## mgSetSwitchBit(), mgGetSwitchBit(),
## mgGetSwitchMaskCount(), mgGetSwitchMaskNo(),
## mgNewRec(), mgAttach(),
## mgNewDb(), mgWriteDb(), mgCloseDb()
##
##
import sys
# import OpenFlight API module
from mgapilib import *
def BuildSwitchMasks (switchRec):
    # build a set of switch masks, each one that turns on a
    # single child of the switch node
    numChildren = mgCountChild (switchRec)
    # create a set of new masks, each one turns on a
    # single child of the switch node
    for i in range (0, numChildren):
        # create a new mask that will turn on this child
        switchNo = mgAddSwitchMask (switchRec)
        # init all bits of this mask to off
        ok = mgInitSwitchMask (switchRec, switchNo, MG_FALSE)
        # then turn on the bit corresponding to this child
        ok = mgSetSwitchBit (switchRec, switchNo, i, MG_TRUE)

def PrintSwitchMasks (switchRec):
    # print the values of each of the bits of each of the
    # switch masks of the switch node

    # get number of masks defined for the switch node
    maskCount = mgGetSwitchMaskCount (switchRec)
    # get the current switch mask number (info only)
    ok, curMaskNo = mgGetSwitchMaskNo (switchRec)
    # get the values for each bit of the mask and print it
    for maskNo in range (0, maskCount):
        bitNo = 0  # bitnum represents the bit number
        mgSendMessage (MMSG_STATUS, "Mask %d :" % (maskNo))
        result, onFlag = mgGetSwitchBit (switchRec, maskNo, bitNo)
        while result:
            mgSendMessage (MMSG_STATUS, "\tbit %d : %d" % (bitNo, onFlag))
            bitNo = bitNo + 1
            result, onFlag = mgGetSwitchBit (switchRec, maskNo, bitNo)

def REPORT_NODE_CREATED(_node):
    if _node:
        print "Creating %s : Ok\n" % (mgGetName(_node))
    else:
        print "Creating %s : Failed\n" % (mgGetName(_node))

def REPORT_NODE_ATTACHED(_ok,_parent,_child):
    if _ok == MG_TRUE:
        print "Attaching %s to %s : Ok\n" % (mgGetName(_child), mgGetName(_parent))
    else:
        print "Attaching %s to %s : Failed\n" % (mgGetName(_child), mgGetName(_parent))

def main ():
    # check for proper arguments
    if len(sys.argv) < 2:
        print "\nUsage: %s <create_db_filename>\n" % (sys.argv[0])
        print "   Creates database: <create_db_filename>\n"
        print "   Creates switch node with masks\n"
        print "   Get and set bits in the masks\n"
        print "\n"
        return

    # initialize the OpenFlight API
    # always call mgInit BEFORE any other OpenFlight API calls
    #
    mgInit (None, None)

    # start a new OpenFlight database, overwrite if exists
    mgSetNewOverwriteFlag (MG_TRUE)
    print "\nCreating database: %s\n" % (sys.argv[1])
    db = mgNewDb (sys.argv[1])
    if db == None:
        msgbuf = mgGetLastError()
        print msgbuf, "\n"
        mgExit()
        return

    ## Throughout the following, error conditions are checked for
    ## and (in some cases) reported but processing will continue.
    ## In your code, you should consider appropriate action upon
    ## function failures.
    ##

    # create group, switch, and 3 object nodes
    group = mgNewRec (fltGroup)
    REPORT_NODE_CREATED (group)
    ok = mgAttach (db, group)
    REPORT_NODE_ATTACHED (ok, db, group)
    switchRec = mgNewRec (fltSwitch)
    REPORT_NODE_CREATED (switchRec)
    ok = mgAttach (group, switchRec)
    REPORT_NODE_ATTACHED (ok, group, switchRec)
    object1 = mgNewRec (fltObject)
    REPORT_NODE_CREATED (object1)
    object2 = mgNewRec (fltObject)
    REPORT_NODE_CREATED (object2)
    object3 = mgNewRec (fltObject)
    REPORT_NODE_CREATED (object3)
    ok = mgAttach (switchRec, object1)
    REPORT_NODE_ATTACHED (ok, switchRec, object1)
    ok = mgAttach (switchRec, object2)
    REPORT_NODE_ATTACHED (ok, switchRec, object2)
    ok = mgAttach (switchRec, object3)
    REPORT_NODE_ATTACHED (ok, switchRec, object3)

    # set up the switch masks
    BuildSwitchMasks (switchRec)
    # echo the values of the switch masks
    PrintSwitchMasks (switchRec)

    # write and close the database
    ok = mgWriteDb (db)
    if ok == MG_FALSE:
        print "Error writing database\n"
    ok = mgCloseDb (db)
    if ok == MG_FALSE:
        print "Error closing database\n"

    # always call mgExit() AFTER all OpenFlight API calls
    mgExit ()

main()
|
[
"albisteanu.sebastian@yahoo.com"
] |
albisteanu.sebastian@yahoo.com
|
386961a6d4ad8ba5e91b202f26a4853a3a5e894b
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/A_Primer_on_Scientific_Programming_with_Python/discalc/discrete_func_vec.py
|
41d60f2cf924caaec283b0bb8405acdfbc3d5aaa
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 421
|
py
|
#!/usr/bin/env python
def discrete_func(f, a, b, n):
    x = linspace(a, b, n+1)
    y = f(x)
    return x, y

from scitools.std import *

try:
    f_formula = sys.argv[1]
    a = eval(sys.argv[2])
    b = eval(sys.argv[3])
    n = int(sys.argv[4])
except:
    print "usage: %s 'f(x)' a b n" % sys.argv[0]
    sys.exit(1)

f = StringFunction(f_formula)
f.vectorize(globals())
x, y = discrete_func(f, a, b, n)
plot(x, y)
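# A hypothetical invocation (assuming names like sin and pi are provided by the
# wildcard scitools.std import used above):
#   python discrete_func_vec.py 'sin(x)' 0 2*pi 50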
|
[
"bb@b.om"
] |
bb@b.om
|
d2b74819323cbf09c35bedc8eec0bafbfcd622eb
|
3551e031a4fb8b3e67f374a457f0fc5f7b56cd24
|
/Job4/models.py
|
e541ed1f537fb66a88052ff1e81cfa0c2a7b9528
|
[] |
no_license
|
klaskan/master-second-experiment
|
8b8114987d1d09076f3b9bc4af3c4e5a22d668cd
|
eef8d7a2360e706246c71897254828b113f99b44
|
refs/heads/master
| 2022-12-08T13:53:32.575309
| 2020-08-23T17:54:20
| 2020-08-23T17:54:20
| 279,106,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,423
|
py
|
from otree.api import (
    models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
    Currency as c, currency_range
)

doc = ''

class Constants(BaseConstants):
    name_in_url = 'Job4'
    players_per_group = None
    num_rounds = 1

class Subsession(BaseSubsession):
    pass

class Group(BaseGroup):
    pass

class Player(BasePlayer):
    typeracer = models.LongStringField(blank=True)
    this_round_point = models.FloatField(initial=0)
    random_num = models.IntegerField(initial=0)
    declare = models.FloatField(min=0)
    fine = models.FloatField(initial=0)
    not_deklarert = models.FloatField(initial=0)
    score_after_taxes = models.FloatField()
    got_audited_score = models.FloatField(initial=0)
    not_audited_score = models.FloatField()
    eq1 = models.IntegerField(label='11 - 7 = ')
    eq2 = models.IntegerField(label='5 * 8 =')
    eq3 = models.IntegerField(label='45 - 30 = ')
    eq4 = models.IntegerField(label='44 + 30 ')
    eq5 = models.IntegerField(label='6 * 6 = ')
    eq6 = models.IntegerField(label='4 + 17 =')
    eq7 = models.IntegerField(label='52 - 30 = ')
    eq8 = models.IntegerField(label='44 - 33 = ')
    eq9 = models.IntegerField(label='90 - 30 = ')
    eq10 = models.IntegerField(label='28 - 11 = ')

    def declare_max(self):
        return self.this_round_point

    def posibility(self):
        import random
        self.random_num = random.randint(1, 100)
|
[
"klaskan@pop-os.localdomain"
] |
klaskan@pop-os.localdomain
|
3c8918400e5b1ec794124fbc66eb5763ad955796
|
5421a4f93da3aad9f146cd2e72ddcdb398ddb2ef
|
/python/190324.py
|
d5c8b1bd1963f1aa29ac643d32791b93f3042823
|
[
"Apache-2.0"
] |
permissive
|
entroychang/main
|
dcccc372fef2ae5e67b551442b67192a6ea97052
|
1e1f51a4b63adaaaf21f8fda58daec3fc06b8b1e
|
refs/heads/master
| 2022-06-04T21:19:28.180978
| 2019-10-15T13:30:06
| 2019-10-15T13:30:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
i, j = map(int, input().split())  # split the two numbers entered on one line
lenth = list()  # list that stores the sequence lengths
print(i, j)
if i > j:
    i, j = j, i  # if i is larger, swap the two
for x in range(i, j):  # bounded loop
    data = list()  # the 3n+1 sequence
    #data.append(x)
    print(x)
    while x != 1:  # compute the sequence
        if x % 2 == 0:
            x = x // 2
            data.append(x)
        else:
            x = x*3 + 1
            data.append(x)
    lenth.append(len(data))  # append this length to the list of lengths
print(max(lenth))  # find the maximum
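# The loop above implements the Collatz (3n+1) rule: even values are halved,
# odd values map to 3*x + 1, and the steps taken until x reaches 1 are counted;
# the final print reports the longest sequence length found over the range.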
|
[
"trusaidlin@gmail.com"
] |
trusaidlin@gmail.com
|
8dfe69d1359e3056848825729008657f98e5359f
|
6bb2634bb201139f576b8a6e0f2c4445bcdf9464
|
/Array/18.BuySellStock.py
|
89390d40bde60f1e1742d35ab1f04d90b1636690
|
[] |
no_license
|
mayankmr2-dev/challenge
|
f3fcab375d6a39f9dddaaa6a9aa3ff08db226fc5
|
6a6f0b520d3ba4a4b63e2c82776a92a58f45534b
|
refs/heads/master
| 2023-07-16T12:02:55.389154
| 2021-08-29T18:31:33
| 2021-08-29T18:31:33
| 266,380,080
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
ar1 = [7, 1, 5, 3, 6, 4]
def solution(arr):
    n = len(arr)
    left = 0
    right = 1
    maxP = 0
    while right < n:
        if arr[right] <= arr[left]:
            left = right
        else:
            diff = arr[right] - arr[left]
            maxP = max(maxP, diff)
        right += 1
    return maxP

if __name__ == '__main__':
    print(solution(ar1))
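# Worked example for ar1 above: the two-pointer scan keeps the lowest price so
# far at `left` and tracks the best spread seen, so solution([7, 1, 5, 3, 6, 4])
# returns 5 (buy at 1, sell at 6).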
|
[
"mayankmr2@gmail.com"
] |
mayankmr2@gmail.com
|
c07dce52c212b33b85e1d54cd6385992a9fa5fcc
|
e4f04c827c2402440915619a51dfbf0700688398
|
/03Flask/falskday01/app7.py
|
0904b0977e69f37fee0ad24c840d7a45ff146492
|
[] |
no_license
|
wangxinglong74520/filename
|
58177cb0d1dfc262713816d175334bbd52ace3b8
|
3347ab61ed1cf0290c6cc431d9931fb0975a612f
|
refs/heads/main
| 2023-03-30T21:55:23.207561
| 2021-03-22T08:21:40
| 2021-03-22T08:21:40
| 349,773,715
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : app7.py
# @Time : 2021/2/15 15:50
# @Author : Merle
# @Site :
"""
"""
from flask import Flask, request, url_for, make_response, render_template
import setting
app = Flask(__name__)
app.config.from_object(setting)
@app.route('/show1')
def show1():
    girls = ['杨幂', '如花', '孙艺珍', '孙尚香', '貂蝉', '林允儿']
    users = [
        {'user': '战三1', 'passwd': 123121},
        {'user': '战三2', 'passwd': 2112123},
        {'user': '战三3', 'passwd': 3121233},
    ]
    return render_template('show-7.html', girls=girls, users=users)

if __name__ == '__main__':
    app.run()
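# Assumes a Jinja2 template named show-7.html under the app's templates/
# directory that renders the `girls` and `users` context variables passed above.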
|
[
"1639837640@qq.com"
] |
1639837640@qq.com
|
4822c14902d0d55628fcbe0c7a9f1a9c3c9a7292
|
dbf7512728bdfa2302ad537112ad49bd736ff87c
|
/share/qt/clean_mac_info_plist.py
|
c33db216e1e69f262bfb4cc1f0be8089c1f336cb
|
[
"MIT"
] |
permissive
|
ohathar/easysend
|
a6acd4352048ae07895b7fe4f9c816b4682aec31
|
f5cc8ca750bef627fb0eb73baf2c80e65c2e7d7f
|
refs/heads/master
| 2021-01-10T22:29:41.570756
| 2015-08-02T16:38:51
| 2015-08-02T16:38:51
| 39,847,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 898
|
py
|
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Easysend-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Easysend-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"easysend-qt.pro"
for line in open(fileForGrabbingVersion):
    lineArr = line.replace(" ", "").split("=");
    if lineArr[0].startswith("VERSION"):
        version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
|
[
"zach.mcelroy@gmail.com"
] |
zach.mcelroy@gmail.com
|
40098a3b910d8c207f2b2ab9fcda0e117533f5a6
|
2f72e60c1b267206ef5723cf8496efb1ee19bb10
|
/vk_parser.py
|
4b37dc3384667ca0d914b2e947eafd30a8e84227
|
[] |
no_license
|
sh1n0b1n0m0n0/vk_parser
|
d378a2c751827847d2d8caa66f71d9f3fb6ad2c6
|
0ac83255431ceeb37518213d086f8e8e259ea166
|
refs/heads/main
| 2023-04-28T04:41:36.713939
| 2021-05-23T06:40:54
| 2021-05-23T06:40:54
| 317,965,235
| 0
| 0
| null | 2021-05-22T11:17:50
| 2020-12-02T19:10:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,527
|
py
|
import requests
import settings
import time
from datetime import datetime
import csv
from urllib.error import HTTPError
from webapp.models import Post, Group, Comment, db
import sqlite3
def take_groups(group_name):
    group = []
    VK_GROUPS = 'https://api.vk.com/method/groups.getById'
    response = requests.get(VK_GROUPS,
                            params={
                                'access_token': settings.TOKEN,
                                'v': settings.API_VERSION,
                                'group_id': group_name})
    response.raise_for_status()
    data = response.json()['response']
    group.extend(data)
    time.sleep(1)
    for item in group:
        bd_save_groups(id=item['id'],
                       screen_name=item['screen_name'],
                       name=item['name'])

def take_posts(group_name):
    all_posts = []
    count = 100
    offset = 0
    VK_POSTS = 'https://api.vk.com/method/wall.get'
    DOMAIN = str(group_name)
    try:
        while offset < 100:
            response = requests.get(VK_POSTS,
                                    params={
                                        'access_token': settings.TOKEN,
                                        'v': settings.API_VERSION,
                                        'domain': DOMAIN,
                                        'count': count,
                                        'offset': offset
                                    })
            response.raise_for_status()
            data = response.json()['response']['items']
            offset += count
            all_posts.extend(data)
            time.sleep(0.5)
        for post in all_posts:
            bd_save_posts(group_id=post['from_id'],
                          post_id=post['id'],
                          date=datetime.fromtimestamp(post['date']),
                          text=post['text'],
                          likes=post['likes']['count'])
            take_comments(group_id=post['from_id'],
                          group_name=DOMAIN,
                          owner_id=post['from_id'],
                          post_id=post['id'])
    except HTTPError as http_err:
        print(f'HTTP error occurred: {http_err}')
    except Exception as err:
        print(f'Other error occurred: {err}')

def take_comments(group_id, group_name, owner_id, post_id):
    all_comments = []
    counts = 1000  # max number of comments
    offset = 0
    VK_COMMENTS = "https://api.vk.com/method/wall.getComments"
    DOMAIN = group_name
    try:
        response = requests.get(VK_COMMENTS,
                                params={
                                    'access_token': settings.TOKEN,
                                    'v': settings.API_VERSION,
                                    'domain': DOMAIN,
                                    'count': counts,
                                    'offset': offset,
                                    'owner_id': owner_id,
                                    'post_id': post_id
                                })
        data = response.json()['response']['items']
        all_comments.extend(data)
        time.sleep(0.5)
        for comment in all_comments:
            bd_save_comments(group_id=group_id,
                             post_id=comment['post_id'],
                             owner_id=comment['id'],
                             date=datetime.fromtimestamp(comment['date']),
                             comment_text=comment['text'],
                             likes=comment['thread']['count'],
                             sentiment=0)
    except HTTPError as http_err:
        print(f'HTTP error occurred: {http_err}')
    except Exception as err:
        print(f'Other error occurred: {err}')

def write_to_text(posts):
    with open('waha.txt', 'a', encoding="utf-8") as file:
        for text in posts:
            str_id = str(text["owner_id"])
            file.write(f'owner_id = {str_id}\n{text["text"]}\n\n##########################################################\n\n')

def write_to_csv(posts):
    with open('wrongart.csv', 'w', encoding="utf-8") as file:
        wr = csv.writer(file, dialect='excel')
        for post in posts:
            wr.writerows(post['text'])

def bd_save_groups(id, screen_name, name):
    group_exists = Group.query.filter(Group.group_id == id).count()
    url_exists = Group.query.filter(Group.domain == screen_name).count()
    group_group = Group(url='https://vk.com/public' + str(id),
                        group_id=-id,
                        domain=screen_name,
                        group_name=name)
    if not (url_exists and group_exists):
        try:
            db.session.add(group_group)
            db.session.commit()
        except sqlite3.IntegrityError as int_err:
            print(f"ooops it is {int_err}")
        finally:
            db.session.close()

def bd_save_posts(group_id, post_id, date, text, likes):
    post_exists = Post.query.filter(Post.post_id == post_id).count()
    group_exists = Post.query.filter(Post.group_id == group_id).count()
    print("posts and groups exists= ", post_exists, group_exists)
    post_post = Post(group_id=group_id,
                     post_id=post_id,
                     date=date,
                     text=text,
                     likes=likes)
    if not (post_exists and group_exists):
        try:
            db.session.add(post_post)
            db.session.commit()
        except sqlite3.IntegrityError as int_err:
            print(f"ooops it is {int_err}")
        finally:
            db.session.close()

def bd_save_comments(group_id, post_id, owner_id, date, comment_text, likes, sentiment):
    post_exists = Comment.query.filter(Comment.post_id == post_id).count()
    owner_exists = Comment.query.filter(Comment.owner_id == owner_id).count()
    print('posts and owners exists=', post_exists, owner_exists)
    comm_comm = Comment(group_id=group_id,
                        post_id=post_id,
                        owner_id=owner_id,
                        date=date,
                        comment_text=comment_text,
                        likes=likes,
                        sentiment=sentiment)
    if not (post_exists and owner_exists):
        try:
            db.session.add(comm_comm)
            db.session.commit()
        except sqlite3.IntegrityError as int_err:
            print(f"ooops it is {int_err}")
        finally:
            db.session.close()
|
[
"nindzja.t001@gmail.com"
] |
nindzja.t001@gmail.com
|
114f352e57bda54372e23e977fd49df9d4e87a5d
|
6575f6be2cfc0681b3e1b3613d930aff7ceaf957
|
/CS372_HW1_code_[20160650].py
|
7f4f108bb1384de4b8fc09cda67d69b7d521d0f1
|
[] |
no_license
|
jsch8q/CS372_NLP_with_NLTK
|
3e08b4108133e4b19d78e17933f75a02f21fa500
|
7798d5db333c5ba3f734b58d5a00c6b462882adc
|
refs/heads/master
| 2023-04-23T13:10:12.041348
| 2021-05-18T12:30:50
| 2021-05-18T12:30:50
| 264,391,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,046
|
py
|
import nltk
import time
from nltk.corpus import reuters
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
now = time.time()
# To analyze better we use wordnet Lemmatizer
# and the corpus we use is the Reuters corpus
wnl = nltk.WordNetLemmatizer()
reuter_text = nltk.Text(reuters.words())
def similarity_likelihood(w1, tuple1):
    # a test function to see if our triple satisfies the standards
    # the triple is packed as w1, (w2, w3), e.g. 'extol', ('praise', 'highly')
    w2, w3 = tuple1
    # use synsets to get the definition string
    s1 = wn.synsets(w1)
    s2 = wn.synsets(w2)
    s3 = wn.synsets(w3)
    if wnl.lemmatize(w1) == wnl.lemmatize(w2) or wnl.lemmatize(w1) == wnl.lemmatize(w3):
        # we want similar phrases, not phrases with essentially the same words.
        return False
    # get all possible parts of speech each word can have
    w1_pos = set([sset.pos() for sset in s1])
    w2_pos = set([sset.pos() for sset in s2])
    w3_pos = set([sset.pos() for sset in s3])
    sset_list = [sset1 for sset1 in s1]
    if set('n') == w1_pos:
        # we at least want to have a possibility of w1 not being a noun.
        # we do check this below again, but for early detection we add this step.
        return False
    excellence = False
    # for each synset in synsets...
    for sset in sset_list:
        # ...get the part of speech...
        target_pumsa = sset.pos()
        # ...and if the synset is not a noun...
        if not target_pumsa == 'n':
            # ...get the definition string of the synset...
            defs = sset.definition()
            # ...where if w2 or w3 is in the describing string and might have the same part of speech as w1, while the other one has a possibility of being an adverb
            if w2 in defs:
                if len(set(target_pumsa) & w2_pos) > 0 and 'r' in w3_pos:
                    excellence = True
            elif w3 in defs:
                if len(set(target_pumsa) & w3_pos) > 0 and 'r' in w2_pos:
                    excellence = True
    return excellence

stopword = stopwords.words()
print("precomputing... \nto inform you the progress, the numbers will count up to %1.1f million, twice." %(len(reuter_text) / (10 ** 6)))

# we want to find pairs of w1 and (w2, w3) so that there exist two words w_a and w_b such that both strings (w_a + w1 + w_b) and (w_a + w2 + w3 + w_b) exist in the corpus.
# so we make a python dictionary of 3-consecutive words and 4-consecutive words, where the key is the first and last word pair and the value of the key is the middle word(s).
# further we just discard non-alphabetic tokens and stopwords to improve quality.
trigrams = {}
for i in range(len(reuter_text) - 2):
    if (i % 100000 == 0):
        print(i)
    w1, w2, w3 = reuter_text[i: i+3]
    if w2.isalpha() and not w2.lower() in stopword:
        w1 = w1.lower()
        w2 = w2.lower()
        w3 = w3.lower()
        if (w1, w3) in trigrams:
            trigrams[(w1, w3)] = trigrams[(w1, w3)] | set([w2])
        else:
            trigrams[(w1, w3)] = set([w2])

quadgrams = {}
for i in range(len(reuter_text) - 3):
    if (i % 100000 == 0):
        print(i)
    w1, w2, w3, w4 = reuter_text[i: i+4]
    #print(w1, w2, w3)
    if w2.isalpha() and w3.isalpha() and (not w2.lower() in stopword) and (not w3.lower() in stopword):
        w1 = w1.lower()
        w2 = w2.lower()
        w3 = w3.lower()
        w4 = w4.lower()
        if (w1, w4) in trigrams:
            if (w1, w4) in quadgrams:
                quadgrams[(w1, w4)] = quadgrams[(w1, w4)] | set([(w2, w3)])
            else:
                quadgrams[(w1, w4)] = set([(w2, w3)])

# from the dictionaries made we look for a match; this and the previous step essentially find w1, (w2, w3) pairs with the same context.
res_list = []
search_table = dict()
inverse_search_table = dict()
print("%d keys to test are found, please be patient." %( len(list(quadgrams.keys())) ))
# for those matching pairs with the same context we use the test function defined above to see if they are 'synonyms' in the sense of the test function result.
for key in list(quadgrams.keys()):
    tests = [(target, bullet) for target in trigrams[key] for bullet in quadgrams[key]]
    for test in tests:
        w1, tuple1 = test
        # to avoid superfluous overlapping, if w1 or the (w2, w3) pair is already in the result list we reject this test case ...
        if w1 in search_table:
            break
        if tuple(sorted(tuple1)) in inverse_search_table:
            break
        w2, w3 = sorted(tuple1)
        # ...and also the case where w2 or w3 is not indisputably an adverb.
        # This step could be merged with the test function, but as an effort to reduce the running time of this code, checking such is done before calling the test function.
        if (not set([sset.pos() for sset in wn.synsets(w2)]) == set('r')) and (not set([sset.pos() for sset in wn.synsets(w3)]) == set('r')):
            break
        # finally the test function.
        if similarity_likelihood(w1, tuple1):
            res_list.append(test)
            search_table[w1] = tuple1
            inverse_search_table[tuple(sorted(tuple1))] = w1

# print out first 50 results of triples
fout = open("./CS372_HW1_output_[20160650].csv", 'w')
res = res_list[:50]
for triple in res:
    w1, w23 = triple
    w2, w3 = w23
    print(w1 + ',' + w2 + ',' + w3, file = fout)
fout.close()

# among the results we find those words which are "purely" adverbs, and print out the results according to their frequency of appearing in the full result list.
describing = [w for tup in list(inverse_search_table)\
                  for w in list(tup)\
                  if set([sset.pos() for sset in wn.synsets(w)]) == set('r')]
fd = nltk.FreqDist(describing)
adverbs = [adv for adv, _ in list(fd.most_common())]
print("candidates of intensity-modifying verbs : ", adverbs[:min(len(adverbs), 50)])
print("elapsed : %.6f" %(time.time() - now))
|
[
"jsch@kaist.ac.kr"
] |
jsch@kaist.ac.kr
|
bb36495ae9c725e71057d86118eab52cd0b1cbcb
|
24eccbb309064d6bb63d776354cb6fb861e3d1c6
|
/flask_API_intermidiate/5_Storing_Resources_in_SQL_DB/5_14_get_post_put_delete/Code_part/app.py
|
62e432e8aa3d82cb5e39d00428c7fbbfc1895da5
|
[] |
no_license
|
shown440/Flask_API_A_to_Z
|
e74f2e3c48185c04b0b66506fc22759c9d52555d
|
a8048ff5e46bfdf3f957d0771436dc7dd2fe92ab
|
refs/heads/master
| 2022-12-10T02:39:03.268361
| 2019-12-24T10:40:00
| 2019-12-24T10:40:00
| 194,473,971
| 1
| 1
| null | 2022-12-08T05:20:41
| 2019-06-30T04:16:42
|
Python
|
UTF-8
|
Python
| false
| false
| 533
|
py
|
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT
from security import authenticate, identity
from user import UserRegister
from item import Item, ItemList
app = Flask(__name__)
app.secret_key = "jose"
api = Api(app)
jwt = JWT(app, authenticate, identity) # /auth
api.add_resource(Item, "/item/<string:name>") ### http://127.0.0.1:5000/student/Shifullah
api.add_resource(ItemList, "/items/")
api.add_resource(UserRegister, "/register/")
if __name__ == '__main__':
app.debug=True
app.run()
|
[
"ahmed.shifullah@gmail.com"
] |
ahmed.shifullah@gmail.com
|
baebc6c8653d34a3449018406978c98ff7d671d3
|
9ebd09ff1957596817d00543cc04797fde6ba754
|
/thresholding/otsu_thresholding.py
|
b43660f328a970182bd61b749a424a02982f8b05
|
[] |
no_license
|
levilevi10/SurfaceDefectDetection
|
987a3dea0c82a8fc7c59a811fe3983d9af6979d1
|
240ed25da74404b3f785bdd513600a6e4c89942e
|
refs/heads/master
| 2023-02-19T07:57:31.914557
| 2023-02-07T03:32:17
| 2023-02-07T03:32:17
| 225,205,812
| 0
| 0
| null | 2019-12-01T18:54:11
| 2019-12-01T18:09:58
| null |
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
import cv2 as cv
import os
from pathlib import Path
#specify input and output folder
input_folder = Path(r'C:\Users\lvinzenz\Documents\Data\Image Recognition\SurfaceDefectDetection\LeoderBachelor\Images_to_use')
output_folder = "OtsuThresholding"
#get list of all images in folder
liste = []
for image in input_folder.iterdir():
if image.name.endswith('.jpg'):
liste.append(image.name)
#apply thresholding with otsu on each image in folder and safe output to thresholding_folder
for i in range(len(liste)):
image_path = os.path.join(input_folder, liste[i])
if image_path.endswith(".jpg"):
img = cv.imread(image_path, 0)
#use kernelsize for medianBlur [1, 3, 5]
blur_kernel = 5
median = cv.medianBlur(img, blur_kernel)
ret2,th2 = cv.threshold(median,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
os.chdir(os.path.join(str(input_folder)+"\\"+output_folder))
cv.imwrite(liste[i][0:-4] + "_blur" + str(blur_kernel) + "_"+ "Otsu.jpg", th2)
|
[
"noreply@github.com"
] |
levilevi10.noreply@github.com
|
2beae9e71e428414fad3d5f3bfdbb38d39016d8b
|
da4cd8f752a475ddb0298f19e26d1232657a08ff
|
/tests/llvm/datasets/anghabench_test.py
|
bb4149dca82082447c76cb489ba9c548846da6cd
|
[
"MIT"
] |
permissive
|
hughleat/CompilerGym
|
923e9ec42eae6100e3c474011d421ab408d54d62
|
6e2ce3ead07250738264db7171f6aef7aa468365
|
refs/heads/development
| 2023-08-04T11:34:33.523794
| 2021-06-28T17:00:38
| 2021-06-28T17:00:38
| 381,304,781
| 0
| 0
|
MIT
| 2021-06-29T09:09:38
| 2021-06-29T09:09:38
| null |
UTF-8
|
Python
| false
| false
| 2,238
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the AnghaBench dataset."""
import sys
from itertools import islice
from pathlib import Path
import gym
import pytest
import compiler_gym.envs.llvm # noqa register environments
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import AnghaBenchDataset
from tests.pytest_plugins.common import skip_on_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def anghabench_dataset() -> AnghaBenchDataset:
env = gym.make("llvm-v0")
try:
ds = env.datasets["anghabench-v1"]
finally:
env.close()
yield ds
def test_anghabench_size(anghabench_dataset: AnghaBenchDataset):
if sys.platform == "darwin":
assert anghabench_dataset.size == 1041265
else:
assert anghabench_dataset.size == 1041333
def test_missing_benchmark_name(anghabench_dataset: AnghaBenchDataset, mocker):
# Mock install() so that on CI it doesn't download and unpack the tarfile.
mocker.patch.object(anghabench_dataset, "install")
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://anghabench-v1$"
):
anghabench_dataset.benchmark("benchmark://anghabench-v1")
anghabench_dataset.install.assert_called_once()
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://anghabench-v1/$"
):
anghabench_dataset.benchmark("benchmark://anghabench-v1/")
assert anghabench_dataset.install.call_count == 2
@skip_on_ci
@pytest.mark.parametrize("index", range(250))
def test_anghabench_random_select(
env: LlvmEnv, anghabench_dataset: AnghaBenchDataset, index: int, tmpwd: Path
):
uri = next(islice(anghabench_dataset.benchmark_uris(), index, None))
benchmark = anghabench_dataset.benchmark(uri)
env.reset(benchmark=benchmark)
assert benchmark.source
benchmark.write_sources_to_directory(tmpwd)
assert (tmpwd / "function.c").is_file()
if __name__ == "__main__":
main()
|
[
"cummins@fb.com"
] |
cummins@fb.com
|
6408b28de6d713e64aff7aa47f8af2fda7bc40e8
|
3f5f5de7244cb1c9566a6275b9822ebf89b19d51
|
/course3/assignment2_q2.py
|
8b93fec95aa3d47fc51390972d9d9e429e54d2b2
|
[] |
no_license
|
denck007/Algorithms_specialization
|
03b2989fc2b5b3629544bd5e2060237e94b8d67e
|
2a9b795d3bbcccd5b1fce83d3ed431ec54d084a7
|
refs/heads/master
| 2020-03-30T00:18:42.269510
| 2019-01-25T02:51:21
| 2019-01-25T02:51:21
| 150,515,575
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,474
|
py
|
'''
Course 3 Week 2: Greedy Algorithms, Minimum Spanning Trees, and Dynamic Programming
Question 2: Clustering large dataset
We are given a large dataset with each node having 24 'values'.
The values are boolean.
We are tasked with finding the number of clusters needed so that the spacing between any 2 nodes
in separate clusters has at least 2 different 'values'
The dataset is large and it is advised not to try and measure the distance between all the points.

This solution is still not optimal, but it is still pretty quick. Optimal is <20 seconds, this is 450 seconds.
Most of the time (~70%) is spent creating a string to hash, and an additional 10% is spent creating an extended list object
Converting the extended list object to a bit object and bit fiddling would likely get us sub 40 seconds.

This solution creates a hash table of the 'locations' of each point as key
It then goes through and merges all vertices that have the same location
Then it goes through and inverts one bit at a time, and sees if that location is in the hash table
If it is then the nodes are merged
Then it goes through and inverts all of the other bits one by one, and sees if that value is in the hash table
This method could be cleaned up and have recursion added to make it more generic
'''
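# Illustration of the neighbor search described above (made-up 4-bit values, not
# from the assignment data): for the location 1010, the distance-1 probes flip
# one bit each (0010, 1110, 1000, 1011), and the distance-2 probes flip two
# distinct bits (e.g. 0110, 0011, 1001); any probe found in the hash table is
# merged into the same cluster.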
import os
import sys
sys.path.append("/home/neil/Algorithms_specialization")
from helpers.Heap import Heap
from helpers.UnionFind import UnionFind
import time
class list_string(list):
    '''
    extend the list class with the ability to turn it into a string
    of just the elements
    Not elegant, but handy
    '''
    def __init__(self,*args):
        list.__init__(self,*args)
        self.string = None # allows caching of the result

    #@profile
    def stringify(self):
        if self.string is None:
            self.string = ""
            for idx in range(self.__len__()):
                self.string += str(self.__getitem__(idx))
        return self.string

    def __hash__(self):
        return hash(self.stringify())

    def __copy__(self):
        # return a fresh copy whose cached string is rebuilt, rather than None
        new = list_string(self)
        new.stringify()
        return new

class HammingCluster():
    def __init__(self,fname,testing=False):
        '''
        Load the dataset and convert it to booleans
        '''
        self.fname = fname
        self.testing = testing
        self.keys_iter = 0
        self.union_iter = 0
        self.list_string_iter = 0

        with open(fname,'r') as f:
            data = f.readlines()
        self.num_nodes = int(data[0].strip().split()[0]) # number of nodes in the graph
        self.num_dims = int(data[0].strip().split()[1]) # number of dimensions the node has, number of values it has
        self.unionfind = UnionFind(self.num_nodes)

        self.data = {}
        for node,line in enumerate(data[1:]):
            vals = list_string(line.strip().split())
            if vals not in self.data:
                self.data[vals] = [node]
            else:
                self.data[vals].append(node)

        if testing:
            fname = fname.replace("input","output")
            with open(fname,'r') as f:
                self.correct_solution = int(f.read())

    def cluster(self):
        '''
        '''
        # look for vertices that are at the same location
        for key in self.data:
            self.keys_iter += 1
            if len(self.data[key]) != 1:
                u = self.data[key][0]
                for s_v in range(1,len(self.data[key])):
                    v = self.data[key][s_v]
                    self.union_iter += 1
                    self.unionfind.union_if_unique(u,v)

        for key in self.data:
            self.keys_iter += 1
            u = self.data[key][0]
            for idx_1 in range(self.num_dims):
                self.list_string_iter += 1
                u_value_new_1 = list_string(key)
                if u_value_new_1[idx_1] == "0":
                    u_value_new_1[idx_1] = "1"
                else:
                    u_value_new_1[idx_1] = "0"
                # check and see if the single bit change exists
                if u_value_new_1 in self.data:
                    v = self.data[u_value_new_1][0]
                    self.union_iter += 1
                    self.unionfind.union_if_unique(u,v)
                for idx_2 in range(idx_1+1,self.num_dims):
                    self.list_string_iter += 1
                    u_value_new_2 = list_string(u_value_new_1)
                    if u_value_new_2[idx_2] == "0":
                        u_value_new_2[idx_2] = "1"
                    else:
                        u_value_new_2[idx_2] = "0"
                    # see if the 2 bit change is in the data
                    _ = u_value_new_2.stringify()
                    if u_value_new_2 in self.data:
                        v = self.data[u_value_new_2][0]
                        self.union_iter += 1
                        self.unionfind.union_if_unique(u,v)
        return self.unionfind.num_groups

base_path = "course3/test_assignment2/question2"
with open("output.csv",'w') as f:
    f.write("n,dims,keys,union,list_string\n")

for fname in os.listdir(base_path):
    if "input" not in fname:
        continue
    count_end = fname.rfind("_")
    count_start = fname[:count_end].rfind("_")+1
    #if int(fname[count_start:count_end]) > 1024:
    #    continue
    print("{}".format(fname),end="")
    start_time = time.time()
    hc = HammingCluster(os.path.join(base_path,fname),testing=True)
    num_groups = hc.cluster()
    if hc.correct_solution != num_groups:
        print("\n\tExpected {:4} Got {:4} error {:4}".format(hc.correct_solution,num_groups,hc.correct_solution-num_groups))
        print("\tElapsed time: {:.1f}sec".format(time.time()-start_time))
        print("\tn: {} keys: {} union: {} list_string:{}\n".format(hc.num_nodes,hc.keys_iter,hc.union_iter,hc.list_string_iter))
    else:
        print(" Correct!")
    with open("output.csv",'a') as f:
        f.write("{},{},{},{},{}\n".format(hc.num_nodes,hc.num_dims,hc.keys_iter,hc.union_iter,hc.list_string_iter))
base_path = "course3/"
fname = "assignment2_q2.txt"
print("Starting assignment")
start_time = time.time()
hc = HammingCluster(os.path.join(base_path,fname),testing=False)
num_groups = hc.cluster()
print("\tGot {:4}".format(num_groups))
print("\tElapsed time: {:.1f}sec".format(time.time()-start_time))
|
[
"denck007@umn.edu"
] |
denck007@umn.edu
|
a3f9e4786860ae300ca008d167c2056b7e584589
|
c744ca4848ac0a6be88124a00bdd54b221a414ba
|
/lovers/chain.py
|
6a3519dffa2c5c5ec7331d98ccca86afb7709cc3
|
[] |
no_license
|
XunylYasna/Murang-Algocom
|
fb6242b03587c0c343a50f12951239eefd91873d
|
e88ee8be78d66a34119378b8b52a420d7173a5ca
|
refs/heads/master
| 2022-12-12T12:45:42.843631
| 2020-09-18T18:48:54
| 2020-09-18T18:48:54
| 285,238,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
def solve(cards):
    # cards.sort()
    room = 0
    i = 1
    while i < 10:
        if cards[i] > 0:
            room += 1
            cards[i] -= 1
            if i == 9:
                if cards[0] > 0:
                    room += 1
                    cards[0] -= 1
                    i = 0
        else:
            i = 10
        i += 1
    return room
cards = list(map(int,input().strip().split(" ")))
print("{}".format(solve(cards)))
|
[
"lynux_ansay@dlsu.edu.ph"
] |
lynux_ansay@dlsu.edu.ph
|
6c448d6f9ae383fc8a43c8c220302d78428d7576
|
72915375a374764c3a819e7ed2950fe1d66c069c
|
/tests/tests_base.py
|
8bcd59c2188301e4661fdeb63df1afdc5cfb54af
|
[] |
no_license
|
jespino/pydstorages
|
e4b3a6a854b17a229fa83d8b3dd5793413d3a672
|
6bc781606a3071116ff4feff329749225e294493
|
refs/heads/master
| 2020-04-06T05:45:36.578928
| 2013-08-15T09:11:14
| 2013-08-15T09:11:14
| 12,011,670
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,624
|
py
|
from __future__ import unicode_literals
import os
import gzip
import tempfile
import unittest
from pydstorages.base import File
from pydstorages.move import file_move_safe
from pydstorages.base import ContentFile
TEST_TEMP_DIR = os.path.join(os.path.dirname(__file__), 'temp')
class StorageTest(unittest.TestCase):
    pass

class ContentFileTest(unittest.TestCase):
    pass

class FileTests(unittest.TestCase):
    def test_context_manager(self):
        orig_file = tempfile.TemporaryFile()
        base_file = File(orig_file)
        with base_file as f:
            self.assertIs(base_file, f)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        self.assertTrue(orig_file.closed)

    def test_file_mode(self):
        # Should not set mode to None if it is not present.
        # See #14681, stdlib gzip module crashes if mode is set to None
        file = File("mode_test.txt", b"content")
        self.assertFalse(hasattr(file, 'mode'))
        g = gzip.GzipFile(fileobj=file)

class FileMoveSafeTests(unittest.TestCase):
    def test_file_move_overwrite(self):
        handle_a, self.file_a = tempfile.mkstemp(dir=TEST_TEMP_DIR)
        handle_b, self.file_b = tempfile.mkstemp(dir=TEST_TEMP_DIR)

        # file_move_safe should raise an IOError exception if destination file exists and allow_overwrite is False
        self.assertRaises(IOError, lambda: file_move_safe(self.file_a, self.file_b, allow_overwrite=False))

        # should allow it and continue on if allow_overwrite is True
        self.assertIsNone(file_move_safe(self.file_a, self.file_b, allow_overwrite=True))
|
[
"jesus.espino@kaleidos.net"
] |
jesus.espino@kaleidos.net
|
d13b522c261e63500490a1afe42df2608d9be1a9
|
c243cff9218b72b4171a3fc294607fa830561d08
|
/7_Shortest_Path/Jongmin/1753.py
|
5157f40c517c04146fe9bbbbf52a42dae0c70827
|
[] |
no_license
|
ISANGDEV/Algorithm_Study
|
31e65cc6916be92d2a56aef1ad18eacb5b04f787
|
0eec5e3b2321f521e617d7bdc99fca8a4103f0bb
|
refs/heads/main
| 2023-07-24T08:18:20.794903
| 2021-09-01T14:05:54
| 2021-09-01T14:05:54
| 347,665,106
| 0
| 0
| null | 2021-09-01T14:03:32
| 2021-03-14T14:51:24
|
Python
|
UTF-8
|
Python
| false
| false
| 684
|
py
|
import sys
from heapq import heappush,heappop
INF=int(1e9)
V,E=map(int, input().split())
K=int(input())
graph=[[] for i in range(V+1)]
for i in range(E):
    u,v,w=map(int, sys.stdin.readline().split())
    graph[u].append([w,v])

distance = [INF] * (V + 1)
heap=[]

def dijkstra(start):
    distance[start]=0
    heappush(heap,[0,start])
    while heap:
        c,p=heappop(heap)
        for point in graph[p]:
            cc,pp=point
            cost=cc+c
            if cost<distance[pp]:
                distance[pp]=cost
                heappush(heap,[cost, pp])

dijkstra(K)
for i in range(1,V+1):
    if distance[i]==INF:
        print("INF")
    else:
        print(distance[i])
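# Input format, as read above: first line "V E", second line the start vertex K,
# then E lines "u v w" each giving a directed edge u -> v with weight w; the
# output is the shortest distance from K to every vertex 1..V, or "INF" when a
# vertex is unreachable.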
|
[
"sdr2936@gmail.com"
] |
sdr2936@gmail.com
|
5b89d17fe310e2282732583cf20e21987579608a
|
5ab4ed1e8eb7f942db03eb06a56f2dc0fb8056f8
|
/code/scripts/sandbox/tensorly_mps.py
|
196f5671e665de4dea34f51d7f8dabf2010d9c85
|
[
"MIT"
] |
permissive
|
lucgiffon/psm-nets
|
b4f443ff47f4b423c3494ff944ef0dae68badd9d
|
dec43c26281febf6e5c8b8f42bfb78098ae7101d
|
refs/heads/main
| 2023-05-04T17:56:11.122144
| 2021-05-28T16:31:34
| 2021-05-28T16:31:34
| 337,717,248
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
from tensorly.decomposition import matrix_product_state
import numpy as np
a = np.random.rand(128, 256)
in_mods = [2, 4, 4, 4]
out_mods = [4, 4, 4, 4]
a_ = np.reshape(a, tuple(in_mods[i]*out_mods[i] for i in range(len(in_mods))))
ranks = [1, 2, 2, 2, 1]
l = list()
for i in range(len(in_mods)):
    l.append([out_mods[i] * ranks[i + 1], ranks[i] * in_mods[i]])

res = matrix_product_state(a_, ranks)
for idx_core, shape_core in enumerate(l):
    res[idx_core] = np.reshape(res[idx_core], tuple(shape_core))
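# Shape sanity check (a sketch): each TT core returned by matrix_product_state
# has shape (ranks[i], in_mods[i]*out_mods[i], ranks[i+1]), whose element count
# matches the 2-D factor shape computed in l above.
# for core in res:
#     print(core.shape)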
|
[
"luc.giffon@lis-lab.fr"
] |
luc.giffon@lis-lab.fr
|
bbad456f801d79bffb0babaf1ec61ccfa0d8bba4
|
93dd3e71c147f0647877e0bf22d1d78fdcc2a7d0
|
/Pictures/testers.py
|
8905ff629b1acdc9eb72d1a9bd9513d2698306cc
|
[] |
no_license
|
e-farkas/PodLine
|
6bea0ce14e2782a7580b721525f130a7cb2e1671
|
3e345e5908eff5d3f36eda1613243d11d729346f
|
refs/heads/master
| 2021-09-07T17:47:05.684459
| 2018-02-27T03:03:18
| 2018-02-27T03:03:18
| 118,840,199
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,017
|
py
|
from PIL import Image
import pytesseract
import os

path = "/usr/local/home/eguerrer/PodLine/Pictures/"
firstString = ""
Ofile = open("TITLES.txt", "w")
list = os.listdir(path)
for im in list:
    if im.endswith('.png'):
        image = Image.open(im)
        text = pytesseract.image_to_string(image, lang = 'eng')
        title = text.partition("\n\n")[0]
        filteredText = "".join(i for i in title if ord(i) < 128)
        # if firstString is empty
        # set firstString to filtered text
        # and write to text
        # else if first string is not empty
        # compare the first String to filtered String
        # if it is the same then don't write to text
        # if it is not the same then do write it to the file
        # assign first String to filtered text
        if firstString != filteredText:
            inputPath = os.path.join(path, im)
            Ofile.write("Next image:" + im)
            Ofile.write("\n")
            Ofile.write("Next slide: " + filteredText)
            Ofile.write("\n")
        firstString = filteredText
Ofile.close()
|
[
"eguerrer@vm133.sysnet.ucsd.edu"
] |
eguerrer@vm133.sysnet.ucsd.edu
|
8e78cc3767e4278b4921f0823929d60fee07494b
|
8347bcc0296a6fe5a3559d193830d604ea3b8d18
|
/music/models.py
|
8433e518d9c23ec891f10058ddc7f1070b6f0b23
|
[] |
no_license
|
sajadab/PythonTest
|
d4e44d35ea2cea32b15abb20d12f7024980b2b4a
|
910bcf4f6f5b883f5359a611bd5a5b30a311a531
|
refs/heads/master
| 2021-09-07T07:00:28.368204
| 2018-02-19T08:24:00
| 2018-02-19T08:24:00
| 115,856,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
from django.db import models
# from django.core.urlresolvers import reverse
class Album(models.Model):
    artist = models.CharField(max_length=250)
    album_title = models.CharField(max_length=500)
    genre = models.CharField(max_length=100)
    album_logo = models.CharField(max_length=1000)

    # def get_absolute_url(self):
    #     return reverse('music:detail', kwargs={'pk': self.pk})

    def __str__(self):
        return self.album_title + ' - ' + self.artist

class Song(models.Model):
    album = models.ForeignKey(Album, on_delete=models.CASCADE)
    file_type = models.CharField(max_length=10)
    song_title = models.CharField(max_length=250)
    is_favorite = models.BooleanField(default=False)

    def __str__(self):
        return self.song_title
|
[
"you@example.com"
] |
you@example.com
|
89c304a08474593738cc5b93bd5b8b0255b6c2b5
|
003d9b0175cf4416114efa4cdbf9ae9d7a623237
|
/wadl_to_iodocs.py
|
6b6e111c385b0910da0259a61f4dd76a09cdf260
|
[] |
no_license
|
brunoroussel/wadl-to-iodocs
|
49b569aeff5ddf12022c4a5aaed7fb38f1a895bd
|
e7eeb19fde6c267a97d9ea9e44ff779f5874a750
|
refs/heads/master
| 2021-04-12T02:43:37.364528
| 2012-12-29T01:14:27
| 2012-12-29T01:14:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,245
|
py
|
#!/usr/bin/python
from BeautifulSoup import BeautifulSoup
import re
import simplejson as json
import sys
def wadl_to_rails_syntax(url):
    url = re.sub(r'{', r':', url)
    url = re.sub(r'}', r'', url)
    return url

def params_to_json(params):
    param_list = []
    for param in params:
        p = {
            'Name': param['name'],
            'Required': 'Y' if param['required'] == 'true' else 'N',
            'Default': '',
            'Type': 'string' if param['type'] == 'xsd:string' else param['type'],
            'Description': ''
        }
        try:
            p['Description'] = param.doc.text
        except AttributeError:
            p['Description'] = ''
        param_list.append(p)
    return param_list

def method_to_json(method):
    j = {
        'MethodName': method['apigee:displayname'],
        'Synopsis': method.doc.text,
        'HTTPMethod': method['name'],
        'URI': '',
        'RequiresOAuth': 'N',
        'parameters': [],
    }
    params = method.request.findAll('param')
    p = params_to_json(params)
    j['parameters'] = p
    return j

if __name__=='__main__':
    wf = sys.argv[1]
    jf = sys.argv[2]
    wadl_file = open('%s' % wf)
    json_file = open('%s' % jf, 'w+')
    soup = BeautifulSoup(wadl_file.read())
    resources = soup.findAll('resource')
    groups = {}
    tags = soup.findAll('apigee:tag', primary='true')
    for tag in tags:
        p = tag.text
        if p not in groups.keys():
            groups[p] = []
        else:
            pass
    for resource in resources:
        methods = resource.findAll('method')
        for method in methods:
            print method['apigee:displayname']
            j = method_to_json(method)
            j['URI'] = wadl_to_rails_syntax(resource['path'])
            group = method.findAll('apigee:tag', primary='true')[0].text
            groups[group].append(j)
    endpoints = []
    for key in groups:
        key_str = '%s related methods' % key
        endpoints.append({'name': key_str, 'methods': groups[key]})
    data = json.dumps({'endpoints': endpoints }, sort_keys=True, indent=4)
    json_file.write(data)
|
[
"jbkimelman@gmail.com"
] |
jbkimelman@gmail.com
|
15873693c27a82312804d65b817f5134a1ad17b8
|
3cddae9abac1a5f89ff56ecac4179c741700f02f
|
/Dragon/python/dragon/vm/caffe/layers/__init__.py
|
594afe3640ae152ccec31d8ba31637258e0ec54b
|
[
"BSD-2-Clause"
] |
permissive
|
divfor/Dragon
|
03383c6db4bcbe72b76ea1cc27abd5de64a7e847
|
53d5742dd20f3b345ae5648066bf3a1329ce3ee4
|
refs/heads/master
| 2023-05-30T12:18:08.692159
| 2017-08-09T03:43:30
| 2017-08-09T03:43:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
# --------------------------------------------------------
# Caffe for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from .data import DataLayer, MemoryDataLayer
from .vision import ConvolutionLayer, DeconvolutionLayer, PoolingLayer, \
                    LRNLayer, ROIPoolingLayer, ROIAlignLayer, NNResizeLayer
from .neuron import ReLULayer, DropoutLayer, TanhLayer, PowerLayer
from .loss import SoftmaxWithLossLayer, SigmoidCrossEntropyLossLayer, \
                  L2LossLayer, SmoothL1LossLayer
from .mpi import MPIBroadcastLayer, MPIGatherLayer
from .common import InnerProductLayer, AccuracyLayer, BatchNormLayer, \
                    BatchRenormLayer, BNLayer, ConcatLayer, \
                    CropLayer, PythonLayer, AddLayer, \
                    ReshapeLayer, EltwiseLayer, ScaleLayer, \
                    SoftmaxLayer, PermuteLayer, FlattenLayer, \
                    NormalizeLayer, InstanceNormLayer, TileLayer, \
                    ExpandDimsLayer, ProposalLayer, DenseConcatLayer
|
[
"ting.pan@seetatech.com"
] |
ting.pan@seetatech.com
|
9293bcfd52fa3f687ca1ab4e6d9d2935eab178af
|
f023936fe61984604da81533ac96d184e5b92a73
|
/manage.py
|
9920dbc8244cc2e17bb19655dcdb23fbb8c5f74e
|
[] |
no_license
|
LousGndiner/django_todo_list
|
c7c02c228f596269078737a2fcdeb367c6aa6266
|
25d279aacb98d27fae2ca4bac6bd4a82b9569fa4
|
refs/heads/master
| 2022-07-23T05:17:54.958572
| 2020-05-18T06:13:03
| 2020-05-18T06:13:03
| 264,845,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rjagonzales.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)

if __name__ == '__main__':
    main()
|
[
"rjagonzales@addu.edu.ph"
] |
rjagonzales@addu.edu.ph
|
a3eaf0af41a96f610e5598fd78c598613b06c0ea
|
63fed887a6755c371be31e6ba59fcab1c0762c66
|
/treex/nn/mlp.py
|
d8a11cfed74a4b2dbbd85f3701f91250520c9bf6
|
[
"MIT"
] |
permissive
|
rohitkuk/treex
|
2986d9b7d6e64dd2a18de703154f1e0c081a577a
|
e4f30d1ce41c0ecb491610e607edd335a8700e37
|
refs/heads/master
| 2023-07-21T16:39:07.657968
| 2021-08-30T19:00:12
| 2021-08-30T19:00:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,763
|
py
|
import typing as tp
import jax
import jax.numpy as jnp
import numpy as np
from flax.linen import linear as flax_module
from treex import types
from treex.module import Module
from treex.nn.linear import Linear
class MLP(Module):
    """A Multi-Layer Perceptron (MLP) that applies a sequence of linear layers
    with a given activation (relu by default), the last layer is linear.
    """

    # pytree
    layers: tp.List[Linear]

    # props
    features: tp.Sequence[int]
    module: flax_module.Dense

    def __init__(
        self,
        features: tp.Sequence[int],
        activation: tp.Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.relu,
        use_bias: bool = True,
        dtype: tp.Any = jnp.float32,
        precision: tp.Any = None,
        kernel_init: tp.Callable[
            [flax_module.PRNGKey, flax_module.Shape, flax_module.Dtype],
            flax_module.Array,
        ] = flax_module.default_kernel_init,
        bias_init: tp.Callable[
            [flax_module.PRNGKey, flax_module.Shape, flax_module.Dtype],
            flax_module.Array,
        ] = flax_module.zeros,
    ):
        """
        Arguments:
            features: a sequence of L+1 integers, where L is the number of layers,
                the first integer is the number of input features and all subsequent
                integers are the number of output features of the respective layer.
            activation: the activation function to use.
            use_bias: whether to add a bias to the output (default: True).
            dtype: the dtype of the computation (default: float32).
            precision: numerical precision of the computation see `jax.lax.Precision`
                for details.
            kernel_init: initializer function for the weight matrix.
            bias_init: initializer function for the bias.
        """
        if len(features) < 2:
            raise ValueError("features must have at least 2 elements")

        self.features = features
        self.activation = activation
        self.layers = [
            Linear(
                features_in=features_in,
                features_out=features_out,
                use_bias=use_bias,
                dtype=dtype,
                precision=precision,
                kernel_init=kernel_init,
                bias_init=bias_init,
            )
            for features_in, features_out in zip(features[:-1], features[1:])
        ]

    def __call__(self, x: np.ndarray) -> jnp.ndarray:
        """
        Applies the MLP to the input.

        Arguments:
            x: input array.

        Returns:
            The output of the MLP.
        """
        for layer in self.layers[:-1]:
            x = self.activation(layer(x))
        return self.layers[-1](x)
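# Usage sketch (hypothetical sizes; per treex's Module API the model must be
# initialized with an RNG seed or key before it is called):
#   mlp = MLP(features=[784, 256, 10]).init(42)
#   y = mlp(np.zeros((1, 784), dtype=np.float32))  # expected output shape (1, 10)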
|
[
"cgarcia.e88@gmail.com"
] |
cgarcia.e88@gmail.com
|
9f24ba2a410dbd012c84bb095afad472d2612f1c
|
dd26ab5d59139b4db6ac9d7de3244f7ec7a2249e
|
/notebooks/Users/svyatkin@tunein.com/RedShift Example Upsert Table.py
|
3bcb6a925112a00845e8ecf3e505d351c16a44a9
|
[] |
no_license
|
SVyatkin/databricks
|
e10c9816be8153fa252d30432b89cc6b2eb06165
|
6b8466e52b313707fb32bff319d38043eccf5d50
|
refs/heads/master
| 2022-11-14T19:11:09.571358
| 2020-07-10T21:31:04
| 2020-07-10T21:31:04
| 275,268,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,192
|
py
|
# Databricks notebook source
# MAGIC %run Shared/PythonUtils
# COMMAND ----------
# MAGIC %md ##RedShift Examples how to use different modes for PythonUtils
# MAGIC
# MAGIC This notebook walks through the process of:
# MAGIC
# MAGIC 1. How to create/update/append a redshift table
# MAGIC 2. How to use different mode:
# MAGIC
# MAGIC a. default mode "error" create new table. If the table exists it throws an exception.
# MAGIC
# MAGIC b. mode "overwrite" drops a table and creates new table from dataframe data.
# MAGIC
# MAGIC c. mode "append" appends rows from dataframe to the table. Side effect duplications rows.
# MAGIC
# MAGIC 3. Work around how to upsert dataframe to a redshift table
# COMMAND ----------
# MAGIC %md ##"ERROR" mode
# MAGIC
# MAGIC Initial mode to create new table from dataframe
# COMMAND ----------
from pyspark.sql import Row
# create data frame
mesage01 = Row(id=1, description='01 description')
mesage02 = Row(id=2, description='02 description')
mesage03 = Row(id=3, description='03 description')
list = [mesage01, mesage02, mesage03]
df = spark.createDataFrame(list)
display(df)
# COMMAND ----------
# create a table test_upsert
# MODE E R R O R
# mode = "error" is default
# if the table already exists we get an error: Table <table name> already exists! (SaveMode is set to ErrorIfExists)
# and we need to use OVERWRITE mode
table_name = "public.test_upsert"
write_to_redshift(df, table_name)
# COMMAND ----------
# read table
query_df = read_from_redshift(query="SELECT * FROM public.test_upsert")
display(query_df)
# COMMAND ----------
# MAGIC %md ##"OVERWRITE" mode
# MAGIC
# MAGIC A mode to overwrite redshift table with new data from dataframe
# COMMAND ----------
# create a table test_upsert
# MODE O V E R W R I T E
table_name = "public.test_upsert"
mode = "overwrite"
write_to_redshift(df, table_name, mode)
# COMMAND ----------
# read table
query_df = read_from_redshift(query="SELECT * FROM public.test_upsert")
display(query_df)
# COMMAND ----------
# MAGIC %md ##"APPEND" mode
# MAGIC
# MAGIC A mode to append dataframe to redshift table
# COMMAND ----------
# MODE A P P E N D
# create new data frame to append into redshift table
mesage01 = Row(id=4, description='04 description')
mesage02 = Row(id=5, description='05 description')
mesage03 = Row(id=6, description='06 description')
list = [mesage01, mesage02, mesage03]
df = spark.createDataFrame(list)
table_name = "public.test_upsert"
mode = "append"
write_to_redshift(df, table_name, mode)
# COMMAND ----------
# read table
query_df = read_from_redshift(query="SELECT * FROM public.test_upsert")
display(query_df)
# COMMAND ----------
# MAGIC %md ##Upsert work around
# MAGIC
# MAGIC Create updated dataframe and overwrite a redshift table
# COMMAND ----------
# Union 2 dataframes before update
mesage01 = Row(id=4, description='04 update')
mesage02 = Row(id=5, description='05 update')
mesage03 = Row(id=6, description='06 update')
list = [mesage01, mesage02, mesage03]
df = spark.createDataFrame(list)
table_name = "public.test_upsert"
mode = "append"
write_to_redshift(df, table_name, mode)
# COMMAND ----------
# read table
df1 = read_from_redshift(query="SELECT * FROM public.test_upsert")
display(df1)
# COMMAND ----------
# MAGIC %md ##Union two data frames and delete duplications
# COMMAND ----------
from pyspark.sql import Row
# create new dataframe with dups ids from frame 1
mesage01 = Row(id=4, description='04 merge')
mesage02 = Row(id=5, description='05 merge')
mesage03 = Row(id=6, description='06 merge')
mesage04 = Row(id=7, description='07 merge')
list = [mesage01, mesage02, mesage03, mesage04]
df2 = spark.createDataFrame(list).sort("id")
display(df2)
# COMMAND ----------
# MAGIC %md ###Union data frames and delete duplications
# COMMAND ----------
df = df2.union(df1).dropDuplicates(["id"]).sort("id")
display(df)
# COMMAND ----------
df = df1.union(df2).dropDuplicates(["id"]).sort("id")
display(df)
# COMMAND ----------
|
[
"vyatkinhome@yahoo.com"
] |
vyatkinhome@yahoo.com
|
6f951281db6c2f6132c705c86eb5ea3f511d552c
|
7b705b92bd4bbb20e69303f41b7f37a5ab270cf9
|
/Linear Regression/Regress.py
|
733fd0f5a5bcbf0e2f2cac8c7d96877cab6e1a30
|
[] |
no_license
|
noahsolomon0518/Data-Science-Projects
|
c7531a68aa93d74872359e2c63b6736ccf7ed55d
|
c7b32540eba9eca144fed0d410b87b2790128606
|
refs/heads/master
| 2020-12-14T02:47:45.901869
| 2020-03-08T00:38:23
| 2020-03-08T00:38:23
| 234,611,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,254
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 17 14:42:04 2020
@author: noahs
"""
#import pandas
import math
import pandas as pd
import numpy as np
import func
class best_Fit:
    def __init__(self):
        self.df = 0
        self.x = 0
        self.Y = 0
        self.size = 0
        self.avgP = 0
        self.slope = 0
        self.b = 0

    def fit(self,x,Y):
        self.x = x
        self.Y = Y
        self.size = len(x)
        self.avgP = func.avg_Point(x,Y)

    def slope_Best(self):
        n = self.size
        xy = func.sum_xy(self.x, self.Y)
        x = func.sum_x(self.x)
        y = func.sum_y(self.Y)
        x2 = func.sum_x2(self.x)
        avgx = self.avgP[0]
        avgy = self.avgP[1]
        self.slope = ((-2*xy)+(2*avgx*y)+(2*avgy*x)-(2*n*avgx*avgy))/((4*avgx*x)-(2*n*avgx**2)-(2*x2))
        return self.slope

    def b_Best(self):
        avgx = self.avgP[0]
        avgy = self.avgP[1]
        self.b = (avgy - self.slope*avgx)
        return self.b

    def get_b(self):
        return self.b

    def get_m(self):
        return self.slope

    def get_avgP(self):
        return self.avgP
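# For reference: with avgx = sum_x/n and avgy = sum_y/n, the expression in
# slope_Best is algebraically equivalent to the textbook least-squares slope
# m = (n*sum_xy - sum_x*sum_y) / (n*sum_x2 - sum_x**2), and b_Best is the
# usual intercept b = avgy - m*avgx.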
|
[
"noreply@github.com"
] |
noahsolomon0518.noreply@github.com
|
e9081a4231ebda8f5aad0545ac59c82832882a68
|
b932ddc6d1187a795ef3c2b2af0ef5b186c8463f
|
/billing/__init__.py
|
4ff2652b98b36eb5229baffd5c8b02abdaa0f245
|
[] |
no_license
|
FlashBanistan/drf-property-management
|
77f7ce487878b08298627e08dbaf5b9599768e73
|
016fb3e512dafa901de70e0b75ce0a6f6de38933
|
refs/heads/master
| 2021-11-16T18:55:48.314808
| 2020-09-09T03:13:36
| 2020-09-09T03:13:36
| 98,379,119
| 1
| 0
| null | 2021-09-22T17:37:36
| 2017-07-26T04:21:59
|
Python
|
UTF-8
|
Python
| false
| false
| 49
|
py
|
default_app_config = 'billing.apps.BillingConfig'
|
[
"FlashBanistan66@gmail.com"
] |
FlashBanistan66@gmail.com
|
3b51022c0793182fd0303ab104e6f38e648b8672
|
b4324e79e8b54fa016bb7f4dbf89b111d9e01690
|
/programa.py
|
2bf409565add643fb28d6118d9f714bb19a68bd0
|
[] |
no_license
|
Danielconrad2001/GitTrabalho
|
cbd92f51a51e46bab409844d87de81824412abab
|
dfc1287c6c8b6e4db8085d980aea6f1113dfb571
|
refs/heads/main
| 2023-08-11T13:23:25.485959
| 2021-09-22T23:21:41
| 2021-09-22T23:21:41
| 404,856,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,052
|
py
|
import os
from usuario import Cadastro
carrinho = [] # listas usadas para armazenar informações necessárias para o funcionamento do programa.
produtos = []
usuario_atual = []
class Store:
def __init__(self):
self.i_produtos()
self.logo()
self.menu_inicial()
    def i_produtos(self):  # loads the products registered in the .txt file into a list.
with open('a_produtos.txt', 'r') as arquivo:
for l in arquivo:
x = l.split(',')
x[2] = x[2].replace('\n', '')
x[1] = float(x[1].replace('R$ ', ''))
produtos.append(x)
    def menu_inicial(self):  # initial menu, holding the program's first options.
self.linha()
print('''
[1] - CADASTRAR
[2] - LOGIN
[0] - SAIR
''')
self.linha()
while True:
self.opc = input('Digite uma opção: ')
if self.opc == '1':
Cadastro()
elif self.opc == '2':
a = self.logar()
if a:
self.menu_loja(carrinho)
elif self.opc == '0':
self.exit()
else:
print('Opção não encontrada.')
    def menu_loja(self, carrinho):  # store menu, holding the program's main features.
self.linha()
print('''
[1] - VER PRODUTOS
[2] - VER CARRINHO
[3] - EFETUAR PAGAMENTO
[4] - MEUS DADOS
[0] - LOGOUT
''')
self.linha()
while True:
opx = input('Digite uma opção: ')
if opx == '1':
self.ver_produtos(produtos)
elif opx == '2':
self.ver_carrinho(carrinho, produtos)
elif opx == '3':
self.opcoes_pagamento()
elif opx == '4':
self.meus_dados(usuario_atual)
elif opx == '0':
carrinho.clear()
usuario_atual.clear()
self.menu_inicial()
else:
print('Opção não encontrada.')
    def logo(self):  # holds and prints the program's logo.
self.linha()
print(f'''
__ __
\ \ / / __ _ _ __ ___ _ _
\ V / / _` | | '_ \ / _ \ | '_|
\_/ \__,_| | .__/ \___/ |_|
|_|''')
    def linha(self):  # prints a divider line to keep the output organized.
print('=='*33)
    def exit(self):  # shuts the program down.
exit()
    def logar(self):  # asks for the information needed to log in.
while True:
self.cpf = input('Digite seu cpf: ').replace('.', '').replace('-', '')
self.senha = input('Digite sua senha: ')
if self.verificar_usuario(self.cpf, self.senha):
print('Usuario logado com sucesso.')
break
else:
print('Cpf ou senha invalida.')
return True
    def verificar_usuario(self, cpf, senha):  # validates the information requested above.
r_user = []
with open('a_user_register.txt','r') as arquivo:
for l in arquivo:
r_user.append(l.split(','))
for user in r_user:
try:
if str(user[3].replace('\n', '')) == str(cpf) and str(user[1].replace('\n', '')) == str(senha):
user[4] = user[4].replace('\n', '')
usuario_atual.append(user)
usuario_atual[0].append(user[4])
return True
except IndexError:
pass
return False
    def ver_produtos(self, produtos):  # shows the user every product registered in the .txt file.
self.linha()
print('|Cód| |Descrição| |Preço|')
self.linha()
for produto in produtos:
print(f' {produto[0]:<5}{produto[2]:<50}R$ {int(produto[1]):.2f}')
self.linha()
print('''
[CÓD 1-20] - COLOCAR NO CARRINHO
[99] - VER CARRINHO
[0] - VOLTAR
''')
self.linha()
while True:
self.op = input('Digite uma opção: ')
if self.op.isnumeric():
if 1 <= int(self.op) <= 20 :
un = int(input(f'[{self.op}] - QUANTIDADE DE UNIDADES: '))
self.colocar_carrinho(self.op, un, produtos, usuario_atual, carrinho)
elif self.op == '99':
self.ver_carrinho(carrinho, produtos)
elif self.op == '0':
self.menu_loja(carrinho)
else:
print('Opção não encontrada.')
else:
print('Opção não encontrada.')
    def colocar_carrinho(self, cod, un, produtos, usuario_atual, carrinho):  # puts a product into the cart.
saldo = float(usuario_atual[0][4])
sum = 0
registro = []
for produto in produtos:
if produto[0] == cod:
sum += (produto[1] * un)
if sum > saldo:
print(f'Limite do seu saldo foi ultrapassado.')
return False
else:
print(f'{un} unidades do cód {cod} foram adicionados ao carrinho.')
usuario_atual[0][4] = float(usuario_atual[0][4]) - sum
existe = 0
for i in carrinho:
if i[0] == cod:
i[1] += un
existe = 1
if existe == 0:
registro.append(cod)
registro.append(un)
carrinho.append(registro)
    def ver_carrinho(self, carrinho, produtos):  # displays the user's cart.
sum = 0
self.linha()
print('|Cód| |Descrição| |un| |Preço|')
self.linha()
try:
for item in carrinho:
for produto in produtos:
if item[0] == produto[0]:
sum += item[1] * produto[1]
print(f' {produto[0]:<5}{produto[2]:<47} {item[1]} R$ {produto[1]}')
except:
pass
self.linha()
print(f'|Total: {sum:>56.2f}|')
self.linha()
self.linha()
print('''
[1] - REMOVER PRODUTO DO CARRINHO
[2] - EFETUAR PAGAMENTO
[0] - VOLTAR
''')
self.linha()
while True:
self.opa = input('Digite uma opção: ')
if self.opa == '1':
self.remover_produto(usuario_atual[0][4], produtos, usuario_atual)
elif self.opa == '2':
self.opcoes_pagamento()
elif self.opa == '0':
self.menu_loja(carrinho)
else:
print('Opção não encontrada.')
    def opcoes_pagamento(self):  # shows the payment options to the user.
self.linha()
print('''
[1] - DESCONTAR DO SALDO
[2] - PAGAR CONTA
[0] - VOLTAR
''')
self.linha()
while True:
self.opr = input('Digite uma opção: ')
if self.opr == '1':
self.descontar_saldo(usuario_atual[0][3], usuario_atual[0][4], carrinho)
elif self.opr == '2':
self.pagar_conta(usuario_atual[0][3], carrinho, usuario_atual)
elif self.opr == '0':
self.menu_loja(carrinho)
else:
print('Opção não encontrada.')
    def descontar_saldo(self, cpf, novo_saldo, carrinho):  # deducts the cart total from the user's balance, if enough credit is available.
senha = input('Digite sua senha para confirmar o pagamento ou [0] para voltar: ')
if senha == '0':
self.opcoes_pagamento()
else:
conf = self.conferir_senha(senha, cpf)
if conf:
if len(carrinho) != 0:
conta = []
with open('a_user_register.txt','r') as arquivo:
for linha in arquivo:
a = linha.split(',')
if len(a) < 5:
continue
if a[3] == str(cpf):
a[4] = str(novo_saldo) + '\n'
conta.append(a)
with open('a_user_register.txt','w') as arquivo:
for c in conta:
arquivo.write(str(f'{c[0]},{c[1]},{c[2]},{c[3]},{c[4]}')+'\n')
print(f'Sua conta foi descontada, seu novo saldo é R$ {float(novo_saldo):.2f}')
carrinho.clear()
self.menu_loja(carrinho)
else:
print('Seu carrinho esta vazio.')
else:
print('Senha incorreta.')
self.descontar_saldo(usuario_atual[0][3], usuario_atual[0][4], carrinho)
    def pagar_conta(self, cpf, carrinho, usuario_atual):  # settles the user's whole debt plus whatever is in the cart right now.
senha = input('Digite sua senha para confirmar o pagamento ou [0] para voltar: ')
if senha == '0':
self.opcoes_pagamento()
else:
conf = self.conferir_senha(senha, cpf)
if conf:
if len(carrinho) != 0 or usuario_atual[0][4] != '1000':
conta = []
with open('a_user_register.txt','r') as arquivo:
for linha in arquivo:
a = linha.split(',')
if len(a) < 5:
continue
if a[3] == str(cpf):
a[4] = str(1000) + '\n'
conta.append(a)
with open('a_user_register.txt','w') as arquivo:
for c in conta:
arquivo.write(str(f'{c[0]},{c[1]},{c[2]},{c[3]},{c[4]}')+'\n')
print(f'Sua conta foi paga, seu novo saldo é de R$ 1.000,00')
usuario_atual[0][4] = '1000'
carrinho.clear()
self.menu_loja(carrinho)
else:
print('Seu carrinho esta vazio.')
else:
print('Senha incorreta.')
self.pagar_conta(usuario_atual[0][3], carrinho, usuario_atual)
    def meus_dados(self, usuario):  # shows the data of the currently logged-in user.
self.linha()
print()
print(f' Nome cadastrado: {usuario[0][0]}')
print(f' Email cadastrado: {usuario[0][2]}')
print(f' Cpf cadastrado: {self.cpf_formatado(usuario[0][3])}')
print(f' Saldo atual: R$ {self.saldo_atual(usuario[0][3]):.2f}')
print(f' Saldo com produtos no carrinho: R$ {float(usuario[0][4]):.2f}')
print()
self.menu_loja(carrinho)
    def cpf_formatado(self, cpf):  # takes the logged-in user's CPF and returns it formatted (XXX.XXX.XXX-XX).
cpf_f = ''
c = 0
for i in cpf:
cpf_f += i
c+=1
if c == 3:
cpf_f += '.'
if c == 6:
cpf_f += '.'
if c == 9:
cpf_f += '-'
return cpf_f
    def remover_produto(self, saldo, produtos, usuario_atual):  # removes products from the cart.
global carrinho
novo_carrinho = []
cod = input('Digite o código do produto: ')
un = input('Digite a quantidade que deseja remover: ')
if len(carrinho) != 0:
for prod in carrinho:
if int(prod[0]) == int(cod):
if int(prod[1]) <= int(un):
un = int(prod[1])
else:
prod[1] = int(prod[1]) - int(un)
novo_carrinho.append(prod)
for produto in produtos:
if produto[0] == cod:
saldo += float(produto[1]) * int(un)
else:
novo_carrinho.append(prod)
usuario_atual[0][4] = saldo
carrinho = novo_carrinho.copy()
self.ver_carrinho(carrinho, produtos)
else:
print('Seu carrinho esta vazio.')
    def conferir_senha(self, senha, cpf):  # checks the user's password.
r_user = []
with open('a_user_register.txt','r') as arquivo:
for l in arquivo:
r_user.append(l.split(','))
for user in r_user:
try:
if str(user[3].replace('\n', '')) == str(cpf) and str(user[1].replace('\n', '')) == str(senha):
return True
except IndexError:
pass
return False
    def saldo_atual(self, cpf):  # looks up the user's current balance.
r_user = []
with open('a_user_register.txt','r') as arquivo:
for l in arquivo:
r_user.append(l.split(','))
for user in r_user:
try:
if str(user[3].replace('\n', '')) == str(cpf):
return float(user[4])
except IndexError:
pass
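# Reviewer sketch (hypothetical, not part of the original program): the
# digit-by-digit CPF formatting above can also be written with plain slicing.
def cpf_formatado_slicing(cpf):
    # assumes an 11-digit CPF string; returns XXX.XXX.XXX-XX
    return '%s.%s.%s-%s' % (cpf[:3], cpf[3:6], cpf[6:9], cpf[9:])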
Danielconrad2001.noreply@github.com | /listings/migrations/0001_initial.py | SurajSankarsingh/btdjango | no_license | py
# Generated by Django 3.1.7 on 2021-02-25 03:46
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('realtors', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Listing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('address', models.CharField(max_length=200)),
('city', models.CharField(max_length=100)),
('state', models.CharField(max_length=100)),
('zipcode', models.CharField(max_length=20)),
('description', models.TextField(blank=True)),
('price', models.IntegerField()),
('bedrooms', models.IntegerField()),
('bathrooms', models.DecimalField(decimal_places=1, max_digits=2)),
('garage', models.IntegerField(default=0)),
('sqft', models.IntegerField()),
('lot_size', models.DecimalField(decimal_places=1, max_digits=5)),
('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d/')),
('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_3', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_4', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_5', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_6', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('is_published', models.BooleanField(default=True)),
('list_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
('realtor', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='realtors.realtor')),
],
),
]
suraj.sankarsingh@hotmail.com | /ros/build/catkin_generated/order_packages.py | gurugithub/Carnd-Project14-Capstone-Programming-a-Real-Self-Driving-Car | no_license | py
# generated from catkin/cmake/template/order_packages.context.py.in
source_root_dir = "/home/student/catkin_ws/CarND-Capstone/ros/src"
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
underlay_workspaces = "/home/student/catkin_ws/CarND-Capstone/ros/devel;/opt/ros/kinetic".split(';') if "/home/student/catkin_ws/CarND-Capstone/ros/devel;/opt/ros/kinetic" != "" else []
guru.shetti@gmail.com | /backstage/act/act.py | MiddleFork/django_backstage | no_license | py
import os
import sys
import time
import requests
from backstage.utils import uwsgi_portsniffer
from backstage.utils.uwsgi.uwsgi_utils import build_uwsgi
# Choose one of the below as the default uwsgi emperor vassal control:
from backstage.utils.uwsgi.linker_file_ini import start, stop, restart
#from backstage.utils.uwsgi.linker_pg_plugin import start, stop, restart
class Act():
def __init__(self, venue, actname):
acthome = os.path.join(venue.acts_root, actname)
kf = 'backstage-%s-%s.id' % (venue.venue_name, actname)
keyfile = os.path.join(acthome, '.LIVE', kf)
if not os.path.exists(keyfile):
#not a valid act
return
self.venue = venue
self.actname = actname
self.name = self.actname
self.acthome = acthome
self.longname = 'backstage-%s-%s' % (self.venue.name, self.name)
self.keyfile = keyfile
self.conn = venue.conn
self.get_settings()
self.uwsgi_config, self.uwsgi_ini = build_uwsgi(self, 'act')
inifile = '%s.ini' % self.longname
self.uwsgi_file = os.path.join(self.acthome, inifile)
self.uwsgi_vassal = os.path.join(self.settings.UWSGI_VASSALS, inifile)
#necessary for file-based uwsgi linking
self.uwsgi_ip = None
self.uwsgi_port = None
if not os.path.exists(self.uwsgi_file):
with open(self.uwsgi_file, 'w') as f:
f.write(self.uwsgi_ini)
def start(self):
start(self)
def stop(self):
stop(self)
def restart(self):
restart(self)
def get_settings(self):
syspath = sys.path
sys.path.insert(0, os.path.join(self.venue.venue_home, 'acts'))
settings = None
exec('from %s import settings' % self.actname)
sys.path = syspath
self.settings = settings
return
def get_uwsgi_log(self):
        fo = open(self.uwsgi_file, 'r')
d = fo.readlines()
fo.close()
logfile = None
for line in d:
line = line.strip()
if line[0:9] == 'daemonize':
logfile = line.split('=')[1]
return logfile
return logfile
def get_uwsgi_port(self):
"""Get the uwsgi port using lsof. Requires that lsof and fuser be suid root"""
start_port =self.uwsgi_port
timeout = 10
nap = 1
starttime = time.time()
elapsed = 0
valid = False
while not valid and elapsed < timeout:
try:
fullport = uwsgi_portsniffer.port_from_lsof(self)
new_ip, new_port = fullport.split(':')
                if new_port != start_port:
self.uwsgi_ip = new_ip
self.uwsgi_port = new_port
print 'OK %s:%s' % (new_ip,new_port)
valid = True
except:
pass
if not valid:
time.sleep(nap)
elapsed = time.time() - starttime
return
def connect(self):
"""
connect to the instance's default database
@return:
"""
        from backstage.db.db_utils import connect_default
        return connect_default(self)
def sniff_uwsgi_port(self):
"""sniff the uwsgi port from the log file. inefficient but does not require
root access"""
ip, port = uwsgi_portsniffer.portsniffer(self.uwsgi_log)
if port is None:
print "No port. Try self.uwsgi_linker(linkmode='link')"
return
uwsgi_uri = 'http://%s:%s' % (ip, port)
try:
h = requests.head(uwsgi_uri)
if h.status_code == 200:
self.uwsgi_ip = ip
self.uwsgi_port = port
print str(ip), str(port)
else:
print 'request for %s resulted in a status code of %s' % (uwsgi_uri, h.status_code)
print 'the entire header follows:'
print h
if h.status_code == 500:
s = 'A status code of 500 means that the port is bound OK but that '
s+= 'there is probably a coding error somewhere in the Act '
s+= 'which is causing it to fail to load. '
s+= 'In your browser - and with DEBUG enabled, visit %s and review the error message' % (uwsgi_uri)
print s
self.uwsgi_ip = None
self.uwsgi_port = None
except requests.exceptions.ConnectionError:
s= 'Failure to load the URI at %s' % (uwsgi_uri)
s+= 'Hint: this is probably a stale port.\nTry reloading the Act by touching its .ini file.\n'
s+= 'Or, wait a few more seconds and try "(self).get_uwsgi_port() again'
print s
self.uwsgi_ip = None
self.uwsgi_port = None
print 'None'
return
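# Hypothetical helper sketch (not in the original module): the error text above
# suggests reloading an Act by touching its ini file; with a uWSGI emperor
# watching the vassals directory, bumping the file's mtime forces that reload.
def touch_vassal(act):
    os.utime(act.uwsgi_vassal, None)  # set mtime to now; the emperor respawns the vassal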
walker@mfgis.com | /capabilities/perception/person/person_detection.py | joseastorgat/Skills | no_license | py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import qi
import rospy
from uchile_skills.robot_skill import RobotSkill
from geometry_msgs.msg import PoseStamped
class PersonDetectionSkill(RobotSkill):
"""
"""
_type = "person_detector"
def __init__(self):
"""
Person Detector Skill
"""
super(PersonDetectionSkill, self).__init__()
self._description = "Person detection skill based in Naoqi Apis"
self.ids = []
def setup(self):
self.people_perception = self.robot.session.service("ALPeoplePerception")
self.memory = self.robot.session.service("ALMemory")
self.pd_mem_subs = self.memory.subscriber("PeoplePerception/PeopleDetected")
return True
def check(self):
return True
def start(self):
try:
self.loginfo("[{0}] Detection start ".format(PersonDetectionSkill._type))
            self.__subscriber_name = "PersonDetectionSkill_" + str(rospy.Time.now())
            self.people_subscriber = self.people_perception.subscribe(self.__subscriber_name) # subscribe under the name that pause() later unsubscribes
# self.subscriber = self.memory.subscriber("PeoplePerception/PeopleList") #subscribe event memory
self.__reset_population()
            self.__signal_link = self.pd_mem_subs.signal.connect(self.__on_people_detected) # connect callback; keep the link id so pause() can disconnect it
except Exception as e:
self.logerr("[{0}] Detection start failed {1}".format(PersonDetectionSkill._type,e))
print(e)
return True
def pause(self):
try:
self.loginfo("[{0}] Detection Pause".format(PersonDetectionSkill._type))
self.people_perception.unsubscribe(self.__subscriber_name)
            self.pd_mem_subs.signal.disconnect(self.__signal_link)
except Exception as e:
self.logerr("[{0}] Detection Pause Failed {1} ".format(PersonDetectionSkill._type,e))
return True
def shutdown(self):
self.pause()
return True
def person_detection(self):
poses = []
for id in self.ids:
pose = self._get_person_pose(id)
if pose is not None:
poses.append(pose)
self.loginfo("[{0}] Person detection {1}".format(PersonDetectionSkill._type,poses))
return poses
def _get_person_pose(self,id):
try:
position = self.memory.getData("PeoplePerception/Person/"+str(id)+"/PositionInRobotFrame")
print(position)
except Exception as e:
self.logerr("[{0}] Error getting position from person {1}, {2}".format(PersonDetectionSkill._type, id, e ))
return None
pose = PoseStamped()
pose.header.stamp=rospy.Time.now()
        pose.header.frame_id = "/maqui" # look up the proper robot frame
pose.pose.position.x = position[0]
pose.pose.position.y = position[1]
pose.pose.position.z = position[2]
pose.pose.orientation.w = 1
return pose
def tshirt_detection(self):
label = self.__get_tshirt()
total = len(label)
color = {'black':0, 'white':0, 'red':0, 'blue':0, 'green':0, 'yellow':0}
for c in label:
color[c] += 1
self.loginfo("[{0}] T-shirt color detection {1} ".format(PersonDetectionSkill._type,color))
return color, total
def tshirt_pose(self):
"""
...
"""
try:
poses = self.person_detection()
label = self.__get_tshirt()
        except rospy.ServiceException as e:
self.logerr("{0} : Couldn't get people tshirt or poses ".format(PersonDetectionSkill._type))
return None, None
self.loginfo("[{0}] Person and color detection".format(PersonDetectionSkill._type))
return poses, label
"""
Extra Methods for Maqui
"""
def __on_people_detected(self,value):
# [
# [TimeStamp_Seconds, TimeStamp_Microseconds],
# [PersonData_1, PersonData_2, ... PersonData_n],
# CameraPose_InTorsoFrame,
# CameraPose_InRobotFrame,
# Camera_Id
# ]
# PersonData_i =
# [
# Id,
# DistanceToCamera,
# PitchAngleInImage,
# YawAngleInImage
# ]
personData = value[1]
self.ids = []
if personData == []:
return
for person in personData:
self.ids.append(person[0])
# self.loginfo("[{0}] Detections : {1}".format(PersonDetectionSkill._type, len(self.ids)))
# self.logdebug("[{0}] Detections : {1}".format(PersonDetectionSkill._type, self.ids))
return
def __reset_population(self):
try:
self.people_perception.resetPopulation()
        except Exception as e:
raise e
def __get_tshirt(self):
label = []
for id in self.ids:
tshirt = self.__get_person_tshirt(id)
if tshirt is not None:
label.append(tshirt.lower())
return label
def __get_person_tshirt(self,id):
"""
"""
try:
tshirt = self.memory.getData("PeoplePerception/Person/"+str(id)+"/ShirtColor")
except Exception as e:
self.logwarn("[{0}] Error getting t-shirt color from person {1}, {2}".format(PersonDetectionSkill._type, id, e ))
return None
return tshirt
"""
ALPeoplePerception
Configuration:
ALPeoplePerception::setFastModeEnabled
ALPeoplePerception::setGraphicalDisplayEnabled
ALPeoplePerception::setMaximumDetectionRange
ALPeoplePerception::setMovementDetectionEnabled
ALPeoplePerception::setTimeBeforePersonDisappears
ALPeoplePerception::setTimeBeforeVisiblePersonDisappears
"""
jose.n.astorga.tobar@gmail.com | /PythonProgramming/4-18/Sales/write_sales.py | MorgFost96/School-Projects | no_license | py
# Writes the sales data to the hard drive
def main():
num_days = int( input( "For how many days do you have sales? " ) )
sales_file = open( "sales.txt", "w" )
for count in range( 1, num_days + 1 ):
sales = float( input( "Enter the sales for day #" + str( count ) + ": " ) )
sales_file.write( str( sales ) + "\n" )
sales_file.close()
print( "Data written to sales.txt" )
main()
##>>>
##For how many days do you have sales? 5
##Enter the sales for day #1: 1000
##Enter the sales for day #2: 2000
##Enter the sales for day #3: 3000
##Enter the sales for day #4: 4000
##Enter the sales for day #5: 5000
##Data written to sales.txt
##>>>
morgfost96@gmail.com | /ERP/stock/views.py | sambapython/db16 | no_license | py
from django.shortcuts import render
# Create your views here.
def index_view(request):
return render(request,"stock/index.html")
sambapython@gmail.com | /model/Linear.py | pokleung5/FYP | no_license | py
import torch
from torch import tensor
from torch import nn, optim
from torch.nn import functional as F
import utils
def get_Linear_Sequential(dim: list, activation):
    # Linear(dim[0],dim[1]) -> act -> ... -> Linear(dim[-2],dim[-1]); no activation after the last layer
    nL = len(dim)
return nn.Sequential(
*sum([[
nn.Linear(dim[i], dim[i + 1]),
activation()
] for i in range(0, nL - 2, 1)], []),
nn.Linear(dim[nL - 2], dim[nL - 1])
)
class Linear(nn.Module):
def __init__(self, dim: list,
activation=nn.ReLU, final_activation=None):
super(Linear, self).__init__()
self.encoder = get_Linear_Sequential(dim, activation)
if final_activation is not None:
self.final_act = final_activation()
else:
self.final_act = None
def encode(self, x):
e = self.encoder(x)
if self.final_act is not None:
e = self.final_act(e)
return e
def forward(self, x):
return self.encode(x)
class ReuseLinear(nn.Module):
def __init__(self, N, dim: list, n_reuse, preprocess,
activation=nn.ReLU, final_activation=None):
super(ReuseLinear, self).__init__()
self.encoder = get_Linear_Sequential(dim, activation)
self.N = N
self.n_reuse = n_reuse
self.preprocess = preprocess
if final_activation is not None:
self.final_act = final_activation()
else:
self.final_act = None
def forward(self, x):
batch = x.size()[0]
dm = x
rs = []
for i in range(self.n_reuse):
e = self.encoder(dm)
rs.append(e)
e = e.view(batch, self.N, -1)
dm = utils.get_distanceSq_matrix(e)
dm = torch.sqrt(dm)
dm = self.preprocess(dm)
if self.final_act is not None:
rs[-1] = self.final_act(e)
return rs
class StepLinear(nn.Module):
def __init__(self, dim_list: list,
activation=nn.ReLU, final_activation=None):
super(StepLinear, self).__init__()
self.encoder = get_Linear_Sequential(dim_list[0], activation=activation)
if len(dim_list) > 1:
self.nextStep = StepLinear(dim_list[1:], activation, None)
else:
self.nextStep = None
if final_activation is not None:
self.final_act = final_activation()
else:
self.final_act = None
def forward(self, x):
e = self.encoder(x)
rs = [e]
if self.nextStep is not None:
rs = [e, *self.nextStep(e)]
if self.final_act is not None:
rs[-1] = self.final_act(rs[-1])
return rs
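# Hypothetical usage sketch (not in the original file); assumes the repo's
# `utils` module imported above is available. Builds a 10 -> 32 -> 2 MLP with
# ReLU between layers and no activation on the output.
if __name__ == "__main__":
    net = Linear([10, 32, 2])
    x = torch.randn(4, 10)
    print(net(x).shape)  # expected: torch.Size([4, 2])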
pokleung5-c@my.cityu.edu.hk | /mult_server.py | Phantsure/ICMP-Transfer | no_license | py
# server
import socket
import select
import sys
from thread import *
# server sockets
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# checks for sufficient arguments
if len(sys.argv) != 3:
print "Correct usage: script, IP address, port number"
exit()
# IP address
IP_address = str(sys.argv[1])
# port
Port = int(sys.argv[2])
# binding
server.bind((IP_address, Port))
# start listening
server.listen(100)
list_of_clients = []
def clientthread(conn, addr):
# sends a message
conn.send("Welcome to this chatroom!")
while True:
try:
message = conn.recv(64)
if message:
# print the message
print "<" + addr[0] + "> " + message
# Calls broadcast function to send message to all
message_to_send = "<" + addr[0] + "> " + message
broadcast(message_to_send, conn)
else:
# if empty message then remove
remove(conn)
        except:
            # a broken connection keeps raising; drop the client instead of spinning
            remove(conn)
            break
# broadcast to all clients
def broadcast(message, connection):
for clients in list_of_clients:
if clients!=connection:
try:
clients.send(message)
except:
clients.close()
# if the link is broken, we remove the client
remove(clients)
# function to remove
def remove(connection):
if connection in list_of_clients:
list_of_clients.remove(connection)
while True:
# accept from new client
conn, addr = server.accept()
# append that client to list
list_of_clients.append(conn)
# prints the address of the user that just connected
print addr[0] + " connected"
    # creates an individual thread for every user that connects
start_new_thread(clientthread,(conn,addr))
conn.close()
server.close()
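# Hypothetical companion client sketch (not part of this file): connects,
# prints the welcome line, sends one message, and exits. Python 2, stdlib only.
#
#   import socket, sys
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect((sys.argv[1], int(sys.argv[2])))
#   print s.recv(64)
#   s.send("hello from the sketch client")
#   s.close()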
samparksharma2000@gmail.com | /amnesia/modules/search/views/tag.py | rayddteam/amnesia | BSD-2-Clause | py
# -*- coding: utf-8 -*-
# pylint: disable=E1101
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPNotFound
from amnesia.modules.tag import Tag
from amnesia.modules.search import SearchResource
def includeme(config):
''' Pyramid includeme func'''
config.scan(__name__)
@view_config(context=SearchResource, name='tag', request_method='GET',
renderer='amnesia:templates/search/tag.pt')
def tag(context, request):
tag_id = request.GET.get('id', '').strip()
tag_obj = request.dbsession.query(Tag).get(tag_id)
if not tag_obj:
raise HTTPNotFound()
search_query = context.tag_id(tag_obj, limit=500)
return {
'results': search_query.query.all(),
'count': search_query.count,
'tag': tag_obj
}
julien.cigar@gmail.com | /visiobased_object_placement/scripts/image_crop.py | HassanAmr/Visio-based-Object-Placement | BSD-3-Clause | py
#! /usr/bin/python
import rospy
import cv2
import numpy as np
def crop(image1, image2, threshold=0):
    """Crops away any border rows/columns whose intensity is <= threshold.
    The bounding box is computed from image1 and applied to both images;
    a blank image is cropped to 1x1.
    Returns [cropped_image1, cropped_image2].
    """
inv_image1 = cv2.bitwise_not(image1)
inv_image2 = cv2.bitwise_not(image2)
#image = cv2.imread(inputImg)
if len(inv_image1.shape) == 3:
flatImage1 = np.max(inv_image1, 2)
flatImage2 = np.max(inv_image2, 2)
else:
flatImage1 = inv_image1
flatImage2 = inv_image2
assert len(flatImage1.shape) == 2
    rows = np.where(np.max(flatImage1, 0) > threshold)[0]  # max over axis 0: these are column indices
    if rows.size:
        cols = np.where(np.max(flatImage1, 1) > threshold)[0]  # max over axis 1: these are row indices
inv_image1 = inv_image1[cols[0]: cols[-1] + 1, rows[0]: rows[-1] + 1]
inv_image2 = inv_image2[cols[0]: cols[-1] + 1, rows[0]: rows[-1] + 1]
else:
inv_image1 = inv_image1[:1, :1]
inv_image2 = inv_image2[:1, :1]
return [cv2.bitwise_not(inv_image1),cv2.bitwise_not(inv_image2)]
#img = cv2.imread(image)
#gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#_,thresh = cv2.threshold(gray,1,255,cv2.THRESH_BINARY)
#contours,hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#cnt = contours[0]
#x,y,w,h = cv2.boundingRect(cnt)
#crop = img[y:y+h,x:x+w]
#return crop
#cv2.imwrite('sofwinres.png',crop)
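# Hypothetical usage sketch (not in the original file): crop a pair of
# co-registered images; the file names are placeholders.
if __name__ == "__main__":
    a = cv2.imread('mask.png')
    b = cv2.imread('photo.png')
    cropped_a, cropped_b = crop(a, b)
    cv2.imwrite('mask_cropped.png', cropped_a)
    cv2.imwrite('photo_cropped.png', cropped_b)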
hassan.amr89@gmail.com | /Chapter_2 Softmax Regression/softmax_regression_train.py | TRcoder/Python-Machine-Learning-Algorithm | no_license | py
# coding:UTF-8
'''
Date:20160805
@author: zhaozhiyong
'''
import numpy as np
def load_data(inputfile):
    '''Load the training data
    input:  inputfile(string) path of the training samples
    output: feature_data(mat) features
            label_data(mat) labels
            k(int) number of classes
    '''
    f = open(inputfile)  # open the file
feature_data = []
label_data = []
for line in f.readlines():
feature_tmp = []
        feature_tmp.append(1)  # bias term
lines = line.strip().split("\t")
for i in xrange(len(lines) - 1):
feature_tmp.append(float(lines[i]))
label_data.append(int(lines[-1]))
feature_data.append(feature_tmp)
    f.close()  # close the file
return np.mat(feature_data), np.mat(label_data).T, len(set(label_data))
def cost(err, label_data):
    '''Compute the value of the loss function
    input:  err(mat) the exponentiated scores
            label_data(mat) the labels
    output: sum_cost / m(float) the loss value
    '''
m = np.shape(err)[0]
sum_cost = 0.0
for i in xrange(m):
if err[i, label_data[i, 0]] / np.sum(err[i, :]) > 0:
sum_cost -= np.log(err[i, label_data[i, 0]] / np.sum(err[i, :]))
else:
sum_cost -= 0
return sum_cost / m
def gradientAscent(feature_data, label_data, k, maxCycle, alpha):
    '''Train the Softmax model by gradient descent
    input:  feature_data(mat) features
            label_data(mat) labels
            k(int) number of classes
            maxCycle(int) maximum number of iterations
            alpha(float) learning rate
    output: weights(mat) the weights
    '''
m, n = np.shape(feature_data)
    weights = np.mat(np.ones((n, k)))  # initialize the weights
i = 0
while i <= maxCycle:
err = np.exp(feature_data * weights)
if i % 500 == 0:
print "\t-----iter: ", i , ", cost: ", cost(err, label_data)
rowsum = -err.sum(axis=1)
rowsum = rowsum.repeat(k, axis=1)
err = err / rowsum
for x in range(m):
err[x, label_data[x, 0]] += 1
weights = weights + (alpha / m) * feature_data.T * err
i += 1
return weights
def save_model(file_name, weights):
    '''Save the final model
    input:  file_name(string) name of the file to save to
            weights(mat) the softmax model
    '''
f_w = open(file_name, "w")
m, n = np.shape(weights)
for i in xrange(m):
w_tmp = []
for j in xrange(n):
w_tmp.append(str(weights[i, j]))
f_w.write("\t".join(w_tmp) + "\n")
f_w.close()
if __name__ == "__main__":
inputfile = "SoftInput.txt"
    # 1. load the training data
    print "---------- 1.load data ------------"
feature, label, k = load_data(inputfile)
    # 2. train the Softmax model
    print "---------- 2.training ------------"
weights = gradientAscent(feature, label, k, 5000, 0.2)
    # 3. save the final model
    print "---------- 3.save model ------------"
save_model("weights", weights)
TRcoder.noreply@github.com | /Clustering/DBSCAN.py | anitacsp/FFM-MA | no_license | py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from mpl_toolkits.mplot3d import Axes3D
dirty = pd.read_csv(r"C:\Users\chias\source\repos\FFM-MA\oil.csv")
#data = dirty.loc[:, ['growth', 'inflation', 'return', 'type']]
x = StandardScaler().fit_transform(dirty)
dbscan = DBSCAN(eps=0.3, min_samples = 2)
model = dbscan.fit(x)
labels = model.labels_
print(labels)
core_samples = np.zeros_like(labels, dtype=bool)
core_samples[model.core_sample_indices_] = True
num_clusters = len(set(labels)) - (1 if -1 in labels else 0)
print(num_clusters)
xx, yy, zz, aa, bb = zip(*x)
#xx = np.arange(-3,3, 0.25)
#yy = np.arange(-3,3, 0.25)
#zz = np.arange(-3,3, 0.25)
#aa = np.arange(-3,3, 0.25)
#bb = np.arange(-3,3, 0.25)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xx, yy, zz, c=bb, cmap='hot')
plt.show()
#noNoise = list(labels).count(-1)
#print('Est No. of Clusters: %d'% num_clusters)
#print('Est No. of Noise: %d'% noNoise)
#print('Homogeneity: %0.3f'% metrics.homogeneity_score(true_labels, labels))
anita.chia.2016@sis.smu.edu.sg | /hw8/problemone.py | MathematicianVogt/Control | no_license | py
import numpy as np
import scipy.integrate as integrate
import scipy.interpolate as interpolate
import pylab as plt
import scipy.optimize as op
import math
def make_cons(parameter_guess):
    cons=()
    for i in range(0,len(parameter_guess)):
        # default argument binds i now; a bare closure would see only the final i
        constraint = {'type': 'ineq', 'fun': lambda x, i=i: -math.fabs(x[i]) + 1 }
        cons +=(constraint,)
    # print cons
    #cons=({'type': 'ineq', 'fun': lambda parameter_guess: -parameter_guess+ 1 })
    return cons
def bnds(parameter_guess):
bnds=()
for i in range(0,len(parameter_guess)):
bnds +=((-1.0,1.0),)
print bnds
return bnds
def problem(N,IC):
t=np.linspace(0,5,1000)
tt=np.linspace(0,5,N+1)
parameter_guess = .5*np.ones(len(tt))
res=op.minimize(cost_function, parameter_guess, args=(t,tt,IC), method='SLSQP',bounds=bnds(parameter_guess))
true_param= res.x
print res.message
print true_param
generate_state_and_control(true_param,t,tt,IC)
def cost_function(parameter_guess,t,tt,IC):
#print parameter_guess
f_p = interpolate.interp1d(tt, parameter_guess)
sol = integrate.odeint(f, [IC[0],IC[1],0], t, args=(f_p,))
cost_sol = sol[:,2]
cost=cost_sol[-1]
print 'cost ' + str(cost)
return cost
def f(y,t,f_p):
if t<5.0:
dydt=[-y[0] +2*y[1] , y[0] -.2*y[1] + f_p(t), .5*(y[0]**2 + 2*y[1]**2 + 3*f_p(t)**2)]
else:
dydt=[-y[0] +2*y[1] , y[0] -.2*y[1] + f_p(5), .5*(y[0]**2 + 2*y[1]**2 + 3*f_p(5)**2)]
return dydt
def generate_state_and_control(parameters,t,tt,IC):
f_p = interpolate.interp1d(tt, parameters)
sol = integrate.odeint(f, [IC[0],IC[1],0], t, args=(f_p,))
control=f_p(t)
position=sol[:,0]
velocity=sol[:,1]
cost_sol = sol[:,2]
cost=cost_sol[-1]
print 'cost ' + str(cost)
print parameters
plt.plot(tt,parameters,label='Control')
plt.xlabel('time')
plt.ylabel('u')
plt.title('Control')
plt.show()
plt.clf()
plt.plot(position,velocity,label='Velocity vs Position')
plt.xlabel('Position')
plt.ylabel('Velocity')
plt.title('Velocity vs Position')
plt.show()
plt.clf()
problem(5,[.1,.1])
problem(5,[3,6])
problem(15,[.1,.1])
problem(15,[3,6])
rvogt@MathVogt.local | /py-ping2.py | alarmon/self-lab | no_license | py
#!/usr/bin/python3.6.4
#!coding:utf-8
__author__ = 'Rosefinch'
__date__ = '2018/5/31 22:27'
import time
import struct
import socket
import select
import sys
def checksum(data):
    """
    Compute the ICMP checksum of the given bytes
    """
n = len(data)
m = n % 2
sum = 0
for i in range(0, n - m ,2):
        sum += (data[i]) + ((data[i+1]) << 8)  # fold data two bytes at a time: first byte in the low bits, second byte in the high bits
if m:
sum += (data[-1])
    # fold the carry above 16 bits back into the low 16 bits
    sum = (sum >> 16) + (sum & 0xffff)
    sum += (sum >> 16)  # if there is still a carry above 16 bits, add it in too
answer = ~sum & 0xffff
    # host byte order to network byte order (little-endian to big-endian)
answer = answer >> 8 | (answer << 8 & 0xff00)
return answer
'''
Open a raw socket and send the packet to the destination
'''
def raw_socket(dst_addr,imcp_packet):
rawsocket = socket.socket(socket.AF_INET,socket.SOCK_RAW,socket.getprotobyname("icmp"))
send_request_ping_time = time.time()
#send data to the socket
rawsocket.sendto(imcp_packet,(dst_addr,80))
return send_request_ping_time,rawsocket,dst_addr
'''
request ping
'''
def request_ping(data_type,data_code,data_checksum,data_ID,data_Sequence,payload_body):
    # pack the fields into binary data
imcp_packet = struct.pack('>BBHHH32s',data_type,data_code,data_checksum,data_ID,data_Sequence,payload_body)
    icmp_checksum = checksum(imcp_packet)  # compute the checksum
    imcp_packet = struct.pack('>BBHHH32s',data_type,data_code,icmp_checksum,data_ID,data_Sequence,payload_body)
return imcp_packet
'''
reply ping
'''
def reply_ping(send_request_ping_time,rawsocket,data_Sequence,timeout = 2):
while True:
started_select = time.time()
what_ready = select.select([rawsocket], [], [], timeout)
wait_for_time = (time.time() - started_select)
if what_ready[0] == []: # Timeout
return -1
time_received = time.time()
received_packet, addr = rawsocket.recvfrom(1024)
icmpHeader = received_packet[20:28]
type, code, checksum, packet_id, sequence = struct.unpack(
">BBHHH", icmpHeader
)
if type == 0 and sequence == data_Sequence:
return time_received - send_request_ping_time
timeout = timeout - wait_for_time
if timeout <= 0:
return -1
'''
Ping a host/IP
'''
def ping(host):
data_type = 8 # ICMP Echo Request
data_code = 0 # must be zero
data_checksum = 0 # "...with value 0 substituted for this field..."
data_ID = 0 #Identifier
data_Sequence = 1 #Sequence number
payload_body = b'abcdefghijklmnopqrstuvwabcdefghi' #data
    dst_addr = socket.gethostbyname(host)  # resolve the host name to an IPv4 address string; an IPv4 literal is returned unchanged
print("正在 Ping {0} [{1}] 具有 32 字节的数据:".format(host,dst_addr))
for i in range(0,4):
icmp_packet = request_ping(data_type,data_code,data_checksum,data_ID,data_Sequence + i,payload_body)
send_request_ping_time,rawsocket,addr = raw_socket(dst_addr,icmp_packet)
times = reply_ping(send_request_ping_time,rawsocket,data_Sequence + i)
if times > 0:
print("来自 {0} 的回复: 字节=32 时间={1}ms".format(addr,int(times*1000)))
time.sleep(0.7)
else:
print("请求超时。")
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.exit('Usage: ping.py <host>')
ping(sys.argv[1])
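# Usage sketch (raw ICMP sockets generally require elevated privileges):
#   sudo python py-ping2.py example.com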
alarmon.noreply@github.com | /myproject/myapp/migrations/0002_auto_20201007_1727.py | Ankit-Developer143/Internshala-task | no_license | py
# Generated by Django 3.1.2 on 2020-10-07 11:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myapp', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='data',
name='time_date',
),
migrations.RemoveField(
model_name='data',
name='timestamp',
),
]
dante7785@gmail.com | /Rabbit/getstockbasic.py | yjintai/jypython | no_license | py
#!/usr/bin/python3
# coding:utf-8
# -*- coding: utf-8 -*-
import time
import datetime
import random
import tushare
import pandas
#import pymssql
#import sqlalchemy
#import mysql.connector
import sqlalchemy
import pymysql
# parameters that need to be adjusted
stock_list_file = 'd:/stock_list.csv'
databasename = 'msstock'
sqlenginestr='mysql+pymysql://pyuser:Pyuser18@127.0.0.1/'+databasename+'?charset=utf8mb4'
#tushare token
tushare_token='e239683c699765e4e49b43dff2cf7ed7fc232cc49f7992dab1ab7624'
# stock list
def initiate():
    # initialize tushare
tushare.set_token(tushare_token)
engine=sqlalchemy.create_engine(sqlenginestr)
return engine
def get_stock_basic(engine = sqlenginestr,schema = databasename):
print('start to download stock_basic data')
pro = tushare.pro_api()
df = pro.stock_basic(fields='ts_code,symbol,name,area,industry,fullname,cnspell,market,exchange,curr_type,list_status,list_date,delist_date,is_hs')
try:
pandas.io.sql.to_sql(frame=df, name='tb_stock_basic', con=engine, schema= schema, if_exists='replace', index=True)
except:
print('To SQL Database Failed')
finally:
pass
    print('stock_basic data downloaded successfully!')
return 1
def get_trade_cal(engine = sqlenginestr,schema = databasename):
print('start to download trade_cal data')
date_now = datetime.datetime.now().strftime('%Y%m%d')
pro = tushare.pro_api()
df = pro.trade_cal(start_date='20200101', end_date=date_now, fields='exchange,cal_date,is_open')
try:
pandas.io.sql.to_sql(frame=df, name='tb_trade_cal', con=engine, schema= schema, if_exists='replace', index=True)
except:
print('To SQL Database Failed')
finally:
pass
    print('trade_cal data downloaded successfully!')
return 1
# full download of the complete stock list
if __name__ == '__main__':
    print('start')
engine = initiate()
    print('fetching stock list...')
get_stock_basic(engine,databasename)
get_trade_cal(engine,databasename)
    print('done')
yjintai@126.com | /pysnmp-with-texts/ASCEND-MIBDMTALNET-MIB.py | agustinhenze/mibs.snmplabs.com | Apache-2.0 | py
#
# PySNMP MIB module ASCEND-MIBDMTALNET-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ASCEND-MIBDMTALNET-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:27:06 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
configuration, = mibBuilder.importSymbols("ASCEND-MIB", "configuration")
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Gauge32, TimeTicks, Counter64, MibIdentifier, ObjectIdentity, Bits, ModuleIdentity, NotificationType, Unsigned32, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, iso, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "TimeTicks", "Counter64", "MibIdentifier", "ObjectIdentity", "Bits", "ModuleIdentity", "NotificationType", "Unsigned32", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "iso", "Counter32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class DisplayString(OctetString):
pass
mibdmtAlDslNetworkProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 10))
mibdmtAlDslNetworkProfileTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 10, 1), )
if mibBuilder.loadTexts: mibdmtAlDslNetworkProfileTable.setStatus('mandatory')
if mibBuilder.loadTexts: mibdmtAlDslNetworkProfileTable.setDescription('A list of mibdmtAlDslNetworkProfile profile entries.')
mibdmtAlDslNetworkProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1), ).setIndexNames((0, "ASCEND-MIBDMTALNET-MIB", "dmtAlDslNetworkProfile-Shelf-o"), (0, "ASCEND-MIBDMTALNET-MIB", "dmtAlDslNetworkProfile-Slot-o"), (0, "ASCEND-MIBDMTALNET-MIB", "dmtAlDslNetworkProfile-Item-o"))
if mibBuilder.loadTexts: mibdmtAlDslNetworkProfileEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mibdmtAlDslNetworkProfileEntry.setDescription('A mibdmtAlDslNetworkProfile entry containing objects that maps to the parameters of mibdmtAlDslNetworkProfile profile.')
dmtAlDslNetworkProfile_Shelf_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 1), Integer32()).setLabel("dmtAlDslNetworkProfile-Shelf-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_Shelf_o.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_Shelf_o.setDescription('')
dmtAlDslNetworkProfile_Slot_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 2), Integer32()).setLabel("dmtAlDslNetworkProfile-Slot-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_Slot_o.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_Slot_o.setDescription('')
dmtAlDslNetworkProfile_Item_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 3), Integer32()).setLabel("dmtAlDslNetworkProfile-Item-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_Item_o.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_Item_o.setDescription('')
dmtAlDslNetworkProfile_Name = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 4), DisplayString()).setLabel("dmtAlDslNetworkProfile-Name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_Name.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_Name.setDescription('For future use. The current design does not use the name field but instead references Cell Based Dmt Adsl lines by the physical address; we may in the future support referencing Cell Based Dmt Adsl lines by name as well as by address. The name consists of a null terminated ascii string supplied by the user; it defaults to the ascii form of the Cell Based Dmt Adsl line physical address.')
dmtAlDslNetworkProfile_PhysicalAddress_Shelf = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("anyShelf", 1), ("shelf1", 2), ("shelf2", 3), ("shelf3", 4), ("shelf4", 5), ("shelf5", 6), ("shelf6", 7), ("shelf7", 8), ("shelf8", 9), ("shelf9", 10)))).setLabel("dmtAlDslNetworkProfile-PhysicalAddress-Shelf").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_PhysicalAddress_Shelf.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_PhysicalAddress_Shelf.setDescription('The number of the shelf that the addressed physical device resides on.')
dmtAlDslNetworkProfile_PhysicalAddress_Slot = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 55, 56, 57, 58, 49, 50, 42, 53, 54, 45, 46, 51, 59))).clone(namedValues=NamedValues(("anySlot", 1), ("slot1", 2), ("slot2", 3), ("slot3", 4), ("slot4", 5), ("slot5", 6), ("slot6", 7), ("slot7", 8), ("slot8", 9), ("slot9", 10), ("slot10", 11), ("slot11", 12), ("slot12", 13), ("slot13", 14), ("slot14", 15), ("slot15", 16), ("slot16", 17), ("slot17", 18), ("slot18", 19), ("slot19", 20), ("slot20", 21), ("slot21", 22), ("slot22", 23), ("slot23", 24), ("slot24", 25), ("slot25", 26), ("slot26", 27), ("slot27", 28), ("slot28", 29), ("slot29", 30), ("slot30", 31), ("slot31", 32), ("slot32", 33), ("slot33", 34), ("slot34", 35), ("slot35", 36), ("slot36", 37), ("slot37", 38), ("slot38", 39), ("slot39", 40), ("slot40", 41), ("aLim", 55), ("bLim", 56), ("cLim", 57), ("dLim", 58), ("leftController", 49), ("rightController", 50), ("controller", 42), ("firstControlModule", 53), ("secondControlModule", 54), ("trunkModule1", 45), ("trunkModule2", 46), ("controlModule", 51), ("slotPrimary", 59)))).setLabel("dmtAlDslNetworkProfile-PhysicalAddress-Slot").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_PhysicalAddress_Slot.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_PhysicalAddress_Slot.setDescription('The number of the slot that the addressed physical device resides on.')
dmtAlDslNetworkProfile_PhysicalAddress_ItemNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 7), Integer32()).setLabel("dmtAlDslNetworkProfile-PhysicalAddress-ItemNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_PhysicalAddress_ItemNumber.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_PhysicalAddress_ItemNumber.setDescription('A number that specifies an addressable entity within the context of shelf and slot.')
dmtAlDslNetworkProfile_Enabled = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("dmtAlDslNetworkProfile-Enabled").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_Enabled.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_Enabled.setDescription('TRUE if the line is enabled, otherwise FALSE.')
dmtAlDslNetworkProfile_SparingMode = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 63), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inactive", 1), ("manual", 2), ("automatic", 3)))).setLabel("dmtAlDslNetworkProfile-SparingMode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_SparingMode.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_SparingMode.setDescription('Port sparing operational mode for this port.')
dmtAlDslNetworkProfile_ProfileNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 9), Integer32()).setLabel("dmtAlDslNetworkProfile-ProfileNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_ProfileNumber.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_ProfileNumber.setDescription('For potential backwards compatibility. The current design consists of one line profile numbered 0.')
dmtAlDslNetworkProfile_IgnoreLineup = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 73), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("systemDefined", 1), ("no", 2), ("yes", 3)))).setLabel("dmtAlDslNetworkProfile-IgnoreLineup").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_IgnoreLineup.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_IgnoreLineup.setDescription('Ignore line up value for this port.')
dmtAlDslNetworkProfile_LineConfig_NailedGroup = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 11), Integer32()).setLabel("dmtAlDslNetworkProfile-LineConfig-NailedGroup").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_NailedGroup.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_NailedGroup.setDescription('A number that identifies the set of lines that makes up a nailed group. 0 means this line is not part of a nailed group.')
dmtAlDslNetworkProfile_LineConfig_VpSwitchingVpi = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 55), Integer32()).setLabel("dmtAlDslNetworkProfile-LineConfig-VpSwitchingVpi").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_VpSwitchingVpi.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_VpSwitchingVpi.setDescription('The Vpi to be used for the VP switching. Rest of the VPIs within valid vpi-vci-range will be used for the VC switching. Changes in this range will take effect immediately. THE USER SHOULD BE VERY CAREFUL WHILE CHANGING THIS VALUE BECAUSE ALL CONNECTIONS ON THE LIM WHERE THIS PORT BELONGS WILL BE DROPPED IN ORDER TO MAKE THIS NEW VALUE EFFECTIVE IMMEDIATELY.')
dmtAlDslNetworkProfile_LineConfig_RateAdaptModeUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("operator", 1), ("automaticAtStartup", 2), ("dynamic", 3)))).setLabel("dmtAlDslNetworkProfile-LineConfig-RateAdaptModeUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_RateAdaptModeUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_RateAdaptModeUp.setDescription('The up stream rate adaptive mode of operation. ONLY OPERATOR_CONTROLLED and AUTOMATIC_AT_STARTUP are currently supported. ')
dmtAlDslNetworkProfile_LineConfig_RateAdaptModeDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("operator", 1), ("automaticAtStartup", 2), ("dynamic", 3)))).setLabel("dmtAlDslNetworkProfile-LineConfig-RateAdaptModeDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_RateAdaptModeDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_RateAdaptModeDown.setDescription('The down stream rate adaptive mode of operation. ONLY OPERATOR_CONTROLLED and AUTOMATIC_AT_STARTUP are currently supported. ')
dmtAlDslNetworkProfile_LineConfig_RateAdaptRatioUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 21), Integer32()).setLabel("dmtAlDslNetworkProfile-LineConfig-RateAdaptRatioUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_RateAdaptRatioUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_RateAdaptRatioUp.setDescription('The ratio in percent of excess bitrate distribution over the up stream fast and interleaved latencies. 100% - fast path , 0% - interleaved path. Valid ONLY in rate-adapt-mode = AUTOMATIC_AT_STARTUP or DYNAMIC . ')
dmtAlDslNetworkProfile_LineConfig_RateAdaptRatioDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 22), Integer32()).setLabel("dmtAlDslNetworkProfile-LineConfig-RateAdaptRatioDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_RateAdaptRatioDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_RateAdaptRatioDown.setDescription('The ratio in percent of excess bitrate distribution over the down stream fast and interleaved latencies. 100% - fast path , 0% - interleaved path. Valid ONLY in rate-adapt-mode = AUTOMATIC_AT_STARTUP or DYNAMIC . ')
dmtAlDslNetworkProfile_LineConfig_MaxAggrPowerLevelUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 56), Integer32()).setLabel("dmtAlDslNetworkProfile-LineConfig-MaxAggrPowerLevelUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_MaxAggrPowerLevelUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_MaxAggrPowerLevelUp.setDescription('The maximum aggregate output power in dBm allowed on the line in the up stream direction. Increasing value may result in capacity boosting. ')
dmtAlDslNetworkProfile_LineConfig_MaxAggrPowerLevelDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 57), Integer32()).setLabel("dmtAlDslNetworkProfile-LineConfig-MaxAggrPowerLevelDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_MaxAggrPowerLevelDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_MaxAggrPowerLevelDown.setDescription('The maximum aggregate output power in dBm allowed on the line in the down stream direction. Increasing value may result in capacity boosting. ')
dmtAlDslNetworkProfile_LineConfig_MaxPowerSpectralDensity = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 25), Integer32()).setLabel("dmtAlDslNetworkProfile-LineConfig-MaxPowerSpectralDensity").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_MaxPowerSpectralDensity.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_MaxPowerSpectralDensity.setDescription('The power spectral density in dBm/Hz allowed on the line. Decreasing the value may reduce capacity. Defined for downstream only. Actual value is negative.')
dmtAlDslNetworkProfile_LineConfig_LineCode = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 58), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(3, 2, 4, 5, 6, 7))).clone(namedValues=NamedValues(("autoSelect", 3), ("gLite", 2), ("ansiDmt", 4), ("gDmt", 5), ("legacyMode", 6), ("etsiAnnexB", 7)))).setLabel("dmtAlDslNetworkProfile-LineConfig-LineCode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_LineCode.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_LineCode.setDescription('The DMT line code to be used for training. NOTE: for the ADSL 12-port LIM card only (which uses the ALCATEL chipset), setting the line code to ansi-dmt will provide better line rate than auto-select for an ansi-dmt link. Therefore, set the line code to ansi-dmt for an ansi-dmt link to obtain the optimal rate.')
dmtAlDslNetworkProfile_LineConfig_LineLatencyDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 59), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("fast", 2), ("interleave", 3), ("both", 4)))).setLabel("dmtAlDslNetworkProfile-LineConfig-LineLatencyDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_LineLatencyDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_LineLatencyDown.setDescription('The DMT line latency to be used for the downstream data transport.')
dmtAlDslNetworkProfile_LineConfig_LineLatencyUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 60), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("fast", 2), ("interleave", 3), ("both", 4)))).setLabel("dmtAlDslNetworkProfile-LineConfig-LineLatencyUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_LineLatencyUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_LineLatencyUp.setDescription('The DMT line latency to be used for the upstream data transport.')
dmtAlDslNetworkProfile_LineConfig_TrellisEncoding = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 61), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("dmtAlDslNetworkProfile-LineConfig-TrellisEncoding").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_TrellisEncoding.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_TrellisEncoding.setDescription('TRUE if trellis encoding is to be enabled, FALSE otherwise.')
dmtAlDslNetworkProfile_LineConfig_GainDefault = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 62), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 1))).clone(namedValues=NamedValues(("n-20Db", 2), ("n-16Db", 1)))).setLabel("dmtAlDslNetworkProfile-LineConfig-GainDefault").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_GainDefault.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_GainDefault.setDescription('The default gain value in dB to be used for the AGC.')
dmtAlDslNetworkProfile_LineConfig_UpstreamStartBin = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 64), Integer32()).setLabel("dmtAlDslNetworkProfile-LineConfig-UpstreamStartBin").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_UpstreamStartBin.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_UpstreamStartBin.setDescription('The starting upstream frequency bin.')
dmtAlDslNetworkProfile_LineConfig_UpstreamEndBin = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 65), Integer32()).setLabel("dmtAlDslNetworkProfile-LineConfig-UpstreamEndBin").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_UpstreamEndBin.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_UpstreamEndBin.setDescription('The ending upstream frequency bin.')
dmtAlDslNetworkProfile_LineConfig_DownstreamStartBin = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 66), Integer32()).setLabel("dmtAlDslNetworkProfile-LineConfig-DownstreamStartBin").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_DownstreamStartBin.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_DownstreamStartBin.setDescription('The starting downstream frequency bin.')
dmtAlDslNetworkProfile_LineConfig_DownstreamEndBin = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 67), Integer32()).setLabel("dmtAlDslNetworkProfile-LineConfig-DownstreamEndBin").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_DownstreamEndBin.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_DownstreamEndBin.setDescription('The ending downstream frequency bin.')
dmtAlDslNetworkProfile_LineConfig_LoopBack = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 69), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("analog", 2), ("digital", 3)))).setLabel("dmtAlDslNetworkProfile-LineConfig-LoopBack").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_LoopBack.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_LoopBack.setDescription('Configuration of different modem loopbacks.')
dmtAlDslNetworkProfile_LineConfig_BitSwapping = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 70), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("dmtAlDslNetworkProfile-LineConfig-BitSwapping").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_BitSwapping.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_BitSwapping.setDescription('Controls whether Bit-Swapping is enabled or not. Has no effect on the 12-port DMT card and the 48-port G.lite card.')
dmtAlDslNetworkProfile_LineConfig_FbmDbmMode = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 71), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("fbm", 1), ("dbm", 2)))).setLabel("dmtAlDslNetworkProfile-LineConfig-FbmDbmMode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_FbmDbmMode.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_FbmDbmMode.setDescription('Controls whether the line is in Fixed-Bit-Map or Dual-Bit-Map mode. Only relevant for Annex-C cards.')
dmtAlDslNetworkProfile_LineConfig_AlcatelUs413Boost = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 74), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("new", 1), ("old", 2), ("unknown", 3)))).setLabel("dmtAlDslNetworkProfile-LineConfig-AlcatelUs413Boost").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_AlcatelUs413Boost.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_LineConfig_AlcatelUs413Boost.setDescription('Provides an increase in upstream rate in T1.413 mode for 24/48 Port Annex A boards based on the Globespan chip set when connected to an Alcatel CPE. Irrelevant for any other situation. Use with extreme caution.')
dmtAlDslNetworkProfile_FastPathConfig_MinBitrateUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 26), Integer32()).setLabel("dmtAlDslNetworkProfile-FastPathConfig-MinBitrateUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_FastPathConfig_MinBitrateUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_FastPathConfig_MinBitrateUp.setDescription('The up stream minimum requested bitrate, in Kbps. NOT valid in rate-adapt-mode = OPERATOR_CONTROLLED. ')
dmtAlDslNetworkProfile_FastPathConfig_MinBitrateDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 27), Integer32()).setLabel("dmtAlDslNetworkProfile-FastPathConfig-MinBitrateDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_FastPathConfig_MinBitrateDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_FastPathConfig_MinBitrateDown.setDescription('The down stream minimum requested bitrate, in Kbps. NOT valid in rate-adapt-mode = OPERATOR_CONTROLLED. ')
dmtAlDslNetworkProfile_FastPathConfig_MaxBitrateUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 28), Integer32()).setLabel("dmtAlDslNetworkProfile-FastPathConfig-MaxBitrateUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_FastPathConfig_MaxBitrateUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_FastPathConfig_MaxBitrateUp.setDescription('The up stream maximum requested bitrate, in Kbps. NOT valid in rate-adapt-mode = OPERATOR_CONTROLLED. ')
dmtAlDslNetworkProfile_FastPathConfig_MaxBitrateDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 29), Integer32()).setLabel("dmtAlDslNetworkProfile-FastPathConfig-MaxBitrateDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_FastPathConfig_MaxBitrateDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_FastPathConfig_MaxBitrateDown.setDescription('The down stream maximum requested bitrate, in Kbps. NOT valid in rate-adapt-mode = OPERATOR_CONTROLLED. ')
dmtAlDslNetworkProfile_FastPathConfig_PlannedBitrateUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 30), Integer32()).setLabel("dmtAlDslNetworkProfile-FastPathConfig-PlannedBitrateUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_FastPathConfig_PlannedBitrateUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_FastPathConfig_PlannedBitrateUp.setDescription('The up stream rate that will be used, in Kbps. ONLY valid in rate-adapt-mode = OPERATOR_CONTROLLED. ')
dmtAlDslNetworkProfile_FastPathConfig_PlannedBitrateDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 31), Integer32()).setLabel("dmtAlDslNetworkProfile-FastPathConfig-PlannedBitrateDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_FastPathConfig_PlannedBitrateDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_FastPathConfig_PlannedBitrateDown.setDescription('The down stream rate that will be used, in Kbps. ONLY valid in rate-adapt-mode = OPERATOR_CONTROLLED. ')
dmtAlDslNetworkProfile_InterleavePathConfig_MinBitrateUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 32), Integer32()).setLabel("dmtAlDslNetworkProfile-InterleavePathConfig-MinBitrateUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_MinBitrateUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_MinBitrateUp.setDescription('The up stream minimum requested bitrate, in Kbps. NOT valid in rate-adapt-mode = OPERATOR_CONTROLLED. ')
dmtAlDslNetworkProfile_InterleavePathConfig_MinBitrateDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 33), Integer32()).setLabel("dmtAlDslNetworkProfile-InterleavePathConfig-MinBitrateDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_MinBitrateDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_MinBitrateDown.setDescription('The down stream minimum requested bitrate, in Kbps. NOT valid in rate-adapt-mode = OPERATOR_CONTROLLED. ')
dmtAlDslNetworkProfile_InterleavePathConfig_MaxBitrateUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 34), Integer32()).setLabel("dmtAlDslNetworkProfile-InterleavePathConfig-MaxBitrateUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_MaxBitrateUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_MaxBitrateUp.setDescription('The up stream maximum requested bitrate, in Kbps. NOT valid in rate-adapt-mode = OPERATOR_CONTROLLED. ')
dmtAlDslNetworkProfile_InterleavePathConfig_MaxBitrateDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 35), Integer32()).setLabel("dmtAlDslNetworkProfile-InterleavePathConfig-MaxBitrateDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_MaxBitrateDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_MaxBitrateDown.setDescription('The down stream maximum requested bitrate, in Kbps. NOT valid in rate-adapt-mode = OPERATOR_CONTROLLED. ')
dmtAlDslNetworkProfile_InterleavePathConfig_PlannedBitrateUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 36), Integer32()).setLabel("dmtAlDslNetworkProfile-InterleavePathConfig-PlannedBitrateUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_PlannedBitrateUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_PlannedBitrateUp.setDescription('The up stream rate that will be used, in Kbps. ONLY valid in rate-adapt-mode = OPERATOR_CONTROLLED. ')
dmtAlDslNetworkProfile_InterleavePathConfig_PlannedBitrateDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 37), Integer32()).setLabel("dmtAlDslNetworkProfile-InterleavePathConfig-PlannedBitrateDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_PlannedBitrateDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_PlannedBitrateDown.setDescription('The down stream rate that will be used, in Kbps. ONLY valid in rate-adapt-mode = OPERATOR_CONTROLLED. ')
dmtAlDslNetworkProfile_InterleavePathConfig_MaxDelayUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 38), Integer32()).setLabel("dmtAlDslNetworkProfile-InterleavePathConfig-MaxDelayUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_MaxDelayUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_MaxDelayUp.setDescription('The maximum allowed up stream interleaver induced delay, in msec. ')
dmtAlDslNetworkProfile_InterleavePathConfig_MaxDelayDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 39), Integer32()).setLabel("dmtAlDslNetworkProfile-InterleavePathConfig-MaxDelayDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_MaxDelayDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_InterleavePathConfig_MaxDelayDown.setDescription('The maximum allowed down stream interleaver induced delay, in msec. ')
dmtAlDslNetworkProfile_MarginConfig_TargetNoiseMarginUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 40), Integer32()).setLabel("dmtAlDslNetworkProfile-MarginConfig-TargetNoiseMarginUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_TargetNoiseMarginUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_TargetNoiseMarginUp.setDescription('The up stream noise margin in dB that the modem shall achieve relative to BER 10^-7. ')
dmtAlDslNetworkProfile_MarginConfig_TargetNoiseMarginDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 41), Integer32()).setLabel("dmtAlDslNetworkProfile-MarginConfig-TargetNoiseMarginDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_TargetNoiseMarginDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_TargetNoiseMarginDown.setDescription('The down stream noise margin in dB that the modem shall achieve relative to BER 10^-7. ')
dmtAlDslNetworkProfile_MarginConfig_MinNoiseMarginUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 42), Integer32()).setLabel("dmtAlDslNetworkProfile-MarginConfig-MinNoiseMarginUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_MinNoiseMarginUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_MinNoiseMarginUp.setDescription('The up stream minimum noise margin in dB that the modem shall tolerate relative to BER 10^-7. If current noise margin falls below this level the ATU shall attempt to increase far-end output power to get margin above this limit, by means of bit swapping. ')
dmtAlDslNetworkProfile_MarginConfig_MinNoiseMarginDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 43), Integer32()).setLabel("dmtAlDslNetworkProfile-MarginConfig-MinNoiseMarginDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_MinNoiseMarginDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_MinNoiseMarginDown.setDescription('The down stream minimum noise margin in dB that the modem shall tolerate relative to BER 10^-7. If current noise margin falls below this level the ATU shall attempt to increase far-end output power to get margin above this limit, by means of bit swapping. ')
dmtAlDslNetworkProfile_MarginConfig_MaxAddNoiseMarginUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 44), Integer32()).setLabel("dmtAlDslNetworkProfile-MarginConfig-MaxAddNoiseMarginUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_MaxAddNoiseMarginUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_MaxAddNoiseMarginUp.setDescription('The up stream maximum noise margin in dB on top of the target-noise-margin that the modem shall tolerate relative to BER 10^-7. If current noise margin is above this level the ATU shall attempt to reduce far-end output power to get margin below this limit, by means of bit swapping. ')
dmtAlDslNetworkProfile_MarginConfig_MaxAddNoiseMarginDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 45), Integer32()).setLabel("dmtAlDslNetworkProfile-MarginConfig-MaxAddNoiseMarginDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_MaxAddNoiseMarginDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_MaxAddNoiseMarginDown.setDescription('The down stream maximum noise margin in dB on top of the target-noise-margin that the modem shall tolerate relative to BER 10^-7. If current noise margin is above this level the ATU shall attempt to reduce far-end output power to get margin below this limit, by means of bit swapping. ')
dmtAlDslNetworkProfile_MarginConfig_RaDownshiftMarginUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 46), Integer32()).setLabel("dmtAlDslNetworkProfile-MarginConfig-RaDownshiftMarginUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaDownshiftMarginUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaDownshiftMarginUp.setDescription('Valid ONLY in rate-adapt-mode = DYNAMIC. In dB. If current up stream noise margin is below this parameter for more than ra-downshift-int-up sec, modem shall attempt to rate adapt (bitrate down). ')
dmtAlDslNetworkProfile_MarginConfig_RaDownshiftIntUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 47), Integer32()).setLabel("dmtAlDslNetworkProfile-MarginConfig-RaDownshiftIntUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaDownshiftIntUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaDownshiftIntUp.setDescription('Valid ONLY in rate-adapt-mode = DYNAMIC. In seconds. If current up stream noise margin is below ra-downshift-margin-up for more than this, modem shall attempt to rate adapt (bitrate down). ')
dmtAlDslNetworkProfile_MarginConfig_RaDownshiftMarginDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 48), Integer32()).setLabel("dmtAlDslNetworkProfile-MarginConfig-RaDownshiftMarginDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaDownshiftMarginDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaDownshiftMarginDown.setDescription('Valid ONLY in rate-adapt-mode = DYNAMIC. In dB. If current down stream noise margin is below this parameter for more than ra-downshift-int-down sec, modem shall attempt to rate adapt (bitrate down). ')
dmtAlDslNetworkProfile_MarginConfig_RaDownshiftIntDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 49), Integer32()).setLabel("dmtAlDslNetworkProfile-MarginConfig-RaDownshiftIntDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaDownshiftIntDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaDownshiftIntDown.setDescription('Valid ONLY in rate-adapt-mode = DYNAMIC. In seconds. If current down stream noise margin is below ra-downshift-margin-down for more than this, modem shall attempt to rate adapt (bitrate down). ')
dmtAlDslNetworkProfile_MarginConfig_RaUpshiftMarginUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 50), Integer32()).setLabel("dmtAlDslNetworkProfile-MarginConfig-RaUpshiftMarginUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaUpshiftMarginUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaUpshiftMarginUp.setDescription('Valid ONLY in rate-adapt-mode = DYNAMIC. In dB. If current up stream noise margin is above this parameter for more than ra-upshift-int-up sec, modem shall attempt to rate adapt (bitrate up). ')
dmtAlDslNetworkProfile_MarginConfig_RaUpshiftIntUp = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 51), Integer32()).setLabel("dmtAlDslNetworkProfile-MarginConfig-RaUpshiftIntUp").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaUpshiftIntUp.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaUpshiftIntUp.setDescription('Valid ONLY in rate-adapt-mode = DYNAMIC. In seconds. If current up stream noise margin is above ra-upshift-margin-up for more than this, modem shall attempt to rate adapt (bitrate up). ')
dmtAlDslNetworkProfile_MarginConfig_RaUpshiftMarginDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 52), Integer32()).setLabel("dmtAlDslNetworkProfile-MarginConfig-RaUpshiftMarginDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaUpshiftMarginDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaUpshiftMarginDown.setDescription('Valid ONLY in rate-adapt-mode = DYNAMIC. In dB. If current down stream noise margin is above this parameter for more than ra-upshift-int-down sec, modem shall attempt to rate adapt (bitrate up). ')
dmtAlDslNetworkProfile_MarginConfig_RaUpshiftIntDown = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 53), Integer32()).setLabel("dmtAlDslNetworkProfile-MarginConfig-RaUpshiftIntDown").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaUpshiftIntDown.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_MarginConfig_RaUpshiftIntDown.setDescription('Valid ONLY in rate-adapt-mode = DYNAMIC. In seconds. If current down stream noise margin is above ra-upshift-margin-down for more than this, modem shall attempt to rate adapt (bitrate up). ')
dmtAlDslNetworkProfile_ThreshProfile = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 72), DisplayString()).setLabel("dmtAlDslNetworkProfile-ThreshProfile").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_ThreshProfile.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_ThreshProfile.setDescription('The name of the DSL-THRESHOLD profile which applies to this ADSL line. ')
dmtAlDslNetworkProfile_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 10, 1, 1, 54), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("createProfile", 2), ("deleteProfile", 3)))).setLabel("dmtAlDslNetworkProfile-Action-o").setMaxAccess("readwrite")
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_Action_o.setStatus('mandatory')
if mibBuilder.loadTexts: dmtAlDslNetworkProfile_Action_o.setDescription('')
mibBuilder.exportSymbols("ASCEND-MIBDMTALNET-MIB", dmtAlDslNetworkProfile_PhysicalAddress_Shelf=dmtAlDslNetworkProfile_PhysicalAddress_Shelf, dmtAlDslNetworkProfile_LineConfig_LoopBack=dmtAlDslNetworkProfile_LineConfig_LoopBack, dmtAlDslNetworkProfile_MarginConfig_RaUpshiftMarginUp=dmtAlDslNetworkProfile_MarginConfig_RaUpshiftMarginUp, dmtAlDslNetworkProfile_LineConfig_RateAdaptModeDown=dmtAlDslNetworkProfile_LineConfig_RateAdaptModeDown, DisplayString=DisplayString, dmtAlDslNetworkProfile_MarginConfig_RaUpshiftIntDown=dmtAlDslNetworkProfile_MarginConfig_RaUpshiftIntDown, dmtAlDslNetworkProfile_LineConfig_VpSwitchingVpi=dmtAlDslNetworkProfile_LineConfig_VpSwitchingVpi, mibdmtAlDslNetworkProfileTable=mibdmtAlDslNetworkProfileTable, dmtAlDslNetworkProfile_ProfileNumber=dmtAlDslNetworkProfile_ProfileNumber, dmtAlDslNetworkProfile_LineConfig_FbmDbmMode=dmtAlDslNetworkProfile_LineConfig_FbmDbmMode, dmtAlDslNetworkProfile_InterleavePathConfig_MinBitrateDown=dmtAlDslNetworkProfile_InterleavePathConfig_MinBitrateDown, dmtAlDslNetworkProfile_MarginConfig_MaxAddNoiseMarginDown=dmtAlDslNetworkProfile_MarginConfig_MaxAddNoiseMarginDown, dmtAlDslNetworkProfile_Enabled=dmtAlDslNetworkProfile_Enabled, dmtAlDslNetworkProfile_LineConfig_MaxAggrPowerLevelUp=dmtAlDslNetworkProfile_LineConfig_MaxAggrPowerLevelUp, dmtAlDslNetworkProfile_Shelf_o=dmtAlDslNetworkProfile_Shelf_o, dmtAlDslNetworkProfile_FastPathConfig_MaxBitrateDown=dmtAlDslNetworkProfile_FastPathConfig_MaxBitrateDown, dmtAlDslNetworkProfile_ThreshProfile=dmtAlDslNetworkProfile_ThreshProfile, dmtAlDslNetworkProfile_MarginConfig_RaUpshiftMarginDown=dmtAlDslNetworkProfile_MarginConfig_RaUpshiftMarginDown, dmtAlDslNetworkProfile_PhysicalAddress_Slot=dmtAlDslNetworkProfile_PhysicalAddress_Slot, dmtAlDslNetworkProfile_InterleavePathConfig_MaxDelayUp=dmtAlDslNetworkProfile_InterleavePathConfig_MaxDelayUp, dmtAlDslNetworkProfile_InterleavePathConfig_MaxDelayDown=dmtAlDslNetworkProfile_InterleavePathConfig_MaxDelayDown, dmtAlDslNetworkProfile_MarginConfig_RaDownshiftMarginDown=dmtAlDslNetworkProfile_MarginConfig_RaDownshiftMarginDown, dmtAlDslNetworkProfile_FastPathConfig_MinBitrateDown=dmtAlDslNetworkProfile_FastPathConfig_MinBitrateDown, dmtAlDslNetworkProfile_LineConfig_RateAdaptModeUp=dmtAlDslNetworkProfile_LineConfig_RateAdaptModeUp, mibdmtAlDslNetworkProfile=mibdmtAlDslNetworkProfile, dmtAlDslNetworkProfile_MarginConfig_TargetNoiseMarginUp=dmtAlDslNetworkProfile_MarginConfig_TargetNoiseMarginUp, dmtAlDslNetworkProfile_LineConfig_MaxPowerSpectralDensity=dmtAlDslNetworkProfile_LineConfig_MaxPowerSpectralDensity, dmtAlDslNetworkProfile_LineConfig_GainDefault=dmtAlDslNetworkProfile_LineConfig_GainDefault, dmtAlDslNetworkProfile_MarginConfig_TargetNoiseMarginDown=dmtAlDslNetworkProfile_MarginConfig_TargetNoiseMarginDown, dmtAlDslNetworkProfile_FastPathConfig_PlannedBitrateUp=dmtAlDslNetworkProfile_FastPathConfig_PlannedBitrateUp, dmtAlDslNetworkProfile_FastPathConfig_MinBitrateUp=dmtAlDslNetworkProfile_FastPathConfig_MinBitrateUp, dmtAlDslNetworkProfile_InterleavePathConfig_MaxBitrateDown=dmtAlDslNetworkProfile_InterleavePathConfig_MaxBitrateDown, dmtAlDslNetworkProfile_FastPathConfig_PlannedBitrateDown=dmtAlDslNetworkProfile_FastPathConfig_PlannedBitrateDown, dmtAlDslNetworkProfile_PhysicalAddress_ItemNumber=dmtAlDslNetworkProfile_PhysicalAddress_ItemNumber, dmtAlDslNetworkProfile_LineConfig_LineLatencyUp=dmtAlDslNetworkProfile_LineConfig_LineLatencyUp, dmtAlDslNetworkProfile_IgnoreLineup=dmtAlDslNetworkProfile_IgnoreLineup, 
dmtAlDslNetworkProfile_InterleavePathConfig_PlannedBitrateUp=dmtAlDslNetworkProfile_InterleavePathConfig_PlannedBitrateUp, dmtAlDslNetworkProfile_LineConfig_DownstreamEndBin=dmtAlDslNetworkProfile_LineConfig_DownstreamEndBin, dmtAlDslNetworkProfile_LineConfig_LineCode=dmtAlDslNetworkProfile_LineConfig_LineCode, dmtAlDslNetworkProfile_MarginConfig_RaUpshiftIntUp=dmtAlDslNetworkProfile_MarginConfig_RaUpshiftIntUp, dmtAlDslNetworkProfile_LineConfig_UpstreamStartBin=dmtAlDslNetworkProfile_LineConfig_UpstreamStartBin, dmtAlDslNetworkProfile_Name=dmtAlDslNetworkProfile_Name, dmtAlDslNetworkProfile_LineConfig_RateAdaptRatioUp=dmtAlDslNetworkProfile_LineConfig_RateAdaptRatioUp, dmtAlDslNetworkProfile_LineConfig_DownstreamStartBin=dmtAlDslNetworkProfile_LineConfig_DownstreamStartBin, dmtAlDslNetworkProfile_Action_o=dmtAlDslNetworkProfile_Action_o, mibdmtAlDslNetworkProfileEntry=mibdmtAlDslNetworkProfileEntry, dmtAlDslNetworkProfile_LineConfig_MaxAggrPowerLevelDown=dmtAlDslNetworkProfile_LineConfig_MaxAggrPowerLevelDown, dmtAlDslNetworkProfile_LineConfig_LineLatencyDown=dmtAlDslNetworkProfile_LineConfig_LineLatencyDown, dmtAlDslNetworkProfile_LineConfig_TrellisEncoding=dmtAlDslNetworkProfile_LineConfig_TrellisEncoding, dmtAlDslNetworkProfile_MarginConfig_MinNoiseMarginDown=dmtAlDslNetworkProfile_MarginConfig_MinNoiseMarginDown, dmtAlDslNetworkProfile_MarginConfig_MaxAddNoiseMarginUp=dmtAlDslNetworkProfile_MarginConfig_MaxAddNoiseMarginUp, dmtAlDslNetworkProfile_LineConfig_RateAdaptRatioDown=dmtAlDslNetworkProfile_LineConfig_RateAdaptRatioDown, dmtAlDslNetworkProfile_Slot_o=dmtAlDslNetworkProfile_Slot_o, dmtAlDslNetworkProfile_Item_o=dmtAlDslNetworkProfile_Item_o, dmtAlDslNetworkProfile_SparingMode=dmtAlDslNetworkProfile_SparingMode, dmtAlDslNetworkProfile_FastPathConfig_MaxBitrateUp=dmtAlDslNetworkProfile_FastPathConfig_MaxBitrateUp, dmtAlDslNetworkProfile_InterleavePathConfig_MinBitrateUp=dmtAlDslNetworkProfile_InterleavePathConfig_MinBitrateUp, dmtAlDslNetworkProfile_InterleavePathConfig_PlannedBitrateDown=dmtAlDslNetworkProfile_InterleavePathConfig_PlannedBitrateDown, dmtAlDslNetworkProfile_LineConfig_BitSwapping=dmtAlDslNetworkProfile_LineConfig_BitSwapping, dmtAlDslNetworkProfile_LineConfig_NailedGroup=dmtAlDslNetworkProfile_LineConfig_NailedGroup, dmtAlDslNetworkProfile_LineConfig_AlcatelUs413Boost=dmtAlDslNetworkProfile_LineConfig_AlcatelUs413Boost, dmtAlDslNetworkProfile_InterleavePathConfig_MaxBitrateUp=dmtAlDslNetworkProfile_InterleavePathConfig_MaxBitrateUp, dmtAlDslNetworkProfile_MarginConfig_MinNoiseMarginUp=dmtAlDslNetworkProfile_MarginConfig_MinNoiseMarginUp, dmtAlDslNetworkProfile_LineConfig_UpstreamEndBin=dmtAlDslNetworkProfile_LineConfig_UpstreamEndBin, dmtAlDslNetworkProfile_MarginConfig_RaDownshiftMarginUp=dmtAlDslNetworkProfile_MarginConfig_RaDownshiftMarginUp, dmtAlDslNetworkProfile_MarginConfig_RaDownshiftIntDown=dmtAlDslNetworkProfile_MarginConfig_RaDownshiftIntDown, dmtAlDslNetworkProfile_MarginConfig_RaDownshiftIntUp=dmtAlDslNetworkProfile_MarginConfig_RaDownshiftIntUp)
[authors: dcwangmit01@gmail.com]

=== record: ShikhaShrivastava/Python-core /Collection/Counter object.py (no_license, Python, UTF-8, 712 bytes) ===
'''_____________Counter Object_________________'''
from collections import Counter
def counter_object_demo():
lst = ['a', 'b', 'c', 'b', 'a', 'c', 'a', 'c', 'b']
tup = (1, 2, 3, 2, 1, 4, 2, 3, 1, 2, 1, 3, 2)
set_ele = {10, 30, 20, 10, 20, 40, 60, 10}
dct = {'a': 1, 'b': 5, 'c': 9, 'd': 8}
data = 'hey there is whatsapp'
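    # Counter accepts any iterable of hashables (list, tuple, set, string)
    # or a mapping / keyword arguments, whose values are taken as counts.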
toll_lst = Counter(lst)
toll_tup = Counter(tup)
toll_set = Counter(set_ele)
toll_dict1 = Counter(dct)
toll_data = Counter(data)
toll_dict2 = Counter(a=2, b=3, c=5)
print(toll_lst)
print(toll_tup)
print(toll_set)
print(toll_dict1)
print(toll_dict2)
print(toll_data)
if __name__ == "__main__":
counter_object_demo()
[authors: shikhashrivastava2908@gmail.com]

=== record: ekulbyrnes/djangopolls /polls/views.py (no_license, Python, UTF-8, 3,597 bytes) ===
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from django.utils import timezone
# Create your views here:
from .models import Choice, Question
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
""" Return the last five published questions (not including those set to be published in the future)."""
return Question.objects.filter(
pub_date__lte=timezone.now()
        ).order_by('-pub_date')[:5]
# this is now redundant due to time zone conditions being added above:
# return Question.objects.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
"""
Excludes any questions that aren't published yet
"""
return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {'question': question, 'error_message': "You didn't select a choice.", })
else:
selected_choice.votes += 1
selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing with POST data. This prevents data from being posted twice if a user hits the BACK button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
# redundant code: return HttpResponse("You're voting on question %s." % question_id)
#
# Old code before using Django Generic Views (up to pt 4 tutorial)#
#
#
# def index(request):
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# context = {'latest_question_list': latest_question_list,}
# return render(request, 'polls/index.html', context)
# def detail(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/detail.html', {'question': question})
# def results(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/results.html', {'question':question})
# # Redundant code:
# #response = "You're looking at the results of question %s."
# #return HttpResponse(response % question_id)
# def vote(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# try:
# selected_choice = question.choice_set.get(pk=request.POST['choice'])
# except (KeyError, Choice.DoesNotExist):
# # Redisplay the question voting form.
# return render(request, 'polls/detail.html', {'question': question, 'error_message': "You didn't select a choice.", })
# else:
# selected_choice.votes += 1
# selected_choice.save()
# # Always return an HttpResponseRedirect after successfully dealing with POST data. This prevents data from being posted twice if a user hits the BACK button.
# return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
# # redundant code: return HttpResponse("You're voting on question %s." % question_id)
[authors: luke.e.byrnes@gmail.com]

=== record: NishanthSV/DropoutPrediction /DropoutPrediction/results/RandomForest.py (no_license, Python, UTF-8, 3,115 bytes) ===
#!/usr/bin/env python
# coding: utf-8
# In[12]:
# Constants and imports.
BASE_NUM = 1
RANDOM_STATE = None
CV = 5
TEST_SIZE = 0.2
import os
import itertools
import pandas as pd
import numpy as np
# Notebook magic; this line only works when run inside IPython/Jupyter.
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
# In[13]:
# Load data.
data = pd.read_csv('base_1.csv', sep=';')
data.head()
# In[14]:
import random
COURSE = []
for i in range(100):
randlist = ['CSE', 'AUTOMOBILE_ENGINEERING', 'APPLIED_SCIENCE', 'BIO_TECHNOLOGY', 'BIOMEDICAL_ENGINEERING','CHEMISTRY','CIVIL_ENGINEERING','ECE','EEE','ENGLISH','FASHION_TECHNOLOGY','HUMANITICS','IT','MATHEMATICS','COMPUTER_APPLICATIONS','MECHANICAL','METALLURIGCAL','PHYSICS','PRODUCTION','ROBOTICS','TEXTILE']
COURSE.append(random.choice(randlist))
# for val in COURSE:
# print(val)
data["COURSE"] = COURSE
# print(data['COURSE'])
ATTENDANCE = []
for i in data['LARGE_PERIOD_ABSENT']:
ATTENDANCE.append(1-i)
data['ATTENDANCE'] = ATTENDANCE
data.drop(["COURSE_OF_STUDY","NATIONALITY", "AGE_WHEN_STARTED", "ELEMENTARY_SCHOOL", "SCHOOL", "ELEMENTARY_GRADE_9", "ELEMENTARY_GRADE_1", "ELEMENTARY_GRADE_2", "ELEMENTARY_GRADE_3", "ELEMENTARY_GRADE_4", "ELEMENTARY_GRADE_AVG", "SUPERVISOR_GROUP_SIZE","CLASS_BASED_SCHOOL","SMALL_PERIOD_ON_TIME","SMALL_PERIOD_ABSENT","SMALL_PERIOD_LATE",
"SMALL_PERIOD_AVG_ASSIGNMENT_GRADE","LARGE_PERIOD_AVG_ASSIGNMENT_GRADE","CREDITS_LAST_SEMESTER","CLASSES_LAST_SEMESTER","LARGE_PERIOD_ON_TIME","LARGE_PERIOD_LATE","LARGE_PERIOD_ABSENT"], axis = 1, inplace = True)
# for col in data.columns:
# print(col)
data.head()
# for val in data.columns:
# print(val)
data = data.rename(columns={"LARGE_PERIOD_AVG_GRADE": "CGPA"})
# In[15]:
data.fillna(data.mean(), inplace=True)
data = data.drop(['COURSE'],axis = 1)
# In[16]:
# Split train / test
from sklearn.model_selection import StratifiedShuffleSplit
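# StratifiedShuffleSplit keeps the DROPPED_OUT class proportions identical
# in the train and test partitions, which matters for an imbalanced label.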
split = StratifiedShuffleSplit(n_splits=1, test_size=TEST_SIZE, random_state=RANDOM_STATE)
for train_index, test_index in split.split(data, data['DROPPED_OUT']):
train_set = data.loc[train_index]
test_set = data.loc[test_index]
# In[17]:
X_train = train_set.drop(['DROPPED_OUT'],axis = 1)
Y_train = train_set['DROPPED_OUT']
X_test = test_set.drop(['DROPPED_OUT'],axis = 1)
Y_test = test_set['DROPPED_OUT']
print(X_train.columns)
# In[18]:
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
# Create a random forest classifier
clf=RandomForestClassifier(n_estimators=200)
clf.fit(X_train,Y_train)
y_pred = clf.predict(X_test)
score = metrics.accuracy_score(Y_test,y_pred)
print(score)
# In[20]:
from sklearn.metrics import roc_curve , roc_auc_score
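# predict_proba(...)[:, 1] is the predicted probability of the positive
# class; the ROC curve is computed from these scores, not hard labels.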
yscore = clf.predict_proba(X_test)[:,1]
false_positive_rate , true_positive_rate , threshold = roc_curve(Y_test,yscore)
print("roc_auc_score : ", roc_auc_score(Y_test,yscore))
# In[21]:
plt.title('Receiver operating characteristic')
plt.plot(false_positive_rate , true_positive_rate)
plt.plot([0,1] , ls = "--")
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
# In[ ]:
# In[ ]:
|
[
"noreply@github.com"
] |
NishanthSV.noreply@github.com
|
d21c4684449f82f2bebd1e269e253fd9b98d3764
|
ddb4d6a3b839e8325e1902181e4c1aa0a3c75935
|
/coin_flip.py
|
bb9016b036aaa6092aaf2957ad699c54b6b51766
|
[] |
no_license
|
baremetals/Automate_Python
|
8c99ae0ee21fc7c12e7735ce41acd1948addfc93
|
edae29ea1d15e342b3dde6a59ffc31b1a40ac026
|
refs/heads/master
| 2023-01-23T05:41:34.100296
| 2020-11-27T13:18:18
| 2020-11-27T13:18:18
| 311,421,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
import random
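# Simulate 20 coin flips, recording 'H'/'T', and count how often the last
# three flips were all heads (an overlapping streak count).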
number_of_streaks = 0
results = []
tails = 0
heads = 0
for experimentNumber in range(20):
coin_flip = random.randint(0, 1)
if (coin_flip == 0):
results.append('T')
tails += 1
else:
results.append('H')
heads += 1
    # results[:3] == 'H' compared a list with a string and was always False;
    # a streak here means the three most recent flips were all heads.
    if results[-3:] == ['H', 'H', 'H']:
        number_of_streaks += 1
# for streak in results:
# for streak in results:
# if ("H" ):
# number_of_streaks += 1
print(results)
print(number_of_streaks)
[authors: baremetals16@gmail.com]

=== record: yurachistic1/stickingTogether /store/models.py (no_license, Python, UTF-8, 1,521 bytes) ===
from django.db import models
def image_directory_path(instance, filename):
# file will be uploaded to MEDIA_ROOT/<cause>/<filename>
return "{0}/{1}".format(instance.sticker.cause, filename)
def image_directory_path2(instance, filename):
    # Placeholder upload-path helper; not implemented yet.
    pass
# Create your models here.
class Sticker(models.Model):
BEIRUTREDCROSS = "BRC"
CAUSES_CHOICES = [
(BEIRUTREDCROSS, "Beirut Red Cross"),
]
name = models.CharField(max_length=30, unique=True)
price = models.FloatField(verbose_name="Price (£)")
price_sg = models.FloatField(verbose_name="Price (S$)")
description = models.TextField(default="", blank=True)
dimensions = models.CharField(
verbose_name="Dimensions (A x B cm)", null=True, max_length=10, blank=True
)
artist = models.CharField(max_length=30, blank=True, null=True)
singapore_stock = models.IntegerField()
uk_stock = models.IntegerField()
cause = models.CharField(max_length=5, choices=CAUSES_CHOICES)
ordering = models.IntegerField(
verbose_name="Order of appearance(lowest first)", default=1
)
def __str__(self):
return self.name + "-ordering: " + str(self.ordering)
class Meta:
ordering = ["ordering"]
class StickerImage(models.Model):
text_description = models.CharField(max_length=100)
image = models.ImageField(upload_to=image_directory_path)
sticker = models.ForeignKey(Sticker, on_delete=models.CASCADE)
def __str__(self):
return self.text_description
[authors: yurachistic@gmail.com]

=== record: jensengroup/molstat /solutions_week5/water/read_water.py (no_license, Python, UTF-8, 1,632 bytes) ===
import matplotlib.pyplot as plt
import numpy as np
import seaborn
# If working with multiple files
# you can read loop over the files by creating a array of
# filenames
dat_files = ['CCSD(t).dat', 'F12.dat', 'B3LYP.dat', 'mp2.dat']
# initialize empty lists
energies = []
r_lists = []
# for data-file in file-list
for datf in dat_files:
# initialize empty list for current
# data file
dat_energy = []
r_list = []
# open datafile and loop over lines
f = open(datf, 'r')
for line in f:
# split line (string) into a line (list) for every space
line = line.split()
# Check if line is empty by checking the length of the line list
if len(line) < 1:
continue
# Get the energy and distance
# and convert it to float from string
energy = float(line[-1])
r = float(line[0])
# append it to energies
dat_energy.append(energy)
        r_list.append(r)
    f.close()
dat_energy = np.array(dat_energy)
m = dat_energy.min()
dat_energy -= m
dat_energy *= 627.509 # a.u. to kcal/mol
# append energy list to overall energy array
energies.append(dat_energy)
r_lists.append(r_list)
# energies is now a "list of lists"
# which we can access as
plt.plot(r_lists[0], energies[0], '.-', label='CCSD(T)')
plt.plot(r_lists[1], energies[1], '.-', label='F12')
plt.plot(r_lists[2], energies[2], '.-', label='B3LYP')
plt.plot(r_lists[3], energies[3], '.-', label='MP2')
plt.legend(loc='upper right')
plt.xlabel(r'Displacement [$\AA$]')
plt.ylabel('Relative energy [kcal/mol]')
plt.savefig('energy_water.png')
[authors: jimmy@charnley.dk]

=== record: vladimirlojanica/Kats /kats/models/nowcasting/nowcastingplus.py (MIT license, permissive, Python, UTF-8, 8,765 bytes) ===
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
"""NowcastingPlus is a basic model for short-term forecasting.
This modules contains class NowcastingParams, which is the class parameter
and class NowcastingPlusModel, which is the model.
Typical usage example:
nr = NowcastingPlusModel(data = data, params = NowcastingParams(step = 10))
nr.feature_extraction()
nr.label_extraction()
nr.fit()
output = nr.predict()
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import Any, List
import kats.models.model as m
import numpy as np
import pandas as pd
from kats.consts import Params, TimeSeriesData
from kats.models.nowcasting.feature_extraction import LAG, ROC, MA, MOM
from kats.models.nowcasting.model_io import (
serialize_for_zippy,
deserialize_from_zippy,
)
from sklearn import linear_model
from sklearn import preprocessing
from sklearn.linear_model import LinearRegression
def poly(df, n):
"""
    Adds a "poly_<n>" column holding the values of df.x raised to the power n.
"""
poly = pd.Series(df.x ** n, name="poly_" + str(n))
df = df.join(poly)
return df
class NowcastingParams(Params):
"""The class for Nowcasting Parameters.
Takes parameters for class NowcastingModel.
Attributes:
step: An integer indicating how many steps ahead we are forecasting. Default is 1.
"""
def __init__(self, step: int = 1, **kwargs) -> None:
super().__init__()
self.step = step
logging.debug(f"Initialized QuadraticModel with parameters: step:{step}")
def validate_params(self):
"""Raises: NotImplementedError("Subclasses should implement this!")."""
logging.warning("Method validate_params() is not implemented.")
raise NotImplementedError("Subclasses should implement this!")
class NowcastingPlusModel(m.Model):
"""The class for NowcastingPlus Model.
This class performs data processing and short term prediction, for time series
based on machine learning methodology.
Attributes:
TimeSeriesData: Time Series Data Source.
NowcastingParams: parameters for Nowcasting.
"""
def __init__(
self,
data: TimeSeriesData,
params: NowcastingParams,
model: Any = None,
poly_model: Any = None,
feature_names: List[str] = [],
poly_feature_names: List[str] = [],
scaler: Any = None,
label_scaler: Any = None,
y_train_season_obj: Any = None,
) -> None:
super().__init__(data, params)
if not isinstance(self.data.value, pd.Series):
msg = "Only support univariate time series, but get {type}.".format(
type=type(self.data.value)
)
logging.error(msg)
raise ValueError(msg)
self.df = data.to_dataframe()
self.step = params.step
self.model = model
self.feature_names = feature_names
self.poly_model = poly_model
self.df_poly = data.to_dataframe()
self.poly_feature_names = poly_feature_names
self.df_nowcasting = data.to_dataframe()
self.scaler = scaler
self.label_scaler = label_scaler
self.y_train_season_obj = y_train_season_obj
def feature_extraction(self) -> None:
"""
Extracts features for time series data.
"""
# Add the hour, minute, and x column to the data
self.df_poly["hour"] = self.df_poly["time"].apply(lambda y: y.hour)
self.df_poly["minute"] = self.df_poly["time"].apply(lambda y: y.minute)
self.df_poly["x"] = self.df_poly["hour"] * 60 + self.df_poly["minute"]
# Empty list to hold the feature names
poly_feature_names = []
# Add the poly columns to the df_poly
for degree in [0, 1, 2, 3, 4, 5]:
self.df_poly = poly(self.df_poly, degree)
poly_feature_names.append("poly_" + str(degree))
# filterout + - inf, nan
self.df_poly = self.df_poly[
~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)
]
# Save the poly feature name
self.poly_feature_names = poly_feature_names
feature_names = []
#########################################################################################
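        # Fit a degree-0..5 polynomial over minute-of-day as a seasonal
        # baseline; the residuals are what the Lasso model later learns.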
train_index_poly = self.df_poly[
~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)
].index
X_train_poly, y_train_poly = (
self.df_poly[self.poly_feature_names].loc[train_index_poly],
self.df_poly["y"].loc[train_index_poly],
)
# Build the Polynomial Regression Model
lin_reg = LinearRegression()
lin_reg.fit(X_train_poly, y_train_poly)
self.poly_model = lin_reg
y_train_season = lin_reg.predict(X_train_poly)
self.y_train_season_obj = y_train_season
#########################################################################################
for n in [10, 15, 20, 25, 30]:
self.df = MOM(self.df, n)
feature_names.append("MOM_" + str(n))
for n in [10, 15, 20, 25, 30]:
self.df = ROC(self.df, n)
feature_names.append("ROC_" + str(n))
for n in [1, 2, 3, 4, 5]:
self.df = LAG(self.df, n)
feature_names.append("LAG_" + str(n))
for n in [10, 20, 30]:
self.df = MA(self.df, n)
feature_names.append("MA_" + str(n))
self.df = self.df[
~self.df.isin([np.nan, np.inf, -np.inf]).any(1)
] # filterout + - inf, nan
self.feature_names = feature_names
def label_extraction(self) -> None:
"""Extracts labels from time series data."""
self.df["label"] = self.df["y"]
###################### module 1: for offline training ######################
def fit(self) -> None:
"""Fits model."""
logging.debug(
"Call fit() with parameters: " "step:{step}".format(step=self.step)
)
n = 1
train_index = self.df[~self.df.isin([np.nan, np.inf, -np.inf]).any(1)].index
X_train = self.df[self.feature_names].loc[train_index]
std_scaler = preprocessing.StandardScaler()
X_train = std_scaler.fit_transform(X_train)
self.scaler = std_scaler
n = self.step
y_train = (
self.df["label"].loc[train_index] - self.y_train_season_obj[train_index]
).diff(-n)[:-n]
X_train = X_train[:-n]
reg = linear_model.LassoCV()
reg.fit(X_train, y_train)
self.model = reg
def save_model(self) -> bytes:
"""Saves sklearn model as bytes."""
return serialize_for_zippy(self.model)
###################### module 2: for online prediction ######################
def predict(self, **kwargs):
"""Predicts the time series in the future.
Nowcasting forecasts at the time unit of step ahead.
This is in order to keep precision and different from usual algorithms.
Returns:
A float variable, the forecast at future step.
"""
logging.debug(
"Call predict() with parameters. "
"Forecast 1 step only, kwargs:{kwargs}".format(kwargs=kwargs)
)
X_test = self.df[-self.step :][self.feature_names]
X_test = self.scaler.transform(X_test)
y_predict = self.model.predict(X_test)
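        # Reconstruct the forecast: deseasonalize the latest observation,
        # subtract the predicted step-ahead difference, then add back the
        # polynomial seasonal value at the future step.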
poly_now = self.y_train_season_obj[-1]
first_occ = np.where(self.y_train_season_obj == poly_now)
polynext = self.y_train_season_obj[first_occ[0][0] + self.step]
now = self.df["y"][-self.step :]
return (now - poly_now) - y_predict + polynext
def predict_polyfit(self, model=None, df=None, **kwargs):
poly_now = self.y_train_season_obj[-1]
first_occ = np.where(self.y_train_season_obj == poly_now)
polynext = self.y_train_season_obj[first_occ[0][0] + self.step]
return polynext
def load_model(self, model_as_bytes: bytes) -> None:
"""Loads model_as_str and decodes into the class NowcastingModel.
Args:
model_as_bytes: a binary variable, indicating whether to read as bytes.
"""
self.model = deserialize_from_zippy(model_as_bytes)
def plot(self):
"""Raises: NotImplementedError("Subclasses should implement this!")。"""
raise NotImplementedError("Subclasses should implement this!")
def __str__(self):
"""Returns the name as Nowcasting,"""
return "Nowcasting"
[authors: facebook-github-bot@users.noreply.github.com]

=== record: bryonkucharski/robot-catcher /python_scripts/rl/DQNAgent.py (no_license, Python, UTF-8, 4,486 bytes) ===
'''
This class is heavily based on https://github.com/keon/deep-q-learning
Modified by Bryon Kucharski
Summer 2018
'''
import random
import numpy as np
from collections import deque
from keras.initializers import normal, identity
from keras.models import model_from_json
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.optimizers import SGD , Adam
import json
import time
from keras.callbacks import TensorBoard
class DQNAgent:
def __init__(self, state_size, action_size, gamma, epsilon, epsilon_min, epsilon_decay, learning_rate, model_type):
self.max_memory = 2000
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=self.max_memory)
        self.gamma = gamma                  # discount rate (e.g. 0.95)
        self.epsilon = epsilon              # exploration rate (e.g. 1.0)
        self.epsilon_min = epsilon_min      # e.g. 0.01
        self.epsilon_decay = epsilon_decay  # e.g. 0.995
        self.learning_rate = learning_rate  # e.g. 0.001
#self.tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
if model_type == 'DeepMind':
self.model = self.DeepMindModel()
elif model_type == 'DeepModel':
self.model = self.DeepModel()
def DeepModel(self):
# Neural Net for Deep-Q learning Model
model = Sequential()
        model.add(Dense(20, input_shape=(self.state_size,), activation='relu'))
model.add(Dense(18, activation='relu'))
model.add(Dense(10, kernel_initializer='uniform', activation='relu'))
model.add(Dense(self.action_size, activation='linear'))
model.compile(loss='mse',
optimizer=Adam(lr=self.learning_rate))
return model
def DeepMindModel(self):
model = Sequential()
        # Keras 2 API: kernel size as a tuple, strides/padding instead of the
        # old subsample/border_mode arguments (the rest of the file already
        # uses the Keras 2 API, e.g. epochs= in fit)
        model.add(Conv2D(32, (8, 8), strides=(4, 4), padding='same', input_shape=(80, 80, 4)))  # 80*80*4
        model.add(Activation('relu'))
        model.add(Conv2D(64, (4, 4), strides=(2, 2), padding='same'))
        model.add(Activation('relu'))
        model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same'))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(self.action_size))
adam = Adam(lr=1e-6)
model.compile(loss='mse',optimizer=adam)
return model
    def remember(self, state, action, reward, next_state, done):
        # the deque was created with maxlen=self.max_memory, so it already
        # drops the oldest entry when full; no manual trimming is needed
        self.memory.append((state, action, reward, next_state, done))
def take_action(self, state):
if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
def replay(self, batch_size):
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = reward
if not done:
target = (reward + self.gamma * np.amax(self.model.predict(next_state)[0]))
target_f = self.model.predict(state)
target_f[0][action] = target
#tensorboard = TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)
history = self.model.fit(state, target_f, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
return history
def predict(self, state):
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
    def train(self, X_batch, y_batch):
        # train_on_batch returns a scalar loss for a single-output model,
        # so no [0] indexing is needed
        return self.model.train_on_batch(X_batch, y_batch)
def load(self, name):
print("Loading Model")
self.model.load_weights(name)
def save(self, name):
print("Saving Model")
self.model.save_weights(name)
def memory_length(self):
return len(self.memory)
def print_model_weights(self):
i = 0
for layer in self.model.layers:
i +=1
weights = layer.get_weights() # list of numpy arrays
print("Layer " + str(i) + ": " + str(weights))
|
[
"bryonkucharski@gmail.com"
] |
bryonkucharski@gmail.com
|
5e75d0ea121df2f4dcfb81ee93b32ff59299c7cd
|
f4dfbcc41dcd2a06909a39f4b8b03c42bfe921cf
|
/users/migrations/0001_initial.py
|
d947babbe4043be68e9254796ad55b3cb28f9360
|
[] |
no_license
|
VincentBai-dotcom/twitterReplica
|
ffc8187f22ff57f453f097023dde82b6c042322c
|
d131c97e0f335166da1ec0c742b1c61c7af8bc9c
|
refs/heads/master
| 2023-08-27T13:05:05.901118
| 2021-10-23T18:17:38
| 2021-10-23T18:17:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
# Generated by Django 3.2.4 on 2021-07-18 19:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MyUser',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('email', models.EmailField(max_length=60, unique=True, verbose_name='email')),
('username', models.CharField(max_length=15, unique=True)),
('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
('last_login', models.DateTimeField(auto_now=True, verbose_name='last login')),
('is_admin', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
]
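# Editorial note (not part of the generated migration): a custom user model
# like MyUser only takes effect if settings reference it before the first
# migration runs, e.g. AUTH_USER_MODEL = 'users.MyUser' (app label assumed
# here from the migration's path).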
|
[
"butcheryyy@gmail.com"
] |
butcheryyy@gmail.com
|
8ec4a67b5504ab1b614830a98aea9527a0438e56
|
f0bfe4be1e0c9b129476587144b8d661b1104f1b
|
/napari/_qt/dialogs/qt_plugin_dialog.py
|
2f98b66861d01ad78d0965c8a1c532cbf8172b2d
|
[
"BSD-3-Clause"
] |
permissive
|
zzalscv2/napari
|
4e8cf31be709368443c5280dcf791cb08d5aff4f
|
45cdcc85f17442dcb8eab7f65311ba21467419c8
|
refs/heads/master
| 2023-03-29T06:57:32.921552
| 2021-04-01T16:17:28
| 2021-04-01T16:17:28
| 354,060,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,631
|
py
|
import os
import sys
from pathlib import Path
from typing import Sequence
from napari_plugin_engine.dist import standard_metadata
from napari_plugin_engine.exceptions import PluginError
from qtpy.QtCore import QEvent, QProcess, QProcessEnvironment, QSize, Qt, Slot
from qtpy.QtGui import QFont, QMovie
from qtpy.QtWidgets import (
QCheckBox,
QDialog,
QFrame,
QHBoxLayout,
QLabel,
QLineEdit,
QListWidget,
QListWidgetItem,
QPushButton,
QSizePolicy,
QSplitter,
QTextEdit,
QVBoxLayout,
QWidget,
)
import napari.resources
from ...plugins.pypi import (
ProjectInfo,
iter_napari_plugin_info,
normalized_name,
)
from ...utils._appdirs import user_plugin_dir, user_site_packages
from ...utils.misc import parse_version, running_as_bundled_app
from ...utils.translations import trans
from ..qthreading import create_worker
from ..widgets.qt_eliding_label import ElidingLabel
from ..widgets.qt_plugin_sorter import QtPluginSorter
from .qt_plugin_report import QtPluginErrReporter
# TODO: add error icon and handle pip install errors
# TODO: add queue to handle clicks when already processing
class Installer:
def __init__(self, output_widget: QTextEdit = None):
from ...plugins import plugin_manager
# create install process
self._output_widget = None
self.process = QProcess()
self.process.setProgram(sys.executable)
self.process.setProcessChannelMode(QProcess.MergedChannels)
self.process.readyReadStandardOutput.connect(self._on_stdout_ready)
# setup process path
env = QProcessEnvironment()
combined_paths = os.pathsep.join(
[user_site_packages(), env.systemEnvironment().value("PYTHONPATH")]
)
env.insert("PYTHONPATH", combined_paths)
# use path of parent process
env.insert(
"PATH", QProcessEnvironment.systemEnvironment().value("PATH")
)
self.process.setProcessEnvironment(env)
self.process.finished.connect(lambda: plugin_manager.discover())
self.process.finished.connect(lambda: plugin_manager.prune())
self.set_output_widget(output_widget)
def set_output_widget(self, output_widget: QTextEdit):
if output_widget:
self._output_widget = output_widget
self.process.setParent(output_widget)
def _on_stdout_ready(self):
if self._output_widget:
text = self.process.readAllStandardOutput().data().decode()
self._output_widget.append(text)
def install(self, pkg_list: Sequence[str]):
cmd = ['-m', 'pip', 'install', '--upgrade']
if running_as_bundled_app() and sys.platform.startswith('linux'):
cmd += [
'--no-warn-script-location',
'--prefix',
user_plugin_dir(),
]
self.process.setArguments(cmd + list(pkg_list))
if self._output_widget:
self._output_widget.clear()
self.process.start()
def uninstall(self, pkg_list: Sequence[str]):
args = ['-m', 'pip', 'uninstall', '-y']
self.process.setArguments(args + list(pkg_list))
if self._output_widget:
self._output_widget.clear()
self.process.start()
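# Editorial usage sketch (not part of the original file): the Installer wraps
# pip in a QProcess, so calls return immediately and completion is signalled
# via process.finished; the package name below is illustrative.
#
#     installer = Installer(output_widget=status_text_edit)
#     installer.install(['napari-svg'])    # python -m pip install --upgrade napari-svg
#     installer.uninstall(['napari-svg'])  # python -m pip uninstall -y napari-svg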
class PluginListItem(QFrame):
def __init__(
self,
package_name: str,
version: str = '',
url: str = '',
summary: str = '',
author: str = '',
license: str = "UNKNOWN",
*,
plugin_name: str = None,
parent: QWidget = None,
enabled: bool = True,
):
super().__init__(parent)
self.setup_ui()
if plugin_name:
self.plugin_name.setText(plugin_name)
self.package_name.setText(f"{package_name} {version}")
self.summary.setText(summary)
self.package_author.setText(author)
self.action_button.setText(trans._("remove"))
self.action_button.setObjectName("remove_button")
self.enabled_checkbox.setChecked(enabled)
if PluginError.get(plugin_name=plugin_name):
def _show_error():
rep = QtPluginErrReporter(
parent=self._get_dialog(), initial_plugin=plugin_name
)
rep.setWindowFlags(Qt.Sheet)
close = QPushButton(trans._("close"), rep)
rep.layout.addWidget(close)
rep.plugin_combo.hide()
close.clicked.connect(rep.close)
rep.open()
self.error_indicator.clicked.connect(_show_error)
self.error_indicator.show()
self.summary.setIndent(18)
else:
self.summary.setIndent(38)
else:
self.plugin_name.setText(package_name)
self.package_name.setText(version)
self.summary.setText(summary)
self.package_author.setText(author)
self.action_button.setText(trans._("install"))
self.enabled_checkbox.hide()
def _get_dialog(self) -> QDialog:
p = self.parent()
while not isinstance(p, QDialog) and p.parent():
p = p.parent()
return p
def setup_ui(self):
self.v_lay = QVBoxLayout(self)
self.v_lay.setContentsMargins(-1, 8, -1, 8)
self.v_lay.setSpacing(0)
self.row1 = QHBoxLayout()
self.row1.setSpacing(8)
self.enabled_checkbox = QCheckBox(self)
self.enabled_checkbox.setChecked(True)
self.enabled_checkbox.setDisabled(True)
self.enabled_checkbox.setToolTip(trans._("enable/disable"))
sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.enabled_checkbox.sizePolicy().hasHeightForWidth()
)
self.enabled_checkbox.setSizePolicy(sizePolicy)
self.enabled_checkbox.setMinimumSize(QSize(20, 0))
self.enabled_checkbox.setText("")
self.row1.addWidget(self.enabled_checkbox)
self.plugin_name = QLabel(self)
sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.plugin_name.sizePolicy().hasHeightForWidth()
)
self.plugin_name.setSizePolicy(sizePolicy)
font16 = QFont()
font16.setPointSize(16)
self.plugin_name.setFont(font16)
self.row1.addWidget(self.plugin_name)
self.package_name = QLabel(self)
self.package_name.setAlignment(
Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter
)
self.row1.addWidget(self.package_name)
self.action_button = QPushButton(self)
sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.action_button.sizePolicy().hasHeightForWidth()
)
self.action_button.setSizePolicy(sizePolicy)
self.row1.addWidget(self.action_button)
self.v_lay.addLayout(self.row1)
self.row2 = QHBoxLayout()
self.error_indicator = QPushButton()
self.error_indicator.setObjectName("warning_icon")
self.error_indicator.setCursor(Qt.PointingHandCursor)
self.error_indicator.hide()
self.row2.addWidget(self.error_indicator)
self.row2.setContentsMargins(-1, 4, 0, -1)
self.summary = ElidingLabel(parent=self)
sizePolicy = QSizePolicy(
QSizePolicy.MinimumExpanding, QSizePolicy.Preferred
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.summary.sizePolicy().hasHeightForWidth()
)
self.summary.setSizePolicy(sizePolicy)
self.summary.setObjectName("small_text")
self.row2.addWidget(self.summary)
self.package_author = QLabel(self)
sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.package_author.sizePolicy().hasHeightForWidth()
)
self.package_author.setSizePolicy(sizePolicy)
self.package_author.setObjectName("small_text")
self.row2.addWidget(self.package_author)
self.v_lay.addLayout(self.row2)
class QPluginList(QListWidget):
def __init__(self, parent: QWidget, installer: Installer):
super().__init__(parent)
self.installer = installer
self.setSortingEnabled(True)
@Slot(ProjectInfo)
def addItem(
self, project_info: ProjectInfo, plugin_name=None, enabled=True
):
# don't add duplicates
if self.findItems(project_info.name, Qt.MatchFixedString):
if not plugin_name:
return
item = QListWidgetItem(project_info.name, parent=self)
item.version = project_info.version
super().addItem(item)
widg = PluginListItem(
*project_info,
parent=self,
plugin_name=plugin_name,
enabled=enabled,
)
method = getattr(
self.installer, 'uninstall' if plugin_name else 'install'
)
widg.action_button.clicked.connect(lambda: method([project_info.name]))
item.setSizeHint(widg.sizeHint())
self.setItemWidget(item, widg)
@Slot(ProjectInfo)
def tag_outdated(self, project_info: ProjectInfo):
for item in self.findItems(project_info.name, Qt.MatchFixedString):
current = item.version
latest = project_info.version
if parse_version(current) >= parse_version(latest):
continue
if hasattr(item, 'outdated'):
# already tagged it
continue
item.outdated = True
widg = self.itemWidget(item)
update_btn = QPushButton(
trans._("update (v{latest})").format(latest=latest), widg
)
update_btn.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
update_btn.clicked.connect(
lambda: self.installer.install([item.text()])
)
widg.row1.insertWidget(3, update_btn)
class QtPluginDialog(QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self.installer = Installer()
self.setup_ui()
self.installer.set_output_widget(self.stdout_text)
self.installer.process.started.connect(self._on_installer_start)
self.installer.process.finished.connect(self._on_installer_done)
self.refresh()
def _on_installer_start(self):
self.show_status_btn.setChecked(True)
self.working_indicator.show()
self.process_error_indicator.hide()
def _on_installer_done(self, exit_code, exit_status):
self.working_indicator.hide()
if exit_code:
self.process_error_indicator.show()
else:
self.show_status_btn.setChecked(False)
self.refresh()
self.plugin_sorter.refresh()
def refresh(self):
self.installed_list.clear()
self.available_list.clear()
# fetch installed
from ...plugins import plugin_manager
plugin_manager.discover() # since they might not be loaded yet
already_installed = set()
for plugin_name, mod_name, distname in plugin_manager.iter_available():
# not showing these in the plugin dialog
if plugin_name in ('napari_plugin_engine',):
continue
if distname:
already_installed.add(distname)
meta = standard_metadata(distname)
else:
meta = {}
self.installed_list.addItem(
ProjectInfo(
normalized_name(distname or ''),
meta.get('version', ''),
meta.get('url', ''),
meta.get('summary', ''),
meta.get('author', ''),
meta.get('license', ''),
),
plugin_name=plugin_name,
enabled=plugin_name in plugin_manager.plugins,
)
# self.v_splitter.setSizes([70 * self.installed_list.count(), 10, 10])
# fetch available plugins
self.worker = create_worker(iter_napari_plugin_info)
def _handle_yield(project_info):
if project_info.name in already_installed:
self.installed_list.tag_outdated(project_info)
else:
self.available_list.addItem(project_info)
self.worker.yielded.connect(_handle_yield)
self.worker.finished.connect(self.working_indicator.hide)
self.worker.finished.connect(self._update_count_in_label)
self.worker.start()
def setup_ui(self):
self.resize(1080, 640)
vlay_1 = QVBoxLayout(self)
self.h_splitter = QSplitter(self)
vlay_1.addWidget(self.h_splitter)
self.h_splitter.setOrientation(Qt.Horizontal)
self.v_splitter = QSplitter(self.h_splitter)
self.v_splitter.setOrientation(Qt.Vertical)
self.v_splitter.setMinimumWidth(500)
self.plugin_sorter = QtPluginSorter(parent=self.h_splitter)
self.plugin_sorter.layout().setContentsMargins(2, 0, 0, 0)
self.plugin_sorter.hide()
installed = QWidget(self.v_splitter)
lay = QVBoxLayout(installed)
lay.setContentsMargins(0, 2, 0, 2)
lay.addWidget(QLabel(trans._("Installed Plugins")))
self.installed_list = QPluginList(installed, self.installer)
lay.addWidget(self.installed_list)
uninstalled = QWidget(self.v_splitter)
lay = QVBoxLayout(uninstalled)
lay.setContentsMargins(0, 2, 0, 2)
self.avail_label = QLabel(trans._("Available Plugins"))
lay.addWidget(self.avail_label)
self.available_list = QPluginList(uninstalled, self.installer)
lay.addWidget(self.available_list)
self.stdout_text = QTextEdit(self.v_splitter)
self.stdout_text.setReadOnly(True)
self.stdout_text.setObjectName("pip_install_status")
self.stdout_text.hide()
buttonBox = QHBoxLayout()
self.working_indicator = QLabel(trans._("loading ..."), self)
sp = self.working_indicator.sizePolicy()
sp.setRetainSizeWhenHidden(True)
self.working_indicator.setSizePolicy(sp)
self.process_error_indicator = QLabel(self)
self.process_error_indicator.setObjectName("error_label")
self.process_error_indicator.hide()
load_gif = str(Path(napari.resources.__file__).parent / "loading.gif")
mov = QMovie(load_gif)
mov.setScaledSize(QSize(18, 18))
self.working_indicator.setMovie(mov)
mov.start()
self.direct_entry_edit = QLineEdit(self)
self.direct_entry_edit.installEventFilter(self)
self.direct_entry_edit.setPlaceholderText(
trans._('install by name/url, or drop file...')
)
self.direct_entry_btn = QPushButton(trans._("Install"), self)
self.direct_entry_btn.clicked.connect(self._install_packages)
self.show_status_btn = QPushButton(trans._("Show Status"), self)
self.show_status_btn.setFixedWidth(100)
self.show_sorter_btn = QPushButton(trans._("<< Show Sorter"), self)
self.close_btn = QPushButton(trans._("Close"), self)
self.close_btn.clicked.connect(self.reject)
buttonBox.addWidget(self.show_status_btn)
buttonBox.addWidget(self.working_indicator)
buttonBox.addWidget(self.direct_entry_edit)
buttonBox.addWidget(self.direct_entry_btn)
buttonBox.addWidget(self.process_error_indicator)
buttonBox.addSpacing(60)
buttonBox.addWidget(self.show_sorter_btn)
buttonBox.addWidget(self.close_btn)
buttonBox.setContentsMargins(0, 0, 4, 0)
vlay_1.addLayout(buttonBox)
self.show_status_btn.setCheckable(True)
self.show_status_btn.setChecked(False)
self.show_status_btn.toggled.connect(self._toggle_status)
self.show_sorter_btn.setCheckable(True)
self.show_sorter_btn.setChecked(False)
self.show_sorter_btn.toggled.connect(self._toggle_sorter)
self.v_splitter.setStretchFactor(1, 2)
self.h_splitter.setStretchFactor(0, 2)
def _update_count_in_label(self):
count = self.available_list.count()
self.avail_label.setText(
trans._("Available Plugins ({count})").format(count=count)
)
def eventFilter(self, watched, event):
if event.type() == QEvent.DragEnter:
# we need to accept this event explicitly to be able
# to receive QDropEvents!
event.accept()
if event.type() == QEvent.Drop:
md = event.mimeData()
if md.hasUrls():
files = [url.toLocalFile() for url in md.urls()]
self.direct_entry_edit.setText(files[0])
return True
return super().eventFilter(watched, event)
def _toggle_sorter(self, show):
if show:
self.show_sorter_btn.setText(trans._(">> Hide Sorter"))
self.plugin_sorter.show()
else:
self.show_sorter_btn.setText(trans._("<< Show Sorter"))
self.plugin_sorter.hide()
def _toggle_status(self, show):
if show:
self.show_status_btn.setText(trans._("Hide Status"))
self.stdout_text.show()
else:
self.show_status_btn.setText(trans._("Show Status"))
self.stdout_text.hide()
def _install_packages(self, packages: Sequence[str] = ()):
if not packages:
_packages = self.direct_entry_edit.text()
if os.path.exists(_packages):
packages = [_packages]
else:
packages = _packages.split()
self.direct_entry_edit.clear()
if packages:
self.installer.install(packages)
if __name__ == "__main__":
from qtpy.QtWidgets import QApplication
app = QApplication([])
w = QtPluginDialog()
w.show()
app.exec_()
|
[
"noreply@github.com"
] |
zzalscv2.noreply@github.com
|
6c05c7073fcadf893e77fa9b4e837fe1d19d0d8b
|
b5c2571948d1e7fd6a21cfe3267cb7de9088cf56
|
/Bytecode Decompile/inspect.py
|
be063eb8ef519133f5a0bd1dcfd234259b4c0c72
|
[] |
no_license
|
C0MPU73R/Toontown-2003-Bytecode
|
ff32042d4da5894ec3a4fb7da43614df26d25a9d
|
aa6862f86034f342d5fee9934cd6ed3e83de99f3
|
refs/heads/master
| 2023-05-03T11:55:57.959617
| 2018-12-02T00:05:43
| 2018-12-02T00:05:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,764
|
py
|
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__date__ = '1 Jan 2001'
import sys, os, types, string, re, dis, imp, tokenize
def ismodule(object):
return isinstance(object, types.ModuleType)
def isclass(object):
return isinstance(object, types.ClassType) or hasattr(object, '__bases__')
def ismethod(object):
return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
return hasattr(object, '__get__') and not hasattr(object, '__set__') and not ismethod(object) and not isfunction(object) and not isclass(object)
def isfunction(object):
return isinstance(object, types.FunctionType)
def istraceback(object):
return isinstance(object, types.TracebackType)
def isframe(object):
return isinstance(object, types.FrameType)
def iscode(object):
return isinstance(object, types.CodeType)
def isbuiltin(object):
return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
return isbuiltin(object) or isfunction(object) or ismethod(object) or ismethoddescriptor(object)
def getmembers(object, predicate=None):
results = []
for key in dir(object):
value = getattr(object, key)
if not predicate or predicate(value):
results.append((key, value))
results.sort()
return results
def classify_class_attrs(cls):
mro = getmro(cls)
names = dir(cls)
result = []
for name in names:
if name in cls.__dict__:
obj = cls.__dict__[name]
else:
obj = getattr(cls, name)
homecls = getattr(obj, '__objclass__', None)
if homecls is None:
for base in mro:
if name in base.__dict__:
homecls = base
break
if homecls is not None and name in homecls.__dict__:
obj = homecls.__dict__[name]
obj_via_getattr = getattr(cls, name)
if isinstance(obj, staticmethod):
kind = 'static method'
else:
if isinstance(obj, classmethod):
kind = 'class method'
else:
if isinstance(obj, property):
kind = 'property'
else:
if ismethod(obj_via_getattr) or ismethoddescriptor(obj_via_getattr):
kind = 'method'
else:
kind = 'data'
result.append((name, kind, homecls, obj))
return result
def _searchbases(cls, accum):
if cls in accum:
return
accum.append(cls)
for base in cls.__bases__:
_searchbases(base, accum)
def getmro(cls):
if hasattr(cls, '__mro__'):
return cls.__mro__
else:
result = []
_searchbases(cls, result)
return tuple(result)
def indentsize(line):
expline = string.expandtabs(line)
return len(expline) - len(string.lstrip(expline))
def getdoc(object):
try:
doc = object.__doc__
except AttributeError:
return None
else:
if not isinstance(doc, (str, unicode)):
return None
try:
lines = string.split(string.expandtabs(doc), '\n')
except UnicodeError:
return None
else:
margin = None
for line in lines[1:]:
content = len(string.lstrip(line))
if not content:
continue
indent = len(line) - content
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if margin is not None:
for i in range(1, len(lines)):
lines[i] = lines[i][margin:]
return string.join(lines, '\n')
def getfile(object):
if ismodule(object):
if hasattr(object, '__file__'):
return object.__file__
raise TypeError, 'arg is a built-in module'
if isclass(object):
object = sys.modules.get(object.__module__)
if hasattr(object, '__file__'):
return object.__file__
raise TypeError, 'arg is a built-in class'
if ismethod(object):
object = object.im_func
if isfunction(object):
object = object.func_code
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
return object.co_filename
raise TypeError, 'arg is not a module, class, method, function, traceback, frame, or code object'
def getmoduleinfo(path):
filename = os.path.basename(path)
suffixes = map(lambda (suffix, mode, mtype): (-len(suffix), suffix, mode, mtype), imp.get_suffixes())
suffixes.sort()
for neglen, suffix, mode, mtype in suffixes:
if filename[neglen:] == suffix:
return (filename[:neglen], suffix, mode, mtype)
def getmodulename(path):
info = getmoduleinfo(path)
if info:
return info[0]
def getsourcefile(object):
filename = getfile(object)
if string.lower(filename[-4:]) in ['.pyc', '.pyo']:
filename = filename[:-4] + '.py'
for suffix, mode, kind in imp.get_suffixes():
if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:
return None
if os.path.exists(filename):
return filename
return
def getabsfile(object):
return os.path.normcase(os.path.abspath(getsourcefile(object) or getfile(object)))
modulesbyfile = {}
def getmodule(object):
if ismodule(object):
return object
if isclass(object):
return sys.modules.get(object.__module__)
try:
file = getabsfile(object)
except TypeError:
return None
else:
if modulesbyfile.has_key(file):
return sys.modules[modulesbyfile[file]]
for module in sys.modules.values():
if hasattr(module, '__file__'):
modulesbyfile[getabsfile(module)] = module.__name__
if modulesbyfile.has_key(file):
return sys.modules[modulesbyfile[file]]
main = sys.modules['__main__']
if hasattr(main, object.__name__):
mainobject = getattr(main, object.__name__)
if mainobject is object:
return main
builtin = sys.modules['__builtin__']
if hasattr(builtin, object.__name__):
builtinobject = getattr(builtin, object.__name__)
if builtinobject is object:
return builtin
return
def findsource(object):
try:
file = open(getsourcefile(object))
except (TypeError, IOError):
raise IOError, 'could not get source code'
else:
lines = file.readlines()
file.close()
if ismodule(object):
return (lines, 0)
if isclass(object):
name = object.__name__
pat = re.compile('^\\s*class\\s*' + name + '\\b')
for i in range(len(lines)):
if pat.match(lines[i]):
return (lines, i)
else:
raise IOError, 'could not find class definition'
if ismethod(object):
object = object.im_func
if isfunction(object):
object = object.func_code
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
if not hasattr(object, 'co_firstlineno'):
raise IOError, 'could not find function definition'
lnum = object.co_firstlineno - 1
pat = re.compile('^\\s*def\\s')
while lnum > 0:
if pat.match(lines[lnum]):
break
lnum = lnum - 1
return (lines, lnum)
raise IOError, 'could not find code object'
def getcomments(object):
try:
lines, lnum = findsource(object)
except IOError:
return None
else:
if ismodule(object):
start = 0
if lines and lines[0][:2] == '#!':
start = 1
while start < len(lines) and string.strip(lines[start]) in ['', '#']:
start = start + 1
if start < len(lines) and lines[start][:1] == '#':
comments = []
end = start
while end < len(lines) and lines[end][:1] == '#':
comments.append(string.expandtabs(lines[end]))
end = end + 1
return string.join(comments, '')
if lnum > 0:
indent = indentsize(lines[lnum])
end = lnum - 1
if end >= 0 and string.lstrip(lines[end])[:1] == '#' and indentsize(lines[end]) == indent:
comments = [string.lstrip(string.expandtabs(lines[end]))]
if end > 0:
end = end - 1
comment = string.lstrip(string.expandtabs(lines[end]))
while comment[:1] == '#' and indentsize(lines[end]) == indent:
comments[:0] = [
comment]
end = end - 1
if end < 0:
break
comment = string.lstrip(string.expandtabs(lines[end]))
while comments and string.strip(comments[0]) == '#':
comments[:1] = []
while comments and string.strip(comments[-1]) == '#':
comments[(-1):] = []
return string.join(comments, '')
return
class ListReader:
__module__ = __name__
def __init__(self, lines):
self.lines = lines
self.index = 0
def readline(self):
i = self.index
if i < len(self.lines):
self.index = i + 1
return self.lines[i]
else:
return ''
class EndOfBlock(Exception):
__module__ = __name__
class BlockFinder:
__module__ = __name__
def __init__(self):
self.indent = 0
self.started = 0
self.last = 0
def tokeneater(self, type, token, (srow, scol), (erow, ecol), line):
if not self.started:
if type == tokenize.NAME:
self.started = 1
else:
if type == tokenize.NEWLINE:
self.last = srow
else:
if type == tokenize.INDENT:
self.indent = self.indent + 1
else:
if type == tokenize.DEDENT:
self.indent = self.indent - 1
if self.indent == 0:
raise EndOfBlock, self.last
def getblock(lines):
try:
tokenize.tokenize(ListReader(lines).readline, BlockFinder().tokeneater)
except EndOfBlock, eob:
return lines[:eob.args[0]]
def getsourcelines(object):
lines, lnum = findsource(object)
if ismodule(object):
return (lines, 0)
else:
return (
getblock(lines[lnum:]), lnum + 1)
def getsource(object):
lines, lnum = getsourcelines(object)
return string.join(lines, '')
def walktree(classes, children, parent):
results = []
classes.sort(lambda a, b: cmp(a.__name__, b.__name__))
for c in classes:
results.append((c, c.__bases__))
if children.has_key(c):
results.append(walktree(children[c], children, c))
return results
def getclasstree(classes, unique=0):
children = {}
roots = []
for c in classes:
if c.__bases__:
for parent in c.__bases__:
if not children.has_key(parent):
children[parent] = []
children[parent].append(c)
if unique and parent in classes:
break
else:
if c not in roots:
roots.append(c)
for parent in children.keys():
if parent not in classes:
roots.append(parent)
return walktree(roots, children, None)
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = (
1, 2, 4, 8)
def getargs(co):
if not iscode(co):
raise TypeError, 'arg is not a code object'
code = co.co_code
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
step = 0
for i in range(nargs):
if args[i][:1] in ['', '.']:
stack, remain, count = ([], [], [])
while step < len(code):
op = ord(code[step])
step = step + 1
if op >= dis.HAVE_ARGUMENT:
opname = dis.opname[op]
value = ord(code[step]) + ord(code[step + 1]) * 256
step = step + 2
if opname in ['UNPACK_TUPLE', 'UNPACK_SEQUENCE']:
remain.append(value)
count.append(value)
else:
if opname == 'STORE_FAST':
stack.append(names[value])
remain[-1] = remain[-1] - 1
while remain[-1] == 0:
remain.pop()
size = count.pop()
stack[(-size):] = [stack[-size:]]
if not remain:
break
remain[-1] = remain[-1] - 1
if not remain:
break
args[i] = stack[0]
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return (args, varargs, varkw)
def getargspec(func):
if not isfunction(func):
raise TypeError, 'arg is not a Python function'
args, varargs, varkw = getargs(func.func_code)
return (
args, varargs, varkw, func.func_defaults)
def getargvalues(frame):
args, varargs, varkw = getargs(frame.f_code)
return (
args, varargs, varkw, frame.f_locals)
def joinseq(seq):
if len(seq) == 1:
return '(' + seq[0] + ',)'
else:
return '(' + string.join(seq, ', ') + ')'
def strseq(object, convert, join=joinseq):
if type(object) in [types.ListType, types.TupleType]:
return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object))
else:
return convert(object)
def formatargspec(args, varargs=None, varkw=None, defaults=None, formatarg=str, formatvarargs=lambda name: '*' + name, formatvarkw=lambda name: '**' + name, formatvalue=lambda value: '=' + repr(value), join=joinseq):
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i in range(len(args)):
spec = strseq(args[i], formatarg, join)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs:
specs.append(formatvarargs(varargs))
if varkw:
specs.append(formatvarkw(varkw))
return '(' + string.join(specs, ', ') + ')'
def formatargvalues(args, varargs, varkw, locals, formatarg=str, formatvarargs=lambda name: '*' + name, formatvarkw=lambda name: '**' + name, formatvalue=lambda value: '=' + repr(value), join=joinseq):
def convert(name, locals=locals, formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = []
for i in range(len(args)):
specs.append(strseq(args[i], convert, join))
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + string.join(specs, ', ') + ')'
def getframeinfo(frame, context=1):
if istraceback(frame):
frame = frame.tb_frame
if not isframe(frame):
raise TypeError, 'arg is not a frame or traceback object'
filename = getsourcefile(frame)
lineno = getlineno(frame)
if context > 0:
start = lineno - 1 - context // 2
try:
lines, lnum = findsource(frame)
except IOError:
lines = index = None
else:
start = max(start, 1)
start = min(start, len(lines) - context)
lines = lines[start:start + context]
index = lineno - 1 - start
else:
lines = index = None
return (
filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
lineno = frame.f_lineno
code = frame.f_code
if hasattr(code, 'co_lnotab'):
table = code.co_lnotab
lineno = code.co_firstlineno
addr = 0
for i in range(0, len(table), 2):
addr = addr + ord(table[i])
if addr > frame.f_lasti:
break
lineno = lineno + ord(table[i + 1])
return lineno
def getouterframes(frame, context=1):
framelist = []
while frame:
framelist.append((frame,) + getframeinfo(frame, context))
frame = frame.f_back
return framelist
def getinnerframes(tb, context=1):
framelist = []
while tb:
framelist.append((tb.tb_frame,) + getframeinfo(tb, context))
tb = tb.tb_next
return framelist
def currentframe():
try:
raise 'catch me'
except:
return sys.exc_traceback.tb_frame.f_back
if hasattr(sys, '_getframe'):
currentframe = sys._getframe
def stack(context=1):
return getouterframes(currentframe().f_back, context)
def trace(context=1):
return getinnerframes(sys.exc_traceback, context)
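# Editorial note (not part of the decompiled source): this is a Python 2
# reconstruction of the stdlib inspect module, so the py2-only constructs
# (the string module, tuple-unpacking parameters, string exceptions like
# raise 'catch me') are faithful to the original bytecode. Typical py2 use:
#
#     import inspect
#     args, varargs, varkw, defaults = inspect.getargspec(inspect.getmembers)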
|
[
"flamingdog101@gmail.com"
] |
flamingdog101@gmail.com
|
e296418c14e85e3c5c9fb02e21c897c1c445ec6d
|
83b46306f0ff2f7374e3a1b1edfdd858909c012a
|
/sft/migrations/0001_initial.py
|
342fe5e8e60ac9bdfb5be1378f15415c030d9425
|
[] |
no_license
|
TsukitoIwasaki/ToyoApps
|
045cd6ae5613756001e46433276592854aa5210b
|
d3fce9aa725f184391069c30c03e99964bf885c0
|
refs/heads/master
| 2022-11-07T05:30:52.491235
| 2020-06-26T07:48:43
| 2020-06-26T07:48:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,900
|
py
|
# Generated by Django 2.0.4 on 2020-06-03 09:07
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Schedule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('userId', models.CharField(max_length=255, verbose_name='code')),
('start_time', models.TimeField(blank=True, null=True, verbose_name='startTime')),
('end_time', models.TimeField(blank=True, null=True, verbose_name='endTime')),
('startDate', models.DateField(blank=True, null=True, verbose_name='startDate')),
('comment', models.TextField(blank=True, max_length=5000, null=True, verbose_name='comment')),
('title', models.CharField(blank=True, max_length=255, null=True, verbose_name='title')),
('status', models.IntegerField(blank=True, choices=[(10, '仮登録'), (20, '確定')], default=10, null=True, verbose_name='ステータス')),
],
),
migrations.CreateModel(
name='Users',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='名前')),
('user_code', models.CharField(blank=True, max_length=255, null=True, verbose_name='社員コード')),
('employment_status', models.IntegerField(blank=True, choices=[(10, '正社員'), (20, 'パート'), (30, 'アルバイト')], null=True, verbose_name='雇用形態')),
('comment', models.TextField(blank=True, max_length=5000, null=True, verbose_name='comment')),
],
),
]
|
[
"ktakahashi@toyo-group.co.jp"
] |
ktakahashi@toyo-group.co.jp
|
e470fdcb2ba36e4b85c43eaf7b1a7524a82c12d4
|
6bb6b01270a11e6e08efaebd841bac9645a2d3e0
|
/dev/seafile_dev/seafes.py
|
ff3e15ed1c4355fce84c9cf5563a300e580405ea
|
[] |
no_license
|
dolevu/seafile-vagrant
|
44767bfd772ddda3c3106aeaa9260949a1b1bb95
|
6140e70ab3922061e8f2adb4e2ec3656e14a213e
|
refs/heads/master
| 2020-03-29T08:20:18.329597
| 2017-03-27T08:14:12
| 2017-03-27T08:14:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
from fabric.api import task, run, cd
def _run_default(command, *a, **kw):
"""Source /etc/default/seafile-server and run the given command"""
command = ". /etc/default/seafile-server; " + command
return run(command, *a, **kw)
def _index_op(op):
with cd('/vagrant/src/seafes'):
_run_default('python -m seafes.update_repos --loglevel debug {}'.format(op))
@task
def test(*args):
with cd('/vagrant/src/seafes'):
_run_default('py.test ' + ' '.join(args))
@task
def update():
_index_op('update')
@task
def clear():
_index_op('clear')
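# Editorial usage sketch (not part of the original file): these are Fabric 1
# tasks, so with a host configured they would be invoked from the shell, e.g.
#
#     fab update            # incrementally reindex repos
#     fab clear             # wipe the search index
#     fab test:-k,smoke     # forwards "-k smoke" through to py.test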
|
[
"linshuai2012@gmail.com"
] |
linshuai2012@gmail.com
|
fad4a73a32f95e0c12039344043f8aa6186ca2d7
|
4c89545d41c16e33ace5eec50a7b8eb0b11780ee
|
/BookCommerce/booktime/settings.py
|
73ff78715e7d1a3833b7b7d1b0f0c81cd7ad9cc0
|
[] |
no_license
|
envs/DjangoProjects
|
8782235ad1a0e77d491849ebef6c4e7cc9705537
|
bd9bf356421d4f4dca9593a79e954b9f15712570
|
refs/heads/master
| 2022-12-02T15:46:02.010763
| 2020-08-19T12:22:55
| 2020-08-19T12:22:55
| 248,914,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,044
|
py
|
"""
Django settings for booktime project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fo#f@p&^oy=)&3+q#&4*@#q3btcvb-e6)etb^%zhw8)6xprr^7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main.apps.MainConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'booktime.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'booktime.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': 'bookcommerce',
# 'USER': 'envs',
# 'PASSWORD': 'test123',
# 'HOST': '127.0.0.1',
# 'PORT': '5432',
# }
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'  # this is for the development environment
# STATIC_ROOT = ''  # to be specified for the production environment
# NB: MEDIA_ROOT is the location on the local drive where all the user files will be uploaded
# It will also be automatically available for download and their URL prefixed with MEDIA_URL
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
if not DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST_USER = "username"
EMAIL_HOST = "smtp.domain.com"
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_PASSWORD = 'password'
else:
EMAIL_BACKEND = (
"django.core.mail.backends.console.EmailBackend"
)
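# Editorial note (not part of the original settings file): hard-coding
# SECRET_KEY and SMTP credentials is only acceptable for a toy project; a
# common pattern is to read them from the environment instead, e.g.
#
#     SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'dev-only-insecure-key')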
|
[
"olaonipekun2001@yahoo.com"
] |
olaonipekun2001@yahoo.com
|
e806fa4c23923d883b578cf5c175446980903fae
|
b13d852ef0a7f847f0c0a39334a7b4e0ff845f85
|
/apps/track/migrations/0018_comment_datetime.py
|
c51eb831b9cc965368d128af7a85c0e6bb2167b2
|
[
"MIT"
] |
permissive
|
martinlehoux/django_bike
|
357e2a5ea3c6ba8a79df92eeaec214a89a1eb165
|
05373d2649647fe8ebadb0aad54b9a7ec1900fe7
|
refs/heads/master
| 2023-08-21T23:37:25.273766
| 2021-04-20T13:01:01
| 2021-04-20T13:01:01
| 273,638,471
| 1
| 0
|
MIT
| 2021-09-22T18:00:45
| 2020-06-20T04:32:38
|
Python
|
UTF-8
|
Python
| false
| false
| 429
|
py
|
# Generated by Django 3.0.8 on 2020-09-06 19:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("track", "0017_auto_20200906_1222"),
]
operations = [
migrations.AddField(
model_name="comment",
name="datetime",
field=models.DateTimeField(auto_now_add=True),
preserve_default=False,
),
]
|
[
"noreply@github.com"
] |
martinlehoux.noreply@github.com
|
3364dc89e4b20acc4a05c2118b63545ff7459022
|
55113ee5779138b22d6404c2b138eeaa77120e3a
|
/knn.py
|
bad4366016094d312a8b78ec01be352132a8f46d
|
[] |
no_license
|
timting/titanic
|
96e536dec9d2defb77d5657e865b66d6d2dc5da7
|
c808e08bf5dacf06709a0dde25aea7b7a0300253
|
refs/heads/master
| 2021-08-15T16:47:10.893023
| 2017-11-17T23:59:52
| 2017-11-17T23:59:52
| 106,059,252
| 0
| 1
| null | 2017-11-17T23:59:53
| 2017-10-06T23:46:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
#!/usr/bin/env python
from numpy import *
import operator
import titanic
def distances(data, point):
point_matrix = tile(point, (data.shape[0],1))
diffs = point_matrix - data
square_diffs = diffs**2
square_distances = square_diffs.sum(axis=1)
dist = square_distances**0.5
return dist
def nearest_neighbors(data, point, labels, k):
d = distances(data, point)
distance_indices = d.argsort()
votes = {}
for i in range(k):
label = labels[ distance_indices[i] ]
votes[label] = votes.get(label, 0) + 1
sorted_labels = sorted(votes.iteritems(),
key=operator.itemgetter(1), reverse=True)
return sorted_labels[0][0]
def main():
data,labels,_ = titanic.read_data("normalized-train.csv")
test,test_labels,_ = titanic.read_data("normalized-test.csv")
successes = 0
totals = 0
for i in range(len(test)):
point = test[i]
knn_label = nearest_neighbors(data, point, labels, 3)
# print " want %s, got %s" % (test_labels[i], knn_label)
if knn_label == test_labels[i]:
successes += 1
totals += 1
print "accuracy: %2.4f %%" % (float(successes) / float(totals) * 100.0)
if __name__ == "__main__":
main()
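# Editorial note (not part of the original file): distances() is the standard
# vectorized Euclidean distance; the same result in one expression (Python 2):
#
#     dist = (((tile(point, (data.shape[0], 1)) - data) ** 2).sum(axis=1)) ** 0.5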
|
[
"alexycodes@gmail.com"
] |
alexycodes@gmail.com
|
b1c863166fcc509a61ec6e7df70babc7d5e2e3e4
|
cf19882e1326e152238719b5c530ea02cd357062
|
/Quiz2.py
|
15e9c2ede25499c559426c083c9ea21c74e7f0f6
|
[] |
no_license
|
kiryeong/python_basic_study
|
02ea260f3ea47c0957635c44d1ed66fb736374c1
|
72658df8cf137da3a6803f2ec98d5794da7b2175
|
refs/heads/main
| 2022-12-26T08:43:03.742975
| 2020-10-09T03:11:35
| 2020-10-09T03:11:35
| 302,518,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,377
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 22:23:53 2020
@author: SAMSUNG
"""
sentence = '나는 소년입니다'
print(sentence)
sentence2 = "파이썬은 쉬워요"
print(sentence2)
sentence3 = """
나는 소년이고,
파이썬은 쉬워요
"""
print(sentence3)
jumin = "990120-1234567"
print("성별: " + jumin[7])
print("연: " + jumin[0:2]) #0부터 2직전까지 (0,1)
print("월: " + jumin[2:4])
print("일: " + jumin[4:6])
print("생년월일: " + jumin[:6]) #처음부터 6직전까지
print("뒤 7자리: " + jumin[7:]) #7부터 끝까지
print("뒤 7자리 (뒤에부터): " + jumin[-7:]) #맨 뒤에서 7번째부터 끝까지
python = "Python is Amazing"
print(python.lower()) # to lowercase
print(python.upper()) # to uppercase
print(python[0].isupper())
print(len(python)) # length
print(python.replace("Python", "Java")) # replaces "Python" with "Java"
index = python.index("n")
print(index)
index = python.index("n",index + 1)
print(index)
'''
print(python.find("Java")) # returns -1 when the value is not found
print(python.index("Java")) # raises an error and terminates when the value is not found
print(python.count("n")) # how many times "n" appears in total
'''
# Method 1
print("나는 %d살입니다." % 20)
print("나는 %s을 좋아해요" % "파이썬") # %s is a string
print("Apple 은 %c로 시작해요." % "A") # %c is a single character
print("나는 %s색과 %s색을 좋아해요." % ("파란", "빨간"))
# Method 2
print("나는 {}살입니다.".format(20))
print("나는 {}색과 {}색을 좋아해요.".format("파란", "빨간"))
print("나는 {0}색과 {1}색을 좋아해요.".format("파란", "빨간"))
print("나는 {1}색과 {0}색을 좋아해요.".format("파란", "빨간"))
# Method 3
print("나는 {age}살이며, {color}색을 좋아해요.".format(age = 20, color = "빨간"))
# Method 4
age = 20
color = "빨간"
print(f"나는 {age}살이며, {color}색을 좋아해요.")
print("백문이 불여일견\n백견이 불여일타") #\n줄바꿈
#저는 "나도코딩" 입니다.
print("저는 \"나도코딩\"입니다.") #\" 또는 \' : 문장 내에서 따옴표
#\\ : 문장 내에서 \
#\r : 커서를 맨 앞으로 이동
print("Red Apple\rPine")
#\b : 백스페이스 (한 글자 삭제)
print("Redd\bApple")
#\t : 탭
print("Red\tApple")
|
[
"noreply@github.com"
] |
kiryeong.noreply@github.com
|
0d1437cc6dc02b645e276fa0d6dea694361c7720
|
ad3a0a338ae77063232cb2113329e0a04cef9f87
|
/anonymization/REM.py
|
0e3249904956dbdf95cea2ac8eaffac7719c5988
|
[] |
no_license
|
CommunityDeception/CommunityDeceptor
|
646c3ca182cc74456c039a37ad251ea26e876c47
|
c06a8e909cd74ba8b2ec3d5f65888d7551946c4f
|
refs/heads/master
| 2023-02-21T18:09:16.662721
| 2019-10-27T10:53:26
| 2019-10-27T10:53:26
| 199,795,530
| 1
| 0
| null | 2023-02-11T00:22:34
| 2019-07-31T06:40:29
|
Python
|
UTF-8
|
Python
| false
| false
| 7,194
|
py
|
import logging.config
import sys
import time
from typing import List
from igraph import Graph
from igraph.clustering import VertexClustering
from settings import master
from similarity.jaccard import count_jaccard_index_and_recall_index
from utils.counter_pre import count_security_index_by_pre
from utils.pre_counter import count_pre_security_index
from utils.timer import time_mark
logging.config.dictConfig(master.LOGGING_SETTINGS)
logger = logging.getLogger('normal')
class REMAnonymize(object):
def __init__(self, graph, edges_sum, detection_func, func_args, interval, partitions, path, **kwargs):
self.__graph: Graph = graph
self.__edges_sum = edges_sum
self.__detection_func = detection_func
self.__func_args: dict = func_args
self.__interval = interval
self.__partitions: VertexClustering = partitions
self.__path = path
self.__start_time = time.time()
self.__total_edge_set: set = set()
self.__partitions_degree: List[int] = list()
self.__partitions_volume: List[int] = list()
self.__degree_distribute: List[int] = list()
self.__sorted_partitions: List[List[int]] = list()
self.__partitions_num = 0
self.__available_edges = list()
self.__end_time = None
def __start(self):
logger.info("=" * 60)
logger.info("REMAnonymize")
logger.info(f'Time : {time_mark(self.__start_time)}')
logger.info(f'Graph: {self.__path}')
logger.info(f'Info : {self.__graph.vcount()} {self.__graph.ecount()}')
logger.info(f'Edges: {self.__edges_sum}')
logger.info(f'Func : {self.__detection_func.__name__}')
logger.info(f'Args : {self.__func_args}')
logger.info(f'Gap : {self.__interval}')
logger.info(f'Parts: {len(self.__partitions)}')
logger.info("=" * 60)
def __quit(self):
self.__end_time = time.time()
logger.info("=" * 60)
logger.info(f'Time : {time_mark(self.__end_time)}')
logger.info(f'Total: {(self.__end_time - self.__start_time): 10.4f} s')
logger.info("=" * 60)
logger.info("\n\n")
def __preprocess(self):
self.__total_edge_set = set(self.__graph.get_edgelist())
self.__partitions_num = len(self.__partitions)
self.__degree_distribute = self.__graph.degree(self.__graph.vs)
self.__set_necessary_info()
def __set_necessary_info(self):
for index, part in enumerate(self.__partitions):
subgraph: Graph = self.__partitions.subgraph(index)
self.__partitions_degree.append(2 * subgraph.ecount())
self.__partitions_volume.append(sum(self.__graph.degree(part)))
self.__sorted_partitions.append(sorted(part, key=lambda x: self.__graph.degree(x)))
def __get_available_edges(self):
available_edges = list()
degree_distribute = self.__degree_distribute
for si in range(self.__partitions_num):
for ti in range(si, self.__partitions_num):
s_order, t_order = self.__sorted_partitions[si], self.__sorted_partitions[ti]
u, v = s_order[0], t_order[0]
if degree_distribute[u] > degree_distribute[v]:
u, v = v, u
s_order, t_order = t_order, s_order
u_neighbors = set(self.__graph.neighbors(u))
for node in t_order:
if node not in u_neighbors:
v = node
break
du, dv = degree_distribute[u], degree_distribute[v]
upper_bound = du + dv
for i in t_order:
if degree_distribute[i] >= dv:
edge = (u, v) if u < v else (v, u)
available_edges.append(edge)
break
else:
i_neighbors = set(self.__graph.neighbors(i))
for j in s_order:
if j not in i_neighbors:
break
di, dj = degree_distribute[i], degree_distribute[j]
if di + dj < upper_bound:
edge = (i, j) if i < j else (j, i)
if edge not in self.__total_edge_set:
available_edges.append(edge)
self.__available_edges = available_edges
def __choose_edge(self):
self.__get_available_edges()
partitions = self.__partitions
optimal_edge = None
edge_partitions = None
min_security = sys.maxsize
total_degree = 2 * self.__graph.ecount()
degree_distribute = self.__degree_distribute
membership = partitions.membership
pre_count = count_pre_security_index(self.__graph, partitions, self.__partitions_degree, self.__partitions_volume)
for edge in self.__available_edges:
src_des = (membership[edge[0]], membership[edge[1]])
security_index = count_security_index_by_pre(pre_count, edge, src_des, total_degree + 2, self.__partitions_degree, self.__partitions_volume, degree_distribute)
if security_index < min_security:
min_security = security_index
optimal_edge = edge
edge_partitions = src_des
self.__graph.add_edge(*optimal_edge)
self.__total_edge_set.add(optimal_edge)
self.__partitions_volume[edge_partitions[0]] += 1
self.__partitions_volume[edge_partitions[1]] += 1
self.__degree_distribute[optimal_edge[0]] += 1
self.__degree_distribute[optimal_edge[1]] += 1
self.__sorted_partitions[edge_partitions[0]].sort(key=lambda x: self.__graph.degree(x))
self.__sorted_partitions[edge_partitions[1]].sort(key=lambda x: self.__graph.degree(x))
return min_security
def __should_count(self, count):
return divmod(count, self.__interval)[1]
def __anonymize(self):
edge_sum = self.__edges_sum
pre_partitions = self.__partitions
count = 1
while count <= edge_sum:
try:
security_index = self.__choose_edge()
except ValueError:
logger.info(f'{count:<5d} Not enough edges to add.')
return -1
if not self.__should_count(count):
fin_partitions = self.__detection_func(self.__graph, **self.__func_args)
jaccard_index, recall_index = count_jaccard_index_and_recall_index(pre_partitions, fin_partitions)
modularity = self.__graph.modularity(pre_partitions.membership)
NMI = pre_partitions.compare_to(fin_partitions, method="NMI")
logger.info(f"{count:<5d} jaccard index: ({jaccard_index:8.7f}), recall index: ({recall_index:8.7f}), "
f"security_index: ({security_index:8.7f}), modularity: ({modularity:8.7f}), NMI: ({NMI:8.7f})")
count += 1
def run(self):
self.__preprocess()
self.__start()
self.__anonymize()
self.__quit()
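# Editorial usage sketch (not part of the original file); graph size, edge
# budget, and interval are illustrative, and igraph's multilevel detector is
# used as the detection_func the class expects:
#
#     g = Graph.Erdos_Renyi(n=200, m=800)
#     parts = g.community_multilevel()
#     REMAnonymize(graph=g, edges_sum=50,
#                  detection_func=Graph.community_multilevel, func_args={},
#                  interval=10, partitions=parts, path='er200.graphml').run()
#
# Each added edge is chosen to minimize the security index, and every
# `interval` additions the Jaccard/recall/modularity/NMI metrics are logged.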
|
[
"47921233+CommunityDeception@users.noreply.github.com"
] |
47921233+CommunityDeception@users.noreply.github.com
|
78fad504f2c59435075a0fa7a5d366d4a88286bf
|
6fd26735b9dfd1d3487c1edfebf9e1e595196168
|
/2015/06a-lights.py
|
6896bb9a50622d912542f360a119c0278957a812
|
[
"BSD-3-Clause"
] |
permissive
|
Kwpolska/adventofcode
|
bc3b1224b5272aa8f3a5c4bef1d8aebe04dcc677
|
8e55ef7b31a63a39cc2f08b3f28e15c2e4720303
|
refs/heads/master
| 2021-01-10T16:48:38.816447
| 2019-12-03T20:46:07
| 2019-12-03T20:46:07
| 47,507,587
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 749
|
py
|
#!/usr/bin/python3
import numpy
import re
import kwpbar
R = re.compile(r'(turn on|toggle|turn off) (\d+),(\d+) through (\d+),(\d+)')
a = numpy.zeros((1000, 1000), dtype=bool)
kwpbar.pbar(0, 300)
with open('06-input.txt') as fh:
for nl, l in enumerate(fh, 1):
m = R.match(l)
action = m.groups()[0]
x1, y1, x2, y2 = map(int, m.groups()[1:])
for x in range(x1, x2 + 1):
for y in range(y1, y2 + 1):
if action == 'turn on':
a[x][y] = True
elif action == 'turn off':
a[x][y] = False
elif action == 'toggle':
a[x][y] = not a[x][y]
kwpbar.pbar(nl, 300)
print()
print(numpy.count_nonzero(a))
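# Editorial note (not part of the original solution): the per-cell loops can be
# replaced by numpy slice assignment, which is far faster on a 1000x1000 grid:
#
#     sl = (slice(x1, x2 + 1), slice(y1, y2 + 1))
#     if action == 'turn on':
#         a[sl] = True
#     elif action == 'turn off':
#         a[sl] = False
#     else:  # toggle
#         a[sl] = ~a[sl]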
|
[
"kwpolska@gmail.com"
] |
kwpolska@gmail.com
|
f20f1601f58c5bf295a656d70950e2cd2d417daf
|
b613ff2da6ce8908198deef22f11b4112b29150a
|
/user_content/context_processor.py
|
0db85264811e952b480e9c601720e133a04df5d2
|
[] |
no_license
|
maddrum/Rady_and_the_Stars
|
2c0e38b78ecd3aa425cd558a633815de879186c7
|
835d0fdbb32bd63d0b5f0961d33f54a2e1736494
|
refs/heads/master
| 2020-03-14T02:43:22.418681
| 2018-05-23T21:08:05
| 2018-05-23T21:08:05
| 131,249,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
from user_content.models import SiteUser
def profile_pic_address(request):
user_id = request.user.id
avatar_address = {}
    if user_id is None:
        return avatar_address
    get_profile_pic = SiteUser.objects.get(user_id=user_id)
    if get_profile_pic.profile_pic != '':
        avatar_address['address'] = get_profile_pic.profile_pic
    else:
        avatar_address['address'] = '/profile_pics/default_avatar.png'
return avatar_address
|
[
"maddrum9@gmail.com"
] |
maddrum9@gmail.com
|
84fac0547cade475c71b06d34ed74617d7d87a4f
|
813f9d4eadd82c6bd2441a26e135d9a16d815b8a
|
/image_iter.py
|
ff7fa488cd50e1b722bea675a91d653bd09a1bd0
|
[] |
no_license
|
NyangUk/Face-Transformer
|
2082cc590aa7b24966b13dff80535cb243366420
|
d74725c25cc4c8b06eb8344200bf79395a350c2b
|
refs/heads/main
| 2023-07-02T00:53:50.556381
| 2021-08-12T13:59:46
| 2021-08-12T13:59:46
| 394,569,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,074
|
py
|
#!/usr/bin/env python
# encoding: utf-8
'''
@author: yaoyaozhong
@contact: zhongyaoyao@bupt.edu.cn
@file: image_iter_yy.py
@time: 2020/06/03
@desc: training dataset loader for .rec
'''
import torchvision.transforms as transforms
import torch.utils.data as data
import numpy as np
import cv2
import os
import torch
import mxnet as mx
from mxnet import ndarray as nd
from mxnet import io
from mxnet import recordio
import logging
import numbers
import random
logger = logging.getLogger()
from IPython import embed
class FaceDataset(data.Dataset):
def __init__(self, path_imgrec, rand_mirror):
self.rand_mirror = rand_mirror
assert path_imgrec
if path_imgrec:
logging.info('loading recordio %s...',
path_imgrec)
path_imgidx = path_imgrec[0:-4] + ".idx"
print(path_imgrec, path_imgidx)
self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
s = self.imgrec.read_idx(0)
header, _ = recordio.unpack(s)
if header.flag > 0:
print('header0 label', header.label)
self.header0 = (int(header.label[0]), int(header.label[1]))
# assert(header.flag==1)
# self.imgidx = range(1, int(header.label[0]))
self.imgidx = []
self.id2range = {}
self.seq_identity = range(int(header.label[0]), int(header.label[1]))
for identity in self.seq_identity:
s = self.imgrec.read_idx(identity)
header, _ = recordio.unpack(s)
a, b = int(header.label[0]), int(header.label[1])
count = b - a
self.id2range[identity] = (a, b)
self.imgidx += range(a, b)
print('id2range', len(self.id2range))
else:
self.imgidx = list(self.imgrec.keys)
self.seq = self.imgidx
def __getitem__(self, index):
idx = self.seq[index]
s = self.imgrec.read_idx(idx)
header, s = recordio.unpack(s)
label = header.label
if not isinstance(label, numbers.Number):
label = label[0]
_data = mx.image.imdecode(s)
if self.rand_mirror:
_rd = random.randint(0, 1)
if _rd == 1:
_data = mx.ndarray.flip(data=_data, axis=1)
_data = nd.transpose(_data, axes=(2, 0, 1))
_data = _data.asnumpy()
img = torch.from_numpy(_data)
return img, label
def __len__(self):
return len(self.seq)
if __name__ == '__main__':
    root = '/raid/Data/faces_webface_112x112/train.rec'
    # embed()  # leftover debug hook; it blocks execution if left enabled
dataset = FaceDataset(path_imgrec =root, rand_mirror = False)
trainloader = data.DataLoader(dataset, batch_size=32, shuffle=True, num_workers=2, drop_last=False)
print(len(dataset))
    for img, label in trainloader:  # renamed from `data`, which shadowed the torch.utils.data alias
        print(img.shape, label)
|
[
"lobgd9150@gmail.com"
] |
lobgd9150@gmail.com
|
f87ac30ee8177d8d46472777d034607729e57115
|
ef15f6538f14db18ab8161a2a6aacd0d29fbdb7a
|
/wsgi.py
|
7f7ff782263880b8199345acd3fe7f846ce3c6db
|
[
"MIT"
] |
permissive
|
suricats/surirobot-api-services
|
5f6d8536f62de874db8769144239d7924eb68b27
|
b23b440649a759d638cbc8644acc4aeb7f118674
|
refs/heads/dev
| 2020-03-22T07:15:14.029175
| 2019-05-22T16:26:08
| 2019-05-22T16:26:08
| 139,689,048
| 0
| 1
|
MIT
| 2019-05-22T16:25:03
| 2018-07-04T08:00:59
|
Python
|
UTF-8
|
Python
| false
| false
| 27
|
py
|
from api.server import app
|
[
"alain.berrier@outlook.com"
] |
alain.berrier@outlook.com
|
01c576eada6417a47049095bda6e06b430144f70
|
5b3bf81b22f4eb78a1d9e801b2d1d6a48509a236
|
/leetcode/778.py
|
86a36e9731e1dc2184d96130dd07e572c914cf36
|
[] |
no_license
|
okoks9011/problem_solving
|
42a0843cfdf58846090dff1a2762b6e02362d068
|
e86d86bb5e3856fcaaa5e20fe19194871d3981ca
|
refs/heads/master
| 2023-01-21T19:06:14.143000
| 2023-01-08T17:45:16
| 2023-01-08T17:45:16
| 141,427,667
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
from typing import List


class Solution:
def bfs(self, grid: List[List[int]], k) -> bool:
n = len(grid)
ds = [(-1, 0), (0, 1), (1, 0), (0, -1)]
visited = [[False] * n for _ in range(n)]
visited[0][0] = True
cur_q = [(0, 0)]
while cur_q:
next_q = []
for i, j in cur_q:
for di, dj in ds:
ni, nj = i + di, j + dj
if (not (0 <= ni < n)) or (not (0 <= nj < n)):
continue
if grid[ni][nj] > k:
continue
if visited[ni][nj]:
continue
if ni == nj == (n - 1):
return True
visited[ni][nj] = True
next_q.append((ni, nj))
cur_q = next_q
return False
def swimInWater(self, grid: List[List[int]]) -> int:
n = len(grid)
left = grid[0][0]
right = n * n - 1
result = -1
while left <= right:
mid = left + (right - left) // 2
if self.bfs(grid, mid):
result = mid
right = mid - 1
else:
left = mid + 1
return result
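# Example (a sketch, assuming the class is used as on LeetCode):
#   Solution().swimInWater([[0, 2], [1, 3]])  # -> 3
# Binary search on the water level t; bfs checks whether a path whose cells are
# all <= t connects (0, 0) to (n-1, n-1).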
|
[
"okoks9011@gmail.com"
] |
okoks9011@gmail.com
|
26b561263262ae3cea908b5ad26d60a5289578c4
|
ffe23a787537b9706c9ec4d5f7f6ada44ca658f5
|
/venv/Scripts/pilconvert.py
|
43655490dcc142a3076c5994950ab0333bca10a1
|
[] |
no_license
|
zhouli01/python_test01
|
cf966d8d16167f3ab752254d66cef8a94663bbdf
|
658d69d33b8255d612ff79e1df0ffe734d8971bd
|
refs/heads/master
| 2020-03-07T03:15:33.608727
| 2018-04-21T03:47:06
| 2018-04-21T03:47:06
| 127,231,004
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,376
|
py
|
#!d:\test01\venv\scripts\python.exe
#
# The Python Imaging Library.
# $Id$
#
# convert image files
#
# History:
# 0.1 96-04-20 fl Created
# 0.2 96-10-04 fl Use draft mode when converting images
# 0.3 96-12-30 fl Optimize output (PNG, JPEG)
# 0.4 97-01-18 fl Made optimize an option (PNG, JPEG)
# 0.5 98-12-30 fl Fixed -f option (from Anthony Baxter)
#
from __future__ import print_function
import getopt
import sys
from PIL import Image
def usage():
print("PIL Convert 0.5/1998-12-30 -- convert image files")
print("Usage: pilconvert [option] infile outfile")
print()
print("Options:")
print()
print(" -c <format> convert to format (default is given by extension)")
print()
print(" -g convert to greyscale")
print(" -p convert to palette image (using standard palette)")
print(" -r convert to rgb")
print()
print(" -o optimize output (trade speed for size)")
print(" -q <value> set compression quality (0-100, JPEG only)")
print()
print(" -f list supported file formats")
sys.exit(1)
if len(sys.argv) == 1:
usage()
try:
opt, argv = getopt.getopt(sys.argv[1:], "c:dfgopq:r")
except getopt.error as v:
print(v)
sys.exit(1)
output_format = None
convert = None
options = {}
for o, a in opt:
if o == "-f":
Image.init()
id = sorted(Image.ID)
print("Supported formats (* indicates output format):")
for i in id:
if i in Image.SAVE:
print(i+"*", end=' ')
else:
print(i, end=' ')
sys.exit(1)
elif o == "-c":
output_format = a
    elif o == "-g":
convert = "L"
elif o == "-p":
convert = "P"
elif o == "-r":
convert = "RGB"
elif o == "-o":
options["optimize"] = 1
elif o == "-q":
        options["quality"] = int(a)
if len(argv) != 2:
usage()
try:
im = Image.open(argv[0])
if convert and im.mode != convert:
im.draft(convert, im.size)
im = im.convert(convert)
if output_format:
im.save(argv[1], output_format, **options)
else:
im.save(argv[1], **options)
except Exception:
print("cannot convert image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
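# Usage sketch (file names are hypothetical):
#   pilconvert.py -g photo.jpg photo_grey.png    # greyscale, format from extension
#   pilconvert.py -c JPEG -q 90 in.png out.jpg   # explicit format and quality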
|
[
"13717550873@163.com"
] |
13717550873@163.com
|
b0c0ff46322cacf76ad3221323a84d906c19a028
|
1a428731009d455773451aca158c2e77e10bccb1
|
/sample_sim/planning/pomcp_rollout_allocation/bezier_curve.py
|
f39f473ace21fdae00958727dd469c51a090db39
|
[] |
no_license
|
uscresl/AdaptiveSamplingPOMCP
|
6f2c3bc6ac18d175eaf27f7cc9e65a46390ff954
|
c0717a4f07dd33b5d583ea977315eedb9b74f9b6
|
refs/heads/master
| 2023-04-02T03:12:23.636049
| 2021-04-08T23:18:15
| 2021-04-08T23:18:15
| 350,460,334
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,705
|
py
|
import numpy as np
from scipy.special import comb
from sample_sim.planning.pomcp_rollout_allocation.base_rollout_allocator import PrecalculatedBaseRolloutAllocator
def bernstein_poly(i, n, t):
"""
The Bernstein polynomial of n, i as a function of t
"""
return comb(n, i) * (t ** (n - i)) * (1 - t) ** i
def bezier_curve(points, nTimes=1000):
"""
Given a set of control points, return the
bezier curve defined by the control points.
points should be a list of lists, or list of tuples
such as [ [1,1],
[2,3],
[4,5], ..[Xn, Yn] ]
nTimes is the number of time steps, defaults to 1000
See http://processingjs.nihongoresources.com/bezierinfo/
"""
nPoints = len(points)
xPoints = np.array([p[0] for p in points])
yPoints = np.array([p[1] for p in points])
t = np.linspace(0.0, 1.0, nTimes)
polynomial_array = np.array([bernstein_poly(i, nPoints - 1, t) for i in range(0, nPoints)])
xvals = np.dot(xPoints, polynomial_array)
yvals = np.dot(yPoints, polynomial_array)
return xvals, yvals
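# Example (a minimal sketch): a quadratic Bezier arc through three control points.
#   xvals, yvals = bezier_curve([(0, 0), (0.5, 1.0), (1, 0)], nTimes=100)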
class ThreePointBezierCurve(PrecalculatedBaseRolloutAllocator):
def __init__(self, max_budget, currently_used_budget, max_rollout, currently_used_rollout, control_point):
self.max_budget = max_budget
self.currently_used_budget = currently_used_budget
self.max_rollout = max_rollout
self.currently_used_rollout = currently_used_rollout
self.control_point = control_point
super().__init__()
def allocated_rollouts(self):
end_point = [1, 1]
start_point = [0, 0]
control_point = self.control_point
points = np.array([start_point, control_point, end_point])
fraction_of_budget_used = (self.currently_used_budget + 1) / self.max_budget
xvals,yvals = bezier_curve(points)
xval_idx = None
closest_xval_dist = float("inf")
for i,xval in enumerate(xvals):
dist = abs(fraction_of_budget_used - xval)
if dist < closest_xval_dist:
xval_idx = i
closest_xval_dist = dist
        fractional_improvement = yvals[xval_idx]
        # Based on the total expected improvement, this is how much effort we
        # should have spent by now
        number_of_rollouts_we_should_have_used = fractional_improvement * self.max_rollout
        # Subtract the rollouts already spent from the rollouts we should have used
        number_of_free_rollouts = number_of_rollouts_we_should_have_used - self.currently_used_rollout
assert number_of_free_rollouts > 0
return int(number_of_free_rollouts)
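# Hedged usage sketch (all argument values are assumptions; the base class may
# expect extra setup):
#   allocator = ThreePointBezierCurve(max_budget=100, currently_used_budget=10,
#                                     max_rollout=1000, currently_used_rollout=50,
#                                     control_point=[0.2, 0.8])
#   n_free = allocator.allocated_rollouts()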
|
[
"gautams3@users.noreply.github.com"
] |
gautams3@users.noreply.github.com
|
75fcfc95a8816ed00220c22417d87667b7e098c2
|
79cc112bd3490a72c5f0ef978688b8c6adf22542
|
/SimplePython/flatten.py
|
68ee7ab765cc9ebf996dcf9b6f34a49191c63a9d
|
[] |
no_license
|
patarapolw/SimplePython
|
40aee78bd3403c20d9007d47455f16f41210651e
|
1f8af49d3b7672ec9597dda9f45c4aca7ab35d41
|
refs/heads/master
| 2020-03-13T23:02:39.219466
| 2018-12-18T00:33:42
| 2018-12-18T00:33:42
| 131,328,043
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
from __future__ import print_function

try:
    from collections.abc import MutableMapping  # Python 3
except ImportError:
    from collections import MutableMapping  # Python 2 fallback

import xmltodict
import lxml.etree as etree
import re
def flatten(d, parent_key='', sep='-'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
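# Example: flatten({'a': {'b': 1, 'c': 2}}) -> {'a-b': 1, 'a-c': 2}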
def xml2flatDict(xml, sep='-'):
i = 0
result = {}
allKeys = flatten(xmltodict.parse(xml), sep=sep).keys()
for key in allKeys:
key = re.sub('{}?[@#][^/]*{}?'.format(sep, sep), '', key)
xpath = '/{}'.format('/'.join(key.split(sep)))
if '#' in xpath:
print(xpath)
i += 1
continue
if '@' in xpath:
print(xpath)
i += 1
continue
tree = etree.fromstring(xml)
values = tree.xpath(xpath)
try:
if key not in result.keys():
result[key] = [x.text.strip() for x in values]
else:
result[key] += [x.text.strip() for x in values]
except AttributeError:
pass
if i != 0:
print("Missed {} paths".format(i))
return result
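# Hedged usage sketch:
#   xml = '<root><a><b>1</b></a></root>'
#   xml2flatDict(xml)  # -> {'root-a-b': ['1']}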
|
[
"patarapolw@gmail.com"
] |
patarapolw@gmail.com
|
d3e43d2268aaab367d295f1cc66d38cdeac8892c
|
98fc3d1d2fe3fffbe49cd7497b8e4a5183b40029
|
/lost_found/urls.py
|
29cd7907123712bb6202c3b67b2d1df50dbe0baf
|
[
"Apache-2.0"
] |
permissive
|
lleyer/lost-found
|
5e1b9f34079bf415643aa5ba77bd56f7bdae6164
|
36f8246f6ae00b7d926d3a28c5a02749359ba168
|
refs/heads/master
| 2020-03-31T22:12:10.301979
| 2018-10-11T14:55:56
| 2018-10-11T14:55:56
| 152,609,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,698
|
py
|
"""lost_found URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
#from lost_and_found import views, models
from django.conf.urls import *
from django.views.generic import RedirectView
urlpatterns = patterns('lost_and_found.views',
    # url(r'^admin/', include(admin.site.urls)),
    url(r'^$', 'see'),
    url(r'^home/$', 'index'),
    url(r'^see/$', 'insert'),
    #url(r'^find/$', 'method_splitter', {'GET': 'some_page_get', 'POST': 'find'}),
    url(r'^find/$', 'find'),
    url(r'^seethings/$', 'see'),
    #url(r'^contact/$', 'lost_and_found.contact.contact', name='contact'),
    url(r'^detailed/$', 'detail'),
    url(r'^ChangeToFound/$', 'ChangeToFound'),
    url(r'^Addinfo/$', 'Addinfo'),
    url(r'^Login/$', 'Login'),
    url(r'^Create_success/$', 'C_success'),
    url(r'^Create_fail/$', 'C_fail'),
    url(r'^CreateUser/$', 'CreateUser'),
    url(r'^mythings/$', 'mythings'),
    #url(r'^mythings/$', views.mythings),
    # NOTE: duplicates the ^find/$ pattern above, so this redirect is never reached
    url(r'^find/$', RedirectView.as_view(url='/home/')),
    url(r'^tests/$', 'tests'),
)
urlpatterns += patterns('lost_and_found.contact',
(r'^contact/$', 'contact'),
)
|
[
"31896557+lleyer@users.noreply.github.com"
] |
31896557+lleyer@users.noreply.github.com
|
f8feea7a777cd867d3ba8a84a3de6dbd99017a2f
|
5ac978fc54e8d2cd9ecb098fe5e18d376cef9c44
|
/img_wall.py
|
dbae8b9cba358be9f8ec36a9cebfb3c4e2c595df
|
[] |
no_license
|
at68701141/img_wall
|
4c41a3a4106399c0b6f456bb70b3e102965fc884
|
e2933ff0ae08861ac50bbb771fd844f5b9068551
|
refs/heads/master
| 2020-04-27T02:03:29.845781
| 2019-03-08T12:28:40
| 2019-03-08T12:28:40
| 173,983,024
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,124
|
py
|
from PIL import Image
import os,sys
import random
import argparse
def parse_args():
parser = argparse.ArgumentParser()
    parser.add_argument('--mw', default=100, help='width/height of each photo tile in pixels', type=int)
args = parser.parse_args()
return args
def main():
args = parse_args()
mw = args.mw
raw_img = Image.open('./_raw.png')
raw_img_1 = raw_img.convert('1')
raw_img_1.save("./_deal.jpg")
out_width = raw_img_1.size[0]
out_height = raw_img_1.size[1]
text_img = Image.open("./_deal.jpg")
rows = text_img.size[0] + 1
columns = text_img.size[1] + 1
imgs = 0
for y in range(1,columns):
for x in range(1,rows):
if 0 == text_img.getpixel((x-1,y-1)):
imgs += 1
print('need imgs:',imgs)
    # find all images under the current directory
dir_list = os.listdir("./")
list_fromImage = []
for i in dir_list:
if -1 == i.find('.py'):
if -1 == i.find('_raw') and -1 == i.find('_deal') and -1 == i.find('_out'):
fromImagetmp = Image.open("./"+i)
list_fromImage.append(fromImagetmp)
print('total imgs:',len(list_fromImage))
    # create the canvas; the key point is choosing its size
toImage = Image.new('RGBA',(out_width*mw,out_height*mw))
for y in range(1,columns):
for x in range(1,rows):
try:
if 255 == text_img.getpixel((x-1,y-1)):
pass
elif 0 == text_img.getpixel((x-1,y-1)):
                    # pick a tile photo (randomly here; change for a custom style)
fromImage = list_fromImage[random.randint(0, len(list_fromImage)-1)].copy()
# fromImage = fromImage.rotate(random.randint(0, 360))
                    # paste the photo at its designed position
fromImage = fromImage.resize((mw,mw),Image.ANTIALIAS)
toImage.paste(fromImage,((x-1)*mw,(y-1)*mw))
except IOError:
pass
toImage.save('./_out.png')
if __name__ == '__main__':
main()
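# Usage sketch: place a dark-on-light template image named _raw.png in the
# working directory (dark pixels mark where tiles go) together with the tile
# photos, then run:
#   python img_wall.py --mw 100
# The mosaic is written to _out.png.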
|
[
"2278481764@qq.com"
] |
2278481764@qq.com
|
a799ecc070436733bb2d7276e5b86ee6ed334c39
|
01870d5d2dcc96302ca9143ba64e8f871aae5794
|
/InsertDinnerMenu.py
|
43410ce2148989ea3b5ddd5d36ff941c82743f01
|
[] |
no_license
|
isbobby/eusofftelebot
|
8a6470c6267d7d19713e33560cbef2bc21d0d6cf
|
1b9cd215c9d0258f68798eec23d7ff9e7ac6b226
|
refs/heads/master
| 2022-12-15T15:48:18.669412
| 2020-05-05T13:48:10
| 2020-05-05T13:48:10
| 201,722,435
| 1
| 1
| null | 2022-09-16T18:16:07
| 2019-08-11T05:35:54
|
Python
|
UTF-8
|
Python
| false
| false
| 529
|
py
|
import json
import sqlalchemy
from flask import Blueprint, render_template, url_for, redirect, request, flash
from datetime import datetime, timedelta
from eusoffweb import db
from eusoffweb.models import Dinner,Breakfast,Event
main = Blueprint('main', __name__)
@main.route("/")
@main.route("/home")
def home():
# entry = Dinner(date='2020-01-01',main='Main',side='Side',soup='Soup',dessert='Dessert', special="special")
# db.session.add(entry)
# db.session.commit()
return render_template('/index.html')
|
[
"bobbyclex8@gmail.com"
] |
bobbyclex8@gmail.com
|
bc74953b7cf6838055ecf038a3f0c133024cf95b
|
d7589054c9dbcccdfee4213fda2df10f249a60a8
|
/venv/Lib/site-packages/django/contrib/gis/gdal/raster/band.py
|
c5b092b7b00a8efe670667cf5d06fcf5d195bdeb
|
[
"BSD-3-Clause"
] |
permissive
|
Ruckaiya/djangoblog
|
aa3e16ce84f37a70b830a795acf450b04b5c5bca
|
a76c5d223477d29b391915c3778219a36f9f34ce
|
refs/heads/master
| 2020-06-09T00:26:51.396663
| 2019-06-23T10:47:43
| 2019-06-23T10:47:43
| 193,334,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,248
|
py
|
from ctypes import byref, c_double, c_int, c_void_p
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import raster as capi
from django.contrib.gis.gdal.raster.base import GDALRasterBase
from django.contrib.gis.shortcuts import numpy
from django.utils.encoding import force_text
from .const import (
GDAL_COLOR_TYPES, GDAL_INTEGER_TYPES, GDAL_PIXEL_TYPES, GDAL_TO_CTYPES,
)
class GDALBand(GDALRasterBase):
"""
Wrap a GDAL raster band, needs to be obtained from a GDALRaster object.
"""
def __init__(self, source, index):
self.source = source
self._ptr = capi.get_ds_raster_band(source._ptr, index)
def _flush(self):
"""
Call the flush method on the Band's parent raster and force a refresh
of the statistics attribute when requested the next time.
"""
self.source._flush()
self._stats_refresh = True
@property
def description(self):
"""
Return the description string of the band.
"""
return force_text(capi.get_band_description(self._ptr))
@property
def width(self):
"""
Width (X axis) in pixels of the band.
"""
return capi.get_band_xsize(self._ptr)
@property
def height(self):
"""
Height (Y axis) in pixels of the band.
"""
return capi.get_band_ysize(self._ptr)
@property
def pixel_count(self):
"""
Return the total number of pixels in this band.
"""
return self.width * self.height
_stats_refresh = False
def statistics(self, refresh=False, approximate=False):
"""
Compute statistics on the pixel values of this band.
The return value is a tuple with the following structure:
(minimum, maximum, mean, standard deviation).
If approximate=True, the statistics may be computed based on overviews
or a subset of image tiles.
If refresh=True, the statistics will be computed from the data directly,
and the cache will be updated where applicable.
For empty bands (where all pixel values are nodata), all statistics
values are returned as None.
For raster formats using Persistent Auxiliary Metadata (PAM) services,
the statistics might be cached in an auxiliary file.
"""
# Prepare array with arguments for capi function
smin, smax, smean, sstd = c_double(), c_double(), c_double(), c_double()
stats_args = [
self._ptr, c_int(approximate), byref(smin), byref(smax),
byref(smean), byref(sstd), c_void_p(), c_void_p(),
]
if refresh or self._stats_refresh:
func = capi.compute_band_statistics
else:
# Add additional argument to force computation if there is no
# existing PAM file to take the values from.
force = True
stats_args.insert(2, c_int(force))
func = capi.get_band_statistics
# Computation of statistics fails for empty bands.
try:
func(*stats_args)
result = smin.value, smax.value, smean.value, sstd.value
except GDALException:
result = (None, None, None, None)
self._stats_refresh = False
return result
@property
def min(self):
"""
Return the minimum pixel value for this band.
"""
return self.statistics()[0]
@property
def max(self):
"""
Return the maximum pixel value for this band.
"""
return self.statistics()[1]
@property
def mean(self):
"""
Return the mean of all pixel values of this band.
"""
return self.statistics()[2]
@property
def std(self):
"""
Return the standard deviation of all pixel values of this band.
"""
return self.statistics()[3]
@property
def nodata_value(self):
"""
Return the nodata value for this band, or None if it isn't set.
"""
# Get value and nodata exists flag
nodata_exists = c_int()
value = capi.get_band_nodata_value(self._ptr, nodata_exists)
if not nodata_exists:
value = None
# If the pixeltype is an integer, convert to int
elif self.datatype() in GDAL_INTEGER_TYPES:
value = int(value)
return value
@nodata_value.setter
def nodata_value(self, value):
"""
Set the nodata value for this band.
"""
if value is None:
if not capi.delete_band_nodata_value:
raise ValueError('GDAL >= 2.1 required to delete nodata values.')
capi.delete_band_nodata_value(self._ptr)
elif not isinstance(value, (int, float)):
raise ValueError('Nodata value must be numeric or None.')
else:
capi.set_band_nodata_value(self._ptr, value)
self._flush()
def datatype(self, as_string=False):
"""
Return the GDAL Pixel Datatype for this band.
"""
dtype = capi.get_band_datatype(self._ptr)
if as_string:
dtype = GDAL_PIXEL_TYPES[dtype]
return dtype
def color_interp(self, as_string=False):
"""Return the GDAL color interpretation for this band."""
color = capi.get_band_color_interp(self._ptr)
if as_string:
color = GDAL_COLOR_TYPES[color]
return color
def data(self, data=None, offset=None, size=None, shape=None, as_memoryview=False):
"""
Read or writes pixel values for this band. Blocks of data can
be accessed by specifying the width, height and offset of the
desired block. The same specification can be used to update
parts of a raster by providing an array of values.
Allowed input data types are bytes, memoryview, list, tuple, and array.
"""
offset = offset or (0, 0)
size = size or (self.width - offset[0], self.height - offset[1])
shape = shape or size
if any(x <= 0 for x in size):
raise ValueError('Offset too big for this raster.')
if size[0] > self.width or size[1] > self.height:
raise ValueError('Size is larger than raster.')
# Create ctypes type array generator
ctypes_array = GDAL_TO_CTYPES[self.datatype()] * (shape[0] * shape[1])
if data is None:
# Set read mode
access_flag = 0
# Prepare empty ctypes array
data_array = ctypes_array()
else:
# Set write mode
access_flag = 1
# Instantiate ctypes array holding the input data
if isinstance(data, (bytes, memoryview)) or (numpy and isinstance(data, numpy.ndarray)):
data_array = ctypes_array.from_buffer_copy(data)
else:
data_array = ctypes_array(*data)
# Access band
capi.band_io(self._ptr, access_flag, offset[0], offset[1],
size[0], size[1], byref(data_array), shape[0],
shape[1], self.datatype(), 0, 0)
# Return data as numpy array if possible, otherwise as list
if data is None:
if as_memoryview:
return memoryview(data_array)
elif numpy:
# reshape() needs a reshape parameter with the height first.
return numpy.frombuffer(
data_array, dtype=numpy.dtype(data_array)
).reshape(tuple(reversed(size)))
else:
return list(data_array)
else:
self._flush()
class BandList(list):
def __init__(self, source):
self.source = source
super().__init__()
def __iter__(self):
for idx in range(1, len(self) + 1):
yield GDALBand(self.source, idx)
def __len__(self):
return capi.get_ds_raster_count(self.source._ptr)
def __getitem__(self, index):
try:
return GDALBand(self.source, index + 1)
except GDALException:
raise GDALException('Unable to get band index %d' % index)
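# Hedged usage sketch (not part of the original module; the file path is an
# assumption): bands are reached through a GDALRaster.
#   from django.contrib.gis.gdal import GDALRaster
#   rst = GDALRaster('/path/to/raster.tif')
#   band = rst.bands[0]          # a GDALBand, via BandList
#   print(band.statistics())     # (min, max, mean, std)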
|
[
"ruckaiya.awf5@gmail.com"
] |
ruckaiya.awf5@gmail.com
|
1dacce4b787bbdf7a0d9686693f68f0e2e663a54
|
ae81e1685061552dd34e1af5f71cbd549bd05706
|
/python/paddle/fluid/contrib/layers/rnn_impl.py
|
e6a868ada37ab9fb27f973b4bfe648387bb4279f
|
[
"Apache-2.0"
] |
permissive
|
cryoco/Paddle
|
3917982f4d27a5be2abee0f45e74812c7d383c03
|
39ac41f137d685af66078adf2f35d65473978b4a
|
refs/heads/develop
| 2021-11-07T18:47:43.406188
| 2019-09-23T05:04:48
| 2019-09-23T05:04:48
| 197,357,973
| 3
| 1
|
Apache-2.0
| 2021-08-12T06:24:30
| 2019-07-17T09:25:19
|
C++
|
UTF-8
|
Python
| false
| false
| 30,946
|
py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid import layers
from paddle.fluid.dygraph import Layer
from paddle.fluid.layers.control_flow import StaticRNN
__all__ = ['BasicGRUUnit', 'basic_gru', 'BasicLSTMUnit', 'basic_lstm']
class BasicGRUUnit(Layer):
"""
****
BasicGRUUnit class, using basic operators to build GRU
The algorithm can be described as the equations below.
.. math::
u_t & = actGate(W_ux xu_{t} + W_uh h_{t-1} + b_u)
r_t & = actGate(W_rx xr_{t} + W_rh h_{t-1} + b_r)
m_t & = actNode(W_cx xm_t + W_ch dot(r_t, h_{t-1}) + b_m)
h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t)
Args:
name_scope(string) : The name scope used to identify parameters and biases
hidden_size (integer): The hidden size used in the Unit.
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight matrix. Note:
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|None): The parameter attribute for the bias
of GRU unit.
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
gate_activation (function|None): The activation function for gates (actGate).
Default: 'fluid.layers.sigmoid'
activation (function|None): The activation function for cell (actNode).
Default: 'fluid.layers.tanh'
dtype(string): data type used in this unit
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
from paddle.fluid.contrib.layers import BasicGRUUnit
input_size = 128
hidden_size = 256
input = layers.data( name = "input", shape = [-1, input_size], dtype='float32')
pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32')
gru_unit = BasicGRUUnit( "gru_unit", hidden_size )
new_hidden = gru_unit( input, pre_hidden )
"""
def __init__(self,
name_scope,
hidden_size,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
dtype='float32'):
super(BasicGRUUnit, self).__init__(name_scope, dtype)
self._name = name_scope
self._hiden_size = hidden_size
self._param_attr = param_attr
self._bias_attr = bias_attr
self._gate_activation = gate_activation or layers.sigmoid
self._activation = activation or layers.tanh
self._dtype = dtype
def _build_once(self, input, pre_hidden):
self._input_size = input.shape[-1]
assert (self._input_size > 0)
self._gate_weight = self.create_parameter(
attr=self._param_attr,
shape=[self._input_size + self._hiden_size, 2 * self._hiden_size],
dtype=self._dtype)
self._candidate_weight = self.create_parameter(
attr=self._param_attr,
shape=[self._input_size + self._hiden_size, self._hiden_size],
dtype=self._dtype)
self._gate_bias = self.create_parameter(
self._bias_attr,
shape=[2 * self._hiden_size],
dtype=self._dtype,
is_bias=True)
self._candidate_bias = self.create_parameter(
self._bias_attr,
shape=[self._hiden_size],
dtype=self._dtype,
is_bias=True)
def forward(self, input, pre_hidden):
concat_input_hidden = layers.concat([input, pre_hidden], 1)
gate_input = layers.matmul(x=concat_input_hidden, y=self._gate_weight)
gate_input = layers.elementwise_add(gate_input, self._gate_bias)
gate_input = self._gate_activation(gate_input)
r, u = layers.split(gate_input, num_or_sections=2, dim=1)
r_hidden = r * pre_hidden
candidate = layers.matmul(
layers.concat([input, pre_hidden], 1), self._candidate_weight)
candidate = layers.elementwise_add(candidate, self._candidate_bias)
c = self._activation(candidate)
new_hidden = u * pre_hidden + (1 - u) * c
return new_hidden
def basic_gru(input,
init_hidden,
hidden_size,
num_layers=1,
sequence_length=None,
dropout_prob=0.0,
bidirectional=False,
batch_first=True,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
dtype='float32',
name='basic_gru'):
"""
    GRU implementation using basic operators; supports multiple layers and bidirectional GRU.
.. math::
u_t & = actGate(W_ux xu_{t} + W_uh h_{t-1} + b_u)
r_t & = actGate(W_rx xr_{t} + W_rh h_{t-1} + b_r)
m_t & = actNode(W_cx xm_t + W_ch dot(r_t, h_{t-1}) + b_m)
h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t)
Args:
input (Variable): GRU input tensor,
if batch_first = False, shape should be ( seq_len x batch_size x input_size )
if batch_first = True, shape should be ( batch_size x seq_len x hidden_size )
init_hidden(Variable|None): The initial hidden state of the GRU
This is a tensor with shape ( num_layers x batch_size x hidden_size)
if is_bidirec = True, shape should be ( num_layers*2 x batch_size x hidden_size)
and can be reshaped to tensor with ( num_layers x 2 x batch_size x hidden_size) to use.
If it's None, it will be set to all 0.
hidden_size (int): Hidden size of the GRU
num_layers (int): The total number of layers of the GRU
        sequence_length (Variable|None): A Tensor (shape [batch_size]) storing the real length of each instance.
            This tensor will be converted to a mask to mask the padding ids.
            If it is None, there are no padding ids.
        dropout_prob(float|0.0): Dropout prob; dropout ONLY works on the rnn output of each layer,
            NOT between time steps
bidirectional (bool|False): If it is bidirectional
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight matrix. Note:
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|None): The parameter attribute for the bias
of GRU unit.
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
gate_activation (function|None): The activation function for gates (actGate).
Default: 'fluid.layers.sigmoid'
activation (function|None): The activation function for cell (actNode).
Default: 'fluid.layers.tanh'
dtype(string): data type used in this unit
name(string): name used to identify parameters and biases
Returns:
rnn_out(Tensor),last_hidden(Tensor)
        - rnn_out is the result of GRU hidden, with shape (seq_len x batch_size x hidden_size) \
            if is_bidirec set to True, shape will be ( seq_len x batch_size x hidden_size*2)
- last_hidden is the hidden state of the last step of GRU \
shape is ( num_layers x batch_size x hidden_size ) \
if is_bidirec set to True, shape will be ( num_layers*2 x batch_size x hidden_size),
can be reshaped to a tensor with shape( num_layers x 2 x batch_size x hidden_size)
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
from paddle.fluid.contrib.layers import basic_gru
batch_size = 20
input_size = 128
hidden_size = 256
num_layers = 2
dropout = 0.5
bidirectional = True
batch_first = False
input = layers.data( name = "input", shape = [-1, batch_size, input_size], dtype='float32')
pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32')
sequence_length = layers.data( name="sequence_length", shape=[-1], dtype='int32')
rnn_out, last_hidden = basic_gru( input, pre_hidden, hidden_size, num_layers = num_layers, \
sequence_length = sequence_length, dropout_prob=dropout, bidirectional = bidirectional, \
batch_first = batch_first)
"""
fw_unit_list = []
for i in range(num_layers):
new_name = name + "_layers_" + str(i)
fw_unit_list.append(
BasicGRUUnit(new_name, hidden_size, param_attr, bias_attr,
gate_activation, activation, dtype))
if bidirectional:
bw_unit_list = []
for i in range(num_layers):
new_name = name + "_reverse_layers_" + str(i)
bw_unit_list.append(
BasicGRUUnit(new_name, hidden_size, param_attr, bias_attr,
gate_activation, activation, dtype))
if batch_first:
input = layers.transpose(input, [1, 0, 2])
mask = None
if sequence_length:
max_seq_len = layers.shape(input)[0]
mask = layers.sequence_mask(
sequence_length, maxlen=max_seq_len, dtype='float32')
mask = layers.transpose(mask, [1, 0])
direc_num = 1
if bidirectional:
direc_num = 2
if init_hidden:
init_hidden = layers.reshape(
init_hidden, shape=[num_layers, direc_num, -1, hidden_size])
def get_single_direction_output(rnn_input,
unit_list,
mask=None,
direc_index=0):
rnn = StaticRNN()
with rnn.step():
step_input = rnn.step_input(rnn_input)
if mask:
step_mask = rnn.step_input(mask)
for i in range(num_layers):
if init_hidden:
pre_hidden = rnn.memory(init=init_hidden[i, direc_index])
else:
pre_hidden = rnn.memory(
batch_ref=rnn_input,
shape=[-1, hidden_size],
ref_batch_dim_idx=1)
new_hidden = unit_list[i](step_input, pre_hidden)
if mask:
new_hidden = layers.elementwise_mul(
new_hidden, step_mask, axis=0) - layers.elementwise_mul(
pre_hidden, (step_mask - 1), axis=0)
rnn.update_memory(pre_hidden, new_hidden)
rnn.step_output(new_hidden)
step_input = new_hidden
            if dropout_prob is not None and dropout_prob > 0.0:
step_input = layers.dropout(
step_input,
dropout_prob=dropout_prob, )
rnn.step_output(step_input)
rnn_out = rnn()
last_hidden_array = []
rnn_output = rnn_out[-1]
for i in range(num_layers):
last_hidden = rnn_out[i]
last_hidden = last_hidden[-1]
last_hidden_array.append(last_hidden)
last_hidden_output = layers.concat(last_hidden_array, axis=0)
last_hidden_output = layers.reshape(
last_hidden_output, shape=[num_layers, -1, hidden_size])
return rnn_output, last_hidden_output
# seq_len, batch_size, hidden_size
fw_rnn_out, fw_last_hidden = get_single_direction_output(
input, fw_unit_list, mask, direc_index=0)
if bidirectional:
bw_input = layers.reverse(input, axis=[0])
bw_mask = None
if mask:
bw_mask = layers.reverse(mask, axis=[0])
bw_rnn_out, bw_last_hidden = get_single_direction_output(
bw_input, bw_unit_list, bw_mask, direc_index=1)
bw_rnn_out = layers.reverse(bw_rnn_out, axis=[0])
rnn_out = layers.concat([fw_rnn_out, bw_rnn_out], axis=2)
last_hidden = layers.concat([fw_last_hidden, bw_last_hidden], axis=1)
last_hidden = layers.reshape(
last_hidden, shape=[num_layers * direc_num, -1, hidden_size])
if batch_first:
rnn_out = layers.transpose(rnn_out, [1, 0, 2])
return rnn_out, last_hidden
else:
rnn_out = fw_rnn_out
last_hidden = fw_last_hidden
if batch_first:
            rnn_out = layers.transpose(rnn_out, [1, 0, 2])
return rnn_out, last_hidden
def basic_lstm(input,
init_hidden,
init_cell,
hidden_size,
num_layers=1,
sequence_length=None,
dropout_prob=0.0,
bidirectional=False,
batch_first=True,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
forget_bias=1.0,
dtype='float32',
name='basic_lstm'):
"""
    LSTM implementation using basic operators; supports multiple layers and bidirectional LSTM.
.. math::
i_t &= \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + b_i)
f_t &= \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + b_f + forget_bias )
o_t &= \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + b_o)
\\tilde{c_t} &= tanh(W_{cx}x_t + W_{ch}h_{t-1} + b_c)
c_t &= f_t \odot c_{t-1} + i_t \odot \\tilde{c_t}
h_t &= o_t \odot tanh(c_t)
Args:
input (Variable): lstm input tensor,
if batch_first = False, shape should be ( seq_len x batch_size x input_size )
if batch_first = True, shape should be ( batch_size x seq_len x hidden_size )
init_hidden(Variable|None): The initial hidden state of the LSTM
This is a tensor with shape ( num_layers x batch_size x hidden_size)
if is_bidirec = True, shape should be ( num_layers*2 x batch_size x hidden_size)
and can be reshaped to a tensor with shape ( num_layers x 2 x batch_size x hidden_size) to use.
If it's None, it will be set to all 0.
init_cell(Variable|None): The initial hidden state of the LSTM
This is a tensor with shape ( num_layers x batch_size x hidden_size)
if is_bidirec = True, shape should be ( num_layers*2 x batch_size x hidden_size)
and can be reshaped to a tensor with shape ( num_layers x 2 x batch_size x hidden_size) to use.
If it's None, it will be set to all 0.
hidden_size (int): Hidden size of the LSTM
num_layers (int): The total number of layers of the LSTM
        sequence_length (Variable|None): A tensor (shape [batch_size]) storing the real length of each instance.
            This tensor will be converted to a mask to mask the padding ids.
            If it is None, there are no padding ids.
        dropout_prob(float|0.0): Dropout prob; dropout ONLY works on the rnn output of each layer,
            NOT between time steps
bidirectional (bool|False): If it is bidirectional
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight matrix. Note:
If it is set to None or one attribute of ParamAttr, lstm_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|None): The parameter attribute for the bias
of LSTM unit.
If it is set to None or one attribute of ParamAttr, lstm_unit will
create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
gate_activation (function|None): The activation function for gates (actGate).
Default: 'fluid.layers.sigmoid'
activation (function|None): The activation function for cell (actNode).
Default: 'fluid.layers.tanh'
forget_bias (float|1.0) : Forget bias used to compute the forget gate
dtype(string): Data type used in this unit
name(string): Name used to identify parameters and biases
Returns:
rnn_out(Tensor), last_hidden(Tensor), last_cell(Tensor)
        - rnn_out is the result of LSTM hidden, shape is (seq_len x batch_size x hidden_size) \
            if is_bidirec set to True, its shape will be ( seq_len x batch_size x hidden_size*2)
        - last_hidden is the hidden state of the last step of LSTM \
            with shape ( num_layers x batch_size x hidden_size ) \
            if is_bidirec set to True, its shape will be ( num_layers*2 x batch_size x hidden_size),
            and can be reshaped to a tensor ( num_layers x 2 x batch_size x hidden_size) to use.
        - last_cell is the cell state of the last step of LSTM \
            with shape ( num_layers x batch_size x hidden_size ) \
            if is_bidirec set to True, its shape will be ( num_layers*2 x batch_size x hidden_size),
            and can be reshaped to a tensor ( num_layers x 2 x batch_size x hidden_size) to use.
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
from paddle.fluid.contrib.layers import basic_lstm
batch_size = 20
input_size = 128
hidden_size = 256
num_layers = 2
dropout = 0.5
bidirectional = True
batch_first = False
input = layers.data( name = "input", shape = [-1, batch_size, input_size], dtype='float32')
pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32')
pre_cell = layers.data( name = "pre_cell", shape=[-1, hidden_size], dtype='float32')
sequence_length = layers.data( name="sequence_length", shape=[-1], dtype='int32')
rnn_out, last_hidden, last_cell = basic_lstm( input, pre_hidden, pre_cell, \
hidden_size, num_layers = num_layers, \
sequence_length = sequence_length, dropout_prob=dropout, bidirectional = bidirectional, \
batch_first = batch_first)
"""
fw_unit_list = []
for i in range(num_layers):
new_name = name + "_layers_" + str(i)
fw_unit_list.append(
BasicLSTMUnit(
new_name,
hidden_size,
param_attr=param_attr,
bias_attr=bias_attr,
gate_activation=gate_activation,
activation=activation,
forget_bias=forget_bias,
dtype=dtype))
if bidirectional:
bw_unit_list = []
for i in range(num_layers):
new_name = name + "_reverse_layers_" + str(i)
bw_unit_list.append(
BasicLSTMUnit(
new_name,
hidden_size,
param_attr=param_attr,
bias_attr=bias_attr,
gate_activation=gate_activation,
activation=activation,
forget_bias=forget_bias,
dtype=dtype))
if batch_first:
input = layers.transpose(input, [1, 0, 2])
mask = None
if sequence_length:
max_seq_len = layers.shape(input)[0]
mask = layers.sequence_mask(
sequence_length, maxlen=max_seq_len, dtype='float32')
mask = layers.transpose(mask, [1, 0])
direc_num = 1
if bidirectional:
direc_num = 2
# convert to [num_layers, 2, batch_size, hidden_size]
if init_hidden:
init_hidden = layers.reshape(
init_hidden, shape=[num_layers, direc_num, -1, hidden_size])
init_cell = layers.reshape(
init_cell, shape=[num_layers, direc_num, -1, hidden_size])
# forward direction
def get_single_direction_output(rnn_input,
unit_list,
mask=None,
direc_index=0):
rnn = StaticRNN()
with rnn.step():
step_input = rnn.step_input(rnn_input)
if mask:
step_mask = rnn.step_input(mask)
for i in range(num_layers):
if init_hidden:
pre_hidden = rnn.memory(init=init_hidden[i, direc_index])
pre_cell = rnn.memory(init=init_cell[i, direc_index])
else:
pre_hidden = rnn.memory(
batch_ref=rnn_input, shape=[-1, hidden_size])
pre_cell = rnn.memory(
batch_ref=rnn_input, shape=[-1, hidden_size])
new_hidden, new_cell = unit_list[i](step_input, pre_hidden,
pre_cell)
if mask:
new_hidden = layers.elementwise_mul(
new_hidden, step_mask, axis=0) - layers.elementwise_mul(
pre_hidden, (step_mask - 1), axis=0)
new_cell = layers.elementwise_mul(
new_cell, step_mask, axis=0) - layers.elementwise_mul(
pre_cell, (step_mask - 1), axis=0)
rnn.update_memory(pre_hidden, new_hidden)
rnn.update_memory(pre_cell, new_cell)
rnn.step_output(new_hidden)
rnn.step_output(new_cell)
step_input = new_hidden
            if dropout_prob is not None and dropout_prob > 0.0:
step_input = layers.dropout(
step_input,
dropout_prob=dropout_prob,
dropout_implementation='upscale_in_train')
rnn.step_output(step_input)
rnn_out = rnn()
last_hidden_array = []
last_cell_array = []
rnn_output = rnn_out[-1]
for i in range(num_layers):
last_hidden = rnn_out[i * 2]
last_hidden = last_hidden[-1]
last_hidden_array.append(last_hidden)
last_cell = rnn_out[i * 2 + 1]
last_cell = last_cell[-1]
last_cell_array.append(last_cell)
last_hidden_output = layers.concat(last_hidden_array, axis=0)
last_hidden_output = layers.reshape(
last_hidden_output, shape=[num_layers, -1, hidden_size])
last_cell_output = layers.concat(last_cell_array, axis=0)
last_cell_output = layers.reshape(
last_cell_output, shape=[num_layers, -1, hidden_size])
return rnn_output, last_hidden_output, last_cell_output
# seq_len, batch_size, hidden_size
fw_rnn_out, fw_last_hidden, fw_last_cell = get_single_direction_output(
input, fw_unit_list, mask, direc_index=0)
if bidirectional:
bw_input = layers.reverse(input, axis=[0])
bw_mask = None
if mask:
bw_mask = layers.reverse(mask, axis=[0])
bw_rnn_out, bw_last_hidden, bw_last_cell = get_single_direction_output(
bw_input, bw_unit_list, bw_mask, direc_index=1)
bw_rnn_out = layers.reverse(bw_rnn_out, axis=[0])
rnn_out = layers.concat([fw_rnn_out, bw_rnn_out], axis=2)
last_hidden = layers.concat([fw_last_hidden, bw_last_hidden], axis=1)
last_hidden = layers.reshape(
last_hidden, shape=[num_layers * direc_num, -1, hidden_size])
last_cell = layers.concat([fw_last_cell, bw_last_cell], axis=1)
last_cell = layers.reshape(
last_cell, shape=[num_layers * direc_num, -1, hidden_size])
if batch_first:
rnn_out = layers.transpose(rnn_out, [1, 0, 2])
return rnn_out, last_hidden, last_cell
else:
rnn_out = fw_rnn_out
last_hidden = fw_last_hidden
last_cell = fw_last_cell
if batch_first:
rnn_out = layers.transpose(rnn_out, [1, 0, 2])
return rnn_out, last_hidden, last_cell
class BasicLSTMUnit(Layer):
"""
****
BasicLSTMUnit class, Using basic operator to build LSTM
    The algorithm can be described by the equations below.
.. math::
i_t &= \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + b_i)
f_t &= \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + b_f + forget_bias )
o_t &= \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + b_o)
\\tilde{c_t} &= tanh(W_{cx}x_t + W_{ch}h_{t-1} + b_c)
c_t &= f_t \odot c_{t-1} + i_t \odot \\tilde{c_t}
h_t &= o_t \odot tanh(c_t)
- $W$ terms denote weight matrices (e.g. $W_{ix}$ is the matrix
of weights from the input gate to the input)
- The b terms denote bias vectors ($bx_i$ and $bh_i$ are the input gate bias vector).
- sigmoid is the logistic sigmoid function.
- $i, f, o$ and $c$ are the input gate, forget gate, output gate,
and cell activation vectors, respectively, all of which have the same size as
the cell output activation vector $h$.
- The :math:`\odot` is the element-wise product of the vectors.
- :math:`tanh` is the activation functions.
- :math:`\\tilde{c_t}` is also called candidate hidden state,
which is computed based on the current input and the previous hidden state.
Args:
name_scope(string) : The name scope used to identify parameter and bias name
hidden_size (integer): The hidden size used in the Unit.
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight matrix. Note:
If it is set to None or one attribute of ParamAttr, lstm_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|None): The parameter attribute for the bias
of LSTM unit.
If it is set to None or one attribute of ParamAttr, lstm_unit will
create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized as zero. Default: None.
gate_activation (function|None): The activation function for gates (actGate).
Default: 'fluid.layers.sigmoid'
activation (function|None): The activation function for cells (actNode).
Default: 'fluid.layers.tanh'
forget_bias(float|1.0): forget bias used when computing forget gate
dtype(string): data type used in this unit
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
from paddle.fluid.contrib.layers import BasicLSTMUnit
input_size = 128
hidden_size = 256
input = layers.data( name = "input", shape = [-1, input_size], dtype='float32')
pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32')
pre_cell = layers.data( name = "pre_cell", shape=[-1, hidden_size], dtype='float32')
lstm_unit = BasicLSTMUnit( "gru_unit", hidden_size)
new_hidden, new_cell = lstm_unit( input, pre_hidden, pre_cell )
"""
def __init__(self,
name_scope,
hidden_size,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
forget_bias=1.0,
dtype='float32'):
super(BasicLSTMUnit, self).__init__(name_scope, dtype)
self._name = name_scope
self._hiden_size = hidden_size
self._param_attr = param_attr
self._bias_attr = bias_attr
self._gate_activation = gate_activation or layers.sigmoid
self._activation = activation or layers.tanh
self._forget_bias = layers.fill_constant(
[1], dtype=dtype, value=forget_bias)
self._forget_bias.stop_gradient = False
self._dtype = dtype
def _build_once(self, input, pre_hidden, pre_cell):
self._input_size = input.shape[-1]
assert (self._input_size > 0)
self._weight = self.create_parameter(
attr=self._param_attr,
shape=[self._input_size + self._hiden_size, 4 * self._hiden_size],
dtype=self._dtype)
self._bias = self.create_parameter(
attr=self._bias_attr,
shape=[4 * self._hiden_size],
dtype=self._dtype,
is_bias=True)
def forward(self, input, pre_hidden, pre_cell):
concat_input_hidden = layers.concat([input, pre_hidden], 1)
gate_input = layers.matmul(x=concat_input_hidden, y=self._weight)
gate_input = layers.elementwise_add(gate_input, self._bias)
i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)
new_cell = layers.elementwise_add(
layers.elementwise_mul(
pre_cell,
layers.sigmoid(layers.elementwise_add(f, self._forget_bias))),
layers.elementwise_mul(layers.sigmoid(i), layers.tanh(j)))
new_hidden = layers.tanh(new_cell) * layers.sigmoid(o)
return new_hidden, new_cell
|
[
"noreply@github.com"
] |
cryoco.noreply@github.com
|
b2a7242000eaaed258617f34c401b0cd616456ec
|
6bf7149077f539ab599db1f717c93aca82724512
|
/encapsulation/resturant/food/dessert.py
|
88c978d97947fac520aeff1a4ad8afede7e62b96
|
[] |
no_license
|
KalinHar/OOP-Python-SoftUni
|
8b53e8b734b364878c5372525c4249fdd32f0899
|
9787eea7ab5101e887ed4aaeb0a8b3b80efcfdd7
|
refs/heads/master
| 2023-07-09T08:15:59.765422
| 2021-08-16T06:01:08
| 2021-08-16T06:01:19
| 380,813,294
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
from encapsulation.resturant.food.food import Food
class Dessert(Food):
def __init__(self, name, price, grams, calories):
super().__init__(name, price, grams)
self.__calories = calories
@property
def calories(self):
return self.__calories
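# Hedged usage sketch (Food's constructor signature is inferred from the
# super() call above):
#   dessert = Dessert("Cheesecake", 5.50, 200, 620)
#   dessert.calories  # -> 620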
|
[
"kalix@abv.bg"
] |
kalix@abv.bg
|
2927ecf4d1d50234c872853d0c93df428cb23a84
|
68de0dc251ebf950ca698489a5f7a2959282c23f
|
/pw.py
|
080c69b933c28bb1f17d3d99a1aa249fda8e1e0d
|
[
"MIT"
] |
permissive
|
JKakaofanatiker/pw
|
001f70dd41b236556079b5167b4b2dc5c99c3fc9
|
3a8d74ae87b1f96c7476254a64d4ddd898bf069b
|
refs/heads/master
| 2023-02-17T04:42:47.373681
| 2021-01-11T16:00:07
| 2021-01-11T16:00:07
| 328,191,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
import secrets
import string
from colorama import Fore # needed for color
import pyperclip
print(Fore.GREEN + "Enter password length:") # tell the user to type the length of the password
length = int(length := input())  # user input
chars = string.digits + string.ascii_letters + string.punctuation # possible chars
print(Fore.RED + "Here is your password:")
# print(''.join(secrets.choice(chars) for _ in range(length))) # prints password
password = ''.join(secrets.choice(chars) for _ in range(length))
print(password)
pyperclip.copy(password)
|
[
"noreply@github.com"
] |
JKakaofanatiker.noreply@github.com
|
f852c7fc24ea0e2287c4336eed6a938ebfab3136
|
410c6696feb4b3c3ccda92d4ff4c1c251f176d86
|
/code/imu.py
|
d0afd83a98e7135dec0ecc9462123bee756f1845
|
[] |
no_license
|
sourabhraghav2/Self_balancing_quadcopter
|
06a706ee349be6941a80cf6f6a0fd3c963af24f1
|
99d114d9308a62b8cd346f29871b9984ca41797a
|
refs/heads/master
| 2020-05-26T15:05:14.420991
| 2019-05-23T18:08:05
| 2019-05-23T18:08:05
| 188,277,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,617
|
py
|
import math
deltat = 0.01  # sampling period in seconds (10 ms)
gyroMeasError = float(3.14159265358979 * (5.0 / 180.0))  # gyroscope measurement error in rad/s (5 deg/s)
beta = float(math.sqrt(3.0 / 4.0) * gyroMeasError)  # filter gain
SEq_1 = 1.0
SEq_2 = 0.0
SEq_3 = 0.0
SEq_4 = 0.0 # estimated orientation quaternion elements with initial conditions
def filterUpdate( w_x, w_y, w_z, a_x, a_y, a_z):
global SEq_1,SEq_2,SEq_3,SEq_4
halfSEq_1 = float(0.5 * SEq_1)
halfSEq_2 = float(0.5 * SEq_2)
halfSEq_3 = float(0.5 * SEq_3)
halfSEq_4 = float(0.5 * SEq_4)
twoSEq_1 = float(2.0 * SEq_1)
twoSEq_2 = float(2.0 * SEq_2)
twoSEq_3 = float(2.0 * SEq_3)
# Normalise the accelerometer measurement
norm = math.sqrt(a_x * a_x + a_y * a_y + a_z * a_z)
a_x /= norm
a_y /= norm
a_z /= norm
# Compute the objective function and Jacobian
f_1 = twoSEq_2 * SEq_4 - twoSEq_1 * SEq_3 - a_x
f_2 = twoSEq_1 * SEq_2 + twoSEq_3 * SEq_4 - a_y
f_3 = 1.0 - twoSEq_2 * SEq_2 - twoSEq_3 * SEq_3 - a_z
J_11or24 = twoSEq_3 # J_11 negated in matrix multiplication
J_12or23 = 2.0 * SEq_4
J_13or22 = twoSEq_1 # J_12 negated in matrix multiplication
J_14or21 = twoSEq_2
J_32 = 2.0 * J_14or21 # negated in matrix multiplication
J_33 = 2.0 * J_11or24 # negated in matrix multiplication
# Compute the gradient (matrix multiplication)
SEqHatDot_1 = J_14or21 * f_2 - J_11or24 * f_1
SEqHatDot_2 = J_12or23 * f_1 + J_13or22 * f_2 - J_32 * f_3
SEqHatDot_3 = J_12or23 * f_2 - J_33 * f_3 - J_13or22 * f_1
SEqHatDot_4 = J_14or21 * f_1 + J_11or24 * f_2
# Normalise the gradient
norm = math.sqrt(SEqHatDot_1 * SEqHatDot_1 + SEqHatDot_2 * SEqHatDot_2 + SEqHatDot_3 * SEqHatDot_3 + SEqHatDot_4 * SEqHatDot_4)
SEqHatDot_1 /= norm
SEqHatDot_2 /= norm
SEqHatDot_3 /= norm
SEqHatDot_4 /= norm
    # Compute the quaternion derivative measured by gyroscopes
SEqDot_omega_1 = -halfSEq_2 * w_x - halfSEq_3 * w_y - halfSEq_4 * w_z
SEqDot_omega_2 = halfSEq_1 * w_x + halfSEq_3 * w_z - halfSEq_4 * w_y
SEqDot_omega_3 = halfSEq_1 * w_y - halfSEq_2 * w_z + halfSEq_4 * w_x
SEqDot_omega_4 = halfSEq_1 * w_z + halfSEq_2 * w_y - halfSEq_3 * w_x
    # Compute then integrate the estimated quaternion derivative
SEq_1 += (SEqDot_omega_1 - (beta * SEqHatDot_1)) * deltat
SEq_2 += (SEqDot_omega_2 - (beta * SEqHatDot_2)) * deltat
SEq_3 += (SEqDot_omega_3 - (beta * SEqHatDot_3)) * deltat
SEq_4 += (SEqDot_omega_4 - (beta * SEqHatDot_4)) * deltat
# Normalise quaternion
norm = math.sqrt(SEq_1 * SEq_1 + SEq_2 * SEq_2 + SEq_3 * SEq_3 + SEq_4 * SEq_4)
SEq_1 /= norm
SEq_2 /= norm
SEq_3 /= norm
SEq_4 /= norm
return [SEq_1,SEq_2,SEq_3,SEq_4]
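# Hedged usage sketch: call once per sample period with gyro rates (rad/s) and
# raw accelerometer readings (any consistent unit; they are normalised inside):
#   q = filterUpdate(0.01, -0.02, 0.0, 0.0, 0.0, 9.81)
#   # q is the updated orientation quaternion [SEq_1, SEq_2, SEq_3, SEq_4]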
|
[
"sourabhraghav2@gmail.com"
] |
sourabhraghav2@gmail.com
|
6232b159f028a44b8efe990437ef82bf78cad220
|
e1ee8e7c4e92fef77277cbd0c19079ddd769069e
|
/docs/runtimes/Python27/Lib/site-packages/pythonwin/pywin/tools/regpy.py
|
5759468f09b8acb106023a0a62a11319af2e1a68
|
[] |
no_license
|
van7hu/fanca
|
3c278f0ea0855eb2f0f7a7394788088e7e1c1ad2
|
a8864116246bcfe2d517c48831d38e02f107e534
|
refs/heads/master
| 2021-01-10T02:04:04.361617
| 2015-10-18T15:34:34
| 2015-10-18T15:34:34
| 43,810,422
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,885
|
py
|
# (sort-of) Registry editor
import win32ui
import dialog
import win32con
import commctrl
class RegistryControl:
def __init__(self, key):
self.key = key
class RegEditPropertyPage(dialog.PropertyPage):
IDC_LISTVIEW = 1000
def GetTemplate(self):
"Return the template used to create this dialog"
w = 152 # Dialog width
h = 122 # Dialog height
SS_STD = win32con.WS_CHILD | win32con.WS_VISIBLE
FRAMEDLG_STD = win32con.WS_CAPTION | win32con.WS_SYSMENU
style = FRAMEDLG_STD | win32con.WS_VISIBLE | win32con.DS_SETFONT | win32con.WS_MINIMIZEBOX
template = [[self.caption, (0, 0, w, h), style, None, (8, 'Helv')], ]
lvStyle = SS_STD | commctrl.LVS_EDITLABELS | commctrl.LVS_REPORT | commctrl.LVS_AUTOARRANGE | commctrl.LVS_ALIGNLEFT | win32con.WS_BORDER | win32con.WS_TABSTOP
template.append(["SysListView32", "", self.IDC_LISTVIEW, (10, 10, 185, 100), lvStyle])
return template
class RegistryPage(RegEditPropertyPage):
def __init__(self):
self.caption="Path"
RegEditPropertyPage.__init__(self, self.GetTemplate())
def OnInitDialog(self):
self.listview = self.GetDlgItem(self.IDC_LISTVIEW)
RegEditPropertyPage.OnInitDialog(self)
# Setup the listview columns
itemDetails = (commctrl.LVCFMT_LEFT, 100, "App", 0)
self.listview.InsertColumn(0, itemDetails)
itemDetails = (commctrl.LVCFMT_LEFT, 1024, "Paths", 0)
self.listview.InsertColumn(1, itemDetails)
index = self.listview.InsertItem(0,"App")
self.listview.SetItemText(index, 1, "Path")
class RegistrySheet(dialog.PropertySheet):
def __init__(self, title):
dialog.PropertySheet.__init__(self, title)
self.HookMessage(self.OnActivate, win32con.WM_ACTIVATE)
def OnActivate(self, msg):
        print "OnActivate"
def t():
ps=RegistrySheet('Registry Settings')
ps.AddPage(RegistryPage())
ps.DoModal()
if __name__=='__main__':
t()
|
[
"van7hu@gmail.com"
] |
van7hu@gmail.com
|
5cdb0501fd8c8c30df2605856090ccd991199256
|
19f2fb3f384c57339e78f85fe21567068cef87a8
|
/budgetr/migrations/0002_flow_start_date.py
|
1a53a94cb88c217b364d899252bc67de742bad77
|
[] |
no_license
|
rrlittle/budgetr_site
|
bc168812a0e1117311c59044dcdfae9e6c3908ad
|
10a214ede5f61735f9982500f605fa9fb11cba2f
|
refs/heads/master
| 2021-01-20T06:29:26.726011
| 2017-05-10T22:02:40
| 2017-05-10T22:02:40
| 89,885,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-30 21:31
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('budgetr', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='flow',
name='start_date',
field=models.DateField(default=datetime.datetime(2017, 4, 30, 21, 31, 17, 475887, tzinfo=utc)),
preserve_default=False,
),
]
|
[
"rrlittle2@gmail.com"
] |
rrlittle2@gmail.com
|
ce51e6a69701a01c52fd2b57c4df7eb531aff74a
|
622e42704408473c3b2b7ac4d39c5fcd493f40b5
|
/test/toppra.py
|
2276e924b5da20ddf3a793437f3417d4eb2e6e4b
|
[
"MIT"
] |
permissive
|
zzz622848/ruckig
|
06b97c291b78faf7c11bb85e86d8f16887aa99b9
|
60a40b32c4b73b78c8d37b06c6afa7cbaff4f0e8
|
refs/heads/master
| 2023-07-05T00:38:32.287582
| 2021-08-30T09:25:53
| 2021-08-30T09:25:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,480
|
py
|
from pathlib import Path
from sys import path
import time
import numpy as np
import toppra as ta
import toppra.constraint as constraint
import toppra.algorithm as algo
from plotter import Plotter
path.insert(0, str(Path(__file__).parent.absolute().parent / 'build'))
from ruckig import InputParameter, Ruckig
class SimpleOut:
time = None
new_position = []
new_velocity = []
new_acceleration = []
def generate_new_problem(i, seed=9):
way_pts = np.random.uniform(-2, 2, size=(4, 3))
# way_pts = np.array([
# np.linspace(0.0, 1.0, 20),
# np.linspace(0.01, 1.01, 20),
# np.linspace(0.02, 1.02, 20)
# ]).T
way_pts = np.concatenate([[[0, 0, 0]], way_pts, [[1, 1, 1]]])
return (
np.linspace(0, 1, way_pts.shape[0]),
way_pts,
[1, 1, 1],
[2, 2, 2],
)
if __name__ == '__main__':
np.random.seed(42)
ta.setup_logging("INFO")
# durations = 0.0
# for i in range(250):
# ss, way_pts, vlims, alims = generate_new_problem(i)
# # print(way_pts)
# path = ta.SplineInterpolator(ss, way_pts)
# pc_vel = constraint.JointVelocityConstraint(vlims)
# pc_acc = constraint.JointAccelerationConstraint(alims)
# instance = algo.TOPPRA([pc_vel, pc_acc], path, parametrizer="ParametrizeConstAccel")
# s = time.time()
# jnt_traj = instance.compute_trajectory()
# e = time.time()
# durations += (e - s) * 1000
# # durations += jnt_traj.duration
# print(durations/250)
ss, way_pts, vlims, alims = generate_new_problem(None)
path = ta.SplineInterpolator(ss, way_pts)
pc_vel = constraint.JointVelocityConstraint(vlims)
pc_acc = constraint.JointAccelerationConstraint(alims)
instance = algo.TOPPRA([pc_vel, pc_acc], path, parametrizer="ParametrizeConstAccel")
s = time.time()
jnt_traj = instance.compute_trajectory()
otg = Ruckig(3, 0.01)
inp = InputParameter(3)
inp.max_jerk = [1000, 1000, 1000]
inp.max_acceleration = alims
inp.max_velocity = vlims
out_list = []
ts_sample = np.linspace(0, jnt_traj.duration, 100)
for t in ts_sample:
out = SimpleOut()
out.time = t
out.new_position = jnt_traj(t)
out.new_velocity = jnt_traj(t, 1)
out.new_acceleration = jnt_traj(t, 2)
out_list.append(out)
Plotter.plot_trajectory('otg_trajectory_toppra.png', otg, inp, out_list, plot_jerk=False)
|
[
"lars.berscheid@kit.edu"
] |
lars.berscheid@kit.edu
|
da1a5b8ba1d97d5761d2041d3878d7777f846781
|
fd36f1935a2c03ab4ec6d061a170bcf62ec4fcd9
|
/tools/sense_studio/utils.py
|
d20f57ecdfc7aab7e78fcba130163900dd995e2a
|
[] |
no_license
|
346644054/FlashFinger
|
a7a65f2312bf4d826def349a56b8959d4efaa3f3
|
2cd66a60f2029e3d23434be39f53e6642a01dd07
|
refs/heads/master
| 2023-04-28T16:56:26.382632
| 2021-05-24T05:48:10
| 2021-05-24T05:48:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,816
|
py
|
import json
import numpy as np
import os
from joblib import dump
from sklearn.linear_model import LogisticRegression
from sense.engine import InferenceEngine
from sense.loading import build_backbone_network
from sense.loading import get_relevant_weights
from sense.loading import ModelConfig
from tools import directories
from tools.sense_studio.project_utils import get_project_setting
SUPPORTED_MODEL_CONFIGURATIONS = [
ModelConfig('StridedInflatedEfficientNet', 'pro', []),
ModelConfig('StridedInflatedMobileNetV2', 'pro', []),
ModelConfig('StridedInflatedEfficientNet', 'lite', []),
ModelConfig('StridedInflatedMobileNetV2', 'lite', []),
]
BACKBONE_MODELS = [model_name.combined_model_name for model_name in SUPPORTED_MODEL_CONFIGURATIONS]
def load_feature_extractor(project_path):
# Load weights
model_config, weights = get_relevant_weights(SUPPORTED_MODEL_CONFIGURATIONS)
# Setup backbone network
backbone_network = build_backbone_network(model_config, weights['backbone'])
# Create Inference Engine
use_gpu = get_project_setting(project_path, 'use_gpu')
inference_engine = InferenceEngine(backbone_network, use_gpu=use_gpu)
return inference_engine, model_config
def is_image_file(filename):
""" Returns `True` if the file has a valid image extension. """
return '.' in filename and filename.rsplit('.', 1)[1] in ('png', 'jpg', 'jpeg', 'gif', 'bmp')
def get_class_name_and_tags(form_data):
"""
Extract 'className', 'tag1' and 'tag2' from the given form data and make sure that the tags
are not empty or the same.
"""
class_name = form_data['className']
tag1 = form_data['tag1'] or f'{class_name}_tag1'
tag2 = form_data['tag2'] or f'{class_name}_tag2'
if tag2 == tag1:
tag1 = f'{tag1}_1'
tag2 = f'{tag2}_2'
return class_name, tag1, tag2
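# Illustration of the fallback logic above (hypothetical form data):
#   get_class_name_and_tags({'className': 'jump', 'tag1': '', 'tag2': ''})
#     -> ('jump', 'jump_tag1', 'jump_tag2')
#   get_class_name_and_tags({'className': 'jump', 'tag1': 'up', 'tag2': 'up'})
#     -> ('jump', 'up_1', 'up_2')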
def train_logreg(path, split, label):
"""
(Re-)Train a logistic regression model on all annotations that have been submitted so far.
"""
_, model_config = load_feature_extractor(path)
features_dir = directories.get_features_dir(path, split, model_config, label=label)
tags_dir = directories.get_tags_dir(path, split, label)
logreg_dir = directories.get_logreg_dir(path, model_config, label)
logreg_path = os.path.join(logreg_dir, 'logreg.joblib')
annotations = os.listdir(tags_dir) if os.path.exists(tags_dir) else None
if not annotations:
return
features = [os.path.join(features_dir, x.replace('.json', '.npy')) for x in annotations]
annotations = [os.path.join(tags_dir, x) for x in annotations]
x = []
y = []
class_weight = {0: 0.5}
for feature in features:
feature = np.load(feature)
for f in feature:
x.append(f.mean(axis=(1, 2)))
for annotation in annotations:
with open(annotation, 'r') as f:
annotation = json.load(f)['time_annotation']
pos1 = np.where(np.array(annotation).astype(int) == 1)[0]
if len(pos1) > 0:
class_weight.update({1: 2})
for p in pos1:
if p + 1 < len(annotation):
annotation[p + 1] = 1
pos1 = np.where(np.array(annotation).astype(int) == 2)[0]
if len(pos1) > 0:
class_weight.update({2: 2})
for p in pos1:
if p + 1 < len(annotation):
annotation[p + 1] = 2
for a in annotation:
y.append(a)
x = np.array(x)
y = np.array(y)
if len(class_weight) > 1:
logreg = LogisticRegression(C=0.1, class_weight=class_weight)
logreg.fit(x, y)
dump(logreg, logreg_path)
|
[
"QT@users.noreply.github.com"
] |
QT@users.noreply.github.com
|
119622240c9b5ec29a6dece9fae33d21d3223d64
|
fe934523d8f9b17763baffec09498ed6740b3420
|
/src/devilry_qualifiesforexam/devilry_qualifiesforexam/tests/statusprintview.py
|
ff509bf734285db488f1db9a0a17372f592e5351
|
[] |
no_license
|
tworide/devilry-django
|
b13017dd3904cef6972993ec889e2b513e4a9b28
|
af554253ab5896806d88414694f3c7ba5d523d74
|
refs/heads/master
| 2021-01-15T20:43:17.985336
| 2013-07-09T18:55:58
| 2013-07-09T18:55:58
| 1,872,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,721
|
py
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from devilry.apps.core.testhelper import TestHelper
from devilry_qualifiesforexam.models import Status
class TestStatusPrintView(TestCase):
def setUp(self):
self.testhelper = TestHelper()
self.testhelper.create_superuser('superuser')
def _get_url(self, status_id):
return reverse('devilry_qualifiesforexam_statusprint', kwargs={'status_id': status_id})
def _getas(self, username, status_id):
self.client.login(username=username, password='test')
return self.client.get(self._get_url(status_id))
def test_status_not_found(self):
response = self._getas('superuser', 1)
self.assertEqual(response.status_code, 404)
def test_status_forbidden(self):
self.testhelper.add(nodes='uni',
subjects=['sub'],
periods=['p1:admin(periodadmin):begins(-3):ends(6)'])
status = Status.objects.create(
user=self.testhelper.superuser,
period=self.testhelper.sub_p1,
status=Status.READY)
self.testhelper.create_user('nobody')
response = self._getas('nobody', status.pk)
self.assertEqual(response.status_code, 403)
def test_status_not_ready(self):
self.testhelper.add(nodes='uni',
subjects=['sub'],
periods=['p1:admin(periodadmin):begins(-3):ends(6)'])
status = Status.objects.create(
user=self.testhelper.superuser,
period=self.testhelper.sub_p1,
status=Status.NOTREADY)
response = self._getas('superuser', status.pk)
self.assertEqual(response.status_code, 404)
|
[
"post@espenak.net"
] |
post@espenak.net
|
5e31439f6ef8877bc551eb574e79ba5a5c45772f
|
8254970811a8aa76ad22aeacec0455fc15b43000
|
/hello.py
|
fef48f3af8068de5139a60b33151b177db42be0a
|
[] |
no_license
|
Daarh/Hello-world
|
3ffaf8b54ad0366a88048c96d7aef7c6d4b13345
|
e04fee12f67efe820356108072219db1a8617046
|
refs/heads/master
| 2021-01-19T07:40:42.016907
| 2013-04-07T07:27:45
| 2013-04-07T07:27:45
| 1,606,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 100
|
py
|
print ("Hello Git!")
print ("This is work?")
print ("Yep! I do it, but understand how this work")
|
[
"satimon@gmail.com"
] |
satimon@gmail.com
|
2775c0868b4c25f04216a4b97a154405231a3abd
|
34bac3e5b17a8b646ecfc0e784f2e683cde67760
|
/cpe101/hw5/collisions.py
|
ab310458a5306fc1bcb93c27fa59b4a566ddc502
|
[] |
no_license
|
bertair7/schoolprojects
|
ac37b288ce1a586947e47a5e8511bb49f3cf1fda
|
5f757738d39c02642cbd89dbec7cd48aed4cd919
|
refs/heads/master
| 2020-03-24T22:02:39.053866
| 2018-07-31T20:40:42
| 2018-07-31T20:40:42
| 143,063,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,064
|
py
|
import math  # needed for math.sqrt below
import data
from vector_math import *
def sphere_intersection_point(ray, sphere):
A = dot_vector(ray.dir, ray.dir)
B = 2 * dot_vector((difference_point(ray.pt, sphere.center)), ray.dir)
C = dot_vector((difference_point(ray.pt, sphere.center)),
(difference_point(ray.pt, sphere.center))) - sphere.radius ** 2
v = discriminant(A, B, C)
if v >= 0:
t = (-B - math.sqrt(v)) / (2 * A)
t2 = (-B + math.sqrt(v)) / (2 * A)
if t >= 0:
return translate_point(ray.pt, scale_vector(ray.dir, t))
elif t2 >= 0:
return translate_point(ray.pt, scale_vector(ray.dir, t2))
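# Math sketch for sphere_intersection_point: a ray P(t) = pt + t*dir hits the
# sphere when |P(t) - center|**2 = radius**2, which expands to the quadratic
# A*t**2 + B*t + C = 0 with the A, B, C computed above. The smaller
# non-negative root (t) is the nearest visible hit; t2 only applies when the
# ray origin is inside the sphere.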
def discriminant(a, b, c):
return (b ** 2) - (4 * a * c)
def find_intersection_points(sphere_list, ray):
E = []
N = [sphere_intersection_point(ray, x) for x in sphere_list]
    for i, n in enumerate(N):
        if n is not None:
            E.append((sphere_list[i], n))
return E
def sphere_normal_at_point(sphere, point):
vec = vector_from_to(sphere.center, point)
return normalize_vector(vec)
|
[
"rablair@calpoly.edu"
] |
rablair@calpoly.edu
|
25bc2aa94cd161d7305f5cf63cbbae7bb8932c76
|
ad5d38fce4785037c108186f17eb1c64380355ef
|
/sddsd/google-cloud-sdk/lib/surface/kuberun/applications/describe.py
|
2e78f1b2d7338dbfae878c76270492f94624b95e
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
saranraju90/multik8s
|
75864b605a139ddb7947ed4de4ae8466bdd49acb
|
428576dedef7bb9cd6516e2c1ab2714581e1137c
|
refs/heads/master
| 2023-03-03T21:56:14.383571
| 2021-02-20T14:56:42
| 2021-02-20T14:56:42
| 339,665,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,713
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Describe a KubeRun application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.kuberun import kuberun_command
from googlecloudsdk.core import log
_DETAILED_HELP = ({
'EXAMPLES':
"""
To show all the data about the KubeRun application associated with the
current working directory, run:
$ {command}
""",
})
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Describe(kuberun_command.KubeRunCommand, base.DescribeCommand):
"""Describes a KubeRun application."""
detailed_help = _DETAILED_HELP
flags = []
@classmethod
def Args(cls, parser):
super(Describe, cls).Args(parser)
def Command(self):
return ['applications', 'describe']
def FormatOutput(self, out, args):
# TODO(b/170429098): handle this as JSON
if not out:
return out
return out + '\n'
# TODO(b/170429098): remove this workaround
def Display(self, args, output):
log.out.write(output)
|
[
"saranraju90@gmail.com"
] |
saranraju90@gmail.com
|
a7f29204a83dfb2884b624f10967ca5bc0854d93
|
0946a8c53c6e625da2f5098cc8fee14126b5fbe5
|
/ex023.py
|
ae0e1d3ebf3329a0334782b0952d3909f617dd8e
|
[] |
no_license
|
humana42/curso_em_video_Python
|
5566a35b762589c0835bb4396265725fe252613a
|
0c7a4a593bd118c323f2d27ce35cb0d9899198c2
|
refs/heads/main
| 2023-02-01T10:31:30.308634
| 2020-12-16T12:38:52
| 2020-12-16T12:38:52
| 306,768,948
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
n = int(input('Enter a number: '))
u = n//1 % 10
d = n//10 % 10
c = n//100 % 10
m = n//1000 % 10
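# Each line above uses n // 10**k % 10 to isolate the k-th decimal digit
# (k = 0 units, 1 tens, 2 hundreds, 3 thousands).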
print('the number has {} units'.format(u))
print('the number has {} tens'.format(d))
print('the number has {} hundreds'.format(c))
print('the number has {} thousands'.format(m))
|
[
"noreply@github.com"
] |
humana42.noreply@github.com
|
a79010c3fed37025cd48242acb74512a5d588593
|
0e55ea6ca2c36e3f9b5c58cb3f44f5aaec75560c
|
/youtadata/youtadata/app_accounts/views.py
|
40f612a2561642b8f3ac610124122b28dedc29f2
|
[] |
no_license
|
amin7mazaheri/youtadata-site
|
528bbabef4b9a430672194e4e8d71f54f1d6e51d
|
db770cccdb39a936111f06f9ccb7f9ca7a854e94
|
refs/heads/master
| 2022-12-13T08:40:07.533557
| 2019-09-04T15:07:23
| 2019-09-04T15:07:23
| 200,056,433
| 1
| 0
| null | 2022-11-22T03:35:50
| 2019-08-01T13:31:44
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 382
|
py
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from app_accounts.models import RegisteredCourse
@login_required
def profile(request):
# import ipdb ; ipdb.set_trace();
ctx = {}
ctx['registered_course'] = [rc.course for rc in request.user.registeredcourse_set.all()]
return render(request, 'student-dashbord.html', ctx)
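# registeredcourse_set is Django's default reverse accessor for the
# ForeignKey that RegisteredCourse holds to the user model.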
|
[
"mazaheri7amin@gmail.com"
] |
mazaheri7amin@gmail.com
|
e44c17545c6248e453002fa7fd8fb168fdb50244
|
7ef3992d882ae700d153f7985a09713cc8bbf260
|
/nomadgram/notifications/migrations/0012_auto_20190315_0101.py
|
a4df07540d095e195f9e2f93eda71513bb3ba389
|
[
"MIT"
] |
permissive
|
kode-soar/nomadgram
|
68df70445a52a18913aab22e3f03ca6ac8ca94b7
|
1abb40135a0cf9f7d5d9f7363e5f555ecc940ece
|
refs/heads/master
| 2023-01-09T11:36:36.738209
| 2019-03-28T14:33:07
| 2019-03-28T14:33:07
| 171,076,140
| 0
| 0
|
MIT
| 2023-01-03T18:18:09
| 2019-02-17T03:04:53
|
Python
|
UTF-8
|
Python
| false
| false
| 675
|
py
|
# Generated by Django 2.0.13 on 2019-03-14 16:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('images', '0004_image_tags'),
('notifications', '0011_auto_20190315_0100'),
]
operations = [
migrations.RemoveField(
model_name='notification',
name='image',
),
migrations.AddField(
model_name='notification',
name='imagess',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='imagess', to='images.Image'),
),
]
|
[
"kode.soar@gmail.com"
] |
kode.soar@gmail.com
|
71e26ad654e533b1909884dadced09b8972863a8
|
bf271b1e6d055a298c37d7f400f1a3c877b58a09
|
/test.py
|
3a18e82f72afb811b054333d5bc05518a7edaabe
|
[] |
no_license
|
SinForest/color_die_trying
|
68d2386abf040959907492f0afba8748fd78be1b
|
358a06a86795b26a060e40e6dd8ad37d6ad46b27
|
refs/heads/master
| 2020-03-18T00:25:57.203092
| 2019-03-19T09:21:28
| 2019-03-19T09:21:28
| 134,093,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,886
|
py
|
#!/usr/bin/env python3
from errors import *
from field import Field
from game import Game
import json
class Tests:
def __init__(self):
self.field_cases = {}
def test_func_err(self, name, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
raise NestedTestError(name, e)
def test_assert_equal(self, name, real, should):
try:
assert real == should, "\n".join(["{} <-r-s-> {}".format(
"".join(r),"".join(s))
for r, s in zip(real, should)])
except Exception as e:
raise NestedTestError(name, e)
def test_should_except(self, func, *args, catch=[], **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
if catch and type(catch) == list:
return type(e) in catch
elif catch:
return type(e) == catch
else:
return True # did except with unspecified exception
else:
return False # did not except
def test_game(self):
s = ""
try:
s = "[init] creating game"
g = Game() # players: 2; size: 50
s = "[start] not enough players"
assert not g.start()
s = "[get_cp] get (no) current player"
            assert self.test_should_except(g.get_curr_player, catch=GameError)
s = "[play] play when nobody is in"
            assert self.test_should_except(g.play, None, catch=GameError)
s = "[reg] add first player"
g.reg_player("Test Osteron")
s = "[start] start game"
g.start()
s = "[start] start game again"
            assert self.test_should_except(g.start, catch=GameError)
s = "[reg] add player on started game"
            assert self.test_should_except(g.reg_player, 'Late Player', catch=GameError)  # 'Late Player' is a placeholder name
#TODO: has_started x2
        except Exception:
            print("Game Tests failed at:\n" + s)
            raise
def test_defaults(self):
s = ""
try:
s = "[init] creating 20x20-field"
field = Field(20)
s = "[play] playing card on (0,0)"
field.play_card(0, 0, "r")
s = "[fill] playing card on (0,19)"
field.play_card(0, 19, "r")
s = "[mix] playing card on (19,19)"
field.play_card(19, 19, "y")
s = "[get] get mixed color on (9,19)"
color = field.get_color(9, 19)
s = "[check] color should be orange"
assert color == "o"
s = "[get] get mixed color on (0,9)"
color = field.get_color(0, 9)
s = "[check] color should be red"
assert color == "r"
except Exception as e:
print("Default Tests failed at:\n" + s)
print("Field:\n" + str(field))
raise e
def add_case_field(self, before, position, color, after, name=None):
if name is None:
for i,e in enumerate(sorted([x for x in self.field_cases.keys() if type(x) == int])):
if i != e:
name = i
break
while name in self.field_cases.keys():
name = str(name) + "|"
self.field_cases[name] = ([list(s) for s in before], tuple(position), color, [list(s) for s in after])
def field_test(self, name):
(b, p, c, a) = self.field_cases[name]
field = self.test_func_err("Creating Field", Field, field=b)
self.test_func_err("Setting Color", field.play_card, *p, c)
self.test_assert_equal("Testing equality", field._data, a)
def print_passed(self, name, fail, amount):
s = "{}/{}".format(amount - fail, amount) if fail else "all"
print("{} Tests: {} tests passed.".format(name, s))
def test(self, verbose=True):
failed = {}
# test default cases:
self.test_defaults() # Doesn't continue if this fails!
print("Default Tests passed")
# test field tests:
fail_count = 0
for name in sorted(self.field_cases.keys()):
try:
self.field_test(name)
except NestedTestError as e:
fail_count += 1
failed[name] = e
self.print_passed("Field", fail_count, len(self.field_cases))
if fail_count > 0:
for name, e in failed.items():
print('"{}": '.format(name), e)
self.test_game()
print("Game Tests passed")
def read_field_cases(self, fp="./field_tests.json"):
for name, case in json.load(open(fp, 'r')).items():
self.add_case_field(**case, name=name)
return self
def main():
tests = Tests().read_field_cases()
tests.test()
if __name__ == '__main__':
main()
|
[
"github@thesinforest.eu"
] |
github@thesinforest.eu
|
d62f327f6d19068f498b05188f56a0b396577377
|
82fb1aa92d5203770ff065d1389e6bcaf5ed92b3
|
/scripts/histogram_equalization.py
|
3fb51a171261225df745b4f6724e1202fbd3b631
|
[] |
no_license
|
dinabandhu50/opencv_projects
|
7006bd6d4cacd12f2540a8091030b53cf0d7d9d4
|
32eeec978dd94713d02d5a3f6cd0cc56583aef85
|
refs/heads/master
| 2021-01-08T21:50:03.555643
| 2020-03-07T03:55:58
| 2020-03-07T03:55:58
| 242,151,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,321
|
py
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
# %%
img = cv2.imread('./data/image_3.jpg', 0)
print(img.shape)
# %%
def bgr2rgb(img):
"""
    cv2 reads images in BGR order while plt displays them in RGB order.
    To convert between the two:
b,g,r = cv2.split(img)
img = cv2.merge([r,g,b])
"""
if len(img.shape) == 3 and img.shape[2] == 3:
b, g, r = cv2.split(img)
out = cv2.merge([r, g, b])
else:
out = img
return out
# %% Histogram equalization
hist, bins = np.histogram(img.flatten(), 256, (0, 256))
cdf = hist.cumsum()
cdf_normalized = cdf * hist.max()/hist.cumsum().max()
# Histogram Equalization
equ = cv2.equalizeHist(img)
hist_equ, bins_equ = np.histogram(equ.flatten(), 256, (0, 256))
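# For reference, a minimal manual equivalent of cv2.equalizeHist, built from
# the cdf computed above (assumes an 8-bit single-channel image):
cdf_m = np.ma.masked_equal(cdf, 0)  # ignore empty bins
lut = ((cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())).filled(0).astype('uint8')
equ_manual = lut[img]  # should closely match equ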
# Contrast Limited Adaptive Histogram Equalization
clahe = cv2.createCLAHE(clipLimit=20.0, tileGridSize=(8, 8))
cl1 = clahe.apply(img)
hist_cl1, bins_cl1 = np.histogram(cl1.flatten(), 256, (0, 256))
# Horizontal stack of images
res = np.hstack((img, equ, cl1))
plt.figure(1)
plt.imshow(bgr2rgb(res), cmap='gray')
# %%
plt.figure(2)
plt.plot(hist, color='b', label="hist")
plt.plot(hist_equ, color='g', label="hist equalized")
plt.plot(hist_cl1, color='r', label="hist CLAHE")
plt.plot(cdf_normalized, 'y', label='CDF normalized')
plt.legend()
plt.show()
|
[
"beheradinabandhu50@gmail.com"
] |
beheradinabandhu50@gmail.com
|
215f63c56fcb7e4ed600067263c981ea47694774
|
3388ea58c1fcae5f8d5dfa7e6e22033d3676d866
|
/messagebox/messagebox/urls.py
|
4a80041de8eae0aa1307aae4b7f69a87e595b596
|
[] |
no_license
|
ssmi4th98/django
|
58763d8f628de2d1391458572d143b2a7e6564e7
|
db577514398ebf3d8f9f9e24de22c3f4084d9e17
|
refs/heads/master
| 2022-12-17T07:31:35.780528
| 2020-09-14T13:33:07
| 2020-09-14T13:33:07
| 295,007,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 933
|
py
|
"""messagebox URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url # can also use "from django.urls import path" to set paths
from main import views as my_views
urlpatterns = [
#path('admin/', admin.site.urls),
url(r'^admin/', admin.site.urls),
url(r'^$',my_views.home,name='home')
]
|
[
"s.e.smith@accenturefederal.com"
] |
s.e.smith@accenturefederal.com
|
3677902b71b928321cf328fcb9adce33ea2b84a5
|
1083a3e39e10baad0ab37bb0777b2209ccfbca18
|
/supercache/engine/memory.py
|
662336d44c462304b2b5f94e8721da8b5b0d3e73
|
[
"MIT"
] |
permissive
|
huntfx/supercache
|
6823cf5ef189b1ee54ca14b14f6c14f9b16830f3
|
e85ae87e4c2fead6e2a6aa55c0983d249512f34d
|
refs/heads/master
| 2023-06-29T18:32:06.106821
| 2021-08-06T10:02:59
| 2021-08-06T10:02:59
| 244,196,609
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,450
|
py
|
import time
from collections import defaultdict
from .. import exceptions, utils
class Memory(object):
"""Cache directly in memory.
This is by far the fastest solution, but the cache cannot be shared
outside the current process.
    This is not completely thread safe, but care has been taken to
    prevent concurrency errors from breaking the cache.
"""
FIFO = FirstInFirstOut = 0
FILO = FirstInLastOut = 1
LRU = LeastRecentlyUsed = 2
MRU = MostRecentlyUsed = 3
LFU = LeastFrequentlyUsed = 4
def __init__(self, ttl=None, mode=LRU, count=None, size=None):
"""Create a new engine.
Parameters:
mode (int): How to purge the old keys.
ttl (int): Time the cache is valid for.
Set to None for infinite.
count (int): Maximum cache results to store.
Set to None or 0 for infinite.
size (int): Maximum size of cache in bytes.
This is a soft limit, where the memory will be
allocated first, and any extra cache purged later.
The latest cache item will always be stored.
Set to None for infinite.
"""
self.data = dict(
result={},
hits=defaultdict(int),
misses=defaultdict(int),
size={None: 0},
ttl={},
insert={},
access={}
)
self.mode = mode
self.ttl = ttl
self.count = count
self.size = size
self._next_ttl = float('inf')
def keys(self):
"""Get the current stored cache keys."""
return list(iter(self))
def __iter__(self):
"""Iterate through all the keys."""
self._purge()
return iter(self.data['result'])
def exists(self, key):
"""Find if cache currently exists for a given key.
Any key past its ttl will be removed.
"""
if key in self.data['result']:
if self.expired(key):
self.delete(key)
return False
return True
return False
def expired(self, key, _current_time=None):
"""Determine is a key has expired."""
if key not in self.data['ttl']:
return False
if _current_time is None:
_current_time = time.time()
try:
return self.data['ttl'][key] <= _current_time
except KeyError:
return True
def get(self, key, purge=False):
"""Get the value belonging to a key.
An error will be raised if the cache is expired or doesn't
exist.
"""
if purge:
self._purge()
if not self.exists(key):
raise exceptions.CacheNotFound(key)
# If a purge was done, then skip the expiry check
if not purge and self.expired(key):
raise exceptions.CacheExpired(key)
try:
self.data['hits'][key] += 1
self.data['access'][key] = time.time()
return self.data['result'][key]
except KeyError:
raise exceptions.CacheExpired(key)
def put(self, key, value, ttl=None, purge=True):
"""Add a new value to cache.
This will overwrite any old cache with the same key.
"""
if ttl is None:
ttl = self.ttl
self.data['result'][key] = value
try:
self.data['misses'][key] += 1
except KeyError:
self.data['misses'][key] = 1
# Calculate size
if self.size is not None:
size = utils.getsize(value)
self.data['size'][None] += size - self.data['size'].get(key, 0)
self.data['size'][key] = size
# Set insert/access time
current_time = time.time()
self.data['insert'][key] = self.data['access'][key] = current_time
# Set timeout
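        # (a ttl of None or <= 0 means the key never expires)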
if ttl is None or ttl <= 0:
try:
del self.data['ttl'][key]
except KeyError:
pass
else:
self.data['ttl'][key] = current_time + ttl
self._next_ttl = min(self._next_ttl, self.data['ttl'][key])
# Clean old keys
if purge:
self._purge(ignore=key)
def delete(self, key):
"""Delete an item of cache.
This will not remove the hits or misses.
"""
if key in self.data['result']:
try:
del self.data['result'][key]
del self.data['insert'][key]
del self.data['access'][key]
if key in self.data['ttl']:
del self.data['ttl'][key]
if self.size is not None:
self.data['size'][None] -= self.data['size'].pop(key)
except KeyError:
pass
return True
return False
def hits(self, key):
"""Return the number of hits on an item of cache."""
return self.data['hits'].get(key, 0)
def misses(self, key):
"""Return the number of misses on an item of cache."""
return self.data['misses'].get(key, 0)
def _purge(self, ignore=None):
"""Remove old cache."""
count = self.count
size = self.size
purged = 0
# Delete expired
if self.data['ttl']:
current_time = time.time()
if current_time > self._next_ttl:
self._next_ttl = float('inf')
for key in tuple(self.data['result']):
if self.expired(key, _current_time=current_time):
self.delete(key)
elif key in self.data['ttl']:
try:
self._next_ttl = min(self._next_ttl, self.data['ttl'][key])
except KeyError:
pass
# Determine if we can skip
if count is not None and len(self.data['result']) < count:
count = None
if size is not None and self.data['size'][None] < size:
size = None
if count is None and size is None:
return purged
# Order the keys
if self.mode == self.FirstInFirstOut:
order_by = lambda k: self.data['insert'][k]
elif self.mode == self.FirstInLastOut:
order_by = lambda k: -self.data['insert'][k]
elif self.mode == self.LeastRecentlyUsed:
order_by = lambda k: self.data['access'][k]
elif self.mode == self.MostRecentlyUsed:
order_by = lambda k: -self.data['access'][k]
elif self.mode == self.LeastFrequentlyUsed:
order_by = lambda k: self.data['hits'][k]
else:
raise NotImplementedError(self.mode)
ordered_keys = sorted(self.data['result'], key=order_by, reverse=True)
# Remove the cache data
if count is not None:
for key in ordered_keys[count:]:
if key == ignore:
continue
self.delete(key)
purged += 1
if size is not None:
total_size = 0
for key in ordered_keys:
if key == ignore:
continue
total_size += self.data['size'][key]
if total_size > size:
self.delete(key)
purged += 1
return purged
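# Minimal usage sketch (every call below is defined above; the key/value pair
# is made up for illustration):
#
#   cache = Memory(ttl=60, mode=Memory.LRU, count=100)
#   cache.put('answer', 42)
#   assert cache.exists('answer')
#   print(cache.get('answer'))  # -> 42, and counts one hit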
|
[
"peter@huntfx.uk"
] |
peter@huntfx.uk
|
f9fef7293b9342137c66bb53e33f238396d0d3a5
|
0f30f4b1f10ae5ade009d0ff729aa9ec4849f953
|
/learning/pmath/add.py
|
4757f1c4bdf84f86516a5781d68b6a5101737fb1
|
[] |
no_license
|
pan1394/hello-world
|
c0e5aa53089ed9d5bdf2158bbcb0ea759b1d7d74
|
fe6633eb42dcccf136fe7334334921b82e2aa90b
|
refs/heads/master
| 2021-01-19T03:59:09.014342
| 2018-12-10T06:15:42
| 2018-12-10T06:15:42
| 60,680,368
| 0
| 0
| null | 2016-06-08T08:23:33
| 2016-06-08T08:08:55
| null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
def add(a, b):
return a + b
if __name__ == "__main__":
print(add(2,3))
|
[
"pan1394@126.com"
] |
pan1394@126.com
|
b8ae9efc79a2a5b1389e6e8cfc793a8d2a5c06a4
|
8514fc79d48f702b14c60310b236a92b767393ff
|
/server/wsgi.py
|
750be1ff7d11264c73c3db736b3af1e83d9501d2
|
[] |
no_license
|
sungsooha/ChimbukoVisualizationII
|
13c188cddd53413be6534600b2b6137e2c589159
|
1df77357a73f30b954e75dc080fd96f9e6738898
|
refs/heads/master
| 2020-06-28T20:07:04.124297
| 2019-10-31T15:05:59
| 2019-10-31T15:05:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
import os
from server import create_app
# Create an application instance that web servers can use. We store it as
# "application" (the wsgi default) and also the much shorter and convenient
# "app".
application = app = create_app(os.environ.get('SERVER_CONFIG', 'production'))
|
[
"sungsooha@visws.csi.bnl.gov"
] |
sungsooha@visws.csi.bnl.gov
|
eaf2eb10fb5b8d84bf77fa6e42a564c33cb05411
|
af38a64ba0908f6fff36760e8e2c6807991fb2db
|
/dict.py
|
2bf1c93afeaba0c33aceda5940afa5bf9e3e23e9
|
[] |
no_license
|
Vinod096/learn-python
|
badbeac9a27ee6c22c70a5a057837586cede0849
|
0017174e3dbeea19ca25aaa348e75b2b8203caef
|
refs/heads/master
| 2022-05-26T13:22:07.490484
| 2022-05-01T10:19:32
| 2022-05-01T10:19:32
| 244,166,014
| 1
| 0
| null | 2020-03-04T08:16:14
| 2020-03-01T14:41:23
|
Python
|
UTF-8
|
Python
| false
| false
| 439
|
py
|
# Dictionaries
# Lists
# Tuples
# Sets
age = [1,2,3,4,5,1,1,1,2,2,2]
ages_1 = {"1": 23, "2":33, "3":43, "10":12,"5":4}
ages_2 = {1: 23, 2:33, 3:43}
print(ages_1["5"])
print(ages_2[2])
#print(days)
ages = set((1, 2, 3, 4, 5, 1, 1, 1, 2, 2, 2))
print(ages)
for age in ages:
print(age)
tup = (1,2,3,4,4,2,1)
lis = [1,2,3,4,4,2,1]
lis[0] = 7 #Mutable
# tup[0] = 6 #Immutable
print(lis)
print(tup)
print(set(lis))
print(set(tup))
|
[
"vinod.raipati@hotmail.com"
] |
vinod.raipati@hotmail.com
|
55b806a9c642a5668201a964324cdf6e28509ea8
|
56abe97d9da9a71eb497afaf51dffd2158b30e0f
|
/test/sclusterTest.py
|
f28a5cd8963090a59fac58a44c78815e2fa5ab05
|
[
"Apache-2.0"
] |
permissive
|
dice-project/DICE-Anomaly-Detection-Tool
|
0875ee93e55fbaa7d517aa242e9735e121cffdf4
|
a5eeacb9e888348adbe97be0c26a500f2f03ec6f
|
refs/heads/master
| 2020-05-21T20:31:37.667770
| 2017-10-02T21:05:55
| 2017-10-02T21:05:55
| 64,146,313
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
from dmonscikit import dmonscilearncluster
import os
import pandas as pd
dataDir = os.path.join(os.path.dirname(os.path.abspath('')), 'data')
modelDir = os.path.join(os.path.dirname(os.path.abspath('')), 'models')
data = os.path.join(dataDir, 'Final_Merge.csv')
data_df = pd.read_csv(data)
print data_df
dbscan = dmonscilearncluster.SciCluster(modelDir=modelDir)
settings = {'eps': 0.9, 'min_samples': 10, 'metric': 'euclidean', 'algorithm': 'auto', 'leaf_size': 30, 'p': 0.2, 'n_jobs':1}
# mname = os.path.join(dataDir, 'sdbscan_test.pkl')
mname = 'test'
dbscan.sdbscanTrain(settings=settings, mname=mname, data=data_df)
#
isolationFrst = dmonscilearncluster.SciCluster(modelDir=modelDir)
settings2 = {'n_estimators': 100, 'max_samples': 100, 'contamination': 0.01, 'bootstrap': False,
'max_features': 1.0, 'n_jobs': -1, 'random_state': None, 'verbose': 0}
#
# mname = 'test'
#
isolationFrst.isolationForest(settings2, mname, data=data_df)
print isolationFrst.detect('isoforest', 'test', data_df)
print isolationFrst.detect('sdbscan', 'test', data_df)
|
[
"juhasz_gabriel@yahoo.com"
] |
juhasz_gabriel@yahoo.com
|
9a7bbd3a168fcf5c31668bd1880b909ef7164f5c
|
5e7b5697c8648d953933d701962093c714dc37ea
|
/cam/video_record.py
|
fc0e82fdbccc1cd6aecd6d52f5e3f3ed3562963a
|
[] |
no_license
|
santsaran1/new_farmland
|
b02191485779353c5462a46d6aec59fc4e335e5e
|
f4343c09a6a552eb41361bcb4bcd2e5d26b1286d
|
refs/heads/master
| 2020-12-25T11:52:45.097720
| 2016-04-25T18:22:19
| 2016-04-25T18:22:19
| 58,182,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
import subprocess
subprocess.call('avconv -loglevel quiet -f video4linux2 -r 10 -t 00:00:40 -i /dev/video0 test.avi', shell=True)
subprocess.call('fswebcam -r 640x480 --jpeg 85 -D 1 shot.jpg', shell=True)
|
[
"shtl.borganve@gmail.com"
] |
shtl.borganve@gmail.com
|
6abc99b0ea036e849423e18416801494532d591a
|
e4b35222d08123e551e230f84d4c751df94c4a76
|
/randomgamever2.py
|
3e246b916162c67269b4fb7589f7aefb1d1789e2
|
[] |
no_license
|
Rohithuppalapati/Python-Module-practice
|
7eb4814cb41e40038e3cfdc6889f859ad07ed9e9
|
3642ad04d31e1923446a612b7e2536a003d1645d
|
refs/heads/master
| 2023-07-06T15:40:47.852959
| 2021-08-22T15:45:12
| 2021-08-22T15:45:12
| 391,865,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from random import randint
import sys
answer = randint(int(sys.argv[1]), int(sys.argv[2]))
while True:
try:
guess = int(input(f'guess a number {sys.argv[1]}~{sys.argv[2]}: '))
if 0 < guess < 11:
if guess == answer:
print('you are a genius!')
break
else:
print('hey bozo, I said 1~10')
except ValueError:
print('please enter a number')
continue
|
[
"rohithuppalapati77@gmail.com"
] |
rohithuppalapati77@gmail.com
|
19936eae1665b6a18075613db82903db8c65342f
|
95c441c94991931d53e6fce1e93441443517db68
|
/proyecto009/wsgi.py
|
a9be1896d61d8b1657374a0ed85fc079dd517c82
|
[] |
no_license
|
FranciscoVF/proyecto009
|
05d4e57a908f27ec6e6b7dacae9abd28002c4ef2
|
f90c2040afbf4d43768ffebafadd1d61780c112d
|
refs/heads/master
| 2023-01-23T03:07:48.724455
| 2020-11-30T05:17:09
| 2020-11-30T05:17:09
| 317,118,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for proyecto009 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proyecto009.settings')
application = get_wsgi_application()
|
[
"francisco.villarreal16@tectijuana.edu.mx"
] |
francisco.villarreal16@tectijuana.edu.mx
|