| blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 2–616) | content_id (stringlengths 40–40) | detected_licenses (listlengths 0–69) | license_type (stringclasses 2 values) | repo_name (stringlengths 5–118) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringlengths 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, ⌀) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (stringclasses 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses 213 values) | src_encoding (stringclasses 30 values) | language (stringclasses 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2–10.3M) | extension (stringclasses 246 values) | content (stringlengths 2–10.3M) | authors (listlengths 1–1) | author_id (stringlengths 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
82e56263eeee8167a26929e00f78250969c87ab4
|
7b12bcdb390c5beaf3fe0bc6a994222fb2c85dd9
|
/scripts/moon/shotgun/__init__.py
|
237e0d1187f2b88ecf53c84dafc5dddf8f545ab7
|
[] |
no_license
|
you4jang/moonlight
|
803350ddf1174b9c51f7048ed8b8ae5ba12eabeb
|
758f61b976339f8707b0a1ec3a839ffedbbc2770
|
refs/heads/master
| 2023-04-06T17:59:33.824614
| 2021-04-19T08:38:44
| 2021-04-19T08:38:44
| 337,308,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,463
|
py
|
# -*- coding: utf-8 -*-
import shotgun_api3
# SUPERVISORS_GROUP_ID = 6
# SG_PERMISSION_RULE_SET_MANAGER = {'type': 'PermissionRuleSet', 'id': 7}
# SG_PERMISSION_RULE_SET_ARTIST = {'type': 'PermissionRuleSet', 'id': 8}
# Groups
# SG_GROUP_SUPERVISORS = {'type': 'Group', 'id': 6}
# Departments
# SG_DEPARTMENT_PD = {'type': 'Department', 'id': 75}
# SG_DEPARTMENT_DEGISN = {'type': 'Department', 'id': 74}
# SG_DEPARTMENT_MODELING = {'type': 'Department', 'id': 107}
# SG_DEPARTMENT_RIGGING = {'type': 'Department', 'id': 41}
# SG_DEPARTMENT_ANIMATION = {'type': 'Department', 'id': 42}
# SG_DEPARTMENT_LIGHTING = {'type': 'Department', 'id': 140}
# SG_DEPARTMENT_FX = {'type': 'Department', 'id': 141}
# SG_DEPARTMENT_COMP = {'type': 'Department', 'id': 142}
# SG_DEPARTMENT_DIRECTOR = {'type': 'Department', 'id': 143}
# SG_DEPARTMENT_STORY = {'type': 'Department', 'id': 144}
class Shotgun(shotgun_api3.Shotgun):
SHOTGUN_URL = 'https://pinkmoon.shotgunstudio.com'
def __init__(self, appname, username=None, password=None, **kwargs):
appset = {
'admin_api': {
'name': 'admin_api',
'key': 'npauxuwxbzjdgbramOeqp0iz(',
},
}
super(Shotgun, self).__init__(
self.SHOTGUN_URL,
script_name=appset[appname]['name'],
api_key=appset[appname]['key'],
login=username,
password=password,
**kwargs
)
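# Hypothetical usage sketch (not part of the original file): shows how the thin
# wrapper above might be used. Only the app name is needed because the site URL and
# script credentials are hard-coded in `appset`; 'admin_api' is the only app defined
# there. The Project filter value is made up, and running this needs network access
# and valid credentials for the Shotgun site.
if __name__ == '__main__':
    sg = Shotgun('admin_api')
    # find_one() is inherited unchanged from shotgun_api3.Shotgun
    project = sg.find_one('Project', [['name', 'is', 'moonlight']], ['id', 'name'])
    print(project)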
|
[
"gmdirect@naver.com"
] |
gmdirect@naver.com
|
2334b622780a949833143ed370cf64dec3daacdc
|
8d3e70a2d827ae849a3274b6cd75fddb63c7aca1
|
/src/app/main.py
|
830c7993448e57ce1e9de30b06a278888a48bc50
|
[
"MIT"
] |
permissive
|
fabianoa/mlops-deploy-alura
|
dc6028d796d9da49637f3d47f9d8538cc3498e42
|
ed76419289293f69bf0261e90cd6808c85f34bd0
|
refs/heads/master
| 2023-07-17T06:52:45.796073
| 2021-08-31T10:12:41
| 2021-08-31T10:12:41
| 401,658,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
from flask import Flask,request,jsonify
from flask_basicauth import BasicAuth
from textblob import TextBlob
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import pickle
import os
#df = pd.read_csv('casas.csv')
#X = df.drop('preco', axis=1)
#y = df['preco']
#X_train,X_test,y_train,y_teste = train_test_split(X,y,test_size=0.3, random_state=42)
#modelo = LinearRegression()
#modelo.fit(X_train,y_train)
modelo = pickle.load(open('../../models/modelo.sav','rb'))
app = Flask(__name__)
app.config['BASIC_AUTH_USERNAME'] =os.environ.get('BASIC_AUTH_USERNAME')
app.config['BASIC_AUTH_PASSWORD'] =os.environ.get('BASIC_AUTH_PASSWORD')
basic_auth = BasicAuth(app)
@app.route('/')
def home():
return "Minha primeira API."
@app.route('/sentimento/<frase>')
@basic_auth.required
def sentimento(frase):
tb = TextBlob(frase)
tb_en = tb.translate(to='en')
polaridade = tb_en.sentiment.polarity
return "Polaridade: {}".format(polaridade)
@app.route('/cotacao/',methods=['POST'])
@basic_auth.required
def cotacao():
colunas = ['tamanho','ano','garagem']
dados = request.get_json()
dados_input = [dados[col] for col in colunas]
preco = modelo.predict([dados_input])
return jsonify(preco=preco[0])
app.run(debug=True, host='0.0.0.0')
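# Hypothetical client sketch (not part of the original file): meant to be run as a
# separate script while the app above is serving, it exercises the /cotacao/ endpoint.
# Host/port, credentials and feature values are assumptions; the JSON keys must match
# `colunas` in cotacao().
import requests

resp = requests.post(
    'http://localhost:5000/cotacao/',
    json={'tamanho': 120, 'ano': 2005, 'garagem': 1},
    auth=('user', 'password'),  # whatever BASIC_AUTH_USERNAME / BASIC_AUTH_PASSWORD are set to
)
print(resp.json())  # e.g. {'preco': 123456.78}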
|
[
"fabiano.alencar@gmail.com"
] |
fabiano.alencar@gmail.com
|
9f3a6da4bd3f71cca3c96be91ba169f4415dabfe
|
c5d68f58c9523257a8b41954553f5cff2cd5f487
|
/Secao_05_Lista_Ex_41e/ex_26.py
|
5e1ced6cfbac38f24f677470fbda8bfb4d233128
|
[] |
no_license
|
SouzaCadu/guppe
|
04bfcde82d4404eb9ec795006c6931ba07dc72b6
|
1f8a672230c5c27712f522e1e34516591c012453
|
refs/heads/master
| 2023-03-13T01:32:51.019871
| 2021-02-25T17:02:59
| 2021-02-25T17:02:59
| 320,908,119
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
"""
Given the distance driven in kilometers and the number of liters of fuel consumed,
calculate the consumption in km/l
"""
print("Para saber a eficiência do carro informe:")
km = float(input("A distância em quilômetros:"))
l = float(input("A quantidade de litros de gasolina consumidos \n"
"por quilômetro percorrido:"))
km_l = km / l
if km_l < 8.0:
print("Venda o carro!")
elif 8.0 <= km_l <= 14.0:
print("Econômico!")
else:
print("Super econômico!")
|
[
"cadu.souza81@gmail.com"
] |
cadu.souza81@gmail.com
|
5e3c688cfd1469d2ae26ef1105ca7e99c9ee3da6
|
8189c3d355f71db573483fa1e5d181044cf7e892
|
/face-opencv-project/faceenv/bin/f2py
|
4347e453fadf17eea092082ca5c89742ee74a56a
|
[] |
no_license
|
quoc-dev/My-Python
|
2ae3585ca1aea7a743f60ee5a5fdb2c9ecce38ed
|
8d1bdb66f013950c75638226255e713faf4ae56d
|
refs/heads/master
| 2020-03-17T22:56:02.052950
| 2018-07-15T15:13:08
| 2018-07-15T15:13:08
| 134,024,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 826
|
#!/Users/quocnguyenp./QuocDEV/MyPy/My-Python/face-opencv-project/faceenv/bin/python3.6
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
import os
import sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
try:
i = sys.argv.index("--" + mode)
del sys.argv[i]
break
except ValueError:
pass
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
sys.exit(1)
elif mode == "2e-numeric":
from f2py2e import main
elif mode == "2e-numarray":
sys.argv.append("-DNUMARRAY")
from f2py2e import main
elif mode == "2e-numpy":
from numpy.f2py import main
else:
sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
sys.exit(1)
main()
|
[
"phucquoc.dev@gmail.com"
] |
phucquoc.dev@gmail.com
|
|
6d30fd2f9ef98ebd534565d6e63baf02aae1ad8d
|
863c3154ba19251bd37fac8459592dbb9e748829
|
/jejuRSScrawler.py
|
acdefdb8dfb9f0c374372c29ddb385bf5875e354
|
[
"MIT"
] |
permissive
|
LiveCoronaDetector/covid-19-crawler
|
7e852c1c3c32941c8a726aa0b7a33d5c83e7ac59
|
dc089f2e1e340ce9a3f7fbef1024db2e38cc6344
|
refs/heads/master
| 2022-11-21T05:16:41.041077
| 2020-07-21T03:39:46
| 2020-07-21T03:39:46
| 237,893,675
| 9
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,669
|
py
|
"""
Jeju Special Self-Governing Province health service status and briefing materials
http://www.jeju.go.kr/wel/healthCare/corona/coronaNotice.htm
Author: Eunhak Lee (@return0927)
"""
import re
import requests
from bs4 import BeautifulSoup as Soup
from bs4.element import Tag
from datetime import datetime
# Preferences
url = "http://www.jeju.go.kr/wel/healthCare/corona/coronaNotice.htm?act=rss"
# Model
def parse():
req = requests.get(url)
soup = Soup(req.text, 'html.parser')
title = getattr(soup.find("title"), 'text', 'Empty Title')
description = getattr(soup.find('description'), 'text', 'Empty Description')
items = []
for elem in soup.findAll("item"):
elem_title = getattr(elem.find("title"), 'text', '')
        # elem_link = getattr(elem.find("link"), 'text', '') -> TODO: figure out why item -> link gets broken when loading with soup
elem_link = re.findall(
r'((http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?)',
elem.text)[-1][0]
elem_description = getattr(elem.find("description"), 'text', '')
elem_author = getattr(elem.find("author"), 'text', '')
_bare_date = getattr(elem.find("pubdate"), 'text', '')
elem_pubDate = datetime.strptime(_bare_date, "%a, %d %b %Y %H:%M:%S GMT")
items.append({
"title": elem_title,
"link": elem_link,
"description": elem_description,
"pubDate": elem_pubDate,
"author": elem_author
})
return {
'title': title,
'description': description,
'items': items
}
if __name__ == "__main__":
parse()
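# Hypothetical sketch (not part of the original file): illustrates the structure that
# parse() returns, using only the keys assembled above. Needs network access to the
# Jeju RSS endpoint to run.
if __name__ == "__main__":
    feed = parse()
    print(feed['title'], '-', feed['description'])
    for item in feed['items'][:5]:
        print(item['pubDate'].isoformat(), item['title'], item['link'])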
|
[
"admin@return0927.xyz"
] |
admin@return0927.xyz
|
5060c8a8f910847c90a31e3b29f64bb357fe38d7
|
8111d62ccff6137f9770e591299ee01b928099ff
|
/nadmin/plugins/details.py
|
b9ebbd94cc68ec117b03982ec1ff99a6b03dc9e0
|
[
"MIT"
] |
permissive
|
A425/django-nadmin
|
73cd9a5d0095e0a7b3e1f4b5035acac83bfc9c0a
|
9ab06192311b22ec654778935ce3e3c5ffd39a00
|
refs/heads/master
| 2021-01-24T22:35:54.665401
| 2015-10-20T05:24:16
| 2015-10-20T05:24:16
| 43,421,651
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,923
|
py
|
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db import models
from nadmin.sites import site
from nadmin.views import BaseAdminPlugin, ListAdminView
class DetailsPlugin(BaseAdminPlugin):
show_detail_fields = []
show_all_rel_details = True
def result_item(self, item, obj, field_name, row):
if (self.show_all_rel_details or (field_name in self.show_detail_fields)):
rel_obj = None
if hasattr(item.field, 'rel') and isinstance(item.field.rel, models.ManyToOneRel):
rel_obj = getattr(obj, field_name)
elif field_name in self.show_detail_fields:
rel_obj = obj
if rel_obj:
if rel_obj.__class__ in site._registry:
try:
model_admin = site._registry[rel_obj.__class__]
has_view_perm = model_admin(self.admin_view.request).has_view_permission(rel_obj)
has_change_perm = model_admin(self.admin_view.request).has_change_permission(rel_obj)
except:
has_view_perm = self.admin_view.has_model_perm(rel_obj.__class__, 'view')
has_change_perm = self.has_model_perm(rel_obj.__class__, 'change')
else:
has_view_perm = self.admin_view.has_model_perm(rel_obj.__class__, 'view')
has_change_perm = self.has_model_perm(rel_obj.__class__, 'change')
if rel_obj and has_view_perm:
opts = rel_obj._meta
try:
item_res_uri = reverse(
'%s:%s_%s_detail' % (self.admin_site.app_name,
opts.app_label, opts.model_name),
args=(getattr(rel_obj, opts.pk.attname),))
if item_res_uri:
if has_change_perm:
edit_url = reverse(
'%s:%s_%s_change' % (self.admin_site.app_name, opts.app_label, opts.model_name),
args=(getattr(rel_obj, opts.pk.attname),))
else:
edit_url = ''
item.btns.append('<a data-res-uri="%s" data-edit-uri="%s" class="details-handler" rel="tooltip" title="%s"><i class="fa fa-info-circle"></i></a>'
% (item_res_uri, edit_url, _(u'Details of %s') % str(rel_obj)))
except NoReverseMatch:
pass
return item
# Media
def get_media(self, media):
if self.show_all_rel_details or self.show_detail_fields:
media = media + self.vendor('nadmin.plugin.details.js', 'nadmin.form.css')
return media
site.register_plugin(DetailsPlugin, ListAdminView)
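# Hypothetical sketch (not part of the original file): how an admin options class
# might opt into the plugin above, assuming django-nadmin follows the xadmin
# convention of plain attributes that plugins read. The model, fields and the
# registration call are all made up.
class BookAdmin(object):
    list_display = ('title', 'publisher')
    show_detail_fields = ('publisher',)   # render a details pop-up for this column
    show_all_rel_details = False

# site.register(Book, BookAdmin)  # assumed registration call, left commented out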
|
[
"liu170045@gmail.com"
] |
liu170045@gmail.com
|
4cca3bbdf60ee174427fb018e22fe9972c04e563
|
b336674934022040fe8fed3dc88ad06a0edf0014
|
/To find a leap year.py
|
0e0b6d6fbb7048180f73c7f48e4d282649addd55
|
[] |
no_license
|
rajuraj-rgb/Python-projects
|
7332d8bbfb8853ad8dfb8a1e39a3f1220805f9c7
|
5cd8b6b553778139ee86ef429ee7abb7c3444606
|
refs/heads/master
| 2023-05-29T22:43:47.422943
| 2021-06-12T17:55:40
| 2021-06-12T17:55:40
| 286,728,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
def is_leap(year):
leap = False
if (year % 4) == 0:
if (year % 100) == 0:
if (year % 400) == 0:
return True
else:
return False
else:
return True
else:
return False
year = int(input())
print(is_leap(year))
# Created by me
def leap_year(year):
    # Check divisibility by 400 and 100 before 4; otherwise those branches are
    # unreachable after the first return and e.g. 1900 is wrongly reported as a leap year.
    if year % 400 == 0:
        return True
    if year % 100 == 0:
        return False
    return year % 4 == 0
while(True):
year = int(input("Enter a year:\n"))
print(leap_year(year))
|
[
"noreply@github.com"
] |
rajuraj-rgb.noreply@github.com
|
d24f038c49fabcf817d8e83e5985d312402b09ee
|
f4b60f5e49baf60976987946c20a8ebca4880602
|
/lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/fv/rttoepipforeptoeptask.py
|
72948c45ecf44d8a43c5c7675999f2c87dff7f63
|
[] |
no_license
|
cqbomb/qytang_aci
|
12e508d54d9f774b537c33563762e694783d6ba8
|
a7fab9d6cda7fadcc995672e55c0ef7e7187696e
|
refs/heads/master
| 2022-12-21T13:30:05.240231
| 2018-12-04T01:46:53
| 2018-12-04T01:46:53
| 159,911,666
| 0
| 0
| null | 2022-12-07T23:53:02
| 2018-12-01T05:17:50
|
Python
|
UTF-8
|
Python
| false
| false
| 17,087
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtToEpIpForEpToEpTask(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = ClassMeta("cobra.model.fv.RtToEpIpForEpToEpTask")
meta.moClassName = "fvRtToEpIpForEpToEpTask"
meta.rnFormat = "fvRtToEpIpForEpToEpTask-%(id)s"
meta.category = MoCategory.TASK
meta.label = "None"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.action.TopomgrSubj")
meta.parentClasses.add("cobra.model.action.ObserverSubj")
meta.parentClasses.add("cobra.model.action.VmmmgrSubj")
meta.parentClasses.add("cobra.model.action.SnmpdSubj")
meta.parentClasses.add("cobra.model.action.ScripthandlerSubj")
meta.parentClasses.add("cobra.model.action.ConfelemSubj")
meta.parentClasses.add("cobra.model.action.EventmgrSubj")
meta.parentClasses.add("cobra.model.action.OspaelemSubj")
meta.parentClasses.add("cobra.model.action.VtapSubj")
meta.parentClasses.add("cobra.model.action.OshSubj")
meta.parentClasses.add("cobra.model.action.DhcpdSubj")
meta.parentClasses.add("cobra.model.action.ObserverelemSubj")
meta.parentClasses.add("cobra.model.action.DbgrelemSubj")
meta.parentClasses.add("cobra.model.action.VleafelemSubj")
meta.parentClasses.add("cobra.model.action.NxosmockSubj")
meta.parentClasses.add("cobra.model.action.DbgrSubj")
meta.parentClasses.add("cobra.model.action.AppliancedirectorSubj")
meta.parentClasses.add("cobra.model.action.OpflexpSubj")
meta.parentClasses.add("cobra.model.action.BootmgrSubj")
meta.parentClasses.add("cobra.model.action.AeSubj")
meta.parentClasses.add("cobra.model.action.PolicymgrSubj")
meta.parentClasses.add("cobra.model.action.ExtXMLApiSubj")
meta.parentClasses.add("cobra.model.action.OpflexelemSubj")
meta.parentClasses.add("cobra.model.action.PolicyelemSubj")
meta.parentClasses.add("cobra.model.action.IdmgrSubj")
meta.superClasses.add("cobra.model.action.RInst")
meta.superClasses.add("cobra.model.pol.ComplElem")
meta.superClasses.add("cobra.model.task.Inst")
meta.superClasses.add("cobra.model.action.Inst")
meta.rnPrefixes = [
('fvRtToEpIpForEpToEpTask-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "data", "data", 52, PropCategory.REGULAR)
prop.label = "Data"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("data", prop)
prop = PropMeta("str", "descr", "descr", 33, PropCategory.REGULAR)
prop.label = "Description"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "endTs", "endTs", 15575, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("endTs", prop)
prop = PropMeta("str", "fail", "fail", 46, PropCategory.REGULAR)
prop.label = "Fail"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("fail", prop)
prop = PropMeta("str", "id", "id", 24129, PropCategory.REGULAR)
prop.label = "ID"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("DbgacEpIpForEpToEp", "dbgacepipforeptoep", 2127)
prop._addConstant("none", "none", 0)
meta.props.add("id", prop)
prop = PropMeta("str", "invErrCode", "invErrCode", 49, PropCategory.REGULAR)
prop.label = "Remote Error Code"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("ERR-FILTER-illegal-format", None, 1140)
prop._addConstant("ERR-FSM-no-such-state", None, 1160)
prop._addConstant("ERR-HTTP-set-error", None, 1551)
prop._addConstant("ERR-HTTPS-set-error", None, 1552)
prop._addConstant("ERR-MO-CONFIG-child-object-cant-be-configured", None, 1130)
prop._addConstant("ERR-MO-META-no-such-object-class", None, 1122)
prop._addConstant("ERR-MO-PROPERTY-no-such-property", None, 1121)
prop._addConstant("ERR-MO-PROPERTY-value-out-of-range", None, 1120)
prop._addConstant("ERR-MO-access-denied", None, 1170)
prop._addConstant("ERR-MO-deletion-rule-violation", None, 1107)
prop._addConstant("ERR-MO-duplicate-object", None, 1103)
prop._addConstant("ERR-MO-illegal-containment", None, 1106)
prop._addConstant("ERR-MO-illegal-creation", None, 1105)
prop._addConstant("ERR-MO-illegal-iterator-state", None, 1100)
prop._addConstant("ERR-MO-illegal-object-lifecycle-transition", None, 1101)
prop._addConstant("ERR-MO-naming-rule-violation", None, 1104)
prop._addConstant("ERR-MO-object-not-found", None, 1102)
prop._addConstant("ERR-MO-resource-allocation", None, 1150)
prop._addConstant("ERR-aaa-config-modify-error", None, 1520)
prop._addConstant("ERR-acct-realm-set-error", None, 1513)
prop._addConstant("ERR-add-ctrlr", None, 1574)
prop._addConstant("ERR-admin-passwd-set", None, 1522)
prop._addConstant("ERR-api", None, 1571)
prop._addConstant("ERR-auth-issue", None, 1548)
prop._addConstant("ERR-auth-realm-set-error", None, 1514)
prop._addConstant("ERR-authentication", None, 1534)
prop._addConstant("ERR-authorization-required", None, 1535)
prop._addConstant("ERR-connect", None, 1572)
prop._addConstant("ERR-create-domain", None, 1562)
prop._addConstant("ERR-create-keyring", None, 1560)
prop._addConstant("ERR-create-role", None, 1526)
prop._addConstant("ERR-create-user", None, 1524)
prop._addConstant("ERR-delete-domain", None, 1564)
prop._addConstant("ERR-delete-role", None, 1528)
prop._addConstant("ERR-delete-user", None, 1523)
prop._addConstant("ERR-domain-set-error", None, 1561)
prop._addConstant("ERR-http-initializing", None, 1549)
prop._addConstant("ERR-incompat-ctrlr-version", None, 1568)
prop._addConstant("ERR-internal-error", None, 1540)
prop._addConstant("ERR-invalid-args", None, 1569)
prop._addConstant("ERR-invalid-domain-name", None, 1582)
prop._addConstant("ERR-ldap-delete-error", None, 1510)
prop._addConstant("ERR-ldap-get-error", None, 1509)
prop._addConstant("ERR-ldap-group-modify-error", None, 1518)
prop._addConstant("ERR-ldap-group-set-error", None, 1502)
prop._addConstant("ERR-ldap-set-error", None, 1511)
prop._addConstant("ERR-missing-method", None, 1546)
prop._addConstant("ERR-modify-ctrlr-access", None, 1567)
prop._addConstant("ERR-modify-ctrlr-dvs-version", None, 1576)
prop._addConstant("ERR-modify-ctrlr-rootcont", None, 1575)
prop._addConstant("ERR-modify-ctrlr-scope", None, 1573)
prop._addConstant("ERR-modify-ctrlr-trig-inventory", None, 1577)
prop._addConstant("ERR-modify-domain", None, 1563)
prop._addConstant("ERR-modify-domain-encapmode", None, 1581)
prop._addConstant("ERR-modify-domain-enfpref", None, 1578)
prop._addConstant("ERR-modify-domain-mcastpool", None, 1579)
prop._addConstant("ERR-modify-domain-mode", None, 1580)
prop._addConstant("ERR-modify-role", None, 1527)
prop._addConstant("ERR-modify-user", None, 1525)
prop._addConstant("ERR-modify-user-domain", None, 1565)
prop._addConstant("ERR-modify-user-role", None, 1532)
prop._addConstant("ERR-no-buf", None, 1570)
prop._addConstant("ERR-passwd-set-failure", None, 1566)
prop._addConstant("ERR-provider-group-modify-error", None, 1519)
prop._addConstant("ERR-provider-group-set-error", None, 1512)
prop._addConstant("ERR-radius-global-set-error", None, 1505)
prop._addConstant("ERR-radius-group-set-error", None, 1501)
prop._addConstant("ERR-radius-set-error", None, 1504)
prop._addConstant("ERR-request-timeout", None, 1545)
prop._addConstant("ERR-role-set-error", None, 1515)
prop._addConstant("ERR-secondary-node", None, 1550)
prop._addConstant("ERR-service-not-ready", None, 1539)
prop._addConstant("ERR-set-password-strength-check", None, 1543)
prop._addConstant("ERR-store-pre-login-banner-msg", None, 1521)
prop._addConstant("ERR-tacacs-enable-error", None, 1508)
prop._addConstant("ERR-tacacs-global-set-error", None, 1507)
prop._addConstant("ERR-tacacs-group-set-error", None, 1503)
prop._addConstant("ERR-tacacs-set-error", None, 1506)
prop._addConstant("ERR-user-account-expired", None, 1536)
prop._addConstant("ERR-user-set-error", None, 1517)
prop._addConstant("ERR-xml-parse-error", None, 1547)
prop._addConstant("communication-error", "communication-error", 1)
prop._addConstant("none", "none", 0)
meta.props.add("invErrCode", prop)
prop = PropMeta("str", "invErrDescr", "invErrDescr", 50, PropCategory.REGULAR)
prop.label = "Remote Error Description"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("invErrDescr", prop)
prop = PropMeta("str", "invRslt", "invRslt", 48, PropCategory.REGULAR)
prop.label = "Remote Result"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "not-applicable"
prop._addConstant("capability-not-implemented-failure", "capability-not-implemented-failure", 16384)
prop._addConstant("capability-not-implemented-ignore", "capability-not-implemented-ignore", 8192)
prop._addConstant("capability-not-supported", "capability-not-supported", 32768)
prop._addConstant("capability-unavailable", "capability-unavailable", 65536)
prop._addConstant("end-point-failed", "end-point-failed", 32)
prop._addConstant("end-point-protocol-error", "end-point-protocol-error", 64)
prop._addConstant("end-point-unavailable", "end-point-unavailable", 16)
prop._addConstant("extend-timeout", "extend-timeout", 134217728)
prop._addConstant("failure", "failure", 1)
prop._addConstant("fru-identity-indeterminate", "fru-identity-indeterminate", 4194304)
prop._addConstant("fru-info-malformed", "fru-info-malformed", 8388608)
prop._addConstant("fru-not-ready", "fru-not-ready", 67108864)
prop._addConstant("fru-not-supported", "fru-not-supported", 536870912)
prop._addConstant("fru-state-indeterminate", "fru-state-indeterminate", 33554432)
prop._addConstant("fw-defect", "fw-defect", 256)
prop._addConstant("hw-defect", "hw-defect", 512)
prop._addConstant("illegal-fru", "illegal-fru", 16777216)
prop._addConstant("intermittent-error", "intermittent-error", 1073741824)
prop._addConstant("internal-error", "internal-error", 4)
prop._addConstant("not-applicable", "not-applicable", 0)
prop._addConstant("resource-capacity-exceeded", "resource-capacity-exceeded", 2048)
prop._addConstant("resource-dependency", "resource-dependency", 4096)
prop._addConstant("resource-unavailable", "resource-unavailable", 1024)
prop._addConstant("service-not-implemented-fail", "service-not-implemented-fail", 262144)
prop._addConstant("service-not-implemented-ignore", "service-not-implemented-ignore", 131072)
prop._addConstant("service-not-supported", "service-not-supported", 524288)
prop._addConstant("service-protocol-error", "service-protocol-error", 2097152)
prop._addConstant("service-unavailable", "service-unavailable", 1048576)
prop._addConstant("sw-defect", "sw-defect", 128)
prop._addConstant("task-reset", "task-reset", 268435456)
prop._addConstant("timeout", "timeout", 8)
prop._addConstant("unidentified-fail", "unidentified-fail", 2)
meta.props.add("invRslt", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "oDn", "oDn", 51, PropCategory.REGULAR)
prop.label = "Subject DN"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("oDn", prop)
prop = PropMeta("str", "operSt", "operSt", 15674, PropCategory.REGULAR)
prop.label = "Completion"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "scheduled"
prop._addConstant("cancelled", "cancelled", 3)
prop._addConstant("completed", "completed", 2)
prop._addConstant("crashsuspect", "crash-suspect", 7)
prop._addConstant("failed", "failed", 4)
prop._addConstant("indeterminate", "indeterminate", 5)
prop._addConstant("processing", "processing", 1)
prop._addConstant("ready", "ready", 8)
prop._addConstant("scheduled", "scheduled", 0)
prop._addConstant("suspended", "suspended", 6)
meta.props.add("operSt", prop)
prop = PropMeta("str", "originMinority", "originMinority", 54, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = False
prop.defaultValueStr = "no"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("originMinority", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "runId", "runId", 45, PropCategory.REGULAR)
prop.label = "ID"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("runId", prop)
prop = PropMeta("str", "startTs", "startTs", 36, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("startTs", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "try", "try", 15574, PropCategory.REGULAR)
prop.label = "Try"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("try", prop)
prop = PropMeta("str", "ts", "ts", 47, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("ts", prop)
meta.namingProps.append(getattr(meta.props, "id"))
def __init__(self, parentMoOrDn, id, markDirty=True, **creationProps):
namingVals = [id]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"collinsctk@qytang.com"
] |
collinsctk@qytang.com
|
22380181ee62e3e0c5ad86e1f55f3efe751586f2
|
4bc19f4dd098ebedcb6ee78af0ae12cb633671fe
|
/static/models.py
|
6b20f6e53773a0c91a0dea611f37cb86192e1a7f
|
[] |
no_license
|
StanislavKraev/rekvizitka
|
958ab0e002335613a724fb14a8e4123f49954446
|
ac1f30e7bb2e987b3b0bda4c2a8feda4d3f5497f
|
refs/heads/master
| 2021-01-01T05:44:56.372748
| 2016-04-27T19:20:26
| 2016-04-27T19:20:26
| 57,240,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,978
|
py
|
# -*- coding: utf-8 -*-
import bson
from rek.mongo.models import ObjectManager
class StaticPage(object):
objects = None
def __init__(self, name="", alias="", preview="", content="", enabled=False, _id=None):
        self.name = name # = models.CharField (max_length=255, unique=True, verbose_name='Name')
        self.alias = alias #= models.CharField (max_length=255, unique=True, verbose_name='Alias URL')
        self.preview = preview #= models.TextField (blank=True, verbose_name='Short description (preview)')
        self.content = content #= models.TextField (verbose_name='Content (full text)')
        self.enabled = enabled #= models.BooleanField (verbose_name='Enable and display the page')
self._id = _id
def save (self):
if not self._id:
result = self.objects.collection.insert({
'name' : self.name,
'alias' : self.alias,
'preview' : self.preview,
'content' : self.content,
'enabled' : self.enabled
})
if isinstance(result, bson.ObjectId):
self._id = result
return result
else:
raise Exception('Can not add static page')
self.objects.collection.update({'_id' : self._id}, {
'name' : self.name,
'alias' : self.alias,
'preview' : self.preview,
'content' : self.content,
'enabled' : self.enabled
})
return self._id
def delete(self):
if not self._id:
return
self.objects.collection.remove({'_id' : self._id})
StaticPage.objects = ObjectManager(StaticPage, 'static_pages', indexes = [('alias', 1)])
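# Hypothetical usage sketch (not part of the original file): creating, updating and
# deleting a page through the manager wired up above. Requires the project's MongoDB
# connection (rek.mongo) to be configured; field values are made up.
page = StaticPage(name='About', alias='about', content='<p>Hello</p>', enabled=True)
page.save()                      # insert: _id is filled from the returned ObjectId
page.content = '<p>Updated</p>'
page.save()                      # update: same _id, new field values
page.delete()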
|
[
"kraevst@yandex.ru"
] |
kraevst@yandex.ru
|
b29bef8a54edc0cb2a105e7d80570805fa5d4ef6
|
395e64776ee7c435e9c8ccea6c2bf0d987770153
|
/mayaSDK/maya/app/renderSetup/model/issue.py
|
89befa2f2c99883385ec3b76ee72ca01b3c505c5
|
[
"MIT"
] |
permissive
|
FXTD-ODYSSEY/vscode-mayapy
|
e1ba63021a2559287073ca2ddf90b634f95ba7cb
|
5766a0bf0a007ca61b8249f7dfb329f1dfcdbfbb
|
refs/heads/master
| 2023-03-07T08:01:27.965144
| 2022-04-02T02:46:31
| 2022-04-02T02:46:31
| 208,568,717
| 29
| 11
|
MIT
| 2023-03-03T06:45:31
| 2019-09-15T09:10:58
|
Python
|
UTF-8
|
Python
| false
| false
| 695
|
py
|
class Issue(object):
"""
Class representing an issue that contains
- a description (a short string explaining what's the issue)
- a type, mostly used for UI purpose (icon for the issue will be RS_<type>.png)
- a callback to resolve the issue (assisted resolve).
"""
def __eq__(self, o):
pass
def __hash__(self):
pass
def __init__(self, description, type="'warning'", resolveCallback='None'):
pass
def __str__(self):
pass
def resolve(self):
pass
__dict__ = None
__weakref__ = None
description = None
type = None
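# Hypothetical sketch (not part of the original stub): how an Issue is typically
# constructed and resolved, inferred only from the docstring above. In this SDK stub
# the method bodies are stripped to `pass`, so running it does nothing; the real Maya
# module is expected to invoke the callback. The callback and description are made up.
def _fix_missing_layer():
    print('creating the missing render layer')

issue = Issue('Missing render layer', type='error', resolveCallback=_fix_missing_layer)
issue.resolve()   # assisted resolve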
|
[
"820472580@qq.com"
] |
820472580@qq.com
|
1529ed683976e4891a4de141925cc416e43d3162
|
1e5f5e4cb512663b5017b9a08b409154678f0bef
|
/product/forms.py
|
56fb76d226633d363aa26408be65cf6a1c46ecaa
|
[] |
no_license
|
dynamodenis/dynamoshop
|
d2159e9c0271825a85e8a1d6322ca6f11cdb2e4b
|
6f8f99149cf9594b8f0dd79f49a49c4fa00eaaaa
|
refs/heads/master
| 2022-12-29T13:09:49.848510
| 2020-10-22T07:19:43
| 2020-10-22T07:19:43
| 306,121,092
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,311
|
py
|
from django import forms
from django_countries.fields import CountryField
from django_countries.widgets import CountrySelectWidget
PAYMENT_CHOICES = (
('S', 'Stripe'),
('P', 'PayPal')
)
class CheckoutForm(forms.Form):
# Shipping Fields
shipping_address = forms.CharField(required=False)
shipping_address2 = forms.CharField(required=False)
shipping_country = CountryField(blank_label='select country').formfield( required = False, widget=CountrySelectWidget(attrs={
'class':'custom-select d-block w-100 form-control',
}))
shipping_zip=forms.CharField(required=False)
same_billing_address = forms.BooleanField( required=False)
set_default_shipping = forms.BooleanField( required=False)
use_default_shipping = forms.BooleanField( required=False)
# Billing Fields
billing_address = forms.CharField(required=False)
billing_address2 = forms.CharField(required=False)
billing_country = CountryField(blank_label='select country').formfield( required = False, widget=CountrySelectWidget(attrs={
'class':'custom-select d-block w-100 form-control',
}))
billing_zip=forms.CharField(required=False)
same_shipping_address = forms.BooleanField( required=False)
set_default_billing = forms.BooleanField( required=False)
use_default_billing = forms.BooleanField( required=False)
payment_options = forms.ChoiceField(widget=forms.RadioSelect, choices=PAYMENT_CHOICES)
# coupon form
class CouponForm(forms.Form):
code = forms.CharField(widget = forms.TextInput(attrs={
'class':'form-control',
'placeholder':'Promo Code',
'aria-label':"Recipient's username",
'aria-describedby':"basic-addon2"
}))
# Request refunds
class RequestRefundForm(forms.Form):
ref_code = forms.CharField(widget = forms.TextInput(attrs={
'class':'form-control'
}))
message = forms.CharField(widget=forms.Textarea(attrs={
'class':'md-textarea form-control',
'rows':2
}))
email = forms.EmailField(widget = forms.TextInput(attrs={
'class':'form-control'
}))
class PaymentForm(forms.Form):
stripeToken = forms.CharField(required=False)
save = forms.BooleanField(required=False)
use_default = forms.BooleanField(required=False)
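# Hypothetical sketch (not part of the original file): validating RequestRefundForm
# outside a view, e.g. from `manage.py shell` in a configured Django project; the
# field values are made up.
form = RequestRefundForm({
    'ref_code': 'ABC123',
    'message': 'Item arrived damaged.',
    'email': 'customer@example.com',
})
print(form.is_valid())    # True when all three fields validate
print(form.cleaned_data)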
|
[
"dmbugua66@gmail.com"
] |
dmbugua66@gmail.com
|
bf94ef910d83505e2e42f417c333a310b9b7bc7b
|
b6c4b9ff1d9c10f5bc0a0ad657c00c2513ddaafb
|
/PY4E/gmane/gmane.py
|
c2c4e65c9aad65bab9ed843f9ec3fe0f6c70ae1d
|
[] |
no_license
|
wjasonhuang/python_utils
|
469c88066ff3055e39e3a30a6ec68f85a196b259
|
025f7257445bd7d97b605dc062d3935b14bf0a19
|
refs/heads/master
| 2021-06-18T14:29:32.487568
| 2021-06-17T03:28:57
| 2021-06-17T03:28:57
| 206,687,251
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,639
|
py
|
import re
import sqlite3
import ssl
import time
import urllib.request
from datetime import datetime
# Not all systems have this so conditionally define parser
try:
import dateutil.parser as parser
except:
pass
def parsemaildate(md):
# See if we have dateutil
try:
        pdate = parser.parse(md)  # parse the argument rather than the global tdate
test_at = pdate.isoformat()
return test_at
except:
pass
# Non-dateutil version - we try our best
pieces = md.split()
notz = " ".join(pieces[:4]).strip()
# Try a bunch of format variations - strptime() is *lame*
dnotz = None
for form in ['%d %b %Y %H:%M:%S', '%d %b %Y %H:%M:%S',
'%d %b %Y %H:%M', '%d %b %Y %H:%M', '%d %b %y %H:%M:%S',
'%d %b %y %H:%M:%S', '%d %b %y %H:%M', '%d %b %y %H:%M']:
try:
dnotz = datetime.strptime(notz, form)
break
except:
continue
if dnotz is None:
# print 'Bad Date:',md
return None
iso = dnotz.isoformat()
tz = "+0000"
try:
tz = pieces[4]
ival = int(tz) # Only want numeric timezone values
if tz == '-0000': tz = '+0000'
tzh = tz[:3]
tzm = tz[3:]
tz = tzh + ":" + tzm
except:
pass
return iso + tz
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
conn = sqlite3.connect('content.sqlite')
cur = conn.cursor()
baseurl = "http://mbox.dr-chuck.net/sakai.devel/"
cur.execute('''CREATE TABLE IF NOT EXISTS Messages
(id INTEGER UNIQUE, email TEXT, sent_at TEXT,
subject TEXT, headers TEXT, body TEXT)''')
# Pick up where we left off
start = None
cur.execute('SELECT max(id) FROM Messages')
try:
row = cur.fetchone()
if row is None:
start = 0
else:
start = row[0]
except:
start = 0
if start is None: start = 0
many = 0
count = 0
fail = 0
while True:
if (many < 1):
conn.commit()
sval = input('How many messages:')
if (len(sval) < 1): break
many = int(sval)
start = start + 1
cur.execute('SELECT id FROM Messages WHERE id=?', (start,))
try:
row = cur.fetchone()
if row is not None: continue
except:
row = None
many = many - 1
url = baseurl + str(start) + '/' + str(start + 1)
text = "None"
try:
# Open with a timeout of 30 seconds
document = urllib.request.urlopen(url, None, 30, context=ctx)
text = document.read().decode()
if document.getcode() != 200:
print("Error code=", document.getcode(), url)
break
except KeyboardInterrupt:
print('')
print('Program interrupted by user...')
break
except Exception as e:
print("Unable to retrieve or parse page", url)
print("Error", e)
fail = fail + 1
if fail > 5: break
continue
print(url, len(text))
count = count + 1
if not text.startswith("From "):
print(text)
print("Did not find From ")
fail = fail + 1
if fail > 5: break
continue
pos = text.find("\n\n")
if pos > 0:
hdr = text[:pos]
body = text[pos + 2:]
else:
print(text)
print("Could not find break between headers and body")
fail = fail + 1
if fail > 5: break
continue
email = None
x = re.findall('\nFrom: .* <(\S+@\S+)>\n', hdr)
if len(x) == 1:
email = x[0];
email = email.strip().lower()
email = email.replace("<", "")
else:
x = re.findall('\nFrom: (\S+@\S+)\n', hdr)
if len(x) == 1:
email = x[0];
email = email.strip().lower()
email = email.replace("<", "")
date = None
y = re.findall('\Date: .*, (.*)\n', hdr)
if len(y) == 1:
tdate = y[0]
tdate = tdate[:26]
try:
sent_at = parsemaildate(tdate)
except:
print(text)
print("Parse fail", tdate)
fail = fail + 1
if fail > 5: break
continue
subject = None
z = re.findall('\Subject: (.*)\n', hdr)
if len(z) == 1: subject = z[0].strip().lower();
# Reset the fail counter
fail = 0
print(" ", email, sent_at, subject)
cur.execute('''INSERT OR IGNORE INTO Messages (id, email, sent_at, subject, headers, body)
VALUES ( ?, ?, ?, ?, ?, ? )''', (start, email, sent_at, subject, hdr, body))
if count % 50 == 0: conn.commit()
if count % 100 == 0: time.sleep(1)
conn.commit()
cur.close()
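# Hypothetical sketch (not part of the original file): reading back a few of the rows
# the crawler above stores in content.sqlite.
import sqlite3

conn2 = sqlite3.connect('content.sqlite')
cur2 = conn2.cursor()
for msg_id, email_addr, sent_at, subject in cur2.execute(
        'SELECT id, email, sent_at, subject FROM Messages ORDER BY id DESC LIMIT 10'):
    print(msg_id, sent_at, email_addr, subject)
cur2.close()
conn2.close()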
|
[
"wjasonhuang@users.noreply.github.com"
] |
wjasonhuang@users.noreply.github.com
|
b68ebcc7b6694ad09b338b4dd3df248d51b36215
|
42516b0348936e257d04113c2e632dc72ba58e91
|
/test_env/test_suit_grouptest/test_suit_grouptest_case02.py
|
424b547e43f3826b8fb303c0a6bdc2f24797de7d
|
[] |
no_license
|
wwlwwlqaz/Qualcomm
|
2c3a225875fba955d771101f3c38ca0420d8f468
|
a04b717ae437511abae1e7e9e399373c161a7b65
|
refs/heads/master
| 2021-01-11T19:01:06.123677
| 2017-04-05T07:57:21
| 2017-04-05T07:57:21
| 79,292,426
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,243
|
py
|
#coding=utf-8
import fs_wrapper
import settings.common as SC
from qrd_shared.case import *
from case_utility import *
from logging_wrapper import log_test_case, take_screenshot
from test_case_base import TestCaseBase
from thrift_gen_qsstservice.GroupTest.constants import *
from settings import common
import test_suit_ui_message.test_suit_ui_message as uiMessage
import test_suit_grouptest as GT
expected_group_member = 2
group_name = 'group_sms_send_receive'
role_name_A = 'A'
role_name_B = 'B'
action_read_sms = "read sms"
class test_suit_grouptest_case02(TestCaseBase):
'''
@see: L{TestCaseBase <TestCaseBase>}
'''
TAG = "test_suit_grouptest_case02"
def test_case_main(self, case_results):
log_test_case(self.TAG, 'start')
#(DEVICE_NAME_KEY,SLOT1_PHONE_NUMBER_KEY,ACT_AS_HOST_KEY,ROLE_NAME_KEY,GROUP_NAME_KEY)
deviceInformation = dict()
GT.deleteOldSerialNumber()
deviceName = get_serial_number()
deviceInformation[DEVICE_NAME_KEY] = deviceName
deviceInformation[ACT_AS_HOST_KEY] = str(common.PUBLIC_ACT_AS_GROUP_TEST_HOST)
deviceInformation[GROUP_NAME_KEY] = group_name
deviceInformation[ROLE_NAME_KEY] = ' '
deviceInformation[SLOT1_PHONE_NUMBER_KEY] = common.PUBLIC_SLOT1_PHONE_NUMBER
log_test_case('deviceInformation', 'end')
try:
init_group_database()
attend_group(deviceInformation)
log_test_case('attend_group()', 'end')
wait_for_group_members(expected_group_member,group_name,deviceInformation[ACT_AS_HOST_KEY])
log_test_case('wait_for_group_members()', 'end')
if(common.PUBLIC_ACT_AS_GROUP_TEST_HOST == True):
log_test_case('HOST', 'start')
set_role_name(deviceName,role_name_A)
set_status(STATUS_READY_VALUE,deviceName)
#assign the roles
members = get_group_members(group_name)
log_test_framework(self.TAG, "get_group_members:" +str(members))
roleArray = [role_name_B]
i = 0
for member in members:
if(cmp(member[ACT_AS_HOST_KEY],'True')!=0):
set_role_name(member[DEVICE_NAME_KEY],roleArray[i])
i+=1
log_test_framework(self.TAG, "assign roles finished")
#wait for all members ready
wait_for_members_ready(expected_group_member,group_name)
log_test_framework(self.TAG, "all members are ready")
#send sms to B
log_test_case('HOST', 'send sms start')
phoneNumberB = get_slot1_number_by_role_name(role_name_B,group_name)
log_test_case('slot1 number in phone B',phoneNumberB)
content = 'This is group test, send sms'
log_test_case('send mms','start')
send_mms(phoneNumberB, content)
log_test_case('send mms','end')
sleep(20)
log_test_case('HOST', 'deliver action to B')
deliver_action_by_role_name(action_read_sms,group_name)
set_status(STATUS_FINISHED_VALUE,deviceName)
wait_for_members_finished(expected_group_member,group_name);
log_test_case('HOST', 'wait_for_members_finished() finished')
resultB = get_test_result_by_role_name(role_name_B,group_name)
if( cmp(resultB,RESULT_SUCCESS_VALUE)==0 ):
destroy_group(group_name)
return True
else:
destroy_group(group_name)
return False
else:# here is slave B' code
log_test_case('SLAVE', 'start')
roleName = wait_for_role_name(deviceName)
if(cmp(roleName,role_name_B)==0):
set_status(STATUS_READY_VALUE,deviceName)
log_test_case('SLAVE', "B is ready")
launcher.launch_from_launcher('mms')
num1 = uiMessage.get_unread_number()
func = lambda:uiMessage.get_unread_number()>num1
if wait_for_fun(func, True, 500000):
#read the message
phoneNumberA = get_slot1_number_by_role_name(role_name_A,group_name)
content = 'This is group test, send sms'
log_test_case('Host phoneNumber',phoneNumberA)
if uiMessage.msg_exist(phoneNumberA,content) is True:
set_test_result(RESULT_SUCCESS_VALUE,deviceName)
set_status(STATUS_FINISHED_VALUE,deviceName)
set_test_result(RESULT_FAILURE_VALUE,deviceName)
set_status(STATUS_FINISHED_VALUE,deviceName)
wait_for_group_destroyed(group_name)
return False
except Exception, tx:
set_status(STATUS_FINISHED_VALUE,deviceName)
log_test_framework(self.TAG, "exception is "+str(tx))
wait_for_group_destroyed(group_name)
return True
|
[
"c_wwan@qti.qualcomm.com"
] |
c_wwan@qti.qualcomm.com
|
864076061ac7c68a3e7586dad3541c298d749288
|
62c634097cd25945541c64f8accdc2c6393cfe7d
|
/NLP_Project/ObjectSegmentation/exp-referit/exp_train_referit_det.py
|
2bbb8c1345150c383baa8323cae864ac83770a58
|
[] |
no_license
|
aftaabmd/Object-segmentation-defined-by-Query-expressions
|
c9c9321d7b5c0d3771ca7cc164be73c75b8fb7d5
|
5fc3c5d00b1dc2bbb4cff2095a85f494f08271fb
|
refs/heads/master
| 2022-11-27T11:25:16.351576
| 2018-03-08T04:50:55
| 2018-03-08T04:50:55
| 124,337,512
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,135
|
py
|
from __future__ import absolute_import, division, print_function
import sys
import os; os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]
import tensorflow as tf
import numpy as np
from models import text_objseg_model as segmodel
from util import data_reader
from util import loss
# Parameters
# Model Params
T = 20
N = 50
vocab_size = 8803
embedded_dim = 1000
lstm_dim = 1000
mlp_hidden_dims = 500
# Initialization Params
convnet_params = './models/convert_caffemodel/params/vgg_params.npz'
mlp_l1_std = 0.05
mlp_l2_std = 0.1
# Training Params
positive_loss_multiplier = 1.
negative_loss_multiplier = 1.
start_learningrate = 0.01
learningrate_decay_step = 10000
learningrate_decay_rate = 0.1
weight_decay = 0.005
momentum = 0.8
max_iter = 25000
fix_convnet = True
vgg_dropout = False
mlp_dropout = False
vgg_learningrate_mult = 1.
# Data Params
data_folder = './exp-referit/data/train_batch_det/'
data_prefix = 'referit_train_det'
# Snapshot Params
snapshot = 5000
snapshot_file = './exp-referit/tfmodel/referit_fc8_detect_iteration_%d.tfmodel'
# The model
# Inputs
text_seq_batch = tf.placeholder(tf.int32, [T, N])
imagecrop_batch = tf.placeholder(tf.float32, [N, 224, 224, 3])
spatial_batch = tf.placeholder(tf.float32, [N, 8])
label_batch = tf.placeholder(tf.float32, [N, 1])
# Outputs
scores = segmodel.text_objseg_region(text_seq_batch, imagecrop_batch,
spatial_batch, vocab_size, embedded_dim, lstm_dim, mlp_hidden_dims,
vgg_dropout=vgg_dropout, mlp_dropout=mlp_dropout)
# Collect trainable variables, regularized variables and learning rates
# Only train the fc layers of convnet and keep conv layers fixed
if fix_convnet:
train_variable_list = [variable for variable in tf.trainable_variables()
if not variable.name.startswith('vgg_local/')]
else:
train_variable_list = [variable for variable in tf.trainable_variables()
if not variable.name.startswith('vgg_local/conv')]
print('Collecting variables to train:')
for variable in train_variable_list: print('\t%s' % variable.name)
print('Done.')
# Add regularization to weight matrices (excluding bias)
regularization_variable_list = [variable for variable in tf.trainable_variables()
if (variable in train_variable_list) and
(variable.name[-9:-2] == 'weights' or variable.name[-8:-2] == 'Matrix')]
print('Collecting variables for regularization:')
for variable in regularization_variable_list: print('\t%s' % variable.name)
print('Done.')
# Collect learning rate for trainable variables
variable_learningrate_mult = {variable: (vgg_learningrate_mult if variable.name.startswith('vgg_local') else 1.0)
for variable in train_variable_list}
print('Variable learning rate multiplication:')
for variable in train_variable_list:
print('\t%s: %f' % (variable.name, variable_learningrate_mult[variable]))
print('Done.')
# Loss function and accuracy
classification_loss = loss.weighed_logistic_loss(scores, label_batch, positive_loss_multiplier, negative_loss_multiplier)
regularization_loss = loss.l2_regularization_loss(regularization_variable_list, weight_decay)
total_loss = classification_loss + regularization_loss
def compute_accuracy(scores, labels):
is_positive = (labels != 0)
is_negative = np.logical_not(is_positive)
num_all = labels.shape[0]
num_positive = np.sum(is_positive)
num_negative = num_all - num_positive
is_correct = np.logical_xor(scores < 0, is_positive)
accuracy_all = np.sum(is_correct) / num_all
accuracy_positive = np.sum(is_correct[is_positive]) / num_positive
accuracy_negative = np.sum(is_correct[is_negative]) / num_negative
return accuracy_all, accuracy_positive, accuracy_negative
# Solver
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(start_learningrate, global_step, learningrate_decay_step,
learningrate_decay_rate, staircase=True)
solver = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum)
# Compute gradients
grads_and_variables = solver.compute_gradients(total_loss, var_list=train_variable_list)
# Apply learning rate multiplication to gradients
grads_and_variables = [((g if variable_learningrate_mult[v] == 1 else tf.mul(variable_learningrate_mult[v], g)), v)
for g, v in grads_and_variables]
# Apply gradients
train_step = solver.apply_gradients(grads_and_variables, global_step=global_step)
# Initialize parameters and load data
init_ops = []
# Initialize CNN Parameters
convnet_layers = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2',
'conv3_1', 'conv3_2', 'conv3_3',
'conv4_1', 'conv4_2', 'conv4_3',
'conv5_1', 'conv5_2', 'conv5_3', 'fc6', 'fc7', 'fc8']
processed_params = np.load(convnet_params)
processed_W = processed_params['processed_W'][()]
processed_B = processed_params['processed_B'][()]
with tf.variable_scope('vgg_local', reuse=True):
for l_name in convnet_layers:
assign_W = tf.assign(tf.get_variable(l_name + '/weights'), processed_W[l_name])
assign_B = tf.assign(tf.get_variable(l_name + '/biases'), processed_B[l_name])
init_ops += [assign_W, assign_B]
# Initialize classifier Parameters
with tf.variable_scope('classifier', reuse=True):
mlp_l1 = tf.get_variable('mlp_l1/weights')
mlp_l2 = tf.get_variable('mlp_l2/weights')
init_mlp_l1 = tf.assign(mlp_l1, np.random.normal(
0, mlp_l1_std, mlp_l1.get_shape().as_list()).astype(np.float32))
init_mlp_l2 = tf.assign(mlp_l2, np.random.normal(
0, mlp_l2_std, mlp_l2.get_shape().as_list()).astype(np.float32))
init_ops += [init_mlp_l1, init_mlp_l2]
processed_params.close()
# Load data
reader = data_reader.DataReader(data_folder, data_prefix)
snapshot_saver = tf.train.Saver()
sess = tf.Session()
# Run Initialization operations
sess.run(tf.initialize_all_variables())
sess.run(tf.group(*init_ops))
# Optimization loop
classification_loss_avg = 0
avg_accuracy_all, avg_accuracy_positive, avg_accuracy_negative = 0, 0, 0
decay = 0.99
# Run optimization
for n_iter in range(max_iter):
# Read one batch
batch = reader.read_batch()
text_seq_val = batch['text_seq_batch']
imagecrop_val = batch['imagecrop_batch'].astype(np.float32) - segmodel.vgg_net.channel_mean
spatial_batch_val = batch['spatial_batch']
label_val = batch['label_batch'].astype(np.float32)
loss_mult_val = label_val * (positive_loss_multiplier - negative_loss_multiplier) + negative_loss_multiplier
# Forward and Backward pass
scores_val, classification_loss_val, _, learningrate_val = sess.run([scores, classification_loss, train_step, learning_rate],
feed_dict={
text_seq_batch : text_seq_val,
imagecrop_batch : imagecrop_val,
spatial_batch : spatial_batch_val,
label_batch : label_val
})
classification_loss_avg = decay*classification_loss_avg + (1-decay)*classification_loss_val
print('\titer = %d, classification_loss (cur) = %f, classification_loss (avg) = %f, learningrate = %f'
% (n_iter, classification_loss_val, classification_loss_avg, learningrate_val))
# Accuracy
    accuracy_all, accuracy_positive, accuracy_negative = compute_accuracy(scores_val, label_val)  # local helper defined above
avg_accuracy_all = decay*avg_accuracy_all + (1-decay)*accuracy_all
avg_accuracy_positive = decay*avg_accuracy_positive + (1-decay)*accuracy_positive
avg_accuracy_negative = decay*avg_accuracy_negative + (1-decay)*accuracy_negative
print('\titer = %d, accuracy (cur) = %f (all), %f (positive), %f (negative)'
% (n_iter, accuracy_all, accuracy_positive, accuracy_negative))
print('\titer = %d, accuracy (avg) = %f (all), %f (positive), %f (negative)'
% (n_iter, avg_accuracy_all, avg_accuracy_positive, avg_accuracy_negative))
# Save snapshot
if (n_iter+1) % snapshot == 0 or (n_iter+1) == max_iter:
snapshot_saver.save(sess, snapshot_file % (n_iter+1))
print('snapshot saved to ' + snapshot_file % (n_iter+1))
print('Optimization done.')
sess.close()
|
[
"noreply@github.com"
] |
aftaabmd.noreply@github.com
|
1c9a0eec248e3af6b5145a5db61402cf61abf9ef
|
cc3fff6abe2d9b33d11cd384b263e1c4f07e5b6c
|
/dir/dir2/t2.py
|
ee41f0aa2921ddfc366cbfd39c899af600e8c0fe
|
[] |
no_license
|
letianccc/latin_httpserver
|
22afa1e15f542c256a232a933edd974f6f90533c
|
8f7aad21ba8abffb011d5410bce29c591e9ef990
|
refs/heads/master
| 2018-11-11T12:12:32.264360
| 2018-09-05T00:39:26
| 2018-09-05T00:39:26
| 111,545,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 57
|
py
|
a = 'a'
print('t2', a)
import t1
b = 'b'
print('t2', b)
|
[
"704364447@qq.com"
] |
704364447@qq.com
|
250866828c24d630e5b8d134eec9b042585d819a
|
716ed7eac59b70cb3ec376f8247b386b50ce9e23
|
/sktime/forecasting/model_selection/_tune.py
|
ab0c9de58b4ea2309fe8b6c58d74eb763f9202a8
|
[
"BSD-3-Clause"
] |
permissive
|
earthinversion/sktime
|
4f3b657cebe3587958d2a3276b65b38c83a7d8ed
|
012d11e6b879d29b0a36c7e2e7172355992348f3
|
refs/heads/main
| 2023-04-17T04:01:47.841308
| 2021-04-09T16:58:14
| 2021-04-09T16:58:14
| 356,362,984
| 1
| 1
|
BSD-3-Clause
| 2021-04-09T18:22:00
| 2021-04-09T18:21:59
| null |
UTF-8
|
Python
| false
| false
| 17,456
|
py
|
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["Markus Löning"]
__all__ = ["ForecastingGridSearchCV", "ForecastingRandomizedSearchCV"]
import pandas as pd
from joblib import Parallel
from joblib import delayed
from sklearn.base import clone
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
from sklearn.model_selection import check_cv
from sklearn.model_selection._search import _check_param_grid
from sklearn.utils.metaestimators import if_delegate_has_method
from sktime.exceptions import NotFittedError
from sktime.forecasting.base import BaseForecaster
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.model_evaluation import evaluate
from sktime.utils.validation.forecasting import check_scoring
from sktime.utils.validation.forecasting import check_y_X
class BaseGridSearch(BaseForecaster):
def __init__(
self,
forecaster,
cv,
strategy="refit",
n_jobs=None,
pre_dispatch=None,
refit=False,
scoring=None,
verbose=0,
):
self.forecaster = forecaster
self.cv = cv
self.strategy = strategy
self.n_jobs = n_jobs
self.pre_dispatch = pre_dispatch
self.refit = refit
self.scoring = scoring
self.verbose = verbose
super(BaseGridSearch, self).__init__()
@if_delegate_has_method(delegate=("best_forecaster_", "forecaster"))
def update(self, y, X=None, update_params=False):
"""Call predict on the forecaster with the best found parameters."""
self.check_is_fitted("update")
self.best_forecaster_.update(y, X, update_params=update_params)
return self
@if_delegate_has_method(delegate=("best_forecaster_", "forecaster"))
def update_predict(
self,
y,
cv=None,
X=None,
update_params=False,
return_pred_int=False,
alpha=DEFAULT_ALPHA,
):
"""Call update_predict on the forecaster with the best found
parameters.
"""
self.check_is_fitted("update_predict")
return self.best_forecaster_.update_predict(
y,
cv=cv,
X=X,
update_params=update_params,
return_pred_int=return_pred_int,
alpha=alpha,
)
@if_delegate_has_method(delegate=("best_forecaster_", "forecaster"))
def update_predict_single(
self,
y,
fh=None,
X=None,
update_params=False,
return_pred_int=False,
alpha=DEFAULT_ALPHA,
):
"""Call predict on the forecaster with the best found parameters."""
self.check_is_fitted("update_predict_single")
return self.best_forecaster_.update_predict_single(
y,
fh=fh,
X=X,
update_params=update_params,
return_pred_int=return_pred_int,
alpha=alpha,
)
@if_delegate_has_method(delegate=("best_forecaster_", "forecaster"))
def predict(self, fh=None, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
"""Call predict on the forecaster with the best found parameters."""
self.check_is_fitted("predict")
return self.best_forecaster_.predict(
fh, X, return_pred_int=return_pred_int, alpha=alpha
)
@if_delegate_has_method(delegate=("best_forecaster_", "forecaster"))
def compute_pred_int(self, y_pred, alpha=DEFAULT_ALPHA):
"""Call compute_pred_int on the forecaster with the best found parameters."""
self.check_is_fitted("compute_pred_int")
return self.best_forecaster_.compute_pred_int(y_pred, alpha=alpha)
@if_delegate_has_method(delegate=("best_forecaster_", "forecaster"))
def transform(self, y, X=None):
"""Call transform on the forecaster with the best found parameters."""
self.check_is_fitted("transform")
return self.best_forecaster_.transform(y, X)
@if_delegate_has_method(delegate=("best_forecaster_", "forecaster"))
def get_fitted_params(self):
"""Get fitted parameters
Returns
-------
fitted_params : dict
"""
self.check_is_fitted("get_fitted_params")
return self.best_forecaster_.get_fitted_params()
@if_delegate_has_method(delegate=("best_forecaster_", "forecaster"))
def inverse_transform(self, y, X=None):
"""Call inverse_transform on the forecaster with the best found params.
Only available if the underlying forecaster implements
``inverse_transform`` and ``refit=True``.
Parameters
----------
y : indexable, length n_samples
Must fulfill the input assumptions of the
underlying forecaster.
"""
self.check_is_fitted("inverse_transform")
return self.best_forecaster_.inverse_transform(y, X)
def score(self, y, X=None, fh=None):
"""Returns the score on the given data, if the forecaster has been
refit.
This uses the score defined by ``scoring`` where provided, and the
``best_forecaster_.score`` method otherwise.
Parameters
----------
y : pandas.Series
Target time series to which to compare the forecasts.
X : pandas.DataFrame, shape=[n_obs, n_vars], optional (default=None)
An optional 2-d dataframe of exogenous variables.
Returns
-------
score : float
"""
self.check_is_fitted("score")
if self.scoring is None:
return self.best_forecaster_.score(y, X=X, fh=fh)
else:
y_pred = self.best_forecaster_.predict(fh, X=X)
return self.scoring(y, y_pred)
def _run_search(self, evaluate_candidates):
raise NotImplementedError("abstract method")
def check_is_fitted(self, method_name=None):
"""Has `fit` been called?
Parameters
----------
method_name : str
Name of the calling method.
Raises
------
NotFittedError
If forecaster has not been fitted yet.
"""
super(BaseGridSearch, self).check_is_fitted()
# We additionally check if the tuned forecaster has been fitted.
if method_name is not None:
if not self.refit:
raise NotFittedError(
"This %s instance was initialized "
"with refit=False. %s is "
"available only after refitting on the "
"best parameters. You can refit an forecaster "
"manually using the ``best_params_`` "
"attribute" % (type(self).__name__, method_name)
)
else:
self.best_forecaster_.check_is_fitted()
def fit(self, y, X=None, fh=None, **fit_params):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list or np.array, optional (default=None)
            The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
            An optional 2-d dataframe of exogenous variables.
Returns
-------
self : returns an instance of self.
"""
y, X = check_y_X(y, X)
cv = check_cv(self.cv)
scoring = check_scoring(self.scoring)
scoring_name = f"test_{scoring.name}"
parallel = Parallel(n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch)
def _fit_and_score(params):
# Clone forecaster.
forecaster = clone(self.forecaster)
# Set parameters.
forecaster.set_params(**params)
# Evaluate.
out = evaluate(
forecaster,
cv,
y,
X,
strategy=self.strategy,
scoring=scoring,
fit_params=fit_params,
)
# Filter columns.
out = out.filter(items=[scoring_name, "fit_time", "pred_time"], axis=1)
# Aggregate results.
out = out.mean()
out = out.add_prefix("mean_")
# Add parameters to output table.
out["params"] = params
return out
def evaluate_candidates(candidate_params):
candidate_params = list(candidate_params)
if self.verbose > 0:
n_candidates = len(candidate_params)
n_splits = cv.get_n_splits(y)
print( # noqa
"Fitting {0} folds for each of {1} candidates,"
" totalling {2} fits".format(
n_splits, n_candidates, n_candidates * n_splits
)
)
out = parallel(
delayed(_fit_and_score)(params) for params in candidate_params
)
if len(out) < 1:
raise ValueError(
"No fits were performed. "
"Was the CV iterator empty? "
"Were there no candidates?"
)
return out
# Run grid-search cross-validation.
results = self._run_search(evaluate_candidates)
results = pd.DataFrame(results)
# Rank results, according to whether greater is better for the given scoring.
results[f"rank_{scoring_name}"] = results.loc[:, f"mean_{scoring_name}"].rank(
            ascending=not scoring.greater_is_better
)
self.cv_results_ = results
# Select best parameters.
self.best_index_ = results.loc[:, f"rank_{scoring_name}"].argmin()
self.best_score_ = results.loc[self.best_index_, f"mean_{scoring_name}"]
self.best_params_ = results.loc[self.best_index_, "params"]
self.best_forecaster_ = clone(self.forecaster).set_params(**self.best_params_)
# Refit model with best parameters.
if self.refit:
self.best_forecaster_.fit(y, X, fh)
self._is_fitted = True
return self
class ForecastingGridSearchCV(BaseGridSearch):
"""
Performs grid-search cross-validation to find optimal model parameters.
    The forecaster is fit on the initial window and then temporal
    cross-validation is used to find the optimal parameters.
Grid-search cross-validation is performed based on a cross-validation
iterator encoding the cross-validation scheme, the parameter grid to
search over, and (optionally) the evaluation metric for comparing model
performance. As in scikit-learn, tuning works through the common
hyper-parameter interface which allows to repeatedly fit and evaluate
the same forecaster with different hyper-parameters.
Parameters
----------
forecaster : estimator object
The estimator should implement the sktime or scikit-learn estimator
interface. Either the estimator must contain a "score" function,
or a scoring function must be passed.
cv : cross-validation generator or an iterable
e.g. SlidingWindowSplitter()
param_grid : dict or list of dictionaries
Model tuning parameters of the forecaster to evaluate
scoring: function, optional (default=None)
Function to score models for evaluation of optimal parameters
n_jobs: int, optional (default=None)
Number of jobs to run in parallel.
None means 1 unless in a joblib.parallel_backend context.
-1 means using all processors.
refit: bool, optional (default=True)
Refit the forecaster with the best parameters on all the data
verbose: int, optional (default=0)
pre_dispatch: str, optional (default='2*n_jobs')
error_score: numeric value or the str 'raise', optional (default=np.nan)
The test score returned when a forecaster fails to be fitted.
return_train_score: bool, optional (default=False)
Attributes
----------
best_index_ : int
best_score_: float
Score of the best model
best_params_ : dict
Best parameter values across the parameter grid
best_forecaster_ : estimator
Fitted estimator with the best parameters
cv_results_ : dict
Results from grid search cross validation
n_splits_: int
        Number of splits in the data for cross validation
refit_time_ : float
Time (seconds) to refit the best forecaster
scorer_ : function
Function used to score model
"""
_required_parameters = ["forecaster", "cv", "param_grid"]
def __init__(
self,
forecaster,
cv,
param_grid,
scoring=None,
strategy="refit",
n_jobs=None,
refit=True,
verbose=0,
pre_dispatch="2*n_jobs",
):
super(ForecastingGridSearchCV, self).__init__(
forecaster=forecaster,
scoring=scoring,
n_jobs=n_jobs,
refit=refit,
cv=cv,
strategy=strategy,
verbose=verbose,
pre_dispatch=pre_dispatch,
)
self.param_grid = param_grid
def _run_search(self, evaluate_candidates):
"""Search all candidates in param_grid"""
_check_param_grid(self.param_grid)
return evaluate_candidates(ParameterGrid(self.param_grid))
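# Illustrative usage sketch (added for clarity; not part of the original module).
# It assumes sktime's load_airline dataset, NaiveForecaster and
# SlidingWindowSplitter are importable; the parameter grid below is only an example.
def _example_forecasting_grid_search():  # pragma: no cover
    from sktime.datasets import load_airline
    from sktime.forecasting.naive import NaiveForecaster
    from sktime.forecasting.model_selection import SlidingWindowSplitter
    y = load_airline()
    # Score each candidate on sliding windows of 36 observations, 3 steps ahead.
    cv = SlidingWindowSplitter(window_length=36, fh=[1, 2, 3])
    gscv = ForecastingGridSearchCV(
        forecaster=NaiveForecaster(),
        cv=cv,
        param_grid={"strategy": ["last", "mean"]},
    )
    gscv.fit(y)
    # With refit=True (default), the best forecaster is refitted and can predict.
    return gscv.best_params_, gscv.predict(fh=[1, 2, 3])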
class ForecastingRandomizedSearchCV(BaseGridSearch):
"""
Performs randomized-search cross-validation to find optimal model parameters.
    The forecaster is fit on the initial window and then temporal
    cross-validation is used to find the optimal parameters.
Randomized cross-validation is performed based on a cross-validation
iterator encoding the cross-validation scheme, the parameter distributions to
search over, and (optionally) the evaluation metric for comparing model
performance. As in scikit-learn, tuning works through the common
hyper-parameter interface which allows to repeatedly fit and evaluate
the same forecaster with different hyper-parameters.
Parameters
----------
forecaster : estimator object
The estimator should implement the sktime or scikit-learn estimator
interface. Either the estimator must contain a "score" function,
or a scoring function must be passed.
cv : cross-validation generator or an iterable
e.g. SlidingWindowSplitter()
param_distributions : dict or list of dicts
Dictionary with parameters names (`str`) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
If a list of dicts is given, first a dict is sampled uniformly, and
then a parameter is sampled using that dict as above.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring: function, optional (default=None)
Function to score models for evaluation of optimal parameters
n_jobs: int, optional (default=None)
Number of jobs to run in parallel.
None means 1 unless in a joblib.parallel_backend context.
-1 means using all processors.
refit: bool, optional (default=True)
Refit the forecaster with the best parameters on all the data
verbose: int, optional (default=0)
random_state : int, RandomState instance or None, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Pass an int for reproducible output across multiple
function calls.
pre_dispatch: str, optional (default='2*n_jobs')
Attributes
----------
best_index_ : int
best_score_: float
Score of the best model
best_params_ : dict
Best parameter values across the parameter grid
best_forecaster_ : estimator
Fitted estimator with the best parameters
cv_results_ : dict
Results from grid search cross validation
"""
_required_parameters = ["forecaster", "cv", "param_distributions"]
def __init__(
self,
forecaster,
cv,
param_distributions,
n_iter=10,
scoring=None,
strategy="refit",
n_jobs=None,
refit=True,
verbose=0,
random_state=None,
pre_dispatch="2*n_jobs",
):
super(ForecastingRandomizedSearchCV, self).__init__(
forecaster=forecaster,
scoring=scoring,
strategy=strategy,
n_jobs=n_jobs,
refit=refit,
cv=cv,
verbose=verbose,
pre_dispatch=pre_dispatch,
)
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def _run_search(self, evaluate_candidates):
"""Search n_iter candidates from param_distributions"""
return evaluate_candidates(
ParameterSampler(
self.param_distributions, self.n_iter, random_state=self.random_state
)
)
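# Illustrative usage sketch (added for clarity; not part of the original module).
# param_distributions below is only an example; any dict of lists or of
# scipy.stats distributions exposing ``rvs`` can be used.
def _example_forecasting_randomized_search():  # pragma: no cover
    from sktime.datasets import load_airline
    from sktime.forecasting.naive import NaiveForecaster
    from sktime.forecasting.model_selection import SlidingWindowSplitter
    y = load_airline()
    cv = SlidingWindowSplitter(window_length=36, fh=[1, 2, 3])
    rscv = ForecastingRandomizedSearchCV(
        forecaster=NaiveForecaster(),
        cv=cv,
        param_distributions={"strategy": ["last", "mean"]},
        n_iter=2,
        random_state=42,
    )
    rscv.fit(y)
    return rscv.best_params_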
|
[
"noreply@github.com"
] |
earthinversion.noreply@github.com
|
f30b3c1cfff41d927c49ec54afc16c9185ec7195
|
3282ccae547452b96c4409e6b5a447f34b8fdf64
|
/SimModel_Python_API/simmodel_swig/Release/SimInternalLoad_Lights_Default.py
|
9cb959cded414dc66d1ee592c294e6008ab6b58b
|
[
"MIT"
] |
permissive
|
EnEff-BIM/EnEffBIM-Framework
|
c8bde8178bb9ed7d5e3e5cdf6d469a009bcb52de
|
6328d39b498dc4065a60b5cc9370b8c2a9a1cddf
|
refs/heads/master
| 2021-01-18T00:16:06.546875
| 2017-04-18T08:03:40
| 2017-04-18T08:03:40
| 28,960,534
| 3
| 0
| null | 2017-04-18T08:03:40
| 2015-01-08T10:19:18
|
C++
|
UTF-8
|
Python
| false
| false
| 11,162
|
py
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_SimInternalLoad_Lights_Default', [dirname(__file__)])
except ImportError:
import _SimInternalLoad_Lights_Default
return _SimInternalLoad_Lights_Default
if fp is not None:
try:
_mod = imp.load_module('_SimInternalLoad_Lights_Default', fp, pathname, description)
finally:
fp.close()
return _mod
_SimInternalLoad_Lights_Default = swig_import_helper()
del swig_import_helper
else:
import _SimInternalLoad_Lights_Default
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
try:
import weakref
weakref_proxy = weakref.proxy
except:
weakref_proxy = lambda x: x
import base
import SimInternalLoad_Equipment_Electric
class SimInternalLoad_Lights(SimInternalLoad_Equipment_Electric.SimInternalLoad):
__swig_setmethods__ = {}
for _s in [SimInternalLoad_Equipment_Electric.SimInternalLoad]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimInternalLoad_Lights, name, value)
__swig_getmethods__ = {}
for _s in [SimInternalLoad_Equipment_Electric.SimInternalLoad]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimInternalLoad_Lights, name)
__repr__ = _swig_repr
def SimInternalLoad_Name(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_Name(self, *args)
def SimInternalLoad_ZoneOrZoneListName(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_ZoneOrZoneListName(self, *args)
def SimInternalLoad_FracRadiant(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_FracRadiant(self, *args)
def SimInternalLoad_SchedName(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_SchedName(self, *args)
def SimInternalLoad_DesignLevelCalcMeth(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_DesignLevelCalcMeth(self, *args)
def SimInternalLoad_LightLevel(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_LightLevel(self, *args)
def SimInternalLoad_PowerPerZoneFloorArea(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_PowerPerZoneFloorArea(self, *args)
def SimInternalLoad_PowerPerPerson(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_PowerPerPerson(self, *args)
def SimInternalLoad_RtnAirFrac(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_RtnAirFrac(self, *args)
def SimInternalLoad_FracVisible(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_FracVisible(self, *args)
def SimInternalLoad_FracReplaceable(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_FracReplaceable(self, *args)
def SimInternalLoad_EndUseSubCat(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_EndUseSubCat(self, *args)
def SimInternalLoad_RtnAirFracCalcFromPlenTemp(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_RtnAirFracCalcFromPlenTemp(self, *args)
def SimInternalLoad_RtnAirFracFuncofPlenumTempCoef1(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_RtnAirFracFuncofPlenumTempCoef1(self, *args)
def SimInternalLoad_RtnAirFracFuncofPlenumTempCoef2(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_SimInternalLoad_RtnAirFracFuncofPlenumTempCoef2(self, *args)
def __init__(self, *args):
this = _SimInternalLoad_Lights_Default.new_SimInternalLoad_Lights(*args)
try:
self.this.append(this)
except:
self.this = this
def _clone(self, f=0, c=None):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights__clone(self, f, c)
__swig_destroy__ = _SimInternalLoad_Lights_Default.delete_SimInternalLoad_Lights
__del__ = lambda self: None
SimInternalLoad_Lights_swigregister = _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_swigregister
SimInternalLoad_Lights_swigregister(SimInternalLoad_Lights)
class SimInternalLoad_Lights_Default(SimInternalLoad_Lights):
__swig_setmethods__ = {}
for _s in [SimInternalLoad_Lights]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimInternalLoad_Lights_Default, name, value)
__swig_getmethods__ = {}
for _s in [SimInternalLoad_Lights]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimInternalLoad_Lights_Default, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimInternalLoad_Lights_Default.new_SimInternalLoad_Lights_Default(*args)
try:
self.this.append(this)
except:
self.this = this
def _clone(self, f=0, c=None):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default__clone(self, f, c)
__swig_destroy__ = _SimInternalLoad_Lights_Default.delete_SimInternalLoad_Lights_Default
__del__ = lambda self: None
SimInternalLoad_Lights_Default_swigregister = _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_swigregister
SimInternalLoad_Lights_Default_swigregister(SimInternalLoad_Lights_Default)
class SimInternalLoad_Lights_Default_sequence(base.sequence_common):
__swig_setmethods__ = {}
for _s in [base.sequence_common]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimInternalLoad_Lights_Default_sequence, name, value)
__swig_getmethods__ = {}
for _s in [base.sequence_common]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimInternalLoad_Lights_Default_sequence, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimInternalLoad_Lights_Default.new_SimInternalLoad_Lights_Default_sequence(*args)
try:
self.this.append(this)
except:
self.this = this
def assign(self, n, x):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_assign(self, n, x)
def begin(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_begin(self, *args)
def end(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_end(self, *args)
def rbegin(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_rbegin(self, *args)
def rend(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_rend(self, *args)
def at(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_at(self, *args)
def front(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_front(self, *args)
def back(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_back(self, *args)
def push_back(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_push_back(self, *args)
def pop_back(self):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_pop_back(self)
def detach_back(self, pop=True):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_detach_back(self, pop)
def insert(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_insert(self, *args)
def erase(self, *args):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_erase(self, *args)
def detach(self, position, r, erase=True):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_detach(self, position, r, erase)
def swap(self, x):
return _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_swap(self, x)
__swig_destroy__ = _SimInternalLoad_Lights_Default.delete_SimInternalLoad_Lights_Default_sequence
__del__ = lambda self: None
SimInternalLoad_Lights_Default_sequence_swigregister = _SimInternalLoad_Lights_Default.SimInternalLoad_Lights_Default_sequence_swigregister
SimInternalLoad_Lights_Default_sequence_swigregister(SimInternalLoad_Lights_Default_sequence)
# This file is compatible with both classic and new-style classes.
|
[
"cao@e3d.rwth-aachen.de"
] |
cao@e3d.rwth-aachen.de
|
2dc6a583543b27045a5bd797ae25ea06a0711562
|
ab0abbf3b68de565a0a5bfb96c094551ec26c9a1
|
/NLPWebApplication/NLP/temp.py
|
05d796665f76bd2aa11d42b42a27e0578c667134
|
[] |
no_license
|
wholemilk2/finance-news-sentiment-analysis
|
7273467db14c82cbf7df4bda9161a1d3afac0830
|
3523a2b19cd106f7abbed61f2c1c65a156ea0e8f
|
refs/heads/master
| 2020-06-27T13:34:59.319707
| 2019-07-31T07:35:50
| 2019-07-31T07:35:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,655
|
py
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
## Read the file and assign it to a variable.
# The type of this variable will be a character string (class str).
import pandas as pd
data = pd.read_csv(r"h.csv",encoding = "utf-8" )
data = pd.concat([data.strBaslik])
data=data.to_string()
print('"data" değişkeninin tipi:', type(data), '\n')
print('Haberlerin boşluklar dahil uzunluğu:', len(data), 'karakter.\n\n')
# noktalama işaretlerini ve sayıları temizle
from string import punctuation, digits
converter = str.maketrans('', '', punctuation)
data = data.translate(converter)
converter = str.maketrans('', '', digits)
data = data.translate(converter)
# remove the upper/lower case distinction
data = data.lower()
# Bag of words (each word and how many times it occurs in the text)
words = data.split()
print(len(words))
from collections import Counter
# Preparing the bag of words takes a single line
countsOfWords = Counter(words)
print(type(countsOfWords), '\n')
# We can look at the 10 most frequently used words
# with a single function.
for word in countsOfWords.most_common(10):
print(word)
# it also does stemming (root extraction) for Turkish
from snowballstemmer import stemmer
kokbul1 = stemmer('turkish')
print(kokbul1.stemWords('arttı art'.split()))
from sys import path
# Tell Python about the package we downloaded
path.append('/Users/zisanyalcinkaya/turkish-stemmer-python')
from TurkishStemmer import TurkishStemmer
kokbul2 = TurkishStemmer()
print(kokbul2.stem('arttı art'.split()))
print('\n\nThe word "ADEL" appears', countsOfWords['adel'], 'times in the text.')
|
[
"zisanyalcinkaya@Zisan-MacBook-Pro-5.local"
] |
zisanyalcinkaya@Zisan-MacBook-Pro-5.local
|
1f20f9066dc5a3eadcf02517149e514b676948c8
|
63e2bed7329c79bf67279f9071194c9cba88a82c
|
/SevOneApi/python-client/swagger_client/api/policies_api.py
|
69f1266b039b430644d519fa6d6ec6944b77a7df
|
[] |
no_license
|
jsthomason/LearningPython
|
12422b969dbef89578ed326852dd65f65ab77496
|
2f71223250b6a198f2736bcb1b8681c51aa12c03
|
refs/heads/master
| 2021-01-21T01:05:46.208994
| 2019-06-27T13:40:37
| 2019-06-27T13:40:37
| 63,447,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,477
|
py
|
# coding: utf-8
"""
SevOne API Documentation
Supported endpoints by the new RESTful API # noqa: E501
OpenAPI spec version: 2.1.18, Hash: db562e6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class PoliciesApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_policies(self, **kwargs): # noqa: E501
"""Get all policies # noqa: E501
Endpoint for retrieving all policies that supports pagination # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_policies(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The number of the requested page, defaults to 0
:param int size: The size of the requested page, defaults to 20; limited to a configurable maximum (10000 by default)
:param bool include_count: Whether to query for total elements count; defaults to true, set to false for performance boost
:param str sort_by: String array of format \"parameter, -parameter, natural\\*parameter, -natural\\*parameter\", where minus is for descending, natural* is for natural sort
:param str fields: String array of format \"id,name,objects(id,pluginId)\"; Defines which fields are returned
:return: PagerPolicyDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_policies_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_policies_with_http_info(**kwargs) # noqa: E501
return data
def get_policies_with_http_info(self, **kwargs): # noqa: E501
"""Get all policies # noqa: E501
Endpoint for retrieving all policies that supports pagination # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_policies_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The number of the requested page, defaults to 0
:param int size: The size of the requested page, defaults to 20; limited to a configurable maximum (10000 by default)
:param bool include_count: Whether to query for total elements count; defaults to true, set to false for performance boost
:param str sort_by: String array of format \"parameter, -parameter, natural\\*parameter, -natural\\*parameter\", where minus is for descending, natural* is for natural sort
:param str fields: String array of format \"id,name,objects(id,pluginId)\"; Defines which fields are returned
:return: PagerPolicyDto
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'size', 'include_count', 'sort_by', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_policies" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'include_count' in params:
query_params.append(('includeCount', params['include_count'])) # noqa: E501
if 'sort_by' in params:
query_params.append(('sortBy', params['sort_by'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/policies', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PagerPolicyDto', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_policies1(self, **kwargs): # noqa: E501
"""Get all policies # noqa: E501
Endpoint for retrieving all policies that supports pagination # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_policies1(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The number of the requested page, defaults to 0
:param int size: The size of the requested page, defaults to 20; limited to a configurable maximum (10000 by default)
:param bool include_count: Whether to query for total elements count; defaults to true, set to false for performance boost
:param str sort_by: String array of format \"parameter, -parameter, natural\\*parameter, -natural\\*parameter\", where minus is for descending, natural* is for natural sort
:param str fields: String array of format \"id,name,objects(id,pluginId)\"; Defines which fields are returned
:return: PagerPolicyDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_policies1_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_policies1_with_http_info(**kwargs) # noqa: E501
return data
def get_policies1_with_http_info(self, **kwargs): # noqa: E501
"""Get all policies # noqa: E501
Endpoint for retrieving all policies that supports pagination # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_policies1_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: The number of the requested page, defaults to 0
:param int size: The size of the requested page, defaults to 20; limited to a configurable maximum (10000 by default)
:param bool include_count: Whether to query for total elements count; defaults to true, set to false for performance boost
:param str sort_by: String array of format \"parameter, -parameter, natural\\*parameter, -natural\\*parameter\", where minus is for descending, natural* is for natural sort
:param str fields: String array of format \"id,name,objects(id,pluginId)\"; Defines which fields are returned
:return: PagerPolicyDto
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'size', 'include_count', 'sort_by', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_policies1" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'include_count' in params:
query_params.append(('includeCount', params['include_count'])) # noqa: E501
if 'sort_by' in params:
query_params.append(('sortBy', params['sort_by'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v2/policies', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PagerPolicyDto', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_policy(self, id, **kwargs): # noqa: E501
"""Get policy by Id # noqa: E501
Gets policy information # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_policy(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The id of the requested policy (required)
:return: PolicyDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_policy_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_policy_with_http_info(id, **kwargs) # noqa: E501
return data
def get_policy_with_http_info(self, id, **kwargs): # noqa: E501
"""Get policy by Id # noqa: E501
Gets policy information # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_policy_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The id of the requested policy (required)
:return: PolicyDto
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_policy`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/policies/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PolicyDto', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_policy1(self, id, **kwargs): # noqa: E501
"""Get policy by Id # noqa: E501
Gets policy information # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_policy1(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The id of the requested policy (required)
:return: PolicyDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_policy1_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_policy1_with_http_info(id, **kwargs) # noqa: E501
return data
def get_policy1_with_http_info(self, id, **kwargs): # noqa: E501
"""Get policy by Id # noqa: E501
Gets policy information # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_policy1_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The id of the requested policy (required)
:return: PolicyDto
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_policy1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_policy1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v2/policies/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PolicyDto', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
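# Illustrative usage sketch (added for clarity; not part of the generated client).
# Host and authentication setup via the swagger_client Configuration are omitted
# and deployment specific; the policy id below is only a placeholder.
def _example_policies_usage():  # pragma: no cover
    api = PoliciesApi()  # builds a default ApiClient, see __init__ above
    # Synchronous call using the documented pagination parameters.
    first_page = api.get_policies(page=0, size=20, include_count=True)
    # Asynchronous call: a thread-like object is returned immediately.
    thread = api.get_policies(async_req=True)
    deferred_page = thread.get()
    # Single policy lookup by id.
    policy = api.get_policy(1)
    return first_page, deferred_page, policy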
|
[
"johnsthomason@gmail.com"
] |
johnsthomason@gmail.com
|
96387e22a6b335fe685dfcb6afa0d49458d4f2a1
|
7869e3cd307e859c91db4f2df13cb11fabadb76f
|
/sahara/tests/unit/plugins/cdh/test_versionfactory.py
|
74db0f738c14c3cabbc848247a134b38b6fc8579
|
[
"Apache-2.0"
] |
permissive
|
jaxonwang/sahara
|
1c748bb287be95f45d3ec4aaa4d918884a89cad5
|
d5860557145b99cd92f283639ab034782423ff21
|
refs/heads/master
| 2021-01-22T18:24:27.546073
| 2016-02-03T17:14:23
| 2016-02-03T17:14:23
| 45,181,892
| 1
| 0
| null | 2015-10-29T12:32:04
| 2015-10-29T12:32:03
| null |
UTF-8
|
Python
| false
| false
| 1,651
|
py
|
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import abstractversionhandler as avh
from sahara.plugins.cdh import versionfactory as vf
from sahara.tests.unit import base
class VersionFactoryTestCase(base.SaharaTestCase):
def test_get_instance(self):
self.assertFalse(vf.VersionFactory.initialized)
factory = vf.VersionFactory.get_instance()
self.assertIsInstance(factory, vf.VersionFactory)
self.assertTrue(vf.VersionFactory.initialized)
def test_get_versions(self):
factory = vf.VersionFactory.get_instance()
versions = factory.get_versions()
expected_versions = self.get_support_versions()
self.assertEqual(expected_versions, versions)
def test_get_version_handler(self):
factory = vf.VersionFactory.get_instance()
versions = self.get_support_versions()
for version in versions:
            handler = factory.get_version_handler(version)
            self.assertIsInstance(handler, avh.AbstractVersionHandler)
def get_support_versions(self):
return ['5', '5.3.0', '5.4.0']
|
[
"jiexingx.wang@intel.com"
] |
jiexingx.wang@intel.com
|
0b306a81e32ae2b0da4df778e7a8f6624b97cf34
|
b2b97f4887afb488be9bee70e9ffeea03ec37c5c
|
/webapp/generate/view.py
|
00d8a776af255965646908241643f73d99b39495
|
[] |
no_license
|
suxiaochuan/WebBaoBiao
|
11415181d50704c0aec67c10af4deaf1bb49b835
|
9e213beac3597326ead8ccba6c197424e263313c
|
refs/heads/master
| 2020-05-07T05:42:31.150342
| 2019-04-09T09:26:58
| 2019-04-09T09:26:58
| 180,281,377
| 0
| 0
| null | 2019-04-09T09:06:41
| 2019-04-09T03:57:15
|
Python
|
UTF-8
|
Python
| false
| false
| 5,210
|
py
|
# _*_ coding: utf-8 _*_
from . import _generate
from flask import render_template, request, send_from_directory, abort, flash, redirect, send_file
from flask_login import login_required, current_user
import os
import xlrd
import xlwt
from xlutils.copy import copy
from .form import GenerateForm, excels
from .. import conn
from pypinyin import lazy_pinyin
pardir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# print(pardir)
basedir = os.path.abspath(os.path.dirname(__file__))
# print(basedir)
FILE_TO_DOWNLOAD = {'1': '资金期限表', '2': 'G25', '3': 'Q02'}
@_generate.route('/')
@login_required
def generate():
form = GenerateForm()
generatelist = request.values.getlist('excels')
generatedate = request.values.get('generatedate')
if generatelist == []:
return render_template('generate.html', form=form)
else:
filedir = os.path.join(basedir, 'upload')
# print(filedir)
# print(basedir)
generatedate = generatedate.split('-')[0] + '_' + generatedate.split('-')[1]
for generatefile in generatelist:
filetogenerate_chinese = FILE_TO_DOWNLOAD[generatefile]
# call the function to generate filetogenerate
print(generatedate)
generateFile(filetogenerate_chinese, generatedate)
return render_template('generate.html', form=form)
def generateFile(filetogenerate_chinese, generatedate):
conn.ping(reconnect=True)
cursor = conn.cursor()
filetogenerate = ''.join(lazy_pinyin(filetogenerate_chinese))
    # Create the new table
sql = 'create table if not exists ' + filetogenerate + '_' + generatedate + \
' select * from ' + filetogenerate + ';'
cursor.execute(sql)
    # Get the cells that need to be filled in from the template
sql = 'select distinct position, content from ' + filetogenerate + ' where editable=True;'
cursor.execute(sql)
conn.commit()
sqlresult = cursor.fetchall()
for i in range(len(sqlresult)):
        # Get which cell this is
position = sqlresult[i][0]
print(position)
userlist = []
userset = {}
alertlist = []
        # Get the users and their contents
content_list = sqlresult[i][1].lstrip('|').split('|')
for content in content_list:
userandvalue = content.split(':')
if len(userandvalue) == 1:
userandvalue = content.split(':')
user = ''.join(lazy_pinyin(userandvalue[0]))
if len(userandvalue) > 1:
value = userandvalue[1]
else:
value = None
if user not in userlist:
userlist.append(user)
userset[user] = []
userset[user].append((position, value))
positionvaluelist = []
for user in userlist:
for i in range(len(userset[user])):
position = userset[user][i][0]
# value = userset[user][i][0]
try:
sql = 'select value from ' + user + \
' where baobiao="' + filetogenerate_chinese + '" and position="' + position + '";'
# print(sql)
cursor.execute(sql)
result = cursor.fetchall()
value = result[0][0]
positionvaluelist.append(value)
if value is None:
alertlist.append(user)
except:
alertlist.append(user)
finally:
pass
positionvalue = sum([x if x is not None else 0 for x in positionvaluelist])
print(alertlist)
sql = 'update ' + filetogenerate + '_' + generatedate + ' set content="' + str(positionvalue) + \
'" where position="' + str(position) + '";'
print(sql)
cursor.execute(sql)
conn.commit()
    # Automatically evaluate the cells that contain formulas
# sql = 'select distinct position, content from ' + filetogenerate + ' where content like "=%";'
# cursor.execute(sql)
# conn.commit()
# sqlresult = cursor.fetchall()
# print(sqlresult)
######################
    # Generate the excel file
    # Calculate the number of rows and columns
wb = xlrd.open_workbook(pardir + '/api/upload/' + filetogenerate_chinese + '/' + filetogenerate_chinese + '.xlsx')
wbnew = copy(wb)
sh = wbnew.get_sheet(0)
# book = xlwt.Workbook(encoding='utf-8')
# sheet1 = book.add_sheet('Sheet1')
sql = 'select distinct position, content from ' + filetogenerate + '_' + generatedate + ';'
cursor.execute(sql)
conn.commit()
sqlresult = cursor.fetchall()
positionlist = [x[0] for x in sqlresult]
contentlist = [x[1] for x in sqlresult]
# row = list(set([x[1:] for x in positionlist]))
# column = list(set(x[0] for x in positionlist))
for i in range(len(positionlist)):
row = int(positionlist[i][1:]) - 1
col = ord(positionlist[i][0]) - ord('A')
sh.write(row, col, contentlist[i])
filedir = os.path.join(basedir, filetogenerate_chinese)
if not os.path.exists(filedir):
os.mkdir(filedir)
wbnew.save(filedir + '/' + filetogenerate_chinese + '_' + generatedate + '.xls')
|
[
"lyfgerrard8@gmail.com"
] |
lyfgerrard8@gmail.com
|
83ccb76308d734373287a43059fad211f3c6071c
|
db006e9229d47146c47ca264f4f15df22df40d17
|
/file_1_btc_price_prediction.py
|
96093d8bcd57fc20fa48103d3a4bc1721e405044
|
[] |
no_license
|
UiiKyra/CryptocurrencyPrediction
|
6fad980648f932f96f6b5359c6a1d28416c38419
|
b31981c1280d51ddf3075b90693c81292be535de
|
refs/heads/master
| 2022-09-17T20:16:42.740364
| 2020-06-04T03:42:40
| 2020-06-04T03:42:40
| 269,240,010
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,618
|
py
|
# -*- coding: utf-8 -*-
"""File_1_BTC_Price_Prediction.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1NGjA41_DZBvOUH_q_o8SrEfCKc_Jha6J
"""
# The file uses daily Bitcoin price data, returns predictions and saves them along with the actual data to a csv file
# The model predicts the daily (24h) Bitcoin close price.
# The final prediction model consists of 2 models:
# 1. Regression LSTM model which uses only Bitcoin close price data (window size = 10).
# 2. Regression LSTM model which uses multivariate data (Bitcoin close price and Fear and Greed Index).
# The models are combined using multiple linear regression.
# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 1.x
# Commented out notebook shell command to ensure Python compatibility.
# pip install scikit-learn==0.22.2.post1
# Commented out IPython magic to ensure Python compatibility.
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.models import load_model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import requests
from joblib import dump, load
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
# %matplotlib inline
# Specify the datafile which is used as input, for further return of predictions
file = r'C:\Users\ketap\Downloads\drive-download-20200322T222027Z-001\BTCUSD_1d_2011-09-13_to_2019-10-23_bitstamp.csv'
# # Commented out IPython magic to ensure Python compatibility.
# # Connect to Google drive, to upload the data
# from google.colab import drive
# drive.mount('/content/gdrive')
# # %cd /content/gdrive/My\ Drive/Colab\ Notebooks/data
# Function for data preprocessing. Returns 4 variables: X1 (for model 1), X2 (dataframe for model 2), y (actual y) and scaler for further use inside the model
def preprocess(file_path_name):
filename = str(file_path_name)
data = pd.read_csv(filename)
data['date'] = pd.to_datetime(data['date'])
df = data.filter(items=['date', 'close'])
df = df.dropna(axis = 0, how ='any')
df.index = df.date
df.drop('date', axis=1, inplace=True)
x = df.values
# Normalize the data
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(x)
# Data preparation for further use in the LSTM model
# X1 is vector of inputs, y is labels (values needed to be predicted)
X1 = []
y = []
    # history_size is the number of previous time steps to use as input variables
history_size = 10
for i in range(history_size, len(scaled)):
X1.append(scaled[i - history_size:i, 0])
y.append(scaled[i])
X1, y = np.array(X1), np.array(y)
X1 = np.reshape(X1, (X1.shape[0], X1.shape[1], 1))
# Load Fear and Greed Index historical data using API
url = 'https://api.alternative.me/fng/'
resp = requests.get(url,params={'limit': '2000', 'format': 'csv', 'date_format': 'cn'})
resp_list = []
for line in resp.text.splitlines():
resp_list.append(line)
resp_list = resp_list[4:-5]
fg_df = pd.DataFrame([sub.split(",") for sub in resp_list])
fg_df.columns = ['Date', 'F&G Index', 'Outcome']
fg_df['Date'] = pd.to_datetime(fg_df['Date'])
fg_df = fg_df.set_index('Date')
fg_df = fg_df.drop(['Outcome'], axis = 1)
fg_df["F&G Index"] = fg_df["F&G Index"].astype(float)
fg_df = fg_df.sort_index(ascending=True)
# Temporal alignment of F&G Index with Bitcoin price data and combining then into one data frame
fg_df_new = fg_df.loc[:df.index.max()]
fg_df_new = fg_df_new.join(df, lsuffix='_date1', rsuffix='_date2')
fg_df_new = fg_df_new[['close', 'F&G Index']]
fg_df_new = fg_df_new.dropna()
X2 = fg_df_new
return X1, X2, y, scaler
# Preprocess the given data
X1, X2, y, scaler = preprocess(file)
y = scaler.inverse_transform(y)
# Function which loads the model and the data, and returns the array of predictions (Model 1)
def model(file_path_name, input):
lstm_model = load_model(str(file_path_name))
y1_hat = lstm_model.predict(input)
y1_hat = scaler.inverse_transform(y1_hat)
return y1_hat
# Load the Model 1 file and make predictions for X1
y1_hat = model(r'C:\Users\ketap\Downloads\drive-download-20200322T222027Z-001\Bitcoin_LSTM_w10d_final.h5', X1)
# Function which loads the model and the data, and returns the array of predictions (Model 2: uses Bitcoin price and Fear&Greed Index)
# Lookback period = 1
def additional_model(file_path_name, input):
scaled= scaler.fit_transform(input)
scaled = pd.DataFrame(scaled)
lookback = 1
pred_col = 0
t = scaled.copy()
t['id'] = range(1, len(scaled) + 1)
t = t.iloc[:-lookback,:]
t.set_index('id', inplace=True)
pred_value = scaled.copy()
pred_value = pred_value.iloc[lookback:, pred_col]
pred_value.columns = ["Pred"]
pred_value = pd.DataFrame(pred_value)
pred_value["id"] = range(1, len(pred_value) + 1)
pred_value.set_index('id', inplace=True)
final_df = pd.concat([t, pred_value], axis=1)
values = final_df.values
x = values[:,:-1]
x = np.reshape(x, (x.shape[0], x.shape[1], 1))
add_model = load_model(str(file_path_name))
y2_hat = add_model.predict(x)
x = x.reshape((x.shape[0], x.shape[1]))
y2_hat = np.concatenate((y2_hat, x[:,1:]), axis=1)
y2_hat = scaler.inverse_transform(y2_hat)
y2_hat = y2_hat[:,0]
y2_hat = y2_hat.reshape(-1,1)
return y2_hat
# Load the Model 2 file and make predictions for X2
y2_hat = additional_model(r'C:\Users\ketap\Downloads\drive-download-20200322T222027Z-001\Bitcoin_and_FG_Index_2.h5', X2)
# Function loads Model 3 (simple linear model) weights and makes predictions based on y1_hat (obtained from Model 1) and y2_hat (obtained from Model 2)
# Returns y3_hat - the improved predictions
def combiner(file_path_name, y1_hat, y2_hat):
if y1_hat.shape[0] > y2_hat.shape[0]:
y1_hat_aligned = y1_hat[-y2_hat.shape[0]:]
y2_hat_aligned = y2_hat
elif y1_hat.shape[0] < y2_hat.shape[0]:
y2_hat_aligned = y2_hat[-y1_hat.shape[0]:]
y1_hat_aligned = y1_hat
else:
y2_hat_aligned = y2_hat
y1_hat_aligned = y1_hat
model_final = load(str(file_path_name))
y3_hat = model_final.predict(np.concatenate((y1_hat_aligned, y2_hat_aligned), axis=1))
return y3_hat
y3_hat = combiner(r'C:\Users\ketap\Downloads\drive-download-20200322T222027Z-001\LinearCombiner3.joblib', y1_hat, y2_hat)
y_aligned = y[-y3_hat.shape[0]:]
# Plot the graph of actual vs predicted values
fig = plt.figure(figsize=[20,12])
ax = fig.add_subplot(111)
time_steps = np.arange(1, len(y3_hat) + 1)
plt.plot(time_steps, y_aligned, label = 'Actual price')
plt.plot(time_steps, y3_hat, label = 'Predicted price')
ax.legend()
plt.show()
# MSE obtained by the LSTM model
mse = mean_squared_error(y_aligned, y3_hat)
mse
# Naive forecast function
def persistence_model(x):
return x[:-1]
y_aligned[1:]
# MSE obtained by naive forecast model
mse_1 = mean_squared_error(y_aligned[1:], persistence_model(y_aligned))
mse_1
# Create a dataframe containing date, actual close price, F&G index and predicted close price
df = X2[-y3_hat.shape[0]:]
df.loc[:,'close forecast'] = y3_hat
df.reset_index(level=0, inplace=True)
df = df.rename(columns={"index": "date"})
# Join the predictions and the original data file, delete missing values and save the result into csv file
data = pd.read_csv(file)
data['date'] = pd.to_datetime(data['date'])
data = data.join(df.drop(columns=['close', 'F&G Index']).set_index('date'), on='date').dropna().reset_index(drop=True)
data.to_csv(r'C:\Users\ketap\Downloads\drive-download-20200322T222027Z-001\BTC_data_and_forecast.csv', index=False)
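# Illustrative sketch (added for clarity; not part of the original pipeline):
# how a linear combiner such as the loaded 'LinearCombiner3.joblib' could be
# fitted from the two aligned prediction vectors. The arguments are placeholders,
# not variables produced above.
def _fit_example_combiner(y1_hat_aligned, y2_hat_aligned, y_true_aligned, out_path):
    X_combined = np.concatenate((y1_hat_aligned, y2_hat_aligned), axis=1)  # shape (n, 2)
    reg = LinearRegression().fit(X_combined, y_true_aligned)
    dump(reg, out_path)  # e.g. a .joblib file, reloadable by combiner() above
    return reg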
|
[
"noreply@github.com"
] |
UiiKyra.noreply@github.com
|
8340f9d1f11c1a959adf75a6cde2e575801cc99d
|
8c5fdc395d365706583065b0b40169a6196c503f
|
/app/forms.py
|
8c7f6477d745c97cb95f682429655151ee2598bd
|
[] |
no_license
|
manasashanubhogue/LibraryApp
|
2122608a8d383108b9acd8c121ccb6daec426ec3
|
0142920d2f37f24c2ed6531586c359fbb1ffc10d
|
refs/heads/main
| 2023-03-02T17:18:49.983193
| 2021-02-06T20:33:13
| 2021-02-06T20:33:13
| 335,926,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import InputRequired, Email
from app.models import BookRequest
from app import ma
class BookRequestForm(FlaskForm):
title = StringField('Book Title', validators=[InputRequired()])
email = StringField('Email', validators=[InputRequired(), Email()])
submit = SubmitField('Request')
class BookRequestSchema(ma.ModelSchema):
class Meta:
model = BookRequest
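# Illustrative usage sketch (added for clarity; not part of the original module).
# It assumes the BookRequest model accepts ``title`` and ``email`` keyword
# arguments; route registration and session handling are omitted.
def _example_handle_request():  # pragma: no cover
    form = BookRequestForm()
    if form.validate_on_submit():
        book_request = BookRequest(title=form.title.data, email=form.email.data)
        # Serialize the model instance with the marshmallow schema defined above.
        return BookRequestSchema().dump(book_request)
    return form.errors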
|
[
"m.manasa21@gmail.com"
] |
m.manasa21@gmail.com
|
fb4bb02fa24fcff5afdd1ff8a166d0b16e3efe7e
|
df5682f7bf97272a62765ff430de84f709893422
|
/MRE_ALBERT_Xlarge/albert/run_classifier.py
|
ebb39290a46da9693747d72d1692a53b492f16a4
|
[
"Apache-2.0"
] |
permissive
|
nikhil-krishna/Improving-Multihead-Selection-Model
|
88f82bb93810ed1f3685dd0848209c0180cde7a4
|
087a4784f0643218f24a4dc265043c7eda6dd5ca
|
refs/heads/main
| 2023-02-20T06:51:19.981019
| 2021-01-10T02:40:44
| 2021-01-10T02:40:44
| 328,287,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,370
|
py
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning on classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import time
from albert import classifier_utils
from albert import fine_tuning_utils
from albert import modeling
import tensorflow.compat.v1 as tf
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"albert_config_file", None,
"The config json file corresponding to the pre-trained ALBERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string(
"vocab_file", None,
"The vocabulary file that the ALBERT model was trained on.")
flags.DEFINE_string("spm_model_file", None,
"The model file for sentence piece tokenization.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_string("cached_dir", None,
"Path to cached training and dev tfrecord file. "
"The file will be generated if not exist.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string(
"albert_hub_module_handle", None,
"If set, the ALBERT hub module to use.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 512,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_integer("train_step", 1000,
"Total number of training steps to perform.")
flags.DEFINE_integer(
"warmup_step", 0,
"number of steps to perform linear learning rate warmup for.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("keep_checkpoint_max", 5,
"How many checkpoints to keep.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string("optimizer", "adamw", "Optimizer to use")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_string(
"export_dir", None,
"The directory where the exported SavedModel will be stored.")
flags.DEFINE_float(
"threshold_to_export", float("nan"),
"The threshold value that should be used with the exported classifier. "
"When specified, the threshold will be attached to the exported "
"SavedModel, and served along with the predictions. Please use the "
"saved model cli ("
"https://www.tensorflow.org/guide/saved_model#details_of_the_savedmodel_command_line_interface"
") to view the output signature of the threshold.")
def _serving_input_receiver_fn():
"""Creates an input function for serving."""
seq_len = FLAGS.max_seq_length
serialized_example = tf.placeholder(
dtype=tf.string, shape=[None], name="serialized_example")
features = {
"input_ids": tf.FixedLenFeature([seq_len], dtype=tf.int64),
"input_mask": tf.FixedLenFeature([seq_len], dtype=tf.int64),
"segment_ids": tf.FixedLenFeature([seq_len], dtype=tf.int64),
}
feature_map = tf.parse_example(serialized_example, features=features)
feature_map["is_real_example"] = tf.constant(1, dtype=tf.int32)
feature_map["label_ids"] = tf.constant(0, dtype=tf.int32)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in feature_map.keys():
t = feature_map[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
feature_map[name] = t
return tf.estimator.export.ServingInputReceiver(
features=feature_map, receiver_tensors=serialized_example)
def _add_threshold_to_model_fn(model_fn, threshold):
"""Adds the classifier threshold to the given model_fn."""
def new_model_fn(features, labels, mode, params):
spec = model_fn(features, labels, mode, params)
threshold_tensor = tf.constant(threshold, dtype=tf.float32)
default_serving_export = spec.export_outputs[
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
default_serving_export.outputs["threshold"] = threshold_tensor
return spec
return new_model_fn
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"cola": classifier_utils.ColaProcessor,
"mnli": classifier_utils.MnliProcessor,
"mismnli": classifier_utils.MisMnliProcessor,
"mrpc": classifier_utils.MrpcProcessor,
"rte": classifier_utils.RteProcessor,
"sst-2": classifier_utils.Sst2Processor,
"sts-b": classifier_utils.StsbProcessor,
"qqp": classifier_utils.QqpProcessor,
"qnli": classifier_utils.QnliProcessor,
"wnli": classifier_utils.WnliProcessor,
}
if not (FLAGS.do_train or FLAGS.do_eval or FLAGS.do_predict or
FLAGS.export_dir):
raise ValueError(
"At least one of `do_train`, `do_eval`, `do_predict' or `export_dir` "
"must be True.")
if not FLAGS.albert_config_file and not FLAGS.albert_hub_module_handle:
raise ValueError("At least one of `--albert_config_file` and "
"`--albert_hub_module_handle` must be set")
if FLAGS.albert_config_file:
albert_config = modeling.AlbertConfig.from_json_file(
FLAGS.albert_config_file)
if FLAGS.max_seq_length > albert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the ALBERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, albert_config.max_position_embeddings))
else:
albert_config = None # Get the config from TF-Hub.
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name](
use_spm=True if FLAGS.spm_model_file else False,
do_lower_case=FLAGS.do_lower_case)
label_list = processor.get_labels()
tokenizer = fine_tuning_utils.create_vocab(
vocab_file=FLAGS.vocab_file,
do_lower_case=FLAGS.do_lower_case,
spm_model_file=FLAGS.spm_model_file,
hub_module=FLAGS.albert_hub_module_handle)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2
if FLAGS.do_train:
iterations_per_loop = int(min(FLAGS.iterations_per_loop,
FLAGS.save_checkpoints_steps))
else:
iterations_per_loop = FLAGS.iterations_per_loop
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=int(FLAGS.save_checkpoints_steps),
keep_checkpoint_max=0,
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
model_fn = classifier_utils.model_fn_builder(
albert_config=albert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=FLAGS.train_step,
num_warmup_steps=FLAGS.warmup_step,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu,
task_name=task_name,
hub_module=FLAGS.albert_hub_module_handle,
optimizer=FLAGS.optimizer)
if not math.isnan(FLAGS.threshold_to_export):
model_fn = _add_threshold_to_model_fn(model_fn, FLAGS.threshold_to_export)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size,
export_to_tpu=False) # http://yaqs/4707241341091840
if FLAGS.do_train:
cached_dir = FLAGS.cached_dir
if not cached_dir:
cached_dir = FLAGS.output_dir
train_file = os.path.join(cached_dir, task_name + "_train.tf_record")
if not tf.gfile.Exists(train_file):
classifier_utils.file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer,
train_file, task_name)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", FLAGS.train_step)
train_input_fn = classifier_utils.file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True,
task_name=task_name,
use_tpu=FLAGS.use_tpu,
bsz=FLAGS.train_batch_size)
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_step)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(classifier_utils.PaddingInputExample())
cached_dir = FLAGS.cached_dir
if not cached_dir:
cached_dir = FLAGS.output_dir
eval_file = os.path.join(cached_dir, task_name + "_eval.tf_record")
if not tf.gfile.Exists(eval_file):
classifier_utils.file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer,
eval_file, task_name)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = classifier_utils.file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder,
task_name=task_name,
use_tpu=FLAGS.use_tpu,
bsz=FLAGS.eval_batch_size)
best_trial_info_file = os.path.join(FLAGS.output_dir, "best_trial.txt")
def _best_trial_info():
"""Returns information about which checkpoints have been evaled so far."""
if tf.gfile.Exists(best_trial_info_file):
with tf.gfile.GFile(best_trial_info_file, "r") as best_info:
global_step, best_metric_global_step, metric_value = (
best_info.read().split(":"))
global_step = int(global_step)
best_metric_global_step = int(best_metric_global_step)
metric_value = float(metric_value)
else:
metric_value = -1
best_metric_global_step = -1
global_step = -1
tf.logging.info(
"Best trial info: Step: %s, Best Value Step: %s, "
"Best Value: %s", global_step, best_metric_global_step, metric_value)
return global_step, best_metric_global_step, metric_value
def _remove_checkpoint(checkpoint_path):
for ext in ["meta", "data-00000-of-00001", "index"]:
src_ckpt = checkpoint_path + ".{}".format(ext)
tf.logging.info("removing {}".format(src_ckpt))
tf.gfile.Remove(src_ckpt)
def _find_valid_cands(curr_step):
filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
candidates = []
for filename in filenames:
if filename.endswith(".index"):
ckpt_name = filename[:-6]
idx = ckpt_name.split("-")[-1]
if int(idx) > curr_step:
candidates.append(filename)
return candidates
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
if task_name == "sts-b":
key_name = "pearson"
elif task_name == "cola":
key_name = "matthew_corr"
else:
key_name = "eval_accuracy"
global_step, best_perf_global_step, best_perf = _best_trial_info()
writer = tf.gfile.GFile(output_eval_file, "w")
while global_step < FLAGS.train_step:
steps_and_files = {}
filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
for filename in filenames:
if filename.endswith(".index"):
ckpt_name = filename[:-6]
cur_filename = os.path.join(FLAGS.output_dir, ckpt_name)
if cur_filename.split("-")[-1] == "best":
continue
gstep = int(cur_filename.split("-")[-1])
if gstep not in steps_and_files:
tf.logging.info("Add {} to eval list.".format(cur_filename))
steps_and_files[gstep] = cur_filename
tf.logging.info("found {} files.".format(len(steps_and_files)))
if not steps_and_files:
tf.logging.info("found 0 file, global step: {}. Sleeping."
.format(global_step))
time.sleep(60)
else:
for checkpoint in sorted(steps_and_files.items()):
step, checkpoint_path = checkpoint
if global_step >= step:
if (best_perf_global_step != step and
len(_find_valid_cands(step)) > 1):
_remove_checkpoint(checkpoint_path)
continue
result = estimator.evaluate(
input_fn=eval_input_fn,
steps=eval_steps,
checkpoint_path=checkpoint_path)
global_step = result["global_step"]
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
writer.write("best = {}\n".format(best_perf))
if result[key_name] > best_perf:
best_perf = result[key_name]
best_perf_global_step = global_step
elif len(_find_valid_cands(global_step)) > 1:
_remove_checkpoint(checkpoint_path)
writer.write("=" * 50 + "\n")
writer.flush()
with tf.gfile.GFile(best_trial_info_file, "w") as best_info:
best_info.write("{}:{}:{}".format(
global_step, best_perf_global_step, best_perf))
writer.close()
for ext in ["meta", "data-00000-of-00001", "index"]:
src_ckpt = "model.ckpt-{}.{}".format(best_perf_global_step, ext)
tgt_ckpt = "model.ckpt-best.{}".format(ext)
tf.logging.info("saving {} to {}".format(src_ckpt, tgt_ckpt))
tf.io.gfile.rename(
os.path.join(FLAGS.output_dir, src_ckpt),
os.path.join(FLAGS.output_dir, tgt_ckpt),
overwrite=True)
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(classifier_utils.PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
classifier_utils.file_based_convert_examples_to_features(
predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file, task_name)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = classifier_utils.file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder,
task_name=task_name,
use_tpu=FLAGS.use_tpu,
bsz=FLAGS.predict_batch_size)
checkpoint_path = os.path.join(FLAGS.output_dir, "model.ckpt-best")
result = estimator.predict(
input_fn=predict_input_fn,
checkpoint_path=checkpoint_path)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
output_submit_file = os.path.join(FLAGS.output_dir, "submit_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as pred_writer,\
tf.gfile.GFile(output_submit_file, "w") as sub_writer:
sub_writer.write("index" + "\t" + "prediction\n")
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, (example, prediction)) in\
enumerate(zip(predict_examples, result)):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
pred_writer.write(output_line)
if task_name != "sts-b":
actual_label = label_list[int(prediction["predictions"])]
else:
actual_label = str(prediction["predictions"])
sub_writer.write(example.guid + "\t" + actual_label + "\n")
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if FLAGS.export_dir:
tf.gfile.MakeDirs(FLAGS.export_dir)
checkpoint_path = os.path.join(FLAGS.output_dir, "model.ckpt-best")
tf.logging.info("Starting to export model.")
subfolder = estimator.export_saved_model(
export_dir_base=FLAGS.export_dir,
serving_input_receiver_fn=_serving_input_receiver_fn,
checkpoint_path=checkpoint_path)
tf.logging.info("Model exported to %s.", subfolder)
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("spm_model_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
|
[
"noreply@github.com"
] |
nikhil-krishna.noreply@github.com
|
cf8cf08e7b21f0e531c165687dd44ac0c4bc71f9
|
478fc6021f7d26137a33ab1df102d4b518d1dda4
|
/myenv/bin/gunicorn
|
eb1bb784fc0549eaba2afbd58d51847aa2896b58
|
[] |
no_license
|
ItaloPerez2019/GYM_tracker
|
bd45740d7a34e7238df282ad33533cc3f310e31b
|
622e7c85ac783ddb6196d44e726b75669e3ed525
|
refs/heads/master
| 2022-12-06T17:47:30.021706
| 2020-01-15T23:07:12
| 2020-01-15T23:07:12
| 234,186,109
| 0
| 0
| null | 2022-11-22T02:24:49
| 2020-01-15T22:28:39
|
Python
|
UTF-8
|
Python
| false
| false
| 262
|
#!/home/italo/Documents/gymDjango/GYM_tracker/myenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"iperezmba@gmail.com"
] |
iperezmba@gmail.com
|
|
d344a2055bcf1ac850fa0ceab4a023f8f296b1a2
|
59a4fb975ab3739a40254f52b6c248215d6925a0
|
/oopnotes/observer/observer.py
|
7930209c6d88b73c6faf9f00e5f6f372a020de7c
|
[] |
no_license
|
MargoSolo/stared-repos-p2
|
2356d1759dfc0d52b49e9a194bf22fa582418bcb
|
c88743a89805a3ffe9db4c4865aaab7fc5b03676
|
refs/heads/master
| 2023-08-23T21:44:29.686066
| 2021-09-11T16:32:56
| 2021-09-11T16:32:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
class Publisher:
def __init__(self):
self.observers = []
def add(self, observer):
if observer not in self.observers:
self.observers.append(observer)
else:
print(f'Failed to add: {observer}')
def remove(self, observer):
try:
self.observers.remove(observer)
except ValueError:
print(f'Failed to remove: {observer}')
def notify(self):
[o.notify(self) for o in self.observers]
class DefaultFormatter(Publisher):
def __init__(self, name):
Publisher.__init__(self)
self.name = name
self._data = 0
def __str__(self):
return f"{type(self).__name__}: '{self.name}' has data = {self._data}"
@property
def data(self):
return self._data
@data.setter
def data(self, new_value):
try:
self._data = int(new_value)
except ValueError as e:
print(f'Error: {e}')
else:
self.notify()
class HexFormatterObs:
def notify(self, publisher):
value = hex(publisher.data)
print(f"{type(self).__name__}: '{publisher.name}' has now hex data = {value}")
class BinaryFormatterObs:
def notify(self, publisher):
value = bin(publisher.data)
print(f"{type(self).__name__}: '{publisher.name}' has now bin data = {value}")
def main():
df = DefaultFormatter('test1')
print(df)
print()
hf = HexFormatterObs()
df.add(hf)
df.data = 3
print(df)
print()
bf = BinaryFormatterObs()
df.add(bf)
df.data = 21
print(df)
if __name__ == '__main__':
main()
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
01b6d5021de95d05dfdbea53e5955e31476f4b72
|
35707b0a7bcd675dac48c42adbcee4c565451a56
|
/book_project/page_app/admin.py
|
87cf72db36b2366ab410aeb1ff7fec8a4b2cd053
|
[
"Apache-2.0"
] |
permissive
|
cs-fullstack-2019-fall/django-models3-cw-b-Deltonjr2
|
85448dc8d1d8d4734267545ea468276e35b315b0
|
9cdec76e2fde3c85bee47cbd34da2545e4831e92
|
refs/heads/master
| 2020-08-01T05:54:16.920148
| 2019-09-25T21:10:50
| 2019-09-25T21:10:50
| 210,889,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
from django.contrib import admin
from .models import Book
# Register your models here.
admin.site.register(Book)
|
[
"Demetria.Farewell@yahoo.com"
] |
Demetria.Farewell@yahoo.com
|
fbb9a8da1a975242b6d67efcd8c9ff228d4b47ba
|
19b71348f25d62d1147cf45eda030788a8b988e4
|
/Class_OKEX_API.py
|
7efe8b7c8a6a81e58b3371cbdc4b7ae5b5133d7e
|
[] |
no_license
|
xiaomeixw/BTC_OKEX
|
a655648180811f11b8af0a748f5444507ecc1d94
|
e58743a14d89b642483f40057a3fe23e88de6079
|
refs/heads/master
| 2020-03-23T07:18:48.174503
| 2018-05-23T00:38:47
| 2018-05-23T00:38:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,437
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# encoding: utf-8
"""
Created on Thurs Feb 8 2018
@author: Sjymmd
E-mail:1005965744@qq.com
"""
#
from OkcoinSpotAPI import *
import pandas as pd
import time
import datetime
import warnings
import numpy as np
from config import apikey,secretkey
warnings.filterwarnings("ignore")
#
okcoinRESTURL = 'www.okex.com'
apikey=apikey
secretkey=secretkey
okcoinSpot = OKCoinSpot(okcoinRESTURL, apikey, secretkey)
okcoinfuture = OKCoinFuture(okcoinRESTURL, apikey, secretkey)
#
class Okex_Api:
def __init__(self):
self._Kline={'1min':'1min','3min':'3min','5min':'5min','15min':'15min','30min':'30min','1day':'1day','3day':'3day','1week':'1week','1hour':'1hour','2hour':'2hour','4hour':'4hour','6hour':'6hour','12hour':'12hour'}
self._Lenth = 24
self._KlineChosen = '4hour'
self._Watch_Coin = 'snt'
while True:
try:
self._USDT_CNY = okcoinfuture.exchange_rate()['rate']
break
except:
print('Get_USDT_Error~6.3')
self._USDT_CNY = 6.3
break
# time.sleep(60)
self._EndLenth = 0
def Input(self):
Str = '\n'.join(self._Kline.values())
        Input_Kline = input('Enter a K-line interval, choose one of\n %s\n(default 1hour):' % Str)
        if Input_Kline:
            self._KlineChosen = self._Kline[Input_Kline]
        Input_Num = input('Enter the number of candles (default 24):')
        if Input_Num:
            self._Lenth = Input_Num
        Input_Coin_Num = input('Enter how many coins to loop over (default %s):' % self._CoinLenth)
        if Input_Coin_Num:
            self._CoinLenth = Input_Coin_Num
        Input_Watch_Coin = input('Enter the coin to watch closely (default %s):' % self._Watch_Coin)
        if Input_Watch_Coin:
            self._Watch_Coin = Input_Watch_Coin
def GetCoin(self):
global true
true = ''
global false
false = ''
while True:
try:
CoinType = eval(okcoinSpot.userinfo())['info']['funds']['free']
break
except:
print('GetCoin_Error')
continue
Coin = []
for (key, value) in CoinType.items():
key = str(key + '_usdt')
Coin.append(key)
self._CoinLenth = len(Coin)
return Coin
def GetKline(self,Coin):
data = pd.DataFrame(okcoinSpot.getKline(self._Kline[self._KlineChosen], self._Lenth, self._EndLenth, Coin)).iloc[:, ]
data = data.iloc[:-1,:]
data[5] = data[5].apply(pd.to_numeric)
if data.iloc[-1, 5] < 1000:
            # print('skip: trading volume in the previous hour is below 1K')
return 0,0,0,0,0,0
else:
data = data[data[5]>=1000]
data.reset_index(drop=True)
Increase = (float(data.iloc[ -1, 4]) - float(data.iloc[0, 1])) / float(data.iloc[0, 1]) * 100
Increase = str('%.2f'%(Increase)+'%')
price = float(data.iloc[- 1, 4])
Cny = round(price*self._USDT_CNY,2)
Volume = data[5]
# Volume = data.iloc[:, 5].apply(pd.to_numeric)
Volume_Mean = round(Volume.mean()/1000,2)
Volume_Pre = round(Volume.iloc[-1]/1000,2)
Volume_Pre_P = int(((Volume.iloc[-1]/Volume.iloc[-2])-1)*100)
Volume_Inc = int(((Volume_Pre-Volume_Mean)/Volume_Mean)*100)
return Cny,Increase,Volume_Mean,Volume_Pre,Volume_Pre_P,Volume_Inc
def GetDataframe(self,DataFrame,Coin):
Cny, Increase, Volume_Mean, Volume_Pre, Volume_Pre_P,Volume_Inc = self.GetKline(Coin)
Timeshrft = pd.Series({'Coin': Coin, 'Cny': Cny, 'Inc': Increase, 'Volume_Pre_K': Volume_Pre,
'Mean_Volume_K': Volume_Mean, '_VolumeS': Volume_Pre_P,'_VolumeM':Volume_Inc})
DataFrame = DataFrame.append(Timeshrft, ignore_index=True)
return DataFrame
def GetDataCoin(self,Coin,Clean = False):
try:
DataFrame = pd.DataFrame(columns=(
"Coin", "Cny", "High", "Low", "Inc", "Volume_Pre_K", "Mean_Volume_K", "_VolumeS", "_VolumeM","Highest"))
data = pd.DataFrame(
okcoinSpot.getKline(self._Kline[self._KlineChosen], self._Lenth, self._EndLenth,
Coin)).iloc[:self._Lenth, ]
data[5] = data.iloc[:, 5].apply(pd.to_numeric)
if Clean:
data = data[data[5] >= 1000]
data = data.reset_index(drop=True)
Increase = (float(data.iloc[0, 4]) - float(data.iloc[0, 1])) / float(data.iloc[0, 1]) * 100
# Increase = str('%.2f' % (Increase) + '%')
price = float(data.iloc[0, 4])
Hi_price = float((data.iloc[0, 2])) * self._USDT_CNY
Lo_price = float((data.iloc[0, 3])) * self._USDT_CNY
Cny = price * self._USDT_CNY
Volume = float(data.iloc[0, 5])
Volume_Mean = Volume / 1000
Volume_Pre = Volume / 1000
Volume_Pre_P = 0
Highest = float(max([Cny]))
if Volume_Mean == 0:
Volume_Inc = 0
else:
Volume_Inc = ((Volume_Pre - Volume_Mean) / Volume_Mean)
Timeshrft = pd.Series({'Coin': Coin, 'Cny': Cny, 'High': Hi_price, 'Low': Lo_price, 'Inc': Increase,
'Volume_Pre_K': Volume_Pre,
'Mean_Volume_K': Volume_Mean, '_VolumeS': Volume_Pre_P, '_VolumeM': Volume_Inc,'Highest':Highest})
DataFrame = DataFrame.append(Timeshrft, ignore_index=True)
for lenth in range(1, len(data)-1):
try:
Increase = (float(data.iloc[lenth, 4]) - float(data.iloc[0, 1])) / float(data.iloc[0, 1]) * 100
# Increase = str('%.2f' % (Increase) + '%')
price = float(data.iloc[lenth, 4])
Hi_price = float((data.iloc[lenth, 2])) * self._USDT_CNY
Lo_price = float((data.iloc[lenth, 3])) * self._USDT_CNY
Cny = price * self._USDT_CNY
Volume = data.iloc[:lenth + 1, 5].apply(pd.to_numeric)
Volume_Mean = Volume.mean() / 1000
Volume_Pre = Volume.iloc[lenth] / 1000
Volume_Pre_P = (Volume[lenth] / Volume[lenth - 1]) - 1
Volume_Inc = ((Volume_Pre - Volume_Mean) / Volume_Mean)
Highest = float(max(data.iloc[:lenth+1, 4]))*self._USDT_CNY
Timeshrft = pd.Series(
{'Coin': Coin, 'Cny': Cny, 'High': Hi_price, 'Low': Lo_price, 'Inc': Increase,
'Volume_Pre_K': Volume_Pre,
'Mean_Volume_K': Volume_Mean, '_VolumeS': Volume_Pre_P, '_VolumeM': Volume_Inc,'Highest':Highest})
DataFrame = DataFrame.append(Timeshrft, ignore_index=True)
except:
break
if Clean != True:
for x in range(len(DataFrame)):
if np.isnan(DataFrame.iloc[x, -3]):
DataFrame.iloc[x, -3] = DataFrame.iloc[x - 1, -3]
elif np.isinf(DataFrame.iloc[x, -3]):
DataFrame.iloc[x, -3] = 1000
if pd.isnull(DataFrame.iloc[x, -2]):
DataFrame.iloc[x, -2] = 0
return DataFrame
# print(DataFrame)
except:
time.sleep(5)
print('%s error' % Coin)
def Run(default = True):
Main = Okex_Api()
try:
Coin = Main.GetCoin()
now = datetime.datetime.now()
now = now.strftime('%Y-%m-%d %H:%M:%S')
print(now)
StartTime = time.time()
except:
time.sleep(5)
print('MainGetCoin_Error')
if default :
Main.Input()
else:
        print('Using the default parameter configuration')
DataFrame = pd.DataFrame(columns=("Coin", "Cny", "Inc", "Volume_Pre_K", "Mean_Volume_K", "_VolumeS", "_VolumeM"))
for x in Coin[:int(Main._CoinLenth)]:
try:
DataFrame = Main.GetDataframe(DataFrame, x)
except:
            # print('%s failed to load' % x)
continue
DataFrame['Volume_Cny_K'] =DataFrame['Cny']*DataFrame['Mean_Volume_K']
Mean_Mean_Volume_K = DataFrame['Volume_Cny_K'].mean()
DataFrame = DataFrame[DataFrame.Volume_Cny_K >=Mean_Mean_Volume_K]
DataFrame = DataFrame[DataFrame._VolumeS >1]
DataFrame = DataFrame.sort_values(by='_VolumeS', ascending=False)
DataFrame.pop('Volume_Cny_K')
DataFrame = DataFrame.iloc[:10, ]
Watch_Coin = str(Main._Watch_Coin + '_usdt')
DataFrame = Main.GetDataframe(DataFrame, Watch_Coin)
DataFrame =DataFrame.drop_duplicates(['Coin'])
DataFrame = DataFrame.sort_values(by='_VolumeS', ascending=False)
DataFrame=DataFrame.reset_index(drop=True)
for x in (DataFrame.index):
for columns in (-2,-1):
DataFrame.iloc[x, columns] = str('%d' %DataFrame.iloc[x, columns] + '%')
if DataFrame.empty:
        print('No coin matches the criteria')
        wechatmsg = 'No coin matches the criteria'
else:
print(DataFrame)
wechatmsg =DataFrame.to_string()
now = datetime.datetime.now()
now = now.strftime('%Y-%m-%d %H:%M:%S')
Wechat.msg(now)
Wechat.msg(wechatmsg)
EndTime = time.time()
print('Using_Time: %d sec'%int(EndTime - StartTime))
if __name__=='__main__':
from Class_Wechat import Wechat
Wechat = Wechat('Initializing Robot','@@98e2290e631e5dceb8d91aab05775454e78f94640ba3ab2c7e7de23c2840f6b6')
def job():
Run(False)
from apscheduler.schedulers.blocking import BlockingScheduler
sched = BlockingScheduler()
while True:
sched.add_job(job,'cron', minute = 5)
# sched.add_job(job, 'interval', seconds=30)
try:
sched.start()
except:
            print('Scheduled job failed')
time.sleep(20)
continue
# print(okcoinSpot.ticker('btc_usdt')['ticker']['last'])
# Okex_Api = Okex_Api()
# Okex_Api._KlineChosen = '4hour'
# data = Okex_Api.GetDataCoin('snt_usdt')
# print(data)
|
[
"1005965744@qq.com"
] |
1005965744@qq.com
|
0529a4a97890fe8ef779719100db7ca91eadb88c
|
085b9550dfbcd2d82eafa80d2c1699f9ecf50933
|
/scrapy/bin/scrapy
|
eb5d678caeeb4233a7ac24ac3b86313a3df742bd
|
[] |
no_license
|
joelstanner/realpython-book2-client-side-API
|
f0aaa0e7fe070d5181de85df53f99fddab7aab09
|
0c98d847ce074b9b08474fbd145eb42fdc0ad56e
|
refs/heads/master
| 2020-04-14T14:36:26.377059
| 2014-07-01T08:56:47
| 2014-07-01T08:56:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
#!/Users/Joel/Documents/python_programs/real_python/book2/RealPython_book2/scrapy/bin/python
from scrapy.cmdline import execute
execute()
|
[
"eyftickid@poolbath1.33mail.com"
] |
eyftickid@poolbath1.33mail.com
|
|
b3de0aab9ec822c86d8438c1bf79250104733ec8
|
ee0d36aec70912b99fc282f1de9f33431967cbe4
|
/ray_casting.py
|
c8ce84988f0e41c35bed389dc9c4587aa8c8a035
|
[] |
no_license
|
FasePlay/RayCastingPythonBug
|
645268fe4652c0744c9bf2c6f2cd108d36f00aec
|
029a4a467390825b3764af6f670fd1ff8d48b32c
|
refs/heads/master
| 2022-12-17T00:54:32.234678
| 2020-09-13T18:50:26
| 2020-09-13T18:50:26
| 294,997,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,442
|
py
|
import pygame
from settings import *
from map import world_map
# def ray_casting(sc, player_pos, player_angle):
# cur_angle = player_angle - HALF_FOV
# xo, yo = player_pos
# for ray in range(NUM_RAYS):
# sin_a = math.sin(cur_angle)
# cos_a = math.cos(cur_angle)
# for depth in range(MAX_DEPTH):
# x = xo + depth * cos_a
# y = yo + depth * sin_a
# if (x // TILE * TILE, y // TILE * TILE) in world_map:
# depth *= math.cos(player_angle - cur_angle)
# proj_height = PROJ_COEFF / depth
# c = 255 / (1 + depth * depth * 0.00002)
# color = (c, c // 2, c // 3)
# pygame.draw.rect(sc, color, (ray * SCALE, HALF_HEIGHT - proj_height // 2, SCALE, proj_height))
# break;
# # pygame.draw.line(sc, DARKGRAY, player_pos, (x, y), 2)
# cur_angle += DELTA_ANGLE
def mapping(a, b):
return (a // TILE) * TILE, (b // TILE) * TILE
def ray_casting(sc, player_pos, player_angle, texture):
ox, oy = player_pos
xm, ym = mapping(ox, oy)
cur_angle = player_angle - HALF_FOV
for ray in range(NUM_RAYS):
sin_a = math.sin(cur_angle)
cos_a = math.cos(cur_angle)
#verticals
x, dx = (xm + TILE, 1) if cos_a >= 0 else (xm, -1)
for i in range(0, WIDTH, TILE):
depth_v = (x - ox) / cos_a
yv = oy + depth_v * sin_a
if mapping(x + dx, yv) in world_map:
break
x += dx * TILE
        # horizontals
y, dy = (ym + TILE, 1) if sin_a >= 0 else (ym, -1)
for i in range(0, HEIGHT, TILE):
depth_h = (y - oy) / sin_a
xh = ox + depth_h * cos_a
if mapping(xh, y + dy) in world_map:
break
y += dy * TILE
#projection
depth, offset = (depth_v, yv) if depth_v < depth_h else (depth_h, xh)
offset = int(offset) % TILE
depth *= math.cos(player_angle - cur_angle)
depth = max(depth, 1)
proj_height = min(int(PROJ_COEFF / depth), 2 * HEIGHT)
wall_column = texture.subsurface(offset * TEXTURE_SCALE, 0, TEXTURE_SCALE, TEXTURE_HEIGHT)
wall_column = pygame.transform.scale(wall_column, (SCALE, proj_height))
sc.blit(wall_column, (ray * SCALE, HALF_HEIGHT - proj_height // 2))
cur_angle += DELTA_ANGLE
|
[
"playfase228@gmail.com"
] |
playfase228@gmail.com
|
d81e74062c20af675813df97d7c16928f0a75878
|
26e4bea46942b9afa5a00b9cde9a84f2cc58e3c9
|
/pygame/makinggamewithpygame/slidepuzzle/slidepuzzle3.py
|
ac2d35918d1f4d75137ff2de51b25753f155e5cb
|
[] |
no_license
|
MeetLuck/works
|
46da692138cb9741a913d84eff6822f107510dc7
|
ab61175bb7e2ed5c5113bf150e0541ae18eb04c4
|
refs/heads/master
| 2020-04-12T05:40:25.143075
| 2017-08-21T17:01:06
| 2017-08-21T17:01:06
| 62,373,576
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,213
|
py
|
from Component import *
import winsound
def main():
pygame.init()
surface = pygame.display.set_mode(resolution)
pygame.display.set_caption('Slide Puzzle')
fpsclock = pygame.time.Clock()
mainboard = Board(surface)
while True:
slideTo = None
if mainboard.isSolved():
print 'Solved'
winsound.Beep(random.randint(5,20)*50,100)
surface.fill(bgcolor)
mainboard.drawBoard(surface)
checkForQuit()
if checkForKeyUp():
slideTo = checkForKeyUp()
for e in pygame.event.get():
if e.type == MOUSEBUTTONUP:
boardpos = mainboard.converToBoardPos(e.pos[0],e.pos[1])
if boardpos == boardPos(None,None):
pass
else: # check if the clicked tile was next to the blank spot
blank = mainboard.getBlankTile()
if boardpos.x == blank.x + 1 and boardpos.y == blank.y:
slideTo = left
elif boardpos.x == blank.x - 1 and boardpos.y == blank.y:
slideTo = right
elif boardpos.x == blank.x and boardpos.y == blank.y + 1:
slideTo = up
elif boardpos.x == blank.x and boardpos.y == blank.y -1:
slideTo = down
if slideTo:
winsound.Beep(1000,10)
mainboard.makeMove(slideTo)
mainboard.sequence.append(slideTo) # record the slide
pygame.display.update()
fpsclock.tick(fps)
# --------------------------------- helper fuctions ----------------------------
def checkForQuit():
for e in pygame.event.get(QUIT):
pygame.quit(); sys.exit()
def checkForKeyUp():
slideTo = None
for e in pygame.event.get(KEYUP):
if e.key == K_ESCAPE: pygame.quit(); sys.exit()
elif e.key in (K_LEFT,K_a): slideTo = left
elif e.key in (K_RIGHT,K_d): slideTo = right
elif e.key in (K_UP,K_w): slideTo = up
elif e.key in (K_DOWN,K_s): slideTo = down
pygame.event.post(e)
return slideTo
if __name__ == '__main__':
main()
|
[
"withpig1994@hanmail.net"
] |
withpig1994@hanmail.net
|
f8b91229f9a677c3675abec47966a02723258804
|
37b478a9da286b65cdf71de02796905aead6c6f0
|
/twitter.py
|
5b5e9d3dec1bebe2477f7dabb18b9e7a329214bb
|
[] |
no_license
|
jeaniewhang/mywork
|
785e13652f9336558868ac422c0ae645b32d3e3f
|
1cf8c4812cd31a6a3834e34cbca99f5837c6064c
|
refs/heads/master
| 2020-03-24T21:57:01.321551
| 2018-07-31T19:53:27
| 2018-07-31T19:53:27
| 143,058,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
import tweepy
# Keys and Access Tokens
CONSUMER_KEY = 'otMhi9PbrOtx6SxN7hRxu6U1J'
CONSUMER_SECRET = 'oIiGNxr9H2DG89qPE5OYUdRrE7uigZrqo6KhKuffIhBHiSZxr1'
ACCESS_TOKEN = '1017154796027969536-YLdIxq7kMgHvP72BT9pfWzIXnrBGsp'
ACCESS_SECRET = 'Q4RyyMZMHpyfoP3y5M2qfzvXEeUOTT43F8GKgVn1NioGJ'
# Authentication
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
# Update Status
api.update_status("send help sos")
|
[
"noreply@github.com"
] |
jeaniewhang.noreply@github.com
|
a0cd9489c8746040a371a0a2b32e669e01919a90
|
6e449028abb5050dc2d39cc7066dfaf752113f1e
|
/test/systemutil/test_itertools.py
|
03145b6e8456a4f4782a1f695d06e4dca8aabaaa
|
[] |
no_license
|
horacn/test_python
|
3f88fde0b4fe7d839c58f68a063591efaf508189
|
e62c118f9038d0694007a85bed5e340c9b5a3433
|
refs/heads/master
| 2021-09-15T05:54:46.334840
| 2018-05-27T13:23:12
| 2018-05-27T13:23:12
| 127,876,523
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,760
|
py
|
# itertools | tools for working with iterators
import itertools
# natuals = itertools.count(1)
# for n in natuals:
#     print(n)
# count() creates an infinite iterator, so the code above would print the natural numbers forever; only Ctrl+C stops it
# cycle() repeats the given sequence endlessly
# cs = itertools.cycle('ABC')  # note that a string is also a sequence
# for c in cs:
#     print(c)
# repeat() repeats a single element endlessly, unless a second argument limits the number of repetitions
ns = itertools.repeat("A", 3)
for n in ns:
    print(n)
# Infinite sequences can be iterated without end, but usually we use takewhile() or similar functions to cut out a finite sequence based on a condition
natuals = itertools.count(1)
ns = itertools.takewhile(lambda x: x <= 10, natuals)
print(list(ns))
# Several of the iterator-combining functions provided by itertools are even more useful:
#
# chain()
# chain() concatenates a group of iterables into one larger iterator
for c in itertools.chain('ABC', 'XYZ'):
    print(c)
# groupby()
for key, group in itertools.groupby('AAABBBCCAAA'):
    print(key, list(group))
for key, group in itertools.groupby('AaABbBbCcAaa', lambda c: c.upper()):
    print(key, list(group))
# Exercise: approximate pi (Leibniz series)
def pi(N):
    # count() is enough here: start at 1 and add 2 for each term
    odd = itertools.count(1, 2)
    # take the first N terms with takewhile()
    needed = itertools.takewhile(lambda x: x < 2 * N, odd)
    terms = [x for x in needed]
    # sum the positive and negative terms separately
    return (sum(4 / x for x in terms if x % 4 == 1) + sum(-4 / x for x in terms if x % 4 == 3))
print(pi(100000))
# Summary
# Everything itertools provides is a function that works on iteration; the return values are not lists but Iterators, and the values are only computed when actually iterated over, e.g. in a for loop.
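# --- Illustrative addition (not part of the original snippet): a minimal sketch of that laziness ---
# islice() slices an infinite iterator without ever materializing it as a list; only standard
# itertools functions are used here, nothing project-specific is assumed.
lazy_squares = (n * n for n in itertools.count(1))    # nothing is computed yet
first_five = list(itertools.islice(lazy_squares, 5))  # values are produced only at this point
print(first_five)  # [1, 4, 9, 16, 25]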
|
[
"1439293823@qq.com"
] |
1439293823@qq.com
|
603482f7e93af7ea9da0940c4cdddbddf5a7a810
|
589a4d5f9e7222d9f38b92affd27acb06c7c577d
|
/examples/count_vec.py
|
4d0d8137893c9d5101ce34c06ae2d910af929565
|
[
"MIT"
] |
permissive
|
Swayam003/DVC_NLP_Practice
|
2ba17616673ee65774e983ad90bf0f6d71583f8d
|
93c9c50502be0c0800b789b3ae60aa31af711218
|
refs/heads/main
| 2023-08-24T16:20:43.092705
| 2021-11-08T16:09:38
| 2021-11-08T16:09:38
| 422,960,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,356
|
py
|
## REFERENCE https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html
from sklearn.feature_extraction.text import CountVectorizer
corpus = [
"apple ball cat",
"ball cat dog",
]
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(corpus)
print(f"Converting it to distinct vectors: \n {vectorizer.get_feature_names_out()} ")
print(f"Converting it to array format: \n {X.toarray()}")
""" Terminal Solutions :-
Converting it to distinct vectors:
['apple' 'ball' 'cat' 'dog']
Converting it to array format:
[[1 1 1 0]
[0 1 1 1]]
"""
max_features = 100 ## no of words you want to consider
ngrams = 3 ## pair of words you want to consider
#Converting text to bags of words
vectorizer2 = CountVectorizer(max_features=max_features, ngram_range=(1, ngrams))
X2 = vectorizer2.fit_transform(corpus)
print(f"Bags of Words: \n {vectorizer2.get_feature_names_out()}")
print(f"Converting it to array format: \n {X2.toarray()}")
""" Terminal Solutions :-
Bags of Words:
['apple' 'apple ball' 'apple ball cat' 'ball' 'ball cat' 'ball cat dog'
'cat' 'cat dog' 'dog']
Converting it to array format:
[[1 1 1 1 1 0 1 0 0]
[0 0 0 1 1 1 1 1 1]]
"""
""" Another example of words you want to consider :-
corpus = [
"Zebra apple ball cat cat",
"ball cat dog elephant",
"very very unique"
]"""
|
[
"swayam.roxx@gmail.com"
] |
swayam.roxx@gmail.com
|
6f019df06f5a479239416d6f5a28e2f5aaaf99e2
|
09d4bd9f455c070c8bb8cac140b094598637ca38
|
/bin_plot_example.py
|
d84a759b9cb7718b9f7222a6f65a75aff2b27a0b
|
[] |
no_license
|
RossHart/astro_codes
|
2f26c1179d8d5a2917c468df69308a32ab9c633c
|
d9c5fac26a77f2e58e32f6bc8c16e7a25d92571c
|
refs/heads/master
| 2021-01-09T20:22:29.947825
| 2018-07-02T10:49:05
| 2018-07-02T10:49:05
| 59,846,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,430
|
py
|
from astropy.table import Table
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy.stats import binned_statistic
def bin_by_column(column, nbins, fixedcount=True):
sorted_indices = np.argsort(column)
if fixedcount:
bin_edges = np.linspace(0, 1, nbins + 1)
bin_edges[-1] += 1
values = np.empty(len(column))
values[sorted_indices] = np.linspace(0, 1, len(column))
bins = np.digitize(values, bins=bin_edges)
else:
bin_edges = np.linspace(np.min(column),np.max(column), nbins + 1)
bin_edges[-1] += 1
values = column
bins = np.digitize(values, bins=bin_edges)
x, b, n = binned_statistic(values, column, bins=bin_edges)
return x, bins
def get_fraction_and_error(column_data,bins):
bv = np.unique(bins)
Nb = len(bv)
values = np.zeros((Nb,2))
for n,b in enumerate(bv):
col_z = column_data[bins == b]
values[n] = [np.mean(col_z),np.std(col_z)/np.sqrt(len(col_z))]
values = Table(values,names=('mean','sigma'))
return values
x = np.linspace(0,100,100)
y = x**2 + 10*x*np.random.randn(len(x))
x_plot, bins = bin_by_column(x,10,fixedcount=True)
values = get_fraction_and_error(y,bins)
_ = plt.plot(x_plot,values['mean'])
_ = plt.fill_between(x_plot,values['mean']-values['sigma'],values['mean']+values['sigma'],alpha=0.5)
_ = plt.scatter(x,y)
plt.show()
|
[
"ross.hart@nottingham.ac.uk"
] |
ross.hart@nottingham.ac.uk
|
7f6042ee7b7b90ac27e0ba2be20e4ee4f6491b7d
|
5e4b59b62bc596caedef40640155741cc26db3c6
|
/日常/766.py
|
5fab7be3afe7dba2804d21775afb6688e23aa6b3
|
[] |
no_license
|
kz33/leetcode_daily
|
5ebca018fdba7ff0b5ee460c26bd99d55c02e2fb
|
7dd23adccef128802137748e14e5106b6c63c6e0
|
refs/heads/master
| 2021-07-15T07:07:31.700699
| 2020-06-11T08:52:58
| 2020-06-11T08:52:58
| 167,475,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,603
|
py
|
# A matrix is a Toeplitz matrix if every diagonal running from top-left to bottom-right has the same elements.
#
# Given an M x N matrix, return True if and only if it is a Toeplitz matrix.
#
# Example 1:
#
# Input:
# matrix = [
#   [1,2,3,4],
#   [5,1,2,3],
#   [9,5,1,2]
# ]
# Output: True
# Explanation:
# The diagonals of the matrix above are:
# "[9]", "[5, 5]", "[1, 1, 1]", "[2, 2, 2]", "[3, 3]", "[4]".
# All elements on each diagonal are the same, so the answer is True.
# Example 2:
#
# Input:
# matrix = [
#   [1,2],
#   [2,2]
# ]
# Output: False
# Explanation:
# The elements on the diagonal "[1, 2]" are not the same.
# Notes:
#
# matrix is a two-dimensional array of integers.
# The number of rows and columns of matrix is in the range [1, 20].
# matrix[i][j] contains integers in the range [0, 99].
# Follow-up:
#
# What if the matrix is stored on disk and memory is limited, so that at most one row of the matrix can be loaded into memory at a time?
# What if the matrix is so large that only part of a row can be loaded into memory at a time?
class Solution(object):
def isToeplitzMatrix(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: bool
"""
l = len(matrix)
if l < 2:
return True
for i in range(l - 1):
before = matrix[i][:-1]
after = matrix[i + 1][1:]
if before != after:
return False
return True
s = Solution()
matrix = [
[36,59,71,15,26,82,87],
[56,36,59,71,15,26,82],
[15,0,36,59,71,15,26]
]
a = s.isToeplitzMatrix(matrix)
print(a)
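# --- Illustrative sketch for the follow-up question above (assumes rows arrive one at a time) ---
# When only one row fits in memory, it suffices to keep the previous row and compare it, shifted by
# one position, against the current row -- the same check the Solution class performs on whole rows.
def is_toeplitz_streaming(row_iter):
    prev = None
    for row in row_iter:
        if prev is not None and prev[:-1] != row[1:]:
            return False
        prev = row
    return True

print(is_toeplitz_streaming(iter(matrix)))  # False, matching the result printed above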
|
[
"zhangjiekun@caicloud.io"
] |
zhangjiekun@caicloud.io
|
40890c9e4989332177063a704eedfea45b0993ad
|
b0fdc04fab517802ca3277d19099c61211a609f5
|
/mediablog/migrations/0005_auto_20200626_1249.py
|
865bea71eb36f614dc089cc5fa83d8689cae614a
|
[] |
no_license
|
js-tutul/Jsblog
|
38aff00d9be652a9f83e30ff3058acaf5a04dbed
|
da001fd7eac1a60e1785669f96cf2dbf73212b33
|
refs/heads/master
| 2022-12-12T16:24:38.466319
| 2020-09-12T17:49:14
| 2020-09-12T17:49:14
| 275,648,059
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
# Generated by Django 2.2.7 on 2020-06-26 19:49
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('mediablog', '0004_allreact'),
]
operations = [
migrations.AlterUniqueTogether(
name='reaction',
unique_together={('user', 'post')},
),
migrations.DeleteModel(
name='Allreact',
),
]
|
[
"jstutul90.gmail.com"
] |
jstutul90.gmail.com
|
e56a8b3b3612b9d0e057873ce14bdc8b1cfde052
|
742c4381ba2d6f87e181db77a8c73d042835f381
|
/experiment_2/src/ml_helpers/.ipynb_checkpoints/make_ml_dataset-checkpoint.py
|
a2de20cff6a4f200f6d2f0b511b1b3cd9b4ea84c
|
[] |
no_license
|
unhcr/Jetson
|
58aff3f1387c62fc6989c4cd0ed6d5fccad0767f
|
86e63272c700cac5734213d70f893f11e3fa9593
|
refs/heads/master
| 2023-05-25T08:28:27.987887
| 2023-05-17T17:40:56
| 2023-05-17T17:40:56
| 111,537,294
| 21
| 7
| null | 2022-12-09T08:21:44
| 2017-11-21T11:00:16
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,822
|
py
|
import pandas as pd
import numpy as np
from dateutil.relativedelta import *
import os
def make_ml_dataset(df, current_month, lag=3, admin_level='admin1', shifts=[3,4,5,6,12]):
################################
### Read in the data
df = pd.read_csv(f"data/compiled/master_{admin_level}.csv", parse_dates=['date'], index_col=['date', 'region'])
if admin_level=='admin1':
admin_unit='region'
################################
### Set up the month lags
# Get list of current and future months
current_month = pd.to_datetime(current_month)
future_months = [ current_month + relativedelta(months=i) for i in range(1, lag+1)]
# Insert extra regions and dates for lagged predictions
regions = df.index.get_level_values(admin_unit)
for d in future_months:
for r in regions:
            df.loc[(d, r), :] = np.nan  # .loc replaces the removed DataFrame.ix indexer
# df.dropna(subset=['arrivals'], inplace=True)
df.sort_index(inplace=True)
################################
### Create features
constant_cols = ['distance_straight',
'shared_border',
'distance_driving_km',
'distance_driving_hr'] #i for i in df.columns if "riverlevel" in i or 'distance' in i or 'shared_border' in i]
river_cols = [i for i in df.columns if "river_" in i ]
varying_cols = [i for i in df.columns if i not in constant_cols and i not in river_cols]
################################
### First, add features that don't need to be shifted
# Initialize dataframe with constant columns
learn_df = df[['arrivals']].copy()
# One-hot encode the regions and months
learn_df['region_dummies'] = learn_df.index.get_level_values(admin_unit).astype(str)
learn_df['month_dummies'] = learn_df.index.get_level_values('date' ).month.astype(str)
learn_df = pd.get_dummies(learn_df)
# Linear time var
learn_df['months_since_2010'] = (learn_df.index.get_level_values('date').to_period('M') -
pd.to_datetime('2010-01-01').to_period('M'))
################################
### Then, add the shift for the target region
for n in shifts:
shifted_df = df.groupby(level=admin_unit).shift(n)
shifted_df.columns = [i + "_lag" + str(n) for i in shifted_df.columns]
learn_df = pd.concat([learn_df, shifted_df], axis=1, join='outer')
################################
### And, add the historical mean values (with a shift of n) for the target region
hm= df.unstack(level=admin_unit).rolling(window=12, center=False).mean().stack(dropna=False)
hm.columns = [i+f'_hm{lag}' for i in hm.columns]
# Shift it backwards
hm = hm.groupby(admin_unit).shift(lag)
learn_df = pd.concat([learn_df, hm], axis=1, join='outer')
### Shifted values of the data <- for all other regions
for n in shifts:
shift = df[varying_cols].copy()
shift.columns = [i + "_lag" + str(n) for i in shift.columns]
shift = shift.unstack(level=admin_unit).shift(n)
shift.columns = ['_'.join(col).strip() for col in shift.columns.values]
learn_df.reset_index(level=admin_unit, inplace=True)
learn_df = pd.concat([learn_df, shift], axis=1, join='outer')
learn_df.set_index(admin_unit, inplace=True, append=True)
################################
### One-hot encode the missing values
cols = [i for i in learn_df.columns if i!='arrivals']
for c in cols:
if learn_df[c].isna().max()==True:
learn_df[f'miss_{c}'] = np.where(learn_df[c].isna(),1,0)
#learn_df[c] = learn_df[c].fillna(0)
## Pare down dataset
# Since 2011-01-01
start_prmn = pd.to_datetime('2011-01-01')
start_df = start_prmn + pd.DateOffset(months=lag)
learn_df = learn_df.loc[start_df:]
# Remove columns which are completely missing
learn_df.dropna(axis=1, how='all', inplace=True)
# Remove columns which never vary
keep = [c for c in learn_df.columns if len(learn_df[c].unique()) > 1]
learn_df = learn_df[keep]
# Remove columns which are missing the target variable (arrivals) and are in the past
learn_df = learn_df[
(learn_df.arrivals.isna() &
(learn_df.index.get_level_values('date') <= current_month))==False
].copy()
## Save
learn_df.to_csv(f"ml/input_data/learn_df_{admin_level}_lag{lag}.csv")
if not os.path.exists(f"ml/output_data/{admin_level}_lag{lag}/"):
os.mkdir(f"ml/output_data/{admin_level}_lag{lag}/")
learn_df[['arrivals']].to_csv(f'ml/output_data/{admin_level}_lag{lag}/true.csv')
return
|
[
"khof312@gmail.com"
] |
khof312@gmail.com
|
51fe054e2fe5d69c097f42f7ee838c0455ef17f8
|
c8616b2e102e87c93b2cfcaa6a226f7bdb1ec2c5
|
/paho_mqtt_test/mqtt_client_test.py
|
a435774d3e0bc1baefc40b62452d97f002785048
|
[] |
no_license
|
noahcroit/MQTT_over_LoRa_pycom
|
cb73fb0b9a63fc7f5644e3cce7a6044442f5cfbf
|
1a15a8349764a8347386f871f5cf9413c16400cc
|
refs/heads/master
| 2022-03-28T04:13:40.598422
| 2019-12-17T08:01:46
| 2019-12-17T08:01:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,942
|
py
|
import paho.mqtt.client as mqtt
import time
#####################################################################################
# MQTT Initialize
mqtt_client = mqtt.Client()
MQTT_SERVER = "192.168.2.220"
MQTT_PORT = 1883
# Callback function after received the mqtt message
def on_message(mqttc, obj, msg):
print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
mqtt_sub_msg = msg.payload.decode('ascii')
topic_dict = mqtt_subscribe_decoding(mqtt_sub_msg)
print(topic_dict)
print("\n")
mqtt_client.on_message = on_message
# Callback function after subscribed the mqtt topic
def sub_callback(mqttc, obj, mid, granted_qos):
print("Subscribed: " + str(mid) + " " + str(granted_qos))
mqtt_client.on_subscribe = sub_callback
topic = "ICTLab_LoRa/node2"
#####################################################################################
def mqtt_subscribe_decoding(mqtt_sub_message):
""" This function is used to decode a subscribed message into topic's dictionary which contain all tags of lora device.
@argument : mqtt_sub_message (mqtt subscribe message)
@return : topic_dict (dictionary of a given subscribed topic)
"""
topic_dict = {}
    # Split on commas: each field in the LoRa message is one "key:value" pair
    # split_comma = ["key:value", "key:value", ...]
split_comma = mqtt_sub_message.split(",")
for i in range(len(split_comma)):
try:
# Split colon as for dictionary format within LoRa message
# split_colon = [key, value]
split_colon = split_comma[i].split(":")
key = split_colon[0]
value = split_colon[1]
# Update dictionary of LoRa device
topic_dict.update({key:value})
except Exception as e:
print(e)
return topic_dict
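# --- Illustrative self-check (the payload below is a made-up example, not real node data) ---
# The decoder above expects payloads of the form "key:value,key:value"; values stay strings.
_example_payload = "temp:25.4,hum:60"
print(mqtt_subscribe_decoding(_example_payload))  # -> {'temp': '25.4', 'hum': '60'}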
mqtt_client.connect(host=MQTT_SERVER, port=MQTT_PORT, keepalive=60)
mqtt_client.subscribe(topic=topic, qos=1)
mqtt_client.loop_forever()
|
[
"jazzpiano1004@gmail.com"
] |
jazzpiano1004@gmail.com
|
0af5d606b48acfcaece9af7088f053901fed4b5e
|
cacb5954c86544c1c8ca9d5054f94afba9e4bc86
|
/lib/config.py
|
3c7cd674d07fef0ce86e79cb4f36f44f9c4388d2
|
[] |
no_license
|
peternara/MCNet-MobileNetv3-CenterNet
|
122a0fd0d224b0d90b60e7e437208c340a449d60
|
7b89b82bbbc12952d8bd47872570a47e842e06bc
|
refs/heads/master
| 2022-07-04T23:41:41.616275
| 2020-05-13T09:48:31
| 2020-05-13T09:48:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,535
|
py
|
import numpy as np
import os
import json
current_path = os.path.dirname(__file__)
class Config(object):
def __init__(self):
self._configs = {}
# _Detector
self._configs["_Detector"] = {}
self._Detector = self._configs["_Detector"]
self._Detector["max_objs"] = 256
self._Detector["num_class"] = 1
self._Detector["threshold"] = 0.8
self._Detector["down_ration"] = 4
# _DLTrain
self._configs["_DLTrain"] = {}
self._DLTrain = self._configs["_DLTrain"]
self._DLTrain["train_with_multi_gpu"] = True
self._DLTrain["hm_weight"] = 1.0
self._DLTrain["wh_weight"] = 0.1
self._DLTrain["off_weight"] = 1.0
self._DLTrain["lr"] = 0.001
self._DLTrain["batch_size"] = 12
self._DLTrain["max_epoch"] = 100
@property
def Detector(self):
return self._Detector
@property
def DLTrain(self):
return self._DLTrain
def update_config(self, new):
for key in new:
if key == "Detector":
for sub_key in new["Detector"]:
self._Detector[sub_key] = new["Detector"][sub_key]
elif key == "DLTrain":
for sub_key in new["DLTrain"]:
self._DLTrain[sub_key] = new["DLTrain"][sub_key]
system_config = Config()
config_file_path = os.path.join(current_path, "config.json")
with open(config_file_path, 'r') as f:
system_config.update_config(json.load(f))
|
[
"liuwei79@lenovo.com"
] |
liuwei79@lenovo.com
|
91a5e092403c3b48409a5295164f514cf3a350a4
|
f5d84b5b3875e2266e14337763da1ba1a7a94b3e
|
/www/urls.py
|
98a64776f07f47717f32f76d46f1fb48c721fd8a
|
[] |
no_license
|
WRuping/python-webapp
|
ca7896473ec74fe9a4cb9ac8411f57b7bd5b08e1
|
6ec84c66e828546ede928a1033036ada5717a138
|
refs/heads/master
| 2021-01-09T23:38:03.606572
| 2018-04-09T15:07:27
| 2018-04-09T15:07:27
| 73,192,600
| 0
| 1
| null | 2016-11-09T15:25:40
| 2016-11-08T14:18:03
|
Python
|
UTF-8
|
Python
| false
| false
| 9,599
|
py
|
#!/usr/bin/env python
#_*_ coding: utf-8 _*_
__author__ = 'LYleonard'
import os, re, time, base64, hashlib, logging, markdown2
from transwarp.web import get, view, post, ctx, interceptor, seeother, notfound
from apis import api,Page, APIError, APIValueError, APIPermissionError, APIResourceNotFoundError
from models import User,Blog, Comment
from config import configs
# @view('test_users.html')
# @get('/')
# def test_users():
# users = User.find_all()
# return dict(users=users)
_COOKIE_NAME = 'awesession'
_COOKIE_KEY = configs.session.secret
def make_signed_cookie(id, password, max_age):
# build cookie string by :id-expires-md5
expires = str(int(time.time() + (max_age or 86400)))
L = [id, expires, hashlib.md5('%s-%s-%s-%s' % (id, password, expires, _COOKIE_KEY)).hexdigest()]
return '-'.join(L)
def parse_signed_cookie(cookie_str):
try:
L = cookie_str.split('-')
if len(L) != 3:
return None
id, expires, md5 = L
if int(expires) < time.time():
return None
user = User.get(id)
if user is None:
return None
if md5 != hashlib.md5('%s-%s-%s-%s' % (id, user.password, expires, _COOKIE_KEY)).hexdigest():
return None
return user
except:
return None
def check_admin():
user = ctx.request.user
if user and user.admin:
return
raise APIPermissionError('No permission.')
@interceptor('/')
def user_interceptor(next):
logging.info('try to bind user from session cookie...')
user = None
cookie = ctx.request.cookies.get(_COOKIE_NAME)
if cookie:
logging.info('parse session cookie...')
user = parse_signed_cookie(cookie)
if user:
logging.info('Bind user <%s> to session...' % user.email)
ctx.request.user = user
return next()
@interceptor('/manage/')
def manage_interceptor(next):
user = ctx.request.user
if user and user.admin:
return next()
raise seeother('/signin')
@view('blogs.html')
@get('/')
def index():
blogs, page = _get_blogs_by_page()
for blog in blogs:
blog.created_at = time.strftime('%Y-%m-%d %H:%M:%S %W', time.localtime(blog.created_at))
return dict(page=page, blogs=blogs, user=ctx.request.user)
@view('blog.html')
@get('/blog/:blog_id')
def blog(blog_id):
blog = Blog.get(blog_id)
if blog is None:
raise notfound()
blog.html_content = markdown2.markdown(blog.content)
blog.created_at = time.strftime('%Y-%m-%d %H:%M:%S %W', time.localtime(blog.created_at))
comments = Comment.find_by('where blog_id=? order by created_at desc limit 1000', blog_id)
for comment in comments:
comment.created_at = time.strftime('%Y-%m-%d %H:%M:%S %W', time.localtime(comment.created_at))
return dict(blog=blog, comments=comments, user=ctx.request.user)
@view('signin.html')
@get('/signin')
def signin():
return dict()
@get('/signout')
def signout():
ctx.response.delete_cookie(_COOKIE_NAME)
raise seeother('/')
@api
@post('/api/authenticate')
def authenticate():
i = ctx.request.input(remember='')
email = i.email.strip().lower()
password = i.password
remember = i.remember
user = User.find_first('where email=?', email)
if user is None:
raise APIError('auth:failed', 'email', 'Invalid email.')
elif user.password != password:
raise APIError('auth:failed', 'password', 'Invalid password.')
# make session cookie:
max_age = 604800 if remember=='true' else None
cookie = make_signed_cookie(user.id, user.password, max_age)
ctx.response.set_cookie(_COOKIE_NAME, cookie, max_age=max_age)
user.password = '******'
return user
_RE_EMAIL = re.compile(r'^[a-z0-9\.\-\_]+\@[a-z0-9\-\_]+(\.[a-z0-9\-\_]+){1,4}$')
_RE_MD5 = re.compile(r'^[0-9a-f]{32}$')
@api
@post('/api/users')
def register_user():
i = ctx.request.input(name='', email='', password='')
name = i.name.strip()
email = i.email.strip().lower()
password = i.password
if not name:
raise APIValueError('name')
if not email or not _RE_EMAIL.match(email):
raise APIValueError('email')
if not password or not _RE_MD5.match(password):
raise APIValueError('password')
user = User.find_first('where email=?', email)
if user:
raise APIError('register:failed', 'email', 'Email is already in use.')
user = User(name=name, email=email, password=password, image='http://www.gravatar.com/avatar/%s?d=mm&s=120' % hashlib.md5(email).hexdigest())
user.insert()
#make session cookie
cookie = make_signed_cookie(user.id, user.password, None)
ctx.response.set_cookie(_COOKIE_NAME, cookie)
return user
@view('register.html')
@get('/register')
def register():
return dict()
@view('manage_blog_edit.html')
@get('/manage/blogs/create')
def manage_blogs_create():
return dict(id=None, action='/api/blogs', redirect='/manage/blogs', user=ctx.request.user)
@api
@get('/api/blogs/:blog_id')
def api_get_blog(blog_id):
blog = Blog.get(blog_id)
if blog:
return blog
raise APIResourceNotFoundError('Blog')
@api
@post('/api/blogs')
def api_create_blog():
check_admin()
i = ctx.request.input(name='', summary='', content='')
name = i.name.strip()
summary = i.summary.strip()
content = i.content.strip()
if not name:
        raise APIValueError('name', 'name cannot be empty.')
if not summary:
        raise APIValueError('summary', 'summary cannot be empty.')
if not content:
raise APIValueError('content', 'content cannot be empty.')
user = ctx.request.user
blog = Blog(user_id=user.id, user_name=user.name, name=name, summary=summary, content=content)
blog.insert()
return blog
@api
@post('/api/blogs/:blog_id')
def api_update_blog(blog_id):
check_admin()
i = ctx.request.input(name='', summary='', content='')
name = i.name.strip()
summary = i.summary.strip()
content = i.content.strip()
if not name:
        raise APIValueError('name', 'name cannot be empty.')
if not summary:
raise APIValueError('summary', 'summary cannot be empty.')
if not content:
        raise APIValueError('content', 'content cannot be empty.')
blog = Blog.get(blog_id)
if blog is None:
raise APIResourceNotFoundError('Blog')
blog.name = name
blog.summary = summary
blog.content = content
blog.update()
return blog
@api
@post('/api/blogs/:blog_id/delete')
def api_delete_blog(blog_id):
check_admin()
blog = Blog.get(blog_id)
if blog is None:
raise APIResourceNotFoundError('Blog')
blog.delete()
return dict(id=blog_id)
@api
@post('/api/blogs/:blog_id/comments')
def api_create_blog_comment(blog_id):
user = ctx.request.user
if user is None:
raise APIPermissionError('Need signin.')
blog = Blog.get(blog_id)
if blog is None:
raise APIResourceNotFoundError('Blog')
content = ctx.request.input(content='').content.strip()
if content is None:
raise APIValueError('content')
c = Comment(blog_id=blog_id, user_id=user.id, user_name=user.name, user_image=user.image, content=content)
c.insert()
    return dict(comment=c)
@api
@post('/api/comments/:comment_id/delete')
def api_delete_comment(comment_id):
check_admin()
comment = Comment.get(comment_id)
if comment is None:
raise APIResourceNotFoundError('Comment')
comment.delete()
return dict(id=comment_id)
@api
@get('/api/comments')
def api_get_comments():
total = Comment.count_all()
page = Page(total, _get_page_index())
comments = Comment.find_by('order by created_at desc limit ?,?', page.offset, page.limit)
return dict(comments=comments, page=page)
def _get_page_index():
page_index = 1
try:
page_index = int(ctx.request.get('page', '1'))
except ValueError:
pass
return page_index
def _get_blogs_by_page():
total = Blog.count_all()
page = Page(total, _get_page_index())
blogs = Blog.find_by('order by created_at desc limit ?,?', page.offset, page.limit)
return blogs, page
@api
@get('/api/blogs')
def api_get_blogs():
format = ctx.request.get('format', '')
blogs, page = _get_blogs_by_page()
if format == 'html':
for blog in blogs:
blog.content = markdown2.markdown(blog.content)
return dict(blogs=blogs, page=page)
@view('manage_blog_list.html')
@get('/manage/blogs')
def manage_blogs():
return dict(page_index=_get_page_index(), user=ctx.request.user)
@api
@get('/api/users')
def api_get_users():
total = User.count_all()
page = Page(total, _get_page_index())
users = User.find_by('order by created_at desc limit ?,?', page.offset, page.limit)
for u in users:
u.password = '******'
return dict(users=users, page=page)
@get('/manage/')
def manage_index():
raise seeother('/manage/blogs')
@view('manage_comment_list.html')
@get('/manage/comments')
def manage_comments():
return dict(page_index=_get_page_index(), user=ctx.request.user)
@view('manage_blog_edit.html')
@get('/manage/blogs/edit/:blog_id')
def manage_blogs_edit(blog_id):
blog = Blog.get(blog_id)
if blog is None:
raise notfound()
return dict(id=blog_id, name=blog.name, summary=blog.summary, content=blog.content, action='/api/blogs/%s' % blog_id, redirect='/manage/blogs', user=ctx.request.user)
@view('manage_user_list.html')
@get('/manage/users')
def manage_users():
return dict(page_index=_get_page_index(), user=ctx.request.user)
|
[
"aygxywrp@163.com"
] |
aygxywrp@163.com
|
85e370da93086304f9695626f24e7d01e5884395
|
ee6c8f25f018e536a355054e7ee12415e37c8d52
|
/decorator/ortalamaHesabi.py
|
d5eb81a619483707556d4a7eead7840145d75623
|
[] |
no_license
|
musttafayildirim/PythonAlistirmalar
|
7107ec4e2cae3c45048ca64ac220023d726a93c3
|
713d13e2d1533146320b166c04e3042fab8fabe0
|
refs/heads/master
| 2020-03-22T01:00:54.491773
| 2018-07-18T11:22:52
| 2018-07-18T11:22:52
| 139,278,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
# Decorator exercise: wrap an average-calculating function so that it also
# reports the averages of the even and odd numbers in the list.
def ekstra(fonk):
    def wrapper(sayilar):
        ciftlerToplami = 0   # sum of the even numbers
        ciftler = 0          # count of the even numbers
        teklerToplami = 0    # sum of the odd numbers
        tekler = 0           # count of the odd numbers
        for sayi in sayilar:
            if sayi % 2 == 0:
                ciftler += 1
                ciftlerToplami += sayi
            else:
                tekler += 1
                teklerToplami += sayi
        print("Average of the odd numbers: ", (teklerToplami / tekler))
        print("Average of the even numbers: ", (ciftlerToplami / ciftler))
        fonk(sayilar)
    return wrapper
@ekstra
def ortalamaBul(sayilar):
    toplam = 0
    for i in sayilar:
        toplam += i
    print("Overall average = ", (toplam / len(sayilar)))
ortalamaBul([1, 2, 3, 5, 64, 86, 45])
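# Note (addition, not part of the original exercise): in idiomatic Python the
# wrapper is usually decorated with functools.wraps so that the wrapped function
# keeps its own __name__ and docstring:
#
#   import functools
#   def ekstra(fonk):
#       @functools.wraps(fonk)
#       def wrapper(sayilar):
#           ...
#           fonk(sayilar)
#       return wrapper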
|
[
"musttafayildirim@gmail.com"
] |
musttafayildirim@gmail.com
|
f2a1d22895b39efddab6483f44bd354be5426b43
|
06b1f5883d4625aca67423df0b80a9ab66b5a89c
|
/test/logging/rotating.py
|
5a2a1e540caac0c7ad2553fa46bb1978acf36746
|
[] |
no_license
|
lariat/dqm
|
91c5cfd9d640d88787b5c4e8a66aa664097d623b
|
01e140cea3af47456c31b4932d96a0b2839b7a98
|
refs/heads/master
| 2020-05-20T04:16:21.850076
| 2015-11-28T14:37:57
| 2015-11-28T14:37:57
| 31,751,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
import subprocess
import logging
from logging.handlers import RotatingFileHandler
# Command whose combined stdout/stderr will be captured and logged.
cmd = ['ls', '-l']
file_name = 'tmp_rotating.log'
# Log-line layout and timestamp format.
log_format = '%(asctime)s %(levelname)s: %(message)s'
date_format = '%Y-%m-%d %H:%M:%S'
formatter = logging.Formatter(
    fmt=log_format,
    datefmt=date_format
)
# Rotate the log once it reaches ~50 MB, keeping up to 10 backup files.
handler = RotatingFileHandler(
    filename=file_name,
    mode='a',
    maxBytes=50000000,
    backupCount=10,
)
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
logger = logging.getLogger('rotating.py')
logger.setLevel(logging.INFO)
logger.addHandler(handler)
# Stream each line of the command's output into the rotating log.
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
    line = proc.stdout.readline()
    if not line:
        break
    logger.info(line.rstrip('\n'))
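# Note (addition, not part of the original script): under Python 3,
# proc.stdout.readline() returns bytes rather than str, so each line would need
# decoding before logging, e.g.
#   logger.info(line.decode('utf-8', errors='replace').rstrip('\n'))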
|
[
"lariatdqm@lariat-gateway00.fnal.gov"
] |
lariatdqm@lariat-gateway00.fnal.gov
|
6ee9e1c0a30a9daab9245b8dcc1cf13d21e33500
|
61d34c2be1ff7cfe4635cfb0cc6f2d30890b3c67
|
/collab_smach/src/collab_smach/policy.py
|
0fc68181de1a649688ba5d3aa78e682125448961
|
[] |
no_license
|
cpaxton/two_arm_collaboration
|
869945829afccd9720d39309b92086979f63efcc
|
662cfaa7eee94dfa4a1c9d7597463f2577f72021
|
refs/heads/master
| 2021-01-10T12:41:58.819199
| 2016-03-09T15:33:47
| 2016-03-09T15:33:47
| 53,901,153
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
# Python file for policy-based controllers
# These use weighted data from predicator to move around in joint space, and are the basis for our low-level controllers
# This includes
import rospy
import roslib; roslib.load_manifest("collab_smach")
import smach
import smach_ros
import actionlib
import tf
import numpy as np
'''
Dimensions:
7 degrees of freedom = 7 dimensions
+ gripper open/closed?
Variables:
distance to collision
distance to waypoints/grasp points
relative position
'''
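# Illustrative sketch (addition, not from the original package): the notes above
# describe a policy that maps weighted predicate features to a command in
# 7-dimensional joint space. A minimal, hypothetical version of that idea, with
# made-up feature values and placeholder weights, could look like:
#
#   features = np.array([0.30, 0.12, 0.05])  # e.g. [dist_to_collision, dist_to_grasp, rel_pos_err]
#   W = np.zeros((7, 3))                      # one row of (learned or tuned) weights per joint
#   joint_velocity = W.dot(features)          # 7-dimensional joint-space command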
class PolicyMoveNode(smach.State):
def __init__(self, robot):
        smach.State.__init__(self, outcomes=['success','failure'])
def execute(self, userdata):
pass
'''
We have another node which moves based on a predicate.
Performs some move until a predicate is met.
'''
class PredicateMoveNode(smach.State):
def __init__(self, robot, predicate):
        smach.State.__init__(self, outcomes=['success','failure'])
def execute(self, userdata):
pass
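# Illustrative sketch (addition, not part of the original file): one way
# PredicateMoveNode.execute could be filled in is to poll until the predicate is
# reported true and then return 'success'. The check_predicate() helper below is
# hypothetical -- the real predicator interface is not shown in this file.
#
#   def execute(self, userdata):
#       rate = rospy.Rate(10)
#       while not rospy.is_shutdown():
#           if self.check_predicate():   # hypothetical helper
#               return 'success'
#           rate.sleep()
#       return 'failure'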
|
[
"cpaxton3@jhu.edu"
] |
cpaxton3@jhu.edu
|
fd13222758e243d11f336d7e4a598937319bf57b
|
2c3a01e6ec1dea20a23f0c89e8d4d9b84f179355
|
/networks.py
|
c54166431838582bdf11cdb9f019a6a2295890c5
|
[] |
no_license
|
jaeikjeon9919/BayesByHypernet
|
39af7823200e6934fc5b16286a0597cdc2dc0247
|
0d2f48afeaac9bf7e6e87bfa76c7968d7a165db8
|
refs/heads/master
| 2022-02-27T23:25:36.496305
| 2019-11-14T10:28:27
| 2019-11-14T10:28:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 60,728
|
py
|
import tensorflow as tf
import layers
import base_layers
import numpy as np
import copy
def get_bbh_mnist(ops, num_samples=5, sample_output=True, noise_shape=1,
layer_wise=False, slice_last_dim=False,
force_zero_mean=False,
num_slices=1, h_units=(256, 512),
aligned_noise=True):
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.int32, [None])
adv_eps = tf.placeholder_with_default(1e-2, [])
ops['x'] = x
ops['y'] = y
ops['adv_eps'] = adv_eps
x_inp = tf.reshape(x, [-1, 28, 28, 1])
h_use_bias = True
if layer_wise:
if slice_last_dim:
num_slices = 20
c1 = layers.BBHConvLayer('c1', 1, 20, 5, 'VALID',
num_samples=num_samples, num_slices=num_slices,
h_noise_shape=noise_shape,
h_units=h_units, h_use_bias=h_use_bias,
aligned_noise=aligned_noise)
if slice_last_dim:
num_slices = 50
c2 = layers.BBHConvLayer('c2', 20, 50, 5, 'VALID',
num_samples=num_samples, num_slices=num_slices,
h_noise_shape=noise_shape,
h_units=h_units, h_use_bias=h_use_bias,
aligned_noise=aligned_noise)
if slice_last_dim:
num_slices = 500
fc1 = layers.BBHDenseLayer('fc1', 800, 500, h_units=h_units,
num_samples=num_samples,
num_slices=num_slices,
h_noise_shape=noise_shape,
h_use_bias=h_use_bias,
aligned_noise=aligned_noise)
if slice_last_dim:
num_slices = 10
fc2 = layers.BBHDenseLayer('fc2', 500, 10, h_units=h_units,
num_samples=num_samples,
num_slices=num_slices,
h_noise_shape=noise_shape,
h_use_bias=h_use_bias,
aligned_noise=aligned_noise)
else:
cond_size = 130
cond = tf.eye(cond_size)
weight_shapes = {
'conv1_w': [5, 5, 1, 20],
'conv1_b': [20],
'conv2_w': [5, 5, 20, 50],
'conv2_b': [50],
'fc1_w': [800, 500],
'fc1_b': [500],
'fc2_w': [500, 10],
'fc2_b': [10],
}
weights = {}
z = tf.random_normal((num_samples, noise_shape))
z = tf.stack([tf.concat([
tf.tile(tf.expand_dims(z[s_dim], 0), [cond_size, 1]),
cond], 1) for s_dim in range(num_samples)])
# z_stack = []
# for s in range(num_samples):
# s_stack = []
# for c in range(cond_size):
# s_stack.append(tf.concat([z[s], cond[c]], 0))
# z_stack.append(tf.stack(s_stack)) # [c, -1]
# z = tf.stack(z_stack) # [noise, c, -1]
tf.add_to_collection('gen_weights_conds', z)
z = tf.reshape(z, [num_samples * cond_size, -1])
with tf.variable_scope(base_layers.hypernet_vs):
for unit in h_units:
z = tf.layers.dense(z, unit, lambda x: tf.maximum(x, 0.1 * x),
use_bias=h_use_bias)
z = tf.layers.dense(z, 3316, use_bias=h_use_bias)
z = tf.reshape(z, [num_samples, cond_size, -1])
tf.add_to_collection('gen_weights_raw', z) # [noise, c, -1]
z = tf.reshape(z, [num_samples, -1])
if force_zero_mean:
z = z - tf.reduce_mean(z, 0, keepdims=True)
tf.add_to_collection('gen_weights', z)
tf.add_to_collection('weight_samples', z)
idx = 0
for w, shape in weight_shapes.items():
end = idx + np.prod(shape)
weights[w] = tf.reshape(z[:, idx:end], [num_samples, ] + shape)
idx = end
# conv 1
def c1(x, sample=0):
x = tf.nn.conv2d(x, weights['conv1_w'][sample], [1, 1, 1, 1],
'VALID', use_cudnn_on_gpu=True)
            x = x + weights['conv1_b'][sample]
return x
# conv 2
def c2(x, sample=0):
x = tf.nn.conv2d(x, weights['conv2_w'][sample], [1, 1, 1, 1],
'VALID', use_cudnn_on_gpu=True)
            x = x + weights['conv2_b'][sample]
return x
def fc1(x, sample=0):
x = tf.matmul(x, weights['fc1_w'][sample])
x = x + weights['fc1_b'][sample]
return x
def fc2(x, sample=0):
x = tf.matmul(x, weights['fc2_w'][sample])
x = x + weights['fc2_b'][sample]
return x
output_ind = []
if sample_output:
output = []
for i in range(num_samples):
x = c1(x_inp, i)
tf.add_to_collection('c1_preact', x)
x = tf.nn.relu(x)
tf.add_to_collection('c1_act', x)
x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
x = c2(x, i)
tf.add_to_collection('c2_preact', x)
x = tf.nn.relu(x)
tf.add_to_collection('c2_act', x)
x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
x = tf.layers.flatten(x)
# fc 1
x = fc1(x, i)
tf.add_to_collection('fc1_preact', x)
x = tf.nn.relu(x)
tf.add_to_collection('fc1_act', x)
# fc 2
x = fc2(x, i)
tf.add_to_collection('fc2_preact', x)
output_ind.append(x)
x = tf.nn.softmax(x)
output.append(x)
act_names = ['c1_preact', 'c1_act', 'c2_preact', 'c2_act',
'fc1_preact', 'fc1_act', 'fc2_preact']
for name in act_names:
act = tf.stack(tf.get_collection(name))
mu, sig = tf.nn.moments(act, 0)
tf.summary.histogram('act/{}_mu'.format(name), mu)
tf.summary.histogram('act/{}_sig'.format(name), sig)
x = tf.log(tf.add_n(output) / float(num_samples) + 1e-8)
else:
x = c1(x_inp)
x = tf.nn.relu(x)
x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
x = c2(x)
x = tf.nn.relu(x)
x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
x = tf.layers.flatten(x)
# fc 1
x = fc1(x)
x = tf.nn.relu(x)
# fc 2
x = fc2(x)
output_ind.append(x)
ops['logits'] = x
# build function to hold predictions
pred = tf.argmax(ops['logits'], -1, output_type=tf.int32)
# create tensor to calculate accuracy of predictions
acc = tf.reduce_mean(tf.cast(tf.equal(pred, ops['y']), tf.float32))
ops['acc'] = acc
probs = tf.nn.softmax(ops['logits'])
ops['probs'] = probs
ce = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=ops['logits'],
labels=ops['y']))
ops['loss'] = ce
reg_losses = tf.losses.get_regularization_losses()
if len(reg_losses) > 0:
ops['loss'] += tf.add_n(reg_losses)
loss_grads = tf.gradients(ce, ops['x'])[0]
adv_data = ops['x'] + adv_eps * tf.sign(loss_grads)
ops['adv_data'] = adv_data
return ops
def get_cifar_image(ops):
x = ops['x']
is_eval = tf.placeholder(tf.bool, [])
def distort_input(single_image):
# Randomly crop a [height, width] section of the image.
distorted_image = tf.image.resize_image_with_crop_or_pad(
single_image, 36, 36)
distorted_image = tf.random_crop(distorted_image, [24, 24, 3])
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Because these operations are not commutative, consider randomizing
        # the order of their operation.
# NOTE: since per_image_standardization zeros the mean and makes
# the stddev unit, this likely has no effect see tensorflow#1458.
distorted_image = tf.image.random_brightness(
distorted_image, max_delta=63)
distorted_image = tf.image.random_contrast(
distorted_image, lower=0.2, upper=1.8)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_standardization(distorted_image)
# Set the shapes of tensors.
float_image.set_shape([24, 24, 3])
return float_image
def normalise_input(single_image):
# Image processing for evaluation.
# Crop the central [height, width] of the image.
resized_image = tf.image.resize_image_with_crop_or_pad(
single_image, 24, 24)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_standardization(resized_image)
# Set the shapes of tensors.
float_image.set_shape([24, 24, 3])
return float_image
x = tf.cond(is_eval,
true_fn=lambda: tf.map_fn(normalise_input, x),
false_fn=lambda: tf.map_fn(distort_input, x))
# x = tf.map_fn(normalise_input, x)
return x, is_eval
def get_bbh_cifar_resnet(ops, num_samples=5, sample_output=True, noise_shape=1,
layer_wise=False, slice_last_dim=False,
force_zero_mean=False,
aligned_noise=True,
num_slices=1, h_units=(256, 512)):
x = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.int32, [None])
adv_eps = tf.placeholder_with_default(1e-2, [])
filters = [16, 16, 32, 64]
strides = [1, 2, 2, 2]
num_units = 5
weight_shapes = {}
weight_shapes['conv1'] = {
'w': [3, 3, 3, filters[0]],
'b': [filters[0]],
}
weight_shapes['last'] = {
'w': [filters[-1], 5],
'b': [5],
}
old_filter = filters[0]
for scale, filter in enumerate(filters[1:]):
s = 'scale{}'.format(scale)
weight_shapes[s] = {}
for res_unit in range(num_units):
r = 'unit{}'.format(res_unit)
weight_shapes[s][r] = {
'conv1': {'w': [3, 3, old_filter, filter],
'b': [filter]},
'conv2': {'w': [3, 3, filter, filter],
'b': [filter]},
}
old_filter = filter
ops['x'] = x
ops['y'] = y
ops['adv_eps'] = adv_eps
x, is_eval = get_cifar_image(ops)
ops['is_eval'] = is_eval
ops['inp_x'] = x
h_use_bias = True
print('Building weights for:\n{}'.format(weight_shapes))
all_layers = {}
if layer_wise:
w_shape = weight_shapes['conv1']['w']
if slice_last_dim:
num_slices = w_shape[-1]
else:
num_slices = 1
all_layers['conv1'] = layers.BBHConvLayer(
'conv1', w_shape[-2], w_shape[-1], w_shape[0],
num_samples=num_samples, num_slices=num_slices,
h_noise_shape=noise_shape, strides=[1, strides[0], strides[0], 1],
h_units=h_units, h_use_bias=h_use_bias, aligned_noise=aligned_noise)
for scale, filter in enumerate(filters[1:]):
s = 'scale{}'.format(scale)
all_layers[s] = {}
stride = strides[scale + 1]
for res_unit in range(num_units):
r = 'unit{}'.format(res_unit)
all_layers[s][r] = {}
w_shape = weight_shapes[s][r]['conv1']['w']
if slice_last_dim:
num_slices = w_shape[-1]
else:
num_slices = 1
all_layers[s][r]['bn1'] = tf.layers.BatchNormalization()
all_layers[s][r]['bn2'] = tf.layers.BatchNormalization()
all_layers[s][r]['conv1'] = layers.BBHConvLayer(
'{}/{}/conv1'.format(s, r),
w_shape[-2], w_shape[-1], w_shape[0],
num_samples=num_samples, num_slices=num_slices,
h_noise_shape=noise_shape, aligned_noise=aligned_noise,
strides=[1, stride, stride, 1],
h_units=h_units, h_use_bias=h_use_bias)
all_layers[s][r]['conv2'] = layers.BBHConvLayer(
'{}/{}/conv2'.format(s, r),
w_shape[-1], w_shape[-1], w_shape[0],
num_samples=num_samples, num_slices=num_slices,
h_noise_shape=noise_shape, aligned_noise=aligned_noise,
strides=[1, 1, 1, 1],
h_units=h_units, h_use_bias=h_use_bias)
stride = 1
w_shape = weight_shapes['last']['w']
if slice_last_dim:
num_slices = w_shape[-1]
else:
num_slices = 1
all_layers['last'] = layers.BBHDenseLayer(
'last', filters[-1], w_shape[-1],
num_samples=num_samples, num_slices=num_slices,
h_noise_shape=noise_shape, aligned_noise=aligned_noise,
h_units=h_units, h_use_bias=h_use_bias)
else:
cond_size = 231
cond = tf.eye(cond_size)
z = tf.random_normal((num_samples, noise_shape))
z = tf.stack([tf.concat([
tf.tile(tf.expand_dims(z[s_dim], 0), [cond_size, 1]),
cond], 1) for s_dim in range(num_samples)])
tf.add_to_collection('gen_weights_conds', z)
z = tf.reshape(z, [num_samples * cond_size, -1])
with tf.variable_scope(base_layers.hypernet_vs):
for unit in h_units:
z = tf.layers.dense(z, unit, lambda x: tf.maximum(x, 0.1 * x),
use_bias=h_use_bias)
z = tf.layers.dense(z, 2003, use_bias=h_use_bias)
z = tf.reshape(z, [num_samples, cond_size, -1])
tf.add_to_collection('gen_weights_raw', z) # [noise, c, -1]
z = tf.reshape(z, [num_samples, -1])
if force_zero_mean:
z = z - tf.reduce_mean(z, 0, keepdims=True)
tf.add_to_collection('gen_weights', z)
tf.add_to_collection('weight_samples', z)
all_weights = {}
idx = 0
w_shape = weight_shapes['conv1']['w']
b_shape = weight_shapes['conv1']['b']
all_weights['conv1'] = {}
end = idx + np.prod(w_shape)
all_weights['conv1']['w'] = tf.reshape(
z[:, idx:end], [num_samples, ] + w_shape)
idx = end
end = idx + np.prod(b_shape)
all_weights['conv1']['b'] = tf.reshape(
z[:, idx:end], [num_samples, ] + b_shape)
def call_layer(x, sample=0):
x = tf.nn.conv2d(x, all_weights['conv1']['w'][sample],
[1, strides[0], strides[0], 1],
'SAME', use_cudnn_on_gpu=True)
x = x + all_weights['conv1']['b'][sample]
return x
all_layers['conv1'] = call_layer
for scale, filter in enumerate(filters[1:]):
s = 'scale{}'.format(scale)
all_layers[s] = {}
all_weights[s] = {}
stride = strides[scale + 1]
for res_unit in range(num_units):
r = 'unit{}'.format(res_unit)
all_layers[s][r] = {}
all_weights[s][r] = {}
all_layers[s][r]['bn1'] = tf.layers.BatchNormalization(
virtual_batch_size=1)
all_layers[s][r]['bn2'] = tf.layers.BatchNormalization(
virtual_batch_size=1)
w_shape = weight_shapes[s][r]['conv1']['w']
b_shape = weight_shapes[s][r]['conv1']['b']
all_weights[s][r]['conv1'] = {}
end = idx + np.prod(w_shape)
all_weights[s][r]['conv1']['w'] = tf.reshape(
z[:, idx:end], [num_samples, ] + w_shape)
idx = end
end = idx + np.prod(b_shape)
all_weights[s][r]['conv1']['b'] = tf.reshape(
z[:, idx:end], [num_samples, ] + b_shape)
def call_layer(s, r, stride, x, sample=0):
x = tf.nn.conv2d(
x, all_weights[s][r]['conv1']['w'][sample],
[1, stride, stride, 1],
'SAME', use_cudnn_on_gpu=True)
x = x + all_weights[s][r]['conv1']['b'][sample]
return x
all_layers[s][r]['conv1'] = call_layer
w_shape = weight_shapes[s][r]['conv2']['w']
b_shape = weight_shapes[s][r]['conv2']['b']
all_weights[s][r]['conv2'] = {}
end = idx + np.prod(w_shape)
all_weights[s][r]['conv2']['w'] = tf.reshape(
z[:, idx:end], [num_samples, ] + w_shape)
idx = end
end = idx + np.prod(b_shape)
all_weights[s][r]['conv2']['b'] = tf.reshape(
z[:, idx:end], [num_samples, ] + b_shape)
def call_layer(s, r, stride, x, sample=0):
x = tf.nn.conv2d(
x, all_weights[s][r]['conv2']['w'][sample],
[1, 1, 1, 1],
'SAME', use_cudnn_on_gpu=True)
x = x + all_weights[s][r]['conv2']['b'][sample]
return x
all_layers[s][r]['conv2'] = call_layer
stride = 1
w_shape = weight_shapes['last']['w']
b_shape = weight_shapes['last']['b']
all_weights['last'] = {}
end = idx + np.prod(w_shape)
all_weights['last']['w'] = tf.reshape(
z[:, idx:end], [num_samples, ] + w_shape)
idx = end
end = idx + np.prod(b_shape)
all_weights['last']['b'] = tf.reshape(
z[:, idx:end], [num_samples, ] + b_shape)
def call_layer(x, sample=0):
x = tf.matmul(x, all_weights['last']['w'][sample])
x = x + all_weights['last']['b'][sample]
return x
all_layers['last'] = call_layer
def call_resnet(x, sample=0):
def call_res_unit(x, c1, c2, bn1, bn2, strides):
in_filters = x.get_shape().as_list()[-1]
orig_x = x
if np.prod(strides) != 1:
orig_x = tf.nn.avg_pool(orig_x, ksize=strides, strides=strides,
padding='VALID')
with tf.variable_scope('sub_unit0', reuse=tf.AUTO_REUSE):
# x = bn1(x, training=tf.logical_not(is_eval))
x = bn1(x, training=True)
x = tf.nn.relu(x)
x = c1(x, sample)
with tf.variable_scope('sub_unit1', reuse=tf.AUTO_REUSE):
# x = bn2(x, training=tf.logical_not(is_eval))
x = bn2(x, training=True)
x = tf.nn.relu(x)
x = c2(x, sample)
# Add the residual
with tf.variable_scope('sub_unit_add'):
# Handle differences in input and output filter sizes
out_filters = x.get_shape().as_list()[-1]
if in_filters < out_filters:
orig_x = tf.pad(
tensor=orig_x,
paddings=[[0, 0]] * (
len(x.get_shape().as_list())
- 1) + [[int(np.floor((out_filters
- in_filters) / 2.)),
int(np.ceil((out_filters
- in_filters) / 2.))]])
x += orig_x
return x
x = all_layers['conv1'](x, sample)
for scale, filter in enumerate(filters[1:]):
s = 'scale{}'.format(scale)
stride = strides[scale + 1]
for res_unit in range(num_units):
r = 'unit{}'.format(res_unit)
with tf.variable_scope('unit_{}_{}'.format(scale, res_unit)):
if not layer_wise:
def c1(x, sample):
return all_layers[s][r]['conv1'](
s, r, stride, x, sample)
def c2(x, sample):
return all_layers[s][r]['conv2'](s, r, 1, x, sample)
else:
c1 = all_layers[s][r]['conv1']
c2 = all_layers[s][r]['conv2']
bn1 = all_layers[s][r]['bn1']
bn2 = all_layers[s][r]['bn2']
x = call_res_unit(
x, c1, c2, bn1, bn2,
[1, stride, stride, 1])
stride = 1
x = tf.nn.relu(x)
x = tf.reduce_mean(x, axis=[1, 2], name='global_avg_pool')
x = all_layers['last'](x, sample)
return x
output_ind = []
if sample_output:
output = []
for i in range(num_samples):
x = call_resnet(ops['inp_x'], i)
x = tf.nn.softmax(x)
output.append(x)
x = tf.log(tf.add_n(output) / float(num_samples) + 1e-8)
else:
x = call_resnet(ops['inp_x'])
output_ind.append(x)
ops['logits'] = x
# build function to hold predictions
pred = tf.argmax(ops['logits'], -1, output_type=tf.int32)
# create tensor to calculate accuracy of predictions
acc = tf.reduce_mean(tf.cast(tf.equal(pred, ops['y']), tf.float32))
ops['acc'] = acc
probs = tf.nn.softmax(ops['logits'])
ops['probs'] = probs
ce = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=ops['logits'],
labels=ops['y']))
ops['loss'] = ce
reg_losses = tf.losses.get_regularization_losses()
if len(reg_losses) > 0:
ops['loss'] += tf.add_n(reg_losses)
loss_grads = tf.gradients(ce, ops['inp_x'])[0]
adv_data = ops['inp_x'] + adv_eps * tf.sign(loss_grads)
ops['adv_data'] = adv_data
return ops
def get_bbb_mnist(ops, init_var=-15, prior_scale=1., aligned_noise=False):
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.int32, [None])
adv_eps = tf.placeholder_with_default(1e-2, [])
ops['x'] = x
ops['y'] = y
ops['adv_eps'] = adv_eps
x_inp = tf.reshape(x, [-1, 28, 28, 1])
c1 = layers.BBBConvLayer('c1', 1, 20, 5, 'VALID', init_var=init_var,
prior_scale=prior_scale,
aligned_noise=aligned_noise)
c2 = layers.BBBConvLayer('c2', 20, 50, 5, 'VALID', init_var=init_var,
prior_scale=prior_scale,
aligned_noise=aligned_noise)
fc1 = layers.BBBDenseLayer('fc1', 800, 500, init_var=init_var,
prior_scale=prior_scale,
aligned_noise=aligned_noise)
fc2 = layers.BBBDenseLayer('fc2', 500, 10, init_var=init_var,
prior_scale=prior_scale)
x = c1(x_inp)
x = tf.nn.relu(x)
x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
x = c2(x)
x = tf.nn.relu(x)
x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
x = tf.layers.flatten(x)
x = fc1(x)
x = tf.nn.relu(x)
x = fc2(x)
ops['logits'] = x
# build function to hold predictions
pred = tf.argmax(ops['logits'], -1, output_type=tf.int32)
# create tensor to calculate accuracy of predictions
acc = tf.reduce_mean(tf.cast(tf.equal(pred, ops['y']), tf.float32))
ops['acc'] = acc
probs = tf.nn.softmax(ops['logits'])
ops['probs'] = probs
ce = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=ops['logits'],
labels=ops['y']))
ops['loss'] = ce
reg_losses = tf.losses.get_regularization_losses()
if len(reg_losses) > 0:
ops['loss'] += tf.add_n(reg_losses)
loss_grads = tf.gradients(ce, ops['x'])[0]
adv_data = ops['x'] + adv_eps * tf.sign(loss_grads)
ops['adv_data'] = adv_data
return ops
def get_bbb_cifar_resnet(ops, init_var=-30, prior_scale=1.):
x = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.int32, [None])
adv_eps = tf.placeholder_with_default(1e-2, [])
filters = [16, 16, 32, 64]
strides = [1, 2, 2, 2]
num_units = 5
weight_shapes = {}
weight_shapes['conv1'] = {
'w': [3, 3, 3, filters[0]],
'b': [filters[0]],
}
weight_shapes['last'] = {
'w': [filters[-1], 5],
'b': [5],
}
old_filter = filters[0]
for scale, filter in enumerate(filters[1:]):
s = 'scale{}'.format(scale)
weight_shapes[s] = {}
for res_unit in range(num_units):
r = 'unit{}'.format(res_unit)
weight_shapes[s][r] = {
'conv1': {'w': [3, 3, old_filter, filter],
'b': [filter]},
'conv2': {'w': [3, 3, filter, filter],
'b': [filter]},
}
old_filter = filter
ops['x'] = x
ops['y'] = y
ops['adv_eps'] = adv_eps
x, is_eval = get_cifar_image(ops)
ops['is_eval'] = is_eval
ops['inp_x'] = x
h_use_bias = True
print('Building weights for:\n{}'.format(weight_shapes))
all_layers = {}
w_shape = weight_shapes['conv1']['w']
all_layers['conv1'] = layers.BBBConvLayer(
'conv1', w_shape[-2], w_shape[-1], w_shape[0],
init_var=init_var,
prior_scale=prior_scale, strides=[1, strides[0], strides[0], 1],)
for scale, filter in enumerate(filters[1:]):
s = 'scale{}'.format(scale)
all_layers[s] = {}
stride = strides[scale + 1]
for res_unit in range(num_units):
r = 'unit{}'.format(res_unit)
all_layers[s][r] = {}
w_shape = weight_shapes[s][r]['conv1']['w']
all_layers[s][r]['bn1'] = tf.layers.BatchNormalization(
virtual_batch_size=1)
all_layers[s][r]['bn2'] = tf.layers.BatchNormalization(
virtual_batch_size=1)
all_layers[s][r]['conv1'] = layers.BBBConvLayer(
'{}/{}/conv1'.format(s, r),
w_shape[-2], w_shape[-1], w_shape[0],
init_var=init_var,
prior_scale=prior_scale,
strides=[1, stride, stride, 1])
all_layers[s][r]['conv2'] = layers.BBBConvLayer(
'{}/{}/conv2'.format(s, r),
w_shape[-1], w_shape[-1], w_shape[0],
init_var=init_var,
prior_scale=prior_scale,
strides=[1, 1, 1, 1])
stride = 1
w_shape = weight_shapes['last']['w']
all_layers['last'] = layers.BBBDenseLayer(
'last', filters[-1], w_shape[-1],
init_var=init_var,
prior_scale=prior_scale)
def call_resnet(x, sample=0):
def call_res_unit(x, c1, c2, bn1, bn2, strides):
in_filters = x.get_shape().as_list()[-1]
orig_x = x
if np.prod(strides) != 1:
orig_x = tf.nn.avg_pool(orig_x, ksize=strides, strides=strides,
padding='VALID')
with tf.variable_scope('sub_unit0', reuse=tf.AUTO_REUSE):
# x = bn1(x, training=tf.logical_not(is_eval))
x = bn1(x, training=True)
x = tf.nn.relu(x)
x = c1(x, sample)
with tf.variable_scope('sub_unit1', reuse=tf.AUTO_REUSE):
# x = bn2(x, training=tf.logical_not(is_eval))
x = bn2(x, training=True)
x = tf.nn.relu(x)
x = c2(x, sample)
# Add the residual
with tf.variable_scope('sub_unit_add'):
# Handle differences in input and output filter sizes
out_filters = x.get_shape().as_list()[-1]
if in_filters < out_filters:
orig_x = tf.pad(
tensor=orig_x,
paddings=[[0, 0]] * (
len(x.get_shape().as_list())
- 1) + [[int(np.floor((out_filters
- in_filters) / 2.)),
int(np.ceil((out_filters
- in_filters) / 2.))]])
x += orig_x
return x
x = all_layers['conv1'](x, sample)
for scale, filter in enumerate(filters[1:]):
s = 'scale{}'.format(scale)
stride = strides[scale + 1]
for res_unit in range(num_units):
r = 'unit{}'.format(res_unit)
with tf.variable_scope('unit_{}_{}'.format(scale, res_unit)):
c1 = all_layers[s][r]['conv1']
c2 = all_layers[s][r]['conv2']
bn1 = all_layers[s][r]['bn1']
bn2 = all_layers[s][r]['bn2']
x = call_res_unit(
x, c1, c2, bn1, bn2,
[1, stride, stride, 1])
stride = 1
x = tf.nn.relu(x)
x = tf.reduce_mean(x, axis=[1, 2], name='global_avg_pool')
x = all_layers['last'](x, sample)
return x
output_ind = []
x = call_resnet(ops['inp_x'])
output_ind.append(x)
ops['logits'] = x
# build function to hold predictions
pred = tf.argmax(ops['logits'], -1, output_type=tf.int32)
# create tensor to calculate accuracy of predictions
acc = tf.reduce_mean(tf.cast(tf.equal(pred, ops['y']), tf.float32))
ops['acc'] = acc
probs = tf.nn.softmax(ops['logits'])
ops['probs'] = probs
ce = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=ops['logits'],
labels=ops['y']))
ops['loss'] = ce
reg_losses = tf.losses.get_regularization_losses()
if len(reg_losses) > 0:
ops['loss'] += tf.add_n(reg_losses)
loss_grads = tf.gradients(ce, ops['inp_x'])[0]
adv_data = ops['inp_x'] + adv_eps * tf.sign(loss_grads)
ops['adv_data'] = adv_data
return ops
def get_mnf_mnist(ops):
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.int32, [None])
adv_eps = tf.placeholder_with_default(1e-2, [])
learn_p = False
ops['x'] = x
ops['y'] = y
ops['adv_eps'] = adv_eps
x_inp = tf.reshape(x, [-1, 28, 28, 1])
c1 = layers.MNFConvLayer('c1', 1, 20, 5, 'VALID',
thres_var=0.5, learn_p=learn_p)
c2 = layers.MNFConvLayer('c2', 20, 50, 5, 'VALID',
thres_var=0.5, learn_p=learn_p)
fc1 = layers.MNFDenseLayer('fc1', 800, 500,
thres_var=0.5, learn_p=learn_p)
fc2 = layers.MNFDenseLayer('fc2', 500, 10,
thres_var=0.5, learn_p=learn_p)
x = c1(x_inp)
x = tf.nn.relu(x)
x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
x = c2(x)
x = tf.nn.relu(x)
x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
x = tf.layers.flatten(x)
x = fc1(x)
x = tf.nn.relu(x)
x = fc2(x)
ops['logits'] = x
# build function to hold predictions
pred = tf.argmax(ops['logits'], -1, output_type=tf.int32)
# create tensor to calculate accuracy of predictions
acc = tf.reduce_mean(tf.cast(tf.equal(pred, ops['y']), tf.float32))
ops['acc'] = acc
probs = tf.nn.softmax(ops['logits'])
ops['probs'] = probs
ce = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=ops['logits'],
labels=ops['y']))
ops['loss'] = ce
reg_losses = tf.losses.get_regularization_losses()
if len(reg_losses) > 0:
ops['loss'] += tf.add_n(reg_losses)
loss_grads = tf.gradients(ce, ops['x'])[0]
adv_data = ops['x'] + adv_eps * tf.sign(loss_grads)
ops['adv_data'] = adv_data
return ops
def get_mnf_cifar_resnet(ops, learn_p=False, thres_var=0.3):
x = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.int32, [None])
adv_eps = tf.placeholder_with_default(1e-2, [])
filters = [16, 16, 32, 64]
strides = [1, 2, 2, 2]
num_units = 5
weight_shapes = {}
weight_shapes['conv1'] = {
'w': [3, 3, 3, filters[0]],
'b': [filters[0]],
}
weight_shapes['last'] = {
'w': [filters[-1], 5],
'b': [5],
}
old_filter = filters[0]
for scale, filter in enumerate(filters[1:]):
s = 'scale{}'.format(scale)
weight_shapes[s] = {}
for res_unit in range(num_units):
r = 'unit{}'.format(res_unit)
weight_shapes[s][r] = {
'conv1': {'w': [3, 3, old_filter, filter],
'b': [filter]},
'conv2': {'w': [3, 3, filter, filter],
'b': [filter]},
}
old_filter = filter
ops['x'] = x
ops['y'] = y
ops['adv_eps'] = adv_eps
x, is_eval = get_cifar_image(ops)
ops['is_eval'] = is_eval
ops['inp_x'] = x
print('Building weights for:\n{}'.format(weight_shapes))
all_layers = {}
w_shape = weight_shapes['conv1']['w']
all_layers['conv1'] = layers.MNFConvLayer(
'conv1', w_shape[-2], w_shape[-1], w_shape[0],
learn_p=learn_p, thres_var=thres_var,
strides=[1, strides[0], strides[0], 1])
for scale, filter in enumerate(filters[1:]):
s = 'scale{}'.format(scale)
all_layers[s] = {}
stride = strides[scale + 1]
for res_unit in range(num_units):
r = 'unit{}'.format(res_unit)
all_layers[s][r] = {}
w_shape = weight_shapes[s][r]['conv1']['w']
all_layers[s][r]['bn1'] = tf.layers.BatchNormalization(
virtual_batch_size=1)
all_layers[s][r]['bn2'] = tf.layers.BatchNormalization(
virtual_batch_size=1)
all_layers[s][r]['conv1'] = layers.MNFConvLayer(
'{}/{}/conv1'.format(s, r),
w_shape[-2], w_shape[-1], w_shape[0],
learn_p=learn_p, thres_var=thres_var,
strides=[1, stride, stride, 1])
all_layers[s][r]['conv2'] = layers.MNFConvLayer(
'{}/{}/conv2'.format(s, r),
w_shape[-1], w_shape[-1], w_shape[0],
learn_p=learn_p, thres_var=thres_var,
strides=[1, 1, 1, 1])
stride = 1
w_shape = weight_shapes['last']['w']
all_layers['last'] = layers.MNFDenseLayer(
'last', filters[-1], w_shape[-1],
learn_p=learn_p, thres_var=thres_var)
def call_resnet(x, sample=0):
def call_res_unit(x, c1, c2, bn1, bn2, strides):
in_filters = x.get_shape().as_list()[-1]
orig_x = x
if np.prod(strides) != 1:
orig_x = tf.nn.avg_pool(orig_x, ksize=strides, strides=strides,
padding='VALID')
with tf.variable_scope('sub_unit0', reuse=tf.AUTO_REUSE):
# x = bn1(x, training=tf.logical_not(is_eval))
x = bn1(x, training=True)
x = tf.nn.relu(x)
x = c1(x)
with tf.variable_scope('sub_unit1', reuse=tf.AUTO_REUSE):
# x = bn2(x, training=tf.logical_not(is_eval))
x = bn2(x, training=True)
x = tf.nn.relu(x)
x = c2(x)
# Add the residual
with tf.variable_scope('sub_unit_add'):
# Handle differences in input and output filter sizes
out_filters = x.get_shape().as_list()[-1]
if in_filters < out_filters:
orig_x = tf.pad(
tensor=orig_x,
paddings=[[0, 0]] * (
len(x.get_shape().as_list())
- 1) + [[int(np.floor((out_filters
- in_filters) / 2.)),
int(np.ceil((out_filters
- in_filters) / 2.))]])
x += orig_x
return x
x = all_layers['conv1'](x)
for scale, filter in enumerate(filters[1:]):
s = 'scale{}'.format(scale)
stride = strides[scale + 1]
for res_unit in range(num_units):
r = 'unit{}'.format(res_unit)
with tf.variable_scope('unit_{}_{}'.format(scale, res_unit)):
c1 = all_layers[s][r]['conv1']
c2 = all_layers[s][r]['conv2']
bn1 = all_layers[s][r]['bn1']
bn2 = all_layers[s][r]['bn2']
x = call_res_unit(
x, c1, c2, bn1, bn2,
[1, stride, stride, 1])
stride = 1
x = tf.nn.relu(x)
x = tf.reduce_mean(x, axis=[1, 2], name='global_avg_pool')
x = all_layers['last'](x)
return x
output_ind = []
x = call_resnet(ops['inp_x'])
output_ind.append(x)
ops['logits'] = x
# build function to hold predictions
pred = tf.argmax(ops['logits'], -1, output_type=tf.int32)
# create tensor to calculate accuracy of predictions
acc = tf.reduce_mean(tf.cast(tf.equal(pred, ops['y']), tf.float32))
ops['acc'] = acc
probs = tf.nn.softmax(ops['logits'])
ops['probs'] = probs
ce = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=ops['logits'],
labels=ops['y']))
ops['loss'] = ce
loss_grads = tf.gradients(ce, ops['inp_x'])[0]
adv_data = ops['inp_x'] + adv_eps * tf.sign(loss_grads)
ops['adv_data'] = adv_data
return ops
def get_vanilla_mnist(ops, prior_scale=1.):
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.int32, [None])
adv_eps = tf.placeholder_with_default(1e-2, [])
ops['x'] = x
ops['y'] = y
ops['adv_eps'] = adv_eps
x_inp = tf.reshape(x, [-1, 28, 28, 1])
regularizer = tf.contrib.layers.l2_regularizer(scale=1. / prior_scale)
x = tf.layers.conv2d(inputs=x_inp, kernel_size=5, filters=20,
activation=tf.nn.relu, padding='VALID',
kernel_regularizer=regularizer,
kernel_initializer=tf.variance_scaling_initializer())
x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
x = tf.layers.conv2d(inputs=x, kernel_size=5, filters=50,
activation=tf.nn.relu, padding='VALID',
kernel_regularizer=regularizer,
kernel_initializer=tf.variance_scaling_initializer())
x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
x = tf.layers.flatten(x)
x = tf.layers.dense(inputs=x, units=500, activation=tf.nn.relu,
kernel_regularizer=regularizer,
kernel_initializer=tf.variance_scaling_initializer())
x = tf.layers.dense(inputs=x, units=10,
kernel_regularizer=regularizer,
kernel_initializer=tf.variance_scaling_initializer())
ops['logits'] = x
# build function to hold predictions
pred = tf.argmax(ops['logits'], -1, output_type=tf.int32)
# create tensor to calculate accuracy of predictions
acc = tf.reduce_mean(tf.cast(tf.equal(pred, ops['y']), tf.float32))
ops['acc'] = acc
probs = tf.nn.softmax(ops['logits'])
ops['probs'] = probs
ce = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=ops['logits'],
labels=ops['y']))
ops['loss'] = ce
loss_grads = tf.gradients(ce, ops['x'])[0]
adv_data = ops['x'] + adv_eps * tf.sign(loss_grads)
ops['adv_data'] = adv_data
return ops
def get_vanilla_cifar_resnet(ops, prior_scale=1.):
x = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.int32, [None])
adv_eps = tf.placeholder_with_default(1e-2, [])
filters = [16, 16, 32, 64]
strides = [1, 2, 2, 2]
num_units = 5
num_classes = 5
ops['x'] = x
ops['y'] = y
ops['adv_eps'] = adv_eps
x, is_eval = get_cifar_image(ops)
ops['is_eval'] = is_eval
ops['inp_x'] = x
regularizer = tf.contrib.layers.l2_regularizer(scale=1. / prior_scale)
def res_unit(x, out_filters, stride=1):
strides = [1, stride, stride, 1]
in_filters = x.get_shape().as_list()[-1]
orig_x = x
if np.prod(strides) != 1:
orig_x = tf.nn.avg_pool(orig_x, ksize=strides,
strides=strides, padding='VALID')
with tf.variable_scope('sub_unit0'):
x = tf.layers.batch_normalization(
x, virtual_batch_size=1, training=True)
x = tf.nn.relu(x)
x = tf.layers.conv2d(
inputs=x, kernel_size=3, filters=out_filters, padding='SAME',
kernel_regularizer=regularizer, strides=stride,
kernel_initializer=tf.variance_scaling_initializer())
with tf.variable_scope('sub_unit1'):
x = tf.layers.batch_normalization(
x, virtual_batch_size=1, training=True)
x = tf.nn.relu(x)
x = tf.layers.conv2d(
inputs=x, kernel_size=3, filters=out_filters, padding='SAME',
kernel_regularizer=regularizer,
kernel_initializer=tf.variance_scaling_initializer())
# Add the residual
with tf.variable_scope('sub_unit_add'):
# Handle differences in input and output filter sizes
if in_filters < out_filters:
orig_x = tf.pad(
tensor=orig_x,
paddings=[[0, 0]] * (len(x.get_shape().as_list()) - 1) + [[
int(np.floor((out_filters - in_filters) / 2.)),
int(np.ceil((out_filters - in_filters) / 2.))]])
x += orig_x
return x
# init_conv
x = tf.layers.conv2d(
inputs=x, kernel_size=3, filters=filters[0], padding='SAME',
kernel_regularizer=regularizer,
kernel_initializer=tf.variance_scaling_initializer())
for scale in range(1, len(filters)):
with tf.variable_scope('unit_{}_0'.format(scale)):
x = res_unit(x, filters[scale], strides[scale])
for unit in range(1, num_units):
with tf.variable_scope('unit_{}_{}'.format(scale, unit)):
x = res_unit(x, filters[scale])
x = tf.layers.batch_normalization(x, virtual_batch_size=1, training=True)
x = tf.nn.relu(x)
x = tf.reduce_mean(x, axis=[1, 2], name='global_avg_pool')
# logits
x = tf.layers.dense(inputs=x, units=num_classes,
kernel_regularizer=regularizer,
kernel_initializer=tf.variance_scaling_initializer())
ops['logits'] = x
# build function to hold predictions
pred = tf.argmax(ops['logits'], -1, output_type=tf.int32)
# create tensor to calculate accuracy of predictions
acc = tf.reduce_mean(tf.cast(tf.equal(pred, ops['y']), tf.float32))
ops['acc'] = acc
probs = tf.nn.softmax(ops['logits'])
ops['probs'] = probs
ce = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=ops['logits'],
labels=ops['y']))
ops['loss'] = ce
loss_grads = tf.gradients(ce, ops['inp_x'])[0]
adv_data = ops['inp_x'] + adv_eps * tf.sign(loss_grads)
ops['adv_data'] = adv_data
return ops
def get_dropout_mnist(ops, prior_scale=1., keep_prob=0.5):
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.int32, [None])
adv_eps = tf.placeholder_with_default(1e-2, [])
ops['x'] = x
ops['y'] = y
ops['adv_eps'] = adv_eps
x_inp = tf.reshape(x, [-1, 28, 28, 1])
regularizer = tf.contrib.layers.l2_regularizer(scale=1. / prior_scale)
x = tf.layers.conv2d(inputs=x_inp, kernel_size=5, filters=20,
activation=tf.nn.relu, padding='VALID',
kernel_regularizer=regularizer,
kernel_initializer=tf.variance_scaling_initializer())
x = tf.nn.dropout(x, keep_prob)
x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
x = tf.layers.conv2d(inputs=x, kernel_size=5, filters=50,
activation=tf.nn.relu, padding='VALID',
kernel_regularizer=regularizer,
kernel_initializer=tf.variance_scaling_initializer())
x = tf.nn.dropout(x, keep_prob)
x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
x = tf.layers.flatten(x)
x = tf.layers.dense(inputs=x, units=500, activation=tf.nn.relu,
kernel_regularizer=regularizer,
kernel_initializer=tf.variance_scaling_initializer())
x = tf.nn.dropout(x, keep_prob)
x = tf.layers.dense(inputs=x, units=10,
kernel_regularizer=regularizer,
kernel_initializer=tf.variance_scaling_initializer())
ops['logits'] = x
# build function to hold predictions
pred = tf.argmax(ops['logits'], -1, output_type=tf.int32)
# create tensor to calculate accuracy of predictions
acc = tf.reduce_mean(tf.cast(tf.equal(pred, ops['y']), tf.float32))
ops['acc'] = acc
probs = tf.nn.softmax(ops['logits'])
ops['probs'] = probs
ce = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=ops['logits'],
labels=ops['y']))
ops['loss'] = ce
loss_grads = tf.gradients(ce, ops['x'])[0]
adv_data = ops['x'] + adv_eps * tf.sign(loss_grads)
ops['adv_data'] = adv_data
return ops
def get_dropout_cifar_resnet(ops, prior_scale=1., keep_prob=0.5):
x = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.int32, [None])
adv_eps = tf.placeholder_with_default(1e-2, [])
filters = [16, 16, 32, 64]
strides = [1, 2, 2, 2]
num_units = 5
num_classes = 5
ops['x'] = x
ops['y'] = y
ops['adv_eps'] = adv_eps
x, is_eval = get_cifar_image(ops)
ops['is_eval'] = is_eval
ops['inp_x'] = x
regularizer = tf.contrib.layers.l2_regularizer(scale=1. / prior_scale)
def res_unit(x, out_filters, stride=1):
strides = [1, stride, stride, 1]
in_filters = x.get_shape().as_list()[-1]
orig_x = x
if np.prod(strides) != 1:
orig_x = tf.nn.avg_pool(orig_x, ksize=strides,
strides=strides, padding='VALID')
with tf.variable_scope('sub_unit0'):
x = tf.layers.batch_normalization(
x, virtual_batch_size=1, training=True)
x = tf.nn.relu(x)
x = tf.layers.conv2d(
inputs=x, kernel_size=3, filters=out_filters, padding='SAME',
kernel_regularizer=regularizer, strides=stride,
kernel_initializer=tf.variance_scaling_initializer())
with tf.variable_scope('sub_unit1'):
x = tf.layers.batch_normalization(
x, virtual_batch_size=1, training=True)
x = tf.nn.dropout(x, keep_prob)
x = tf.nn.relu(x)
x = tf.layers.conv2d(
inputs=x, kernel_size=3, filters=out_filters, padding='SAME',
kernel_regularizer=regularizer,
kernel_initializer=tf.variance_scaling_initializer())
# Add the residual
with tf.variable_scope('sub_unit_add'):
# Handle differences in input and output filter sizes
if in_filters < out_filters:
orig_x = tf.pad(
tensor=orig_x,
paddings=[[0, 0]] * (len(x.get_shape().as_list()) - 1) + [[
int(np.floor((out_filters - in_filters) / 2.)),
int(np.ceil((out_filters - in_filters) / 2.))]])
x += orig_x
return x
# init_conv
x = tf.layers.conv2d(
inputs=x, kernel_size=3, filters=filters[0], padding='SAME',
kernel_regularizer=regularizer,
kernel_initializer=tf.variance_scaling_initializer())
for scale in range(1, len(filters)):
with tf.variable_scope('unit_{}_0'.format(scale)):
x = res_unit(x, filters[scale], strides[scale])
for unit in range(1, num_units):
with tf.variable_scope('unit_{}_{}'.format(scale, unit)):
x = res_unit(x, filters[scale])
x = tf.layers.batch_normalization(x, virtual_batch_size=1, training=True)
x = tf.nn.relu(x)
x = tf.reduce_mean(x, axis=[1, 2], name='global_avg_pool')
# logits
x = tf.layers.dense(inputs=x, units=num_classes,
kernel_regularizer=regularizer,
kernel_initializer=tf.variance_scaling_initializer())
ops['logits'] = x
# build function to hold predictions
pred = tf.argmax(ops['logits'], -1, output_type=tf.int32)
# create tensor to calculate accuracy of predictions
acc = tf.reduce_mean(tf.cast(tf.equal(pred, ops['y']), tf.float32))
ops['acc'] = acc
probs = tf.nn.softmax(ops['logits'])
ops['probs'] = probs
ce = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=ops['logits'],
labels=ops['y']))
ops['loss'] = ce
loss_grads = tf.gradients(ce, ops['inp_x'])[0]
adv_data = ops['inp_x'] + adv_eps * tf.sign(loss_grads)
ops['adv_data'] = adv_data
return ops
def get_ensemble_mnist(ops):
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.int32, [None])
adv_eps = tf.placeholder_with_default(1e-2, [])
ops['x'] = x
ops['y'] = y
ops['adv_eps'] = adv_eps
x_inp = tf.reshape(x, [-1, 28, 28, 1])
adv_alpha = 0.5
# adv_eps = 1e-2
ops['logits'] = []
ops['acc'] = []
ops['probs'] = []
ops['loss'] = []
ops['adv_data'] = []
ops['tot_loss'] = []
for i in range(10):
with tf.variable_scope('ens{}'.format(i)):
conv1 = tf.layers.Conv2D(
kernel_size=5, filters=20,
activation=tf.nn.relu, padding='VALID',
kernel_initializer=tf.variance_scaling_initializer())
conv2 = tf.layers.Conv2D(
kernel_size=5, filters=50,
activation=tf.nn.relu, padding='VALID',
kernel_initializer=tf.variance_scaling_initializer())
fc1 = tf.layers.Dense(
units=500, activation=tf.nn.relu,
kernel_initializer=tf.variance_scaling_initializer())
fc2 = tf.layers.Dense(
units=10,
kernel_initializer=tf.variance_scaling_initializer())
def get_out(h):
h = conv1(h)
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
h = conv2(h)
h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
h = tf.layers.flatten(h)
h = fc1(h)
h = fc2(h)
return h
logits = get_out(x_inp)
ops['logits'].append(logits)
# build function to hold predictions
pred = tf.argmax(logits, -1, output_type=tf.int32)
# create tensor to calculate accuracy of predictions
acc = tf.reduce_mean(tf.cast(tf.equal(pred, y), tf.float32))
ops['acc'].append(acc)
probs = tf.nn.softmax(logits)
ops['probs'].append(probs)
loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
labels=y))
ops['loss'].append(loss)
loss_grads = tf.gradients(adv_alpha * loss, ops['x'])[0]
adv_data = ops['x'] + adv_eps * tf.sign(loss_grads)
adv_data = tf.stop_gradient(adv_data)
ops['adv_data'].append(adv_data)
adv_logits = get_out(tf.reshape(adv_data, [-1, 28, 28, 1]))
adv_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=adv_logits,
labels=y))
tot_loss = adv_alpha * loss + (1 - adv_alpha) * adv_loss
ops['tot_loss'].append(tot_loss)
return ops
def get_ensemble_cifar_resnet(ops):
x = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.int32, [None])
adv_eps = tf.placeholder_with_default(1e-2, [])
filters = [16, 16, 32, 64]
strides = [1, 2, 2, 2]
num_units = 5
num_classes = 5
num_ensembles = 5
ops['x'] = x
ops['y'] = y
ops['adv_eps'] = adv_eps
x, is_eval = get_cifar_image(ops)
ops['is_eval'] = is_eval
ops['inp_x'] = x
adv_alpha = 0.5
# adv_eps = 1e-2
ops['logits'] = []
ops['acc'] = []
ops['probs'] = []
ops['loss'] = []
ops['adv_data'] = []
ops['tot_loss'] = []
def apply_resunit(x, layer_dict, stride):
stride = [1, stride, stride, 1]
in_filters = x.get_shape().as_list()[-1]
orig_x = x
if np.prod(stride) != 1:
orig_x = tf.nn.avg_pool(orig_x, ksize=stride, strides=stride,
padding='VALID')
with tf.variable_scope('sub_unit0'):
x = layer_dict['bn1'](x, training=True)
x = tf.nn.relu(x)
x = layer_dict['conv1'](x)
with tf.variable_scope('sub_unit1'):
x = layer_dict['bn2'](x, training=True)
x = tf.nn.relu(x)
x = layer_dict['conv2'](x)
out_filters = x.get_shape().as_list()[-1]
# Add the residual
with tf.variable_scope('sub_unit_add'):
# Handle differences in input and output filter sizes
if in_filters < out_filters:
orig_x = tf.pad(
tensor=orig_x,
paddings=[[0, 0]] * (len(x.get_shape().as_list()) - 1) + [[
int(np.floor((out_filters - in_filters) / 2.)),
int(np.ceil((out_filters - in_filters) / 2.))]])
x += orig_x
return x
for i in range(num_ensembles):
with tf.variable_scope('ens{}'.format(i)):
init_conv = tf.layers.Conv2D(
kernel_size=3, filters=filters[0],
padding='SAME',
kernel_initializer=tf.variance_scaling_initializer())
res_units = []
for scale in range(1, len(filters)):
with tf.variable_scope('unit_{}_0'.format(scale)):
res_dict = {
'bn1': tf.layers.BatchNormalization(
virtual_batch_size=1, name='bn1'),
'conv1': tf.layers.Conv2D(
kernel_size=3, filters=filters[scale],
padding='SAME', strides=strides[scale],
name='conv1',
kernel_initializer=tf.variance_scaling_initializer()),
'bn2': tf.layers.BatchNormalization(
virtual_batch_size=1, name='bn2'),
'conv2': tf.layers.Conv2D(
kernel_size=3, filters=filters[scale],
padding='SAME', name='conv2',
kernel_initializer=tf.variance_scaling_initializer())
}
res_units.append(res_dict)
for unit in range(1, num_units):
with tf.variable_scope('unit_{}_{}'.format(scale, unit)):
res_dict = {
'bn1': tf.layers.BatchNormalization(
virtual_batch_size=1, name='bn1'),
'conv1': tf.layers.Conv2D(
kernel_size=3, filters=filters[scale],
padding='SAME', name='conv1',
kernel_initializer=tf.variance_scaling_initializer()),
'bn2': tf.layers.BatchNormalization(
virtual_batch_size=1, name='bn2'),
'conv2': tf.layers.Conv2D(
kernel_size=3, filters=filters[scale],
padding='SAME', name='conv2',
kernel_initializer=tf.variance_scaling_initializer())
}
res_units.append(res_dict)
last_bn = tf.layers.BatchNormalization(
virtual_batch_size=1, name='last_bn')
last = tf.layers.Dense(
units=num_classes,
kernel_initializer=tf.variance_scaling_initializer())
def get_out(h):
h = init_conv(h)
i = 0
for scale in range(1, len(filters)):
with tf.variable_scope('unit_{}_0'.format(scale)):
h = apply_resunit(h, res_units[i], strides[scale])
i += 1
for unit in range(1, num_units):
with tf.variable_scope(
'unit_{}_{}'.format(scale, unit)):
h = apply_resunit(h, res_units[i], 1)
i += 1
h = last_bn(h, training=True)
h = tf.nn.relu(h)
h = tf.reduce_mean(h, [1, 2])
h = last(h)
return h
logits = get_out(x)
ops['logits'].append(logits)
# build function to hold predictions
pred = tf.argmax(logits, -1, output_type=tf.int32)
# create tensor to calculate accuracy of predictions
acc = tf.reduce_mean(tf.cast(tf.equal(pred, y), tf.float32))
ops['acc'].append(acc)
probs = tf.nn.softmax(logits)
ops['probs'].append(probs)
loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
labels=y))
ops['loss'].append(loss)
loss_grads = tf.gradients(adv_alpha * loss, ops['inp_x'])[0]
adv_data = ops['inp_x'] + adv_eps * tf.sign(loss_grads)
adv_data = tf.stop_gradient(adv_data)
ops['adv_data'].append(adv_data)
adv_logits = get_out(tf.reshape(adv_data, [-1, 24, 24, 3]))
adv_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=adv_logits,
labels=y))
tot_loss = adv_alpha * loss + (1 - adv_alpha) * adv_loss
ops['tot_loss'].append(tot_loss)
return ops
|
[
"pawlowski.nick@gmail.com"
] |
pawlowski.nick@gmail.com
|
2920544a3b08538848ac5deea551712b8930c829
|
bfd30b74333c29a73e033336b618500621416465
|
/app.py
|
4837deaa044df49844cd25366e5b7c0b8ac2afed
|
[] |
no_license
|
thi131190/flaskbook
|
ca3a8c4e791df9631608d605bac2e8db80da90c6
|
cf2553ab2085d6feb161e244146f83580af6ca7b
|
refs/heads/master
| 2020-09-12T06:09:24.113898
| 2019-11-18T06:40:23
| 2019-11-18T06:40:23
| 222,336,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,025
|
py
|
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, render_template, request, flash, redirect, url_for
from flask_login import UserMixin, LoginManager, login_required, login_user, logout_user, current_user
from werkzeug.security import generate_password_hash, check_password_hash
app = Flask(__name__)
app.config['SECRET_KEY'] = 'thisissecret'
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///flaskbook.db'
db = SQLAlchemy(app)
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), nullable=False, unique=True)
email = db.Column(db.String(50), nullable=False, unique=True)
password = db.Column(db.String(255), nullable=False)
def generate_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password, password)
class Post(db.Model):
__tablename__ = 'posts'
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String, nullable=False)
user_id = db.Column(db.Integer, nullable=False)
created_at = db.Column(db.DateTime, server_default=db.func.now())
updated_at = db.Column(
db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now())
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String, nullable=False)
user_id = db.Column(db.Integer, nullable=False)
post_id = db.Column(db.Integer, nullable=False)
created_at = db.Column(db.DateTime, server_default=db.func.now())
updated_at = db.Column(
db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now())
db.create_all()
login_manager = LoginManager(app)
@login_manager.user_loader
def load_user(id):
return User.query.get(id)
login_manager.login_view = 'login'
@app.route('/')
@login_required
def root():
posts = Post.query.all()
for post in posts:
post.author = User.query.filter_by(id=post.user_id).first()
return render_template('views/index.html', posts=posts)
@app.route('/register', methods=['POST', 'GET'])
def register():
if current_user.is_authenticated:
return redirect(url_for('root'))
if request.method == 'POST':
check_email = User.query.filter_by(email=request.form['email']).first()
if check_email:
flash('Email already taken', 'warning')
return redirect(url_for('register'))
new_user = User(name=request.form['name'],
email=request.form['email'])
new_user.generate_password(request.form['password'])
db.session.add(new_user)
db.session.commit()
login_user(new_user)
        flash('Successfully created an account and logged in', 'success')
return redirect(url_for('root'))
return render_template('views/register.html')
@app.route('/login', methods=['POST', 'GET'])
def login():
if current_user.is_authenticated:
return redirect(url_for('root'))
if request.method == 'POST':
user = User.query.filter_by(email=request.form['email']).first()
if not user:
flash('Email is not registered', 'warning')
return redirect(url_for('register'))
if user.check_password(request.form['password']):
login_user(user)
flash(f'Welcome back {current_user.name} !', 'success')
return redirect(url_for('root'))
flash('wrong password or email', 'warning')
return redirect(url_for('login'))
return render_template('views/login.html')
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
@app.route('/posts', methods=['POST'])
@login_required
def create_post():
if request.method == 'POST':
new_post = Post(body=request.form['body'],
user_id=current_user.id)
db.session.add(new_post)
db.session.commit()
return redirect(url_for('root'))
@app.route('/posts/<id>', methods=['POST', 'GET'])
def single_post(id):
action = request.args.get('action')
post = Post.query.get(id)
comments = Comment.query.filter_by(post_id=id).all()
if not post:
flash('Post not found', 'warning')
return redirect(url_for('root'))
post.author = User.query.get(post.user_id)
if request.method == "POST":
if post.user_id != current_user.id:
            flash('not allowed to do this', 'danger')
return redirect(url_for('root'))
if action == 'delete':
db.session.delete(post)
db.session.commit()
return redirect(url_for('root'))
elif action == 'update':
post.body = request.form['body']
db.session.commit()
return redirect(url_for('single_post', id=id))
elif action == 'edit':
return render_template('views/single_post.html', post=post, action=action)
if not action:
action = 'view'
for comment in comments:
comment.user_name = User.query.get(comment.user_id).name
return render_template('views/single_post.html', post=post, action=action, comments=comments)
@app.route('/posts/<id>/comments', methods=['POST', 'GET'])
def create_comment(id):
action = request.args.get('action')
post = Post.query.get(id)
if not post:
flash('Post not found', 'warning')
return redirect(url_for('root'))
if request.method == "POST":
comment = Comment(user_id=current_user.id, post_id=id,
body=request.form['body'])
db.session.add(comment)
db.session.commit()
flash('Thanks for your comment', 'success')
return redirect(url_for('single_post', id=id, action='view'))
@app.route('/posts/<id>/comments/<comment_id>', methods=['POST', 'GET'])
def edit_comment(id, comment_id):
action = request.args.get('action')
comment = Comment.query.get(comment_id)
print('ACTION', action)
print("Method", request.method)
if request.method == 'POST':
if comment.user_id != current_user.id:
flash('not allow to do this', 'danger')
return redirect(url_for('root'))
if action == 'update':
print("edit comment")
comment.body = request.form['body']
db.session.commit()
return redirect(url_for('single_post', id=id, action='view'))
if action == 'edit':
return render_template('views/edit_comment.html', comment=comment, action=action)
if action == 'delete':
print('deleting...')
db.session.delete(comment)
db.session.commit()
return redirect(url_for('single_post', id=comment.post_id))
return render_template('views/edit_comment.html', comment=comment)
if __name__ == "__main__":
app.run(debug=True)
|
[
"thi131190@gmail.com"
] |
thi131190@gmail.com
|
faf55b89db37f61553e87b59a232efca0d366685
|
a2ab9df14b6206a0c08b8610d1b5dcc283229914
|
/piko_people_detection/nodes/people_monitor.py
|
d915479574df2f36d183932fb128212bb2921532
|
[] |
no_license
|
pirobot/pi-kobuki-git
|
fa1d20624d709533df80ef610fb9d7d58ce36fd2
|
b7722b13a1e60f3b4d49e07449a3ce11314eedd2
|
refs/heads/master
| 2021-09-10T02:21:45.086523
| 2018-03-20T17:41:38
| 2018-03-20T17:41:38
| 126,058,637
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,291
|
py
|
#!/usr/bin/env python
"""
people_monitor.py - Version 1.0 2013-11-16
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2013 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
import thread
from cob_people_detection_msgs.msg import *
from scipy.spatial.distance import euclidean
from math import sqrt
class PeopleMonitor():
def __init__(self):
rospy.init_node("people_monitor")
rate = rospy.Rate(rospy.get_param('~rate', 1.0))
self.mutex = thread.allocate_lock()
rospy.Subscriber('people', DetectionArray, self.track_faces)
rospy.Subscriber('recognitions', DetectionArray, self.head_or_face)
self.people = {}
self.unknown = list()
while not rospy.is_shutdown():
message = "Known people: "
message += str(self.people.keys())
message += " N Unknown: " + str(len(self.unknown))
rospy.loginfo(message)
rate.sleep()
def head_or_face(self, msg):
self.unknown = list()
for detection in msg.detections:
pose = detection.pose.pose
label = detection.label
detector = detection.detector
            # prune unknown heads that now match a tracked person; rebuild the
            # list rather than deleting by index while iterating (IndexError risk)
            self.unknown = [p for p in self.unknown if not self.already_tracking(p)]
if label == "UnknownHead":
if not self.already_tracking(pose):
self.unknown.append(pose)
def track_faces(self, msg):
self.people = {}
for detection in msg.detections:
pose = detection.pose.pose
label = detection.label
detector = detection.detector
self.people[label] = pose
def already_tracking(self, new_pose):
p1 = [new_pose.position.x, new_pose.position.y, new_pose.position.z]
for person, pose in self.people.iteritems():
p2 = [pose.position.x, pose.position.y, pose.position.z]
distance = euclidean(p1, p2)
if distance < 0.05:
return True
for pose in self.unknown:
p2 = [pose.position.x, pose.position.y, pose.position.z]
distance = euclidean(p1, p2)
if distance < 0.05:
return True
return False
if __name__ == '__main__':
try:
PeopleMonitor()
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("People monitor node terminated.")
|
[
"pgoebel@stanford.edu"
] |
pgoebel@stanford.edu
|
2e0013802194373ba713edb929f64b4edfa31ae8
|
b66450f669095b0ad013ea82cb1ae575b83d74c3
|
/Technical Questions/027 - Remove Element Array.py
|
d9014362820117db85074b78a6c1f00254b51644
|
[] |
no_license
|
aulb/ToAsk
|
2649a3fad357820e3c8809816967dfb274704735
|
1e54c76ab9f7772316186db74496735ca1da65ce
|
refs/heads/master
| 2021-05-01T20:35:14.062678
| 2020-02-23T07:44:29
| 2020-02-23T07:44:29
| 33,289,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
# https://stackoverflow.com/questions/135041/should-you-always-favor-xrange-over-range
# In place
# Shuffle around doesn't work because you don't exactly know where to shuffle to
# def removeElement(nums, val):
# for i in range(len(nums) - 1, -1, -1):
# # Strategies:
# # Iterate backwards
# if nums[i] == val:
# del nums[i]
# return len(nums)
# DELETING DOES NOT WORK, NOT O(n) OPERATION
def removeElement(nums, val):
start = 0
for i in range(len(nums)):
if nums[i] != val:
nums[i], nums[start] = nums[start], nums[i]
start += 1
return nums[:start]
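# A quick usage sketch (hypothetical inputs) of the two-pointer version above;
# it swaps kept values to the front and returns the surviving prefix in place.
print(removeElement([3, 2, 2, 3], 3))              # [2, 2]
print(removeElement([0, 1, 2, 2, 3, 0, 4, 2], 2))  # [0, 1, 3, 0, 4]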
|
[
"aalbertuntung@gmail.com"
] |
aalbertuntung@gmail.com
|
4a85f74b2fd518ce37380ac7e08131e78f234266
|
8c4469b3d97d3a6e282c26da3135e0bc3bf3e347
|
/aggregator/group.py
|
a8659130a26726c6ca6adf7cfd20294ff9aa74e2
|
[] |
no_license
|
Ch4ngXu3Feng/seer
|
4bc9a84c83d1434ca5c776cbbea10a07b07e3a3f
|
afdd6a994cdf0e766fed6bc6fa275de1011a64c3
|
refs/heads/master
| 2020-04-06T22:25:53.494206
| 2019-01-04T13:49:51
| 2019-01-04T13:49:51
| 157,835,871
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
# coding=utf-8
import pandas as pd
from core.aggregator import Aggregator
class GroupFieldAggregator(Aggregator):
def __init__(self, aggregator: Aggregator, key: str, field: str, term: str) -> None:
super().__init__(field, term)
self.__aggregator: Aggregator = aggregator
self.__key: str = key
self.__field: str = field
self.__term: str = term
def method(self, name: str, data: pd.DataFrame) -> None:
raise RuntimeError()
def aggregate(self, name: str, data: pd.DataFrame) -> None:
        # select only the key/field columns for each term group (list selection)
        for name, group in data.groupby(self.__term)[[self.__key, self.__field]]:
self.__aggregator.aggregate(name, group)
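# A small, self-contained sketch (hypothetical column names "key", "field" and
# "term") of the pandas call aggregate() relies on: grouping by the term column
# and selecting only the key/field columns for each group.
if __name__ == "__main__":
    _df = pd.DataFrame({"key": [1, 2, 3], "field": [10.0, 20.0, 30.0], "term": ["a", "a", "b"]})
    for _term, _group in _df.groupby("term")[["key", "field"]]:
        print(_term, _group.shape)  # prints: a (2, 2) then b (1, 2)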
|
[
"changxuefeng.cxf@outlook.com"
] |
changxuefeng.cxf@outlook.com
|
0d5f8aa4289749dc418ab3dfb54737344de7bca6
|
79d77f1dd01fdf554833dd2b4f6210fbb8c36d45
|
/src/chapter02/cars.py
|
6c1c9a2c83b1b31e9bfe9e610646ce77837075b9
|
[] |
no_license
|
jayashelan/python-workbook
|
e5b87f9027b38dc53de24edc063eca673800e02d
|
7c71f2075a351f566e1d66908a6f044f909e5de0
|
refs/heads/master
| 2022-12-30T03:27:02.270384
| 2020-10-07T01:38:27
| 2020-10-07T01:38:27
| 290,066,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
cars = ['bmw','audi','toyota','subaru']
cars.sort()
print(cars)
cars.sort(reverse=True)
print(cars)
print("Here is the original list:")
print(cars)
print("\nHere is the sorted list:")
print(sorted(cars))
print("\nHere is the original list")
print(cars)
cars.reverse()
print(cars)
print(len(cars))
|
[
"jayashelan.boobalakrishnan@gmail.com"
] |
jayashelan.boobalakrishnan@gmail.com
|
f0c7af14ec0f438e016ae178d3f0fb3f738bca99
|
b144c5142226de4e6254e0044a1ca0fcd4c8bbc6
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/pcrequestmatchcriteria.py
|
e296cbd50daa3fbda7651bb3454ebd6967527e0f
|
[
"MIT"
] |
permissive
|
iwanb/ixnetwork_restpy
|
fa8b885ea7a4179048ef2636c37ef7d3f6692e31
|
c2cb68fee9f2cc2f86660760e9e07bd06c0013c2
|
refs/heads/master
| 2021-01-02T17:27:37.096268
| 2020-02-11T09:28:15
| 2020-02-11T09:28:15
| 239,721,780
| 0
| 0
|
NOASSERTION
| 2020-02-11T09:20:22
| 2020-02-11T09:20:21
| null |
UTF-8
|
Python
| false
| false
| 6,041
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class PcRequestMatchCriteria(Base):
"""PCRequest Match Criteria
The PcRequestMatchCriteria class encapsulates a required pcRequestMatchCriteria resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'pcRequestMatchCriteria'
def __init__(self, parent):
super(PcRequestMatchCriteria, self).__init__(parent)
@property
def Active(self):
"""Activate/Deactivate Configuration.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('active')
@property
def Count(self):
"""Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
Returns:
number
"""
return self._get_attribute('count')
@property
def DescriptiveName(self):
"""Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offers more context
Returns:
str
"""
return self._get_attribute('descriptiveName')
@property
def DestIpv4Address(self):
"""Destination IPv4 Address
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('destIpv4Address')
@property
def DestIpv6Address(self):
"""Destination IPv6 Address
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('destIpv6Address')
@property
def IpVersion(self):
"""IP Version
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('ipVersion')
@property
def IroType(self):
"""Match IRO Option
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('iroType')
@property
def MatchEndPoints(self):
"""Indicates Whether response parameters will be matched based on endpoints in the PCReq messaged received from PCC.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('matchEndPoints')
@property
def Name(self):
"""Name of NGPF element, guaranteed to be unique in Scenario
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
@property
def SrcIpv4Address(self):
"""Source IPv4 Address
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('srcIpv4Address')
@property
def SrcIpv6Address(self):
"""Source IPv6 Address
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('srcIpv6Address')
def update(self, Name=None):
"""Updates a child instance of pcRequestMatchCriteria on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args:
Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def get_device_ids(self, PortNames=None, Active=None, DestIpv4Address=None, DestIpv6Address=None, IpVersion=None, IroType=None, MatchEndPoints=None, SrcIpv4Address=None, SrcIpv6Address=None):
"""Base class infrastructure that gets a list of pcRequestMatchCriteria device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args:
PortNames (str): optional regex of port names
Active (str): optional regex of active
DestIpv4Address (str): optional regex of destIpv4Address
DestIpv6Address (str): optional regex of destIpv6Address
IpVersion (str): optional regex of ipVersion
IroType (str): optional regex of iroType
MatchEndPoints (str): optional regex of matchEndPoints
SrcIpv4Address (str): optional regex of srcIpv4Address
SrcIpv6Address (str): optional regex of srcIpv6Address
Returns:
list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
|
[
"srvc_cm_packages@keysight.com"
] |
srvc_cm_packages@keysight.com
|
c3e6146d0f55c6b931a02a3ba658b48dfe28ae34
|
7219cc709375174a17364574c80cf670b23e0fd1
|
/lists/tests/test_views.py
|
842f95a5576f3152db7c67dcec046ec808fd0b34
|
[] |
no_license
|
juliatiemi/tdd-project
|
8853c4fb3f704e8db15dd5f508c04be9d6afc5bc
|
48ff95341eb3892d65e9f00be97833a341038c1d
|
refs/heads/master
| 2021-03-13T15:14:30.255370
| 2020-10-24T18:34:37
| 2020-10-24T18:34:37
| 246,691,183
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,696
|
py
|
from django.test import TestCase
from lists.models import Item, List
from django.utils.html import escape
class ListViewTest(TestCase):
def test_uses_list_template(self):
my_list = List.objects.create()
response = self.client.get(f'/lists/{my_list.id}/')
self.assertTemplateUsed(response, 'list.html')
def test_displays_only_items_for_that_list(self):
correct_list = List.objects.create()
Item.objects.create(text='itemey 1', list=correct_list)
Item.objects.create(text='itemey 2', list=correct_list)
other_list = List.objects.create()
Item.objects.create(text='other list item 1', list=other_list)
Item.objects.create(text='other list item 2', list=other_list)
response = self.client.get(f'/lists/{correct_list.id}/')
self.assertContains(response, 'itemey 1')
self.assertContains(response, 'itemey 2')
self.assertNotContains(response, 'other list item 1')
self.assertNotContains(response, 'other list item 2')
def test_passes_correct_list_to_template(self):
other_list = List.objects.create()
correct_list = List.objects.create()
response = self.client.get(f'/lists/{correct_list.id}/')
self.assertEqual(response.context['list'], correct_list)
def test_can_save_a_POST_request_to_an_existing_list(self):
other_list = List.objects.create()
correct_list = List.objects.create()
self.client.post(
f'/lists/{correct_list.id}/',
data={'item_text': 'A new item for an existing list'}
)
self.assertEqual(Item.objects.count(), 1)
new_item = Item.objects.first()
self.assertEqual(new_item.text, 'A new item for an existing list')
self.assertEqual(new_item.list, correct_list)
def test_redirects_to_list_view(self):
other_list = List.objects.create()
correct_list = List.objects.create()
response = self.client.post(
f'/lists/{correct_list.id}/',
data={'item_text': 'A new item for an existing list'}
)
self.assertRedirects(response, f'/lists/{correct_list.id}/')
def test_validation_errors_end_up_on_lists_page(self):
list_ = List.objects.create()
response = self.client.post(
f'/lists/{list_.id}/',
data={'item_text': ''}
)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'list.html')
expected_error = escape("You can't have an empty list item")
self.assertContains(response, expected_error)
class ListAndItemModelsTest(TestCase):
def test_saving_and_retrieving_items(self):
my_list = List()
my_list.save()
first_item = Item()
first_item.text = 'O primeiro item'
first_item.list = my_list
first_item.save()
second_item = Item()
second_item.text = 'O segundo item'
second_item.list = my_list
second_item.save()
saved_list = List.objects.first()
self.assertEqual(saved_list, my_list)
saved_items = Item.objects.all()
self.assertEqual(saved_items.count(), 2)
first_saved_item = saved_items[0]
second_saved_item = saved_items[1]
self.assertEqual(first_saved_item.text, 'O primeiro item')
self.assertEqual(first_saved_item.list, my_list)
self.assertEqual(second_saved_item.text, 'O segundo item')
self.assertEqual(second_saved_item.list, my_list)
class NewListTest(TestCase):
def test_can_save_a_POST_request(self):
self.client.post('/lists/new', data={'item_text': 'A new list item'})
self.assertEqual(Item.objects.count(), 1)
new_item = Item.objects.first()
self.assertEqual(new_item.text, 'A new list item')
def test_redirects_after_POST(self):
response = self.client.post(
'/lists/new', data={'item_text': 'A new list item'})
new_list = List.objects.first()
self.assertRedirects(response, f'/lists/{new_list.id}/')
def test_validation_errors_are_sent_back_to_home_page_template(self):
response = self.client.post('/lists/new', data={'item_text': ''})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'home.html')
expected_error = escape("You can't have an empty list item")
self.assertContains(response, expected_error)
def test_invalid_list_items_arent_saved(self):
self.client.post('/lists/new', data={'item_text': ''})
self.assertEqual(List.objects.count(), 0)
self.assertEqual(Item.objects.count(), 0)
|
[
"julia.tiemi@bagy.com.br"
] |
julia.tiemi@bagy.com.br
|
18beb3d74496e14e2cf7136d77c9932a29d5ef61
|
460a1a125c9ae05dc0056b3b2369464868b8cee2
|
/ebook/forms.py
|
fb5ba232d7a5c5c0a1c8d02b9e250675de63242e
|
[] |
no_license
|
makyo-old/treebook
|
8fd7b258ede9e630f6840c64c2fe64aeb3257b89
|
c60fa421ed286ff722a390ff96b8a09145cd51c8
|
refs/heads/master
| 2020-04-21T03:15:03.640954
| 2010-10-14T04:36:16
| 2010-10-14T04:36:16
| 169,279,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
from django import forms
from ebook.models import *
class ChapterForm(forms.ModelForm):
body = forms.CharField(widget = forms.Textarea({'rows': 25, 'cols': 78}))
class Meta:
model = Manifesto
exclude = ('owner', 'views', 'stars', 'featured', 'weight', 'ctime')
class CommentForm(forms.ModelForm):
body = forms.CharField(widget = forms.Textarea({'cols': 78}))
class Meta:
model = Comment
exclude = ('post', 'parent', 'ctime', 'owner', 'published')
|
[
"mjs@mjs-svc.com"
] |
mjs@mjs-svc.com
|
5c3fb98bb4ef83fd5261cc0490e8816e9d1edbcd
|
2a86af7298ad3497814cd605c8961d18730d1a05
|
/emitr/migrations/0008_query_notified.py
|
4d12554f0649a8a904724687f1e02eccc8dc808a
|
[] |
no_license
|
mdakibg/SanskarEmitr
|
edeaec65cbc16e25ba208a18e45e5dc486adcc78
|
3e6d1d98db10247981ea594aae80ee822528242e
|
refs/heads/master
| 2023-02-25T04:20:43.353013
| 2021-01-28T10:41:04
| 2021-01-28T10:41:04
| 319,060,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
# Generated by Django 3.1.2 on 2020-11-25 11:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('emitr', '0007_query'),
]
operations = [
migrations.AddField(
model_name='query',
name='notified',
field=models.IntegerField(default=0),
),
]
|
[
"mohdakibgour@gmail.com"
] |
mohdakibgour@gmail.com
|
40f070590e60bf8d398a6a5af723c465aed2a574
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/containerregistry/v20201101preview/get_import_pipeline.py
|
50b25e7dbdd67fb56dc51ebfb0be2215a0287835
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 6,696
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetImportPipelineResult',
'AwaitableGetImportPipelineResult',
'get_import_pipeline',
]
@pulumi.output_type
class GetImportPipelineResult:
"""
An object that represents an import pipeline for a container registry.
"""
def __init__(__self__, id=None, identity=None, location=None, name=None, options=None, provisioning_state=None, source=None, system_data=None, trigger=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if options and not isinstance(options, list):
raise TypeError("Expected argument 'options' to be a list")
pulumi.set(__self__, "options", options)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if source and not isinstance(source, dict):
raise TypeError("Expected argument 'source' to be a dict")
pulumi.set(__self__, "source", source)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if trigger and not isinstance(trigger, dict):
raise TypeError("Expected argument 'trigger' to be a dict")
pulumi.set(__self__, "trigger", trigger)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.IdentityPropertiesResponse']:
"""
The identity of the import pipeline.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the import pipeline.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def options(self) -> Optional[Sequence[str]]:
"""
The list of all options configured for the pipeline.
"""
return pulumi.get(self, "options")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the pipeline at the time the operation was called.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def source(self) -> 'outputs.ImportPipelineSourcePropertiesResponse':
"""
The source properties of the import pipeline.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def trigger(self) -> Optional['outputs.PipelineTriggerPropertiesResponse']:
"""
The properties that describe the trigger of the import pipeline.
"""
return pulumi.get(self, "trigger")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetImportPipelineResult(GetImportPipelineResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetImportPipelineResult(
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
options=self.options,
provisioning_state=self.provisioning_state,
source=self.source,
system_data=self.system_data,
trigger=self.trigger,
type=self.type)
def get_import_pipeline(import_pipeline_name: Optional[str] = None,
registry_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetImportPipelineResult:
"""
An object that represents an import pipeline for a container registry.
:param str import_pipeline_name: The name of the import pipeline.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group to which the container registry belongs.
"""
__args__ = dict()
__args__['importPipelineName'] = import_pipeline_name
__args__['registryName'] = registry_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:containerregistry/v20201101preview:getImportPipeline', __args__, opts=opts, typ=GetImportPipelineResult).value
return AwaitableGetImportPipelineResult(
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
options=__ret__.options,
provisioning_state=__ret__.provisioning_state,
source=__ret__.source,
system_data=__ret__.system_data,
trigger=__ret__.trigger,
type=__ret__.type)
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
c3540c272c47b4195180a168ca75db6c5b50af69
|
93ca08c158960f67e81576dfa48a0d110af13f33
|
/flask_app/sipaccounts/sipaccount.py
|
9d9748d0629e86ec4eb35e683174bf2d0b67c2c4
|
[] |
no_license
|
alochym01/freeswitch_flask_gui
|
be49d869697b67d33c326ea01f3a1ffd75ccc4da
|
5a28ea256f7cc759f5bbf71230baab514d1b6abf
|
refs/heads/master
| 2020-03-09T01:26:24.954737
| 2018-11-12T15:53:59
| 2018-11-12T15:53:59
| 128,514,424
| 1
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,195
|
py
|
from flask_app.sipaccounts import sip_account
from flask_app.sipaccounts.sipform import SipAccount
from flask_app.models.sip_account import SipAcc
from flask import render_template, redirect, url_for, flash, request
from flask_login import login_required
from flask_app import db
import redis
@sip_account.route('/')
@login_required
def index():
sipaccs = SipAcc.query.all()
return render_template('sipaccs/index.html', sipaccs=sipaccs)
@sip_account.route('/create')
@login_required
def create():
form = SipAccount()
return render_template('sipaccs/create.html', form=form)
@sip_account.route('/store', methods=['POST'])
@login_required
def store():
form = SipAccount()
if form.validate_on_submit():
sipacc = SipAcc.query.filter_by(username=form.username.data).first()
if sipacc:
flash('username is already used')
return redirect(url_for('sip-account.create'))
print(form)
# todo should be save as a record in database
sip = SipAcc(
username=form.username.data,
domain=form.domain.data,
toll_allow=form.toll_allow.data,
context=form.context.data,
max_calls=form.max_calls.data,
caller_number=form.caller_number.data,
outbound_caller_number=form.outbound_caller_number.data,
caller_name=form.caller_name.data,
outbound_caller_name=form.outbound_caller_name.data
)
sip.set_password(form.password.data)
db.session.add(sip)
db.session.commit()
flash('Congratulations, Created successfully!')
return redirect(url_for('sip-account.index'))
# debug errors of form submit
# https://stackoverflow.com/questions/6463035/wtforms-getting-the-errors
# for field, errors in form.errors.items():
# print(form[field].label)
# print(', '.join(errors))
return redirect(url_for('sip-account.create', form=form))
@sip_account.route('/show/<int:id>')
@login_required
def show(id):
sipacc = SipAcc.query.filter_by(id=id).first()
return render_template('sipaccs/show.html', sipacc=sipacc)
@sip_account.route('/edit/<int:id>')
@login_required
def edit(id):
form = SipAccount()
sipacc = SipAcc.query.filter_by(id=id).first()
return render_template('sipaccs/edit.html', sipacc=sipacc, form=form)
@sip_account.route('/update/<int:id>', methods=['POST'])
@login_required
def update(id):
form = SipAccount()
sipacc = SipAcc.query.filter_by(id=id).first()
if form.validate_on_submit():
sipacc.username=form.username.data
sipacc.domain=form.domain.data
sipacc.toll_allow=form.toll_allow.data
sipacc.context=form.context.data
sipacc.max_calls=form.max_calls.data
sipacc.caller_number=form.caller_number.data
sipacc.outbound_caller_number=form.outbound_caller_number.data
sipacc.caller_name=form.caller_name.data
sipacc.outbound_caller_name=form.outbound_caller_name.data
sipacc.set_password(form.password.data)
db.session.commit()
redis_key = sipacc.username + '_' + sipacc.domain
try:
r = redis.StrictRedis(host='localhost', port=6379, db=0)
r.delete(redis_key)
r.connection_pool.disconnect()
except:
pass
flash('Update successfully')
return redirect(url_for('sip-account.index'))
return render_template('sipaccs/edit.html', sipacc=sipacc, form=form)
@sip_account.route('/delete/<int:id>', methods=['GET', 'POST'])
@login_required
def delete(id):
form = SipAccount()
sipacc = SipAcc.query.filter_by(id=id).first()
if request.method == 'GET':
return render_template('sipaccs/delete.html', sipacc=sipacc, form=form)
redis_key = sipacc.username + '_' + sipacc.domain
try:
r = redis.StrictRedis(host='localhost', port=6379, db=0)
r.delete(redis_key)
r.connection_pool.disconnect()
except:
pass
db.session.delete(sipacc)
db.session.commit()
flash('Delete successfully')
return redirect(url_for('sip-account.index'))
|
[
"hadn@ubuntu"
] |
hadn@ubuntu
|
54b20d9bc5ec32b30a49e6debd290d9196e93678
|
97e37192d4a695777c538596086c0be826b721e1
|
/tools/train.py
|
458b2151ceb9b534e946cb19704fd2931ea57c23
|
[
"Apache-2.0"
] |
permissive
|
Sayyam-Jain/vedastr
|
1b587adc1ff4dc79ab7acc71d7ee08fe600c8933
|
83511a408b68c264561a30daff5154cd0148bebd
|
refs/heads/master
| 2022-12-13T08:06:21.304845
| 2020-09-10T05:05:50
| 2020-09-10T05:05:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 961
|
py
|
import argparse
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from vedastr.runners import TrainRunner
from vedastr.utils import Config
def parse_args():
parser = argparse.ArgumentParser(description='Train a classification model')
parser.add_argument('config', type=str, help='config file path')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg_path = args.config
cfg = Config.fromfile(cfg_path)
_, fullname = os.path.split(cfg_path)
fname, ext = os.path.splitext(fullname)
root_workdir = cfg.pop('root_workdir')
workdir = os.path.join(root_workdir, fname)
os.makedirs(workdir, exist_ok=True)
train_cfg = cfg['train']
deploy_cfg = cfg['deploy']
common_cfg = cfg['common']
common_cfg['workdir'] = workdir
runner = TrainRunner(train_cfg, deploy_cfg, common_cfg)
runner()
if __name__ == '__main__':
main()
|
[
"jun.sun@media-smart.cn"
] |
jun.sun@media-smart.cn
|
37d92b233c3353b77e82660ad4e05c02f3447a27
|
d99f2eff92f90464d04e19448e71d1df4aa7264d
|
/trafficsim/shedule.py
|
e73065cb42dc2d152edca59ed322247040373b87
|
[] |
no_license
|
fprott/trafficSim
|
23c59a602340488a2734d98399f8b91b56294e67
|
dbd990b49ebeaa82607504972a8eb9eb387e682c
|
refs/heads/master
| 2021-01-20T02:28:21.326582
| 2017-07-18T17:02:27
| 2017-07-18T17:02:27
| 89,411,175
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,410
|
py
|
import numpy
class Shedule:
def __init__(self,time_step,x_init,v_init,a_init,goals,current_time):
#self.__init__()
self.time_step=time_step
self.x_now=x_init;
self.v_now=v_init;
self.a_now=a_init;
        self.goals=goals;
self.nr_cars=len(self.x_now);
self.current_time=current_time;
self.stop=0;
self.goalreached = [0]*self.nr_cars;
def nextstep(self,a):
self.a_now = a;
for i in range(0,self.nr_cars):
self.x_now[i]=self.x_now[i]+self.v_now[i]*self.time_step+0.5*self.a_now[i]*self.time_step*self.time_step;
self.v_now[i]=self.v_now[i]+self.a_now[i]*self.time_step;
self.current_time=self.current_time+self.time_step;
    def goal_reached(self):
        for i in range(0, self.nr_cars):
            if self.x_now[i] >= self.goals[i]:
                self.goalreached[i] = 1  # car i has reached its goal
    def checkcollision(self):
        """Check for car collisions and set self.stop to 1 in case of a collision."""
# work in progress
#time_step,x_init,v_init,a_init,gaols,current_time
#Test
sh= Shedule(1,[0,0,0],[0,0,0],[0,0,0],[14,0,20],0)
#print (sh.x_now)
#sh.nextstep([2,0,4])
#print (sh.x_now)
#sh.nextstep([2,0,4])
#print (sh.x_now)
#sh.nextstep([2,0,4])
#print (sh.x_now)
#sh.nextstep([2,0,4])
#print (sh.x_now)
#print(sh.stop)
#
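# A worked example of one update step (illustrative numbers): starting from
# x = 0, v = 0 with a = 2 and time_step = 1, nextstep() gives
# x -> 0 + 0*1 + 0.5*2*1**2 = 1.0 and v -> 0 + 2*1 = 2.0.
#sh2 = Shedule(1, [0.0], [0.0], [0.0], [10.0], 0)
#sh2.nextstep([2])
#print(sh2.x_now, sh2.v_now) # [1.0] [2.0]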
|
[
"balog270891@yahoo.com"
] |
balog270891@yahoo.com
|
eb53ec3ed82d94c6cd342569b0d7e04e9f9f29ae
|
ba0cbdae81c171bd4be7b12c0594de72bd6d625a
|
/MyToontown/py2/toontown/coghq/CashbotMintPaintMixer_Action00.pyc.py
|
24a4404355d7a5da12ca0c6d3fe91af8ae4d0b8b
|
[] |
no_license
|
sweep41/Toontown-2016
|
65985f198fa32a832e762fa9c59e59606d6a40a3
|
7732fb2c27001264e6dd652c057b3dc41f9c8a7d
|
refs/heads/master
| 2021-01-23T16:04:45.264205
| 2017-06-04T02:47:34
| 2017-06-04T02:47:34
| 93,279,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,836
|
py
|
# 2013.08.22 22:18:18 Pacific Daylight Time
# Embedded file name: toontown.coghq.CashbotMintPaintMixer_Action00
from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr',
'name': 'LevelMgr',
'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_10/models/cashbotHQ/ZONE10a',
'wantDoors': 1},
1001: {'type': 'editMgr',
'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone',
'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
10009: {'type': 'healBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(63.9741363525, -10.9343223572, 9.97696113586),
'hpr': Vec3(270.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'rewardPerGrab': 8,
'rewardPerGrabMax': 0},
10010: {'type': 'healBarrel',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 10009,
'pos': Point3(0.0, 0.0, 4.13999986649),
'hpr': Vec3(349.358764648, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'rewardPerGrab': 8,
'rewardPerGrabMax': 0},
10000: {'type': 'nodepath',
'name': 'mixers',
'comment': '',
'parentEntId': 0,
'pos': Point3(-19.2397289276, 0.0, 5.53999996185),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.758001744747, 0.758001744747, 0.758001744747)},
10004: {'type': 'paintMixer',
'name': 'mixer0',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0.0, 10.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Vec3(1.0, 1.0, 1.0),
'motion': 'easeInOut',
'offset': Point3(20.0, 20.0, 0.0),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 1,
'waitPercent': 0.1},
10005: {'type': 'paintMixer',
'name': 'mixer1',
'comment': '',
'parentEntId': 10000,
'pos': Point3(29.0, 10.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Vec3(1.0, 1.0, 1.0),
'motion': 'easeInOut',
'offset': Point3(0.0, -20.0, 0.0),
'period': 8.0,
'phaseShift': 0.5,
'shaftScale': 1,
'waitPercent': 0.1},
10006: {'type': 'paintMixer',
'name': 'mixer2',
'comment': '',
'parentEntId': 10000,
'pos': Point3(58.0, -8.94072246552, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Vec3(1.0, 1.0, 1.0),
'motion': 'easeInOut',
'offset': Point3(-20.0, -20.0, 0.0),
'period': 8.0,
'phaseShift': 0.5,
'shaftScale': 1,
'waitPercent': 0.1}}
Scenario0 = {}
levelSpec = {'globalEntities': GlobalEntities,
'scenarios': [Scenario0]}
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\coghq\CashbotMintPaintMixer_Action00.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:18:18 Pacific Daylight Time
|
[
"sweep14@gmail.com"
] |
sweep14@gmail.com
|
4ade516bc779fa57cb78f0ffa70584c99817367e
|
e257ea927ed059765c0ad308a43c4fb7670f6c28
|
/source/IDProcessor.py
|
c50c4e15feb8993974f515373a00865ba637a22f
|
[] |
no_license
|
parikhshyamal1993/IDScanner
|
10e6ee75f4b5d0c6d1ece78da67737ee573fe638
|
b0b199a0c1a63674d14a7bb2215839056c1267e5
|
refs/heads/master
| 2023-08-28T03:01:00.055516
| 2021-10-28T05:00:19
| 2021-10-28T05:00:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,300
|
py
|
import sys , os
import cv2
import numpy as np
from pytesseract import image_to_string
import re
class PanCardExtractor():
def __init__(self) -> None:
self.kernal = np.ones((2,2),np.uint8)
def Identifier(self,data):
"""
        If the vertical (y) distance between the "Income Tax Department" line and
        the PAN number line is greater than 250, the card is the old variant;
        otherwise it is the new variant:
            if x1[1][1] - x[1][1] >= 250:
                old
            else:
                new
"""
IncomeTaxIdentityList = ["INCOME TAX" ,"TAX","INCOME"]
PanCardIdentityList = ["Permanent", "Account" ,"Number"]
IncomeLine = 0
PanCard = 0
for i in range(len(data)):
for items in IncomeTaxIdentityList:
if re.findall(items , data[i][0]):
IncomeLine = data[i]
break
for items in PanCardIdentityList:
if re.findall(items , data[i][0]):
PanCard = data[i]
break
if PanCard[1][1] - IncomeLine[1][1] > 250:
return 2
else:
return 1
def basicTransform(self,img):
_, mask = cv2.threshold(img,80,255,cv2.THRESH_BINARY_INV)
img = cv2.bitwise_not(mask)
return img
def panExtract(self,image):
panColor = cv2.imread(image)
panColor = cv2.resize(panColor,(1200,743))
adjusted = cv2.convertScaleAbs(panColor, alpha=1.5, beta=0)
panImage = cv2.imread(image,0)
meanImg = panImage.mean()
#panImage = panImage / meanImg
print("panImage",panImage.shape)
panImage = cv2.resize(panImage,(1200,743))
_, mask = cv2.threshold(panImage,90,255,cv2.THRESH_OTSU+cv2.THRESH_BINARY_INV)
dst = cv2.dilate(mask,self.kernal,iterations = 1)
dst = cv2.bitwise_not(dst)
kernel_ = cv2.getStructuringElement(cv2.MORPH_RECT,(31,5))
clossing = cv2.morphologyEx((255-dst),cv2.MORPH_CLOSE,kernel_)
contours , hierarchy = cv2.findContours(clossing,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_NONE)
allBoxes = []
typeIDList = []
for cnt , high in zip(contours,hierarchy[0]):
x,y,w,h = cv2.boundingRect(cnt)
if h > 20 and w >30 and x <550:
cv2.rectangle(panColor,(x,y),(x+w,y+h),(0,255,100),3)
cells = adjusted[y-5:y+h,x:x+w]
gray = cv2.cvtColor(cells,cv2.COLOR_BGR2GRAY)
data = image_to_string(cells,config='--psm 7')
allBoxes.append([data,[x,y,x+w,y+h]])
cv2.imshow("Binary",cv2.resize(panColor,(600,375)))
cv2.waitKey(0)
cv2.destroyAllWindows()
allBoxes.reverse()
return allBoxes
def run(self,Image):
HOCR = self.panExtract(Image)
#print("Output:",HOCR)
typeId = self.Identifier(HOCR)
print("pan type" , typeId)
if len(HOCR) >2:
if typeId == 2:
output = self.ExtractionType2(HOCR)
elif typeId == 1:
output = self.ExtractionType1(HOCR)
#print("Pan EXtract",output)
return output
else:
return " "
def ExtractionType2(self,data):
output = {}
IncomeTaxIdentityList = ["INCOME TAX" ,"TAX","INCOME"]
PanCardIdentityList = ["Permanent", "Account" ,"Number"]
IncomeLine = 0
PanCard = 0
for i in range(len(data)):
#print("items :",i)
for items in PanCardIdentityList:
if re.findall(items , data[i][0]):
PanCard = data[i]
output["PAN"] = re.sub(r'[^\w\s]','',re.sub('\n\x0c', '', data[i+1][0]))
break
for items in IncomeTaxIdentityList:
if re.findall(items , data[i][0]):
#print("ID name",data[i])
IncomeLine = data[i]
#print("Name:",data[i+1])
output["Name"] = re.sub(r'[^\w\s]','',re.sub('\n\x0c', '', data[i+1][0]))
#print("Fathers Name",data[i+2])
output["Fathers Name"] = re.sub(r'[^\w\s]','',re.sub('\n\x0c', '', data[i+2][0]))
#print("Date ",data[i+3])
output["Date"] = re.sub('\n\x0c', '', data[i+3][0])
break
return output
def ExtractionType1(self,data):
output = {}
IncomeTaxIdentityList = ["INCOME TAX" ,"TAX","INCOME"]
PanCardIdentityList = ["Permanent", "Account" ,"Number"]
DateList = ["Date of Birth","Date","Birth"]
IncomeLine = 0
PanCard = 0
for i in range(len(data)):
#print("items :",i)
for items in PanCardIdentityList:
if re.findall(items , data[i][0]):
PanCard = re.sub('\n\x0c', '', data[i][0])
output["PAN"] = re.sub(r'[^\w\s]','',re.sub('\n\x0c', '', data[i+1][0]))
#print("PAN",data[i][0],data[i+1][0])
#print("Name:",data[i+3])
output["Name"] = re.sub(r'[^\w\s]','', re.sub('\n\x0c', '', data[i+3][0]))
#print("Fathers Name",data[i+5])
output["Fathers Name"] = re.sub(r'[^\w\s]','',re.sub('\n\x0c', '', data[i+5][0]))
output["Data"] = re.sub('\n\x0c', '', data[i+8][0])
break
return output
class AadharExtraction():
def __init__(self) -> None:
self.kernal = np.ones((2,2),np.uint8)
def AadharExtract(self,image):
y , x = 1200 , 749
panColor = cv2.imread(image)
panColor = cv2.resize(panColor,(y,x))
#adjusted = cv2.convertScaleAbs(panColor, alpha=1.0, beta=0)
panImage = cv2.cvtColor(panColor,cv2.COLOR_BGR2GRAY)
#panImage = panImage / meanImg
print("panImage",panImage.shape)
thresh1 = cv2.adaptiveThreshold(panImage, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 199, 10)
# cv2.imshow("dst",cv2.resize(thresh1,(600,375)))
# cv2.waitKey(0)
# cv2.destroyAllWindows()
kernel_ = cv2.getStructuringElement(cv2.MORPH_RECT,(23,1))
clossing = cv2.morphologyEx(thresh1,cv2.MORPH_OPEN,kernel_)
#clossing[clossing<140] = 0
contours , hierarchy = cv2.findContours(clossing,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_NONE)
allBoxes = []
typeIDList = []
for cnt , high in zip(contours,hierarchy[0]):
x,y,w,h = cv2.boundingRect(cnt)
if h > 20 and w >30 and h <60:
cv2.rectangle(panColor,(x,y),(x+w,y+h),(0,255,100),3)
cells = panColor[y:y+h,x:x+w]
data = image_to_string(cells,config='--psm 7')
allBoxes.append([re.sub("\n\x0c","",data),[x,y,x+w,y+h]])
cv2.imshow("Binary",cv2.resize(panColor,(600,375)))
cv2.waitKey(0)
cv2.destroyAllWindows()
allBoxes.reverse()
return allBoxes
def run(self,image):
data = self.AadharExtract(image)
output = {}
HOCR = {}
output["Aadhar number"] = ""
IDIdentityList = ["Government of India" ,"Government","India"]
GenderIdentityList = ["male", "female" ,"transgender"]
DateList = ["Birth","Year","YoB"]
FatherList = ["Father"]
IncomeLine = 0
PanCard = 0
for i in range(len(data)):
print("items :",data[i][0])
for items in IDIdentityList:
if re.findall(items.lower() , data[i][0].lower()):
output["Name"] = data[i+2][0]
HOCR["Name"] = data[i+2][1]
#print("Fathers Name",data[i+5])
break
for items in GenderIdentityList:
if re.findall(items.lower() , data[i][0].lower()):
try:
gender = data[i][0].split("/")[-1]
except:
gender = data[i][0]
output["gender"] = gender
HOCR["gender"] = items
#print("Fathers Name",data[i+5])
break
for items in DateList:
if re.findall(items.lower() , data[i][0].lower()):
print("date",data[i][0])
date = "".join([inte for inte in data[i][0].split() if inte.isdigit()])
output["date"] = date
HOCR["date"] = data[i][1]
#print("Fathers Name",data[i+5])
break
if re.sub(" ", "",data[i][0]).isdigit():
print("numbers",data[i][0])
output["Aadhar number"] += data[i][0]
return output
class PassportExtractor():
def __init__(self) -> None:
pass
def panExtract(self,):
pass
class IDextract():
def __init__(self) -> None:
pass
def Application(self,Image):
        image = cv2.imread(Image)
        data = image_to_string(image)
        return data
if __name__ == "__main__":
pan = PanCardExtractor()
aadhar = AadharExtraction()
outPuts = pan.run(sys.argv[1])
print("output :",outPuts)
|
[
"parikhshyamal1993@gmail.com"
] |
parikhshyamal1993@gmail.com
|
ee14d431bef47b8ff12fcd0f4d706dcb62c076c4
|
bb5323dc6ac6e3fded6eab25b9a4a4088065b276
|
/docker/generate_production_ini.py
|
84c8176b73de6acb39771578c425b81938125cd9
|
[
"BSD-2-Clause"
] |
permissive
|
anderson-attilio/publication_stats
|
57f3a6c379ec5442205488727ca0bb29fb6a6b79
|
49e3839b930da005cc24406f83c068d104a5ac9d
|
refs/heads/master
| 2021-01-12T14:32:41.674395
| 2016-09-12T16:52:37
| 2016-09-12T16:52:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 715
|
py
|
# coding: utf-8
import os
from configparser import ConfigParser
PROJECT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
production_template_ini_filepath = os.path.join(PROJECT_PATH + '/production.ini-TEMPLATE')
new_production_ini_filepath = os.path.join(PROJECT_PATH + '/production.ini')
config = ConfigParser()
config.read_file(open(production_template_ini_filepath))
config.set('app:main', 'elasticsearch', os.environ.get('ELASTICSEARCH', '127.0.0.1:9200'))
config.set('app:main', 'articlemeta', os.environ.get('ARTICLEMETA', 'articlemeta.scielo.org:11720'))
config.set('server:main', 'port', '8000')
with open(new_production_ini_filepath, 'w') as configfile:
config.write(configfile)
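# A hypothetical invocation (the Elasticsearch host is a made-up example); both
# variables fall back to the defaults above when unset, so only the values you
# want to override need to be exported:
#   ELASTICSEARCH=es.example.org:9200 python docker/generate_production_ini.py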
|
[
"fabiobatalha@gmail.com"
] |
fabiobatalha@gmail.com
|
ba71d321961cc6df103683482bba3c8c3883a4b2
|
b5904720e5267e242c911a4a0ba1d4be66e0a2f9
|
/robots/oat_viper_robot/adept_s650_morse/morse/simple_simulation/viper_with_gripper.py
|
e003bfb8665b07812bbdd10e502282781eb4a321
|
[
"BSD-2-Clause"
] |
permissive
|
dgerod/robots-in-morse-using-ros
|
ccd2416a13221decfae50c7e00d855947cf802b9
|
2fcb057f345d40cb948fa913cf226f17fd19eb9b
|
refs/heads/master
| 2021-01-22T05:20:11.366350
| 2020-08-30T11:24:08
| 2020-08-30T11:24:08
| 81,646,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,565
|
py
|
from morse_helpers import initialize
initialize(__file__)
import math
from morse_helpers.settings import SimulationLocalSettings
from morse_helpers.storage import FileStorage
from morse_helpers.adapters import ROSRegister
from morse.builder import Clock
from morse.builder import Environment, PassiveObject, FakeRobot
from simple_simulation.builder.ort_viper_s650 import create_robot
def _create_simulation_controller():
# A simulation controller to which attach all extra sensors used
# to manage the scene
simulation_controller = FakeRobot("simulation_controller")
simulation_controller.translate(z=3)
# Clock to synchronize ROS with MORSE execution
clock = Clock()
simulation_controller.append(clock)
ROSRegister.add_topic(clock, '/clock')
def _prepare_environment(robot):
file_storage = FileStorage()
table = PassiveObject(file_storage.find('furnitures.blend'), 'basic_desk')
table.translate(x=0.25)
table.rotate(z=-math.pi / 2)
box = PassiveObject(file_storage.find('objects.blend'), 'BlackBox')
box.name = 'BlackBox'
box.properties(Type='box', Label=box.name, Graspable=True)
box.translate(x=0.50, y=0, z=0.75)
robot.translate(x=0.0, y=-0.0, z=0.75)
env = Environment(file_storage.find("empty_world.blend"))
env.set_camera_location([1.0, -1.0, 2.5])
env.show_framerate(True)
def start_simulation():
SimulationLocalSettings().show_info()
_create_simulation_controller()
robot = create_robot()
_prepare_environment(robot.base)
start_simulation()
|
[
"dgerod@xyz-lab.org.es"
] |
dgerod@xyz-lab.org.es
|
43156a318809c4d33364e140432059e1ef4c9d8c
|
27ab47f633d38eaa003fa604f2210dd9ef4a9a1d
|
/MTG_Card_Identifier/dir_functions.py
|
5bb94fa129ff9dc3b314247978c4417505f5cede
|
[
"MIT"
] |
permissive
|
sschatz1997/Card_Identifier
|
17270c91285d99aa283ec9e72cf72bf830d9b800
|
82002bbfbd17aa14fdfc48c9f88956f433795cb3
|
refs/heads/main
| 2023-02-19T03:30:12.135140
| 2021-01-20T23:42:17
| 2021-01-20T23:42:17
| 331,459,443
| 0
| 0
|
MIT
| 2021-01-20T23:42:18
| 2021-01-20T23:25:08
|
Python
|
UTF-8
|
Python
| false
| false
| 5,319
|
py
|
# dir_functions.py
import os
import csv
import sys
import config
#from os import path
# make sure the file type is acceptable
def check_file_type(f1):
# good file types for imgs
good_file_types = ['jpg','jpeg','png']
file_type = os.path.splitext(f1)[1][1:]
if file_type in good_file_types:
return True
elif os.path.isdir(f1) == True:
return "Can not read from sub directories!"
else:
return ".{} is not acceptable image type!".format(file_type)
# function to check if a file exists
def check_exists(f1):
if os.path.isfile(f1) == True:
return True
elif os.path.isdir(f1) == True:
return "Path returned not file!"
else:
return False
"""
check to see whether the script needs to add paths to the dir
"""
def check_target_dir(path1): #, file1):
# first check if path is in the dir of the script
script_path = os.path.dirname(os.path.realpath(__file__))
# create a test path by adding main and separator
tp = config.main_path + path1
if script_path == path1:
return script_path
elif os.path.isdir(tp) == True:
return tp + config.separator
else:
return path1 + config.separator
"""
batch dir checker function
-- takes in a dir and a list of all files
-- loops through the list and checks each file
-- returns the good ones and tells the user which ones can't be searched for
"""
def dir_checker(mainP, path1):
# turn into a managable path test || maybe add this back later
#manageable_path = config.main_path + uploaded_path + config.separator + path1
good_files = [] # a good file list
bad_files = [] # bad file list
total_files = len(path1) # number of total file
bad_files_c = 0
target_path = check_target_dir(mainP)
for p in path1:
# temp var
tv = target_path + p
# check if exists
if check_exists(tv) == True:
# check the file type
if check_file_type(tv) == True:
good_files.append(tv)
else:
bad_files.append(tv)
bad_files_c += 1
else:
bad_files.append(tv)
bad_files_c += 1
for b in bad_files:
print('File {} can not be checked'.format(b))
return total_files, good_files
# just returns a basic csv file
def in_csv(file1):
data = []
with open(file1, 'r', newline='') as f:
r1 = csv.reader(f)
for row in r1:
data.append(row[-1])
f.close()
# delete the first row for the header
del data[0]
return data
# this is for a batch file that contains path links
def read_batch_txt_path(file1):
# check if file is real
#print(os.path.splitext(file1)[1][1:])
#print(os.path.isfile(file1))
if os.path.isfile(file1) == True:
file1 = open(file1, 'r')
Lines = file1.readlines()
good_files = []
bad_files = []
bad_files_c = 0
total_files = len(Lines)
for l in Lines:
# temp var
tv = l.rstrip('\n')
if check_exists(tv) == True:
# check the file type
if check_file_type(tv) == True:
good_files.append(tv)
else:
bad_files.append(tv)
bad_files_c += 1
else:
bad_files.append(tv)
bad_files_c += 1
    else:
        print('File does not exist!')
        return 0, []  # nothing to check
del Lines
for b in bad_files:
print('File {} can not be checked'.format(b))
return total_files, good_files
# this is for a batch file that contains url links
def read_batch_txt_url(file1):
from functions import check_url
# check if link file is real
#print(os.path.splitext(file1)[1][1:])
#print(os.path.isfile(file1))
if os.path.isfile(file1) == True:
file1 = open(file1, 'r')
Lines = file1.readlines()
good_files = []
bad_files = []
bad_files_c = 0
total_files = len(Lines)
for l in Lines:
# temp var
tv = l.rstrip('\n')
# check if link is good
if int(check_url(tv)) == 200: # its gonna check it twice
good_files.append(tv)
elif int(check_url(tv)) != 200:
bad_files.append(tv)
bad_files_c += 1
    else:
        print('File does not exist!')
        return 0, []  # nothing to check
del Lines
for b in bad_files:
print('File {} can not be checked'.format(b))
return total_files, good_files
# this is for a batch file that contains path links
def read_batch_csv_path(file1):
if os.path.isfile(file1) == True:
# read in data from csv
Lines = in_csv(file1)
good_files = []
bad_files = []
bad_files_c = 0
total_files = len(Lines)
for l in Lines:
# temp var
tv = l.rstrip('\n')
#print(check_exists(tv))
if check_exists(tv) == True:
# check the file type
if check_file_type(tv) == True:
good_files.append(tv)
else:
bad_files.append(tv)
bad_files_c += 1
else:
bad_files.append(tv)
bad_files_c += 1
    else:
        print('File does not exist!')
        return 0, []  # nothing to check
del Lines
for b in bad_files:
print('File {} can not be checked'.format(b))
return total_files, good_files
# this is for a batch file that contains url links
def read_batch_csv_url(file1):
from functions import check_url
if os.path.isfile(file1) == True:
# read in data from csv
Lines = in_csv(file1)
good_files = []
bad_files = []
bad_files_c = 0
total_files = len(Lines)
for l in Lines:
# temp var
tv = l.rstrip('\n')
# check if link is good
if int(check_url(tv)) == 200: # its gonna check it twice
good_files.append(tv)
elif int(check_url(tv)) != 200:
bad_files.append(tv)
bad_files_c += 1
    else:
        print('File does not exist!')
        return 0, []  # nothing to check
del Lines
for b in bad_files:
print('File {} can not be checked'.format(b))
return total_files, good_files
|
[
"s.schatz1997@gmail.com"
] |
s.schatz1997@gmail.com
|
2f65a4610e95d10f920a9d13cbb6f5c538a57fb6
|
159d4ae61f4ca91d94e29e769697ff46d11ae4a4
|
/venv/lib/python3.9/site-packages/pygments/styles/autumn.py
|
85fd8982a9d5a68b34291612f4711f7f46773562
|
[
"MIT"
] |
permissive
|
davidycliao/bisCrawler
|
729db002afe10ae405306b9eed45b782e68eace8
|
f42281f35b866b52e5860b6a062790ae8147a4a4
|
refs/heads/main
| 2023-05-24T00:41:50.224279
| 2023-01-22T23:17:51
| 2023-01-22T23:17:51
| 411,470,732
| 8
| 0
|
MIT
| 2023-02-09T16:28:24
| 2021-09-28T23:48:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,120
|
py
|
"""
pygments.styles.autumn
~~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class AutumnStyle(Style):
"""
A colorful style, inspired by the terminal highlighting style.
"""
default_style = ""
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #aaaaaa',
Comment.Preproc: 'noitalic #4c8317',
Comment.Special: 'italic #0000aa',
Keyword: '#0000aa',
Keyword.Type: '#00aaaa',
Operator.Word: '#0000aa',
Name.Builtin: '#00aaaa',
Name.Function: '#00aa00',
Name.Class: 'underline #00aa00',
Name.Namespace: 'underline #00aaaa',
Name.Variable: '#aa0000',
Name.Constant: '#aa0000',
Name.Entity: 'bold #800',
Name.Attribute: '#1e90ff',
Name.Tag: 'bold #1e90ff',
Name.Decorator: '#888888',
String: '#aa5500',
String.Symbol: '#0000aa',
String.Regex: '#009999',
Number: '#009999',
Generic.Heading: 'bold #000080',
Generic.Subheading: 'bold #800080',
Generic.Deleted: '#aa0000',
Generic.Inserted: '#00aa00',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: '#F00 bg:#FAA'
}
|
[
"davidycliao@gmail.com"
] |
davidycliao@gmail.com
|
61d61023aa1ef3018c9190d8a52786b0dd5580cf
|
bf5d8cfa2ad9038130671187be3403f5b861d6ab
|
/cloud_web/sdr/apps.py
|
196b3822a5617e119112040f6fb4b9aaab7d9cde
|
[] |
no_license
|
lihao2333/rtlsdr
|
85b9503dc87770444b931b74c59bb66fdd27718a
|
c7a53687be01302945a58ab28f6731d3ab514a34
|
refs/heads/master
| 2021-05-04T20:32:16.489074
| 2018-04-08T06:12:44
| 2018-04-08T06:12:44
| 119,818,073
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 81
|
py
|
from django.apps import AppConfig
class SdrConfig(AppConfig):
name = 'sdr'
|
[
"lihao3016@gmail.com"
] |
lihao3016@gmail.com
|
9dd347b7212355ba4fa6a90d0497a8e5017cd3a0
|
ebf572010eaa11039709617c08c3723ebd40cf73
|
/models/__init__.py
|
5f267eaa4ae86e75f544dc992c86be506cebe9d5
|
[] |
no_license
|
daltonjoe/viseducat
|
341c8516fd20f6edf63e46ac4548f5b6b77d3f30
|
8b8e335a3c07fcbc4cf6df1c0dde6862a30b99c2
|
refs/heads/master
| 2023-03-01T16:22:42.811356
| 2021-02-07T14:12:28
| 2021-02-07T14:12:28
| 326,630,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
# -*- coding: utf-8 -*-
from . import library
from . import media
from . import media_unit
from . import publisher
from . import author
from . import media_type
from . import media_purchase
from . import media_queue
from . import media_movement
from . import tag
|
[
"talha_guzel1907@hotmail.com"
] |
talha_guzel1907@hotmail.com
|
2b697ad5a4e9a797fb564afb3d7afdb619c7eb61
|
6841db425d9b7d3b634b21fa2aeae5720e97b245
|
/happy.py
|
4e36172cbc66947f7d9dd3e1c7d6d4e3611de7bb
|
[] |
no_license
|
aakash003/zemoso
|
f1335387da6c10c82220e6f9e43f4650fa841cb7
|
e817b0c03dd8855c24db8624c1e31ef9d2d82a28
|
refs/heads/master
| 2021-05-05T19:52:41.989751
| 2018-02-11T15:30:08
| 2018-02-11T15:30:08
| 117,860,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36
|
py
|
if happy>2:
print("hello world")
|
[
"kumar.aakash10@gmail.com"
] |
kumar.aakash10@gmail.com
|
4ed6dd63e8f31c3e1e031534668ecf36ab9eef8b
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p2DJ/New/program/qiskit/simulator/startQiskit318.py
|
21cd9ab92fda16ef64bd2127105c730612c79e4c
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,278
|
py
|
# qubit number=2
# total number=18
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[1]) # number=6
prog.cz(input_qubit[0],input_qubit[1]) # number=7
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=8
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.y(input_qubit[1]) # number=2
prog.cx(input_qubit[0],input_qubit[1]) # number=4
prog.y(input_qubit[1]) # number=3
prog.h(input_qubit[0]) # number=15
prog.cz(input_qubit[1],input_qubit[0]) # number=16
prog.h(input_qubit[0]) # number=17
prog.x(input_qubit[0]) # number=13
prog.cx(input_qubit[1],input_qubit[0]) # number=14
prog.x(input_qubit[0]) # number=11
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit318.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
37d72fb2b0e493347ee626807598f8ad7459edb9
|
5942892f1a625370a048bc98d71b105368b8a155
|
/QandA_app/asgi.py
|
d89cf73b327877547b7f48e68cd1d6778ad62b37
|
[] |
no_license
|
Amansingh1202/QandA_app
|
fa533eb534bff2c519650ea020318ab50874dfee
|
ab0304304e7d7ac67b6bddbe79bc5b7f15bbcc74
|
refs/heads/main
| 2023-02-04T19:49:30.912014
| 2020-12-18T12:37:51
| 2020-12-18T12:37:51
| 303,122,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
ASGI config for QandA_app project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'QandA_app.settings')
application = get_asgi_application()
|
[
"aks001235@gmail.com"
] |
aks001235@gmail.com
|
d6cdf502d5552bc6727b119278719928dd220fd8
|
595dea2f5b5fec82415fee50ae524055a7258fef
|
/intercom/contact.py
|
1773ca503b9a6536ac05e5a1a3903d1b3fba900d
|
[
"MIT"
] |
permissive
|
rodolfofiuza/python-intercom
|
d7dcfc52136a9ae1abf814f3cc812607b9f31e08
|
2eea2a69e26d71a54ead96ae6e3c03f9ebebc873
|
refs/heads/master
| 2021-01-11T19:27:44.571901
| 2016-01-27T14:46:51
| 2016-01-27T14:46:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
# -*- coding: utf-8 -*-
from intercom.api_operations.save import Save
from intercom.api_operations.find import Find
from intercom.traits.api_resource import Resource
class Contact(Resource, Save, Find):
pass
|
[
"cesaraugusto@pingobox.com.br"
] |
cesaraugusto@pingobox.com.br
|
9f7aa71948b32baed70813dd38e5623a2815e93c
|
13ddb146cae9dd14e9a482c2ae98e3cc5900100b
|
/king_phisher/client/gui_utilities.py
|
04c241dfbbdb5721fb40e3753c6d0ea65f645f5b
|
[
"BSD-3-Clause"
] |
permissive
|
alleznei/king-phisher
|
a8ae4d83b08264f658e265b4d49c2ff96ba31248
|
6bab024ed81114295d3adbd210194e8e4508e69d
|
refs/heads/master
| 2022-12-22T08:00:44.677706
| 2018-02-20T20:30:34
| 2018-02-20T20:30:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,948
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/gui_utilities.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import contextlib
import copy
import datetime
import functools
import logging
import os
import socket
import threading
from king_phisher import find
from king_phisher import utilities
from gi.repository import Gdk
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import GtkSource
GObject.type_register(GtkSource.View)
GOBJECT_PROPERTY_MAP = {
'calendar': None, # delayed definition
'checkbutton': 'active',
'combobox': (
lambda c, v: c.set_active_iter(gtk_list_store_search(c.get_model(), v)),
lambda c: c.get_model().get_value(c.get_active_iter() or c.get_model().get_iter_first(), 0)
),
'entry': 'text',
'spinbutton': 'value',
'switch': 'active',
'textview': (
lambda t, v: t.get_buffer().set_text(v),
lambda t: t.get_buffer().get_text(t.get_buffer().get_start_iter(), t.get_buffer().get_end_iter(), False)
)
}
"""
The dictionary which maps GObjects to either the names of properties to
store text or a tuple which contains a set and a get function. If a tuple
of two functions is specified, the set function will be provided two
parameters (the object and the value), and the get function will just be
provided the object.
"""
# official python3 work-around per https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons
_cmp = lambda i1, i2: (i1 > i2) - (i1 < i2)
def which_glade():
"""
Locate the glade data file which stores the UI information in a Gtk Builder
format.
:return: The path to the glade data file.
:rtype: str
"""
return find.data_file(os.environ.get('KING_PHISHER_GLADE_FILE', 'king-phisher-client.ui'))
def glib_idle_add_once(function, *args, **kwargs):
"""
Execute *function* in the main GTK loop using :py:func:`GLib.idle_add`
one time. This is useful for threads that need to update GUI data.
:param function function: The function to call.
:param args: The positional arguments to *function*.
:param kwargs: The key word arguments to *function*.
:return: The result of the function call.
"""
@functools.wraps(function)
def wrapper():
function(*args, **kwargs)
return False
return GLib.idle_add(wrapper)
def glib_idle_add_wait(function, *args, **kwargs):
"""
Execute *function* in the main GTK loop using :py:func:`GLib.idle_add`
and block until it has completed. This is useful for threads that need
to update GUI data.
:param function function: The function to call.
:param args: The positional arguments to *function*.
:param kwargs: The key word arguments to *function*.
:return: The result of the function call.
"""
gsource_completed = threading.Event()
results = []
@functools.wraps(function)
def wrapper():
results.append(function(*args, **kwargs))
gsource_completed.set()
return False
GLib.idle_add(wrapper)
gsource_completed.wait()
return results.pop()
def gobject_get_value(gobject, gtype=None):
"""
Retrieve the value of a GObject widget. Only objects with corresponding
entries present in the :py:data:`.GOBJECT_PROPERTY_MAP` can be processed by
this function.
:param gobject: The object to retrieve the value for.
:type gobject: :py:class:`GObject.Object`
:param str gtype: An explicit type to treat *gobject* as.
:return: The value of *gobject*.
:rtype: str
"""
gtype = (gtype or gobject.__class__.__name__)
gtype = gtype.lower()
if isinstance(GOBJECT_PROPERTY_MAP[gtype], (list, tuple)):
try:
value = GOBJECT_PROPERTY_MAP[gtype][1](gobject)
except AttributeError:
return None
else:
value = gobject.get_property(GOBJECT_PROPERTY_MAP[gtype])
return value
def gobject_set_value(gobject, value, gtype=None):
"""
Set the value of a GObject widget. Only objects with corresponding entries
present in the :py:data:`.GOBJECT_PROPERTY_MAP` can be processed by this
function.
:param gobject: The object to set the value for.
:type gobject: :py:class:`GObject.Object`
:param value: The value to set for the object.
:param str gtype: An explicit type to treat *gobject* as.
"""
gtype = (gtype or gobject.__class__.__name__)
gtype = gtype.lower()
if gtype not in GOBJECT_PROPERTY_MAP:
raise ValueError('unsupported gtype: ' + gtype)
if isinstance(GOBJECT_PROPERTY_MAP[gtype], (list, tuple)):
GOBJECT_PROPERTY_MAP[gtype][0](gobject, value)
else:
gobject.set_property(GOBJECT_PROPERTY_MAP[gtype], value)
@contextlib.contextmanager
def gobject_signal_blocked(gobject, signal_name):
"""
This is a context manager that can be used with the 'with' statement
to execute a block of code while *signal_name* is blocked.
:param gobject: The object to block the signal on.
:type gobject: :py:class:`GObject.Object`
:param str signal_name: The name of the signal to block.
"""
signal_id = GObject.signal_lookup(signal_name, gobject.__class__)
handler_id = GObject.signal_handler_find(gobject, GObject.SignalMatchType.ID, signal_id, 0, None, 0, 0)
GObject.signal_handler_block(gobject, handler_id)
yield
GObject.signal_handler_unblock(gobject, handler_id)
def gobject_signal_accumulator(test=None):
"""
Create an accumulator function for use with GObject signals. All return
values will be collected and returned in a list. If provided, *test* is a
callback that will be called with two arguments, the return value from the
handler and the list of accumulated return values.
.. code-block:: python
stop = test(retval, accumulated)
:param test: A callback to test whether additional handler should be executed.
"""
if test is None:
test = lambda retval, accumulated: True
def _accumulator(_, accumulated, retval):
if accumulated is None:
accumulated = []
stop = test(retval, accumulated)
accumulated.append(retval)
return (stop, accumulated)
return _accumulator
def gtk_calendar_get_pydate(calendar):
"""
Get the Python date from a :py:class:`Gtk.Calendar` instance.
:param calendar: The calendar to get the date from.
:type calendar: :py:class:`Gtk.Calendar`
:return: The date as returned by the calendar's :py:meth:`~Gtk.Calendar.get_date` method.
:rtype: :py:class:`datetime.date`
"""
if not isinstance(calendar, Gtk.Calendar):
raise ValueError('calendar must be a Gtk.Calendar instance')
calendar_day = calendar.get_date()
return datetime.date(calendar_day[0], calendar_day[1] + 1, calendar_day[2])
def gtk_calendar_set_pydate(calendar, pydate):
"""
Set the date on a :py:class:`Gtk.Calendar` instance from a Python
:py:class:`datetime.date` object.
:param calendar: The calendar to set the date for.
:type calendar: :py:class:`Gtk.Calendar`
:param pydate: The date to set on the calendar.
:type pydate: :py:class:`datetime.date`
"""
calendar.select_month(pydate.month - 1, pydate.year)
calendar.select_day(pydate.day)
GOBJECT_PROPERTY_MAP['calendar'] = (
gtk_calendar_set_pydate,
gtk_calendar_get_pydate
)
def gtk_list_store_search(list_store, value, column=0):
"""
Search a :py:class:`Gtk.ListStore` for a value and return a
:py:class:`Gtk.TreeIter` to the first match.
:param list_store: The list store to search.
:type list_store: :py:class:`Gtk.ListStore`
:param value: The value to search for.
:param int column: The column in the row to check.
:return: The row on which the value was found.
:rtype: :py:class:`Gtk.TreeIter`
"""
for row in list_store:
if row[column] == value:
return row.iter
return None
def gtk_menu_get_item_by_label(menu, label):
"""
	Retrieve a menu item from a menu by its label. If more than one item shares
the same label, only the first is returned.
:param menu: The menu to search for the item in.
:type menu: :py:class:`Gtk.Menu`
:param str label: The label to search for in *menu*.
:return: The identified menu item if it could be found, otherwise None is returned.
:rtype: :py:class:`Gtk.MenuItem`
"""
for item in menu:
if item.get_label() == label:
return item
def gtk_menu_insert_by_path(menu, menu_path, menu_item):
"""
Add a new menu item into the existing menu at the path specified in
*menu_path*.
:param menu: The existing menu to add the new item to.
:type menu: :py:class:`Gtk.Menu` :py:class:`Gtk.MenuBar`
:param list menu_path: The labels of submenus to traverse to insert the new item.
:param menu_item: The new menu item to insert.
:type menu_item: :py:class:`Gtk.MenuItem`
"""
utilities.assert_arg_type(menu, (Gtk.Menu, Gtk.MenuBar), 1)
utilities.assert_arg_type(menu_path, list, 2)
utilities.assert_arg_type(menu_item, Gtk.MenuItem, 3)
while len(menu_path):
label = menu_path.pop(0)
menu_cursor = gtk_menu_get_item_by_label(menu, label)
if menu_cursor is None:
raise ValueError('missing node labeled: ' + label)
menu = menu_cursor.get_submenu()
menu.append(menu_item)
def gtk_menu_position(event, *args):
"""
Create a menu at the given location for an event. This function is meant to
be used as the *func* parameter for the :py:meth:`Gtk.Menu.popup` method.
The *event* object must be passed in as the first parameter, which can be
accomplished using :py:func:`functools.partial`.
:param event: The event to retrieve the coordinates for.
"""
if not hasattr(event, 'get_root_coords'):
raise TypeError('event object has no get_root_coords method')
coords = event.get_root_coords()
return (coords[0], coords[1], True)
def gtk_style_context_get_color(sc, color_name, default=None):
"""
	Look up a color by its name in the :py:class:`Gtk.StyleContext` specified
in *sc*, and return it as an :py:class:`Gdk.RGBA` instance if the color is
defined. If the color is not found, *default* will be returned.
:param sc: The style context to use.
:type sc: :py:class:`Gtk.StyleContext`
:param str color_name: The name of the color to lookup.
:param default: The default color to return if the specified color was not found.
:type default: str, :py:class:`Gdk.RGBA`
:return: The color as an RGBA instance.
:rtype: :py:class:`Gdk.RGBA`
"""
found, color_rgba = sc.lookup_color(color_name)
if found:
return color_rgba
if isinstance(default, str):
color_rgba = Gdk.RGBA()
color_rgba.parse(default)
return color_rgba
elif isinstance(default, Gdk.RGBA):
return default
return
def gtk_sync():
"""Wait while all pending GTK events are processed."""
while Gtk.events_pending():
Gtk.main_iteration()
def gtk_treesortable_sort_func_numeric(model, iter1, iter2, column_id):
"""
	Sort the model by comparing numeric values stored as text, which may contain
	thousands separators such as 1,337. This is meant to be set as a sorting function using
:py:meth:`Gtk.TreeSortable.set_sort_func`. The user_data parameter must be
the column id which contains the numeric values to be sorted.
:param model: The model that is being sorted.
:type model: :py:class:`Gtk.TreeSortable`
:param iter1: The iterator of the first item to compare.
:type iter1: :py:class:`Gtk.TreeIter`
:param iter2: The iterator of the second item to compare.
:type iter2: :py:class:`Gtk.TreeIter`
:param column_id: The ID of the column containing numeric values.
:return: An integer, -1 if item1 should come before item2, 0 if they are the same and 1 if item1 should come after item2.
:rtype: int
"""
column_id = column_id or 0
item1 = model.get_value(iter1, column_id).replace(',', '')
item2 = model.get_value(iter2, column_id).replace(',', '')
if item1.isdigit() and item2.isdigit():
return _cmp(int(item1), int(item2))
if item1.isdigit():
return -1
elif item2.isdigit():
return 1
item1 = model.get_value(iter1, column_id)
item2 = model.get_value(iter2, column_id)
return _cmp(item1, item2)
def gtk_treeview_selection_iterate(treeview):
"""
	Iterate over a treeview's selected rows.
:param treeview: The treeview for which to iterate over.
:type treeview: :py:class:`Gtk.TreeView`
:return: The rows which are selected within the treeview.
:rtype: :py:class:`Gtk.TreeIter`
"""
selection = treeview.get_selection()
(model, tree_paths) = selection.get_selected_rows()
if not tree_paths:
return
for tree_path in tree_paths:
yield model.get_iter(tree_path)
def gtk_treeview_selection_to_clipboard(treeview, columns=0):
"""
Copy the currently selected values from the specified columns in the
	treeview to the user's clipboard. If no value is selected in the treeview,
then the clipboard is left unmodified. If multiple values are selected, they
will all be placed in the clipboard on separate lines.
:param treeview: The treeview instance to get the selection from.
:type treeview: :py:class:`Gtk.TreeView`
	:param columns: The column numbers to retrieve the values for.
	:type columns: int, list, tuple
"""
treeview_selection = treeview.get_selection()
(model, tree_paths) = treeview_selection.get_selected_rows()
if not tree_paths:
return
if isinstance(columns, int):
columns = (columns,)
tree_iters = map(model.get_iter, tree_paths)
selection_lines = []
for ti in tree_iters:
values = (model.get_value(ti, column) for column in columns)
values = (('' if value is None else str(value)) for value in values)
selection_lines.append(' '.join(values).strip())
selection_lines = os.linesep.join(selection_lines)
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
clipboard.set_text(selection_lines, -1)
def gtk_treeview_get_column_titles(treeview):
"""
Iterate over a GTK TreeView and return a tuple containing the id and title
	of each of its columns.
:param treeview: The treeview instance to retrieve columns from.
:type treeview: :py:class:`Gtk.TreeView`
"""
for column_id, column in enumerate(treeview.get_columns()):
column_name = column.get_title()
yield (column_id, column_name)
def gtk_treeview_set_column_titles(treeview, column_titles, column_offset=0, renderers=None):
"""
Populate the column names of a GTK TreeView and set their sort IDs.
:param treeview: The treeview to set column names for.
:type treeview: :py:class:`Gtk.TreeView`
:param list column_titles: The names of the columns.
:param int column_offset: The offset to start setting column names at.
:param list renderers: A list containing custom renderers to use for each column.
:return: A dict of all the :py:class:`Gtk.TreeViewColumn` objects keyed by their column id.
:rtype: dict
"""
columns = {}
for column_id, column_title in enumerate(column_titles, column_offset):
renderer = renderers[column_id - column_offset] if renderers else Gtk.CellRendererText()
if isinstance(renderer, Gtk.CellRendererToggle):
column = Gtk.TreeViewColumn(column_title, renderer, active=column_id)
else:
column = Gtk.TreeViewColumn(column_title, renderer, text=column_id)
column.set_property('reorderable', True)
column.set_sort_column_id(column_id)
treeview.append_column(column)
columns[column_id] = column
return columns
def gtk_widget_destroy_children(widget):
"""
Destroy all GTK child objects of *widget*.
:param widget: The widget to destroy all the children of.
:type widget: :py:class:`Gtk.Widget`
"""
for child in widget.get_children():
child.destroy()
def show_dialog(message_type, message, parent, secondary_text=None, message_buttons=Gtk.ButtonsType.OK, use_markup=False, secondary_use_markup=False):
"""
Display a dialog and return the response. The response is dependent on
the value of *message_buttons*.
:param message_type: The GTK message type to display.
:type message_type: :py:class:`Gtk.MessageType`
:param str message: The text to display in the dialog.
:param parent: The parent window that the dialog should belong to.
:type parent: :py:class:`Gtk.Window`
:param str secondary_text: Optional subtext for the dialog.
:param message_buttons: The buttons to display in the dialog box.
:type message_buttons: :py:class:`Gtk.ButtonsType`
:param bool use_markup: Whether or not to treat the message text as markup.
:param bool secondary_use_markup: Whether or not to treat the secondary text as markup.
:return: The response of the dialog.
:rtype: int
"""
dialog = Gtk.MessageDialog(parent, Gtk.DialogFlags.DESTROY_WITH_PARENT, message_type, message_buttons)
dialog.set_property('text', message)
dialog.set_property('use-markup', use_markup)
dialog.set_property('secondary-text', secondary_text)
dialog.set_property('secondary-use-markup', secondary_use_markup)
if secondary_use_markup:
signal_label_activate_link = lambda _, uri: utilities.open_uri(uri)
for label in dialog.get_message_area().get_children():
if not isinstance(label, Gtk.Label):
continue
label.connect('activate-link', signal_label_activate_link)
dialog.show_all()
response = dialog.run()
dialog.destroy()
return response
def show_dialog_error(*args, **kwargs):
"""Display an error dialog with :py:func:`.show_dialog`."""
return show_dialog(Gtk.MessageType.ERROR, *args, **kwargs)
def show_dialog_exc_socket_error(error, parent, title=None):
"""
Display an error dialog with details regarding a :py:exc:`socket.error`
exception that has been raised.
:param error: The exception instance that has been raised.
:type error: :py:exc:`socket.error`
:param parent: The parent window that the dialog should belong to.
:type parent: :py:class:`Gtk.Window`
:param title: The title of the error dialog that is displayed.
"""
title = title or 'Connection Error'
if isinstance(error, socket.timeout):
description = 'The connection to the server timed out.'
elif len(error.args) > 1:
error_number, error_message = error.args[:2]
if error_number == 111:
description = 'The server refused the connection.'
		else:
			description = "Socket error #{0} ({1}).".format((error_number or 'N/A'), error_message)
	else:
		description = 'An unspecified socket error occurred.'  # assumed fallback so 'description' is always bound
return show_dialog(Gtk.MessageType.ERROR, title, parent, secondary_text=description)
def show_dialog_info(*args, **kwargs):
"""Display an informational dialog with :py:func:`.show_dialog`."""
return show_dialog(Gtk.MessageType.INFO, *args, **kwargs)
def show_dialog_warning(*args, **kwargs):
"""Display an warning dialog with :py:func:`.show_dialog`."""
return show_dialog(Gtk.MessageType.WARNING, *args, **kwargs)
def show_dialog_yes_no(*args, **kwargs):
"""
Display a dialog which asks a yes or no question with
:py:func:`.show_dialog`.
:return: True if the response is Yes.
:rtype: bool
"""
kwargs['message_buttons'] = Gtk.ButtonsType.YES_NO
return show_dialog(Gtk.MessageType.QUESTION, *args, **kwargs) == Gtk.ResponseType.YES
class GladeDependencies(object):
"""
A class for defining how objects should be loaded from a GTK Builder data
file for use with :py:class:`.GladeGObject`.
"""
__slots__ = ('children', 'top_level', 'name')
def __init__(self, children=None, top_level=None, name=None):
children = children or ()
utilities.assert_arg_type(children, tuple, 1)
self.children = children
"""A tuple of string names or :py:class:`.GladeProxy` instances listing the children widgets to load from the parent."""
self.top_level = top_level
"""A tuple of string names listing additional top level widgets to load such as images."""
self.name = name
"""The string of the name of the top level parent widget to load."""
def __repr__(self):
return "<{0} name='{1}' >".format(self.__class__.__name__, self.name)
class GladeProxyDestination(object):
"""
A class that is used to define how a :py:class:`.GladeProxy` object shall
be loaded into a parent :py:class:`.GladeGObject` instance. This includes
the information such as what container widget in the parent the proxied
widget should be added to and what method should be used. The proxied widget
will be added to the parent by calling
:py:attr:`~.GladeProxyDestination.method` with the proxied widget as the
first argument.
"""
__slots__ = ('widget', 'method', 'args', 'kwargs')
def __init__(self, widget, method, args=None, kwargs=None):
utilities.assert_arg_type(widget, str, 1)
utilities.assert_arg_type(method, str, 2)
self.widget = widget
"""The name of the parent widget for this proxied child."""
self.method = method
"""The method of the parent widget that should be called to add the proxied child."""
self.args = args or ()
"""Arguments to append after the proxied child instance when calling :py:attr:`~.GladeProxyDestination.method`."""
self.kwargs = kwargs or {}
"""Key word arguments to append after the proxied child instance when calling :py:attr:`~.GladeProxyDestination.method`."""
def __repr__(self):
return "<{0} widget='{1}' method='{2}' >".format(self.__class__.__name__, self.widget, self.method)
class GladeProxy(object):
"""
A class that can be used to load another top level widget from the GTK
builder data file in place of a child. This is useful for reusing small
widgets as children in larger ones.
"""
__slots__ = ('destination',)
name = None
"""The string of the name of the top level widget to load."""
children = ()
"""A tuple of string names or :py:class:`.GladeProxy` instances listing the children widgets to load from the top level."""
def __init__(self, destination):
utilities.assert_arg_type(destination, GladeProxyDestination, 1)
self.destination = destination
"""A :py:class:`.GladeProxyDestination` instance describing how this proxied widget should be added to the parent."""
def __repr__(self):
return "<{0} name='{1}' destination={2} >".format(self.__class__.__name__, self.name, repr(self.destination))
class GladeGObjectMeta(type):
"""
A meta class that will update the :py:attr:`.GladeDependencies.name` value
in the :py:attr:`.GladeGObject.dependencies` attribute of instances if no
value is defined.
"""
assigned_name = type('assigned_name', (str,), {})
"""A type subclassed from str that is used to define names which have been automatically assigned by this class."""
def __init__(cls, *args, **kwargs):
dependencies = getattr(cls, 'dependencies', None)
if dependencies is not None:
dependencies = copy.deepcopy(dependencies)
setattr(cls, 'dependencies', dependencies)
if isinstance(dependencies.name, (None.__class__, cls.assigned_name)):
dependencies.name = cls.assigned_name(cls.__name__)
super(GladeGObjectMeta, cls).__init__(*args, **kwargs)
# stylized metaclass definition to be Python 2.7 and 3.x compatible
class GladeGObject(GladeGObjectMeta('_GladeGObject', (object,), {})):
"""
A base object to wrap GTK widgets loaded from Glade data files. This
provides a number of convenience methods for managing the main widget and
child widgets. This class is meant to be subclassed by classes representing
objects from the Glade data file.
"""
dependencies = GladeDependencies()
"""A :py:class:`.GladeDependencies` instance which defines information for loading the widget from the GTK builder data."""
config_prefix = ''
"""A prefix to be used for keys when looking up value in the :py:attr:`~.GladeGObject.config`."""
top_gobject = 'gobject'
"""The name of the attribute to set a reference of the top level GObject to."""
objects_persist = True
"""Whether objects should be automatically loaded from and saved to the configuration."""
def __init__(self, application):
"""
:param application: The parent application for this object.
:type application: :py:class:`Gtk.Application`
"""
utilities.assert_arg_type(application, Gtk.Application, arg_pos=1)
self.config = application.config
"""A reference to the King Phisher client configuration."""
self.application = application
"""The parent :py:class:`Gtk.Application` instance."""
self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
builder = Gtk.Builder()
self.gtk_builder = builder
"""A :py:class:`Gtk.Builder` instance used to load Glade data with."""
top_level_dependencies = [gobject.name for gobject in self.dependencies.children if isinstance(gobject, GladeProxy)]
top_level_dependencies.append(self.dependencies.name)
if self.dependencies.top_level is not None:
top_level_dependencies.extend(self.dependencies.top_level)
builder.add_objects_from_file(which_glade(), top_level_dependencies)
builder.connect_signals(self)
gobject = builder.get_object(self.dependencies.name)
setattr(self, self.top_gobject, gobject)
if isinstance(gobject, Gtk.Window):
gobject.set_transient_for(self.application.get_active_window())
self.application.add_reference(self)
if isinstance(gobject, Gtk.ApplicationWindow):
application.add_window(gobject)
if isinstance(gobject, Gtk.Dialog):
gobject.set_modal(True)
self.gobjects = utilities.FreezableDict()
"""A :py:class:`~king_phisher.utilities.FreezableDict` which maps gobjects to their unique GTK Builder id."""
self._load_child_dependencies(self.dependencies)
self.gobjects.freeze()
self._load_child_proxies()
if self.objects_persist:
self.objects_load_from_config()
def _load_child_dependencies(self, dependencies):
for child in dependencies.children:
if isinstance(child, GladeProxy):
self._load_child_dependencies(child)
child = child.destination.widget
gobject = self.gtk_builder_get(child, parent_name=dependencies.name)
# the following five lines ensure that the types match up, this is to enforce clean development
gtype = child.split('_', 1)[0]
if gobject is None:
raise TypeError("gobject {0} could not be found in the glade file".format(child))
elif gobject.__class__.__name__.lower() != gtype:
raise TypeError("gobject {0} is of type {1} expected {2}".format(child, gobject.__class__.__name__, gtype))
self.gobjects[child] = gobject
def _load_child_proxies(self):
for child in self.dependencies.children or []:
if not isinstance(child, GladeProxy):
continue
dest = child.destination
method = getattr(self.gobjects[dest.widget], dest.method)
if method is None:
raise ValueError("gobject {0} does not have method {1}".format(dest.widget, dest.method))
src_widget = self.gtk_builder.get_object(child.name)
self.logger.debug("setting proxied widget {0} via {1}.{2}".format(child.name, dest.widget, dest.method))
method(src_widget, *dest.args, **dest.kwargs)
def destroy(self):
"""Destroy the top-level GObject."""
getattr(self, self.top_gobject).destroy()
@property
def parent(self):
return self.application.get_active_window()
def get_entry_value(self, entry_name):
"""
		Get the value of the specified entry, strip leading and trailing white
		space, and return None if the resulting string is empty.
:param str entry_name: The name of the entry to retrieve text from.
:return: Either the non-empty string or None.
:rtype: None, str
"""
text = self.gobjects['entry_' + entry_name].get_text()
text = text.strip()
if not text:
return None
return text
def gtk_builder_get(self, gobject_id, parent_name=None):
"""
Find the child GObject with name *gobject_id* from the GTK builder.
:param str gobject_id: The object name to look for.
:param str parent_name: The name of the parent object in the builder data file.
:return: The GObject as found by the GTK builder.
:rtype: :py:class:`GObject.Object`
"""
parent_name = parent_name or self.dependencies.name
gtkbuilder_id = "{0}.{1}".format(parent_name, gobject_id)
self.logger.debug('loading GTK builder object with id: ' + gtkbuilder_id)
return self.gtk_builder.get_object(gtkbuilder_id)
def objects_load_from_config(self):
"""
Iterate through :py:attr:`.gobjects` and set the GObject's value
from the corresponding value in the :py:attr:`~.GladeGObject.config`.
"""
for gobject_id, gobject in self.gobjects.items():
if not '_' in gobject_id:
continue
gtype, config_name = gobject_id.split('_', 1)
config_name = self.config_prefix + config_name
if not gtype in GOBJECT_PROPERTY_MAP or not config_name in self.config:
continue
value = self.config[config_name]
if value is None:
continue
if isinstance(GOBJECT_PROPERTY_MAP[gtype], (list, tuple)):
GOBJECT_PROPERTY_MAP[gtype][0](gobject, value)
else:
gobject.set_property(GOBJECT_PROPERTY_MAP[gtype], value)
def objects_save_to_config(self):
for gobject_id, gobject in self.gobjects.items():
if not '_' in gobject_id:
continue
gtype, config_name = gobject_id.split('_', 1)
config_name = self.config_prefix + config_name
if not gtype in GOBJECT_PROPERTY_MAP:
continue
self.config[config_name] = gobject_get_value(gobject, gtype)
class FileMonitor(object):
"""Monitor a file for changes."""
def __init__(self, path, on_changed):
"""
:param str path: The path to monitor for changes.
:param on_changed: The callback function to be called when changes are detected.
:type on_changed: function
"""
self.logger = logging.getLogger('KingPhisher.Utility.FileMonitor')
self.on_changed = on_changed
self.path = path
self._gfile = Gio.file_new_for_path(path)
self._gfile_monitor = self._gfile.monitor(Gio.FileMonitorFlags.NONE, None)
self._gfile_monitor.connect('changed', self.cb_changed)
self.logger.debug('starting file monitor for: ' + path)
def __del__(self):
self.stop()
def stop(self):
"""Stop monitoring the file."""
if self._gfile_monitor.is_cancelled():
return
self._gfile_monitor.cancel()
self.logger.debug('cancelled file monitor for: ' + self.path)
def cb_changed(self, gfile_monitor, gfile, gfile_other, gfile_monitor_event):
self.logger.debug("file monitor {0} received event: {1}".format(self.path, gfile_monitor_event.value_name))
self.on_changed(self.path, gfile_monitor_event)
|
[
"zeroSteiner@gmail.com"
] |
zeroSteiner@gmail.com
|
ebfcd58f0192f48997229b64ab5b28a93443be31
|
9dc45b6f2ba0d94d4b8a3090364d1116fd8f52b1
|
/src/scripts/show.py
|
01d6468cefe4fe8f2e8cd22540739aedb2664418
|
[
"MIT"
] |
permissive
|
mirgee/thesis_project
|
7c1128b46c224ec1770e89c3290de66e9a280c4d
|
296f292a84fe4756374d87c81e657ac991766a60
|
refs/heads/master
| 2022-04-15T12:22:57.601575
| 2019-08-22T14:16:15
| 2019-08-22T14:16:15
| 146,583,700
| 0
| 0
|
MIT
| 2020-03-31T03:19:20
| 2018-08-29T10:26:26
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 86
|
py
|
#!python3
import pandas as pd
import sys
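# Load the pandas DataFrame pickled at the path given as the first command-line argument and print it.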
df = pd.read_pickle(sys.argv[1])
print(df)
|
[
"miroslavkovar@protonmail.com"
] |
miroslavkovar@protonmail.com
|
67a8fd66fd325c2a66c113a5a6683f24d937bcb4
|
16e0407c03456570274efe40e6f1e076cf6ace75
|
/Exercise_7/challenge2.py
|
e1493ed675daae291b233732f844e4faf00426a8
|
[] |
no_license
|
bocephus51/HomeworkForPython
|
14715df09775e2beac814c047b040e8e869d6bf9
|
75a66a1206ff3a6a2fcf7ff479e1a1eef920e898
|
refs/heads/master
| 2016-09-06T19:05:45.856421
| 2015-10-05T16:52:55
| 2015-10-05T16:52:55
| 42,896,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
import arcpy
from arcpy import env
env.workspace = "P:/Fall2015/PythonProgramming/Exercise07/Exercise07data"
fc = "Results/raods.shp"
newfield = "FERRY"
fieldtype = "TEXT"
fieldname = arcpy.ValidateFieldName(newfield)
fieldlist = arcpy.ListFields(fc)
fieldnames = []
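# arcpy.ListFields returns Field objects, so the loop below compares each field's .name attribute.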
for field in fieldlist:
    if field.name == "FERRY":
print "Yes"
else:
print "No"
|
[
"brwatk8554@GIS-VLAB-19.ad.uauth.net"
] |
brwatk8554@GIS-VLAB-19.ad.uauth.net
|
8147dd2aea21db389739c6cf9ddecefb25490dea
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02702/s099070229.py
|
891b8ce46b8b1b1687e1257e2cd8841f2209bbaf
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
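# Count substrings divisible by 2019: build suffix remainders modulo 2019 (digits are
# processed from the right); two equal remainders mean the digits between them form a
# multiple of 2019, so the answer is the sum of C(count, 2) over each remainder class.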
s = list(map(int,input()))
s.reverse()
t = len(s)
mod = 2019
arr = [0] * (t+1)
arr[-2] = s[0]
for i in range(1,t):
arr[t-i-1] = (arr[t-i] + s[i]*pow(10,i,mod)) % mod
from collections import Counter
arr = Counter(arr)
ans = 0
for i in arr:
ans += (arr[i] - 1) * arr[i] // 2
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9fa04e51089fddcda263284ee9108936a4ff2b73
|
a3895c209fec46d184abe9e690dbfc2848ebbf7b
|
/src/buildJS.py
|
609b3d689793238c5ee75c05ad235ac2cca3c128
|
[] |
no_license
|
drdrang/mechanics
|
4e639e8d79ee959d150f45576763e63a696a70cd
|
400dd56286cc2eaf12453acd2f279930d77a0a66
|
refs/heads/master
| 2020-08-07T12:12:05.607298
| 2009-01-22T23:19:18
| 2009-01-22T23:19:18
| 112,865
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
#!/usr/bin/python
from glob import glob
# Get the number of problem solution files.
count = len(glob('problem*.md'))
# Print out a JavaScript function that will print that number.
print '''function problemCount() {
document.write(%d);
}
''' % count
# Get the chapter files.
chapters = glob('chapter*.md')
# Turn them into an HTML list of links.
chapterList = ['<li><a href="%s">Chapter %d</a></li>' % (f.replace('md', 'html'),i+1) for i,f in enumerate(chapters)]
chapterListString = '\\n'.join(chapterList)
# Print a JavaScript function that will print the chapter list.
print '''function chapterList() {
document.write('%s');
}
''' % chapterListString
|
[
"drdrang@gmail.com"
] |
drdrang@gmail.com
|
c1d2064b5559268bb779069c98714fe072aee3e1
|
beef54fe5731e99c98fb9306b4931cc952e50704
|
/ephys_stuff.py
|
fcbc27ab3ea8960382fc88257e520a4828fcbccb
|
[] |
no_license
|
isalinas4/twoac_performance_summary
|
1e4ee155cd5ca6dcf752e70a3dc5cd69f57c34f5
|
6219ab1376a8fd81bd7ee7f642b859f9d0e2c987
|
refs/heads/main
| 2023-06-11T12:41:32.147533
| 2021-07-06T18:29:25
| 2021-07-06T18:29:25
| 317,296,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,505
|
py
|
'''
Ephys Data Report Generator
'''
import os, sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from jaratoolbox import celldatabase
from jaratoolbox import settings
from jaratoolbox import behavioranalysis
from jaratoolbox import spikesanalysis
from jaratoolbox import extraplots
from jaratoolbox import ephyscore
from jaratoolbox import spikesorting
# Creating a database of cells - outputs a Pandas dataframe where each row contains information for one neuron
inforecFile = os.path.join(settings.INFOREC_PATH,'chad013_inforec.py')
celldb = celldatabase.generate_cell_database(inforecFile)
sys.exit()
# Loading electrophysiological data for all neurons and all sessions
for indRow,dbRow in celldb.iterrows():
'''
White noise raster plot --------------------------------------------------------
'''
oneCell = ephyscore.Cell(dbRow)
try:
ephysData, bdata = oneCell.load('noiseburst')
except ValueError as verror:
print(verror)
continue
# Aligning spikes to an event
spikeTimes = ephysData['spikeTimes']
eventOnsetTimes = ephysData['events']['stimOn']
timeRange = [-0.3, 0.8] # In seconds
(spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(spikeTimes, eventOnsetTimes, timeRange)
extraplots.raster_plot(spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange)
plt.xlabel('Time from event onset [s]')
plt.ylabel('Trials')
plt.title('Noiseburst')
'''
#Frequency tuning raster plot ---------------------------------------------------
'''
ephysData, bdata = oneCell.load('tc')
spikeTimes = ephysData['spikeTimes']
eventOnsetTimes = ephysData['events']['stimOn']
(spikeTimesFromEventOnsetTuning,trialIndexForEachSpikeTuning,indexLimitsEachTrialTuning) = spikesanalysis.eventlocked_spiketimes(spikeTimes, eventOnsetTimes, timeRange)
frequenciesEachTrialTuning = bdata['currentFreq']
numberOfTrialsTuning = len(frequenciesEachTrialTuning)
print('Number of trials run for the frequency tuning curve is {}.'.format(numberOfTrialsTuning))
arrayOfFrequenciesTuning = np.unique(bdata['currentFreq'])
labelsForYaxis = ['%.0f' % f for f in arrayOfFrequenciesTuning] # Generating a label of the behavior data for the y-axis
trialsEachCondTuning = behavioranalysis.find_trials_each_type(frequenciesEachTrialTuning,arrayOfFrequenciesTuning)
ax2 = plt.subplot2grid((3, 3), (1, 0), rowspan=2)
extraplots.raster_plot(spikeTimesFromEventOnsetTuning,indexLimitsEachTrialTuning,timeRange,trialsEachCondTuning, labels=labelsForYaxis)
plt.xlabel('Time from event onset [s]')
plt.ylabel('Frequency [Hz]')
plt.title('Tuning Curve (# of Trials = {})'.format(numberOfTrialsTuning))
'''
#Standard raster plot -----------------------------------------------------------
'''
ephysData, bdata = oneCell.load('standard')
spikeTimes = ephysData['spikeTimes']
eventOnsetTimes = ephysData['events']['stimOn']
if len(eventOnsetTimes)==len(bdata['currentFreq'])+1:
print('Removing last trial from standard ephys data.')
eventOnsetTimes = eventOnsetTimes[:-1]
(spikeTimesFromEventOnsetStandard,trialIndexForEachSpikeStandard,indexLimitsEachTrialStandard) = \
spikesanalysis.eventlocked_spiketimes(spikeTimes, eventOnsetTimes, timeRange)
frequenciesEachTrialStandard = bdata['currentFreq']
numberOfTrialsStandard = len(frequenciesEachTrialStandard)
print('Number of trials run for the standard sequence is {}.'.format(numberOfTrialsStandard))
arrayOfFrequenciesStandard = np.unique(bdata['currentFreq'])
labelsForYaxis = ['%.0f' % f for f in arrayOfFrequenciesStandard]
trialsEachCondStandard = behavioranalysis.find_trials_each_type(frequenciesEachTrialStandard,arrayOfFrequenciesStandard)
ax3 = plt.subplot2grid((3, 3), (1, 1))
extraplots.raster_plot(spikeTimesFromEventOnsetStandard,indexLimitsEachTrialStandard,
timeRange, trialsEachCondStandard, labels=labelsForYaxis)
plt.xlabel('Time from event onset [s]')
plt.ylabel('Frequency [Hz]')
plt.title('Standard Sequence (# of Trials = {})'.format(numberOfTrialsStandard))
'''
#Oddball raster plot ------------------------------------------------------------
'''
ephysData, bdata = oneCell.load('oddball')
spikeTimes = ephysData['spikeTimes']
eventOnsetTimes = ephysData['events']['stimOn']
if len(eventOnsetTimes)==len(bdata['currentFreq'])+1:
print('Removing last trial from oddball ephys data.')
eventOnsetTimes = eventOnsetTimes[:-1]
(spikeTimesFromEventOnsetOddball,trialIndexForEachSpikeOddball,indexLimitsEachTrialOddball) = spikesanalysis.eventlocked_spiketimes(spikeTimes, eventOnsetTimes, timeRange)
frequenciesEachTrialOddball = bdata['currentFreq']
numberOfTrialsOddball = len(frequenciesEachTrialOddball)
print('Number of trials run for the oddball sequence is {}.'.format(numberOfTrialsOddball))
arrayOfFrequenciesOddball = np.unique(bdata['currentFreq'])
labelsForYaxis = ['%.0f' % f for f in arrayOfFrequenciesOddball]
trialsEachCondOddball = behavioranalysis.find_trials_each_type(frequenciesEachTrialOddball,arrayOfFrequenciesOddball)
ax4 = plt.subplot2grid((3, 3), (2, 1))
extraplots.raster_plot(spikeTimesFromEventOnsetOddball,indexLimitsEachTrialOddball,timeRange, trialsEachCondOddball, labels=labelsForYaxis)
plt.xlabel('Time from event onset [s]')
plt.ylabel('Frequency [Hz]')
plt.title('Oddball Sequence (# of Trials = {})'.format(numberOfTrialsOddball))
'''
#Waveform plot ------------------------------------------------------------------
'''
ax5 = plt.subplot2grid((3, 3), (0, 2))
spikesorting.plot_waveforms(ephysData['samples'])
'''
#Plotting the overlapped PSTH ---------------------------------------------------
'''
# Parameters
binWidth = 0.010
timeVec = np.arange(timeRange[0],timeRange[-1],binWidth)
smoothWinSizePsth = 5
lwPsth = 2
downsampleFactorPsth = 1
# For standard sequence
iletLowFreqStandard = indexLimitsEachTrialStandard[:,trialsEachCondStandard[:,0]]
spikeCountMatLowStandard = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnsetStandard,iletLowFreqStandard,timeVec)
iletHighFreqStandard = indexLimitsEachTrialStandard[:,trialsEachCondStandard[:,1]]
spikeCountMatHighStandard = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnsetStandard,iletHighFreqStandard,timeVec)
# For oddball sequence
iletLowFreqOddball = indexLimitsEachTrialOddball[:,trialsEachCondOddball[:,0]]
spikeCountMatLowOddball = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnsetOddball,iletLowFreqOddball,timeVec)
iletHighFreqOddball = indexLimitsEachTrialOddball[:,trialsEachCondOddball[:,1]]
spikeCountMatHighOddball = spikesanalysis.spiketimes_to_spikecounts(spikeTimesFromEventOnsetOddball,iletHighFreqOddball,timeVec)
ax6 = plt.subplot2grid((3, 3), (1, 2))
extraplots.plot_psth(spikeCountMatLowOddball/binWidth, smoothWinSizePsth,timeVec,trialsEachCond=[],colorEachCond='b',linestyle=None,linewidth=lwPsth,downsamplefactor=downsampleFactorPsth)
extraplots.plot_psth(spikeCountMatLowStandard/binWidth, smoothWinSizePsth,timeVec,trialsEachCond=[],colorEachCond='c',linestyle=None,linewidth=lwPsth,downsamplefactor=downsampleFactorPsth)
plt.xlabel('Time from event onset [s]')
plt.ylabel('Number of spikes')
plt.title('Low Frequency Event')
# Legend for PSTH
oddball_patch = mpatches.Patch(color='b',label='Oddball')
standard_patch = mpatches.Patch(color='c',label='Standard')
plt.legend(handles=[oddball_patch, standard_patch])
ax7 = plt.subplot2grid((3, 3), (2, 2))
extraplots.plot_psth(spikeCountMatHighOddball/binWidth, smoothWinSizePsth,timeVec,trialsEachCond=[],colorEachCond='b',linestyle=None,linewidth=lwPsth,downsamplefactor=downsampleFactorPsth)
extraplots.plot_psth(spikeCountMatHighStandard/binWidth, smoothWinSizePsth,timeVec,trialsEachCond=[],colorEachCond='c',linestyle=None,linewidth=lwPsth,downsamplefactor=downsampleFactorPsth)
plt.xlabel('Time from event onset [s]')
plt.ylabel('Number of spikes')
plt.title('High Frequency Event')
plt.legend(handles=[oddball_patch, standard_patch])
'''
#Saving the figure --------------------------------------------------------------
figFormat = 'png'
outputDir = '/home/jarauser/beth/'
figFilename ='{}_{}_D{}um_T{}_C{}.{}'.format(cellDict['subject'],cellDict['date'],cellDict['depth'],cellDict['tetrode'],cellDict['cluster'],figFormat)
figFullpath = os.path.join(outputDir,figFilename)
plt.savefig(figFullpath,format=figFormat)
plt.gcf().set_size_inches([18,10])
plt.tight_layout()
'''
plt.show()
|
[
"noreply@github.com"
] |
isalinas4.noreply@github.com
|
a6cc35cac625b797547a9a7f2cef8c4cf8cfda6f
|
53c62e3e6bdd68ed6f7c0f5264e11eaf72a55e3e
|
/evaluation/count_frequencies.py
|
dc3b06c0d28f85e884922562314cd8864b1fc368
|
[] |
no_license
|
lkopocinski/paintball
|
19d4076ad19d8c4bc3796621df63f26dc0e54f51
|
266c51693bd867924c48f2bbc3d81497a1b9e6ab
|
refs/heads/master
| 2022-04-17T04:01:54.036359
| 2020-03-23T12:52:54
| 2020-03-23T12:52:54
| 191,989,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
import sys
import pandas as pd
def main():
df = pd.read_csv(sys.argv[1], names=['term', 'distance'])
df = df[df['distance'] != -1]
df = df.groupby('term').min()
#df = df.distance.apply(lambda x: round(x, 0))
df = df.distance
df = df.value_counts()
df = df.sort_index()
print df
if __name__ == '__main__':
main()
|
[
"lkopocinski@gmail.com"
] |
lkopocinski@gmail.com
|
d3bf39bc2723f376e6b3a751ece562baec160cdb
|
34b9b39442bde1a3c8fa670ef60bcc84d772a067
|
/Assignment 3- Deadline 10 Oct 2017/Assignment3_step1_Chen.py
|
5d5e307459540e779670a078ea9fae69efd75b92
|
[] |
no_license
|
bnajafi/Scientific_Python_Assignments_POLIMI_EETBS
|
b398fc2754b843d63cd06d517235c16177a87dcf
|
8da926e995dcaf02a297c6bb2f3120c49d6d63da
|
refs/heads/master
| 2021-05-07T22:36:14.715936
| 2018-01-16T21:12:33
| 2018-01-16T21:12:33
| 107,265,075
| 38
| 86
| null | 2018-01-16T21:12:34
| 2017-10-17T12:24:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,666
|
py
|
#Assignment 3_Step1_Chen
Matrial_Library={"OutsideSurfaceWinter":0.030,"WoodBevelLappedSiding_13mm":0.14,
"WoodFiberboardSheeting_13mm":0.23,"GlassFiberInsulation_90mm":2.45,"WoodStud_90mm":0.63,
"GypsumWallboard_13mm":0.079,"InsideSurfaceAir":0.12}
#Making it into through insulation part and through the studs
Layers_throughInsulation=["OutsideSurfaceWinter","WoodBevelLappedSiding_13mm",
"WoodFiberboardSheeting_13mm","GlassFiberInsulation_90mm",
"GypsumWallboard_13mm","InsideSurfaceAir"]
Layers_throughStuds=["OutsideSurfaceWinter","WoodBevelLappedSiding_13mm",
"WoodFiberboardSheeting_13mm","WoodStud_90mm",
"GypsumWallboard_13mm","InsideSurfaceAir"]
Layers_Series=[Layers_throughInsulation,Layers_throughStuds]
Rtot_Series=[]
for series in Layers_Series:
Rtot=0
for anylayer in series:
Rtot=Rtot+Matrial_Library[anylayer]
Rtot_Series.append(Rtot)
print "The total unit value in series are " + str(Rtot_Series)+ " m2*degreeC/W"
Ratio=float(0.75) #insulation 0.75, while studs 1-ratio
Layers_Parallel_Ufactor=[1/Rtot_Series[0]*Ratio,1/Rtot_Series[1]*(1-Ratio)]
print "The total unit Ufactor in parallel are: " + str(Layers_Parallel_Ufactor)+" W/m2*degreeC"
Utot=Layers_Parallel_Ufactor[0]+Layers_Parallel_Ufactor[1]
print "The overall U-factor is: "+ str(Utot)+ " W/m2*degreeC"
Rtot=1/Utot
print "The overall unit thermal resistance is: "+str(Rtot)+ " m2*degreeC/W"
A_wall=0.8*50*2.5 #The perimeter of the building is 50m, the height of the walls is 2.5m,the glazing constitutes 20 percent of the walls
Ti=22
To=-2
Q=Utot*A_wall*(Ti-To)
print "The rate of heat loss through the walls under design conditions is: "+str(Q) + " W"
|
[
"behzad najafi"
] |
behzad najafi
|
a4d8d6fe0685a57c271fe4c3507e92d10afb7654
|
14f58c588cbea113188f7220545164400894e40f
|
/coding_challenge_be/coding_challenge/modules/auth/exceptions.py
|
20ea789a37cc491117365e3c8ca06a6948b3e831
|
[] |
no_license
|
janatii/CodingChallenge
|
f91bea7bf4176a046ccb2ca3eb37a8f1299ad585
|
c0d7e80b18a56158297175fd789b271fac82a4c8
|
refs/heads/master
| 2020-03-28T19:42:22.469802
| 2019-04-17T15:07:27
| 2019-04-17T15:07:27
| 149,001,727
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 896
|
py
|
class AuthException(Exception):
    """Base Exception in PxG Auth"""
    pass
class NoAuthorizationError(AuthException):
status_code = 403
def __init__(self, msg):
AuthException.__init__(self)
self.message = msg
def to_dict(self):
rv = dict()
rv['message'] = self.message
return rv
class InvalidHeaderError(AuthException):
status_code = 403
def __init__(self, msg):
AuthException.__init__(self)
self.message = msg
def to_dict(self):
rv = dict()
rv['message'] = self.message
return rv
class InvalidTokenError(AuthException):
status_code = 403
def __init__(self, msg):
AuthException.__init__(self)
self.message = msg
def to_dict(self):
rv = dict()
rv['message'] = self.message
return rv
|
[
"mohammed.janatiidrissi1@usmba.ac.ma"
] |
mohammed.janatiidrissi1@usmba.ac.ma
|
7c36fe2d7a54bc0cf20da4b45ee86c42dcd6bebf
|
fcbba906a08ef64dd805241446c4dbf4df9829ee
|
/data/binary_norb.py
|
24bcd6dbaf96819f59b2275fab80d8cd0f1ff348
|
[] |
no_license
|
gdesjardins/deep_tempering
|
7325fa8eca745b5da02203629990424f449b5c73
|
b51a673ccb16a1f50ea1e1b9707b712ffa3fb934
|
refs/heads/master
| 2016-09-05T18:19:40.680063
| 2013-10-01T14:52:51
| 2013-10-01T14:52:51
| 9,759,187
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,139
|
py
|
import numpy
import os
import time
import copy
from pylearn2.datasets import dense_design_matrix
from pylearn2.datasets import retina
from pylearn2.training_algorithms import default
from deep_tempering.data import shift
from deep_tempering.data.grbm_preproc import GRBMPreprocessor
def onehot_encoding(y):
one_hot = numpy.zeros((y.shape[0],5),dtype='float32')
for i in xrange(y.shape[0]):
one_hot[i,y[i]] = 1
return one_hot
class BinaryNORB(dense_design_matrix.DenseDesignMatrix):
def __init__(self, which_set, one_hot = False):
"""
:param which_set: one of ['train','test']
"""
assert which_set in ['train','test']
self.which_set = which_set
# Load data and labels.
base = '%s/norb_small/ruslan_binarized' % os.getenv('PYLEARN2_DATA_PATH')
fname = '%s/%s_X.npy' % (base, which_set)
X = numpy.load(fname)
fname = '%s/%s_Y.npy' % (base, which_set)
y = numpy.load(fname).astype('int')
self.one_hot = one_hot
if one_hot:
y = onehot_encoding(y)
super(BinaryNORB, self).__init__(X = X, y = y)
class NumpyLoader(dense_design_matrix.DenseDesignMatrix):
def __init__(self, fname):
"""
:param which_set: one of ['train','test']
"""
self.which_set = fname.split('.')[0]
# Load data and labels.
base = '%s/norb_small/ruslan_binarized' % os.getenv('PYLEARN2_DATA_PATH')
fname = '%s/%s' % (base, fname)
X = numpy.load(fname)
y = numpy.zeros(X.shape[0])
super(NumpyLoader, self).__init__(X = X, y = y)
class MyBinaryNORB(dense_design_matrix.DenseDesignMatrix):
def __init__(self, which_set, one_hot = False):
"""
:param which_set: one of ['train','test']
"""
assert which_set in ['train','test']
self.which_set = which_set
# Load data and labels.
base = '%s/norb_small/ruslan_binarized' % os.getenv('PYLEARN2_DATA_PATH')
fname = '%s/norb96x96x2_fov8422_grbm_4k_%s_X.npy' % (base, which_set)
X = numpy.load(fname)
fname = '%s/%s_Y.npy' % (base, which_set)
y = numpy.load(fname).astype('int')
self.one_hot = one_hot
if one_hot:
y = onehot_encoding(y)
super(MyBinaryNORB, self).__init__(X = X, y = y)
class FoveatedPreprocNORB(dense_design_matrix.DenseDesignMatrix):
"""
This dataset can serve two purposes.
When used by itself, it loads up the preprocessed and foveated NORB data, used to train the
first layer GRBM (model used to binarize the dataset).
When used in conjunction with binary_norb.TrainingAlgorithm, will generate binarized
(through a GRBM) shifted version of this foveated NORB dataset. This thus generates a
binary representation (online) which can be used with binary RBMs or DBMs.
"""
def __init__(self, which_set, one_hot = False, seed=1239):
"""
:param which_set: one of ['train', 'valid', 'test']
:param center: data is in range [0,256], center=True subtracts 127.5.
:param multi_target: load extra information as additional labels.
"""
assert which_set in ['train', 'valid', 'test']
self.which_set = which_set
# Load data and labels.
base = '%s/norb_small/ruslan_binarized' % os.getenv('PYLEARN2_DATA_PATH')
if which_set in ['train', 'valid']:
xfname = '%s/norb96x96x2_fov8422_%s_X.npy' % (base, 'train')
yfname = '%s/norb96x96x2_fov8422_%s_Y.npy' % (base, 'train')
else:
xfname = '%s/norb96x96x2_fov8422_%s_X.npy' % (base, which_set)
yfname = '%s/norb96x96x2_fov8422_%s_Y.npy' % (base, which_set)
X = numpy.load(xfname)
y = numpy.load(yfname).astype('int')
if which_set in ['train', 'valid']:
rng = numpy.random.RandomState(seed)
pidx = rng.permutation(len(X))
idx = pidx[:-4300] if which_set == 'train' else pidx[-4300:]
X = X[idx]
y = y[idx]
self.one_hot = one_hot
if one_hot:
y = onehot_encoding(y)
view_converter = retina.RetinaCodingViewConverter((96,96,2), (8,4,2,2))
super(FoveatedPreprocNORB,self).__init__(X = X, y = y, view_converter = view_converter)
class PreprocIterator():
"""
A basic iterator which fetches the next example in the dataset, and then performs a random
shift (as described in the tempered transition paper).
"""
def __init__(self, iterator, topo_shape, rings, max_shift, seed=129387):
"""
:param iterator: an iterator which loops over the "raw" (foveated, unjitted,
unbinarized) NORB dataset
"""
self.topo_shape = topo_shape
self.rings = rings
self.max_shift = max_shift
self.rng = numpy.random.RandomState(seed)
self.grbm = GRBMPreprocessor()
# encapsulate the behavior of a "normal" dataset iterator
self.iterator = iterator
self._subset_iterator = iterator._subset_iterator
def __iter__(self):
return self
def debug(self, fx):
# Unfoveated the current batch
fx1 = copy.copy(fx)
x1 = retina.decode(fx1, (96,96,2), (8,4,2,2))
# Binarized, Reconstruct then defoveate minibatch
fx2 = copy.copy(fx)
bfx2 = self.grbm.preproc(fx2)
fxhat2 = self.grbm.reconstruct(bfx2)
xhat2 = retina.decode(fxhat2, (96,96,2), (8,4,2,2))
# Shift then defoveate minibatch
fx3 = copy.copy(fx)
sfx3 = shift.shift_batch(fx3,
topo_shape = self.topo_shape,
rings = self.rings,
maxshift = self.max_shift,
rng = self.rng)
sx3 = retina.decode(sfx3, (96,96,2), (8,4,2,2))
# Shift, binarize, reconstruct, then defoveate minibatch
bsfx4 = self.grbm.preproc(sfx3)
sfxhat4 = self.grbm.reconstruct(bsfx4)
sxhat4 = retina.decode(sfxhat4, (96,96,2), (8,4,2,2))
import pylab as pl
import pdb; pdb.set_trace()
for i in xrange(len(fx)):
pl.subplot(1,4,1); pl.gray(); pl.imshow(x1[i,:,:,0])
pl.subplot(1,4,2); pl.gray(); pl.imshow(sx3[i,:,:,0])
pl.subplot(1,4,3); pl.gray(); pl.imshow(xhat2[i,:,:,0])
pl.subplot(1,4,4); pl.gray(); pl.imshow(sxhat4[i,:,:,0])
pl.show()
def next(self, debug=False):
_fovx = self.iterator.next()
# make explicit copy of batch data so we don't overwrite the original example !
fovx = copy.copy(_fovx)
# Shift then defoveate minibatch
shift.shift_batch(fovx,
topo_shape = self.topo_shape,
rings = self.rings,
maxshift = self.max_shift,
rng = self.rng)
bin_shift_fovx = self.grbm.preproc(fovx)
return bin_shift_fovx
class TrainingAlgorithm(default.DefaultTrainingAlgorithm):
def setup(self, model, dataset):
dataset._iterator = PreprocIterator(
dataset.iterator(
mode='shuffled_sequential',
batch_size = model.batch_size),
topo_shape = (96,96,2),
rings = (8,4,2,2),
max_shift = 6)
x = dataset._iterator.next()
model.init_parameters_from_data(x)
super(TrainingAlgorithm, self).setup(model, dataset)
if __name__ == '__main__':
"""
from deep_tempering.data import grbm_preproc
grbm = grbm_preproc.GRBMPreprocessor()
# binary data extracted from Russ' MATLAB code (batchdata in MATLAB)
binrusX1 = binary_norb.BinaryNORB('train')
# foveated & other preprocessing's on NORB (fovimg1 and fovimg2 in MATLAB)
fovrusX2 = binary_norb.FoveatedPreprocNORB('train')
binrusX2 = grbm.encode(fovrusX2.X)
# We need to find the random mapping which was used to build "batchdata", so that we can
# numpy.sum(x1**2, axis=1)[:,None] + numpy.sum(x2**2, axis=1)[None,:] - 2*numpy.dot(x1, x2.T)
"""
from deep_tempering.data import grbm_preproc
grbm = grbm_preproc.GRBMPreprocessor()
# generate a static validation and test set for the callback methods to work with
train = FoveatedPreprocNORB('train')
binary_train = grbm.preproc(train.X)
numpy.save('/data/lisa/data/norb_small/ruslan_binarized/binary_train_GD.npy', binary_train)
del train, binary_train
# generate a static validation and test set for the callback methods to work with
valid = FoveatedPreprocNORB('valid')
binary_valid = grbm.preproc(valid.X)
numpy.save('/data/lisa/data/norb_small/ruslan_binarized/binary_valid_GD.npy', binary_valid)
del valid, binary_valid
# generate a static validation and test set for the callback methods to work with
test = FoveatedPreprocNORB('test')
binary_test = grbm.preproc(test.X)
numpy.save('/data/lisa/data/norb_small/ruslan_binarized/binary_test_GD.npy', binary_test)
del test, binary_test
|
[
"guillaume.desjardins@gmail.com"
] |
guillaume.desjardins@gmail.com
|
c1b1470d3c311a14ce3c7720c3bff91c14f7c7cb
|
92bf1bfccd55ec4acd266cc7eaebecf92ff25e84
|
/old_version/game/player/field/cells/__init__.py
|
78e24aa7e87e6f2e40e5dd749b175878e24b4fc2
|
[] |
no_license
|
SchwIst/sea_battle
|
9316ce5bcb8e526335d519ce0f99e7832c44b349
|
66d417db5f791ec2ae0675647ed738de1a01b311
|
refs/heads/main
| 2023-05-24T02:53:14.082697
| 2021-06-13T16:12:36
| 2021-06-13T16:12:36
| 369,798,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
from colorama import Fore, Back
from colorama.ansi import AnsiBack, AnsiFore
from old_version.game.utils import Display
class Cell(Display):
foreground: AnsiFore
background: AnsiBack
text: str
def __init__(self, text: str, foreground: AnsiFore, background: AnsiBack):
self.text = text
self.background = background
self.foreground = foreground
def __str__(self):
return str(self.background) + \
str(self.foreground) + \
self.text + \
Fore.RESET + Back.RESET
def __hash__(self) -> int:
return ord(self.text) * 31
@staticmethod
def from_hash(number: int):
return chr(int(number / 31))
def display(self):
print(str(self), end='')
TYPES: dict[str, Cell] = {
"empty": Cell(' ', Fore.WHITE, Back.WHITE),
"ship": Cell('@', Fore.BLUE, Back.WHITE),
"damaged": Cell('X', Fore.RED, Back.WHITE),
"miss": Cell('•', Fore.BLACK, Back.WHITE),
# TODO: add use of: "killed": Cell('X', Fore.WHITE, Back.RED),
# TODO: add use of: "struck": Cell('0', Fore.YELLOW, Back.RESET),
"selected": Cell('S', Fore.BLACK, Back.WHITE)
}
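# A small illustrative round trip (an assumed example, not part of the original module):
# the hash stores only the cell character scaled by 31, so from_hash can recover it.
#   >>> Cell.from_hash(hash(TYPES["ship"]))
#   '@'
#   >>> TYPES["damaged"].display()   # prints a red 'X' on a white background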
|
[
"juliachirkova@e1.ru"
] |
juliachirkova@e1.ru
|
53eda65123f75cbc0c9b054254adbbc24827819b
|
ab3d4a18d7798a9644e8d451f672daee43979344
|
/users/factories.py
|
6b707f310cd001abadd736ef13308e10c1b8e1b9
|
[] |
no_license
|
dingusagar/Motty-backend
|
62ff11af33d9780458bfd589e88d1323f2b4245f
|
73b67e1b0b08abb0aefd3aec53b29f63e3651ddb
|
refs/heads/master
| 2022-12-10T12:21:37.126704
| 2019-09-01T15:04:43
| 2019-09-01T15:04:43
| 201,797,194
| 0
| 0
| null | 2022-11-22T03:35:37
| 2019-08-11T17:53:17
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 423
|
py
|
import factory
from users.models import User
class UserFactory(factory.django.DjangoModelFactory):
class Meta:
model = User
username = factory.Sequence(lambda n: 'username{0}'.format(n))
first_name = factory.Sequence(lambda n: 'firstname{0}'.format(n))
last_name = factory.Sequence(lambda n: 'lastname{0}'.format(n))
email = factory.Sequence(lambda n: 'xyz{0}@company{0}.com'.format(n))
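# A minimal usage sketch (assumed; factory_boy against a configured Django test database):
#   >>> user = UserFactory()          # creates and saves a User with sequential field values
#   >>> user.username, user.email     # e.g. ('username0', 'xyz0@company0.com')
#   >>> UserFactory.build()           # builds an unsaved instance instead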
|
[
"dingusagar@gmail.com"
] |
dingusagar@gmail.com
|
b14a968f1bdab65b69ffcfadaebe88b1686f5764
|
6c3273f818a80bcd3e191a3b18ff17b4f4ad1795
|
/app/config.py
|
2005251d1401583785b7622ad58265f9f8f32496
|
[] |
no_license
|
Evan-cell/news2
|
97f4df27ef707e5f9c441f8e9b6a6a3249df4386
|
420d1c91e70cad75f0f1e04189afa9eae102bb37
|
refs/heads/master
| 2023-07-31T22:44:10.309621
| 2021-09-15T14:02:37
| 2021-09-15T14:02:37
| 405,673,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
class Config:
'''
General configuration parent class
'''
NEWS_API_BASE_URL = 'https://newsapi.org/v2/top-headlines?country=us&apiKey={}'
class ProdConfig(Config):
'''
Production configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
pass
class DevConfig(Config):
'''
Development configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
DEBUG = True
|
[
"60639510+Evan-cell@users.noreply.github.com"
] |
60639510+Evan-cell@users.noreply.github.com
|
d1af5d61788451b164c2775e1b540de2621dfa2a
|
41948a77843732029a375198509000504e9666c1
|
/leetcode/dynamic programming/leet_1478.py
|
6d139753e57b087aa4cbf90aa1b6d2764c22a0f0
|
[] |
no_license
|
GoogleGu/leetcode
|
9a3882fe641b67c8494c1598947a4c043802b0dd
|
8b20a2039f2b1bf2cdb0db3bdc7ab367de3bef9d
|
refs/heads/master
| 2023-01-24T14:39:13.791507
| 2020-12-07T01:58:01
| 2020-12-07T01:58:01
| 160,458,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,588
|
py
|
from typing import List
class Solution:
def minDistance(self, houses: List[int], k: int) -> int:
if len(houses)<=k:
return 0
houses.sort()
n = len(houses)
inf = 100*10000+1
# 3-dim dp:
# 100 x 100 x 100
# answer[i][j][l]: among the first i houses, j mailboxes have been placed, and the last one sits at house l
answer = [[[inf]*n for _ in range(k)] for _ in range(n)]
for i in range(n):
for j in range(k):
for l in range(j,i+1):
if j>=i and l==i:
answer[i][j][l] = 0
continue
if j==0:
# only one mailbox: it is placed at house l
answer[i][j][l] = sum([abs(x-houses[l])for x in houses[:i+1]] )
elif l<i:
# the last mailbox is not placed at the last (i-th) house
answer[i][j][l] = answer[l][j][l] + sum([abs(x-houses[l])for x in houses[l+1:i+1]] )
else:
# several mailboxes, placed at different positions
# search every possible position for the second-to-last mailbox
tt = inf
for ll in reversed(range(j-1,l)):
t = answer[ll][j-1][ll] + sum([min(abs(x-houses[ll]),abs(x-houses[l]))for x in houses[ll+1:i+1]] )
tt = min(t,tt)
answer[i][j][l] = tt
xx = answer[n-1][k-1]
return min(xx)
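# A quick self-check on the problem's first sample (an added sketch, not part of the
# original submission): houses = [1,4,8,10,20] with k = 3 should give 5.
if __name__ == "__main__":
print(Solution().minDistance([1, 4, 8, 10, 20], 3))  # expected: 5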
|
[
"gyx152540@gmail.com"
] |
gyx152540@gmail.com
|
3b4fd19274236066279027019933a0da6c067b96
|
c5dd2424d2f430074c070ec76514559788c4e25e
|
/python/aws/bin/rst2html4.py
|
83185c4681019e9c11bf1e429494ae9bdb2db7b4
|
[] |
no_license
|
hardy-devnix/Code
|
7f23e1cb71c67d12ebd7b62492cd663dadbc4e0a
|
9e19101801d20ac96d6aa9b45078878504112760
|
refs/heads/master
| 2021-07-04T17:17:56.788589
| 2018-04-29T19:41:39
| 2018-04-29T19:41:39
| 129,791,404
| 0
| 1
| null | 2020-07-25T23:48:18
| 2018-04-16T18:57:29
|
Python
|
UTF-8
|
Python
| false
| false
| 736
|
py
|
#!/home/hardy/code/python/aws/bin/python
# $Id: rst2html4.py 7994 2016-12-10 17:41:45Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing (X)HTML.
The output conforms to XHTML 1.0 transitional
and almost to HTML 4.01 transitional (except for closing empty tags).
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html4', description=description)
|
[
"hardy.devnix@gmail.com"
] |
hardy.devnix@gmail.com
|
0a438569afda9f4a3006043a32bfb60b5ab4827a
|
3dc3ed0fb0a21e564ba99e12d60df6882741d48b
|
/gwent/vendor/pygwinc_clone/gwinc/struct.py
|
2d9c801c1e8e7f6505e3e947f75bf057e75b1f7d
|
[
"MIT"
] |
permissive
|
xilong/gwent
|
3afbfaec1fe2dc5909c2f948166ba5287221c0d9
|
6ee2f7a633c973ea10b450257b1ad4dbd0323738
|
refs/heads/master
| 2023-02-25T17:02:55.990422
| 2021-01-26T15:46:47
| 2021-01-26T15:46:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,661
|
py
|
import os
import re
import io
import yaml
import numpy as np
from scipy.io import loadmat
from scipy.io.matlab.mio5_params import mat_struct
# HACK: fix loading number in scientific notation
#
# https://stackoverflow.com/questions/30458977/yaml-loads-5e-6-as-string-and-not-a-number
#
# An apparent bug in python-yaml prevents it from recognizing
# scientific notation as a float. The following is a modified version
# of the parser that recognizes scientific notation appropriately.
yaml_loader = yaml.SafeLoader
yaml_loader.add_implicit_resolver(
"tag:yaml.org,2002:float",
re.compile(
"""^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$""",
re.X,
),
list("-+0123456789."),
)
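# With this resolver registered, plain scalars in scientific notation parse as floats
# rather than strings (a small illustrative check, not part of the original module):
#   >>> yaml.load("a: 5e-6", Loader=yaml_loader)
#   {'a': 5e-06}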
def dictlist2recarray(l):
def dtype(v):
if isinstance(v, int):
return float
else:
return type(v)
# get dtypes from first element dict
dtypes = [(k, dtype(v)) for k, v in l[0].items()]
values = [tuple(el.values()) for el in l]
out = np.array(values, dtype=dtypes)
return out.view(np.recarray)
class Struct(object):
"""Matlab struct-like object
This is a simple implementation of a MATLAB struct-like object
that stores values as attributes of a simple class: and allows
assigning to attributes recursively, e.g.:
>>> s = Struct()
>>> s.a = 4
>>> s.b = Struct()
>>> s.b.c = 8
Various classmethods allow creating one of these objects from YAML
file, a nested dict, or a MATLAB struct object.
"""
# FIXME: This would be a way to allow setting nested struct
# attributes, e.g.:
#
# >>> s = Struct()
# >>> s.a.b.c = 4
#
# Usage of __getattr__ like this is dangerous and creates
# non-intuitive behavior (i.e. an empty struct is returned when
# accessing attributes that don't exist). Is there a way to
# accomplish this without that adverse side effect?
#
# def __getattr__(self, name):
# if name not in self.__dict__:
# self.__dict__[name] = Struct()
# return self.__dict__[name]
##########
def __init__(self, **kwargs):
"""Arguments can pre-fill the structure"""
self.__dict__.update(kwargs)
def __getitem__(self, key):
"""Get a (possibly nested) value from the struct."""
if "." in key:
k, r = key.split(".", 1)
# FIXME: this is inelegant. better done with regexp?
if len(k.split("[")) > 1:
kl, i = k.split("[")
i = int(i.strip("]"))
return self.__dict__[kl][i][r]
return self.__dict__[k][r]
else:
return self.__dict__[key]
def get(self, key, default):
"""Get a (possibly nested) value from the struct, or default."""
try:
return self[key]
except KeyError:
return default
def __setitem__(self, key, value):
if "." in key:
k, r = key.split(".", 1)
self.__dict__[k][r] = value
else:
self.__dict__[key] = value
def setdefault(self, key, default):
return self.__dict__.setdefault(key, default)
def items(self):
return self.__dict__.items()
def keys(self):
return self.__dict__.keys()
def values(self):
return self.__dict__.values()
def __contains__(self, key):
return key in self.__dict__
def to_dict(self, array=False):
"""Return nested dictionary representation of Struct.
If `array` is True any lists encountered will be turned into
numpy arrays, and lists of Structs will be turned into record
arrays. This is needed to convert to structure arrays in
matlab.
"""
d = {}
for k, v in self.__dict__.items():
if isinstance(v, type(self)):
d[k] = v.to_dict(array=array)
else:
if isinstance(v, list):
try:
# this should fail if the elements of v are
# not Struct
# FIXME: need cleaner way to do this
v = [i.to_dict(array=array) for i in v]
if array:
v = dictlist2recarray(v)
except AttributeError:
if array:
v = np.array(v)
elif isinstance(v, int):
v = float(v)
d[k] = v
return d
def to_yaml(self, path=None):
"""Return YAML representation of Struct.
Write YAML to `path` if specified.
"""
y = yaml.dump(self.to_dict(), default_flow_style=False)
if path:
with open(path, "w") as f:
f.write(y)
else:
return y
# def __repr__(self):
# return self.to_yaml().strip('\n')
def __str__(self):
return "<GWINC Struct: {}>".format(list(self.__dict__.keys()))
def __iter__(self):
return iter(self.__dict__)
def walk(self):
"""Iterate over all leaves in the struct tree."""
for k, v in self.__dict__.items():
if isinstance(v, type(self)):
for sk, sv in v.walk():
yield k + "." + sk, sv
else:
try:
for i, vv in enumerate(v):
for sk, sv in vv.walk():
yield "{}[{}].{}".format(k, i, sk), sv
except (AttributeError, TypeError):
yield k, v
def diff(self, other):
"""Return tuple of differences between target IFO.
Returns list of (key, value, other_value) tuples. Value is
None if key not present.
"""
diffs = []
for k, ov in other.walk():
v = self.get(k, None)
if ov != v and ov is not v:
diffs.append((k, v, ov))
for k, v in self.walk():
ov = other.get(k, None)
if ov is None:
diffs.append((k, v, ov))
return diffs
def to_txt(self, path=None, fmt="0.6e", delimiter=": ", end=""):
"""Return text represenation of Struct, one element per line.
Struct keys use '.' to indicate hierarchy. The `fmt` keyword
controls the formatting of numeric values. MATLAB code can be
generated with the following parameters:
>>> ifo.to_txt(delimiter=' = ', end=';')
Write text to `path` if specified.
"""
txt = io.StringIO()
for k, v in sorted(self.walk()):
if isinstance(v, (int, float, complex)):
base = fmt
elif isinstance(v, (list, np.ndarray)):
if isinstance(v, list):
v = np.array(v)
v = np.array2string(
v,
separator="",
max_line_width=np.Inf,
formatter={"all": lambda x: "{:0.6e} ".format(x)},
)
base = "s"
else:
base = "s"
txt.write(
u"{key}{delimiter}{value:{base}}{end}\n".format(
key=k,
value=v,
base=base,
delimiter=delimiter,
end=end,
)
)
if path:
with open(path, "w") as f:
f.write(txt.getvalue())
else:
return txt.getvalue()
@classmethod
def from_dict(cls, d):
"""Create Struct from nested dict."""
c = cls()
for k, v in d.items():
if type(v) == dict:
c.__dict__[k] = Struct.from_dict(v)
else:
try:
c.__dict__[k] = list(map(Struct.from_dict, v))
except (AttributeError, TypeError):
c.__dict__[k] = v
return c
@classmethod
def from_yaml(cls, y):
"""Create Struct from YAML string."""
d = yaml.load(y, Loader=yaml_loader)
return cls.from_dict(d)
@classmethod
def from_matstruct(cls, s):
"""Create Struct from scipy.io.matlab mat_struct object."""
c = cls()
try:
s = s["ifo"]
except:
pass
for k, v in s.__dict__.items():
if k in ["_fieldnames"]:
# skip these fields
pass
elif type(v) is mat_struct:
c.__dict__[k] = Struct.from_matstruct(v)
else:
# handle lists of Structs
try:
c.__dict__[k] = list(map(Struct.from_matstruct, v))
except:
c.__dict__[k] = v
# try:
# c.__dict__[k] = float(v)
# except:
# c.__dict__[k] = v
return c
@classmethod
def from_file(cls, path):
"""Load Struct from .yaml or MATLAB .mat file.
File type will be determined by extension.
"""
(root, ext) = os.path.splitext(path)
with open(path, "r") as f:
if ext in [".yaml", ".yml"]:
d = yaml.load(f, Loader=yaml_loader)
return cls.from_dict(d)
elif ext == ".mat":
s = loadmat(f, squeeze_me=True, struct_as_record=False)
return cls.from_matstruct(s)
else:
raise IOError("Unknown file type: {}".format(ext))
def load_struct(path):
"""Load struct from YAML or MATLAB file.
Files may be either .yaml, .mat or .m. For .m files, the file is
expected to include either an object or function that corresponds
to the basename of the file. The MATLAB engine will be invoked to
execute the .m code and extract the resultant IFO data.
"""
root, ext = os.path.splitext(path)
if ext == ".m":
from ..gwinc_matlab import Matlab
matlab = Matlab()
matlab.addpath(os.path.dirname(path))
func_name = os.path.basename(root)
matlab.eval("ifo = {};".format(func_name), nargout=0)
ifo = matlab.extract("ifo")
return Struct.from_matstruct(ifo)
else:
return Struct.from_file(path)
# accepted extension types for struct files
STRUCT_EXT = [".yaml", ".yml", ".mat", ".m"]
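# A brief usage sketch of the dotted-key access described above (an assumed example,
# not part of the original module):
#   >>> s = Struct.from_dict({'a': {'b': {'c': 4}}})
#   >>> s['a.b.c']
#   4
#   >>> list(s.walk())
#   [('a.b.c', 4)]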
|
[
"ark0015@mix.wvu.edu"
] |
ark0015@mix.wvu.edu
|
5cf125533c796901753dd003da453d510a873b6f
|
985a9181f83171e9df88d9dccfba63c9cc2b8ba7
|
/tools/codegen/api/unboxing.py
|
29c2662d10127951412d6e96005d2f860199bf06
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
Poet-LiBai/pytorch
|
3c70caa61d245d67820d8d7773bdba45d67986f4
|
3e10fe323165a94fed66b1487902e8d394ce3be1
|
refs/heads/master
| 2022-05-04T18:25:05.419746
| 2022-04-21T01:48:46
| 2022-04-21T01:48:46
| 230,610,878
| 0
| 0
|
NOASSERTION
| 2019-12-28T13:03:02
| 2019-12-28T13:03:01
| null |
UTF-8
|
Python
| false
| false
| 9,299
|
py
|
from typing import List, Tuple
from tools.codegen.api import cpp
from tools.codegen.api.types import Binding, CType, CppSignatureGroup
from tools.codegen.model import (
Argument,
NativeFunction,
Type,
BaseType,
OptionalType,
ListType,
BaseTy,
)
# This file generates the code for unboxing wrappers, i.e., the glue logic to unbox a boxed operator and convert the
# ivalues from stack to correct arguments to the unboxed kernel, based on corresponding JIT schema. This codegen is
# an alternative way to generate unboxing wrappers similar to the existing C++ metaprogramming approach but gets the
# job done statically. These generated unboxing wrappers will be useful under the scenario where we need to register
# a fixed set of operators known at compile time and thus can save some time in runtime initialization phase.
#
# Here's an example on how the codegen works:
#
# - Function Schema (source of truth)
#
# aten::empty.names(int[] size, *, Dimname[]? names,
# ScalarType? dtype=None, Layout? layout=None,
# Device? device=None, bool? pin_memory=None,
# MemoryFormat? memory_format=None) -> Tensor
# - Argument Conversion
# Generates C++ code to convert an ivalue (from stack) to its underlying C++ type.
# - int[] size
# ```cpp
# const c10::List<c10::IValue> size_list_in = (std::move(peek(stack, 0, 7))).toList();
#
# std::vector<int64_t> size_vec;
# for (c10::IValue size_elem: size_list_in) {
# int64_t size_base = size_elem.to<int64_t>();
# size_vec.push_back(size_base);
# }
# at::ArrayRef<int64_t> size_list_out(size_vec);
# ~~~~~~~~~~~~~ <-- The converted argument from ivalues in the stack.
# Will be passed to unboxed kernel.
# ```
# - Dimname[]? names
# ```cpp
# c10::optional<c10::IValue> names_opt = (std::move(peek(stack, 1, 7))).toOptional<c10::IValue>();
# c10::optional<at::ArrayRef<at::Dimname>> names_opt_out;
# if (names_opt.has_value()) {
# ~~~~~~~~~~~ <-- Unwrapping optional shell
# const c10::IValue names_opt_in = names_opt.value();
# const c10::List<c10::IValue> names_list_in = names_opt_in.toList();
#
# std::vector<at::Dimname> names_vec;
# for (c10::IValue names_elem: names_list_in) {
# ~~~~~~~~~~~~~~~~~~~~~~~~~ <-- Unrolling list, then convert elements one by one.
# at::Dimname names_base = names_elem.to<at::Dimname>();
# names_vec.push_back(names_base);
# }
# at::ArrayRef<at::Dimname> names_list_out(names_vec);
#
# names_opt_out = c10::optional<at::ArrayRef<at::Dimname>>(names_list_out);
# } else {
# names_opt_out = c10::optional<at::ArrayRef<at::Dimname>>();
# }
# ```
# - ScalarType? dtype (similarly for the rest of the arguments)
# ```cpp
# c10::optional<c10::IValue> dtype_opt = (std::move(peek(stack, 2, 7))).toOptional<c10::IValue>();
# c10::optional<at::ScalarType> dtype_opt_out;
# if (dtype_opt.has_value()) {
# const c10::IValue dtype_opt_in = dtype_opt.value();
# at::ScalarType dtype_base = dtype_opt_in.to<at::ScalarType>();
# ~~~~~~~~~~~~~~~~~~~~ <-- For base types, convert ivalue to it
# directly using ".to<T>()" API.
# dtype_opt_out = c10::optional<at::ScalarType>(dtype_base);
# } else {
# dtype_opt_out = c10::optional<at::ScalarType>();
# }
# ```
#
# - Unboxed Kernel Call
# ```cpp
# auto result_ = torch::empty(
# size_list_out,
# names_opt_out,
# options,
# memory_format_opt_out
# );
# ```
#
# - Push Result Back to Stack
# ```cpp
# drop(stack, 7);
# pack(stack, std::move(result_));
# ```
connector = "\n\t"
# Return unboxing function name for a NativeFunction
def name(f: NativeFunction) -> str:
return f.func.name.unambiguous_name()
# Convert all the arguments in a NativeFunction to C++ code
def convert_arguments(f: NativeFunction) -> Tuple[List[Binding], List[str]]:
# we need the 'self' argument so method needs to be False
args = (
CppSignatureGroup.from_native_function(f, method=False)
.most_faithful_signature()
.arguments()
)
code_list = [
f"c10::IValue {args[i].name} = std::move(peek(stack, {i}, {len(args)}));"
for i in range(len(args))
] + [""]
binding_list = []
for i, arg in enumerate(args):
# expecting only Argument
if not isinstance(arg.argument, Argument):
raise Exception(
f"Unexpected argument type, expecting `Argument` but got {arg}"
)
argument: Argument = arg.argument
unboxed_name, _, code, decl = argumenttype_ivalue_convert(
argument.type, argument.name, mutable=argument.is_write
)
code_list.extend(decl)
code_list.extend(code)
binding_list.append(arg.with_name(unboxed_name))
return binding_list, code_list
# Takes in the type, name and mutability corresponding to an argument, and generates a tuple of:
# (1) the C++ code necessary to unbox the argument
# (2) A Binding corresponding to the newly created unboxed variable, including variable name and its CType
def argumenttype_ivalue_convert(
t: Type, arg_name: str, *, mutable: bool = False
) -> Tuple[str, CType, List[str], List[str]]:
ctype = cpp.argumenttype_type(t=t, mutable=mutable, binds=arg_name).type
if isinstance(t, BaseType):
out_name = f"{arg_name}_base"
code, decl = _gen_code_base_type(
arg_name=arg_name, out_name=out_name, ctype=ctype
)
elif isinstance(t, OptionalType):
out_name = f"{arg_name}_opt_out"
code, decl = _gen_code_optional_type(
arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
)
elif isinstance(t, ListType):
out_name = f"{arg_name}_list_out"
code, decl = _gen_code_list_type(
arg_name=arg_name, out_name=out_name, t=t, ctype=ctype
)
else:
raise Exception(f"Cannot handle type {t}. arg_name: {arg_name}")
return out_name, ctype, code, decl
def _gen_code_base_type(
arg_name: str, out_name: str, ctype: CType
) -> Tuple[List[str], List[str]]:
return [
f"{ctype.cpp_type(strip_ref=True)} {out_name} = {arg_name}.to<{ctype.cpp_type(strip_ref=True)}>();"
], []
def _gen_code_optional_type(
arg_name: str, out_name: str, t: OptionalType, ctype: CType
) -> Tuple[List[str], List[str]]:
in_name = f"{arg_name}_opt_in"
res_name, _, res_code, decl = argumenttype_ivalue_convert(t.elem, in_name)
return (
f"""
c10::optional<c10::IValue> {arg_name}_opt = {arg_name}.toOptional<c10::IValue>();
{ctype.cpp_type(strip_ref=True)} {out_name};
if ({arg_name}_opt.has_value()) {{
const c10::IValue {in_name} = {arg_name}_opt.value();
{connector.join(res_code)}
{out_name} = {ctype.cpp_type(strip_ref=True)}({res_name});
}} else {{
{out_name} = {ctype.cpp_type(strip_ref=True)}();
}}
""".split(
"\n"
),
decl,
)
def _gen_code_list_type(
arg_name: str, out_name: str, t: ListType, ctype: CType
) -> Tuple[List[str], List[str]]:
in_name = f"{arg_name}_list_in"
elem_name = f"{arg_name}_elem"
code = [f"const c10::List<c10::IValue> {in_name} = {arg_name}.toList();"]
res_name, res_ctype, res_code, decl = argumenttype_ivalue_convert(t.elem, elem_name)
# handle list type with size, e.g., bool[4]
if isinstance(t.elem, BaseType) and t.elem.name == BaseTy.bool and t.size:
code.extend(
f"""
{ctype.cpp_type(strip_ref=True)} {out_name} = as_array<{res_ctype.cpp_type(strip_ref=True)}, {t.size}>({in_name});
""".split(
"\n"
)
)
# we have to use c10::List for optional element. e.g., Tensor?[] -> c10::List<c10::optional<at::Tensor>>
elif isinstance(t.elem, OptionalType):
code.extend(
f"""
{ctype.cpp_type(strip_ref=True)} {out_name};
for (c10::IValue {elem_name}: {in_name}) {{
{connector.join(res_code)}
{out_name}.push_back({res_name});
}}
""".split(
"\n"
)
)
else:
# use ArrayRef as default.
vec_name = arg_name + "_vec"
# need to bring vector instantiation out of scope so that ArrayRef has valid data
decl.append(f"std::vector<{res_ctype.cpp_type(strip_ref=True)}> {vec_name};")
code.extend(
f"""
for (c10::IValue {elem_name}: {in_name}) {{
{connector.join(res_code)}
{vec_name}.push_back({res_name});
}}
{ctype.cpp_type(strip_ref=True)} {out_name}({vec_name});
""".split(
"\n"
)
)
return code, decl
|
[
"pytorchmergebot@users.noreply.github.com"
] |
pytorchmergebot@users.noreply.github.com
|
18b01c0bfd6fbefe2c9ab0f5e617a471aeea9967
|
b5295ecd1df0721471d6ee7f6caad8b46a2bfdda
|
/polls/admin.py
|
bed08c496b2e313acf2d2cd3d28611945650c427
|
[] |
no_license
|
wandiao/testdj
|
5b1b07fcc26b6303e4e0c25c77283e9e681e42f9
|
37225d8e204d40297256a7aac71ffb33f6608c08
|
refs/heads/master
| 2021-05-08T12:16:02.590440
| 2018-03-03T06:48:04
| 2018-03-03T06:48:04
| 119,920,676
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import inspect
from django.contrib import admin
import models as app_models
# Register your models here.
for attr in dir(app_models):
model = getattr(app_models, attr)
if not inspect.isclass(model):
continue
try:
admin.site.register(model)
except:
pass
|
[
"851298395@qq.com"
] |
851298395@qq.com
|
7eff13e47f67cf955c1ffcc0daa1712304c1e977
|
2c9ba018be94873c42dc20dd0d83993e87109dd7
|
/src/spaceone/inventory/manager/ecs/vpc_manager.py
|
0ba3ea3cae8ff97b34fe1a74966aee0092af4ded
|
[
"Apache-2.0"
] |
permissive
|
stat-kwon/plugin-alibaba-cloud-ecs-inven-collector
|
14a516b5c04258c2275d0912cc86840532fb6c6c
|
4529b0a64443814056916b2cd8b928f76228dc9e
|
refs/heads/master
| 2023-07-28T10:58:57.197040
| 2021-09-15T05:34:04
| 2021-09-15T05:34:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,192
|
py
|
from spaceone.core.manager import BaseManager
from spaceone.inventory.model.subnet import Subnet
from spaceone.inventory.model.vpc import VPC
class VPCManager(BaseManager):
def __init__(self, params, ecs_connector=None):
self.params = params
self.ecs_connector = ecs_connector
def get_vpc_info(self, vpc_id, subnet_id, vpcs, subnets):
"""
vpc_data = {
"vpc_name": "",
"vpc_id": "",
"cidr": "",
}
subnet_data = {
"subnet_name": "",
"subnet_id": "",
"cidr": ""
}
"""
matched_vpc = self.get_matched_vpc(vpc_id, vpcs)
matched_subnet = self.get_matched_subnet(subnet_id, subnets)
return VPC(matched_vpc, strict=False), Subnet(matched_subnet, strict=False)
@staticmethod
def get_matched_vpc(vpc_id, vpcs):
for vpc in vpcs:
if vpc_id == vpc["VpcId"]:
return vpc
return None
@staticmethod
def get_matched_subnet(subnet_id, subnets):
for subnet in subnets:
if subnet_id == subnet["VSwitchId"]:
return subnet
return None
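# A minimal illustration of the static matchers (sample payloads assumed, mirroring the
# Alibaba Cloud field names used above):
#   >>> VPCManager.get_matched_vpc('vpc-1', [{'VpcId': 'vpc-1'}, {'VpcId': 'vpc-2'}])
#   {'VpcId': 'vpc-1'}
#   >>> VPCManager.get_matched_subnet('vsw-9', [{'VSwitchId': 'vsw-1'}]) is None
#   True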
|
[
"sunhyebaek99@gmail.com"
] |
sunhyebaek99@gmail.com
|
1384fc9602f827467726f82f961874289e48c4ae
|
b1d82c0a332c029fd23e3f94e0d7fa23427b3fd4
|
/meeting/migrations/0007_google_calendar.py
|
28e0fc6b8c0750f05cfd516a489b0bb357908542
|
[] |
no_license
|
cyliang/covart-web
|
4f09b8d1637371f161293c6adb08b08fec293e2c
|
acb0316187b408106432fc1fb752176fe6c91024
|
refs/heads/master
| 2021-01-01T06:34:44.313464
| 2018-01-15T06:51:48
| 2018-01-15T06:51:48
| 97,454,835
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-12-17 17:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meeting', '0006_no_null_presenthistory'),
]
operations = [
migrations.AddField(
model_name='meetinghistory',
name='gcal_id',
field=models.CharField(blank=True, max_length=150),
),
]
|
[
"r05944012@csie.ntu.edu.tw"
] |
r05944012@csie.ntu.edu.tw
|
b2ad8e4a1a3de787c96d4f94eed380c0a6db8655
|
e0b52d79f4f37f12223c5c2855b4c1c5453c6006
|
/app1/app1/settings.py
|
6aba98b4594d80eb50023e66de39454f7fd5cd9d
|
[] |
no_license
|
neoice/django-pki-standalone
|
2e725f4bd9b7aaf0e1c8d7d1f12b4b648f8a1766
|
7d4c6448f60ec2facc4cfc4f90feca2ae2656099
|
refs/heads/master
| 2021-01-16T18:30:10.499879
| 2013-05-30T05:22:56
| 2013-05-30T05:22:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,303
|
py
|
# Django settings for app1 project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = ''
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'app1.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'app1.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
'pki',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
[
"neoice@neoice.net"
] |
neoice@neoice.net
|
9e846d0796e745e22ad77bb07b4f225c3c8152b6
|
b708503cd67404b632dd58364648d6a927d042b6
|
/main.py
|
1cc8f4904b7cf5ed96d36eafbdd82769e917db8c
|
[] |
no_license
|
muhasensei/simple_neuron_network
|
3091be4d221055e4ac8b9d5d248dc60b20bb8a33
|
fad29de7f306044176faf8a19715ab2033b7c946
|
refs/heads/master
| 2023-08-01T03:27:37.076867
| 2021-09-15T17:30:09
| 2021-09-15T17:30:09
| 406,865,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
import numpy as np
def act(x):
return 0 if x < 0.5 else 1
def go(car, mambo, handsome):
inputs = np.array([car, mambo, handsome])
w11 = [0.1, 0.3, 0]  # non-prioritizer
w12 = [0.4, -0.5, 1]  # prioritizer
weight1 = np.array([w11, w12])
weight2 = [-1, 1]
hidden_layer_values = np.dot(weight1, inputs)
# print(hidden_layer)
hidden_layer_outputs = np.array([act(x) for x in hidden_layer_values])
# print(hidden_layer_outputs)
res = np.dot(hidden_layer_outputs, weight2)
return act(res)
decision = go(1, 0, 0)
if decision == 1:
print("Let's go")
else:
print('no, thanks')
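# For reference, a sweep over all eight input combinations (an added illustration, not
# part of the original script) would print the decision for each case:
#   for car in (0, 1):
#       for mambo in (0, 1):
#           for handsome in (0, 1):
#               print(car, mambo, handsome, '->', go(car, mambo, handsome))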
|
[
"zhanmuha01@gmail.com"
] |
zhanmuha01@gmail.com
|
c2afa61ce5c07ddf58ad8a84d653b702b672e0f7
|
ba498cea94e8bd449663253c013e610c9c8a2b7a
|
/meiduo_mall/apps/users/urls.py
|
68731248cc3efcae63df2af6746f9c806cedb77f
|
[
"MIT"
] |
permissive
|
love20ai/shangchengxiangmu
|
ba7221eac0f80b026041bc319f16d3ae776db60c
|
55ab7f5d2daa59a068255037d2c97d6846115542
|
refs/heads/master
| 2020-09-07T13:32:37.695474
| 2019-12-05T08:51:54
| 2019-12-05T08:51:54
| 220,796,389
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,402
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
# 1. Register page display
url(r'^register/$', views.RegisterView.as_view()),
# 2. Check whether a username is already taken usernames/(?P<username>[a-zA-Z0-9_-]{5,20})/count/
url(r'^usernames/(?P<username>[a-zA-Z0-9_-]{5,20})/count/$', views.UsernameCountView.as_view()),
# 3. Check whether a mobile number is already taken mobiles/(?P<mobile>1[3-9]\d{9})/count/
url(r'^mobiles/(?P<mobile>1[3-9]\d{9})/count/$', views.MobileCountView.as_view()),
# 4. Login page display
url(r'^login/$', views.LoginView.as_view(), name="login"),
# 5. Logout
url(r'^logout/$', views.LogoutView.as_view()),
# 6. User center
url(r'^info/$', views.UserInfoView.as_view(), name='info'),
# 7. Add a new email emails/
url(r'^emails/$', views.EmailView.as_view(), name='emails'),
# 8. Verify the email emails/verification/
url(r'^emails/verification/$', views.VerifyEmailView.as_view()),
# 9. Shipping addresses address/
url(r'^address/$', views.AddressView.as_view(),name='address'),
# 10. Add a shipping address addresses/create/
url(r'^addresses/create/$', views.AddressCreateView.as_view()),
# 11. Change password password/
url(r'^password/$', views.ChangePwdView.as_view(), name='password'),
# 12. User browsing history browse_histories/
url(r'browse_histories/$', views.UserBrowserView.as_view()),
]
|
[
"1466481346@qq.com"
] |
1466481346@qq.com
|
5846553d669043dcb6f317c082cb8cf51f6ea4cc
|
3353938363622ad12547f18cb89a2f5197788e6c
|
/src/server/cgi-bin/data.py
|
9245cb585ee55888589994fbe3b0c9ea1af4ec94
|
[
"Apache-2.0"
] |
permissive
|
dvaumoron/fourreToutPython
|
e3c2bd05775753bcfb9eef8f0c5b3c47a35566ec
|
cb5eafb90609201da804edd068a5bd65d58c2168
|
refs/heads/master
| 2022-11-27T00:48:15.241766
| 2022-11-18T15:12:54
| 2022-11-18T15:12:54
| 43,980,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
import json
def load():
with open("data.json") as f:
return json.load(f)
def dump(d):
with open("data.json", mode="w") as f:
json.dump(d, f, indent="\t")
|
[
"dvaumoron@gmail.com"
] |
dvaumoron@gmail.com
|
9caf626c943383f604df60874358c5a5a2be9ead
|
3d518e7a29fa28e1e5e9eae26af8ad3f13f23693
|
/analysis/makeFFTPlots.py
|
c5310f2a0c443c4e6c8418fe50e184e76c7d8104
|
[
"BSD-3-Clause"
] |
permissive
|
Betterton-Lab/C-GLASS
|
544f891d088410ce2f825e9938001b67c8e967f3
|
ae4ddcb33e177746c5191060b1d511d8fd578b7b
|
refs/heads/master
| 2023-08-03T08:37:04.352178
| 2023-07-25T14:26:47
| 2023-07-25T14:26:47
| 266,194,665
| 4
| 2
|
BSD-3-Clause
| 2023-09-14T02:55:34
| 2020-05-22T19:46:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
#!/usr/local/bin/python
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib import cm
import seaborn as sns
import numpy as np
import pandas as pd
sp = "100"
lp = "100"
fname = "soft_pf0.2_sp" + sp + "_lp" + lp + "_condensed.density"
df = pd.read_csv(fname, delim_whitespace=True, header=None)
fig, ax = plt.subplots(1, 2, figsize=(8, 3))
# sns.heatmap(df,cmap=cm.viridis,ax=ax[0])
data = df.replace(0, 1e-10)
data = data / data.sum().sum()
min_data = data.min().min()
if min_data == 0:
min_data = 1
max_data = data.max().max()
log_norm = LogNorm(vmin=min_data, vmax=max_data)
cbar_ticks = [
10 ** i
for i in range(
int(np.floor(np.log10(min_data))), 1 + int(np.ceil(np.log10(max_data)))
)
]
sns.heatmap(
data, norm=log_norm, cmap=cm.viridis, ax=ax[0], cbar_kws={"ticks": cbar_ticks}
)
fft_data = np.fft.fftshift(np.fft.fft2(df))
data = np.abs(fft_data)
# data=data/data.sum().sum()
min_data = data.min().min()
if min_data == 0:
min_data = 1
max_data = data.max().max()
log_norm = LogNorm(vmin=min_data, vmax=max_data)
cbar_ticks = [
10 ** i
for i in range(
int(np.floor(np.log10(min_data))), 1 + int(np.ceil(np.log10(max_data)))
)
]
sns.heatmap(
data, norm=log_norm, cmap=cm.viridis, ax=ax[1], cbar_kws={"ticks": cbar_ticks}
)
savename = "sp" + sp + "_lp" + lp
fig.savefig(savename + ".png", dpi=300)
f = open(savename + "_fft_max.txt", "w")
f.write(str(np.max(data[data.shape[0] // 2])))
f.close()
|
[
"jeffreymm@protonmail.com"
] |
jeffreymm@protonmail.com
|
b40115583eb26337ab79c1dee7c1665a8e0bc712
|
06c354961b87738a3ddcc193ac20e1070ffe481b
|
/Q-tables/Maze/maze-env-from-github/gym_maze/__init__.py
|
e629e7bea838b4323216b2e9cc43b87f80b335ed
|
[] |
no_license
|
nicolasbdls/AI-Gamer
|
3e6daffd1228fbd9a277fd512b30799b2e8658a0
|
c87a2776a4eaa1103c6f7a276b9aa9078a4873c0
|
refs/heads/master
| 2022-11-02T05:41:03.443491
| 2020-06-19T17:33:01
| 2020-06-19T17:33:01
| 272,419,607
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,743
|
py
|
from gym.envs.registration import register
register(
id='maze-v0',
entry_point='gym_maze.envs:MazeEnvSample5x5',
max_episode_steps=2000,
)
register(
id='maze-sample-5x5-v0',
entry_point='gym_maze.envs:MazeEnvSample5x5',
max_episode_steps=2000,
)
register(
id='maze-random-5x5-v0',
entry_point='gym_maze.envs:MazeEnvRandom5x5',
max_episode_steps=2000,
nondeterministic=True,
)
register(
id='maze-sample-10x10-v0',
entry_point='gym_maze.envs:MazeEnvSample10x10',
max_episode_steps=10000,
)
register(
id='maze-random-10x10-v0',
entry_point='gym_maze.envs:MazeEnvRandom10x10',
max_episode_steps=10000,
nondeterministic=True,
)
register(
id='maze-sample-3x3-v0',
entry_point='gym_maze.envs:MazeEnvSample3x3',
max_episode_steps=1000,
)
register(
id='maze-random-3x3-v0',
entry_point='gym_maze.envs:MazeEnvRandom3x3',
max_episode_steps=1000,
nondeterministic=True,
)
register(
id='maze-sample-100x100-v0',
entry_point='gym_maze.envs:MazeEnvSample100x100',
max_episode_steps=1000000,
)
register(
id='maze-random-100x100-v0',
entry_point='gym_maze.envs:MazeEnvRandom100x100',
max_episode_steps=1000000,
nondeterministic=True,
)
register(
id='maze-random-10x10-plus-v0',
entry_point='gym_maze.envs:MazeEnvRandom10x10Plus',
max_episode_steps=1000000,
nondeterministic=True,
)
register(
id='maze-random-20x20-plus-v0',
entry_point='gym_maze.envs:MazeEnvRandom20x20Plus',
max_episode_steps=1000000,
nondeterministic=True,
)
register(
id='maze-random-30x30-plus-v0',
entry_point='gym_maze.envs:MazeEnvRandom30x30Plus',
max_episode_steps=1000000,
nondeterministic=True,
)
|
[
"nicolas.barbierdelaserre@epfl.ch"
] |
nicolas.barbierdelaserre@epfl.ch
|
75884e8b02dd6efacf5ba3732eef7fece123ecec
|
04e38679813f1ba8d8b9543fc99be2dbac966dbc
|
/node_cluster.py
|
1375eca76932086c28069ce78f3e1341e8961db2
|
[] |
no_license
|
TPNguyen/ACMin
|
158a4d61fb64d3eefb65af2fd441903472e522ca
|
6de67739e4649d16fd2def9b03bbf3e3f7e18393
|
refs/heads/main
| 2023-05-11T21:30:21.794632
| 2021-06-05T13:37:50
| 2021-06-05T13:37:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,233
|
py
|
#########################################################################
# File Name: node_cluster.py
# Author: anryyang
# mail: anryyang@gmail.com
# Created Time: Fri 05 Apr 2019 04:33:10 PM
#########################################################################
#!/usr/bin/env python
from sklearn.cluster import KMeans
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import average_precision_score
from sklearn import metrics
import numpy as np
import argparse
import os
import cPickle as pickle
import networkx as nx
from scipy.sparse.linalg import svds
import scipy.sparse as sp
from scipy.sparse import identity
from scipy import linalg
from scipy import sparse
from munkres import Munkres
from sklearn import preprocessing
from sklearn.decomposition import NMF
import heapq
from sklearn.cluster import AffinityPropagation
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import SpectralClustering
from spectral import discretize
from scipy.sparse.linalg.eigen.arpack import eigsh as largest_eigsh
from scipy.sparse.linalg.eigen.arpack import eigs as largest_eigs
from scipy.linalg import qr
from scipy.linalg import orth
from scipy.sparse.csgraph import laplacian
import time
import sklearn
from sklearn.linear_model import SGDRegressor
from scipy.sparse import csc_matrix, csr_matrix
from numpy import linalg as LA
import operator
import random
print(sklearn.__version__)
def read_cluster(N,file_name):
if not file_name or not os.path.exists(file_name):
raise Exception("label file not exist!")
f = open(file_name, "r")
lines = f.readlines()
f.close()
#N = len(lines)
y = np.zeros(N, dtype=int)
for line in lines:
i, l = line.strip("\n\r").split()
i, l = int(i), int(l)
y[i] = l
return y
class clustering_metrics():
def __init__(self, true_label, predict_label):
self.true_label = true_label
self.pred_label = predict_label
def clusteringAcc(self):
print(len(self.true_label), len(self.pred_label))
# best mapping between true_label and predict label
l1 = list(set(self.true_label))
numclass1 = len(l1)
l2 = list(set(self.pred_label))
numclass2 = len(l2)
if numclass1 != numclass2:
print('Class Not equal!!!!')
c1_clusters = {c: set() for c in set(l1)}
c2_clusters = {c: set() for c in set(l2)}
for i in range(len(self.true_label)):
c1 = self.true_label[i]
c2 = self.pred_label[i]
c1_clusters[c1].add(i)
c2_clusters[c2].add(i)
c2_c1 = {}
for c2 in set(l2):
for c1 in set(l1):
c2_c1[str(c2)+","+str(c1)]=0
for (c1, s1) in c1_clusters.items():
for (c2, s2) in c2_clusters.items():
num_com_s1s2 = len(s1.intersection(s2))
c2_c1[str(c2)+","+str(c1)]=num_com_s1s2
sorted_x = sorted(c2_c1.items(), key=operator.itemgetter(1), reverse=True)
c2_c1_map = {}
c1_flag = {c: True for c in set(l1)}
c2_flag = {c: True for c in set(l2)}
for (k, v) in sorted_x:
if len(c2_c1_map.keys())==numclass1:
break
c2, c1 = k.split(',')
c2, c1 = int(c2), int(c1)
#print(c2, c1, v)
if c1_flag[c1] and c2_flag[c2]:
c2_c1_map[c2]=c1
c1_flag[c1] = False
c2_flag[c2] = False
new_predict = np.zeros(len(self.pred_label))
for i in range(len(l2)):
new_predict[i] = c2_c1_map[self.pred_label[i]]
else:
cost = np.zeros((numclass1, numclass2), dtype=int)
for i, c1 in enumerate(l1):
mps = [i1 for i1, e1 in enumerate(self.true_label) if e1 == c1]
for j, c2 in enumerate(l2):
mps_d = [i1 for i1 in mps if self.pred_label[i1] == c2]
cost[i][j] = len(mps_d)
# match two clustering results by Munkres algorithm
m = Munkres()
cost = cost.__neg__().tolist()
indexes = m.compute(cost)
# get the match results
new_predict = np.zeros(len(self.pred_label))
for i, c in enumerate(l1):
# corresponding label in l2:
c2 = l2[indexes[i][1]]
# ai is the index with label==c2 in the pred_label list
ai = [ind for ind, elm in enumerate(self.pred_label) if elm == c2]
new_predict[ai] = c
acc = metrics.accuracy_score(self.true_label, new_predict)
f1_macro = metrics.f1_score(self.true_label, new_predict, average='macro')
precision_macro = metrics.precision_score(self.true_label, new_predict, average='macro')
recall_macro = metrics.recall_score(self.true_label, new_predict, average='macro')
f1_micro = metrics.f1_score(self.true_label, new_predict, average='micro')
precision_micro = metrics.precision_score(self.true_label, new_predict, average='micro')
recall_micro = metrics.recall_score(self.true_label, new_predict, average='micro')
return acc
def evaluationClusterModelFromLabel(self):
nmi = metrics.normalized_mutual_info_score(self.true_label, self.pred_label)
adjscore = metrics.adjusted_rand_score(self.true_label, self.pred_label)
acc = self.clusteringAcc()
return acc, nmi, adjscore
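# A small illustration (assumed, not part of the original script): permuted cluster ids do
# not hurt the scores, because clusteringAcc() re-maps predicted labels onto true labels
# (via the Munkres assignment when the cluster counts match) before computing accuracy.
#   >>> cm = clustering_metrics([0, 0, 1, 1], [1, 1, 0, 0])
#   >>> cm.evaluationClusterModelFromLabel()   # (acc, nmi, ari)
#   (1.0, 1.0, 1.0)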
def load_data(args):
folder = "./data/"
edge_file = folder+args.data+"/edgelist.txt"
feature_file = folder+args.data+"/attrs.pkl"
label_file = folder+args.data + '/labels.txt'
print("loading from "+feature_file)
features = pickle.load(open(feature_file))
print("nnz:", features.getnnz())
print(features.shape)
n = features.shape[0]
print("loading from "+edge_file)
graph = nx.read_edgelist(edge_file, create_using=nx.Graph(), nodetype=int)
for v in range(n):
graph.add_node(v)
print("loading from "+label_file)
true_clusters = read_cluster(n,label_file)
return graph, features, true_clusters
def si_eig(P, X, alpha, beta, k, a):
t = 500
q, _ = qr(a, mode='economic')
XT = X.T
xsum = X.dot(XT.sum(axis=1))
xsum[xsum==0]=1
X = X/xsum
for i in range(t):
z = (1-alpha-beta)*P.dot(q)+ (beta)*X.dot(XT.dot(q))
p = q
q, _ = qr(z, mode='economic')
if np.linalg.norm(p-q, ord=1)<0.01:
print("converged")
break
return q
def base_cluster(graph, X, num_cluster,true_clusters):
print("attributed transition matrix constrcution...")
adj = nx.adjacency_matrix(graph)
P = preprocessing.normalize(adj, norm='l1', axis=1)
n = P.shape[0]
print(P.shape)
start_time = time.time()
alpha=0.2
beta=0.35
XX = X.dot(X.T)
XX = preprocessing.normalize(XX, norm='l1', axis=1)
PP = (1-beta)*P + beta*XX
I = identity(n)
S = I
t = 5 #int(1.0/alpha)
for i in range(t):
S = (1-alpha)*PP.dot(S)+I
S = alpha*S
q = np.zeros(shape=(n,num_cluster))
predict_clusters = n*[1]
lls = [i for i in range(num_cluster)]
for i in range(n):
ll = random.choice(lls)
predict_clusters[i] = ll
M = csc_matrix((np.ones(len(predict_clusters)), (np.arange(0, n), predict_clusters)),shape=(n,num_cluster+1))
M = M.todense()
Mss = np.sqrt(M.sum(axis=0))
Mss[Mss==0]=1
q = M*1.0/Mss
largest_evc = np.ones(shape = (n,1))*(1.0/np.sqrt(n*1.0))
q = np.hstack([largest_evc,q])
XT = X.T
xsum = X.dot(XT.sum(axis=1))
xsum[xsum==0]=1
xsum = csr_matrix(1.0/xsum)
X = X.multiply(xsum)
print(type(X), X.shape)
predict_clusters = np.asarray(predict_clusters,dtype=np.int)
print(q.shape)
epsilon_f = 0.005
tmax = 200
err = 1
for i in range(tmax):
z = S.dot(q)
q_prev = q
q, _ = qr(z, mode='economic')
err = LA.norm(q-q_prev)/LA.norm(q)
if err <= epsilon_f:
break
if i==tmax-1:
evecs_large_sparse = q
evecs_large_sparse = evecs_large_sparse[:,1:num_cluster+1]
kmeans = KMeans(n_clusters=num_cluster, random_state=0, n_jobs=-1, algorithm='full', init='random', n_init=1, max_iter=50).fit(evecs_large_sparse)
predict_clusters = kmeans.predict(evecs_large_sparse)
time_elapsed = time.time() - start_time
print("%f seconds are taken to train"%time_elapsed)
return predict_clusters
def get_ac(P, X, XT, y, alpha, beta, t):
n = X.shape[0]
num_cluster = y.max()+1-y.min()
if(y.min()>0):
y = y-y.min()
print(n, len(y), num_cluster)
vectors_discrete = csc_matrix((np.ones(len(y)), (np.arange(0, n), y)), shape=(n, num_cluster)).toarray()
vectors_f = vectors_discrete
vectors_fs = np.sqrt(vectors_f.sum(axis=0))
vectors_fs[vectors_fs==0]=1
vectors_f = vectors_f*1.0/vectors_fs
q_prime = vectors_f
h = q_prime
for tt in range(t):
h = (1-alpha)*((1-beta)*P.dot(h)+ (beta)*X.dot(XT.dot(h))) +q_prime
h = alpha*h
h = q_prime-h
conductance_cur = 0
for k in range(num_cluster):
conductance_cur = conductance_cur + (q_prime[:,k].T).dot(h[:,k])#[0,0]
return conductance_cur/num_cluster
def cluster(graph, X, num_cluster,true_clusters, alpha=0.2, beta = 0.35, t=5, tmax=200, ri=False):
print("attributed transition matrix constrcution...")
adj = nx.adjacency_matrix(graph)
P = preprocessing.normalize(adj, norm='l1', axis=1)
n = P.shape[0]
print(P.shape)
epsilon_r = 6*n*np.log(n*1.0)/X.getnnz()
print("epsilon_r threshold:", epsilon_r)
degrees = dict(graph.degree())
topk_deg_nodes = heapq.nlargest(5*t*num_cluster, degrees, key=degrees.get)
PC = P[:,topk_deg_nodes]
M = PC
for i in range(t-1):
M = (1-alpha)*P.dot(M)+PC
class_evdsum = M.sum(axis=0).flatten().tolist()[0]
newcandidates = np.argpartition(class_evdsum, -num_cluster)[-num_cluster:]
M = M[:,newcandidates]
labels = np.argmax(M, axis=1).flatten().tolist()[0]
labels = np.asarray(labels,dtype=np.int)
# random initialization
if ri is True:
lls = np.unique(labels)
for i in range(n):
ll = random.choice(lls)
labels[i] = ll
M = csc_matrix((np.ones(len(labels)), (np.arange(0, M.shape[0]), labels)),shape=(M.shape))
M = M.todense()
start_time = time.time()
print("eigen decomposition...")
Mss = np.sqrt(M.sum(axis=0))
Mss[Mss==0]=1
q = M*1.0/Mss
largest_evc = np.ones(shape = (n,1))*(1.0/np.sqrt(n*1.0))
q = np.hstack([largest_evc,q])
XT = X.T
xsum = X.dot(XT.sum(axis=1))
xsum[xsum==0]=1
xsum = csr_matrix(1.0/xsum)
X = X.multiply(xsum)
print(type(X), X.shape)
predict_clusters_best=labels
iter_best = 0
conductance_best=100
conductance_best_acc = [0]*3
acc_best = [0]*3
acc_best_iter = 0
acc_best_conductance = 0
epsilon_f = 0.005
err = 1
for i in range(tmax):
z = (1-beta)*P.dot(q)+ (beta)*X.dot(XT.dot(q))
q_prev = q
q, _ = qr(z, mode='economic')
err = LA.norm(q-q_prev)/LA.norm(q)
if (i+1)%20==0:
evecs_large_sparse = q
evecs_large_sparse = evecs_large_sparse[:,1:num_cluster+1]
predict_clusters, q_prime = discretize(evecs_large_sparse)
conductance_cur = 0
h = q_prime
for tt in range(1):
h = (1-alpha)*((1-beta)*P.dot(h)+ (beta)*X.dot(XT.dot(h))) +q_prime
h = alpha*h
h = q_prime-h
for k in range(num_cluster):
conductance_cur = conductance_cur + (q_prime[:,k].T).dot(h[:,k])#[0,0]
conductance_cur=conductance_cur/num_cluster
if conductance_cur<conductance_best:
conductance_best = conductance_cur
predict_clusters_best = predict_clusters
iter_best = i
print(i, err, conductance_cur)
if err <= epsilon_f:
break
if tmax==0:
evecs_large_sparse = q
evecs_large_sparse = evecs_large_sparse[:,1:num_cluster+1]
predict_clusters, q_prime = discretize(evecs_large_sparse)
predict_clusters_best = predict_clusters
time_elapsed = time.time() - start_time
print("%f seconds are taken to train"%time_elapsed)
print(np.unique(predict_clusters_best))
print("best iter: %d, best condutance: %f, acc: %f, %f, %f"%(iter_best, conductance_best, conductance_best_acc[0], conductance_best_acc[1], conductance_best_acc[2]))
return predict_clusters_best
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process...')
parser.add_argument('--data', type=str, help='graph dataset name')
parser.add_argument('--k', type=int, default=0, help='the number of clusters')
args = parser.parse_args()
print("loading data ", args.data)
graph, feats, true_clusters = load_data(args)
n = feats.shape[0]
if args.k>0:
num_cluster = args.k
else:
num_cluster = len(np.unique(true_clusters))
print("k=", num_cluster)
alpha = 0.2
beta = 0.35
t = 5
tmax = 200
predict_clusters = cluster(graph, feats, num_cluster, true_clusters, alpha, beta, t, tmax, False)
if args.k<=0:
cm = clustering_metrics(true_clusters, predict_clusters)
print("%f\t%f\t%f"%cm.evaluationClusterModelFromLabel())
print("-------------------------------")
K = len(set(predict_clusters))
with open("sc."+args.data+"."+str(K)+".cluster.txt", "w") as fout:
for i in range(len(predict_clusters)):
fout.write(str(predict_clusters[i])+"\n")
|
[
"anryyang@gmail.com"
] |
anryyang@gmail.com
|
57c2d5313162d7e0fb6af8774cad77fd69aba20a
|
efe561ed874450beda6f2ddce40dc44dd9dffa97
|
/k3s_lcgc/__init__.py
|
1d95f5bac52cc982529669803d56afaca057abdc
|
[] |
no_license
|
kahf-sami/k3s_lcgc
|
e2d55ece08450911b1a9cef53a3c64bfe8945234
|
c4f8f26a0da094aaba0c9fe8bd492094d38ee77e
|
refs/heads/master
| 2021-01-19T23:00:51.425720
| 2017-05-21T11:44:36
| 2017-05-21T11:44:36
| 88,908,937
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48
|
py
|
from .topologyProcessor import TopologyProcessor
|
[
"kahf.sami@gmail.com"
] |
kahf.sami@gmail.com
|
8905e6c7c83c147138c3de73a9df10718c2b5416
|
387dd4af77f10e741dc00c58a6b607fdcf408839
|
/src/program_hosts.py
|
9a4e10680839b7bbe7b47ac6dcb530264359fac7
|
[] |
no_license
|
Sayantan-Nandy/SnakeMongo
|
3176ca63bbea265736d813feac8ce88df502821b
|
139c25b3e64aa54c51a376f01b8c3e8de8d9493d
|
refs/heads/main
| 2023-06-08T17:49:04.722467
| 2021-06-27T05:35:32
| 2021-06-27T05:35:32
| 380,526,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,872
|
py
|
from infrastructure.switchlang import switch
import infrastructure.state as state
import services.db_services as svc
from dateutil import parser
import datetime
def run():
print(' ****************** Welcome host **************** ')
print()
show_commands()
while True:
action = get_action()
with switch(action) as s:
s.case('c', create_account)
s.case('a', log_into_account)
s.case('l', list_cages)
s.case('r', register_cage)
s.case('u', update_availability)
s.case('v', view_bookings)
s.case('m', lambda: 'change_mode')
s.case(['x', 'bye', 'exit', 'exit()'], exit_app)
s.case('?', show_commands)
s.case('', lambda: None)
s.case('o', logout)
s.default(unknown_command)
if action:
print()
if s.result == 'change_mode':
return
def show_commands():
print('What action would you like to take:')
print('[C]reate an account')
print('Login to your [a]ccount')
print('[L]ist your cages')
print('[R]egister a cage')
print('[U]pdate cage availability')
print('[V]iew your bookings')
print('Change [M]ode (guest or host)')
print('e[X]it app')
print('[?] Help (this info)')
print("L[O]gout")
print()
def create_account():
print(' ****************** REGISTER **************** ')
name = input("Enter your name: ")
email = input("Enter your email: ").strip().lower()
acct_check = svc.check_email_exist(email)
if acct_check:
#print(acct_check)
error_msg("Emails exists")
return
else:
state.active_account = svc.create_account(name,email)
success_msg("Account Created")
def log_into_account():
print(' ****************** LOGIN **************** ')
email = input("Enter email for login: ")
acct_check = svc.check_email_exist(email)
if acct_check:
print("Login Succesful")
state.active_account = acct_check
else:
print("Wrong Email is enterred!!!")
def register_cage():
print(' ****************** REGISTER CAGE **************** ')
if not state.active_account:
print("Login needed to register cage")
return
name = input("Enter name of cage: ")
price = float(input("Enter price of cage: "))
sq_mts = float(input("Enter cage size in square meters: "))
carpet = input("Is it carpeted [y,n]: ").lower().startswith('y')
toys = input("Does it have toys [y,n]: ").lower().startswith('y')
dang_snakes = input("Is dangerous snakes allowed [y,n]: ").lower().startswith('y')
    c = svc.create_cage(state.active_account, name, price, sq_mts, carpet, toys, dang_snakes)
    state.reload_account()  # Reload the active account object with the modified data
    success_msg(f"Cage registered with id {c.id}")
def list_cages(suppress_header=False):
    if not suppress_header:
print(' ****************** Your cages **************** ')
if not state.active_account:
print("Login needed to list cages")
return
cages = svc.get_list_cages(state.active_account)
    for i, c in enumerate(cages):
        print(f"{i + 1}. Cage: {c.name}")
for b in c.bookings:
print(' * Booking: {}, {} days, booked? {}'.format(
b.check_in_date,
(b.check_out_date - b.check_in_date).days,
'YES' if b.booked_date is not None else 'no'
))
def update_availability():
print(' ****************** Add available date **************** ')
if not state.active_account:
print("Login needed to update cage availabilty")
return
    list_cages(suppress_header=True)
cage_number = input("Enter cage number: ")
if not cage_number.strip():
error_msg('Cancelled')
print()
return
cage_number = int(cage_number)
cages = svc.get_list_cages(state.active_account)
selected_cage = cages[cage_number-1]
print("Cage selected is: ",selected_cage.name)
start_date = parser.parse(
input("Enter available date [yyyy-mm-dd]: ")
)
days = int(input("How many days is this block of time? "))
svc.add_available_date(
selected_cage,
start_date,
days
)
success_msg(f'Date added to cage {selected_cage.name}.')
def view_bookings():
    """
    Prints details of all the bookings that have been done for the host.
    """
    print(' ****************** Your bookings **************** ')
if not state.active_account:
error_msg("You must log in first to register a cage")
return
cages = svc.get_list_cages(state.active_account)
bookings = [
(c, b)
for c in cages
for b in c.bookings
if b.booked_date is not None # Only take the booking entries with the booking date set as others are not booked
]
print("You have {} bookings.".format(len(bookings)))
for c, b in bookings:
print(' * Cage: {}, booked date: {}, from {} for {} days.'.format(
c.name,
datetime.date(b.booked_date.year, b.booked_date.month, b.booked_date.day),
datetime.date(b.check_in_date.year, b.check_in_date.month, b.check_in_date.day),
b.duration_in_days
))
def exit_app():
print()
print('bye')
raise KeyboardInterrupt()
def get_action():
text = '> '
if state.active_account:
text = f'{state.active_account.name}> '
action = input(text)
return action.strip().lower()
def logout():
    state.active_account = None
def unknown_command():
print("Sorry we didn't understand that command.")
def success_msg(text):
print(text)
def error_msg(text):
print(text)
| ["sayantannandy2598@gmail.com"] | sayantannandy2598@gmail.com |

| 658df86a1feb575255d63e56ca2cc25d537a534c | b20942e2ec20f5c31d152cb47490af78e54737f1 | /appdaemon/admain.py | 6df0de15bec7bee9bc907fbd980c43cd2e4210f8 | ["Apache-2.0"] | permissive | arraylabs/appdaemon | afaf4a464a4f4ea705f37c296fc78d22aa2d4483 | 6e6a6cb48dfceebb319507f56ca3dcc68dac456f | refs/heads/dev | 2019-07-23T14:11:34.084295 | 2018-01-30T18:10:01 | 2018-01-30T18:10:01 | 111,024,273 | 6 | 1 | null | 2017-11-16T21:27:08 | 2017-11-16T21:27:08 | null | UTF-8 | Python | false | false | 19,226 | py |
#!/usr/bin/python3
from pkg_resources import parse_version
import sys
import traceback
import configparser
import argparse
import logging
import os
import os.path
from logging.handlers import RotatingFileHandler
import appdaemon.conf as conf
import time
import datetime
import signal
import platform
from urllib.parse import urlparse
import yaml
import asyncio
import appdaemon.utils as utils
import appdaemon.appdaemon as ad
import appdaemon.adapi as api
import appdaemon.rundash as appdash
# Windows does not have Daemonize package so disallow
if platform.system() != "Windows":
from daemonize import Daemonize
def find_path(name):
for path in [os.path.join(os.path.expanduser("~"), ".homeassistant"),
os.path.join(os.path.sep, "etc", "appdaemon")]:
_file = os.path.join(path, name)
if os.path.isfile(_file) or os.path.isdir(_file):
return _file
return None
# noinspection PyBroadException,PyBroadException
def run():
tasks = []
loop = asyncio.get_event_loop()
# Initialize AppDaemon
if conf.apps is True:
utils.log(conf.logger, "INFO", "Starting Apps")
ad.run_ad(loop, tasks)
else:
utils.log(conf.logger, "INFO", "Apps are disabled")
# Initialize Dashboard/API
if conf.dashboard is True:
utils.log(conf.logger, "INFO", "Starting dashboard")
appdash.run_dash(loop, tasks)
else:
utils.log(conf.logger, "INFO", "Dashboards are disabled")
if conf.api_port is not None:
utils.log(conf.logger, "INFO", "Starting API")
api.run_api(loop, tasks)
else:
utils.log(conf.logger, "INFO", "API is disabled")
utils.log(conf.logger, "DEBUG", "Start Loop")
loop.run_until_complete(asyncio.wait(tasks))
utils.log(conf.logger, "DEBUG", "End Loop")
    utils.log(conf.logger, "INFO", "AppDaemon Exited")
# noinspection PyBroadException
def main():
# import appdaemon.stacktracer
# appdaemon.stacktracer.trace_start("/tmp/trace.html")
# Windows does not support SIGUSR1 or SIGUSR2
if platform.system() != "Windows":
signal.signal(signal.SIGUSR1, ad.handle_sig)
signal.signal(signal.SIGINT, ad.handle_sig)
signal.signal(signal.SIGHUP, ad.handle_sig)
# Get command line args
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", help="full path to config directory", type=str, default=None)
parser.add_argument("-p", "--pidfile", help="full path to PID File", default="/tmp/hapush.pid")
parser.add_argument("-t", "--tick", help="time that a tick in the schedular lasts (seconds)", default=1, type=float)
parser.add_argument("-s", "--starttime", help="start time for scheduler <YYYY-MM-DD HH:MM:SS>", type=str)
parser.add_argument("-e", "--endtime", help="end time for scheduler <YYYY-MM-DD HH:MM:SS>", type=str, default=None)
parser.add_argument("-i", "--interval", help="multiplier for scheduler tick", type=float, default=1)
parser.add_argument("-D", "--debug", help="debug level", default="INFO", choices=
[
"DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"
])
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + conf.__version__)
parser.add_argument('--commtype', help="Communication Library to use", default="WEBSOCKETS", choices=
[
"SSE",
"WEBSOCKETS"
])
parser.add_argument('--profiledash', help=argparse.SUPPRESS, action='store_true')
parser.add_argument('--convertcfg', help="Convert existing .cfg file to yaml", action='store_true')
# Windows does not have Daemonize package so disallow
if platform.system() != "Windows":
parser.add_argument("-d", "--daemon", help="run as a background process", action="store_true")
args = parser.parse_args()
conf.tick = args.tick
conf.interval = args.interval
conf.loglevel = args.debug
conf.profile_dashboard = args.profiledash
if args.starttime is not None:
conf.now = datetime.datetime.strptime(args.starttime, "%Y-%m-%d %H:%M:%S").timestamp()
else:
conf.now = datetime.datetime.now().timestamp()
if args.endtime is not None:
conf.endtime = datetime.datetime.strptime(args.endtime, "%Y-%m-%d %H:%M:%S")
if conf.tick != 1 or conf.interval != 1 or args.starttime is not None:
conf.realtime = False
config_dir = args.config
conf.commtype = args.commtype
if platform.system() != "Windows":
isdaemon = args.daemon
else:
isdaemon = False
if config_dir is None:
config_file_conf = find_path("appdaemon.cfg")
config_file_yaml = find_path("appdaemon.yaml")
else:
config_file_conf = os.path.join(config_dir, "appdaemon.cfg")
if not os.path.isfile(config_file_conf):
config_file_conf = None
config_file_yaml = os.path.join(config_dir, "appdaemon.yaml")
if not os.path.isfile(config_file_yaml):
config_file_yaml = None
config = None
config_from_yaml = False
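    # Prefer the YAML configuration when it exists (and we are not converting); otherwise fall back to the legacy .cfg file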
if config_file_yaml is not None and args.convertcfg is False:
#
# First locate secrets file
#
try:
secrets_file = os.path.join(os.path.dirname(config_file_yaml), "secrets.yaml")
if os.path.isfile(secrets_file):
with open(secrets_file, 'r') as yamlfd:
secrets_file_contents = yamlfd.read()
conf.secrets = yaml.load(secrets_file_contents)
yaml.add_constructor('!secret', utils._secret_yaml)
config_from_yaml = True
conf.config_file = config_file_yaml
conf.app_config_file = os.path.join(os.path.dirname(config_file_yaml), "apps.yaml")
with open(config_file_yaml, 'r') as yamlfd:
config_file_contents = yamlfd.read()
config = yaml.load(config_file_contents)
except yaml.YAMLError as exc:
print("ERROR", "Error loading configuration")
if hasattr(exc, 'problem_mark'):
if exc.context is not None:
print("ERROR", "parser says")
print("ERROR", str(exc.problem_mark))
print("ERROR", str(exc.problem) + " " + str(exc.context))
else:
print("ERROR", "parser says")
print("ERROR", str(exc.problem_mark))
print("ERROR", str(exc.problem))
sys.exit()
else:
# Read Config File
conf.config_file = config_file_conf
config = configparser.ConfigParser()
config.read_file(open(config_file_conf))
if args.convertcfg is True:
yaml_file = os.path.join(os.path.dirname(config_file_conf), "appdaemon.yaml")
print("Converting {} to {}".format(config_file_conf, yaml_file))
new_config = {}
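        # Split the legacy flat config into the new AppDaemon / HASS / HADashboard sections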
for section in config:
if section != "DEFAULT":
if section == "AppDaemon":
new_config["AppDaemon"] = {}
new_config["HADashboard"] = {}
new_config["HASS"] = {}
new_section = ""
for var in config[section]:
if var in ("dash_compile_on_start", "dash_dir", "dash_force_compile", "dash_url", "disable_dash", "dash_password", "dash_ssl_key", "dash_ssl_certificate"):
new_section = "HADashboard"
elif var in ("ha_key", "ha_url", "timeout"):
new_section = "HASS"
else:
new_section = "AppDaemon"
new_config[new_section][var] = config[section][var]
else:
new_config[section] = {}
for var in config[section]:
new_config[section][var] = config[section][var]
with open(yaml_file, "w") as outfile:
yaml.dump(new_config, outfile, default_flow_style=False)
sys.exit()
conf.config_dir = os.path.dirname(conf.config_file)
conf.config = config
conf.logfile = config['AppDaemon'].get("logfile")
conf.errorfile = config['AppDaemon'].get("errorfile")
conf.threads = int(config['AppDaemon'].get('threads'))
conf.certpath = config['AppDaemon'].get("cert_path")
conf.app_dir = config['AppDaemon'].get("app_dir")
conf.latitude = config['AppDaemon'].get("latitude")
conf.longitude = config['AppDaemon'].get("longitude")
conf.elevation = config['AppDaemon'].get("elevation")
conf.time_zone = config['AppDaemon'].get("time_zone")
conf.rss_feeds = config['AppDaemon'].get("rss_feeds")
conf.rss_update = config['AppDaemon'].get("rss_update")
conf.api_key = config['AppDaemon'].get("api_key")
conf.api_port = config['AppDaemon'].get("api_port")
conf.api_ssl_certificate = config['AppDaemon'].get("api_ssl_certificate")
conf.api_ssl_key = config['AppDaemon'].get("api_ssl_key")
if config_from_yaml is True:
conf.timeout = config['HASS'].get("timeout")
conf.ha_url = config['HASS'].get('ha_url')
conf.ha_key = config['HASS'].get('ha_key', "")
if 'HADashboard' in config:
conf.dash_url = config['HADashboard'].get("dash_url")
conf.dashboard_dir = config['HADashboard'].get("dash_dir")
conf.dash_ssl_certificate = config['HADashboard'].get("dash_ssl_certificate")
conf.dash_ssl_key = config['HADashboard'].get("dash_ssl_key")
conf.dash_password = config['HADashboard'].get("dash_password")
if config['HADashboard'].get("dash_force_compile") == "1":
conf.dash_force_compile = True
else:
conf.dash_force_compile = False
if config['HADashboard'].get("dash_compile_on_start") == "1":
conf.dash_compile_on_start = True
else:
conf.dash_compile_on_start = False
if "disable_dash" in config['HADashboard'] and config['HADashboard']["disable_dash"] == 1:
conf.dashboard = False
else:
conf.dashboard = True
else:
conf.timeout = config['AppDaemon'].get("timeout")
conf.ha_url = config['AppDaemon'].get('ha_url')
conf.ha_key = config['AppDaemon'].get('ha_key', "")
conf.dash_url = config['AppDaemon'].get("dash_url")
conf.dashboard_dir = config['AppDaemon'].get("dash_dir")
conf.dash_ssl_certificate = config['AppDaemon'].get("dash_ssl_certificate")
conf.dash_ssl_key = config['AppDaemon'].get("dash_ssl_key")
conf.dash_password = config['AppDaemon'].get("dash_password")
if config['AppDaemon'].get("dash_force_compile") == "1":
conf.dash_force_compile = True
else:
conf.dash_force_compile = False
if config['AppDaemon'].get("dash_compile_on_start") == "1":
conf.dash_compile_on_start = True
else:
conf.dash_compile_on_start = False
if "disable_dash" in config['AppDaemon'] and config['AppDaemon']["disable_dash"] == 1:
conf.dashboard = False
else:
conf.dashboard = True
if config['AppDaemon'].get("disable_apps") == "1":
conf.apps = False
else:
conf.apps = True
if config['AppDaemon'].get("cert_verify", True) == False:
conf.certpath = False
if conf.dash_url is not None:
url = urlparse(conf.dash_url)
#if url.scheme != "http":
# raise ValueError("Invalid scheme for 'dash_url' - only HTTP is supported")
dash_net = url.netloc.split(":")
conf.dash_host = dash_net[0]
try:
conf.dash_port = dash_net[1]
except IndexError:
conf.dash_port = 80
if conf.dash_host == "":
raise ValueError("Invalid host for 'dash_url'")
if conf.threads is None:
conf.threads = 10
if conf.logfile is None:
conf.logfile = "STDOUT"
if conf.errorfile is None:
conf.errorfile = "STDERR"
log_size = config['AppDaemon'].get("log_size", 1000000)
log_generations = config['AppDaemon'].get("log_generations", 3)
if isdaemon and (
conf.logfile == "STDOUT" or conf.errorfile == "STDERR"
or conf.logfile == "STDERR" or conf.errorfile == "STDOUT"
):
raise ValueError("STDOUT and STDERR not allowed with -d")
# Setup Logging
conf.logger = logging.getLogger("log1")
numeric_level = getattr(logging, args.debug, None)
conf.logger.setLevel(numeric_level)
conf.logger.propagate = False
# formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
# Send to file if we are daemonizing, else send to console
fh = None
if conf.logfile != "STDOUT":
fh = RotatingFileHandler(conf.logfile, maxBytes=log_size, backupCount=log_generations)
fh.setLevel(numeric_level)
# fh.setFormatter(formatter)
conf.logger.addHandler(fh)
else:
# Default for StreamHandler() is sys.stderr
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(numeric_level)
# ch.setFormatter(formatter)
conf.logger.addHandler(ch)
# Setup compile output
conf.error = logging.getLogger("log2")
numeric_level = getattr(logging, args.debug, None)
conf.error.setLevel(numeric_level)
conf.error.propagate = False
# formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
if conf.errorfile != "STDERR":
efh = RotatingFileHandler(
conf.errorfile, maxBytes=log_size, backupCount=log_generations
)
else:
efh = logging.StreamHandler()
efh.setLevel(numeric_level)
# efh.setFormatter(formatter)
conf.error.addHandler(efh)
# Setup dash output
if config['AppDaemon'].get("accessfile") is not None:
conf.dash = logging.getLogger("log3")
numeric_level = getattr(logging, args.debug, None)
conf.dash.setLevel(numeric_level)
conf.dash.propagate = False
# formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
efh = RotatingFileHandler(
config['AppDaemon'].get("accessfile"), maxBytes=log_size, backupCount=log_generations
)
efh.setLevel(numeric_level)
# efh.setFormatter(formatter)
conf.dash.addHandler(efh)
else:
conf.dash = conf.logger
# Startup message
utils.log(conf.logger, "INFO", "AppDaemon Version {} starting".format(conf.__version__))
utils.log(conf.logger, "INFO", "Configuration read from: {}".format(conf.config_file))
if config_from_yaml is True:
utils.log(conf.logger, "DEBUG", "AppDaemon Section: {}".format(config.get("AppDaemon")))
utils.log(conf.logger, "DEBUG", "Hass Section: {}".format(config.get("HASS")))
utils.log(conf.logger, "DEBUG", "HADashboard Section: {}".format(config.get("HADashboard")))
# Check with HA to get various info
ha_config = None
if conf.ha_url is not None:
utils.log(conf.logger, "DEBUG", "Calling HA for config with key: {} and url: {}".format(conf.ha_key, conf.ha_url))
while ha_config is None:
try:
ha_config = utils.get_ha_config()
except:
utils.log(
conf.logger, "WARNING", "Unable to connect to Home Assistant, retrying in 5 seconds")
if conf.loglevel == "DEBUG":
utils.log(conf.logger, "WARNING", '-' * 60)
utils.log(conf.logger, "WARNING", "Unexpected error:")
utils.log(conf.logger, "WARNING", '-' * 60)
utils.log(conf.logger, "WARNING", traceback.format_exc())
utils.log(conf.logger, "WARNING", '-' * 60)
time.sleep(5)
utils.log(conf.logger, "DEBUG", "Success")
utils.log(conf.logger, "DEBUG", ha_config)
conf.version = parse_version(ha_config["version"])
conf.ha_config = ha_config
conf.latitude = ha_config["latitude"]
conf.longitude = ha_config["longitude"]
conf.time_zone = ha_config["time_zone"]
if "elevation" in ha_config:
conf.elevation = ha_config["elevation"]
if "elevation" in config['AppDaemon']:
utils.log(conf.logger, "WARNING", "'elevation' directive is deprecated, please remove")
else:
conf.elevation = config['AppDaemon']["elevation"]
# Use the supplied timezone
if "time_zone" in config['AppDaemon']:
conf.ad_time_zone = config['AppDaemon']['time_zone']
os.environ['TZ'] = config['AppDaemon']['time_zone']
else:
os.environ['TZ'] = conf.time_zone
# Now we have logging, warn about deprecated directives
#if "latitude" in config['AppDaemon']:
# utils.log(conf.logger, "WARNING", "'latitude' directive is deprecated, please remove")
#if "longitude" in config['AppDaemon']:
# utils.log(conf.logger, "WARNING", "'longitude' directive is deprecated, please remove")
#if "timezone" in config['AppDaemon']:
# utils.log(conf.logger, "WARNING", "'timezone' directive is deprecated, please remove")
#if "time_zone" in config['AppDaemon']:
# utils.log(conf.logger, "WARNING", "'time_zone' directive is deprecated, please remove")
ad.init_sun()
# Add appdir and subdirs to path
if conf.apps is True:
conf.app_config_file_modified = os.path.getmtime(conf.app_config_file)
if conf.app_dir is None:
if config_dir is None:
conf.app_dir = find_path("apps")
else:
conf.app_dir = os.path.join(config_dir, "apps")
for root, subdirs, files in os.walk(conf.app_dir):
if root[-11:] != "__pycache__":
sys.path.insert(0, root)
else:
conf.app_config_file_modified = 0
# find dashboard dir
if conf.dashboard:
if conf.dashboard_dir is None:
if config_dir is None:
conf.dashboard_dir = find_path("dashboards")
else:
conf.dashboard_dir = os.path.join(config_dir, "dashboards")
#
# Setup compile directories
#
if config_dir is None:
conf.compile_dir = find_path("compiled")
else:
conf.compile_dir = os.path.join(config_dir, "compiled")
# Start main loop
if isdaemon:
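        # Keep the log file descriptors open across the daemonize fork so logging continues to work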
keep_fds = [fh.stream.fileno(), efh.stream.fileno()]
pid = args.pidfile
daemon = Daemonize(app="appdaemon", pid=pid, action=run,
keep_fds=keep_fds)
daemon.start()
while True:
time.sleep(1)
else:
run()
if __name__ == "__main__":
main()
| ["andrew@acockburn.com"] | andrew@acockburn.com |