Dataset schema (one row per source file):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 2 to 616)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 69)
- license_type: string (2 classes)
- repo_name: string (length 5 to 118)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (length 4 to 63)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (2.91k to 686M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (213 classes)
- src_encoding: string (30 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (2 to 10.3M)
- extension: string (246 classes)
- content: string (length 2 to 10.3M)
- authors: list (length 1)
- author_id: string (length 0 to 212)
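Rows with this schema can be consumed like any Hugging Face dataset; a minimal sketch of streaming a few fields is below. The dataset path is a placeholder, since this dump does not name its source.

```python
# Minimal sketch: iterate rows that follow the schema above. The dataset
# path "org/code-corpus" is hypothetical, not a name given by this dump.
from datasets import load_dataset

rows = load_dataset("org/code-corpus", split="train", streaming=True)
for row in rows:
    # Provenance fields identify the file; `content` holds the source text.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:120])
    break
```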
---

blob_id: 4e0d715b09ef16a51954d2c44d76ea74f7239998 | directory_id: a152737aeb8900fa28a09d6eb179c747d018a23d
path: /byke/apps.py | content_id: 64616d4026053361cea7aef5785bb4aa6b282e83
detected_licenses: [] | license_type: no_license | repo_name: jnm/nvb
snapshot_id: ff12dd351cac003ff57cf1b415f118676a1fc6d6 | revision_id: b60278795912de7086a15667decbde364ad2557d | branch_name: refs/heads/master
visit_date: 2021-01-10T09:49:54.983252 | revision_date: 2016-03-06T02:10:21 | committer_date: 2016-03-06T02:10:21
github_id: 51,812,927 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 124 | extension: py
content:
from __future__ import unicode_literals
from django.apps import AppConfig
class BykeConfig(AppConfig):
name = 'byke'
authors: ["john@tmoj.net"] | author_id: john@tmoj.net

---

blob_id: da8d9dcfd0eca8579bdd3850fa9e8d5aecbcb137 | directory_id: 88298e1bfd85f860af9541f10581a551508a2bc4
path: /sqla_mixins.py | content_id: 79f2ae44939ea3ca90305c903790c3b9421d7d3b
detected_licenses: ["BSD-2-Clause"] | license_type: permissive | repo_name: bboe/sqla_mixins
snapshot_id: 3f57b4b301599e8300e1f35418a0374e3e748f18 | revision_id: 416a81875d75f2da18e454bd479e44490830e58f | branch_name: refs/heads/master
visit_date: 2016-09-06T02:07:47.591474 | revision_date: 2014-01-18T01:39:55 | committer_date: 2014-01-18T01:39:55
github_id: 5,026,249 | star_events_count: 3 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,612 | extension: py
content:
import sys
from passlib.hash import pbkdf2_sha512
from sqlalchemy import Column, DateTime, String, Integer, Unicode, func
from sqlalchemy.ext.declarative import declared_attr, has_inherited_table
if sys.version_info < (3, 0):
builtins = __import__('__builtin__')
else:
import builtins
__version__ = '0.6'
class BasicBase(object):
"""A base sqlalchemy class that provides `id` and `created_at` fields."""
id = Column(Integer, primary_key=True)
created_at = Column(DateTime(timezone=True), default=func.now(),
index=True, nullable=False)
@declared_attr
def __tablename__(cls):
"""Set the tablename to be the lowercase of the class name.
Reference: http://docs.sqlalchemy.org/en/rel_0_9/orm/extensions/declarative.html#controlling-table-inheritance-with-mixins # noqa
"""
if has_inherited_table(cls) and BasicBase not in cls.__bases__:
return None
return cls.__name__.lower()
@classmethod
def fetch_by(cls, **kwargs):
"""Return a single object (or None) by the named attributes."""
return cls.query_by(**kwargs).first()
@classmethod
def fetch_by_id(cls, element_id):
"""Return an object (or None) by its id."""
return cls.query_by(id=int(element_id)).first()
@classmethod
def query_by(cls, **kwargs):
"""Return a query result for the named attributes."""
if not hasattr(builtins, '_sqla_mixins_session'):
raise Exception('__builtin__._sqla_mixins_session must be set to '
'your session class')
session = builtins._sqla_mixins_session()
return session.query(cls).filter_by(**kwargs)
def clone(self, exclude=None, update=None):
"""Return a shallow-copy clone of the sqlalchemy object.
Relationship objects are not copied, however foreign key assignments
held by this object are copied shallowly.
        :param exclude: If provided, should be an iterable that contains the
            names of attributes to exclude from the copy. The attributes
            `created_at` and `id` are always excluded.
:param update: If provided, should be a mapping of attribute name, to
the value that should be set.
"""
# Prepare attribute exclusion set
if not exclude:
exclude = set()
if not isinstance(exclude, set):
exclude = set(exclude)
exclude.update(('created_at', 'id'))
# Build a mapping of attributes to values
attrs = {x: getattr(self, x) for x in self.__mapper__.columns.keys()
if x not in exclude}
if update: # Update the mapping if necessary
attrs.update(update)
# Build and return the SQLA object
return self.__class__(**attrs)
def update(self, _ignore_order=False, **kwargs):
"""Update the named attributes.
Return a list of modified attribute names, or False if not updated.
Setting _ignore_order to True indicates that attribute lists should be
sorted before being compared. This is useful when updating relationship
lists.
"""
modified = []
for attr, value in kwargs.items():
self_value = getattr(self, attr)
if _ignore_order and (isinstance(self_value, list) and
isinstance(value, list)):
if sorted(self_value) != sorted(value):
setattr(self, attr, value)
modified.append(attr)
elif getattr(self, attr) != value:
setattr(self, attr, value)
modified.append(attr)
return modified or False
class UserMixin(object):
HASH_ROUNDS = 12000
SALT_SIZE = 16
username = Column(Unicode, index=True, nullable=False, unique=True)
_password = Column(String, nullable=False)
@classmethod
def hash_password(cls, password):
return pbkdf2_sha512.encrypt(password, rounds=cls.HASH_ROUNDS,
salt_size=cls.SALT_SIZE)
def __init__(self, *args, **kwargs):
if 'password' in kwargs:
kwargs['_password'] = UserMixin.hash_password(kwargs['password'])
del kwargs['password']
super(UserMixin, self).__init__(*args, **kwargs)
def set_password(self, password):
self._password = self.hash_password(password)
password = property(fset=set_password)
def verify_password(self, password):
return pbkdf2_sha512.verify(password, self._password)
authors: ["bbzbryce@gmail.com"] | author_id: bbzbryce@gmail.com
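The `query_by` helper above resolves its session through a `builtins` attribute rather than an import. A minimal usage sketch, assuming the file is importable as `sqla_mixins` and using an in-memory SQLite engine; the `User` model and engine setup are illustrative, not part of the file above.

```python
# Hedged sketch of wiring sqla_mixins to a session; every name here except
# BasicBase and UserMixin is illustrative.
import builtins
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqla_mixins import BasicBase, UserMixin

Base = declarative_base()

class User(UserMixin, BasicBase, Base):
    pass

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
builtins._sqla_mixins_session = Session  # what query_by() looks up

session = Session()
session.add(User(username=u'alice', password='s3cret'))  # password is hashed in __init__
session.commit()

user = User.fetch_by(username=u'alice')
print(user.id, user.verify_password('s3cret'))  # 1 True
```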
---

blob_id: a20a7b40b54d2af4ad2695d429a65afacd91ffcb | directory_id: 481f451f28d0b76a5705a577925401631b947391
path: /appendAndDelete.py | content_id: 9c02cd69db6e1427e87d79a8698ceeb942b66751
detected_licenses: [] | license_type: no_license | repo_name: ecarlosfonseca/HackerRank
snapshot_id: a9512c4e85947895bb1fe7218e6ba16a9d40a18a | revision_id: 64ec5b0524c0354d6d675eeff3a79e3beeec9d07 | branch_name: refs/heads/master
visit_date: 2022-11-12T11:08:39.231220 | revision_date: 2020-06-26T15:22:20 | committer_date: 2020-06-26T15:22:20
github_id: 254,875,876 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 835 | extension: py
content:
def appendAndDelete(s, t, k):
    # Determine whether string s can be transformed into string t with exactly k append/delete operations
    cycles = min(len(s), len(t))
    for cycle in range(cycles):
        if s[cycle] != t[cycle]:
            stop = cycle
            break
    else:
        stop = cycles  # the shorter string is a prefix of the longer one
    moves = len(s) - stop + len(t) - stop
    if k == moves or (k > moves and (k - moves) % 2 == 0) or (k > moves and k > len(t) and k - len(t) > len(s)):
        return 'Yes'
    else:
        return 'No'
if __name__ == '__main__':
st0 = 'hackerhappy'
stt0 = 'hackerrank'
k0 = 9
st1 = 'aba'
stt1 = 'aba'
k1 = 7
st2 = 'ashley'
stt2 = 'ash'
k2 = 2
st5 = 'y'
stt5 = 'yu'
k5 = 2
st10 = 'abcd'
stt10 = 'abcdert'
k10 = 10
print(appendAndDelete(st10, stt10, k10))
authors: ["ecarlosfonseca@gmail.com"] | author_id: ecarlosfonseca@gmail.com
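The function counts the characters outside the longest common prefix (`moves`) and accepts `k` when it equals that count, exceeds it by an even amount (delete/re-append pairs), or is large enough to erase `s` entirely and rebuild `t`. A few check calls against the function above, with expected outputs worked out by hand from that rule:

```python
# Expected results derived from the move-counting rule described above.
print(appendAndDelete('hackerhappy', 'hackerrank', 9))  # Yes: 5 deletes + 4 appends = 9 moves
print(appendAndDelete('aba', 'aba', 7))                 # Yes: 7 > len(s) + len(t), delete everything and rebuild
print(appendAndDelete('ashley', 'ash', 2))              # No: 3 deletes needed, k too small
print(appendAndDelete('abcd', 'abcdert', 10))           # No: 3 moves needed, surplus 7 is odd
```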
---

blob_id: 5a8e921432a486312e2f8bcb492440c0fa67770c | directory_id: ae0e440e3dfbea4d52f883a773203f052499f9df
path: /todo/admin.py | content_id: 59a15e032b74061486ec8a8e32ab5282a36052f2
detected_licenses: [] | license_type: no_license | repo_name: pratikp676/todo-checklist
snapshot_id: 60a98ccc687a538fff627818d60dd5cc1cb786f4 | revision_id: 14a0b7397adde01738f3c563470290212c695576 | branch_name: refs/heads/master
visit_date: 2023-08-23T07:19:37.818491 | revision_date: 2021-01-20T09:41:34 | committer_date: 2021-01-20T09:41:34
github_id: 331,258,524 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 193 | extension: py
content:
from django.contrib import admin
from .models import Todo
# Register your models here.
class TodoAdmin(admin.ModelAdmin):
readonly_fields = ('created',)
admin.site.register(Todo,TodoAdmin)
authors: ["pratikp676@gmail.com"] | author_id: pratikp676@gmail.com

---

blob_id: b8e0bd3249af43f08fa161d7a65ca004c7b51f9c | directory_id: 72b5046f9a1bf097752b3653cefba5014d01afb1
path: /API/privs.py | content_id: 5c9178a7a9211a3c0324960aad6828fd85538d89
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: IsmaeRLGV/Modular-UserBot-
snapshot_id: 471b8fc6f0ff5c5a6d639cd9a67c3c8fb87cfed4 | revision_id: e65f4001232e0748091eefc3ad85a1b8eba30c60 | branch_name: refs/heads/master
visit_date: 2021-01-22T04:54:08.377379 | revision_date: 2014-04-21T23:37:48 | committer_date: 2014-04-21T23:37:48
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,877 | extension: py
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import arrays, re, sre_constants, db, client
def IsRegister(user):
try:
InDB=re.compile(r'%s' % user, re.IGNORECASE)
except sre_constants.error:
InDB=re.compile(r'Fgt5dR5s333', re.IGNORECASE)
isRegister=False
while isRegister == False:
for i in arrays.DB_user:
if InDB.match(i[0][0]):
posc=arrays.DB_user.index(i)
isRegister=True
break
if isRegister == True:
return [True, posc]
elif isRegister == False:
return (False,0)
def info(int,user):
"""Muestra la informacion del usuario especificado.
1 - Para la informacion del status.
2 - Para mostrar la contraseña.
3 - Para mostrar el host.
4 - Para mostrar los flags.
5 - Para mostrar los puntos de juego.
0 - Toda la informacion."""
j=IsRegister(user)
if j[0] == True:
if int == 1:
i=arrays.DB_user[j[1]][4]
if i[1] == "connected":
return [True,"connected"]
else:
return [False,"disconnected"]
if int == 2:
return arrays.DB_user[j[1]][1]
if int == 3:
return arrays.DB_user[j[1]][0][1]
if int == 4:
return arrays.DB_user[j[1]][2]
if int == 5:
return arrays.DB_user[j[1]][3]
if int == 0:
return arrays.DB_user[j[1]]
else:
return [False, "Not Register"]
def seguir(i,user,host,opc1="",opc2=""):
    """ Syntax: <flags> <user> <host> </optional1> </optional2>"""
    if info(1,user)[0]:
        if i in info(4,user):
            if info(3,user)==host:
                if opc1.find(opc2) != -1:
                    return True
            else:
                client.notice(user,"The host does not match:01 %s / %s."%(info(3,user),host))
        else:
            client.notice(user,"You are not authorized to perform this operation. 01Requires: +"+i)
    else:
        client.notice(user,"User:01 nonexistent or disconnected.")
def register(user, host, password):
a=IsRegister(user)
if a[0] == False:
j=[[user, host],password,[],0,["status","connected"]]
arrays.DB_user.append(j)
i=IsRegister(user)[0]
if i == True:
db.database("API/DB/DB_user",arrays.DB_user).W_db()
return "Se completo el registro."
elif i == False:
return "No se pudo completar el registro."
else:
return "Ya se encuentra registrado."
def add_flag(user,flags):
j=IsRegister(user)
if j[0] == True:
for i in flags:
if i in ["f","j","k","o","p","q","r","s","t","v","F","S"] and not i in arrays.DB_user[j[1]][2]:
arrays.DB_user[j[1]][2].insert(0,i)
db.database("API/DB/DB_user",arrays.DB_user).W_db()
a=info(4,user)
a="".join(a)
return "Flags(%s): %s" % (user,a)
def del_flag(user,flags):
j=IsRegister(user)
if j[0] == True:
for i in flags:
if i in arrays.DB_user[j[1]][2]:
a=arrays.DB_user[j[1]][2].index(i)
del arrays.DB_user[j[1]][2][a]
db.database("API/DB/DB_user",arrays.DB_user).W_db()
a=info(4,user)
a="".join(a)
return "Flags(%s): %s" % (user,a)
def logged_out(user,host):
j=IsRegister(user)
if j[0] == True:
if arrays.DB_user[j[1]][4][1] == "connected" and arrays.DB_user[j[1]][0][1] == host:
del arrays.DB_user[j[1]][4][1]
del arrays.DB_user[j[1]][0][1]
arrays.DB_user[j[1]][0].insert(1,"")
arrays.DB_user[j[1]][4].insert(1,"disconnected")
if info(1,user)[1]=="disconnected":
return "disconnected."
def logged_in(user, host, password):
j=IsRegister(user)
if j[0] == True and info(1,user)[1] != "connected":
if arrays.DB_user[j[1]][1] == password:
arrays.DB_user[j[1]][0][1]+=host
del arrays.DB_user[j[1]][4][1]
arrays.DB_user[j[1]][4].insert(1,"connected")
if info(1,user)[1]=="connected":
return "connected."
        else:
            return "Invalid username/password."
    else:
        return "User already logged in, or nonexistent."
def find_admin():
for i in arrays.DB_user:
if "F" in info(4,i[0][0]):
return i[0][0]
def admin(target,user):
for i in arrays.DB_admins:
i=i.split()
if target[0] == i[0] and target[1] == i[1]:
a=add_flag(user, "F")
return a
authors: ["IsmaeRLGV@gmail.com"] | author_id: IsmaeRLGV@gmail.com

---

blob_id: c152cce695f77b2e55e1c0265c6a6bb5eeaf1ff7 | directory_id: 2ce89e344da74ebaf0f3f5667c91d4b15e8d108b
path: /seedingFundProject/seeding_fund/apps.py | content_id: 8e16b95dfc777f009dec890b9a31e34ca63c24ca
detected_licenses: [] | license_type: no_license | repo_name: qusaiqishta/infograph
snapshot_id: c472d6d37a99c3483fbda034426721465dfd7807 | revision_id: a3996f801219d9ed2759db5fb038ba579fcc05e3 | branch_name: refs/heads/main
visit_date: 2023-07-27T18:26:36.936218 | revision_date: 2021-09-08T12:13:01 | committer_date: 2021-09-08T12:13:01
github_id: 404,314,553 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2021-09-08T12:13:01 | gha_created_at: 2021-09-08T11:03:09 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 155 | extension: py
content:
from django.apps import AppConfig
class SeedingFundConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'seeding_fund'
authors: ["qeshtaqusai0@gmail.com"] | author_id: qeshtaqusai0@gmail.com

---

blob_id: 94cc1f4f283f3af04379dc0bfad806581e1e0337 | directory_id: 52e82cd90481b2935560a681267898f1613c707f
path: /mbs/restore.py | content_id: abfc90daa9c712322f26884e39ffc06dd97c7b64
detected_licenses: [] | license_type: no_license | repo_name: gregbanks/mongodb-backup-system
snapshot_id: 41b8b2aea01e8f4a1ed330b1d90df19a890127fd | revision_id: 03bf03e1e218831f097c533b6df658189d6d0469 | branch_name: refs/heads/master
visit_date: 2021-01-20T22:51:08.518721 | revision_date: 2013-06-23T07:48:38 | committer_date: 2013-06-23T07:48:38
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,732 | extension: py
content:
|
__author__ = 'abdul'
from task import *
from bson.dbref import DBRef
###############################################################################
# Restore
###############################################################################
class Restore(MBSTask):
def __init__(self):
# init fields
MBSTask.__init__(self)
self._source_backup = None
self._source_database_name = None
self._destination = None
self._destination_stats = None
###########################################################################
def execute(self):
"""
Override
"""
return self.strategy.run_restore(self)
###########################################################################
def cleanup(self):
"""
Override
"""
return self.strategy.cleanup_restore(self)
###########################################################################
@property
def source_backup(self):
return self._source_backup
@source_backup.setter
def source_backup(self, source_backup):
self._source_backup = source_backup
###########################################################################
@property
def source_database_name(self):
return self._source_database_name
@source_database_name.setter
def source_database_name(self, source_database_name):
self._source_database_name = source_database_name
###########################################################################
@property
def destination(self):
return self._destination
@destination.setter
def destination(self, destination):
self._destination = destination
###########################################################################
@property
def destination_stats(self):
return self._destination_stats
@destination_stats.setter
def destination_stats(self, destination_stats):
self._destination_stats = destination_stats
###########################################################################
def to_document(self, display_only=False):
doc = MBSTask.to_document(self, display_only=display_only)
doc.update({
"_type": "Restore",
"sourceBackup": DBRef("backups", self.source_backup.id),
"sourceDatabaseName": self.source_database_name,
"destination": self.destination.to_document(display_only=
display_only),
"destinationStats": self.destination_stats
})
return doc
###########################################################################
authors: ["abdul@mongolab.com"] | author_id: abdul@mongolab.com

---

blob_id: db229c44bf419a6f4cffc081ccc2674a702177f6 | directory_id: 73758dde83d1a1823c103e1a4ba71e7c95168f71
path: /nsd2005/py01/day05/stack.py | content_id: e086afc28450f9e0d7589e037f8359e59420a056
detected_licenses: [] | license_type: no_license | repo_name: tonggh220/md_5_nsd_notes
snapshot_id: 07ffdee7c23963a7a461f2a2340143b0e97bd9e1 | revision_id: a58a021ad4c7fbdf7df327424dc518f4044c5116 | branch_name: refs/heads/master
visit_date: 2023-07-02T01:34:38.798929 | revision_date: 2021-05-12T08:48:40 | committer_date: 2021-05-12T08:48:40
github_id: 393,885,415 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,082 | extension: py
content:
stack = []
def push_it():
    "Push a value onto the stack"
    data = input("Data: ").strip()
    if data:  # if the string is non-empty
        stack.append(data)
    else:
        print("\033[31;1mNo data received\033[0m")
def pop_it():
    "Pop a value off the stack"
    if stack:
        print("Popped from the stack: \033[31;1m%s\033[0m" % stack.pop())
    else:
        print("\033[31;1mThe stack is already empty\033[0m")
def view_it():
    "View the stack"
    print("\033[32;1m%s\033[0m" % stack)
def show_menu():
    "Display the menu and drive the program logic"
    prompt = """(0) push
(1) pop
(2) view
(3) quit
Please choose (0/1/2/3): """
    while 1:
        choice = input(prompt).strip()  # strip whitespace from both ends of the user input
        if choice not in ['0', '1', '2', '3']:
            print("Invalid input, please try again.")
continue
if choice == '0':
push_it()
elif choice == '1':
pop_it()
elif choice == '2':
view_it()
else:
print('Bye-bye')
break
if __name__ == '__main__':
show_menu()
authors: ["zhangzhg@tedu.cn"] | author_id: zhangzhg@tedu.cn

---

blob_id: 6d920157b1d0de15431a76545eb871345ec67ec6 | directory_id: 5808b499777b247208dc6c964a2a94d33a0fbbf8
path: /github/py_code/divideCommentsMonth.py | content_id: a83e1464532f3b5076f5bc427304c36439d724a4
detected_licenses: [] | license_type: no_license | repo_name: jiangsha1007/repoHealth
snapshot_id: 82eb723a7d65574cdac7b824149a45421e74b320 | revision_id: 32f891c78cf1ebac6b7f545eb4664a4345411d28 | branch_name: refs/heads/master
visit_date: 2020-04-01T08:57:33.263257 | revision_date: 2018-10-15T04:47:02 | committer_date: 2018-10-15T04:47:02
github_id: 153,053,761 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,181 | extension: py
content:
import json
import re
import os
import time
import datetime
def divideCommentsMonth(repo,endYear):
repos = repo.split(sep="/")
folder = repos[1]
cDir = "public/data/" + repo+ "/" + "comments/"
files = os.listdir(cDir)
# print(files)
pages=len(files)
print(pages)
comments_created = {}
for page in range(1,pages+1):
print(page)
with open(cDir + "allComments-"+str(page)+".json",'r') as f:
data = json.loads(f.read())
for item in data:
                date = item["created_at"]
                date = date[0:7]  # keep the YYYY-MM prefix
if (date not in comments_created):
comments_created[date] = []
comments_created[date].append(item)
for year in range(2008,int(endYear) + 1):
for month in range(1,13):
date = "%d-%02d" %(year,month)
if( date not in comments_created):
with open(cDir + date + ".json",'w') as f:
json.dump({},f)
else :
with open(cDir + date + ".json",'w') as f:
json.dump(comments_created[date],f)
authors: ["jiangsha1007@sina.com"] | author_id: jiangsha1007@sina.com
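The script's core idea is to bucket GitHub comments by the `YYYY-MM` prefix of their `created_at` timestamp and then write one JSON file per month. A standalone sketch of that bucketing step, on made-up sample data:

```python
# Group ISO timestamps by their YYYY-MM prefix, as divideCommentsMonth does;
# the sample comments are fabricated.
comments = [
    {"created_at": "2018-10-15T04:47:02Z", "body": "LGTM"},
    {"created_at": "2018-10-20T09:00:00Z", "body": "needs tests"},
    {"created_at": "2018-11-01T12:30:00Z", "body": "rebase please"},
]
by_month = {}
for item in comments:
    month = item["created_at"][0:7]          # "2018-10", "2018-11", ...
    by_month.setdefault(month, []).append(item)
print({m: len(v) for m, v in by_month.items()})  # {'2018-10': 2, '2018-11': 1}
```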
---

blob_id: e14a8cf3d8884d6bf6d9581d2e4d79e26aa55349 | directory_id: 4e79dcb25de7418d361e27499755aa7fdb4db3e5
path: /frontend/views/views_index.py | content_id: 67fb34f016663ad6c1686724d02b7e94af385ae4
detected_licenses: [] | license_type: no_license | repo_name: notedit/eightfoot
snapshot_id: 95e1df2021d113dfee7e94938198628ac58f4ade | revision_id: 2bf861bccb540caa86066e4d737e253623d7afdf | branch_name: refs/heads/master
visit_date: 2016-09-10T14:57:24.998704 | revision_date: 2012-07-16T05:43:23 | committer_date: 2012-07-16T05:43:23
github_id: 4,460,610 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,541 | extension: py
content:
# -*- coding: utf-8 -*-
# date: 2012-05-29
# author: notedit
"""
Your time is limited, so don't waste it living someone else's life.
Don't be trapped by dogma - which is living with the results of other
people's thinking. Don't let the noise of other's opinions drown out
your own inner voice. And most important, have the courage to follow
your heart and intuition. They somehow already know what you truly
want to become. Everything else is secondary.
by Steve Jobs
"""
import os
from pprint import pprint
from django.conf import settings
from django.shortcuts import render_to_response
from django.http import HttpResponse
from libshare import oocrpc
from libshare import authutil
from libshare import strutil
RC = settings.RC
oocrpc.backend = settings.RPC
def index(req,page=1):
"""首页"""
offset = (page-1)*25
comm_dict = {}
is_logined = authutil.is_logined(req)
if is_logined:
curr_ukey = req.COOKIES.get('ukey')
follow_count = oocrpc.backend.GetFollowContentCount(curr_ukey)
follow_contents = oocrpc.backend.GetFollowContent({'Ukey':curr_ukey,'Offset':offset,'Limit':25})
pager = strutil.pager(page,follow_count,'/index/',per_page=25)
user_info = oocrpc.backend.GetUserInfo(curr_ukey)
comm_dict.update({'contents':follow_contents,'ukey':curr_ukey,'pager':pager,
'is_logined':True,'user_info':user_info})
else:
# hotest
hotest_count = oocrpc.backend.GetContentCount() # to do
hotest_contents = oocrpc.backend.GetHotestContent({'Offset':offset,'Limit':25}) # to do
pager = strutil.pager(page,hotest_count,'/index/',per_page=25)
comm_dict.update({'contents':hotest_contents,'pager':pager})
pprint(comm_dict)
return render_to_response('index.html',comm_dict)
def index_latest(req,page=1):
offset = (page-1)*25
comm_dict = {}
newest_count = oocrpc.backend.GetContentCount()
newest_contents = oocrpc.backend.GetLatestContent({'Offset':offset,'Limit':25})
    pager = strutil.pager(page,newest_count,'/index/newest/',per_page=25)
comm_dict.update({'newest_count':newest_count,'newest_contents':newest_contents,'pager':pager})
is_logined = authutil.is_logined(req)
if is_logined:
curr_ukey = req.COOKIES.get('ukey')
user_info = oocrpc.backend.GetUserInfo(curr_ukey)
comm_dict.update({'curr_ukey':curr_ukey,'user_info':user_info})
return render_to_response('index_newest.html',comm_dict)
def index_hotest(req,page=1):
offset = (page-1)*25
comm_dict = {}
hotest_count = oocrpc.backend.GetContentCount()
hotest_contents = oocrpc.backend.GetHotestContent({'Offset':offset,'Limit':25})
    pager = strutil.pager(page,hotest_count,'/index/',per_page=25)
    comm_dict.update({'hotest_count':hotest_count,'hotest_contents':hotest_contents,'pager':pager})
is_logined = authutil.is_logined(req)
if is_logined:
curr_ukey = req.COOKIES.get('ukey')
user_info = oocrpc.backend.GetUserInfo(curr_ukey)
comm_dict.update({'curr_ukey':curr_ukey,'user_info':user_info})
return render_to_response('index_hotest.html',comm_dict)
def test_rpc(req):
username = "young man"
username = oocrpc.backend.GetHelloWorld('hey young man')
return HttpResponse(username)
### Unittest #################################################################
from django.utils import unittest
from django.test.client import Client
class TestView(unittest.TestCase):
def setUp(self):
pass
def test_index_hotest(self):
pass
def test_index_newest(self):
pass
def test_index(self):
pass
authors: ["notedit@gmail.com"] | author_id: notedit@gmail.com

---

blob_id: ee581ee3b57018e57d9c7839908a6a1af6ba7a3e | directory_id: 2eaa44d462e916a69341a43cdeda96defea50003
path: /generateFeatures.py | content_id: e2d28bf4e5ea92ba37d0eb795c36b1d06911b667
detected_licenses: [] | license_type: no_license | repo_name: jvalici/cagoleCaca
snapshot_id: b63475d85b0165b11be3e997593bf87f2be96394 | revision_id: 3f3ae00d0d9fe5692221e14a3b5dbff2dcdc50e9 | branch_name: refs/heads/master
visit_date: 2021-07-16T20:44:18.733352 | revision_date: 2017-10-24T22:39:25 | committer_date: 2017-10-24T22:39:25
github_id: 108,190,674 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 841 | extension: py
content:
import numpy as np
import pandas as pd
# generate the features for all users with ids in [currentId, nextId-1].
# dfs is the list of the four dataframes: members, transactions, user_logs, and train
def generate_features( currentId, nextId, dfs ):
ids = np.arange(currentId, nextId)
indicesLeft = [np.zeros(1), np.zeros(1), np.zeros(1), np.zeros(1)]
counts = [np.zeros(1), np.zeros(1), np.zeros(1), np.zeros(1)]
for i in range(4):
indicesLeft[i] = dfs[i][0].searchsorted( ids, side='left' )
counts[i] = dfs[i][0].searchsorted( ids, side='right' )
counts[i] = np.subtract(counts[i], indicesLeft[i])
        counts[i] = np.where( counts[i] == 0, 0, counts[i] )
return pd.DataFrame.from_dict( {0:ids, 1:counts[0], 2:counts[1], 3:counts[2], 4:counts[3] }, orient = 'columns')
authors: ["jvalici@gmail.com"] | author_id: jvalici@gmail.com
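The counting trick above relies on `searchsorted` over a sorted id column: the difference between the `right` and `left` insertion points of an id is its number of occurrences. A minimal sketch of the same idea on a plain sorted NumPy array (the sample ids are made up):

```python
# Count occurrences of each id in a sorted array via searchsorted.
import numpy as np

sorted_ids = np.array([3, 3, 3, 4, 6, 6])
ids = np.arange(3, 7)                      # ids 3..6
left = np.searchsorted(sorted_ids, ids, side='left')
right = np.searchsorted(sorted_ids, ids, side='right')
print(right - left)                        # [3 1 0 2] -> id 5 never appears
```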
---

blob_id: ff04bc0bf64a188a05cc335675fe9a68c84f5d29 | directory_id: 6c9de356229e3f58a17ce86565874665521f27e6
path: /commons/pre_process.py | content_id: 684c237d0de2f888e18e669070ff6a772a702333
detected_licenses: [] | license_type: no_license | repo_name: LXY919/BpAnalysis
snapshot_id: e5effc7bd1f64df4cc66de66296872e59b084288 | revision_id: 947dc55efb085acf359add19f3d2461184e8474d | branch_name: refs/heads/master
visit_date: 2020-07-28T21:27:54.288568 | revision_date: 2019-09-18T14:52:54 | committer_date: 2019-09-18T14:52:54
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,107 | extension: py
content:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from wavelet import wavelet_filter
"""
2019-9-5
对数据做预处理,过程包括:
1. 去除DC分量
2. 均值+小波滤波
3. 去除基线漂移
4. 归一化波形
"""
def remove_dc(table):
    # 1. Remove the DC component
min_ir1 = min(table.ir1)
min_ir2 = min(table.ir2)
min_red1 = min(table.red1)
min_red2 = min(table.red2)
ir1 = [_ - min_ir1 for _ in table.ir1]
ir2 = [_ - min_ir2 for _ in table.ir2]
red1 = [_ - min_red1 for _ in table.red1]
red2 = [_ - min_red2 for _ in table.red2]
table.ir1 = ir1
table.ir2 = ir2
table.red1 = red1
table.red2 = red2
return table
def filter(table):
    # 2. Mean + wavelet filtering
    # rolling mean
table.ir1 = table.ir1.rolling(window=30).mean()
table.ir2 = table.ir2.rolling(window=30).mean()
table.red1 = table.red1.rolling(window=30).mean().rolling(window=30).mean()
table.red2 = table.red2.rolling(window=30).mean().rolling(window=30).mean()
table = table[120:]
################################################
    # Invert the waveform
ir1_max = max(table.ir1)
print(ir1_max)
ir2_max = max(table.ir2)
red1_max = max(table.red1)
red2_max = max(table.red2)
ir1 = [ir1_max - _ for _ in table.ir1]
ir2 = [ir2_max - _ for _ in table.ir2]
red1 = [red1_max - _ for _ in table.red1]
red2 = [red2_max - _ for _ in table.red2]
################################################
    # Wavelet filtering
table.ir1 = wavelet_filter(ir1)
table.ir2 = wavelet_filter(ir2)
table.red1 = wavelet_filter(red1)
table.red2 = wavelet_filter(red2)
return table
if __name__ == '__main__':
df = pd.read_table('../new_sensor/raw/14_50_02.txt', sep=',', header=None)
df.columns = ['red1', 'ir1', 'red2', 'ir2']
df = df[50:]
df.reset_index(drop=True, inplace=True)
###################################################################
    # Raw data
    # plt.figure()
    # fig1 = plt.subplot(211)
    # plt.plot(df.ir1, c='b')
    # plt.xlabel('Time(s)',fontsize=18)
    # plt.ylabel('Amplitude',fontsize=18)
    # x_ticks = [x for x in range(len(df.ir1)) if x % 400 == 0]
    # fig1.set_xticks(x_ticks)
    # fig1.set_xticklabels([x//400 for x in x_ticks],fontsize=15)
    # plt.title('Raw Pulse Wave',fontsize=20)
###################################################################
    # Process the data
df = remove_dc(df)
df = filter(df)
###################################################################
    # Resulting data
    fig2 = plt.subplot(111)
    plt.plot(df.red2, c='b')
    # plt.plot(df.red2, c='r')
    plt.xlabel('Time(s)', fontsize=18)
    plt.ylabel('Amplitude', fontsize=18)
    x_ticks = [x for x in range(len(df.ir2)) if x % 400 == 0]
    fig2.set_xticks(x_ticks)
    fig2.set_xticklabels([x // 400 for x in x_ticks], fontsize=15)
    plt.title('Pulse Wave After Pre-processing', fontsize=20)
plt.subplots_adjust(wspace=0, hspace=0.5)
plt.show()
###################################################################
authors: ["1050748528@qq.com"] | author_id: 1050748528@qq.com
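`filter()` above chains two 30-sample rolling means and then slices off the first 120 rows; the slice is needed because a rolling window leaves NaNs at the head of the series, and chaining widens that NaN head. A small sketch of the edge effect on synthetic data:

```python
# Edge effect of a rolling mean: the first window-1 values are NaN, and
# chaining two windows widens the NaN head. The data here is synthetic.
import numpy as np
import pandas as pd

s = pd.Series(np.sin(np.linspace(0, 10, 200)))
once = s.rolling(window=30).mean()
twice = once.rolling(window=30).mean()
print(once.isna().sum(), twice.isna().sum())   # 29 58
```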
---

blob_id: 56d8cc7bf85206e899e481ec653355b243195992 | directory_id: f2420fe7530c4db1f8e4c23358be24ae4c7e9632
path: /train_VIE_SLEEP.py | content_id: 0198b6adcfdb663daac2c1ea1974a3b8036366e6
detected_licenses: [] | license_type: no_license | repo_name: ZidiXiu/VIE
snapshot_id: 2b7443737304af3e356b109f6a592628a3444395 | revision_id: 555d4c84f15ff87ee05ab51bca6fc0fa4c4b7edf | branch_name: refs/heads/master
visit_date: 2022-12-29T22:18:39.257966 | revision_date: 2020-10-14T00:44:09 | committer_date: 2020-10-14T00:44:09
github_id: 297,220,935 | star_events_count: 4 | fork_events_count: 5
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 18,928 | extension: py
content:
from __future__ import print_function
import math
import os
import numpy as np
import pandas
import argparse
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from torchvision.utils import save_image
from torch.utils.data import Dataset, DataLoader, Sampler
from torchvision import transforms, utils
import pandas as pd
# from torch.utils.tensorboard import SummaryWriter
from torch.distributions import normal
import sklearn.metrics
import torchvision
from data.simulation import simulation_cox_weibull, formatted_data_simu, saveDataCSV
from data.EVT_dataloader import EVTDataset, EVTDataset_dic,ImbalancedDatasetSampler, callback_get_label
from utils.distributions import mixed_loglikeli, loglog_function, sample_mixedGPD, log_sum_exp
from utils.preprocessing import loadDataDict, flatten_nested, datadicTimeCut_delcensor
from data.sleep_data import generate_data
# from networks.VIEVT import IAF, Decoder, Nu, log_score_marginal
# from networks.VIEVT import testing_VIEVT, pred_avg_risk
from networks.VIEVT_outHz import IAF, Decoder, Nu, log_score_marginal
from networks.VIEVT_outHz import testing_VIEVT, pred_avg_risk
from utils.metrics import binary_cross_entropy, view_distribution_z_e_hz, view_z_e, view_z_box, view_z_dist
from utils.metrics import boostrappingCI
from pathlib import Path
# Load SLEEP dataset
df=generate_data()
train_o, valid_o, test_o = df['train'], df['test'], df['valid']
del df
df={'x': np.concatenate([train_o['x'], valid_o['x'], test_o['x']],axis=0),\
'e': np.concatenate([train_o['e'], valid_o['e'], test_o['e']],axis=0),\
't': np.concatenate([train_o['t'], valid_o['t'], test_o['t']],axis=0)}
n_samples, ncov = df['x'].shape
# # cut as a whole
# data_name = 'er05'
# df = datadicTimeCut(df, time_cut=600)
# seed = 1234
# lambda_ = [1.0, 1e-3, 1e-5]
data_name = 'er01'
df = datadicTimeCut_delcensor(df, time_cut=150)
seed=1111
lambda_ = [1.0, 1e-4, 1e-6]
np.random.seed(seed)
perm_idx = np.random.permutation(n_samples)
train_idx = perm_idx[0:int(3*n_samples/6)]
valid_idx = perm_idx[int(3*n_samples/6):int(4*n_samples/6)]
test_idx = perm_idx[int(4*n_samples/6):n_samples]
train = formatted_data_simu(df['x'], df['t'], df['e'], train_idx)
test = formatted_data_simu(df['x'], df['t'], df['e'], test_idx)
valid = formatted_data_simu(df['x'], df['t'], df['e'], valid_idx)
print(np.mean(train['e']), np.mean(valid['e']), np.mean(test['e']))  # event rate per split
del df, train_o, test_o, valid_o
result_path_root = './results/'
result_path = result_path_root+"SLEEP"+'/'+data_name
Path(result_path).mkdir(parents=True, exist_ok=True)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.cuda.set_device(1)
# device = torch.device('cpu')
model_path = result_path+"/saved_models"
Path(model_path).mkdir(parents=True, exist_ok=True)
plot_path = result_path+"/plots"
Path(plot_path).mkdir(parents=True, exist_ok=True)
event_rate = np.mean(train['e'])
ncov = train['x'].shape[1]
########## Hyper-parameters ##############
# set hyperparameters
model_name = 'VIE'
z_dim = 4
hidden_layers=[32,32,32]
eps_dim = int(ncov)
input_size = ncov+eps_dim
unroll_steps = 5
nu_lambda=1.0
epochs = 500
batch_size = 200
flow_path = result_path+"/saved_models/"+model_name+'_flow'+".pt"
decoder_path = result_path+"/saved_models/"+model_name+'_decoder'+".pt"
nu_path = result_path+"/saved_models/"+model_name+'_nu'+".pt"
training = True
unroll_test = True
u_bound = np.max([0.99, 1-event_rate])
lower_bound = -5.0
N = 100
IAF_flow = IAF(input_size, z_dim=z_dim, h_dim=z_dim, hidden_layers=hidden_layers, nstep=5, device=device)
decoder = Decoder(z_dim=z_dim, hidden_layer_MNN=[32,32,32],loglogLink=True)
nu = Nu(z_dim=z_dim, ncov=ncov, hidden_layers=[32,32], marginal=True)
decoder.to(device)
IAF_flow.to(device)
nu.to(device)
# define optimizer
opt_flow = optim.Adam(IAF_flow.parameters(), lr=1e-4)
opt_dec = optim.Adam(decoder.parameters(), lr=1e-4)
opt_nu = optim.RMSprop( nu.parameters(), lr = 1e-3)
aggressive_flag = True
aggressive_nu = True
# splitting to training/validation/testing
cat_covariates = np.array([])
continuous_variables = np.setdiff1d(np.arange(ncov), cat_covariates)
# consider normalization of inputs
norm_mean = np.mean(train['x'][:,continuous_variables],axis=0)
norm_std = np.std(train['x'][:,continuous_variables],axis=0)
# delete variable with 0 std
continuous_variables = np.delete(continuous_variables, np.where(norm_std==0.0)[0])
norm_mean = np.nanmean(train['x'][:,continuous_variables],axis=0)
norm_std = np.nanstd(train['x'][:,continuous_variables],axis=0)
EVT_train = EVTDataset_dic(train,transform=True,norm_mean=norm_mean, norm_std=norm_std, continuous_variables=continuous_variables)
EVT_valid = EVTDataset_dic(valid,transform=True,norm_mean=norm_mean, norm_std=norm_std, continuous_variables=continuous_variables)
#
# train with imbalanced sampler
train_loader = DataLoader(EVT_train, batch_size=batch_size, sampler=ImbalancedDatasetSampler(train, callback_get_label=callback_get_label))
# valid_loader = DataLoader(EVT_valid, batch_size=batch_size*10, sampler=ImbalancedDatasetSampler(valid, callback_get_label=callback_get_label))
# validation on the original scale
valid_loader = DataLoader(EVT_valid, batch_size=1000, shuffle=True)
del train
## define aggressive training
def aggressive_step():
opt_flow.zero_grad()
opt_dec.zero_grad()
best_z, likelihood_qzx = IAF_flow(batched_x.float(), eps_.float())
assert (best_z != best_z).any()== False
pred_risk_cur = decoder(best_z, N, lower_bound).float()
BCE_loss = binary_cross_entropy(pred_risk_cur, \
batched_e.detach().float(), sample_weight=batch_weight.float())
z_nu, pz_nu, nanFlag = log_score_marginal(nu=nu, z=best_z, mu=IAF_flow.mu0, logvar=IAF_flow.logvar0, \
xi_=IAF_flow.xi_, sigma_=IAF_flow.sigma_,\
p_ = u_bound, eps=1e-3, nu_lambda=nu_lambda,device=device, train_nu=False)
# calculate KL(q(z|x)||p(z))
likelihood_pz = mixed_loglikeli(best_z, IAF_flow.mu0, IAF_flow.logvar0, IAF_flow.xi_, IAF_flow.sigma_, u_bound)
assert (likelihood_pz != likelihood_pz).any()== False
KL_cond = likelihood_qzx.sum() - likelihood_pz.sum()
loss = lambda_[0]*BCE_loss + lambda_[1]*(z_nu - pz_nu) + lambda_[2]*KL_cond
loss.backward()
torch.nn.utils.clip_grad_norm_(IAF_flow.parameters(), 1e-4)
opt_flow.step()
return loss.item()
# training process
if __name__ == "__main__":
if training:
best_valid_loss = np.inf
best_valid_recon_loss = np.inf
best_valid_pos_loss = np.inf
best_valid_auc = 0
best_epoch = 0
nanFlag = 0
# save training process
train_z_nu = []
train_pz_nu = []
train_KL = []
train_BCE = []
last_shrink = 0
# model.train()
for epoch in range(1, epochs + 1):
if nanFlag == 1:
break
# train(epoch)
# test(epoch)
train_loss = 0
valid_loss = 0
valid_recon_loss = 0
valid_pos_loss = 0
pre_mi = 0
improved_str = " "
# detect errors
# with torch.autograd.detect_anomaly():
for batch_idx, batched_sample in enumerate(train_loader):
# print(batch_idx)
if nanFlag == 1:
break
IAF_flow.train()
decoder.train()
nu.train()
batched_x = batched_sample['x']
batched_x = batched_x.to(device).view(-1, ncov)
batched_e = batched_sample['e'].to(device)
batch_weight = batched_e.clone().detach().data*event_rate + (1-batched_e.clone().detach().data)*(1-event_rate)
# add noise
eps_ = (torch.Tensor( batched_x.shape[0], eps_dim).normal_()).to(device)
best_z, likelihood_qzx = IAF_flow(batched_x.float(), eps_.float())
try:
assert (best_z != best_z).any()== False
except AssertionError:
break
# aim to update nu based on conditional q
# update multiple times of the critic
if aggressive_nu:
if epoch > 10:
aggressive_nu = False
print("STOP multiple learning of nu")
for iter_ in range(unroll_steps):
## conditional posterior
# aim to update nu based on marginal q
z_nu, pz_nu, loss_nu, nanFlag = log_score_marginal(nu=nu, z=best_z, \
mu=IAF_flow.mu0, logvar=IAF_flow.logvar0,\
xi_=IAF_flow.xi_, sigma_=IAF_flow.sigma_,\
p_ = u_bound, eps=1e-3, nu_lambda=nu_lambda,\
device=device,train_nu=True, opt_nu=opt_nu)
if ((1*torch.isnan(best_z)).sum() + (1*torch.isnan(pz_nu)).sum() + (1*torch.isnan(z_nu)).sum()).item()>0:
print("NaN occured at critic training")
# print(z_init)
print(IAF_flow.xi_, IAF_flow.sigma_, IAF_flow.mu0, IAF_flow.logvar0)
nanFlag = 1
break
else:
z_nu, pz_nu, loss_nu, nanFlag = log_score_marginal(nu=nu, z=best_z,\
mu=IAF_flow.mu0, logvar=IAF_flow.logvar0, \
xi_=IAF_flow.xi_, sigma_=IAF_flow.sigma_,\
p_ = u_bound, eps=1e-3, nu_lambda=nu_lambda,\
device=device, train_nu=True, opt_nu=opt_nu)
# update encoder and decoder's parameters
if aggressive_flag:
sub_iter = 0
while sub_iter < 10:
                        sub_loss = aggressive_step()
# print(sub_iter,sub_loss)
sub_iter += 1
opt_dec.zero_grad()
opt_flow.zero_grad()
BCE_loss = binary_cross_entropy(decoder(best_z, N, lower_bound).float(), \
batched_e.detach().float(), sample_weight=batch_weight.float())
z_nu, pz_nu, nanFlag = log_score_marginal(nu=nu, z=best_z, mu=IAF_flow.mu0, logvar=IAF_flow.logvar0, \
xi_=IAF_flow.xi_, sigma_=IAF_flow.sigma_,\
p_ = u_bound, eps=1e-3, nu_lambda=nu_lambda,device=device, train_nu=False)
likelihood_pz = mixed_loglikeli(best_z, IAF_flow.mu0, IAF_flow.logvar0, IAF_flow.xi_, IAF_flow.sigma_, u_bound)
KL_cond = likelihood_qzx.sum() - likelihood_pz.sum()
# print(likelihood_qzx, likelihood_pz.sum())
loss = lambda_[0]*BCE_loss + lambda_[1]*(z_nu - pz_nu) + lambda_[2]*KL_cond
loss.backward()
train_z_nu.append(z_nu.item())
train_pz_nu.append(pz_nu.item())
train_BCE.append(BCE_loss.item())
train_KL.append(KL_cond.item())
train_loss += loss.item()
if not aggressive_flag:
torch.nn.utils.clip_grad_norm_(IAF_flow.parameters(), 1e-4)
opt_flow.step()
opt_dec.step()
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, train_loss))
if nanFlag == 1:
break
# check performance on validation dataset
# with torch.no_grad():
if nanFlag == 0:
IAF_flow.eval()
decoder.eval()
nu.eval()
for i, batched_sample in enumerate(valid_loader):
batched_x = batched_sample['x']
batched_x = batched_x.to(device).view(-1, ncov)
batched_e = batched_sample['e'].to(device)
# add noise
eps_ = (torch.Tensor( batched_x.shape[0], eps_dim).normal_()).to(device)
batch_z, likelihood_qzx = IAF_flow(batched_x.float(), eps_.float())
if aggressive_flag:
cur_mi = likelihood_qzx.sum() - (log_sum_exp(likelihood_qzx)).sum()
if cur_mi - pre_mi < 0:
aggressive_flag = False
print("STOP aggressive learning")
cur_mi = pre_mi
# pred_risk_batch = decoder(batch_z, N, lower_bound)
pred_risk_batch, likelihood_qzx= pred_avg_risk(batched_x, eps_dim, IAF_flow, decoder, device, n_avg=1)
valid_recon_, pos_recon_ = binary_cross_entropy(pred_risk_batch.float(), \
batched_e.detach().float(), sample_weight=None, pos_acc=True)
# based on marginal q
z_nu, pz_nu,nanFlag = log_score_marginal(nu=nu, z=batch_z, mu=IAF_flow.mu0, logvar=IAF_flow.logvar0, \
xi_=IAF_flow.xi_, sigma_=IAF_flow.sigma_,\
p_ = u_bound, eps=1e-3, device=device, train_nu=False)
# based on conditional q
likelihood_pz = mixed_loglikeli(batch_z, IAF_flow.mu0, IAF_flow.logvar0, IAF_flow.xi_, IAF_flow.sigma_, u_bound)
KL_cond = likelihood_qzx.sum() - likelihood_pz.sum()
valid_loss_ = valid_recon_ + z_nu - pz_nu + KL_cond
# calculating AUC
pred_risk = pred_risk_batch.cpu().detach().squeeze().numpy()
nonnan_idx = np.where(np.isnan(pred_risk)==False)[0]
pred_risk = pred_risk[nonnan_idx]
valid_auc_ = sklearn.metrics.roc_auc_score(batched_sample['e'][nonnan_idx,:].cpu().squeeze().numpy(),\
pred_risk).item()
# # calculating F1 score
# valid_F1 = F1_score(batched_sample['e'].cpu().squeeze().numpy(),\
# pred_risk_batch.cpu().detach().squeeze().numpy(), beta=1.0)
valid_loss = valid_loss + valid_loss_.item()
valid_recon_loss = valid_recon_loss + valid_recon_.item()
valid_pos_loss = valid_pos_loss + pos_recon_.item()
break
# only save non-nan models
if np.isnan(valid_recon_loss) == False:
save_model = 0
if (valid_recon_loss < best_valid_recon_loss) or (valid_pos_loss < best_valid_pos_loss) or (valid_auc_ > best_valid_auc):
if (valid_recon_loss < best_valid_recon_loss):
# best_valid_recon_loss = valid_recon_loss
# torch.save(model.state_dict(), model_path)
save_model += 1
if (valid_pos_loss < best_valid_pos_loss):
# best_valid_pos_loss = valid_pos_loss
save_model += 1
if (valid_auc_ > best_valid_auc):
# best_valid_auc = valid_auc_
save_model += 1
# save current model
if save_model > 1:
# Save current metrics as standard
best_valid_pos_loss = valid_pos_loss
best_valid_auc = valid_auc_
best_valid_recon_loss = valid_recon_loss
best_epoch = epoch
torch.save(IAF_flow.state_dict(), flow_path)
torch.save(decoder.state_dict(), decoder_path)
torch.save(nu.state_dict(), nu_path)
improved_str = "*"
# prior_z = sample_mixedGPD(8000, mu=IAF_flow.mu0, logvar=IAF_flow.logvar0,\
# xi_=IAF_flow.xi_, sigma_=IAF_flow.sigma_,\
# p_ = u_bound, lower_bound = -5.0, upper_bound = 50, device=device)
# view_distribution(batch_z, prior_z, model_name, plot_path)
if (epoch - best_epoch >=10) and (epoch - last_shrink >=10):
lambda_[1] = lambda_[1] * 5e-1
lambda_[2] = lambda_[2] * 5e-1
last_shrink = epoch
print('====> Valid BCE loss: {:.4f}\t Pos Recon Loss: {:.4f} KL Loss: {:.4f} AUC: {:.4f} \tImproved: {}'.format(valid_recon_loss, valid_pos_loss, KL_cond, valid_auc_, improved_str))
if epoch - best_epoch >=30:
print('Model stopped due to early stopping')
break
# report results in testing
pred_label_risk, batch_z, Hz, auc_, auprc_ = testing_VIEVT(test, IAF_flow, flow_path, decoder, decoder_path, nu, nu_path, model_name, result_path, eps_dim, transform = True, norm_mean=norm_mean, norm_std=norm_std, continuous_variables=continuous_variables, device=device, saveResults=True)
# bootstrapping
_auc, _auprc = boostrappingCI(test['e'], pred_label_risk, "VIE", N=1000, nseed=124)
np.save(result_path+'/'+'VIEVT_bootstrap_auc', _auc)
np.save(result_path+'/VIEVT_bootstrap_auprc', _auprc)
authors: ["zx35@duke.edu"] | author_id: zx35@duke.edu

---

blob_id: 6195b3fe9424518c1c4191f0f89cc620ea98a3c4 | directory_id: 6bbc9d1b6f031f64150bc016a536cb7d2c687032
path: /yibo/tempest/tempest/api/volume/test_sf_volumes_attach.py | content_id: 323a9e53bb662a5b60c5c49e29a7610290356230
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: laoyigrace/files
snapshot_id: a4c91a47ba605aabfdd25abf574610ddb479969c | revision_id: 1ccdab06d5800572ee0fc569c87d56332efe1538 | branch_name: refs/heads/master
visit_date: 2020-07-07T23:25:03.674206 | revision_date: 2017-05-19T14:49:24 | committer_date: 2017-05-19T14:49:24
github_id: 66,083,740 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,875 | extension: py
content:
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest import test
import testtools
CONF = config.CONF
class VolumesV2AttachTest(base.BaseVolumeTest):
@classmethod
def setup_clients(cls):
super(VolumesV2AttachTest, cls).setup_clients()
cls.client = cls.volumes_client
cls.image_client = cls.os.image_client
@classmethod
def resource_setup(cls):
super(VolumesV2AttachTest, cls).resource_setup()
# Create a test shared instance
srv_name = data_utils.rand_name(cls.__name__ + '-Instance')
cls.server = cls.create_server(srv_name)
waiters.wait_for_server_status(cls.servers_client, cls.server['id'],
'ACTIVE')
# Create a test shared volume for attach/detach tests
cls.volume = cls.create_volume()
cls.client.wait_for_volume_status(cls.volume['id'], 'available')
def _delete_image_with_wait(self, image_id):
self.image_client.delete_image(image_id)
self.image_client.wait_for_resource_deletion(image_id)
@classmethod
def resource_cleanup(cls):
# Delete the test instance
cls.servers_client.delete_server(cls.server['id'])
cls.servers_client.wait_for_server_termination(cls.server['id'])
super(VolumesV2AttachTest, cls).resource_cleanup()
@test.idempotent_id('e63b0859-c81c-47de-8929-1169100eb0b7')
@test.stresstest(class_setup_per='process')
@test.attr(type='smoke')
@test.services('compute')
def test_get_volume_attachment(self):
# Verify that a volume's attachment information is retrieved
mountpoint = '/dev/vdc'
self.client.attach_volume(self.volume['id'],
self.server['id'],
mountpoint)
self.client.wait_for_volume_status(self.volume['id'], 'in-use')
# NOTE(gfidente): added in reverse order because functions will be
# called in reverse order to the order they are added (LIFO)
self.addCleanup(self.client.wait_for_volume_status,
self.volume['id'],
'available')
self.addCleanup(self.client.detach_volume, self.volume['id'])
volume = self.client.show_volume(self.volume['id'])
self.assertIn('attachments', volume)
attachment = self.client.get_attachment_from_volume(volume)
self.assertEqual(mountpoint, attachment['device'])
self.assertEqual(self.server['id'], attachment['server_id'])
self.assertEqual(self.volume['id'], attachment['id'])
self.assertEqual(self.volume['id'], attachment['volume_id'])
@test.idempotent_id('0257f24e-f8c7-43b2-bd60-bcda77ea11b4')
@test.stresstest(class_setup_per='process')
@test.attr(type='smoke')
@test.services('compute')
def test_get_volume_detachment(self):
# Volume is attached and detached successfully from an instance
mountpoint = '/dev/vdc'
self.client.attach_volume(self.volume['id'],
self.server['id'],
mountpoint)
self.client.wait_for_volume_status(self.volume['id'], 'in-use')
self.client.detach_volume(self.volume['id'])
self.client.wait_for_volume_status(self.volume['id'], 'available')
volume = self.client.show_volume(self.volume['id'])
self.assertIn('attachments', volume)
self.assertEqual(0, len(volume['attachments']))
class VolumesV2MultiAttachTest(base.BaseVolumeTest):
@classmethod
def setup_clients(cls):
super(VolumesV2MultiAttachTest, cls).setup_clients()
cls.client = cls.volumes_client
cls.image_client = cls.os.image_client
@classmethod
def resource_setup(cls):
super(VolumesV2MultiAttachTest, cls).resource_setup()
# Create a test shared instance
srv_name = data_utils.rand_name(cls.__name__ + '-Instance')
cls.server = cls.create_server(srv_name)
waiters.wait_for_server_status(cls.servers_client, cls.server['id'],
'ACTIVE')
        # Create three test shared volumes for attach tests
cls.metadata = {'Type': 'work'}
for i in range(3):
cls.volume = cls.create_volume(metadata=cls.metadata)
cls.client.wait_for_volume_status(cls.volume['id'], 'available')
@classmethod
def resource_cleanup(cls):
# Delete the test instance
cls.servers_client.delete_server(cls.server['id'])
cls.servers_client.wait_for_server_termination(cls.server['id'])
super(VolumesV2MultiAttachTest, cls).resource_cleanup()
@test.idempotent_id('714394dc-767c-4853-b43a-52b21ad77e5f')
@test.stresstest(class_setup_per='process')
@test.services('compute')
def test_get_volume_attachment(self):
# Verify that a volume's attachment information is retrieved
i = 0
for volume in self.volumes:
flag = ['a', 'b', 'c', 'd']
mountpoint = '/dev/vd%s' % flag[i]
i += 1
self.client.attach_volume(volume['id'],
self.server['id'],
mountpoint)
self.client.wait_for_volume_status(volume['id'], 'in-use')
# NOTE(gfidente): added in reverse order because functions will be
# called in reverse order to the order they are added (LIFO)
self.addCleanup(self.client.wait_for_volume_status,
volume['id'],
'available')
self.addCleanup(self.client.detach_volume, volume['id'])
volume = self.client.show_volume(volume['id'])
self.assertIn('attachments', volume)
attachment = self.client.get_attachment_from_volume(volume)
self.assertEqual(mountpoint, attachment['device'])
self.assertEqual(self.server['id'], attachment['server_id'])
self.assertEqual(volume['id'], attachment['id'])
self.assertEqual(volume['id'], attachment['volume_id'])
authors: ["yibo_grace@163.com"] | author_id: yibo_grace@163.com
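Both test classes above register `wait_for_volume_status` before `detach_volume` because `addCleanup` callbacks run in LIFO order, so the detach executes first and the wait second. A minimal standalone sketch of that ordering with `unittest` (the test names are illustrative):

```python
# unittest cleanups run last-in, first-out, which is why the tests above
# register the wait before the detach.
import unittest

class CleanupOrder(unittest.TestCase):
    def test_lifo(self):
        order = []
        self.addCleanup(order.append, 'registered first, runs last')
        self.addCleanup(order.append, 'registered last, runs first')
        self.doCleanups()  # run pending cleanups now so we can assert on them
        self.assertEqual(order, ['registered last, runs first',
                                 'registered first, runs last'])

unittest.main(argv=['x'], exit=False)
```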
---

blob_id: f8aa377e14866f71d01f1a5d33aa8abb0837f16e | directory_id: 8bfd78291d7cd7ab1fd806659b027b14f02acaf5
path: /deidentify/dataset/uthealth2corpus.py | content_id: 6996289429f4c63f882ae5069b3ea2a1c99a571c
detected_licenses: ["MIT"] | license_type: permissive | repo_name: nedap/deidentify
snapshot_id: 586ae3b5ba05d2b5369c1296211f39945c703f65 | revision_id: a827378b5b454a928cccdb8fe85d6e1ae5c26464 | branch_name: refs/heads/master
visit_date: 2023-07-08T22:37:32.555236 | revision_date: 2022-11-09T18:26:50 | committer_date: 2022-11-09T18:26:50
github_id: 228,331,179 | star_events_count: 99 | fork_events_count: 21
gha_license_id: MIT | gha_event_created_at: 2023-06-22T15:03:00 | gha_created_at: 2019-12-16T07:47:18 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,233 | extension: py
content:
"""Conversion script for the i2b2/UTHealth corpus."""
import glob
import os
import xml.etree.ElementTree as ET
from os.path import basename, dirname, join, splitext
from loguru import logger
from sklearn.model_selection import train_test_split
from deidentify.base import Annotation, Document
from deidentify.dataset import brat
BASE_PATH = join(dirname(__file__), '../../data/raw/i2b2/')
TRAIN_SET_A = join(BASE_PATH, 'training-PHI-Gold-Set1')
TRAIN_SET_B = join(BASE_PATH, 'training-PHI-Gold-Set2')
TEST_SET = join(BASE_PATH, 'testing-PHI-Gold-fixed')
OUTPUT_PATH = join(dirname(__file__), '../../data/corpus/i2b2/')
TAG_MAPPING = {
    # not sure why the PHI:* classes exist alongside the other classes. This only affects 16
    # instances of PHI. The following remaps those tags.
'PHI:PATIENT': 'NAME:PATIENT',
'PHI:DOCTOR': 'NAME:DOCTOR',
'PHI:DATE': 'DATE:DATE'
}
def xml_to_document(xml_file):
"""Converts an i2b2/UTHealth XML document to a `deidentify.base.Document`.
XML Structure:
```
<?xml version="1.0" encoding="UTF-8" ?>
<deIdi2b2>
<TEXT><![CDATA[
this is the record content
]]></TEXT>
<TAGS>
<DATE id="P0" start="16" end="26" text="2067-05-03" TYPE="DATE" comment="" />
<AGE id="P1" start="50" end="52" text="55" TYPE="AGE" comment="" />
</TAGS>
</deIdi2b2>
```
"""
tree = ET.parse(xml_file)
root = tree.getroot()
text = root.find('TEXT').text
doc_name = 'doc-' + splitext(basename(xml_file))[0]
annotations = []
for tag_element in root.find('TAGS'):
tag_name = tag_element.tag + ':' + tag_element.attrib['TYPE']
annotations.append(Annotation(
text=tag_element.attrib['text'],
start=tag_element.attrib['start'],
end=tag_element.attrib['end'],
# Example: NAME:DOCTOR
tag=TAG_MAPPING.get(tag_name, tag_name),
# i2b2 annotations have id prefixed with P. Example: P12
doc_id=doc_name,
ann_id='T{}'.format(tag_element.attrib['id'][1:])
))
return Document(name=doc_name, text=text, annotations=annotations)
def _write_documents(path, documents):
os.makedirs(path, exist_ok=True)
for doc in documents:
brat.write_brat_document(path, doc.name, doc.text, doc.annotations)
def main():
train_a = glob.glob(join(TRAIN_SET_A, '*.xml'))
train_b = glob.glob(join(TRAIN_SET_B, '*.xml'))
test = glob.glob(join(TEST_SET, '*.xml'))
train_docs = [xml_to_document(xml_doc) for xml_doc in train_a + train_b]
test_docs = [xml_to_document(xml_doc) for xml_doc in test]
logger.info('train/test docs: {}/{}'.format(len(train_docs), len(test_docs)))
logger.info('Take 20% of training instances as dev set...')
train_docs, dev_docs = train_test_split(train_docs, test_size=0.2, random_state=42)
logger.info('train/dev/test docs: {}/{}/{}'.format(
len(train_docs), len(dev_docs), len(test_docs)))
_write_documents(join(OUTPUT_PATH, 'train'), train_docs)
_write_documents(join(OUTPUT_PATH, 'dev'), dev_docs)
_write_documents(join(OUTPUT_PATH, 'test'), test_docs)
logger.info('Done.')
if __name__ == '__main__':
main()
authors: ["jan.trienes@googlemail.com"] | author_id: jan.trienes@googlemail.com
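A small standalone sketch of the tag extraction performed by `xml_to_document`, run against the XML shape shown in its docstring; the sample record below is fabricated:

```python
# Parse i2b2-style XML and pull out the PHI tags, mirroring the logic above.
import xml.etree.ElementTree as ET

sample = """<deIdi2b2>
<TEXT><![CDATA[Record of 2067-05-03, patient age 55.]]></TEXT>
<TAGS>
<DATE id="P0" start="10" end="20" text="2067-05-03" TYPE="DATE" comment="" />
<AGE id="P1" start="34" end="36" text="55" TYPE="AGE" comment="" />
</TAGS>
</deIdi2b2>"""

root = ET.fromstring(sample)
print(root.find('TEXT').text)
for tag in root.find('TAGS'):
    # tag name plus TYPE gives e.g. DATE:DATE; the P-prefixed id becomes T0, T1, ...
    print(tag.tag + ':' + tag.attrib['TYPE'], tag.attrib['text'],
          tag.attrib['start'], tag.attrib['end'], 'T' + tag.attrib['id'][1:])
```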
---

blob_id: a905908679bb48c05d3afd0ea70e309c1c90813d | directory_id: ad82717f0cf768a2cc38851249d123771ecadb4d
path: /assignment3/cs231n/classifiers/rnn.py | content_id: df5d245ddaa2389c2057ecdc87d31407bc58a4a3
detected_licenses: [] | license_type: no_license | repo_name: ssriramana93/cs231
snapshot_id: 8ceba9d04beaf86cd53f34400abbb28e935bf098 | revision_id: eb9b0b7c5137b776647bd126afc9dd7241952631 | branch_name: refs/heads/master
visit_date: 2021-07-06T10:15:04.074385 | revision_date: 2017-09-30T22:45:35 | committer_date: 2017-09-30T22:45:35
github_id: 103,793,859 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 12,275 | extension: py
content:
from builtins import range
from builtins import object
import numpy as np
from cs231n.layers import *
from cs231n.rnn_layers import *
class CaptioningRNN(object):
"""
A CaptioningRNN produces captions from image features using a recurrent
neural network.
The RNN receives input vectors of size D, has a vocab size of V, works on
sequences of length T, has an RNN hidden dimension of H, uses word vectors
of dimension W, and operates on minibatches of size N.
Note that we don't use any regularization for the CaptioningRNN.
"""
def __init__(self, word_to_idx, input_dim=512, wordvec_dim=128,
hidden_dim=128, cell_type='rnn', dtype=np.float32):
"""
Construct a new CaptioningRNN instance.
Inputs:
- word_to_idx: A dictionary giving the vocabulary. It contains V entries,
and maps each string to a unique integer in the range [0, V).
- input_dim: Dimension D of input image feature vectors.
- wordvec_dim: Dimension W of word vectors.
- hidden_dim: Dimension H for the hidden state of the RNN.
- cell_type: What type of RNN to use; either 'rnn' or 'lstm'.
- dtype: numpy datatype to use; use float32 for training and float64 for
numeric gradient checking.
"""
if cell_type not in {'rnn', 'lstm'}:
raise ValueError('Invalid cell_type "%s"' % cell_type)
self.cell_type = cell_type
self.dtype = dtype
self.word_to_idx = word_to_idx
self.idx_to_word = {i: w for w, i in word_to_idx.items()}
self.params = {}
vocab_size = len(word_to_idx)
self._null = word_to_idx['<NULL>']
self._start = word_to_idx.get('<START>', None)
self._end = word_to_idx.get('<END>', None)
# Initialize word vectors
self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)
self.params['W_embed'] /= 100
# Initialize CNN -> hidden state projection parameters
self.params['W_proj'] = np.random.randn(input_dim, hidden_dim)
self.params['W_proj'] /= np.sqrt(input_dim)
self.params['b_proj'] = np.zeros(hidden_dim)
# Initialize parameters for the RNN
dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]
self.params['Wx'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)
self.params['Wx'] /= np.sqrt(wordvec_dim)
self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
self.params['Wh'] /= np.sqrt(hidden_dim)
self.params['b'] = np.zeros(dim_mul * hidden_dim)
# Initialize output to vocab weights
self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)
self.params['W_vocab'] /= np.sqrt(hidden_dim)
self.params['b_vocab'] = np.zeros(vocab_size)
# Cast parameters to correct dtype
for k, v in self.params.items():
self.params[k] = v.astype(self.dtype)
def loss(self, features, captions):
"""
Compute training-time loss for the RNN. We input image features and
ground-truth captions for those images, and use an RNN (or LSTM) to compute
loss and gradients on all parameters.
Inputs:
- features: Input image features, of shape (N, D)
- captions: Ground-truth captions; an integer array of shape (N, T) where
each element is in the range 0 <= y[i, t] < V
Returns a tuple of:
- loss: Scalar loss
- grads: Dictionary of gradients parallel to self.params
"""
# Cut captions into two pieces: captions_in has everything but the last word
# and will be input to the RNN; captions_out has everything but the first
# word and this is what we will expect the RNN to generate. These are offset
# by one relative to each other because the RNN should produce word (t+1)
# after receiving word t. The first element of captions_in will be the START
# token, and the first element of captions_out will be the first word.
captions_in = captions[:, :-1]
captions_out = captions[:, 1:]
# You'll need this
mask = (captions_out != self._null)
# Weight and bias for the affine transform from image features to initial
# hidden state
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
# Word embedding matrix
W_embed = self.params['W_embed']
# Input-to-hidden, hidden-to-hidden, and biases for the RNN
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
# Weight and bias for the hidden-to-vocab transformation.
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
loss, grads = 0.0, {}
############################################################################
# TODO: Implement the forward and backward passes for the CaptioningRNN. #
# In the forward pass you will need to do the following: #
# (1) Use an affine transformation to compute the initial hidden state #
# from the image features. This should produce an array of shape (N, H)#
# (2) Use a word embedding layer to transform the words in captions_in #
# from indices to vectors, giving an array of shape (N, T, W). #
# (3) Use either a vanilla RNN or LSTM (depending on self.cell_type) to #
# process the sequence of input word vectors and produce hidden state #
# vectors for all timesteps, producing an array of shape (N, T, H). #
# (4) Use a (temporal) affine transformation to compute scores over the #
# vocabulary at every timestep using the hidden states, giving an #
# array of shape (N, T, V). #
# (5) Use (temporal) softmax to compute loss using captions_out, ignoring #
# the points where the output word is <NULL> using the mask above. #
# #
# In the backward pass you will need to compute the gradient of the loss #
# with respect to all model parameters. Use the loss and grads variables #
# defined above to store loss and gradients; grads[k] should give the #
# gradients for self.params[k]. #
############################################################################
# (1) affine transform: image features -> initial hidden state
h0 = np.dot(features, W_proj) + b_proj
# (2) embed the input caption words
xemb, ecache = word_embedding_forward(captions_in, W_embed)
# (3) run the recurrent cell over the whole sequence
if self.cell_type == 'rnn':
h, rcache = rnn_forward(xemb, h0, Wx, Wh, b)
elif self.cell_type == 'lstm':
h, rcache = lstm_forward(xemb, h0, Wx, Wh, b)
# (4) hidden states -> vocabulary scores at every timestep
out, acache = temporal_affine_forward(h, W_vocab, b_vocab)
# (5) softmax loss over captions_out, masking out <NULL> positions
loss, dx = temporal_softmax_loss(out, captions_out, mask)
# backward pass, mirroring the forward steps in reverse order
dh, dW_vocab, db_vocab = temporal_affine_backward(dx, acache)
if self.cell_type == 'rnn':
dx, dh0, dWx, dWh, db = rnn_backward(dh, rcache)
elif self.cell_type == 'lstm':
dx, dh0, dWx, dWh, db = lstm_backward(dh, rcache)
dW_embed = word_embedding_backward(dx, ecache)
# gradients of the feature-to-hidden affine projection
dW_proj = np.dot(features.T, dh0)
db_proj = np.sum(dh0, axis = 0)
grads['W_proj'] = dW_proj
grads['b_proj'] = db_proj
grads['W_embed'] = dW_embed
grads['Wx'] = dWx
grads['Wh'] = dWh
grads['b'] = db
grads['W_vocab'] = dW_vocab
grads['b_vocab'] = db_vocab
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
def sample(self, features, max_length=30):
"""
Run a test-time forward pass for the model, sampling captions for input
feature vectors.
At each timestep, we embed the current word, pass it and the previous hidden
state to the RNN to get the next hidden state, use the hidden state to get
scores for all vocab words, and choose the word with the highest score as
the next word. The initial hidden state is computed by applying an affine
transform to the input image features, and the initial word is the <START>
token.
For LSTMs you will also have to keep track of the cell state; in that case
the initial cell state should be zero.
Inputs:
- features: Array of input image features of shape (N, D).
- max_length: Maximum length T of generated captions.
Returns:
- captions: Array of shape (N, max_length) giving sampled captions,
where each element is an integer in the range [0, V). The first element
of captions should be the first sampled word, not the <START> token.
"""
N = features.shape[0]
captions = self._null * np.ones((N, max_length), dtype=np.int32)
# Unpack parameters
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
W_embed = self.params['W_embed']
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
###########################################################################
# TODO: Implement test-time sampling for the model. You will need to #
# initialize the hidden state of the RNN by applying the learned affine #
# transform to the input image features. The first word that you feed to #
# the RNN should be the <START> token; its value is stored in the #
# variable self._start. At each timestep you will need to:             #
# (1) Embed the previous word using the learned word embeddings #
# (2) Make an RNN step using the previous hidden state and the embedded #
# current word to get the next hidden state. #
# (3) Apply the learned affine transformation to the next hidden state to #
# get scores for all words in the vocabulary #
# (4) Select the word with the highest score as the next word, writing it #
# to the appropriate slot in the captions variable #
# #
# For simplicity, you do not need to stop generating after an <END> token #
# is sampled, but you can if you want to. #
# #
# HINT: You will not be able to use the rnn_forward or lstm_forward #
# functions; you'll need to call rnn_step_forward or lstm_step_forward in #
# a loop. #
###########################################################################
# initial hidden state comes from the image features; the LSTM cell
# state starts at zero and is ignored by the vanilla RNN
h = np.dot(features, W_proj) + b_proj
H = h.shape[1]
c = np.zeros((N, H))
# the first word fed to the network is the <START> token
word = np.ones((N, 1), np.int32)*self._start
for t in range(max_length):
word_emb, _ = word_embedding_forward(word.astype(np.int32), W_embed)
if self.cell_type == 'rnn':
h, _ = rnn_step_forward(np.squeeze(word_emb), h, Wx, Wh, b)
if self.cell_type == 'lstm':
h, c, _ = lstm_step_forward(np.squeeze(word_emb), h, c, Wx, Wh, b)
out, _ = temporal_affine_forward(h.reshape((N, 1, -1)), W_vocab, b_vocab)
# argmax (not amax) selects the index of the highest-scoring word
word = np.argmax(np.squeeze(out), axis=1)
captions[:, t] = word.reshape((-1)).astype(np.int32)
############################################################################
# END OF YOUR CODE #
############################################################################
return captions
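# --- Hedged usage sketch (not part of the original assignment code) ---
# Assumes the standard CS231n CaptioningRNN constructor signature
# (word_to_idx, input_dim, wordvec_dim, hidden_dim, cell_type); every
# name and size below is illustrative.
if __name__ == '__main__':
    word_to_idx = {'<NULL>': 0, '<START>': 1, '<END>': 2, 'cat': 3, 'sat': 4}
    model = CaptioningRNN(word_to_idx, input_dim=16, wordvec_dim=8,
                          hidden_dim=8, cell_type='rnn')
    features = np.random.randn(2, 16)
    captions = np.random.randint(0, 5, size=(2, 4))
    loss, grads = model.loss(features, captions)
    sampled = model.sample(features, max_length=5)
    print('loss:', loss, 'sampled captions shape:', sampled.shape)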
|
[
"raminmyst@gmail.com"
] |
raminmyst@gmail.com
|
d7ce72f83a50a569a7db3d1cba0ef7e47f409d52
|
c3d2f02daf1dbfb90817bab822177bceec1cc9e3
|
/spm/kkk.py
|
03aa97a304c204a102ac842d41c5e440d26edbf2
|
[] |
no_license
|
KAYDEEP/Stock-prediction
|
a40b8b63b907a1a982345a61b1eeb04a55a27603
|
349bfb944f6da051ee04dac276269323c3d5347b
|
refs/heads/master
| 2020-06-04T08:03:51.655240
| 2019-06-29T13:56:16
| 2019-06-29T13:56:16
| 191,937,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
import csv
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
plt.switch_backend('TkAgg')
dates = []
prices = []
def get_data(filename):
with open(filename, 'r') as csvfile:
csvFileReader = csv.reader(csvfile)
next(csvFileReader) # skipping column names
for row in csvFileReader:
dates.append(int(row[0].split('-')[0]))
prices.append(float(row[1]))
return
def predict_price(dates, prices, x):
dates = np.reshape(dates,(len(dates), 1))
svr_lin = SVR(kernel= 'linear', C= 1e3)
svr_rbf = SVR(kernel= 'rbf', C= 1e3, gamma= 0.1)
svr_rbf.fit(dates, prices)
svr_lin.fit(dates, prices)
plt.scatter(dates, prices, color= 'black', label= 'Data')
plt.plot(dates, svr_rbf.predict(dates), color= 'red', label= 'RBF model')
plt.plot(dates,svr_lin.predict(dates), color= 'green', label= 'Linear model')
plt.xlabel('Date')
plt.ylabel('Price')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
return svr_rbf.predict([[x]])[0], svr_lin.predict([[x]])[0] # predict expects a 2-D array
get_data('kk.csv')
predicted_price = predict_price(dates, prices, 29)
print('The predicted prices are:', predicted_price)
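# Hedged note (not in the original script): scikit-learn estimators expect
# 2-D feature arrays, which is why predict_price reshapes `dates` and why
# the query point is passed as [[x]] above. A minimal sketch:
#   SVR(kernel='rbf', C=1e3, gamma=0.1).fit([[1], [2], [3]], [1.0, 2.0, 3.0]).predict([[4]])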
|
[
"+KAYDEEP@users.noreply.github.com"
] |
+KAYDEEP@users.noreply.github.com
|
cfc51e09dfdf342ebdea407d83179772f132b123
|
122ba27afaab4b2f909b1b23d3020506cbc1b356
|
/setup.py
|
e2660a9046ea37653cc4e43cc733bb21c331a56a
|
[
"MIT"
] |
permissive
|
lukasturcani/molder
|
5f018c159f1cb0813249b61056f6a9fe4d8c1474
|
a5a3e8e3958dd0daa83576ec7a21cfc73f5b75d2
|
refs/heads/master
| 2021-01-01T04:53:19.144138
| 2019-01-26T22:35:28
| 2019-01-26T22:35:28
| 90,673,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
# setuptools (rather than distutils) is needed for install_requires below
from setuptools import setup
setup(
name='molder',
version='1.0',
description='A molecular data collection web app.',
author='Lukas Turcani',
url='https://www.github.com/lukasturcani/molder',
packages=['molder'],
install_requires=['flask']
)
|
[
"lukasturcani93@gmail.com"
] |
lukasturcani93@gmail.com
|
640d8615e3a22c8ea6c8ab5407cbaf849441e260
|
192c001fa61d30a3d04c4b6e6ec37c680e7f4b5f
|
/trunk/AdditionalPlugIns/VtkWindow/VtkWindow/Helpers.py
|
34574bf57692cbeb7aaab633a9d215b0cac690c6
|
[] |
no_license
|
BackupTheBerlios/simuvis4-svn
|
a9ee1d9d3df2ddc319ea33a56b2bed2197b82639
|
df38c6205dcd37e005142e70d1cec9b4a541761e
|
refs/heads/master
| 2021-01-22T04:49:19.550943
| 2009-09-12T20:38:51
| 2009-09-12T20:38:51
| 40,748,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
# encoding: utf-8
# version: $Id$
# author: Joerg Raedler <jr@j-raedler.de>
# license: GPL v2
# this file is part of the SimuVis4 framework
# FIXME: old SimuVis code
import math
class RgbCalculator(object):
"""calculates an rgb pattern for a value"""
def __init__(self, min, max):
self.setMinMax(min, max)
def RGB(self, val):
x = (val-self.min) * (2.0*math.pi/(self.max-self.min))
if x < 0: x = 0
if x > 2.0*math.pi: x = 2.0*math.pi
if x < math.pi:
r = 0.5+0.5*math.cos(x)
b = 0.0
else:
b = 0.5+0.5*math.cos(x)
r = 0.0
return (r, 1.0-r-b, b)
def setMinMax(self, min, max):
self.min = min
self.max = max
self.half = 0.5 * (max + min)
def isActor(a): # FIXME: HACK!
return a and a.GetClassName() in ('vtkActor', 'vtkOpenGLActor',
'vtkLODActor')
def isAssembly(a): # FIXME: HACK!
return a and a.GetClassName() == 'vtkAssembly'
def getActorsRecursive(a):
l = []
parts = a.GetParts()
numParts = parts.GetNumberOfItems()
parts.InitTraversal()
for i in range(0,numParts):
p = parts.GetNextProp3D()
if isActor(p):
l.append(p)
elif isAssembly(p):
l += getActorsRecursive(p)
return l
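# --- Hedged usage sketch (not part of the original SimuVis code) ---
# Requires the `vtk` Python package; builds a small nested assembly and
# collects the actors it contains.
if __name__ == '__main__':
    import vtk
    outer = vtk.vtkAssembly()
    outer.AddPart(vtk.vtkActor())
    inner = vtk.vtkAssembly()
    inner.AddPart(vtk.vtkActor())
    outer.AddPart(inner)
    print(len(getActorsRecursive(outer)))  # expected: 2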
|
[
"jraedler@6b4c185e-cb43-0410-a2ac-e70e11b4cc95"
] |
jraedler@6b4c185e-cb43-0410-a2ac-e70e11b4cc95
|
684f32296e32278604c5960ca9e11ddffe578e78
|
5c58b90e9a735b7ee779339ea417e913eebe83bf
|
/vocab.py
|
e2ba6aca827a36b82c1bc73d44fe70b176a11320
|
[] |
no_license
|
RuixinGui/XCS224N-A5-master_fixed
|
1a98eff4c7eddc56c067e63b67b73c93b0ee17e0
|
951512e66e265642b411eeeb2321ac6709b828b3
|
refs/heads/main
| 2023-01-24T12:50:38.558487
| 2020-11-29T00:12:52
| 2020-11-29T00:12:52
| 316,846,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,612
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Usage:
vocab.py --train-src=<file> --train-tgt=<file> [options] VOCAB_FILE
Options:
-h --help Show this screen.
--train-src=<file> File of training source sentences
--train-tgt=<file> File of training target sentences
--size=<int> vocab size [default: 50000]
--freq-cutoff=<int> frequency cutoff [default: 2]
"""
from collections import Counter
from docopt import docopt
from itertools import chain
import json
import torch
from typing import List
from utils import read_corpus, pad_sents, pad_sents_char
class VocabEntry(object):
""" Vocabulary Entry, i.e. structure containing either
src or tgt language terms.
"""
def __init__(self, word2id=None):
""" Init VocabEntry Instance.
@param word2id (dict): dictionary mapping words to indices
"""
if word2id:
self.word2id = word2id
else:
self.word2id = dict()
self.word2id['<pad>'] = 0 # Pad Token
self.word2id['<s>'] = 1 # Start Token
self.word2id['</s>'] = 2 # End Token
self.word2id['<unk>'] = 3 # Unknown Token
self.unk_id = self.word2id['<unk>']
self.id2word = {v: k for k, v in self.word2id.items()}
## Additions to the A4 code:
self.char_list = list("""ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]""")
self.char2id = dict() # Converts characters to integers
self.char2id['<pad>'] = 0
self.char2id['{'] = 1
self.char2id['}'] = 2
self.char2id['<unk>'] = 3
for i, c in enumerate(self.char_list):
self.char2id[c] = len(self.char2id)
self.char_unk = self.char2id['<unk>']
self.start_of_word = self.char2id["{"]
self.end_of_word = self.char2id["}"]
assert self.start_of_word+1 == self.end_of_word
self.id2char = {v: k for k, v in self.char2id.items()} # Converts integers to characters
## End additions to the A4 code
def __getitem__(self, word):
""" Retrieve word's index. Return the index for the unk
token if the word is out of vocabulary.
@param word (str): word to look up.
@returns index (int): index of word
"""
return self.word2id.get(word, self.unk_id)
def __contains__(self, word):
""" Check if word is captured by VocabEntry.
@param word (str): word to look up
@returns contains (bool): whether word is contained
"""
return word in self.word2id
def __setitem__(self, key, value):
""" Raise error, if one tries to edit the VocabEntry.
"""
raise ValueError('vocabulary is readonly')
def __len__(self):
""" Compute number of words in VocabEntry.
@returns len (int): number of words in VocabEntry
"""
return len(self.word2id)
def __repr__(self):
""" Representation of VocabEntry to be used
when printing the object.
"""
return 'Vocabulary[size=%d]' % len(self)
def id2word(self, wid):
""" Return mapping of index to word.
Note: the instance attribute `self.id2word` (a dict set in __init__)
shadows this method, so lookups go through the dict in practice.
@param wid (int): word index
@returns word (str): word corresponding to index
"""
return self.id2word[wid]
def add(self, word):
""" Add word to VocabEntry, if it is previously unseen.
@param word (str): word to add to VocabEntry
@return index (int): index that the word has been assigned
"""
if word not in self:
wid = self.word2id[word] = len(self)
self.id2word[wid] = word
return wid
else:
return self[word]
def words2charindices(self, sents):
""" Convert list of sentences of words into list of list of list of character indices.
@param sents (list[list[str]]): sentence(s) in words
@return word_ids (list[list[list[int]]]): sentence(s) in indices
"""
### YOUR CODE HERE for part 1a
### TODO:
### This method should convert characters in the input sentences into their
### corresponding character indices using the character vocabulary char2id
### defined above.
###
### You must prepend each word with the `start_of_word` character and append
### with the `end_of_word` character.
word_ids=[[[self.start_of_word]+[self.char2id[char] for char in w]+[self.end_of_word] for w in s ]for s in sents ]
return word_ids
### END YOUR CODE
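# Hedged example of the mapping above (actual ids depend on char2id,
# shown symbolically): words2charindices([["hi"]]) ->
# [[[start_of_word, char2id['h'], char2id['i'], end_of_word]]]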
def words2indices(self, sents):
""" Convert list of sentences of words into list of list of indices.
@param sents (list[list[str]]): sentence(s) in words
@return word_ids (list[list[int]]): sentence(s) in indices
"""
return [[self[w] for w in s] for s in sents]
def indices2words(self, word_ids):
""" Convert list of indices into words.
@param word_ids (list[int]): list of word ids
@return sents (list[str]): list of words
"""
return [self.id2word[w_id] for w_id in word_ids]
def to_input_tensor_char(self, sents: List[List[str]], device: torch.device) -> torch.Tensor:
""" Convert list of sentences (words) into tensor with necessary padding for
shorter sentences.
@param sents (List[List[str]]): list of sentences (words)
@param device: device on which to load the tensor, i.e. CPU or GPU
@returns sents_var: tensor of (max_sentence_length, batch_size, max_word_length)
"""
### YOUR CODE HERE for part 1c
### TODO:
### Connect `words2charindices()` and `pad_sents_char()` which you've defined in
### previous parts
word_ids=self.words2charindices(sents)
sents_t=pad_sents_char(word_ids, self['<pad>']) #pad
# (batch_size, max_sentence_length, max_word_length)
sents_var = torch.tensor(sents_t, dtype=torch.long, device=device) #convert into tensor
#batch_size=len(word_ids)
return sents_var.permute(1, 0, 2)# (max sentence length, batch size, max word length)
### END YOUR CODE
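# Hedged shape example: for a batch of 3 sentences whose longest has 7
# words, with words padded to 21 characters, the returned tensor has
# shape (7, 3, 21) on the requested device.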
def to_input_tensor(self, sents: List[List[str]], device: torch.device) -> torch.Tensor:
""" Convert list of sentences (words) into tensor with necessary padding for
shorter sentences.
@param sents (List[List[str]]): list of sentences (words)
@param device: device on which to load the tensor, i.e. CPU or GPU
@returns sents_var: tensor of (max_sentence_length, batch_size)
"""
word_ids = self.words2indices(sents)
sents_t = pad_sents(word_ids, self['<pad>'])
sents_var = torch.tensor(sents_t, dtype=torch.long, device=device)
return torch.t(sents_var)
@staticmethod
def from_corpus(corpus, size, freq_cutoff=2):
""" Given a corpus construct a Vocab Entry.
@param corpus (list[str]): corpus of text produced by read_corpus function
@param size (int): # of words in vocabulary
@param freq_cutoff (int): if word occurs n < freq_cutoff times, drop the word
@returns vocab_entry (VocabEntry): VocabEntry instance produced from provided corpus
"""
vocab_entry = VocabEntry()
word_freq = Counter(chain(*corpus))
valid_words = [w for w, v in word_freq.items() if v >= freq_cutoff]
print('number of word types: {}, number of word types w/ frequency >= {}: {}'
.format(len(word_freq), freq_cutoff, len(valid_words)))
top_k_words = sorted(valid_words, key=lambda w: word_freq[w], reverse=True)[:size]
for word in top_k_words:
vocab_entry.add(word)
return vocab_entry
class Vocab(object):
""" Vocab encapsulating src and target langauges.
"""
def __init__(self, src_vocab: VocabEntry, tgt_vocab: VocabEntry):
""" Init Vocab.
@param src_vocab (VocabEntry): VocabEntry for source language
@param tgt_vocab (VocabEntry): VocabEntry for target language
"""
self.src = src_vocab
self.tgt = tgt_vocab
@staticmethod
def build(src_sents, tgt_sents, vocab_size, freq_cutoff) -> 'Vocab':
""" Build Vocabulary.
@param src_sents (list[str]): Source sentences provided by read_corpus() function
@param tgt_sents (list[str]): Target sentences provided by read_corpus() function
@param vocab_size (int): Size of vocabulary for both source and target languages
@param freq_cutoff (int): if word occurs n < freq_cutoff times, drop the word.
"""
assert len(src_sents) == len(tgt_sents)
print('initialize source vocabulary ..')
src = VocabEntry.from_corpus(src_sents, vocab_size, freq_cutoff)
print('initialize target vocabulary ..')
tgt = VocabEntry.from_corpus(tgt_sents, vocab_size, freq_cutoff)
return Vocab(src, tgt)
def save(self, file_path):
""" Save Vocab to file as JSON dump.
@param file_path (str): file path to vocab file
"""
json.dump(dict(src_word2id=self.src.word2id, tgt_word2id=self.tgt.word2id), open(file_path, 'w'), indent=2)
@staticmethod
def load(file_path):
""" Load vocabulary from JSON dump.
@param file_path (str): file path to vocab file
@returns Vocab object loaded from JSON dump
"""
entry = json.load(open(file_path, 'r'))
src_word2id = entry['src_word2id']
tgt_word2id = entry['tgt_word2id']
return Vocab(VocabEntry(src_word2id), VocabEntry(tgt_word2id))
def __repr__(self):
""" Representation of Vocab to be used
when printing the object.
"""
return 'Vocab(source %d words, target %d words)' % (len(self.src), len(self.tgt))
if __name__ == '__main__':
args = docopt(__doc__)
print('read in source sentences: %s' % args['--train-src'])
print('read in target sentences: %s' % args['--train-tgt'])
src_sents = read_corpus(args['--train-src'], source='src')
tgt_sents = read_corpus(args['--train-tgt'], source='tgt')
vocab = Vocab.build(src_sents, tgt_sents, int(args['--size']), int(args['--freq-cutoff']))
print('generated vocabulary, source %d words, target %d words' % (len(vocab.src), len(vocab.tgt)))
vocab.save(args['VOCAB_FILE'])
print('vocabulary saved to %s' % args['VOCAB_FILE'])
|
[
"noreply@github.com"
] |
RuixinGui.noreply@github.com
|
4b03c42aaa7425a6b379ca6b3096c3dd1dd205e0
|
d56bf627aa5eb674efe4052ae7d42f4e5a24f3c1
|
/pset9/application.py
|
d8901ab7c3fab8e55d90af32db59ea7567f293d1
|
[] |
no_license
|
mido3ds/CS50-Psets
|
20e620490a379200f0f8e7445f73a3b679e223e7
|
d6702f3b3db5ef890e0bd3bcee27a5cdfa011a81
|
refs/heads/master
| 2020-05-30T07:14:22.230830
| 2017-02-17T19:38:11
| 2017-02-17T19:38:11
| 69,060,775
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,456
|
py
|
from cs50 import SQL
from flask import Flask, redirect, render_template, request, session, url_for
from flask_session import Session
from passlib.apps import custom_app_context as pwd_context
from tempfile import gettempdir
from random import randrange
from os import urandom
from datetime import datetime
from subprocess import call
from helpers import usd, login_required, apology, lookup
# configure application
app = Flask(__name__)
# my configurations
app.config.update(
TEMPLATES_AUTO_RELOAD=True,
SECRET_KEY=urandom(40),
HOST='0.0.0.0',
PORT=randrange(5000, 9001),
DEBUG=False,
)
# ensure responses aren't cached
if app.config["DEBUG"]:
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
# custom filter
app.jinja_env.filters["usd"] = usd
# configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = gettempdir()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
@app.route("/")
@login_required
def index():
# get cash and make it float
try:
cash = float(
db.execute(
'SELECT cash FROM Users WHERE id = :id',
id=session['user_id']
)[0]['cash']
)
except IndexError:
# user is not in DataBase, some error happened
# clear session and get him to login
session.clear()
return redirect(url_for('login'))
# get stocks for this id
stocks = get_user_stocks()
# add now prices to stocks, calc grand total
grand_total = cash
for symbol in stocks:
stocks[symbol]['price'] = lookup(symbol)['price'] * int(stocks[symbol]['num_shares'])
grand_total += stocks[symbol]['price']
return render_template(
'index.html',
stocks=stocks,
cash=cash,
grand_total=grand_total,
)
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
"""Buy shares of stock."""
if request.method == 'GET':
return render_template('buy.html')
else:
# check
if request.form['symbol'] == '' or request.form['num_shares'] == '':
return apology('some fields are empty')
if int(request.form['num_shares']) < 0:
return apology('num_shares can\'t be negative')
elif int(request.form['num_shares']) == 0:
return apology('so', 'u want to buy nothing')
# get money
db_result = db.execute(
'SELECT cash, username FROM Users WHERE id = :id',
id=session['user_id']
)
# save time now, e.g:
# 2017-2-5 3:44:13
time_purchase = '{:%Y-%m-%d %H:%M:%S}'.format(datetime.now())
lookup_result = lookup(request.form['symbol'])
if lookup_result is None:
return apology('error happened', 'check the symbol plz')
total_buy = lookup_result['price'] * float(request.form['num_shares'])
# check money is enough
if total_buy > float(db_result[0]['cash']):
return apology('you don\'t have enough money', 'ur kiddin me?')
# buy: update cash,
db.execute(
'UPDATE Users SET cash = :cash WHERE id = :id',
cash=float(db_result[0]['cash'])-total_buy,
id=session['user_id'],
)
# and log it
db.execute(
'INSERT INTO Buying(user_id, symbol, stock_price, num_shares)\
VALUES(:user_id, :symbol, :price, :num_shares)',
user_id=session['user_id'],
symbol=lookup_result['symbol'],
price=lookup_result['price'],
num_shares=request.form['num_shares'],
)
return render_template(
'bought.html',
symbol=lookup_result['symbol'],
price=usd(lookup_result['price']),
num_shares=request.form['num_shares'],
total_cash=usd(total_buy),
user_name=db_result[0]['username'],
time=time_purchase,
)
@app.route("/history")
@login_required
def history():
"""Show history of transactions."""
# get logs
sell_rows = db.execute(
'SELECT stock_price, num_shares, symbol, time FROM Selling WHERE user_id = :id',
id=session['user_id'],
)
buy_rows = db.execute(
'SELECT stock_price, num_shares, symbol, time FROM Buying WHERE user_id = :id',
id=session['user_id'],
)
# send them
return render_template(
'history.html',
sell_rows=sell_rows,
buy_rows=buy_rows,
)
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in."""
# forget any user_id
session.clear()
# if user reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# ensure username was submitted
if not request.form.get("username"):
return apology("must provide username")
# ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password")
# query database for username
rows = db.execute("SELECT * FROM Users WHERE username = :username", username=request.form.get("username"))
# ensure username exists and password is correct
if len(rows) != 1 or not pwd_context.verify(request.form.get("password"), rows[0]["hash"]):
return apology("invalid username and/or password")
# remember which user has logged in
session["user_id"] = rows[0]["id"]
# redirect user to home page
return redirect(url_for("index"))
# else if user reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out."""
# forget any user_id
session.clear()
# redirect user to login form
return redirect(url_for("login"))
@app.route("/quote", methods=["GET", 'POST'])
@login_required
def quote():
"""Get stock quote."""
if request.method == 'GET':
return render_template('quote.html')
if request.method == 'POST':
if request.form['symbol'] == '':
return apology('no symbol provided')
result = lookup(request.form['symbol'])
if result is None:
return apology('some error happened')
return render_template(
'quoted.html',
name=result['name'],
symbol=result['symbol'],
price=usd(result['price']),
)
@app.route("/register", methods=["GET", "POST"])
def register():
"""Register user."""
if request.method == 'GET':
return render_template('register.html')
if request.method == 'POST':
if request.form['user'] == '':
return apology('You didn\'t type a user name')
if user_is_registered(request.form['user']):
return apology('User name is used, try another name')
if request.form['password1'] == '' or request.form['password2'] == '':
return apology('password can\'t be left empty')
if request.form['password1'] != request.form['password2']:
return apology('Passwords don\'t match')
hash = pwd_context.hash(request.form['password1'])
val = db.execute(
'INSERT INTO Users (username, hash) VALUES(:user, :passw)',
user=request.form['user'],
passw=hash,
)
print(request.form['user'], request.form['password1'], val)
return render_template('login.html')
def user_is_registered(user):
''' returns True if user is found in db '''
result = db.execute('SELECT username FROM Users WHERE username = :user', user=user)
if len(result) == 1:
return True
return False
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
"""Sell shares of stock."""
# get user stocks
stocks = get_user_stocks()
if request.method == 'GET':
return render_template(
'sell.html',
stocks=stocks,
)
if request.method == 'POST':
symbol = request.form['symbol']
lookup_price = lookup(symbol)['price']
# sell some
if request.form['radioOption'] == "Select some shares":
num_shares = int(request.form['num_shares'])
# num_shares must be positive
if num_shares < 0:
return apology('num of shares can\'t be negative')
if num_shares == 0:
return apology('sooo', 'ur selling nothing')
# num_shares must be <= available shares
if num_shares > stocks[symbol]['num_shares']:
return apology('ur selling more than you have')
# sell all
else:
# make num_shares all what use have for this symbol
num_shares = stocks[symbol]['num_shares']
sell_price = lookup_price * num_shares
# update user cash; the WHERE clause restricts the update to the
# logged-in user (without it, every user's balance would change)
db.execute(
'UPDATE Users \
SET cash = cash + :sell_price \
WHERE id = :id',
sell_price=sell_price,
id=session['user_id'],
)
# log it
db.execute(
'INSERT INTO Selling(user_id, symbol, stock_price, num_shares) \
VALUES(:user_id, :symbol, :price, :num_shares)',
user_id=session['user_id'],
symbol=request.form['symbol'],
price=lookup_price, # per-share price, consistent with how buys are logged
num_shares=num_shares,
)
return redirect(url_for('index'))
def get_user_stocks():
'''return dict of stocks that user have.
dict:
stocks = {
'<symbol>':{
'price':float,
'num_shares':int
},
..
}
'''
sell = get_user_log('Selling')
buy = get_user_log('Buying')
stocks = {}
for symbol in buy:
if symbol in sell:
# sub sell.price from buy.price
buy[symbol]['num_shares'] -= sell[symbol]['num_shares']
# add it to stocks
if buy[symbol]['num_shares'] != 0:
stocks[symbol] = buy[symbol]
return stocks
def get_user_log(table):
"""return dict of log history in given table name
dict:
stocks = {
'<symbol>':{
'price':float,
'num_shares':int
},
..
}
"""
rows = db.execute(
'SELECT symbol, stock_price, num_shares FROM :table WHERE user_id = :id',
table=table,
id=session['user_id'],
)
# add sold
stocks = {}
for row in rows:
symbol = row['symbol']
# if not created, create it
if symbol not in stocks:
stocks[symbol] = {
'price': float(row['stock_price']),
'num_shares': int(row['num_shares']),
}
# dict is created, update num_shares in it
else:
stocks[symbol]['num_shares'] += int(row['num_shares'])
return stocks
if __name__=='__main__':
# open site in browser
host=app.config['HOST']
port=app.config['PORT']
call(['open', 'http://{}:{}'.format(host, port)])
app.run(host, port)
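# Hedged toy illustration of get_user_stocks() above (values made up):
#   Buying log for AAPL: 5 shares @ 100.0, then 3 shares @ 110.0
#   Selling log for AAPL: 2 shares
#   -> {'AAPL': {'price': 100.0, 'num_shares': 6}}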
|
[
"mido3ds@gmail.com"
] |
mido3ds@gmail.com
|
c027acb5823d8bb3e393791fb8b39074ba6f2175
|
a1903bad5a9a5b42214a27d07cf5eb01845b7cbd
|
/dashboard/apps/utilities/urls.py
|
4294e497acfc40c6f004c8e64cd2cd773069b400
|
[] |
no_license
|
aderraik/Dashboard-with-Django-and-Bootstrap
|
a877cb7d01640e0908323f83c22e5a4ee46fd759
|
97fdc8bef05517a57032542decc11f07025a0233
|
refs/heads/master
| 2022-12-11T13:11:19.813025
| 2020-08-31T22:47:29
| 2020-08-31T22:47:29
| 287,655,630
| 1
| 0
| null | 2020-08-15T01:23:24
| 2020-08-15T01:23:23
| null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
from django.urls import path
import dashboard.apps.utilities.colors.views as ColorsViews
import dashboard.apps.utilities.borders.views as BordersViews
import dashboard.apps.utilities.animations.views as AnimationsViews
import dashboard.apps.utilities.others.views as OthersViews
app_name = 'utilities'
urlpatterns = [
path('', BordersViews.IndexView.as_view(), name='index'),
path('animations/', AnimationsViews.IndexView.as_view(), name='animations'),
path('borders/', BordersViews.IndexView.as_view(), name='borders'),
path('colors/', ColorsViews.IndexView.as_view(), name='colors'),
path('others/', OthersViews.IndexView.as_view(), name='others'),
]
|
[
"rgoestenmeier@via-internet.de"
] |
rgoestenmeier@via-internet.de
|
0177cf4f3dd6a5080a474d6ba3e640c1e74e6855
|
d99c6fbd5e602bb9d475fe5db5cac71e78a82399
|
/seq_func_review.py
|
28b611700e9bd4f97c3799f3f7a07549886e31f1
|
[] |
no_license
|
kjhjh04003/Python-Review
|
f435e80239ba4ec9d46beca061391588560d4022
|
9b5dcb8da3b422d5267e7f1b998d6e789d1deb26
|
refs/heads/master
| 2023-05-07T06:14:18.022360
| 2021-05-27T02:10:04
| 2021-05-27T02:10:04
| 370,963,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,876
|
py
|
def using_range():
# range object: generates a range of numbers
# it is a sequence type, so list() can be applied to it
# one argument: from 0 up to (but not including) the bound
# values are not stored; they are produced one at a time as needed
seq = range(10) # 0 through 9
print(seq, type(seq))
print(list(seq))
# two arguments: start bound, end bound
seq2 = range(2, 10) # 2 through 9
print(seq2)
print(list(seq2))
# three arguments: start bound, end bound, step
seq3 = range(2, 10, 2) # 2 through 9, stepping by 2
print(seq3)
print(list(seq3))
# negative step: counts down from larger to smaller values
seq4 = range(0, -10, -1) # 0 down to -9, in reverse order
print(seq4)
print(list(seq4))
# no values are stored, yet it behaves like a sequence
print(seq, "len : ", len(seq))
# membership tests are possible
print(5 in seq)
# items can be accessed through their index
print(seq[0], seq[1], seq[2]) # forward indexing
print(seq[-1], seq[-2], seq[-3]) # backward indexing
# slicing is possible
print(seq[2:5])
# immutable object -> assignment via index or slice is not allowed
# for loop driven by a range object
for i in range(10):
print(i, end=" ")
else:
print()
def using_enumerate():
""" enumerate(): used when the index is needed along with each item of a sequence """
colors = ["red", "yellow", "blue", "white", "grey"]
# print(colors, type(colors))
i = 0 # separately managed index value
for color in colors: # the item is available here, but not its index
print("color {0}: {1}".format(i, color))
i += 1
print("======================================")
for index, color in enumerate(colors): # (index, item) -> unpacking
print("color {}: {}".format(index, color))
def using_zip():
# zip object: iterates over several sequences in parallel
english = "Sun", "Mon", "Tue", "Wed"
korean = "일요일", "월요일", "화요일", "수요일", "목요일"
enkor = zip(english, korean) # the pairing stops at the shorter input
print(enkor, type(enkor))
# basic iteration
for pair in enkor: # yields a tuple per pairing
print(pair, type(pair))
# a zip object is single-use (exhausted after one pass)
enkor = zip(english, korean)
# iteration with unpacking
for eng, kor in enkor: # unpack each paired tuple
print(eng, ">", kor)
enkor = zip(english, korean)
# index, English, Korean
for index, (eng, kor) in enumerate(enkor):
print(index, ">", eng, ">", kor)
# a zip object can be used to build a dict
print(dict(zip(english, korean)))
if __name__ == "__main__":
# using_range()
# using_enumerate()
using_zip()
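# Hedged extra demo (not in the original review): zip truncates to the
# shorter input; itertools.zip_longest pads with a fill value instead.
from itertools import zip_longest
print(list(zip_longest(("Sun", "Mon"), ("일요일", "월요일", "화요일"))))
# -> [('Sun', '일요일'), ('Mon', '월요일'), (None, '화요일')]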
|
[
"kjhjh04003@naver.com"
] |
kjhjh04003@naver.com
|
3dcb1d11f5b2c28b2c3b9e83f6742c756f44fe94
|
61e358080f40545ff9b2bb0dcbd8d974c3387e65
|
/venv/bin/gunicorn
|
7ec12ecb5cce6b20762e22b7bdf4a0a8ec5af74d
|
[] |
no_license
|
AugustLONG/goodnews
|
f5b652b675b9a5612049dbab4781e1c89b96bbc6
|
8e73d159f16d0cda7e64b09d92544bd3fec31dac
|
refs/heads/master
| 2016-09-01T09:16:02.929640
| 2015-12-17T12:37:54
| 2015-12-17T12:37:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
#!/home/zhoumiao/djangoscrapy/hacker/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"melodyres@163.com"
] |
melodyres@163.com
|
|
574fc83dd4d87dc20f79d82e496505e6cc7fdf67
|
da7afdcaa07f00b9ec6fbc6c29f87e71936f29ab
|
/libraries/go-stats/go_gmt.py
|
06db30da517685310113f1718bc7b85fa7908be1
|
[
"BSD-3-Clause"
] |
permissive
|
geneontology/go-stats
|
f208ef5cd6624a88168cbcd1be90ddb782b2b39a
|
89ef0e2238b16359361ed47cd938c24d526e4971
|
refs/heads/master
| 2023-05-25T12:00:10.057871
| 2021-08-26T19:55:23
| 2021-08-26T19:55:23
| 191,247,143
| 0
| 0
|
BSD-3-Clause
| 2023-05-22T22:16:42
| 2019-06-10T21:19:40
|
Python
|
UTF-8
|
Python
| false
| false
| 8,799
|
py
|
# This script is experimental and is used to produce GMT files out of GO terms
import sys, getopt, os, json
import go_stats_utils as utils
from obo_parser import OBO_Parser, TermState
max_rows = 10000000
select_ontology = "select?fq=document_category:\"ontology_class\"&q=*:*&rows=" + str(max_rows) + "&wt=json&fq=idspace:\"GO\"&fq=is_obsolete:false&fl=annotation_class,annotation_class_label,source,regulates_closure,isa_closure,isa_partof_closure,regulates_closure"
select_annotations = "select?fq=document_category:\"annotation\"&q=*:*&rows=" + str(max_rows) + "&wt=json&fq=type:\"protein\"&fl=bioentity,annotation_class,evidence_type"
ASPECTS = {
"GO:0003674" : "MF",
"GO:0008150" : "BP",
"GO:0005575" : "CC"
}
def create_ontology_map(golr_base_url):
ontology = utils.golr_fetch(golr_base_url, select_ontology)
ontology = ontology['response']['docs']
map={}
for item in ontology:
map[item['annotation_class']] = item
return map
def create_go_annotation_map(golr_base_url, taxa):
"""
Create a Map { GO-Term -> [ annotations ] } using the direct annotation to the term (annotation_class)
"""
annots = utils.golr_fetch_by_taxa(golr_base_url, select_annotations, taxa)
annots = annots['response']['docs']
map={}
for item in annots:
iclass = item['annotation_class']
iannots = []
if iclass in map:
iannots = map[iclass]
else:
map[iclass] = iannots
iannots.append(item)
return map
def remap_go_annotation_map(go_annotation_map, ontology_map, closure):
"""
Remap an existing go annotation map using a certain closure (see CLOSURE_LABELS)
"""
new_map = {}
for term in go_annotation_map:
new_map[term] = []
closure_terms = ontology_map[term][closure]
for closure_term in closure_terms:
# continue only if there is an annotation for that closure term
if closure_term not in go_annotation_map:
continue
# discard annotation to root terms
if closure_term in ASPECTS:
continue
new_map[term] = new_map[term] + go_annotation_map[closure_term]
return new_map
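# Hedged toy illustration of the remapping above (identifiers made up):
#   go_annotation_map = {"GO:A": [a1], "GO:B": [b1]}
#   ontology_map = {"GO:A": {"regulates_closure": ["GO:A", "GO:B"]},
#                   "GO:B": {"regulates_closure": ["GO:B"]}}
#   remap_go_annotation_map(go_annotation_map, ontology_map, "regulates_closure")
#   -> {"GO:A": [a1, b1], "GO:B": [b1]}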
def format_id(id):
return id.replace("MGI:MGI:", "MGI:")
# return id.replace("UniProtKB:", "")
def gmt(ontology_map, golr_base_url, taxa):
print("\nCreating term annotation map for taxa ", taxa , " ...")
go_annotation_map = create_go_annotation_map(golr_base_url, taxa)
print("Term annotation map created with ", len(go_annotation_map) , " terms")
closure = utils.CLOSURE_LABELS.REGULATES.value
print("\nRemapping annotations using closure ", closure)
go_annotation_map = remap_go_annotation_map(go_annotation_map, ontology_map, closure)
print("Term annotation remapped using closure ", closure , " with ", len(go_annotation_map) , " terms")
evidence_groups = [ "ALL", "EXPERIMENTAL", "COMPUTATIONAL" ]
aspect_lists = [ "ALL", "BP", "MF", "CC" ]
report = { }
for aspect in aspect_lists:
report[aspect] = { }
count = 0
for term_id, value in go_annotation_map.items():
# do not consider aspect level terms (irrelevant: a gene supposedly always has at least 1 MF, 1 BP and 1 CC)
if term_id in ASPECTS:
continue
term_label = ontology_map[term_id]['annotation_class_label']
term_aspect = utils.aspect_from_source(ontology_map[term_id]['source'])
# for each annotated term, we'll keep a list of all the genes associated based on their evidence groups
id_sets = { }
for evgroup in evidence_groups:
id_set = set()
id_sets[evgroup] = id_set
# going through each annotation for the term considered
for annot in value:
bioentity = annot['bioentity']
et = annot['evidence_type']
# Don't annotate the gene to that term if ND !
evgroup = utils.get_evidence_min_group(et)
if(evgroup == "ND"):
continue
# Add all annotations (don't filter by evidence)
id_sets["ALL"].add(bioentity)
# Add the annotation for the specific group of evidence
id_sets[evgroup].add(bioentity)
# Building the report for that term; will add only the term to an evidence group report IF the term has at least one gene
for evgroup in evidence_groups:
id_set = id_sets[evgroup]
if len(id_set) == 0:
continue
if evgroup not in report["ALL"]:
report["ALL"][evgroup] = []
report["ALL"][evgroup].append(term_label + "%" + term_aspect + "%" + term_id + "\t" + "\t".join(id_set))
if evgroup not in report[term_aspect]:
report[term_aspect][evgroup] = []
report[term_aspect][evgroup].append(term_label + "%" + term_aspect + "%" + term_id + "\t" + "\t".join(id_set))
count += 1
if count % 2000 == 0:
print(str(count) + " terms map created...")
print(str(count) + " terms map created...")
# Transforming to text
for aspect in report:
for evgroup in report[aspect]:
report[aspect][evgroup] = "\n".join(report[aspect][evgroup])
return report
def filter_slim(report, terms):
gmt_slim = { }
for aspect in report:
gmt_slim[aspect] = { }
for evgroup in report[aspect]:
gmt_aspect = report[aspect][evgroup]
lines = gmt_aspect.split("\n")
for line in lines:
# test if the line contains any terms of the slim
res = any(ele in line for ele in terms)
if res:
if evgroup not in gmt_slim[aspect]:
gmt_slim[aspect][evgroup] = ""
gmt_slim[aspect][evgroup] += line + "\n"
return gmt_slim
def print_help():
print('\nUsage: python go_gmt.py -g <golr_base_url> -o <output_rep> -s <slim_base_url>\n')
def main(argv):
golr_base_url = ''
output_rep = ''
slim_base_url = ''
if len(argv) < 6:
print_help()
sys.exit(2)
try:
opts, argv = getopt.getopt(argv,"hg:o:s:",["golrurl=","orep=","slim="]) # 'h' included so the -h branch below is reachable
except getopt.GetoptError:
print_help()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_help()
sys.exit()
elif opt in ("-g", "--golrurl"):
golr_base_url = arg
if not golr_base_url.endswith("/"):
golr_base_url = golr_base_url + "/"
elif opt in ("-o", "--orep"):
output_rep = arg
elif opt in ("-s", "--slim"):
slim_base_url = arg
if not slim_base_url.endswith("/"):
slim_base_url = slim_base_url + "/"
if not output_rep.endswith("/"):
output_rep += "/"
if not os.path.exists(output_rep):
os.mkdir(output_rep)
print("\n1 - Creating ontology map...")
ontology_map = create_ontology_map(golr_base_url)
print("Ontology map created with ", len(ontology_map) , " terms")
slims = [ "goslim_agr.obo", "goslim_generic.obo", "goslim_chembl.obo" ]
print("\n2 - Loading ", len(slims), " slims to create the slim-specific GMTs...")
slim_obos = { }
for slim in slims:
response = utils.fetch(slim_base_url + slim)
obo = OBO_Parser(response.text)
slim_obos[slim] = obo
print("Slims loaded: ", len(slim_obos))
# taxa = utils.REFERENCE_GENOME_IDS
taxa = [ "NCBITaxon:9606", "NCBITaxon:10090" ]
print("\n3 - Creating the GMTs for " , len(taxa) , " taxa")
for taxon in taxa:
taxon_id = taxon.split(":")[1]
gmt_taxon = gmt(ontology_map, golr_base_url, taxon)
output = output_rep + taxon_id
for aspect in gmt_taxon:
for evgroup in gmt_taxon[aspect]:
if len(gmt_taxon[aspect][evgroup]) > 0:
utils.write_text(output + "-" + aspect.lower() + "-" + evgroup.lower() + ".gmt", gmt_taxon[aspect][evgroup])
for slim_obo in slim_obos:
oterms = slim_obos[slim_obo].get_terms(TermState.VALID)
terms = oterms.keys()
gmt_taxon_slim = filter_slim(gmt_taxon, terms)
slim_key = slim_obo.replace(".obo", "")
for aspect in gmt_taxon_slim:
for evgroup in gmt_taxon_slim[aspect]:
if len(gmt_taxon_slim[aspect][evgroup]) > 0:
utils.write_text(output + "-" + slim_key + "-" + aspect.lower() + "-" + evgroup.lower() + ".gmt", gmt_taxon_slim[aspect][evgroup])
if __name__ == "__main__":
main(sys.argv[1:])
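# Hedged usage example (URLs are placeholders, not the real endpoints):
#   python go_gmt.py -g http://golr.example.org/solr/ \
#       -o ./gmt-output -s http://ontology.example.org/subsets/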
|
[
"24249870+lpalbou@users.noreply.github.com"
] |
24249870+lpalbou@users.noreply.github.com
|
bdc056c52547f02f6f62705b2a1968cb662b234c
|
15599ccd25ed2a640ecb46518d663bebf49ef970
|
/gridgeo/__init__.py
|
4c0a7d38878bf1d17732e3b3f523fd2ca50c244c
|
[
"MIT"
] |
permissive
|
lizferguson5/gridgeo
|
452dcad56bd456166ac0b560163e467681278e94
|
5a9a8550391b06ef446c439b4b5108fd399a7c40
|
refs/heads/master
| 2020-03-27T12:18:57.350609
| 2018-07-19T19:59:15
| 2018-07-19T19:59:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
from __future__ import absolute_import, division, print_function
from gridgeo.gridgeo import GridGeo
__all__ = [
'GridGeo',
]
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
[
"ocefpaf@gmail.com"
] |
ocefpaf@gmail.com
|
e30245051b9e70ca5dd97f524ae45129745672b2
|
3a54f2007dd18471f589929b5fe66771b280ac4a
|
/Example_15.py
|
28ceb5055e94bd7f6693925307652954c03aab0f
|
[] |
no_license
|
OvchinnikovaNadya/coursePython1
|
512186258394828ca955ce1c5ea00944b4c756ae
|
4d08f46f3134c2dc0cf2902932f99ac266eb347d
|
refs/heads/main
| 2023-04-10T13:11:56.682348
| 2021-04-25T13:29:40
| 2021-04-25T13:29:40
| 360,199,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
# Make an int for which 2 + 2 = 5
class MyInt(int):
def __add__(self,x):
return super().__add__(x+1)
y = MyInt(2)
y += 2
print(y)
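# Hedged addition (not part of the original exercise): __add__ above only
# covers MyInt + x; for x + MyInt to behave the same, the reflected
# operator would be mirrored as well:
# def __radd__(self, x):
#     return self.__add__(x)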
# Make a list that cannot hold more than 10 items
x = [1]
print(type(x))
class MyList(list):
def __init__(self, x):
if len(x) > 10:
raise ValueError('List length cannot exceed 10!')
else:
super().__init__(x)
def append(self, x):
if len(self) == 10:
raise ValueError('List length cannot exceed 10!')
else:
super().append(x)
try:
    y = MyList([1,2,3,4,5,6,7,8,9,10,11])
    print(y)
except ValueError as e:
    print(e) # the constructor rejects lists longer than 10 items
|
[
"ovchinikova-n@mail.ru"
] |
ovchinikova-n@mail.ru
|
ab77e1c7361df9f59c4fd67c62d7caf5fa4abc0c
|
efa03e0895b32591d2debf2dcbc62d13c73a45f1
|
/Projet ISN/Le Projet Final.py
|
11720e5bb1c5db542bf1e56fc67462d9f5dc4e56
|
[] |
no_license
|
AlexisMalletTS2/ProjetISN2020
|
7b94022be785c2a88de8f16932cb5274ef0771d3
|
56885fdf9d43020adb33def571318aeda12b4ec7
|
refs/heads/master
| 2022-08-28T18:09:51.997517
| 2020-05-27T11:28:41
| 2020-05-27T11:28:41
| 266,716,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,602
|
py
|
import tkinter as tk # the program pulls in the functions it needs from the Tkinter library
from tkinter.font import *
from PIL import Image, ImageTk
def deplacement(): # Attempt at a function for moving an image
canvas_dep.move(pirouette7,0,5)
def nouvfen(): # Creation of a new window, wrapped in a function
fenetre.destroy() # Destroy the previous window
global fenetre2
fenetre2 = tk.Tk()
fenetre2.title("Le cercle de l'amitié") # Window title
fenetre2.geometry("481x600") # Window size
fenetre2.minsize(481,600) # Minimum size
fenetre2.maxsize(481,600) # Maximum size
fenetre2.config(bg = "#798081") # Background color
Grille = tk.Canvas(fenetre2,height=480,width=480) # Creation of a grid as a canvas
Grille.pack()
carreau=[[Grille.create_rectangle(i*32,j*32,(i+1)*32,(j+1)*32,fill="#D2CAEC")
for i in range(15)] for j in range(15)]
B_exit = tk.Button (fenetre2, text = "Quitter le Jeu" , command = fenetre2.destroy , activebackground = "#FEA347" , bg = "#A9EAFE" ) # Creation of a button
B_exit.place(x = 380, y = 530)
B_depart = tk.Button (fenetre2, text = "Commencer", command = deplacement, activebackground = "#FEA347" , bg = "#A9EAFE" ) # Creation of a button
B_depart.place(x= 300, y = 530)
L_score = tk.Label(fenetre2, text ="Votre cercle d'Amitié :", fg = "#EE1010", bg = "#798081") # Creation of a label
L_score.place(x = 30, y = 500)
L_TimeAfterQuest = tk.Label(fenetre2, text ="Nombre de coeur à avaler avant votre question :", fg = "#EE1010", bg = "#798081") # Creation of a label
L_TimeAfterQuest.place(x = 30, y = 530)
pirouette1=Image.open("E:/ISN/Projet ISN/1Pirouette32x32.png") # Load an image
photoImage=ImageTk.PhotoImage(pirouette1)
Labelimage2=tk.Label(fenetre2, image=photoImage)
Labelimage2.image = photoImage
Labelimage2.place(x=66,y=66)
Labelimage2.configure(bg="#D2CAEC")
pirouette2=Image.open("E:/ISN/Projet ISN/2Pirouette32x32.png") # Load an image
photoImage=ImageTk.PhotoImage(pirouette2)
Labelimage2=tk.Label(fenetre2, image=photoImage)
Labelimage2.image = photoImage
Labelimage2.place(x=66,y=66)
Labelimage2.configure(bg="#D2CAEC")
pirouette3=Image.open("E:/ISN/Projet ISN/3Pirouette32x32.png") # Load an image
photoImage=ImageTk.PhotoImage(pirouette3)
Labelimage2=tk.Label(fenetre2, image=photoImage)
Labelimage2.image = photoImage
Labelimage2.place(x=66,y=66)
Labelimage2.configure(bg="#D2CAEC")
pirouette4=Image.open("E:/ISN/Projet ISN/4Pirouette32x32.png") # Load an image
photoImage=ImageTk.PhotoImage(pirouette4)
Labelimage2=tk.Label(fenetre2, image=photoImage)
Labelimage2.image = photoImage
Labelimage2.place(x=66,y=66)
Labelimage2.configure(bg="#D2CAEC")
pirouette5=Image.open("E:/ISN/Projet ISN/5Pirouette32x32.png") # Load an image
photoImage=ImageTk.PhotoImage(pirouette5)
Labelimage2=tk.Label(fenetre2, image=photoImage)
Labelimage2.image = photoImage
Labelimage2.place(x=66,y=66)
Labelimage2.configure(bg="#D2CAEC")
pirouette6=Image.open("E:/ISN/Projet ISN/6Pirouette32x32.png") # Load an image
photoImage=ImageTk.PhotoImage(pirouette6)
Labelimage2=tk.Label(fenetre2, image=photoImage)
Labelimage2.image = photoImage
Labelimage2.place(x=66,y=66)
Labelimage2.configure(bg="#D2CAEC")
global pirouette7
pirouette7=Image.open("E:/ISN/Projet ISN/7Pirouette32x32.png") # Load an image
photoImage=ImageTk.PhotoImage(pirouette7)
Labelimage2=tk.Label(fenetre2, image=photoImage)
Labelimage2.image = photoImage
Labelimage2.place(x=66,y=66)
Labelimage2.configure(bg="#D2CAEC")
global canvas_dep
canvas_dep=tk.Canvas(fenetre2,width=26,height=26,bd=1,bg="#D2CAEC")
canvas_dep.pack( padx=66, pady=66)
pirouette7=canvas_dep.create_image(66,66) # canvas image item (no image bound yet; the movement attempt is unfinished)
fenetre2.mainloop()
fenetre = tk.Tk() # Creation of the welcome page
fenetre.title("Bienvenue dans Le Jeu")
fenetre.geometry("481x600")
L = tk.Label(fenetre, text = 'BIENVENUE DANS LE CERCLE DE L AMITIE', fg = 'black',font = 'times')
L.place(x = 15, y = 5)
L.configure(bg = "white")
fenetre.configure(bg = "white")
Bouton1 = tk.Button(fenetre, text = "Commencer la Partie", width = 20, activebackground ="light green",command=nouvfen) # Creation of a button
Bouton1.place (x = 165, y = 175)
Bouton2 = tk.Button(fenetre, text = "Quitter Le Jeu", width = 20, command = fenetre.destroy, activebackground ="red") # Creation of a button
Bouton2.place (x = 165, y = 400)
L1 = tk.Label(fenetre, text = "Conçu par Lavie Florian, Fournier Benjamin, Mallet Alexis")
L1.place(x = 90, y = 550)
L1.configure(bg = "white")
coeur = Image.open("F:\ISN\Projet ISN\coeur.png") # Load an image
photoimage = ImageTk.PhotoImage(coeur)
Labelimage = tk.Label(fenetre, image = photoimage)
Labelimage.image = photoimage
Labelimage.place(x = 130,y = 205)
pirouette1 = Image.open("F:\ISN\Projet ISN\Pirouette1.png") # Load an image
photoimage = ImageTk.PhotoImage(pirouette1)
Labelimage = tk.Label(fenetre, image = photoimage)
Labelimage.image = photoimage
Labelimage.place(x = 53,y = 100)
Labelimage.configure(bg = "white")
pirouette2 = Image.open("F:\ISN\Projet ISN\Pirouette2.png") # Load an image
photoimage = ImageTk.PhotoImage(pirouette2)
Labelimage = tk.Label(fenetre, image = photoimage)
Labelimage.image = photoimage
Labelimage.place(x = 50,y = 300)
Labelimage.configure(bg = "white")
pirouette3 = Image.open("F:\ISN\Projet ISN\Pirouette3.png") # Load an image
photoimage = ImageTk.PhotoImage(pirouette3)
Labelimage = tk.Label(fenetre, image = photoimage)
Labelimage.image = photoimage
Labelimage.place(x = 50,y = 450)
Labelimage.configure(bg = "white")
pirouette4 = Image.open("F:\ISN\Projet ISN\Pirouette4.png") # Load an image
photoimage = ImageTk.PhotoImage(pirouette4)
Labelimage = tk.Label(fenetre, image = photoimage)
Labelimage.image = photoimage
Labelimage.place(x = 230,y = 450)
Labelimage.configure(bg = "white")
pirouette5 = Image.open("F:\ISN\Projet ISN\Pirouette5.png") # Load an image
photoimage = ImageTk.PhotoImage(pirouette5)
Labelimage = tk.Label(fenetre, image = photoimage)
Labelimage.image = photoimage
Labelimage.place(x = 415 ,y = 450)
Labelimage.configure(bg = "white")
pirouette6 = Image.open("F:\ISN\Projet ISN\Pirouette6.png") # Load an image
photoimage = ImageTk.PhotoImage(pirouette6)
Labelimage = tk.Label(fenetre, image = photoimage)
Labelimage.image = photoimage
Labelimage.place(x = 415,y = 300)
Labelimage.configure(bg = "white")
pirouette7 = Image.open("F:\ISN\Projet ISN\Pirouette7.png") # Load an image
photoimage = ImageTk.PhotoImage(pirouette7)
Labelimage = tk.Label(fenetre, image = photoimage)
Labelimage.image = photoimage
Labelimage.place(x = 415,y = 100)
Labelimage.configure(bg = "white")
fenetre.mainloop() # start the main event loop
|
[
"noreply@github.com"
] |
AlexisMalletTS2.noreply@github.com
|
94a8d61a8ba25a18456697292d741887bcf84a6b
|
f660496f040b483dd46d8b154014cf4926ad2b26
|
/robotics_assignments/image_processing_hw/src/image_pub.py
|
c17f9385138d134e2198b8b9ee2866faecabe18c
|
[] |
no_license
|
ashwinj92/Duckiebot-ROS
|
a50224db303823b1418b043f3e3f33c18165c5dd
|
bda21dee9c4e09d4e0b68030ca6a92d46fddd68c
|
refs/heads/master
| 2023-06-22T15:19:16.385936
| 2023-06-11T03:11:12
| 2023-06-11T03:11:12
| 237,695,590
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
#!/usr/bin/env python
import sys
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
if __name__=="__main__":
if len(sys.argv) < 1:
print "ERROR incorrect number of arguments"
print "Usage: %s <image filename>" % sys.argv[0]
exit()
# get the filename from the command line
filename = sys.argv[1]
# initialize our node and create a publisher as normal
rospy.init_node("image_publisher", anonymous=True)
pub = rospy.Publisher("image", Image, queue_size=10)
# we need to instantiate the class that does the CV-ROS conversion
bridge = CvBridge()
#read the image file into an OpenCV image
cv_img = cv2.imread(filename)
# convert to a ROS sensor_msgs/Image
ros_img = bridge.cv2_to_imgmsg(cv_img, "bgr8")
# publish ten times over a second
r = rospy.Rate(10)
while not rospy.is_shutdown():
pub.publish(ros_img)
r.sleep()
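# Hedged usage example (package and file names are illustrative):
#   rosrun your_package image_pub.py test.png
#   rostopic hz /image   # should report roughly 10 Hz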
|
[
"you@example.com"
] |
you@example.com
|
9d4df17a454b8ca07bec82a2a5327d58f338f0f6
|
e0f0d6e574394f2f3de7440fa774c1e6926653fe
|
/si601_project_giantbomb_tssameer.py
|
4ee0134073255a48eb89dce43ccb035a537ce876
|
[] |
no_license
|
sameer-t/Data_Manipulation
|
1fd41c2928527ae7c164f8a94bff114b1e00ac86
|
7e87177053587bdd87e179459ce8307a25dcfc81
|
refs/heads/master
| 2020-04-16T17:52:30.804138
| 2014-10-29T03:49:18
| 2014-10-29T03:49:18
| 25,590,656
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,906
|
py
|
import urllib2, json, re, collections, csv
from time import sleep
access_token = '628bfb5b1daf49082ccce4de40702548fdb8e3d8'
def convert(data):
if isinstance(data, basestring):
return data.encode('utf-8')
elif isinstance(data, collections.Mapping):
return dict(map(convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert, data))
else:
return data
def plat_results(link, platform):
offset = 0
tot_results = 100
limit = 100
results = []
while(offset<tot_results):
response = urllib2.urlopen(link% (access_token, limit, offset,platform))
json_str = response.read()
op = convert(json.loads(json_str))
for r in op['results']:
results.append(r['name'])
# for k in op['results']:
# print k
offset += 100
tot_results = op['number_of_total_results']
print offset
sleep(1)
return results
# #ids for the platforms
# for platform in ['PC','Xbox 360', 'Xbox One', 'PlayStation 3', 'PlayStation 4']:
# response = urllib2.urlopen('http://www.giantbomb.com/api/platforms/?format=json&api_key=%s&format=json&field_list=name,id&filter=name:%s' % (access_token,platform))
# json_str = response.read()
# temp = convert(json.loads(json_str))
# print json.dumps(temp, indent=4, sort_keys=True)
game_setl = []
for p_id in [94, 145, 146]:
game_setl.append(set(plat_results('http://www.giantbomb.com/api/games/?format=json&api_key=%s&field_list=name&limit=%i&offset=%i&filter=original_release_date:2013-1-1 00:00:00|2015-1-1 00:00:00,platforms:%i&sort=original_release_date:asc',p_id)))
cmn_games = list(set.intersection(*game_setl))
print len(cmn_games)
print cmn_games
with open("cmn_games3.csv","w") as op:
out = csv.writer(op)
for val in cmn_games:
out.writerow([val])
|
[
"saisameer.t@gmail.com"
] |
saisameer.t@gmail.com
|
677f77dbd62ba0033b6067106f9fd8d9857e1d18
|
c8cf1bdacdbf6de75e61cc6a2ce8617479c19ec6
|
/test/jit/test_tracer.py
|
1d95dc8d0d8a4bd0fd29a4919b4bd07edc85a3d3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
Afonso-2403/pytorch
|
7f5ddf8370de938045b4ec412b98bef9dfc193ed
|
e35e6237d24b6c96b122deb21f015c0fe3eccb13
|
refs/heads/master
| 2023-08-21T18:43:43.019194
| 2021-09-13T17:58:00
| 2021-09-13T17:58:00
| 363,847,561
| 1
| 0
|
NOASSERTION
| 2021-07-08T19:06:16
| 2021-05-03T07:16:49
|
C++
|
UTF-8
|
Python
| false
| false
| 87,640
|
py
|
import unittest
import io
import os
import sys
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, Function
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.common_utils import suppress_warnings, \
skipIfCompiledWithoutNumpy, enable_profiling_mode_for_profiling_tests, \
IS_SANDCASTLE, TemporaryFileName
from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, \
_tmp_donotuse_dont_inline_everything, _trace, RUN_CUDA, \
RUN_CUDA_MULTI_GPU, make_global
from torch.testing._internal.common_cuda import with_tf32_off
from torch import Tensor
# Standard library
from collections import namedtuple
from itertools import chain
from typing import Dict, List, Optional, Tuple
import warnings
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestTracer(JitTestCase):
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_large_nbr_kernel_args(self):
class Recurrence(nn.Module):
def __init__(self, seq_len):
super(Recurrence, self).__init__()
self.seq_len = seq_len
def forward(self, input):
input = input.transpose(0, 1)
# Main loop
output = []
for i in range(self.seq_len):
b = input[i] * 2
output.append(b)
output = torch.cat(output, 0).view(input.size(0), *output[0].size())
output = output.transpose(0, 1)
return output
input_size = 8
batch_size = 2
seq_len = 130
rec = Recurrence(seq_len)
input = torch.rand(batch_size, seq_len, input_size)
torch.cuda.set_device(0)
rec = rec.cuda()
input = input.cuda()
traced_rec = torch.jit.trace(rec, (input))
def test_trace_legacy_ctor(self):
class MyModule(nn.Module):
def forward(self, x):
return (x + 1, torch.FloatTensor([0]))
traced_rec = torch.jit.trace(MyModule(), torch.randn(2, 2))
def test_simple(self):
x = torch.tensor([0.4], requires_grad=True)
y = torch.tensor([0.7], requires_grad=True)
def f(x, y):
return torch.sigmoid(torch.tanh(x * (x + y)))
self.checkTrace(f, (x, y))
def test_trace_checking_with_global_name(self):
class MyClass(torch.nn.Module):
def __init__(self):
super(MyClass, self).__init__()
def forward(self, xs: List[Tensor]):
y = torch.cat(xs, dim=0)
return y
model = MyClass()
# Simulate these inputs being in the globals, like they would be if,
# e.g. they were defined outermost scope of a script
global input1, input2
input1 = torch.ones(2, 2)
input2 = torch.ones(2, 2)
m2 = torch.jit.trace(model, ((input1, input2),))
def test_trace_aliased_parameter(self):
class M(nn.Module):
def __init__(self, x):
super(M, self).__init__()
self.x = nn.Parameter(x)
def forward(self, y):
return self.x + y
m = M(torch.rand(3, 4))
r = torch.jit.trace(m, m.x)
t2 = torch.rand(3, 4)
self.assertEqual(r(t2), m.x + t2)
def test_trace_nested_fn(self):
class TracedInlineDecision(torch.nn.Module):
def forward(self, x, flag):
@torch.jit.script
def make_decision(flag, x):
if flag:
return x
else:
return torch.zeros_like(x)
x = torch.neg(x)
return make_decision(flag, x)
decision = TracedInlineDecision()
torch.jit.trace(decision, (torch.rand(3, 4), torch.tensor([True], dtype=torch.bool)), check_trace=True)
def test_trace_single_tuple(self):
x = torch.tensor(2.)
def f2(x):
return (x,)
jit_f2 = torch.jit.trace(f2, x)
assert f2(x) == jit_f2(x) # fails
def test_trace_namedtuple(self):
Point = namedtuple('point', ['x', 'y'])
def f(p):
if type(p) is tuple:
p = Point(*p)
return p.x + p.y
p = Point(torch.randn(1), torch.randn(1))
traced = torch.jit.trace(f, (p,))
self.assertEqual(f(p), traced(p))
def test_trace_topk(self):
class M(torch.nn.Module):
def forward(self, x, y):
return x.topk(y, dim=1)[1]
mod = M()
inputs = (torch.randint(0, 10, (20, 20)), torch.tensor(17))
traced_func = torch.jit.trace(mod, inputs)
test_inputs = (torch.randint(0, 9, (9, 9)), torch.tensor(8))
eager_out = mod(*test_inputs)
traced_out = traced_func(*test_inputs)
self.assertNotWarn(lambda: traced_func(*test_inputs), "Shouldn't throw slicing related warn here")
self.assertEqual(eager_out, traced_out)
test_inputs = (torch.randint(0, 50, (50, 50)), torch.tensor(12))
eager_out = mod(*test_inputs)
traced_out = traced_func(*test_inputs)
self.assertNotWarn(lambda: traced_func(*test_inputs), "Shouldn't throw slicing related warn here")
self.assertEqual(eager_out, traced_out)
def test_typeas_trace_check(self):
a = torch.tensor([0.4], requires_grad=True)
b = torch.tensor([0.7], requires_grad=True)
def f(x, y):
return x.type_as(y)
trace = torch.jit.trace(f, (a, b))
def test_trace_index(self):
x = torch.tensor([0.4], requires_grad=True)
y = torch.tensor([0], dtype=torch.int64)
def fn(x, y):
return x[y]
fn_traced = torch.jit.trace(fn, (x, y,))
self.assertEqual(fn(x, y), fn_traced(x, y))
    # Backwards tracing was broken for indexing by a constant,
    # because it's internally implemented using as_strided,
    # and we attempted to trace its derivative (which is not
    # currently supported). It works now because slice() is
    # no longer marked as traceable.
def test_trace_index_constant(self):
x = torch.tensor([0.4], requires_grad=True)
def fn(x):
return x[0]
def run(f):
y = f(x)
grad = torch.autograd.grad(y, x)[0].clone()
return y, grad
traced_fn = torch.jit.trace(fn, torch.ones(1))
self.assertEqual(run(fn), run(traced_fn))
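    # A hedged sketch of the eager autograd behavior relied on by
    # test_trace_index_constant above (assumed): indexing by a constant
    # is differentiable, and grad flows only to the selected element.
    def test_index_constant_grad_sketch(self):
        x = torch.tensor([0.4, 0.6], requires_grad=True)
        y = x[0]
        y.backward()
        self.assertEqual(x.grad, torch.tensor([1., 0.]))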
def test_index_put(self):
ten = torch.zeros(3, 3)
mask = torch.tensor([[True, True, True],
[True, False, False],
[True, True, False]])
def test_fn(ten, mask):
ten[mask] = torch.ones(6)
return ten
traced_test_fn = torch.jit.trace(test_fn, (ten, mask))
ten = torch.rand(3, 3)
self.assertEqual(test_fn(ten, mask), traced_test_fn(ten, mask))
def test_canonicalize_tensor_iterator(self):
x = torch.randn(4, 4)
def f(x):
x = x + 2
x = x - 4
x = x * 6
x = x / 8
return x
traced = torch.jit.trace(f, (x,))
f(x)
graph = traced.graph_for(x)
# There should be 4 int constants for the right sides of operators, plus one
# for the alpha argument for add and sub
self.assertTrue(str(traced.graph_for(x)).count(': int = prim::Constant') == 5)
@suppress_warnings
def test_constant(self):
x = torch.randn(2, 2, requires_grad=True)
def f(x):
return x.matmul(torch.diag(torch.tensor([2., 2.])))
self.checkTrace(f, (x,), (torch.ones(2, 2, requires_grad=True),))
def test_wrapped_number(self):
        # Scalars get converted to 'wrapped' tensors of the default tensor type.
# Wrapped tensors behave differently in certain promotion operations:
# float_tensor * double -> float but wrapped_float * double -> double.
# This can cause issues in check-trace if not handled correctly in
# `aten::isclose()`.
def foobar():
x = -10000.0
result = x * torch.ones(1, dtype=torch.float)
return result
scripted = torch.jit.trace(foobar, (), check_trace=True)
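    # A minimal sketch of the promotion asymmetry described above (assumed
    # default type-promotion behavior, independent of tracing):
    def test_wrapped_number_promotion_sketch(self):
        ft = torch.ones(1, dtype=torch.float)
        # A wrapped Python scalar does not promote the tensor dtype.
        self.assertEqual((ft * 2.0).dtype, torch.float)
        # A genuine double tensor does promote it.
        dt = torch.ones(1, dtype=torch.double)
        self.assertEqual((ft * dt).dtype, torch.double)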
def test_inplace_transplant(self):
x = torch.tensor([0.], requires_grad=True)
def fn(x):
y = x.clone()
y.add_(2)
y.add_(3)
return y
g, _ = torch.jit._get_trace_graph(fn, (x,))
self.run_pass('dce', g)
FileCheck().check_count("aten::clone", 1, exactly=True) \
.check_count("aten::add_", 2, exactly=True) \
.check_next("return").run(str(g))
self.assertExportImport(g, (x,))
def test_inplace_flags(self):
class InplaceFn(Function):
@staticmethod
def forward(ctx, x):
ctx.mark_dirty(x)
return x.add_(1)
@staticmethod
def backward(ctx, go):
return go
class RegularFn(Function):
@staticmethod
def forward(ctx, x):
return x.add(1)
@staticmethod
def backward(ctx, go):
return go
x = torch.tensor([0.], requires_grad=True)
def fn(x):
y = RegularFn.apply(x)
y = InplaceFn.apply(y)
y = InplaceFn.apply(y)
y = RegularFn.apply(y)
return y
trace_graph, _ = torch.jit._get_trace_graph(fn, (x,), _force_outplace=True)
self.run_pass('dce', trace_graph)
ops = list(trace_graph.nodes())
for op in ops:
self.assertTrue(op.hasAttribute('inplace'))
inplace_flags = [False, True, True, False]
for op, is_inplace in zip(ops, inplace_flags):
self.assertEqual(op.i('inplace'), is_inplace)
def test_inplace_check(self):
class MyInplaceFn(Function):
@staticmethod
def forward(self, x):
x.add_(1)
self.mark_dirty(x)
return x
@staticmethod
def backward(self, grad):
return grad
def fn(x):
return MyInplaceFn.apply(x)
x = torch.randn(5, 5)
ge = torch.jit.trace(fn, (x,), _force_outplace=True, check_trace=False)
with self.assertRaisesRegex(RuntimeError, 'inplace MyInplaceFn'):
ge(x)
def test_force_outplace_check_fill(self):
def f(x):
return torch.empty(x.shape).fill_(7)
x = torch.randn(10, 15)
ft = torch.jit.trace(f, x, _force_outplace=True)
self.assertEqual(f(x), ft(x))
def test_force_outplace_check_zero(self):
def f(x):
return torch.empty(x.shape).zero_()
x = torch.randn(10, 15)
ft = torch.jit.trace(f, x, _force_outplace=True)
self.assertEqual(f(x), ft(x))
def do_trace_size(self, requires_grad):
def fn(x):
return x.view(x.shape[1] * 2, x.size(0), 2)
x = torch.randn(5, 2, 4, requires_grad=requires_grad)
y = torch.randn(4, 8, 4, requires_grad=requires_grad)
# Check that it behaves as expected
traced_fn = torch.jit.trace(fn, x)
self.assertEqual(traced_fn(y), fn(y))
self.assertEqual(traced_fn(x), fn(x))
def test_trace_size(self):
self.do_trace_size(False)
# test the different graph_executor path that happens when
# gradients are required and sizes are involved
def test_trace_size_with_grad(self):
self.do_trace_size(True)
def do_trace_arange(self, requires_grad):
def arange(x):
return torch.arange(x.shape[0])
def arange_scalar(x):
return torch.arange(12)
def arange_start_end(x):
return torch.arange(start=x.shape[0], end=x.shape[0] + 5)
x = torch.randn(5, 3, 2, requires_grad=requires_grad)
y = torch.randn(8, 2, 4, requires_grad=requires_grad)
# Check that it behaves as expected
traced_arange = torch.jit.trace(arange, x)
self.assertEqual(traced_arange(y), arange(y))
self.assertEqual(traced_arange(x), arange(x))
traced_arange_scalar = torch.jit.trace(arange_scalar, x)
self.assertEqual(traced_arange_scalar(y), arange_scalar(y))
self.assertEqual(traced_arange_scalar(x), arange_scalar(x))
traced_arange_start_end = torch.jit.trace(arange_start_end, x)
self.assertEqual(traced_arange_start_end(y), arange_start_end(y))
self.assertEqual(traced_arange_start_end(x), arange_start_end(x))
def test_trace_arange(self):
self.do_trace_arange(False)
# test the different graph_executor path that happens when
# gradients are required and sizes are involved
def test_trace_arange_with_grad(self):
self.do_trace_arange(True)
# Test that a trace of torch.full(x.shape) doesn't store the shape as a constant
def test_trace_full_dynamic_shape(self):
def full_with_shape_like(x):
return torch.full(x.shape, 2.)
x = torch.randn(3, 4)
ge = torch.jit.trace(full_with_shape_like, example_inputs=x)
y = torch.randn(2, 7)
self.assertEqual(ge(y).shape, y.shape)
self.assertEqual(ge(x).shape, x.shape)
# Test that the trace of setitem doesn't store shapes as constants
# Fix https://github.com/pytorch/pytorch/issues/43548
def test_trace_slice_setitem_dynamic_shape(self):
def slice_setitem(x, y):
x[:, 2] = y + 1
return x
x = torch.randn(3, 4)
traced = torch.jit.trace(slice_setitem, (x, x[:, 0]))
x = torch.randn(10, 5)
self.assertEqual(traced(x.clone(), x[:, 0]), slice_setitem(x.clone(), x[:, 0]))
    # Suppression: we are intentionally slicing a tensor; we don't care that it
# will be constantified
@suppress_warnings
def do_trace_slice(self, requires_grad):
def slice(x):
results = []
for i in range(4):
results.append(x[:x.size(0) - i, i:x.size(2), i:3])
return tuple(results)
def slice_select(x):
results = []
for i in range(4):
results.append(x[:, i:, x.size(2) - 5])
return tuple(results)
x = torch.randn(5, 6, 7, requires_grad=requires_grad)
y = torch.randn(7, 8, 9, requires_grad=requires_grad)
# Check that it behaves as expected
traced_slice = torch.jit.trace(slice, x)
self.assertEqual(traced_slice(y), slice(y))
self.assertEqual(traced_slice(x), slice(x))
traced_slice_select = torch.jit.trace(slice_select, x)
self.assertEqual(traced_slice_select(y), slice_select(y))
self.assertEqual(traced_slice_select(x), slice_select(x))
def test_trace_slice(self):
self.do_trace_slice(False)
# test the different graph_executor path that happens when
# gradients are required and sizes are involved
def test_trace_slice_with_grad(self):
self.do_trace_slice(True)
def test_trace_casts(self):
casts = [
lambda x: x.byte(),
lambda x: x.float(),
lambda x: x.cpu(),
lambda x: x.to(device='cpu'),
lambda x: x.to(dtype=torch.int64),
lambda x: x.to(device='cpu', dtype=torch.float),
lambda x: x.to(x)
]
def assertContainsCast(trace):
self.assertEqual(sum(n.kind() == 'aten::to' for n in trace.graph.nodes()), 1)
for cast in casts:
trace = torch.jit.trace(cast, torch.randn(2, 2))
assertContainsCast(trace)
x = torch.randn(2, 2)
self.assertEqual(trace(x), cast(x))
def to_tensor(x, y):
return x.to(y)
to_tensor_trace = torch.jit.trace(to_tensor, (torch.randn(2, 2), torch.randn(1, 8)))
assertContainsCast(to_tensor_trace)
x, y = torch.randn(2, 2), torch.randn(1, 10)
self.assertEqual(to_tensor_trace(x, y), to_tensor(x, y))
@skipIfCompiledWithoutNumpy
def test_trace_warn(self):
def fn(x):
int(x) # Warning 1.
y = x * 1
if y: # Warning 2.
pass
q = [x, x * 4]
z = q[y]
float(z) # Warning 3.
z.tolist() # Warning 4.
z.numpy() # Warning 5.
for _ in torch.ones(4, 4): # Warning 6.
pass
return z + 4
with warnings.catch_warnings(record=True) as warns:
traced_fn = torch.jit.trace(fn, torch.tensor([1]))
for warn in warns:
self.assertIs(warn.category, torch.jit.TracerWarning)
warns = [str(w.message) for w in warns]
self.assertIn('a Python integer', warns[0])
self.assertIn('a Python boolean', warns[1])
self.assertIn('a Python float', warns[2])
self.assertIn('a Python list', warns[3])
self.assertIn('a NumPy array', warns[4])
self.assertIn('Iterating over', warns[5])
def test_trace_tuple(self):
def fn(x, y):
return x, (x * y[1], x * y[0])
x, y = torch.randn(2, 2), (torch.ones(2, 2), torch.randn(2, 2))
traced_fn = torch.jit.trace(fn, (x, y))
self.assertEqual(traced_fn(x, y), fn(x, y))
# should be a tuple nested within another tuple
FileCheck().check_count("prim::TupleConstruct", 2, exactly=True).check_next("return") \
.run(str(traced_fn.graph))
self.assertExportImport(traced_fn.graph, (x, y))
def test_trace_random(self):
def f(mean, std):
return torch.normal(mean, std)
traced = torch.jit.trace(f, (torch.zeros(2, 3), torch.ones(2, 3)), check_trace=False)
mean, std = torch.zeros(5, 5), torch.ones(5, 5)
with torch.random.fork_rng(devices=[]):
output = f(mean, std)
traced_output = traced(mean, std)
self.assertEqual(output, traced_output)
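    # A hedged sketch of the RNG isolation used above (assumed semantics of
    # torch.random.fork_rng): state changes inside the block are rolled back
    # on exit, so two successive forked blocks draw identical samples.
    def test_fork_rng_isolation_sketch(self):
        with torch.random.fork_rng(devices=[]):
            a = torch.randn(3)
        with torch.random.fork_rng(devices=[]):
            b = torch.randn(3)
        self.assertEqual(a, b)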
def test_trace_tensor_factory(self):
def run(**kwargs):
inputs_require_grads = kwargs.pop('inputs_require_grads', True)
def fn(x):
return x + torch.ones(2, 3, **kwargs)
input_kwargs = kwargs.copy()
if 'out' in input_kwargs:
del input_kwargs['out']
input = torch.ones(2, 3, **input_kwargs)
self.checkTrace(fn, (input,), inputs_require_grads=inputs_require_grads)
# check we recorded 'ones' and did not just record a constant
tfn = torch.jit.trace(fn, input)
self.assertTrue("ones" in str(tfn.graph))
run()
run(dtype=torch.int, inputs_require_grads=False)
run(out=torch.tensor([]))
if RUN_CUDA:
run(device="cuda:0")
if RUN_CUDA_MULTI_GPU:
run(device="cuda:1")
def test_trace_indexed_assignment(self):
def stuff(x, y):
x = x.clone()
x[0] = y
return x
example = torch.rand(3, 4)
self.checkTrace(stuff, (example, example[0] + 1))
# TODO: implement
@unittest.expectedFailure
def test_output_unflatten(self):
"""Check that outputs of traced functions retain the original structure and nesting"""
def fn(x):
return (x * 2, (x ** 2, x + 4, (x + 2,), ), x * 4)
self.checkTrace(fn, (torch.randn(2, 2),))
def test_input_flatten(self):
"""Check that inputs to traced functions are flattened"""
def fn(x, t):
y, z = t
return x * y * z
inputs = (torch.randn(1), (torch.randn(1), torch.randn(1)))
self.checkTrace(fn, inputs)
def test_input_dict_empty(self):
def test(d):
pass
with self.assertRaises(RuntimeError):
self.checkTrace(test, {})
def test_input_dict_remembers_keys(self):
"""Check that the trace remembers which keys were in a dict input"""
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
def forward(self, dict_input):
return dict_input['x']
input_1 = {'x': torch.tensor(1)}
m = TestModule()
m_traced = torch.jit.trace(m, (input_1, ))
self.assertEqual(m_traced(input_1), torch.tensor(1))
        # changing the values (but not the keys) should work
input_same_key_different_value = {'x': torch.tensor(2)}
self.assertEqual(m_traced(input_same_key_different_value), torch.tensor(2))
# error to use something that doesn't have `x`
input_different_key = {'y': torch.tensor(3)}
with self.assertRaises(RuntimeError):
m_traced(input_different_key)
# it's okay to have additional elements in the dictionary, so long as 'x' is there
input_additional_key = {'x': torch.tensor(4), 'y': torch.tensor(3)}
self.assertEqual(m_traced(input_additional_key), torch.tensor(4))
def test_input_dict_insertion_order(self):
"""Check that dictionary access doesn't care about insertion order"""
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
def forward(self, dict_input):
return dict_input['x'], dict_input['y']
input_x_then_y = {}
input_x_then_y['x'] = torch.tensor(1)
input_x_then_y['y'] = torch.tensor(2)
m = TestModule()
m_traced = torch.jit.trace(m, (input_x_then_y, ))
self.assertEqual(m_traced(input_x_then_y), (torch.tensor(1), torch.tensor(2)))
input_y_then_x = {}
input_y_then_x['y'] = torch.tensor(4)
input_y_then_x['x'] = torch.tensor(3)
self.assertEqual(m_traced(input_y_then_x), (torch.tensor(3), torch.tensor(4)))
def test_input_dict_recursive(self):
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
def forward(self, dict_input):
return dict_input['x'][1]
input_1 = {'x': {1: torch.tensor(1)}}
m = TestModule()
m_traced = torch.jit.trace(m, (input_1, ))
input_2 = {'x': {1: torch.tensor(2)}}
self.assertEqual(m_traced(input_2), torch.tensor(2))
def test_input_dict_checkTrace_mut(self):
def test(d):
d['x'].tanh_()
return d['x']
inputs = {'x': torch.rand(3, 4), 'y': torch.rand(3, 4)}
self.checkTrace(test, (inputs,), inputs_require_grads=False)
def test_input_dict_unify(self):
def test(d):
return d['int'], d['float']
inputs = {'int': torch.ones((2, 2), dtype=torch.int32),
'float': torch.ones((2, 2), dtype=torch.float32)}
self.checkTrace(test, (inputs,), inputs_require_grads=False)
def test_input_tuple_of_dicts(self):
def test(t):
d = t[0]
return d['x']['y']
inputs = {'x': {'y': torch.rand(2, 3)}}
self.checkTrace(test, ((inputs, inputs),), allow_unused=True)
def test_input_dict_of_dicts(self):
def test(d):
return d['x']['y']
nested_input = {'y': torch.rand(2, 3)}
unified_nested = {'y': torch.rand(3, 2)}
inputs = {'x': nested_input, 'force_unify': unified_nested}
self.checkTrace(test, (inputs,), allow_unused=True)
def test_input_dict_of_lists(self):
def test(d):
return d['x'][0]
inputs = {'x': [torch.rand(3, 2)]}
self.checkTrace(test, (inputs,))
def test_input_list_toplevel_flatten(self):
def test(t1, t2):
return torch.add(t1, t2)
inputs = [torch.ones(2, 2), torch.rand(2, 2)]
self.checkTrace(test, inputs)
def test_input_list_toplevel_flatten_direct(self):
class Test(torch.nn.Module):
def forward(self, t1, t2):
return torch.add(t1, t2)
inputs = [torch.ones(2, 2), torch.rand(2, 2)]
torch.jit.trace(Test(), inputs)
def test_input_list_of_tuples(self):
def test(l):
return l[0][0]
inputs = [(torch.ones(2, 2),)]
self.checkTrace(test, (inputs,))
def test_input_dict_empty_list(self):
def test(d):
pass
inputs = {1: []}
with self.assertRaisesRegex(RuntimeError, 'List trace'):
self.checkTrace(test, (inputs,))
def test_input_list_mixed_type(self):
def test(d):
pass
inputs = [torch.rand(2, 3), (torch.ones(2), torch.ones(2))]
with self.assertRaisesRegex(RuntimeError, 'consistent'):
self.checkTrace(test, (inputs,))
def test_conv(self):
x = torch.ones(20, 16, 50, 40)
g, outputs, inputs = torch.jit._get_trace_graph(nn.Conv2d(16, 13, 3, bias=False), x, return_inputs=True)
m = self.createFunctionFromGraph(g)
self.assertEqual(outputs, m(*inputs))
def test_max_pool(self):
x = torch.rand(20, 16, 10, 10)
def max_pool2d(x):
return F.max_pool2d(x, 2) + 2
        trace = torch.jit.trace(max_pool2d, (x,))
graph = trace.graph_for(x)
FileCheck().check("aten::max_pool2d(").run(graph)
self.assertEqual(max_pool2d(x), trace(x))
def test_nested_inplace(self):
x = torch.randn(2, 2)
g, outputs, inputs = torch.jit._get_trace_graph(
lambda x: F.threshold(x, 0, 0, inplace=True), (x, ), return_inputs=True)
m = self.createFunctionFromGraph(g)
self.assertEqual(outputs, m(*inputs))
FileCheck().check("threshold_").run(str(g))
self.assertExportImport(g, (x,))
def test_repeated_input(self):
def fn(a, b):
return a + b
ge = self.checkTrace(fn, [torch.randn(2, 2)] * 2)
inputs = set(ge.graph.inputs())
        # three instead of two because the export/import in checkTrace adds a
# `self` module argument
self.assertTrue(len(inputs) == 3)
def test_repeated_output(self):
def fn(a, b):
z = a + b
return z, z
ge = self.checkTrace(fn, [torch.randn(2, 2) for _ in range(2)])
tuple_output = list(ge.graph.outputs())[0]
tuple_inputs = list(tuple_output.node().inputs())
self.assertTrue(tuple_inputs[0] == tuple_inputs[1])
def test_inplace_copy(self):
x = torch.randn(4, 4, requires_grad=True)
def f(x):
out = torch.zeros(x.size())
out.copy_(x)
return out
g, outputs, inputs = torch.jit._get_trace_graph(f, (x, ), return_inputs=True)
self.run_pass('dce', g)
m = self.createFunctionFromGraph(g)
self.assertEqual(outputs, m(*inputs))
self.assertExportImport(g, (x,))
def test_inplace_copy_force_outplace(self):
x = torch.randn(4, 4, requires_grad=True)
def f(x):
out = torch.zeros(x.size())
out.copy_(x)
return out
g, outputs, inputs = torch.jit._get_trace_graph(
f, (x, ), return_inputs=True, _force_outplace=True)
self.run_pass('dce', g)
m = self.createFunctionFromGraph(g)
self.assertEqual(outputs, m(*inputs))
self.assertExportImport(g, (x,))
FileCheck().check("expand_as").run(str(g))
def test_shared_param(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.b = self.a = nn.Parameter(torch.randn(2, 2))
def forward(self, x):
return x * self.a + self.b
m = MyModule()
g, _ = torch.jit._get_trace_graph(m, (torch.randn(2, 2),))
self.run_pass('dce', g)
self.assertEqual(len(list(g.inputs())), 2)
FileCheck().check("mul").check("add").run(str(g))
def test_trace_c10_ops(self):
try:
_ = torch.ops._caffe2.GenerateProposals
except RuntimeError:
self.skipTest("Skip the test since c2 ops are not registered.")
class MyModel(torch.nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, scores, bbox_deltas, im_info, anchors):
a, b = torch.ops._caffe2.GenerateProposals(
(scores), (bbox_deltas), (im_info), (anchors),
2.0, 6000, 300, 0.7, 16, True, -90, 90, 1.0, True,
)
return a, b
model = MyModel()
A = 4
H = 10
W = 8
img_count = 3
scores = torch.ones(img_count, A, H, W, dtype=torch.float32)
bbox_deltas = torch.linspace(0, 10, steps=img_count * 4 * A * H * W,
dtype=torch.float32)
bbox_deltas = bbox_deltas.view(img_count, 4 * A, H, W)
im_info = torch.ones(img_count, 3, dtype=torch.float32)
anchors = torch.ones(A, 4, dtype=torch.float32)
inputs = (scores, bbox_deltas, im_info, anchors)
traced_model = torch.jit.trace(model, inputs)
self.assertEqual(traced_model(*inputs), model(*inputs))
self.assertExportImportModule(traced_model, (scores, bbox_deltas, im_info, anchors))
def run_ge_tests(self, optimize, use_cuda):
with enable_profiling_mode_for_profiling_tests():
with torch.jit.optimized_execution(optimize):
def rand(*args):
t = torch.rand(*args).float()
if use_cuda:
t = t.cuda()
return t
self.checkTrace(lambda a, b: a * b + b,
[rand(1), rand(1)], [rand(2, 3), rand(2, 3)])
# trivial identity
self.checkTrace(lambda a, b: (b, a), [rand(1), rand(1)])
def foo(a):
t = a * a
return t * t, 4 * t
self.checkTrace(foo, [rand(1)])
# unused input
self.checkTrace(
lambda a, b: a * a, [rand(1), rand(1)], allow_unused=True)
# test outputs that do not get used in grad
self.checkTrace(foo, [rand(1)], drop=1)
# test autograd fallback
self.checkTrace(lambda a, b: a * b /
(a - 2 * b) + b, [rand(1), rand(1)])
def test_ge_unoptimized(self):
self.run_ge_tests(False, False)
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser support for Sandcastle")
@enable_cpu_fuser
def test_ge_optimized(self):
with enable_profiling_mode_for_profiling_tests():
self.run_ge_tests(True, False)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_ge_cuda(self):
self.run_ge_tests(True, True)
# more manual test of graph executor that can be used as a scratchpad
def test_ge(self):
def foo(a, b):
return a * b / (a - b) + b
V = Variable
a, b = V(torch.rand(1)), V(torch.rand(1))
ge = torch.jit.trace(foo, (a, b))
a, b = V(torch.rand(1), requires_grad=True), V(
torch.rand(1), requires_grad=True)
r, = ge(a, b)
da, db = torch.autograd.grad(r + 3, [a, b], create_graph=True)
l2 = (da * db + db * db)
g2result = torch.autograd.grad(l2, [da, db])
r = foo(a, b)
da2, db2 = torch.autograd.grad(r + 3, [a, b], create_graph=True)
self.assertEqual(da, da2)
self.assertEqual(db, db2)
l3 = (da2 * db2 + db2 * db2)
g2result2 = torch.autograd.grad(l3, [da2, db2])
self.assertEqual(g2result, g2result2)
def test_trace_annotation(self):
@_trace(torch.rand(1))
def foo(a):
return a + a + a
x = torch.randn(5, 5)
self.assertEqual(foo(x), x + x + x)
@unittest.skipIf(not RUN_CUDA, "calls .cuda()")
# By default, on Ampere or later GPUs, nn.Linear computes float tensors at TF32 precision.
    # We turn TF32 off here so that float tensors are computed at the full
    # default (float32) precision.
@with_tf32_off
def test_traced_module_cuda(self):
class Model(nn.Module):
def __init__(self, num_features, num_layers):
super(Model, self).__init__()
self.num_layers = num_layers
layers = [[nn.Linear(num_features, num_features), nn.Sigmoid()]
for _ in range(num_layers)]
self.submodule = nn.Sequential(*chain(*layers))
def forward(self, x):
for i in range(self.num_layers):
x = self.submodule[i](x) + x
return x
model = Model(5, 3)
x = torch.randn(2, 5)
traced_model = torch.jit.trace(model, x)
# We're missing some attributes these modules had initially. Make sure we can
# still get the __repr__()
model.__repr__()
# XXX: indexing sequentials is broken
linear_submodule = next(iter(traced_model.submodule._modules.values()))
# All attributes that aren't parameters should raise
with self.assertRaises(AttributeError):
linear_submodule.in_features
linear_submodule.weight
linear_submodule.weight = nn.Parameter(torch.randn(linear_submodule.weight.shape))
with self.assertRaises(RuntimeError):
del linear_submodule.weight
# Submodules can't be called
with self.assertRaises(RuntimeError):
linear_submodule(x)
# Type casts
linear_submodule.cuda()
traced_model.float().cuda()
cuda_out = traced_model(x.float().cuda())
traced_model.cpu()
cpu_out = traced_model(x.float())
self.assertEqual(cpu_out, cuda_out)
traced_model.to('cuda')
cuda_out = traced_model(x.float().cuda())
traced_model.to('cpu')
cpu_out = traced_model(x.float())
self.assertEqual(cpu_out, cuda_out)
traced_model.double()
# state_dict + load_state_dict
state = {k: v.clone() for k, v in traced_model.state_dict().items()}
new_state = {k: v.clone().fill_(1) for k, v in state.items()}
out = traced_model(x)
traced_model.load_state_dict(new_state)
out_ones = traced_model(x)
traced_model.load_state_dict(state)
out_state = traced_model(x)
self.assertEqual(out, out_state)
self.assertNotEqual(out, out_ones)
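    # A hedged sketch of what the @with_tf32_off decorator above is assumed
    # to do: flip the TF32 matmul backend flag off for the duration of the
    # test body and restore it afterwards.
    def test_tf32_flag_toggle_sketch(self):
        prev = torch.backends.cuda.matmul.allow_tf32
        torch.backends.cuda.matmul.allow_tf32 = False
        try:
            self.assertFalse(torch.backends.cuda.matmul.allow_tf32)
        finally:
            torch.backends.cuda.matmul.allow_tf32 = prev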
def test_export_no_reorder(self):
def func(a, b):
return a * b / (a - 2 * b) + b
recording_inputs = [torch.tensor([0.55619788169860839844], dtype=torch.float32, requires_grad=True),
torch.tensor([0.25947844982147216797], dtype=torch.float32, requires_grad=True)]
ge1 = torch.jit.trace(func, recording_inputs)
ge2 = self.getExportImportCopy(ge1)
outputs_ge1 = ge1(*recording_inputs)
outputs_ge2 = ge2(*recording_inputs)
grad_ge1 = torch.autograd.grad(outputs_ge1, recording_inputs)
grad_ge2 = torch.autograd.grad(outputs_ge2, recording_inputs)
self.assertTrue(outputs_ge1 == outputs_ge2)
self.assertTrue(grad_ge1 == grad_ge2)
def test_python_function(self):
class MyFn(Function):
@staticmethod
def forward(ctx, x):
return x + 1
@staticmethod
def backward(ctx, grad_output):
return grad_output
@_trace(torch.zeros(2))
def fn(x):
return MyFn.apply(x + 2) + 3
x = torch.tensor([1., 2., 3.])
y = torch.randn(2, 2, requires_grad=True)
fn(x)
fn(y)
def test_python_function_tup(self):
class MyFn(Function):
@staticmethod
def forward(ctx, x):
return x + 1, x - 1
@staticmethod
def backward(ctx, grad_output):
return grad_output, grad_output
@_trace(torch.zeros(2))
def fn(x):
a, b = MyFn.apply(x + 2)
return a + b + 3
x = torch.tensor([1., 2., 3.])
y = torch.randn(2, 2, requires_grad=True)
fn(x)
fn(y)
def test_trace_detach(self):
def foo(x, w):
return torch.matmul(x, w).detach()
traced = torch.jit.trace(foo, (torch.rand(3, 4), torch.rand(4, 5)))
FileCheck().check("matmul").check("detach").run(str(traced.graph))
x, w = torch.rand(3, 4), torch.rand(4, 5, requires_grad=True)
traced_result = traced(x, w)
self.assertEqual(foo(x, w), traced_result)
self.assertFalse(traced_result.requires_grad)
self.assertIsNone(traced_result.grad_fn)
def test_trace_detach_redispatch(self):
def foo(x, w):
y = torch.matmul(x, w)
assert y.requires_grad
y = y.detach()
# Make sure trace kernel redispatches to the right lower kernel.
assert not y.requires_grad
return y
x, w = torch.rand(3, 4), torch.rand(4, 5, requires_grad=True)
        # With `check_trace=True`, the check would run under `torch.no_grad()` and break the asserts.
torch.jit.trace(foo, (x, w), check_trace=False)
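    # A hedged sketch of why the asserts above would break under
    # check_trace=True (assumed autograd behavior): under no_grad, op
    # outputs do not require grad even when their inputs do.
    def test_no_grad_requires_grad_sketch(self):
        w = torch.rand(2, 2, requires_grad=True)
        with torch.no_grad():
            y = w * 2
        self.assertFalse(y.requires_grad)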
def test_trace_detach_inplace(self):
def foo(x, w):
y = torch.matmul(x, w)
y.detach_()
return y
traced = torch.jit.trace(foo, (torch.rand(3, 4), torch.rand(4, 5)))
FileCheck().check("matmul").check("detach(").run(str(traced.graph))
x, w = torch.rand(3, 4), torch.rand(4, 5, requires_grad=True)
traced_result = traced(x, w)
self.assertEqual(foo(x, w), traced_result)
self.assertFalse(traced_result.requires_grad)
self.assertIsNone(traced_result.grad_fn)
def test_trace_detach_inplace_redispatch(self):
def foo(x, w):
y = torch.matmul(x, w)
assert y.requires_grad
y.detach_()
# Make sure trace kernel redispatches to the right lower kernel.
assert not y.requires_grad
return y
x, w = torch.rand(3, 4), torch.rand(4, 5, requires_grad=True)
        # With `check_trace=True`, the check would run under `torch.no_grad()` and break the asserts.
torch.jit.trace(foo, (x, w), check_trace=False)
def test_trace_detach_onnx_erase(self):
class Mod(torch.nn.Module):
def forward(self, x, w):
return torch.matmul(x, w).detach()
f = io.BytesIO()
torch.onnx.export_to_pretty_string(
Mod(), (torch.rand(3, 4), torch.rand(4, 5)), f)
def test_trace_slice_full_dim(self):
def foo(x):
return x[0:5, 0] + 1.0
traced = torch.jit.trace(foo, (torch.rand(5, 4),))
test_x = torch.rand(6, 3)
self.assertEqual(foo(test_x), traced(test_x))
def test_trace_dict_input(self):
class Bar(torch.nn.Module):
def __init__(self):
super(Bar, self).__init__()
self.foo = Foo()
def forward(self, a, b):
return self.foo({'a': a, 'b': b})['a']
class Foo(torch.nn.Module):
def forward(self, x):
return {'a': x['a'] * x['b']}
x = (torch.rand(3), torch.rand(3))
model = Bar()
self.checkTrace(model, x)
def test_trace_dict_output(self):
class TraceDictStrTensor(torch.nn.Module):
def forward(self, a, b):
return {'a': a, 'b': b}
class TraceDictTensorTensor(torch.nn.Module):
def forward(self, a, b):
return {a: b, b: a}
x = (torch.rand(3), torch.rand(3))
with self.assertRaisesRegex(RuntimeError, r"Encountering a dict at the output"):
torch.jit.trace(TraceDictStrTensor(), x)
traced_dict_str_mod = torch.jit.trace(TraceDictStrTensor(), x, strict=False)
self.assertEqual(traced_dict_str_mod(*x), {'a': x[0], 'b': x[1]})
traced_dict_tensor_mod = torch.jit.trace(TraceDictTensorTensor(), x, strict=False)
self.assertEqual(traced_dict_tensor_mod(*x), {x[0]: x[1], x[1]: x[0]})
def test_trace_with_tensor_list_output(self):
def f():
return [torch.zeros(1), torch.zeros(5)]
with self.assertWarnsRegex(torch.jit.TracerWarning, "cause the trace to be incorrect"):
torch.jit.trace(f, [])
traced_non_strict_f = torch.jit.trace(f, [], strict=False)
self.assertEqual(traced_non_strict_f(), f())
def test_trace_with_number_list_output(self):
def f():
return [1, 5]
with self.assertRaisesRegex(RuntimeError, r"Only tensors.+can be output from traced functions"):
traced_f = torch.jit.trace(f, [])
def test_trace_with_nested_tensor_list_output(self):
def f():
return [[torch.zeros(1)], [torch.zeros(5)]]
with self.assertRaisesRegex(RuntimeError, r"Only tensors.+can be output from traced functions"):
traced_f = torch.jit.trace(f, [])
def test_trace_variable_instantiation(self):
def random_foo(x):
return Variable(Variable(x) + 1.0)
random_foo_traced = torch.jit.trace(random_foo, (torch.rand(3, 4),))
x = torch.rand(5, 6)
self.assertEqual(random_foo(x), random_foo_traced(x))
def test_trace_slice_expr_complete_type(self):
def random_foo(x):
return x + 1.0
random_foo_traced = torch.jit.trace(random_foo, (torch.rand(3, 4),))
@torch.jit.script
def random_bar(x):
return random_foo_traced(x)[0:1]
x = torch.rand(3, 4)
self.assertEqual(random_bar(x), (x + 1)[0:1])
def test_trace_inline_shape(self):
        # test that the peephole optimization turns the size() call into a
        # constant in the scripted fn
@torch.jit.script
def tensor_size(x: torch.Tensor) -> torch.Tensor:
return torch.tensor([x.size()[0]])
self.assertEqual(
tensor_size(torch.rand(15,)),
torch.tensor([15])
)
traced_tensor_size = torch.jit.trace(tensor_size, torch.rand(7,))
self.assertEqual(
traced_tensor_size(torch.rand(15,)),
torch.tensor([15])
)
@torch.jit.script
def use_device(x):
return torch.zeros_like(x, device=x.device)
def foo(x):
return use_device(x)
traced_tensor_size = torch.jit.trace(foo, torch.rand(7,))
self.run_pass('inline', traced_tensor_size.graph)
FileCheck().check("prim::device").run(traced_tensor_size.graph)
def test_trace_save(self):
def fn(x):
return x + 2
def check(func):
with TemporaryFileName() as fname:
func.save(fname)
loaded = torch.jit.load(fname)
input = torch.randn(2, 2)
self.assertEqual(func(input), loaded(input))
out = torch.jit.trace(fn, (torch.ones(2, 2),))
check(out)
    def test_trace_optional_dtype(self):
class Test(torch.nn.Module):
def forward(self):
return torch.arange(5)
traced = torch.jit.trace(Test(), ())
        self.assertTrue(torch.allclose(traced(), Test()()))
def test_trace_save_load_copy(self):
class Test(torch.nn.Module):
def __init__(self):
super(Test, self).__init__()
self.conv = torch.nn.Conv2d(3, 3, 3)
def forward(self, x):
return self.conv(x)
traced = torch.jit.trace(Test(), torch.rand(1, 3, 224, 224))
buffer = io.BytesIO()
torch.jit.save(traced, buffer)
buffer.seek(0)
loaded = torch.jit.load(buffer)
# should work
copy.copy(loaded)
copy.deepcopy(loaded)
def test_trace_export_fns(self):
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.a = 3
@torch.jit.export
def __getstate__(self):
return (3, self.training)
@torch.jit.export
def __setstate__(self, state):
self.a = state[0]
self.training = state[1]
def forward(self, x):
return x + self.a
f = Foo()
traced = torch.jit.trace(f, (torch.rand(3, 4),))
expected_names = ['__getstate__', '__setstate__']
def check(mod):
self.assertTrue(all(name in mod._c._method_names() for name in expected_names))
check(traced)
imported = self.getExportImportCopy(traced)
check(imported)
def test_trace_export_fns_recursive(self):
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.a = 3
@torch.jit.export
def __getstate__(self):
return (3, self.training)
@torch.jit.export
def __setstate__(self, state):
self.a = state[0]
self.training = state[1]
def forward(self, x):
return x + self.a
class Wrapper(torch.nn.Module):
def __init__(self):
super(Wrapper, self).__init__()
self.foo = Foo()
def forward(self, x):
return self.foo(x)
f = Wrapper()
traced = torch.jit.trace(f, (torch.rand(3, 4),))
expected_names = ['__getstate__', '__setstate__']
def check(mod):
self.assertTrue(all(name in mod._c._method_names() for name in expected_names))
check(traced.foo)
imported = self.getExportImportCopy(traced)
check(imported.foo)
# Note that Bar's forward can only be traced, but not scripted
class Bar(nn.Module):
def __init__(self):
super().__init__()
@torch.jit.export
def addTwo(self, x):
return x + 2
def forward(self, input):
return (lambda a: a + 1)(input)
        # When tracing Bar as a submodule, we only want to script the
        # exported methods, while keeping forward traced.
class WrapperExports(torch.nn.Module):
def __init__(self):
super(WrapperExports, self).__init__()
self.bar = Bar()
@torch.jit.export
def addOne(self, x):
return x + 1
def forward(self, x):
return self.bar(x)
f = WrapperExports()
traced = torch.jit.trace(f, (torch.rand(3, 4),))
expected_names = ['addOne']
check(traced)
def test_trace_autograd_function(self):
class TestFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return torch.neg(input)
@staticmethod
def backward(ctx, grad_output):
return torch.neg(grad_output)
class TracedModule(torch.nn.Module):
def forward(self, x):
return torch.relu(TestFunc.apply(x))
class Wrapper(torch.nn.Module):
def __init__(self):
super(Wrapper, self).__init__()
self.tm = TracedModule()
def forward(self, x):
return self.tm(x)
traced = torch.jit.trace(Wrapper(), (torch.rand(3, 4),))
def test_trace_multi_output_function(self):
# An autograd.Function with two outputs.
# It swaps inputs so we can check if shape
# handling is correct in TorchScript.
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
return y, x
@staticmethod
def backward(ctx, du, dv):
return dv, du
class Bar(torch.nn.Module):
def forward(self, x, y):
x = x.relu()
y = y.relu()
z = Foo.apply(x, y)
return z
x = torch.rand(3, 2, dtype=torch.double)
y = torch.rand(1, 2, dtype=torch.double)
# Generate JIT IR.
traced = torch.jit.trace(Bar(), (x, y))
print(traced.graph)
# Expected output schema of the custom autograd.Function.
schema = '(Double(1, 2, strides=[2, 1], requires_grad=0, device=cpu), '\
'Double(3, 2, strides=[2, 1], requires_grad=0, device=cpu)) '\
'= ^Foo'
# See if expected schema exists.
FileCheck().check(schema).run(traced.graph)
# Also examine if the graph is runnable and produces
# the right result.
u, v = traced(x, y)
self.assertEqual(u, y)
self.assertEqual(v, x)
def test_interpolate_trace(self):
class test(nn.Module):
def __init__(self):
super(test, self).__init__()
self.conv = nn.Conv2d(1, 32, kernel_size=3, padding=1)
def forward(self, x):
y = self.conv(x)
w = nn.functional.interpolate(y, mode='bilinear', align_corners=False, scale_factor=3)
return w
f = test()
# no failure
g = torch.jit.trace(f, (torch.zeros(1, 1, 28, 28),))
x = torch.zeros(1, 1, 14, 14)
# constants not baked in
self.assertEqual(g(x), f(x))
@_tmp_donotuse_dont_inline_everything
def test_trace_optional(self):
@torch.jit.script
def test(x: Optional[Tensor]):
if x is None:
return torch.zeros(1)
else:
return x
def test_none():
return test(None)
def test_tensor():
return test(torch.zeros(2))
f_none = torch.jit.trace(test_none, ())
self.assertEqual(f_none(), torch.zeros(1))
f_tensor = torch.jit.trace(test_tensor, ())
self.assertEqual(f_tensor(), torch.zeros(2))
graph = f_tensor.graph
FileCheck().check('name="test"').check_next("prim::CallFunction").run(graph)
def test_trace_nested_datatypes(self):
@torch.jit.script
def foo(x):
return [[x + 1, x - 1], [x + 2, x - 2]]
def bar(x):
list_stuff = foo(x)
return list_stuff[0][0], list_stuff[1][1]
traced = torch.jit.trace(bar, torch.rand(3, 4))
x = torch.rand(5, 6)
self.assertEqual(bar(x), traced(x))
@_tmp_donotuse_dont_inline_everything
def test_call_traced_fn_from_traced_module(self):
@_trace(torch.rand(3, 4))
def traced_fn(x):
return torch.neg(x)
class TracedModule(torch.nn.Module):
def __init__(self):
super(TracedModule, self).__init__()
self.param = torch.nn.Parameter(torch.rand(4, 5))
def forward(self, x):
return traced_fn(torch.mm(x, self.param))
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
# Note: neg op from the traced function should be properly inlined
FileCheck().check("aten::mm") \
.check('name="traced_fn"') \
.check_next("prim::CallFunction") \
.run(str(tm.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_traced_module_from_traced_module(self):
class TracedModule1(torch.nn.Module):
def __init__(self):
super(TracedModule1, self).__init__()
self.param = torch.nn.Parameter(torch.rand(5, 7))
def forward(self, x):
return torch.mm(x, self.param)
class TracedModule(torch.nn.Module):
def __init__(self):
super(TracedModule, self).__init__()
self.param = torch.nn.Parameter(torch.rand(4, 5))
self.mod = torch.jit.trace(TracedModule1(), torch.rand(3, 5))
def forward(self, x):
return self.mod(torch.mm(x, self.param)) + 1.0
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
FileCheck().check("aten::mm").check("prim::CallMethod").check_same("forward").check("aten::add").run(str(tm.graph))
def test_index_put_trace_with_view(self):
@_trace(torch.rand(100), torch.tensor([1, 2, 3, 4]), torch.rand(1, 1, 1, 4))
def test_index_put(target, indices, rhs):
target[indices] = rhs
return target
FileCheck().check("aten::view").check("index_put_").run(str(test_index_put.graph))
def test_index_put_trace_without_view(self):
@_trace(torch.rand(100), torch.tensor([1, 2, 3, 4]), torch.rand(4))
def test_index_put(target, indices, rhs):
target[indices] = rhs
return target
FileCheck().check_not("aten::view").check("index_put_").run(str(test_index_put.graph))
@suppress_warnings
def test_trace_checker_dot_data(self):
with self.assertRaisesRegex(torch.jit.TracingCheckError, r'Tensor-valued Constant nodes differed in value '
r'across invocations'):
@_trace(torch.rand(3, 4), check_inputs=[(torch.rand(3, 4),)])
def foo(x):
y = x.data
return x + y
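    # A hedged sketch of why `.data` trips the trace checker above (assumed
    # behavior): `.data` yields a detached alias of the same storage, which
    # the tracer can only record as a constant.
    def test_data_detached_alias_sketch(self):
        x = torch.rand(2, requires_grad=True)
        self.assertFalse(x.data.requires_grad)             # detached
        self.assertEqual(x.data.data_ptr(), x.data_ptr())  # same storage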
@suppress_warnings
def test_trace_checker_control_flow(self):
def foo(x):
for _ in range(x.size(0)):
x = torch.neg(x)
return x
with self.assertRaisesRegex(torch.jit.TracingCheckError, r'Graphs differed across invocations!'):
torch.jit.trace(foo, torch.randn(3, 4), check_inputs=[torch.randn(4, 4)])
@suppress_warnings
def test_trace_checker_memoization(self):
with self.assertRaisesRegex(torch.jit.TracingCheckError, r'Graphs differed across invocations!'):
def foo(x):
if not hasattr(foo, 'cache'):
foo.cache = torch.neg(x)
return x + foo.cache
traced = torch.jit.trace(foo, torch.rand(3, 4), check_inputs=[(torch.rand(3, 4),)])
def test_trace_checker_slice_lhs(self):
def foo(x):
for i in range(3):
x[i, :] = torch.zeros(4)
return x
self.checkTrace(foo, (torch.rand(3, 4),), inputs_require_grads=False)
def test_trace_checker_inplace_on_view(self):
def foo(x):
x.view(-1).add_(-x.view(-1))
return x
with self.assertWarnsRegex(torch.jit.TracerWarning,
'Output nr 1. of the traced function does not match the '
'corresponding output of the Python function'):
torch.jit.trace(foo,
torch.rand(3, 4),
check_inputs=[torch.rand(5, 6)],
_force_outplace=True)
def test_lhs_index_fails(self):
def foo(x):
x[0, 1] = 4
return x
with self.assertWarnsRegex(torch.jit.TracerWarning, "cause the trace to be incorrect"):
torch.jit.trace(foo, torch.rand(3, 4), _force_outplace=True)
def test_lhs_index_trivial(self):
def foo(y, x):
y[...] = x
return y
self.checkTrace(foo, (torch.rand(3, 4), torch.rand(4)), inputs_require_grads=False)
def test_inplace_warn(self):
def foo(x):
x.view(-1).add_(-x.view(-1))
return x
with self.assertWarnsRegex(torch.jit.TracerWarning, "cause the trace to be incorrect"):
torch.jit.trace(foo, torch.rand(3, 4), _force_outplace=True)
@suppress_warnings
def test_trace_checker_dropout_train(self):
def foo(x):
return torch.dropout(x, p=0.5, train=True)
with self.assertWarnsRegex(torch.jit.TracerWarning,
'Output nr 1. of the traced function does not match the '
'corresponding output of the Python function'):
torch.jit.trace(foo, torch.rand(3, 4), check_inputs=[torch.rand(5, 6)])
with self.assertWarnsRegex(torch.jit.TracerWarning,
'Trace had nondeterministic nodes'):
torch.jit.trace(foo, torch.rand(3, 4), check_inputs=[torch.rand(5, 6)])
def test_trace_checker_dropout_notrain(self):
input = torch.rand(3, 4)
@_trace(input)
def foo(x):
return torch.dropout(x, p=0.5, train=False)
self.assertEqual(foo(input), input)
def test_trace_contiguous(self):
def foo(x):
return x[:, :, ::2].contiguous().view(12)
x = torch.rand(2, 3, 4)
traced = torch.jit.trace(foo, (x,))
y = traced(x)
self.assertNotEqual(x.storage().data_ptr(), y.storage().data_ptr())
# This tests the logic in THPVariable_contiguous. There is short-circuiting
# code that prevents us from even getting to VariableType::contiguous, since
# it is an optimization that prevents us from acquiring the GIL for touching
# the device. We needed to add the tracing logic directly into the
# THPVariable_contiguous function only for the path where we are skipping
# dispatch into contiguous. We should see an aten::contiguous in this trace!
def test_trace_contiguous_short_circuit(self):
def foo(x):
return x.contiguous()
x = torch.rand(2, 3, 4)
traced = torch.jit.trace(foo, (x,))
FileCheck().check("aten::contiguous").run(str(traced.graph))
def test_trace_inverse(self):
def foo(x):
return ~x
foo_traced = torch.jit.trace(foo, torch.zeros(3, 4, dtype=torch.uint8))
eg = torch.zeros(3, dtype=torch.uint8)
self.assertEqual(foo_traced(eg), foo(eg))
def test_trace_modulelist(self):
class MySubmod(torch.nn.Module):
def __init__(self):
super(MySubmod, self).__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
class MyMod(torch.nn.Module):
def __init__(self):
super(MyMod, self).__init__()
self.ml = torch.nn.ModuleList([
MySubmod(),
MySubmod()
])
def forward(self, x):
for mod in self.ml:
x = mod(x)
return x
traced = torch.jit.trace(MyMod(), (torch.rand(3, 4),))
def test_trace_fork_join_and_module(self):
class MySubmod(torch.nn.Module):
def __init__(self):
super(MySubmod, self).__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x), torch.neg(x)
class Mod(torch.nn.Module):
def __init__(self):
super(Mod, self).__init__()
self.ml = torch.nn.ModuleList([
MySubmod() for i in range(2)
])
def forward(self, x):
futs = []
for i in range(2):
futs.append(torch.jit._fork(self.ml[i], x))
results = []
for i in range(2):
results.append(torch.jit._wait(futs[i])[0])
return torch.stack(results)
m = Mod()
traced = torch.jit.trace(m, torch.rand(3, 4))
def test_trace_invert_module_hierarchy(self):
class MySubmod(torch.nn.Module):
def __init__(self):
super(MySubmod, self).__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x), torch.neg(x)
class MyFunctionalMod(torch.nn.Module):
def forward(self, x, submod):
return submod(x)
class Mod(torch.nn.Module):
def __init__(self):
super(Mod, self).__init__()
self.sm = MySubmod()
self.fm = MyFunctionalMod()
def forward(self, x):
return self.fm(x, self.sm)
torch.jit.trace(Mod(), (torch.rand(3, 4),))
def test_trace_records_names(self):
def foo(bar, baz):
baz = bar + 3
quick_brown_fox = torch.neg(baz)
for _ in range(20):
yeet = quick_brown_fox - 3.14
return yeet
traced = torch.jit.trace(foo, (torch.rand(3, 3), torch.rand(3, 3)))
graph_str = str(traced.graph)
assert 'bar' in graph_str
assert 'baz' in graph_str
assert 'quick_brown_fox' in graph_str
def test_tracing_hooks(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
def forward(self, x):
return x + x
def test_hook(is_post_hook, hook, fc):
n = Net()
if is_post_hook:
n.register_forward_hook(hook)
else:
n.register_forward_pre_hook(hook)
module = torch.jit.trace(n, (torch.tensor(1.0),))
eager_input = torch.tensor(1.0)
eager_out = n(eager_input)
fc.run(module.forward.graph)
input = torch.tensor(1.0)
output = module(input)
self.assertEqual(input, eager_input)
self.assertEqual(output, eager_out)
def hook_no_return(mod, input, output):
input[0].add_(1)
output.sub_(1)
fc = FileCheck().check("add(").check("add_(").check("sub_(")
test_hook(True, hook_no_return, fc)
def hook_return(mod, input, output):
input[0].add_(1)
return output - 3
fc = FileCheck().check("add(").check("add_(").check("sub(")
test_hook(True, hook_return, fc)
b = torch.tensor(3.0)
def captured_hook(mod, input, output):
return output - b
fc = FileCheck().check("add(").check("sub(")
test_hook(True, captured_hook, fc)
def pre_hook_no_ret(mod, input):
input[0].add_(3)
fc = FileCheck().check("add_(").check("add(")
test_hook(False, pre_hook_no_ret, fc)
def pre_hook_ret(mod, input):
return input[0] - 4
fc = FileCheck().check("sub(").check("add(")
test_hook(False, pre_hook_ret, fc)
def test_tracing_backward_hook_error(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
def forward(self, x):
return x + x
n = Net()
def backward_hook(module, grad_input, grad_output):
pass
n.register_backward_hook(backward_hook)
with self.assertRaisesRegex(Exception, "backward hooks assigned"):
torch.jit.trace(n, (torch.tensor(1.0),))
def test_tracing_multiple_methods(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv = nn.Conv2d(1, 1, 3)
def forward(self, x):
return self.conv(x)
def weighted_kernel_sum(self, weight):
return weight * self.conv.weight
example_weight = torch.rand(1, 1, 3, 3)
example_forward_input = torch.rand(1, 1, 3, 3)
inputs = {'forward' : example_forward_input, 'weighted_kernel_sum' : example_weight}
n = Net()
module = torch.jit.trace_module(n, inputs)
check_inputs = []
for i in range(2):
check_weight = torch.rand(1, 1, 3, 3)
check_forward_input = torch.rand(1, 1, 3, 3)
check_inputs.append({'forward' : check_forward_input, 'weighted_kernel_sum' : check_weight})
module = torch.jit.trace_module(n, inputs, check_trace=True, check_inputs=check_inputs)
self.assertTrue(module._c._has_method("forward"))
self.assertTrue(module._c._has_method("weighted_kernel_sum"))
module = torch.jit.trace(n.forward, example_forward_input)
module = torch.jit.trace(n.forward, example_forward_input, check_trace=True, check_inputs=[example_forward_input])
with self.assertRaisesRegex(AttributeError, "trace doesn't support compiling individual module's functions"):
module = torch.jit.trace(n.weighted_kernel_sum, inputs)
def test_tensor_with_grad_as_constant(self):
param = torch.randn(3).requires_grad_()
x = torch.randn(3)
def f(x):
return x + param
with self.assertRaisesRegex(RuntimeError, "Cannot insert a Tensor that requires grad as a constant"):
torch.jit.trace(f, x)
def test_non_tensor_tracing(self):
def f(x):
return x + param
with self.assertRaisesRegex(RuntimeError, r"Type 'Tuple\[int\]' cannot be traced"):
torch.jit.trace(f, (1,))
def test_trace_skip_none_submodule(self):
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = torch.nn.Linear(3, 4)
self.submod = None
def forward(self, inputs):
return inputs
m = TestModule()
tm = torch.jit.trace(m, torch.tensor(1.))
self.assertFalse(hasattr(tm, "submod"))
def test_trace_with_conditional_property(self):
class Net(nn.Module):
def __init__(self, attr=None):
super(Net, self).__init__()
if attr is not None:
self._attr = attr
self.attr_name = '_attr'
@property
def attr(self):
return getattr(self, self.attr_name)
def forward(self, x):
return x
x = torch.ones(1)
torch.jit.trace(Net(), x)
def test_trace_func_argument_names_captured(self):
def fn(first_arg: torch.Tensor, second_arg: torch.Tensor) -> torch.Tensor:
return first_arg + second_arg
traced_fn = torch.jit.trace(fn, (torch.ones(1), torch.ones(1)))
FileCheck().check("first_arg").check_next("second_arg") \
.run(str(traced_fn.graph))
def test_trace_partial_func_argument_names_captured(self):
def fn(first_arg: torch.Tensor, second_arg=1) -> torch.Tensor:
return first_arg + second_arg
traced_fn = torch.jit.trace(fn, (torch.ones(1),))
FileCheck().check("first_arg").check_not("second_arg") \
.run(str(traced_fn.graph))
def test_trace_module_argument_names_captured(self):
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.conv = nn.Conv2d(1, 1, 3)
def forward(self, first_arg: torch.Tensor, second_arg: torch.Tensor):
return self.conv(first_arg) + second_arg
m = TestModule()
example_input = (torch.ones(1, 1, 3, 3), torch.ones(1, 1, 3, 3))
# Explicitly tracing module's forward method
traced_module_forward = torch.jit.trace(m.forward, example_input)
FileCheck().check("first_arg").check_next("second_arg") \
.run(str(traced_module_forward.graph))
        # Tracing the module directly
traced_module = torch.jit.trace(m, example_input)
FileCheck().check("first_arg").check_next("second_arg") \
.run(str(traced_module.graph))
class TestMixTracingScripting(JitTestCase):
def test_trace_script(self):
@torch.jit.script
def func1(x: Tuple[Tensor, Tensor]) -> Tensor:
return x[0] + x[1]
@torch.jit.script
def func2(x: List[Tensor]) -> Tensor:
return x[0] + x[1]
a = torch.randn(5)
b = torch.randn(5)
self.checkTrace(func1, ((a, b),))
self.checkTrace(func2, ((a, b),))
@torch.jit.script
def func3(x: Tensor, method: str = 'bilinear', align_corners: bool = True) -> Tensor:
hw = x.shape[2:4]
return F.interpolate(x, hw, mode=method, align_corners=align_corners)
inp = torch.rand(1, 3, 6, 6)
self.checkTrace(func3, (inp,))
@torch.jit.script
def func4(x: Tensor, a: List[Optional[str]]) -> Tensor:
if len(a) == 2:
return x + 2
else:
return x
def test_trace_mixed_by_script_with_dict_output(self):
@torch.jit.script
def return_dict(input: torch.Tensor) -> Dict[str, torch.Tensor]:
return {"foo" : input + 1}
class TraceModule(torch.nn.Module):
def forward(self, input):
dict = return_dict(input)
return dict["foo"] + dict["foo"]
x = torch.ones(1)
tm = torch.jit.trace(TraceModule(), x)
self.assertEqual(tm(x), x + 1 + x + 1)
def test_trace_of_script(self):
@torch.jit.script
def foo(a, c):
b = 0.0
if bool(a == 0.0):
b = 1.0
return b + c
a = torch.ones(1, dtype=torch.float)
@_trace(torch.zeros(1, dtype=torch.float))
def use(b):
return foo(b - 1.0, a) + 1.0
# test we propagated shapes through the function
self.assertTrue("Dynamic" not in str(use.graph))
self.assertEqual(3, use(torch.ones(1, dtype=torch.float)))
self.assertEqual(2, use(torch.zeros(1, dtype=torch.float)))
def test_trace_with_size(self):
@_trace(torch.zeros(1, 1))
def foo(x):
return x + 1
@torch.jit.script
def bar(x):
y = int(foo(x))
if 1 == 1:
y = 7
return y + 1
self.assertEqual(8, bar(torch.ones(1, 1)))
def test_tracing_slicing(self):
@_trace(torch.zeros(10))
def foo_trace(x):
return x[-5:-3]
@torch.jit.script
def foo_script(x):
return x[-5:-3]
def foo(x):
return x[-5:-3]
a = torch.arange(0, 8)
b = torch.arange(0, 20)
self.assertEqual(foo_trace(a), foo_script(a))
self.assertEqual(foo_trace(a), foo(a))
self.assertNotEqual(foo_trace(a), foo_trace(b))
def test_tracing_indexing(self):
@_trace(torch.zeros(10))
def foo_trace(x):
return x[-2]
@torch.jit.script
def foo_script(x):
return x[-2]
def foo(x):
return x[-2]
a = torch.arange(0, 8)
b = torch.arange(0, 20)
self.assertEqual(foo_script(a), foo_trace(a))
self.assertEqual(foo_trace(a), foo(a))
self.assertNotEqual(foo_trace(a), foo_trace(b))
def test_trace_hierarchy(self):
# Test that we preserve the module hierarchy for a ScriptModule
# submodule during tracing
class AnotherScriptMod(torch.jit.ScriptModule):
def __init__(self):
super(AnotherScriptMod, self).__init__()
self.param = torch.nn.Parameter(torch.rand(1, 2, 3))
@torch.jit.script_method
def bar(self):
return torch.zeros(4, 5)
class SomeScriptMod(torch.jit.ScriptModule):
def __init__(self):
super(SomeScriptMod, self).__init__()
self.asm = AnotherScriptMod()
@torch.jit.script_method
def foo(self):
return torch.zeros(3, 4)
@torch.jit.script_method
def bar(self):
return torch.zeros(4, 3)
class TraceMe(torch.nn.Module):
def __init__(self):
super(TraceMe, self).__init__()
self.ssm = SomeScriptMod()
def forward(self, x):
return self.ssm.bar() + x
orig = TraceMe()
traced = torch.jit.trace(orig, (torch.rand(4, 3),))
# for each of these checks, check that *BOTH* the underlying
# _C.ScriptModule object has the expected method/param, as well as the
# Python object that wraps it.
self.assertTrue(traced.ssm._c._has_method('foo'))
self.assertTrue(hasattr(traced.ssm, 'foo'))
imported = self.getExportImportCopy(traced)
self.assertTrue(imported.ssm._c._has_method('foo'))
self.assertTrue(hasattr(imported.ssm, 'foo'))
self.assertTrue(imported.ssm.asm._c._has_method('bar'))
self.assertTrue(hasattr(imported.ssm.asm, 'bar'))
self.assertTrue(hasattr(imported.ssm.asm, 'param'))
def test_trace_parameter(self):
class Param(nn.Module):
def __init__(self):
super(Param, self).__init__()
self.register_parameter("bias", nn.Parameter(torch.empty(4, 4)))
def forward(self, x):
return x
class M3(torch.jit.ScriptModule):
def __init__(self, model):
super(M3, self).__init__()
self.traced = torch.jit.trace(model, (torch.rand(3, 3)))
@torch.jit.script_method
def forward(self, x):
return self.traced(x)
class M2(nn.Module):
def __init__(self, model):
super(M2, self).__init__()
self.module = M3(model)
def forward(self, x):
return self.module(x)
class M1(torch.jit.ScriptModule):
def __init__(self, model):
super(M1, self).__init__()
self.traced = torch.jit.trace(M2(model), (torch.rand(3, 3)))
@torch.jit.script_method
def forward(self, x):
return self.traced(x)
with torch.jit.optimized_execution(False):
module = M1(Param())
f = io.BytesIO()
torch.jit.save(module, f)
@_tmp_donotuse_dont_inline_everything
def test_call_script_fn_from_traced_module(self):
@torch.jit.script
def scripted_fn(x):
return torch.neg(x)
class TracedModule(torch.nn.Module):
def __init__(self):
super(TracedModule, self).__init__()
self.param = torch.nn.Parameter(torch.rand(4, 5))
def forward(self, x):
return scripted_fn(torch.mm(x, self.param))
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
FileCheck().check("aten::mm").check("name=\"scripted_fn\"").check("prim::CallFunction").run(str(tm.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_script_module_from_traced_module(self):
class ScriptMod(torch.jit.ScriptModule):
def __init__(self):
super(ScriptMod, self).__init__()
self.param_foo = torch.nn.Parameter(torch.rand(5, 7))
@torch.jit.script_method
def forward(self, x):
return torch.mm(x, self.param_foo)
class TracedModule(torch.nn.Module):
def __init__(self):
super(TracedModule, self).__init__()
self.param = torch.nn.Parameter(torch.rand(4, 5))
self.mod = ScriptMod()
def forward(self, x):
return self.mod(torch.mm(x, self.param)) + 1.0
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
FileCheck().check("aten::mm").check("prim::CallMethod").check_same("forward").check("aten::add").run(str(tm.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_traced_fn_from_script_fn(self):
@_trace(torch.rand(3, 4))
def traced_fn(x):
return torch.neg(x)
@torch.jit.script
def script_fn(x):
return traced_fn(x) + 1
FileCheck().check("prim::CallFunction").check("aten::add").run(str(script_fn.graph))
def test_call_traced_mod_from_script_fn(self):
with self.assertRaisesRegex(RuntimeError, "Cannot call a ScriptModule that is not a submodule of the caller"):
class TracedModule(torch.nn.Module):
def __init__(self):
super(TracedModule, self).__init__()
def forward(self, x):
return torch.mm(x, torch.zeros(4, 3))
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
@torch.jit.script
def script_fn(x):
return tm(x) + 1
@_tmp_donotuse_dont_inline_everything
def test_call_tracing_fn_from_script_module(self):
@_trace(torch.rand(3, 3))
def traced_fn(x):
return torch.neg(x)
class ScriptMod(torch.jit.ScriptModule):
def __init__(self):
super(ScriptMod, self).__init__()
self.param = torch.nn.Parameter(torch.rand(4, 3))
@torch.jit.script_method
def forward(self, x):
return traced_fn(torch.mm(x, self.param))
sm = ScriptMod()
FileCheck().check("aten::mm").check("prim::CallFunction").run(str(sm.forward.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_tracing_mod_from_script_module(self):
class TracedMod(torch.nn.Module):
def __init__(self):
super(TracedMod, self).__init__()
self.param = torch.nn.Parameter(torch.rand(3, 5))
def forward(self, x):
return torch.mm(x, self.param)
class ScriptMod(torch.jit.ScriptModule):
def __init__(self):
super(ScriptMod, self).__init__()
self.param = torch.nn.Parameter(torch.rand(4, 3))
self.tm = torch.jit.trace(TracedMod(), torch.rand(3, 3))
@torch.jit.script_method
def forward(self, x):
return self.tm(torch.mm(x, self.param))
sm = ScriptMod()
FileCheck().check("aten::mm").check("prim::CallMethod").run(str(sm.graph))
def test_script_inline_trace_multiple_args(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, input, input2):
return input + input2
class M2(torch.jit.ScriptModule):
def __init__(self):
super(M2, self).__init__()
self.m = torch.jit.trace(M(), (torch.zeros(4, 3), torch.zeros(4, 3)))
@torch.jit.script_method
def forward(self, inp):
return self.m(inp, inp)
with torch.jit.optimized_execution(False):
m2 = M2()
m2(torch.zeros(4, 3))
def test_trace_dict_mix_script(self):
class testB(torch.nn.Module):
def __init__(self):
super(testB, self).__init__()
self.linear = torch.nn.Linear(2, 2)
def forward(self, feature_map: Dict[str, List[Tensor]]) -> Tensor:
output = []
for i, j in feature_map.items():
output.append(self.linear(j[0]))
return torch.stack(output)
class testA(torch.nn.Module):
def __init__(self):
super(testA, self).__init__()
self.b = torch.jit.script(testB())
def forward(self, input_map: Dict[str, List[Tensor]]) -> Tensor:
feature_map = {}
for i, j in input_map.items():
feature_map[i] = [j[0]]
return self.b(feature_map)
input_map = {"1" : [torch.rand(2, 2), torch.rand(2, 2)], "3" : [torch.rand(2, 2), torch.rand(2, 2)]}
model = testA()
traced_model = torch.jit.trace(model, input_map)
new_input_map = {"1" : [torch.rand(2, 2), torch.randn(2, 2)], "3" : [torch.rand(2, 2), torch.rand(2, 2)]}
self.assertEqual(model(new_input_map), traced_model(new_input_map))
def test_trace_script_returning_complex_dict(self):
"""Tracing over a script function returning a dictionary should work.
        The dictionary should be able to contain other containers (like a tuple) recursively.
"""
class ReturnsDict(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(
self, id_score_list: Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]
) -> Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
# do some random operations and then return a dict of the same structure
v = id_score_list["1000"]
idx_keys = v[1] - 1500000
weights = v[2]
result = {
"1000": (v[0], idx_keys, weights)
}
return result
class ChecksDict(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input: Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]):
v = input["1000"]
return v[1] + 1
class TestModule(torch.nn.Module):
def __init__(self, checks_dict, returns_dict):
super().__init__()
self.checks_dict = checks_dict
self.returns_dict = returns_dict
def forward(self, input: Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]):
foo = self.returns_dict(input)
return self.checks_dict(foo)
input1 = {
"1000": (
torch.tensor([0]),
torch.tensor([], dtype=torch.int64),
torch.tensor([])
)
}
input2 = {
"1000": (
torch.tensor([0]),
torch.tensor([1500000, 1500004], dtype=torch.int64),
torch.tensor([2.0, 3.0])
)
}
checks_dict = torch.jit.script(ChecksDict())
returns_dict = torch.jit.script(ReturnsDict())
eager_module = TestModule(checks_dict, returns_dict)
traced_module = torch.jit.trace(eager_module, input1)
self.assertEqual(traced_module(input1), eager_module(input1))
self.assertEqual(traced_module(input2), eager_module(input2))
def test_trace_returning_dict_with_tensor_tuples(self):
"""Tracing over a module returning a dictionary whose values are tuples of tensors
should work.
"""
class ReturnsDict(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(
self, k: torch.Tensor, v: torch.Tensor
) -> Dict[str, Tuple[torch.Tensor, torch.Tensor]]:
x = 2 * k
y = 3 * v
result = {
"imakey": (x, y)
}
return result
class ReturnsBadDict(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(
self, k: torch.Tensor, v: torch.Tensor
) -> Dict[str, Tuple[torch.Tensor, float]]:
x = 2 * k
result = {
"imakey": (x, 1)
}
return result
mod = ReturnsDict()
traced_module = torch.jit.trace(mod, [torch.ones(1), torch.ones(1)], strict=False)
out = traced_module(torch.ones(1), torch.ones(1))
expected = {
"imakey": (torch.tensor([2.]), torch.tensor([3.]))
}
self.assertEqual(out, expected)
with self.assertRaisesRegex(RuntimeError, "cannot be understood by the tracer, only outputs matching"):
mod = ReturnsBadDict()
traced_module = torch.jit.trace(mod, [torch.ones(1), torch.ones(1)], strict=False)
def test_trace_linear(self):
m = torch.nn.Linear(20, 20)
inp = torch.rand([20, 20])
self.checkTrace(m, (inp,))
g = torch.jit.trace(m, (inp,)).graph
FileCheck().check("aten::linear").run(g)
def test_traced_module_implements_interface(self):
@torch.jit.interface
class TestModuleInterface(nn.Module):
def forward(self, first_arg: torch.Tensor, second_arg: torch.Tensor) -> torch.Tensor:
pass
make_global(TestModuleInterface)
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.conv = nn.Conv2d(1, 1, 3)
def forward(self, first_arg: torch.Tensor, second_arg: torch.Tensor) -> torch.Tensor:
return self.conv(first_arg) + second_arg
def fn_takes_interface(x: TestModuleInterface):
ones = torch.ones(1, 1, 3, 3)
return x.forward(ones, ones)
scripted_test_module = torch.jit.script(TestModule())
self.checkScript(fn_takes_interface, (scripted_test_module,))
def test_traced_module_contains_scripted_interface_types(self):
class LeafModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(19))
def forward(self, input: torch.Tensor):
return input + self.weight
class LowerModuleImpl(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.leaf = LeafModule()
def forward(self, input: torch.Tensor) -> torch.Tensor:
return self.leaf(input)
@torch.jit.interface
class LowerModuleInterface(torch.nn.Module):
def forward(self, input: torch.Tensor) -> torch.Tensor:
pass
class MiddleModule(torch.nn.Module):
lower: LowerModuleInterface
def __init__(self, feature_processor_modules=None):
super().__init__()
self.lower = LowerModuleImpl()
def forward(self, input):
return self.lower(input)
class WrapperModule(torch.nn.Module):
def __init__(self, m):
super().__init__()
self.middle = m
def forward(self, input):
return self.middle(input)
class TopModule(torch.nn.Module):
def __init__(self):
super().__init__()
m = MiddleModule()
m = torch.jit.script(m)
self.sub1 = m
self.sub2 = WrapperModule(m)
def forward(self, input: torch.Tensor):
return self.sub1(input) + self.sub2(input)
top = TopModule()
top_example_input = torch.ones(1)
torch.jit.trace(top, top_example_input)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
690cba99d52f50b437c164249f2dfb2d5f3856e7
|
0fa82ccc0b93944c4cbb8255834b019cf16d128d
|
/2020/Concurrency/Producer&ConsumerModel.py
|
3a23d9d01fb9c1bf3a33c494fa23bed90f0dbfcb
|
[] |
no_license
|
Akashdeepsingh1/project
|
6ad477088a3cae2d7eea818a7bd50a2495ce3ba8
|
bdebc6271b39d7260f6ab5bca37ab4036400258f
|
refs/heads/master
| 2022-12-13T23:09:35.782820
| 2020-08-27T14:22:37
| 2020-08-27T14:22:37
| 279,722,741
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,944
|
py
|
from threading import Thread
from threading import Condition
from threading import Lock
from threading import current_thread
from collections import deque
import time
import random
class Solution:
def __init__(self, n):
self.cond = Condition()
self.list_item = deque()
self.curr = 0
self.int_max = n
self.lock = Lock()
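    # (comments added for clarity) Classic bounded-buffer pattern: each
    # operation re-checks its predicate in a `while` loop after wait(),
    # because notify_all() wakes both producers and consumers.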
def dequeue(self):
self.cond.acquire()
while self.curr == 0:
self.cond.wait()
item = self.list_item.pop()
self.curr -= 1
self.cond.notify_all()
self.cond.release()
return item
def enqueue(self,n):
self.cond.acquire()
while self.int_max == self.curr:
self.cond.wait()
self.list_item.append(n)
self.curr +=1
self.cond.notify_all()
self.cond.release()
def consumer_thread(self):
while 1:
item = self.dequeue()
print('{} consumer thread - consumed {}'.format(current_thread().getName(),item))
time.sleep(random.randint(1,3))
def producer_thread(self,q):
while 1:
#item = random.randint(1,100)
item = q
self.enqueue(item)
print('{} producer thread - is producing {} '.format(current_thread().getName(),item))
time.sleep(random.randint(1,3))
def main(self):
producer1 = Thread(target = self.producer_thread, name = "Producer1", args=(1,), daemon=True)
producer2 = Thread(target = self.producer_thread,name = "Producer2", args= (100,), daemon=True)
consumer1 = Thread(target = self.consumer_thread, name = "Consumer1", daemon=True)
consumer2 = Thread(target = self.consumer_thread, name = "Consumer2", daemon = True)
consumer1.start ()
consumer2.start ()
producer1.start()
producer2.start()
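        # daemon threads are killed when the main thread exits, so the
        # sleep below means the demo runs for roughly 15 seconds and stops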
time.sleep(15)
obj = Solution(5)
obj.main()
|
[
"Akashdeep_S@Dell.com"
] |
Akashdeep_S@Dell.com
|
ba46ec62e8b6bd7269e47076a5906df2f7336aa0
|
8926921df76ab45f982dc74ad1a0bb9a69d162f1
|
/DCF.py
|
f1944b872cb15ef7fb2b80f44f09444c8f32ea34
|
[] |
no_license
|
SurajKoju/Image_Processing_using_Python
|
36676aecb681f580afc541ff1532b0d5cd017423
|
71824b1aeea90092d63954eca9c47cc5d3383b35
|
refs/heads/master
| 2022-04-09T07:27:43.285804
| 2020-02-02T04:12:17
| 2020-02-02T04:12:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
# discrete cosine transform (blockwise, using cv2.dct)
import cv2
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import Normalize
import matplotlib.cm as cm
B=8 #blocksize
fn3= '/home/koju/Desktop/Image_Processing/images.png'
img1 = cv2.imread(fn3,cv2.IMREAD_GRAYSCALE)
h,w=np.array(img1.shape[:2])//B * B
print(h)
print(w)
img1=img1[:h,:w]
blocksV=h/B
blocksH=w/B
vis0 = np.zeros((h,w))
Trans = np.zeros((h,w))
vis0[:h, :w] = img1
for row in range(int(blocksV)):
for col in range(int(blocksH)):
currentblock = cv2.dct(vis0[row*B:(row+1)*B,col*B:(col+1)*B])
Trans[row*B:(row+1)*B,col*B:(col+1)*B]=currentblock
cv2.imshow("trans",Trans)
cv2.waitKey(0)
|
[
"noreply@github.com"
] |
SurajKoju.noreply@github.com
|
8970c78416509d050a786e0c4dd52eb392acd29c
|
2d81f497033301046eb9f2da6835e2a812f92185
|
/syde675-3b.py
|
10561961ab71647301825c964d5f33dd1fb4b9c5
|
[] |
no_license
|
peterzhangboyun/feature-recognition
|
fa8c20184ad903b2fdf299e36cc4e1eb08869a99
|
e128555a8e1f6402c955cf0fe36c0e913d7291cf
|
refs/heads/master
| 2020-03-26T22:06:41.719605
| 2019-06-26T23:38:44
| 2019-06-26T23:38:44
| 145,432,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,135
|
py
|
from numpy import *
import numpy as np
import struct
def load_images(file_name):
binfile = open(file_name, 'rb')
buffers = binfile.read()
magic, num, rows, cols = struct.unpack_from('>IIII',buffers, 0)
bits = num * rows * cols
images = struct.unpack_from('>' + str(bits) + 'B', buffers, struct.calcsize('>IIII'))
binfile.close()
images = np.reshape(images, [num, rows * cols])
return images
global data
data = load_images('train-images.idx3-ubyte')
def pca(i):
means = mean(data, axis=0)
new_data = data-means
covMat = np.cov(new_data.T)
eigVals, eigVects = np.linalg.eig(covMat)
n_eigValIndice = argsort(-eigVals)
selectedfeature = np.matrix(eigVects.T[n_eigValIndice[:i]])
finalData = new_data*selectedfeature.T
finalData = finalData.real
reconMat = (finalData*selectedfeature)+means
return eigVals
eigvalue = sorted(pca(1), reverse=True)
eigvalue = np.real(eigvalue)
sum1 = []
for j in range(len(eigvalue)):
sum1.append(eigvalue[j])
if np.sum(sum1) > (np.sum(eigvalue)*0.95):
print("Suitable d (POV=95%) is ", len(sum1))
break
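# (added sketch, not in the original) The search above can be vectorized
# with a cumulative sum over the sorted eigenvalues; for a symmetric
# covariance matrix, np.linalg.eigh would also avoid complex eigenvalues.
pov = np.cumsum(eigvalue) / np.sum(eigvalue)
print("Suitable d (POV=95%) via cumsum is", int(np.argmax(pov > 0.95)) + 1)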
|
[
"peterzhangby@126.com"
] |
peterzhangby@126.com
|
533bf2d662d9b4e37c86a722e3f67e70265ba6b4
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/WPojigJER35bJT6YH_19.py
|
81005550eb38687a8fcd86bc85eed7d403cf330e
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 71
|
py
|
def reversed_binary_integer(num):
return int(bin(num)[2:][::-1],2)
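# Example (added note): reversed_binary_integer(6) -> 3,
# since bin(6)[2:] == '110' and int('011', 2) == 3.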
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
c027ff6dbd3b65a060cbbf428beba67e46c55b97
|
8d45f303a34188316009405ca007ba5c663bfbef
|
/ch09/favorite_languages.py
|
81f854dc0403f17d3e8205858c0e87a15d5fcff5
|
[] |
no_license
|
heyb7/python_crash_course
|
4d7f45961d085e4dc9146872a651bb4cd001b663
|
04cb6c9b0c362f5db5bd208432f222edfcd65126
|
refs/heads/master
| 2021-09-09T10:37:38.355446
| 2018-03-15T08:57:08
| 2018-03-15T08:57:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
from collections import OrderedDict
favorite_languages = OrderedDict()
favorite_languages['jen'] = "python"
favorite_languages['sarah'] = "c"
favorite_languages['edward'] = 'ruby'
favorite_languages['phil'] = 'python'
for name, language in favorite_languages.items():
    print(name.title() + "'s favorite language is " + language.title() + ".")
|
[
"heyanbing@emindsoft.com.cn"
] |
heyanbing@emindsoft.com.cn
|
b5bddfc34ba6cecc04cc3e80b8acf14dc26ed421
|
d4832ac489089b4e6f9bcaa8dc57a549472e63fb
|
/unit_3/lecture3/lecture3/settings.py
|
2f65d2f528582e40f4aefefbc1fed9d39ef8996f
|
[] |
no_license
|
AuguestGao/cs50web
|
1fef7d462fa6605f15d8c55ca19f4a10e0c11c9e
|
a938b24678176191889ca9b82a5096df76cb9602
|
refs/heads/master
| 2023-02-10T02:00:26.290948
| 2021-01-06T03:26:03
| 2021-01-06T03:26:03
| 312,689,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,109
|
py
|
"""
Django settings for lecture3 project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6g**n$)v_84=!84-jt2)(&cshllmehccsq2bp=y@3l!hz-(g_y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'hello',
'newyear',
'tasks',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'lecture3.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'lecture3.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"AuLucian@users.noreply.github.com"
] |
AuLucian@users.noreply.github.com
|
843e886d5c65bec2a64ffc185df889da65e818c9
|
9e1b7c7a097707c5b1e8120f22e8e404f8a48158
|
/src/day05.py
|
c05b807d1ac44020860030391421228491310d95
|
[] |
no_license
|
Jaxwood/special-palm-tree
|
589a57e4748458f64725d5551f946500f9a00027
|
e60f3d5bb1641b0fda766f1b99aa8cc24af0f42d
|
refs/heads/master
| 2023-05-03T03:15:52.684355
| 2021-05-28T17:32:29
| 2021-05-28T17:32:29
| 367,483,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,509
|
py
|
from typing import Dict, List, Set
def find_nice_strings(candidates: List[str]) -> int:
"""find strings that are nice"""
sum = 0
vowels = {'a', 'e', 'i', 'o', 'u'}
for candidate in candidates:
vowelCount = 0
doubleCount = 0
banned = list(filter(lambda s: candidate.find(
s) != -1, ['ab', 'cd', 'pq', 'xy']))
for i in range(0, len(candidate)):
# check for vowels
if candidate[i] in vowels:
vowelCount += 1
# check for double letter
if i != len(candidate) - 1 and candidate[i] == candidate[i+1]:
doubleCount += 1
sum += 1 if vowelCount > 2 and doubleCount > 0 and len(
banned) == 0 else 0
return sum
def has_pair(s: str) -> bool:
"""find pair with no overlap"""
segs: Dict[str, Set[int]] = {}
for i in range(0, len(s) - 1):
st = s[i] + s[i + 1]
if st in segs:
segs[st] = segs[st].union({i, i+1})
else:
segs[st] = {i, i+1}
return any(filter(lambda s: len(s) == 4, segs.values()))
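# Example (added note): has_pair('aaa') is False because the two 'aa'
# occurrences overlap (their index set is {0, 1, 2}), while
# has_pair('aaaa') is True (index set {0, 1, 2, 3} has size 4).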
def has_repeating_letter(s: str) -> bool:
"""find repeating letter"""
for i in range(0, len(s) - 2):
if s[i] == s[i+2]:
return True
return False
def find_even_nicer_strings(candidates: List[str]) -> int:
"""find even nicer strings"""
sum = 0
for s in candidates:
if has_pair(s) and has_repeating_letter(s):
sum += 1
return sum
|
[
"jacob@lorenzen.me"
] |
jacob@lorenzen.me
|
eea72e4751fe69b590a5a46878ff485555074252
|
6c186657a841311aaa424e58f86280b4cd91cc78
|
/20_oops.py
|
8e544dd4ce8ddb518248359b12fe6da4528e1ae6
|
[] |
no_license
|
Balajikrishnan00/Python
|
15c0cd18adfda0380d08dbe269b59dac8a183d64
|
d3b982a533a6a8014e1cb4df83b0550b15a26428
|
refs/heads/master
| 2023-05-26T17:45:55.371071
| 2021-06-17T19:09:31
| 2021-06-17T19:09:31
| 360,929,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,204
|
py
|
"""
import sys
class customer:
''' This class is about bank'''
bank='Indian Overseas Bank'
def __init__(self,name,acno,blance=500):
self.name=name
self.acno=acno
self.blance=blance
        print('welcome Mr.', self.name, 'How can I help you?')
def deposit(self,amt):
self.blance+=amt
def withdraw(self,amt):
if self.blance>=500 and self.blance-amt>=500 :
self.blance -= amt
else:
            print('sorry, you only have the minimum balance')
print('Welcome to',customer.bank)
name=input('whats is your name:')
acno=int(input('Ac number:'))
c1=customer(name,acno,500)
c1.deposit(500)
print(c1.blance)
c1.withdraw(200)
print(c1.blance)
c1.withdraw(300)
print(c1.blance)
c1.withdraw(100)
#print(c1.blance)
----------------------------------
import sys
class bank:
bankName='Indian Overseas Bank'
'''this class is about bank'''
def __init__(self,name,accno,blance=500):
self.name=name
self.accno=accno
self.blance=blance
print('Welcome to ',bank.bankName,'Mr.',self.name)
def deposit(self,amount):
self.blance+=amount
def blanceEnquiry(self):
print('Your amount:',self.blance)
def widthraw(self,amount):
if self.blance>=500 and self.blance-amount>=500:
self.blance-=amount
else:
            print('sorry, a minimum balance of 500.00 must be maintained')
def exit(self):
sys.exit()
name=input('acHolder Name:')
accno=int(input('ac Number:'))
c=bank(name, accno)
while True:
choice =input('D-Deposit\nB-Balance Enquiry\nW-Widthraw\nS-Exit\n')
if choice=='D' or choice=='d':
#c=bank(name, accno)
amt=float(input('Enter your amount:'))
c.deposit(amt)
elif choice=='B' or choice=='b':
c.blanceEnquiry()
elif choice=='W' or choice=='w':
amt=float(input('Enter Widthraw amount:'))
c.widthraw(amt)
elif choice=='S' or choice=='s':
sys.exit()
---------------------------------------
# inheritance
# 1. HAS A relationship
# 2. IS A relationship
class Engine:
'''This class is about Engine'''
mileage=22
def __init__(self):
self.petrol=True
self.Engine_Running=False
def EngineStart(self):
if self.Engine_Running:
print('Engine Already Running')
else:
self.Engine_Running=True
print('Engine Started...')
def EngineStop(self):
if self.Engine_Running:
self.Engine_Running=False
print('Engine Stopped..!')
else:
print('Engine Already Stopped..!')
class Car:
'''This class is about Car'''
def __init__ (self):
self.engine=Engine()
def drive(self):
self.engine.EngineStart()
print('Car in Running')
def park(self):
self.engine.EngineStop()
print('Car stoped')
c1=Car()
c1.drive()
c1.park()
#t1=Engine()
#t1.EngineStart()
#t1.EngineStart()
#t1.EngineStop()
#t1.EngineStop()
------------------------------
# 2 is a relationship
class humanbeing:
'''this class about is humanbeing'''
def __init__(self,name,age,sex):
self.name=name
self.age=age
self.sex=sex
def reading(self):
print('reading books')
class empolyee(humanbeing):
'''this class is about employee'''
def __init__(self,empno,salary,name,age,sex):
super().__init__(name,age,sex)
self.empno=empno
self.salary=salary
def dowork(self):
print('Emp working')
emp1=empolyee(101,20000,'balaji',24,'male')
print(emp1.age)
emp1.reading()
emp1.dowork()
-----------------------------------------------
class bank:
bankname='SBI'
def __init__(self):
self.min=2000
def deposit(self):
print('Deposit')
def withdraw(self):
print('widthraw')
@staticmethod
def staticmethod():
print('staticmethod is running')
@classmethod
def classmethod(cls):
print('classmethod is running',cls.bankname)
user1=bank()
user1.classmethod()
user1.staticmethod()
user1.deposit()
user1.withdraw()
print(user1.min)
----------------------------------
class Signup:
'''This class is about Signup your account'''
def __init__(self,name,accno):
self.name=name
self.accno=accno
self.account=True
class Rbi(Signup):
'''This class is about Bank'''
def __init__(self,acname,acno):
super(Rbi,self).__init__(acname,acno)
def deposit(self):
if self.account:
print('cash Deposited')
else:
print('Please Login')
def withdraw(self):
if self.account:
print('cash withdraw success full.')
else:
print('Login')
class indianBank(Rbi):
@staticmethod
def staticmethod():
print('staticmethod is running')
#user1=indianBank('balaji',12234)
#user1.deposit()
#user1.withdraw()
#print(user1.account)
#user1.staticmethod()
user2 =indianBank()
user2.deposit()
---------------------------------------
# multiple inheritance
class RBI:
def Loan(self):
print('Loan')
def loadthallupadi(self):
print('Getting done')
class SBI(RBI):
def deposite(self):
print('Deposited')
def withdraw(self):
print('Withdraw')
class LBank(SBI):
pass
l1=LBank()
l1.Loan()
l1.deposite()
----------------------------
class Bank1:
def deposite(self):
print('Deposite amount')
def withdraw(self):
print('Withdraw')
class Bank2:
def AgriLoad(self):
print('Got AgriLoan')
def EducationLoan(self):
print('You Got Education Loan')
class Bank3(Bank2,Bank1):
pass
user1=Bank3()
user1.deposite()
user1.withdraw()
user1.AgriLoad()
user1.EducationLoan()
------------------------------
class lali:
address='chennai'
def __init__(self):
self.Ho_OFFER=1000
def MegaOffer(self):
print('Mega Offer')
class lali1(lali):
def __init__(self):
super(lali1,self).__init__()
self.L_OFFE=500
def LocalOffer(self):
print('Local 0ffer')
c1=lali1()
#c1.Ho_OFFER
print(c1.address)
print(c1.L_OFFE)
print(c1.Ho_OFFER)
c1.MegaOffer()
c1.LocalOffer()
-----------------------------
class Human:
def __init__(self,name,age):
self.name=name
self.age=age
class employee(Human):
'''This class is about Employee'''
def __init__(self,name,age,empno,salary):
super(employee,self).__init__(name,age)
self.emp=empno
self.salary=salary
#emp1=employee('balaji',24,101,2000)
emp2=employee('siva',24,102,40000)
print(emp2.__doc__)
print(emp2.__dict__)
-----------------------------
# Multilevel Bank
class HeadBank:
def EDULoan(self):
print('Edu Loan')
def AgriLoan(self):
print('AgriLoan')
class SBI(HeadBank):
def Saving(self):
print('Savings')
def Deposit(self):
print('Deposit')
def Widthraw(self):
print('withdraw')
class OppBank(HeadBank):
pass
class VillageBank(SBI):
def NEWaccount(self):
print('New User')
c1=VillageBank()
c1.EDULoan()
c1.Saving()
c1.NEWaccount()
---------------------------"""
|
[
"balajikrishnan00@gmail.com"
] |
balajikrishnan00@gmail.com
|
bdda16a27d9413491a5b1701765061540eb88330
|
c39564f0b7616d697e4e7a42d8dd87238e76ae57
|
/p03_Funciones/src/ej10_es_perfecto.py
|
33fca835d6e1e3beb2cab29ea82d5b79f9bccee7
|
[] |
no_license
|
agarod/Programacion_Cientifica
|
39181f2dcc079407ed9a807080a69295cb993dd1
|
af907fee55c323862ac268390ecc268b5dabe334
|
refs/heads/master
| 2021-01-11T15:18:07.482967
| 2017-01-29T00:54:03
| 2017-01-29T00:54:03
| 80,322,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
#!/usr/bin/python
# encoding: utf-8
import sys
'''
__author__: "Ardiel Garcia Rodriguez"
__email__: "alu0100266382@ull.edu.es"
__exercise_number__: 10
__statement__: Write a function that indicates whether a given number
is perfect or not.
__status__: "Finished"
'''
def es_perfecto(numero):
    sumatorio = 0
    for i in range(1, numero):
        if numero % i == 0:
            sumatorio = sumatorio + i
    return sumatorio == numero
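# A perfect number equals the sum of its proper divisors,
# e.g. es_perfecto(6) is True because 1 + 2 + 3 == 6.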
try:
numero = int(sys.argv[1])
print es_perfecto(numero)
except:
    print 'This program checks whether the number is perfect \n\n'
    print 'the correct way to run this program is', sys.argv[0], 'number'
|
[
"ardielgr.dev@gmail.com"
] |
ardielgr.dev@gmail.com
|
e7bd6a08d5b0ff56acf58ee060eb8870c0682e28
|
5fc47f29e08c036aa6d1ff9ef3def4b5e4011982
|
/demo.py
|
1aaef3ec4d70ee545f549d7de5e266d891929c04
|
[] |
no_license
|
theSreeRam/intensityApp
|
b9f8f6a904057c0158f673844e956e412f641fb6
|
3cdf2ef40a1208a5134063c1bcd6e2bf7b98b93a
|
refs/heads/master
| 2023-01-22T16:20:58.154850
| 2020-11-26T07:58:54
| 2020-11-26T07:58:54
| 302,035,412
| 1
| 1
| null | 2020-11-18T10:35:09
| 2020-10-07T12:52:28
|
Python
|
UTF-8
|
Python
| false
| false
| 569
|
py
|
from tkinter import *
root = Tk()
def myClick():
myLabel = Label(root, text="Look! I clicked a button")
myLabel.grid()
print(e.get())
#first defining the widget
myLabel1 = Label(root, text="Hello World")
myLabel2 = Label(root, text="My name is Sreeram")
myButton = Button(root, text="Enter your name", padx=50, pady=50, command=myClick)
e = Entry(root)
e.grid()
e.insert(1,"Insert your name")
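# note (added): on an empty Entry, insert index 1 is clamped to the end,
# so this behaves the same as inserting at index 0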
myLabel1.grid(row=0, column = 0)
myLabel2.grid(row=3, column = 1)
myButton.grid()
# start the event loop; it keeps running and reacts to any change (event)
root.mainloop()
|
[
"panigrahi.sreeram@gmail.com"
] |
panigrahi.sreeram@gmail.com
|
7ad9710521fe168b6210538e9c45ef2fc1bceeb0
|
4bc1600bdb68fc7ae26a15f382f17521721c2be7
|
/about_dice_project.py
|
7330b10e07e6511154d0816d0477eee61a4d8d61
|
[] |
no_license
|
abhishekshahgithub/Python_2
|
923e1046408a21b81579ae439d7cdb1a722b7420
|
8a08c8520ac8d86861b62a15bd490a05900435f0
|
refs/heads/master
| 2020-03-20T05:07:36.953696
| 2018-06-13T11:23:57
| 2018-06-13T11:23:57
| 137,204,378
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,063
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import random
class DiceSet(object):
def __init__(self):
self._values = None
@property
def values(self):
return self._values
def roll(self, n):
# Needs implementing!
# Tip: random.randint(min, max) can be used to generate random numbers
self._values = []
for r in range(0, n):
self._values.append(random.randint(1, 6))
class AboutDiceProject(Koan):
def test_can_create_a_dice_set(self):
dice = DiceSet()
self.assertTrue(dice)
def test_rolling_the_dice_returns_a_set_of_integers_between_1_and_6(self):
dice = DiceSet()
dice.roll(5)
self.assertTrue(isinstance(dice.values, list), "should be a list")
self.assertEqual(5, len(dice.values))
for value in dice.values:
self.assertTrue(
value >= 1 and value <= 6,
"value " + str(value) + " must be between 1 and 6")
def test_dice_values_do_not_change_unless_explicitly_rolled(self):
dice = DiceSet()
dice.roll(5)
first_time = dice.values
second_time = dice.values
self.assertEqual(first_time, second_time)
def test_dice_values_should_change_between_rolls(self):
dice = DiceSet()
dice.roll(5)
first_time = dice.values
dice.roll(5)
second_time = dice.values
self.assertNotEqual(first_time, second_time, \
"Two rolls should not be equal")
# THINK ABOUT IT:
#
# If the rolls are random, then it is possible (although not
# likely) that two consecutive rolls are equal. What would be a
# better way to test this?
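    #
    # One possible answer (a sketch added here, not part of the original
    # koans): roll enough dice that a collision is vanishingly unlikely,
    # e.g.:
    #
    #   def test_many_dice_rolls_differ(self):
    #       dice = DiceSet()
    #       dice.roll(100)
    #       first_time = dice.values
    #       dice.roll(100)
    #       self.assertNotEqual(first_time, dice.values)
    #
    # With 100 dice, two independent rolls match with probability 6**-100.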
def test_you_can_roll_different_numbers_of_dice(self):
dice = DiceSet()
dice.roll(3)
self.assertEqual(3, len(dice.values))
dice.roll(1)
self.assertEqual(1, len(dice.values))
|
[
"noreply@github.com"
] |
abhishekshahgithub.noreply@github.com
|
a964f56d9cf00c52cf72e1ff825a4253e203d742
|
aaff711b31dcaf59e0924a8ff2928d6ff859b4a7
|
/main2.py
|
aa95dd857ed385b608685e42c783cce6c6d8330f
|
[] |
no_license
|
harupy/kaggle-dsb2019
|
abde925d0b529d07085b3010a79c56729409273d
|
144d34c40200523e7263ac2c3512c15b6fa522e7
|
refs/heads/master
| 2020-12-26T18:22:01.285659
| 2020-01-24T08:58:23
| 2020-01-24T08:58:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,521
|
py
|
import logging
import gc
import pickle
import sys
import warnings
import lightgbm as lgb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from pathlib import Path
from typing import List
if __name__ == "__main__":
sys.path.append("./")
warnings.filterwarnings("ignore")
from src.utils import (get_preprocess_parser, load_config,
configure_logger, timer, feature_existence_checker,
save_json, plot_confusion_matrix, seed_everything,
delete_duplicated_columns)
from src.features import (
Basic, generate_features, PastAssessment, PastClip, PastGame, Unified,
ModifiedUnified, UnifiedWithInstallationIDStats, RenewedFeatures,
PastActivity, ImprovedBasic, ImprovedPastAssessment, ImprovedPastGame,
PastSummary, PastSummary2, PastSummary3, PastSummary4, NakamaV8, Ratio,
PastSummary3TimeEncoding, Tfidf, Tfidf2, DecayedPastSummary3)
from src.validation import (get_validation, select_features,
remove_correlated_features,
get_assessment_number)
from src.models import get_model
from src.evaluation import (
OptimizedRounder, truncated_cv_with_adjustment_of_distribution)
seed_everything(42)
parser = get_preprocess_parser()
args = parser.parse_args()
config = load_config(args.config)
configure_logger(args.config, log_dir=args.log_dir, debug=args.debug)
logging.info(f"config: {args.config}")
logging.info(f"debug: {args.debug}")
config["args"] = dict()
config["args"]["config"] = args.config
# make output dir
output_root_dir = Path(config["output_dir"])
feature_dir = Path(config["dataset"]["feature_dir"])
config_name: str = args.config.split("/")[-1].replace(".yml", "")
output_dir = output_root_dir / config_name
output_dir.mkdir(parents=True, exist_ok=True)
logging.info(f"model output dir: {str(output_dir)}")
config["model_output_dir"] = str(output_dir)
# ===============================
# === Data/Feature Loading
# ===============================
input_dir = Path(config["dataset"]["dir"])
if not feature_existence_checker(feature_dir, config["features"]):
with timer(name="load data"):
if args.dryrun:
train = pd.read_csv(input_dir / "train.csv", nrows=50000)
test = pd.read_csv(input_dir / "test.csv", nrows=50000)
else:
train = pd.read_csv(input_dir / "train.csv")
test = pd.read_csv(input_dir / "test.csv")
sample_submission = pd.read_csv(
input_dir / "sample_submission.csv")
with timer(name="generate features"):
generate_features(
train,
test,
namespace=globals(),
required=config["features"],
overwrite=args.force,
log=True)
if globals().get("train") is not None:
del train, test
gc.collect()
if args.dryrun:
exit(0)
with timer("feature loading"):
x_train = pd.concat([
pd.read_feather(feature_dir / (f + "_train.ftr"), nthreads=-1)
for f in config["features"]
],
axis=1,
sort=False)
x_valid = pd.concat([
pd.read_feather(feature_dir / (f + "_valid.ftr"), nthreads=-1)
for f in config["features"]
],
axis=1,
sort=False)
x_test = pd.concat([
pd.read_feather(feature_dir / (f + "_test.ftr"), nthreads=-1)
for f in config["features"]
],
axis=1,
sort=False)
x_train = delete_duplicated_columns(x_train)
x_valid = delete_duplicated_columns(x_valid)
x_test = delete_duplicated_columns(x_test)
groups = x_train["installation_id"].values
groups_valid = x_valid["installation_id"].values
test_nth_assessment = get_assessment_number(x_valid, x_test)
threshold = np.percentile(test_nth_assessment, 95)
y_train = x_train["accuracy_group"].values.reshape(-1)
y_valid = x_valid["accuracy_group"].values.reshape(-1)
cols: List[str] = x_train.columns.tolist()
cols.remove("installation_id")
cols.remove("accuracy_group")
x_train, x_valid, x_test = x_train[cols], x_valid[cols], x_test[cols]
assert len(x_train) == len(y_train)
logging.debug(f"number of features: {len(cols)}")
logging.debug(f"number of train samples: {len(x_train)}")
logging.debug(f"numbber of test samples: {len(x_test)}")
# ===============================
# === Feature Selection with correlation
# ===============================
with timer("Feature Selection with correlation"):
to_remove = remove_correlated_features(x_train, cols)
cols = [col for col in cols if col not in to_remove]
logging.info('Training with {} features'.format(len(cols)))
x_train, x_valid, x_test = x_train[cols], x_valid[cols], x_test[cols]
# ===============================
# === Feature Selection with importance
# ===============================
# get folds
x_train["group"] = groups
splits = get_validation(x_train, config)
x_train.drop("group", axis=1, inplace=True)
feature_selection_config = {
"model": {
"name": "lgbm2",
"mode": "regression",
"sampling": {
"name": "none"
},
"model_params": {
"boosting_type": "gbdt",
"objective": "regression",
"metrics": "rmse",
"max_depth": 6,
"num_leaves": 25,
"learning_rate": 0.01,
"subsample": 0.8,
"subsample_freq": 1,
"colsample_bytree": 0.7,
"data_random_seed": 9999,
"seed": 9999,
"bagging_seed": 9999,
"feature_fraction_seed": 9999,
"reg_alpha": 0.1,
"min_split_gain": 0.5,
"reg_lambda": 0.1,
"min_data_in_leaf": 100,
"n_jobs": -1,
"verbose": -1,
"first_metric_only": True
},
"train_params": {
"num_boost_round": 5000,
"early_stopping_rounds": 100,
"verbose_eval": 100
}
},
"post_process": {
"params": {
"reverse": False,
"n_overall": 20,
"n_classwise": 20
}
}
}
with timer("Feature Selection with importance"):
model = get_model(feature_selection_config)
_, _, _, _, feature_importance, _ = model.cv(
y_train,
x_train[cols],
x_test[cols],
groups,
feature_name=cols,
folds_ids=splits,
threshold=threshold,
config=feature_selection_config,
log=True)
feature_imp = feature_importance.reset_index().rename(
columns={
"index": "feature",
0: "value"
})
cols = select_features(
cols,
feature_imp,
config,
delete_higher_importance=False)
logging.info(f"Train cols: {len(cols)}")
x_train, x_valid, x_test = x_train[cols], x_valid[cols], x_test[cols]
# ===============================
# === Adversarial Validation
# ===============================
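    # (added note) Adversarial validation trains a classifier to separate
    # train rows from validation rows; an AUC near 0.5 means the two sets
    # are hard to tell apart, i.e. their feature distributions match.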
logging.info("Adversarial Validation")
with timer("Adversarial Validation"):
train_adv = x_train.copy()
test_adv = x_valid.copy()
train_adv["target"] = 0
test_adv["target"] = 1
groups_adv = np.concatenate([groups, groups_valid])
train_test_adv = pd.concat(
[train_adv, test_adv],
axis=0,
sort=False).reset_index(drop=True)
train_test_adv["group"] = groups_adv
splits = get_validation(train_test_adv, config)
train_test_adv.drop("group", axis=1, inplace=True)
aucs = []
importance = np.zeros(len(cols))
for trn_idx, val_idx in splits:
x_train_adv = train_test_adv.loc[trn_idx, cols]
y_train_adv = train_test_adv.loc[trn_idx, "target"]
x_val_adv = train_test_adv.loc[val_idx, cols]
y_val_adv = train_test_adv.loc[val_idx, "target"]
train_lgb = lgb.Dataset(x_train_adv, label=y_train_adv)
valid_lgb = lgb.Dataset(x_val_adv, label=y_val_adv)
model_params = config["av"]["model_params"]
train_params = config["av"]["train_params"]
clf = lgb.train(
model_params,
train_lgb,
valid_sets=[train_lgb, valid_lgb],
valid_names=["train", "valid"],
**train_params)
aucs.append(clf.best_score)
importance += clf.feature_importance(
importance_type="gain") / len(splits)
# Check the feature importance
feature_imp = pd.DataFrame(
sorted(zip(importance, cols)), columns=["value", "feature"])
plt.figure(figsize=(20, 10))
sns.barplot(
x="value",
y="feature",
data=feature_imp.sort_values(by="value", ascending=False).head(50))
plt.title("LightGBM Features")
plt.tight_layout()
plt.savefig(output_dir / "feature_importance_adv.png")
config["av_result"] = dict()
config["av_result"]["score"] = dict()
for i, auc in enumerate(aucs):
config["av_result"]["score"][f"fold{i}"] = auc
config["av_result"]["feature_importances"] = \
feature_imp.set_index("feature").sort_values(
by="value",
ascending=False
).to_dict()["value"]
# ===============================
# === Train model
# ===============================
logging.info("Train model")
# get folds
with timer("Train model"):
x_train["group"] = groups
splits = get_validation(x_train, config)
x_train.drop("group", axis=1, inplace=True)
model = get_model(config)
models, oof_preds, y_oof, test_preds, \
feature_importance, eval_results = model.cv(
y_train,
x_train[cols],
x_test[cols],
groups,
feature_name=cols,
folds_ids=splits,
threshold=threshold,
config=config,
log=True)
config["eval_results"] = dict()
for k, v in eval_results.items():
config["eval_results"][k] = v
if "classwise" not in config["model"]["name"]:
feature_imp = feature_importance.reset_index().rename(
columns={
"index": "feature",
0: "value"
})
plt.figure(figsize=(20, 10))
sns.barplot(
x="value",
y="feature",
data=feature_imp.sort_values(by="value", ascending=False).head(50))
plt.title("Model Features")
plt.tight_layout()
plt.savefig(output_dir / "feature_importance_model.png")
else:
for k, v in feature_importance.items():
feature_imp = v.reset_index().rename(columns={
"index": "feature",
0: "value"
})
plt.figure(figsize=(20, 10))
sns.barplot(
x="value",
y="feature",
data=feature_imp.sort_values(by="value",
ascending=False).head(50))
plt.title(f"Feature importance: Assessment {k}")
plt.tight_layout()
plt.savefig(output_dir / f"feature_importance_assessment_{k}.png")
# Confusion Matrix
plot_confusion_matrix(
y_oof,
oof_preds,
classes=np.array(["acc_0", "acc_1", "acc_2", "acc_3"]),
normalize=True,
save_path=output_dir / "confusion_matrix_oof.png")
raw_normal_oof = model.raw_normal_oof
OptR = OptimizedRounder(n_overall=20, n_classwise=20)
OptR.fit(raw_normal_oof, y_train)
normal_oof_preds = OptR.predict(raw_normal_oof)
truncated_result = truncated_cv_with_adjustment_of_distribution(
normal_oof_preds, y_train, groups, test_nth_assessment, n_trials=1000)
config["truncated_mean_adjust"] = truncated_result["mean"]
config["truncated_std_adjust"] = truncated_result["std"]
config["truncated_upper"] = truncated_result["0.95upper_bound"]
config["truncated_lower"] = truncated_result["0.95lower_bound"]
plot_confusion_matrix(
y_train,
normal_oof_preds,
classes=np.array(["acc_0", "acc_1", "acc_2", "acc_3"]),
normalize=True,
save_path=output_dir / "confusion_matrix_normal_oof.png")
# ===============================
# === Save
# ===============================
save_path = output_dir / "output.json"
save_json(config, save_path)
np.save(output_dir / "oof_preds.npy", oof_preds)
with open(output_dir / "model.pkl", "wb") as m:
pickle.dump(models, m)
|
[
"arabiannight1994@yahoo.co.jp"
] |
arabiannight1994@yahoo.co.jp
|
31e541aaccddff85da5d1bf271711495bf95994e
|
68a3c320323f5b3f0fd8c568953c1f473f91772a
|
/cmds/admin/owner.py
|
d8e5583592cf8f63ba2524f255503c99a8bea3cf
|
[] |
no_license
|
Mj11jM/Void-Bot
|
e9011ebed29dd49f84941174d114ca844d4e4cc4
|
c24309e4ab2cd742a2e0cf5a60fe02ad18efd45b
|
refs/heads/master
| 2023-04-01T01:46:15.009796
| 2020-08-22T20:53:42
| 2020-08-22T20:53:42
| 254,979,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,329
|
py
|
import discord
import random
import aiohttp
import os
import cmds.utils.loader
from io import BytesIO
from bot_index import startExt, presDB, freeDB, AllLoad
from discord.ext import commands, tasks
class Owner(commands.Cog):
"""Owner Only Commands"""
def __init__(self, bot):
self.bot = bot
self.presCycle.start()
def cog_unload(self):
self.presCycle.cancel()
# Do I need to explain?
@commands.command(hidden=True, aliases=['sd', 'die'])
@commands.is_owner()
async def shutdown(self, ctx):
embed = discord.Embed(title="Shutdown Initiated",
description="You're a monster for how many times you kill me for 'testing'", color=0xff0000)
await ctx.send(embed=embed)
await ctx.bot.logout()
# load extensions on command
@commands.command(hidden=True)
@commands.is_owner()
async def load(self, ctx, extension):
try:
load = cmds.utils.loader.Loader(self)
loader = load.pathWalkerLoader('./cmds')
for i in loader:
if i.endswith('.py') and i[2:-3].casefold().split('.')[2] == extension.casefold():
i = i[2:-3]
self.bot.load_extension(i)
                    print('Loaded: ' + i)
else:
continue
embed = discord.Embed(title='Successfully loaded '+extension+'!', color=0xffff00)
await ctx.send(embed=embed)
startExt.append(extension)
except commands.ExtensionNotFound:
embed = discord.Embed(title='Extension: "' + extension +
'" was not found!', color=0xff0000)
await ctx.send(embed=embed)
except commands.ExtensionAlreadyLoaded:
embed = discord.Embed(title='Extension: "' + extension +
'" is already loaded!', color=0xff0000)
await ctx.send(embed=embed)
# unload extensions on command
@commands.command(hidden=True)
@commands.is_owner()
async def unload(self, ctx, extension):
try:
load = cmds.utils.loader.Loader(self)
loader = load.pathWalkerLoader('./cmds')
for i in loader:
if i.endswith('.py') and i[2:-3].casefold().split('.')[2] == extension.casefold():
i = i[2:-3]
self.bot.unload_extension(i)
                    print('Unloaded: ' + i)
else:
continue
embed = discord.Embed(title='Successfully un-loaded '+extension+'!', color=0xffff00)
await ctx.send(embed=embed)
startExt.remove(extension)
except commands.ExtensionNotLoaded:
embed = discord.Embed(title='Extension: "' + extension +
'" is already unloaded or was not found!', color=0xff0000)
await ctx.send(embed=embed)
# reload extensions on command
@commands.command(hidden=True, name='reload', description="test description")
@commands.is_owner()
async def reloadExt(self, ctx, extension):
try:
load = cmds.utils.loader.Loader(self)
loader = load.pathWalkerLoader('./cmds')
for i in loader:
if i.endswith('.py') and i[2:-3].casefold().split('.')[2] == extension.casefold():
i = i[2:-3]
self.bot.reload_extension(i)
print('Reloaded: ' + i)
else:
continue
embed = discord.Embed(title='Successfully re-loaded '+extension+'!', color=0xffff00)
await ctx.send(embed=embed)
except commands.ExtensionNotFound:
embed = discord.Embed(title='Extension: "' + extension +
'" was not found!', color=0xff0000)
await ctx.send(embed=embed)
except commands.ExtensionNotLoaded:
embed = discord.Embed(title='Extension: "' + extension +
'" did not load or was not found!', color=0xff0000)
await ctx.send(embed=embed)
@commands.command(hidden=True)
async def allExt(self, ctx):
load = cmds.utils.loader.Loader(self)
loader = load.pathWalkerLoader('./cmds')
for i in loader:
if i.endswith('.py'):
i = i[2:-3]
self.bot.reload_extension(i)
print('Reloaded: ' + i)
else:
continue
@commands.command(hidden=True, aliases=["chpres"], description="0=Playing\n1=Streaming\n2=Listening\n3=Watching\ndnd = do not disturb\nonline=why are you asking\nidle = orange/afk")
@commands.is_owner()
async def changePresence(self, ctx, status: str, types: int, *, name: str):
await self.bot.change_presence(status=status, activity=discord.Activity(type=types, name=name))
@commands.command(hidden=True)
@commands.is_owner()
async def rPresSet(self, ctx, status: str, types: int, *, name: str):
allAsList = [status, types, name]
author = ctx.author.id
allList = {
"owner": author
}
presDB.find_one_and_update(allList, {'$push': {"rPres": allAsList}})
currentDB = presDB.find_one(allList)
await ctx.send("success", delete_after=5)
@tasks.loop(minutes=5.0)
async def presCycle(self):
findMe = {
"owner": self.bot.owner_id
}
presList = presDB.find_one(findMe)
if presList != None:
randomPres = random.choice(presList['rPres'])
status = randomPres[0]
types = randomPres[1]
name = randomPres[2]
await self.bot.change_presence(status=status, activity=discord.Activity(type=types, name=name))
@presCycle.before_loop
async def before_presCycle(self):
await self.bot.wait_until_ready()
@commands.command(hidden=True)
@commands.is_owner()
async def stopPres(self, ctx):
self.presCycle.cancel()
embed = discord.Embed(description="Stopped Presence Cycle", color=0x00aa00)
await ctx.send(embed=embed)
@commands.command(hidden=True)
@commands.is_owner()
async def startPres(self, ctx):
self.presCycle.start()
embed = discord.Embed(description="Started Presence Cycle", color=0x00aa00)
await ctx.send(embed=embed)
@commands.command(hidden=True)
@commands.is_owner()
async def broadcastGame(self, ctx, *, message):
ayy = list(freeDB.find())
for a in ayy:
newGuild = self.bot.get_guild(a["guild_id"])
new_channel = newGuild.get_channel(a["channel_id"])
await new_channel.send("{}\n<@&{}>".format(message, str(a['role_ID'])))
@commands.command(hidden=True)
@commands.is_owner()
async def changeAvatar(self, ctx, *, message):
async with aiohttp.ClientSession() as session:
async with session.get(message) as resp:
if resp.status != 200:
return await ctx.channel.send('Could not download file...')
toBytes = await resp.read()
await self.bot.user.edit(avatar=toBytes)
def setup(bot):
bot.add_cog(Owner(bot))
|
[
"themj11jm@gmail.com"
] |
themj11jm@gmail.com
|
5c684988b26cbf5fdd03b0b5626cee3a1148da9a
|
c7be0921028d8fb471b752e3e57708c3bfdd440d
|
/wordgame.py
|
5b4cb332ad6b08918132068cb5a876c4e40053c3
|
[] |
no_license
|
elca337/wordgame
|
6c7baa4c14c614e08bf41bac1c086077bf8461e8
|
8f62d9a0af40d48a72a2ebc1d6822bc59e639799
|
refs/heads/master
| 2020-04-26T10:54:21.922593
| 2019-03-02T21:14:59
| 2019-03-02T21:14:59
| 173,499,641
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
verb = input("Please enter a verb: ")
noun = input("Please enter a noun: ")
adjective = input("Please enter an adjective: ")
print("I enjoy practice, I find it helps me to ", verb, "better.")
print("Without practice my ", noun, "would probably not even work.")
print("My code is getting more ", adjective, "every single day!")
|
[
"noreply@github.com"
] |
elca337.noreply@github.com
|
34a0c47bf183c0720e16bd205cbddff72d4e149e
|
de64b143a346585f51590bd674e8d13bbc672386
|
/algorithm/2020/0320/JaeBin.py
|
bb5c5a4ffc160fa3f6e14a8bb84e82eddfafc43d
|
[] |
no_license
|
ai-kmu/etc
|
304ec20f59e4026025abdcbcae21863c80630dcb
|
9c29941e19b7dd2a2037b110dd6e16690e9a0cc2
|
refs/heads/master
| 2023-08-21T16:30:31.149956
| 2023-08-21T16:26:19
| 2023-08-21T16:26:19
| 199,843,899
| 3
| 24
| null | 2023-05-31T09:56:59
| 2019-07-31T11:36:16
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,062
|
py
|
# 12. Printer
def solution(priorities, location):
answer = 0
first = priorities[0]
    big = [first, 0]  # start from the first job so indexing below is safe
stack = []
for i in range(len(priorities)):
stack.append([priorities[i], i])
if first < priorities[i]:
big = stack[i]
# big = max(stack[len(priorities)])
# print('big : ', big)
while big[0] != first:
cost = stack.pop(0)
stack.append(cost)
first = stack[0][0]
# print('stack : ', stack)
answer_lst = [i+1 for i in range(len(stack)) if stack[i][1] == location]
answer = answer_lst.pop()
return answer
priorities_1 = [2, 1, 3, 2]
location_1 = 2
priorities_2 = [1, 1, 9, 1, 1, 1]
location_2 = 0
print(solution(priorities_1, location_1))
print()
print(solution(priorities_2, location_2))
# My solution: skipped because the index handling never came out right
# def solution(priorities, location):
# answer = 0
# first = priorities[0]
# move = 0
# big = 0
#
# for idx in range(len(priorities)):
# if first < priorities[idx]:
# big = priorities[idx]
# # print('big : ', big)
#
# while first != big:
# cost = priorities.pop(0)
# priorities.append(cost)
# first = priorities[0]
# move += 1
#
# print(priorities)
# answer = priorities.index(priorities[-move])
# return answer
# Someone else's solution
def solution(priorities, location):
pi_list = [(p, i) for i, p in enumerate(priorities)]
print('pi_list : ', pi_list)
waiting_q = []
max_p = 0
while pi_list:
pi = pi_list.pop(0)
print('pi : ', pi)
priority = pi[0]
print('priority : ', priority)
p_list = [priority for priority, idx in pi_list]
if p_list:
max_p = max(p_list)
if priority >= max_p:
waiting_q.append(pi)
else:
pi_list.append(pi)
for i, item in enumerate(waiting_q):
if item[1] == location:
return i+1
|
[
"noreply@github.com"
] |
ai-kmu.noreply@github.com
|
b551694fd8d3e854e5c1500fc69e605d0b90bafe
|
1484709afe5cce20402a4fd348f9a3ea7ac61f87
|
/CS201/Homework/Homework 2.py
|
798ba26ec7ee0c16d3f413d8b1f718e6625125e2
|
[] |
no_license
|
coxl24wv/Introduction-Computer-Programming-Part1-CS201
|
5ae87845962b63e9658fffef90110cf01f467f8f
|
c9172fc4ac9ba941da26a9c74b6c9c90ebe3b1ff
|
refs/heads/master
| 2020-07-05T18:31:01.651491
| 2019-08-16T13:30:50
| 2019-08-16T13:30:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
# Second Program/Homework 2
# Programmer Lauren Cox
# Date of last revision 15-01-2016
name = input('What is your name?')
print(name + ' loves Python.')
|
[
"noreply@github.com"
] |
coxl24wv.noreply@github.com
|
49188fddd898ec5dea31df3dc0056aadeceb2734
|
3c0a46303746ee2349462570281dc305ebe79f0d
|
/src/transport_cards/migrations/0001_initial.py
|
1da299bdab0bad6a0a534c411de9cd691c8c9634
|
[] |
no_license
|
devtimberg/to24
|
431eef0fd4f34b1cab243681d5f9893d97d08c85
|
c4b6082f217328a9a3e463b807c9ac1f38720604
|
refs/heads/master
| 2020-03-08T13:01:11.098765
| 2018-04-03T23:42:32
| 2018-04-03T23:42:32
| 128,146,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,660
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-22 10:19
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(db_index=True, max_length=255, unique=True, verbose_name='Email')),
('is_active', models.BooleanField(default=True, verbose_name='\u0410\u043a\u0442\u0438\u0432\u0435\u043d')),
('is_staff', models.BooleanField(default=False, verbose_name='\u0410\u0434\u043c\u0438\u043d\u0438\u0441\u0442\u0440\u0430\u0442\u043e\u0440')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': '\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c',
'verbose_name_plural': '\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u0438',
},
),
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0438 \u0432\u0440\u0435\u043c\u044f \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f')),
('updated', models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u0438 \u0432\u0440\u0435\u043c\u044f \u043f\u043e\u0441\u043b\u0435\u0434\u043d\u0435\u0433\u043e \u0438\u0437\u043c\u0435\u043d\u0435\u043d\u0438\u044f')),
('sum', models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=11, verbose_name='\u0421\u0443\u043c\u043c\u0430 \u043f\u043b\u0430\u0442\u0435\u0436\u0430')),
],
options={
'verbose_name': '\u041f\u043b\u0430\u0442\u0435\u0436',
'verbose_name_plural': '\u041f\u043b\u0430\u0442\u0435\u0436\u0438',
},
),
migrations.CreateModel(
name='Price',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sum', models.DecimalField(decimal_places=2, default=0, max_digits=11, verbose_name='\u0426\u0435\u043d\u0430')),
('category', models.CharField(choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('E', 'E')], max_length=1, null=True, unique=True, verbose_name='\u041a\u0430\u0442\u0435\u0433\u043e\u0440\u0438\u044f')),
],
options={
'verbose_name': '\u0426\u0435\u043d\u043d\u0438\u043a',
'verbose_name_plural': '\u0426\u0435\u043d\u043d\u0438\u043a\u0438',
},
),
migrations.CreateModel(
name='TransportCard',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0438 \u0432\u0440\u0435\u043c\u044f \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f')),
('updated', models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u0438 \u0432\u0440\u0435\u043c\u044f \u043f\u043e\u0441\u043b\u0435\u0434\u043d\u0435\u0433\u043e \u0438\u0437\u043c\u0435\u043d\u0435\u043d\u0438\u044f')),
('phone', models.CharField(max_length=25, null=True, verbose_name='\u0422\u0435\u043b\u0435\u0444\u043e\u043d')),
('status', models.SmallIntegerField(blank=True, choices=[(0, '\u041e\u0436\u0438\u0434\u0430\u0435\u0442 \u043e\u043f\u043b\u0430\u0442\u044b'), (1, '\u041e\u0436\u0438\u0434\u0430\u0435\u0442 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f'), (2, '\u041e\u0436\u0438\u0434\u0430\u0435\u0442 \u0440\u0435\u043d\u0434\u0435\u0440\u0430'), (3, '\u0413\u043e\u0442\u043e\u0432\u0430'), (4, '\u041e\u0448\u0438\u0431\u043a\u0430 \u043f\u0440\u0438 \u0440\u0435\u0433\u0438\u0441\u0442\u0440\u0430\u0446\u0438\u0438 \u043a\u0430\u0440\u0442\u044b'), (5, '\u041e\u0448\u0438\u0431\u043a\u0430 \u043f\u0440\u0438 \u0440\u0435\u043d\u0434\u0435\u0440\u0435')], default=0, verbose_name='\u0421\u0442\u0430\u0442\u0443\u0441')),
('exception', models.TextField(blank=True, default='', verbose_name='\u0418\u0441\u043a\u043b\u044e\u0447\u0435\u043d\u0438\u0435')),
('pdf', models.FileField(blank=True, null=True, upload_to='pdf/', verbose_name='PDF')),
('FName', models.CharField(default='', max_length=100, verbose_name='\u0418\u043c\u044f')),
('Name', models.CharField(default='', max_length=100, verbose_name='\u0424\u0430\u043c\u0438\u043b\u0438\u044f')),
('MName', models.CharField(blank=True, default='', max_length=100, verbose_name='\u041e\u0442\u0447\u0435\u0441\u0442\u0432\u043e')),
('Series', models.CharField(default='', max_length=100, verbose_name='\u0421\u0435\u0440\u0438\u044f')),
('Number', models.PositiveIntegerField(null=True, verbose_name='\u041d\u043e\u043c\u0435\u0440')),
('Organization', models.CharField(default='', max_length=100, verbose_name='\u0412\u044b\u0434\u0430\u043d \u043a\u0435\u043c')),
('Date', models.DateField(null=True, verbose_name='\u0412\u044b\u0434\u0430\u043d \u043a\u043e\u0433\u0434\u0430')),
('Foreign', models.BooleanField(default=False, verbose_name='\u0418\u043d\u043e\u0441\u0442\u0440\u0430\u043d\u043d\u044b\u0439 \u0433\u0440\u0430\u0436\u0434\u0430\u043d\u0438\u043d')),
('specials', models.CharField(blank=True, choices=[('taxi', '\u042f\u0432\u043b\u044f\u0435\u0442\u0441\u044f c\u043f\u0435\u0446\u0442\u0435\u0445\u043d\u0438\u043a\u043e\u0439 \u0438\u043b\u0438 \u0442\u0430\u043a\u0441\u0438'), ('MVD', '\u042f\u0432\u043b\u044f\u0435\u0442\u0441\u044f \u0443\u0447\u0435\u0431\u043d\u043e\u0439 \u0438\u043b\u0438 \u043f\u0440\u0438\u043d\u0430\u0434\u043b\u0435\u0436\u0438\u0442 \u0413\u0418\u0411\u0414\u0414 \u0438\u043b\u0438 \u041c\u0412\u0414')], max_length=4, null=True, verbose_name='\u041e\u0441\u043e\u0431\u0435\u043d\u043d\u043e\u0441\u0442\u044c \u0422\u0421')),
('EAISTO_code', models.CharField(blank=True, default='', max_length=21, verbose_name='\u041a\u043e\u0434 \u0415\u0410\u0418\u0421\u0422\u041e')),
('BodyNumber', models.CharField(blank=True, default='', max_length=100, null=True, verbose_name='\u041a\u0443\u0437\u043e\u0432 \u2116')),
('Note', models.TextField(blank=True, default='', verbose_name='\u0417\u0430\u043c\u0435\u0447\u0430\u043d\u0438\u044f')),
('RegistrationNumber', models.CharField(blank=True, default='', max_length=10, null=True, verbose_name='\u0413\u043e\u0441\u0443\u0434\u0430\u0440\u0441\u0442\u0432\u0435\u043d\u043d\u044b\u0439 \u0440\u0435\u0433\u0438\u0441\u0442\u0440\u0430\u0446\u0438\u043e\u043d\u043d\u044b\u0439 \u0437\u043d\u0430\u043a')),
('VehicleCategory', models.CharField(choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('E', 'E')], max_length=1, null=True, verbose_name='\u0421\u0420\u0422\u0421 \u0438\u043b\u0438 \u041f\u0422\u0421')),
('VehicleCategory2', models.CharField(choices=[('A', (('L', '\u041c\u043e\u0442\u043e\u0442\u0440\u0430\u043d\u0441\u043f\u043e\u0440\u0442 L'),)), ('B', (('M1', '\u041b\u0435\u0433\u043a\u043e\u0432\u043e\u0439 M1'), ('N1', '\u0413\u0440\u0443\u0437\u043e\u0432\u043e\u0439 \u0434\u043e 3.5 \u0442\u043e\u043d\u043d N1'))), ('C', (('N2', '\u0413\u0440\u0443\u0437\u043e\u0432\u043e\u0439 \u0434\u043e 12 \u0442\u043e\u043d\u043d N2'), ('N3', '\u0413\u0440\u0443\u0437\u043e\u0432\u043e\u0439 \u0431\u043e\u043b\u0435\u0435 12 \u0442\u043e\u043d\u043d N3'))), ('D', (('M2', '\u0410\u0432\u0442\u043e\u0431\u0443\u0441\u044b \u0434\u043e 5 \u0442\u043e\u043d\u043d M2'), ('M3', '\u0410\u0432\u0442\u043e\u0431\u0443\u0441\u044b \u0431\u043e\u043b\u0435\u0435 5 \u0442\u043e\u043d\u043d M3'))), ('E', (('O1', '\u041f\u0440\u0438\u0446\u0435\u043f\u044b \u0434\u043e 150 \u043a\u0433 O1'), ('O2', '\u041f\u0440\u0438\u0446\u0435\u043f\u044b \u0434\u043e 3.5 \u0442\u043e\u043d\u043d O2'), ('O3', '\u041f\u0440\u0438\u0446\u0435\u043f\u044b \u0434\u043e 10 \u0442\u043e\u043d\u043d O3'), ('O4', '\u041f\u0440\u0438\u0446\u0435\u043f\u044b \u0431\u043e\u043b\u0435\u0435 10 \u0442\u043e\u043d\u043d O4')))], default='B', max_length=2, verbose_name='\u041a\u0430\u0442\u0435\u0433\u043e\u0440\u0438\u044f \u0422\u0421 (\u041e\u041a\u041f)')),
('VIN', models.CharField(blank=True, default='', max_length=100, null=True, verbose_name='VIN')),
('Year', models.PositiveIntegerField(null=True, verbose_name='\u0413\u043e\u0434 \u0432\u044b\u043f\u0443\u0441\u043a\u0430 \u0422\u0421')),
('FrameNumber', models.CharField(blank=True, default='', max_length=100, null=True, verbose_name='\u0428\u0430\u0441\u0441\u0438 (\u0420\u0430\u043c\u0430) \u2116')),
('EmptyMass', models.PositiveIntegerField(null=True, verbose_name='\u041c\u0430\u0441\u0441\u0430 \u0431\u0435\u0437 \u043d\u0430\u0433\u0440\u0443\u0437\u043a\u0438 (\u043a\u0433)')),
('MaxMass', models.PositiveIntegerField(blank=True, null=True, verbose_name='\u0420\u0430\u0437\u0440\u0435\u0448\u0435\u043d\u043d\u0430\u044f \u043c\u0430\u043a\u0441\u0438\u043c\u0430\u043b\u044c\u043d\u0430\u044f \u043c\u0430\u0441\u0441\u0430 (\u043a\u0433)')),
('Fuel', models.CharField(blank=True, choices=[(None, '\u0411\u0435\u0437 \u0442\u043e\u043f\u043b\u0438\u0432\u0430'), ('Petrol', '\u0411\u0435\u043d\u0437\u0438\u043d'), ('Diesel', '\u0414\u0438\u0437\u0435\u043b\u044c\u043d\u043e\u0435 \u0442\u043e\u043f\u043b\u0438\u0432\u043e'), ('PressureGas', 'C\u0436\u0430\u0442\u044b\u0439 \u0433\u0430\u0437'), ('LiquefiedGas', '\u0421\u0436\u0438\u0436\u0435\u043d\u043d\u044b\u0439 \u0433\u0430\u0437')], max_length=15, null=True, verbose_name='\u0422\u0438\u043f \u0442\u043e\u043f\u043b\u0438\u0432\u0430')),
('BrakingSystem', models.CharField(choices=[(None, '\u0411\u0435\u0437 \u0442\u043e\u0440\u043c\u043e\u0437\u043d\u043e\u0439 \u0441\u0438\u0441\u0442\u0435\u043c\u044b'), ('Mechanical', '\u041c\u0435\u0445\u0430\u043d\u0438\u0447\u0435\u0441\u043a\u0438\u0439'), ('Hydraulic', '\u0413\u0438\u0434\u0440\u0430\u0432\u043b\u0438\u0447\u0435\u0441\u043a\u0438\u0439'), ('Pneumatic', '\u041f\u043d\u0435\u0432\u043c\u0430\u0442\u0438\u0447\u0435\u0441\u043a\u0438\u0439'), ('Combined', '\u041a\u043e\u043c\u0431\u0438\u043d\u0438\u0440\u043e\u0432\u0430\u043d\u043d\u044b\u0439')], max_length=15, null=True, verbose_name='\u0422\u0438\u043f \u043f\u0440\u0438\u0432\u043e\u0434\u0430 \u0442\u043e\u0440\u043c\u043e\u0437\u043d\u043e\u0439 \u0441\u0438\u0441\u0442\u0435\u043c\u044b')),
('Tyres', models.CharField(default='', max_length=100, verbose_name='\u041c\u0430\u0440\u043a\u0430 \u0448\u0438\u043d')),
('Killometrage', models.PositiveIntegerField(null=True, verbose_name='\u041f\u0440\u043e\u0431\u0435\u0433 \u0422\u0421 (\u043a\u043c)')),
('Make', models.CharField(default='', max_length=100, verbose_name='\u041c\u0430\u0440\u043a\u0430')),
('Model', models.CharField(default='', max_length=100, verbose_name='\u041c\u043e\u0434\u0435\u043b\u044c')),
('DocumentType', models.CharField(choices=[('RegTalon', '\u0421\u0432\u0438\u0434\u0435\u0442\u0435\u043b\u044c\u0441\u0442\u0432\u043e \u0440\u0435\u0433\u0438\u0441\u0442\u0440\u0430\u0446\u0438\u0438 \u0442\u0440\u0430\u043d\u0441\u043f\u043e\u0440\u0442\u043d\u043e\u0433\u043e \u0441\u0440\u0435\u0434\u0441\u0442\u0432\u0430'), ('PTS', '\u041f\u0430\u0441\u043f\u043e\u0440\u0442 \u0442\u0440\u0430\u043d\u0441\u043f\u043e\u0440\u0442\u043d\u043e\u0433\u043e \u0441\u0440\u0435\u0434\u0441\u0442\u0432\u0430')], max_length=15, null=True, verbose_name='\u0422\u0438\u043f \u0440\u0435\u0433\u0438\u0441\u0442\u0440\u0430\u0446\u0438\u043e\u043d\u043d\u043e\u0433\u043e \u0434\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430')),
('Validity', models.PositiveIntegerField(choices=[(6, '6'), (12, '12'), (24, '24')], null=True, verbose_name='\u0421\u0440\u043e\u043a \u0434\u0435\u0439\u0441\u0442\u0432\u0438\u044f \u043a\u0430\u0440\u0442\u044b')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='transport_cards', to=settings.AUTH_USER_MODEL, verbose_name='\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c')),
],
options={
'verbose_name': '\u0422\u0440\u0430\u043d\u0441\u043f\u043e\u0440\u0442\u043d\u0430\u044f \u043a\u0430\u0440\u0442\u0430',
'verbose_name_plural': '\u0422\u0440\u0430\u043d\u0441\u043f\u043e\u0440\u0442\u043d\u044b\u0435 \u043a\u0430\u0440\u0442\u044b',
},
),
migrations.AddField(
model_name='payment',
name='card',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='payment', to='transport_cards.TransportCard', verbose_name='\u041d\u043e\u043c\u0435\u0440 \u043f\u043b\u0430\u0442\u0435\u0436\u0430'),
),
]
|
[
"devtimberg@gmail.com"
] |
devtimberg@gmail.com
|
a7b6e5afec9fe2d8ccf4ca906cff2d118c13692d
|
4738129b25fceb5c8fdc83eebdd7621e41910230
|
/miscellanea/ugly_number/python/is_ugly_number.py
|
0a6fee75df8dbaf3f2775f601f2713bc1e4d86d9
|
[
"MIT"
] |
permissive
|
MDGSF/JustCoding
|
43aa20773b9c8325e6ba632e9941d235e9e285aa
|
2faa46323df991a12014021b49d568387a882233
|
refs/heads/master
| 2023-07-21T19:07:15.899019
| 2023-07-09T07:29:59
| 2023-07-09T07:29:59
| 201,714,062
| 15
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
#!/usr/bin/env python3
def is_ugly_number(num):
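    # An ugly number is a positive integer whose only prime factors are 2, 3 and 5
    # (1 counts, since it has no other prime factors).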
if num <= 0:
return False
while num % 2 == 0:
num //= 2
while num % 3 == 0:
num //= 3
while num % 5 == 0:
num //= 5
return num == 1
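# Quick sanity check (a minimal sketch, not part of the original exercise):
# 12 = 2*2*3 is ugly, while 14 = 2*7 is not.
assert is_ugly_number(12) and not is_ugly_number(14)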
def main():
for i in range(10):
print(f'{i}: {is_ugly_number(i)}')
if __name__ == "__main__":
main()
|
[
"huangjian@minieye.cc"
] |
huangjian@minieye.cc
|
2070b583b3a2cffd1e5aaa4a5458c9d7d1be3026
|
584f589b276bd9e6b6b6bad652875fa932b70b46
|
/app.py
|
3dcd54b3ee6ce6f19dc325563aff7fbf4c233862
|
[] |
no_license
|
velicue/contestify
|
34abce3dd12501b69e88a89a9b99594a489f7eb8
|
08bf296b84050498ba279a07378fdda40e5615d8
|
refs/heads/master
| 2021-01-12T20:07:42.105813
| 2015-12-07T14:04:29
| 2015-12-07T14:04:29
| 44,889,591
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
from app import app, service
service.emailService()
#app.debug = True
app.run()
|
[
"mattzhang9@gmail.com"
] |
mattzhang9@gmail.com
|
86afb52964faf98f53393f2ec9ff86d22017d1a9
|
e9fd01aaf5cd5aea0b92aa72a5652cc4b08bd26d
|
/TD/Idees/animation.py
|
de326e1c3849966e01c9dda14e4d3c93cfb19abf
|
[] |
no_license
|
Costadoat/Informatique
|
cc45f053a66f8b4773dbabb48ce99b3323075c1c
|
f478327928a6c04586ca819d4c2deedac7a50efb
|
refs/heads/master
| 2023-09-01T18:07:49.508745
| 2023-08-25T14:11:11
| 2023-08-25T14:11:11
| 39,880,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,854
|
py
|
# in pylab mode the next two imports would be implicit; we do them here for
# correctness' sake, in other words the code also runs outside pylab mode
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# gravitational acceleration on Earth in m*s^-2
g = 9.81
#g = 1.6249
# acceleration vector due to g
ag = np.array((0,-g))
# coefficient of restitution (ratio of velocity after and before bounce)
# see http://en.wikipedia.org/wiki/Coefficient_of_restitution
cor = 0.95
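# e.g. with cor = 0.95, a ball that hits a wall at 2.0 m/s rebounds at 1.9 m/s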
# bounds of the room
xlim = (0,300)
ylim = (0,20)
# bounds of the basket
xlimbg = (150,200)
ylimbg = (0,10)
xlimbd = (250,300)
ylimbd = (0,10)
ep=5
# 1 millisecond delta t
delta_t = 0.001
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=xlim, ylim=ylim)
ax.grid()
ax.fill([160,160,190,190,160], [0,8,8,0,0], "b")
# in Python 2.7 we have to derive from object to have new-style classes
# in Python 3 this is still valid, but not necessary, as all classes are new-style
class Ball(object):
def __init__(self, xy, v):
"""
:param xy: Initial position.
:param v: Initial velocity.
"""
self.xy = np.array(xy)
self.v = np.array(v)
self.scatter, = ax.plot([], [], 'o', markersize=20)
def update(self):
        if self.xy[0] <= xlim[0] or self.xy[0] < xlimbg[1] and self.xy[0] > xlimbg[1] - ep and self.xy[1] < ylimbg[1]:
            # hit the left room wall (or the basket's right face): push right
            self.v[0] = cor * np.abs(self.v[0])
        elif self.xy[0] >= xlim[1] or self.xy[0] > xlimbg[0] and self.xy[0] < xlimbg[0] + ep and self.xy[1] < ylimbg[1]:
            # hit the right room wall (or the basket's left face): push left
            self.v[0] = - cor * np.abs(self.v[0])
        if self.xy[1] <= ylim[0] or self.xy[0] > xlimbg[0] and self.xy[0] < xlimbg[1] and self.xy[1] <= ylimbg[1] and self.xy[1] >= ylimbg[1] - ep:
            # hit the floor (or the basket's top face): push up
            self.v[1] = cor * np.abs(self.v[1])
        elif self.xy[1] >= ylim[1]:
            # hit the ceiling: push down
            self.v[1] = - cor * np.abs(self.v[1])
        # apply gravity over one time step (delta_t = 0.001 s)
        delta_v = delta_t * ag
self.v += delta_v
self.xy += self.v
self.xy[0] = np.clip(self.xy[0], xlim[0], xlim[1])
self.xy[1] = np.clip(self.xy[1], ylim[0], ylim[1])
self.scatter.set_data(self.xy)
balls = [Ball((3.0,18.0), (0.2,0.3)), Ball((4.0,17.0), (-0.2,0.1)), Ball((1.0,19.0), (-0.3,0.5))]
# the single-ball list below overrides the three-ball setup above
balls = [Ball((3.0,18.0), (3,0.3))]
def init():
return []
def animate(t):
# t is time in seconds
global xy, v
for ball in balls:
ball.update()
# have to return an iterable
return [ball.scatter for ball in balls]
# interval in milliseconds
# we're watching in slow motion (delta t is shorter than interval)
ani = animation.FuncAnimation(fig, animate, np.arange(0,100,delta_t), init_func=init, interval=10, blit=True)
plt.show()
|
[
"costadoat@crans.org"
] |
costadoat@crans.org
|
63753e6623eb3fb1c72e7b10b37e8748f1af4ea6
|
a1bffcd8854e1843e56bb812d4d83b3161a5211e
|
/tests/unit/modules/net_tools/nios/test_nios_mx_record.py
|
3e120d57947e3432c7b67268436e1ef01f2de139
|
[] |
no_license
|
goneri/ansible.community
|
1a71f9d98c164b77f8ed2ed7f558b4963005ff8f
|
f26f612dd0a3154050d90b51a75502018c95f6e4
|
refs/heads/master
| 2020-12-29T07:47:35.353515
| 2020-01-22T17:43:18
| 2020-01-22T17:43:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,268
|
py
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.ansible.community.plugins.modules import nios_mx_record
from ansible_collections.ansible.community.plugins.module_utils.net_tools.nios import api
from ansible_collections.ansible.community.tests.unit.compat.mock import patch, MagicMock, Mock
from ..test_nios_module import TestNiosModule, load_fixture
class TestNiosMXRecordModule(TestNiosModule):
module = nios_mx_record
def setUp(self):
super(TestNiosMXRecordModule, self).setUp()
self.module = MagicMock(name='ansible_collections.ansible.community.plugins.modules.nios_mx_record.WapiModule')
self.module.check_mode = False
self.module.params = {'provider': None}
self.mock_wapi = patch('ansible_collections.ansible.community.plugins.modules.nios_mx_record.WapiModule')
self.exec_command = self.mock_wapi.start()
self.mock_wapi_run = patch('ansible_collections.ansible.community.plugins.modules.nios_mx_record.WapiModule.run')
        self.load_config = self.mock_wapi_run.start()
def tearDown(self):
super(TestNiosMXRecordModule, self).tearDown()
self.mock_wapi.stop()
self.mock_wapi_run.stop()
def _get_wapi(self, test_object):
wapi = api.WapiModule(self.module)
wapi.get_object = Mock(name='get_object', return_value=test_object)
wapi.create_object = Mock(name='create_object')
wapi.update_object = Mock(name='update_object')
wapi.delete_object = Mock(name='delete_object')
return wapi
def load_fixtures(self, commands=None):
self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
self.load_config.return_value = dict(diff=None, session='session')
def test_nios_mx_record_create(self):
self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible.com',
'mx': 'mailhost.ansible.com', 'preference': 0, 'comment': None, 'extattrs': None}
test_object = None
test_spec = {
"name": {"ib_req": True},
"mx": {"ib_req": True},
"preference": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
print("WAPI: ", wapi)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(),
'mx': 'mailhost.ansible.com', 'preference': 0})
def test_nios_mx_record_update_comment(self):
self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible.com', 'mx': 'mailhost.ansible.com',
'preference': 0, 'comment': 'updated comment', 'extattrs': None}
test_object = [
{
"comment": "test comment",
"_ref": "mxrecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
"name": "ansible.com",
"mx": "mailhost.ansible.com",
"preference": 0,
"extattrs": {}
}
]
test_spec = {
"name": {"ib_req": True},
"mx": {"ib_req": True},
"preference": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
def test_nios_mx_record_remove(self):
self.module.params = {'provider': None, 'state': 'absent', 'name': 'ansible.com', 'mx': 'mailhost.ansible.com',
'preference': 0, 'comment': None, 'extattrs': None}
ref = "mxrecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/false"
test_object = [{
"comment": "test comment",
"_ref": ref,
"name": "ansible.com",
"mx": "mailhost.ansible.com",
"extattrs": {'Site': {'value': 'test'}}
}]
test_spec = {
"name": {"ib_req": True},
"mx": {"ib_req": True},
"preference": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.delete_object.assert_called_once_with(ref)
|
[
"ansible_migration@example.com"
] |
ansible_migration@example.com
|
c22be5c1ca7f421ee748d71de9f1986d2efd4573
|
c33007bc802a9c1f39379919f7999705bfb663c0
|
/yademo/utils/text_util.py
|
f0e7a1b5a293e8728a2a7283cd1699bfa9605e27
|
[] |
no_license
|
Sijiu/mcrawl
|
69a953a900848edc64a553dbb6a22b67f5b9c064
|
cd1332e7eeaf3cb6bcb6a11914e1bfbd13df195f
|
refs/heads/master
| 2021-01-18T11:08:26.889630
| 2016-05-25T11:40:07
| 2016-05-25T11:40:07
| 59,658,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,095
|
py
|
# -*- coding: utf-8 -*-
# @author: xuhe
# @date: 15/10/29
# @description:
FONT_ZH_EN_DICT = {
"宋体": "SimSun",
"微软雅黑": "Microsoft YaHei",
"黑体": "SimHei",
"隶书": "LiS",
"幼圆": "YouYuan",
"仿宋": "FangSong",
"楷体": "KaiTi",
"华文黑体": "STHeiti",
"细明体": "MingLi",
"标楷体": "DFKai-SB",
"新宋体": "NSimSun",
"俪黑 Pro": "LiHei Pro Medium",
"俪宋 Pro": "LiSong Pro Light",
"苹果俪中黑": "Apple LiGothic Medium",
"苹果俪细宋": "Apple LiSung Light",
"新细明体": "PMingLi",
"微软正黑": "Microsoft JhengHei",
"华文细黑": "STXihei",
"华文楷体": "STKaiti",
"华文宋体": "STSong",
"华文中宋": "STZhongsong",
"华文仿宋": "STFangsong",
"方正舒体": "FZShuTi",
"方正姚体": "FZYaoti",
"华文彩云": "STCaiyun",
"华文琥珀": "STHupo",
"华文隶书": "STLiti",
"华文行楷": "STXingkai",
"华文新魏": "STXinwei",
"仿宋_GB2312": "FangSong_GB2312",
"楷体_GB2312": "KaiTi_GB2312",
}
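# Lookup sketch: FONT_ZH_EN_DICT.get("宋体", "宋体") returns "SimSun";
# unknown font names fall back to the original string.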
|
[
"mxh403@163.com"
] |
mxh403@163.com
|
dfedb649c95fe61715d89b2b2d28ee0d4657a49e
|
72160b01d6b2fa87c6b584fe7ac4dfabba623780
|
/tracking/bustersAgents.py
|
cf2bcd388b0654df58ed0c9edc7af3522c7ed608
|
[] |
no_license
|
mdogan13/intro-to-ai-pacman-projects
|
fe28a5dbb6ba6036607eff4159e11a36bd8de5cd
|
a619a6ade150eabc9d71f5d97ca2c370db8f4c46
|
refs/heads/master
| 2020-05-30T15:01:08.267941
| 2019-06-02T06:06:15
| 2019-06-02T06:06:15
| 189,808,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,356
|
py
|
# bustersAgents.py
# ----------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import util
from game import Agent
from game import Directions
from keyboardAgents import KeyboardAgent
import inference
import busters
class NullGraphics:
"Placeholder for graphics"
def initialize(self, state, isBlue = False):
pass
def update(self, state):
pass
def pause(self):
pass
def draw(self, state):
pass
def updateDistributions(self, dist):
pass
def finish(self):
pass
class KeyboardInference(inference.InferenceModule):
"""
Basic inference module for use with the keyboard.
"""
def initializeUniformly(self, gameState):
"Begin with a uniform distribution over ghost positions."
self.beliefs = util.Counter()
for p in self.legalPositions: self.beliefs[p] = 1.0
self.beliefs.normalize()
def observe(self, observation, gameState):
noisyDistance = observation
emissionModel = busters.getObservationDistribution(noisyDistance)
pacmanPosition = gameState.getPacmanPosition()
allPossible = util.Counter()
for p in self.legalPositions:
trueDistance = util.manhattanDistance(p, pacmanPosition)
if emissionModel[trueDistance] > 0:
allPossible[p] = 1.0
allPossible.normalize()
self.beliefs = allPossible
def elapseTime(self, gameState):
pass
def getBeliefDistribution(self):
return self.beliefs
class BustersAgent:
"An agent that tracks and displays its beliefs about ghost positions."
def __init__( self, index = 0, inference = "ExactInference", ghostAgents = None, observeEnable = True, elapseTimeEnable = True):
inferenceType = util.lookup(inference, globals())
self.inferenceModules = [inferenceType(a) for a in ghostAgents]
self.observeEnable = observeEnable
self.elapseTimeEnable = elapseTimeEnable
def registerInitialState(self, gameState):
"Initializes beliefs and inference modules"
import __main__
self.display = __main__._display
for inference in self.inferenceModules:
inference.initialize(gameState)
self.ghostBeliefs = [inf.getBeliefDistribution() for inf in self.inferenceModules]
self.firstMove = True
def observationFunction(self, gameState):
"Removes the ghost states from the gameState"
agents = gameState.data.agentStates
gameState.data.agentStates = [agents[0]] + [None for i in range(1, len(agents))]
return gameState
def getAction(self, gameState):
"Updates beliefs, then chooses an action based on updated beliefs."
for index, inf in enumerate(self.inferenceModules):
if not self.firstMove and self.elapseTimeEnable:
inf.elapseTime(gameState)
self.firstMove = False
if self.observeEnable:
inf.observeState(gameState)
self.ghostBeliefs[index] = inf.getBeliefDistribution()
self.display.updateDistributions(self.ghostBeliefs)
return self.chooseAction(gameState)
def chooseAction(self, gameState):
"By default, a BustersAgent just stops. This should be overridden."
return Directions.STOP
class BustersKeyboardAgent(BustersAgent, KeyboardAgent):
"An agent controlled by the keyboard that displays beliefs about ghost positions."
def __init__(self, index = 0, inference = "KeyboardInference", ghostAgents = None):
KeyboardAgent.__init__(self, index)
BustersAgent.__init__(self, index, inference, ghostAgents)
def getAction(self, gameState):
return BustersAgent.getAction(self, gameState)
def chooseAction(self, gameState):
return KeyboardAgent.getAction(self, gameState)
from distanceCalculator import Distancer
from game import Actions
from game import Directions
class GreedyBustersAgent(BustersAgent):
"An agent that charges the closest ghost."
def registerInitialState(self, gameState):
"Pre-computes the distance between every two points."
BustersAgent.registerInitialState(self, gameState)
self.distancer = Distancer(gameState.data.layout, False)
def chooseAction(self, gameState):
"""
First computes the most likely position of each ghost that has
not yet been captured, then chooses an action that brings
Pacman closer to the closest ghost (according to mazeDistance!).
To find the mazeDistance between any two positions, use:
self.distancer.getDistance(pos1, pos2)
To find the successor position of a position after an action:
successorPosition = Actions.getSuccessor(position, action)
livingGhostPositionDistributions, defined below, is a list of
util.Counter objects equal to the position belief
distributions for each of the ghosts that are still alive. It
is defined based on (these are implementation details about
which you need not be concerned):
1) gameState.getLivingGhosts(), a list of booleans, one for each
agent, indicating whether or not the agent is alive. Note
that pacman is always agent 0, so the ghosts are agents 1,
onwards (just as before).
2) self.ghostBeliefs, the list of belief distributions for each
of the ghosts (including ghosts that are not alive). The
indices into this list should be 1 less than indices into the
gameState.getLivingGhosts() list.
"""
pacmanPosition = gameState.getPacmanPosition()
legal = [a for a in gameState.getLegalPacmanActions()]
livingGhosts = gameState.getLivingGhosts()
        livingGhostPositionDistributions = [beliefs for i, beliefs in enumerate(self.ghostBeliefs) if livingGhosts[i+1]]
"*** YOUR CODE HERE ***"
# Find the most likely position of the closest ghost(minPos)
distances = [(self.distancer.getDistance(belief.argMax(), pacmanPosition), belief) for belief in livingGhostPositionDistributions]
minPos = min(distances, key=lambda t: t[0])[1].argMax()
# For each legal action, calculate (successor coordinate, distance to successor coordinate from minPos, action needed to be taken to get to that coordinate)
actions = [(Actions.getSuccessor(pacmanPosition, action), self.distancer.getDistance(minPos, Actions.getSuccessor(pacmanPosition, action)), action) for action in legal]
# Then pick the tuple with the minimum distance and return that tuple's action as the best action.
bestAction = min(actions, key=lambda t: t[1])[2]
return bestAction
|
[
"noreply@github.com"
] |
mdogan13.noreply@github.com
|
05a1b79677dcf03dd493d3d2e027c349e84bc3f6
|
37d0dd5d90c98e4d519b3b1f2e6567c24e434913
|
/blog/admin.py
|
ce896de6aeefbebbe5f6c1d8621efec8cd6798ed
|
[
"MIT"
] |
permissive
|
chnlong12/django2_DjangoUeditor
|
117127be241177ede745edd505e3fb2dd308ce62
|
f70f21230661a7959baf7cdfc1fd1ae665c9413c
|
refs/heads/master
| 2020-08-09T16:40:53.464697
| 2018-04-20T04:59:14
| 2018-04-20T04:59:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
from django.contrib import admin
from blog.models import *
# Register your models here.
admin.site.register(Article)
|
[
"740264559@qq.com"
] |
740264559@qq.com
|
fa93322d25b8ce6f4e1ebd56ecf747da0d7c6261
|
715ce41725734fffd3959db8d2a32be27e2cfea8
|
/evalml/automl/utils.py
|
996823a5a215e1bd3552dd9eb7d6588a6029a0d5
|
[
"BSD-3-Clause"
] |
permissive
|
johnkabler/evalml
|
a85a26a6d734dc145d400ceab81a4ba8bc10bff6
|
bd04b000fc9f8285dc7f0365051e516126dbdc8e
|
refs/heads/main
| 2022-12-24T08:42:47.688851
| 2020-10-05T15:49:27
| 2020-10-05T15:49:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 692
|
py
|
from evalml.objectives import get_objective
from evalml.problem_types import handle_problem_types
def get_default_primary_search_objective(problem_type):
"""Get the default primary search objective for a problem type.
Arguments:
problem_type (str or ProblemType): problem type of interest.
Returns:
ObjectiveBase: primary objective instance for the problem type.
"""
problem_type = handle_problem_types(problem_type)
objective_name = {'binary': 'Log Loss Binary',
'multiclass': 'Log Loss Multiclass',
'regression': 'R2'}[problem_type.value]
return get_objective(objective_name, return_instance=True)
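# Usage sketch (everything referenced is defined above):
#     get_default_primary_search_objective("binary")
#     # -> a Log Loss Binary objective instance, per the mapping in the function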
|
[
"noreply@github.com"
] |
johnkabler.noreply@github.com
|
4c1ac643b9852ca20c0b0471ac175c6da6c67e3c
|
26087dc771fa3032c5598582ebb5aa780f5c7d7b
|
/doos_op.py
|
9b9f498cddc35a955c48402e54ded073d3a9f500
|
[] |
no_license
|
TiemenB/Blender
|
3d9c18d8104f94a2125730110fe403685c0ef982
|
c4ba27144bd76f1d2cd4ac5c6c9dfb4d0b4bf997
|
refs/heads/main
| 2023-02-02T09:41:27.507347
| 2020-12-16T08:17:31
| 2020-12-16T08:17:31
| 321,281,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,491
|
py
|
import bpy
class MESH_OT_doos(bpy.types.Operator):
"""maak een doos met opgegeven maten"""
bl_idname = "mesh.doos"# komy in de afdeling mesh en heet planken
bl_label = "doos"
bl_options = {'REGISTER', 'UNDO'} # nodig voor menu in beeld te krijgen
# hier staan waardes die je mee kan geven
naam:bpy.props.StringProperty(
name="naam",
default='doos',
)
x:bpy.props.FloatProperty(
name="x-maat(cm)",
default=100,
)
y:bpy.props.FloatProperty(
name="y-maat(cm)",
default=20,
)
z:bpy.props.FloatProperty(
name="z-maat(cm)",
default=20,
)
def execute(self,context):
naam=self.naam
x=self.x
y=self.y
z=self.z
bpy.ops.mesh.primitive_cube_add(size=(1))
bpy.context.object.name=naam
bpy.ops.transform.resize(value=(x/100,y/100,z/100), orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
return{'FINISHED'}
def register():
bpy.utils.register_class(MESH_OT_doos)
def unregister():
bpy.utils.unregister_class(MESH_OT_doos)
if __name__ == '__main__':
register()
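# Usage sketch inside Blender, assuming this module has been run so the operator
# is registered (inputs are in cm; execute() divides by 100):
#     bpy.ops.mesh.doos(naam="crate", x=50, y=30, z=20)  # a 0.5 x 0.3 x 0.2 m box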
|
[
"noreply@github.com"
] |
TiemenB.noreply@github.com
|
3a1c04e0ab201854b64eb7f178f6a2987750c4c6
|
a9916adc07af57b01fcf7dafd8bb8e44542463a3
|
/stoptraining.py
|
9cbcc717657535b20901baf02bee28b6fb607e67
|
[] |
no_license
|
maxiaoyuzdz/cifar_classifier
|
d71d96c7a2915ef46988fcf134da8d5ad54f9165
|
cafe356a4bbeddb8839f089fa1493e7b325ba090
|
refs/heads/master
| 2021-01-20T16:18:31.892080
| 2017-09-18T14:25:31
| 2017-09-18T14:25:31
| 95,727,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
import numpy as np
from logoperator import ReadAllLossLog
def main():
epoch, m, training_loss, validation_loss, training_accuracy, validation_accuracy = ReadAllLossLog('/media/maxiaoyu/data/Log/q11.log')
shouldv = 20
va = np.array(validation_accuracy)[::-1]
max_va = va.max()
# print(max_va)
va_std = []
# print(va.size)
for index in np.arange(0, shouldv, 5):
# print(va[index: index + 5], va[index: index + 5].std())
va_std.append(va[index: index + 5].std())
    # criterion 1: average spread of validation accuracy over 5-epoch windows
va_std_avg = np.mean(va_std)
# print(va_std_avg)
va_max_small = va[0:shouldv].max()
# print(va_max_small)
    # criterion 2: gap between the global best accuracy and the recent best
max_dis = np.abs(max_va - va_max_small)
# print(max_dis)
if va_std_avg <= 0.07 and max_dis <= 0.2:
print('end')
#return True
else:
print('not end')
#return False
print('end')
if __name__ == '__main__':
main()
|
[
"maxiaoyuzdz@gmail.com"
] |
maxiaoyuzdz@gmail.com
|
5cd808f525c720804758561dc493a8c43b1be782
|
7a19dfc7c8741e3c2e5e0c2f1b0260959c303ab4
|
/scripts/artifacts/simInfo.py
|
c7e68494062c76df096b76f7c4aa51ee872497e5
|
[
"MIT"
] |
permissive
|
abrignoni/iLEAPP
|
89735f04b96a697c16e0d2edc44d95e529c01584
|
d9c43007aca17554148dbd2b0ffaad44fe3869f1
|
refs/heads/main
| 2023-08-08T13:36:48.234249
| 2023-08-01T20:29:17
| 2023-08-01T20:29:17
| 229,842,283
| 528
| 104
|
MIT
| 2023-09-12T21:45:41
| 2019-12-24T00:28:42
|
Python
|
UTF-8
|
Python
| false
| false
| 3,756
|
py
|
import os
import datetime
import json
import plistlib
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, logdevinfo
def timestampcalc(timevalue):
timestamp = (datetime.datetime.fromtimestamp(int(timevalue)).strftime('%Y-%m-%d %H:%M:%S'))
return timestamp
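# Example (result depends on the local timezone): timestampcalc(1609459200)
# returns '2021-01-01 00:00:00' on a machine whose clock is set to UTC.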
def get_siminfo(files_found, report_folder, seeker, wrap_text):
data_lista = []
data_listb = []
for file_found in files_found:
file_found = str(file_found)
#file_found = './com.apple.commcenter.data.plist'
with open(file_found, "rb") as fp:
pl = plistlib.load(fp)
for key, val in pl.items():
if key == 'PersonalWallet':
for x, y in val.items():
simid = x
for a, z in y.items():
cbver = z.get('cb_ver', '')
labelid = z.get('label-id', '')
labelidconf = z.get('label-id-confirmed', '')
tss = z.get('ts', '')
tss = timestampcalc(tss)
cbid = z.get('cb_id', '')
mdn = z.get('mdn', '')
esim = z.get('esim', '')
eapaka = z.get('eap_aka', '')
types = z.get('type', '')
nosrc = z.get('no_src', '')
data_lista.append((tss,mdn,esim,types,cbid,nosrc,labelid,labelidconf,eapaka,cbver))
if key == 'unique-sim-label-store':
for x, y in val.items():
simlabelstoreid = x
tag = y.get('tag', '')
text = y.get('text', '')
ts = y.get('ts', '')
ts = timestampcalc(ts)
data_listb.append((ts,tag,simlabelstoreid,text))
if data_lista:
report = ArtifactHtmlReport('SIM - UUID')
report.start_artifact_report(report_folder, 'SIM - UUID')
report.add_script()
        data_headers = ('Timestamp Unknown','MDN','ESIM','Type','CB_ID','No_SRC','Label-ID','Label-ID Confirmed','EAP_AKA','CB_Ver')
report.write_artifact_data_table(data_headers, data_lista, file_found)
report.end_artifact_report()
tsvname = f'SIM - UUID'
tsv(report_folder, data_headers, data_lista, tsvname)
tlactivity = f'SIM - UUID'
timeline(report_folder, tlactivity, data_lista, data_headers)
else:
logfunc('No SIM - UUID data available')
if data_listb:
report = ArtifactHtmlReport('SIM - Unique Label Store')
report.start_artifact_report(report_folder, 'SIM - Unique Label Store')
report.add_script()
data_headers = ('Timestamp','Tag','SIM Label Store ID','Text')
report.write_artifact_data_table(data_headers, data_listb, file_found)
report.end_artifact_report()
tsvname = f'SIM - Unique Label Store'
tsv(report_folder, data_headers, data_listb, tsvname)
tlactivity = f'SIM - Unique Label Store'
timeline(report_folder, tlactivity, data_listb, data_headers)
else:
logfunc('No SIM - Unique Label Store data available')
__artifacts__ = {
"siminfo": (
"SIM Info",
('*/com.apple.commcenter.data.plist'),
get_siminfo)
}
|
[
"abrignoni@gmail.com"
] |
abrignoni@gmail.com
|
ab3a049774e068b3f59821b6c0194c172286ce1d
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Projects/venv/Lib/site-packages/setuptools/_distutils/__init__.py
|
d1cbc7a38c1e92584848e326567b22da7d3c940f
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:969400a6147feee8560b67db484a6ce096bd5b86307b337f217fcb244b779215
size 250
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
cd5a6a8fe1ea58ecfa49c6e2ca13de97a7f9cdf4
|
543923e777241c5e27920dc3a652c28b0e916477
|
/flask_mysql/validations/Recipes/flask_app/models/recipe.py
|
597fc373f9eeb66926252073fa315c1d2060ab53
|
[] |
no_license
|
KaterynaShydlovska/python
|
b04398c5ea6f04e787cd253d807ac6d7db711153
|
34cd7b5277a6405aff55722c6da94ad065bfde19
|
refs/heads/main
| 2023-05-04T00:12:20.924373
| 2021-05-25T21:59:23
| 2021-05-25T21:59:23
| 363,815,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,217
|
py
|
from flask_app.config.mysqlconnection import connectToMySQL
from flask import flash
import re
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class Recipes:
def __init__(self, data):
self.name = data['name']
self.description = data['description']
self.instructions = data['instructions']
self.time = data['time']
self.user_id = data['user_id']
self.created_at = data['created_at']
self.updated_at = data['updated_at']
@staticmethod
def validate_recipe(data):
is_valid = True
        if len(data['name']) < 3:
            flash("Name must be at least 3 characters long!")
            is_valid = False
        if len(data['description']) < 3:
            flash("Description must be at least 3 characters long!")
            is_valid = False
        if len(data['instructions']) < 3:
            flash("Instructions must be at least 3 characters long!")
            is_valid = False
return is_valid
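    # Validation sketch (flash() needs a Flask request context):
    #     Recipes.validate_recipe({'name': 'ab', 'description': 'soup',
    #                              'instructions': 'boil'})  # -> False, name too short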
@classmethod
def create(cls, data):
query = ' INSERT INTO recipe (name, description, instructions, time, user_id) VALUES ( %(name)s, %(description)s, %(instructions)s, %(time)s, %(user_id)s);'
res = connectToMySQL('recipes').query_db(query, data)
return res
@classmethod
def getAllrecipes(cls, data):
query = ' SELECT * FROM recipe LEFT JOIN user ON user.id = recipe.user_id WHERE user.id = %(id)s;'
res = connectToMySQL('recipes').query_db(query, data)
return res
@classmethod
def getOnerecipes(cls, data):
query = ' SELECT * FROM recipe WHERE id = %(id)s;'
res = connectToMySQL('recipes').query_db(query, data)
return res
@classmethod
def updateRecipes(cls, data):
query = 'UPDATE recipe SET name=%(name)s, description=%(description)s, instructions=%(instructions)s, time=%(time)s WHERE id = %(id)s;'
res = connectToMySQL('recipes').query_db(query, data)
return res
@classmethod
def deleteRecipe(cls, data):
query = 'DELETE FROM recipe WHERE id = %(id)s;'
connectToMySQL('recipes').query_db(query, data)
|
[
"katerynashydlovska@Katerynas-MacBook-Air.local"
] |
katerynashydlovska@Katerynas-MacBook-Air.local
|
03360380c64760cca86dce93a18d862111b20792
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-apig/huaweicloudsdkapig/v2/model/delete_gateway_response_type_v2_request.py
|
0ccfa905440f6191d4bd6234f8754221a55beb5f
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 9,417
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteGatewayResponseTypeV2Request:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'group_id': 'str',
'response_id': 'str',
'response_type': 'str'
}
attribute_map = {
'instance_id': 'instance_id',
'group_id': 'group_id',
'response_id': 'response_id',
'response_type': 'response_type'
}
def __init__(self, instance_id=None, group_id=None, response_id=None, response_type=None):
"""DeleteGatewayResponseTypeV2Request
The model defined in huaweicloud sdk
        :param instance_id: Instance ID; obtained from the instance information page of the API Gateway console.
        :type instance_id: str
        :param group_id: API group ID.
        :type group_id: str
        :param response_id: Response ID.
        :type response_id: str
        :param response_type: Error type - AUTH_FAILURE: authentication failed (IAM or app authentication check failed) - AUTH_HEADER_MISSING: the identity source for authentication is missing - AUTHORIZER_FAILURE: the custom authorizer returned an authentication failure - AUTHORIZER_CONF_FAILURE: custom authorizer error (communication failure, abnormal response, etc.) - AUTHORIZER_IDENTITIES_FAILURE: the identity source for frontend custom authentication is missing or invalid - BACKEND_UNAVAILABLE: the backend is unavailable (network unreachable) - BACKEND_TIMEOUT: backend timeout (interaction with the backend exceeded the configured time) - THROTTLED: the number of API calls exceeded the configured throttling threshold - UNAUTHORIZED: the credential used is not authorized to access this API - ACCESS_DENIED: access denied (e.g. a configured access-control policy was triggered or an attack was intercepted) - NOT_FOUND: no API matched the request - REQUEST_PARAMETERS_FAILURE: request parameter validation failed or unsupported HTTP method - DEFAULT_4XX: other 4XX-class errors - DEFAULT_5XX: other 5XX-class errors - THIRD_AUTH_FAILURE: the third-party authorizer returned an authentication failure - THIRD_AUTH_IDENTITIES_FAILURE: the identity source for third-party authentication is missing or invalid - THIRD_AUTH_CONF_FAILURE: third-party authorizer error (communication failure, abnormal response, etc.)
        :type response_type: str
"""
self._instance_id = None
self._group_id = None
self._response_id = None
self._response_type = None
self.discriminator = None
self.instance_id = instance_id
self.group_id = group_id
self.response_id = response_id
self.response_type = response_type
@property
def instance_id(self):
"""Gets the instance_id of this DeleteGatewayResponseTypeV2Request.
        Instance ID; obtained from the instance information page of the API Gateway console.
:return: The instance_id of this DeleteGatewayResponseTypeV2Request.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this DeleteGatewayResponseTypeV2Request.
        Instance ID; obtained from the instance information page of the API Gateway console.
:param instance_id: The instance_id of this DeleteGatewayResponseTypeV2Request.
:type instance_id: str
"""
self._instance_id = instance_id
@property
def group_id(self):
"""Gets the group_id of this DeleteGatewayResponseTypeV2Request.
        API group ID.
:return: The group_id of this DeleteGatewayResponseTypeV2Request.
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""Sets the group_id of this DeleteGatewayResponseTypeV2Request.
        API group ID.
:param group_id: The group_id of this DeleteGatewayResponseTypeV2Request.
:type group_id: str
"""
self._group_id = group_id
@property
def response_id(self):
"""Gets the response_id of this DeleteGatewayResponseTypeV2Request.
        Response ID.
:return: The response_id of this DeleteGatewayResponseTypeV2Request.
:rtype: str
"""
return self._response_id
@response_id.setter
def response_id(self, response_id):
"""Sets the response_id of this DeleteGatewayResponseTypeV2Request.
        Response ID.
:param response_id: The response_id of this DeleteGatewayResponseTypeV2Request.
:type response_id: str
"""
self._response_id = response_id
@property
def response_type(self):
"""Gets the response_type of this DeleteGatewayResponseTypeV2Request.
        Error type - AUTH_FAILURE: authentication failed (IAM or app authentication check failed) - AUTH_HEADER_MISSING: the identity source for authentication is missing - AUTHORIZER_FAILURE: the custom authorizer returned an authentication failure - AUTHORIZER_CONF_FAILURE: custom authorizer error (communication failure, abnormal response, etc.) - AUTHORIZER_IDENTITIES_FAILURE: the identity source for frontend custom authentication is missing or invalid - BACKEND_UNAVAILABLE: the backend is unavailable (network unreachable) - BACKEND_TIMEOUT: backend timeout (interaction with the backend exceeded the configured time) - THROTTLED: the number of API calls exceeded the configured throttling threshold - UNAUTHORIZED: the credential used is not authorized to access this API - ACCESS_DENIED: access denied (e.g. a configured access-control policy was triggered or an attack was intercepted) - NOT_FOUND: no API matched the request - REQUEST_PARAMETERS_FAILURE: request parameter validation failed or unsupported HTTP method - DEFAULT_4XX: other 4XX-class errors - DEFAULT_5XX: other 5XX-class errors - THIRD_AUTH_FAILURE: the third-party authorizer returned an authentication failure - THIRD_AUTH_IDENTITIES_FAILURE: the identity source for third-party authentication is missing or invalid - THIRD_AUTH_CONF_FAILURE: third-party authorizer error (communication failure, abnormal response, etc.)
:return: The response_type of this DeleteGatewayResponseTypeV2Request.
:rtype: str
"""
return self._response_type
@response_type.setter
def response_type(self, response_type):
"""Sets the response_type of this DeleteGatewayResponseTypeV2Request.
        Error type - AUTH_FAILURE: authentication failed (IAM or app authentication check failed) - AUTH_HEADER_MISSING: the identity source for authentication is missing - AUTHORIZER_FAILURE: the custom authorizer returned an authentication failure - AUTHORIZER_CONF_FAILURE: custom authorizer error (communication failure, abnormal response, etc.) - AUTHORIZER_IDENTITIES_FAILURE: the identity source for frontend custom authentication is missing or invalid - BACKEND_UNAVAILABLE: the backend is unavailable (network unreachable) - BACKEND_TIMEOUT: backend timeout (interaction with the backend exceeded the configured time) - THROTTLED: the number of API calls exceeded the configured throttling threshold - UNAUTHORIZED: the credential used is not authorized to access this API - ACCESS_DENIED: access denied (e.g. a configured access-control policy was triggered or an attack was intercepted) - NOT_FOUND: no API matched the request - REQUEST_PARAMETERS_FAILURE: request parameter validation failed or unsupported HTTP method - DEFAULT_4XX: other 4XX-class errors - DEFAULT_5XX: other 5XX-class errors - THIRD_AUTH_FAILURE: the third-party authorizer returned an authentication failure - THIRD_AUTH_IDENTITIES_FAILURE: the identity source for third-party authentication is missing or invalid - THIRD_AUTH_CONF_FAILURE: third-party authorizer error (communication failure, abnormal response, etc.)
:param response_type: The response_type of this DeleteGatewayResponseTypeV2Request.
:type response_type: str
"""
self._response_type = response_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteGatewayResponseTypeV2Request):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
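# Construction sketch (the field values here are illustrative placeholders):
#     request = DeleteGatewayResponseTypeV2Request(
#         instance_id="instance-id", group_id="group-id",
#         response_id="response-id", response_type="AUTH_FAILURE")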
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
a00a2497299d4256d3532de2895cd3f20effbe25
|
2d2dd7024f2ac732adf059fb86e75ff9ef2bc001
|
/commentparsingreddit/redditFetcher.py
|
32f33f72ef25f5e79db59be84713b659659c840e
|
[] |
no_license
|
plattenschieber/COINs2015Election
|
3bc359fd7c113c9ff66335ab8b378f0ee00741a1
|
b703d0d35ec4964440914e9bd3fb211268bb470a
|
refs/heads/master
| 2021-01-10T06:21:27.241723
| 2016-02-15T18:56:57
| 2016-02-15T18:56:57
| 44,482,467
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,273
|
py
|
import praw
import csv
from pprint import pprint
# setup a unique user agent to not get banned
r = praw.Reddit(user_agent='mac:COINs2015Election:v1.0.3 (by /u/plattenschieber)')
# a list of subreddits based on buzzwords and some manual ones where we know they are relevant for us
subredditlist = ['SandersForPresident', 'Clinton', 'hillaryclinton', 'democrats', 'Libertarian', 'PoliticalDiscussion', 'worldpolitics', 'POLITIC', 'politics', 'SandersAlerts', 'uspolitics', 'Liberal', 'The_Donald', 'Conservative', 'Conservatives', 'Marco_Rubio', 'republicans', 'Republican', 'ElectionPolls', 'Forecast2016', 'BenCarson']
# a '+' between all subreddits lets us get all named subreddits at once
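# e.g. r.get_subreddit('python+learnpython') fetches one combined listing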
subreddit = r.get_subreddit('+'.join(subredditlist))
# get the most recent 10,000 comments from the combined subreddit listing
subreddit_comments = subreddit.get_comments(limit=10000)
with open('comments.csv', 'w') as csvfile:
# count total relevant answers
count=0
# define dictionary for csv header and write it
fieldnames = ['author', 'created_utc', 'subreddit', 'subreddit_id', 'ups', 'downs', 'body']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
# flattening the comment tree since we don't care about the answering order
flat_comments = praw.helpers.flatten_tree(subreddit_comments)
# print only some specific comments
for comment in flat_comments:
# test if it contains relevant information about a candidate
buzzwords = ['vote', 'voting', 'sander', 'sanders', 'bernie', 'hillary', 'clinton', 'donald', 'trump', 'marco', 'rubio', 'ben', 'carson', 'republican', 'republicans', 'conservative', 'libertarian', 'democrats']
if any(buzz in comment.body.lower() for buzz in buzzwords):
count = count + 1
# print only first 100 character (for more look at comment.body)
print(str(count) + ": " + str(comment))
# and write relevant comment into csv
writer.writerow({'author':comment.author, 'created_utc':comment.created_utc, 'subreddit':comment.subreddit, 'subreddit_id':comment.subreddit_id, 'ups':comment.ups, 'downs':comment.downs, 'body':comment.body.encode('utf-8')})
print('Number of comments including buzzwords: ', count)
|
[
"plattenschieber@gmail.com"
] |
plattenschieber@gmail.com
|
091097d6f0291cd9c2424437ad0185a12eb147c0
|
8931653e0970afb45649b98a1c253cd759dd4f2b
|
/model_flcolagedo__gd_conexiones_def.py
|
d1141598fefe2d1f64715fedae201cdca9986875
|
[] |
no_license
|
yeboyebo/flcolagedo
|
7444909c9e58a1cd62fd605444e97ee77db62fe4
|
7054c3cdde2c9b7e47da62376d9ec35026d8e90f
|
refs/heads/master
| 2020-04-13T19:27:04.943804
| 2019-03-01T13:00:01
| 2019-03-01T13:00:01
| 163,402,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
# @class_declaration interna #
from YBLEGACY import qsatype
class interna(qsatype.objetoBase):
ctx = qsatype.Object()
def __init__(self, context=None):
self.ctx = context
# @class_declaration flcolagedo #
from YBLEGACY.constantes import *
class flcolagedo(interna):
def flcolagedo_getDesc(self):
return "descripcion"
def __init__(self, context=None):
super().__init__(context)
def getDesc(self):
return self.ctx.flcolagedo_getDesc()
# @class_declaration head #
class head(flcolagedo):
def __init__(self, context=None):
super().__init__(context)
# @class_declaration ifaceCtx #
class ifaceCtx(head):
def __init__(self, context=None):
super().__init__(context)
# @class_declaration FormInternalObj #
class FormInternalObj(qsatype.FormDBWidget):
def _class_init(self):
self.iface = ifaceCtx(self)
|
[
"javiercortesveliz@gmail.com"
] |
javiercortesveliz@gmail.com
|
b6747965849a70d5277a34229eaf3531be0e53c5
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/tests/i18n/obsolete_translations/__init__.py
|
3c69a69a26e7500e7884ccba3dcffbd47861d80b
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 150
|
py
|
from django.utils.translation import gettext as _
string1 = _("This is a translatable string.")
# Obsolete string.
# string2 = _("Obsolete string.")
|
[
"felisiak.mariusz@gmail.com"
] |
felisiak.mariusz@gmail.com
|
464a87d0d9a78b8573100bd56517f8595229ec4d
|
44bbfe1c9a7f16e632cdd27c2de058033b33ea6d
|
/mayan/apps/lock_manager/apps.py
|
27e11f8bd162add6c07c31c7945ed733a9f66077
|
[
"Apache-2.0"
] |
permissive
|
lxny2004/open-paperless
|
34025c3e8ac7b4236b0d8fc5ca27fc11d50869bc
|
a8b45f8f0ee5d7a1b9afca5291c6bfaae3db8280
|
refs/heads/master
| 2020-04-27T04:46:25.992405
| 2019-03-06T03:30:15
| 2019-03-06T03:30:15
| 174,064,366
| 0
| 0
|
NOASSERTION
| 2019-03-06T03:29:20
| 2019-03-06T03:29:20
| null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
from __future__ import unicode_literals
from django import apps
from django.utils.translation import ugettext_lazy as _
class LockManagerApp(apps.AppConfig):
has_tests = True
name = 'lock_manager'
verbose_name = _('Lock manager')
|
[
"littlezhoubear@gmail.com"
] |
littlezhoubear@gmail.com
|
727d582df5bab8783d9f40b7a707f8c06595d363
|
58e348983582f85d95ba25f1a097262b08c00be2
|
/tests/test_token_handler.py
|
deaea18891f108964acae521ad5fdedadab34860
|
[
"MIT"
] |
permissive
|
dmuhs/web3data-py
|
4903e480481a8a2ded083fabf45751c6b2e96608
|
27b885549d55f617ff9924a4269ad282b45d3fba
|
refs/heads/master
| 2023-01-06T11:46:26.398440
| 2022-02-17T15:59:19
| 2022-02-17T15:59:19
| 247,438,937
| 10
| 4
|
MIT
| 2022-12-26T21:35:50
| 2020-03-15T09:49:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,446
|
py
|
from itertools import product
import pytest
import requests_mock
from web3data.chains import Chains
from web3data.exceptions import APIError
from web3data.handlers.signature import SignatureHandler
from . import API_PREFIX, CHAINS, HEADERS, RESPONSE
LIMITED_CHAINS = (
Chains.BCH,
Chains.BSV,
Chains.BTC,
Chains.LTC,
Chains.ZEC,
)
SIGNATURE_HANDLER_METHODS = (["details", ("SIGNATURE",), LIMITED_CHAINS],)
SIGNATURE_PARAMS = []
for chain_value, call in product(CHAINS, SIGNATURE_HANDLER_METHODS):
SIGNATURE_PARAMS.append([chain_value] + call[:2] + [chain_value in call[2]])
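# e.g. assuming Chains.BTC is among CHAINS, the generated row is
# [Chains.BTC, "details", ("SIGNATURE",), True]: details() should raise APIError.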
@pytest.mark.parametrize("chain,method,parameters,raises", SIGNATURE_PARAMS)
def test_signature_handler(chain, method, parameters, raises):
handler = SignatureHandler(initial_headers=HEADERS, chain=chain)
method = getattr(handler, method, None)
if raises:
with pytest.raises(APIError):
method(*parameters)
else:
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.ANY, requests_mock.ANY, json=RESPONSE)
response = method(*parameters)
assert m.call_count == 1
assert response == RESPONSE
assert m.request_history[0].url.startswith(API_PREFIX)
# assert header kv pairs are in request headers
assert set(HEADERS.items()).issubset(
set(m.request_history[0].headers.items())
)
|
[
"dmuhs@protonmail.ch"
] |
dmuhs@protonmail.ch
|
268a23f3c374d97903a842dc0dae84a3e6a32d06
|
e57713d795d1c6030bff1602e8ed930889b76917
|
/.ipynb_checkpoints/textos.py
|
defe52a367ed3f8ae134177c26b28ec9d0fe2b5c
|
[] |
no_license
|
rvaldez1986/ai_chatbot
|
f8b1a0c11ce9dd7d20dd1dd3274b8097cd55d5af
|
eab5424e6a43d89eb73972bc805f7a21e34ac047
|
refs/heads/master
| 2020-06-04T09:20:39.840292
| 2020-02-18T21:15:27
| 2020-02-18T21:15:27
| 191,962,844
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,304
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 2 12:35:29 2019
@author: rober
"""
def dict_textos0():
textos = {}
    # Replies
repST = ('Su consulta sobre el tema {0} es para una persona natural o para una empresa?')
repQ = ('Gracias por su retroalimentacion. En ACTUARIA queremos siempre dar el mejor servicio. '
'Desea formalizar su queja?')
repHF = ('Gracias por confiar en nosotros!')
repJS = ('Gracias por su interes, puede enviar su cv y una carta motivacional al correo 123@actuaria.com.ec')
repCN = ('ACTUARIA tiene oficinas en quito y guayaquil. El telefono de quito es 2-501-001 y el telefono '
'de Guayaquil es 2-501-001. La direccion de Quito es Orellana y 6 de Diciembre y en Guayaquil es '
'Emilio Romero y Benjamin Carrion.')
repGR = ('Hola!')
repCHAR = ('Para charlas hay que desarrollar la info de horarios, hasta aca nomas esta hecho')
repNT = ('Hmm.. Este tema es relativo a ACTUARIA? O es de otro tema?')
repCE = ('Gracias por contactarnos, si desea deje su correo y/o numero de telefono y nos contactaremos con usted.')
#FILL DICT
textos["ST"] = repST
textos["Queja"] = repQ
textos["Hi Five"] = repHF
textos["job seeker"] = repJS
textos["Contacto"] = repCN
textos["Greeting"] = repGR
textos["Otros servicios (Charlas/Capacitaciones/Financiera)"] = repCHAR
textos['NT'] = repNT
textos['CE'] = repCE
return textos
def dict_textos1():
textos = {}
    # Replies
repST0 = ('Actuaria esta dirigida principalmente a servicio de empresas, sin embargo para el tema {0} esta a '
'disposicion los siguientes links y blogs: www.actuaria.com.ec\link1')
repST1 = ('Por favor puede ingresar el RUC de su empresa?')
repST2 = ('No entendi. Su consulta sobre el tema {0} es para una persona natural o de una empresa?')
repQ0 = ('Si quere anadir algo a su queja, esto sera analizado directamente por el departamento de satisfaccion '
'del cliente. De manera adicional nos puede incluir un correo electronico para contactarnos con usted.')
repQ1 = ('Gracias por sus comentarios, lo tendremos en cuenta para nuestro proceso de mejora continua.')
repQ2 = ('No entendi su respuesta, desea formalizar su queja?')
repNT0 = ('Gracias por contactarnos, si desea deje su correo y/o numero de telefono y nos contactaremos con usted.')
repNT1 = ('No entendi su respuesta, el tema es relativo a ACTUARIA o a otro tema?')
#FILL DICT
textos["ST"] = [repST0, repST1, repST2]
textos["Queja"] = [repQ0, repQ1, repQ2]
textos['NT'] = [repNT0, repNT1]
return textos
def dict_textos2():
textos = {}
    # Replies
repST0 = ('Requiere el envio de una cotizacion?')
repST1 = ('Requiere informacion sobre un proceso que actualmente esta realizando con ACTUARIA?')
repST2 = ('No se encontro intent, Hasta aca nomas esta hecho')
repST3 = ('El RUC ingresado no es valido, puede ingresar el RUC de su empresa?')
repQ = ('Muchas gracias por su tiempo. Vamos a analizar el contenido de sus comentarios y nos ponemos en '
'contacto con usted')
#FILL DICT
textos["ST"] = [repST0, repST1, repST2, repST3]
textos["Queja"] = repQ
return textos
def dict_textos3():
textos = {}
    # Replies
repST0 = ('Perfecto, nos puede dejar un nombre y un correo para contactarnos y enviar la propuesta de {0}?')
repST1 = ('Aqui se debe hacer una consulta al API de KOHINOR')
repST2 = ('No se encontro intent, Hasta aca nomas esta hecho (se puede unir con estado anterior)')
repST3 = ('No entendi su respuesta, requiere envio de una cotizacion?')
repST4 = ('No entendi su respuesta, requiere informacion sobre un proceso que actualmente esta realizando con ACTUARIA?')
#FILL DICT
textos["ST"] = [repST0, repST1, repST2, repST3, repST4]
return textos
def dict_textos4():
textos = {}
    # Replies
repST = ('Muchas gracias, nos contactaremos con usted en la brevedad posible')
#FILL DICT
textos["ST"] = repST
return textos
|
[
"roberto.valdez.ponce@gmail.com"
] |
roberto.valdez.ponce@gmail.com
|
839bdfa63b3c361abff59ac4c35f10771779c1f0
|
b5c2571948d1e7fd6a21cfe3267cb7de9088cf56
|
/Bytecode Decompile/MazeGameGlobals.py
|
30bbb50ce31dbda90060af33ab0cc787621333a8
|
[] |
no_license
|
C0MPU73R/Toontown-2003-Bytecode
|
ff32042d4da5894ec3a4fb7da43614df26d25a9d
|
aa6862f86034f342d5fee9934cd6ed3e83de99f3
|
refs/heads/master
| 2023-05-03T11:55:57.959617
| 2018-12-02T00:05:43
| 2018-12-02T00:05:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
import RandomNumGen
ENDLESS_GAME = config.GetBool('endless-maze-game', 0)
GAME_DURATION = 60.0
SHOWSCORES_DURATION = 2.0
SUIT_TIC_FREQ = int(256)
def getMazeName(gameDoId, numPlayers, mazeNames):
    try:
        # a module-level 'forcedMaze' may be injected elsewhere (e.g. for testing)
        return forcedMaze
    except NameError:
        names = mazeNames[numPlayers - 1]
return names[RandomNumGen.randHash(gameDoId) % len(names)]
|
[
"flamingdog101@gmail.com"
] |
flamingdog101@gmail.com
|
82d4c413dc12df27ead2b991c59a70cce5f20b90
|
52585c8d95cef15199c18ba1a76899d2c31329f0
|
/04Python workbook/ch3loop/68paritbit.py
|
4ff8317860bc6f6b0ccf8f7760c725b4d33eee3b
|
[] |
no_license
|
greatabel/PythonRepository
|
c7a952257303a21083ed7d535274c339362bd126
|
836fcdd3f5c1b150122302685104fe51b5ebe1a3
|
refs/heads/master
| 2023-08-30T15:56:05.376391
| 2023-08-26T03:34:14
| 2023-08-26T03:34:14
| 29,392,599
| 33
| 6
| null | 2023-02-14T13:33:21
| 2015-01-17T13:54:58
|
Python
|
UTF-8
|
Python
| false
| false
| 291
|
py
|
line = input("Enter 8 bits:")
while line != "":
if line.count("0") + line.count("1") != 8 or len(line )!=8:
print("That's not 8 bit")
else:
ones = line.count("1")
if ones % 2 == 0:
print("Parit should be 0")
else:
print("Parit shoudld be 1")
line = input("Enter 8 bits:")
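# Example: "10110010" contains four 1s, so even parity gives a parity bit of 0.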
|
[
"greatabel1@126.com"
] |
greatabel1@126.com
|
c3fb8a2d33c48c10b8d1ed1f167229dc363f6461
|
e1c6eee6042cfb6d1b254fe04219d5339fbb6e8f
|
/Sprint Challenge/northwind.py
|
ad798f2ff56588bf3307c2c3cbd0454a6d057022
|
[
"MIT"
] |
permissive
|
CJRicciardi/DS-Unit-3-Sprint-2-SQL-and-Databases
|
9fc41a52e4bcd2c14530cb15d0dfa612c462787c
|
b7d7f505a4e33775eb9973de3dae4ef47215da41
|
refs/heads/master
| 2021-03-28T06:29:27.917331
| 2020-03-20T17:57:40
| 2020-03-20T17:57:40
| 247,846,535
| 1
| 0
|
MIT
| 2020-03-17T00:46:34
| 2020-03-17T00:46:34
| null |
UTF-8
|
Python
| false
| false
| 1,795
|
py
|
# northwind.py
import sqlite3 as lite
import os
import pandas as pd
# construct path to northwind_small.sqlite3
connection = lite.connect("northwind_small.sqlite3")
#print("Connection:", connection)
# create cursor instance for connection
cursor = connection.cursor()
#print("Cursor:", cursor)
# query, execution and result for ten most expensive products
expensive_query = """
SELECT ProductName, UnitPrice
FROM Product
ORDER BY UnitPrice DESC
LIMIT 10;
"""
result = cursor.execute(expensive_query).fetchall()
pretty_result = pd.DataFrame(result, columns=["Item", "Price"])
print(f"\nThe ten most expensive items are:\n", pretty_result)
# query, execution and answer for average age at hire
hire_age = """
SELECT AVG(HireDate - BirthDate)
FROM Employee;
"""
result3 = cursor.execute(hire_age).fetchall()
print(f"\nThe average age of a newly hired employee at Northwind is {result3[0][0]:.2f}.")
# query, execute and answer for what are the ten most expensive items and their suppliers
expensive_supplier = """
SELECT p.ProductName, p.UnitPrice, s.CompanyName
FROM Product as p
LEFT JOIN Supplier as s
ON p.SupplierID = s.ID
ORDER BY UnitPrice DESC
LIMIT 10;
"""
result4 = cursor.execute(expensive_supplier).fetchall()
pretty_result4 = pd.DataFrame(result4, columns=["Item", "Price", "Supplier"])
print(f"\nThe ten most expensive items and their supplier are:\n", pretty_result4)
# query, execute, and answer for largest category
category_count = """
SELECT CategoryId, COUNT(CategoryId) AS "Count"
FROM Product
GROUP BY CategoryId
ORDER BY COUNT(CategoryId) DESC
LIMIT 1;
"""
result2 = cursor.execute(category_count).fetchall()
print(f"\nCategory {result2[0][0]} has the most items, with {result2[0][1]} items.")
connection.close()
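# Optional sketch (not in the original script): contextlib.closing guarantees
# the connection is closed even if a query raises. sqlite3's own context
# manager only wraps transactions, so closing() is the idiomatic way to
# auto-close.
#
# from contextlib import closing
# with closing(lite.connect("northwind_small.sqlite3")) as connection:
#     cursor = connection.cursor()
#     rows = cursor.execute(expensive_query).fetchall()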
|
[
"ricciardistg@gmail.com"
] |
ricciardistg@gmail.com
|
876870088df44bce8e2d806e443ba6db4e7ed56c
|
2c6295c2845dcee5112be821307367059a00d249
|
/Term Assignment/max-value random forest.py
|
df44775286aea59abcf4e36a3140fca85ea5c335
|
[] |
no_license
|
arianne1998/1BM110
|
eb96604bf84750678a94d90227a286c4e35ba7de
|
b3dedc6cc0912d7675be072b40b6788dbb739842
|
refs/heads/main
| 2023-03-28T14:31:47.981787
| 2021-04-02T07:47:05
| 2021-04-02T07:47:05
| 340,035,754
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,747
|
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
import sklearn
from sklearn.metrics import roc_curve, auc, accuracy_score, r2_score, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import scale
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestRegressor
# Read final dataset twice
final_df = pd.read_csv('Datasets/final_dataset.csv')
final_df2 = pd.read_csv('Datasets/final_dataset.csv')
# Variables, specify which variables are not needed for prediction and which variables will be predicted
ignore_columns = ["datetime", "meter_num_id", "T1", "T2", "T3", "T4", "T5", "T6", "T7", "T8", "T9", "T10", "T11", "T12", "T13", "T14", "T15", "T16",
"T17", "T18", "T19", "T20", "T21", "T22", "T23", "T24", "T25", "T26", "T27", "T28", "T29", "T30",
"T31", "T32", "T33", "T34", "T35", "T36", "T37", "T38", "T39", "T40", "T41", "T42", "T43", "T44",
"T45", "T46", "T47", "T48"]
all_columns = ["T1", "T2", "T3", "T4", "T5", "T6", "T7", "T8", "T9", "T10", "T11", "T12", "T13", "T14", "T15", "T16",
"T17", "T18", "T19", "T20", "T21", "T22", "T23", "T24", "T25", "T26", "T27", "T28", "T29", "T30",
"T31", "T32", "T33", "T34", "T35", "T36", "T37", "T38", "T39", "T40", "T41", "T42", "T43", "T44",
"T45", "T46", "T47", "T48"]
final_df['max value'] = final_df[["T1", "T2", "T3", "T4", "T5", "T6", "T7", "T8", "T9", "T10", "T11", "T12", "T13", "T14", "T15", "T16",
"T17", "T18", "T19", "T20", "T21", "T22", "T23", "T24", "T25", "T26", "T27", "T28", "T29", "T30",
"T31", "T32", "T33", "T34", "T35", "T36", "T37", "T38", "T39", "T40", "T41", "T42", "T43", "T44",
"T45", "T46", "T47", "T48"]].max(axis=1)
label_columns = ["max value"]
# Remove columns which should be ignored
final_df = final_df.drop(columns=ignore_columns)
# Split x (features) and y (labels) in separate dataframes
final_x = final_df.copy()
final_x = final_x.drop(columns=label_columns)
final_y = final_df.copy()[label_columns]
# Split dataframes into test and train with a ratio of 30% - 70%
train_x, test_x, train_y, test_y = train_test_split(final_x, final_y, test_size=.3, random_state=42)
train_y=np.ravel(train_y)
test_y=np.ravel(test_y)
#####################################################################################################
# create grid for hyperparameter tuning, values are somewhat randomly sampled to make a first estimation
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start=100, stop=1200, num=20)]
# Number of features to consider at every split
max_features = ['sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 100, num=10)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [10, 15, 20, 25, 30]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 5, 10, 15]
# Method of selecting samples for training each tree, with or without replacement
bootstrap = [True, False]
# Create the random grid so it can be called upon later
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
# Use the grid to search for the optimal parameters given the input
# Create base model to tune
rf = RandomForestRegressor()
# search of parameters using 5 fold cross validation (5 is used here instead of 10 to reduce required computation time)
rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, n_iter=600, cv=5, verbose=2,
random_state=42, n_jobs=-1)
# Fit the search model
rf_random.fit(train_x, train_y)
#define evaluation and prediction for preliminary test
def evaluate(model, test_features, test_labels):
predictions = model.predict(test_features)
predictions=np.ravel(predictions)
predictions=predictions.tolist()
predictions_df=pd.DataFrame({'predictions':predictions})
predictions_df.insert(0, "datetime", final_df2['datetime'])
predictions_df.insert(0, "meter_num_id", final_df2['meter_num_id'])
predictions_df.insert(3, "max value", final_df['max value'])
#calculate performance measures
mse = mean_squared_error(test_labels, predictions)
rmse = mean_squared_error(test_labels, predictions, squared=False)
r_squared = r2_score(test_labels, predictions)
adj_r_squared = 1 - (1-r_squared)*(len(test_labels)-1)/(len(test_labels)-test_features.shape[1]-1)
print(predictions_df)
print('Model Performance')
print('mean squared error', mse)
print('root mean squared error', rmse)
print('adjusted r squared value', adj_r_squared)
return predictions_df
# make preliminary prediction and evaluate the performance by calling the evaluation function
best_model = rf_random.best_estimator_
random_mse = evaluate(best_model, train_x, train_y)
#retrieve best parameters of search conducted above and create parameters similar to these for new hyperparameter tuning
n_estimators_start=int(round(rf_random.best_params_.get('n_estimators')*0.8,0))
n_estimators_stop=int(round(rf_random.best_params_.get('n_estimators')*1.2,0))
max_depth_start=int(round(rf_random.best_params_.get('max_depth')*0.8,0))
max_depth_stop=int(round(rf_random.best_params_.get('max_depth')*1.2,0))
min_samples_split_1=int(round(rf_random.best_params_.get('min_samples_split')*0.8,0))
min_samples_split_2=int(round(rf_random.best_params_.get('min_samples_split')*0.95,0))
min_samples_split_4=int(round(rf_random.best_params_.get('min_samples_split')*1.05,0))
min_samples_split_5=int(round(rf_random.best_params_.get('min_samples_split')*1.2,0))
min_samples_leaf_1=int(round(rf_random.best_params_.get('min_samples_leaf')*0.8,0))
min_samples_leaf_2=int(round(rf_random.best_params_.get('min_samples_leaf')*0.95,0))
min_samples_leaf_4=int(round(rf_random.best_params_.get('min_samples_leaf')*1.05,0))
min_samples_leaf_5=int(round(rf_random.best_params_.get('min_samples_leaf')*1.2,0))
bootstrap_choice=rf_random.best_params_.get('bootstrap')
#####################################################################################################
# refine the search by making a new grid with parameters around the best parameters found above
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start=n_estimators_start, stop=n_estimators_stop, num=10)]
# Number of features to consider at every split
max_features = ['sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(max_depth_start, max_depth_stop, num=10)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [min_samples_split_1, min_samples_split_2, min_samples_split_4, min_samples_split_5]
# Minimum number of samples required at each leaf node
min_samples_leaf = [min_samples_leaf_1, min_samples_leaf_2, min_samples_leaf_4, min_samples_leaf_5]
# Method of selecting samples for training each tree
bootstrap = [bootstrap_choice]
# Create the random grid so it can be called upon later
param_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
# Use the grid to search for the optimal parameters
# Create base model to tune
rf = RandomForestRegressor()
# define search of parameters using 5 fold cross validation
grid_search = GridSearchCV(estimator=rf, param_grid=param_grid, cv=5, verbose=2, n_jobs=-1)
# Fit the search model
grid_search.fit(train_x, train_y)
# make final prediction and evaluate the performance by calling the evaluation function
best_model = grid_search.best_estimator_
random_mse = evaluate(best_model, test_x, test_y)
# give the parameters which are used in the final optimal model
print("optimal parameters which are used in the final model", grid_search.best_params_)
|
[
"bas_roodenburg@hotmail.com"
] |
bas_roodenburg@hotmail.com
|
eb91efcb416ab3a3b19e7a40980a9fe7b954abad
|
6a99dc451bc5af666494729999d91d1ce48a917f
|
/mysite/mysite/settings.py
|
5486fe804be72db0ead041a75fe6deedd26f0174
|
[] |
no_license
|
vanm98/djangoTutorial
|
82f35cd9eae2a7fa9cccb8eafa480776c60b2ae6
|
a6a921df06103ce4a69b10224561d3defd4b6eb2
|
refs/heads/master
| 2020-04-18T13:54:17.672840
| 2019-02-26T15:37:49
| 2019-02-26T15:37:49
| 167,574,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,166
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6*q=m!*vwld*j=)i8mxy1j*4d!9#ge$@g07+&w72co6wh7tn2$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"1522808179@mil"
] |
1522808179@mil
|
ca9df04907f9ce4cd79b705860017169c2a4a7ef
|
79079c592d9a86d32bc16535b6b6edaf796d21c4
|
/main.py
|
70545940ed4786b9afca10af4bdd921903f5cc9b
|
[] |
no_license
|
c1a1o1/3d-gan-tensorflow
|
00d37ff7b27d3fe2b6c93d2c796709b621e3e031
|
1a43185c0332a9a4ac888ecc77645cfbbe24d2c0
|
refs/heads/master
| 2021-01-15T17:50:16.018118
| 2017-06-16T04:03:16
| 2017-06-16T04:03:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
import tensorflow as tf
import numpy as np
import time
from Gan_3D_model import *
with tf.Session() as sess:
train_data_path = 'D:/python_workspace/Liuhy/3Dporject/ModelNet40_Voxel/ModelNet40_Voxel/airplane/64/train'
checkpoint_dir = 'D:/python_workspace/Liuhy/3Dporject/3DShapeNets/3DGan_liuhy/checkpoints'
sample_g_path = 'D:/python_workspace/Liuhy/3Dporject/3DShapeNets/3DGan_liuhy/sample_generator'
gan3d = GAN_3D(sess = sess, data_set_path = train_data_path,
checkpoint_dir = checkpoint_dir, sample_g_path = sample_g_path)
gan3d.train()
|
[
"756993749@qq.com"
] |
756993749@qq.com
|
2d855a5c23a7fed4a1d192321137937f6763fb22
|
6ee249a2fe56d4664c7385e8ee40846e365e6378
|
/insideout/views.py
|
1f27189cd86a8badf5d7dc4611eaf4a4ff840dbc
|
[] |
no_license
|
cproctor/insideout
|
0a47c6477520524e7946618c2e0b44a3b0fa8287
|
ef0ceb420052e400692c8700fb69294965e68300
|
refs/heads/master
| 2021-01-25T09:52:56.045820
| 2018-03-13T07:53:32
| 2018-03-13T07:53:32
| 123,326,760
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
from django.shortcuts import render
def homepage(request):
return render(request, 'insideout/homepage.html', {})
def crossdomain(request):
return render(request, 'insideout/crossdomain.xml', {}, content_type="text/xml")
|
[
"chris.proctor@gmail.com"
] |
chris.proctor@gmail.com
|
3d281be997e07cb039534e51306f24ac0d1602a9
|
d14d5702c27e6a3958a0008975092714d04da7b6
|
/src/hdusd/ui/usd_list.py
|
71c1e4114d0c0a5f0fa4f603dc5b17e3ce7db1d3
|
[
"Apache-2.0"
] |
permissive
|
Speedwag00n/BlenderUSDHydraAddon
|
645f95d7a81f01645291b1ed693d01ba237607ad
|
a55b7fed51429815fc591510cab183a24e088b2a
|
refs/heads/master
| 2023-02-16T10:27:28.880258
| 2020-12-29T17:28:39
| 2020-12-29T17:28:39
| 326,612,971
| 0
| 0
|
Apache-2.0
| 2021-01-06T12:52:02
| 2021-01-04T08:10:39
| null |
UTF-8
|
Python
| false
| false
| 8,344
|
py
|
#**********************************************************************
# Copyright 2020 Advanced Micro Devices, Inc
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#********************************************************************
import bpy
from pxr import UsdGeom
from . import HdUSD_Panel, HdUSD_Operator
from ..usd_nodes.nodes.base_node import USDNode
class HDUSD_OP_usd_list_item_expand(bpy.types.Operator):
"""Expand USD item"""
bl_idname = "hdusd.usd_list_item_expand"
bl_label = "Expand"
index: bpy.props.IntProperty(default=-1)
def execute(self, context):
if self.index == -1:
return {'CANCELLED'}
node = context.active_node
usd_list = node.hdusd.usd_list
items = usd_list.items
item = items[self.index]
if len(items) > self.index + 1 and items[self.index + 1].indent > item.indent:
next_index = self.index + 1
item_indent = item.indent
removed_items = 0
while True:
if next_index >= len(items):
break
if items[next_index].indent <= item_indent:
break
items.remove(next_index)
removed_items += 1
if usd_list.item_index > self.index:
usd_list.item_index = max(self.index, usd_list.item_index - removed_items)
else:
prim = usd_list.get_prim(item)
added_items = 0
for child_index, child_prim in enumerate(prim.GetChildren(), self.index + 1):
child_item = items.add()
child_item.sdf_path = str(child_prim.GetPath())
items.move(len(items) - 1, child_index)
added_items += 1
if usd_list.item_index > self.index:
usd_list.item_index += added_items
return {'FINISHED'}
class HDUSD_OP_usd_list_item_show_hide(bpy.types.Operator):
"""Show/Hide USD item"""
bl_idname = "hdusd.usd_list_item_show_hide"
bl_label = "Show/Hide"
index: bpy.props.IntProperty(default=-1)
def execute(self, context):
if self.index == -1:
return {'CANCELLED'}
node = context.active_node
usd_list = node.hdusd.usd_list
items = usd_list.items
item = items[self.index]
prim = usd_list.get_prim(item)
im = UsdGeom.Imageable(prim)
if im.ComputeVisibility() == 'invisible':
im.MakeVisible()
else:
im.MakeInvisible()
return {'FINISHED'}
class HDUSD_UL_usd_list_item(bpy.types.UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
if self.layout_type not in {'DEFAULT', 'COMPACT'}:
return
for i in range(item.indent):
layout.split(factor=0.1)
items = data.items
prim = data.get_prim(item)
if not prim:
return
visible = UsdGeom.Imageable(prim).ComputeVisibility() != 'invisible'
col = layout.column()
if not prim.GetChildren():
icon = 'DOT'
col.enabled = False
elif len(items) > index + 1 and items[index + 1].indent > item.indent:
icon = 'TRIA_DOWN'
else:
icon = 'TRIA_RIGHT'
expand_op = col.operator(HDUSD_OP_usd_list_item_expand.bl_idname, text="", icon=icon,
emboss=False, depress=False)
expand_op.index = index
col = layout.column()
col.label(text=prim.GetName())
col.enabled = visible
col = layout.column()
col.alignment = 'RIGHT'
col.label(text=prim.GetTypeName())
col.enabled = visible
col = layout.column()
col.alignment = 'RIGHT'
if prim.GetTypeName() == 'Xform':
icon = 'HIDE_OFF' if visible else 'HIDE_ON'
else:
col.enabled = False
icon = 'NONE'
visible_op = col.operator(HDUSD_OP_usd_list_item_show_hide.bl_idname, text="", icon=icon,
emboss=False, depress=False)
visible_op.index = index
class HDUSD_NODE_PT_usd_list(HdUSD_Panel):
bl_label = "USD List"
bl_space_type = "NODE_EDITOR"
bl_region_type = "UI"
bl_category = "Item"
@classmethod
def poll(cls, context):
node = context.active_node
return node and isinstance(node, USDNode)
def draw(self, context):
node = context.active_node
usd_list = node.hdusd.usd_list
layout = self.layout
layout.template_list(
"HDUSD_UL_usd_list_item", "",
usd_list, "items",
usd_list, "item_index",
sort_lock=True
)
prop_layout = layout.column()
prop_layout.use_property_split = True
for prop in usd_list.prim_properties:
if prop.type == 'STR' and prop.value_str:
row = prop_layout.row()
row.enabled = False
row.prop(prop, 'value_str', text=prop.name)
elif prop.type == 'FLOAT':
prop_layout.prop(prop, 'value_float', text=prop.name)
class HDUSD_OP_usd_nodetree_add_basic_nodes(bpy.types.Operator):
"""Add basic USD nodes"""
bl_idname = "hdusd.usd_nodetree_add_basic_nodes"
bl_label = "Add Basic Nodes"
scene_source: bpy.props.EnumProperty(
items=(('SCENE', 'Scene', 'Render current scene'),
('USD_FILE', 'USD File', 'Load and render scene from USD file')),
default='SCENE',
)
def execute(self, context):
tree = context.space_data.edit_tree
tree.add_basic_nodes(self.scene_source)
return {'FINISHED'}
class HDUSD_OP_usd_tree_node_print_stage(HdUSD_Operator):
""" Print selected USD nodetree node stage to console """
bl_idname = "hdusd.usd_tree_node_print_stage"
bl_label = "Print Stage To Console"
@classmethod
def poll(cls, context):
return super().poll(context) and context.space_data.tree_type == 'hdusd.USDTree' and context.active_node
def execute(self, context):
tree = context.space_data.edit_tree
node = context.active_node
if not node:
print(f"Unable to print USD nodetree \"{tree.name}\" stage: no USD node selected")
return {'CANCELLED'}
# get the USD stage from selected node
stage = node._compute_node(node)
if not stage:
print(f"Unable to print USD node \"{tree.name}\":\"{node.name}\" stage: could not get the correct stage")
return {'CANCELLED'}
print(f"Node \"{tree.name}\":\"{node.name}\" USD stage is:")
print(stage.ExportToString())
return {'FINISHED'}
class HDUSD_UsdNodeTreePanel(HdUSD_Panel):
bl_space_type = "NODE_EDITOR"
bl_region_type = "UI"
bl_category = "Tool"
@classmethod
def poll(cls, context):
tree = context.space_data.edit_tree
return super().poll(context) and tree and tree.bl_idname == "hdusd.USDTree"
class HDUSD_NODE_PT_usd_nodetree_tree_tools(HDUSD_UsdNodeTreePanel):
bl_label = "Setup basic USD Node Tree"
def draw(self, context):
col = self.layout.column()
col.label(text="Replace current tree using")
op_idname = HDUSD_OP_usd_nodetree_add_basic_nodes.bl_idname
col.operator(op_idname, text="Current Scene").scene_source = "SCENE"
col.operator(op_idname, text="USD file").scene_source = "USD_FILE"
class HDUSD_NODE_PT_usd_nodetree_node_tools(HDUSD_UsdNodeTreePanel):
bl_label = "USD Nodes Tools"
def draw(self, context):
col = self.layout.column()
op_idname = HDUSD_OP_usd_tree_node_print_stage.bl_idname
col.operator(op_idname, text="Print node stage to console")
|
[
"noreply@github.com"
] |
Speedwag00n.noreply@github.com
|
c65109dbe0856949890b2cab6b76ccd2f6214e0c
|
19027267b94cba3da430239a1ef2a7f213417e45
|
/Tracer_Kinetic/methods/quant_method_lp.py
|
7d6eb5c8751f14cf5dda8e80e8b293d48c980236
|
[
"MIT"
] |
permissive
|
llevitis/APPIAN
|
f3256199a02e9a7c3f7877211de794250feb0b97
|
5fce2ea908422ea95cc88b29c15dfd3981f61f32
|
refs/heads/master
| 2020-04-01T22:24:17.451850
| 2019-02-05T12:18:02
| 2019-02-05T12:18:02
| 153,706,639
| 0
| 0
|
MIT
| 2018-10-19T00:58:58
| 2018-10-19T00:58:58
| null |
UTF-8
|
Python
| false
| false
| 2,369
|
py
|
from quantification_template import *
in_file_format="ECAT"
out_file_format="ECAT"
reference=True
voxelwise=True
class quantOutput(TraitedSpec):
out_file = File(argstr="%s", desc="Parametric image")
class quantInput( CommandLineInputSpec):
out_file = File(argstr="%s", position=-1, desc="image to operate on")
in_file= File(exists=True, mandatory=True, position=-3, argstr="%s", desc="PET file")
reference = File(exists=True, mandatory=True, position=-4, argstr="%s", desc="Reference file")
start_time=traits.Float(argstr="%s", position=-2, desc="Start time for regression in mtga.")
k2= traits.Float(argstr="-k2=%f", desc="With reference region input it may be necessary to specify also the population average for reference region k2")
thr=traits.Float(argstr="-thr=%f", desc="Pixels with AUC less than (threshold/100 x max AUC) are set to zero. Default is 0%")
Max=traits.Float(argstr="-max=%f",default=10000, use_default=True, desc="Upper limit for Vt or DVR values; by default max is set pixel-wise to 10 times the AUC ratio.")
Min=traits.Float(argstr="-min=%f", desc="Lower limit for Vt or DVR values, 0 by default")
Filter=traits.Bool(argstr="-filter", desc="Remove parametric pixel values that over 4x higher than their closest neighbours.")
end=traits.Float(argstr="-end %f", desc="By default line is fit to the end of data. Use this option to enter the fit end time.")
v=traits.Str(argstr="-v %s", desc="Y-axis intercepts time -1 are written as an image to specified file.")
n=traits.Str(argstr="-n %s", desc="Numbers of selected plot data points are written as an image.")
class quantCommand(quantificationCommand):
input_spec = quantInput
output_spec = quantOutput
_cmd = "imgdv" #input_spec.pvc_method
_suffix = "_lp"
def check_options(tkaNode, opts):
#Define node for logan plot analysis
if opts.tka_k2 != None: tkaNode.inputs.k2=opts.tka_k2
if opts.tka_thr != None: tkaNode.inputs.thr=opts.tka_thr
if opts.tka_max != None: tkaNode.inputs.Max=opts.tka_max
if opts.tka_filter != None: tkaNode.inputs.Filter=opts.tka_filter
if opts.tka_end != None: tkaNode.inputs.end=opts.tka_end
if opts.tka_v != None: tkaNode.inputs.v=opts.tka_v
if opts.tka_start_time != None: tkaNode.inputs.start_time=opts.tka_start_time
return tkaNode
|
[
"thomas.funck@mail.mcgill.ca"
] |
thomas.funck@mail.mcgill.ca
|
1890cab32416f7c0823f8b42e3eeffb616663f84
|
44f39ab252edcfb51555497c2a43190a4905ee97
|
/docs/conf.py
|
76b035bfd684ea589246934a383da9deff3e9ea8
|
[
"MIT"
] |
permissive
|
mjsiers/practice-data-goveqpaper
|
ebf68c28f4d60c3b116d752a7bc5c54bfc6068dc
|
9bbc7492fa1941ed6732feaeb269f894a9d4e929
|
refs/heads/master
| 2022-12-15T04:27:58.819399
| 2019-02-04T17:26:20
| 2019-02-04T17:26:20
| 165,886,978
| 1
| 0
|
MIT
| 2022-12-08T01:35:09
| 2019-01-15T16:40:45
|
Makefile
|
UTF-8
|
Python
| false
| false
| 8,024
|
py
|
# -*- coding: utf-8 -*-
#
# practice-data-goveqpaper documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'practice-data-goveqpaper'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'practice-data-goveqpaperdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'practice-data-goveqpaper.tex',
u'practice-data-goveqpaper Documentation',
u"Your name (or your organization/company/team)", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'practice-data-goveqpaper', u'practice-data-goveqpaper Documentation',
[u"Your name (or your organization/company/team)"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'practice-data-goveqpaper', u'practice-data-goveqpaper Documentation',
u"Your name (or your organization/company/team)", 'practice-data-goveqpaper',
'A short description of the project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
[
"msiers@saturnsys.com"
] |
msiers@saturnsys.com
|
b5950da974e034833bc7e9e32b1788956be49686
|
fcd29745ed7a66b46f5039c2ad07f2fa5cb457a2
|
/22_popular_libraries/video_code/5_creating_a_mailgun_library/start/email_mailgun.py
|
8e87f86a7e62a5264c0a94e718c88c9c10f63829
|
[
"MIT"
] |
permissive
|
PacktPublishing/The-Complete-Python-Course
|
17489ec6939b5c6c20b92d5bb2d15a71a6444f8e
|
b880ef9c0e281316f4080531d3690435a686e9c0
|
refs/heads/master
| 2023-02-19T04:22:33.451524
| 2023-02-02T06:04:40
| 2023-02-02T06:04:40
| 204,631,924
| 56
| 72
|
MIT
| 2023-02-15T23:21:39
| 2019-08-27T06:09:32
|
Python
|
UTF-8
|
Python
| false
| false
| 572
|
py
|
import requests
MAILGUN_API_URL = 'https://api.mailgun.net/v3/sandbox0fd1d065f521484b8af277034648e756.mailgun.org'
MAILGUN_API_KEY = 'key-798b9585aedd35d87f1bf506cadc221e'
FROM_NAME = 'Jose'
FROM_EMAIL = 'jose@schoolofcode.me'
TO_EMAILS = ['jslvtr@gmail.com']
SUBJECT = 'Test e-mail'
CONTENT = 'Hello, this is a test e-mail'
print(requests.post(
MAILGUN_API_URL,
auth=('api', MAILGUN_API_KEY), # This is Basic Auth
data={
'from': f'{FROM_NAME} <{FROM_EMAIL}>',
'to': TO_EMAILS,
'subject': SUBJECT,
'text': CONTENT
}))
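# Hedged hardening sketch (not part of the original lecture code): hardcoding
# the Mailgun key is risky; reading it from the environment keeps it out of
# the repository. The variable names mirror the constants above.
#
# import os
# MAILGUN_API_KEY = os.environ['MAILGUN_API_KEY']
# MAILGUN_API_URL = os.environ['MAILGUN_API_URL']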
|
[
"dwayned@packtpub.com"
] |
dwayned@packtpub.com
|
41d2e17998e5b82cea13d9aa9c6150dfd5d469ba
|
30dd715d8f90c77fab24964355592f177c27e319
|
/ValidationProject/wsgi.py
|
401b9d4390e4633ba2d9791f0462b4fb43d3bf74
|
[
"Apache-2.0"
] |
permissive
|
cs-fullstack-2019-spring/django-validation-ic-Kenn-CodeCrew-1
|
86f54b2c0435e1a777087d5417b72c55b136e5e5
|
4a02bdc4b9f9c171c085653fc6d7bfc9ca25c7b1
|
refs/heads/master
| 2020-04-25T23:04:46.038214
| 2019-02-28T17:11:39
| 2019-02-28T17:11:39
| 173,132,526
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
"""
WSGI config for ValidationProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ValidationProject.settings")
application = get_wsgi_application()
|
[
"kenn+git@code-crew.org"
] |
kenn+git@code-crew.org
|
7d527dce82b5e2728ccf9deafdfef94fc31d91be
|
30c3debb297e0938a719364dea79bf22dd1a5904
|
/backend/trips/migrations/0002_trip.py
|
9afceb6f93b01b1e9e9c8c7f3e9b949c576b7c65
|
[] |
no_license
|
davidespihernandez/django-react-trips
|
00b2106c3dfd1227bc03aff903ab2ed1691224dd
|
a2951828334d90a87583b80d00c4898ad997c4ef
|
refs/heads/master
| 2022-11-30T20:08:41.692546
| 2020-08-20T16:30:52
| 2020-08-20T16:30:52
| 289,054,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
# Generated by Django 3.0.5 on 2020-05-25 16:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("trips", "0001_initial")]
operations = [
migrations.CreateModel(
name="Trip",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("destination", models.TextField()),
("start_date", models.DateField()),
("end_date", models.DateField()),
("comment", models.TextField(blank=True, null=True)),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
)
]
|
[
"davidespihernandez@gmail.com"
] |
davidespihernandez@gmail.com
|
7d1c2049b4737894968dece4dbebf229e896b61e
|
33ab9ae25104c83d3ca0275025b9226a5447860e
|
/product/product/settings.py
|
22413cd469447158190272bb21fedd24e98111a6
|
[] |
no_license
|
bhuwan55/jquery-ajax-snippets
|
33da01caa1a60be1af8aa5bf47042e1dd2b65d94
|
3f2936186ab23d47a5b2a0d6383b91e71e19bedc
|
refs/heads/master
| 2022-12-05T16:33:34.522568
| 2020-02-20T10:15:49
| 2020-02-20T10:15:49
| 241,549,472
| 1
| 0
| null | 2022-11-22T05:19:44
| 2020-02-19T06:29:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,136
|
py
|
"""
Django settings for product project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=ekqvgxqy198x^a@hp$qrmvw(q_z1lx@zjjur!+l1zr2+r#x#g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'product.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'product.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"awsumbj55@gmail.com"
] |
awsumbj55@gmail.com
|
c33ce417e4d842ed41233c8d39dfde5c93bf90a5
|
71517349ee8a94fd4f683042371861cb7884efd9
|
/buddysystem/application/migrations/0005_profile_desired_companions.py
|
b88b65fce9a906e05dbe659ca1d4bb4a3dd5ea47
|
[] |
no_license
|
Audrey-Newman/buddy-system
|
d6edf5388e6a06272314948c78aa087df96ee6f9
|
731f0fd2332eebdd9b35f72430d5758b0bde9791
|
refs/heads/master
| 2021-08-06T18:29:44.010655
| 2017-11-06T19:13:07
| 2017-11-06T19:13:07
| 109,515,737
| 0
| 0
| null | 2017-11-06T19:13:07
| 2017-11-04T17:33:11
|
Python
|
UTF-8
|
Python
| false
| false
| 474
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-05 05:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('application', '0004_profile_score'),
]
operations = [
migrations.AddField(
model_name='profile',
name='desired_companions',
field=models.TextField(blank=True, max_length=500),
),
]
|
[
"aen7xa@virginia.edu"
] |
aen7xa@virginia.edu
|
7537f1cf3c5f7375cee6813d559e4b5c34438a87
|
75f5ee997eab7f0050fd515b486bc6549304bbd5
|
/insta/photoalbum/forms.py
|
2ab725f3b83438384643055f7ca95044250fa578
|
[] |
no_license
|
agatagmaj/Instasomething
|
8757d46bf361ebdbe0762362d49fa2f7630040b9
|
ac6ba2c1672002881c184e76b1f490c1ffccac49
|
refs/heads/master
| 2020-03-30T04:09:21.147208
| 2018-09-28T12:57:54
| 2018-09-28T12:57:54
| 150,726,741
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
from django import forms
class UploadFileForm(forms.Form):
title = forms.CharField(max_length=64, required=False)
file = forms.ImageField()
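# Hedged usage sketch (the view below is illustrative, not part of this app):
# a bound file form must be constructed with both request.POST and
# request.FILES.
#
# def upload(request):
#     if request.method == 'POST':
#         form = UploadFileForm(request.POST, request.FILES)
#         if form.is_valid():
#             image = form.cleaned_data['file']  # an UploadedFile instance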
|
[
"agatagmaj@gmail.com"
] |
agatagmaj@gmail.com
|
a36888a3c77e242d1f5f2e551f917dedeffe417b
|
546c4ff60de3967d99d2af9dc92c711fbe2fa3c8
|
/idetec/urls.py
|
1c60db3abff08b8e421d8ea89cc408ac23c532f9
|
[] |
no_license
|
suchilin/maquinaria
|
ae27ff86791b867bbab6096362774d8b5f69eb8c
|
ca35802c5ecf0f4f1d27a5f3f7d81118498fdc42
|
refs/heads/master
| 2020-04-02T22:53:35.479268
| 2016-06-24T14:09:46
| 2016-06-24T14:09:46
| 61,839,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 716
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import RedirectView
urlpatterns = patterns('',
url(r'^', include('solicitudes.urls', namespace='solicitudes')),
(r'^idetec/accounts/', include('registration.backends.default.urls')),
url(r'^idetec/grappelli/', include('grappelli.urls')), # grappelli URLS
url(r'^idetec/admin/', include(admin.site.urls)),
url(r'^idetec/solicitudes/', include('solicitudes.urls', namespace='solicitudes')),
url(r'^idetec/dbupdater/', include('dbupdater.urls', namespace='dbupdater')),
)
urlpatterns += staticfiles_urlpatterns()
|
[
"enigma_profunda@hotmail.com"
] |
enigma_profunda@hotmail.com
|
4756747b60219eaecb57896c416cbf92f47c2594
|
6938ddc0516e3aaadc41dec1a816bee289a7762d
|
/src/python/algorithms/subsets/subsets.py
|
9e088471070ac3e4fb22637ec52075c55c73ea2d
|
[] |
no_license
|
hansewetz/gitrep2
|
aefc0b78d1fdf598260f5fa0d17b8113624f1b45
|
8aac1a4e3ae096b2e2f9d1746880b790b4760936
|
refs/heads/master
| 2020-03-14T22:20:53.371631
| 2018-05-03T07:32:32
| 2018-05-03T07:32:32
| 131,819,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 574
|
py
|
#!/usr/bin/env python
import sys
# generate subsets
# v  - vector
# bv - bool vector, True if element is part of the subset, False otherwise
# k  - current position
# n  - number of elements in v
def subsetsAux(v, bv, k, n):
    # check if we should print elements
    if k == n:
        for i in range(0, n):
            if bv[i]: sys.stdout.write("{0} ".format(v[i]))
        sys.stdout.write("\n")
    else:
        bv[k] = False
        subsetsAux(v, bv, k + 1, n)
        bv[k] = True
        subsetsAux(v, bv, k + 1, n)
# print subsets
def subsets(v):
    bv = [False] * len(v)
    subsetsAux(v, bv, 0, len(v))
# test
v=[1,2,3]
print(v)
subsets(v)
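# Expected output for v = [1, 2, 3]: each recursion level tries False before
# True, so the empty subset prints first (a blank line), followed by
# "3", "2", "2 3", "1", "1 3", "1 2", "1 2 3" (one subset per line).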
|
[
"hansewetz@hotmail.com"
] |
hansewetz@hotmail.com
|
4dcfa5175cb1853111f2cb692eaae3a760089e63
|
ad7e65ca210dc254f29454ab382d875cb9535daa
|
/coffeebot/utils.py
|
10575cb69565fb13bfb300d5a28f51a65ac35513
|
[
"MIT"
] |
permissive
|
RobotDisco/mattermost-coffeebot
|
3b65e52053ecc89f72a0741f549ae26d8ac7284c
|
cd27b9c0ecc7ecb38f570c42fb0789999021f468
|
refs/heads/master
| 2020-06-01T04:00:07.013100
| 2019-07-10T04:40:24
| 2019-07-10T04:40:24
| 190,626,517
| 0
| 0
|
MIT
| 2019-07-10T04:40:25
| 2019-06-06T17:59:16
|
Python
|
UTF-8
|
Python
| false
| false
| 6,702
|
py
|
import random
from sqlalchemy.sql import text
from coffeebot import config, session
from coffeebot.models import User, Pair
def get_channel(driver, team_name, channel_name):
"""
Retrieve a channel given a team and channel name.
Returns the JSON response from the Mattermost API.
"""
response = driver.channels.get_channel_by_name_and_team_name(
team_name, channel_name)
return response
def get_channel_members(driver, team_name, channel_name):
"""
Retrieve all of the members from a channel given a team and channel name.
Returns a list of user IDs sorted alphabetically.
"""
channel = get_channel(driver, team_name, channel_name)
channel_id = channel['id']
# By default, the Mattermost API will return only 60 members. Set this to
# an amount that is at least the number of members in the channel to get
# all members
params = {
'per_page': '10000'
}
response = driver.channels.get_channel_members(channel_id, params=params)
bot = driver.users.get_user('me')
bot_id = bot['id']
# Return all of the user IDs excluding the bot's user ID (don't want to
# count the bot as a user in pairings)
members = [
member['user_id'] for member in response if (
member['user_id'] != bot_id)]
# Sort the member list alphabetically so that when we create pairs in the
# database using the list, we won't create duplicate pairs (A <-> B is the
# same as B <-> A)
members.sort()
return members
def create_users(members):
"""
Create a User object in the database representing each Mattermost user
given a list of current users in the channel.
"""
# Set only the users that exist in the input list as active
session.query(User).update({
'active': False})
session.query(User).filter(User.user_id.in_(members)).update({
'active': True
}, synchronize_session='fetch')
for member in members:
user = session.query(User).filter(User.user_id == member).all()
if not user:
user = User(user_id=member, active=True)
session.add(user)
session.commit()
def create_pairs(members):
"""
Create a Pair object in the database representing a potential pairing
between two Mattermost users given a list of current users in the channel.
"""
# In order to prevent duplicate pairings (A <-> B is the same as B <-> A),
# the input list must be alphabetically sorted
# We iterate over the list of members similar to a selection sort in order
# create every possible pairing
for i, first_user in enumerate(members):
for second_user in members[i + 1:]:
pair = session.query(Pair).filter(
Pair.first_user_id == first_user,
Pair.second_user_id == second_user).all()
if not pair:
new_pair = Pair(
first_user_id=first_user,
second_user_id=second_user,
count=0)
session.add(new_pair)
session.commit()
def get_pair(members):
"""
Generate one pair of users from a list of members depending on the
frequencies of each user's previous pairings.
"""
member = members[0]
# Select a single user that is currently active in the channel, has not
# been paired with another member in this session yet, and has the lowest
# frequency of previous pairings with the current user
sql = text("""
SELECT paired_member
FROM (
SELECT p.first_user_id as paired_member, p.count
FROM pairs p
JOIN users u ON u.user_id = p.first_user_id
WHERE p.second_user_id = :member
AND u.is_paired = 0
AND u.active = 1
UNION
SELECT p.second_user_id as paired_member, p.count
FROM pairs p
JOIN users u ON u.user_id = p.second_user_id
WHERE p.first_user_id = :member
AND u.is_paired = 0
AND u.active = 1
)
ORDER BY count ASC
LIMIT 1
""")
result = session.execute(sql, {'member': member})
paired_member = result.first()[0]
# Increase the historical number of times this pair has been paired up
# before
sql = text("""
UPDATE pairs
SET count = count + 1
WHERE (first_user_id = :first_member
AND second_user_id = :second_member)
OR (first_user_id = :second_member
AND second_user_id = :first_member)
""")
session.execute(
sql, {'first_member': member, 'second_member': paired_member})
# Mark both users as is_paired so that on the next pairing, we won't try to
# pair either user with a different user
sql = text("""
UPDATE users
SET is_paired = 1
WHERE user_id = :first_member
OR user_id = :second_member
""")
session.execute(
sql, {'first_member': member, 'second_member': paired_member})
session.commit()
members.remove(member)
members.remove(paired_member)
return (member, paired_member)
def get_pairs(members):
"""
Pair up all users from a list of members depending on the frequencies of
each user's previous pairings.
Returns a list of tuples of user IDs.
"""
# In the case of an odd number of members, the user that is sequentially
# last in the input list will have a lower chance of getting paired. In
# order to make it fair, we shuffle the list so that everyone has an equal
# chance of not getting paired
random.shuffle(members)
pairs = []
while len(members) > 1:
pairs.append(get_pair(members))
# Reset the is_paired flag for each user in preparation for the next time
# users get paired
sql = text("""
UPDATE users
SET is_paired = 0
""")
session.execute(sql)
session.commit()
return pairs
def message_pair(driver, pair):
"""
Send a group message to both users in a pair notifying them of their
pairing.
Returns the JSON response from the Mattermost API.
"""
user_list = list(pair)
channel = driver.channels.create_group_message_channel(user_list)
channel_id = channel['id']
message = config.MESSAGE
message_options = {
"channel_id": channel_id,
"message": message
}
response = driver.posts.create_post(message_options)
return response
def message_pairs(driver, pairs):
"""
Send a group message to each pair of users notifying them of their pairing.
"""
for pair in pairs:
message_pair(driver, pair)
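# Hedged end-to-end sketch using only the helpers defined above (the team and
# channel names and the connected `driver` are illustrative assumptions):
#
# members = get_channel_members(driver, 'my-team', 'coffee')
# create_users(members)
# create_pairs(members)
# message_pairs(driver, get_pairs(members))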
|
[
"pcjl@users.noreply.github.com"
] |
pcjl@users.noreply.github.com
|
2f0be5f99cfb60f9a02393b858c049a2e7353bab
|
60811c2d3f81b77f3b870b1ec0ace4b8f1bad19d
|
/python test/vowels string.py
|
45ffe6bb70bdc3f36f42240a5fad866113ed4d18
|
[] |
no_license
|
APARNAS1998/luminardjango1
|
b85c249dacb4d5e819d338e19fd8af48a2ea393e
|
8bd91a38223910c14270e0e21c2d890dc16e2117
|
refs/heads/master
| 2023-08-09T12:49:49.201051
| 2021-09-15T08:39:25
| 2021-09-15T08:39:25
| 403,527,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
string = input('enter the string')
vow = 'aeiou'
empty = ''
for i in string:
    if i not in vow:
        empty = empty + i
print(empty)
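# Worked example: for the input "education" the vowels e, u, a, i, o are
# dropped and the program prints "dctn".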
|
[
"aparna.s1721@saintgits.org"
] |
aparna.s1721@saintgits.org
|
b4fedb807639f5c38443073a26046acd46ed2334
|
ea7d6085e653105e3a31cedbe1ea9c324d470efb
|
/thesis/img/micrografias/170-outros/convert_gray_scalebar.py
|
1c05872a997ab1cd67e7916ae99bcedfaa963514
|
[] |
no_license
|
arthursn/PhD
|
2820b96ca0d92a9b9ce4d0fac84c8ab29d2138b8
|
a9b860c840269eb67aa7023d07063f781d23b0e8
|
refs/heads/master
| 2021-10-16T05:01:13.552451
| 2019-02-08T00:39:51
| 2019-02-08T00:39:51
| 151,268,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from PIL import Image
from matplotlib_scalebar.scalebar import ScaleBar
font0 = FontProperties()
font0.set_size(13)
font0.set_family('sans-serif')
font0.set_file('/usr/share/fonts/truetype/msttcorefonts/Arial.ttf')
pxsize1kx = 20./214.
# fname: px size (um)
cal = {'6a.png': pxsize1kx/10.,
'6b.png': 10./304.,
'6d.png': pxsize1kx/10.,
'6d-2.png': pxsize1kx/10.,
'6e.png': pxsize1kx/5.}
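# Worked example of the calibration above: at 1000x, 214 px span 20 um, so
# pxsize1kx = 20/214 ~ 0.0935 um/px; the entries divided by 10 presumably
# correspond to 10000x frames (~0.00935 um/px) and /5 to 5000x.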
for fname, pxsize in cal.items():
    if os.path.isfile(fname):
fout = '{}.pdf'.format(os.path.splitext(fname)[0])
print(fname)
basename = fname.split('/')[-1]
basename = basename.split('.')[0]
        # open the image (the grayscale conversion is left disabled here)
        img = Image.open(fname)  # .convert('LA')
plt.imshow(img)
scalebar = ScaleBar(pxsize*1e-6, location='lower left')
scalebar.font_properties = font0
plt.gca().add_artist(scalebar)
plt.gca().set_axis_off()
plt.subplots_adjust(top=1, bottom=0, right=1,
left=0, hspace=0, wspace=0)
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.savefig(fout, bbox_inches='tight', pad_inches=0, dpi=300)
plt.close()
|
[
"nishikawa.poli@gmail.com"
] |
nishikawa.poli@gmail.com
|
1f53d11dafb4c125e55493d44db015e1533bdf42
|
622d2e4b894e5884847638523a7e584a8afb8c8e
|
/lib/instruments/TempChamberControl/modbus_tk/TempChamberControl.py
|
b2974b00c40698970f580a174c14cd8745d58528
|
[] |
no_license
|
kennyku796/V2X-ATE
|
23db23128fb83a7d590bc21fbafb9264eb14294a
|
05e110cd3c2382a134d30474ac832b6e5d6faf77
|
refs/heads/master
| 2020-04-07T17:24:28.831961
| 2017-10-03T08:18:03
| 2017-10-03T08:18:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,049
|
py
|
#!/usr/bin/env python
# -*- coding: utf_8 -*-
"""
modbus_tk for the temperature chamber.
Modbus TestKit: Implementation of Modbus protocol in python
(C)2009 - Luc Jean - luc.jean@gmail.com
(C)2009 - Apidev - http://www.apidev.fr
This is distributed under GNU LGPL license, see license.txt
"""
import sys
import serial
import socket  # needed by the REMOTE mode branch below
#sys.path.append("c:/Local/Design/Seion/sw/gui/src")
#sys.path.append("c:/Local/Design/Seion/sw/lab_utils/tests")
sys.path.append(r"C:\Local\wavesys\trunk\lab_utils\tests")
#sys.path.append(r"C:\Local\gui_versions\dual_rf_r16")
#from atlk.gui.ChannelControl import ChannelControl
#from atlk.gui.AnalysisControl import AnalysisControl
#add logging capability , This module defines functions and classes which implement a flexible error logging system for applications
import logging
import time
import modbus_tk
import modbus_tk.defines as cst
import modbus_tk.modbus_rtu as modbus_rtu
import configurationFile
logger = modbus_tk.utils.create_logger("console")
""" by chosing console the logging information/errors will display in console"""
class TempChamberControl:
def __init__(self, port_num = 1, mode = "", srvIpAddra = "", srvPort = 13456):
self.highLimit = 86
self.lowLimit = -23
self.threshold = 0
self.__mode = mode
self.__retries = 3
if self.__mode != "REMOTE":
print "Connecting to serial port number: ",port_num
"""Connect to the temperature chamber"""
self.chamber = modbus_rtu.RtuMaster(serial.Serial(port=port_num-1, baudrate=9600, bytesize=8, parity='N', stopbits=1, xonxoff=0))
"""port = port_num-1 ,because port 1 is actually port 2 ,and thats why we need to decrement"""
self.chamber.set_timeout(5.0)
self.chamber.set_verbose(True)
logger.info("connected")
else:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((srvIpAddra, srvPort))
def SetTemp(self, next_temp):
"""protection check"""
NeedProtection = self.TemperatureLimitProtection(next_temp)
print ("\n\n NeedProtection= ",NeedProtection)
if True in NeedProtection:
print ("\n\n\n\n pay attention!!! - the chamber reach is limit \n\n\n\n")
if 'over' in NeedProtection:
self.__WriteReg(300, (self.highLimit-self.threshold)*10) #in case the temperature is over limit -> we decrease by threshold
elif 'under' in NeedProtection:
self.__WriteReg(300, (self.lowLimit+self.threshold)*10) #in case the temperature is under limit -> we increase by threshold
else:
""" set the next temperature by parameter) """
self.__WriteReg(300, (next_temp)*10)
return
def GetTemp(self, expectedValue=0):
""" get the current temperature """
regValue = 100
self.expectedValue = expectedValue
ChamberTemp = self.__ReadReg(regValue)
ChamberTemp = ChamberTemp/10
return (ChamberTemp)
def __WriteReg(self,regAddress, regValue):
if self.__mode != "REMOTE":
""" write single """
self.chamber.execute(1, cst.WRITE_SINGLE_REGISTER, regAddress, output_value = regValue )
else:
sent = self.socket.send(regAddress)
if sent == 0:
raise RuntimeError, "socket connection broken"
sent = self.socket.send(regValue)
if sent == 0:
raise RuntimeError, "socket connection broken"
return
def __ReadReg(self,regAddress, size=1):
if self.__mode != "REMOTE":
""" read single thru logger in order to support errors"""
print "address for reading temperature from chamber is- %s" %(regAddress)
#readValue = self.chamber.execute(1, cst.READ_HOLDING_REGISTERS, regAddress, size)
readValue = self.chamber.execute(1, cst.READ_INPUT_REGISTERS, regAddress, size)[0]
#print "debug1"
            # the original 'while' here would spin forever when the values
            # differ, since nothing in the body changes the condition; an
            # 'if' check is what was intended
            if self.expectedValue != 0:
                if self.expectedValue == readValue:
                    isSuccess = True
                    print "debug2_true"
                else:
                    isSuccess = False
                    print "debug2_false"
else:
print "debug3"
readValue = self.socket.recv(regAddress)
if size > 1:
self.socket.recv(size)
return readValue #, isSuccess
#send some queries
#logger.info(self.chamber.execute(1, cst.READ_COILS, 0, 10))
#logger.info(master.execute(1, cst.READ_DISCRETE_INPUTS, 0, 8))
#logger.info(master.execute(1, cst.READ_INPUT_REGISTERS, 100, 3))
#logger.info(master.execute(1, cst.READ_HOLDING_REGISTERS, 100, 12))
#logger.info(master.execute(1, cst.WRITE_SINGLE_COIL, 7, output_value=1))
#logger.info(master.execute(1, cst.WRITE_SINGLE_REGISTER, 100, output_value=54))
#logger.info(master.execute(1, cst.WRITE_MULTIPLE_COILS, 0, output_value=[1, 1, 0, 1, 1, 0, 1, 1]))
#logger.info(master.execute(1, cst.WRITE_MULTIPLE_REGISTERS, 100, output_value=xrange(12)))
    def __del__(self):
        """Close the connection to the temperature chamber."""
        if self.__mode != "REMOTE":
            self.chamber.close()
        else:
            self.socket.close()
def PmcTemperature(self, evk):
(status, dimmStartConfigDict) = evk.DimmLoggerInit()
currTemp = dimmStartConfigDict["temperature"]
return currTemp
    def TemperatureLimitProtection(self, next_temp):
        """Verify that the chamber is not set to a temperature beyond its limits."""
        chamberTemp = int(self.GetTemp())  # current chamber temperature (read as a sanity check)
        chamberNextTemp = next_temp  # the temperature the chamber is about to be set to
        if chamberNextTemp > self.highLimit:
            return (True, 'over')
        if chamberNextTemp < self.lowLimit:
            return (True, 'under')
        return (False, None)
"""
def chkTemp(self,evk):
#EvkTemperature = evk.ChannelControl.PollDcocTemperature()
#EvkTemperature = evk.ChannelControl.GetChannelTepmerature()
EvkTemperature = evk.AnalysisControl.GetChannelTemperature()
return (EvkTemperature)
def SetEvkTemperature(self,evk_RX, TempVal, temperatureSteps=2):
'''setting temperature to EVK board'''
#tempChamberControl = TempChamberControl(serPortNum) #selecting serial port
TemperatureLogFile = open('c:/Local/TempLogFile.txt','w' )
#currentTemp = int(self.PmcTemperature(evk_RX))
currentTemp = self.chkTemp(evk_RX)[1]
TemperatureLogFile.write("before entering the loop - EVK current temperature is- " + str(currentTemp) +'\n')
TemperatureLogFile.write("Chamber_curr_temp,Chamber_next_temp,EVK_req_temp,EVK_curr_temp \n")
print "\n evk temperature is- %s" %currentTemp
while abs(currentTemp-TempVal) > 1:
if abs(currentTemp-TempVal)>5:
temperatureSteps = 5
if abs(currentTemp-TempVal)>10:
temperatureSteps = 10
#read chamber temperature
chamberTemp = int(self.GetTemp())
print "\n Chamber Temperature is ",chamberTemp
TemperatureLogFile.write(str(chamberTemp) +",")
            if currentTemp < TempVal:
                # increase chamber temperature by n degrees
                self.SetTemp(chamberTemp + temperatureSteps + 5)
                print("\n The current temperature is", currentTemp, "less than", TempVal)
                print("Raising chamber temperature to", (chamberTemp + temperatureSteps + 5))
                TemperatureLogFile.write(str(chamberTemp + temperatureSteps) + ",")
            if currentTemp > TempVal:
                # decrease chamber temperature by n degrees
                nextTemp = self.SetTemp(chamberTemp - temperatureSteps - 3)
                print("\n The current temperature is", currentTemp, "more than", TempVal)
                print("decreasing chamber temperature to", (chamberTemp - temperatureSteps - 3))
                TemperatureLogFile.write(str(chamberTemp - temperatureSteps) + ",")
time.sleep(20)
if temperatureSteps >5:
time.sleep(35)
time.sleep(25)
currentTemp = self.chkTemp(evk_RX)[1]
print "evk temperature is- %s \n" %currentTemp
TemperatureLogFile.write(str(TempVal) +",")
TemperatureLogFile.write(str(currentTemp) +", \n")
TemperatureLogFile.flush()
print ("EVK temperature is set as expected")
TemperatureLogFile.write("EVK temperature is set as expected \n")
TemperatureLogFile.close()
return
"""
|
[
"zohar_sefti@yahoo.com"
] |
zohar_sefti@yahoo.com
|
802ad5f40c0f30464d37f5c2f052d08893a2acee
|
c009d46a0b791fbd05b74629fe9f0e45bb6573d0
|
/django/hands-on/session1/python_rev.py
|
2333ef947659df5882dda46b3b7fa72d0725aa19
|
[] |
no_license
|
serdardurmus/Clarusway-Projects
|
bcf52775fa17a82ed31728a22e53a107e34733e0
|
327028283a01beddb3e7f134db78cfa8d39606ea
|
refs/heads/master
| 2023-02-12T17:37:22.754666
| 2021-01-09T16:39:57
| 2021-01-09T16:39:57
| 289,539,403
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,583
|
py
|
# Reminders / summary
# print("Hello World")
# not, and, or (operator precedence)
# print([] or 1 and 0 and [1] or not 0)
# a = "henry"
# b = a.rstrip("y")
# c = a.rs
# print(a)
# ----------------------------
## List
## Dictionary
## Tuple
## Set
a = list()
b = ["ali", 1, -1, True, [1,2]]
print(type(a))
print(type(b))
print(b[1:4])
print(b[::-1])
# ----------------------------
thislist = ["apple", "banana", "cherry"]
thislist.insert(1, "orange")
thislist.pop(1)
print(thislist)
# ----------------------------
thisdict = {
"brand": "Ford",
"model": "Mustang",
"year": 1964
}
x = thisdict.get("model", "YOK")
print(x)
thisdict = {
"brand": "Ford",
"model": "Mustang",
"year": 1964
}
thisdict.pop("model")
print(thisdict)
# ----------------------------
thisset = {"apple", "banana", "cherry"}
for x in thisset:
print(x)
thisset.add("orange")
print(thisset)
thisset.discard("bananaa") # hata yok
print(thisset)
# ---------------------------
a = {1,2,3,10,32,100}
b = {1,2,32}
print(a.difference(b))
print(a.intersection(b))
print(a.union(b))
g = []
if g:
    print("g is not empty")
else:
    print("g is empty")
# ---------------------------
# antall = int(input("Skriv det siste nummeret av Fibonacci-listen du vil se: "))
# listeen= []
# a = 1
# b = 1
# while True:
# if a > antall: break
# listeen.append(a)
# if b > antall : break
# listeen.append(b)
# a = a+b
# b = a+b
# print(listeen)
# if listeen[-1] != antall: print("{} is not a Fibonacci number".format(antall))
# ----------------
def my_function():
    print("Hello world")
    return print("Hello world")  # prints a second time; print() itself returns None
my_function()
def my_func2(*a):
print(a)
# return c
my_func2(2,3)
def my_func(**c):
print(c)
# return c
my_func(a=1,b=3)
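# *a packs positional arguments into a tuple; **c packs keyword arguments into a dict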
# -------------------------
# def is_palindrom(string):
# return string[::-1].upper() == string.upper()
# def palindrom(sentences):
# string =""
# for chr in sentences:
# if chr.isalnum():
# string += chr
# print(string)
# return is_palindrom(string)
from deneme import is_palindrom, palindrom  # 'deneme' is the local module sketched in the comments above
sentences = "ahmet"
# sentences = input("Please Enter a word or sentences: ")
if palindrom(sentences):
print("{} is a palindrom".format(sentences))
else:
print("{} is not a palindrom".format(sentences))
username = ",,,,...!!henry***"
x = username.strip(',.!*')
print("my name is: " + x)
# ------------------------------------
l = (lambda x: x**2)(2)
print(l)
# -------------------------------
listem = [1,2,3,4,5,6,7,8,9]
even = filter(lambda x: x%2 == 0, listem)
print(list(even))
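# ------------------------------
# (added example) map is the companion to filter: it applies a function to
# every element instead of selecting elements
squares = map(lambda x: x**2, listem)
print(list(squares))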
|
[
"serdar83durmus@gmail.com"
] |
serdar83durmus@gmail.com
|
7fd34c0f9f522af5403fa9a97596d57c30c50fa4
|
370bcd596051652f893ca1776123eaf27e7813ed
|
/Encapsulation - Exercise/wild cat zoo/project/tiger.py
|
6533339b627c303f9075b63a4a7bde22aad398e7
|
[] |
no_license
|
zdravkob98/Python-OOP-November-2020
|
130eb711fc06cd39da0c9b9112f0dad867867f8c
|
ed0abdbff32a9afcb904bc08e53bee1584e1b0d3
|
refs/heads/main
| 2023-02-04T10:34:36.959141
| 2020-12-23T15:39:37
| 2020-12-23T15:39:37
| 323,936,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
from project.animals_base import AnimalBase
class Tiger(AnimalBase):
needs = 45
|
[
"zdravkobonev@abv.bg"
] |
zdravkobonev@abv.bg
|
15e1b0d05f28b755b3c069502846842bca5a2a7e
|
d697c1d45e96bd440be9c17ab14243a5882b1f52
|
/qianfeng/常用模块/urllib/3-get.py
|
b350fd85a8630d8b2e107ef0ed06231b231877a8
|
[] |
no_license
|
ithjl521/python
|
9eeda2e60dda97ee36e8764c06400eb12818689f
|
f4fe50799501c483cb64445fd05ee0f30f56576c
|
refs/heads/master
| 2020-07-12T23:10:53.608276
| 2019-11-08T08:59:35
| 2019-11-08T08:59:35
| 204,931,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
import urllib.request
import urllib.parse
word = input('Enter what you want to search for: ')
url = 'https://www.baidu.com/s?'
# write the query parameters as a dict
data = {
'ie':'utf-8',
'wd':word,
}
query_string = urllib.parse.urlencode(data)
url += query_string
# send the request
response = urllib.request.urlopen(url)
print(url)
print(response.read())
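# note (added): read() returns bytes; to print text, decode it instead:
# print(response.read().decode('utf-8'))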
|
[
"it_hjl@163.com"
] |
it_hjl@163.com
|
8239ff353d8819af83732e68a6f5a66b33e9c3ef
|
2cc68d1f90e519d803573b04da0b40eb139ae70e
|
/users/migrations/0006_auto_20200114_2234.py
|
faf157fe313e43d78da445c7f7b5bef8a34710db
|
[] |
no_license
|
eRafaell/simple-flashcards
|
5ebd21946ab892e00c3c4ca0b2dfc36f3b1a3104
|
ee9f41caefd9c4d3085149059c26098ed1d641e3
|
refs/heads/master
| 2022-12-17T12:52:15.551867
| 2020-02-08T20:00:20
| 2020-02-08T20:00:20
| 225,472,886
| 0
| 1
| null | 2022-11-22T04:54:40
| 2019-12-02T21:33:18
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 440
|
py
|
# Generated by Django 2.2 on 2020-01-14 22:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0005_auto_20191215_1353'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(blank=True, default='default.png', null=True, upload_to='profile_pics'),
),
]
|
[
"rafalo82@interia.pl"
] |
rafalo82@interia.pl
|
ceea35a95eb95d2d80993d3f36373bd3d4b4e393
|
02a0633107f34a0a40f8c87c56ebcb7d52582d48
|
/conv_vae.py
|
ef9c51abb551b0e797923d57aa27bb287fad554f
|
[
"BSD-3-Clause"
] |
permissive
|
kastnerkyle/ift6266h15
|
87a1e4a0903690a59c5c833d0d16096c229b2a0d
|
536484926cc0555e4a042a582c1c1958835f9163
|
refs/heads/master
| 2021-01-01T19:01:42.630534
| 2015-04-20T03:22:24
| 2015-04-20T03:22:24
| 29,013,264
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,906
|
py
|
# Kyle Kastner
# License: MIT
"""
VAE in a single file.
Bringing in code from IndicoDataSolutions and Alec Radford (NewMu)
"""
import theano
import theano.tensor as T
from theano.compat.python2x import OrderedDict
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from optimizers import rmsprop, sgd_nesterov
from theano.tensor.signal.downsample import max_pool_2d
from theano.tensor.nnet import conv2d
import tempfile
import gzip
import cPickle
import numpy as np
from matplotlib import pyplot as plt
from scipy.misc import imsave
from time import time
import os
import sys
sys.setrecursionlimit(40000)
def hard_tanh(X):
return T.clip(X, -1., 1.)
def relu(X):
return X * (X > 1E-6)
def minibatch_indices(X, minibatch_size, lb=None, ub=None):
if lb is None:
lb = 0
if ub is None:
ub = len(X)
minibatch_indices = np.arange(lb, ub, minibatch_size)
minibatch_indices = np.asarray(list(minibatch_indices) + [ub])
start_indices = minibatch_indices[:-1]
end_indices = minibatch_indices[1:]
return zip(start_indices, end_indices)
def conv_layer(input_variable, filter_shape, pool_shape, random_state):
# This is a mode='same' convolution
np_filters = 0.1 * (random_state.rand(
*filter_shape).astype(theano.config.floatX) - 0.5)
filters = theano.shared(np_filters)
np_biases = np.zeros(filter_shape[0]).astype(theano.config.floatX)
biases = theano.shared(np_biases)
# Assume square filters
s = int(np.floor(filter_shape[-1] / 2.))
conv = conv2d(input_variable, filters, border_mode='full')[:, :, s:-s, s:-s]
params = [filters, biases]
conv += biases.dimshuffle('x', 0, 'x', 'x')
# batch_normalization
n, n_params = normalization_layer(conv, filter_shape)
params += n_params
out = relu(n)
pooled = max_pool_2d(out, pool_shape)
return pooled, params
def deconv_layer(input_variable, filter_shape, pool_shape, random_state,
activation="relu"):
# This is a mode='same' convolution
np_filters = 0.1 * (random_state.rand(
*filter_shape).astype(theano.config.floatX) - 0.5)
filters = theano.shared(np_filters)
np_biases = np.zeros(filter_shape[0]).astype(theano.config.floatX)
biases = theano.shared(np_biases)
if pool_shape[-1] > 1:
pooled = depool_2d(input_variable, factor=pool_shape[-1])
else:
pooled = input_variable
# Assume square filters
s = int(np.floor(filter_shape[-1] / 2.))
conv = conv2d(pooled, filters, border_mode='full')[:, :, s:-s, s:-s]
params = [filters, biases]
conv += biases.dimshuffle('x', 0, 'x', 'x')
# batch_normalization
n, n_params = normalization_layer(conv, filter_shape)
params += n_params
if activation == "relu":
out = relu(n)
elif activation == "hard_tanh":
out = hard_tanh(n)
# assume square pool_shape
return out, params
def depool_2d(X, factor=2):
"""
perforated upsample
http://www.brml.org/uploads/tx_sibibtex/281.pdf
Modified from Alec Radford (NewMu)
"""
output_shape = (X.shape[1], X.shape[2] * factor, X.shape[3] * factor)
stride = X.shape[2]
offset = X.shape[3]
in_dim = stride * offset
out_dim = in_dim * factor * factor
upsamp_matrix = T.zeros((in_dim, out_dim))
rows = T.arange(in_dim)
cols = rows * factor + (rows / stride * factor * offset)
upsamp_matrix = T.set_subtensor(upsamp_matrix[rows, cols], 1.)
flat = T.reshape(X, (X.shape[0], output_shape[0], X.shape[2] * X.shape[3]))
up_flat = T.dot(flat, upsamp_matrix)
upsamp = T.reshape(up_flat, (X.shape[0], output_shape[0],
output_shape[1], output_shape[2]))
return upsamp
def normalization_layer(input_variable, layer_shape):
if len(layer_shape) == 4:
# conv bc01 but layer_shape is (new_c, old_c, w, h)
np_G = np.ones(layer_shape[0]).astype(theano.config.floatX)
np_B = np.zeros(layer_shape[0]).astype(theano.config.floatX)
G = theano.shared(np_G)
B = theano.shared(np_B)
normed = (input_variable - input_variable.mean(
axis=(0, 2, 3), keepdims=True)) / (input_variable.std(
axis=(0, 2, 3), keepdims=True) + 1E-6)
out = G.dimshuffle('x', 0, 'x', 'x') * normed + B.dimshuffle(
'x', 0, 'x', 'x')
else:
np_G = np.ones(layer_shape[1]).astype(theano.config.floatX)
np_B = np.zeros(layer_shape[1]).astype(theano.config.floatX)
G = theano.shared(np_G)
B = theano.shared(np_B)
normed = (input_variable - input_variable.mean(
axis=0, keepdims=True)) / (input_variable.std(
axis=0, keepdims=True) + 1E-6)
out = G * normed + B
params = [G, B]
return out, params
def linear_layer(input_variable, layer_shape, random_state):
np_W = 0.1 * (random_state.rand(
*layer_shape).astype(theano.config.floatX) - 0.5)
W = theano.shared(np_W)
np_b = np.zeros(layer_shape[1]).astype(theano.config.floatX)
b = theano.shared(np_b)
params = [W, b]
l = T.dot(input_variable, W) + b
# batch_normalization
out, n_params = normalization_layer(l, layer_shape)
params += n_params
return out, params
def relu_layer(input_variable, layer_shape, random_state):
out, params = linear_layer(input_variable, layer_shape, random_state)
return relu(out), params
def tanh_layer(input_variable, layer_shape, random_state):
out, params = linear_layer(input_variable, layer_shape, random_state)
return T.tanh(out), params
def bw_grid_vis(X, show=True, save=False, transform=False):
ngrid = int(np.ceil(np.sqrt(len(X))))
sqrt_shp = int(np.sqrt(X.shape[1]))
npxs = np.sqrt(X[0].size)
img = np.zeros((npxs * ngrid + ngrid - 1,
npxs * ngrid + ngrid - 1))
for i, x in enumerate(X):
j = i % ngrid
i = i / ngrid
if len(x.shape) < 3:
x = x.reshape((sqrt_shp, sqrt_shp))
img[i*npxs+i:(i*npxs)+npxs+i, j*npxs+j:(j*npxs)+npxs+j] = x
if show:
plt.imshow(img, interpolation='nearest')
plt.show()
if save:
imsave(save, img)
return img
def unpickle(f):
import cPickle
fo = open(f, 'rb')
d = cPickle.load(fo)
fo.close()
return d
def mnist(datasets_dir='/Tmp/kastner'):
try:
import urllib
urllib.urlretrieve('http://google.com')
except AttributeError:
import urllib.request as urllib
url = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
data_file = os.path.join(datasets_dir, 'mnist.pkl.gz')
if not os.path.exists(data_file):
urllib.urlretrieve(url, data_file)
print('... loading data')
# Load the dataset
f = gzip.open(data_file, 'rb')
try:
train_set, valid_set, test_set = cPickle.load(f, encoding="latin1")
except TypeError:
train_set, valid_set, test_set = cPickle.load(f)
f.close()
random_state = np.random.RandomState(1000)
pwr = 0.0
test_x, test_y = test_set
test_x = test_x.astype('float32') + pwr * random_state.randn(*test_x.shape)
test_y = test_y.astype('int32')
valid_x, valid_y = valid_set
valid_x = valid_x.astype('float32') + pwr * random_state.randn(
*valid_x.shape)
valid_y = valid_y.astype('int32')
train_x, train_y = train_set
train_x = train_x.astype('float32') + pwr * random_state.randn(
*train_x.shape)
train_y = train_y.astype('int32')
rval = [(train_x, train_y), (valid_x, valid_y), (test_x, test_y)]
return rval
def make_paths(n_code, n_paths, n_steps=480):
"""
create a random path through code space by interpolating between points
"""
paths = []
p_starts = np.random.randn(n_paths, n_code)
for i in range(n_steps / 48):
p_ends = np.random.randn(n_paths, n_code)
for weight in np.linspace(0., 1., 48):
paths.append(p_starts*(1-weight) + p_ends*weight)
p_starts = np.copy(p_ends)
paths = np.asarray(paths)
return paths
# TODO: FIX THIS WHOLE THING
class PickleMixin(object):
def __getstate__(self):
if not hasattr(self, '_pickle_skip_list'):
self._pickle_skip_list = []
for k, v in self.__dict__.items():
try:
f = tempfile.TemporaryFile()
cPickle.dump(v, f)
except:
self._pickle_skip_list.append(k)
state = OrderedDict()
for k, v in self.__dict__.items():
if k not in self._pickle_skip_list:
state[k] = v
return state
def __setstate__(self, state):
self.__dict__ = state
class ConvVAE(PickleMixin):
def __init__(self, image_save_root=None, snapshot_file="snapshot.pkl",
enc_sizes=[256, 128],
dec_sizes=[256, 128], n_code=64, learning_rate=0.1,
momentum=0.99, batch_size=20, n_epoch=100):
self.srng = RandomStreams()
self.enc_sizes = enc_sizes
self.dec_sizes = dec_sizes
self.n_code = n_code
self.n_epoch = n_epoch
self.batch_size = batch_size
self.learning_rate = theano.shared(np.cast['float32'](learning_rate))
self.momentum = momentum
self.costs_ = []
self.epoch_ = 0
self.snapshot_file = snapshot_file
self.image_save_root = image_save_root
"""
if os.path.exists(self.snapshot_file):
print("Loading from saved snapshot " + self.snapshot_file)
f = open(self.snapshot_file, 'rb')
classifier = cPickle.load(f)
self.__setstate__(classifier.__dict__)
f.close()
"""
def _setup_functions(self, X, random_state):
X_sym = T.tensor4()
e_sym = T.matrix()
X_sym.tag.test_value = X[:self.batch_size]
e_sym.tag.test_value = random_state.randn(
self.batch_size, self.n_code).astype(theano.config.floatX)
"""
Z_sym = T.matrix()
Z_sym.tag.test_value = random_state.randn(
self.n_batch, self.n_code).astype(theano.config.floatX)
"""
enc_tuples = []
dec_tuples = []
if len(X.shape) != 4:
raise ValueError("Batch should be 4D in b, c, h, w format")
# Number of channels in below layer
prev_size = X.shape[1]
downpool_factor = 1
        for s in self.enc_sizes:
            if isinstance(s, (tuple, list)):
                enc_t = (s[0], prev_size, s[1], s[2], s[3])
            else:
                raise ValueError("ConvVAE only takes tuples of encoder size")
            downpool_factor = downpool_factor * s[3]
            enc_tuples.append(enc_t)
            prev_size = s[0]
        downpool_size = X.shape[2] / downpool_factor
        if X.shape[2] % downpool_factor != 0:
            raise ValueError("Pool shapes must match image size "
                             "and layer depth!")
prev_size = X.shape[1]
        for s in self.dec_sizes[::-1]:
            if isinstance(s, (tuple, list)):
                dec_t = (prev_size, s[0], s[1], s[2], s[3])
            else:
                raise ValueError("ConvVAE only takes tuples of decoder size")
            dec_tuples.append(dec_t)
            prev_size = s[0]
print(enc_tuples)
print(dec_tuples[::-1])
if not hasattr(self, "params"):
print('generating weights')
enc_params = []
in_sym = X_sym
for n in range(len(enc_tuples)):
filter_shape = (enc_tuples[n][0], enc_tuples[n][1],
enc_tuples[n][2], enc_tuples[n][3])
pool_shape = (enc_tuples[n][4], enc_tuples[n][4])
print("Encode filters")
print(filter_shape)
print(pool_shape)
out_sym, params = conv_layer(in_sym, filter_shape,
pool_shape, random_state)
enc_params.extend(params)
in_sym = out_sym
# linear tuple
# assume square
in_sym = in_sym.reshape((in_sym.shape[0], -1))
latent_size = (enc_tuples[-1][0] * downpool_size * downpool_size,
self.n_code)
in_sym, hidden_params = tanh_layer(in_sym, latent_size,
random_state)
enc_params.extend(hidden_params)
translation_size = (self.n_code, self.n_code)
mu_sym, mu_params = linear_layer(in_sym,
translation_size,
random_state)
enc_params.extend(mu_params)
sigma_sym, sigma_params = linear_layer(in_sym,
translation_size,
random_state)
# Constrain to be > 0
sigma_sym = T.nnet.softplus(sigma_sym + 1E-15)
enc_params.extend(sigma_params)
self.enc_params = enc_params
# Code layer calculations
log_sigma_sym = T.log(sigma_sym)
code_sym = mu_sym + T.exp(log_sigma_sym) * e_sym
# Decoding from the code layer
decode_size = (self.n_code,
dec_tuples[-1][1] * downpool_size * downpool_size)
dec_sym, dec_params = relu_layer(code_sym,
decode_size,
random_state)
reshape_size = (-1, dec_tuples[-1][1], downpool_size,
downpool_size)
print("Reshape size")
print(reshape_size)
dec_sym = dec_sym.reshape(reshape_size)
# stop = -1 to include 0
in_sym = dec_sym
for n in range(len(dec_tuples) - 1, -1, -1):
# Reverse order due to end reversal
if len(enc_tuples[n]) < 4:
out_sym, params = relu_layer(in_sym, dec_tuples[n],
random_state)
else:
filter_shape = (dec_tuples[n][0], dec_tuples[n][1],
dec_tuples[n][2], dec_tuples[n][3])
pool_shape = (enc_tuples[n][4], enc_tuples[n][4])
print("Decode filters")
print(filter_shape)
print(pool_shape)
if n == 0:
out_sym, params = deconv_layer(in_sym, filter_shape,
pool_shape,
random_state,
activation="hard_tanh")
else:
out_sym, params = deconv_layer(in_sym, filter_shape,
pool_shape, random_state)
dec_params.extend(params)
in_sym = out_sym
y_sym = out_sym
self.dec_params = dec_params
self.params = self.enc_params + self.dec_params
# Derived from
# http://stats.stackexchange.com/questions/7440/kl-divergence-between-two-univariate-gaussians
# with \sigma_2 = 1 and \mu_2 = 0
# Key identity:
# x = exp(log(x))
# exp(log(sigma ** 2)) = exp(2 log(sigma))
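        # Closed form used below:
        #   KL(N(mu, sigma) || N(0, 1)) = -0.5 * sum(1 + 2*log(sigma)
        #                                            - exp(2*log(sigma)) - mu**2)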
kl_cost = -0.5 * T.sum(2 * log_sigma_sym - T.exp(2 * log_sigma_sym) -
mu_sym ** 2 + 1)
# see https://www.cs.toronto.edu/~hinton/csc2515/notes/lec6tutorial.pdf
# page 3
likelihood_cost = T.sum(T.sqr(X_sym - y_sym))
# from Autoencoding Variational Bayes
# http://arxiv.org/abs/1312.6114
cost = kl_cost + likelihood_cost
learning_rate = self.learning_rate
momentum = self.momentum
grads = T.grad(cost, self.params)
opt = rmsprop(self.params)
updates = opt.updates(self.params, grads,
learning_rate / np.cast['float32'](
self.batch_size),
momentum)
print('compiling')
self._fit_function = theano.function([X_sym, e_sym], cost,
updates=updates)
self._reconstruct = theano.function([X_sym, e_sym], y_sym)
self._x_given_z = theano.function([code_sym], y_sym)
self._z_given_x = theano.function([X_sym], (mu_sym, log_sigma_sym))
def fit(self, X):
random_state = np.random.RandomState(1999)
if not hasattr(self, "_fit_function"):
self._setup_functions(X, random_state)
xs = random_state.randn(self.batch_size, self.n_code).astype(
theano.config.floatX)
idx = random_state.randint(0, len(X), self.batch_size)
x_rec = X[idx].astype(theano.config.floatX)
n = 0.
for e in range(self.n_epoch):
t = time()
for n, (i, j) in enumerate(minibatch_indices(X, self.batch_size)):
xmb = X[i:j]
cost = self._fit_function(xmb, random_state.randn(
xmb.shape[0], self.n_code).astype(theano.config.floatX))
self.costs_.append(cost)
n += xmb.shape[0]
print("Train iter", e)
print("Total iters run", self.epoch_)
print("Total Cost", cost)
print("Mean Cost per Example", cost / len(xmb))
print("Time", time() - t)
self.epoch_ += 1
if e % (self.n_epoch // 10) == 0 or e == (self.n_epoch - 1):
print("Saving model snapshot")
f = open(self.snapshot_file, 'wb')
cPickle.dump(self, f, protocol=2)
f.close()
def tf(x):
return ((x + 1.) / 2.).transpose(1, 2, 0)
if e == (self.n_epoch - 1) or e % (self.n_epoch // 10) == 0:
if self.image_save_root is None:
image_save_root = os.path.split(__file__)[0]
else:
image_save_root = self.image_save_root
samples_path = os.path.join(
image_save_root, "sample_images_epoch_%d" % self.epoch_)
if not os.path.exists(samples_path):
os.makedirs(samples_path)
samples = self._x_given_z(xs)
samples = samples[:100]
recs = self._reconstruct(x_rec, random_state.randn(
len(x_rec), self.n_code).astype(theano.config.floatX))
recs = recs[:100]
x_rec = x_rec[:100]
img1 = bw_grid_vis(x_rec, show=False)
img2 = bw_grid_vis(recs, show=False)
img3 = bw_grid_vis(samples, show=False)
imsave(os.path.join(samples_path, 'source.png'), img1)
imsave(os.path.join(samples_path, 'source_recs.png'), img2)
imsave(os.path.join(samples_path, 'random_samples.png'), img3)
paths = make_paths(self.n_code, 3)
for i in range(paths.shape[1]):
path_samples = self._x_given_z(paths[:, i, :].astype(
theano.config.floatX))
for j, sample in enumerate(path_samples):
imsave(os.path.join(samples_path,
'paths_%d_%d.png' % (i, j)),
sample.squeeze())
def transform(self, x_rec):
recs = self._reconstruct(
x_rec, np.random.randn(x_rec.shape[0], self.n_code).astype(
theano.config.floatX))
return recs
    def encode(self, X, e=None):
        # e is kept for API compatibility but unused: the compiled function
        # returns the posterior parameters (mu, log_sigma) deterministically
        return self._z_given_x(X)

    def decode(self, Z):
        return self._x_given_z(Z)
if __name__ == "__main__":
tr, _, _, = mnist()
trX, trY = tr
tf = ConvVAE(image_save_root="/Tmp/kastner/conv_vae",
snapshot_file="/Tmp/kastner/conv_mnist_snapshot.pkl",
enc_sizes=[(64, 3, 3, 2), (128, 3, 3, 2)],
dec_sizes=[(128, 3, 3, 2), (64, 3, 3, 2)],
n_code=512,
learning_rate=.01, momentum=0.9, n_epoch=1000, batch_size=128)
trX = trX.astype(theano.config.floatX)
trX = trX.reshape(len(trX), 1, 28, 28)
tf.fit(trX)
recs = tf.transform(trX[:100])
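    # (added) the reconstructions can be inspected with the grid helper above,
    # e.g.: bw_grid_vis(recs.reshape(len(recs), -1), show=True)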
|
[
"kastnerkyle@gmail.com"
] |
kastnerkyle@gmail.com
|
df5d6bd5f4a392d58b19719f3e6584daca573c4b
|
c828cd876d92b1e10309909c1d4e63ae64d5dfde
|
/eigen.py
|
a24865fdc3e2be8728d191bdc33efd75e9fa17ee
|
[] |
no_license
|
walkergussler/random_python
|
111770b2c06c0560145210073510568c5eeca564
|
c7d8386d906ce4c21490b651d9f3a49ab3fa24e5
|
refs/heads/main
| 2023-01-09T16:39:58.916523
| 2020-11-10T07:24:18
| 2020-11-10T07:24:18
| 311,574,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,339
|
py
|
import numpy as np
import sys
from ghost.util.distance import hamming
from Bio import SeqIO
def doHdistReturnProp(seqs1,seqs2): #calculate hamming proportions between two sets of sequences, return matrix
keylen=len(seqs1[0])
l1=len(seqs1)
l2=len(seqs2)
hdist=hamming(seqs1,seqs2,ignore_gaps=False)
arr=np.zeros([l1,l2])
for id in range(len(hdist)):
item=hdist[id]
arr[:,id]=item[:,0]
return np.divide(arr,keylen,dtype=float)
def getseqs(input1):
seqs=[]
input_handle = open(input1)
for record in SeqIO.parse(input_handle, "fasta"): # for FASTQ use "fastq", for fasta "fasta"
if len(record.seq) > 0 and len(record.seq) < 50000:
seqs.append(record.seq)
input_handle.close()
return seqs
def printFiles(inmat,fileList): #takes in square matrices only
# print(fileList)
if len(fileList)==1:
print(fileList[0])
return 0
maxes=np.zeros(len(fileList))
for id in range(len(fileList)):
# print("id=%i"%id)
newarr=np.delete(np.delete(inmat,id,axis=1),id,axis=0)
# print(newarr)
(values,vectors)=np.linalg.eig(newarr)
# print(values)
maxes[id]=values.real.max()
# print("-")
# print(max(values),fileList[id])
# print("-")
# print(maxes)
smallestSpec=maxes.real.argmax()
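    # despite the name, this is the index whose removal leaves the matrix with
    # the largest leading eigenvalue; that file is printed and then dropped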
newfiles=list(fileList)
# print("its here")
print(fileList[smallestSpec])
# print("now its not")
newfiles.remove(fileList[smallestSpec])
newarr=np.delete(np.delete(inmat,smallestSpec,axis=1),smallestSpec,axis=0)
printFiles(newarr,newfiles)
del sys.argv[0]
inputs=sys.argv
numFiles=len(inputs)
# print("---------running following inputs---------")
dsamp=np.zeros([numFiles,numFiles])
for i1 in range(numFiles):
f1=inputs[i1]
# print(f1)
seqs1=getseqs(f1)
l1=len(seqs1)
for i2 in range(i1,numFiles):
f2=inputs[i2]
if i1!=i2:
seqs2=getseqs(f2)
l2=len(seqs2)
tmp=doHdistReturnProp(seqs1,seqs2)
asd=np.amin(tmp)
if asd==0:
dsamp[i1,i2]=.00001
dsamp[i2,i1]=.00001
else:
dsamp[i1,i2]=asd
dsamp[i2,i1]=asd
# print("------------------------------------------")
printFiles(dsamp,inputs)
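# (added note) intended invocation, per the sys.argv handling above:
#   python eigen.py sample1.fas sample2.fas sample3.fas ...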
|
[
"mnz0@biolinux.biotech.cdc.gov"
] |
mnz0@biolinux.biotech.cdc.gov
|
f3b973abaeb7d82e984adf8b5947f37cbd3d670e
|
1a3d6caf89e5b51a33627458ae7c0bbb00efdc1d
|
/src/gluonts/torch/model/deepar/lightning_module.py
|
dac207ccc13117d02b5260d54ff56b32dd4225c7
|
[
"Apache-2.0"
] |
permissive
|
zoolhasson/gluon-ts
|
e9ff8e4ead4d040d9f8fa8e9db5f07473cb396ed
|
3dfc0af66b68e3971032a6bd0f75cd216988acd6
|
refs/heads/master
| 2023-01-25T01:52:57.126499
| 2023-01-13T17:50:38
| 2023-01-13T17:50:38
| 241,743,126
| 0
| 1
|
Apache-2.0
| 2020-08-06T16:53:11
| 2020-02-19T22:45:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,044
|
py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pytorch_lightning as pl
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from gluonts.core.component import validated
from gluonts.itertools import select
from gluonts.torch.modules.loss import DistributionLoss, NegativeLogLikelihood
from .module import DeepARModel
class DeepARLightningModule(pl.LightningModule):
"""
A ``pl.LightningModule`` class that can be used to train a
``DeepARModel`` with PyTorch Lightning.
This is a thin layer around a (wrapped) ``DeepARModel`` object,
that exposes the methods to evaluate training and validation loss.
Parameters
----------
model
``DeepARModel`` to be trained.
loss
Loss function to be used for training,
default: ``NegativeLogLikelihood()``.
lr
Learning rate, default: ``1e-3``.
weight_decay
Weight decay regularization parameter, default: ``1e-8``.
patience
Patience parameter for learning rate scheduler, default: ``10``.
"""
@validated()
def __init__(
self,
model: DeepARModel,
loss: DistributionLoss = NegativeLogLikelihood(),
lr: float = 1e-3,
weight_decay: float = 1e-8,
patience: int = 10,
) -> None:
super().__init__()
self.save_hyperparameters()
self.model = model
self.loss = loss
self.lr = lr
self.weight_decay = weight_decay
self.patience = patience
self.example_input_array = tuple(
[
torch.zeros(shape, dtype=self.model.input_types()[name])
for (name, shape) in self.model.input_shapes().items()
]
)
def forward(self, *args, **kwargs):
return self.model(*args, **kwargs)
def training_step(self, batch, batch_idx: int): # type: ignore
"""
Execute training step.
"""
train_loss = self.model.loss(
**select(self.model.input_shapes(), batch),
future_observed_values=batch["future_observed_values"],
future_target=batch["future_target"],
loss=self.loss,
).mean()
self.log(
"train_loss",
train_loss,
on_epoch=True,
on_step=False,
prog_bar=True,
)
return train_loss
def validation_step(self, batch, batch_idx: int): # type: ignore
"""
Execute validation step.
"""
val_loss = self.model.loss(
**select(self.model.input_shapes(), batch),
future_observed_values=batch["future_observed_values"],
future_target=batch["future_target"],
loss=self.loss,
).mean()
self.log(
"val_loss", val_loss, on_epoch=True, on_step=False, prog_bar=True
)
return val_loss
def configure_optimizers(self):
"""
Returns the optimizer to use.
"""
optimizer = torch.optim.Adam(
self.model.parameters(),
lr=self.lr,
weight_decay=self.weight_decay,
)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": ReduceLROnPlateau(
optimizer=optimizer,
mode="min",
factor=0.5,
patience=self.patience,
),
"monitor": "train_loss",
},
}
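# Hedged usage sketch (added; not part of the original module). Assumes `net`
# is an already-constructed DeepARModel and `train_loader` a compatible
# DataLoader:
#
#   module = DeepARLightningModule(model=net, lr=1e-3, patience=10)
#   trainer = pl.Trainer(max_epochs=10)
#   trainer.fit(module, train_dataloaders=train_loader)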
|
[
"noreply@github.com"
] |
zoolhasson.noreply@github.com
|
9356317c2642a1bdfa87247aa8eea8fbbd762bd8
|
8850d67783bb86a83a5bb68df0a78c0f8684ac56
|
/test/unit/helpers/aws_mocks.py
|
0fa2a1c362eef3558596cd789053b17c69edf9fa
|
[
"Apache-2.0"
] |
permissive
|
awesome-security/streamalert
|
057b8d86d65ff0e84d580a2f251efcb7760b548c
|
33a3f1034873a2ee10485ad179110dccb2bd965b
|
refs/heads/master
| 2021-01-19T13:56:28.307580
| 2017-08-18T01:33:52
| 2017-08-18T01:33:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,997
|
py
|
'''
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from datetime import datetime
import uuid
from botocore.exceptions import ClientError
class MockLambdaClient(object):
"""http://boto3.readthedocs.io/en/latest/reference/services/lambda.html"""
def __init__(self, name, **kwargs):
self.region = kwargs.get('region')
self.throw_exception = kwargs.get('throw_exception')
self.current_version = 10
self.name = name
def publish_version(self, **kwargs):
# Test error handling
if self.throw_exception:
raise ClientError({'Error': {}}, 'test')
function_name = kwargs.get('FunctionName')
code_sha_256 = kwargs.get('CodeSha256')
description = kwargs.get('Description')
return {
'FunctionName': function_name,
'FunctionArn': 'arn:aws:lambda:region:account-id:function:{}'.format(function_name),
'Runtime': 'python2.7',
'Role': 'string',
'Handler': 'main.handler',
'CodeSize': 128,
            'Description': description,
'Timeout': 60,
'MemorySize': 128,
'LastModified': 'string',
'CodeSha256': code_sha_256,
'Version': self.current_version + 1
}
class MockAthenaClient(object):
"""http://boto3.readthedocs.io/en/latest/reference/services/athena.html"""
def __init__(self, **kwargs):
self.query_executions = {}
self.results = kwargs.get('results', [{'test': 'test'}])
self.result_state = kwargs.get('result_state', 'SUCCEEDED')
def get_start_query_execution(self, **kwargs):
return {
'QueryExecution': {
                'QueryExecutionId': str(uuid.uuid4()),
'Query': kwargs.get('QueryString'),
'ResultConfiguration': {
'OutputLocation': kwargs.get('OutputLocation', ''),
'EncryptionConfiguration': kwargs.get('EncryptionConfiguration', {})
},
'QueryExecutionContext': kwargs.get('QueryExecutionContext', {}),
'Status': {
'State': 'QUEUED',
'StateChangeReason': 'string',
'SubmissionDateTime': datetime(2017, 1, 1),
'CompletionDateTime': datetime(2017, 1, 1)
},
'Statistics': {
'EngineExecutionTimeInMillis': 123,
'DataScannedInBytes': 123
}
}
}
def start_query_execution(self, **kwargs):
"""Start an Athena Query Exectuion."""
new_query_execution = self.get_start_query_execution(**kwargs)
new_query_id = new_query_execution['QueryExecution']['QueryExecutionId']
self.query_executions[new_query_id] = new_query_execution
return {
'QueryExecutionId': new_query_id
}
def get_query_execution(self, **kwargs):
"""Get the status of an Athena Query Exectuion."""
query_execution = self.query_executions.get(kwargs['QueryExecutionId'])
query_execution['QueryExecution']['Status']['State'] = self.result_state
return query_execution
def get_query_results(self, **kwargs):
"""Get the results of a executed query"""
if self.results:
return {'ResultSet': {'Rows': [{'Data': self.results}]}}
else:
return {'ResultSet': {'Rows': []}}
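# Hedged example (added): how the Athena mock might be exercised in a test.
# Names are illustrative only.
#   client = MockAthenaClient(result_state='SUCCEEDED')
#   qid = client.start_query_execution(QueryString='SELECT 1')['QueryExecutionId']
#   state = client.get_query_execution(QueryExecutionId=qid)['QueryExecution']['Status']['State']
#   assert state == 'SUCCEEDED'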
|
[
"jack.naglieri@airbnb.com"
] |
jack.naglieri@airbnb.com
|
de483b46af8e92f611afdd077928ed53d03d7f10
|
f8191eddfe8e8894b1b8a9410e94718176f7df69
|
/ComputerVision/src/ImageSorting.py
|
0cc84067af6d1bbc22c90be4e2196c95902fc8cf
|
[
"MIT"
] |
permissive
|
egdavis1/CloserThanMars
|
6a625fa5dba9bd881f2cef5d2cc5359fe3407b0f
|
57049d5b6bf8fee50d84672e8333debcd5d60e1a
|
refs/heads/master
| 2020-04-02T05:05:15.183335
| 2018-10-21T21:17:20
| 2018-10-21T21:17:20
| 154,051,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
#Written by: Sam Dugan and Justin Drapeau
import json
import os
import requests
CONST = "data.txt"
CONST1 = "TheWayYouNibbleOnMyEarTheOnlyWordsIWannaHear.txt"
x1 = open(CONST, "r")
x2 = open(CONST1, "r")
y1 = json.loads(x1.read())
y2 = json.loads(x2.read())
print("Proccessing files.")
z = ["color", "description", "categories"]
# iterate by index so both JSON documents stay in step
for c, _ in enumerate(y1["images"]):
    if y2["images"][c]["metadata"]["format"] == "Jpeg":
        for a in z:
            y1["images"][c][a] = y2["images"][c][a]
        if y1["images"][c]["description"]["captions"] == []:
            y1["images"][c]["description"]["captions"] = [{"confidence": 1, "text": "unknown"}]
        y1["images"][c]["description"]["confidence"] = y1["images"][c]["description"]["captions"][0]["confidence"]
        y1["images"][c]["description"]["text"] = y1["images"][c]["description"]["captions"][0]["text"]
        del y1["images"][c]["description"]["captions"]
        payload_dict = {'data': json.dumps(y1["images"][c])}
        r2 = requests.post('http://e9bf61d8.ngrok.io/api/upload/image', data=payload_dict)
        print("Line ", c, " success ", r2)
    else:
        print("Error on line ", c)
x1.close()
x2.close()
os.remove(CONST)
os.remove(CONST1)
|
[
"davis@davis-VirtualBox.(none)"
] |
davis@davis-VirtualBox.(none)
|
85e5fd7b7fe9549d598dc33f20e5e0931955970f
|
ab6c6559d9cfac36c3c4ece192fa2300767662d1
|
/Python Game Development for Beginners - Working Files/Chapter 4/Checking for intersection Part II/counters.py
|
98eb5ccf9936a6d2850e6e60edaa9f0d5803b2a7
|
[] |
no_license
|
Igor-Nosatov/PythonGameDev_Trinket
|
962b86572c74c64652a24768dfec2101fcae221f
|
e6166f69307ded6880b0aaa3299c0a151807bb9c
|
refs/heads/master
| 2020-06-24T20:22:57.187289
| 2016-05-03T10:33:26
| 2016-05-03T10:33:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
import turtle
from math import sqrt
from time import sleep
counterscreen = turtle.Screen()
counterscreen.reset()
class Counter(turtle.Turtle):
    def __init__(self, coordinates=(160, 170), screen=counterscreen):  # tuple default avoids the mutable-default pitfall
turtle.Turtle.__init__(self)
self.reset()
self.hideturtle()
self.penup()
self.speed(0)
x, y = coordinates
self.goto(x,y)
self.screen = screen
def show(self, message, alignment = "right", size = 18):
self.screen.tracer(0)
self.clear()
self.write(message,font=("Arial",size),align=alignment)
self.screen.tracer(1)
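# Hedged usage sketch (added): a score counter near the top-right of the screen
#   counter = Counter()
#   counter.show("Score: 0")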
|
[
"lrbeaver@gmail.com"
] |
lrbeaver@gmail.com
|