| blob_id (string, 40) | directory_id (string, 40) | path (string, 3-281) | content_id (string, 40) | detected_licenses (list, 0-57) | license_type (2 classes) | repo_name (string, 6-116) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (313 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 18.2k-668M, nullable) | star_events_count (int64, 0-102k) | fork_events_count (int64, 0-38.2k) | gha_license_id (17 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (107 classes) | src_encoding (20 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 4-6.02M) | extension (78 classes) | content (string, 2-6.02M) | authors (list, 1) | author (string, 0-175) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f4fb165252962fe02564d44fc8d8a6cb9eaef1e9
|
c591f5676468a7447f0e4f104c4889debb35c051
|
/resources/idc/__init__.py
|
4a6431ad2c6890dd3d7348b37981f6a9a2f2b983
|
[] |
no_license
|
zhagyilig/Adahome
|
3f3bc1b664bd65964b8befa78405c07da3c8a228
|
76f08be7c21e90bb58803aa1c11be59f66332f42
|
refs/heads/dev
| 2022-12-12T11:51:30.341859
| 2019-07-10T04:22:12
| 2019-07-10T04:22:12
| 149,948,322
| 2
| 4
| null | 2022-12-08T01:01:36
| 2018-09-23T04:39:23
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,671
|
py
|
# coding=utf-8
# author: zhangyiling
from django.shortcuts import render
from django.views.generic import TemplateView, ListView
from django.contrib.auth.mixins import LoginRequiredMixin # login required check
from django.shortcuts import redirect # page redirect
from django.shortcuts import reverse # reverse-resolve a URL by its 'name='
from django.http import HttpResponse
from resources.models import Idc
import json
from resources.forms import CreateIdcForm
'''
1. Add an IDC, using a template view
'''
class AddidcTemView(LoginRequiredMixin, TemplateView):
template_name = 'resources/idc/add_idc.html'
def post(self, request):
'''
Get the data submitted by the add-IDC form
:param request:
:return:
'''
# print(request.POST) # print the data submitted by the form
# print(reverse('success', kwargs={'next': 'user_list'}))
# output: /dashboard/success/user_list/
# print(redirect('success', next='user_list'))
# output: <HttpResponseRedirect status_code=302, "text/html; charset=utf-8", url="/dashboard/success/user_list/">
# reverse
# redirect: the difference between the two: reverse takes a dict of keyword info (kwargs), while redirect takes args and kwargs
""" 更新使用django表单验证
# 第一步: 获取表单数据
name = request.POST.get('name', '')
idc_name = request.POST.get('idc_name', '')
address = request.POST.get('address', '')
phone = request.POST.get('phone', '')
email = request.POST.get('email', '')
username = request.POST.get('username', '')
# Step 2: validate the data (only a simple check here)
error_msg = []
if not name:
error_msg.append('the IDC short name cannot be empty')
if not idc_name:
error_msg.append('idc_name cannot be empty')
if error_msg:
# print(error_msg)
return redirect('error', next='add_idc', msg=json.dumps(error_msg, ensure_ascii=False))
# Step 3: instantiate the model
idc = Idc()
idc.name = name
idc.idc_name = idc_name
idc.address = address
idc.phone = phone
idc.email = email
idc.username = username
try:
idc.save()
except Exception as e:
return redirect('error', next='idc_list', msg=e.args)
return redirect('success', next='idc_list') # return the success page; next is the keyword argument of the success view
# return redirect('error', next='user_list', msg='this is an error page test')# return the error page; next/msg are the keyword arguments of the error view
"""
# Use Django form validation
idcform = CreateIdcForm(request.POST) # request.POST: the data submitted by the form
# print('idcform %s' %idcform)
if idcform.is_valid(): # validate the data
idc = Idc(**idcform.cleaned_data) # cleaned_data: get the validated data
try:
idc.save()
return redirect('success', next='idc_list')
except Exception as e:
return redirect('error', next='idc_list', msg=e.args)
else:
# print(json.dumps(json.loads(idcform.errors.as_json()), ensure_ascii=False))
# return HttpResponse('')
error_msg = json.dumps(json.loads(idcform.errors.as_json()), ensure_ascii=False)
return redirect('error', next='idc_list', msg=error_msg)
'''
2. IDC detail list, using ListView
'''
class IdcListView(LoginRequiredMixin, ListView):
template_name = 'resources/idc/idc_list.html'
model = Idc
paginate_by = 10 # number of entries per page
ordering = 'id' # order the list by id
|
[
"YilingZhang@YilingZhang.local"
] |
YilingZhang@YilingZhang.local
|
986bf659063dbb4023eaaf094cd1d3cccd06ebdb
|
44dbb043e52f00c9a797b1bea8f1df50dd621842
|
/os-example-4.py
|
69064074cfa33ba2ae8384a237bc9351ebad664a
|
[] |
no_license
|
peterdocter/standardmodels
|
140c238d3bef31db59641087e3f3d5413d4baba1
|
7addc313c16b416d0970461998885833614570ad
|
refs/heads/master
| 2020-12-30T16:59:30.489486
| 2016-12-13T06:32:03
| 2016-12-13T06:32:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
import os
# where are we?
cwd = os.getcwd()
print "1", cwd
# go down
os.chdir("samples")
print "2", os.getcwd()
# go back up
os.chdir(os.pardir)
print "3", os.getcwd()
|
[
"415074476@qq.com"
] |
415074476@qq.com
|
95b2abdf3b691a753c2587061a681df8fd8851d1
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/messenger/proto/xmpp/extensions/chat.py
|
567a173fdee232fd567d9e3a472d0a0c272f68b0
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 9,509
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/messenger/proto/xmpp/extensions/chat.py
import calendar
from datetime import datetime
import json
import time
from debug_utils import LOG_CURRENT_EXCEPTION
from messenger.proto.xmpp.extensions import PyExtension, PyHandler, PyQuery
from messenger.proto.xmpp.extensions.dataform import DataForm, Field
from messenger.proto.xmpp.extensions.ext_constants import XML_NAME_SPACE as _NS
from messenger.proto.xmpp.extensions.ext_constants import XML_TAG_NAME as _TAG
from messenger.proto.xmpp.extensions.shared_handlers import IQHandler
from messenger.proto.xmpp.extensions.shared_queries import MessageQuery
from messenger.proto.xmpp.extensions.shared_queries import PresenceQuery
from messenger.proto.xmpp.extensions.wg_items import WgSharedExtension
from messenger.proto.xmpp.gloox_constants import IQ_TYPE, CHAT_STATE, MESSAGE_TYPE_ATTR, PRESENCE
from messenger.proto.xmpp.wrappers import ChatMessage
class ChatStateExtension(PyExtension):
def __init__(self, state=CHAT_STATE.UNDEFINED):
super(ChatStateExtension, self).__init__(state)
self.setXmlNs(_NS.CHAT_STATES)
@classmethod
def getDefaultData(cls):
return CHAT_STATE.UNDEFINED
def getXPath(self, index=None, suffix='', name=None):
if self.getName() == CHAT_STATE.UNDEFINED:
paths = []
getXPath = super(ChatStateExtension, self).getXPath
for state in CHAT_STATE.RANGE:
paths.append(getXPath(index, suffix, state))
name = paths
else:
name = super(ChatStateExtension, self).getXPath(index, suffix, name)
return name
def parseTag(self, pyGlooxTag):
result = pyGlooxTag.filterXPath('|'.join(CHAT_STATE.RANGE))
if result:
state = result[0].getTagName()
if state not in CHAT_STATE.RANGE:
state = self.getDefaultData()
else:
state = self.getDefaultData()
return state
class DelayExtension(PyExtension):
def __init__(self):
super(DelayExtension, self).__init__(_TAG.DELAY)
self.setXmlNs(_NS.DELAY)
@classmethod
def getDefaultData(cls):
return time.time()
def parseTag(self, pyGlooxTag):
stamp = pyGlooxTag.findAttribute('stamp')
if stamp:
try:
tm = time.strptime(stamp, '%Y-%m-%dT%H:%M:%SZ')
tm = tm[0:8] + (0,)
sentAt = calendar.timegm(tm)
except ValueError:
try:
dt = datetime.strptime(stamp, '%Y-%m-%dT%H:%M:%S.%fZ')
sentAt = calendar.timegm(dt.timetuple()) + dt.microsecond / 1000000.0
except ValueError:
LOG_CURRENT_EXCEPTION()
sentAt = self.getDefaultData()
else:
sentAt = self.getDefaultData()
return sentAt
class MessageIDExtension(PyExtension):
def __init__(self):
super(MessageIDExtension, self).__init__(_TAG.WG_MESSAGE_ID)
self.setXmlNs(_NS.WG_MESSAGE_ID)
@classmethod
def getDefaultData(cls):
pass
def parseTag(self, pyGlooxTag):
return pyGlooxTag.findAttribute('uuid')
class ChatHistoryQuery(PyExtension):
def __init__(self, jid, limit):
super(ChatHistoryQuery, self).__init__(_TAG.QUERY)
self.setXmlNs(_NS.WG_PRIVATE_HISTORY)
self.setAttribute('with', str(jid))
self.setAttribute('limit', limit)
class PrivateHistoryItem(PyExtension):
def __init__(self):
super(PrivateHistoryItem, self).__init__(_TAG.WG_PRIVATE_HISTORY)
self.setXmlNs(_NS.WG_PRIVATE_HISTORY)
@classmethod
def getDefaultData(cls):
return ('', False)
def parseTag(self, pyGlooxTag):
requestID = pyGlooxTag.findAttribute('request-id')
isFinal = pyGlooxTag.findAttribute('final')
if isFinal:
isFinal = json.loads(isFinal)
else:
isFinal = False
return (requestID, isFinal)
class _MucPrivilegesExtension(PyExtension):
def __init__(self, affiliation='', role=''):
super(_MucPrivilegesExtension, self).__init__(_TAG.WG_MUC_PRIVILEGES)
self.setAttribute('affiliation', affiliation)
self.setAttribute('role', role)
@classmethod
def getDefaultData(cls):
pass
def parseTag(self, pyGlooxTag):
affiliation = pyGlooxTag.findAttribute('affiliation') or 'none'
role = pyGlooxTag.findAttribute('role') or 'none'
return (affiliation, role)
class MessageWgSharedExtension(WgSharedExtension):
def __init__(self, includeNS=True):
super(MessageWgSharedExtension, self).__init__(includeNS)
self.setChild(_MucPrivilegesExtension())
@classmethod
def getDefaultData(cls):
return super(MessageWgSharedExtension, cls).getDefaultData()
def parseTag(self, pyGlooxTag):
info = super(MessageWgSharedExtension, self).parseTag(pyGlooxTag)
affiliation, role = self._getChildData(pyGlooxTag, 0, _MucPrivilegesExtension.getDefaultData())
info['affiliation'] = affiliation
info['role'] = role
return info
class _MessageCustomExtension(PyExtension):
def __init__(self, msgType, state=CHAT_STATE.UNDEFINED):
super(_MessageCustomExtension, self).__init__(_TAG.MESSAGE)
self.setAttribute('type', msgType)
self.setChild(ChatStateExtension(state))
self.setChild(MessageWgSharedExtension(False))
self.setChild(DelayExtension())
self.setChild(MessageIDExtension())
self.setChild(PrivateHistoryItem())
@classmethod
def getDefaultData(cls):
return ChatMessage()
def parseTag(self, pyGlooxTag):
message = ChatMessage()
message.state = self._getChildData(pyGlooxTag, 0, ChatStateExtension.getDefaultData())
info = self._getChildData(pyGlooxTag, 1, MessageWgSharedExtension.getDefaultData())
if info:
message.accountDBID = info['dbID']
message.accountName = info['name']
message.accountRole = info['role']
message.accountAffiliation = info['affiliation']
message.sentAt = self._getChildData(pyGlooxTag, 2, DelayExtension.getDefaultData())
message.uuid = self._getChildData(pyGlooxTag, 3, MessageIDExtension.getDefaultData())
message.requestID, message.isFinalInHistory = self._getChildData(pyGlooxTag, 4, PrivateHistoryItem.getDefaultData())
return message
class ChatMessageHolder(MessageQuery):
def __init__(self, msgType, to, msgBody='', state=CHAT_STATE.UNDEFINED):
if state:
ext = ChatStateExtension(state)
else:
ext = None
super(ChatMessageHolder, self).__init__(msgType, to, msgBody, ext)
return
class MessageHandler(PyHandler):
__slots__ = ('_typeAttr',)
def __init__(self, typeAttr):
self._typeAttr = typeAttr
super(MessageHandler, self).__init__(_MessageCustomExtension(self._typeAttr, CHAT_STATE.UNDEFINED))
def getFilterString(self):
return "/{0}[@type='{1}']".format(self._ext.getName(), self._typeAttr)
class ChatMessageHandler(MessageHandler):
def __init__(self):
super(ChatMessageHandler, self).__init__(MESSAGE_TYPE_ATTR.CHAT)
class GetChatHistoryQuery(PyQuery):
def __init__(self, jid, limit):
super(GetChatHistoryQuery, self).__init__(IQ_TYPE.GET, ChatHistoryQuery(jid, limit))
class MUCEntryQuery(PresenceQuery):
def __init__(self, to):
super(MUCEntryQuery, self).__init__(PRESENCE.AVAILABLE, to)
class MUCLeaveQuery(PresenceQuery):
def __init__(self, to):
super(MUCLeaveQuery, self).__init__(PRESENCE.UNAVAILABLE, to)
class OwnerConfigurationForm(PyExtension):
def __init__(self, fields=None):
super(OwnerConfigurationForm, self).__init__(_TAG.QUERY)
self.setXmlNs(_NS.MUC_OWNER)
self.setChild(DataForm(fields))
@classmethod
def getDefaultData(cls):
return DataForm.getDefaultData()
def parseTag(self, pyGlooxTag):
return self._getChildData(pyGlooxTag, 0, DataForm.getDefaultData())
class OwnerConfigurationFormQuery(PyQuery):
def __init__(self, to):
super(OwnerConfigurationFormQuery, self).__init__(IQ_TYPE.GET, OwnerConfigurationForm(), to)
class OwnerConfigurationFormSet(PyQuery):
def __init__(self, to, fields):
super(OwnerConfigurationFormSet, self).__init__(IQ_TYPE.SET, OwnerConfigurationForm(fields), to)
class OwnerConfigurationFormHandler(IQHandler):
def __init__(self):
super(OwnerConfigurationFormHandler, self).__init__(OwnerConfigurationForm())
class UserRoomConfigurationFormSet(OwnerConfigurationFormSet):
def __init__(self, to, room, password=''):
fields = (Field('text-single', 'muc#roomconfig_roomname', room),
Field('boolean', 'muc#roomconfig_persistentroom', 1),
Field('boolean', 'muc#roomconfig_publicroom', 1),
Field('boolean', 'muc#roomconfig_membersonly', 0),
Field('boolean', 'muc#roomconfig_allowinvites', 1),
Field('boolean', 'muc#roomconfig_survive_reboot', 1))
if password:
fields += (Field('boolean', 'muc#roomconfig_passwordprotectedroom', 1), Field('text-single', 'muc#roomconfig_roomsecret', password))
super(UserRoomConfigurationFormSet, self).__init__(to, fields)
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
a497ba217122e7b18367fa57adc6a0602064311d
|
eb333acea85364d39f2811ae368dd35bc84392f0
|
/exts/counting.py
|
0b1623741328e7c6745febe4359c2f8f373a044b
|
[] |
no_license
|
blueeidk/vendetta
|
7312b37e469ba2abbb46be07ba84365086f0cac3
|
e697dd3ebc224d50399dd8c4c0ee1d8f67085151
|
refs/heads/master
| 2023-04-12T19:22:13.009886
| 2021-05-10T20:29:42
| 2021-05-10T20:29:42
| 366,365,871
| 0
| 0
| null | 2021-05-11T12:01:11
| 2021-05-11T11:58:46
| null |
UTF-8
|
Python
| false
| false
| 1,939
|
py
|
import discord
from discord.ext import commands, tasks
from discord import Webhook, AsyncWebhookAdapter
class Counting(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.current_num = -1
self.fetch_num.start()
def cog_unload(self):
self.fetch_num.cancel()
@tasks.loop(seconds=60*1)
async def fetch_num(self):
await self.bot.wait_until_ready()
channel = self.bot.get_channel(self.bot.config["counting_channel"])
async for message in channel.history(limit=100):
try:
self.current_num = int(message.content)
break
except ValueError:
continue
if self.current_num == -1:
self.current_num = 0
@commands.Cog.listener()
async def on_message(self, message):
if message.channel.id == self.bot.config["counting_channel"] and not message.author.bot:
await message.delete()
try:
if int(message.content) != self.current_num + 1:
raise ValueError
except ValueError:
webhook = Webhook.from_url(self.bot.config["counting_webhookurl"],
adapter=AsyncWebhookAdapter(self.bot.session))
await webhook.send(message.content, username=message.author.name, avatar_url=message.author.avatar_url)
self.current_num = 0
await message.channel.send("Looks like someone made a mistake! Lets start again:")
await message.channel.send("0")
return
webhook = Webhook.from_url(self.bot.config["counting_webhookurl"], adapter=AsyncWebhookAdapter(self.bot.session))
await webhook.send(message.content, username=message.author.name, avatar_url=message.author.avatar_url)
self.current_num += 1
def setup(bot):
bot.add_cog(Counting(bot))
|
[
"niteblock@gmail.com"
] |
niteblock@gmail.com
|
8bacb8e843f98006b0d409848f10edb92140f035
|
f160cf4eb335ea799559312ac3d43a60c2c5848b
|
/library/zip_extract.py
|
e1f1faecce940706c2ead17d0b449c0c1525aa28
|
[
"MIT"
] |
permissive
|
baseplate-admin/Machine-Learning-Source-Code
|
c3389e0acb81e1f4c8e4c0cc763fcbc3781ef94e
|
a2203033d525c17b31584b52527c30e2c8aad1c4
|
refs/heads/master
| 2022-11-21T04:33:41.307477
| 2020-07-10T15:46:32
| 2020-07-10T15:46:32
| 277,730,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
def zip_extract():
import os
from zipfile import ZipFile
def zip_function():
print("We are extracting ZIP!!!")
where_is_zip=input("What is your zip location?")
what_is_zip_name=input("What is your zip name?")
what_is_zip_extension=input("What is your ZIP format?")
zip_join=os.path.join(where_is_zip,what_is_zip_name+ '.'+ what_is_zip_extension)
with ZipFile(zip_join,"r") as zip:
zip.extractall()
zip.printdir()
print("Enter a Number or It will cause ValueError.")
how_many_zip=int(input('How many zip do you want to extract?'))
try:
print("""
This is a number!!
Lets Go!!!
""")
for i in range(how_many_zip):
ask_if_zip_extract=input("""
Do you want to extract zip?
Enter 0 to skip extracting zip.
Enter 1 to extract ZIP.
""")
if int(ask_if_zip_extract)==0:
continue
elif int(ask_if_zip_extract)==1:
zip_function()
else:
print("Theres a problem with zip extract.")
except Exception as e:
print(e)
|
[
"61817579+baseplate-admin@users.noreply.github.com"
] |
61817579+baseplate-admin@users.noreply.github.com
|
76b07fab07edb0667ffdda682c409887fdab50cc
|
2cf99a155405b48bf14f872e1980ed948079e5dd
|
/test/test_router.py
|
a30b567e256a3ea2fe3ba97d23c6ab0b5d1539e8
|
[
"MIT"
] |
permissive
|
marrow/web.dispatch.route
|
c15309a26023d068b8f84ea4bbc221b674c1e6b8
|
92494bcad2e2a9a52d2e51eecfab910d829cc2de
|
refs/heads/master
| 2021-01-25T04:01:46.245851
| 2016-02-15T07:54:36
| 2016-02-15T07:54:36
| 32,564,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,820
|
py
|
# encoding: utf-8
import pytest
from web.dispatch.route.router import __DYNAMIC__, Router
from sample import Root
@pytest.fixture
def router():
return Router.from_object(Root)
def test_dynamic_repr():
assert repr(__DYNAMIC__) == '<dynamic element>'
def test_router_singleton():
assert Router.from_object(Root) is Router.from_object(Root)
def test_invalid_route():
router = Router()
with pytest.raises(ValueError):
router.parse("{bad:/}")
class TestRouterSample(object):
def test_single_static(self, router):
assert len(router.routes) == 1 # There's only a single top-level element.
assert 'user' in router.routes # It's "user".
assert len(router.routes['user']) == 2 # Which has a terminus and dynamic continuation.
assert router.routes['user'][None] == Root.root # The terminus is the "root" method.
assert router.routes['user'][None](Root()) == "I'm all people." # It really is.
def test_dynamic_username(self, router):
assert __DYNAMIC__ in router.routes['user']
dynamic = router.routes['user'][__DYNAMIC__]
assert len(dynamic) == 1
assert list(dynamic.keys())[0].match("GothAlice") # The regular expression matches.
assert len(list(dynamic.values())[0]) == 2
assert list(dynamic.values())[0][None] == Root.user
assert list(dynamic.values())[0][None](Root(), "GothAlice") == "Hi, I'm GothAlice"
def test_dynamic_username_action(self, router):
assert __DYNAMIC__ in router.routes['user']
dynamic = router.routes['user'][__DYNAMIC__]
assert len(dynamic) == 1
assert list(dynamic.keys())[0].match("GothAlice") # The regular expression matches.
assert len(list(dynamic.values())[0]) == 2
assert list(dynamic.values())[0][None] == Root.user
assert list(dynamic.values())[0][None](Root(), "GothAlice") == "Hi, I'm GothAlice"
|
[
"alice@gothcandy.com"
] |
alice@gothcandy.com
|
05fd2afde8a2efa035b5c2ee861b1f0e9b62fc97
|
8bdf78e902a02e3bd175e759fc98fd37277247af
|
/youtube_dl/extractor/mangomolo.py
|
2db503f2b13dc8499a6f665ef97d3e09cfcdf35b
|
[
"Unlicense",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
oxidius2/youtube-dl
|
191f5bde4992313308d2ab010cdb82ecd0d1b654
|
30d9e20938fa91ece09c376b67030647215d48df
|
refs/heads/master
| 2017-03-20T13:01:36.106539
| 2016-09-16T21:06:55
| 2016-09-16T21:06:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,832
|
py
|
# coding: utf-8
from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
int_or_none,
)
class MangomoloBaseIE(InfoExtractor):
def _get_real_id(self, page_id):
return page_id
def _real_extract(self, url):
page_id = self._get_real_id(self._match_id(url))
webpage = self._download_webpage(url, page_id)
hidden_inputs = self._hidden_inputs(webpage)
m3u8_entry_protocol = 'm3u8' if self._IS_LIVE else 'm3u8_native'
format_url = self._html_search_regex(
[
r'file\s*:\s*"(https?://[^"]+?/playlist.m3u8)',
r'<a[^>]+href="(rtsp://[^"]+)"'
], webpage, 'format url')
formats = self._extract_wowza_formats(
format_url, page_id, m3u8_entry_protocol, ['smil'])
self._sort_formats(formats)
return {
'id': page_id,
'title': self._live_title(page_id) if self._IS_LIVE else page_id,
'uploader_id': hidden_inputs.get('userid'),
'duration': int_or_none(hidden_inputs.get('duration')),
'is_live': self._IS_LIVE,
'formats': formats,
}
class MangomoloVideoIE(MangomoloBaseIE):
IE_NAME = 'mangomolo:video'
_VALID_URL = r'https?://admin\.mangomolo.com/analytics/index\.php/customers/embed/video\?.*?\bid=(?P<id>\d+)'
_IS_LIVE = False
class MangomoloLiveIE(MangomoloBaseIE):
IE_NAME = 'mangomolo:live'
_VALID_URL = r'https?://admin\.mangomolo.com/analytics/index\.php/customers/embed/index\?.*?\bchannelid=(?P<id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)'
_IS_LIVE = True
def _get_real_id(self, page_id):
return base64.b64decode(compat_urllib_parse_unquote(page_id).encode()).decode()
|
[
"remitamine@gmail.com"
] |
remitamine@gmail.com
|
97d55e2aec24c8c3c273787b6a0bfb6e207c6ee0
|
c261f0e98eedb4f0d85e92bd6ab8f4ae47096269
|
/lifeservice/schedule117/04美食下载团购糯米/getNuomiOtherCinemaMap.py
|
7e6d7d90119847ca9a6a6e964889df38e7707452
|
[] |
no_license
|
ShenDezhou/CPP
|
24379fe24f3c8588a7859ee586527d5cc6bfbe73
|
933c1e764a6ed2879b26aa548ff67153ca026bf6
|
refs/heads/master
| 2021-01-11T22:09:24.900695
| 2017-04-05T02:04:07
| 2017-04-05T02:04:07
| 78,928,291
| 0
| 1
| null | null | null | null |
GB18030
|
Python
| false
| false
| 1,328
|
py
|
#coding=gb2312
nuomiCinemaMap = dict()
otherCinemaMap = dict()
input = '/fuwu/Merger/Output/movie/cinema_movie_rel.table'
for line in open(input):
segs = line.strip('\n').decode('gb2312', 'ignore').split('\t')
cinemaid, source, ting = segs[1], segs[3], segs[9]
if source.find(u'糯米') != -1:
if cinemaid not in nuomiCinemaMap:
nuomiCinemaMap[cinemaid] = []
if ting not in nuomiCinemaMap[cinemaid]:
nuomiCinemaMap[cinemaid].append(ting)
else:
if cinemaid not in otherCinemaMap:
otherCinemaMap[cinemaid] = []
if ting not in otherCinemaMap[cinemaid]:
otherCinemaMap[cinemaid].append(ting)
# Are all hall names of the Nuomi cinemas covered by the other sources?
for cinemaid in otherCinemaMap:
if cinemaid not in nuomiCinemaMap:
#print ('#%s\t%s\t%s' % (cinemaid, u'糯米', '\t'.join(nuomiCinemaMap[cinemaid]))).encode('gb2312', 'ignore')
continue
noMatchTingList = []
for ting in nuomiCinemaMap[cinemaid]:
if ting not in otherCinemaMap[cinemaid]:
noMatchTingList.append(ting)
if len(noMatchTingList) == 0:
continue
# There is an inconsistency
normTing = '\t'.join(otherCinemaMap[cinemaid])
noMatchTing = '\t'.join(noMatchTingList)
print ('%s\t%s\t%s' % (cinemaid, u'非糯米', normTing)).encode('gb2312', 'ignore')
print ('%s\t%s\t%s' % (cinemaid, u'糯米', noMatchTing)).encode('gb2312', 'ignore')
|
[
"bangtech@sina.com"
] |
bangtech@sina.com
|
2cbf9ce5648b670ee81e72a542610d78690a54f4
|
1097ed333a4000634e68a590ee6ffc6129ae61e3
|
/written_examination/matrix8.py
|
017cb25ae0dcc0f546bd9b3cf05825723bb344a7
|
[
"MIT"
] |
permissive
|
AutuanLiu/Code-Storm2019
|
1bbe890c7ca0d033c32348173bfebba612623a90
|
8efc7c5475fd888f7d86c3b08a3c1c9e55c1ac30
|
refs/heads/master
| 2020-04-23T07:03:08.975232
| 2019-10-24T08:56:26
| 2019-10-24T08:56:26
| 170,995,032
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,513
|
py
|
def getSum(i, j, n, m, maps): # [i, j] entry cell of a single formation, [n, m] matrix dimensions, maps is the matrix
queue, sump, maps[i][j] = [[i, j]], maps[i][j], 0 # initialize the queue
while queue:
x, y = queue[0][0], queue[0][1] # take the element at the head of the queue
for dx, dy in zip((-1, -1, 0, 1, 1, 1, 0, -1), (0, 1, 1, 1, 0, -1, -1, -1)): # 8 directions
nx, ny = x + dx, y + dy
if -1 < nx < n and -1 < ny < m and maps[nx][ny] != 0:
queue.append([nx, ny]) # enqueue
sump += maps[nx][ny] # accumulate troop strength
maps[nx][ny] = 0 # zero out a cell once its strength has been counted
del queue[0] # dequeue
return sump # return the total strength of this single formation
if __name__ == '__main__':
maps = [[34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 30], [0, 23, 10, 5, 5, 0, 0, 0, 5, 5, 5, 5, 5, 0, 0, 0, 30, 0, 40, 0],
[0, 9, 0, 0, 5, 0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0, 0, 30, 0, 0], [0, 8, 7, 7, 0, 5, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 7, 0, 9, 0],
[0, 9, 0, 0, 5, 0, 5, 0, 0, 12, 12, 0, 0, 0, 0, 10, 0, 0, 0, 9], [0, 0, 0, 0, 5, 0, 0, 5, 0, 12, 12, 0, 0, 5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 12, 0, 0, 5, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0], [40, 30, 3, 6, 6, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 10, 0],
[0, 0, 20, 0, 0, 6, 6, 0, 0, 0, 0, 0, 0, 0, 5, 6, 5, 10, 10, 0], [40, 30, 3, 7, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 10, 0],
[0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 0, 17, 0, 0, 6, 5, 7, 7, 0], [0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 20, 0, 0, 7, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 0, 10, 0, 0, 0], [0, 20, 0, 0, 7, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 0, 10, 0, 0, 0],
[0, 20, 0, 0, 7, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 0, 10, 0, 0, 0], [0, 30, 0, 7, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 10, 0, 50],
[0, 40, 7, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0, 50, 0], [43, 30, 25, 10, 50, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0, 0, 0, 50, 0, 0]]
n, m = 20, 20 # number of rows and columns
army = []
for i in range(20):
for j in range(20):
if maps[i][j] != 0:
army.append(getSum(i, j, n, m, maps)) # get the strength sum of each single formation
print('Strength of each formation:', army)
print('Largest formation strength:', max(army))
print('Smallest formation strength:', min(army))
|
[
"autuanliu@163.com"
] |
autuanliu@163.com
|
a31be73325befa7634569a9b289ebac7e238c219
|
f4bdd0d988ed63ed314f5703abd3543cded9f49e
|
/Amazon/Reviews & Big Data Analytics/Amazon_LDA.py
|
32ae2a94f52d0aa94ba4eaf229433dab27abf4ff
|
[] |
no_license
|
jessicakaye/Python-Projects
|
643f0e1808163187cfe3db7d5adff800e2e3a98c
|
8365e84f110b53df2bd54604f2206e9bc1f09617
|
refs/heads/master
| 2022-05-02T07:37:09.591545
| 2022-03-10T01:28:39
| 2022-03-10T01:28:39
| 253,980,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,244
|
py
|
# Amazon_LDA.py
# 4/28/20
# @jessicakaye
# Used to conduct LDA on the top 10 most reviewed Amazon products in a dataset
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from wordcloud import WordCloud
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from time import time
pd.set_option('display.max_columns', None)
# Load the dataset!
df = pd.read_json('AmazonData_text_processed_full.json', lines = True)
print(df)
print(df.describe())
# Let's drop those duplicates
df.drop_duplicates(['overall', 'reviewText', 'reviewTime', 'asin', 'reviewerID'], inplace=True)
#plot for all of the products
plt.figure(figsize=(16,10))
ax = sns.countplot(x='asin', data = df, palette = 'Set1', order=df['asin'].value_counts().index)
plt.xlabel('ASIN', fontsize=12)
plt.ylabel('Count', fontsize=12)
total = float(len(df))
for p in ax.patches:
height = p.get_height()
ax.text(p.get_x()+p.get_width()/2.,
height + 10,
'{}'.format(height),
ha="center")
plt.title("Count of Reviews Per ASIN")
plt.savefig("Count of Reviews Per ASIN.png")
#Distribution of Ratings!
plt.figure()
ax = sns.countplot(x='overall', data=df, palette='Set1', order=df['overall'].value_counts().index)
plt.xlabel('overall', fontsize=12)
plt.ylabel('Count', fontsize=12)
total = float(len(df))
for p in ax.patches:
height = p.get_height()
ax.text(p.get_x() + p.get_width() / 2.,
height + 10,
'{0:.0%}'.format(height / total),
ha="center")
plt.title("Count of Reviews Per Rating")
plt.savefig("Count of Reviews Per Rating.png")
# Distribution of NPS Categories!
plt.figure()
ax = sns.countplot(x='nps_category', data=df, palette='Set1', order=df['nps_category'].value_counts().index)
plt.xlabel('nps_category', fontsize=12)
plt.ylabel('Count', fontsize=12)
total = float(len(df))
for p in ax.patches:
height = p.get_height()
ax.text(p.get_x() + p.get_width() / 2.,
height + 10,
'{0:.0%}'.format(height / total),
ha="center")
plt.title("Count of Reviews Per NPS Category")
plt.savefig("Count of Reviews Per NPS Category.png")
# Let's create a wordcloud!
wordcloud = WordCloud(background_color="white", max_words=5000, contour_width=3, contour_color='steelblue')
wordcloud.generate(df['filtered'].to_string())
# plot the wordcloud!
plt.figure(figsize=(16,10))
plt.imshow(wordcloud, interpolation="bilinear")
plt.savefig('wordcloudoftop10products')
# Let's optimize our df and try using CountVectorizer
# I already have these columns from text processing in Spark, but I want to try the following in sklearn
amazon_df = df.drop(labels=['raw_features', 'features'], axis=1)
# Let's create a list of all of the different ASINs
list_asins = amazon_df.asin.unique()
sns.set_style('whitegrid')
# Helper function
def plot_10_most_common_words(asin, count_data, count_vectorizer):
words = count_vectorizer.get_feature_names()
total_counts = np.zeros(len(words))
for t in count_data:
total_counts += t.toarray()[0]
count_dict = (zip(words, total_counts))
count_dict = sorted(count_dict, key=lambda x: x[1], reverse=True)[0:10]
words = [w[0] for w in count_dict]
counts = [w[1] for w in count_dict]
x_pos = np.arange(len(words))
plt.figure(2, figsize=(15, 15 / 1.6180))
plt.subplot(title=f'10 most common words for {asin}')
sns.set_context("notebook", font_scale=1.25, rc={"lines.linewidth": 2.5})
sns.barplot(x_pos, counts, palette='husl')
plt.xticks(x_pos, words, rotation=90)
plt.xlabel('words')
plt.ylabel('counts')
plt.tight_layout()
plt.savefig(f'{asin}_topwords.png')
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
message = "Topic #%d: " % topic_idx
message += " ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]])
print(message)
print()
def topics_words(model, feature_names, n_top_words):
topics = []
words =[]
for topic_idx, topic in enumerate(model.components_):
topics.append(topic_idx)
words.append([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]])
new_df = pd.DataFrame(list(zip(topics, words)), columns=['topicID', 'words'])
return new_df
n_top_words = 6
n_components = 7
all_words_and_topics = pd.DataFrame(columns=['topicID', 'words', 'asin', 'num documents'])
all_asins_df = pd.DataFrame(columns=list(amazon_df.columns.values))
# We want to find the top words per product. Let's create a loop.
for asin in list_asins:
asin_df = amazon_df.loc[amazon_df['asin'] == str(asin)]
asin_df.reset_index(inplace=True)
# Initialise the count vectorizer with the English stop words
# We are going to use the raw term count for LDA
print("Extracting tf features for LDA...")
stop_words = ENGLISH_STOP_WORDS
cv = CountVectorizer(stop_words='english', analyzer=lambda x:[w for w in x if w not in stop_words])
# Fit and transform the processed titles
t0 = time()
count_vector = cv.fit_transform(asin_df['filtered'])
print("done in %0.3fs." % (time() - t0))
print()
# Materialize the sparse data
data_dense = count_vector.todense()
# Compute sparsity = percentage of non-zero cells
print("Sparsity: ", ((data_dense > 0).sum() / data_dense.size) * 100, "%")
# Visualise the 10 most common words
plot_10_most_common_words(asin, count_vector, cv)
print("Fitting LDA models with tf features...")
lda = LatentDirichletAllocation(n_components=n_components, learning_method='online')
t0 = time()
# This is the Document - Topic Matrix
lda_output = lda.fit_transform(count_vector)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = cv.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
# Log Likelihood: Higher the better
print("Log Likelihood: ", lda.score(count_vector))
# Perplexity: Lower the better. Perplexity = exp(-1. * log-likelihood per word)
print("Perplexity: ", lda.perplexity(count_vector))
# See model parameters
# print(lda.get_params())
# column names
topicnames = ["Topic" + str(i) for i in range(lda.n_components)]
# index names
docnames = ["Doc" + str(i) for i in range(asin_df.shape[0])]
# Make the pandas dataframe
df_document_topic = pd.DataFrame(np.round(lda_output, 2), columns=topicnames)#, index=docnames)
# Get dominant topic for each document
dominant_topic = np.argmax(df_document_topic.values, axis=1)
df_document_topic['dominant_topic_weight'] = np.amax(df_document_topic, axis=1)
df_document_topic['dominant_topic'] = dominant_topic
print(df_document_topic)
asin_df = asin_df.join(df_document_topic['dominant_topic'].astype('int'), how = 'inner')
asin_df = asin_df.join(df_document_topic['dominant_topic_weight'], how='inner')
all_asins_df = pd.concat([all_asins_df, asin_df])
#What is the topic distribution across documents?
df_topic_distribution = df_document_topic['dominant_topic'].value_counts().reset_index(name="num documents")
df_topic_distribution.columns = ['topicID', 'num documents']
print(df_topic_distribution)
asintw = topics_words(lda, tf_feature_names, n_top_words)
asintw['asin'] = asin
asintw = asintw.merge(df_topic_distribution, on = "topicID", how = "inner")
all_words_and_topics = pd.concat([all_words_and_topics, asintw])
print(all_words_and_topics)
print(all_asins_df)
all_asins_df.to_csv('all_asins_and_indices.csv')
all_words_and_topics.to_csv('all_words_and_topics.csv')
#
#
# # plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
e2a2d639b617529303a24cb365818a069f9e4628
|
423e396e226494c34f99851cc050d929f3f144c8
|
/posts/admin.py
|
cb3ff4597adc8ff8a87e027e420a3d4c0b3387da
|
[] |
no_license
|
Marihuana-Kox/hw05_final
|
1ff1a34cdcb9d66fe715ffbf8d9f5fb0d0ca2820
|
77a20ac2571fec13b979e763859de6f2bce43537
|
refs/heads/master
| 2022-12-09T13:53:21.195711
| 2020-03-10T17:45:21
| 2020-03-10T17:45:21
| 243,992,895
| 0
| 0
| null | 2022-12-08T07:24:27
| 2020-02-29T15:27:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,129
|
py
|
from django.contrib import admin
from .models import Post, Group, Comment
class PostAdmin(admin.ModelAdmin):
# list the fields that should be shown in the admin
list_display = ("pk", "text", "pub_date", "author")
# add an interface for searching the text of posts
search_fields = ("text",)
# add the ability to filter by date
list_filter = ("pub_date", "author")
# this property applies to all columns: wherever a cell is empty, this string is shown instead
empty_value_display = '-empty-'
class CommentAdmin(admin.ModelAdmin):
list_display = ("pk", "text", "author", "created")
search_fields = ("text",)
list_filter = ("created", "author")
# when registering the Post model, assign the PostAdmin class as its configuration source
admin.site.register(Post, PostAdmin)
admin.site.register(Group)
admin.site.register(Comment, CommentAdmin)
|
[
"yakuhs@yandex.ru"
] |
yakuhs@yandex.ru
|
897350387fa941830a98c5edbca3834b1d382a04
|
77e0adf27f8ce8ada31937045d31d063f6661434
|
/noteapp/serializers.py
|
d79624bd60e6d29c39a0ea99f8d0c5c9c37ab2a7
|
[] |
no_license
|
naveenijeri/urbanstop_drf
|
f84185d6e1ba043e96535e67429d1cf421430eee
|
33dfe71507cc02d85e5e1b1e19efc40eed24c4f4
|
refs/heads/master
| 2021-09-23T09:22:58.472057
| 2020-03-14T08:31:26
| 2020-03-14T08:31:26
| 247,235,337
| 0
| 0
| null | 2021-09-22T18:43:36
| 2020-03-14T07:56:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,354
|
py
|
from .models import NoteModel,UserModel
from rest_framework import serializers
class UserModelSerializer(serializers.ModelSerializer):
class Meta:
model=UserModel
fields=('username',)
class NoteModelSerializer(serializers.ModelSerializer):
user_note = UserModelSerializer(many=True)
class Meta:
model=NoteModel
fields=('id','note_text','created_date','updated_date','user_note')
def create(self, validated_data):
user_data = validated_data.pop('user_note')
note = NoteModel.objects.create(**validated_data)
for user_data in user_data:
UserModel.objects.create(notemodel=note, **user_data)
return note
def update(self, instance, validated_data):
user_data = validated_data.pop('user_note')
users = (instance.user_note).all()
users = list(users)
instance.note_text = validated_data.get('note_text', instance.note_text)
instance.created_date = validated_data.get('created_date', instance.created_date)
instance.updated_date = validated_data.get('updated_date', instance.updated_date)
instance.save()
for user_data in user_data:
user = users.pop(0)
user.username = user_data.get('username', user.username)
user.save()
return instance
|
[
"naveen.ijeri123@gmail.com"
] |
naveen.ijeri123@gmail.com
|
ada7809ed008445486cb53ed74ffb2f3f533ab06
|
c05ed32f1ef7e1eb7d73efd674e7d1fd710ad171
|
/daily-coding-problems/problem429.py
|
f131f4e79b05103324b498c75f6d6f5240e45cd3
|
[] |
no_license
|
carlhinderer/python-exercises
|
c8367517fdf835fa1117f96dbfee3dccc596afa6
|
4e09bbb4c4e2bd5644ed50e997db9f3c289a18f7
|
refs/heads/master
| 2021-06-01T16:17:00.389134
| 2021-02-09T18:21:01
| 2021-02-09T18:21:01
| 150,902,917
| 0
| 0
| null | 2021-04-20T20:33:11
| 2018-09-29T21:03:36
|
Python
|
UTF-8
|
Python
| false
| false
| 533
|
py
|
# Problem 429
# Medium
# Asked by Stitch Fix
#
# Pascal's triangle is a triangular array of integers constructed with the
# following formula:
#
# The first row consists of the number 1.
#
# For each subsequent row, each element is the sum of the numbers directly
# above it, on either side.
#
# For example, here are the first few rows:
#
# 1
# 1 1
# 1 2 1
# 1 3 3 1
# 1 4 6 4 1
#
# Given an input k, return the kth row of Pascal's triangle.
#
# Bonus: Can you do this using only O(k) space?
#
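# Below is a minimal sketch of one possible answer, added for illustration; it is
# not part of the original problem file. It builds the kth row in place, updating
# from right to left so each cell still holds the previous row's value when it is
# read, which keeps the working space at O(k).
def pascal_row(k):
    """Return the kth (0-indexed) row of Pascal's triangle using O(k) space."""
    row = [1] * (k + 1)
    for i in range(1, k + 1):
        # Walk right to left so row[j - 1] is still the value from the previous row.
        for j in range(i - 1, 0, -1):
            row[j] += row[j - 1]
    return row
# Example: pascal_row(4) -> [1, 4, 6, 4, 1]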
|
[
"carl.hinderer4@gmail.com"
] |
carl.hinderer4@gmail.com
|
7352b0e05bca2fbe6125d96a47f9b75c32c44715
|
542b256178e8f0d9a30423fc6eed23b021cf4a64
|
/Mask_RCNN-master/model.py
|
8dc408116a11f74ca04d412646ebcdb46547ce55
|
[
"MIT"
] |
permissive
|
gtagency/Project_Nucleus
|
caed1b9cec3e49a93f43b501e4e6de7e3cbe3ad5
|
a14632a682915f3f389af53817f692cf6e57357d
|
refs/heads/master
| 2021-04-28T01:11:22.146707
| 2018-05-11T01:00:47
| 2018-05-11T01:00:47
| 122,269,451
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 111,323
|
py
|
"""
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import sys
import glob
import random
import math
import datetime
import itertools
import json
import re
import logging
from collections import OrderedDict
import numpy as np
import scipy.misc
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.initializers as KI
import keras.engine as KE
import keras.models as KM
import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
prints it's shape, min, and max values.
"""
if array is not None:
text = text.ljust(25)
text += ("shape: {:20} min: {:10.5f} max: {:10.5f}".format(
str(array.shape),
array.min() if array.size else "",
array.max() if array.size else ""))
print(text)
class BatchNorm(KL.BatchNormalization):
"""Batch Normalization class. Subclasses the Keras BN class and
hardcodes training=False so the BN layer doesn't update
during training.
Batch normalization has a negative effect on training if batches are small
so we disable it here.
"""
def call(self, inputs, training=None):
return super(self.__class__, self).call(inputs, training=False)
############################################################
# Resnet Graph
############################################################
# Code adapted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
use_bias=True):
"""The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
use_bias=use_bias)(input_tensor)
x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)
x = KL.Add()([x, input_tensor])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
strides=(2, 2), use_bias=True):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
And the shortcut should have subsample=(2,2) as well
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
'2c', use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)
shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
shortcut = BatchNorm(axis=3, name=bn_name_base + '1')(shortcut)
x = KL.Add()([x, shortcut])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def resnet_graph(input_image, architecture, stage5=False):
assert architecture in ["resnet50", "resnet101"]
# Stage 1
x = KL.ZeroPadding2D((3, 3))(input_image)
x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
x = BatchNorm(axis=3, name='bn_conv1')(x)
x = KL.Activation('relu')(x)
C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# Stage 2
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
# Stage 3
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
# Stage 4
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
block_count = {"resnet50": 5, "resnet101": 22}[architecture]
for i in range(block_count):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i))
C4 = x
# Stage 5
if stage5:
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
else:
C5 = None
return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, 4] where each row is y1, x1, y2, x2
deltas: [N, 4] where each row is [dy, dx, log(dh), log(dw)]
"""
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= tf.exp(deltas[:, 2])
width *= tf.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
return result
def clip_boxes_graph(boxes, window):
"""
boxes: [N, 4] each row is y1, x1, y2, x2
window: [4] in the form y1, x1, y2, x2
"""
# Split corners
wy1, wx1, wy2, wx2 = tf.split(window, 4)
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
# Clip
y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
clipped.set_shape((clipped.shape[0], 4))
return clipped
class ProposalLayer(KE.Layer):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, anchors, (bg prob, fg prob)]
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
def __init__(self, proposal_count, nms_threshold, anchors,
config=None, **kwargs):
"""
anchors: [N, (y1, x1, y2, x2)] anchors defined in image coordinates
"""
super(ProposalLayer, self).__init__(**kwargs)
self.config = config
self.proposal_count = proposal_count
self.nms_threshold = nms_threshold
self.anchors = anchors.astype(np.float32)
def call(self, inputs):
# Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
scores = inputs[0][:, :, 1]
# Box deltas [batch, num_rois, 4]
deltas = inputs[1]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Base anchors
anchors = self.anchors
# Improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = min(6000, self.anchors.shape[0])
ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
name="top_anchors").indices
scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
anchors = utils.batch_slice(ix, lambda x: tf.gather(anchors, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# Apply deltas to anchors to get refined anchors.
# [batch, N, (y1, x1, y2, x2)]
boxes = utils.batch_slice([anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# Clip to image boundaries. [batch, N, (y1, x1, y2, x2)]
height, width = self.config.IMAGE_SHAPE[:2]
window = np.array([0, 0, height, width]).astype(np.float32)
boxes = utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
# Filter out small boxes
# According to Xinlei Chen's paper, this reduces detection accuracy
# for small objects, so we're skipping it.
# Normalize dimensions to range of 0 to 1.
normalized_boxes = boxes / np.array([[height, width, height, width]])
# Non-max suppression
def nms(normalized_boxes, scores):
indices = tf.image.non_max_suppression(
normalized_boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(normalized_boxes, indices)
# Pad if needed
padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
proposals = tf.pad(proposals, [(0, padding), (0, 0)])
return proposals
proposals = utils.batch_slice([normalized_boxes, scores], nms,
self.config.IMAGES_PER_GPU)
return proposals
def compute_output_shape(self, input_shape):
return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
"""Implementatin of Log2. TF doesn't have a native implemenation."""
return tf.log(x) / tf.log(2.0)
class PyramidROIAlign(KE.Layer):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_shape: [height, width] of the output pooled regions. Usually [7, 7]
- image_shape: [height, width, channels]. Shape of input image in pixels
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates. Possibly padded with zeros if not enough
boxes to fill the array.
- Feature maps: List of feature maps from different levels of the pyramid.
Each is [batch, height, width, channels]
Output:
Pooled regions in the shape: [batch, num_boxes, height, width, channels].
The width and height are those specific in the pool_shape in the layer
constructor.
"""
def __init__(self, pool_shape, image_shape, **kwargs):
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
self.image_shape = tuple(image_shape)
def call(self, inputs):
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Feature Maps. List of feature maps from different level of the
# feature pyramid. Each is [batch, height, width, channels]
feature_maps = inputs[1:]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
h = y2 - y1
w = x2 - x1
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
image_area = tf.cast(
self.image_shape[0] * self.image_shape[1], tf.float32)
roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
roi_level = tf.minimum(5, tf.maximum(
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
roi_level = tf.squeeze(roi_level, 2)
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = tf.where(tf.equal(roi_level, level))
level_boxes = tf.gather_nd(boxes, ix)
# Box indices for crop_and_resize.
box_indices = tf.cast(ix[:, 0], tf.int32)
# Keep track of which box is mapped to which level
box_to_level.append(ix)
# Stop gradient propagation to ROI proposals
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_boxes, pool_height, pool_width, channels]
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
# Pack pooled features into one tensor
pooled = tf.concat(pooled, axis=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
# Rearrange pooled features to match the order of the original boxes
# Sort box_to_level by batch then box index
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
box_to_level)[0]).indices[::-1]
ix = tf.gather(box_to_level[:, 2], ix)
pooled = tf.gather(pooled, ix)
# Re-add the batch dimension
pooled = tf.expand_dims(pooled, 0)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[1][-1], )
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
# 1. Tile boxes2 and repeat boxes1. This allows us to compare
# every boxes1 against every boxes2 without loops.
# TF doesn't have an equivalent to np.repeat() so simulate it
# using tf.tile() and tf.reshape.
b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
[1, 1, tf.shape(boxes2)[0]]), [-1, 4])
b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
y1 = tf.maximum(b1_y1, b2_y1)
x1 = tf.maximum(b1_x1, b2_x1)
y2 = tf.minimum(b1_y2, b2_y2)
x2 = tf.minimum(b1_x2, b2_x2)
intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area + b2_area - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
return overlaps
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
"""Generates detection targets for one image. Subsamples proposals and
generates target class IDs, bounding box deltas, and masks for each.
Inputs:
proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [MAX_GT_INSTANCES] int class IDs
gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
deltas: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
Class-specific bbox refinements.
masks: [TRAIN_ROIS_PER_IMAGE, height, width). Masks cropped to bbox
boundaries and resized to neural network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
# Assertions
asserts = [
tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
name="roi_assertion"),
]
with tf.control_dependencies(asserts):
proposals = tf.identity(proposals)
# Remove zero padding
proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
name="trim_gt_class_ids")
gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
name="trim_gt_masks")
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
crowd_boxes = tf.gather(gt_boxes, crowd_ix)
crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)
gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
# Compute overlaps matrix [proposals, gt_boxes]
overlaps = overlaps_graph(proposals, gt_boxes)
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
    # Determine positive and negative ROIs
roi_iou_max = tf.reduce_max(overlaps, axis=1)
# 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = (roi_iou_max >= 0.5)
positive_indices = tf.where(positive_roi_bool)[:, 0]
    # 2. Negative ROIs are those with < 0.5 IoU with every GT box. Skip crowds.
negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
# Subsample ROIs. Aim for 33% positive
# Positive ROIs
positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
config.ROI_POSITIVE_RATIO)
positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
positive_count = tf.shape(positive_indices)[0]
# Negative ROIs. Add enough to maintain positive:negative ratio.
r = 1.0 / config.ROI_POSITIVE_RATIO
negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
# Gather selected ROIs
positive_rois = tf.gather(proposals, positive_indices)
negative_rois = tf.gather(proposals, negative_indices)
# Assign positive ROIs to GT boxes.
positive_overlaps = tf.gather(overlaps, positive_indices)
roi_gt_box_assignment = tf.argmax(positive_overlaps, axis=1)
roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
# Compute bbox refinement for positive ROIs
deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
deltas /= config.BBOX_STD_DEV
# Assign positive ROIs to GT masks
# Permute masks to [N, height, width, 1]
transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
# Pick the right mask for each ROI
roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
# Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
        # Transform ROI coordinates from normalized image space
# to normalized mini-mask space.
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = tf.concat([y1, x1, y2, x2], 1)
box_ids = tf.range(0, tf.shape(roi_masks)[0])
masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
box_ids,
config.MASK_SHAPE)
# Remove the extra dimension from masks.
masks = tf.squeeze(masks, axis=3)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = tf.round(masks)
# Append negative ROIs and pad bbox deltas and masks that
# are not used for negative ROIs with zeros.
rois = tf.concat([positive_rois, negative_rois], axis=0)
N = tf.shape(negative_rois)[0]
P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
rois = tf.pad(rois, [(0, P), (0, 0)])
roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
return rois, roi_gt_class_ids, deltas, masks
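# A worked example of the ROI subsampling arithmetic above (a sketch, assuming
# the common defaults TRAIN_ROIS_PER_IMAGE=200 and ROI_POSITIVE_RATIO=0.33;
# the actual values come from the config object):
#   positive_count = int(200 * 0.33) = 66
#   r = 1.0 / 0.33 ~= 3.03
#   negative_count = int(3.03 * 66) - 66 = 200 - 66 = 134
# so the layer aims for roughly one third positive ROIs. If fewer positives are
# available, negative_count shrinks proportionally and the remaining slots of
# the TRAIN_ROIS_PER_IMAGE output are zero padded.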
class DetectionTargetLayer(KE.Layer):
"""Subsamples proposals and generates target box refinement, class_ids,
and masks for each.
Inputs:
proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
coordinates.
gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
coordinates
target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE,
                    (dy, dx, log(dh), log(dw))]
                   Class-specific bbox refinements.
    target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]
Masks cropped to bbox boundaries and resized to neural
network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
def __init__(self, config, **kwargs):
super(DetectionTargetLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
proposals = inputs[0]
gt_class_ids = inputs[1]
gt_boxes = inputs[2]
gt_masks = inputs[3]
# Slice the batch and run a graph for each slice
# TODO: Rename target_bbox to target_deltas for clarity
names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
outputs = utils.batch_slice(
[proposals, gt_class_ids, gt_boxes, gt_masks],
lambda w, x, y, z: detection_targets_graph(
w, x, y, z, self.config),
self.config.IMAGES_PER_GPU, names=names)
return outputs
def compute_output_shape(self, input_shape):
return [
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois
(None, 1), # class_ids
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas
(None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
self.config.MASK_SHAPE[1]) # masks
]
def compute_mask(self, inputs, mask=None):
return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def clip_to_window(window, boxes):
"""
window: (y1, x1, y2, x2). The window in the image we want to clip to.
boxes: [N, (y1, x1, y2, x2)]
"""
boxes[:, 0] = np.maximum(np.minimum(boxes[:, 0], window[2]), window[0])
boxes[:, 1] = np.maximum(np.minimum(boxes[:, 1], window[3]), window[1])
boxes[:, 2] = np.maximum(np.minimum(boxes[:, 2], window[2]), window[0])
boxes[:, 3] = np.maximum(np.minimum(boxes[:, 3], window[3]), window[1])
return boxes
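# Minimal NumPy sketch of clip_to_window() (the values are hypothetical):
#
#   window = np.array([0, 0, 100, 100])
#   boxes = np.array([[-10., 5., 50., 120.], [20., 30., 40., 50.]])
#   clip_to_window(window, boxes)
#   # -> [[0, 5, 50, 100], [20, 30, 40, 50]]   coordinates clamped to the window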
def refine_detections_graph(rois, probs, deltas, window, config):
"""Refine classified proposals and filter overlaps and return final
detections.
Inputs:
rois: [N, (y1, x1, y2, x2)] in normalized coordinates
probs: [N, num_classes]. Class probabilities.
deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
bounding box deltas.
window: (y1, x1, y2, x2) in image coordinates. The part of the image
        that contains the real image, excluding the padding.
Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)] where
coordinates are in image domain.
"""
# Class IDs per ROI
class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
# Class probability of the top class of each ROI
indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
class_scores = tf.gather_nd(probs, indices)
# Class-specific bounding box deltas
deltas_specific = tf.gather_nd(deltas, indices)
# Apply bounding box deltas
# Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
refined_rois = apply_box_deltas_graph(
rois, deltas_specific * config.BBOX_STD_DEV)
    # Convert coordinates to image domain
# TODO: better to keep them normalized until later
height, width = config.IMAGE_SHAPE[:2]
refined_rois *= tf.constant([height, width, height, width], dtype=tf.float32)
# Clip boxes to image window
refined_rois = clip_boxes_graph(refined_rois, window)
    # Round and cast to int since we're dealing with pixels now
refined_rois = tf.to_int32(tf.rint(refined_rois))
# TODO: Filter out boxes with zero area
# Filter out background boxes
keep = tf.where(class_ids > 0)[:, 0]
# Filter out low confidence boxes
if config.DETECTION_MIN_CONFIDENCE:
conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(conf_keep, 0))
keep = tf.sparse_tensor_to_dense(keep)[0]
# Apply per-class NMS
# 1. Prepare variables
pre_nms_class_ids = tf.gather(class_ids, keep)
pre_nms_scores = tf.gather(class_scores, keep)
pre_nms_rois = tf.gather(refined_rois, keep)
unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
def nms_keep_map(class_id):
"""Apply Non-Maximum Suppression on ROIs of the given class."""
# Indices of ROIs of the given class
ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
# Apply NMS
class_keep = tf.image.non_max_suppression(
tf.to_float(tf.gather(pre_nms_rois, ixs)),
tf.gather(pre_nms_scores, ixs),
max_output_size=config.DETECTION_MAX_INSTANCES,
iou_threshold=config.DETECTION_NMS_THRESHOLD)
        # Map indices back to the `keep` index space
class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
# Pad with -1 so returned tensors have the same shape
gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
class_keep = tf.pad(class_keep, [(0, gap)],
mode='CONSTANT', constant_values=-1)
# Set shape so map_fn() can infer result shape
class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
return class_keep
# 2. Map over class IDs
nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
dtype=tf.int64)
# 3. Merge results into one list, and remove -1 padding
nms_keep = tf.reshape(nms_keep, [-1])
nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
# 4. Compute intersection between keep and nms_keep
keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(nms_keep, 0))
keep = tf.sparse_tensor_to_dense(keep)[0]
# Keep top detections
roi_count = config.DETECTION_MAX_INSTANCES
class_scores_keep = tf.gather(class_scores, keep)
num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
keep = tf.gather(keep, top_ids)
# Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
# Coordinates are in image domain.
detections = tf.concat([
tf.to_float(tf.gather(refined_rois, keep)),
tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],
tf.gather(class_scores, keep)[..., tf.newaxis]
], axis=1)
# Pad with zeros if detections < DETECTION_MAX_INSTANCES
gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
return detections
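# Note on the per-class NMS above: tf.map_fn() requires every call of
# nms_keep_map() to return a tensor of the same static shape, so each class's
# surviving indices are padded with -1 up to DETECTION_MAX_INSTANCES and the
# -1 padding is stripped again after the map. For example, with
# DETECTION_MAX_INSTANCES=100 and 3 boxes kept for a class, nms_keep_map()
# returns [i0, i1, i2, -1, -1, ..., -1] of length 100.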
class DetectionLayer(KE.Layer):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
coordinates are in image domain
"""
def __init__(self, config=None, **kwargs):
super(DetectionLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
rois = inputs[0]
mrcnn_class = inputs[1]
mrcnn_bbox = inputs[2]
image_meta = inputs[3]
# Run detection refinement graph on each item in the batch
_, _, window, _ = parse_image_meta_graph(image_meta)
detections_batch = utils.batch_slice(
[rois, mrcnn_class, mrcnn_bbox, window],
lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
self.config.IMAGES_PER_GPU)
# Reshape output
        # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in pixels
return tf.reshape(
detections_batch,
[self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
def compute_output_shape(self, input_shape):
return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
"""Builds the computation graph of Region Proposal Network.
feature_map: backbone features [batch, height, width, depth]
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
Returns:
rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
# TODO: check if stride of 2 causes alignment issues if the featuremap
# is not even.
# Shared convolutional base of the RPN
shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
strides=anchor_stride,
name='rpn_conv_shared')(feature_map)
# Anchor Score. [batch, height, width, anchors per location * 2].
x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_class_raw')(shared)
# Reshape to [batch, anchors, 2]
rpn_class_logits = KL.Lambda(
lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)
# Softmax on last dimension of BG/FG.
rpn_probs = KL.Activation(
"softmax", name="rpn_class_xxx")(rpn_class_logits)
    # Bounding box refinement. [batch, H, W, anchors per location * 4]
    # where the 4 values are (dy, dx, log(dh), log(dw))
x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
activation='linear', name='rpn_bbox_pred')(shared)
# Reshape to [batch, anchors, 4]
rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)
return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
"""Builds a Keras model of the Region Proposal Network.
It wraps the RPN graph so it can be used multiple times with shared
weights.
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
depth: Depth of the backbone feature map.
Returns a Keras Model object. The model outputs, when called, are:
rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
    rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
input_feature_map = KL.Input(shape=[None, None, depth],
name="input_rpn_feature_map")
outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
return KM.Model([input_feature_map], outputs, name="rpn_model")
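# Illustrative usage of the RPN wrapper (a sketch; the feature map shape and
# anchor settings below are hypothetical, not the config defaults):
#
#   rpn = build_rpn_model(anchor_stride=1, anchors_per_location=3, depth=256)
#   logits, probs, bbox = rpn([feature_map])   # feature_map: [batch, 32, 32, 256]
#   # logits: [batch, 32*32*3, 2], probs: [batch, 32*32*3, 2], bbox: [batch, 32*32*3, 4]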
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps,
image_shape, pool_size, num_classes):
"""Builds the computation graph of the feature pyramid network classifier
and regressor heads.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_shape: [height, width, depth]
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
Returns:
logits: [N, NUM_CLASSES] classifier logits (before softmax)
probs: [N, NUM_CLASSES] classifier probabilities
bbox_deltas: [N, (dy, dx, log(dh), log(dw))] Deltas to apply to
proposal boxes
"""
# ROI Pooling
# Shape: [batch, num_boxes, pool_height, pool_width, channels]
x = PyramidROIAlign([pool_size, pool_size], image_shape,
name="roi_align_classifier")([rois] + feature_maps)
# Two 1024 FC layers (implemented with Conv2D for consistency)
x = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding="valid"),
name="mrcnn_class_conv1")(x)
x = KL.TimeDistributed(BatchNorm(axis=3), name='mrcnn_class_bn1')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),
name="mrcnn_class_conv2")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_class_bn2')(x)
x = KL.Activation('relu')(x)
shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
name="pool_squeeze")(x)
# Classifier head
mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
name='mrcnn_class_logits')(shared)
mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
name="mrcnn_class")(mrcnn_class_logits)
# BBox head
# [batch, boxes, num_classes * (dy, dx, log(dh), log(dw))]
x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
name='mrcnn_bbox_fc')(shared)
# Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]
s = K.int_shape(x)
mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)
return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
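# Note on the head above: the two "FC" layers are implemented as a
# [pool_size x pool_size] valid convolution followed by a 1x1 convolution,
# wrapped in TimeDistributed so they run once per ROI. For example, assuming
# config.POOL_SIZE is 7, the ROI-aligned features [batch, num_rois, 7, 7, 256]
# become [batch, num_rois, 1, 1, 1024] after the first conv, and the two
# squeezes in "pool_squeeze" reduce that to [batch, num_rois, 1024] before the
# Dense classifier and bbox heads.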
def build_fpn_mask_graph(rois, feature_maps,
image_shape, pool_size, num_classes):
"""Builds the computation graph of the mask head of Feature Pyramid Network.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_shape: [height, width, depth]
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
Returns: Masks [batch, roi_count, height, width, num_classes]
"""
# ROI Pooling
# Shape: [batch, boxes, pool_height, pool_width, channels]
x = PyramidROIAlign([pool_size, pool_size], image_shape,
name="roi_align_mask")([rois] + feature_maps)
# Conv layers
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv1")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn1')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv2")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn2')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv3")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn3')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv4")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn4')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
name="mrcnn_mask_deconv")(x)
x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
name="mrcnn_mask")(x)
return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
    y_true and y_pred are typically: [N, 4], but could be any shape.
"""
diff = K.abs(y_true - y_pred)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
return loss
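# Quick numeric check of smooth_l1_loss() (element-wise; values hypothetical):
#   |y_true - y_pred| = 0.5  ->  0.5 * 0.5**2 = 0.125   (quadratic branch)
#   |y_true - y_pred| = 2.0  ->  2.0 - 0.5    = 1.5     (linear branch)
# The two branches meet at |diff| = 1.0, where both evaluate to 0.5.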
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
"""
# Squeeze last dim to simplify
rpn_match = tf.squeeze(rpn_match, -1)
# Get anchor classes. Convert the -1/+1 match to 0/1 values.
anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
# Positive and Negative anchors contribute to the loss,
# but neutral anchors (match value = 0) don't.
indices = tf.where(K.not_equal(rpn_match, 0))
# Pick rows that contribute to the loss and filter out the rest.
rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
anchor_class = tf.gather_nd(anchor_class, indices)
# Crossentropy loss
loss = K.sparse_categorical_crossentropy(target=anchor_class,
output=rpn_class_logits,
from_logits=True)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
config: the model config object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.where(K.equal(rpn_match, 1))
# Pick bbox deltas that contribute to the loss
rpn_bbox = tf.gather_nd(rpn_bbox, indices)
# Trim target bounding box deltas to the same length as rpn_bbox.
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
target_bbox = batch_pack_graph(target_bbox, batch_counts,
config.IMAGES_PER_GPU)
# TODO: use smooth_l1_loss() rather than reimplementing here
# to reduce code duplication
diff = K.abs(target_bbox - rpn_bbox)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
active_class_ids):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
active_class_ids: [batch, num_classes]. Has a value of 1 for
classes that are in the dataset of the image, and 0
for classes that are not in the dataset.
"""
target_class_ids = tf.cast(target_class_ids, 'int64')
# Find predictions of classes that are not in the dataset.
pred_class_ids = tf.argmax(pred_class_logits, axis=2)
# TODO: Update this line to work with batch > 1. Right now it assumes all
# images in a batch have the same active_class_ids
pred_active = tf.gather(active_class_ids[0], pred_class_ids)
# Loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_class_ids, logits=pred_class_logits)
# Erase losses of predictions of classes that are not in the active
# classes of the image.
loss = loss * pred_active
    # Compute loss mean. Use only predictions that contribute
# to the loss to get a correct mean.
loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
# Reshape to merge batch and roi dimensions for simplicity.
target_class_ids = K.reshape(target_class_ids, (-1,))
target_bbox = K.reshape(target_bbox, (-1, 4))
pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))
# Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
positive_roi_class_ids = tf.cast(
tf.gather(target_class_ids, positive_roi_ix), tf.int64)
indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
# Gather the deltas (predicted and true) that contribute to loss
target_bbox = tf.gather(target_bbox, positive_roi_ix)
pred_bbox = tf.gather_nd(pred_bbox, indices)
# Smooth-L1 Loss
loss = K.switch(tf.size(target_bbox) > 0,
smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
tf.constant(0.0))
loss = K.mean(loss)
loss = K.reshape(loss, [1, 1])
return loss
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
# Permute predicted masks to [N, num_classes, height, width]
pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
# Only positive ROIs contribute to the loss. And only
# the class specific mask of each ROI.
positive_ix = tf.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(
tf.gather(target_class_ids, positive_ix), tf.int64)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
# Gather the masks (predicted and true) that contribute to loss
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
# Compute binary cross entropy. If no positive ROIs, then return 0.
# shape: [batch, roi, num_classes]
loss = K.switch(tf.size(y_true) > 0,
K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
loss = K.reshape(loss, [1, 1])
return loss
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augment=False,
use_mini_mask=False):
"""Load and return ground truth data for an image (image, mask, bounding boxes).
augment: If true, apply random image augmentation. Currently, only
horizontal flipping is offered.
use_mini_mask: If False, returns full-size masks that are the same height
and width as the original image. These can be big, for example
        1024x1024x100 (for 100 instances). Mini masks are smaller, typically
224x224 and are generated by extracting the bounding box of the
object and resizing it to MINI_MASK_SHAPE.
Returns:
image: [height, width, 3]
        image_meta: image meta data (see compose_image_meta()), which includes
            the original shape of the image before resizing and cropping.
class_ids: [instance_count] Integer class IDs
bbox: [instance_count, (y1, x1, y2, x2)]
mask: [height, width, instance_count]. The height and width are those
of the image unless use_mini_mask is True, in which case they are
defined in MINI_MASK_SHAPE.
"""
# Load image and mask
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
shape = image.shape
image, window, scale, padding = utils.resize_image(
image,
min_dim=config.IMAGE_MIN_DIM,
max_dim=config.IMAGE_MAX_DIM,
padding=config.IMAGE_PADDING)
mask = utils.resize_mask(mask, scale, padding)
# Random horizontal flips.
if augment:
if random.randint(0, 1):
image = np.fliplr(image)
mask = np.fliplr(mask)
# Bounding boxes. Note that some boxes might be all zeros
# if the corresponding mask got cropped out.
# bbox: [num_instances, (y1, x1, y2, x2)]
bbox = utils.extract_bboxes(mask)
# Active classes
# Different datasets have different classes, so track the
# classes supported in the dataset of this image.
active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
active_class_ids[source_class_ids] = 1
# Resize masks to smaller size to reduce memory usage
if use_mini_mask:
mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
# Image meta data
image_meta = compose_image_meta(image_id, shape, window, active_class_ids)
return image, image_meta, class_ids, bbox, mask
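# Illustrative usage (a sketch; `dataset` is any Dataset subclass and `config`
# the model config, both hypothetical names here):
#
#   image, image_meta, class_ids, bbox, mask = load_image_gt(
#       dataset, config, image_id=0, augment=False,
#       use_mini_mask=config.USE_MINI_MASK)
#   # image: typically [IMAGE_MAX_DIM, IMAGE_MAX_DIM, 3] when IMAGE_PADDING is on
#   # bbox:  [num_instances, (y1, x1, y2, x2)] in pixel coordinates
#   # mask:  [h, w, num_instances], MINI_MASK_SHAPE if use_mini_mask is True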
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
"""Generate targets for training Stage 2 classifier and mask heads.
This is not used in normal training. It's useful for debugging or to train
the Mask RCNN heads without using the RPN head.
Inputs:
rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
gt_class_ids: [instance count] Integer class IDs
gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
size or mini-masks.
Returns:
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]. Class-specific
        bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class-specific masks cropped
        to bbox boundaries and resized to neural network output size.
"""
assert rpn_rois.shape[0] > 0
assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
gt_class_ids.dtype)
assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
gt_boxes.dtype)
assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
gt_masks.dtype)
# It's common to add GT Boxes to ROIs but we don't do that here because
# according to XinLei Chen's paper, it doesn't help.
# Trim empty padding in gt_boxes and gt_masks parts
instance_ids = np.where(gt_class_ids > 0)[0]
assert instance_ids.shape[0] > 0, "Image must contain instances."
gt_class_ids = gt_class_ids[instance_ids]
gt_boxes = gt_boxes[instance_ids]
gt_masks = gt_masks[:, :, instance_ids]
# Compute areas of ROIs and ground truth boxes.
rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
(rpn_rois[:, 3] - rpn_rois[:, 1])
gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
(gt_boxes[:, 3] - gt_boxes[:, 1])
# Compute overlaps [rpn_rois, gt_boxes]
overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
for i in range(overlaps.shape[1]):
gt = gt_boxes[i]
overlaps[:, i] = utils.compute_iou(
gt, rpn_rois, gt_box_area[i], rpn_roi_area)
# Assign ROIs to GT boxes
rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
rpn_roi_iou_max = overlaps[np.arange(
overlaps.shape[0]), rpn_roi_iou_argmax]
# GT box assigned to each ROI
rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
# Positive ROIs are those with >= 0.5 IoU with a GT box.
fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
# Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
# TODO: To hard example mine or not to hard example mine, that's the question
# bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
# Subsample ROIs. Aim for 33% foreground.
# FG
fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
if fg_ids.shape[0] > fg_roi_count:
keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
else:
keep_fg_ids = fg_ids
# BG
remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
if bg_ids.shape[0] > remaining:
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
else:
keep_bg_ids = bg_ids
    # Combine indices of ROIs to keep
keep = np.concatenate([keep_fg_ids, keep_bg_ids])
# Need more?
remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
if remaining > 0:
# Looks like we don't have enough samples to maintain the desired
# balance. Reduce requirements and fill in the rest. This is
# likely different from the Mask RCNN paper.
# There is a small chance we have neither fg nor bg samples.
if keep.shape[0] == 0:
# Pick bg regions with easier IoU threshold
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
assert bg_ids.shape[0] >= remaining
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
assert keep_bg_ids.shape[0] == remaining
keep = np.concatenate([keep, keep_bg_ids])
else:
# Fill the rest with repeated bg rois.
keep_extra_ids = np.random.choice(
keep_bg_ids, remaining, replace=True)
keep = np.concatenate([keep, keep_extra_ids])
assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
"keep doesn't match ROI batch size {}, {}".format(
keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
# Reset the gt boxes assigned to BG ROIs.
rpn_roi_gt_boxes[keep_bg_ids, :] = 0
rpn_roi_gt_class_ids[keep_bg_ids] = 0
# For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
rois = rpn_rois[keep]
roi_gt_boxes = rpn_roi_gt_boxes[keep]
roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
roi_gt_assignment = rpn_roi_iou_argmax[keep]
# Class-aware bbox deltas. [y, x, log(h), log(w)]
bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
config.NUM_CLASSES, 4), dtype=np.float32)
pos_ids = np.where(roi_gt_class_ids > 0)[0]
bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
rois[pos_ids], roi_gt_boxes[pos_ids, :4])
# Normalize bbox refinements
bboxes /= config.BBOX_STD_DEV
# Generate class-specific target masks.
masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
dtype=np.float32)
for i in pos_ids:
class_id = roi_gt_class_ids[i]
assert class_id > 0, "class id must be greater than 0"
gt_id = roi_gt_assignment[i]
class_mask = gt_masks[:, :, gt_id]
if config.USE_MINI_MASK:
# Create a mask placeholder, the size of the image
placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
# GT box
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
gt_w = gt_x2 - gt_x1
gt_h = gt_y2 - gt_y1
# Resize mini mask to size of GT box
placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
np.round(scipy.misc.imresize(class_mask.astype(float), (gt_h, gt_w),
interp='nearest') / 255.0).astype(bool)
            # Place the mini mask in the placeholder
class_mask = placeholder
# Pick part of the mask and resize it
y1, x1, y2, x2 = rois[i].astype(np.int32)
m = class_mask[y1:y2, x1:x2]
mask = scipy.misc.imresize(
m.astype(float), config.MASK_SHAPE, interp='nearest') / 255.0
masks[i, :, :, class_id] = mask
return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
# RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
# RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = np.where(gt_class_ids < 0)[0]
if crowd_ix.shape[0] > 0:
# Filter out crowds from ground truth class IDs and boxes
non_crowd_ix = np.where(gt_class_ids > 0)[0]
crowd_boxes = gt_boxes[crowd_ix]
gt_class_ids = gt_class_ids[non_crowd_ix]
gt_boxes = gt_boxes[non_crowd_ix]
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
crowd_iou_max = np.amax(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
else:
        # No anchors intersect a crowd
no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = utils.compute_overlaps(anchors, gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
#
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
# 2. Set an anchor for each GT box (regardless of IoU value).
# TODO: If multiple anchors have the same IoU match all of them
gt_iou_argmax = np.argmax(overlaps, axis=0)
rpn_match[gt_iou_argmax] = 1
# 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
np.sum(rpn_match == 1))
if extra > 0:
        # Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 # index into rpn_bbox
# TODO: use box_refinement() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
# Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i]]
# Convert coordinates to center plus width/height.
# GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
# Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
# Normalize
rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
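# Worked example of the RPN bbox refinement computed above (a sketch; the boxes
# are hypothetical and RPN_BBOX_STD_DEV is assumed to be the common default
# [0.1, 0.1, 0.2, 0.2]):
#   anchor = [0, 0, 10, 10]  -> center (5, 5),  h=10, w=10
#   gt     = [2, 2, 12, 12]  -> center (7, 7),  h=10, w=10
#   raw deltas = [(7-5)/10, (7-5)/10, log(10/10), log(10/10)] = [0.2, 0.2, 0, 0]
#   normalized = [0.2/0.1, 0.2/0.1, 0/0.2, 0/0.2]             = [2.0, 2.0, 0, 0]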
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
"""Generates ROI proposals similar to what a region proposal network
would generate.
image_shape: [Height, Width, Depth]
count: Number of ROIs to generate
gt_class_ids: [N] Integer ground truth class IDs
gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.
Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
"""
# placeholder
rois = np.zeros((count, 4), dtype=np.int32)
# Generate random ROIs around GT boxes (90% of count)
rois_per_box = int(0.9 * count / gt_boxes.shape[0])
for i in range(gt_boxes.shape[0]):
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
h = gt_y2 - gt_y1
w = gt_x2 - gt_x1
# random boundaries
r_y1 = max(gt_y1 - h, 0)
r_y2 = min(gt_y2 + h, image_shape[0])
r_x1 = max(gt_x1 - w, 0)
r_x2 = min(gt_x2 + w, image_shape[1])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:rois_per_box]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:rois_per_box]
if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
box_rois = np.hstack([y1, x1, y2, x2])
rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois
# Generate random ROIs anywhere in the image (10% of count)
remaining_count = count - (rois_per_box * gt_boxes.shape[0])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:remaining_count]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:remaining_count]
if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
global_rois = np.hstack([y1, x1, y2, x2])
rois[-remaining_count:] = global_rois
return rois
def data_generator(dataset, config, shuffle=True, augment=True, random_rois=0,
batch_size=1, detection_targets=False):
"""A generator that returns images and corresponding target class ids,
bounding box deltas, and masks.
dataset: The Dataset object to pick data from
config: The model config object
shuffle: If True, shuffles the samples before every epoch
augment: If True, applies image augmentation to images (currently only
horizontal flips are supported)
random_rois: If > 0 then generate proposals to be used to train the
network classifier and mask heads. Useful if training
the Mask RCNN part without the RPN.
batch_size: How many images to return in each call
detection_targets: If True, generate detection targets (class IDs, bbox
deltas, and masks). Typically for debugging or visualizations because
        in training, detection targets are generated by DetectionTargetLayer.
Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differ depending on the received arguments:
inputs list:
- images: [batch, H, W, C]
- image_meta: [batch, size of image meta]
- rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
- rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
- gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
- gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
- gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
are those of the image unless use_mini_mask is True, in which
case they are defined in MINI_MASK_SHAPE.
outputs list: Usually empty in regular training. But if detection_targets
is True then the outputs list contains target class_ids, bbox deltas,
and masks.
"""
b = 0 # batch item index
image_index = -1
image_ids = np.copy(dataset.image_ids)
error_count = 0
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
config.BACKBONE_SHAPES,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
    # Keras requires a generator to run indefinitely.
while True:
try:
# Increment index to pick next image. Shuffle if at the start of an epoch.
image_index = (image_index + 1) % len(image_ids)
if shuffle and image_index == 0:
np.random.shuffle(image_ids)
# Get GT bounding boxes and masks for image.
image_id = image_ids[image_index]
image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
load_image_gt(dataset, config, image_id, augment=augment,
use_mini_mask=config.USE_MINI_MASK)
# Skip images that have no instances. This can happen in cases
# where we train on a subset of classes and the image doesn't
# have any of the classes we care about.
if not np.any(gt_class_ids > 0):
continue
# RPN Targets
rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
gt_class_ids, gt_boxes, config)
# Mask R-CNN Targets
if random_rois:
rpn_rois = generate_random_rois(
image.shape, random_rois, gt_class_ids, gt_boxes)
if detection_targets:
rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\
build_detection_targets(
rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)
# Init batch arrays
if b == 0:
batch_image_meta = np.zeros(
(batch_size,) + image_meta.shape, dtype=image_meta.dtype)
batch_rpn_match = np.zeros(
[batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
batch_rpn_bbox = np.zeros(
[batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
batch_images = np.zeros(
(batch_size,) + image.shape, dtype=np.float32)
batch_gt_class_ids = np.zeros(
(batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
batch_gt_boxes = np.zeros(
(batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
if config.USE_MINI_MASK:
batch_gt_masks = np.zeros((batch_size, config.MINI_MASK_SHAPE[0], config.MINI_MASK_SHAPE[1],
config.MAX_GT_INSTANCES))
else:
batch_gt_masks = np.zeros(
(batch_size, image.shape[0], image.shape[1], config.MAX_GT_INSTANCES))
if random_rois:
batch_rpn_rois = np.zeros(
(batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
if detection_targets:
batch_rois = np.zeros(
(batch_size,) + rois.shape, dtype=rois.dtype)
batch_mrcnn_class_ids = np.zeros(
(batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
batch_mrcnn_bbox = np.zeros(
(batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
batch_mrcnn_mask = np.zeros(
(batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
            # If there are more instances than fit in the array, sub-sample from them.
if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
ids = np.random.choice(
np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
gt_class_ids = gt_class_ids[ids]
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:, :, ids]
# Add to batch
batch_image_meta[b] = image_meta
batch_rpn_match[b] = rpn_match[:, np.newaxis]
batch_rpn_bbox[b] = rpn_bbox
batch_images[b] = mold_image(image.astype(np.float32), config)
batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
if random_rois:
batch_rpn_rois[b] = rpn_rois
if detection_targets:
batch_rois[b] = rois
batch_mrcnn_class_ids[b] = mrcnn_class_ids
batch_mrcnn_bbox[b] = mrcnn_bbox
batch_mrcnn_mask[b] = mrcnn_mask
b += 1
# Batch full?
if b >= batch_size:
inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
outputs = []
if random_rois:
inputs.extend([batch_rpn_rois])
if detection_targets:
inputs.extend([batch_rois])
# Keras requires that output and targets have the same number of dimensions
batch_mrcnn_class_ids = np.expand_dims(
batch_mrcnn_class_ids, -1)
outputs.extend(
[batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
yield inputs, outputs
# start a new batch
b = 0
except (GeneratorExit, KeyboardInterrupt):
raise
except:
# Log it and skip the image
logging.exception("Error processing image {}".format(
dataset.image_info[image_id]))
error_count += 1
if error_count > 5:
raise
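# Illustrative consumption of the generator (a sketch; `train_set` and `config`
# are hypothetical names):
#
#   gen = data_generator(train_set, config, shuffle=True,
#                        batch_size=config.BATCH_SIZE)
#   inputs, outputs = next(gen)
#   # inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
#   #           batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
#   # outputs = []   (empty unless detection_targets=True)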
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
def build(self, mode, config):
"""Build Mask R-CNN architecture.
input_shape: The shape of the input image.
mode: Either "training" or "inference". The inputs and
outputs of the model differ accordingly.
"""
assert mode in ['training', 'inference']
        # Image size must be divisible by 2 multiple times
        h, w = config.IMAGE_SHAPE[:2]
        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception("Image size must be divisible by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling. "
                            "For example, use 256, 320, 384, 448, 512, ... etc.")
# Inputs
input_image = KL.Input(
shape=config.IMAGE_SHAPE.tolist(), name="input_image")
input_image_meta = KL.Input(shape=[None], name="input_image_meta")
if mode == "training":
# RPN GT
input_rpn_match = KL.Input(
shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
input_rpn_bbox = KL.Input(
shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
# Detection GT (class IDs, bounding boxes, and masks)
# 1. GT Class IDs (zero padded)
input_gt_class_ids = KL.Input(
shape=[None], name="input_gt_class_ids", dtype=tf.int32)
# 2. GT Boxes in pixels (zero padded)
# [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
input_gt_boxes = KL.Input(
shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
# Normalize coordinates
h, w = K.shape(input_image)[1], K.shape(input_image)[2]
image_scale = K.cast(K.stack([h, w, h, w], axis=0), tf.float32)
gt_boxes = KL.Lambda(lambda x: x / image_scale)(input_gt_boxes)
# 3. GT Masks (zero padded)
# [batch, height, width, MAX_GT_INSTANCES]
if config.USE_MINI_MASK:
input_gt_masks = KL.Input(
shape=[config.MINI_MASK_SHAPE[0],
config.MINI_MASK_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
else:
input_gt_masks = KL.Input(
shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
# Build the shared convolutional layers.
# Bottom-up Layers
# Returns a list of the last layers of each stage, 5 in total.
        # C1 is discarded (the "_") since the FPN only uses C2-C5.
_, C2, C3, C4, C5 = resnet_graph(input_image, "resnet101", stage5=True)
# Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
P5 = KL.Conv2D(256, (1, 1), name='fpn_c5p5')(C5)
P4 = KL.Add(name="fpn_p4add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
KL.Conv2D(256, (1, 1), name='fpn_c4p4')(C4)])
P3 = KL.Add(name="fpn_p3add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
KL.Conv2D(256, (1, 1), name='fpn_c3p3')(C3)])
P2 = KL.Add(name="fpn_p2add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
KL.Conv2D(256, (1, 1), name='fpn_c2p2')(C2)])
# Attach 3x3 conv to all P layers to get the final feature maps.
P2 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p2")(P2)
P3 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p3")(P3)
P4 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p4")(P4)
P5 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p5")(P5)
# P6 is used for the 5th anchor scale in RPN. Generated by
# subsampling from P5 with stride of 2.
P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
# Note that P6 is used in RPN, but not in the classifier heads.
rpn_feature_maps = [P2, P3, P4, P5, P6]
mrcnn_feature_maps = [P2, P3, P4, P5]
# Generate Anchors
self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
config.BACKBONE_SHAPES,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
# RPN Model
rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
len(config.RPN_ANCHOR_RATIOS), 256)
# Loop through pyramid layers
layer_outputs = [] # list of lists
for p in rpn_feature_maps:
layer_outputs.append(rpn([p]))
# Concatenate layer outputs
# Convert from list of lists of level outputs to list of lists
# of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
outputs = list(zip(*layer_outputs))
outputs = [KL.Concatenate(axis=1, name=n)(list(o))
for o, n in zip(outputs, output_names)]
rpn_class_logits, rpn_class, rpn_bbox = outputs
# Generate proposals
# Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
# and zero padded.
proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
else config.POST_NMS_ROIS_INFERENCE
rpn_rois = ProposalLayer(proposal_count=proposal_count,
nms_threshold=config.RPN_NMS_THRESHOLD,
name="ROI",
anchors=self.anchors,
config=config)([rpn_class, rpn_bbox])
if mode == "training":
# Class ID mask to mark class IDs supported by the dataset the image
# came from.
_, _, _, active_class_ids = KL.Lambda(lambda x: parse_image_meta_graph(x),
mask=[None, None, None, None])(input_image_meta)
if not config.USE_RPN_ROIS:
# Ignore predicted ROIs and use ROIs provided as an input.
input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
name="input_roi", dtype=np.int32)
# Normalize coordinates to 0-1 range.
target_rois = KL.Lambda(lambda x: K.cast(
x, tf.float32) / image_scale[:4])(input_rois)
else:
target_rois = rpn_rois
# Generate detection targets
# Subsamples proposals and generates target outputs for training
# Note that proposal class IDs, gt_boxes, and gt_masks are zero
# padded. Equally, returned rois and targets are zero padded.
rois, target_class_ids, target_bbox, target_mask =\
DetectionTargetLayer(config, name="proposal_targets")([
target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
# Network Heads
# TODO: verify that this handles zero padded ROIs
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rois, mrcnn_feature_maps, config.IMAGE_SHAPE,
config.POOL_SIZE, config.NUM_CLASSES)
mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
config.IMAGE_SHAPE,
config.MASK_POOL_SIZE,
config.NUM_CLASSES)
            # TODO: clean up (use tf.identity if necessary)
output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
# Losses
rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
[input_rpn_match, rpn_class_logits])
rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
[input_rpn_bbox, input_rpn_match, rpn_bbox])
class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
[target_class_ids, mrcnn_class_logits, active_class_ids])
bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
[target_bbox, target_class_ids, mrcnn_bbox])
mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
[target_mask, target_class_ids, mrcnn_mask])
# Model
inputs = [input_image, input_image_meta,
input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
if not config.USE_RPN_ROIS:
inputs.append(input_rois)
outputs = [rpn_class_logits, rpn_class, rpn_bbox,
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
rpn_rois, output_rois,
rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
model = KM.Model(inputs, outputs, name='mask_rcnn')
else:
# Network Heads
# Proposal classifier and BBox regressor heads
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, config.IMAGE_SHAPE,
config.POOL_SIZE, config.NUM_CLASSES)
# Detections
# output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in image coordinates
detections = DetectionLayer(config, name="mrcnn_detection")(
[rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
# Convert boxes to normalized coordinates
# TODO: let DetectionLayer return normalized coordinates to avoid
# unnecessary conversions
h, w = config.IMAGE_SHAPE[:2]
detection_boxes = KL.Lambda(
lambda x: x[..., :4] / np.array([h, w, h, w]))(detections)
# Create masks for detections
mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
config.IMAGE_SHAPE,
config.MASK_POOL_SIZE,
config.NUM_CLASSES)
model = KM.Model([input_image, input_image_meta],
[detections, mrcnn_class, mrcnn_bbox,
mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
name='mask_rcnn')
# Add multi-GPU support.
if config.GPU_COUNT > 1:
from parallel_model import ParallelModel
model = ParallelModel(model, config.GPU_COUNT)
return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
log_dir: The directory where events and weights are saved
checkpoint_path: the path to the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
return None, None
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
return dir_name, None
checkpoint = os.path.join(dir_name, checkpoints[-1])
return dir_name, checkpoint
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the correspoding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
        exclude: list of layer names to exclude
"""
import h5py
from keras.engine import topology
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
f = h5py.File(filepath, mode='r')
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
# Exclude some layers
if exclude:
layers = filter(lambda l: l.name not in exclude, layers)
if by_name:
topology.load_weights_from_hdf5_group_by_name(f, layers)
else:
topology.load_weights_from_hdf5_group(f, layers)
if hasattr(f, 'close'):
f.close()
# Update the log directory
self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
def compile(self, learning_rate, momentum):
"""Gets the model ready for training. Adds losses, regularization, and
metrics. Then calls the Keras compile() function.
"""
# Optimizer object
optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=momentum,
clipnorm=5.0)
# Add Losses
# First, clear previously set losses to avoid duplication
self.keras_model._losses = []
self.keras_model._per_input_losses = {}
loss_names = ["rpn_class_loss", "rpn_bbox_loss",
"mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
for name in loss_names:
layer = self.keras_model.get_layer(name)
if layer.output in self.keras_model.losses:
continue
self.keras_model.add_loss(
tf.reduce_mean(layer.output, keep_dims=True))
# Add L2 Regularization
# Skip gamma and beta weights of batch normalization layers.
reg_losses = [keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
for w in self.keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
self.keras_model.add_loss(tf.add_n(reg_losses))
# Compile
self.keras_model.compile(optimizer=optimizer, loss=[
None] * len(self.keras_model.outputs))
# Add metrics for losses
for name in loss_names:
if name in self.keras_model.metrics_names:
continue
layer = self.keras_model.get_layer(name)
self.keras_model.metrics_names.append(name)
self.keras_model.metrics_tensors.append(tf.reduce_mean(
layer.output, keep_dims=True))
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
            # Print trainable layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
            # Continue from where we left off. Get epoch and date from the file name
# A sample model path might look like:
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5
regex = r".*/\w+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})/mask\_rcnn\_\w+(\d{4})\.h5"
m = re.match(regex, model_path)
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
self.epoch = int(m.group(6)) + 1
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
epochs: Number of training epochs. Note that previous training epochs
                are considered to be done already, so this actually determines
                the epochs to train in total rather than in this particular
                call.
        layers: Allows selecting which layers to train. It can be:
            - A regular expression to match layer names to train
            - One of these predefined values:
              heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = data_generator(train_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
val_generator = data_generator(val_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE,
augment=False)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
        if os.name == 'nt':
workers = 0
else:
workers = max(self.config.BATCH_SIZE // 2, 2)
self.keras_model.fit_generator(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
validation_data=next(val_generator),
validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=100,
#workers=workers,
use_multiprocessing=True,
)
self.epoch = max(self.epoch, epochs)
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
        images: List of image matrices [height,width,depth]. Images can have
            different sizes.
        Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image to fit the model expected size
# TODO: move resizing to mold_image()
molded_image, window, scale, padding = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
max_dim=self.config.IMAGE_MAX_DIM,
padding=self.config.IMAGE_PADDING)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, window,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
def unmold_detections(self, detections, mrcnn_mask, image_shape, window):
"""Reformats the detections of one image from the format of the neural
network output to a format suitable for use in the rest of the
application.
detections: [N, (y1, x1, y2, x2, class_id, score)]
mrcnn_mask: [N, height, width, num_classes]
image_shape: [height, width, depth] Original size of the image before resizing
window: [y1, x1, y2, x2] Box in the image where the real image is
excluding the padding.
Returns:
boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
class_ids: [N] Integer class IDs for each bounding box
scores: [N] Float probability scores of the class_id
masks: [height, width, num_instances] Instance masks
"""
# How many detections do we have?
# Detections array is padded with zeros. Find the first class_id == 0.
zero_ix = np.where(detections[:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
# Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:N, :4]
class_ids = detections[:N, 4].astype(np.int32)
scores = detections[:N, 5]
masks = mrcnn_mask[np.arange(N), :, :, class_ids]
# Compute scale and shift to translate coordinates to image domain.
h_scale = image_shape[0] / (window[2] - window[0])
w_scale = image_shape[1] / (window[3] - window[1])
scale = min(h_scale, w_scale)
shift = window[:2] # y, x
scales = np.array([scale, scale, scale, scale])
shifts = np.array([shift[0], shift[1], shift[0], shift[1]])
# Translate bounding boxes to image domain
boxes = np.multiply(boxes - shifts, scales).astype(np.int32)
# Filter out detections with zero area. Often only happens in early
# stages of training when the network weights are still a bit random.
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
# Resize masks to original image size and set boundary threshold.
full_masks = []
for i in range(N):
# Convert neural network mask to full size mask
full_mask = utils.unmold_mask(masks[i], boxes[i], image_shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1)\
if full_masks else np.empty((0,) + masks.shape[1:3])
return boxes, class_ids, scores, full_masks
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(
images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
# Run object detection
detections, mrcnn_class, mrcnn_bbox, mrcnn_mask, \
rois, rpn_class, rpn_bbox =\
self.keras_model.predict([molded_images, image_metas], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs):
"""Runs a sub-set of the computation graph that computes the given
outputs.
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
kf = K.function(model.inputs, list(outputs.values()))
# Run inference
molded_images, image_metas, windows = self.mold_inputs(images)
# TODO: support training mode?
# if TEST_MODE == "training":
# model_in = [molded_images, image_metas,
# target_rpn_match, target_rpn_bbox,
# gt_boxes, gt_masks]
# if not config.USE_RPN_ROIS:
# model_in.append(target_rois)
# if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
# model_in.append(1.)
# outputs_np = kf(model_in)
# else:
model_in = [molded_images, image_metas]
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
model_in.append(0.)
outputs_np = kf(model_in)
        # Pack the generated Numpy arrays into a dict and log the results.
outputs_np = OrderedDict([(k, v)
for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, image_shape, window, active_class_ids):
"""Takes attributes of an image and puts them in one 1D array.
image_id: An int ID of the image. Useful for debugging.
image_shape: [height, width, channels]
window: (y1, x1, y2, x2) in pixels. The area of the image where the real
image is (excluding the padding)
active_class_ids: List of class_ids available in the dataset from which
the image came. Useful if training on images from multiple datasets
where not all classes are present in all datasets.
"""
meta = np.array(
[image_id] + # size=1
list(image_shape) + # size=3
        list(window) +           # size=4 (y1, x1, y2, x2) in image coordinates
list(active_class_ids) # size=num_classes
)
return meta
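# --- Hedged usage sketch (not part of the original file) ---
# Illustrates the layout produced by compose_image_meta(). The image size and
# the num_classes value below are arbitrary assumptions for illustration only.
def _demo_compose_image_meta(num_classes=81):
    meta = compose_image_meta(
        image_id=0,
        image_shape=(1024, 1024, 3),
        window=(0, 0, 1024, 1024),
        active_class_ids=np.zeros([num_classes], dtype=np.int32))
    # meta is a flat array of length 1 + 3 + 4 + num_classes
    assert meta.shape[0] == 1 + 3 + 4 + num_classes
    return meta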
def parse_image_meta_graph(meta):
"""Parses a tensor that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
"""
image_id = meta[:, 0]
image_shape = meta[:, 1:4]
    window = meta[:, 4:8]   # (y1, x1, y2, x2) window of the image in pixels
active_class_ids = meta[:, 8:]
return [image_id, image_shape, window, active_class_ids]
def mold_image(images, config):
"""Takes RGB images with 0-255 values and subtraces
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
"""Takes a image normalized with mold() and returns the original."""
return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)
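# --- Hedged round-trip sketch (not part of the original file) ---
# mold_image() and unmold_image() are inverses up to the uint8 cast, as the
# snippet below illustrates. The random image is an assumption; any config
# object exposing MEAN_PIXEL works the same way.
def _demo_mold_roundtrip(config):
    image = np.random.randint(0, 256, (128, 128, 3)).astype(np.uint8)
    molded = mold_image(image, config)       # float32, zero-centered on MEAN_PIXEL
    restored = unmold_image(molded, config)  # back to uint8 RGB (up to float rounding)
    assert np.allclose(image, restored, atol=1)
    return molded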
############################################################
# Miscellaneous Graph Functions
############################################################
def trim_zeros_graph(boxes, name=None):
"""Often boxes are represented with matricies of shape [N, 4] and
are padded with zeros. This removes zero boxes.
boxes: [N, 4] matrix of boxes.
non_zeros: [N] a 1D boolean mask identifying the rows to keep
"""
non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
boxes = tf.boolean_mask(boxes, non_zeros, name=name)
return boxes, non_zeros
def batch_pack_graph(x, counts, num_rows):
"""Picks different number of values from each row
in x depending on the values in counts.
"""
outputs = []
for i in range(num_rows):
outputs.append(x[i, :counts[i]])
return tf.concat(outputs, axis=0)
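# --- Hedged illustration (not part of the original file) ---
# batch_pack_graph() concatenates a different number of leading values from each
# row: with x = [[1, 2, 3], [4, 5, 6]] and counts = [1, 3] the result is
# [1, 4, 5, 6]. The session-based evaluation below assumes the TF1-style API
# used throughout this file.
def _demo_batch_pack_graph():
    x = tf.constant([[1, 2, 3], [4, 5, 6]])
    counts = tf.constant([1, 3])
    packed = batch_pack_graph(x, counts, num_rows=2)
    with tf.Session() as sess:
        return sess.run(packed)  # -> array([1, 4, 5, 6], dtype=int32)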
| [ "will.crawford@live.com" ] | will.crawford@live.com |
846876364bc01fda2b044a0b561e2709369cd56c | 268d9c21243e12609462ebbd6bf6859d981d2356 | /Python/python_stack/Django/BeltReview/main/apps/books/models.py | fddd59aa3b548da3b7fdfa2c3d3484b1350a19f0 | [] | no_license | dkang417/cdj | f840962c3fa8e14146588eeb49ce7dbd08b8ff4c | 9966b04af1ac8a799421d97a9231bf0a0a0d8745 | refs/heads/master | 2020-03-10T03:29:05.053821 | 2018-05-23T02:02:07 | 2018-05-23T02:02:07 | 129,166,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py |
from __future__ import unicode_literals
from django.db import models
from django import forms
from django.core.exceptions import ValidationError
# Create your models here.
class UserManager(models.Manager):
def basic_validator(self,postData):
errors={}
#validate password
if len(postData['password']) < 8:
errors["password"] = "password should be more than 8 characters"
#checks that the passwords match
if postData['password'] != postData['confirm']:
errors["confirm"] = "passwords do not match"
return errors
class User(models.Model):
name = models.CharField(max_length=255)
alias = models.CharField(max_length=255)
email = models.CharField(max_length=255)
password = models.CharField(max_length=255)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
objects = UserManager()
class AuthorManager(models.Manager):
def validate_author(request, postData):
errors = {}
return errors
class Author(models.Model):
author = models.CharField(max_length=255)
objects = AuthorManager()
class BookManager(models.Manager):
def validate_book(request,postData):
errors = {}
return errors
class Book(models.Model):
title = models.CharField(max_length=255)
author = models.ForeignKey(Author, related_name="books")
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
objects = BookManager()
class ReviewManager(models.Manager):
def validate_review(request, postData):
errors = {}
return errors
class Review(models.Model):
rating = models.IntegerField()
comment = models.TextField()
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
book = models.ForeignKey(Book, related_name="reviews")
user = models.ForeignKey(User, related_name="reviews")
objects = ReviewManager()
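# --- Hedged usage sketch (not part of the original file) ---
# Shows how the related managers created by the ForeignKey fields above are
# typically used. The object values are illustrative only and assume a working
# database connection.
def _demo_create_review():
    author = Author.objects.create(author="Brandon Sanderson")
    book = Book.objects.create(title="The Way of Kings", author=author)
    user = User.objects.create(name="Dave", alias="dk", email="dave@example.com",
                               password="not-a-real-hash")
    review = Review.objects.create(rating=5, comment="Great read", book=book, user=user)
    # Reverse lookups via related_name:
    return book.reviews.all(), author.books.all()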
| [ "dkang417@gmail.com" ] | dkang417@gmail.com |
254a54f04d7e2527304887a3982a7456e97068b4 | a088c5e4c4c2e6c722ba2df47c35f4f98d540412 | /eduzen_bot/plugins/messages/inline.py | 3469090624de031336b06b61a3e51716ad9cbd40 | [] | no_license | mikael85/bot | c884602363dba9efb716940981494987fa37e3d3 | 86751cf57061ae317804cfc19806ebb15d9ac8b4 | refs/heads/master | 2020-11-30T02:15:42.221636 | 2019-08-24T16:39:01 | 2019-08-24T16:39:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py |
import logging
from uuid import uuid4
from telegram import InlineQueryResultArticle, InputTextMessageContent, ParseMode
from telegram.utils.helpers import escape_markdown
logger = logging.getLogger()
def code_markdown(bot, update):
query = update.inline_query.query
if not query:
return
results = [
InlineQueryResultArticle(
id=uuid4(),
title="code",
input_message_content=InputTextMessageContent(
f"```\n{query}\n```", parse_mode=ParseMode.MARKDOWN
),
),
InlineQueryResultArticle(
id=uuid4(), title="Caps", input_message_content=InputTextMessageContent(query.upper())
),
InlineQueryResultArticle(
id=uuid4(),
title="Bold",
input_message_content=InputTextMessageContent(
"*{}*".format(escape_markdown(query)), parse_mode=ParseMode.MARKDOWN
),
),
InlineQueryResultArticle(
id=uuid4(),
title="Italic",
input_message_content=InputTextMessageContent(
"_{}_".format(escape_markdown(query)), parse_mode=ParseMode.MARKDOWN
),
),
]
bot.answer_inline_query(update.inline_query.id, results)
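# --- Hedged wiring sketch (not part of the original file) ---
# Registers the inline handler above with a python-telegram-bot Updater. The
# (bot, update) callback signature used in this file matches the pre-v12 API,
# so this sketch assumes that library version; the token is a placeholder.
def _demo_register_handler(token="<bot-token>"):
    from telegram.ext import Updater, InlineQueryHandler
    updater = Updater(token=token)
    updater.dispatcher.add_handler(InlineQueryHandler(code_markdown))
    updater.start_polling()
    updater.idle()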
| [ "eduardo.a.enriquez@gmail.com" ] | eduardo.a.enriquez@gmail.com |
78e09543d9fe810959a5f9c88d88fc9890e0a11d | 228a253a698fd8ceb0af4e63187ee201004aca4e | /IotServer.py | d6306058174631582c8a438fc2b709bd31389722 | [] | no_license | mtpajula/iotLocalNetworkServer | 4b16a5d93f5dcaab98afaec1e37a317d35bb4649 | aa3c0187dff14c4bf568afa554f82cf13a2500f5 | refs/heads/master | 2021-05-11T14:34:57.921236 | 2018-02-23T17:40:29 | 2018-02-23T17:40:29 | 117,707,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,580 | py |
# -*- coding: utf-8 -*-
from IotServerDevice import *
from time import sleep
import copy
import sys
class IotServer:
wait = 10
def __init__(self):
self.d = IotServerDevice()
def printer(self, category, message):
if category == "t1":
print("\n")
print(message)
print("======================================")
elif category == "t2":
print("\n")
print(message)
print("--------------------------------------")
elif category == "p":
print(message)
elif category == "error":
print(" ! ERROR: " + message)
'''
run in terminal command mode
Example: IotServer.py device=server command="reset devices"
'''
def send_command(self, device, command):
self.printer("p","Run in terminal command mode")
#self.printer("t1","Load devices from db")
self.d.collect_iot(True)
for d in self.d.c.devices:
if d.name == device:
d.receive_command('command', command)
if self.d.name == device:
self.d.receive_command('command', command)
# Send messages to db
self.send_message();
def close_db(self):
self.d.db.con.conn.close()
def send_message(self):
self.printer("t1","Send messages to db")
self.d.db.set_messages(self.d.c.devices)
self.d.db.set_messages([self.d])
'''
run in normal mode
'''
def run(self, schedule = False):
self.printer("p","Run in normal mode")
# Get devs from db
#self.printer("t1","Load devices from db")
self.d.collect_iot(True)
# get commands
self.printer("t1","Get commands")
self.d.db.get_commands(self.d.c.devices)
self.d.db.get_commands([self.d])
# Send messages to db
self.send_message();
'''
run in schedule mode
'''
def runSchedule(self):
self.printer("p","Run in schedule mode")
# Get devs from db
#self.printer("t1","Load devices from db")
self.d.collect_iot(True)
# Get scheduled commands
self.printer("t1","Get scheduled commands")
self.d.db.get_schedules(self.d.c.devices)
self.d.db.get_schedules([self.d])
# get commands
self.printer("t1","Get commands")
self.d.db.get_commands(self.d.c.devices)
self.d.db.get_commands([self.d])
# Send messages to db
self.send_message();
'''
run in status mode
'''
def runStatus(self):
self.printer("p","Run in status mode")
# Get devs from db
#self.printer("t1","Load devices from db")
self.d.collect_iot(True)
# save statuses to db
self.printer("t1","Save statuses to db")
self.d.db.set_status(self.d.c.devices)
self.d.db.set_status([self.d])
# Send messages to db
self.send_message();
if __name__ == '__main__':
iot = IotServer()
if "schedule" in sys.argv:
iot.runSchedule()
iot.close_db()
sys.exit()
if "status" in sys.argv:
iot.runStatus()
iot.close_db()
sys.exit()
c = None
d = None
for ar in sys.argv:
if "command=" in ar:
arp = ar.split("=")
c = arp[1]
elif "device=" in ar:
arp = ar.split("=")
d = arp[1]
if c != None and d != None:
iot.send_command(d,c)
iot.close_db()
sys.exit()
iot.run()
iot.close_db()
| [ "mtpajula@gmail.com" ] | mtpajula@gmail.com |
67b528a1d4897d406c2df773535234cf98e46ce4 | b7ada17734345131348d541d269c171ffbf88508 | /Clase 15-11-2019/EJM EXCEPCIONES.py | ffef2497de09d7ed5d0c969e35a71e143b8da847 | [] | no_license | PatrickPuente/Curso-Python-CEC-EPN | 709094e0e10c26b5bb4883649383c9660b227c32 | 83c9e4f85ca939f12d4fc536e46f58c4470ffa0d | refs/heads/master | 2020-09-11T16:18:56.670104 | 2019-11-16T17:43:50 | 2019-11-16T17:43:50 | 222,123,485 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 640 | py |
import math
'''try:
y = 1/0
except ZeroDivisionError:
print("Zero Division")
except ArithmeticError:
print("Arithmetic Problem")
print("THE END")
# Variants
def badFun(n):
try:
return 1/n
except ArithmeticError:
print("Arithmetic Problem")
return None
badFun(0)
print("THE END")'''
'''def badFun(n):
try:
return n/0
except:
print("I did it again")
raise
try:
badFun(0)
except ArithmeticError:
print("dasdsa")'''
x = float(input("Enter a Number: "))
assert x>=0.0
x = math.sqrt(x)
print(x)
| [ "noreply@github.com" ] | noreply@github.com |
9616bdcb9ebc14028225fac131ca2aa6763cfb91 | 9e3205c13404f6bf2b36c96af7d0a9d2532596a0 | /cart_pole/dqn.py | a37de3641dfad0cf9d3e7d3c578e6d83d554f348 | [] | no_license | mminhou/openai | fce2da3e1b49da0b99a55087cc97e8890fb5a1f7 | 05418b83218f4f2b29d70deef4a41cde7ad6941e | refs/heads/master | 2020-03-11T07:36:33.644382 | 2018-04-17T07:04:13 | 2018-04-17T07:04:13 | 129,861,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,109 | py |
import numpy as np
import random as random
from collections import deque
from cnn_tensorflow import CNN
# See https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf for model description
class DQN:
def __init__(self, num_actions, observation_shape, dqn_params, cnn_params):
self.num_actions = num_actions
self.epsilon = dqn_params['epsilon']
self.gamma = dqn_params['gamma']
self.mini_batch_size = dqn_params['mini_batch_size']
# memory
self.memory = deque(maxlen=dqn_params['memory_capacity'])
# initialize network
self.model = CNN(num_actions, observation_shape, cnn_params)
print("model initialized")
def select_action(self, observation):
"""
Selects the next action to take based on the current state and learned Q.
Args:
observation: the current state
"""
if random.random() < self.epsilon:
# with epsilon probability select a random action
action = np.random.randint(0, self.num_actions)
else:
# select the action a which maximizes the Q value
obs = np.array([observation])
q_values = self.model.predict(obs)
action = np.argmax(q_values)
return action
def update_state(self, action, observation, new_observation, reward, done):
"""
Stores the most recent action in the replay memory.
Args:
action: the action taken
observation: the state before the action was taken
new_observation: the state after the action is taken
reward: the reward from the action
done: a boolean for when the episode has terminated
"""
transition = {'action': action,
'observation': observation,
'new_observation': new_observation,
'reward': reward,
'is_done': done}
self.memory.append(transition)
def get_random_mini_batch(self):
"""
Gets a random sample of transitions from the replay memory.
"""
rand_idxs = random.sample(range(len(self.memory)), self.mini_batch_size)
mini_batch = []
for idx in rand_idxs:
mini_batch.append(self.memory[idx])
return mini_batch
def train_step(self):
"""
Updates the model based on the mini batch
"""
if len(self.memory) > self.mini_batch_size:
mini_batch = self.get_random_mini_batch()
Xs = []
ys = []
actions = []
for sample in mini_batch:
y_j = sample['reward']
# for nonterminals, add gamma*max_a(Q(phi_{j+1})) term to y_j
if not sample['is_done']:
new_observation = sample['new_observation']
new_obs = np.array([new_observation])
q_new_values = self.model.predict(new_obs)
action = np.max(q_new_values)
y_j += self.gamma*action
action = np.zeros(self.num_actions)
action[sample['action']] = 1
observation = sample['observation']
Xs.append(observation.copy())
ys.append(y_j)
actions.append(action.copy())
Xs = np.array(Xs)
ys = np.array(ys)
actions = np.array(actions)
self.model.train_step(Xs, ys, actions)
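# --- Hedged training-loop sketch (not part of the original file) ---
# One way the DQN class above might be driven with the classic Gym CartPole
# environment. The parameter values are illustrative assumptions, and
# cnn_params depends on the CNN class in cnn_tensorflow (not shown here), so
# this is a sketch under those assumptions rather than a verified recipe.
def _demo_cartpole_loop(cnn_params, episodes=100):
    import gym  # assumes the classic gym API: step() -> (obs, reward, done, info)
    env = gym.make('CartPole-v0')
    dqn_params = {'epsilon': 0.1, 'gamma': 0.99,
                  'mini_batch_size': 32, 'memory_capacity': 10000}
    agent = DQN(env.action_space.n, env.observation_space.shape, dqn_params, cnn_params)
    for _ in range(episodes):
        observation = env.reset()
        done = False
        while not done:
            action = agent.select_action(observation)
            new_observation, reward, done, _ = env.step(action)
            agent.update_state(action, observation, new_observation, reward, done)
            agent.train_step()
            observation = new_observation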
| [ "exit19093@gmail.com" ] | exit19093@gmail.com |
66d3b82f69e86c48f0251452cf320598139f48d5 | f7108e688415975baf5e3290d9b210585e4faaed | /monkeybat2.1/date.py | 04e20469868384d3244bafb377ee7322bf43019a | [] | no_license | lijiansheng325/python-2019 | 20ef1a960bc1cd8f09c0133eafda2755d273e2a4 | a577992d71d7d36a93d9cbb7658887c9152173f1 | refs/heads/master | 2020-04-19T03:30:48.426503 | 2019-01-30T09:12:02 | 2019-01-30T09:12:02 | 167,936,368 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py |
class Date(object):
def __init__(self, day=0, month=0, year=0):
self.day = day
self.month = month
self.year = year
def __str__(self):
return "{0}-{1}-{2}".format(self.year, self.month, self.day)
@classmethod
def from_string(cls, date_as_string):
year, month, day = map(int, date_as_string.split('-'))
date1 = cls(day, month, year)
return date1
@staticmethod
def is_date_valid(date_as_string):
year, month, day = map(int, date_as_string.split('-'))
return day <= 31 and month <= 12 and year <= 3999
@staticmethod
def millenium(month, day):
return Date(month, day, 2000)
class DateTime(Date):
def __str__(self):
return "{0}-{1}-{2} - 00:00:00PM".format(self.year, self.month, self.day)
if __name__=="__main__":
s='3000-09-11'
if Date.is_date_valid(s):
date1 = Date.from_string(s)
        print(date1)
        date2 = DateTime.from_string(s)
        print(date2)
        millenium_new_year1 = Date.millenium(1, 1)
        print(millenium_new_year1)
        millenium_new_year2 = DateTime.millenium(10, 10)
        print(millenium_new_year2)
| [ "lijiansheng325@163.com" ] | lijiansheng325@163.com |
2bfac6ff84eb132dbe0ca2d7e60294830f89405d | 697948f1b4e889258d64e4b641aa00f352c915d2 | /model/relation_prediction_semantic_loss/mydataloader.py | e0c59029b30751d753cdaf9484117914bd70a388 | [] | no_license | cheunglei/myLENSR | 6c8ad0376d907396b2db53f9ac42c76a001cd2eb | 063e50cc66dcc4390423150af89e95a9e0d2493a | refs/heads/master | 2021-03-21T02:02:16.576945 | 2020-05-18T08:02:47 | 2020-05-18T08:02:47 | 247,254,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,456 | py |
from torch.utils.data import Dataset, DataLoader
from torch import Tensor
import numpy as np
import pickle as pk
class VRD_dataset(Dataset):
def __init__(self, train_set_keys, image_features_train, annotation_train, information):
self.train_set_keys = train_set_keys
self.image_features_train = image_features_train
self.annotation_train = annotation_train
self.information = information
def __len__(self):
return len(self.train_set_keys)
def __getitem__(self, idx):
img = self.train_set_keys[idx]
pairs = list(self.annotation_train[img].keys())
x = []
y = []
info = []
for i in range(len(pairs)):
key = pairs[i]
relation = self.annotation_train[img][key]
if relation == 100:
if np.random.random() < 0.01 and (self.information[img][key][1][1] != self.information[img][key][2][1]):
x.append(self.image_features_train[img][key])
y.append(relation)
info.append(self.information[img][key])
else:
x.append(self.image_features_train[img][key])
y.append(relation)
info.append(self.information[img][key])
x = Tensor(x)
y = Tensor(y).long()
# print ('debug',img,pairs,x,y,info)
return x, y, info
class VRD_dataset_test(Dataset):
def __init__(self, train_set_keys, image_features_train, annotation_train, information):
self.train_set_keys = train_set_keys
self.image_features_train = image_features_train
self.annotation_train = annotation_train
self.information = information
def __len__(self):
return len(self.train_set_keys)
def __getitem__(self, idx):
# print(idx)
img = self.train_set_keys[idx]
pairs = list(self.annotation_train[img].keys())
x = []
y = []
info = []
for i in range(len(pairs)):
key = pairs[i]
relation = self.annotation_train[img][key]
if self.information[img][key][1][1] != self.information[img][key][2][1]:
x.append(self.image_features_train[img][key])
y.append(relation)
info.append(self.information[img][key])
x = Tensor(x)
y = Tensor(y).long()
# print ('debug',img,pairs,x,y,info)
return x, y, info
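# --- Hedged usage sketch (not part of the original file) ---
# __getitem__ already returns every object pair of one image as a single
# (x, y, info) triple of variable length, so a DataLoader is typically used
# with batch_size=1 and a collate_fn that unwraps the singleton batch. The
# argument names below are placeholders, not objects defined in this repository.
def _demo_loader(train_keys, image_features, annotations, information):
    dataset = VRD_dataset(train_keys, image_features, annotations, information)
    loader = DataLoader(dataset, batch_size=1, shuffle=True,
                        collate_fn=lambda batch: batch[0])
    for x, y, info in loader:
        pass  # feed x and y to the relation-prediction model here
    return loader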
| [ "948594226@qq.com" ] | 948594226@qq.com |
52722c46ff54f9d588bdd4cd1a24506d64dacd60 | bcc2d156334d3680561b17cec82cbc31a5ea07ad | /String/22. Generate Parentheses.py | 2431fefda0dcde528d7eafd0b65a378afe0ebe31 | [] | no_license | kevinsshah/Leetcode | 72b14e226b6881bcd18913b2fa132b0e3f8dd6ef | 4419f46e6f6b1d96ff8b7066fce687cfa88e65a0 | refs/heads/master | 2020-03-25T23:00:49.851183 | 2018-09-08T04:13:27 | 2018-09-08T04:13:27 | 144,255,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,129 | py |
# Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.
#
# For example, given n = 3, a solution set is:
#
# [
# "((()))",
# "(()())",
# "(())()",
# "()(())",
# "()()()"
# ]
class Solution(object):
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
# def helper(A = []):
# if len(A) == 2*n:
# if isValid(A):
# ans.append("".join(A))
# else:
# A.append("(")
# helper(A)
# A.pop()
# A.append(")")
# helper(A)
# A.pop()
# def isValid(A):
# bal = 0
# for c in A:
# if c == "(":
# bal+=1
# else:
# bal -= 1
# if bal < 0:
# return False
# return bal == 0
# ans = []
# helper()
# return ans
# def backtrack(S = '', left = 0, right = 0):
# if len(S) == 2*n:
# ans.append(S)
# return
# if left < n:
# backtrack(S+"(", left + 1, right)
# if right < left:
# backtrack(S+")", left, right + 1)
# ans = []
# backtrack()
# return ans
ans = []
def helper(left, right, string, ans):
if right < left:
return
if not left and not right:
ans.append(string)
return
if left:
helper(left - 1, right, string + "(", ans)
if right:
helper(left, right - 1, string + ")", ans)
helper(n, n, "", ans)
return ans
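# --- Hedged usage check (not part of the original file) ---
if __name__ == "__main__":
    # Backtracking solution above; for n = 3 it produces the five
    # well-formed combinations (order may differ from the comment block).
    print(Solution().generateParenthesis(3))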
| [ "shah.kevi@husky.neu.edu" ] | shah.kevi@husky.neu.edu |
1f43b2642f2cdbd247d3109f36b3583af0b787b8 | adc53c3aa155a93610261353df13ae0b25393f7a | /src/app/api/files.py | d9c2ebc273e444cc8a6e6769f8eb359a3c004451 | [] | no_license | alvinTaoOps/geofiles-api | fe9b95a63117cbfcceb7e404c0bd7c94b2bedfbe | 66bb1bd09d57f294a40ed8aec13ab58a2234ca6f | refs/heads/master | 2023-07-18T10:52:19.939089 | 2021-04-27T16:55:41 | 2021-04-27T16:55:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,897 | py |
from typing import Optional, List
from fastapi import APIRouter, status, UploadFile, File, Header, Request
from ..db import files as files_repository
from ..utils.Exceptions import raise_422_exception, raise_401_exception, raise_404_exception, raise_410_exception
from ..utils.http import HTTPFactory
from ..core.validator import Validator, SupportedFormat
from ..core.convertors.helper_functions import convert_to_geojson as to_geojson, convert_to_cad as to_cad, \
convert_to_shp as to_shp
from fastapi.responses import FileResponse
from pathlib import Path
from geojson_pydantic.features import FeatureCollection
from .schemas import FileRecord, PublicFile
import os
router = APIRouter()
async def file_request_handler(file_uuid: str, request: Request, token: Optional[str] = Header(None)):
if not request.state.user:
raise_401_exception()
file_record = await files_repository.get_one(file_uuid)
if not file_record:
raise_410_exception()
if file_record.get("user_id") != request.state.user["user_id"]:
raise_401_exception()
if not Path(file_record.get("path")).exists():
raise_410_exception()
return FileRecord.parse_obj(dict(file_record))
@router.post("/upload/", status_code=status.HTTP_201_CREATED)
async def create_upload_file(request: Request, file: UploadFile = File(...),
token: Optional[str] = Header(None)):
filename, file_extension = os.path.splitext(file.filename)
if file_extension not in Validator.SUPPORTED_FORMAT:
raise_422_exception()
if not request.state.user:
raise_401_exception()
file_uuid = await files_repository.create_from_request(file, file_extension, request.state.user)
return file_uuid
@router.get("/{file_uuid}", status_code=status.HTTP_200_OK)
async def download_file(request: Request, file_uuid: str, token: Optional[str] = Header(None)):
file_record = await file_request_handler(file_uuid, request)
return FileResponse(
file_record.path, media_type=SupportedFormat.get_mime_type(file_record.type), filename=file_record.file_name)
@router.get("/{file_uuid}/format", status_code=status.HTTP_200_OK)
async def get_allowed_formats(request: Request, file_uuid: str, token: Optional[str] = Header(None)):
file_record = await file_request_handler(file_uuid, request)
available_format = SupportedFormat.get_available_format(file_record.type)
urls = [f"/{file_uuid}/to{export_format}" for export_format in available_format]
return urls
@router.get("/{file_uuid}/toGEOJSON", response_model=FeatureCollection, status_code=status.HTTP_200_OK)
async def convert_to_geojson(request: Request, file_uuid: str, token: Optional[str] = Header(None)):
file_record = await file_request_handler(file_uuid, request)
geojson_response = await to_geojson(file_record, stream=False)
if not geojson_response:
raise_422_exception()
file_name = f"{os.path.splitext(file_record.file_name)[0]}.json"
return FileResponse(
geojson_response, media_type='application/json', filename=file_name)
@router.get("/{file_uuid}/toCAD", status_code=status.HTTP_200_OK)
async def convert_to_dwg(request: Request, file_uuid: str, token: Optional[str] = Header(None)):
file_record = await file_request_handler(file_uuid, request)
dwg_response = await to_cad(file_record)
if not dwg_response:
raise_422_exception()
file_name = f"{os.path.splitext(file_record.file_name)[0]}.dxf"
return FileResponse(
dwg_response, media_type='application/dxf', filename=file_name)
@router.get("/{file_uuid}/toSHP", status_code=status.HTTP_200_OK)
async def convert_to_shp(request: Request, file_uuid: str, token: Optional[str] = Header(None)):
file_record = await file_request_handler(file_uuid, request)
shp_response = await to_shp(file_record)
if not shp_response:
raise_422_exception()
file_name = f"{os.path.splitext(file_record.file_name)[0]}.zip"
return FileResponse(
shp_response, media_type='application/zip', filename=file_name)
@router.get("/{file_uuid}/stream/geojson", response_model=FeatureCollection, status_code=status.HTTP_200_OK)
async def stream_geojson(request: Request, file_uuid: str, token: Optional[str] = Header(None)):
file_record = await file_request_handler(file_uuid, request)
geojson_response = await to_geojson(file_record, stream=True)
if not geojson_response:
raise_422_exception()
return FeatureCollection.parse_raw(geojson_response)
@router.get("/", status_code=status.HTTP_200_OK, response_model=List[PublicFile])
async def retrieve_users_files(request: Request, token: Optional[str] = Header(None)):
if not request.state.user:
raise_401_exception()
users_files = await files_repository.retrieve_users_files(request.state.user["user_id"])
return users_files
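# --- Hedged wiring sketch (not part of the original file) ---
# Mounts this router on a FastAPI application. The "/files" prefix and the tag
# are assumptions for illustration; the real app factory is not shown here.
def _demo_mount_router():
    from fastapi import FastAPI
    app = FastAPI()
    app.include_router(router, prefix="/files", tags=["files"])
    return app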
| [ "jossefaz@protonmail.com" ] | jossefaz@protonmail.com |
a926afb7922e05c0385c644c79fe80df6e229e01 | ff983c83c59011c91ef1d28ef0b6ce6bfd843d8e | /cola.py | 9f4af97d65cbd2e4bbb9bc14d98eccfe9ac5f6b7 | [] | no_license | jiterman/Flights-Manager | 7af81f025342988ef5a9497dd79f0849e87ba43c | 197d49aa3f012846521d3e06a992fcf0d8b2b9d9 | refs/heads/master | 2022-11-05T11:43:53.566874 | 2020-06-22T01:09:16 | 2020-06-22T01:09:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py |
class Cola:
def __init__(self):
self.items = []
def encolar(self, x):
self.items.append(x)
def desencolar(self):
if self.esta_vacia():
raise ValueError("La cola esta vacia")
return self.items.pop(0)
def esta_vacia(self):
return len(self.items) == 0
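# --- Hedged usage sketch (not part of the original file) ---
if __name__ == "__main__":
    cola = Cola()
    cola.encolar("first flight")
    cola.encolar("second flight")
    print(cola.desencolar())   # FIFO order: prints "first flight"
    print(cola.esta_vacia())   # False, one item left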
| [ "noreply@github.com" ] | noreply@github.com |
12d896a3fb16ddce598c3c26b8715790f3f41155 | bb7ee0c29834864964a445cc7cc68a742937791c | /file_crawler_w_yts_downloader.py | 667d19c6ff4a1df526fb6ea31d1ddfe5ce354fed | [] | no_license | quadcube/Automated-Yify-Subtitle-Downloader | 6a5ef01f70cb44e77f602bf8fac529c9f3436cf1 | 2254fccdebe61fa2871123267556b11cd75bb4c7 | refs/heads/master | 2020-08-23T08:38:44.358378 | 2020-04-12T14:18:36 | 2020-04-12T14:18:36 | 216,580,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,176 | py |
import os
import re
import urllib
import logging
import requests # pip install requests
from zipfile import ZipFile
from html2text import HTML2Text # pip install html2text
log_path = "/Users/quadcube/Project/Subtitle Tool"
log_name = "file_crawler_w_yts_downloader"
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s", handlers=[logging.FileHandler("{0}/{1}.log".format(log_path, log_name)), logging.StreamHandler()])
logger = logging.getLogger()
root_dir = "/Volumes/GoogleDrive/My Drive/Server Backup/WD_MyBookLive_2TB/Public/Shared Videos/" #os.getcwd()
root_url = "http://www.yifysubtitles.com" # 1) www.yifysubtitles.com 2) yts-subs.com (need refinement)
srt_language = ['English']
srt_manual_select = False
refresh_yts_srt = False # if YTS movie files are found, rename any srt files (.backup) in that folder and download the best srt
remove_invalid_srt = True
invalid_srt_size_threshold = 1024 # remove anything less than 1024 bytes if remove_invalid_srt = True
valid_movie_file_ext = ['.mp4', '.m4v', '.avi', '.mkv', '.mov', '.webm', '.flv', '.vob', '.rm', '.rmvb', '.wmv', '.m4v', '.mpeg', '.mpg', '.m2v', '.MTS', '.M2TS', '.TS']
def html2text(url):
raw_html = requests.get(url)
raw_html.raise_for_status() # raise exception if status code is not 200
h = HTML2Text()
h.ignore_links = False
return h.handle(raw_html.text) # html2text translate html to readable text
def main():
counter_movie = 0
counter_movie_w_srt = 0
counter_movie_dl_srt = 0
counter_movie_dl_srt_failed = 0
counter_movie_no_srt = 0
counter_no_movie = 0
for dir_name, subdir_list, file_list in os.walk(root_dir): # crawl thru current directory
if '/' in dir_name[len(root_dir):] or dir_name == root_dir:
continue # only transverse one level deep
else:
logger.debug('Found dir: {}'.format(dir_name))
found_srt = False
counter_movie += 1
for file_name in file_list:
if file_name.lower().endswith('.srt'):
if refresh_yts_srt == True and ('yts' in file_name.lower() or 'yify' in file_name.lower()):
logger.debug('Renaming srt file_list: {}'.format(file_list))
os.rename(dir_name + '/' + file_name, dir_name + '/' + file_name[:-4] + '.backup') # rename .srt to .backup
break
else:
logger.debug('Found file_list: {}'.format(file_list))
if remove_invalid_srt == True:
if os.stat(dir_name + '/' + file_name).st_size < invalid_srt_size_threshold:
logger.info('Removing file {}'.format(file_name))
os.remove(dir_name + '/' + file_name)
break
found_srt = True
counter_movie_w_srt += 1
break
if found_srt == False:
try:
found_movie = False
dir_name_list = dir_name[len(root_dir):].split("(", maxsplit=1)
dir_name_year = dir_name_list[1].split(")", maxsplit=1)[0]
search_query = dir_name_list[0].strip() # remove year and lead, trailing whitespace as yifisubtitle.com search query will return nothing
for i in range(search_query.count(' ') + 1): # i = 0, .replace() does nothing
if root_url == "http://www.yifysubtitles.com":
text_html = html2text(root_url + '/search?' + urllib.parse.urlencode({'q':search_query.replace(' ', ': ', i).replace(': ', ' ', i-1)})) # Try diff combinations of ":" in the search query
else: # yts-subs.com
text_html = html2text(root_url + '/search/' + urllib.parse.quote(search_query).replace(' ', ': ', i).replace(': ', ' ', i-1))
relevant_results = re.findall('\/movie-imdb\/.+\)\n+.\n+.+\n+.+year', text_html)
for result in relevant_results:
result_list = result.split(')\n\n[\n\n### ', maxsplit=1)
result_link = result_list[0]
result_name = result_list[1].split('\n\n')[0]
for j in range(5):
if result[-5 - j].isdigit(): # as long as not digit, backtrack until digit is found
result_year = result[-8 - j:-4 - j]
break
if result_name.lower() == search_query.lower().replace(' ', ': ', i).replace(': ', ' ', i-1) and dir_name_year == result_year:
logger.info('Found movie: {} Year: {}'.format(result_name, result_year))
found_movie = True
break
if found_movie == True:
break
if found_movie == True:
text_html = html2text(root_url + result_link)
#print(repr(text_html))
relevant_results = re.findall('\s\s\n\d{1,}\s?\|\s\s?\w+\s?\|\s\s?\[\s?subtitle\s.+\d\)\s\s\n\s\s\n', text_html, re.DOTALL) #re.findall('\s\s\n\d{1,}\s?\|\s\s?\w+\s?\|\s\s?\[\s?subtitle\s.+####\sTrailer', text_html, re.DOTALL)
if len(relevant_results) > 1:
logger.warning('Relevant result more than 1. {}'.format(dir_name))
if len(relevant_results) == 0:
logger.warning('No srt found on {}! {}'.format(root_url, dir_name))
else:
relevant_results = relevant_results[0].split(' \n')
subtitle_results = {}
subtitle_num = 0
for result in relevant_results:
if result != '':
if result[0].isnumeric():
result = result.replace('\n', '').replace(' ', '').split('|') # first remove the annoying \n, spaces and split according to tags
if result[1] in srt_language:
result_title_link = result[2].replace('[subtitle', '').split('](/subtitles')
subtitle_results[subtitle_num] = {'Rate': int(result[0]), 'Lang': result[1], 'Title': result_title_link[0], 'Link': '/subtitle' + result_title_link[1][:-1] + '.zip', 'Uploader': result[4][1:].split('](')[0] if result[3] == '' else result[3]}
#if srt_manual_select == True:
logger.info('({}) {}'.format(subtitle_num, subtitle_results[subtitle_num]))
subtitle_num += 1
if subtitle_num > 0: # check whether there's any filtered srt
if srt_manual_select == True and subtitle_num > 0:
while True:
try:
user_selection = int(input('Select subtitle (e.g. 0/1/2/...)'))
if user_selection < len(subtitle_results):
break
else:
raise
except:
print('Option is not valid!')
subtitle_results = subtitle_results[user_selection]
else: # Auto srt selection
subtitle_yts_rank = (None, 0) # subtitle_key, rating
subtitle_rank = (None, 0) # subtitle_key, rating
for subtitle_key, subtitle_value in subtitle_results.items():
if subtitle_yts_rank[1] <= subtitle_value['Rate'] and ('yts' in subtitle_value['Title'].lower() or 'yify' in subtitle_value['Title'].lower()): #prioritize YTS tags in title, since most movie files are obtained from YTS'
subtitle_yts_rank = (subtitle_key, subtitle_value['Rate'])
elif subtitle_rank[1] <= subtitle_value['Rate']:
subtitle_rank = (subtitle_key, subtitle_value['Rate'])
if subtitle_yts_rank[0] == None: # if YTS srt is not available, use non-YTS
subtitle_yts_rank = subtitle_rank
subtitle_results = subtitle_results[subtitle_yts_rank[0]]
logger.info(subtitle_results)
logger.debug(file_list)
movie_name = None
for file_name in file_list:
for file_type in valid_movie_file_ext:
if file_name.endswith(file_type):
                                            movie_name = file_name.replace(file_type, '.srt')
break
                                if movie_name is not None:
with open(dir_name + '/temp_srt.zip', 'wb') as srt_zip_file:
srt_zip_file.write(requests.get(root_url + subtitle_results['Link']).content) # TODO: yts-subs.com subtitles come from www.yifysubtitles.com, hence root_url won't work.
with ZipFile(dir_name + '/temp_srt.zip') as srt_zip_file:
srt_zip_file_list = srt_zip_file.namelist()
for srt_file in srt_zip_file_list:
if srt_file.lower().endswith('.srt'):
srt_zip_file.extract(srt_file, dir_name)
break
                                        os.rename(dir_name + '/' + srt_file, dir_name + '/' + movie_name) # rename srt to match movie file
os.remove(dir_name + '/temp_srt.zip')
counter_movie_dl_srt += 1
else:
logger.warning('No filtered srt found on {}! {}'.format(root_url, dir_name))
counter_movie_no_srt += 1
else:
logger.warning('No movie found on {}! {}'.format(root_url, dir_name))
counter_no_movie += 1
except Exception as error:
logger.exception(error)
counter_movie_dl_srt_failed += 1
#logger.info(text_html)
# Errors caused by line 57 is due to missing year info in dir_name
# Errors caused by bad html response code, ignore since there's nothing to do about it
logger.debug('Current stat -> Movie: {}\tMovie w srt: {}\tMovie dl srt: {}\tMovie dl srt failed: {}\tMovie no srt failed: {}\tNo movie: {}'.format(counter_movie, counter_movie_w_srt, counter_movie_dl_srt, counter_movie_dl_srt_failed, counter_movie_no_srt, counter_no_movie))
logger.info('Final stat -> Movie: {}\tMovie w srt: {}\tMovie dl srt: {}\tMovie dl srt failed: {}\tMovie no srt failed: {}\tNo movie: {}'.format(counter_movie, counter_movie_w_srt, counter_movie_dl_srt, counter_movie_dl_srt_failed, counter_movie_no_srt, counter_no_movie))
logging.info('Completed. Exiting...')
if __name__== "__main__":
main()
| [ "noreply@github.com" ] | noreply@github.com |
2e2d00ecfeb31b0168a0130af2aa68e6f2967de9 | aa245f4e900ab0f27eee9b0fb2d7c9f7d4172269 | /tests/test_utils.py | 5c5bd201679fb0fdf8b3403da887b2dcab97dcbe | [ "MIT" ] | permissive | Vetrovec/chainee | ed4edd4e92637b29fcf5ff0493de6f6983e66e98 | 3a1a300f86ad8aeb385d8de7f766dd035c039f04 | refs/heads/master | 2022-04-05T13:54:38.804711 | 2020-02-01T14:11:16 | 2020-02-01T14:11:16 | 235,657,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,992 | py |
from unittest import TestCase
import chainee.utils as utils
class TestUtils(TestCase):
def test_is_hex_string(self):
self.assertTrue(utils.is_hex_string("AbCdeF1234567890"), "is hex")
self.assertFalse(utils.is_hex_string("abcdefg"), "is not hex")
def test_validate_private_key(self):
self.assertTrue(
utils.validate_private_key("685CF62751CEF607271ED7190b6a707405c5b07ec0830156e748c0c2ea4a2cfe"),
"is valid private key"
)
self.assertFalse(
utils.validate_private_key("0000000000000000000000000000000000000000000000000000000000000000"),
"is not valid private key"
)
self.assertFalse(
utils.validate_private_key("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"),
"is not valid private key"
)
def test_validate_address(self):
self.assertTrue(
utils.validate_address("0000000000000000000000000000000000000000"),
"is valid address"
)
self.assertTrue(
utils.validate_address("c70f4891d2ce22b1f62492605c1d5c2fc1a8ef47"),
"is valid address"
)
self.assertFalse(
utils.validate_address("1234567890"),
"is not valid address"
)
self.assertFalse(
utils.validate_address("abcdefghijklmnopqrstuvwxyzabcdefghijklmn"),
"is not valid address"
)
def test_sha3(self):
self.assertEqual(
utils.sha3("abcdef"),
"8b8a2a6bc589cd378fc57f47d5668c58b31167b2bf9e632696e5c2d50fc16002"
)
self.assertEqual(
utils.sha3("test", False),
"36f028580bb02cc8272a9a020f4200e346e276ae664e45ee80745574e2f5ab80"
)
def test_generate_private_key(self):
self.assertTrue(
utils.validate_private_key(utils.generate_private_key()),
"should generate valid private key"
)
def test_get_pub_key(self):
self.assertEqual(
utils.get_pub_key("685cf62751cef607271ed7190b6a707405c5b07ec0830156e748c0c2ea4a2cfe"),
"6b2cc423e68813a13b4f0b3c7666939d20f845a40104a3c85db2d8a3bcfd9517620075fac7de10a94073ab9a09a9a8dd28bb44adaaf24bf334a6c6258524dd08"
)
def test_address_from_public(self):
self.assertEqual(
utils.address_from_public("6b2cc423e68813a13b4f0b3c7666939d20f845a40104a3c85db2d8a3bcfd9517620075fac7de10a94073ab9a09a9a8dd28bb44adaaf24bf334a6c6258524dd08"),
"c70f4891d2ce22b1f62492605c1d5c2fc1a8ef47"
)
def test_address_from_private(self):
self.assertEqual(
utils.address_from_private("685cf62751cef607271ed7190b6a707405c5b07ec0830156e748c0c2ea4a2cfe"),
"c70f4891d2ce22b1f62492605c1d5c2fc1a8ef47"
)
def test_sign(self):
self.assertEqual(
utils.sign("abcdef", "685cf62751cef607271ed7190b6a707405c5b07ec0830156e748c0c2ea4a2cfe"),
"b90e97baea96a2120a53d3ba34201705891e79beb8b86cfaf26a4e467264ac6e2481ffed9036a8403161d1d0bf7a7485f6e190d1ffdc1bccefd74fe6c547b30a01"
)
self.assertEqual(
utils.sign("test", "685cf62751cef607271ed7190b6a707405c5b07ec0830156e748c0c2ea4a2cfe", False),
"6f2dfa18ba808d126ef8d7664cbb5331a4464f6ab739f82981a179e47569550636daa57960b6bfeef2981ea61141ce34b2febe811394ce3b46ffde0ce121516101"
)
def test_recover(self):
self.assertEqual(
utils.recover("abcdef", "b90e97baea96a2120a53d3ba34201705891e79beb8b86cfaf26a4e467264ac6e2481ffed9036a8403161d1d0bf7a7485f6e190d1ffdc1bccefd74fe6c547b30a01"),
"c70f4891d2ce22b1f62492605c1d5c2fc1a8ef47"
)
self.assertEqual(
utils.recover("test", "6f2dfa18ba808d126ef8d7664cbb5331a4464f6ab739f82981a179e47569550636daa57960b6bfeef2981ea61141ce34b2febe811394ce3b46ffde0ce121516101", False),
"c70f4891d2ce22b1f62492605c1d5c2fc1a8ef47"
)
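# --- Hedged addition (not part of the original file) ---
# Lets the module be run directly, e.g. `python tests/test_utils.py`.
if __name__ == "__main__":
    import unittest
    unittest.main()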
| [ "stepan.vetrovec@gmail.com" ] | stepan.vetrovec@gmail.com |
b96c59645e8a2d9a6c3fc4d83acb6984da618953 | dfe50c0041a5dc23b63ea39369d115a8b74c56f0 | /array_167.py | e396760dac8bbbcd6d360a390f08503b38081aa2 | [] | no_license | cainingning/leetcode | 1c624caf6330d2e1af4835741e5f0748c3f9513b | 09b7121628df824f432b8cdd25c55f045b013c0b | refs/heads/master | 2021-07-07T14:28:09.207501 | 2019-02-22T08:48:55 | 2019-02-22T08:48:55 | 142,756,206 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py |
class Solution:
def twoSum(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
l_index = 0
r_index = len(numbers) - 1
while l_index < r_index:
if numbers[l_index] + numbers[r_index] == target:
return [l_index, r_index]
elif numbers[l_index] + numbers[r_index] < target:
l_index += 1
else:
r_index -= 1
return []
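# --- Hedged usage check (not part of the original file) ---
# Two-pointer search over a sorted input. Note this implementation returns
# 0-based indices, while LeetCode 167 itself expects 1-based positions.
if __name__ == "__main__":
    print(Solution().twoSum([2, 7, 11, 15], 9))  # -> [0, 1]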
| [ "499814159@qq.com" ] | 499814159@qq.com |
79e2b660e292e440ae352f3b6b11c484f59e6ad4 | ad00e2f10ae396a02ded81d90e31e90a8999fbc8 | /kaggle/DigitRecognizer/tensorflow-cnn2.py | c32ba7704e1c74578cabd9e8f115fde48eed94a7 | [] | no_license | yixiaoyang/SmallData | a8c2f8525cf12b6c2e719c5aca0dee1580ce7215 | 6643ac67a150e1d7fdb924c8dde501f8c72fd40f | refs/heads/master | 2021-01-17T09:55:31.630233 | 2020-04-02T18:19:26 | 2020-04-02T18:19:26 | 59,277,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,728 | py |
# coding: utf-8
#!/usr/bin/python
import tensorflow as tf
import pandas as pd
import numpy as np
import time
class DigitsModelCNN(object):
def __init__(self):
self.train_input = tf.placeholder(tf.float32, shape=[None,784])
self.train_out = tf.placeholder(tf.float32, shape=[None,10])
self.keep_prob = tf.placeholder(tf.float32)
self.sess = tf.Session()
        # 21000 samples => 100 * 210 batches
self.batch_size = 100
self.epochs = 210*16
self.learn_rate = 5e-4
'''
@func Computes a 2-D convolution given 4-D input and filter tensors.
@param input 4-D input tensor of shape [batch, in_height, in_width, in_channels]
filter 4-D filter / kernel tensor of shape [filter_height, filter_width, in_channels, out_channels]
@return
'''
def conv2d(self, input, filter, stride_w=1, stride_h=1):
return tf.nn.conv2d(input, filter, strides=[1,stride_w,stride_h,1], padding='SAME')
'''
@func Performs the max pooling on the input.
@param input 4-D Tensor with shape [batch, height, width, channels] and type tf.float32
ksize A list of ints that has length >= 4. The size of the window for each dimension of the input tensor.
strides A list of ints that has length >= 4. The stride of the sliding window for each dimension of the input tensor
@return
'''
def max_pool_2x2(self, input, stride_w=2, stride_h=2):
return tf.nn.max_pool(input, ksize=[1,2,2,1], strides=[1,stride_w,stride_h,1], padding="SAME")
'''
@func outputs random values from a truncated normal distribution.
'''
def init_w(self,shape):
# the standard deviation is 0.1
value = tf.truncated_normal(shape=shape, stddev=0.1)
return tf.Variable(value)
'''
@func outputs random values as bias
'''
def init_b(self,shape):
value = tf.constant(0.1, shape=shape)
return tf.Variable(value)
'''
    @note LeNet-5 style architecture (as implemented below)
        layer       operation        feature-maps     kernel  stride     size      activation
        in          input            1 (gray image)   -       -          28*28     -
        C1          convolution      16               5*5     1          28*28     relu
        S2          max pool         16               2*2     2          14*14     relu
        C3          convolution      32               5*5     1          14*14     relu
        S4          max pool         32               2*2     2          7*7       relu
        F5          fully connected  -                -       -          256       relu
        out         fully connected  -                -       -          10        -
'''
def build(self):
self.train_input = tf.placeholder(tf.float32, shape=[None,784])
self.input = tf.reshape(self.train_input, [-1, 28, 28, 1])
self.f_c1 = self.init_w([5,5,1,16])
self.b_c1 = self.init_b([16])
self.c1 = tf.nn.relu(self.conv2d(self.input, self.f_c1) + self.b_c1)
self.s2 = self.max_pool_2x2(self.c1)
self.f_c3 = self.init_w([5,5,16,32])
self.b_c3 = self.init_b([32])
self.c3 = tf.nn.relu(self.conv2d(self.s2, self.f_c3) + self.b_c3)
self.s4 = self.max_pool_2x2(self.c3)
self.w_f5 = self.init_w([7*7*32, 256])
self.b_f5 = self.init_b([256])
self.x_f5 = tf.reshape(self.s4, [-1,7*7*32])
self.f5 = tf.nn.relu(tf.matmul(self.x_f5, self.w_f5) + self.b_f5)
# out@10
self.f5_drop = tf.nn.dropout(self.f5, self.keep_prob)
self.w_out = self.init_w([256,10])
self.b_out = self.init_b([10])
self.out = tf.nn.softmax(tf.matmul(self.f5_drop, self.w_out) + self.b_out)
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.out, labels=self.train_out))
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learn_rate).minimize(self.loss)
predict = tf.equal(tf.argmax(self.out,1), tf.argmax(self.train_out,1))
self.accuracy = tf.reduce_mean(tf.cast(predict, tf.float32))
def train(self, train_x, train_y, test_x, test_y, keep_prob=0.1):
print("start training")
self.sess.run(tf.global_variables_initializer())
batch_start = 0
batch_end = batch_start + self.batch_size
print(self.train_input.shape)
print(self.train_out.shape)
for epoch in range(self.epochs):
_, loss, prob = self.sess.run([self.optimizer, self.loss, self.out],feed_dict={
self.train_input : train_x[batch_start:batch_end],
self.train_out: train_y[batch_start:batch_end],
self.keep_prob : keep_prob
})
if epoch %100 == 0:
train_accuracy = self.sess.run(self.accuracy, feed_dict={
self.train_input: train_x[0:1024],
self.train_out: train_y[0:1024],
self.keep_prob: 1.0
})
validate_accuracy = self.sess.run(self.accuracy, feed_dict={
self.train_input: test_x,
self.train_out: test_y,
self.keep_prob: 1.0
})
print("epoch %d, training accuracy %g, validate accuracy %g" % (epoch, train_accuracy, validate_accuracy))
batch_start = batch_end
batch_end = batch_start + self.batch_size
if(batch_end > train_x.shape[0]):
print("reset batch")
batch_start = 0
batch_end = batch_start + self.batch_size
train_x, train_y = self.permutation(train_x, train_y)
print("training done")
    def permutation(self, x, y):
sequence = np.random.permutation(x.shape[0])
return x[sequence], y[sequence]
def info(self):
print("c1,s2,c3,s4,c5 shape:")
print(self.c1.shape)
print(self.s2.shape)
print(self.c3.shape)
print(self.s4.shape)
print(self.f5.shape)
print('-'*16)
print(train_x.shape)
print(train_y.shape)
def dense_to_one_hot(labels_dense, num_classes):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
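# Illustrative sketch (not part of the original script, kept commented out):
# dense_to_one_hot maps integer labels to one-hot rows, e.g. labels [2, 0]
# with num_classes=3 become [[0, 0, 1], [1, 0, 0]].
# print(dense_to_one_hot(np.array([2, 0]), 3))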
def load_data(filename, train_data=True, split=0.9):
data_frame = pd.read_csv(filename)
# (42000, 785)
print(data_frame.shape)
train_data_len = data_frame.shape[0]
train_data_split = int(train_data_len*split)
print(train_data_split)
train_x = data_frame.iloc[:train_data_split, 1:].values
train_x = train_x.astype(np.float)
train_x = np.multiply(train_x, 1.0/255.0)
train_y = data_frame.iloc[:train_data_split, 0].values
train_y = dense_to_one_hot(train_y,10)
validate_x = data_frame.iloc[train_data_split:, 1:].values
validate_x = validate_x.astype(np.float)
validate_x = np.multiply(validate_x, 1.0/255.0)
validate_y = data_frame.iloc[train_data_split:, 0].values
validate_y = dense_to_one_hot(validate_y,10)
print(train_x.shape)
print(train_y.shape)
print(validate_x.shape)
print(validate_y.shape)
return train_x, train_y, validate_x, validate_y
train_x, train_y, validate_x, validate_y = load_data('./data/train.csv')
print(train_y.shape)
print(train_y[0:4,])
cnn = DigitsModelCNN()
cnn.build()
cnn.info()
time_start = time.time()
cnn.train(train_x, train_y, validate_x, validate_y)
time_end = time.time()
print("total training time:")
print(time_end-time_start)
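# Hypothetical follow-up sketch (not in the original script): after training, Kaggle's
# test.csv could be scored through the same placeholders, assuming it holds 784 pixel
# columns and no label column.
# test_x = np.multiply(pd.read_csv('./data/test.csv').values.astype(np.float), 1.0/255.0)
# preds = cnn.sess.run(tf.argmax(cnn.out, 1), feed_dict={cnn.train_input: test_x, cnn.keep_prob: 1.0})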
|
[
"hityixiaoyang@gmail.com"
] |
hityixiaoyang@gmail.com
|
066a5edb911a9b5069125b1aee9dfad1bbc78dbb
|
7d74195bd00cbe8516670c8fe718e983106c9830
|
/src/data_types/test_collections_ordereddict.py
|
ee4fe8c69fee1eec3bc707d6f7b10d39022930d8
|
[] |
no_license
|
masa4u/example_python
|
7ab3d48020855ad493336afcd8d0c02eb3104b2b
|
7bdee4cb8e90255b20353f7f95d3e879f6462638
|
refs/heads/master
| 2021-01-18T14:10:56.539659
| 2017-03-28T12:52:08
| 2017-03-28T12:52:08
| 30,511,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
d = {'banana': 3, 'apple':4, 'pear': 1, 'orange': 2}
from collections import OrderedDict
print OrderedDict(sorted(d.items(), key=lambda t:t[0]))
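# A minimal companion sketch (assumption: same Python 2 snippet as above): sorting
# by value instead of key only changes the key function passed to sorted().
# print OrderedDict(sorted(d.items(), key=lambda t: t[1]))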
|
[
"masa4u@gmail.com"
] |
masa4u@gmail.com
|
1c9cb402c43d4cdc6747cd94f70df60e1fb424bf
|
4276667227d01d225bcc083e9d82439d52f6cd6c
|
/10.io_code/4.serialization.py
|
8781bf10dcf222c4764dafc10d9adcaa30f0cc42
|
[] |
no_license
|
JianxiangChan/python_learning
|
82e24498e96369c1a25c7cb557e80f7baf5e7961
|
488e6f6cb0591b8fce9261b072346c745b19cb2d
|
refs/heads/master
| 2020-06-05T22:01:54.429817
| 2019-12-16T14:40:14
| 2019-12-16T14:40:14
| 192,557,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
# -*- coding: utf-8 -*-
import pickle
d = dict(name = 'bob', age = 20, score = 88)
print(pickle.dumps(d)) #use of dumps
with open('dump.txt','wb') as f:
pickle.dump(d,f)
with open('dump.txt','rb') as f:
d = pickle.load(f)
print(d)
import json
d = dict(name = 'bob', age = 20, score = 88)
print(json.dumps(d))
class Student(object):
def __init__(self,name,age,score):
self.name = name
self.age = age
self.score = score
s = Student('bob', 20 , 80)
def student2dict(std):
return {
'name' : std.name,
'age' : std.age,
'score' : std.score
}
print(json.dumps(s, default = student2dict))
print(json.dumps(s, default = lambda obj: obj.__dict__))
s = json.dumps(s, default = lambda obj: obj.__dict__)
def dict2student(d):
return Student(d['name'],d['age'],d['score'])
print(json.loads(s , object_hook = dict2student))
obj = dict(name='小明', age=20)
s = json.dumps(obj, ensure_ascii=False)
print(s)
s = json.dumps(obj)
print(s)
|
[
"15651898806@163.com"
] |
15651898806@163.com
|
7e0772e81bc42eb837cd3dce54f0f187bcad8970
|
3505132210ee8e48c2f216400aed6c2478075a86
|
/feature_selection/find_signature.py~
|
e0d9df6158e852a573058dd3eaff86b9c629a9bd
|
[] |
no_license
|
yutsai84/Enron_POI_identifier
|
7610da2403a63857c3963977096fef9565a95b3f
|
03a27f997641fd97eaa78aec446b9b3704fd15df
|
refs/heads/master
| 2019-04-03T12:10:48.198921
| 2018-04-23T02:47:28
| 2018-04-23T02:47:28
| 66,225,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,252
|
#!/usr/bin/python
import pickle
import numpy
numpy.random.seed(42)
### The words (features) and authors (labels), already largely processed.
### These files should have been created from the previous (Lesson 10)
### mini-project.
words_file = "../text_learning/your_word_data.pkl"
authors_file = "../text_learning/your_email_authors.pkl"
word_data = pickle.load( open(words_file, "r"))
authors = pickle.load( open(authors_file, "r") )
### test_size is the percentage of events assigned to the test set (the
### remainder go into training)
### feature matrices changed to dense representations for compatibility with
### classifier functions in versions 0.15.2 and earlier
from sklearn import cross_validation
features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(word_data, authors, test_size=0.1, random_state=42)
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
features_train = vectorizer.fit_transform(features_train)
features_test = vectorizer.transform(features_test).toarray()
### a classic way to overfit is to use a small number
### of data points and a large number of features;
### train on only 150 events to put ourselves in this regime
features_train = features_train[:150].toarray()
labels_train = labels_train[:150]
### your code goes here
from sklearn import tree
clf=tree.DecisionTreeClassifier()
clf.fit(features_train,labels_train)
pred=clf.predict(features_test)
print "pred=",pred
import sklearn
accuracy=sklearn.metrics.accuracy_score(pred,labels_test)
print "accuracy:\t",accuracy
#print importance>0.2 and its index
importances=clf.feature_importances_
import numpy as np
#indices=np.argsort(importances)[::-1] #sort descending
#print "Feature ranking:"
#for i in range(10):
# print "{} feature No.{} ({})".format(i+1,indices[i],importances[indices[i]])
for i in range(len(importances)):
    if importances[i]>=0.2:
        print "Feature No.{} with importance {}".format(i,importances[i])
        #the output is 33614,0.76
        #print which feature causes the problem
        print "the feature causing the problem: ", vectorizer.get_feature_names()[i]
|
[
"yuchengtsai84@gmail.com"
] |
yuchengtsai84@gmail.com
|
|
051bf23137383141aa82658c92056367cacb34f9
|
d5c159e43758e5bee418a75cbb856ff2bbd9e285
|
/bitcoinexp/routing.py
|
586038d988de0a21eb789a7c4e7609f61940d059
|
[] |
no_license
|
okcdbu/bitcoinexperiment
|
b2b1ab3f54de12fb215be890cf6f4d587bcaa146
|
46af6018210fddc64464a4a867540efc894b5b01
|
refs/heads/master
| 2023-05-24T06:33:43.703070
| 2021-06-08T08:11:19
| 2021-06-08T08:11:19
| 350,988,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
from flask import Flask, render_template
from bitcoinexp.trading import get_chart_data, run
from flask_socketio import SocketIO
import threading
app = Flask(__name__)
socketio = SocketIO(app)
thread_lock = threading.Lock()
@app.route("/")
@app.route("/chart")
def chart_visualization():
return render_template('chart.html')
@socketio.on("connect")
def init_data():
data = get_chart_data("BTC")
jsondata = data.to_json(orient='records') # get json data like {{open,high,low,close,date},...}
worker = threading.Thread(target=run, args=(socketio,))
worker.start()
socketio.emit('response', jsondata)
|
[
"okcdbu@gmail.com"
] |
okcdbu@gmail.com
|
c78554bfaf8bee6f13777307c2c97139d339f973
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02390/s457532968.py
|
390a81631bac8de1e3a93db961d2ef9a82cb8ed1
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
import sys
line = sys.stdin.readline()
inp = int(line)
h,mod = inp//3600, inp%3600
m,mod = mod//60, mod%60
s = mod
print ("%d:%d:%d" % (h,m,s))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
dc6217c8436382f7a1d6ad3ae9face803e235091
|
931f1a920913dc21ea6cb5b4b591e05259abf490
|
/input_files/create_text_hdfs.py
|
414999ed6bf77e3288c4a4c21af9200eeb0fa107
|
[] |
no_license
|
cgeroux/big_data_benchmark
|
f7bf3dbce55ae234c4548704f74710fa2f57cfef
|
b612665d0bda6e20283148fd9ba7be398f8d24d2
|
refs/heads/master
| 2021-01-10T13:34:19.043658
| 2017-10-12T15:24:04
| 2017-10-12T15:24:04
| 53,532,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,341
|
py
|
#!/usr/bin/env python
import random
import optparse as op
from subprocess import Popen, PIPE,list2cmdline
import os
def addParserOptions(parser):
"""Adds command line options
"""
#these options apply globally
parser.add_option("-f",dest="forceOverwrite",default=False,action="store_true"
,help="Forces overwriting of an existing output file [not default].")
parser.add_option("--line-length",dest="lineLength",type="int",default=80
,help="Set the length of lines in the file [default: %default]")
parser.add_option("--lines-split",dest="splitLines",default=True
,action="store_true"
,help="Separate file into lines of length LINELENGTH or less [default].")
parser.add_option("--lines-not-split",dest="splitLines",default=True
,action="store_false"
,help="File will be a single line [not default].")
parser.add_option("--file-size",dest="fileSize",type="int",default=1000
,help="The size of the file in bytes [default: %default bytes]")
parser.add_option("-o",dest="outputFileName",type="string"
,default="generated.txt",help="Specify the name of the output file "
+"and path within HDFS [default: \"%default\"].")
parser.add_option("--seed-file",dest="seedFile",default=1,help="Seed used "
+"for randomly choosing words from the dictionary [default: %default].")
parser.add_option("--dictionary-file",dest="dictionaryFile",type="string"
,default="english-wordlist.txt"
,help="Specify a file containing a list of words separated by newlines "
+"to be used as the language dictionary. This option has no effect if "
+"the option --randomly-generate-dict is specified "
+"[default: \"%default\"].")
parser.add_option("--randomly-generate-dict",dest="genDict",default=False
,action="store_true",help="If set will create a dictionary by selecting"
+" random letters for NUMWORDS words of a randomly chosen word length "
+"between MINWORDLENGTH and MAXWORDLENGTH. See \"Randomly generated "
+"dictionary options\" [default: %default].")
parser.add_option("--hdfs-upload-size",dest="hdfsUploadSize",type="int"
,default=100000000
,help="Size in bytes between uploads to HDFS [default: %default].")
randDictGroup=op.OptionGroup(parser,"Randomly generated dictionary options")
randDictGroup.add_option("--min-word-length",dest="minWordLength",default=1
,type="int",help="Sets the minimum word length [default: %default].")
randDictGroup.add_option("--max-word-length",dest="maxWordLength",default=10
,type="int",help="Sets the maximum word length [default: %default].")
randDictGroup.add_option("--num-words",dest="numWords",default=1000
,type="int",help="Sets the maximum word length [default: %default].")
randDictGroup.add_option("--seed-dict",dest="seedDict",default=1,help="Seed used "
+"for randomly generating dictionary [default: %default].")
parser.add_option_group(randDictGroup)
def parseOptions():
"""Parses command line options
"""
parser=op.OptionParser(usage="Usage: %prog [options]"
,version="%prog 1.0",description=r"Randomly generates the content of a text file in HDFS.")
#add options
addParserOptions(parser)
#parse command line options
return parser.parse_args()
def createGiberishDict(numWords,minWordLength,maxWordLength,seed=1):
"""Creates a dictionary of numWords created by randomly selecting a word
  length between minWordLength and maxWordLength and then populating it with
randomly selected lower case letters.
"""
characterLow=97
characterHigh=122
random.seed(seed)
#create a dictionary of words
dictionary={}
for i in range(numWords):
length=random.randint(minWordLength,maxWordLength)
word=""
for j in range(length):
character=chr(random.randint(characterLow,characterHigh))
word+=character
dictionary[i]=word
return dictionary
def loadDictFromFile(fileName):
  """Loads a dictionary from a file containing words separated by newline
  characters.
"""
dictionary={}
count=0
for line in open(fileName,'r'):
line=line.strip()
line=line.replace("(a)","")
if len(line)>0:
dictionary[count]=line.strip()
count+=1
return dictionary
def performCommand(cmd,throwOnError=True):
#upload file to HDFS
process=Popen(cmd,stdout=PIPE,stderr=PIPE)
stdout,stderr=process.communicate()
returnCode=process.returncode
if throwOnError:
if (returnCode!=0):
raise Exception("error encounter while executing command "
+str(cmd)+" got stdout=\""+str(stdout)+"\" and stderr=\""
+str(stderr)+"\" and return code="+str(returnCode))
return returnCode
def main():
#parse command line options
(options,args)=parseOptions()
#create a dictionary to use to construct the file
if options.genDict:
dictionary=createGiberishDict(options.numWords
,options.minWordLength,options.maxWordLength
,seed=options.seedDict)
else:
dictionary=loadDictFromFile(options.dictionaryFile)
#should check if the hdfs file is there and remove it if it is
cmd=["hdfs","dfs","-stat",options.outputFileName]
returnCode=performCommand(cmd,throwOnError=False)#throwOnError=False since we will handle the error here
if(returnCode==0):
overwrite=False
if not options.forceOverwrite:
#check if we should overwrite it
overWriteResponse=raw_input("File exists, overwrite? (y/n)")
if overWriteResponse in ["y","Y","Yes","T","True","1"]:
overwrite=True
else:
overwrite=True
#remove the file
if overwrite:
cmd=["hdfs","dfs","-rm",options.outputFileName]
performCommand(cmd)
else:
print "Not overwriting pre-existing file in HDFS \"" \
+options.outputFileName+"\""
quit()
#create the command to upload to HDFS
tempFileName="tmp.txt"
cmd=["hdfs","dfs","-appendToFile",tempFileName,options.outputFileName]
#create file from the dictionary
sizeTotal=0
sizeToUpload=0
f=open(tempFileName,'w')
lenDict=len(dictionary.keys())-1
random.seed(options.seedFile)
sizePerHDFAppend=options.hdfsUploadSize
while(sizeTotal<options.fileSize):
#create a line to add to the file
line=""
lineLen=0
while(True):
wordKey=random.randint(0,lenDict)
word=dictionary[wordKey]
lineLen+=len(word)+1
if lineLen<options.lineLength:
line+=word+" "
else:
break
#write the line to the file
if options.splitLines:
line+="\n"
f.write(line)
sizeTotal+=len(line)
sizeToUpload+=len(line)
#if temporary file big enough upload to HDFS
if sizeToUpload>=sizePerHDFAppend:
print "uploading "+str(sizeToUpload)+" bytes to hdfs"
#close the file
f.close()
#upload file to HDFS
performCommand(cmd)
#remove file after upload and open a new file for the next chunk
os.remove(tempFileName)
f=open(tempFileName,'w')
sizeToUpload=0
#close the temporary file
f.close()
#upload any extra content written to the temporary file since last upload
if sizeToUpload>0:
print "uploading remaining "+str(sizeToUpload)+" bytes to hdfs"
performCommand(cmd)
#remove temporary file
os.remove(tempFileName)
if __name__ == "__main__":
main()
|
[
"chris.m.geroux@gmail.com"
] |
chris.m.geroux@gmail.com
|
2e77e1bf2950b9ae5d4e921023ac91b6785e05f8
|
7474675ad1a50bd41792ef9c4de09924acbc8f17
|
/KNN/iris.py
|
85f0cf2fd3a28cafc5e979950791eb122826a8a8
|
[] |
no_license
|
itsmefarhan/MachineLearning
|
5f2b756e31ab199701ac8f223c420634a0d04478
|
6df397f583222575ac9035350e76f6a9b9c0a2eb
|
refs/heads/master
| 2020-09-05T09:24:56.605009
| 2019-11-11T20:07:39
| 2019-11-11T20:07:39
| 220,056,068
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
dataset = load_iris()
# print(dataset.keys())
# print(dataset.data)
X_train, X_test, y_train, y_test = train_test_split(dataset['data'], dataset['target'], test_size = 0.2, random_state = 0)
model = KNeighborsClassifier()
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
accuracy = model.score(X_test, y_test)
print(accuracy)
cm = confusion_matrix(y_test, y_predict)
print(cm)
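# Hypothetical extension sketch (not in the original file): KNeighborsClassifier
# defaults to n_neighbors=5; a quick sweep shows how accuracy varies with k.
# for k in (1, 3, 5, 7):
#     m = KNeighborsClassifier(n_neighbors=k).fit(X_train, y_train)
#     print(k, m.score(X_test, y_test))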
|
[
"farhan.farooq@live.com"
] |
farhan.farooq@live.com
|
f23488ded619c675fe870811001ad1b85b57c931
|
4eaf9f8ef3eb2addf6a4fb0a6bc4f41b8584bbc6
|
/Week10/src/button.py
|
626572ee8c47c89608c588dd40fe26a8514f7b33
|
[
"MIT"
] |
permissive
|
Kids-Hack-Labs/Winter2021
|
3d6afd99ae0c77ae7a9767d08c6f89b9e92da34e
|
4c66d5cf05045d2724db2393a0c2c581f314f903
|
refs/heads/main
| 2023-04-01T13:45:45.200124
| 2021-04-07T04:32:14
| 2021-04-07T04:32:14
| 329,418,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,819
|
py
|
from pygame import Color, Rect, Surface
import pygame.mouse as pm
from src.text_generator import TextGenerator
class Button():
STATES = ("NONE","OUT","HOVER","DOWN","UP")
def __init__(self, button_text, text_info, button_info, func):
self.colours = {Button.STATES[1]:button_info["out"],
Button.STATES[2]:button_info["hover"],
Button.STATES[3]:button_info["down"],
Button.STATES[4]:button_info["up"]}
self.rect = Rect(button_info["rect"])
self.surf = Surface(self.rect.size)
self.text_surf = TextGenerator.generate_text(button_text, text_info, None)
self.text_rect = self.text_surf.get_rect()
self.text_rect.center = (self.rect.width/2, self.rect.height/2)
self.on_click = func
self.current_state = Button.STATES[1]
self.previous_state = Button.STATES[1]
self.active = True
def update(self, delta):
if self.active:
self.current_state = self.check_states()
if self.previous_state == Button.STATES[3] and\
self.current_state == Button.STATES[2]:
self.on_click()
self.previous_state = self.current_state
def render(self,target):
self.surf.fill(self.colours[self.current_state])
self.surf.blit(self.text_surf, self.text_rect)
target.blit(self.surf, self.rect)
def check_states(self):
mouse_pos = pm.get_pos()
mouse_buttons = pm.get_pressed()
if not self.rect.collidepoint(mouse_pos):
return Button.STATES[1]
else:
if not mouse_buttons[0]:
return Button.STATES[2]
else:
return Button.STATES[3]
def deactivate(self):
self.active = False
|
[
"hercules.diascampos@kidshacklabs.com"
] |
hercules.diascampos@kidshacklabs.com
|
b8a62fa93f2532714aacb95518a96010cd6afe03
|
fffa7b13491deadfc649dfd035099ef764d8d303
|
/api/tests/mathematical_object_detail.py
|
3ecfae51fd020c715c1a8504027fcc57a26800f4
|
[
"MIT"
] |
permissive
|
Gawaboumga/OEMS
|
3b12b8bebbe4b29716e8be4e22034ec394af36da
|
1e60fa1f350f4cf1ca2e48072e0b4228eeb15024
|
refs/heads/master
| 2022-12-14T11:15:55.797241
| 2019-01-22T10:22:42
| 2019-01-22T10:22:42
| 147,358,167
| 0
| 0
|
MIT
| 2022-12-08T01:26:59
| 2018-09-04T14:20:58
|
Python
|
UTF-8
|
Python
| false
| false
| 4,231
|
py
|
from rest_framework import status
from rest_framework.test import APITestCase
from django.test import override_settings
from django.urls import reverse
from oems.settings import TEST_MEDIA_ROOT
from api.models import MathematicalObject
from api.tests import utils
@override_settings(MEDIA_ROOT=TEST_MEDIA_ROOT)
class MathematicalObjectDetailTests(APITestCase):
def test_retrieve_small_mathematical_object(self):
utils.log_as(self, utils.UserType.STAFF)
representation = 'test'
type = 'S'
data = {
'latex': representation,
'type': type,
}
response = self.client.post(reverse('api:mathematical_objects'), data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.get(reverse('api:mathematical_object', kwargs={'pk': response.data['id']}))
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.data
self.assertEqual(representation, response_data['latex'])
self.assertEqual(type, response_data['type'])
def test_retrieve_full_mathematical_object(self):
utils.log_as(self, utils.UserType.STAFF)
representation = 'test'
type = 'S'
function = 'function'
name = 'name'
tag = 'tag'
convergence_radius = '|z < 1|'
data = {
'latex': representation,
'type': type,
'functions': [{'function': function}],
'names': [{'name': name}],
'tags': [{'tag': tag}],
'convergence_radius': convergence_radius
}
response = self.client.post(reverse('api:mathematical_objects'), data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.get(reverse('api:mathematical_object', kwargs={'pk': response.data['id']}))
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.data
self.assertEqual(representation, response_data['latex'])
self.assertEqual(type, response_data['type'])
self.assertEqual(function, response_data['functions'][0]['function'])
self.assertEqual(name, response_data['names'][0]['name'])
self.assertEqual(tag, response_data['tags'][0]['tag'])
self.assertEqual(convergence_radius, response_data['convergence_radius'])
def test_put_small_mathematical_object(self):
utils.log_as(self, utils.UserType.STAFF)
representation = 'test'
type = 'S'
data = {
'latex': representation,
'type': type,
}
response = self.client.post(reverse('api:mathematical_objects'), data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
new_type = 'P'
data['type'] = new_type
response = self.client.put(reverse('api:mathematical_object', kwargs={'pk': response.data['id']}), data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.data
self.assertEqual(representation, response_data['latex'])
self.assertEqual(new_type, response_data['type'])
def test_delete_full_mathematical_object(self):
utils.log_as(self, utils.UserType.STAFF)
representation = 'test'
type = 'S'
function = 'function'
name = 'name'
tag = 'tag'
convergence_radius = '|z < 1|'
data = {
'latex': representation,
'type': type,
'functions': [{'function': function}],
'names': [{'name': name}],
'tags': [{'tag': tag}],
'convergence_radius': convergence_radius
}
response = self.client.post(reverse('api:mathematical_objects'), data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.delete(reverse('api:mathematical_object', kwargs={'pk': response.data['id']}), data, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(MathematicalObject.objects.count(), 0)
|
[
"yourihubaut@hotmail.com"
] |
yourihubaut@hotmail.com
|
3e4331ea4515d8ab9a244201033c44ae2211e3db
|
d4cd2476f8fa8a7d94e183a68bd0678971310c5b
|
/checkio/06_Ice_Base/06_IceBase_04_FunnyAddition.py
|
9030b3fb8d1063f001b7c9e2d024d3d76144968e
|
[] |
no_license
|
gwqw/LessonsSolution
|
b495579f6d5b483c30d290bfa8ef0a2e29515985
|
0b841b1ae8867890fe06a5f0dcee63db9a3319a3
|
refs/heads/master
| 2020-07-05T19:15:53.758725
| 2019-10-01T11:34:44
| 2019-10-01T11:34:44
| 202,744,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
def checkio(data):
"""The sum of two integer elements"""
return sum(data)
if __name__ == '__main__':
assert checkio([5, 5]) == 10, 'First'
assert checkio([7, 1]) == 8, 'Second'
print('All ok')
|
[
"="
] |
=
|
1d6ae632a35692b47fe5e5803717396272bfc1bd
|
ba84b4776efbfd114be6e489e206c61bcc93cf1d
|
/ScoreChanger.py
|
bcee5df8c7cda74ef7a3328cc951dd1ab5fdc825
|
[] |
no_license
|
ReiraH/Pinball-Machine
|
5ad94267e3d4eb642aa03e4d4606e05cc6417431
|
c4baee924bb8655a1464f6eebd0df0887bf95615
|
refs/heads/master
| 2020-03-21T05:20:39.219845
| 2018-06-21T11:03:06
| 2018-06-21T11:03:06
| 138,156,415
| 0
| 0
| null | 2018-06-21T11:03:07
| 2018-06-21T10:35:23
| null |
UTF-8
|
Python
| false
| false
| 8,332
|
py
|
import RPi.GPIO as GPIO
from time import sleep
class ScoreChanger(object):
HIGH = 0
LOW = 1
digitOnes = 24
digitTens = 23
digitHundreds = 15
digitThousands = 18
A = 0
B = 0
C = 0
D = 0
active = False
GPIO.setmode(GPIO.BCM)
GPIO.setup(digitOnes, GPIO.OUT)
GPIO.output(digitOnes, LOW)
GPIO.setup(digitTens, GPIO.OUT)
GPIO.output(digitTens, LOW)
GPIO.setup(digitHundreds, GPIO.OUT)
GPIO.output(digitHundreds, LOW)
GPIO.setup(digitThousands, GPIO.OUT)
GPIO.output(digitThousands, LOW)
print "HI I AM A SCORECHANGER!!!!"
state = 0
coilActive = False
timeEnabled = 0.0
maxTimeEnabled = 0.07
def changeScore(self,score, deltaTime):
if self.state == 0:
inputString = str(score)
while(inputString.__len__() != 4):
inputString = "0" + inputString
ScoreArray = list(inputString)
self.newA = int(ScoreArray[0])
self.atemp = self.newA
self.newB = int(ScoreArray[1])
self.btemp = self.newB
self.newC = int(ScoreArray[2])
self.ctemp = self.newC
self.newD = int(ScoreArray[3])
self.dtemp = self.newD
print str(self.newD)
if self.newA < self.A:
self.newA += 10
if self.newB < self.B:
self.newB += 10
if self.newC < self.C:
self.newC += 10
if self.newD < self.D:
self.newD += 10
self.state = 1
elif self.state == 1:
if self.coilActive == False:
if self.newA > self.A:
self.timeEnabled+=deltaTime
if(self.timeEnabled>self.maxTimeEnabled):
GPIO.output(self.digitThousands, self.HIGH)
self.coilActive = True
self.timeEnabled = 0.0
self.newA-=1
else:
self.state = 2
else:
self.timeEnabled += deltaTime
if self.timeEnabled > self.maxTimeEnabled:
GPIO.output(self.digitThousands, self.LOW)
self.coilActive = False
self.timeEnabled = 0
elif self.state == 2:
if self.coilActive == False:
if self.newB > self.B:
self.timeEnabled+=deltaTime
if(self.timeEnabled>self.maxTimeEnabled):
GPIO.output(self.digitHundreds, self.HIGH)
self.coilActive = True
self.timeEnabled = 0.0
self.newB-=1
else:
self.state = 3
else:
self.timeEnabled += deltaTime
if self.timeEnabled > self.maxTimeEnabled:
GPIO.output(self.digitHundreds, self.LOW)
self.coilActive = False
self.timeEnabled = 0
elif self.state == 3:
if self.coilActive == False:
if self.newC > self.C:
self.timeEnabled+=deltaTime
if(self.timeEnabled>self.maxTimeEnabled):
GPIO.output(self.digitTens, self.HIGH)
self.coilActive = True
self.timeEnabled = 0.0
self.newC-=1
else:
self.state = 4
else:
self.timeEnabled += deltaTime
if self.timeEnabled > self.maxTimeEnabled:
GPIO.output(self.digitTens, self.LOW)
self.coilActive = False
self.timeEnabled = 0
elif self.state == 4:
if self.coilActive == False:
if self.newD > self.D:
self.timeEnabled+=deltaTime
if(self.timeEnabled>self.maxTimeEnabled):
GPIO.output(self.digitOnes, self.HIGH)
self.coilActive = True
self.timeEnabled = 0.0
self.newD-=1
else:
self.state = 5
else:
self.timeEnabled += deltaTime
if self.timeEnabled > self.maxTimeEnabled:
GPIO.output(self.digitOnes, self.LOW)
self.coilActive = False
self.timeEnabled = 0
elif self.state == 5:
self.A = self.atemp
self.B = self.btemp
self.C = self.ctemp
self.D = self.dtemp
self.state = 0
def changeScoreOld(self,score):
if self.active == False:
self.active = True
print "Program started"
print "set input function"
inputString = str(score)
while(inputString.__len__() != 4):
inputString = "0" + inputString
ScoreArray = list(inputString)
newA = int(ScoreArray[0])
atemp = newA
newB = int(ScoreArray[1])
btemp = newB
newC = int(ScoreArray[2])
ctemp = newC
newD = int(ScoreArray[3])
dtemp = newD
print str(newD)
if newA < self.A:
newA += 10
if newB < self.B:
newB += 10
if newC < self.C:
newC += 10
if newD < self.D:
newD += 10
print "HI I AM A SCORECHANGER!!!! Score: "+ inputString + "Last Score: " + str(self.A)+ str(self.B)+ str(self.C)+ str(self.D)
while(newA > self.A):
GPIO.output(self.digitThousands, self.HIGH)
sleep(0.15)
GPIO.output(self.digitThousands, self.LOW)
sleep(0.15)
newA-=1
while(newB > self.B):
GPIO.output(self.digitHundreds, self.HIGH)
sleep(0.15)
GPIO.output(self.digitHundreds, self.LOW)
sleep(0.15)
newB-=1
while(newC > self.C):
GPIO.output(self.digitTens, self.HIGH)
sleep(0.15)
GPIO.output(self.digitTens, self.LOW)
sleep(0.15)
newC-=1
while(newD > self.D):
GPIO.output(self.digitOnes, self.HIGH)
sleep(0.15)
GPIO.output(self.digitOnes, self.LOW)
sleep(0.15)
newD-=1
self.A = atemp
self.B = btemp
self.C = ctemp
self.D = dtemp
self.active = False
def resetScoreReels(self):
oneAmount = 10 - self.D
tenAmount = 10 - self.C
hundredAmount = 10 - self.B
thousandAmount = 10 - self.A
        # class attributes must be accessed through self inside instance methods
        if oneAmount != 10:
            for ones in range(0,oneAmount):
                GPIO.output(self.digitOnes, self.HIGH)
                sleep(0.1)
                GPIO.output(self.digitOnes, self.LOW)
                sleep(0.1)
        if tenAmount != 10:
            for tens in range(0,tenAmount):
                GPIO.output(self.digitTens, self.HIGH)
                sleep(0.1)
                GPIO.output(self.digitTens, self.LOW)
                sleep(0.1)
        if hundredAmount != 10:
            for hundreds in range(0,hundredAmount):
                GPIO.output(self.digitHundreds, self.HIGH)
                sleep(0.1)
                GPIO.output(self.digitHundreds, self.LOW)
                sleep(0.1)
        if thousandAmount != 10:
            for thousands in range(0,thousandAmount):
                GPIO.output(self.digitThousands, self.HIGH)
                sleep(0.1)
                GPIO.output(self.digitThousands, self.LOW)
                sleep(0.1)
|
[
"noreply@github.com"
] |
noreply@github.com
|
60c21ecdefa93da86c1761960a9774855f951f81
|
fab44b6672152764ad965291d645223ccbe6186a
|
/Undergrad_research(Machine Learning) Project/Machine Learning_undergraduate research project/lab2-part1/debugging2.py
|
4e814c85f717f68d4f752899d62cf392491012d2
|
[] |
no_license
|
AndanteKim/AP_Archive
|
45149c410dcdc8d4f2cd64422091de00f451f34b
|
bcec25375edc5c2f44598bd9f48a6de49e108d35
|
refs/heads/master
| 2023-02-23T20:33:08.650315
| 2021-01-28T23:51:23
| 2021-01-28T23:51:23
| 276,733,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,201
|
py
|
#!/usr/bin/env python
# This script is full of common errors you're likely to run into.
# To fix it, you need to debug it. Look at the error messages, use print
# statements, and trace your code by hand on paper to find and fix the bugs.
# This script calculates the fibonacci sequence in four different ways.
# Be sure to read the description at the top of each function.
# The goal is not to change the way in which the code is written but to find
# all the semantic and syntax errors.
#----------------
import numpy
# This function prints the first n numbers of the fibonacci sequence
#def print_n_fibonacci(n):
# a = 1.
# b = 1.
# print a
# print b
# counter = 2
# for i in range(n):
# newa = b
# b = a+b
# a = newa
# print b
# counter +=1
# print 'This function requested ', n, 'numbers and printed ',counter,'numbers'
#print 'output for print_n_fibonacc where n =',10,':'
#print_n_fibonacci(10)
#print
# This function prints the fibonacci sequence up to the number 610
#def print_fibonacci_upto610() :
# a,b = 1.,1.
# print a
# print b
# while b < 610:
# a,b = b,a+b
# print b
#print 'output for print_fibonacci_upto610:'
#print_fibonacci_upto610()
#print
# This function creates a list which contains the first n numbers of the
# fibonacci sequence and returns this list
#def create_fibonacci_list_uptoN(n):
# fibonacci = [1.,1.]
# for i in range(n):
# fibonacci.append(fibonacci[i]+fibonacci[i+1])
# return fibonacci
#print 'list return from create_fibonacci_list_uptoN where n =',10,':'
#fib = create_fibonacci_list_uptoN(10)
#print fib
#print 'The length of the returned list is', len(fib)
#print
# This function creates a numpy array which contains the fibonacci sequence
# up to the number 610
def create_fibonacci_array_upto610():
counter = 1
fibonacci = numpy.array([1.,1.])
while fibonacci[counter] < 610. :
fibonacci = numpy.append(fibonacci, fibonacci[counter-1] + fibonacci[counter])
counter += 1
return fibonacci
print 'array return from create_fibonacci_array_upto610:'
fib = create_fibonacci_array_upto610()
print fib
|
[
"54167881+AndanteKim@users.noreply.github.com"
] |
54167881+AndanteKim@users.noreply.github.com
|
d9a464be1a3be2b144f34de63add4214c3cfc0dd
|
6cfc109684e689fd4fba01380f95ebdde567531d
|
/Lab2/prueba.py
|
58c38427b1d982a7c6a10fd06c3ffd5445836209
|
[] |
no_license
|
jaoc1811/CI2692
|
83291c70277dbe05dc076f9bffcb5db44a9c9864
|
ab185a695c0a7722ccdd8317e4d4130853e9c9ae
|
refs/heads/master
| 2020-03-18T20:32:45.852537
| 2018-05-29T00:58:17
| 2018-05-29T00:58:17
| 131,191,841
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,992
|
py
|
from common.base.basic import read_file
from common.base.basic import Random
def mergesort(A):
    # Get the number of elements in the array.
    r = len(A)
    # A single-element array is already sorted by definition.
    if 1 == r: return A
    # Build two new sorted sub-arrays.
    # L for the left sub-array (Left).
    # R for the right sub-array (Right).
    L = mergesort(A[:(r/2)])
    R = mergesort(A[(r/2):])
    # Return the merged, sorted array.
    return merge(L,R)
def merge(L,R):
    # Create a new empty array where the sorted values will be stored.
    array = []
    # Initialise the variables used to iterate over the sub-arrays.
    i,j = 0,0
    # Initialise the variables used to check whether each array has been fully traversed.
    a,b = len(L),len(R)
    # While an iterator is still within the range of its sub-array, keep merging.
    while (i < a or j < b):
        # The conditional is written this way because Python evaluates the guards
        # in order. The third and fourth guards therefore never raise an index
        # error, since the first or second guard is taken whenever the index to
        # compare falls outside the range of its array.
        if (i >= a): # Check whether array L has already been fully traversed.
            array.append(R[j])
            j += 1
        elif (j >= b): # Check whether array R has already been fully traversed.
            array.append(L[i])
            i += 1
        elif (L[i] <= R[j]): # Append the smaller of the two elements.
            array.append(L[i])
            i += 1
        elif (R[j] < L[i]): # Append the smaller of the two elements.
            array.append(R[j])
            j += 1
    #print array
    return array
def insertion_sort(A):
for i in range(1, len(A)):
key = A[i]
j = i - 1
while j >= 0 and A[j] > key:
A[j+1] = A[j]
j = j - 1
A[j+1] = key
def freivalds(n, A, B, C):
    def multiply(n, A, Z):
        # Create the vector to return
        R = n * [0]
        # Iterate over the elements of vector R and the rows of matrix A
        for i in range(n):
            # Iterate over the elements of vector Z and the elements of row i of A
            for j in range(n):
                R[i] = R[i] + (A[i][j] * Z[j])
        return R
    # Generate a vector Z filled with zeros and ones
    Z = n * [n]
    for i in range(n):
        Z[i] = Random(0,1)
    # Multiply B x Z, then A x (B x Z) and C x Z,
    # obtaining 2 vectors x1 and x2 of length n
    Y = multiply(n, B, Z)
    x1 = multiply(n, A, Y)
    x2 = multiply(n, C, Z)
    # Check whether A x (B x Z) = C x Z
    return x1 == x2
def amplified_freivalds(k, n, A, B, C):
for i in range(k):
r = freivalds(n, A, B, C)
if r == False:
return False
return True
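# Hypothetical usage sketch (not part of the original lab file): Freivalds' check
# on 2x2 matrices where A*B == C, amplified k=10 times to shrink the error bound.
# _A = [[1, 2], [3, 4]]
# _B = [[5, 6], [7, 8]]
# _C = [[19, 22], [43, 50]]
# print amplified_freivalds(10, 2, _A, _B, _C)   # expected: True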
def problema_3_8(A, x):
B = mergesort(A)
print B
R = False
for i in range(len(B) - 1):
start = i + 1
end = len(B) - 1
while start < end:
mid = (start + end) / 2
if B[mid] + B[i] == x:
R = True
break
elif B[mid] + B[i] < x:
start = mid + 1
elif B[mid] + B[i] > x:
end = mid - 1
if B[start] + B[i] == x:
R = True
return R
A = [ Random(0,2) for i in range(100)]
x = 71
#print A
print mergesort(A)
#print problema_3_8(A,x)
|
[
"jaoc1811@gmail.com"
] |
jaoc1811@gmail.com
|
ab9064ed0cf5cdd9c40ea7d1980c735a9bd402c3
|
ed98cf758a1aebb7a4415502a3672dcd5d480f91
|
/app/email.py
|
24033cf66ce2aa9d49a15899dba09de47e85f155
|
[
"MIT"
] |
permissive
|
eclectic-coding/microblog
|
541a4e7c187def2a3511b8d7fc69cddb7e3e3b51
|
7193bb04d3073bb918aeb1e437fd72869555c467
|
refs/heads/main
| 2023-04-28T01:56:32.835458
| 2021-05-16T18:05:53
| 2021-05-16T18:05:53
| 356,620,088
| 0
| 0
|
MIT
| 2021-05-16T18:05:54
| 2021-04-10T15:20:27
|
Python
|
UTF-8
|
Python
| false
| false
| 940
|
py
|
from threading import Thread
from flask import render_template
from flask_mail import Message
from app import app, mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(app, msg)).start()
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email('[Microblog] Reset Your Password',
sender=app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/reset_password.txt',
user=user, token=token),
html_body=render_template('email/reset_password.html',
user=user, token=token))
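# Hypothetical usage sketch (not part of the original module): a password-reset
# view would typically look the user up by e-mail and then call the helper, e.g.
# user = User.query.filter_by(email=form.email.data).first()
# if user:
#     send_password_reset_email(user)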
|
[
"noreply@github.com"
] |
noreply@github.com
|
bddd1e68745eb9d0c4be78f83fbe5b77dccf95e0
|
bff3b19be6408c671b99a8c08f8faee932460686
|
/afnd6.py
|
69873feb97141fca01ad456deafbdc69854124d0
|
[] |
no_license
|
OrionVi1998/Automatas
|
47591e9bb9548674e2a885cc348bf300d0eaafb4
|
3969ad25b66684c635d10138ffd71adf61d21e7c
|
refs/heads/master
| 2023-05-28T01:58:27.042093
| 2021-06-15T22:35:27
| 2021-06-15T22:35:27
| 376,657,207
| 0
| 0
| null | 2021-06-15T22:30:50
| 2021-06-13T23:10:41
|
Python
|
UTF-8
|
Python
| false
| false
| 637
|
py
|
grafo = {
0: [(0, "a"), (0, "b"), (1, "a")],
1: [(2, "b")],
2: [(3, "b")],
3: []
}
grafo2 = {
0: [(1, "a"), (2, "a")],
1: [(3, "b")],
2: [(5, "b")],
3: [(4, "a")],
4: [(1, "b")],
5: [(2, "a")]
}
def bfs(start):
queue = [(start, "")]
visited = []
while len(queue) > 0:
estado = queue.pop(0)
neighbours = grafo.get(estado[0])
print("estado ", estado, "vecinos: ", neighbours)
for edge in neighbours:
if edge not in visited:
visited.append(edge)
queue.append(edge)
print(edge)
bfs(0)
|
[
"octaviov1998@gmail.com"
] |
octaviov1998@gmail.com
|
2a200f3a2374864f5dfb04e9acef5ed89b61e21d
|
30b3fe3e33c090099f8d86e498b80e70da069822
|
/solution.py
|
9605a5aca6066e2072a43573499ef3283f88859a
|
[] |
no_license
|
selvaramkumar/leetcode1451
|
5e967d2b6d89e7ce5c7345dcdbef3478e3fcb20a
|
bebf87f5beca2aa791fcd8f3b00ae1e6cf87364c
|
refs/heads/main
| 2023-02-07T08:46:21.077083
| 2021-01-05T14:23:29
| 2021-01-05T14:23:29
| 327,020,442
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
from collections import OrderedDict
class Solution:
def arrangeWords(self, text: str) -> str:
temp=text.split(" ")
dict1={}
for i in temp:
if not len(i) in dict1:
dict1[len(i)]=i
else :
dict1[len(i)]=dict1[len(i)]+" "+i
res=""
dict2=OrderedDict(sorted(dict1.items()))
count=0
for key,value in dict2.items():
if count>=1:
res=res+" "+value[0].lower() + value[1:]
count=count+1
else:
res=res+value[0].upper() + value[1:]
count=count+1
return res
s=Solution()
str1="Keep calm and code on"
print(s.arrangeWords(str1))
|
[
"sselvaramkumar@gmail.com"
] |
sselvaramkumar@gmail.com
|
4d876adb17ed372668e9f24105bb83023429a2af
|
ef9368cc0b4f1bfad3abae292be5c7677f11a8e4
|
/EazyHacks/urls.py
|
8cc74321382162d1e9bd6f86e1997887ef30302c
|
[] |
no_license
|
prnvshrn/EazyHacks
|
89fc519c034fb4c8c75ea91c7a83b50ce77d2a63
|
212c66c80de4bf4eb3eb76dda4479abcfe67d873
|
refs/heads/master
| 2021-09-05T21:26:55.891948
| 2018-01-31T04:36:36
| 2018-01-31T04:36:36
| 115,707,094
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
"""EazyHacks URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from EazyHacks import views
from django.conf.urls import url
urlpatterns = [
path('admin/', admin.site.urls),
url(r'^$', views.openLogin, name='login'),
url(r'^AddHack.html/', views.openAddHack, name='add_hack'),
url(r'^BrowseHack.html/(?P<hack_type>[0-9]+)/', views.openBrowseHack, name='browse_hack'),
url(r'^HackDetails.html/(?P<hack_id>[0-9]+)/', views.openHackDetails, name='hack_details'),
url(r'^HackDetails.html/', views.openLogin ,name='hack_base'),
url(r'^Logout/',views.logOut,name='logout')
]
|
[
"prnvshrn@gmail.com"
] |
prnvshrn@gmail.com
|
a95329335b970233b588cd83bb48ba1a20a06e5b
|
97e833b79e40f798019e45829d4c3eb91b852438
|
/telegraph/appos.py
|
0326616f7c814a68bea716949be213b918db56f4
|
[] |
no_license
|
AwkwardLiSFan/news-tone
|
b6069d6abb55b6e4eb8caf38ff27669669d66560
|
fd55786991c3c1c4d4cbe3585026b14992bec69f
|
refs/heads/main
| 2023-06-22T10:43:57.793472
| 2021-07-21T10:58:45
| 2021-07-21T10:58:45
| 388,087,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
appos_list = {
"aren't" : "are not",
"can't" : "cannot",
"couldn't" : "could not",
"didn't" : "did not",
"doesn't" : "does not",
"don't" : "do not",
"hadn't" : "had not",
"hasn't" : "has not",
"haven't" : "have not",
"he'd" : "he would",
"he'll" : "he will",
"he's" : "he is",
"i'd" : "I would",
"i'd" : "I had",
"i'll" : "I will",
"i'm" : "I am",
"isn't" : "is not",
"it's" : "it is",
"it'll":"it will",
"i've" : "I have",
"let's" : "let us",
"mightn't" : "might not",
"mustn't" : "must not",
"shan't" : "shall not",
"she'd" : "she would",
"she'll" : "she will",
"she's" : "she is",
"shouldn't" : "should not",
"that's" : "that is",
"there's" : "there is",
"they'd" : "they would",
"they'll" : "they will",
"they're" : "they are",
"they've" : "they have",
"we'd" : "we would",
"we're" : "we are",
"weren't" : "were not",
"we've" : "we have",
"what'll" : "what will",
"what're" : "what are",
"what's" : "what is",
"what've" : "what have",
"where's" : "where is",
"who'd" : "who would",
"who'll" : "who will",
"who're" : "who are",
"who's" : "who is",
"who've" : "who have",
"won't" : "will not",
"wouldn't" : "would not",
"you'd" : "you would",
"you'll" : "you will",
"you're" : "you are",
"you've" : "you have",
"'re": " are",
"wasn't": "was not",
"we'll":" will",
"didn't": "did not"
}
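# Hypothetical usage sketch (not part of the original module): expanding
# contractions in a lower-cased sentence with appos_list before tone analysis.
# words = "we've seen what's coming".split()
# print(" ".join(appos_list.get(w, w) for w in words))   # "we have seen what is coming"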
|
[
"noreply@github.com"
] |
noreply@github.com
|
b7ba80089f455b58d92760039c26578e86a680f3
|
3b380acf42684aaaa3201c241456e43920a40c1d
|
/paradeground/units/__init__.py
|
19c57c101368292a30f2e9093d01635cbcbbd3f7
|
[] |
no_license
|
warp-one/electron
|
484245c45a7947f5bbe3b87020b62df74eb884ca
|
0147b3ff2e6320147562161ec2c9edea784b4015
|
refs/heads/master
| 2021-01-24T03:18:24.758276
| 2016-09-28T19:22:11
| 2016-09-28T19:22:11
| 41,637,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,473
|
py
|
from math import sqrt, pi, sin, cos, tan, degrees
from random import randint
import pyglet
from tools import *
from units.behavior import *
from units.behavior import states
from selection import selectiontriangle as st
import settings
class Status(object):
name = "Buff"
def __init__(self, unit):
self.unit = unit
self.active = False
def trigger(self):
pass
def update(self, dt):
pass
class Speed(Status):
name = "Speed"
def __init__(self, unit, max_speed=600, acceleration=20, speed_bonus=30):
super(Speed, self).__init__(unit)
self.deceleration = acceleration
        self.max_speed = max_speed  # use the constructor argument instead of a hard-coded 600
self.zones = set()
self.speed_bonus = speed_bonus
def trigger(self, zone):
self.zones.add(zone)
def deactivate(self, zone):
return
#self.zones.discard(zone)
def update(self, dt):
active = False
if self.zones:
max_speed = min([max([z.top_speed for z in self.zones]), self.unit.MAX_SPEED])
acceleration = max([z.acceleration for z in self.zones])
active = True
else:
max_speed = self.max_speed
speed_normal = (self.unit.current_speed - self.unit.BASE_SPEED)/(max_speed - self.unit.BASE_SPEED)
if active:
if self.unit.current_speed < max_speed:
self.unit.current_speed += min(acceleration, max_speed - self.unit.current_speed)
self.unit.flat_poly.colors = [self.unit.color[i%3] + int((255 - x)*speed_normal) if not randint(0, 5) else self.unit.color[i%3] for i, x in enumerate(self.unit.flat_poly.colors)]
else:
if self.unit.current_speed > self.unit.BASE_SPEED:
inactive_cap = max_speed - self.speed_bonus
if self.unit.current_speed > inactive_cap:
self.unit.current_speed = inactive_cap
else:
self.unit.current_speed -= min(self.deceleration/16, self.unit.current_speed - self.unit.BASE_SPEED)
self.unit.flat_poly.colors = [self.unit.color[i%3] + int((255 - x)*speed_normal) if not randint(0, 5) else int(self.unit.color[i%3]) for i, x in enumerate(self.unit.flat_poly.colors)]
else:
self.unit.flat_poly.colors = [int(self.unit.color[i%3]*.69) for i, x in enumerate(self.unit.flat_poly.colors)]
self.zones.clear()
class BasicUnit(pyglet.sprite.Sprite):
ROTATION_RATE = 1 * pi/180 # radians = degrees * pi/180
size = 32
radius = size/2
w = size
h = size
BASE_SPEED = 300.0 # pixels per frame
MAX_SPEED = 600.0
solid = True
image_factor = 1
selection_scale = 2 * image_factor
immobile = False
def __init__(self, team=None, *args, **kwargs):
super(BasicUnit, self).__init__(*args, **kwargs)
self.team = team
self.name = None
self.id = 0
# grid
self.prev = None
self.next = None
self.graphics = []
self.group = settings.FOREGROUND
self.sgroup = settings.MIDGROUND
self.rotate_tick = .1 #1 * pi/180.
self.rotation = 0
self.velocity = 0.
self.selectable = False
self.selected = False
self.selection_indicator = None
self.selection_rotation = 0
self.current_speed = self.BASE_SPEED
self.statuses = {}
def select(self):
if self.selectable and not self.is_selected():
self.selected = True
self.selection_indicator = st.SelectionTriangle(self)
self.graphics.append(self.selection_indicator.graphic)
def deselect(self):
if self.is_selected():
self.selected = False
if self.selection_indicator:
self.graphics.remove(self.selection_indicator.graphic)
self.selection_indicator.graphic.delete()
self.selection_indicator = None
def is_selected(self):
if self.selected:
return True
else:
return False
def suicide(self):
#self.spawn_death_animation()
for g in self.graphics:
g.delete()
self.delete()
def update(self, dt):
self.rotation -= .01
while self.rotation < 0:
self.rotation += 360
for s in self.statuses:
self.statuses[s].update(dt)
self.velocity = self.current_speed * dt
self.tick_graphics(dt)
def get_location(self):
return self.x, self.y
def tick_selection_rotation(self):
self.selection_rotation += self.ROTATION_RATE
def init_graphics(self):
pass
def tick_graphics(self, dt):
if self.selection_indicator:
self.selection_indicator.update(dt)
self.tick_selection_rotation()
def handle_collision(self, collider):
return self.solid
class ActiveUnit(BasicUnit):
def __init__(self, *args, **kwargs):
super(ActiveUnit, self).__init__(*args, **kwargs)
self.current_destination = (0, 0)
self.dx, self.dy = 0, 0
self.old_x, self.old_y = 0, 0
def move(self, dx, dy):
self.dx, self.dy = dx, dy
self.old_x, self.old_y = self.x, self.y
def rotate(self, dx, dy):
position = self.old_x, self.old_y
mark = self.x + dx, self.y + dy
# heading = get_angle_in_radians(position, mark)
# self.rotation = heading
def arrive(self):
self.current_destination = (0, 0)
self.brain.set_state("idleing")
self.stop()
self.leash_point = self.get_location()
def stop(self):
self.dx, self.dy = 0, 0
def receive_command(self, target, command=None, origin=(0, 0)):
if command == "MOVE":
x = target[0] + self.x - origin[0]
y = target[1] + self.y - origin[1]
self.current_destination = (x, y)
self.brain.set_state("movecommand")
elif command == "STOP":
self.current_destination = self.x, self.y
self.stop()
self.brain.set_state("idleing")
else:
self.current_destination = target
self.brain.set_state("movecommand")
def update(self, dt):
super(ActiveUnit, self).update(dt)
class ThinkingUnit(ActiveUnit):
def __init__(self, *args, **kwargs):
super(ThinkingUnit, self).__init__(*args, **kwargs)
self.brain = StateMachine()
self.leash_point = (0, 0)
self.alert_range = 200
self.target = None
self.wait_count = 0
idleing_state = states.UnitStateIdleing(self)
chasing_state = states.UnitStateChasing(self)
waiting_state = states.UnitStateWaiting(self)
command_state = states.UnitStateMoveCommand(self)
self.brain.add_state(idleing_state)
self.brain.add_state(chasing_state)
self.brain.add_state(waiting_state)
self.brain.add_state(command_state)
self.brain.set_state("idleing")
def update(self, dt):
super(ThinkingUnit, self).update(dt)
self.brain.think()
|
[
"wrschuller@gmail.com"
] |
wrschuller@gmail.com
|
1e5c3dec3126452c25e701e2cef0ece2a6572176
|
7556fc49cef701861ce456c962181c8a4d8522ce
|
/employee/models.py
|
e51481d0fb408a74056a647258631f3c29935d3c
|
[] |
no_license
|
km-pythoner/job_market_cms
|
7fa708e6bc0f14ac0936e863c971e2e62c0f6ed0
|
2e18f8822f6938098bcff7317dd9350d4d837540
|
refs/heads/master
| 2021-09-09T19:50:54.551274
| 2018-03-19T10:05:52
| 2018-03-19T10:05:52
| 125,135,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
from datetime import datetime
from django.db import models
from users.models import UserProfile
from employer.models import JobInfo
class EmployeeInfo(models.Model):
pass
|
[
"jj19901030"
] |
jj19901030
|
caff9c7cb685bc07ae6b58176aa41c8d83544348
|
9f0a4262c4402201df1cdd5674a679543f4a50b5
|
/shaderLibrary_maya2017/resources/__init__.py
|
05e522a865f16bd93dd2591fa2f1e5a4d20967ec
|
[] |
no_license
|
subing85/subins-toolkits
|
611b6b3b3012ccb023096f6e21d18d2bda5a534b
|
d02af1289ec3ee5bce6fa3d78c134a8847113aa6
|
refs/heads/master
| 2022-07-12T17:19:57.411454
| 2022-07-01T20:37:16
| 2022-07-01T20:37:16
| 168,826,548
| 11
| 2
| null | 2022-07-02T01:03:34
| 2019-02-02T11:51:25
|
Mathematica
|
UTF-8
|
Python
| false
| false
| 1,087
|
py
|
import os
from shaderLibrary_maya2017.utils import platforms
CURRENT_PATH = os.path.dirname(__file__)
MODULE = platforms.get_tool_kit()[0]
def getInputPath(module=None):
return os.path.join(
CURRENT_PATH, "inputs", "{}.json".format(module)
)
def getIconPath():
return os.path.join(CURRENT_PATH, "icons")
def getPreferencePath():
return os.path.join(getWorkspacePath(), "preference")
def getWorkspacePath():
return os.path.join(os.getenv("HOME"), "Documents", MODULE)
def getPublishDirectory():
return os.path.join(
os.environ["HOME"], "Walk_cycle", "characters"
)
def getResourceTypes():
data = {
"preference": getPreferencePath(),
"shader": getWorkspacePath(),
"generic": None,
}
return data
def getToolKitLink():
return "https://www.subins-toolkits.com"
def getToolKitHelpLink():
return "https://vimeo.com/314966208"
def getDownloadLink():
return "https://www.subins-toolkits.com/shader-library"
# end ####################################################################
|
[
"subing85@gmail.com"
] |
subing85@gmail.com
|
4f17a87004d2e33cbb26f6d49b7cb84a0b7ffef9
|
70532360ddfdd8006bf7044c117403ce837cef0a
|
/code/Rplot.py
|
cd1f9b2b402c74ca5ecf9502d4eba1665cd10a9b
|
[] |
no_license
|
wsgan001/campus_wifi_analysis
|
09a7944f5019f726682925c8785cdf5f7d8c469a
|
c470135691ff8faad3cb4755301e4f59389e2c5a
|
refs/heads/master
| 2020-03-10T11:09:05.579870
| 2017-03-03T07:13:57
| 2017-03-03T07:13:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,312
|
py
|
# -*- coding: utf-8 -*-
import fileinput
user = {}
for line in fileinput.input("../data/select/select_a"):
mac = line.strip().split(" ")[0]
user[mac] = True
fileinput.close()
with open("../data/plot/R_trace_all","w") as f:
f.write("mac time dura\n")
for line in fileinput.input("../data/feature/trace_all_statistic_filter"):
part = line.strip().split(" ")
mac, objs = part[0], part[3:]
if user.has_key(mac):
for one in objs:
tag, rto = one.split("@")[0], str(int(one.split("@")[1].split(",")[0])/42)
if tag in ["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20","21","22","23"]:
f.write(mac+" "+tag+" "+rto+"\n")
fileinput.close()
with open("../data/plot/R_trace_online","w") as f:
f.write("mac time dura\n")
for line in fileinput.input("../data/feature/trace_online_statistic_filter"):
part = line.strip().split(" ")
mac, objs = part[0], part[3:]
if user.has_key(mac):
for one in objs:
tag, rto = one.split("@")[0], str(int(one.split("@")[1].split(",")[0])/42)
if tag in ["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20","21","22","23"]:
f.write(mac+" "+tag+" "+rto+"\n")
fileinput.close()
jac = {}
for line in fileinput.input("../data/jaccount/jaccount_taged"):
part = line.strip().split(" ")
dev, mac, sex, sta, col, age = part[0], part[1], part[2], part[3], part[4], int(part[5])
if dev == "mobile":
jac[mac] = {'sex':sex, 'sta':sta, 'col':col, 'age':age}
if sex == "男性":
jac[mac]['sex'] = "Male"
elif sex == "女性":
jac[mac]['sex'] = "Female"
if age <= 20:
jac[mac]['age'] = "<=20"
elif age > 20 and age <=22 :
jac[mac]['age'] = "21~22"
elif age > 22:
jac[mac]['age'] = ">=23"
if col == "电子信息与电气工程学院":
jac[mac]['col'] = "TOP1"
elif col == "机械与动力工程学院":
jac[mac]['col'] = "TOP2"
elif col == "材料科学与工程学院":
jac[mac]['col'] = "TOP3"
elif col == "船舶海洋与建筑工程学院":
jac[mac]['col'] = "TOP4"
elif col == "安泰经济与管理学院":
jac[mac]['col'] = "TOP5"
fileinput.close()
with open("../data/plot/R_trace_all_cor","w") as f:
f.write("mac Acad Adm Ath Cant Hosp Lib Soc Supp Teach Other sex age\n")
for line in fileinput.input("../data/feature/trace_all_statistic_filter"):
part = line.strip().split(" ")
mac, objs, user = part[0], part[3:], {"Acad":"0","Adm":"0","Ath":"0","Cant":"0","Hosp":"0","Lib":"0","Soc":"0","Supp":"0","Teach":"0","Other":"0"}
for one in objs:
tag, rto = one.split("@")[0], one.split("@")[1].split(",")[0]
if tag in ["Acad","Adm","Ath","Cant","Hosp","Lib","Soc","Supp","Teach","Other"]:
user[tag] = rto
f.write(mac+' '+user['Acad']+' '+user['Adm']+' '+user['Ath']+' '+user['Cant']+' '+user['Hosp']+' '+user['Lib']+' '+user['Soc']+' '+user['Supp']+' '+user['Teach']+' '+user['Other']+' '+jac[mac]['sex']+' '+jac[mac]['age']+'\n')
fileinput.close()
with open("../data/plot/R_trace_online_cor","w") as f:
f.write("mac Acad Adm Ath Cant Hosp Lib Soc Supp Teach Other sex age\n")
for line in fileinput.input("../data/feature/trace_online_statistic_filter"):
part = line.strip().split(" ")
mac, objs, user = part[0], part[3:], {"Acad":"0","Adm":"0","Ath":"0","Cant":"0","Hosp":"0","Lib":"0","Soc":"0","Supp":"0","Teach":"0","Other":"0"}
for one in objs:
tag, rto = one.split("@")[0], one.split("@")[1].split(",")[0]
if tag in ["Acad","Adm","Ath","Cant","Hosp","Lib","Soc","Supp","Teach","Other"]:
user[tag] = rto
f.write(mac+' '+user['Acad']+' '+user['Adm']+' '+user['Ath']+' '+user['Cant']+' '+user['Hosp']+' '+user['Lib']+' '+user['Soc']+' '+user['Supp']+' '+user['Teach']+' '+user['Other']+' '+jac[mac]['sex']+' '+jac[mac]['age']+'\n')
fileinput.close()
# 1:renren, 2:baidu, 3:sina, 4:taobao, 5:qq
mapping = {'1':'1','2':'1','3':'1','27':'1','46':'1','64':'1','69':'1',\
'5':'2','6':'2','21':'2','22':'2','26':'2','60':'2','63':'2','70':'2','77':'2','80':'2','93':'2','98':'2',\
'11':'3','15':'3','16':'3','17':'3','23':'3','24':'3','28':'3','29':'3','51':'3','82':'3','84':'3',\
'19':'4','23':'4','36':'4','39':'4','42':'4','56':'4','57':'4','58':'4','59':'4',\
'20':'5','31':'5','41':'5','45':'5','48':'5','86':'5',\
}
with open("../data/plot/R_trace_http_cor","w") as f:
f.write("mac renren baidu sina taobao qq sex age\n")
for line in fileinput.input("../data/feature/trace_http_statistic_filter"):
part = line.strip().split(" ")
mac, objs, user = part[0], part[3:], {"renren":0,"baidu":0,"sina":0,"taobao":0,"qq":0}
for one in objs:
tag, rto = one.split("@")[0], int(one.split("@")[1].split(",")[1])
if len(tag.split("+")) == 2 and tag.split("+")[0] == "WD" and ":" in tag:
tag = tag.split("+")[1]
hst, typ = tag.split(":")[0], tag.split(":")[1]
if mapping.has_key(hst):
top = mapping[hst]
if top == "1":
user['renren'] += rto
elif top == "2":
user['baidu'] += rto
elif top == "3":
user['sina'] += rto
elif top == "4":
user['taobao'] += rto
elif top == "5":
user['qq'] += rto
f.write(mac+' '+str(user['renren'])+' '+str(user['baidu'])+' '+str(user['sina'])+' '+str(user['taobao'])+' '+str(user['qq'])+' '+jac[mac]['sex']+' '+jac[mac]['age']+'\n')
fileinput.close()
|
[
"mqiang@splunk.com"
] |
mqiang@splunk.com
|
3f8ff7bf52aee9a81f937005bb281f95f35481df
|
4b5d7d9131cd342d0d54130d217cb10eff7c1bff
|
/lab4/algorithmTests.py
|
904f07e9267a8788aa66254840d2d128f0696911
|
[] |
no_license
|
sklaboi/ochrona-danych-laboratorium
|
48f8b02d2ab73d764e869c4a3a001088d34134e2
|
7701cc1e29afb2b7b3d8fb0a25a95b7d00d4d61d
|
refs/heads/master
| 2021-05-26T18:10:36.085276
| 2012-04-02T10:22:49
| 2012-04-02T10:22:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
#!/usr/bin/python
import sys
import random
import math
import hashlib
from Crypto.Cipher import DES,AES
import time
des = DES.new("key12345")
des = DES.new("key12345",DES.MODE_CBC)
#encrypted = des.encrypt("secret12")
#print encrypted
aes = AES.new("1234567890123456",AES.MODE_CFB)
encrypted = aes.encrypt("test")
#print encrypted
haslo = sys.argv[1]
random.seed(time.time())
sol = ""
for s in range(8):
sol += str(random.randint(0,9))
print "sol:"
print sol
print "pass:"
password = hashlib.sha224(haslo).hexdigest()
for i in range(1000):
password = hashlib.sha224(password+str(sol)).hexdigest()
print password
|
[
"gwiazdal@volt.iem.pw.edu.pl"
] |
gwiazdal@volt.iem.pw.edu.pl
|
43c10cdae7648e4ba849bdb25a0d0584082480de
|
a1678f80efe56423d08bea6a2843633b8a81dd34
|
/DSALGO_String/firstNonRpeatingCharacterInStream.py
|
d49f0701ab89e0eff461dd81e21982da2b3f07ca
|
[] |
no_license
|
NIDHISH99444/CodingNinjas
|
af60aa93dbfcf050e727949d41201f72973b0608
|
b77b652cf0bf9b098ef9da4eff5eaecb7bfeaea5
|
refs/heads/master
| 2021-05-17T03:50:45.376843
| 2020-05-03T17:25:01
| 2020-05-03T17:25:01
| 250,608,228
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
from _collections import deque
def firstNonRepeating(string):
dict=[0]*26
q=deque()
for i in range(len(string)):
dict[ord(string[i])-ord('a')]+=1
q.append(string[i])
while len(q)!=0:
if dict[ord(q[0])-ord('a')]>1:
q.popleft()
else:
print(q[0],end=" ")
break
if len(q)==0:
print("-1",end=" ")
print()
firstNonRepeating("aabc")
firstNonRepeating("aac")
|
[
"nidhish99444@gmail.com"
] |
nidhish99444@gmail.com
|
2ec70de8b0fa6c526ab26722c4d947d9f7a07da4
|
241c347e5842c19bb298b8422a4bc68e64350d66
|
/machine_learner.py
|
7fa09bca40979c59a871e5e4fa1155713a8286a7
|
[] |
no_license
|
ThePianoDentist/dota_talent_stats
|
92956c43356ea8e8d752c15f1294978eff026545
|
e2c3d1cec51d1e8b426c804f0331ee1221e3208b
|
refs/heads/master
| 2021-01-23T03:43:12.700928
| 2017-09-29T12:49:54
| 2017-09-29T12:49:54
| 86,113,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,309
|
py
|
import random
from keras.models import Sequential
from keras.layers import Dense
import numpy
import itertools
seed = 7 # random seed fixed so can reproduce things
numpy.random.seed(seed)
# TODO abstract model stuff away so can literally just give our hero id, and team and enemy ids.
# TODO i.e dont hardcode these numpy.zeros(230) everywhere
class Model:
def __init__(self, inputs, outputs, model, alpha, test_inputs, test_outputs):
self.model = model
self.inputs = inputs
self.outputs = outputs
self.ignoreHeroes = False
self.alpha = alpha # for http://stats.stackexchange.com/a/136542
self.test_inputs = test_inputs
self.test_outputs = test_outputs
def _net_predict(self, input_):
if self.ignoreHeroes:
input_ = input_[-4:]
return self.model.predict(numpy.array([input_]))
@property
def neuron_upper_limit(self):
# TODO assumes only 1 output field
upper_limit = len(self.inputs) / (self.alpha * (len(self.inputs[0]) + 1))
return upper_limit
def evaluate(self):
scores = self.model.evaluate(self.inputs, self.outputs)
# print("Evaluation: \n")
# print(scores)
# print("%s: %.2f%%" % (self.model.metrics_names[1], scores[1] * 100))
def predict(self, our_hero, friendly_heroes, enemy_heroes):
inputs = numpy.empty(230)
inputs.fill(-1.0)
for h in friendly_heroes:
inputs[DiscreteHeroModel.hero_id_to_index(h, our_hero.id, True)] = 1.0
for h in enemy_heroes:
inputs[DiscreteHeroModel.hero_id_to_index(h, our_hero.id, False)] = 1.0
skill_trees = [list(i) for i in itertools.product([-1.0, 1.0], repeat=4)]
for sk_tree in skill_trees:
temp_inputs = inputs
temp_inputs[-4:] = sk_tree
prediction = self._net_predict(temp_inputs)
rounded = [round(x[0], 4) for x in prediction]
print("\nSkill tree:")
print(temp_inputs[-4:])
print("\nPrediction: ")
print(rounded)
def test(self):
# TODO whats the best way to measure accuracy?
# do i need to be checking std_devs of inaccuracies as well?
inaccuracy = 0.0
actual_out_sum = predicted_out_sum = 0.0
for i, input_ in enumerate(self.test_inputs):
predicted_out = self._net_predict(input_)[0]
actual_out = self.test_outputs[i]
inaccuracy += abs(actual_out - predicted_out)
predicted_out_sum += predicted_out
actual_out_sum += actual_out
#inaccuracy /= len(self.test_outputs)
inaccuracy = abs(actual_out_sum - predicted_out_sum) / len(self.test_inputs)
print("Actual winrate: ", actual_out_sum/ len(self.test_inputs))
print("Predicted winrate: ", predicted_out_sum / len(self.test_inputs))
return inaccuracy
class SimpleModel(Model):
pass
class RandomForestDeicisonTreeModel(Model):
"does the 100 or so branches for each choice make this kind of hard? / poor performance?"
"could do same thing and turn it into binary choices to choose a hero or not"
"but just trading width for height"
pass
class DiscreteHeroModel(Model):
def __init__(self, inputs, outputs, alpha=2, test_inputs=None, test_outputs=None, ignore_heroes=False):
"""
:param inputs: the discrete representations of possible heros
- plus the 4 talent choices
- 0.5 represents never chose that talent
:param outputs: 1 for win. 0 for loss :)
"""
self.ignoreHeroes = ignore_heroes
# TODO tidy how inheritance occurring. how consturctors behave. this is messy
if self.ignoreHeroes:
self.inputs = numpy.array([inp[-4:] for inp in inputs])
self.test_inputs = numpy.array([inp[-4:] for inp in test_inputs])
dimension = 4
else:
self.inputs = numpy.array(inputs)
self.test_inputs = numpy.array(test_inputs)
dimension = 230
self.outputs = numpy.array(outputs)
self.test_outputs = numpy.array(test_outputs)
self.model = Sequential()
# TODO 80, 40, 72000. whats a number ¯\_(ツ)_/¯
self.model.add(Dense(115, input_dim=dimension, init='uniform', activation='relu'))
#self.model.add(Dense(260, input_dim=230, init='uniform', activation='relu'))
# self.model.add(Dense(133, init='uniform', activation='relu'))
# self.model.add(Dense(8, init='uniform', activation='relu'))
self.model.add(Dense(1, init='uniform', activation='sigmoid'))
# print(len(self.inputs))
# print(len(self.outputs))
self.model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
self.model.fit(self.inputs, self.outputs, epochs=150, batch_size=100)#, verbose=0)
super().__init__(self.inputs, self.outputs, self.model, alpha, self.test_inputs, self.test_outputs)
@staticmethod
def hero_id_to_index(hero_id, our_hero_id, friendly):
start = 0 if friendly else 113
if hero_id < our_hero_id:
return start + hero_id - 1 # hero_ids start at 1, not 0
else:
return start + hero_id - 2 # we 'jump over' our_hero in the array
class DecomposedHeroModel(Model):
pass
class Net:
def __init__(self, inputs, outputs):
self.inputs = inputs
self.outputs = outputs
# inputs
# 4 friendly team-mates
# our hero
# 5 enemies
#
# ouput w/l
# hmmmmmmmmmmmmmm
# so the input arent numerical values where differences have meaning...they're just ids
# this isnt really a machine learning problem?
# this is more, we have different estimates with different errors
# how to combine to make most accurate guess :/
# as in we may have a game with these exact heroes and won it. but that 100% is less reliable
# than 1000s of games with a few hero matches with maybe 60% winrate
# so standard error = standard deviation / sqrt(sample size)
model = Sequential()
# random note: rectifier funcs over sigmoids > performance (dont do for output layer)
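# --- Added illustrative sketch (not part of the original file) ---
# The note above states "standard error = standard deviation / sqrt(sample size)".
# A minimal helper showing that idea; the function name and the example figures in
# the comment are assumptions, not the author's code.
import math

def standard_error(std_dev, sample_size):
    # e.g. a standard deviation of 0.49 over 1000 samples gives a standard error
    # of roughly 0.49 / 31.6 ~= 0.0155, which is why many games with a ~60% winrate
    # are more reliable evidence than a single observed win.
    return std_dev / math.sqrt(sample_size)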
|
[
"jbknight07@gmail.com"
] |
jbknight07@gmail.com
|
10a39221f5994440bcf13c5a105678bdd1ad321e
|
08f60e7f496e76a4c6d5d8f6b671eb65fe7f4c7e
|
/env/Scripts/rst2man.py
|
cf0ea6a096d96e11d05be44d0d3c7949c0e96b1a
|
[] |
permissive
|
Cell5/nfckey
|
dca892a0d647a3594fbb9af00615e388a8b54758
|
15a052e4877ad8eb4d71de3c92b2285e3e7d9d57
|
refs/heads/master
| 2022-11-27T03:45:29.944031
| 2018-11-16T09:38:01
| 2018-11-16T09:38:01
| 156,221,618
| 0
| 1
|
BSD-3-Clause
| 2022-11-19T01:38:13
| 2018-11-05T13:23:52
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 629
|
py
|
#!c:\xampp\htdocs\nfckey\env\scripts\python.exe
# Author:
# Contact: grubert@users.sf.net
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage
description = ("Generates plain unix manual documents. " + default_description)
publish_cmdline(writer=manpage.Writer(), description=description)
|
[
"exride@gmail.com"
] |
exride@gmail.com
|
64ebd8dc8dee1409f7462da7e97b36589440ca93
|
897d82d4953ed7b609746a0f252f3f3440b650cb
|
/evening/20200615/demo3.py
|
fb8a2467fdd7cd54f0e4530ae9c506eeaa9352c6
|
[] |
no_license
|
haiou90/aid_python_core
|
dd704e528a326028290a2c18f215b1fd399981bc
|
bd4c7a20950cf7e22e8e05bbc42cb3b3fdbe82a1
|
refs/heads/master
| 2022-11-26T19:13:36.721238
| 2020-08-07T15:05:17
| 2020-08-07T15:05:17
| 285,857,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 947
|
py
|
class GParent:
pass
class Parent(GParent):
def __init__(self,atk,hp):
self.atk = atk
self.hp = hp
def attack(self,target):
pass
def damage(self,value):
pass
# The player attacks an enemy; the enemy takes damage and may die
class Player(Parent,GParent):
def attack(self,target):
print('黑虎掏心')
target.damage(self.atk)
def damage(self,value):
print('小样你敢打我!')
self.hp -= value
if self.hp <= 0:
print('太菜了')
class Enemy(Parent):
def attack(self,target):
print('普通攻击第一式')
target.damage(self.atk)
def damage(self,value):
print('玩家打人啦')
self.hp -= value
if self.hp <= 0:
print('a~~~~')
print('爆装备')
p1 = Player(50,100)
e1 = Enemy(10,100)
p1.attack(e1)
e1.attack(p1)
e1.attack(p1)
e1.attack(p1)
e1.attack(p1)
p1.attack(e1)
|
[
"caoho@outlook.com"
] |
caoho@outlook.com
|
39b26a09d6fbe8fddb9e0b8211cadb3d9dd28529
|
f418f6f3a4f1e6574103b4426150c6a26e233bfe
|
/criteo/src/xgboost.py
|
c52aa05e8e393e239ef1a069b3f22698c0755499
|
[] |
no_license
|
fengqi0423/hahaha
|
495b8e6916cb553ce8dbeb02673b5c41489b93ab
|
4bdd96a81eb1165bc0eb05ab41b0f1ac3c9cde8a
|
refs/heads/master
| 2021-01-10T19:23:47.828477
| 2014-09-23T03:30:44
| 2014-09-23T03:30:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,098
|
py
|
# Author: Tianqi Chen, Bing Xu
# module for xgboost
import ctypes
import os
# optinally have scipy sparse, though not necessary
import numpy
import numpy.ctypeslib
import scipy.sparse as scp
# set this line correctly
XGBOOST_PATH = '/usr/local/lib/libxgboostpy.so'
# entry type of sparse matrix
class REntry(ctypes.Structure):
_fields_ = [("findex", ctypes.c_uint), ("fvalue", ctypes.c_float) ]
# load in xgboost library
xglib = ctypes.cdll.LoadLibrary(XGBOOST_PATH)
xglib.XGDMatrixCreate.restype = ctypes.c_void_p
xglib.XGDMatrixNumRow.restype = ctypes.c_ulong
xglib.XGDMatrixGetLabel.restype = ctypes.POINTER( ctypes.c_float )
xglib.XGDMatrixGetWeight.restype = ctypes.POINTER( ctypes.c_float )
xglib.XGDMatrixGetRow.restype = ctypes.POINTER( REntry )
xglib.XGBoosterCreate.restype = ctypes.c_void_p
xglib.XGBoosterPredict.restype = ctypes.POINTER( ctypes.c_float )
def ctypes2numpy( cptr, length ):
# convert a ctypes pointer array to numpy
assert isinstance( cptr, ctypes.POINTER( ctypes.c_float ) )
res = numpy.zeros( length, dtype='float32' )
assert ctypes.memmove( res.ctypes.data, cptr, length * res.strides[0] )
return res
# data matrix used in xgboost
class DMatrix:
# constructor
def __init__(self, data=None, label=None, missing=0.0, weight = None):
# force into void_p, mac need to pass things in as void_p
self.handle = ctypes.c_void_p( xglib.XGDMatrixCreate() )
if data == None:
return
if isinstance(data,str):
xglib.XGDMatrixLoad(self.handle, ctypes.c_char_p(data.encode('utf-8')), 1)
elif isinstance(data,scp.csr_matrix):
self.__init_from_csr(data)
elif isinstance(data, numpy.ndarray) and len(data.shape) == 2:
self.__init_from_npy2d(data, missing)
else:
try:
csr = scp.csr_matrix(data)
self.__init_from_csr(csr)
except:
raise Exception("can not intialize DMatrix from"+str(type(data)))
if label != None:
self.set_label(label)
if weight !=None:
self.set_weight(weight)
# convert data from csr matrix
def __init_from_csr(self,csr):
assert len(csr.indices) == len(csr.data)
xglib.XGDMatrixParseCSR( self.handle,
( ctypes.c_ulong * len(csr.indptr) )(*csr.indptr),
( ctypes.c_uint * len(csr.indices) )(*csr.indices),
( ctypes.c_float * len(csr.data) )(*csr.data),
len(csr.indptr), len(csr.data) )
# convert data from numpy matrix
def __init_from_npy2d(self,mat,missing):
data = numpy.array( mat.reshape(mat.size), dtype='float32' )
xglib.XGDMatrixParseMat( self.handle,
data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
mat.shape[0], mat.shape[1], ctypes.c_float(missing) )
# destructor
def __del__(self):
xglib.XGDMatrixFree(self.handle)
# load data from file
def load(self, fname, silent=True):
xglib.XGDMatrixLoad(self.handle, ctypes.c_char_p(fname.encode('utf-8')), int(silent))
# load data from file
def save_binary(self, fname, silent=True):
xglib.XGDMatrixSaveBinary(self.handle, ctypes.c_char_p(fname.encode('utf-8')), int(silent))
# set label of dmatrix
def set_label(self, label):
xglib.XGDMatrixSetLabel(self.handle, (ctypes.c_float*len(label))(*label), len(label) )
# set group size of dmatrix, used for rank
def set_group(self, group):
xglib.XGDMatrixSetGroup(self.handle, (ctypes.c_uint*len(group))(*group), len(group) )
# set weight of each instances
def set_weight(self, weight):
xglib.XGDMatrixSetWeight(self.handle, (ctypes.c_float*len(weight))(*weight), len(weight) )
# get label from dmatrix
def get_label(self):
length = ctypes.c_ulong()
labels = xglib.XGDMatrixGetLabel(self.handle, ctypes.byref(length))
return ctypes2numpy( labels, length.value );
# get weight from dmatrix
def get_weight(self):
length = ctypes.c_ulong()
weights = xglib.XGDMatrixGetWeight(self.handle, ctypes.byref(length))
return ctypes2numpy( weights, length.value );
# clear everything
def clear(self):
xglib.XGDMatrixClear(self.handle)
def num_row(self):
return xglib.XGDMatrixNumRow(self.handle)
# append a row to DMatrix
def add_row(self, row):
xglib.XGDMatrixAddRow(self.handle, (REntry*len(row))(*row), len(row) )
# get n-throw from DMatrix
def __getitem__(self, ridx):
length = ctypes.c_ulong()
row = xglib.XGDMatrixGetRow(self.handle, ridx, ctypes.byref(length) );
return [ (int(row[i].findex),row[i].fvalue) for i in range(length.value) ]
class Booster:
"""learner class """
def __init__(self, params={}, cache=[]):
""" constructor, param: """
for d in cache:
assert isinstance(d,DMatrix)
dmats = ( ctypes.c_void_p * len(cache) )(*[ d.handle for d in cache])
self.handle = ctypes.c_void_p( xglib.XGBoosterCreate( dmats, len(cache) ) )
self.set_param( {'seed':0} )
self.set_param( params )
def __del__(self):
xglib.XGBoosterFree(self.handle)
def set_param(self, params, pv=None):
if isinstance(params,dict):
for k, v in params.items():
xglib.XGBoosterSetParam(
self.handle, ctypes.c_char_p(k.encode('utf-8')),
ctypes.c_char_p(str(v).encode('utf-8')))
elif isinstance(params,str) and pv != None:
xglib.XGBoosterSetParam(
self.handle, ctypes.c_char_p(params.encode('utf-8')),
ctypes.c_char_p(str(pv).encode('utf-8')) )
else:
for k, v in params:
xglib.XGBoosterSetParam(
self.handle, ctypes.c_char_p(k.encode('utf-8')),
ctypes.c_char_p(str(v).encode('utf-8')) )
def update(self, dtrain):
""" update """
assert isinstance(dtrain, DMatrix)
xglib.XGBoosterUpdateOneIter( self.handle, dtrain.handle )
def boost(self, dtrain, grad, hess, bst_group = -1):
""" update """
assert len(grad) == len(hess)
assert isinstance(dtrain, DMatrix)
xglib.XGBoosterBoostOneIter( self.handle, dtrain.handle,
(ctypes.c_float*len(grad))(*grad),
(ctypes.c_float*len(hess))(*hess),
len(grad), bst_group )
def update_interact(self, dtrain, action, booster_index=None):
""" beta: update with specified action"""
assert isinstance(dtrain, DMatrix)
if booster_index != None:
self.set_param('interact:booster_index', str(booster_index))
xglib.XGBoosterUpdateInteract(
self.handle, dtrain.handle, ctypes.c_char_p(str(action)) )
def eval_set(self, evals, it = 0):
for d in evals:
assert isinstance(d[0], DMatrix)
assert isinstance(d[1], str)
dmats = ( ctypes.c_void_p * len(evals) )(*[ d[0].handle for d in evals])
evnames = ( ctypes.c_char_p * len(evals) )(
*[ctypes.c_char_p(d[1].encode('utf-8')) for d in evals])
xglib.XGBoosterEvalOneIter( self.handle, it, dmats, evnames, len(evals) )
def eval(self, mat, name = 'eval', it = 0 ):
self.eval_set( [(mat,name)], it)
def predict(self, data, bst_group = -1):
length = ctypes.c_ulong()
preds = xglib.XGBoosterPredict( self.handle, data.handle, ctypes.byref(length), bst_group)
return ctypes2numpy( preds, length.value )
def save_model(self, fname):
""" save model to file """
xglib.XGBoosterSaveModel(self.handle, ctypes.c_char_p(fname.encode('utf-8')))
def load_model(self, fname):
"""load model from file"""
xglib.XGBoosterLoadModel( self.handle, ctypes.c_char_p(fname.encode('utf-8')) )
def dump_model(self, fname, fmap=''):
"""dump model into text file"""
xglib.XGBoosterDumpModel(
self.handle, ctypes.c_char_p(fname.encode('utf-8')),
ctypes.c_char_p(fmap.encode('utf-8')))
def train(params, dtrain, num_boost_round = 10, evals = [], obj=None):
""" train a booster with given paramaters """
bst = Booster(params, [dtrain]+[ d[0] for d in evals ] )
if obj == None:
for i in range(num_boost_round):
bst.update( dtrain )
if len(evals) != 0:
bst.eval_set( evals, i )
else:
# try customized objective function
for i in range(num_boost_round):
pred = bst.predict( dtrain )
grad, hess = obj( pred, dtrain )
bst.boost( dtrain, grad, hess )
if len(evals) != 0:
bst.eval_set( evals, i )
return bst
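# --- Added illustrative usage sketch (not part of the original module) ---
# Shows how the wrapper above is intended to be called; the file name and the
# parameter values are assumptions, not taken from the original source.
def _example_usage():
    dtrain = DMatrix('train.txt')                       # load data from a file path
    params = {'eta': 0.1, 'max_depth': 6, 'objective': 'binary:logistic'}
    bst = train(params, dtrain, num_boost_round=20, evals=[(dtrain, 'train')])
    return bst.predict(dtrain)                          # numpy array of predictions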
|
[
"feng.qi@hulu.com"
] |
feng.qi@hulu.com
|
67539a56c45da689a06a5d0dbec167da20875c44
|
0f85c7bfd4f29bcd856adc316cecc097fda744dc
|
/tests/test_ensure_db_indexes.py
|
b76b5876506cd87e0fd1691da623de883de60b0f
|
[
"MIT"
] |
permissive
|
yandex/yandex-taxi-testsuite
|
260f46731c9888a9efcc3372c3d92329f2fb4d56
|
8befda8c13ef58d83b2ea7d0444e34de0f67ac7f
|
refs/heads/develop
| 2023-08-31T23:28:31.874786
| 2023-08-14T16:00:53
| 2023-08-14T16:00:53
| 244,937,107
| 150
| 41
|
MIT
| 2023-09-13T16:34:07
| 2020-03-04T15:35:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,916
|
py
|
import pymongo
import pytest
from testsuite.databases.mongo import ensure_db_indexes
@pytest.fixture(scope='session')
def mongodb_collections():
return ['sharded_collection']
@pytest.mark.parametrize(
'index_from_yaml, arg_and_kwargs',
[
({'key': 'field'}, ('field', {'background': True})),
(
{'key': 'field', 'background': False},
('field', {'background': False}),
),
(
{
'key': 'field',
'expireAfterSeconds': 2592000,
'sparse': True,
'unique': True,
'name': 'name',
},
(
'field',
{
'expireAfterSeconds': 2592000,
'sparse': True,
'unique': True,
'name': 'name',
'background': True,
},
),
),
(
{
'key': [
{'name': 'field', 'type': 'ascending'},
{'name': 'field_2', 'type': 'descending'},
{'name': 'field_3', 'type': '2d'},
{'name': 'field_4', 'type': '2dsphere'},
{'name': 'field_5', 'type': 'hashed'},
{'name': 'field_6', 'type': 'ascending'},
{'name': 'field_7', 'type': 'text'},
],
},
(
[
('field', pymongo.ASCENDING),
('field_2', pymongo.DESCENDING),
('field_3', pymongo.GEO2D),
('field_4', pymongo.GEOSPHERE),
('field_5', pymongo.HASHED),
('field_6', pymongo.ASCENDING),
('field_7', pymongo.TEXT),
],
{'background': True},
),
),
(
{
'key': 'field',
'partialFilterExpression': {
'is_added_to_balance': {'$eq': 'holded'},
},
},
(
'field',
{
'partialFilterExpression': {
'is_added_to_balance': {'$eq': 'holded'},
},
'background': True,
},
),
),
],
)
def test_arg_and_kwargs_generation(index_from_yaml, arg_and_kwargs):
# pylint: disable=protected-access
assert (
ensure_db_indexes._get_args_for_ensure_func(index_from_yaml)
== arg_and_kwargs
)
def test_sharded_collection(mongodb, pytestconfig):
if not pytestconfig.option.no_sharding:
return
mongodb.sharded_collection.insert({'_id': 'foo', '_shard_id': 0})
with pytest.raises(pymongo.errors.WriteError):
mongodb.sharded_collection.insert({'_id': 'bar'})
|
[
"vitja@yandex-team.ru"
] |
vitja@yandex-team.ru
|
864225aab249cfde9e18603e2f560f35df07377d
|
acce415d18f324fdcbd2df9d4bfae003c0b6560a
|
/user/urls.py
|
8650a041a0109d2dcf93a0c0ff42c65a91bffd75
|
[] |
no_license
|
borsden/kanban
|
c9b08d34b779975b4cf3b8cc67e0e03f7816d37a
|
be0bfd22b8af61f78c407025b1706e57e5389ba4
|
refs/heads/master
| 2016-08-11T20:25:20.803053
| 2016-02-18T05:49:16
| 2016-02-18T05:49:16
| 48,171,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
# coding=utf-8
from django.conf.urls import patterns, url
import views
urlpatterns = patterns('',
url(r'^current_user/$', views.CurrentUser.as_view()),
url(r'^update_user/$', views.UpdateUser.as_view()),
url(r'^login/$', views.LoginUser.as_view(), name='login'),
url(r'^logout/$', views.LogoutUser.as_view()),
url(r'^change_avatar/$', views.ChangeAvatar.as_view()),
url(r'^change_password/$', views.ChangePassword.as_view()),
)
|
[
"borsden@gmail.com"
] |
borsden@gmail.com
|
74b61650487cc870cd8e9dd2cda6ff92a8231e9d
|
fac2ed23a092fe8c07c30c6542f977e2244d57e3
|
/문24.py
|
bc6d66ba577e4c6c0f17f198a2fd390df6fccb99
|
[] |
no_license
|
rhkdgh815/rhkdgh815
|
d1fcf9b192ffb8eb1ccc4a2dd3d2d7997342ed8d
|
5cb6380ba17fcc1bbffced4d8f0f5aab259ad155
|
refs/heads/master
| 2023-08-01T23:58:50.459446
| 2021-09-28T05:55:50
| 2021-09-28T05:55:50
| 403,934,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
n1 = int(input())
n2 = int(input())
odd_sum = 0
even_sum = 0
for i in range(n1+n2+1):
if i % 2 == 1 :
odd_sum += i
else:
even_sum += i
print("짝수:",even_sum,"홀수:",odd_sum)
|
[
"80893010+rhkdgh815@users.noreply.github.com"
] |
80893010+rhkdgh815@users.noreply.github.com
|
f34988ec1779777e353d26f3d66f85407eee93b7
|
91ad7dcbb7db4066e1bbcba01affa0a46eba1439
|
/Plotter.py
|
b44ae256fcf4ed3cc63627793a4930bcdab84531
|
[] |
no_license
|
dcakagi/PnPSolver
|
54e4c6f79037989e309aefe7debe670fee36ef5a
|
d77344034497cdd47e4605cfa21df7c10dbd729b
|
refs/heads/master
| 2023-07-24T07:57:28.141307
| 2021-09-03T20:54:46
| 2021-09-03T20:54:46
| 393,401,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,830
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnnotationBbox, TextArea
class ErrorPlotter:
def __init__(self, plots: list, error_window_size: int, error_units_: str, time_units_: str, secondary_axes: bool=False):
'''
Class to be used for plotting errors. Default settings will plot some provided error(s) vs. time, although a different
variable can be plotted along the x-axis by providing the data in the first argument of the update_plot() function
:param plots: List of plots to be graphed. Names of error plots provided in list will be the default main y-axes labels,
with secondary y-axes tracking the percent error
:param error_window_size: Number of prior timesteps to be used to calculate the mean error
:param error_units_: Units for measuring absolute error (m, cm, rad, etc.)
:param time_units_: Units of time to be plotted along the x-axis if plotting error vs. time
:param secondary_axes: Show secondary axis of percent error on plots
'''
self.state = None
self.num_plots = len(plots)
self.fig, self.axs = plt.subplots(self.num_plots, 1)
self.lines = []
self.times = []
self.twins = []
self.twin_lines = []
self.error_data = {}
self.perc_error_data = {}
self.window_size = error_window_size
self.error_units = error_units_
self.time_units = time_units_
self.error_window = None
self.perc_error_window = None
self.annotation_boxes = []
self.annotations = []
self.second_axis = secondary_axes
idx = 0
for ax in self.axs:
ax.set_ylabel(plots[idx] + " (" + self.error_units + ")")
if self.second_axis:
twin = ax.twinx()
self.twins.append(twin)
twin.set_ylabel(str(plots[idx] + " (Percent)"))
idx += 1
self.axs[-1].set_xlabel("Time (" + self.time_units + ")")
plt.ion()
self.init = False
def set_title(self, title):
self.axs[0].set_title(title)
def set_xlabel(self, label):
self.axs[-1].set_xlabel(label)
def set_main_ylabels(self, *labels):
idx = 0
for ax in self.axs:
ax.set_ylabel(labels[idx])
idx += 1
def set_secondary_ylabels(self, *labels):
if not self.second_axis:
return
idx = 0
for twin in self.twins:
twin.set_ylabel(labels[idx])
idx += 1
def get_average_errors(self):
error = np.mean(self.error_window, axis=1)
perc_error = None
if self.second_axis:
perc_error = np.mean(self.perc_error_window, axis=1)
return [error, perc_error]
def update_plot(self, time: float, *in_data: float):
'''
:param time: Timestep associated with updated data if plotting error vs. time, OR other independent variable (i.e. range) to plot error against
:param data: Data to be plotted. If plotting secondary axis of percent error, use form (error_i, percent_error_i, ...) for n plots
'''
plt.ion()
self.times.append(time)
if self.second_axis:
data = in_data[0:-1:2]
perc_data = in_data[1::2]
else:
data = in_data
perc_data = None
if not self.init:
self.error_window = np.array(data).reshape(-1, 1)
self.perc_error_window = None
if self.second_axis:
self.perc_error_window = np.array(perc_data).reshape(-1, 1)
ave_errors = self.get_average_errors()
for idx in range(0, len(data)):
self.error_data[idx] = [data[idx]]
line, = self.axs[idx].plot(time, data[idx], "r-")
self.lines.append(line)
ave_error = ave_errors[0][idx]
self.annotations.append([TextArea("Absolute Error (" + str(self.window_size) + " window): " + str("{:.3f} ".format(ave_error)) + self.error_units)])
ab = AnnotationBbox(self.annotations[idx][0], (0.01, 0.9), xycoords='axes fraction', alpha=1.0, pad=0.1, box_alignment=(0, 0))
self.axs[idx].add_artist(ab)
if self.second_axis:
self.axs[idx].tick_params(axis="y", colors=line.get_color())
self.axs[idx].yaxis.label.set_color(line.get_color())
self.perc_error_data[idx] = [perc_data[idx]]
twin_line, = self.twins[idx].plot(time, perc_data[idx], "b-", zorder=1)
self.twin_lines.append(twin_line)
self.twins[idx].tick_params(axis="y", colors=twin_line.get_color())
self.twins[idx].yaxis.label.set_color(twin_line.get_color())
self.axs[idx].set_zorder(self.twins[idx].get_zorder()+1)
self.axs[idx].patch.set_visible(False)
ave_perc_error = ave_errors[1][idx]
self.annotations[idx].append(TextArea("Percent Error (" + str(self.window_size) + " window): " + str("{:.3f}%".format(ave_perc_error))))
ab1 = AnnotationBbox(self.annotations[idx][1], (0.01, 0.8), xycoords='axes fraction', alpha=1.0, pad=0.1, box_alignment=(0, 0))
self.axs[idx].add_artist(ab1)
self.init = True
return
# Check if window(s) is/are at maximum size, delete oldest points if needed
if self.error_window.shape[1] == self.window_size:
self.error_window = np.delete(self.error_window, 0, 1)
if self.second_axis:
self.perc_error_window = np.delete(self.perc_error_window, 0, 1)
self.error_window = np.append(self.error_window, np.array(data).reshape(-1, 1), axis=1)
if self.second_axis:
self.perc_error_window = np.append(self.perc_error_window, np.array(perc_data).reshape(-1, 1), axis=1)
for idx in range(0, len(data)):
ave_errors = self.get_average_errors()
self.error_data[idx].append(data[idx])
self.lines[idx].set_data(self.times, self.error_data[idx])
ave_error = ave_errors[0][idx]
self.annotations[idx][0].set_text("Absolute Error (" + str(self.window_size) + " window): " + str("{:.3f} ".format(ave_error)) + self.error_units)
self.axs[idx].relim()
self.axs[idx].autoscale_view(True, True, True)
if self.second_axis:
self.perc_error_data[idx].append(perc_data[idx])
self.twin_lines[idx].set_data(self.times, self.perc_error_data[idx])
ave_perc_error = ave_errors[1][idx]
self.annotations[idx][1].set_text("Percent Error (" + str(self.window_size) + " window): " + str("{:.3f}%".format(ave_perc_error)))
self.twins[idx].relim()
#self.twins[idx].set_ylim(0, 100)
self.twins[idx].autoscale_view(True, True, True)
#plt.show()
plt.pause(0.0000001)
class PosePlotter:
def __init__(self, plots: [list], units: str, time_units: str, use_estimates: bool=True):
'''
:param plots: List of variable lists to plot on each axis. If a single variable is to be graphed it will be plotted vs time
:param units: Measurement units of plotted data (used for axis labeling)
:param time_units: Units of time to be plotted along the x-axis
'''
self.num_plots = len(plots)
self.fig, self.axs = plt.subplots(1, self.num_plots)
self.units = units
self.time_units = time_units
self.times = []
self.data_lines = []
self.est_lines = []
self.data = {}
self.est_data = {}
self.plots = plots
self.use_estimates = use_estimates
idx = 0
for ax in self.axs:
if len(plots[idx]) == 1:
ax.set_ylabel(plots[idx][0] + " (" + self.units + ")")
ax.set_xlabel("Time (" + self.time_units + ")")
elif len(plots[idx]) == 2:
ax.set_xlabel(plots[idx][0] + " (" + self.units + ")")
ax.set_ylabel(plots[idx][1] + " (" + self.units + ")")
else:
pass # Does not handle plotting three dimensions
idx += 1
plt.ion()
self.init = False
def update_plot(self, time: float, *in_data: float):
'''
:param time: Timestep associated with updated data
:param data: Data to be plotted, matching order of variables provided to class constructor, in form (data_i, est_data_i, ...)
'''
plt.ion()
self.times.append(time)
if self.use_estimates:
data = in_data[0:-1:2]
est_data = in_data[1::2]
else:
data = in_data
est_data = None
if not self.init:
for d in range(len(data)):
self.data[d] = [data[d]]
if self.use_estimates:
self.est_data[d] = [est_data[d]]
data_idx = 0
for p in range(self.num_plots):
if len(self.plots[p]) == 1:
data_line, = self.axs[p].plot(self.times, self.data[data_idx], "b-")
self.data_lines.append(data_line)
if self.use_estimates:
est_line, = self.axs[p].plot(self.times, self.est_data[data_idx], "r-")
self.est_lines.append(est_line)
self.axs[p].legend([self.data_lines[p], self.est_lines[p]], ["Actual " + self.plots[p][0], "Estimated " + self.plots[p][0]])
data_idx += 1
elif len(self.plots[p]) == 2:
data_line, = self.axs[p].plot(self.data[data_idx], self.data[data_idx + 1], "b-")
self.data_lines.append(data_line)
if self.use_estimates:
est_line, = self.axs[p].plot(self.est_data[data_idx], self.est_data[data_idx + 1], "r-")
self.est_lines.append(est_line)
self.axs[p].legend([self.data_lines[p], self.est_lines[p]], ["Actual " + self.plots[p][0] + ", " + self.plots[p][1],
"Estimated " + self.plots[p][1] + ", " + self.plots[p][1]])
data_idx += 2
else:
pass # No 3D plotting implemented
self.init = True
else:
for d in range(len(data)):
self.data[d].append(data[d])
if self.use_estimates:
self.est_data[d].append(est_data[d])
data_idx = 0
for p in range(self.num_plots):
if len(self.plots[p]) == 1:
self.data_lines[p].set_data(self.times, self.data[data_idx])
if self.use_estimates:
self.est_lines[p].set_data(self.times, self.est_data[data_idx])
data_idx += 1
elif len(self.plots[p]) == 2:
self.data_lines[p].set_data(self.data[data_idx], self.data[data_idx + 1])
if self.use_estimates:
self.est_lines[p].set_data(self.est_data[data_idx], self.est_data[data_idx + 1])
data_idx += 2
self.axs[p].relim()
self.axs[p].autoscale_view(True, True, True)
plt.pause(0.00001)
def set_xlabel(self, plot_idx, label):
self.axs[plot_idx].set_xlabel(label)
def set_ylabel(self, plot_idx, label):
self.axs[plot_idx].set_ylabel(label)
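# --- Added illustrative usage sketch (not part of the original file) ---
# Follows the calling convention documented in ErrorPlotter.update_plot: with
# secondary_axes=True the data arrives as (error_i, percent_error_i, ...) pairs.
# The plot names, units, and data source are assumptions.
def _example_error_plotter_usage(data_source):
    plotter = ErrorPlotter(["X Error", "Y Error"], error_window_size=50,
                           error_units_="m", time_units_="s", secondary_axes=True)
    # data_source is assumed to yield (time, x_err, x_pct_err, y_err, y_pct_err)
    for t, x_err, x_pct, y_err, y_pct in data_source:
        plotter.update_plot(t, x_err, x_pct, y_err, y_pct)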
|
[
"dcakagi@gmail.com"
] |
dcakagi@gmail.com
|
4f2b19ca6ea2aa053e8a9553366d01288860bf6f
|
5ee1c8378e374dd239752bcc79b44bcbbd89559a
|
/wsgi.py
|
3368c2fb6bbe0e361458b3fcc7990de7fce240c8
|
[
"Apache-2.0"
] |
permissive
|
mahdikord/kordba
|
302bdaf03afddef04c3e9b860c096a8d0f29514a
|
20c71f636cfb4e49265c0f7984ac3373cd2e7ba4
|
refs/heads/master
| 2021-01-10T07:49:14.110378
| 2016-02-07T08:22:08
| 2016-02-07T08:22:08
| 51,240,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40,537
|
py
|
#!/usr/bin/env python
import os
def application(environ, start_response):
ctype = 'text/plain'
if environ['PATH_INFO'] == '/health':
response_body = "1"
elif environ['PATH_INFO'] == '/env':
response_body = ['%s: %s' % (key, value)
for key, value in sorted(environ.items())]
response_body = '\n'.join(response_body)
else:
ctype = 'text/html'
response_body = '''<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>Welcome to OpenShift</title>
<style>
/*!
* Bootstrap v3.0.0
*
* Copyright 2013 Twitter, Inc
* Licensed under the Apache License v2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Designed and built with all the love in the world @twitter by @mdo and @fat.
*/
.logo {
background-size: cover;
height: 58px;
width: 180px;
margin-top: 6px;
background-image: url(data:image/svg+xml;base64,<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 14.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 43363)  -->
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" width="180px"
	 height="58px" viewBox="-127.391 432.019 180 58" enable-background="new -127.391 432.019 180 58" xml:space="preserve">
<g id="Layer_1" display="none">
	<g display="inline">
		<path d="M-121.385,438.749c-0.416,0.361-1.006,0.541-1.771,0.541h-2.774v-7h2.874c0.612,0,1.099,0.155,1.462,0.464
			c0.362,0.31,0.544,0.76,0.544,1.353c0,0.359-0.084,0.651-0.253,0.874c-0.168,0.223-0.378,0.398-0.629,0.524
			c0.139,0.04,0.278,0.102,0.417,0.185s0.265,0.192,0.377,0.326c0.112,0.133,0.204,0.293,0.273,0.48s0.104,0.401,0.104,0.641
			C-120.761,437.852-120.969,438.389-121.385,438.749z M-122.312,433.514c-0.146-0.176-0.396-0.264-0.75-0.264h-1.88v1.8h1.88
			c0.173,0,0.322-0.024,0.445-0.074c0.123-0.05,0.223-0.116,0.3-0.199c0.077-0.083,0.133-0.177,0.17-0.283s0.055-0.215,0.055-0.328
			C-122.091,433.906-122.165,433.689-122.312,433.514z M-122.121,436.32c-0.214-0.207-0.52-0.31-0.92-0.31h-1.9v2.32h1.87
			c0.466,0,0.795-0.106,0.985-0.32s0.285-0.494,0.285-0.84C-121.801,436.81-121.908,436.527-122.121,436.32z"/>
		<path d="M-116.281,439.29v-0.506c-0.134,0.195-0.318,0.347-0.555,0.455s-0.492,0.162-0.765,0.162c-0.613,0-1.078-0.196-1.395-0.59
			c-0.316-0.393-0.475-0.98-0.475-1.76v-3.01h1.04v2.963c0,0.532,0.095,0.905,0.284,1.117c0.189,0.213,0.453,0.319,0.792,0.319
			c0.345,0,0.61-0.116,0.796-0.349c0.186-0.233,0.279-0.562,0.279-0.988v-3.063h1.04v5.25H-116.281z"/>
		<path d="M-112.697,433.165c-0.13,0.13-0.285,0.195-0.465,0.195c-0.187,0-0.345-0.065-0.475-0.195s-0.195-0.285-0.195-0.465
			c0-0.187,0.065-0.345,0.195-0.475s0.288-0.195,0.475-0.195c0.18,0,0.335,0.065,0.465,0.195s0.195,0.289,0.195,0.475
			C-112.501,432.88-112.567,433.035-112.697,433.165z M-113.682,439.29v-5.25h1.04v5.25H-113.682z"/>
		<path d="M-111.031,439.29v-6.75l1.04-0.54v7.29H-111.031z"/>
		<path d="M-105.921,439.16c-0.127,0.073-0.275,0.131-0.445,0.175c-0.17,0.043-0.358,0.065-0.565,0.065
			c-0.367,0-0.655-0.113-0.865-0.34s-0.315-0.577-0.315-1.05v-3.03h-0.75v-0.94h0.75v-1.5l1.01-0.54v2.04h1.3v0.94h-1.3v2.85
			c0,0.247,0.042,0.414,0.125,0.5c0.083,0.087,0.222,0.13,0.415,0.13c0.133,0,0.27-0.021,0.41-0.065s0.256-0.091,0.35-0.145
			L-105.921,439.16z"/>
		<path d="M-97.452,437.805c-0.12,0.343-0.287,0.633-0.5,0.87c-0.213,0.237-0.463,0.417-0.75,0.54
			c-0.287,0.124-0.6,0.185-0.94,0.185c-0.333,0-0.64-0.065-0.92-0.195c-0.28-0.13-0.523-0.315-0.73-0.555
			c-0.207-0.24-0.368-0.526-0.485-0.86s-0.175-0.707-0.175-1.12c0-0.426,0.06-0.81,0.18-1.15s0.285-0.628,0.495-0.865
			c0.21-0.237,0.457-0.417,0.74-0.54c0.284-0.124,0.592-0.185,0.925-0.185c0.333,0,0.643,0.065,0.93,0.195s0.535,0.312,0.745,0.545
			s0.374,0.519,0.49,0.855c0.116,0.337,0.175,0.708,0.175,1.115C-97.271,437.073-97.332,437.462-97.452,437.805z M-98.667,435.385
			c-0.237-0.317-0.565-0.475-0.985-0.475c-0.394,0-0.702,0.158-0.925,0.475c-0.223,0.316-0.335,0.735-0.335,1.255
			c0,0.58,0.12,1.021,0.36,1.325c0.24,0.304,0.557,0.455,0.95,0.455c0.193,0,0.37-0.046,0.53-0.14
			c0.16-0.094,0.296-0.219,0.41-0.375c0.113-0.157,0.2-0.342,0.26-0.555s0.09-0.44,0.09-0.68
			C-98.312,436.13-98.43,435.702-98.667,435.385z"/>
		<path d="M-92.812,439.29v-2.963c0-0.532-0.095-0.904-0.284-1.117c-0.189-0.213-0.453-0.319-0.791-0.319
			c-0.345,0-0.611,0.116-0.796,0.349c-0.186,0.233-0.279,0.562-0.279,0.988v3.063h-1.04v-5.25h1.04v0.506
			c0.133-0.195,0.318-0.347,0.555-0.455s0.492-0.162,0.765-0.162c0.613,0,1.078,0.197,1.395,0.59c0.316,0.394,0.475,0.98,0.475,1.76
			v3.01H-92.812z"/>
	</g>
</g>
<g id="Layer_6">
	<g>
		<path d="M-122.266,438.984c-0.39,0.344-0.955,0.516-1.695,0.516h-2.51v-7h2.56c0.28,0,0.535,0.035,0.765,0.105
			s0.43,0.176,0.6,0.319c0.17,0.143,0.301,0.324,0.395,0.544c0.093,0.22,0.14,0.479,0.14,0.779c0,0.386-0.093,0.693-0.28,0.923
			c-0.187,0.23-0.43,0.398-0.73,0.504c0.16,0.04,0.32,0.102,0.48,0.185c0.16,0.083,0.303,0.194,0.43,0.331
			c0.127,0.137,0.23,0.307,0.31,0.511s0.12,0.446,0.12,0.726C-121.681,438.121-121.875,438.641-122.266,438.984z M-123.071,433.504
			c-0.187-0.196-0.477-0.294-0.87-0.294h-1.75v2.17h1.69c0.433,0,0.743-0.108,0.93-0.323c0.187-0.216,0.28-0.476,0.28-0.781
			C-122.791,433.957-122.884,433.7-123.071,433.504z M-122.861,436.45c-0.267-0.24-0.63-0.36-1.09-0.36h-1.74v2.7h1.78
			c0.526,0,0.9-0.12,1.12-0.36c0.22-0.24,0.33-0.56,0.33-0.96C-122.46,437.03-122.594,436.69-122.861,436.45z"/>
		<path d="M-117.121,439.5v-0.64c-0.153,0.22-0.35,0.4-0.59,0.54s-0.527,0.21-0.86,0.21c-0.28,0-0.534-0.042-0.76-0.125
			c-0.227-0.083-0.42-0.213-0.58-0.39c-0.16-0.177-0.283-0.4-0.37-0.67c-0.087-0.27-0.13-0.595-0.13-0.975v-3.2h0.76v3.077
			c0,0.568,0.101,0.984,0.304,1.248s0.513,0.396,0.931,0.396c0.365,0,0.672-0.13,0.921-0.391s0.374-0.678,0.374-1.252v-3.077h0.76
			v5.25H-117.121z"/>
		<path d="M-113.906,433.155c-0.103,0.104-0.225,0.155-0.365,0.155c-0.153,0-0.284-0.052-0.39-0.155
			c-0.106-0.103-0.16-0.228-0.16-0.375c0-0.153,0.053-0.281,0.16-0.385s0.237-0.155,0.39-0.155c0.14,0,0.262,0.051,0.365,0.155
			c0.104,0.104,0.155,0.232,0.155,0.385C-113.751,432.927-113.803,433.052-113.906,433.155z M-114.661,439.5v-5.25h0.76v5.25
			H-114.661z"/>
		<path d="M-112.151,439.5v-6.87l0.76-0.42v7.29H-112.151z"/>
		<path d="M-108.721,434.89v3.412c0,0.232,0.039,0.396,0.115,0.489c0.077,0.093,0.215,0.14,0.415,0.14
			c0.153,0,0.285-0.012,0.395-0.035s0.225-0.062,0.345-0.115l-0.05,0.65c-0.147,0.06-0.295,0.105-0.445,0.135
			c-0.15,0.03-0.325,0.045-0.525,0.045c-0.329,0-0.579-0.088-0.751-0.264c-0.172-0.176-0.258-0.484-0.258-0.923v-3.532h-0.65v-0.64
			h0.65v-1.62l0.76-0.42v2.04h1.3v0.64H-108.721z"/>
		<path d="M-99.271,438.025c-0.12,0.344-0.284,0.633-0.49,0.87s-0.45,0.415-0.73,0.535c-0.28,0.12-0.58,0.18-0.9,0.18
			s-0.619-0.058-0.895-0.175c-0.277-0.117-0.515-0.29-0.715-0.52c-0.2-0.23-0.358-0.515-0.475-0.855s-0.175-0.733-0.175-1.18
			c0-0.446,0.06-0.84,0.18-1.18c0.12-0.34,0.283-0.625,0.49-0.855c0.207-0.23,0.45-0.405,0.73-0.525c0.28-0.12,0.58-0.18,0.9-0.18
			c0.32,0,0.618,0.057,0.895,0.17c0.276,0.113,0.515,0.283,0.715,0.51c0.2,0.227,0.358,0.509,0.475,0.845
			c0.117,0.337,0.175,0.729,0.175,1.175C-99.091,437.287-99.151,437.682-99.271,438.025z M-100.27,435.297
			c-0.279-0.345-0.648-0.518-1.106-0.518c-0.458,0-0.826,0.173-1.102,0.518c-0.276,0.345-0.414,0.866-0.414,1.562
			c0,0.697,0.138,1.223,0.414,1.578s0.643,0.533,1.102,0.533c0.458,0,0.827-0.178,1.106-0.533c0.279-0.355,0.418-0.881,0.418-1.578
			C-99.851,436.164-99.991,435.643-100.27,435.297z"/>
		<path d="M-94.421,439.5v-3.077c0-0.568-0.102-0.983-0.304-1.248c-0.202-0.264-0.513-0.396-0.931-0.396
			c-0.365,0-0.672,0.13-0.921,0.391s-0.374,0.678-0.374,1.252v3.077h-0.76v-5.25h0.76v0.64c0.153-0.22,0.35-0.4,0.59-0.54
			c0.24-0.14,0.526-0.21,0.86-0.21c0.28,0,0.533,0.042,0.76,0.125s0.42,0.213,0.58,0.39c0.16,0.177,0.283,0.4,0.37,0.67
			c0.086,0.27,0.13,0.595,0.13,0.975v3.2H-94.421z"/>
	</g>
</g>
<g id="Layer_5">
	<g>
		<path fill="#DB212F" d="M-119.063,465.698l-4.604,1.678c0.059,0.738,0.185,1.466,0.364,2.181l4.376-1.592
			C-119.068,467.224-119.12,466.462-119.063,465.698"/>
		<g>
			<g>
				<path fill="#DB212F" d="M-98.71,460.606c-0.321-0.663-0.693-1.303-1.122-1.905l-4.606,1.675
					c0.538,0.547,0.986,1.164,1.354,1.823L-98.71,460.606z"/>
			</g>
			<g>
				<path fill="#DB212F" d="M-108.841,459.301c0.959,0.449,1.787,1.057,2.488,1.773l4.604-1.677
					c-1.276-1.79-3.012-3.286-5.141-4.277c-6.583-3.071-14.434-0.213-17.505,6.369c-0.992,2.129-1.362,4.392-1.188,6.582
					l4.606-1.675c0.075-0.998,0.318-1.998,0.766-2.957C-118.218,459.164-113.116,457.309-108.841,459.301"/>
			</g>
		</g>
		<path fill="#EA2227" d="M-123.015,469.452l-4.376,1.594c0.401,1.594,1.101,3.11,2.057,4.458l4.596-1.67
			C-121.919,472.621-122.702,471.09-123.015,469.452"/>
		<path fill="#DB212F" d="M-103.93,467.715c-0.073,0.999-0.325,1.998-0.774,2.957c-1.994,4.277-7.094,6.134-11.371,4.14
			c-0.958-0.449-1.795-1.053-2.492-1.77l-4.594,1.673c1.271,1.789,3.007,3.285,5.137,4.279c6.582,3.069,14.434,0.211,17.502-6.372
			c0.994-2.129,1.362-4.391,1.185-6.578L-103.93,467.715z"/>
		<path fill="#EA2227" d="M-102.798,462.094l-4.374,1.592c0.811,1.457,1.195,3.134,1.071,4.819l4.594-1.672
			C-101.639,465.185-102.078,463.575-102.798,462.094"/>
		<path fill="#231F20" d="M-72.271,467.031c0-1.331-0.18-2.512-0.54-3.543c-0.344-1.049-0.837-1.931-1.478-2.651
			c-0.624-0.734-1.384-1.29-2.275-1.666c-0.876-0.392-1.845-0.586-2.909-0.586c-1.079,0-2.063,0.195-2.955,0.586
			c-0.892,0.39-1.659,0.955-2.299,1.689c-0.642,0.718-1.142,1.602-1.502,2.651c-0.345,1.047-0.516,2.236-0.516,3.565
			c0,1.33,0.171,2.52,0.516,3.566c0.36,1.031,0.853,1.915,1.479,2.651c0.64,0.718,1.399,1.273,2.275,1.665
			c0.892,0.376,1.875,0.563,2.956,0.563c1.062,0,2.039-0.195,2.931-0.586c0.892-0.391,1.659-0.947,2.3-1.665
			c0.642-0.736,1.134-1.626,1.478-2.675C-72.451,469.548-72.271,468.359-72.271,467.031L-72.271,467.031z M-75.649,467.076
			c0,1.675-0.353,2.956-1.055,3.848c-0.689,0.892-1.612,1.337-2.77,1.337c-1.158,0-2.095-0.453-2.815-1.36
			c-0.718-0.907-1.078-2.197-1.078-3.87c0-1.675,0.345-2.957,1.031-3.848c0.704-0.892,1.636-1.336,2.793-1.336
			s2.094,0.453,2.814,1.36C-76.009,464.114-75.649,465.403-75.649,467.076L-75.649,467.076z"/>
		<path fill="#231F20" d="M-55.075,464.051c0-0.876-0.149-1.634-0.446-2.275c-0.298-0.658-0.703-1.205-1.219-1.644
			c-0.518-0.437-1.12-0.758-1.807-0.96c-0.689-0.218-1.415-0.329-2.183-0.329h-7.179v16.422h3.285v-5.818h3.611
			c0.845,0,1.628-0.1,2.347-0.305c0.736-0.203,1.368-0.523,1.901-0.96c0.531-0.439,0.944-0.994,1.242-1.667
			C-55.224,465.826-55.075,465.005-55.075,464.051L-55.075,464.051z M-58.454,464.121c0,1.424-0.782,2.134-2.345,2.134h-3.824
			v-4.222h3.777c0.733,0,1.312,0.171,1.735,0.516C-58.672,462.877-58.454,463.401-58.454,464.121L-58.454,464.121z"/>
		<polygon fill="#231F20" points="-39.147,475.264 -39.147,472.05 -47.615,472.05 -47.615,468.086 -42.9,468.086 -42.9,464.896 
			-47.615,464.896 -47.615,462.057 -39.497,462.057 -39.497,458.842 -50.9,458.842 -50.9,475.264 		"/>
		<path fill="#231F20" d="M-21.292,475.264v-16.422h-3.238v7.812c0.016,0.344,0.023,0.695,0.023,1.055v0.986
			c0.016,0.297,0.023,0.524,0.023,0.679c-0.109-0.218-0.281-0.5-0.517-0.845c-0.219-0.358-0.43-0.695-0.633-1.008l-5.818-8.68
			h-3.144v16.422h3.236v-7.226c0-0.234-0.008-0.523-0.021-0.868v-1.032c0-0.36-0.008-0.688-0.023-0.986v-0.703
			c0.107,0.218,0.273,0.508,0.492,0.866c0.233,0.345,0.452,0.673,0.657,0.986l6.028,8.962H-21.292z"/>
		<path fill="#231F20" d="M-5.879,470.947c0-0.61-0.079-1.149-0.234-1.618c-0.157-0.47-0.424-0.899-0.798-1.291
			c-0.359-0.392-0.844-0.75-1.454-1.079c-0.61-0.328-1.37-0.657-2.275-0.986c-0.831-0.297-1.502-0.571-2.018-0.821
			c-0.502-0.25-0.892-0.5-1.173-0.75c-0.282-0.266-0.471-0.532-0.563-0.799c-0.095-0.282-0.142-0.593-0.142-0.937
			c0-0.329,0.056-0.634,0.163-0.916c0.126-0.297,0.313-0.555,0.565-0.773c0.266-0.22,0.601-0.392,1.008-0.518
			c0.407-0.14,0.892-0.21,1.454-0.21c0.829,0,1.541,0.133,2.136,0.399c0.608,0.25,1.211,0.626,1.805,1.126l1.174-1.431
			c-0.688-0.547-1.423-0.978-2.205-1.291c-0.766-0.313-1.696-0.469-2.791-0.469c-0.768,0-1.47,0.095-2.111,0.282
			c-0.626,0.187-1.166,0.468-1.618,0.844c-0.439,0.36-0.783,0.797-1.033,1.313c-0.25,0.518-0.376,1.104-0.376,1.76
			c0,0.594,0.078,1.118,0.235,1.572c0.172,0.453,0.438,0.868,0.798,1.244c0.376,0.358,0.86,0.703,1.454,1.032
			c0.61,0.313,1.36,0.626,2.252,0.938c0.75,0.266,1.376,0.532,1.877,0.797c0.502,0.25,0.899,0.508,1.196,0.773
			c0.313,0.266,0.532,0.555,0.658,0.868s0.187,0.657,0.187,1.033c0,0.876-0.32,1.563-0.961,2.063
			c-0.625,0.502-1.485,0.752-2.58,0.752c-0.845,0-1.628-0.181-2.346-0.54c-0.721-0.36-1.393-0.836-2.018-1.43l-1.221,1.36
			c0.657,0.657,1.454,1.205,2.394,1.642c0.952,0.422,1.994,0.634,3.12,0.634c0.859,0,1.625-0.118,2.299-0.352
			c0.672-0.234,1.244-0.555,1.711-0.96c0.469-0.408,0.821-0.892,1.056-1.455C-6.005,472.192-5.879,471.589-5.879,470.947
			L-5.879,470.947z"/>
		<polygon fill="#231F20" points="10.801,475.264 10.801,458.842 8.971,458.842 8.971,465.857 0.806,465.857 0.806,458.842 
			-1.024,458.842 -1.024,475.264 0.806,475.264 0.806,467.522 8.971,467.522 8.971,475.264 		"/>
		<rect x="16.289" y="458.842" fill="#231F20" width="1.832" height="16.422"/>
		<polygon fill="#231F20" points="33.25,460.507 33.25,458.842 23.609,458.842 23.609,475.264 25.438,475.264 25.438,467.617 
			29.943,467.617 29.943,465.95 25.438,465.95 25.438,460.507 		"/>
		<polygon fill="#231F20" points="48.008,460.507 48.008,458.842 36.512,458.842 36.512,460.507 41.344,460.507 41.344,475.264 
			43.176,475.264 43.176,460.507 		"/>
		<path fill="#231F20" d="M-41.526,488.261c-0.223,0.124-0.534,0.212-0.896,0.212c-0.649,0-1.049-0.399-1.049-1.234v-2.691h-0.665
			v-0.836h0.665v-1.331l0.896-0.479v1.809h1.155v0.836h-1.155v2.531c0,0.435,0.144,0.559,0.48,0.559
			c0.238,0,0.506-0.089,0.675-0.187L-41.526,488.261z M-45.843,486.387c-0.248-0.124-0.566-0.205-1.064-0.205
			c-0.587,0-0.959,0.268-0.959,0.693c0,0.462,0.294,0.773,0.896,0.773c0.49,0,0.916-0.303,1.128-0.596V486.387z M-45.843,488.375
			v-0.461c-0.318,0.319-0.773,0.558-1.279,0.558c-0.754,0-1.614-0.427-1.614-1.573c0-1.037,0.8-1.507,1.856-1.507
			c0.436,0,0.779,0.061,1.037,0.177v-0.346c0-0.506-0.311-0.792-0.878-0.792c-0.479,0-0.852,0.091-1.216,0.295l-0.354-0.693
			c0.443-0.275,0.94-0.419,1.597-0.419c1.039,0,1.749,0.508,1.749,1.565v3.195H-45.843z M-50.807,488.375v-2.787h-2.857v2.787
			h-0.932v-6.216h0.932v2.515h2.857v-2.515h0.934v6.216H-50.807z M-59.127,485.072c-0.204-0.275-0.63-0.61-1.092-0.61
			c-0.658,0-1.012,0.496-1.012,1.48c0,1.173,0.372,1.687,1.047,1.687c0.435,0,0.818-0.291,1.057-0.595V485.072L-59.127,485.072z
			 M-59.137,488.375v-0.443c-0.336,0.309-0.727,0.54-1.214,0.54c-1.006,0-1.796-0.727-1.796-2.503c0-1.599,0.872-2.354,1.841-2.354
			c0.471,0,0.913,0.25,1.169,0.533v-1.774l0.907-0.472v6.473H-59.137z M-64.979,484.442c-0.611,0-0.984,0.428-1.064,1.171h2.165
			C-63.921,484.976-64.223,484.442-64.979,484.442 M-62.981,486.37h-3.08c0.098,0.896,0.602,1.279,1.171,1.279
			c0.392,0,0.703-0.142,1.012-0.374l0.543,0.587c-0.409,0.39-0.897,0.612-1.607,0.612c-1.093,0-2.016-0.88-2.016-2.425
			c0-1.581,0.836-2.433,2.042-2.433c1.323,0,1.961,1.075,1.961,2.336C-62.956,486.122-62.971,486.271-62.981,486.37
			 M-69.695,483.039h-1.812v1.998h1.812c0.622,0,1.058-0.319,1.058-0.994C-68.637,483.396-69.063,483.039-69.695,483.039
			 M-69.063,485.836l1.27,2.541h-1.072l-1.237-2.46h-1.403v2.46h-0.913v-6.218h2.725c1.084,0,1.998,0.578,1.998,1.858
			C-67.697,485.011-68.22,485.624-69.063,485.836 M-78.013,490.019h-0.969l0.676-1.732l-1.715-4.572h1.004l0.762,2.281
			c0.146,0.409,0.356,1.102,0.411,1.36c0.079-0.278,0.274-0.94,0.418-1.343l0.789-2.298h0.969L-78.013,490.019z M-82.446,484.46
			c-0.435,0-0.814,0.293-1.057,0.594v1.963c0.204,0.276,0.632,0.614,1.095,0.614c0.654,0,1.011-0.498,1.011-1.482
			C-81.397,484.974-81.771,484.46-82.446,484.46 M-82.32,488.474c-0.473,0-0.915-0.248-1.173-0.533v0.435h-0.906v-6.001l0.906-0.472
			v2.255c0.338-0.309,0.728-0.54,1.216-0.54c1.004,0,1.796,0.729,1.796,2.504C-80.481,487.72-81.351,488.474-82.32,488.474"/>
		<path fill="#231F20" d="M-39.347,482.736c-0.029-0.023-0.069-0.035-0.124-0.035h-0.227v0.287h0.213
			c0.12,0,0.179-0.047,0.179-0.144C-39.306,482.797-39.32,482.762-39.347,482.736 M-39.247,483.004
			c-0.034,0.041-0.083,0.069-0.143,0.083l0.191,0.364h-0.134l-0.184-0.354h-0.183v0.354h-0.112V482.6h0.345
			c0.076,0,0.142,0.02,0.194,0.061c0.054,0.038,0.079,0.101,0.079,0.183C-39.192,482.909-39.209,482.962-39.247,483.004
			 M-38.92,482.768c-0.033-0.083-0.08-0.154-0.14-0.213c-0.059-0.058-0.13-0.104-0.211-0.136c-0.08-0.035-0.169-0.051-0.264-0.051
			c-0.092,0-0.179,0.016-0.262,0.051c-0.08,0.031-0.149,0.077-0.21,0.136c-0.06,0.06-0.106,0.131-0.143,0.213
			c-0.033,0.08-0.049,0.173-0.049,0.273c0,0.099,0.016,0.189,0.049,0.272c0.036,0.083,0.083,0.153,0.143,0.21
			c0.061,0.058,0.13,0.106,0.21,0.139c0.083,0.032,0.17,0.048,0.262,0.048c0.095,0,0.184-0.016,0.264-0.048
			c0.081-0.033,0.152-0.081,0.211-0.139c0.06-0.057,0.106-0.128,0.14-0.21c0.035-0.083,0.052-0.173,0.052-0.272
			C-38.869,482.941-38.885,482.848-38.92,482.768 M-38.822,483.354c-0.041,0.093-0.095,0.175-0.163,0.244
			c-0.069,0.065-0.15,0.118-0.244,0.156c-0.095,0.035-0.195,0.054-0.306,0.054c-0.108,0-0.208-0.02-0.303-0.054
			c-0.095-0.038-0.177-0.091-0.244-0.156c-0.069-0.069-0.124-0.151-0.163-0.244c-0.038-0.095-0.058-0.201-0.058-0.313
			c0-0.118,0.02-0.221,0.058-0.315c0.039-0.096,0.094-0.178,0.163-0.244c0.067-0.069,0.149-0.12,0.244-0.157
			c0.095-0.037,0.194-0.055,0.303-0.055c0.11,0,0.211,0.018,0.306,0.055c0.094,0.038,0.175,0.089,0.244,0.157
			c0.068,0.067,0.122,0.148,0.163,0.244c0.037,0.095,0.057,0.197,0.057,0.315C-38.765,483.153-38.785,483.26-38.822,483.354"/>
		<path fill="#221D1D" d="M51.717,459.262c-0.043-0.038-0.104-0.057-0.186-0.057h-0.346v0.441h0.326
			c0.182,0,0.271-0.075,0.271-0.221C51.783,459.353,51.764,459.297,51.717,459.262 M51.875,459.667
			c-0.055,0.061-0.129,0.104-0.219,0.127l0.289,0.553h-0.201l-0.279-0.541h-0.279v0.541h-0.17v-1.295h0.523
			c0.117,0,0.217,0.029,0.295,0.09c0.082,0.062,0.121,0.156,0.121,0.282C51.955,459.523,51.926,459.604,51.875,459.667
			 M52.371,459.307c-0.051-0.126-0.123-0.234-0.215-0.323c-0.088-0.091-0.197-0.162-0.322-0.211c-0.123-0.051-0.256-0.075-0.4-0.075
			c-0.141,0-0.273,0.024-0.396,0.075c-0.125,0.049-0.23,0.12-0.322,0.211c-0.092,0.088-0.162,0.197-0.213,0.323
			c-0.055,0.124-0.08,0.264-0.08,0.415c0,0.152,0.025,0.29,0.08,0.416c0.051,0.126,0.121,0.234,0.213,0.323
			c0.092,0.09,0.197,0.159,0.322,0.208c0.123,0.051,0.256,0.075,0.396,0.075c0.145,0,0.277-0.023,0.4-0.075
			c0.125-0.049,0.234-0.118,0.322-0.208c0.092-0.088,0.164-0.197,0.215-0.323s0.078-0.264,0.078-0.416
			C52.449,459.571,52.422,459.431,52.371,459.307 M52.52,460.203c-0.061,0.142-0.143,0.266-0.246,0.368
			c-0.107,0.105-0.229,0.184-0.373,0.238c-0.141,0.057-0.297,0.085-0.467,0.085c-0.166,0-0.32-0.028-0.465-0.085
			c-0.141-0.055-0.262-0.133-0.371-0.238c-0.102-0.102-0.186-0.226-0.244-0.368c-0.061-0.146-0.092-0.305-0.092-0.48
			c0-0.175,0.031-0.334,0.092-0.48c0.059-0.144,0.143-0.266,0.244-0.369c0.109-0.104,0.23-0.183,0.371-0.24
			c0.145-0.055,0.299-0.084,0.465-0.084c0.17,0,0.326,0.029,0.467,0.084c0.145,0.057,0.266,0.136,0.373,0.24
			c0.104,0.103,0.186,0.225,0.246,0.369c0.059,0.146,0.09,0.305,0.09,0.48C52.609,459.898,52.578,460.057,52.52,460.203"/>
	</g>
</g>
<g id="Layer_2">
</g>
<g id="Layer_4" display="none">
	<g display="inline">
		<path d="M-85.193,513.353c-3.295,0-5.483,2.655-5.483,7.425c0,4.771,2.288,7.492,5.588,7.492c3.295,0,5.478-2.654,5.478-7.426
			C-79.61,516.075-81.899,513.353-85.193,513.353 M-85.16,532.938c-6.154,0-10.359-4.5-10.359-12.094
			c0-7.587,4.272-12.16,10.432-12.16c6.116,0,10.324,4.501,10.324,12.093S-79.039,532.938-85.16,532.938"/>
		<path d="M-60.14,513.621h-5.415v6.049h5.485c2.184,0,3.362-1.009,3.362-3.061C-56.709,514.561-58.056,513.621-60.14,513.621
			 M-60.374,524.241h-5.182v8.328h-4.708v-23.516h10.291c4.439,0,8.107,2.454,8.107,7.459
			C-51.867,521.958-55.498,524.241-60.374,524.241"/>
		<polygon points="-46.994,532.567 -46.994,509.053 -30.65,509.053 -30.65,513.657 -42.289,513.657 -42.289,517.721 
			-35.529,517.721 -35.529,522.288 -42.289,522.288 -42.289,527.963 -30.145,527.963 -30.145,532.567 		"/>
		<path d="M-9.871,532.567l-8.647-12.83c-0.573-0.871-1.343-2.049-1.646-2.653c0,0.873,0.064,3.829,0.064,5.142v10.341h-4.637
			v-23.514h4.502l8.343,12.432c0.573,0.871,1.345,2.051,1.647,2.653c0-0.879-0.065-3.829-0.065-5.14v-9.947h4.638v23.514h-4.199
			V532.567z"/>
		<path d="M8.021,532.938c-3.193,0-6.053-1.381-7.9-3.258l1.746-1.949c1.783,1.713,3.836,2.823,6.258,2.823
			c3.129,0,5.08-1.544,5.08-4.031c0-2.187-1.312-3.426-5.617-4.971c-5.077-1.815-6.798-3.461-6.798-6.854
			c0-3.767,2.96-6.014,7.367-6.014c3.166,0,5.184,0.938,7.168,2.522l-1.682,2.049c-1.715-1.413-3.299-2.187-5.654-2.187
			c-3.226,0-4.574,1.612-4.574,3.46c0,1.953,0.878,3.057,5.585,4.738c5.215,1.881,6.829,3.629,6.829,7.121
			C15.828,530.085,12.934,532.938,8.021,532.938"/>
		<polygon points="35.999,532.567 35.999,521.485 24.295,521.485 24.295,532.567 21.672,532.567 21.672,509.053 24.295,509.053 
			24.295,519.098 35.999,519.098 35.999,509.053 38.623,509.053 38.623,532.567 		"/>
		<rect x="45.371" y="509.055" width="2.623" height="23.514"/>
		<polygon points="57.375,511.438 57.375,519.233 63.83,519.233 63.83,521.62 57.375,521.62 57.375,532.567 54.75,532.567 
			54.75,509.053 68.576,509.053 68.576,511.438 		"/>
		<polygon points="82.834,511.438 82.834,532.567 80.211,532.567 80.211,511.438 73.285,511.438 73.285,509.053 89.764,509.053 
			89.764,511.438 		"/>
		<path fill="#BC1C29" d="M-142.341,518.498l-7.872,2.861c0.103,1.26,0.318,2.504,0.623,3.725l7.473-2.723
			C-142.357,521.103-142.442,519.803-142.341,518.498"/>
		<path fill="#BC1C29" d="M-107.571,509.81c-0.548-1.129-1.181-2.224-1.919-3.256l-7.868,2.861c0.916,0.938,1.685,1.987,2.312,3.113
			L-107.571,509.81z"/>
		<path fill="#E22434" d="M-124.882,507.586c1.636,0.763,3.057,1.801,4.25,3.023l7.869-2.864c-2.182-3.052-5.148-5.604-8.782-7.297
			c-11.246-5.24-24.667-0.364-29.905,10.87c-1.701,3.631-2.332,7.494-2.038,11.231l7.871-2.86c0.128-1.7,0.547-3.407,1.311-5.044
			C-140.903,507.35-132.184,504.181-124.882,507.586"/>
		<path fill="#E22434" d="M-149.099,524.909l-7.475,2.717c0.688,2.719,1.88,5.309,3.516,7.607l7.853-2.851
			C-147.221,530.311-148.564,527.7-149.099,524.909"/>
		<path fill="#E22434" d="M-116.491,521.944c-0.126,1.698-0.551,3.408-1.319,5.045c-3.406,7.299-12.123,10.467-19.431,7.062
			c-1.636-0.766-3.067-1.799-4.258-3.02l-7.849,2.854c2.175,3.053,5.141,5.604,8.776,7.302c11.246,5.237,24.664,0.36,29.91-10.873
			c1.696-3.632,2.322-7.492,2.024-11.228L-116.491,521.944z"/>
		<path fill="#E22434" d="M-114.555,512.346l-7.475,2.724c1.39,2.481,2.043,5.344,1.833,8.221l7.85-2.854
			C-112.574,517.622-113.325,514.876-114.555,512.346"/>
		<path fill="#97101B" d="M-142.373,520.078c-0.019-0.524-0.012-1.051,0.032-1.58l-7.872,2.861c0.038,0.504,0.103,1.002,0.178,1.5
			L-142.373,520.078z"/>
		<path fill="#97101B" d="M-108.707,507.741c-0.25-0.4-0.507-0.8-0.781-1.187l-7.866,2.861c0.345,0.354,0.666,0.732,0.969,1.114
			L-108.707,507.741z"/>
		<path fill="#BC1C29" d="M-149.347,533.886c0.604,0.849,1.274,1.663,2,2.426l8.545-3.112c-1-0.627-1.902-1.353-2.699-2.166
			L-149.347,533.886z M-108.637,519.089l-7.854,2.856c-0.083,1.129-0.303,2.26-0.664,3.371l8.542-3.113
			C-108.547,521.159-108.559,520.119-108.637,519.089"/>
		<path d="M96.124,511.01c-0.082,0.198-0.194,0.368-0.339,0.511c-0.147,0.139-0.316,0.25-0.512,0.328
			c-0.197,0.078-0.41,0.115-0.646,0.115c-0.227,0-0.439-0.038-0.637-0.115c-0.196-0.079-0.366-0.188-0.516-0.328
			c-0.141-0.143-0.256-0.313-0.334-0.511c-0.087-0.197-0.128-0.417-0.128-0.659c0-0.241,0.041-0.461,0.128-0.657
			c0.078-0.2,0.193-0.37,0.334-0.511c0.148-0.144,0.318-0.25,0.516-0.329c0.197-0.077,0.412-0.116,0.637-0.116
			c0.236,0,0.449,0.039,0.646,0.116c0.194,0.079,0.363,0.186,0.512,0.329c0.145,0.141,0.257,0.311,0.339,0.511
			c0.081,0.196,0.122,0.417,0.122,0.657C96.246,510.593,96.205,510.813,96.124,511.01 M95.92,509.78
			c-0.073-0.175-0.17-0.323-0.296-0.444c-0.122-0.126-0.271-0.222-0.442-0.292c-0.169-0.067-0.354-0.104-0.554-0.104
			c-0.192,0-0.375,0.037-0.548,0.104c-0.168,0.07-0.315,0.166-0.438,0.292c-0.127,0.121-0.228,0.269-0.298,0.444
			c-0.072,0.173-0.109,0.361-0.109,0.571c0,0.207,0.037,0.4,0.109,0.573c0.07,0.173,0.171,0.321,0.298,0.445
			c0.124,0.123,0.272,0.217,0.438,0.286c0.174,0.072,0.354,0.104,0.548,0.104c0.198,0,0.385-0.033,0.554-0.104
			c0.172-0.069,0.321-0.164,0.442-0.286c0.126-0.124,0.224-0.272,0.296-0.445c0.074-0.173,0.107-0.364,0.107-0.573
			C96.029,510.141,95.994,509.95,95.92,509.78 M95.234,510.275c-0.072,0.086-0.172,0.143-0.297,0.174l0.399,0.763h-0.278
			l-0.384-0.746h-0.386v0.746h-0.235v-1.783h0.724c0.164,0,0.297,0.043,0.406,0.125c0.112,0.085,0.168,0.214,0.168,0.388
			C95.348,510.076,95.309,510.188,95.234,510.275 M95.02,509.717c-0.058-0.051-0.145-0.077-0.258-0.077h-0.477v0.604h0.447
			c0.252,0,0.377-0.101,0.377-0.301C95.111,509.842,95.078,509.764,95.02,509.717"/>
	</g>
</g>
<g id="Layer_3" display="none">
	
		<image display="inline" overflow="visible" width="217" height="96" xlink:href="../Desktop/Screen Shot 2013-11-19 at 4.51.37 PM.png"  transform="matrix(1 0 0 1 -145.2275 405.29)">
	</image>
</g>
</svg>
);
}
.logo a {
display: block;
width: 100%;
height: 100%;
}
*, *:before, *:after {
-moz-box-sizing: border-box;
box-sizing: border-box;
}
aside,
footer,
header,
hgroup,
section{
display: block;
}
body {
color: #404040;
font-family: "Helvetica Neue",Helvetica,"Liberation Sans",Arial,sans-serif;
font-size: 14px;
line-height: 1.4;
}
html {
font-family: sans-serif;
-ms-text-size-adjust: 100%;
-webkit-text-size-adjust: 100%;
}
ul {
margin-top: 0;
}
.container {
margin-right: auto;
margin-left: auto;
padding-left: 15px;
padding-right: 15px;
}
.container:before,
.container:after {
content: " ";
/* 1 */
display: table;
/* 2 */
}
.container:after {
clear: both;
}
.row {
margin-left: -15px;
margin-right: -15px;
}
.row:before,
.row:after {
content: " ";
/* 1 */
display: table;
/* 2 */
}
.row:after {
clear: both;
}
.col-sm-6, .col-md-6, .col-xs-12 {
position: relative;
min-height: 1px;
padding-left: 15px;
padding-right: 15px;
}
.col-xs-12 {
width: 100%;
}
@media (min-width: 768px) {
.container {
width: 750px;
}
.col-sm-6 {
float: left;
}
.col-sm-6 {
width: 50%;
}
}
@media (min-width: 992px) {
.container {
width: 970px;
}
.col-md-6 {
float: left;
}
.col-md-6 {
width: 50%;
}
}
@media (min-width: 1200px) {
.container {
width: 1170px;
}
}
a {
color: #069;
text-decoration: none;
}
a:hover {
color: #EA0011;
text-decoration: underline;
}
hgroup {
margin-top: 50px;
}
footer {
margin: 50px 0 25px;
font-size: 11px;
}
h1, h2, h3 {
color: #000;
line-height: 1.38em;
margin: 1.5em 0 .3em;
}
h1 {
font-size: 25px;
font-weight: 300;
border-bottom: 1px solid #fff;
margin-bottom: .5em;
}
h1:after {
content: "";
display: block;
width: 100%;
height: 1px;
background-color: #ddd;
}
h2 {
font-size: 19px;
font-weight: 400;
}
h3 {
font-size: 15px;
font-weight: 400;
margin: 0 0 .3em;
}
p {
margin: 0 0 2em;
}
p + h2 {
margin-top: 2em;
}
html {
background: #f5f5f5;
height: 100%;
}
code {
background-color: white;
border: 1px solid #ccc;
padding: 1px 5px;
color: #888;
}
pre {
display: block;
padding: 13.333px 20px;
margin: 0 0 20px;
font-size: 13px;
line-height: 1.4;
background-color: #fff;
border-left: 2px solid rgba(120,120,120,0.35);
white-space: pre;
white-space: pre-wrap;
word-break: normal;
word-wrap: break-word;
overflow: auto;
font-family: Menlo,Monaco,"Liberation Mono",Consolas,monospace !important;
}
</style>
</head>
<body>
<section class='container'>
<hgroup>
<h1>Welcome to your Python application on OpenShift</h1>
</hgroup>
<div class="row">
<section class='col-xs-12 col-sm-6 col-md-6'>
<section>
<h2>Deploying code changes</h2>
<p>OpenShift uses the <a href="http://git-scm.com/">Git version control system</a> for your source code, and grants you access to it via the Secure Shell (SSH) protocol. In order to upload and download code to your application you need to give us your <a href="https://developers.openshift.com/en/managing-remote-connection.html">public SSH key</a>. You can upload it within the web console or install the <a href="https://developers.openshift.com/en/managing-client-tools.html">RHC command line tool</a> and run <code>rhc setup</code> to generate and upload your key automatically.</p>
<h3>Working in your local Git repository</h3>
<p>If you created your application from the command line and uploaded your SSH key, rhc will automatically download a copy of that source code repository (Git calls this 'cloning') to your local system.</p>
<p>If you created the application from the web console, you'll need to manually clone the repository to your local system. Copy the application's source code Git URL and then run:</p>
<pre>$ git clone <git_url> <directory_to_create>
# Within your project directory
# Commit your changes and push to OpenShift
$ git commit -a -m 'Some commit message'
$ git push</pre>
<ul>
<li><a href="https://developers.openshift.com/en/managing-modifying-applications.html">Learn more about deploying and building your application</a></li>
<li>See the README file in your local application Git repository for more information on the options for deploying applications.</li>
</ul>
</section>
</section>
<section class="col-xs-12 col-sm-6 col-md-6">
<h2>Managing your application</h2>
<h3>Web Console</h3>
<p>You can use the OpenShift web console to enable additional capabilities via cartridges, add collaborator access authorizations, designate custom domain aliases, and manage domain memberships.</p>
<h3>Command Line Tools</h3>
<p>Installing the <a href="https://developers.openshift.com/en/managing-client-tools.html">OpenShift RHC client tools</a> allows you complete control of your cloud environment. Read more on how to manage your application from the command line in our <a href="https://www.openshift.com/user-guide">User Guide</a>.
</p>
<h2>Development Resources</h2>
<ul>
<li><a href="https://developers.openshift.com/en/python-overview.html">Getting Started with Python on OpenShift</a></li>
<li><a href="https://developers.openshift.com">Developer Center</a></li>
<li><a href="https://www.openshift.com/user-guide">User Guide</a></li>
<li><a href="https://help.openshift.com">Help Center</a></li>
<li><a href="http://stackoverflow.com/questions/tagged/openshift">Stack Overflow questions for OpenShift</a></li>
<li><a href="http://git-scm.com/documentation">Git documentation</a></li>
</ul>
</section>
</div>
<footer>
<div class="logo"><a href="https://www.openshift.com/"></a></div>
</footer>
</section>
</body>
</html>'''
response_body = response_body.encode('utf-8')
status = '200 OK'
response_headers = [('Content-Type', ctype), ('Content-Length', str(len(response_body)))]
#
start_response(status, response_headers)
return [response_body ]
#
# Below for testing only
#
if __name__ == '__main__':
from wsgiref.simple_server import make_server
httpd = make_server('localhost', 8051, application)
# Wait for a single request, serve it and quit.
httpd.handle_request()
|
[
"devmwheeler@live.com"
] |
devmwheeler@live.com
|
2b527ae08f8f0e1fc6300048d9138a988209d9aa
|
3e3ce865b7746732fe4298435cfe5cb8b23f46e7
|
/venv1/bin/easy_install-2.7
|
5fdfe2bb8764ea291ec2e732b2cdb5cb68fd2aab
|
[] |
no_license
|
siddharth12456/Plivo
|
ba48735ff1edb655737ed569d65db5619cd7f4b4
|
a6bd537b88add841325b88cd953b60b35636ddd4
|
refs/heads/master
| 2021-07-19T11:46:37.090810
| 2020-04-20T08:16:07
| 2020-04-20T15:46:01
| 132,721,649
| 0
| 0
| null | 2020-04-20T15:46:03
| 2018-05-09T07:52:32
|
Python
|
UTF-8
|
Python
| false
| false
| 278
|
7
|
#!/home/siddharth/PycharmProjects/PlivoAPI/venv1/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"sroy@sentient-energy.com"
] |
sroy@sentient-energy.com
|
7ef0e32c2bc08328f7dda3f11c84b48d28e808b8
|
34096e5f3d6569e3aaee794bf8ccc0b04f2c8c8f
|
/docusign_esign/models/envelope_transfer_rule.py
|
9850e0af941d967df7254ce7324591c2361dd884
|
[
"MIT"
] |
permissive
|
hunk/docusign-python-client
|
5c96de8a08973fe1744d902b2a3873a7376a62c7
|
a643c42c1236715e74eef6fc279a1b29da1b5455
|
refs/heads/master
| 2021-06-14T06:41:23.298368
| 2020-04-01T05:51:08
| 2020-04-01T05:51:08
| 254,482,059
| 0
| 0
|
MIT
| 2020-04-09T21:28:23
| 2020-04-09T21:28:23
| null |
UTF-8
|
Python
| false
| false
| 9,506
|
py
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class EnvelopeTransferRule(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, carbon_copy_original_owner=None, enabled=None, envelope_transfer_rule_id=None, event_type=None, from_group=None, from_user=None, modified_date=None, modified_user=None, to_folder=None, to_user=None):
"""
EnvelopeTransferRule - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'carbon_copy_original_owner': 'str',
'enabled': 'str',
'envelope_transfer_rule_id': 'str',
'event_type': 'str',
'from_group': 'Group',
'from_user': 'UserInformation',
'modified_date': 'str',
'modified_user': 'UserInformation',
'to_folder': 'Folder',
'to_user': 'UserInformation'
}
self.attribute_map = {
'carbon_copy_original_owner': 'carbonCopyOriginalOwner',
'enabled': 'enabled',
'envelope_transfer_rule_id': 'envelopeTransferRuleId',
'event_type': 'eventType',
'from_group': 'fromGroup',
'from_user': 'fromUser',
'modified_date': 'modifiedDate',
'modified_user': 'modifiedUser',
'to_folder': 'toFolder',
'to_user': 'toUser'
}
self._carbon_copy_original_owner = carbon_copy_original_owner
self._enabled = enabled
self._envelope_transfer_rule_id = envelope_transfer_rule_id
self._event_type = event_type
self._from_group = from_group
self._from_user = from_user
self._modified_date = modified_date
self._modified_user = modified_user
self._to_folder = to_folder
self._to_user = to_user
@property
def carbon_copy_original_owner(self):
"""
Gets the carbon_copy_original_owner of this EnvelopeTransferRule.
:return: The carbon_copy_original_owner of this EnvelopeTransferRule.
:rtype: str
"""
return self._carbon_copy_original_owner
@carbon_copy_original_owner.setter
def carbon_copy_original_owner(self, carbon_copy_original_owner):
"""
Sets the carbon_copy_original_owner of this EnvelopeTransferRule.
:param carbon_copy_original_owner: The carbon_copy_original_owner of this EnvelopeTransferRule.
:type: str
"""
self._carbon_copy_original_owner = carbon_copy_original_owner
@property
def enabled(self):
"""
Gets the enabled of this EnvelopeTransferRule.
:return: The enabled of this EnvelopeTransferRule.
:rtype: str
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""
Sets the enabled of this EnvelopeTransferRule.
:param enabled: The enabled of this EnvelopeTransferRule.
:type: str
"""
self._enabled = enabled
@property
def envelope_transfer_rule_id(self):
"""
Gets the envelope_transfer_rule_id of this EnvelopeTransferRule.
:return: The envelope_transfer_rule_id of this EnvelopeTransferRule.
:rtype: str
"""
return self._envelope_transfer_rule_id
@envelope_transfer_rule_id.setter
def envelope_transfer_rule_id(self, envelope_transfer_rule_id):
"""
Sets the envelope_transfer_rule_id of this EnvelopeTransferRule.
:param envelope_transfer_rule_id: The envelope_transfer_rule_id of this EnvelopeTransferRule.
:type: str
"""
self._envelope_transfer_rule_id = envelope_transfer_rule_id
@property
def event_type(self):
"""
Gets the event_type of this EnvelopeTransferRule.
:return: The event_type of this EnvelopeTransferRule.
:rtype: str
"""
return self._event_type
@event_type.setter
def event_type(self, event_type):
"""
Sets the event_type of this EnvelopeTransferRule.
:param event_type: The event_type of this EnvelopeTransferRule.
:type: str
"""
self._event_type = event_type
@property
def from_group(self):
"""
Gets the from_group of this EnvelopeTransferRule.
:return: The from_group of this EnvelopeTransferRule.
:rtype: Group
"""
return self._from_group
@from_group.setter
def from_group(self, from_group):
"""
Sets the from_group of this EnvelopeTransferRule.
:param from_group: The from_group of this EnvelopeTransferRule.
:type: Group
"""
self._from_group = from_group
@property
def from_user(self):
"""
Gets the from_user of this EnvelopeTransferRule.
:return: The from_user of this EnvelopeTransferRule.
:rtype: UserInformation
"""
return self._from_user
@from_user.setter
def from_user(self, from_user):
"""
Sets the from_user of this EnvelopeTransferRule.
:param from_user: The from_user of this EnvelopeTransferRule.
:type: UserInformation
"""
self._from_user = from_user
@property
def modified_date(self):
"""
Gets the modified_date of this EnvelopeTransferRule.
:return: The modified_date of this EnvelopeTransferRule.
:rtype: str
"""
return self._modified_date
@modified_date.setter
def modified_date(self, modified_date):
"""
Sets the modified_date of this EnvelopeTransferRule.
:param modified_date: The modified_date of this EnvelopeTransferRule.
:type: str
"""
self._modified_date = modified_date
@property
def modified_user(self):
"""
Gets the modified_user of this EnvelopeTransferRule.
:return: The modified_user of this EnvelopeTransferRule.
:rtype: UserInformation
"""
return self._modified_user
@modified_user.setter
def modified_user(self, modified_user):
"""
Sets the modified_user of this EnvelopeTransferRule.
:param modified_user: The modified_user of this EnvelopeTransferRule.
:type: UserInformation
"""
self._modified_user = modified_user
@property
def to_folder(self):
"""
Gets the to_folder of this EnvelopeTransferRule.
:return: The to_folder of this EnvelopeTransferRule.
:rtype: Folder
"""
return self._to_folder
@to_folder.setter
def to_folder(self, to_folder):
"""
Sets the to_folder of this EnvelopeTransferRule.
:param to_folder: The to_folder of this EnvelopeTransferRule.
:type: Folder
"""
self._to_folder = to_folder
@property
def to_user(self):
"""
Gets the to_user of this EnvelopeTransferRule.
:return: The to_user of this EnvelopeTransferRule.
:rtype: UserInformation
"""
return self._to_user
@to_user.setter
def to_user(self, to_user):
"""
Sets the to_user of this EnvelopeTransferRule.
:param to_user: The to_user of this EnvelopeTransferRule.
:type: UserInformation
"""
self._to_user = to_user
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"noreply@github.com"
] |
noreply@github.com
|
6ed312e707abaa007c3cd93e7fdc80401b65f139
|
f736f2392c6de4b8c6cd9d9bdff6de5c05d4a278
|
/blog/coments/api/serializers.py
|
5aa0958b3467c1a910fc2f7c2bcccf44198519e5
|
[] |
no_license
|
ricardocastilloisc/cursoDjangoBlog
|
a36f20021f72dc1b7b819c4f863e649707b3736a
|
13bac0f3811e7fafc3f21ed979b53cf36aae6d91
|
refs/heads/main
| 2023-06-27T19:33:26.977679
| 2021-07-26T14:59:26
| 2021-07-26T14:59:26
| 389,442,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
from rest_framework import serializers
from coments.models import Comment
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = ['id','content', 'created_at', 'user', 'post']
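# --- Hedged usage sketch (not part of the original file) ---
# Assumes 'user' and 'post' accept primary keys and that 'id'/'created_at'
# are populated automatically by the Comment model; details may differ.
#
#   serializer = CommentSerializer(data={'content': 'Nice post!', 'user': 1, 'post': 1})
#   if serializer.is_valid():
#       comment = serializer.save()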
|
[
"ricardocastilloisc@gmail.com"
] |
ricardocastilloisc@gmail.com
|
bac58cc9c2e873327fcf4652f7150e09e1f24dbc
|
9ee12b1d04a458ab84a042acc317c483bf10b53e
|
/TinyImagenet/keras_alexnet.py
|
e447fe8fade498b338e4828802796951bcbea1cb
|
[] |
no_license
|
cvasfi/light-cnns
|
c938aa952444894575253e1885bcea2d1b09c68c
|
e181e6aac1aac3e499c5318143b3fffba54186e7
|
refs/heads/master
| 2021-01-21T10:49:22.172196
| 2017-10-19T19:11:52
| 2017-10-19T19:11:52
| 101,991,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,489
|
py
|
from __future__ import division
import six
from keras.models import Model
from keras.layers import (
Input,
Activation,
Dense,
Flatten
)
from keras.layers.convolutional import (
Conv2D,
MaxPooling2D,
AveragePooling2D
)
from keras.layers.merge import add
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras import backend as K
from keras.layers.advanced_activations import PReLU
from keras.layers.core import Dropout
def _conv_relu(**conv_params):
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
conv = Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(input)
return PReLU()(conv)
return f
class AlexNetBuilder(object):
@staticmethod
def build(input_shape, num_outputs):
# Permute dimension order if necessary
if K.image_dim_ordering() == 'tf':
input_shape = (input_shape[1], input_shape[2], input_shape[0])
input = Input(shape=input_shape)
c1 = _conv_relu(filters=96, kernel_size=(3, 3), strides=(1, 1))(input)
p1 = MaxPooling2D()(c1)
c2 = _conv_relu(filters=256, kernel_size=(5, 5), strides=(1, 1))(p1)
p2 = MaxPooling2D()(c2)
c3 = _conv_relu(filters=384, kernel_size=(3, 3), strides=(1, 1))(p2)
c4 = _conv_relu(filters=385, kernel_size=(3, 3), strides=(1, 1))(c3)
c5 = _conv_relu(filters=256, kernel_size=(3, 3), strides=(1, 1))(c4)
p3 = MaxPooling2D()(c5)
fl = Flatten()(p3)
fc1 = Dense(units=4096)(fl)
fc1_d = Dropout(rate=0.5)(fc1)
fc1_a = PReLU()(fc1_d)
fc2 = Dense(units=4096)(fc1_a)
fc2_d = Dropout(rate=0.5)(fc2)
fc2_a = PReLU()(fc2_d)
output = Dense(units=200, activation="softmax")(fc2_a)  # 200 TinyImageNet classes hard-coded; num_outputs is not used
model = Model(inputs=input, outputs=output)
return model
@staticmethod
def buildAlexnet(input_shape, num_outputs):
return AlexNetBuilder.build(input_shape, num_outputs)
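# --- Hedged usage sketch (not part of the original file) ---
# TinyImageNet images are 64x64 RGB and the head above emits 200 classes, so a
# plausible call is shown below; the channel-first input shape is an assumption
# (build() permutes it for the 'tf' dim ordering).
#
#   model = AlexNetBuilder.buildAlexnet(input_shape=(3, 64, 64), num_outputs=200)
#   model.compile(optimizer="sgd", loss="categorical_crossentropy", metrics=["accuracy"])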
|
[
"yunus.ec@gmail.com"
] |
yunus.ec@gmail.com
|
2694809627d8fe84439bbd9857953fd90a2c72a8
|
8a62bbff9378187a898f336532bb49de18cb88e4
|
/2020-phone-bpe-attention/scripts/create-phone-bpe-lexicon.py
|
9cc4eba43457fe7795861e94c644ea94d3b34626
|
[] |
no_license
|
rwth-i6/returnn-experiments
|
e2cdecb67febe646d702282ced8c290f1dd8edd0
|
a46021329c030af361e0becb25ea92afca9610ce
|
refs/heads/master
| 2023-06-08T08:56:11.891782
| 2023-05-30T12:46:45
| 2023-05-30T12:46:45
| 67,426,132
| 159
| 52
| null | 2023-05-30T12:46:46
| 2016-09-05T14:07:48
|
Python
|
UTF-8
|
Python
| false
| false
| 13,569
|
py
|
#!/usr/bin/env python3
import xml.etree.ElementTree as ET
from xml.dom import minidom
import codecs
from returnn.LmDataset import Lexicon
from argparse import ArgumentParser
"""
create Lexicon, given bpe Vocab, lexicon and applied phones_bpe
"""
def convert(string_num):
if isinstance(string_num, str) and string_num.startswith("0"):
return "zero " + convert(string_num[1:])
num = int(string_num)
units = ("", "one ", "two ", "three ", "four ","five ", "six ", "seven ","eight ", "nine ", "ten ", "eleven ", "twelve ", "thirteen ", "fourteen ", "fifteen ","sixteen ", "seventeen ", "eighteen ", "nineteen ")
tens =("", "", "twenty ", "thirty ", "forty ", "fifty ","sixty ","seventy ","eighty ","ninety ")
if num<0:
return "minus "+convert(-num)
if num<20:
return units[num]
if num<100:
return tens[num // 10] +units[int(num % 10)]
if num<1000:
return units[num // 100] +"hundred " +convert(int(num % 100))
if num<1000000:
return convert(num // 1000) + "thousand " + convert(int(num % 1000))
if num < 1000000000:
return convert(num // 1000000) + "million " + convert(int(num % 1000000))
return convert(num // 1000000000)+ "billion "+ convert(int(num % 1000000000))
def hasNumber(inputString):
return any(char.isdigit() for char in inputString)
def separate(iString):
prev_char = iString[0]
tmp = []
new = iString[0]
for x, i in enumerate(iString[1:]):
if i.isalpha() and prev_char.isalpha():
new += i
elif i.isnumeric() and prev_char.isnumeric():
new += i
else:
tmp.append(new)
new = i
prev_char = i
if x == len(iString)-2:
tmp.append(new)
new = ''
if len(iString) > 1:
return tmp
return [iString]
def to_unicode_list(input_l):
res = []
for item in input_l:
res.append(to_unicode(item))
return res
def to_unicode(input):
text = input.split()
result = ""
for k in text:
result += phone_to_unicode[k]
return result
# map phone into unicode
phone_to_unicode = {'[LAUGHTER]': 'L',
'[NOISE]': 'N',
'[SILENCE]': 'S',
'[VOCALIZEDNOISE]': 'V',
'aa': 'a',
'ae': 'à',
'ah': 'á',
'ao': 'â',
'aw': 'ã',
'ax': 'ä',
'ay': 'å',
'b': 'b',
'ch': 'c',
'd': 'd',
'dh': 'ď',
'eh': 'e',
'el': 'è',
'en': 'é',
'er': 'ê',
'ey': 'ë',
'f': 'f',
'g': 'g',
'hh': 'h',
'ih': 'i',
'iy': 'ì',
'jh': 'j',
'k': 'k',
'l': 'l',
'm': 'm',
'n': 'n',
'ng': 'ñ',
'ow': 'o',
'oy': 'ò',
'p': 'p',
'r': 'r',
's': 's',
'sh': 'ś',
't': 't',
'th': 'ţ',
'uh': 'u',
'uw': 'ù',
'v': 'v',
'w': 'w',
'y': 'y',
'z': 'z',
'zh': 'ź',
' ': ' ',
'#1': '#1', # disambiquate symbols for homophones
'#2': '#2',
'#3': '#3',
'#4': '#4',
'#5': '#5',
'#6': '#6',
'#7': '#7',
'#8': '#8',
'#9': '#9',
'#10': '#10',
'#11': '#11',
'#12': '#12',
'#13': '#13',
'#14': '#14',
}
def main():
arg_parser = ArgumentParser()
arg_parser.add_argument("--bpe_vocab", required=True)
arg_parser.add_argument("--lexicon", required=True)
arg_parser.add_argument("--phones_bpe", required=True)
arg_parser.add_argument("--bpe", action="store_true")
arg_parser.add_argument("--char", action="store_true")
arg_parser.add_argument("--charbpe", action="store_true")
arg_parser.add_argument("--disamb", action="store_true")
arg_parser.add_argument("--output", required=True)
args = arg_parser.parse_args()
# for single-char or plain-phone setups (no BPE), the optional phones_bpe argument is not needed and can be commented out
bpe1k_file = args.bpe_vocab
lexicon_file = args.lexicon
phones_bpe_file = args.phones_bpe
def create_specialTree(input):
if input == "</s>":
lemma = ET.SubElement(lex_root, 'lemma', special="sentence-end")
orth = ET.SubElement(lemma, 'orth')
synt = ET.SubElement(lemma, 'synt')
tok = ET.SubElement(synt, 'tok')
orth.text = '[SENTENCE-END]'
tok.text = input
eval = ET.SubElement(lemma, 'eval')
elif input == "<s>":
lemma = ET.SubElement(lex_root, 'lemma', special="sentence-begin")
orth = ET.SubElement(lemma, 'orth')
synt = ET.SubElement(lemma, 'synt')
tok = ET.SubElement(synt, 'tok')
orth.text = '[SENTENCE-BEGIN]'
tok.text = input
eval = ET.SubElement(lemma, 'eval')
elif input == "<unk>":
lemma = ET.SubElement(lex_root, 'lemma', special="unknown")
orth = ET.SubElement(lemma, 'orth')
synt = ET.SubElement(lemma, 'synt')
tok = ET.SubElement(synt, 'tok')
orth.text = '[UNKNOWN]'
tok.text = input
eval = ET.SubElement(lemma, 'eval')
# read the input phonemes file and parse it into dictionary
# output dictionary seq
with codecs.open(bpe1k_file, 'rU', 'utf-8') as file:
seq = {}
for line in file:
if line.startswith(('{', '}')):
continue
line = line.replace(',', '')
line = line.replace('\'', '')
key, value = line.strip().split(':')
value = value.strip()
seq[key] = value
# create the xml file structure
special_sign = ["L", "N", "S", "V"]
extra_sign = ["</s>", "<s>", "<unk>"]
# old lexicon handle
lex = Lexicon(lexicon_file)
count = 0
temp_lemmas = []
for word in lex.lemmas:
count += 1
if count > 9:
if args.char:
if hasNumber(lex.lemmas[word]['orth']):
word_ = ""
list_ = separate(lex.lemmas[word]['orth'])
for item in list_:
if item.isdigit():
word_ += convert(item)
temp_lemmas.append(word_.strip())
else:
temp_lemmas.append(lex.lemmas[word]['orth'])
# create new lexicon root
# create phonemes xml tree
lex_root = ET.Element('lexicon')
phone_inventory = ET.SubElement(lex_root, 'phoneme-inventory')
for key, v in sorted(seq.items()):
if key not in extra_sign:
phone = ET.SubElement(phone_inventory, 'phoneme')
p_sym = ET.SubElement(phone, 'symbol')
p_var = ET.SubElement(phone, 'variation')
if key in special_sign:
p_var.text = 'none'
if key == "L":
p_sym.text = "[LAUGHTER]"
elif key == "N":
p_sym.text = "[NOISE]"
elif key == "V":
p_sym.text = "[VOCALIZEDNOISE]"
else:
p_sym.text = "[SILENCE]"
else:
p_var.text = 'context'
p_sym.text = key
else:
if key == "<s>":
create_specialTree(key)
elif key == "</s>":
create_specialTree(key)
elif key == "<unk>":
create_specialTree(key)
for item in ["[NOISE]", "[VOCALIZEDNOISE]", "[LAUGHTER]"]:
lemma = ET.SubElement(lex_root, 'lemma')
orth = ET.SubElement(lemma, 'orth')
phon = ET.SubElement(lemma, 'phon', score="0.0")
phon.text = item
orth.text = item
synt = ET.SubElement(lemma, 'synt')
eval = ET.SubElement(lemma, 'eval')
# mapping phone sequences to word
phon_dict = {}
if args.char:
for word in lex.lemmas:
if hasNumber(word):
word_ = ""
list_ = separate(word)
for item in list_:
if item.isdigit():
word_ += convert(item)
phon_dict[word] = word_
else:
phon_dict[word] = word
#print(word, phon_dict[word])
else:
for word in lex.lemmas:
len_phons = len(lex.lemmas[word]["phons"])
list_of_phons = []
for x in range(len_phons):
list_of_phons.append(lex.lemmas[word]["phons"][x]["phon"])
if args.bpe:
phon_dict[word] = to_unicode_list(list_of_phons) #phone bpe
else:
phon_dict[word] = list_of_phons #single phone
if args.disamb:
duplicates = {} # phone -> count
for word, phones in sorted(phon_dict.items()):
for phone in phones:
if phone in duplicates:
phon_dict[word].remove(phone)
phon_dict[word].insert(0, '%s #%s' % (phone, duplicates[phone])) #bpe close#, not bpe far #
duplicates[phone] += 1
else:
duplicates[phone] = 1
# auxiliary write a output file
with open('word_phone.txt', 'w') as f:
print(phon_dict, file=f)
with open('file_to_map.txt', 'w') as file:
file.write('{\n')
for key, value in phon_dict.items():
file.write('{}:{},\n'.format(key, value))
file.write('}\n')
with open('file_to_map.txt', 'r') as inp:
with open('file_output.txt', 'w') as out:
for i in range(6):
inp.readline()
for line in inp:
if line.startswith('}'):
break
line = line.replace(',', '')
_, right = line.split(':')
lst = right[1:-2].split(',')
lst = [x.replace("'", "") for x in lst]
output = ' '.join(lst)
out.write('{}\n'.format(output))  # keep the trailing \n for other unit types; omit it for SingleChar
# here is the checkpoint where ./subword-nmt/apply_bpe.py is called externally,
# with inputs: the BPE codes file and the phone sequences to be mapped (e.g. file_output.txt);
# it generates phones_bpe_file, which is read below
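# A hedged sketch of that external step (flags and file names are assumptions
# that depend on the local subword-nmt checkout and the trained codes file):
#   python ./subword-nmt/apply_bpe.py -c <codes_file> < file_output.txt > <phones_bpe_file>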
with open(phones_bpe_file, 'r') as file_r:
res_ = []
for line in file_r:
ls = line.strip().split()
phon_seq = []
merge = []
for item in ls:
if '@@' in item:
merge.append(item)
else:
merge.append(item)
phon_seq.append(' '.join(merge))
merge = []
res_.append(phon_seq)
dict_tmp = list(phon_dict.items())
for idx, x in enumerate(res_):
dict_tmp[4+idx] = (dict_tmp[4+idx][0], x)
phon_dict = dict(dict_tmp)
with open('unicode_phone.txt', 'w') as f:
print(phon_dict, file=f)
# we want to add same words (ignoring case) to the same lemma so we create a dict from orth to
# lemma to add a similar orth to the same lemma later. phon should be added only once to the lemma
# so we do that when we create the lemma
if args.char:
orth_to_lemma = {} # dict from orth to lemma
for idx, elem in enumerate(temp_lemmas):
elem_lower = elem.lower()
# if this orth (lower-cased) is already known, just look up its lemma
if elem_lower in orth_to_lemma:
lemma = orth_to_lemma[elem_lower]
else:
# if not, create a new lemma
lemma = ET.SubElement(lex_root, 'lemma')
orth_to_lemma[elem_lower] = lemma
#assert elem_lower in phon_dict
res = ""
for char in list(elem):
res+=char
res+=" "
phon = ET.SubElement(lemma, 'phon')
phon.text = res.strip()
orth = ET.SubElement(lemma, 'orth')
orth.text = elem
# single char
# if args.char:
# orth_to_lemma = {}
# for idx, elem in enumerate(temp_lemmas):
# elem_lower = elem.lower()
# lemma = ET.SubElement(lex_root, 'lemma')
# orth = ET.SubElement(lemma, 'orth')
# orth.text = elem
# if elem_lower in orth_to_lemma:
# lemma = orth_to_lemma[elem_lower]
# else:
# res = ""
# for c in list(elem):
# res+= c
# res+= " "
# phon = ET.SubElement(lemma, 'phon')
# res = res + "<eow>"
# phon.text = res
# else:
# orth_to_lemma = {}
# for idx, elem in enumerate(temp_lemmas):
# elem_lower = elem.lower()
# lemma = ET.SubElement(lex_root, 'lemma')
# orth = ET.SubElement(lemma, 'orth')
# orth.text = elem
# if elem_lower in orth_to_lemma:
# lemma = orth_to_lemma[elem_lower]
# else:
# for p in phon_dict[elem_lower]:
# phon = ET.SubElement(lemma, 'phon')
# phon.text = p
else:
orth_to_lemma = {} # dict from orth to lemma
for idx, elem in enumerate(temp_lemmas):
elem_lower = elem.lower()
# if this orth (lower-cased) is already known, just look up its lemma
if elem_lower in orth_to_lemma:
lemma = orth_to_lemma[elem_lower]
else:
# if not, create a new lemma
lemma = ET.SubElement(lex_root, 'lemma')
orth_to_lemma[elem_lower] = lemma
assert elem_lower in phon_dict
for p in phon_dict[elem_lower]:
phon = ET.SubElement(lemma, 'phon')
phon.text = p
orth = ET.SubElement(lemma, 'orth')
orth.text = elem
if(args.output):
my_data = minidom.parseString(ET.tostring(lex_root)).toprettyxml(indent=" ")
with open(args.output, "w") as f:
f.write(my_data)
if __name__ == '__main__':
import better_exchook
better_exchook.install()
main()
|
[
"thomas.ng@rwth-aachen.de"
] |
thomas.ng@rwth-aachen.de
|
2ed8d0c47dc05eb342a5011b55fde809be7ece77
|
b038128c5ecd477403f1396ae7f5be29d6ade668
|
/dataset/dataset.py
|
25e313a0f9865d75871717ce8397d6f655d704c2
|
[] |
no_license
|
BAfsharmanesh/Kaggle_Indoor_Location_Navigation
|
82fe8768b0a81f2bbc6e4a7c4d7d4f204f686b33
|
e9379061c0a0cda1a02f9e373c967a4c48f487f6
|
refs/heads/main
| 2023-04-30T19:36:38.876825
| 2021-05-16T21:48:41
| 2021-05-16T21:48:41
| 367,980,247
| 0
| 0
| null | 2021-05-16T20:40:34
| 2021-05-16T20:36:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,444
|
py
|
import pandas as pd
from icecream import ic
from pytorch_lightning import LightningDataModule
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import StratifiedKFold #,StratifiedGroupKFold
from config import Config
from utils.utils import time_function
import numpy as np
class IndoorDataset(Dataset):
def __init__(self, data, bssid_feats, rssi_feats, flag='TRAIN'):
self.data = data
self.flag = flag
self.bssid_feats = bssid_feats
self.rssi_feats = rssi_feats
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index):
tmp_data = self.data.iloc[index]
if self.flag == 'TRAIN':
return {
'BSSID_FEATS': tmp_data[self.bssid_feats].values.astype(int),
'RSSI_FEATS': tmp_data[self.rssi_feats].values.astype(np.float32),
'site_id': tmp_data['site_id'].astype(int),
'x': tmp_data['x'],
'y': tmp_data['y'],
'floor': tmp_data['floor'],
}
elif self.flag == 'TEST':
return {
'BSSID_FEATS': tmp_data[self.bssid_feats].values.astype(int),
'RSSI_FEATS': tmp_data[self.rssi_feats].values.astype(np.float32),
'site_id': tmp_data['site_id'].astype(int)
}
class IndoorDataModule(LightningDataModule):
def __init__(self, train_data, test_data, kfold=False):
self.train_data = train_data
self.test_data = test_data
self.kfold = kfold
def set_fold_num(self, fold_num):
self.fold_num = fold_num
def _init_feats(self):
self.bssid_feats = [f'bssid_{i}' for i in range(Config.num_wifi_feats)]
self.rssi_feats = [f'rssi_{i}' for i in range(Config.num_wifi_feats)]
def _init_wifi_bssids(self):
wifi_bssids = []
for i in range(100):
wifi_bssids += self.train_data[f'bssid_{i}'].values.tolist()
wifi_bssids += self.test_data[f'bssid_{i}'].values.tolist()
self.wifi_bssids = list(set(wifi_bssids))
self.wifi_bssids_size = len(self.wifi_bssids)
def _init_transforms(self):
self.wifi_bssids_encoder = LabelEncoder()
self.wifi_bssids_encoder.fit(self.wifi_bssids)
self.site_id_encoder = LabelEncoder()
self.site_id_encoder = self.site_id_encoder.fit(
self.train_data['site_id'])
self.rssi_normalizer = StandardScaler()
self.rssi_normalizer.fit(self.train_data[self.rssi_feats])
def _transform(self, data):
for bssid_feat in self.bssid_feats:
data[bssid_feat] = self.wifi_bssids_encoder.transform(
data[bssid_feat])
data['site_id'] = self.site_id_encoder.transform(data['site_id'])
data[self.rssi_feats] = self.rssi_normalizer.transform(
data[self.rssi_feats])
return data
def _kfold(self):
''' Intended split: group k-fold w.r.t. path, stratified w.r.t. site_id.
Note: the implementation below stratifies on 'path' only; 'site_id_f' is built but not passed to the split.
'''
skf = StratifiedKFold(n_splits=Config.fold_num,
shuffle=True, random_state=Config.seed)
self.train_data['site_id_f'] = self.train_data['site_id'] + self.train_data['floor'].astype(str)
for n, (train_index, val_index) in enumerate(
skf.split(
X = self.train_data['path'],
y = self.train_data['path']
)
):
self.train_data.loc[val_index, 'kfold'] = int(n)
@time_function
def prepare_data(self):
# Init cross validation
if self.kfold:
self._kfold()
# Init preprocessing
self._init_feats()
self._init_wifi_bssids()
self._init_transforms()
self.site_id_dim = len(self.train_data['site_id'].unique())
self.train_data = self._transform(self.train_data)
self.test_data = self._transform(self.test_data)
@time_function
def setup(self, stage=None):
# Assign train/val datasets for use in dataloaders
if stage == 'fit' or stage is None:
if self.kfold:
train_df = self.train_data[self.train_data['kfold'] !=
self.fold_num].reset_index(drop=True)
val_df = self.train_data[self.train_data['kfold'] ==
self.fold_num].reset_index(drop=True)
self.train = IndoorDataset(
train_df, self.bssid_feats, self.rssi_feats, flag="TRAIN")
self.val = IndoorDataset(
val_df, self.bssid_feats, self.rssi_feats, flag="TRAIN")
# Assign test dataset for use in dataloader(s)
if stage == 'test' or stage is None:
self.test = IndoorDataset(
self.test_data, self.bssid_feats, self.rssi_feats, flag="TEST")
def train_dataloader(self):
return DataLoader(self.train, batch_size=Config.train_batch_size, num_workers=Config.num_workers, shuffle=True, pin_memory=True)
def val_dataloader(self):
return DataLoader(self.val, batch_size=Config.val_batch_size, num_workers=Config.num_workers, shuffle=True, pin_memory=True)
def test_dataloader(self):
return DataLoader(self.test, batch_size=Config.val_batch_size, num_workers=Config.num_workers, shuffle=False, pin_memory=True)
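# --- Hedged usage sketch (not part of the original file) ---
# Assumes the train/test DataFrames already contain the bssid_*/rssi_*/site_id
# (and, for training, x/y/floor/path) columns this module expects.
#
#   dm = IndoorDataModule(train_df, test_df, kfold=True)
#   dm.prepare_data()
#   dm.set_fold_num(0)
#   dm.setup(stage='fit')
#   train_loader = dm.train_dataloader()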
|
[
"noreply@github.com"
] |
noreply@github.com
|
3eee818cb29ce487b694fea16caba653f9d645ec
|
629f909ebe19b22d068ec1a4719c9eb303ed2826
|
/python_iugu/request/plan_request.py
|
9d483102bd3f8d101761bc3b7ad17dd17da24c93
|
[
"MIT"
] |
permissive
|
guiflemes/python_iugu
|
f564ce3e653b228a6e71e82f5f26b1b364eb7f76
|
e7efca84e76ebd5b99773f4e57a14f991fbcb520
|
refs/heads/master
| 2023-05-05T05:25:42.631921
| 2021-05-21T18:00:16
| 2021-05-21T18:00:16
| 327,623,059
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from python_iugu import enuns
@dataclass
class PlanRequest:
name: str = None
identifier: str = None
interval: int = None
interval_type: enuns.IntervalType = None
value_cents: int = None
payable_with: enuns.PayableWith = None
features: Optional[FeatureRequest] = None
billing_days: int = None
max_cycles: int = None
@dataclass
class FeatureRequest:
name: str
identifier: str
value: str
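# --- Hedged usage sketch (not part of the original file) ---
# Field values are illustrative only; interval_type/payable_with enum members
# are omitted because their exact names live in python_iugu.enuns.
#
#   plan = PlanRequest(name="Basic", identifier="basic-monthly", interval=1,
#                      value_cents=990,
#                      features=FeatureRequest(name="Users", identifier="users", value="5"))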
|
[
"guilherme@campusinc.com.br"
] |
guilherme@campusinc.com.br
|
317b8373cde4e8566b57759adc99ca00c1e5885f
|
d59a459f3b3bccfb6204a3f803fa465ea1297811
|
/ipynbhpc/PBS.py
|
3895e450292927f8ff6d1597d02e93c764db13c6
|
[] |
no_license
|
rainwoodman/ipynbhpc
|
90fbce679b5ae5886222b90984f5453aeceefceb
|
338973766328d5c83896daec18ae7e81514ae3b8
|
refs/heads/master
| 2021-01-20T00:58:26.675992
| 2015-05-24T18:46:54
| 2015-05-24T18:46:54
| 34,808,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,125
|
py
|
import subprocess
import numpy
import xml.etree.ElementTree as ET
import re
import time
def submit(string):
pipe = subprocess.Popen(['qsub'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = pipe.communicate(string)[0]
match = re.match('([0-9]*)\..*', stdout)
if pipe.returncode or not match:
raise Exception("qsub failed: %s" % stdout)
return match.group(1)
def status(jobid):
""" returns R, Q, E, C, or U(for unknown, eg jobid is not in qstat"""
try:
xml = subprocess.check_output(['qstat', '-x', str(jobid)])
tree = ET.fromstring(xml)
ele = tree.find('Job/job_state')
return ele.text
except subprocess.CalledProcessError:
return 'U'
def delete(jobid):
return subprocess.check_call(['qdel', str(jobid)])
def wait(jobid):
timeout = 10.
if not isinstance(jobid, (list, tuple, set)):
while status(jobid) in 'RQ':
time.sleep(timeout)
timeout *= 1.2
if timeout > 60.:
timeout = 60.
else:
for job in jobid:
wait(job)
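# --- Hedged usage sketch (not part of the original file) ---
# The job script below is illustrative; any valid PBS script text works.
#
#   script = "#PBS -l nodes=1:ppn=1\necho hello from $PBS_JOBID\n"
#   jobid = submit(script)
#   wait(jobid)            # polls qstat until the job leaves the R/Q states
#   print(status(jobid))   # 'C' when finished, 'U' if qstat no longer knows the job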
|
[
"yfeng1@berkeley.edu"
] |
yfeng1@berkeley.edu
|
2aa3c4884a4fb9cc6a1dfb40a23627bc7126d8ab
|
4e248704293e8b229d51cce077263364a98bb45f
|
/Lexical_analyzer/train.py
|
46c35de8df5855ddcc221f0d83b0e0491e7537a1
|
[] |
no_license
|
VincentLee-EN/FibreTextAnalyzer
|
0ba5c70c899f2f85aae6180ba75bb1031c6fd15d
|
2de3f9d4f18498d24be829e0f9d3a6f2c373a82c
|
refs/heads/master
| 2020-05-16T02:45:42.072795
| 2019-05-02T14:02:52
| 2019-05-02T14:02:52
| 181,429,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,112
|
py
|
#encoding=utf8
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib import crf
import Lexical_analyzer.cws.model as modelDef
from Lexical_analyzer.cws.data import Data
tf.app.flags.DEFINE_string('dict_path', 'data/your_dict.pkl', 'dict path')
tf.app.flags.DEFINE_string('train_data', 'data/your_train_data.pkl', 'train data path')
tf.app.flags.DEFINE_string('ckpt_path', 'checkpoints/cws.finetune.ckpt/', 'checkpoint path')
tf.app.flags.DEFINE_integer('embed_size', 256, 'embedding size')
tf.app.flags.DEFINE_integer('hidden_size', 512, 'hidden layer node number')
tf.app.flags.DEFINE_integer('batch_size', 64, 'batch size')
tf.app.flags.DEFINE_integer('epoch', 9, 'training epoch')
tf.app.flags.DEFINE_float('lr', 0.01, 'learning rate')
tf.app.flags.DEFINE_string('save_path','checkpoints/cws.ckpt/','new model save path')
FLAGS = tf.app.flags.FLAGS
class BiLSTMTrain(object):
def __init__(self, data_train=None, data_valid=None, data_test=None, model=None):
self.data_train = data_train
self.data_valid = data_valid
self.data_test = data_test
self.model = model
def train(self):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
## finetune ##
# ckpt = tf.train.latest_checkpoint(FLAGS.ckpt_path)
# saver = tf.train.Saver()
# saver.restore(sess, ckpt)
# print('-->finetune the ckeckpoint:'+ckpt+'...')
##############
max_epoch = 5
tr_batch_size = FLAGS.batch_size
max_max_epoch = FLAGS.epoch # Max epoch
display_num = 5 # Display 5 pre epoch
tr_batch_num = int(self.data_train.y.shape[0] / tr_batch_size)
# tr_batch_num = tr_batch_size
display_batch = int(tr_batch_num / display_num)
saver = tf.train.Saver(max_to_keep=10)
for epoch in range(max_max_epoch):
_lr = FLAGS.lr
if epoch > max_epoch:
_lr *= 1  # NOTE: no-op; presumably a placeholder for a learning-rate decay factor
print('EPOCH %d, lr=%g' % (epoch + 1, _lr))
start_time = time.time()
_losstotal = 0.0
show_loss = 0.0
for batch in range(tr_batch_num):
fetches = [self.model.loss, self.model.train_op]
X_batch, y_batch = self.data_train.next_batch(tr_batch_size)
feed_dict = {self.model.X_inputs: X_batch, self.model.y_inputs: y_batch, self.model.lr: _lr,
self.model.batch_size: tr_batch_size,
self.model.keep_prob: 0.5}
_loss, _ = sess.run(fetches, feed_dict)
_losstotal += _loss
show_loss += _loss
if (batch + 1) % display_batch == 0:
valid_acc = self.test_epoch(self.data_valid, sess) # valid
print('\ttraining loss=%g ; valid acc= %g ' % (show_loss / display_batch,
valid_acc))
show_loss = 0.0
mean_loss = _losstotal / tr_batch_num
if (epoch + 1) % 1 == 0: # Save once per epoch
save_path = saver.save(sess, self.model.model_save_path+'_plus', global_step=(epoch + 1))
print('the save path is ', save_path)
print('\ttraining %d, loss=%g ' % (self.data_train.y.shape[0], mean_loss))
print('Epoch training %d, loss=%g, speed=%g s/epoch' % (
self.data_train.y.shape[0], mean_loss, time.time() - start_time))
# testing
print('**TEST RESULT:')
test_acc = self.test_epoch(self.data_test, sess)
print('**Test %d, acc=%g' % (self.data_test.y.shape[0], test_acc))
sess.close()
def test_epoch(self, dataset=None, sess=None):
_batch_size = 500
_y = dataset.y
data_size = _y.shape[0]
batch_num = int(data_size / _batch_size)
correct_labels = 0
total_labels = 0
fetches = [self.model.scores, self.model.length, self.model.transition_params]
for i in range(batch_num):
X_batch, y_batch = dataset.next_batch(_batch_size)
feed_dict = {self.model.X_inputs: X_batch, self.model.y_inputs: y_batch, self.model.lr: 1e-5,
self.model.batch_size: _batch_size,
self.model.keep_prob: 1.0}
test_score, test_length, transition_params = sess.run(fetches=fetches,
feed_dict=feed_dict)
for tf_unary_scores_, y_, sequence_length_ in zip(
test_score, y_batch, test_length):
tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
y_ = y_[:sequence_length_]
viterbi_sequence, _ = crf.viterbi_decode(
tf_unary_scores_, transition_params)
correct_labels += np.sum(np.equal(viterbi_sequence, y_))
total_labels += sequence_length_
accuracy = correct_labels / float(total_labels)
return accuracy
def main(_):
Data_ = Data(dict_path=FLAGS.dict_path, train_data=FLAGS.train_data)
print('Corpus loading completed:',FLAGS.train_data)
data_train, data_valid, data_test = Data_.builderTrainData()
print('The training set, verification set, and test set split are completed!')
model = modelDef.BiLSTMModel(max_len=Data_.max_len,
vocab_size=Data_.word2id.__len__()+1,
class_num= Data_.tag2id.__len__(),
model_save_path=FLAGS.save_path,
embed_size=FLAGS.embed_size,
hs=FLAGS.hidden_size)
print('Model definition completed!')
train = BiLSTMTrain(data_train, data_valid, data_test, model)
train.train()
print('Model training completed!')
if __name__ == '__main__':
tf.app.run()
|
[
"2392539432@qq.com"
] |
2392539432@qq.com
|
7cd9fa50c093dbb5c2b3d3496f38b231a56fb61e
|
7ed70a9ee30990c5a195ddc96ebb8b3c174d4f6d
|
/hello/world.py
|
0b79d944ce10636eccb90edcaae841f2818cfaa7
|
[] |
no_license
|
greenwell0912/helloworld-scripts
|
f69eee8462d226d3fe4286826832b4e0de8b2d9c
|
e75ed883ee0066ae6052b8e875aecbd6e1a079a0
|
refs/heads/master
| 2020-03-10T20:17:01.811318
| 2018-04-15T05:06:34
| 2018-04-15T05:06:34
| 129,567,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def main():
print("hello world!")
if __name__ == '__main__':
main()
|
[
"hiroki6357@gmail.com"
] |
hiroki6357@gmail.com
|
334f16eca95422f71e3a8b64fd17fd7ac3057b10
|
da6df71f4bc31fae2874285ecfe688540d724910
|
/pipelines/communication.py
|
7fed316af16a903c6e0f2902402af1aa48c2a015
|
[] |
no_license
|
joseilberto/dog_bark_detection
|
67be5551e1735e9bc03f3dcd4db60388f7e8af05
|
1ff993bc703727c38ed0463e546e539763c869e7
|
refs/heads/master
| 2023-03-11T01:32:42.215274
| 2021-02-20T22:29:52
| 2021-02-20T22:29:52
| 236,839,762
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,446
|
py
|
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from os.path import basename
import email
import numpy as np
import smtplib
import ssl
def create_body(files, message):
"""
Create the body of the e-mail from the keys in message and the list of files.
"""
pattern_message = message["pattern"]
bark_messages = message["body_start"]
for file in files:
filename = basename(file)
name, date, hour, minute, seconds = filename.split(".")[0].split("_")
bark_messages += pattern_message(name, hour, minute, seconds, date)
return bark_messages + message["body_end"] + message["signature"]
def send_files(files, sender, receiver, message, send_all = False):
"""
Parameters:
files (list of strings): All the files that will be sent to the receiver.
sender (dict): Dictionary with the data from sender (email, password, port and smtp server).
receiver (dict): Dictionary with the data from receiver (email).
message (dict): Dict containing the data to be used in the body of the text.
send_all (bool): Whether to send all files or randomly select two of them.
"""
context = ssl.create_default_context()
email_msg = MIMEMultipart()
email_msg["From"] = sender["email"]
email_msg["To"] = receiver["email"]
email_msg["Subject"] = message["subject"]
email_msg.attach(MIMEText(message["body"], "plain"))
send_files = (np.random.choice(files, size = 2, replace = False)
if not send_all else files)
for file in send_files:
with open(file, "rb") as attachment:
part = MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
encoders.encode_base64(part)
part.add_header("Content-disposition",
f"attachment; filename= {basename(file)}",)
email_msg.attach(part)
text = email_msg.as_string()
with smtplib.SMTP_SSL(sender["smtp_server"], sender["port"],
context = context) as server:
server.login(sender["email"], sender["password"])
server.sendmail(sender["email"], receiver["email"], text)
print("{} File(s) sent from {} to {}".format(len(send_files),
sender["email"], receiver["email"]))
|
[
"ilbertofjunior@gmail.com"
] |
ilbertofjunior@gmail.com
|
831c204ef9a4257ac6f36dc2e05da942d2a695c0
|
c59aafd22b33cad444d5702f23dd987ab8d29a69
|
/src/fcn/__init__.py
|
6c283a41bc672f44f0cc7127e5dea94d0a700541
|
[] |
no_license
|
pbecker93/DLRC-Unicorns
|
9ddd0396f2c7d43de28903d3ddc92a430f59623e
|
9a7956e7e401b1330ed62d7120ce73ea0465d8c2
|
refs/heads/master
| 2021-07-13T15:23:25.011871
| 2017-10-17T08:49:46
| 2017-10-17T08:49:46
| 106,546,851
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 41
|
py
|
from .fcn_vgg import FCN
__all__=['FCN']
|
[
"roel.wier@gmail.com"
] |
roel.wier@gmail.com
|
6bcb9db3729f35fb8aec94089af0cb9395cbe3a6
|
df513473a78ec2714025a43d673988e73d89dc9e
|
/IAM/detach_policy_group.py
|
5b6926588309e84ec94664993b1c106c3aa09ec9
|
[] |
no_license
|
sgouda0412/AWS-With-Python
|
dfcef51c07696d13a46c63236cfcd130b4916256
|
b3abfa7d324e17d22f81c7e53afc34df6f5d484c
|
refs/heads/master
| 2023-03-17T18:18:49.692190
| 2020-03-04T13:35:48
| 2020-03-04T13:35:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
import boto3
#detach policy from group using client
iam = boto3.client('iam') # IAM low level client object
response = iam.detach_group_policy(
GroupName='group1',
PolicyArn='arn:aws:iam::aws:policy/AdministratorAccess'
)
print(response)
#detach policy from group using resource
iam = boto3.resource('iam') #resource representing an AWS IAM
group = iam.Group('group2')
response = group.detach_policy(
PolicyArn='arn:aws:iam::aws:policy/AmazonS3FullAccess'
)
print(response)
|
[
"mogal.mahesh33@gmail.com"
] |
mogal.mahesh33@gmail.com
|
be8bec20e05cbf5aa26e1cb824b5be2ffe259628
|
541cfbacae0805d6ef61041a23b9854c15be0d55
|
/join_cases.py
|
6c9a2063c9bf65acaa6e1515742da7e32673e713
|
[] |
no_license
|
qdouasbin/postproc_explo_airbus
|
1b37444fe577d8527e71b35a580a2638c4c5b8fe
|
64f102973bb3f13660c7e0ab557fa0ffe793c07a
|
refs/heads/main
| 2023-06-05T11:15:11.673524
| 2021-07-01T10:29:29
| 2021-07-01T10:29:29
| 375,285,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,360
|
py
|
import os
import glob
import numpy as np
import pandas as pd
def join_subdirectory_csv_files(prefix, extension):
"""
1. Seek for csv files according to prefix.extension rule
2. concatenate all files
3. drop duplicates
4. re-index
5. dump clean concatenated file
"""
# Find all csv files in subdirectories
all_filenames = [_file for _file in sorted(glob.glob('*/{}.{}'.format(prefix, extension)))]
# combine all files in the list
# combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames])
combined_csv = pd.read_csv(all_filenames[0])
for _idx, _file in enumerate(all_filenames):
if _idx:
print("\t > %s" % _file)
_df = pd.read_csv(_file)
# combined_csv.merge(_df, how="inner")
combined_csv = pd.merge_ordered(combined_csv, _df, fill_method="ffill")
# Drop duplicates
combined_csv = combined_csv.drop_duplicates().reset_index(drop=True)
# export to csv
combined_csv.to_csv("%s.csv" % prefix, index=False, encoding='utf-8-sig')
if __name__ == "__main__":
# Join all csv files needed here
extension = "csv"
prefixes = ["avbp_local_probe_0", "avbp_mmm", "avbp_venting"]
for prefix in prefixes:
print(" > Joining %s.%s" % (prefix, extension))
join_subdirectory_csv_files(prefix, extension)
|
[
"qdouasbin@cerfacs.fr"
] |
qdouasbin@cerfacs.fr
|
a78acddf6eebc59cad1ebc0e8fdaf53ee0ce2702
|
44a7101ae18c84ffa0e3c674763ba7b500937773
|
/root/Desktop/Scripts/pyinstaller-1.5.1/bh_sshRcmd/bh_sshRcmd.spec
|
66707266787869a8fdd977ad9985b57711fe3880
|
[] |
no_license
|
Draft2007/Scripts
|
cbaa66ce0038f3370c42d93da9308cbd69fb701a
|
0dcc720a1edc882cfce7498ca9504cd9b12b8a44
|
refs/heads/master
| 2016-09-05T20:05:46.601503
| 2015-06-23T00:05:02
| 2015-06-23T00:05:02
| 37,945,893
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
spec
|
# -*- mode: python -*-
a = Analysis([os.path.join(HOMEPATH,'support/_mountzlib.py'), os.path.join(HOMEPATH,'support/useUnicode.py'), '/usr/local/tools/bh_sshRcmd.py'],
pathex=['/usr/local/tools/pyinstaller-1.5.1'])
pyz = PYZ(a.pure)
exe = EXE( pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
name=os.path.join('dist', 'bh_sshRcmd'),
debug=False,
strip=False,
upx=True,
console=1 )
app = BUNDLE(exe,
name=os.path.join('dist', 'bh_sshRcmd.app'))
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
b93f375f3cedfc8c8ea2bc3dcac1516cf225aaa1
|
f7bbc8246a49480f58b5295a14fd0955c32c093c
|
/Desktop/python trader/backtest data/strategy8.py
|
361e722ad7fd1f00cc1ece891ce450ffab5d9c49
|
[] |
no_license
|
jobeSoffa/pythonTrader
|
cf66ea38cc95b1695e0ac66e13a713a81db78e2a
|
6ef7b97d6dcb3726f65538bdbe6641bdb92bb6d3
|
refs/heads/master
| 2020-04-09T04:53:56.805565
| 2018-12-04T09:43:27
| 2018-12-04T09:43:27
| 160,042,254
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,890
|
py
|
import trade
import candleCompressor
import candle
class strategy8(object):
highestBalance = 1000
highestDrawdown = 0
shouldPrint = True
inBuy = False
totalTrades = 0
winCounter = 0
lossCount = 0
com = 0 #.0001
pip = .0001
otherPip = 1/pip
maxTrades = 30
tempArr = []
candleArr = []
momArr = []
balance = 1000
tr = trade.Trader()
#cmp = candleCompressor.candleCompressor()
currentCandle = 0
length = 118
#strategy variables
riskReward = 8
stopLoss = 10
lotSizePercent = .001
movingAverage = 10
candles = 3 #number of 15m candles, 16 = 4hr
shouldPrint = False
def __init__(self, percent,cad,pip,length,shouldPrint):
self.shouldPrint = shouldPrint
self.length = length
self.lotSizePercent = percent
self.candles = cad
self.pip = pip
self.otherPip = 1/self.pip
self.tr = trade.Trader()
self.candleArr = []
self.tempArr = []
self.balance = 1000
def getNumTrades(self):
return self.totalTrades
def getWinRate(self):
return self.tr.getWinRate()
def drawdown(self,c):
if (self.balance+self.closeAll(c) > self.highestBalance):
self.highestBalance = self.balance+self.closeAll(c)
if ((self.highestBalance - (self.balance+self.closeAll(c))) / self.highestBalance > self.highestDrawdown):
self.highestDrawdown = (self.highestBalance - (self.balance+self.closeAll(c))) / self.highestBalance
return self.highestDrawdown
def update(self, h, l, should_print, c):
self.balance += self.tr.update(h, l, self.balance, should_print, c)
def len(self):
return len(self.candleArr)
def closeAll(self,c):
total = self.tr.closeAll(c)
return total
def calMomentum(self, length, arr):
farCandle = arr[len(arr)-1-length].getClose()
thisCandle = arr[len(arr)-1].getClose()
return thisCandle - farCandle
def calMomentum2(self, length, arr):
farCandle = arr[len(arr)-1-length]
thisCandle = arr[len(arr)-1]
return thisCandle - farCandle
def nextCandle(self,cand):
self.tempArr.append(cand)
self.currentCandle +=1
self.drawdown(cand.getClose())
if(self.currentCandle == self.candles):
thisCand = candleCompressor.candleCompressor().compress(self.tempArr)
thisMom = 0
momOfMom = 0
if(len(self.candleArr)>self.length+1):
#print("trade here")
if(len(self.candleArr)> self.length):
thisMom = self.calMomentum(self.length,self.candleArr)
self.momArr.append(thisMom)
if(len(self.momArr) > 3):
momOfMom = self.calMomentum2(1,self.momArr)
if(thisMom > 0 and momOfMom > 0 and not thisMom == 0 and not momOfMom == 0 and self.inBuy == False):
#print("buy")
self.balance += self.tr.crossClose(thisCand.getClose(),self.shouldPrint)
self.tr.crossOpen(thisCand.getClose(), self.com, True, self.balance, self.lotSizePercent,self.shouldPrint)
self.totalTrades += 1
self.inBuy = True
elif(thisMom < 0 and momOfMom < 0 and not thisMom == 0 and not momOfMom == 0 and self.inBuy ==True):
#print("sell")
self.balance += self.tr.crossClose(thisCand.getClose(),self.shouldPrint)
self.tr.crossOpen(thisCand.getClose(), self.com, False, self.balance, self.lotSizePercent,self.shouldPrint)
self.totalTrades += 1
self.inBuy = False
self.candleArr.append(thisCand)
self.currentCandle = 0
self.tempArr = []
|
[
"otisjobe123@gmail.com"
] |
otisjobe123@gmail.com
|
21d9a316ce6cfdf96f3a9f5edaacf77894c81bf4
|
e9d52dcf101aea0327c6b0d7e5244c91dfd62cf6
|
/spexy/adv/samples/simple.py
|
e2df8a641ff75635616d8894582fa8f83e6bf7dd
|
[] |
no_license
|
drufat/spexy
|
6eba9f44a5539245486cd4ef8fefd24bdb7ade6a
|
53255009c1830501986afbf6688142ddefe17b9a
|
refs/heads/master
| 2021-09-18T19:51:47.313946
| 2018-07-19T05:09:02
| 2018-07-19T05:09:02
| 100,453,374
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
# Copyright (C) 2010-2016 Dzhelil S. Rufat. All Rights Reserved.
from sympy import sin, cos
def V(x, y):
return (-sin(y), sin(x))
def p(x, y):
return -cos(x) * cos(y)
|
[
"drufat@caltech.edu"
] |
drufat@caltech.edu
|
77576f4bd93940f460a967a46375dcb841c71094
|
4a418036130cb63caa503719b4162cce9753459b
|
/nemo/collections/nlp/modules/common/transformer/transformer_modules.py
|
63998217f09b5eaa659f8bbb583c263a6befd154
|
[
"Apache-2.0"
] |
permissive
|
kssteven418/Q-ASR
|
89a7dac24d74556453e7b54b26289fd1466070c4
|
aa1ec2ef78fd7606f8f365dfe3e66691a0e48178
|
refs/heads/qasr
| 2023-08-05T15:43:42.493513
| 2021-10-11T20:06:53
| 2021-10-11T20:06:53
| 353,027,973
| 33
| 1
|
Apache-2.0
| 2021-03-30T17:33:26
| 2021-03-30T14:20:56
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 8,624
|
py
|
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.functional import gelu
__all__ = ["TransformerEmbedding"]
class FixedPositionalEncoding(nn.Module):
"""
Fixed positional encoding (embedding layer) from sine and cosine functions
of different frequencies according to https://arxiv.org/abs/1706.03762
Args:
hidden_size: size of the embeddings in the model, also known as d_model
max_sequence_length: maximum allowed length of the input sequence
"""
def __init__(self, hidden_size, max_sequence_length=512):
super().__init__()
pos_enc = torch.zeros(max_sequence_length, hidden_size)
position = torch.arange(0.0, max_sequence_length).unsqueeze(1)
coef = -math.log(10000.0) / hidden_size
div_term = torch.exp(coef * torch.arange(0.0, hidden_size, 2))
pos_enc[:, 0::2] = torch.sin(position * div_term)
pos_enc[:, 1::2] = torch.cos(position * div_term)
pos_enc.div_(math.sqrt(hidden_size))
self.register_buffer('pos_enc', pos_enc)
def forward(self, position_ids):
return torch.embedding(self.pos_enc, position_ids)
class TransformerEmbedding(nn.Module):
"""
Embedding from token and position embeddings.
Optionally add token_type embedding (e.g. type of the sentence in BERT).
Args:
vocab_size: size of the vocabulary
hidden_size: size of the embeddings in the model, also known as d_model
max_sequence_length: maximum allowed length of the input sequence
num_token_types: number of different token types
(e.g. tokens of sentence A and tokens of sentence B in BERT)
embedding_dropout: probability of dropout applied to embeddings
learn_positional_encodings: whether to learn positional encodings or
use fixed (sine-cosine) ones
"""
def __init__(
self,
vocab_size,
hidden_size,
max_sequence_length=512,
num_token_types=2,
embedding_dropout=0.0,
learn_positional_encodings=False,
):
super().__init__()
self.max_sequence_length = max_sequence_length
self.token_embedding = nn.Embedding(vocab_size, hidden_size, padding_idx=0)
if learn_positional_encodings:
self.position_embedding = nn.Embedding(max_sequence_length, hidden_size)
else:
self.position_embedding = FixedPositionalEncoding(hidden_size, max_sequence_length)
self.token_type_embedding = nn.Embedding(num_token_types, hidden_size)
self.layer_norm = nn.LayerNorm(hidden_size, eps=1e-5)
self.dropout = nn.Dropout(embedding_dropout)
def forward(self, input_ids, token_type_ids=None, start_pos=0):
seq_length = input_ids.size(1)
if seq_length > self.max_sequence_length:
raise ValueError(
f"Input sequence is longer than maximum allowed sequence length for positional encoding. "
f"Got {seq_length} and {self.max_sequence_length}"
)
position_ids = torch.arange(
start=start_pos, end=start_pos + seq_length, dtype=torch.long, device=input_ids.device
)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
token_embeddings = self.token_embedding(input_ids)
position_embeddings = self.position_embedding(position_ids)
embeddings = token_embeddings + position_embeddings
if token_type_ids is not None:
token_type_embeddings = self.token_type_embedding(token_type_ids)
embeddings = embeddings + token_type_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class MultiHeadAttention(nn.Module):
"""
Multi-head scaled dot-product attention layer.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
num_attention_heads: number of heads in multi-head attention
attn_score_dropout: probability of dropout applied to attention scores
attn_layer_dropout: probability of dropout applied to the output of the
whole layer, but before layer normalization
"""
def __init__(self, hidden_size, num_attention_heads, attn_score_dropout=0.0, attn_layer_dropout=0.0):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number "
"of attention heads (%d)" % (hidden_size, num_attention_heads)
)
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.attn_head_size = int(hidden_size / num_attention_heads)
self.attn_scale = math.sqrt(math.sqrt(self.attn_head_size))
self.query_net = nn.Linear(hidden_size, hidden_size)
self.key_net = nn.Linear(hidden_size, hidden_size)
self.value_net = nn.Linear(hidden_size, hidden_size)
self.out_projection = nn.Linear(hidden_size, hidden_size)
self.attn_dropout = nn.Dropout(attn_score_dropout)
self.layer_dropout = nn.Dropout(attn_layer_dropout)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attn_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, queries, keys, values, attention_mask):
# attention_mask is needed to hide the tokens which correspond to [PAD]
# in the case of BERT, or to hide the future tokens in the case of
# vanilla language modeling and translation
query = self.query_net(queries)
key = self.key_net(keys)
value = self.value_net(values)
query = self.transpose_for_scores(query) / self.attn_scale
key = self.transpose_for_scores(key) / self.attn_scale
value = self.transpose_for_scores(value)
# for numerical stability we pre-divide query and key by sqrt(sqrt(d))
attention_scores = torch.matmul(query, key.transpose(-1, -2))
if attention_mask is not None:
attention_scores = attention_scores + attention_mask.to(attention_scores.dtype)
attention_probs = torch.softmax(attention_scores, dim=-1)
attention_probs = self.attn_dropout(attention_probs)
context = torch.matmul(attention_probs, value)
context = context.permute(0, 2, 1, 3).contiguous()
new_context_shape = context.size()[:-2] + (self.hidden_size,)
context = context.view(*new_context_shape)
# output projection
output_states = self.out_projection(context)
output_states = self.layer_dropout(output_states)
return output_states
class PositionWiseFF(nn.Module):
"""
Position-wise feed-forward network of Transformer block.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
inner_size: number of neurons in the intermediate part of feed-forward
net, usually is (4-8 x hidden_size) in the papers
ffn_dropout: probability of dropout applied to net output
hidden_act: activation function used between two linear layers
"""
def __init__(self, hidden_size, inner_size, ffn_dropout=0.0, hidden_act="relu"):
super().__init__()
self.dense_in = nn.Linear(hidden_size, inner_size)
self.dense_out = nn.Linear(inner_size, hidden_size)
self.layer_dropout = nn.Dropout(ffn_dropout)
ACT2FN = {"gelu": gelu, "relu": torch.relu}
self.act_fn = ACT2FN[hidden_act]
def forward(self, hidden_states):
output_states = self.dense_in(hidden_states)
output_states = self.act_fn(output_states)
output_states = self.dense_out(output_states)
output_states = self.layer_dropout(output_states)
return output_states
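# --- Usage sketch (added illustration; not part of the original NeMo module) ---
# Shows how the TransformerEmbedding and MultiHeadAttention classes defined above
# might be wired together; the vocabulary size, hidden size, head count and the
# dummy batch shape below are arbitrary assumptions made for this example.
if __name__ == "__main__":
    emb = TransformerEmbedding(vocab_size=100, hidden_size=32, max_sequence_length=16)
    attn = MultiHeadAttention(hidden_size=32, num_attention_heads=4)
    input_ids = torch.randint(0, 100, (2, 10))  # batch of 2 sequences, length 10
    states = emb(input_ids)                     # -> (2, 10, 32) embeddings
    out = attn(states, states, states, None)    # self-attention without a mask
    print(out.shape)                            # torch.Size([2, 10, 32])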
|
[
"noreply@github.com"
] |
noreply@github.com
|
03a7b76aa472ee4f249b294ee548e8d4b9c4d794
|
a923a44d3c4815f645ca2ba84f973083c5dc29a1
|
/audio.py
|
7022ffd8026fa3ee5f185d610030341c99efd1f5
|
[] |
no_license
|
unparalleled-ysj/T2-TF2
|
49ca50fe1e844b64c75d91a22d294b83c7c449a9
|
5c0c22a569c68d6f63648c5f545fd78ffb261033
|
refs/heads/master
| 2022-11-13T17:20:33.963871
| 2020-07-06T04:15:52
| 2020-07-06T04:15:52
| 277,436,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,498
|
py
|
import librosa
import librosa.filters
import numpy as np
from scipy import signal
from scipy.io import wavfile
num_mels = 80
n_fft = 1024
sample_rate = 16000
hop_size = 200
win_size = 800
preemphasis_value = 0.97
min_level_db = -120
ref_level_db = 20
power = 1.2
griffin_lim_iters = 60
fmax = 7600
fmin = 50
max_abs_value = 4.
def dc_notch_filter(wav):
# code from speex
notch_radius = 0.982
den = notch_radius ** 2 + 0.7 * (1 - notch_radius) ** 2
b = np.array([1, -2, 1]) * notch_radius
a = np.array([1, -2 * notch_radius, den])
return signal.lfilter(b, a, wav)
def load_wav(path, sr):
return librosa.core.load(path, sr=sr)[0]
def save_wav(wav, path):
wav = dc_notch_filter(wav)
wav = wav / np.abs(wav).max() * 0.999
f1 = 0.5 * 32767 / max(0.01, np.max(np.abs(wav)))
f2 = np.sign(wav) * np.power(np.abs(wav), 0.95)
wav = f1 * f2
#proposed by @dsmiller
wavfile.write(path, sample_rate, wav.astype(np.int16))
def preemphasis(wav, k):
return signal.lfilter([1, -k], [1], wav)
def inv_preemphasis(wav, k):
return signal.lfilter([1], [1, -k], wav)
def get_hop_size():
return hop_size
def linearspectrogram(wav):
D = _stft(preemphasis(wav, preemphasis_value))
S = _amp_to_db(np.abs(D)) - ref_level_db
return _normalize(S)
def melspectrogram(wav):
D = _stft(preemphasis(wav, preemphasis_value))
S = _amp_to_db(_linear_to_mel(np.abs(D))) - ref_level_db
return _normalize(S)
def inv_linear_spectrogram(linear_spectrogram):
'''Converts linear spectrogram to waveform using librosa'''
D = _denormalize(linear_spectrogram)
S = _db_to_amp(D + ref_level_db) #Convert back to linear
return inv_preemphasis(_griffin_lim(S ** power), preemphasis_value)
def inv_mel_spectrogram(mel_spectrogram):
'''Converts mel spectrogram to waveform using librosa'''
D = _denormalize(mel_spectrogram)
S = _mel_to_linear(_db_to_amp(D + ref_level_db)) # Convert back to linear
return inv_preemphasis(_griffin_lim(S ** power), preemphasis_value)
def _griffin_lim(S):
'''librosa implementation of Griffin-Lim
Based on https://github.com/librosa/librosa/issues/434
'''
angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
    S_complex = np.abs(S).astype(np.complex128)  # np.complex128 instead of the deprecated np.complex alias
y = _istft(S_complex * angles)
for i in range(griffin_lim_iters):
angles = np.exp(1j * np.angle(_stft(y)))
y = _istft(S_complex * angles)
return y
def _stft(y):
return librosa.stft(y=y, n_fft=n_fft, hop_length=get_hop_size(), win_length=win_size)
def _istft(y):
return librosa.istft(y, hop_length=get_hop_size(), win_length=win_size)
# Conversions
_mel_basis = None
_inv_mel_basis = None
def _linear_to_mel(spectogram):
global _mel_basis
if _mel_basis is None:
_mel_basis = _build_mel_basis()
return np.dot(_mel_basis, spectogram)
def _mel_to_linear(mel_spectrogram):
global _inv_mel_basis
if _inv_mel_basis is None:
_inv_mel_basis = np.linalg.pinv(_build_mel_basis())
return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram))
def _build_mel_basis():
assert fmax <= sample_rate // 2
return librosa.filters.mel(sample_rate, n_fft, n_mels=num_mels,
fmin=fmin, fmax=fmax)
def _amp_to_db(x):
min_level = np.exp(min_level_db / 20 * np.log(10))
return 20 * np.log10(np.maximum(min_level, x))
def _db_to_amp(x):
return np.power(10.0, (x) * 0.05)
def _normalize(S):
return (2 * max_abs_value) * ((S - min_level_db) / (-min_level_db)) - max_abs_value
def _denormalize(D):
return (((D + max_abs_value) * -min_level_db / (2 * max_abs_value)) + min_level_db)
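# --- Usage sketch (added illustration; the wav file paths below are assumptions) ---
# Round-trips a 16 kHz recording through the mel representation and Griffin-Lim.
if __name__ == '__main__':
    wav = load_wav('example.wav', sample_rate)    # hypothetical input file
    mel = melspectrogram(wav)                     # (num_mels, frames), normalized dB
    reconstructed = inv_mel_spectrogram(mel)      # Griffin-Lim reconstruction
    save_wav(reconstructed, 'reconstructed.wav')  # hypothetical output file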
|
[
"unparalleled.ysj@qq.com"
] |
unparalleled.ysj@qq.com
|
e603161e2e56683dbefc6a30f0d9444b0da60f3e
|
d220e6b1a15dc384567ec30c0d80dcc51566fdac
|
/app/scrape/reload_series.py
|
bf0a9a83d7be959a0addde763d2546e2e2feed84
|
[
"Apache-2.0"
] |
permissive
|
cs373n/idb
|
481c7dae6bdb22bb5955c368b94c32e954fe9062
|
274d843609fc8958d65bfd0c04c90a67acc70ccb
|
refs/heads/master
| 2020-06-25T08:33:17.558044
| 2018-04-12T07:04:12
| 2018-04-12T07:04:12
| 94,237,770
| 2
| 16
| null | 2017-07-21T15:18:44
| 2017-06-13T17:11:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 5,062
|
py
|
import requests, json, time, datetime, hashlib
from models import db, Series, Event, Character, Creator
class MarvelRequest():
def __init__(self):
self.publicKey = ""
self.privateKey = ""
self.timeStamp = str(datetime.datetime.utcnow())
self.baseurl = "http://gateway.marvel.com/v1/public/"
# Marvel requires MD5 hash code for server side access
    # Must be md5(ts + privatekey + publickey), in that order (as implemented below)
def compute_md5(self):
return hashlib.md5((self.timeStamp + self.privateKey + self.publicKey).encode('utf-8')).hexdigest()
def request(self, endpoint, offset):
# Parameters for the call to Marvel API
payload = { "ts": self.timeStamp, "apikey": self.publicKey, "hash": self.compute_md5(), "offset": offset}
# Make the HTTP request, return a Response object
return requests.get(self.baseurl + endpoint, params=payload)
def main():
#fcharacters = open('series_characters2.txt', 'a')
#fcreators = open('series_creators2.txt', 'a')
#fevents = open('series_events2.txt', 'a')
marvel = MarvelRequest()
"""
json.loads(String) takes in json formatted string, and outputs
data according to the conversion table at json library website
"""
index = 0
for offset in range(0, 10000, 20):
response = marvel.request("series", offset) # No trailing slash allowed here
print(response.status_code)
assert response.status_code == 200
series = json.loads(response.text)
idNum = 0
title = ""
desc = ""
path = ""
start = ""
end = ""
numCreators = ""
numChars = ""
numComics = ""
numEvents = ""
for series_meta_keys, series_meta_data in series['data'].items():
# series_meta_keys: offset, limit, total, count, results[] from Marvel
# JSON structure
if series_meta_keys == 'results':
for series in series_meta_data:
if series['id'] != "":
for series_attribute_keys, series_attribute in series.items():
# now stepping through title, description, thumbnail, etc.
if series_attribute_keys == 'id':
idNum = int(series_attribute)
# idNum = idNum.encode('utf-8')
elif series_attribute_keys == 'title':
title = series_attribute
title = title.encode('utf-8')
# print('Title: ' + title)
elif series_attribute_keys == 'description':
if series_attribute != None:
"""
Error arose when using str(description) and
transferring output to text file: You must not
use str(...) to strip away unicode symbols
that often appear in Marvel descriptions!
"""
desc = series_attribute
desc = desc.encode('utf-8')
# print('Description: ' + desc)
elif series_attribute_keys == 'startYear':
# print("Start Year: " + str(series_attribute))
start = str(series_attribute)
elif series_attribute_keys == 'endYear':
# print("End Year: " + str(series_attribute))
end = str(series_attribute)
elif series_attribute_keys == 'thumbnail':
path = str(series_attribute['path'])
temp = path.split('/')
for v in temp :
if v == 'image_not_available':
path = None
if path != None:
path = str(path) + '.' + str(series_attribute['extension'])
# print (path)
if series_attribute_keys == 'creators':
# print("Comics in series: " + str(series_attribute['available']))
numCreators = int(series_attribute['available'])
#creator_ids = [series['id']]
#for creator_uri in series_attribute['items']:
# resource_path = creator_uri['resourceURI'].split('/')
# creator_ids.append(int(resource_path[-1]))
#fcreators.write(str(creator_ids) + '\n')
elif series_attribute_keys == 'characters':
# print("Characters in series: " + str(series_attribute['available']))
numChars = int(series_attribute['available'])
#character_ids = [series['id']]
#for character in series_attribute['items']:
# resource_path = character['resourceURI'].split('/')
#
# character_ids.append(int(resource_path[-1]))
#fcharacters.write(str(character_ids) + '\n')
elif series_attribute_keys == 'comics':
numComics = int(series_attribute['available'])
elif series_attribute_keys == 'events':
numEvents = str(series_attribute['available'])
#event_ids = [series['id']]
#for event in series_attribute['items']:
# resource_path = event['resourceURI'].split('/')
# event_ids.append(int(resource_path[-1]))
#fevents.write(str(event_ids) + '\n')
newEntry = Series(idNum, title, desc, start, end, path, numCreators, numChars, numComics, numEvents)
db.session.merge(newEntry)
db.session.commit()
index += 1
print("processed series " + str(index))
if __name__ == '__main__':
main()
|
[
"saketsingh2018@gmail.com"
] |
saketsingh2018@gmail.com
|
c23b86d447f850e4bd75066d30e311f702ae67d0
|
9b92b21f39870e1b8a0de6bc94ff08a66690b1ea
|
/sources/webapp/SyncronisationDAO.py
|
bf64956c5930f3244b62286e3d037dc75d5ef9a1
|
[] |
no_license
|
sebastiansIT/HTML5Podcatcher
|
ac5bb3cf128d4785f478b43e23ea57c62cfadce0
|
f1d9f446df0333eec3ef59219b28d683b7f17c5f
|
refs/heads/master
| 2023-06-25T19:01:39.039093
| 2021-05-08T05:51:47
| 2021-05-08T05:51:47
| 10,554,866
| 8
| 1
| null | 2023-03-04T03:04:49
| 2013-06-07T17:10:11
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,925
|
py
|
import sqlite3
import datetime
import SyncronisationModel
import cgi, cgitb
cgitb.enable()
class Sqlite3DAO:
def __init__(self, fileName):
self.dbFileName = fileName
    def DataBaseInitialisation(self):
connection = sqlite3.connect(self.dbFileName)
cursor = connection.cursor()
sql = "CREATE TABLE SyncPoints(ID INTEGER PRIMARY KEY, Key VARCHAR(100) UNIQUE, Value TEXT) "
cursor.execute(sql)
connection.commit()
connection.close()
def Select(self, key=None):
connection = sqlite3.connect(self.dbFileName)
cursor = connection.cursor()
sql = "SELECT ID, Key, Value FROM SyncPoints"
try:
if key != None:
sql = sql + " WHERE Key = ?"
cursor.execute(sql, (key,))
entries = []
for row in cursor:
entry = SyncronisationModel.Point(row[0], row[1], row[2])
entries.append(entry)
except:
entries = ["error"]
connection.commit()
connection.close()
return entries
def Insert(self, key, value):
connection = sqlite3.connect(self.dbFileName)
cursor = connection.cursor()
sql = "INSERT INTO SyncPoints(Key, Value) VALUES (?, ?)"
cursor.execute(sql, (key, value))
connection.commit()
connection.close()
return self.Select(key=key)
def Update(self, key, value):
connection = sqlite3.connect(self.dbFileName)
cursor = connection.cursor()
sql = "UPDATE SyncPoints SET Value = ? WHERE Key = ?"
cursor.execute(sql, (value, key))
connection.commit()
connection.close()
return self.Select(key)
def Delete(self, key):
connection = sqlite3.connect(self.dbFileName)
cursor = connection.cursor()
sql = "DELETE FROM SyncPoints WHERE Key = ?"
cursor.execute(sql, (key,))
connection.commit()
connection.close()
def Save(self, key, value):
if len(self.Select(key)) > 0:
#return [SyncronisationModel.Point(7, "test", "{test}")]
return self.Update(key, value)
else:
return self.Insert(key, value)
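# --- Usage sketch (added illustration; the database file name is an assumption) ---
if __name__ == '__main__':
    dao = Sqlite3DAO('syncpoints.sqlite3')
    dao.DataBaseInitialisation()  # run once to create the SyncPoints table
    print(dao.Save('lastSync', '{"example": true}'))  # list of SyncronisationModel.Point entries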
|
[
"sebastian@human-injection.de"
] |
sebastian@human-injection.de
|
d811f5d03ae12bdeb567632e2d82b3ecccc87751
|
a1e3e7cf1d27b85d9472c6353e7646d37528b241
|
/q11.py
|
3ea7528239387d3ae6df885be655e4e6ebe1b32f
|
[] |
no_license
|
osama1998H/standerdLearnd-string
|
421148f81c2c604f6c75dac568ff1faeb20922ce
|
0af39cd2fd43be45bb54aca2826bc8bf56e399ed
|
refs/heads/main
| 2023-09-01T04:21:52.499680
| 2021-05-15T19:54:50
| 2021-05-15T19:54:50
| 365,533,408
| 0
| 0
| null | 2023-08-29T08:31:40
| 2021-05-08T14:21:53
|
Python
|
UTF-8
|
Python
| false
| false
| 325
|
py
|
string = input("enter the string: ")
def del_odd(string: str)->str:
new_string = ""
string = [i for i in string]
for i in string:
if string.index(i) % 2 != 0:
string.remove(i)
for i in string:
new_string += i
return new_string
new_string = del_odd(string)
print(new_string)
|
[
"osamamuhammed555@gmail.com"
] |
osamamuhammed555@gmail.com
|
87990ee7c013adfed4d8152d526bab78f47feee2
|
9550ce4a80169d21b556b22679a9462f98438e32
|
/app/urls.py
|
32f3b1ab973c04cbcb9ce11ea3ea6d0850315945
|
[
"Apache-2.0"
] |
permissive
|
erics1996/questionnaire_django
|
87cc44bd745eb810861349effc126ed3dfbd6508
|
1006c61eba1e9efec0801299938eb13c16a0b292
|
refs/heads/master
| 2022-12-15T04:47:39.042594
| 2020-09-02T17:34:33
| 2020-09-02T17:34:33
| 284,580,189
| 0
| 0
|
Apache-2.0
| 2020-09-02T17:34:34
| 2020-08-03T02:02:20
|
Python
|
UTF-8
|
Python
| false
| false
| 300
|
py
|
from django.contrib import admin
from django.urls import path, re_path
from .views import backend
urlpatterns = [
path('', backend.IndexView.as_view()),
re_path('survey/(?P<pk>\d+)/', backend.SurveyDetailView.as_view()),
re_path('(?P<pk>\d+)/download/', backend.DownloadView.as_view())
]
|
[
"erics1996@yeah.net"
] |
erics1996@yeah.net
|
e7057bc48d0c58e842a5c16fe3711fae0386968b
|
5c534f0a3912ef002834398c765ed1e3f98c9173
|
/Quotes/test.py
|
1b8164565ecfabfdb0762a61f09b818a5961c220
|
[] |
no_license
|
ormanya/Supyiel
|
894c2acc7f05683f1cd9101a413f3c93fd69d149
|
77e291c5b73da2e292f6b38ff40aa2b3d70915cb
|
refs/heads/master
| 2023-03-13T13:58:20.944904
| 2023-03-02T16:53:02
| 2023-03-02T16:53:02
| 80,935,297
| 8
| 0
| null | 2022-11-10T18:06:34
| 2017-02-04T17:24:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,871
|
py
|
###
# Copyright (c) 2008,2012 Kevin Funk
# Copyright (c) 2014-2015 James Lu
# Copyright (c) 2016-2017 Ormanya
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import os
class QuotesTestCase(PluginTestCase):
plugins = ('Quotes',)
def testTay(self):
self.assertNotError("tay")
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
[
"liriel@sekrit.me"
] |
liriel@sekrit.me
|
fd3fd13935a93c20f91027c39f5327878e821fa3
|
c72fb291300941c756c4fe4e7bbd443880214367
|
/files/models.py
|
a6c1f3b0d75882226cbe0bbd77c225b9a7167397
|
[] |
no_license
|
garywangcn/django-3dshow
|
1e4893331b70630cb989b62fb95d58703cc9bc9d
|
4dad878ebbf13de89facd73c0d6d57860a01a0df
|
refs/heads/master
| 2021-05-11T10:23:59.516091
| 2018-01-24T09:33:42
| 2018-01-24T09:33:42
| 118,099,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
from django.db import models
# Create your models here.
class Document(models.Model):
name = models.CharField(max_length=255, blank=False)
description = models.CharField(max_length=1000, null=True, blank=False)
picture = models.FileField(upload_to='documents/')
modelpackage = models.FileField(upload_to='documents/')
uploaded_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
|
[
"15818651704@163.com"
] |
15818651704@163.com
|
8f634225763e18482cad60471aa5f39cadda7853
|
a00eab2cfe9566641c4c5ec99909490543e734d5
|
/BackPropagation/solutions/compare_loss_acc.py
|
abf09d21879aa494b1838e53417b23696449f17b
|
[] |
no_license
|
indianvalantine/High-Dimensional-Deep-Learning
|
55823c1d80ffee2e50bc20fcdf24f24cc6de8c14
|
47ee6263f40496e7ab5f6a030508ecd531732cb5
|
refs/heads/master
| 2022-12-27T21:00:28.090851
| 2020-09-28T14:04:44
| 2020-09-28T14:04:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 792
|
py
|
import matplotlib.pyplot as plt  # the plotting calls below rely on matplotlib
history = keras_model.history.history
fig = plt.figure(figsize=(20,5))
ax = fig.add_subplot(1,2,1)
ax.plot(history["loss"], label="keras", color="red")
ax.plot(history["val_loss"], label="keras_test", linestyle="dashed" ,color="red")
ax.plot(losses, label="numpy", color="blue")
ax.plot(losses_test, label="numpy_test", color="blue")
ax.set_xlabel("Epochs")
ax.set_ylabel("Loss")
ax.set_title("Training loss")
ax.legend(loc='best')
ax = fig.add_subplot(1,2,2)
ax.plot(history["acc"], label="keras", color="red")
ax.plot(history["val_acc"], label="keras_test", linestyle="dashed" ,color="red")
ax.plot(accuracies, label="numpy", color="blue")
ax.plot(accuracies, label="numpy_test", color="blue")
ax.set_ylabel("accuracy")
ax.set_xlabel("Epochs")
ax.legend(loc='best')
ax.set_title("Accuracy")
|
[
"brendan.guillouet@gmail.com"
] |
brendan.guillouet@gmail.com
|
6925f9d279dd7fc2386a10b7f0527b1c88816f95
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli/azure/cli/command_modules/servicebus/aaz/latest/servicebus/topic/_list.py
|
751ddf434b8c609435a955fc4eaa4a17a49bdf38
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463
| 2023-08-17T06:00:10
| 2023-08-17T06:00:10
| 51,040,886
| 4,018
| 3,310
|
MIT
| 2023-09-14T11:11:05
| 2016-02-04T00:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 10,902
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"servicebus topic list",
)
class List(AAZCommand):
"""List all the topics in a namespace.
"""
_aaz_info = {
"version": "2022-01-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.servicebus/namespaces/{}/topics", "2022-01-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
return self.build_paging(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.namespace_name = AAZStrArg(
options=["--namespace-name"],
help="The namespace name",
required=True,
fmt=AAZStrArgFormat(
max_length=50,
min_length=6,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.skip = AAZIntArg(
options=["--skip"],
help="Skip is only used if a previous operation returned a partial result. If a previous response contains a nextLink element, the value of the nextLink element will include a skip parameter that specifies a starting point to use for subsequent calls.",
fmt=AAZIntArgFormat(
maximum=1000,
minimum=0,
),
)
_args_schema.top = AAZIntArg(
options=["--top"],
help="May be used to limit the number of results to the most recent N usageDetails.",
fmt=AAZIntArgFormat(
maximum=1000,
minimum=1,
),
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.TopicsListByNamespace(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
return result, next_link
class TopicsListByNamespace(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"namespaceName", self.ctx.args.namespace_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"$skip", self.ctx.args.skip,
),
**self.serialize_query_param(
"$top", self.ctx.args.top,
),
**self.serialize_query_param(
"api-version", "2022-01-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.next_link = AAZStrType(
serialized_name="nextLink",
)
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.id = AAZStrType(
flags={"read_only": True},
)
_element.location = AAZStrType(
flags={"read_only": True},
)
_element.name = AAZStrType(
flags={"read_only": True},
)
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.value.Element.properties
properties.accessed_at = AAZStrType(
serialized_name="accessedAt",
flags={"read_only": True},
)
properties.auto_delete_on_idle = AAZStrType(
serialized_name="autoDeleteOnIdle",
)
properties.count_details = AAZObjectType(
serialized_name="countDetails",
)
properties.created_at = AAZStrType(
serialized_name="createdAt",
flags={"read_only": True},
)
properties.default_message_time_to_live = AAZStrType(
serialized_name="defaultMessageTimeToLive",
)
properties.duplicate_detection_history_time_window = AAZStrType(
serialized_name="duplicateDetectionHistoryTimeWindow",
)
properties.enable_batched_operations = AAZBoolType(
serialized_name="enableBatchedOperations",
)
properties.enable_express = AAZBoolType(
serialized_name="enableExpress",
)
properties.enable_partitioning = AAZBoolType(
serialized_name="enablePartitioning",
)
properties.max_message_size_in_kilobytes = AAZIntType(
serialized_name="maxMessageSizeInKilobytes",
)
properties.max_size_in_megabytes = AAZIntType(
serialized_name="maxSizeInMegabytes",
)
properties.requires_duplicate_detection = AAZBoolType(
serialized_name="requiresDuplicateDetection",
)
properties.size_in_bytes = AAZIntType(
serialized_name="sizeInBytes",
flags={"read_only": True},
)
properties.status = AAZStrType()
properties.subscription_count = AAZIntType(
serialized_name="subscriptionCount",
flags={"read_only": True},
)
properties.support_ordering = AAZBoolType(
serialized_name="supportOrdering",
)
properties.updated_at = AAZStrType(
serialized_name="updatedAt",
flags={"read_only": True},
)
count_details = cls._schema_on_200.value.Element.properties.count_details
count_details.active_message_count = AAZIntType(
serialized_name="activeMessageCount",
flags={"read_only": True},
)
count_details.dead_letter_message_count = AAZIntType(
serialized_name="deadLetterMessageCount",
flags={"read_only": True},
)
count_details.scheduled_message_count = AAZIntType(
serialized_name="scheduledMessageCount",
flags={"read_only": True},
)
count_details.transfer_dead_letter_message_count = AAZIntType(
serialized_name="transferDeadLetterMessageCount",
flags={"read_only": True},
)
count_details.transfer_message_count = AAZIntType(
serialized_name="transferMessageCount",
flags={"read_only": True},
)
system_data = cls._schema_on_200.value.Element.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
)
return cls._schema_on_200
class _ListHelper:
"""Helper class for List"""
__all__ = ["List"]
|
[
"noreply@github.com"
] |
noreply@github.com
|
56667ede08c017457c4c2cb5392283faa5332663
|
ba4c50d4b03e097f71e5af8ba639721fcb7e1fc5
|
/plot_tp6_2.py
|
f9bb738ef4835ca9238907b8192312d55c1bd760
|
[] |
no_license
|
EricHorvat/itbaSSfinal
|
0b1b4bc0c6de03b4a6376f2d0a9c9cd3fb310884
|
76f4bfed0c341da474595cc4d35c1a30ddd41506
|
refs/heads/master
| 2020-04-07T01:30:29.438910
| 2018-12-13T03:28:46
| 2018-12-13T03:28:46
| 157,943,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,952
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import json
def parse_filee(filename):
with open(filename,"r") as file:
return json.loads(file.readline())
def plot_surfacee(dss,dvelocities):
fig = plt.figure()
ax = plt.gca()
oavg = []
ostd = []
for index, ds in enumerate(dss):
o = []
x = np.arange(0,len(ds[0]))
for d in ds:
o.append(d[-1])
o = np.array(o)
dd = np.array(np.asarray(ds))
davg= np.average(dd, axis=0)
oavg.append(np.average(o))
ostd.append(np.std(o))
ax.plot(x,davg)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
legends = ["0.8 m/s","1.45 m/s","2.1 m/s","2.75 m/s","3.4 m/s","4.05 m/s","4.7 m/s","5.35 m/s","6.0 m/s"]
# Put a legend to the right of the current axis
ax.legend(legends, loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("Particulas egresadas")
plt.ylabel("Tiempo [s]")
plt.savefig('2.png')
fig = plt.figure()
ax = plt.gca()
ax.errorbar(dvelocities,oavg,yerr=ostd, fmt='o')
ax.errorbar(dvelocities,oavg, fmt='o')
plt.xlabel("Velocidad deseada [m/s]")
plt.ylabel("Tiempo de salida [s]")
plt.savefig('2_t.png')
plt.close()
def mains():
#desired_velocities = [1.45,2.1,2.75,3.4,4.05,4.7,5.35,6.0]
desired_velocities = [0.8,1.45,2.1,2.75,3.4,4.05,4.7,5.35,6.0]
#desired_velocities = [0.8,1.45,2.1,2.1 + 0.65/3,2.75 - 0.65/3,2.75,2.75 + 0.65/3,3.4 - 0.65/3,3.4,4.05,4.7,5.35,6.0]
#desired_velocities = [2.1,2.1 + 0.65/3,2.75 - 0.65/3,2.75,2.75 + 0.65/3,3.4 - 0.65/3,3.4]
ds = []
for dvel in desired_velocities:
d = []
for i in range(0,5):
d.append(parse_filee("people-" + str("%0.2f" % dvel) + "dVel-"+ str(i) + "time.txt"))
ds.append(d)
plot_surfacee(ds,desired_velocities)
if __name__ == '__main__':
mains()
|
[
"eric.nahuel.horvat@gmail.com"
] |
eric.nahuel.horvat@gmail.com
|
e39dd51fde7cd071010f467f6c281e6f42fb42b2
|
1968f0d6064a6947538a54371b01b13c425a56c4
|
/errorsOnSite.py
|
0e08966c7400df0732547c458ae33be2fc3b7b0b
|
[] |
no_license
|
pahkao/coursera1
|
ef695543625dfdb1fa50a78bd62b16eed600944a
|
2506e3f54258da83c9e19e1498825bd799c3d152
|
refs/heads/master
| 2022-09-12T06:15:44.159155
| 2020-05-30T21:57:15
| 2020-05-30T21:57:15
| 255,118,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,760
|
py
|
# #### Imports
import re
from pprint import pprint
#!pip install pyaspeller
from pyaspeller import Word
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from webdriver_manager.chrome import ChromeDriverManager  # needed for ChromeDriverManager().install() below
options = Options()
options.add_argument("start-maximized")
options.add_argument("disable-infobars")
options.add_argument("--disable-extensions")
options.add_argument("--headless")
browser = webdriver.Chrome(options=options, executable_path=ChromeDriverManager().install())
# #### Parsing
check_url = 'http://station-l.ru/'
browser.get(check_url)
all_urls = [check_url]
for a in browser.find_elements_by_tag_name('a'):
a = a.get_attribute('href')
if type(a) == str and 'jivosite' not in a and re.match('.+\.(jpg|pdf|png)$', a) == None:
domain = re.sub('https:\/\/(w{3}\.)?(.+?)\/.*', r'\2', check_url )
a = re.sub('(.*)(\?|\#)', r'\1', a)
try:
if domain in a and a not in all_urls:
all_urls.append(a)
except:
continue
all_urls
def get_words(body):
unique_words_list = []
for frase in body.split('\n'):
frase = frase.split(' ')
for word in frase:
word = re.sub('[^ёЁа-яА-Яa-zA-Z0-9-–—]', '', word)
if word not in unique_words_list and re.match('(^\d+$)|(^\W+$)|(^$)', word) == None:
unique_words_list.append(word)
return sorted(unique_words_list)
body = {}
for url in all_urls:
print(url)
browser.get(url)
browser.find_element_by_tag_name('body').send_keys(Keys.END) # scroll page to bottom
body[url] = ''
body[url] += ' ' + browser.find_element_by_tag_name('body').text
if len(browser.find_elements_by_tag_name('div')) != 0:
for div in browser.find_elements_by_tag_name('div'):
try:
body[url] += ' ' + div.text
except:
continue
    print(f'Words to check on page {url}: {len(get_words(body[url]))}\n')
# #### Spell checking
for url in body:
errors = {}
print(url)
for clean_word in get_words(body[url]):
try:
check = Word(clean_word)
if check.correct == False:
if clean_word not in errors:
errors[clean_word] = {}
errors[clean_word]['variants'] = check.variants
errors[clean_word]['count'] = 1
else:
errors[clean_word]['count'] += 1
except Exception as e:
            print(f'Something went wrong: {e}, word: {clean_word}')
continue
pprint(errors)
print('\n')
browser.quit()
|
[
"olkhovskiy91@gmail.com"
] |
olkhovskiy91@gmail.com
|
21064aaea82657175bb68471f1411164393e0210
|
657c80336bce1cc6158cd349ce208c5e680a4d0d
|
/contrib/projection/tests/projection/base_projection.py
|
de53d6895412de112d31a959926d9cdb47b6ef9c
|
[
"BSD-3-Clause"
] |
permissive
|
Xinmudotmoe/pyglet
|
b37628618647bf3b1e3d7db28202a5e14c60450c
|
144257c365ca85528c6a4c5bed8141e683d7a9b6
|
refs/heads/master
| 2021-05-29T22:05:40.676643
| 2015-10-24T05:55:49
| 2015-10-24T05:55:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
#!/usr/bin/python
# $Id:$
from pyglet.gl import *
def fillrect(x, y, width, height):
glBegin(GL_QUADS)
glVertex2f(x, y)
glVertex2f(x + width, y)
glVertex2f(x + width, y + height)
glVertex2f(x, y + height)
glEnd()
def rect(x, y, width, height):
glBegin(GL_LINE_LOOP)
glVertex2f(x, y)
glVertex2f(x + width, y)
glVertex2f(x + width, y + height)
glVertex2f(x, y + height)
glEnd()
|
[
"leif.theden@gmail.com"
] |
leif.theden@gmail.com
|
dc0f1debf616d07e130ae2adb13b8209fd2e2f74
|
99afa83eda09cf552466ddf90314cb01d07b166a
|
/testapp/models.py
|
c1fa45c2c96048893e614bf9142070231858f126
|
[] |
no_license
|
jithinvijayan007/Lithoera
|
358c9a6191d6510ac07229e7a92eadd89d70e14f
|
33e3639e882f79b12541f92070dad74483fdfa72
|
refs/heads/master
| 2023-01-05T18:29:37.388869
| 2020-11-02T11:58:27
| 2020-11-02T11:58:27
| 309,316,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,764
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
# Create your models here.
class MyAccountManager(BaseUserManager):
def create_user(self, email, username, password=None):
if not email:
raise ValueError('Users must have an email address')
if not username:
raise ValueError('Users must have a username')
user = self.model(
email=self.normalize_email(email),
username=username,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, username, password):
user = self.create_user(
email=self.normalize_email(email),
password=password,
username=username,
)
user.is_admin = True
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class Account(AbstractBaseUser):
email = models.EmailField(verbose_name="email", max_length=60, unique=True)
username = models.CharField(max_length=30, unique=True)
date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True)
last_login = models.DateTimeField(verbose_name='last login', auto_now=True)
is_admin = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
objects = MyAccountManager()
def __str__(self):
return self.email
# For checking permissions. to keep it simple all admin have ALL permissons
def has_perm(self, perm, obj=None):
return self.is_admin
# Does this user have permission to view this app? (ALWAYS YES FOR SIMPLICITY)
def has_module_perms(self, app_label):
return True
|
[
"jithinvijayan007@gmail.com"
] |
jithinvijayan007@gmail.com
|
09ee4a21ddc1b92f8f3846d847e7be6be388b97a
|
a8fd86dce16f7fec7a5f00ecf97270fb7a8243b9
|
/phylo3.py
|
02e5ff23a7be96c9c780ec7e9b98ff7b8ab5952b
|
[] |
no_license
|
tomopfuku/mammalian_morphological_clocks
|
8a8f68b498297f95b9222843de416912c50e2e3a
|
80b3179cb8101ac654e516f71282d7bbba288934
|
refs/heads/master
| 2022-10-18T02:39:54.477321
| 2017-11-28T17:07:34
| 2017-11-28T17:07:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,086
|
py
|
PREORDER = 0; POSTORDER = 1
BRANCHLENGTH = 0; INTERNODES = 1
#trying to deprecate this.
class Node:
def __init__(self):
self.data = {}
self.isroot = False
self.istip = False
self.label = None
self.length = 0
self.old_length = 0
self.parent = None
self.children = []
self.nchildren = 0
self.comment = None
#self.charst = 0.
#self.sigsq = 0.
#self.rate_class = 0
self.height = None
self.number = 0
self.occurrences = None
self.num_occurrences = None
def get_newick_repr(self,showbl=False,show_rate=False):
ret = ""
for i in range(len(self.children)):
if i == 0:
ret += "("
ret += self.children[i].get_newick_repr(showbl,show_rate)
if i == len(self.children)-1:
ret += ")"
else:
ret += ","
if self.label != None:
ret += self.label
if showbl == True:
ret += ":" + str(self.length)
if show_rate ==True:
ret += ":" + str(self.sigsq)
return ret
def add_child(self, child):
assert child not in self.children
self.children.append(child)
child.parent = self
self.nchildren += 1
def remove_child(self, child):
assert child in self.children
self.children.remove(child)
child.parent = None
self.nchildren -= 1
def prune_from_node(self):
for i in self.descendants("POSTORDER"):
if len(self.children) == 0:
self.prune()
def leaves(self):
return [ n for n in self.iternodes() if n.istip ]
def iternodes(self, order=PREORDER, v=None):
if order == PREORDER:
yield self
#print [i.label for i in self.children]
for child in self.children:
for d in child.iternodes(order):
yield d
if order == POSTORDER:
yield self
"""
def postorder_nodes(self):
[yield d for d in child.postorder_nodes() for child in self.children]
yield self
"""
def descendants(self, order=PREORDER, v=None):
if v is None:
v = []
#assert order in ("PREORDER", "POSTORDER")
for child in self.children:
if order == PREORDER:
v.append(child)
else:
v.insert(0, child)
if child.children:
child.descendants(order, v)
return v
def find_descendant(self, label):
if label == self.label:
return self
else:
for child in self.children:
n = child.find_descendant(label)
if n:
return n
return None
def prune(self):
p = self.parent
if p:
p.remove_child(self)
return p
def graft(self, node):
parent = self.parent
parent.remove_child(self)
n = Node()
n.add_child(self)
n.add_child(node)
parent.add_child(n)
def leaf_distances(self, store=None, measure=BRANCHLENGTH):
if store is None:
store = {}
leaf2len = {}
if self.children:
for child in self.children:
if measure == BRANCHLENGTH:
assert child.length is not None
dist = child.length
elif measure == INTERNODES:
dist = 1
else:
raise "InvalidMeasure"
child.leaf_distances(store, measure)
if child.istip:
leaf2len[child.label] = dist
else:
for k, v in store[child].items():
leaf2len[k] = v + dist
else:
leaf2len[self] = {self.label: 0}
store[self] = leaf2len
return store
def rootpath(self):
n = self
while 1:
yield n
if n.parent:
n = n.parent
else:
break
def tip_labels(self):
labs = []
for i in self.leaves():
labs.append(i.label)
return labs
def nnodes(self, type="internal"):
n = 0
if type == "internal":
for i in self.iternodes():
if i.istip or i == self:
continue
n += 1
elif type == "all":
for i in self.iternodes():
n+=1
elif type == "tips":
for i in self.iternodes():
if i.istip:
n+=1
return n
"""
# this returns all possible NNIs for a single bifurcating node with bifurcating children
# tree should probably be deep copied before using this
"""
def nni_set(self):
if len(self.children) != 2 or len(self.descendants()) < 3:
print "this only works on bifurcating selfs that parent multiple subtrees (ie. does not lead to only terminal edges)"
return None
subtrees = []
for child in self.children:
if child.istip == False:
assert len(child.children) == 2
for sub in child.children:
subtrees.append(sub)
subtrees += [i for i in self.children if i.istip] #add terminal subtree child --> 'c' in (a,b),c))
assert len(subtrees) == 3 or len(subtrees) == 4
nni_trees = []
for c1 in subtrees:
for c2 in subtrees:
p1 = c1.parent
p2 = c2.parent
if c1 == c2 or p1 == p2: #can't swap subtrees with same parent
continue
p1.remove_child(c1)
p1.add_child(c2)
p2.remove_child(c2)
p2.add_child(c1)
c1.parent = p2 #swap subtrees
c2.parent = p1
nni_trees.append(self.get_newick_repr())
nni_trees = list(set(nni_trees)) #remove duplicates
#print len(nni_trees)
return nni_trees
def reroot(oldroot, newroot):
oldroot.isroot = False
newroot.isroot = True
v = []
n = newroot
while 1:
v.append(n)
if not n.parent: break
n = n.parent
#print [ x.label for x in v ]
v.reverse()
for i, cp in enumerate(v[:-1]):
node = v[i+1]
# node is current node; cp is current parent
#print node.label, cp.label
cp.remove_child(node)
node.add_child(cp)
cp.length = node.length
return newroot
def getMRCATraverseFromPath(path1, curn2):
mrca = None
#find first match between this node and the first one
parent = curn2
x = True;
while x == True:
for i in range(len(path1)):
if parent == path1[i]:
mrca = parent
x = False
break
parent = parent.parent
return mrca
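# --- Usage sketch (added illustration; labels and branch lengths are made up) ---
if __name__ == "__main__":
    root = Node()
    root.label = "root"
    for lab in ["A", "B"]:
        tip = Node()
        tip.istip = True
        tip.label = lab
        tip.length = 1.0
        root.add_child(tip)
    print(root.get_newick_repr(showbl=True))  # -> (A:1.0,B:1.0)root:0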
|
[
"cfukuchi@umich.edu"
] |
cfukuchi@umich.edu
|
f5d215c564dfad6c96246bd529b6f6afd273eafa
|
beac917ee396ffb33c4f13d2ceff188c3bf5148e
|
/app/evaluation.py
|
bdc7f21c84bae0c079063d2953eca979513fa410
|
[] |
no_license
|
Boj3alex/rpn-calculator
|
75532b25b312feed163e7f0bf1e45887c35ad417
|
705c21e250a1105ae02ab4e620546e77fd1d805f
|
refs/heads/master
| 2023-01-09T06:50:19.879472
| 2020-08-31T17:46:26
| 2020-08-31T17:46:26
| 290,067,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,376
|
py
|
import re
floating_point_regex = '[0-9]*\.[0-9]*'
def do_operation(element1, element2, operator):
if operator == '+':
return element1 + element2
if operator == '-':
return element1 - element2
if operator == '*':
return element1 * element2
if operator == '/':
return int(element1 / element2)
if operator == '%':
return element1 % element2
def rpn_evaluation(rpn_exp):
results_list = []
operator_list = ['+', '-', '*', '/', '%']
try:
for element in rpn_exp.split():
if element in operator_list:
operator2 = results_list.pop()
operator1 = results_list.pop()
results_list.append(do_operation(operator1, operator2, element))
elif element.isnumeric():
results_list.append(int(element))
elif re.search(floating_point_regex, element):
raise Exception('Floating-point numbers are not accepted.')
else:
raise Exception('Invalid character')
except IndexError:
print('Invalid RPN expression')
return results_list.pop() if len(results_list) > 0 else 0
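# Worked example (added note): for "3 4 + 2 *", 3 and 4 are pushed, '+' pops them and
# pushes 7, 2 is pushed, and '*' pops 7 and 2, leaving 14 as the result.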
if __name__ == '__main__':
print('Type the RPN expression that you want to evaluate:')
rpn_exp = input()
print('The result of the RPN expression is:', rpn_evaluation(rpn_exp))
|
[
"noreply@github.com"
] |
noreply@github.com
|
6e890dcf23489e8e89080c6b65f3762b23bdff4d
|
72a22cde6b6ca91255f25a931909502115e4e47c
|
/Alfred/SwitchLayoutWorkflow/set.py
|
4ae6ae60a0486bf6d86b48325f6a942a3ddc711a
|
[] |
no_license
|
DATADEER/dvorak-mac-setup
|
52de6f0062e75981cf6a0c6bc91de92f6095b24a
|
2f5d0eb450be9c02fd74285cd526715abe358941
|
refs/heads/master
| 2020-05-17T08:00:25.408894
| 2020-03-15T11:42:10
| 2020-03-15T11:42:10
| 183,594,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 864
|
py
|
import sys
import json
from os.path import expanduser
from collections import OrderedDict
import subprocess
CHOSEN_PROFILE = sys.argv[1]
CONFIG_PATH = '.config/karabiner/karabiner.json'
home = expanduser("~")
config = {}
with open('{}/{}'.format(home, CONFIG_PATH)) as conf_file:
config = json.load(conf_file, object_pairs_hook=OrderedDict)
for profile in config['profiles']:
profile['selected'] = profile['name'] == CHOSEN_PROFILE
with open('{}/{}'.format(home, CONFIG_PATH), 'w') as conf_file:
conf_file.write(json.dumps(config, indent=4, separators=(',', ': ')))
#log available keyboard layouts with issw -l
if(CHOSEN_PROFILE == "DVORAK" ):
#switch to US Layout
subprocess.run(["/usr/local/bin/issw", "com.apple.keylayout.US"])
else:
#switch to DEUTSCH Layout
subprocess.run(["/usr/local/bin/issw", "com.apple.keylayout.German"])
|
[
"konto@datadeer.de"
] |
konto@datadeer.de
|
da3f5d0d4b3c71ac3db45cece6411a3233f8b68a
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/webpubsub/azure-mgmt-webpubsub/generated_samples/web_pub_sub_replicas_create_or_update.py
|
81ff6144e4226d349866642540011deb03744386
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,920
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.webpubsub import WebPubSubManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-webpubsub
# USAGE
python web_pub_sub_replicas_create_or_update.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = WebPubSubManagementClient(
credential=DefaultAzureCredential(),
subscription_id="00000000-0000-0000-0000-000000000000",
)
response = client.web_pub_sub_replicas.begin_create_or_update(
resource_group_name="myResourceGroup",
resource_name="myWebPubSubService",
replica_name="myWebPubSubService-eastus",
parameters={
"location": "eastus",
"properties": {},
"sku": {"capacity": 1, "name": "Premium_P1", "tier": "Premium"},
"tags": {"key1": "value1"},
},
).result()
print(response)
# x-ms-original-file: specification/webpubsub/resource-manager/Microsoft.SignalRService/preview/2023-06-01-preview/examples/WebPubSubReplicas_CreateOrUpdate.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
6d625de9d95abca7e287fd3c385bb06c6b57b4f9
|
82cd87ea45ce91bf7cc6d60a8536c39676ca7689
|
/eval.py
|
20f28b8bec5eb3d1c886dcc50f2a24ac59a6e38f
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
gtesei/ebm-anatomy
|
2be6bde61eeaa558198755b2535bbd4ec1958ef5
|
24c819b7239f554c8edc46c09085e129922962d2
|
refs/heads/master
| 2022-08-30T02:46:37.456060
| 2020-05-20T02:35:44
| 2020-05-20T02:35:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,642
|
py
|
##############################
# ## EVALUATE TRAINED EBM ## #
##############################
import torch as t
import torchvision.transforms as tr
import torchvision.datasets as datasets
import matplotlib.pyplot as plt
import json
import os
from nets import VanillaNet, NonlocalNet
from utils import download_flowers_data, plot_ims
# directory for experiment results
EXP_DIR = './out_eval/flowers_convergent_eval_1/'
# json file with experiment config
CONFIG_FILE = './config_locker/eval_flowers_convergent.json'
#######################
# ## INITIAL SETUP ## #
#######################
# load experiment config
with open(CONFIG_FILE) as file:
config = json.load(file)
# make directory for saving results
if os.path.exists(EXP_DIR):
# prevents overwriting old experiment folders by accident
raise RuntimeError('Folder "{}" already exists. Please use a different "EXP_DIR".'.format(EXP_DIR))
else:
os.makedirs(EXP_DIR)
for folder in ['code']:
os.mkdir(EXP_DIR + folder)
# save copy of code in the experiment folder
def save_code():
def save_file(file_name):
file_in = open('./' + file_name, 'r')
file_out = open(EXP_DIR + 'code/' + os.path.basename(file_name), 'w')
for line in file_in:
file_out.write(line)
for file in ['eval.py', 'nets.py', 'utils.py', CONFIG_FILE]:
save_file(file)
save_code()
# set seed for cpu and CUDA, get device
t.manual_seed(config['seed'])
if t.cuda.is_available():
t.cuda.manual_seed_all(config['seed'])
device = t.device('cuda' if t.cuda.is_available() else 'cpu')
####################
# ## EVAL SETUP # ##
####################
print('Setting up network...')
# set up network
net_bank = {'vanilla': VanillaNet, 'nonlocal': NonlocalNet}
f = net_bank[config['net_type']](n_c=config['im_ch'])
# load saved weights
f.load_state_dict(t.load(config['net_weight_path'], map_location=lambda storage, loc: storage.cpu()))
# put net on device
f.to(device)
# temperature from training
if config['train_epsilon'] > 0:
temp = config['temp_factor'] * (config['train_epsilon'] ** 2) / 2
else:
temp = config['temp_factor']
print('Processing initial MCMC states...')
if config['mcmc_init'] == 'uniform':
q = 2 * t.rand([config['batch_size'], config['im_ch'], config['im_sz'], config['im_sz']]).to(device) - 1
elif config['mcmc_init'] == 'gaussian':
q = t.randn([config['batch_size'], config['im_ch'], config['im_sz'], config['im_sz']]).to(device)
else:
# make tensor of training data
if config['mcmc_init'] == 'flowers':
download_flowers_data()
data = {'cifar10': lambda path, func: datasets.CIFAR10(root=path, transform=func, download=True),
'mnist': lambda path, func: datasets.MNIST(root=path, transform=func, download=True),
'flowers': lambda path, func: datasets.ImageFolder(root=path, transform=func)}
transform = tr.Compose([tr.Resize(config['im_sz']),
tr.CenterCrop(config['im_sz']),
tr.ToTensor(),
tr.Normalize(tuple(0.5*t.ones(config['im_ch'])), tuple(0.5*t.ones(config['im_ch'])))])
q = t.stack([x[0] for x in data[config['mcmc_init']]('./data/' + config['mcmc_init'], transform)]).to(device)
# get a random sample of initial states from image bank
x_s_t_0 = q[t.randperm(q.shape[0])[0:config['batch_size']]]
################################
# ## FUNCTIONS FOR SAMPLING ## #
################################
# langevin equation without MH adjustment
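# (added note) The unadjusted update applied inside the loop below is
#   x_{t+1} = x_t - (epsilon^2 / 2) * grad_x[ f(x_t) / temp ] + epsilon * N(0, I),
# falling back to a plain gradient step when config['epsilon'] == 0.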
def langevin_grad():
x_s_t = t.autograd.Variable(x_s_t_0.clone(), requires_grad=True)
# sampling records
grads = t.zeros(config['num_mcmc_steps'], config['batch_size'])
ens = t.zeros(config['num_mcmc_steps'], config['batch_size'])
# iterative langevin updates of MCMC samples
for ell in range(config['num_mcmc_steps']):
en = f(x_s_t) / temp
ens[ell] = en.detach().cpu()
grad = t.autograd.grad(en.sum(), [x_s_t])[0]
if config['epsilon'] > 0:
x_s_t.data += - ((config['epsilon']**2)/2) * grad + config['epsilon'] * t.randn_like(x_s_t)
grads[ell] = ((config['epsilon']**2)/2) * grad.view(grad.shape[0], -1).norm(dim=1).cpu()
else:
x_s_t.data += - grad
grads[ell] = grad.view(grad.shape[0], -1).norm(dim=1).cpu()
if ell == 0 or (ell + 1) % config['log_freq'] == 0 or (ell + 1) == config['num_mcmc_steps']:
print('Step {} of {}. Ave. En={:>14.9f} Ave. Grad={:>14.9f}'.
format(ell+1, config['num_mcmc_steps'], ens[ell].mean(), grads[ell].mean()))
return x_s_t.detach(), ens, grads
# langevin equation with MH adjustment
def langevin_mh():
x_s_t = t.autograd.Variable(x_s_t_0.clone(), requires_grad=True)
# sampling records
ens = t.zeros(config['num_mcmc_steps'], config['batch_size'])
grads = t.zeros(config['num_mcmc_steps'], config['batch_size'])
accepts = t.zeros(config['num_mcmc_steps'])
# iterative langevin updates of MCMC samples
for ell in range(config['num_mcmc_steps']):
# get energy and gradient of current states
en = f(x_s_t) / temp
ens[ell] = en.detach().cpu()
grad = t.autograd.grad(en.sum(), [x_s_t])[0]
grads[ell] = ((config['epsilon'] ** 2)/2) * grad.view(grad.shape[0], -1).norm(dim=1).cpu()
# get initial gaussian momenta
p = t.randn_like(x_s_t)
# get proposal states
x_prop = x_s_t - ((config['epsilon'] ** 2)/2) * grad + config['epsilon'] * p
# update momentum
en_prop = f(x_prop) / temp
grad_prop = t.autograd.grad(en_prop.sum(), [x_prop])[0]
p_prop = p - (config['epsilon'] / 2) * (grad + grad_prop)
# joint energy of states and auxiliary momentum variables
joint_en_orig = en + 0.5 * t.sum((p ** 2).view(x_s_t.shape[0], -1), 1)
joint_en_prop = en_prop + 0.5 * t.sum((p_prop ** 2).view(x_s_t.shape[0], -1), 1)
# accept or reject states_prop using MH acceptance ratio
accepted_proposals = t.rand_like(en) < t.exp(joint_en_orig - joint_en_prop)
# update only states with accepted proposals
x_s_t.data[accepted_proposals] = x_prop.data[accepted_proposals]
accepts[ell] = float(accepted_proposals.sum().cpu()) / float(config['batch_size'])
if ell == 0 or (ell + 1) % config['log_freq'] == 0 or (ell + 1) == config['num_mcmc_steps']:
print('Step {} of {}. Ave. En={:>14.9f} Ave. Grad={:>14.9f} Accept Rate={:>14.9f}'.
format(ell+1, config['num_mcmc_steps'], ens[ell].mean(), grads[ell].mean(), accepts[ell]))
return x_s_t.detach(), ens, grads, accepts
###################################
# ## SAMPLE FROM LEARNED MODEL ## #
###################################
print('Sampling for {} Langevin steps.'.format(config['num_mcmc_steps']))
if config['use_mh_langevin']:
x_s_t, en_record, grad_record, accept_record = langevin_mh()
plt.plot(accept_record.numpy())
plt.savefig(EXP_DIR + 'accept.png')
plt.close()
else:
x_s_t, en_record, grad_record = langevin_grad()
# visualize initial and synthesized images
plot_ims(EXP_DIR + 'initial_states.png', x_s_t_0)
plot_ims(EXP_DIR + 'sample_states.png', x_s_t)
# plot diagnostics
plt.plot(en_record.numpy())
plt.title('Energy over sampling path')
plt.xlabel('Langevin step')
plt.ylabel('energy')
plt.savefig(EXP_DIR + 'en.png')
plt.close()
plt.plot(grad_record.numpy())
plt.title('Gradient magnitude over sampling path')
plt.xlabel('Langevin step')
plt.ylabel('Gradient magnitude')
plt.savefig(EXP_DIR + 'grad.png')
plt.close()
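# --- Illustrative sketch (added commentary, not part of the original script) ---
# A minimal, self-contained example of the same unadjusted Langevin update used
# in langevin_grad(), applied to a toy quadratic energy E(x) = 0.5 * ||x||^2,
# whose stationary distribution is approximately a standard normal.
# The names toy_energy, toy_eps and toy_steps are assumptions made only for this sketch.
def toy_energy(x):
    return 0.5 * (x ** 2).sum(dim=1)

def toy_langevin(x_init, toy_eps=0.05, toy_steps=200):
    x = x_init.clone().requires_grad_(True)
    for _ in range(toy_steps):
        toy_grad = t.autograd.grad(toy_energy(x).sum(), [x])[0]
        # x <- x - (eps^2 / 2) * grad E(x) + eps * N(0, I)
        x.data += -((toy_eps ** 2) / 2) * toy_grad + toy_eps * t.randn_like(x)
    return x.detach()

toy_samples = toy_langevin(t.randn(64, 2))
print('Toy Langevin sample std (should be near 1):', toy_samples.std().item())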
|
[
"point0bar1@gmail.com"
] |
point0bar1@gmail.com
|
ecc3ad925d8cd3f872845d9ba866ab7860df6f03
|
328578dc61ddfef9959e0cc6b8a0c4f95c272423
|
/web_crawler_demo/data_store_demo/csv_store.py
|
79f47abd403a63b79bcc1e47ef62387b5dde5190
|
[] |
no_license
|
newiflin/web_crawle
|
0a5bb3b0b4226d5993b39b256d2d51ececc26a4b
|
83d09119bc25be3b425cfcf5fb1c84a57a3dab67
|
refs/heads/master
| 2020-05-21T18:21:19.472825
| 2019-07-28T09:09:09
| 2019-07-28T09:09:09
| 186,131,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,516
|
py
|
import pandas as pd
import csv
# CSV demo: saving delimiter-separated text with the csv module
with open('data.csv', 'w') as csvfile:
    writer = csv.writer(csvfile, delimiter=' ')  # delimiter specifies the field separator
writer.writerow(['id', 'name', 'old'])
writer.writerow(['101', 'Bob', '23'])
writer.writerow(['102', 'Tim', '22'])
writer.writerow(['103', 'Lisa', '30'])
with open('data.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['id', 'name', 'old'])
writer.writerows([['101', 'Bob', '23'], ['102', 'Tim', '22'], ['103', 'Lisa', '30']])
with open('data.csv', 'w') as csvfile:  # write rows as dictionaries
fieldnames = ['id', 'name', 'old']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
writer.writerow({'id': '101', 'name': 'Bob', 'old': '23'})
writer.writerow({'id': '102', 'name': 'Tim', 'old': '22'})
writer.writerow({'id': '103', 'name': 'Lisa', 'old': '30'})
with open('data.csv', 'a', encoding='utf-8') as csvfile:  # append rows as dictionaries
fieldnames = ['id', 'name', 'old']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
writer.writerow({'id': '201', 'name': 'newiflin', 'old': '23'})
writer.writerow({'id': '202', 'name': '思绪', 'old': '25'})
writer.writerow({'id': '203', 'name': '紫薯', 'old': '19'})
with open('data.csv', 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
print(row)
df = pd.read_csv('data.csv')
print(df)
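# --- Illustrative sketch (added, not part of the original demo) ---
# The same write/read round trip can be done with pandas alone; the file name
# 'data_pandas.csv' is an assumption made only for this example.
df_out = pd.DataFrame({'id': ['104', '105'],
                       'name': ['Ann', 'Joe'],
                       'old': ['28', '31']})
df_out.to_csv('data_pandas.csv', index=False, encoding='utf-8')  # write without the index column
print(pd.read_csv('data_pandas.csv'))  # read it back as a DataFrame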
|
[
"newiflin@gmail.com"
] |
newiflin@gmail.com
|
30fed9cd0fbcc9ea11672e3c32d6f34d4cb8d46f
|
20bf84daa3894ee5625413140913350328d0d3b1
|
/data_example/practice_data.py
|
6a4e521b4dc3ac4857f6e6c145d5fff70c2e6cb1
|
[] |
no_license
|
jinsuyun/DataAnalytics
|
f9d28c424946fd2279cfbfe4ca2ffb314156ad97
|
8c60c7352aaebb421bc54e20934550e95096482f
|
refs/heads/master
| 2020-06-18T23:11:35.589302
| 2019-07-31T08:51:17
| 2019-07-31T08:51:17
| 196,487,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
import pandas as pd
df = pd.read_csv('adult.data', header=None)
# data basic
print("SIZE")
print(df.size)
print("SHAPE")
print(df.shape)  # number of rows x columns
print("BEFORE COLUMNS")
print(df.columns)
df.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation',
'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'wage']
print("AFTER COLUMNS")
print(df.columns)
print("DTYPES")
print(df.dtypes)
print("HEAD")
print(df.head())
print("TAIL")
print(df.tail())
# data summary
print("DESCRIBE")
print(df.describe())  # describe() - summary statistics for each attribute
print("MEAN")
print(df.mean())  # mean() - averages of the 6 numerical columns (out of 14 attributes)
print("MODE")
print(df.mode())  # mode() - most frequent value, mainly useful for the categorical columns
# Details
print("EDUCATION UNIQUE")
print(df.education.unique())  # all distinct values in the education column
print("EDUCATION VALUE COUNT")
print(df.education.value_counts())  # count of each value
print("WAGE VALUE COUNT")
print(df['wage'].value_counts())
print("WAGE AGE MEAN")
print(df.groupby(['wage'])['age'].mean())  # mean age per wage group
print("WAGE AGE STD")
print(df.groupby(['wage'])['age'].std())  # standard deviation of age per wage group
print("CAPITAL GAIN CORR AGE")
print(df['capital-gain'].corr(df['age']))
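# --- Illustrative sketch (added, not part of the original exercise) ---
# groupby().agg() gives several statistics in one call; this reuses the same
# df and renamed columns defined above.
print("WAGE AGE MEAN/STD (agg)")
print(df.groupby('wage')['age'].agg(['mean', 'std']))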
|
[
"say2dbs@ajou.ac.kr"
] |
say2dbs@ajou.ac.kr
|
3a46b739fdd3269370d45b82b4103d66bc0a5353
|
1718a0e60b3df6bb23ea50e57bc2a39e268c0d53
|
/store_app/views.py
|
a452b60bee841fcbf43da93e842bf057b9cac01a
|
[] |
no_license
|
ckizer86/final
|
551be3fc3e0e6021a5103acc645238f0d5ddc905
|
c6fd0fd8ffe46c23d9fe6f6b7138cce44b32fa1c
|
refs/heads/main
| 2023-05-28T18:50:48.939996
| 2021-06-08T23:36:27
| 2021-06-08T23:36:27
| 374,507,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,172
|
py
|
from django.db.models import fields
from django.shortcuts import render, redirect
from django.http.response import JsonResponse, HttpResponse
from django.views.generic import FormView
from django.urls import reverse
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
import stripe
from django.contrib import messages
import bcrypt
from time import gmtime, localtime, strftime
from datetime import date, datetime
from .models import *
import ast
# payments/views.py
@csrf_exempt
def stripe_webhook(request):
stripe.api_key = settings.STRIPE_SECRET_KEY
endpoint_secret = settings.STRIPE_ENDPOINT_SECRET
payload = request.body
sig_header = request.META['HTTP_STRIPE_SIGNATURE']
event = None
try:
event = stripe.Webhook.construct_event(
payload, sig_header, endpoint_secret
)
except ValueError as e:
# Invalid payload
return HttpResponse(status=400)
except stripe.error.SignatureVerificationError as e:
# Invalid signature
return HttpResponse(status=400)
# Handle the checkout.session.completed event
if event['type'] == 'checkout.session.completed':
print("Payment was successful.")
# TODO: run some custom code here
return HttpResponse(status=200)
def SuccessView(request):
return render(request, "success.html")
def CancelledView(request):
return render(request, "cancelled.html")
@csrf_exempt
def create_checkout_session(request):
if request.method == 'GET':
domain_url = 'http://localhost:8000/'
stripe.api_key = settings.STRIPE_SECRET_KEY
try:
# Create new Checkout Session for the order
# Other optional params include:
# [billing_address_collection] - to display billing address details on the page
# [customer] - if you have an existing Stripe Customer ID
# [payment_intent_data] - capture the payment later
# [customer_email] - prefill the email input in the form
# For full details see https://stripe.com/docs/api/checkout/sessions/create
# ?session_id={CHECKOUT_SESSION_ID} means the redirect will have the session ID set as a query param
checkout_session = stripe.checkout.Session.create(
client_reference_id=request.user.id if request.user.is_authenticated else None,
success_url=domain_url + 'success?session_id={CHECKOUT_SESSION_ID}',
cancel_url=domain_url + 'cancelled/',
payment_method_types=['card'],
mode='payment',
line_items=[
{
'name': 'T-shirt',
'quantity': 1,
'currency': 'usd',
'amount': '2000',
}
]
)
return JsonResponse({'sessionId': checkout_session['id']})
except Exception as e:
return JsonResponse({'error': str(e)})
# new
@csrf_exempt
def stripe_config(request):
if request.method == 'GET':
stripe_config = {'publicKey': settings.STRIPE_PUBLISHABLE_KEY}
return JsonResponse(stripe_config, safe=False)
# Create your views here.
def index(request):
context={
"all_products": Product.objects.all(),
"all_categories": Category.objects.all(),
"all_stores": Store.objects.all(),
}
return render(request, "index.html", context)
def login_page(request):
if "user_id" in request.session:
return redirect ('/dashboard')
return render(request, "login.html")
def login(request):
if request.method == "POST":
errors = User.objects.loginvalidation(request.POST)
if errors:
for error in errors.values():
messages.error(request,error)
return redirect('/login')
email = request.POST['email']
logged_user = User.objects.filter(email=email)
logged_user = logged_user[0]
if bcrypt.checkpw(request.POST['pw'].encode(), logged_user.password.encode()):
request.session["user_id"] = logged_user.id
request.session["username"] = f"{logged_user.first_name} {logged_user.last_name}"
return redirect('/dashboard')
else:
messages.error(request, "Invalid password")
return redirect('/login')
return redirect('/login')
def register_page(request):
return render(request, "register.html")
def register(request):
if request.method == "POST":
errors = User.objects.registervalidation(request.POST)
if errors:
for error in errors.values():
messages.error(request,error)
return redirect('/register')
first_name = request.POST['first_name']
last_name = request.POST['last_name']
email = request.POST['email']
password = bcrypt.hashpw(request.POST["pw"].encode(), bcrypt.gensalt()).decode()
dob = request.POST['dob']
address_1 = request.POST['address1']
address_2 = request.POST['address2']
city = request.POST['city']
state = request.POST['state']
zip = request.POST['zip']
user = User.objects.create(first_name=first_name, last_name=last_name, email=email, password=password, dob=dob, address_1=address_1, address_2=address_2, city=city, state=state, zip=zip)
request.session["user_id"] = user.id
request.session["username"] = f"{user.first_name} {user.last_name}"
return redirect('/dashboard')
return redirect('/register')
def category(request, id):
cat = Category.objects.get(id=id)
context={
"catproducts": cat.product.all(),
"all_categories": Category.objects.all(),
"category": cat,
}
return render(request, "category.html", context)
def product(request, id):
productid = id
productinfo = Product.objects.get(id=productid)
if "user_id" not in request.session:
context = {
"product": productinfo,
"all_categories": Category.objects.all(),
}
return render(request, "product.html", context)
userid = request.session["user_id"]
user = User.objects.get(id=userid)
context = {
"product": productinfo,
"all_categories": Category.objects.all(),
"likes": productinfo.likes.filter(id=userid),
"user": user,
}
return render(request, "product.html", context)
def addcat(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
if user.level != 3:
return redirect('/dashboard')
if request.method == "POST":
errors = User.objects.catvalidation(request.POST)
if errors:
for error in errors.values():
messages.error(request,error)
return redirect('/admin/add_product')
name = request.POST['name']
Category.objects.create(name=name)
return redirect('/admin/add_product')
return redirect('/admin')
def addcart(request):
if "user_id" not in request.session:
return redirect ('/login')
if request.method == "POST":
userid = request.session["user_id"]
pid = request.POST['pid']
quantity = int(request.POST['quantity'])
user = User.objects.get(id=userid)
product = Product.objects.get(id=pid)
product.stock = product.stock - quantity
product.save()
name = product.name
amount = product.amount
pic = product.pic
total = user.total
for count in range(0, quantity):
count += 1
cart = Cart.objects.create(user=user, pid=pid, pic=pic, name=name, amount=amount)
user.total = user.total + product.amount
user.save()
return redirect('/cart')
def removecart(request,id):
if "user_id" not in request.session:
return redirect ('/login')
pid = id
userid = request.session["user_id"]
user = User.objects.get(id=userid)
cart = user.usecart.all()
product = Product.objects.get(id=pid)
for item in cart:
if item.pid == pid:
rid = item.id
removeitem = Cart.objects.get(id=rid)
product.stock += 1
product.save()
user.total = user.total - product.amount
user.save()
removeitem.delete()
return redirect('/cart')
return redirect('/cart')
def cart(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
subtotal = user.total
tax = float(subtotal * .0825)
shipping = float(5.00)
total = float(subtotal + tax + shipping)
context = {
"all_categories": Category.objects.all(),
"cart_products": user.usecart.all(),
"user": user,
"subtotal": subtotal,
"shipping": shipping,
"tax": tax,
"total": total,
}
return render(request, "cart.html", context)
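# --- Illustrative aside (added, not part of the original app) ---
# The float arithmetic in cart() can accumulate rounding error for currency;
# Python's decimal module avoids that. money() is a hypothetical helper shown
# only as a sketch of the alternative.
from decimal import Decimal, ROUND_HALF_UP

def money(value):
    """Round a numeric value to two decimal places, rounding halves up."""
    return Decimal(str(value)).quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)

# Example: money(19.99 * 0.0825) == Decimal('1.65')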
def likeditems(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
context = {
"liked_products": user.userlike.all(),
"all_categories": Category.objects.all(),
}
return render(request, "like.html", context)
def likeitem(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
if request.method == "POST":
id = request.POST['postid']
product = Product.objects.get(id=id)
product.likes.add(user)
return redirect(f'/product/{id}')
return redirect('/')
def unlikeitem(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
if request.method == "POST":
id = request.POST['postid']
product = Product.objects.get(id=id)
product.likes.remove(user)
return redirect(f'/product/{id}')
return redirect('/')
def dashboard(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
if user.level == 3:
return redirect('/admin')
if "user_id" not in request.session:
return redirect ('/login')
return render(request, "dashboard.html")
def accountinfo(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
month = '{:02d}'.format(user.dob.month)
day = '{:02d}'.format(user.dob.day)
context = {
"user": user,
"month": month,
"day": day,
}
return render(request, "accountinfo.html", context)
def accountupdate(request):
if request.method == "POST":
first_name = request.POST['first_name']
last_name = request.POST['last_name']
email = request.POST['email']
password = bcrypt.hashpw(request.POST["new_pw"].encode(), bcrypt.gensalt()).decode()
dob = request.POST['dob']
address1 = request.POST['address1']
address2 = request.POST['address2']
city = request.POST['city']
state = request.POST['state']
zip = request.POST['zip']
userid = request.session["user_id"]
user = User.objects.get(id=userid)
user.first_name = first_name
user.last_name = last_name
user.email = email
user.password = password
user.dob = dob
user.address_1 = address1
user.address_2 = address2
user.city = city
user.state = state
user.zip = zip
user.save()
return redirect('/dashboard/account')
return redirect('/')
def recentorders(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
userorders = user.userorders.all()
context={
"userorders": userorders,
}
return render(request, "recentorders.html", context)
def submitorder(request):
if "user_id" not in request.session:
return redirect ('/login')
if request.method == "POST":
userid = request.session["user_id"]
user = User.objects.get(id=userid)
subtotal = ast.literal_eval(request.POST['subtotal'])
tax = ast.literal_eval(request.POST['tax'])
shipping = ast.literal_eval(request.POST['shipping'])
usercart = user.usecart.all()
productlist = {"product":[]}
total = float(subtotal + tax + shipping)
for product in usercart:
rid = product.id
productid = Cart.objects.get(id=rid)
pid = productid.pid
orderproduct = Product.objects.get(id=pid)
pamount = str("{:.2f}".format(orderproduct.amount))
prodid = str(orderproduct.id)
productlist["product"].append('Product ID: ' + prodid + ' - ' + orderproduct.name + " : " + pamount)
destroyitem = Cart.objects.get(id=rid)
destroyitem.delete()
Order.objects.create(product=productlist, user=user, subtotal=subtotal, tax=tax, total=total, shipping=shipping)
user.total = 0
user.save()
return redirect('/dashboard')
return redirect('/')
def vieworder(request, id):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
for order in user.userorders.all():
if order.id == id:
order = Order.objects.get(id=id)
product_dict = ast.literal_eval(order.product)
context = {
"order":order,
"productlist": product_dict,
}
return render(request, "vieworder.html", context)
return redirect('/dashboard')
def admindash(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
if user.level != 3:
return redirect('/dashboard')
return render(request, "admindashboard.html")
def adminneworders(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
if user.level != 3:
return redirect('/dashboard')
context ={
"orders":Order.objects.all(),
}
return render(request, "adminneworders.html", context)
def adminpastorders(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
if user.level != 3:
return redirect('/dashboard')
context ={
"orders":Order.objects.all(),
}
return render(request, "adminpastorders.html", context)
def adminvieworder(request, id):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
if user.level != 3:
return redirect('/dashboard')
order = Order.objects.get(id=id)
product_dict = ast.literal_eval(order.product)
context = {
"order": order,
"productlist": product_dict,
}
return render(request, "adminvieworder.html", context)
def updatetracking(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
if user.level != 3:
return redirect('/dashboard')
if request.method == "POST":
tracking = request.POST['tracking']
oid = request.POST['oid']
order = Order.objects.get(id=oid)
order.tracking = tracking
order.save()
return redirect(f'/admin/order/{oid}')
return redirect('/admin')
def products(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
if user.level != 3:
return redirect('/dashboard')
context = {
"all_products": Product.objects.all(),
"all_categories": Category.objects.all(),
}
return render(request, "products.html", context)
def addprod(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
if user.level != 3:
return redirect('/dashboard')
context = {
'all_categories': Category.objects.all(),
}
return render(request, "addproduct.html", context)
def addingprod(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
if user.level != 3:
return redirect('/dashboard')
if request.method == "POST":
errors = Product.objects.createproduct(request.POST)
if errors:
for error in errors.values():
messages.error(request,error)
return redirect('/admin/add_product')
name = request.POST['name']
desc = request.POST['desc']
amount = request.POST['amt']
pic = request.POST['pic']
stock = request.POST['stock']
product = Product.objects.create(name=name, desc=desc, amount=amount, pic=pic, stock=stock)
categories = request.POST.getlist('categories')
for category in categories:
product.categories.add(category)
return redirect(f'/product/{product.id}')
return redirect('/admin/products')
def editprod(request, id):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
if user.level != 3:
return redirect('/dashboard')
product = Product.objects.get(id=id)
thesecats = product.categories.all()
context = {
"product": product,
"excats": Category.objects.exclude(product=id),
"currentcats": thesecats,
}
return render(request, "editproduct.html", context)
def edittingprod(request):
if request.method == "POST":
name = request.POST['name']
desc = request.POST['desc']
amount = request.POST['amt']
pic = request.POST['pic']
stock = request.POST['stock']
id = request.POST['pid']
all_categories = Category.objects.all()
product = Product.objects.get(id=id)
for category in all_categories:
product.categories.remove(category)
categories = request.POST.getlist('categories')
for newcategory in categories:
product.categories.add(newcategory)
product.name = name
product.desc = desc
product.amount = amount
product.pic = pic
product.stock = stock
product.save()
return redirect(f'/admin/product/edit/{id}')
return redirect('/')
def storeinfo(request):
if "user_id" not in request.session:
return redirect ('/login')
userid = request.session["user_id"]
user = User.objects.get(id=userid)
if user.level != 3:
return redirect('/dashboard')
context = {
"store": Store.objects.all()
}
return render(request, "store.html", context)
def createstore(request):
if request.method == "POST":
name = request.POST['storename']
address1 = request.POST['address1']
address2 = request.POST['address2']
city = request.POST['city']
state = request.POST['state']
zip = request.POST['zip']
Store.objects.create(name=name, address_1=address1, address_2=address2, city=city, state=state, zip=zip)
return redirect('/admin/store')
return redirect('/')
def editstore(request):
if request.method == "POST":
name = request.POST['storename']
address1 = request.POST['address1']
address2 = request.POST['address2']
city = request.POST['city']
state = request.POST['state']
zip = request.POST['zip']
storeid = request.POST['storeid']
store = Store.objects.get(id=storeid)
store.name = name
store.address_1 = address1
store.address_2 = address2
store.city = city
store.state = state
store.zip = zip
store.save()
return redirect('/admin/store')
return redirect('/')
def logout(request):
request.session.flush()
return redirect('/')
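# --- Illustrative sketch (added, not part of the original app) ---
# The repeated `if "user_id" not in request.session` guard in the views above
# could be factored into a small decorator; session_required is a hypothetical
# name used only for this sketch.
from functools import wraps

def session_required(view_func):
    """Redirect to /login unless a user_id is stored in the session."""
    @wraps(view_func)
    def wrapper(request, *args, **kwargs):
        if "user_id" not in request.session:
            return redirect('/login')
        return view_func(request, *args, **kwargs)
    return wrapper

# Usage (illustrative):
# @session_required
# def dashboard(request):
#     ...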
|
[
"ckizer86@yahoo.com"
] |
ckizer86@yahoo.com
|